code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package types /* This slightly unconventional use of type aliasing is meant to provide a hook for documentation of the different uses of interface{} that exists in QFrame. Since there is nothing like a union or a sum type in Go, QFrame settles for the use of interface{} for some input. Hopefully this construct says a bit more than nothing about the empty interfaces used. */ /* DataSlice can be a slice of any of the supported data types. The following types are currently supported: []bool []float64 []int []string []*string */ type DataSlice = interface{} /* SliceFuncOrBuiltInId can be a function taking a slice of type T and returning a value of type T. For example: func(x []float64) float64 func(x []int) int func(x []*string) *string func(x []bool) bool Or it can be a string identifying a built in function. For example: "sum" IMPORTANT: Reference arguments (eg. slices) must never be assumed to be valid after that the passed function returns. Under the hood reuse and other performance enhancements may trigger unexpected behaviour if this is ever done. If, for some reason, you want to retain the data a copy must be made. */ type SliceFuncOrBuiltInId = interface{} /* DataFuncOrBuiltInId can be a function taking one argument of type T and returning a value of type U. For example: func(x float64) float64 func(x float64) int Or it can be a function taking zero arguments returning a value of type T. For example: func() float64 func() int Or it can be a function taking two arguments of type T and returning a value of type T. Note that arguments and return values must all have the same type in this case. For example: func(x, y float64) float64 func(x, y int) int Or it can be a string identifying a built in function. For example: "abs" IMPORTANT: Pointer arguments (eg. *string) must never be assumed to be valid after that the passed function returns. Under the hood reuse and other performance enhancements may trigger unexpected behaviour if this is ever done. 
If, for some reason, you want to retain the data a copy must be made. */ type DataFuncOrBuiltInId = interface{}
types/aliases.go
0.668339
0.627181
aliases.go
starcoder
package containerscan

// nginxScanJSON is a hard-coded container-image scan result for an
// nginx:1.18.0 image, kept as a raw JSON string (fixture data). The payload
// mirrors the scan-result schema visible below: customer/image identifiers, a
// workload id ("wlid"), a timestamp, a list of layers — each with its layer
// hash, parent layer hash and vulnerability entries (CVE name, package,
// severity, link, description, fixedIn versions) — and finally a list of
// dangerous artifact paths found in the image.
// NOTE(review): the customerGUID value contains a "<PASSWORD>" redaction
// placeholder, presumably scrubbed before committing — confirm the fixture
// still parses as intended wherever it is consumed.
var nginxScanJSON = ` { "customerGUID": "1<PASSWORD>-92ce-44f8-914e-cbe71830d566", "imageTag": "nginx:1.18.0", "imageHash": "", "wlid": "wlid://cluster-test/namespace-test/deployment-davidg", "containerName": "nginx-1", "timestamp": 1628091365, "layers": [ { "layerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba", "parentLayerHash": "", "vulnerabilities": [ { "name": "CVE-2009-0854", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "dash", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-0854", "description": "Untrusted search path vulnerability in dash 0.5.4, when used as a login shell, allows local users to execute arbitrary code via a Trojan horse .profile file in the current working directory.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2019-13627", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "libgcrypt20", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13627", "description": "It was discovered that there was a ECDSA timing attack in the libgcrypt20 cryptographic library. Version affected: 1.8.4-5, 1.7.6-2+deb9u3, and 1.6.3-2+deb8u4. 
Versions fixed: 1.8.5-2 and 1.6.3-2+deb8u7.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2021-33560", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "libgcrypt20", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33560", "description": "Libgcrypt before 1.8.8 and 1.9.x before 1.9.3 mishandles ElGamal encryption because it lacks exponent blinding to address a side-channel attack against mpi_powm, and the window size is not chosen appropriately. (There is also an interoperability problem because the selection of the k integer value does not properly consider the differences between basic ElGamal encryption and generalized ElGamal encryption.) This, for example, affects use of ElGamal in OpenPGP.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:1.8.4-5+deb10u1" } ], "relevant": "" }, { "name": "CVE-2021-3345", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "libgcrypt20", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3345", "description": "_gcry_md_block_write in cipher/hash-common.c in Libgcrypt version 1.9.0 has a heap-based buffer overflow when the digest final function sets a large count value. 
It is recommended to upgrade to 1.9.1 or later.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2010-0834", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "base-files", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0834", "description": "The base-files package before 5.0.0ubuntu7.1 on Ubuntu 9.10 and before 5.0.0ubuntu20.10.04.2 on Ubuntu 10.04 LTS, as shipped on Dell Latitude 2110 netbooks, does not require authentication for package installation, which allows remote archive servers and man-in-the-middle attackers to execute arbitrary code via a crafted package.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2018-6557", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "base-files", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-6557", "description": "The MOTD update script in the base-files package in Ubuntu 18.04 LTS before 10.1ubuntu2.2, and Ubuntu 18.10 before 10.1ubuntu6 incorrectly handled temporary files. 
A local attacker could use this issue to cause a denial of service, or possibly escalate privileges if kernel symlink restrictions were disabled.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2013-0223", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0223", "description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the join command, when using the -i switch, which triggers a stack-based buffer overflow in the alloca function.", "severity": "Low", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2015-4041", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4041", "description": "The keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 on 64-bit platforms performs a size calculation without considering the number of bytes occupied by multibyte characters, which allows attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via long UTF-8 strings.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2009-4135", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4135", "description": "The distcheck rule in dist-check.mk 
in GNU coreutils 5.2.1 through 8.1 allows local users to gain privileges via a symlink attack on a file in a directory tree under /tmp.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2015-4042", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4042", "description": "Integer overflow in the keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 might allow attackers to cause a denial of service (application crash) or possibly have unspecified other impact via long strings.", "severity": "Critical", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2013-0221", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0221", "description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the sort command, when using the (1) -d or (2) -M switch, which triggers a stack-based buffer overflow in the alloca function.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2013-0222", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0222", "description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string 
to the uniq command, which triggers a stack-based buffer overflow in the alloca function.", "severity": "Low", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2016-2781", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2781", "description": "chroot in GNU coreutils, when used with --userspec, allows local users to escape to the parent session via a crafted TIOCSTI ioctl call, which pushes characters to the terminal's input buffer.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2017-18018", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "coreutils", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-18018", "description": "In GNU Coreutils through 8.29, chown-core.c in chown and chgrp does not prevent replacement of a plain file with a symlink during use of the POSIX \"-R -L\" options, which allows local users to modify the ownership of arbitrary files by leveraging a race condition.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2021-20193", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "tar", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-20193", "description": "A flaw was found in the src/list.c of tar 1.33 and earlier. This flaw allows an attacker who can submit a crafted input file to tar to cause uncontrolled consumption of memory. 
The highest threat from this vulnerability is to system availability.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2005-2541", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "tar", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2005-2541", "description": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2019-9923", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "tar", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9923", "description": "pax_decode_header in sparse.c in GNU Tar before 1.32 had a NULL pointer dereference when parsing certain archives that have malformed extended headers.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2018-1000654", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "libtasn1-6", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1000654", "description": "GNU Libtasn1-4.13 libtasn1-4.13 version libtasn1-4.13, libtasn1-4.12 contains a DoS, specifically CPU usage will reach 100% when running asn1Paser against the POC due to an issue in _asn1_expand_object_id(p_tree), after a long time, the program will be killed. 
This attack appears to be exploitable via parsing a crafted file.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2011-3374", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "apt", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-3374", "description": "It was found that apt-key in apt, all versions, do not correctly validate gpg keys with the master keyring, leading to a potential man-in-the-middle attack.", "severity": "Medium", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2021-37600", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "util-linux", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37600", "description": "An integer overflow in util-linux through 2.37.1 can potentially cause a buffer overflow if an attacker were able to use system resources in a way that leads to a large number in the /proc/sysvipc/sem file.", "severity": "Unknown", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2007-0822", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "util-linux", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-0822", "description": "umount, when running with the Linux 2.6.15 kernel on Slackware Linux 10.2, allows local users to trigger a NULL dereference and application crash by invoking the program with a pathname for a USB pen drive that was mounted and then physically removed, which might allow the users to obtain sensitive information, including core file contents.", "severity": "Low", "metadata": null, 
"fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2004-1349", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "gzip", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-1349", "description": "gzip before 1.3 in Solaris 8, when called with the -f or -force flags, will change the permissions of files that are hard linked to the target files, which allows local users to view or modify these files.", "severity": "Low", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2004-0603", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "gzip", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-0603", "description": "gzexe in gzip 1.3.3 and earlier will execute an argument when the creation of a temp file fails instead of exiting the program, which could allow remote attackers or local users to execute arbitrary commands, a different vulnerability than CVE-1999-1332.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { "name": "CVE-2010-0002", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "bash", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0002", "description": "The /etc/profile.d/60alias.sh script in the Mandriva bash package for Bash 2.05b, 3.0, 3.2, 3.2.48, and 4.0 enables the --show-control-chars option in LS_OPTIONS, which allows local users to send escape sequences to terminal emulators, or hide the existence of a file, via a crafted filename.", "severity": "Low", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" }, { 
"name": "CVE-2019-18276", "imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be", "imageTag": "", "packageName": "bash", "packageVersion": "", "link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-18276", "description": "An issue was discovered in disable_priv_mode in shell.c in GNU Bash through 5.0 patch 11. By default, if Bash is run with its effective UID not equal to its real UID, it will drop privileges by setting its effective UID to its real UID. However, it does so incorrectly. On Linux and other systems that support \"saved UID\" functionality, the saved UID is not dropped. An attacker with command execution in the shell can use \"enable -f\" for runtime loading of a new builtin, which can be a shared object that calls setuid() and therefore regains privileges. However, binaries running with an effective UID of 0 are unaffected.", "severity": "High", "metadata": null, "fixedIn": [ { "name": "", "imageTag": "", "version": "0:0" } ], "relevant": "" } ], "packageToFile": null }, { "layerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635", "parentLayerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba", "vulnerabilities": [], "packageToFile": null }, { "layerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798", "parentLayerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635", "vulnerabilities": [], "packageToFile": null }, { "layerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd", "parentLayerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798", "vulnerabilities": [], "packageToFile": null }, { "layerHash": "sha256:03f221d9cf00a7077231c6dcac3c95182727c7e7fd44fd2b2e882a01dcda2d70", "parentLayerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd", "vulnerabilities": [], "packageToFile": null } ], 
"listOfDangerousArtifcats": [ "bin/dash", "bin/bash", "usr/bin/curl" ] } `
containerscan/jsonrawscan.go
0.511961
0.41834
jsonrawscan.go
starcoder
package plaid

import (
	"encoding/json"
)

// TransferAuthorizationDecisionRationale The rationale for Plaid's decision regarding a proposed transfer. Will be null for `approved` decisions.
type TransferAuthorizationDecisionRationale struct {
	// A code representing the rationale for permitting or declining the proposed transfer. Possible values are: `NSF` – Transaction likely to result in a return due to insufficient funds. `RISK` - Transaction is high-risk. `MANUALLY_VERIFIED_ITEM` – Item created via same-day micro deposits, limited information available. Plaid can only offer `permitted` as a transaction decision. `LOGIN_REQUIRED` – Unable to collect the account information required for an authorization decision due to Item staleness. Can be rectified using Link update mode. `ERROR` – Unable to collect the account information required for an authorization decision due to an error.
	Code string `json:"code"`
	// A human-readable description of the code associated with a permitted transfer or transfer decline.
	Description string `json:"description"`
	// AdditionalProperties captures any JSON fields not covered by the
	// declared fields above; they round-trip through MarshalJSON.
	AdditionalProperties map[string]interface{}
}

// _TransferAuthorizationDecisionRationale is an alias used during
// unmarshalling to avoid infinite recursion into UnmarshalJSON.
type _TransferAuthorizationDecisionRationale TransferAuthorizationDecisionRationale

// NewTransferAuthorizationDecisionRationale instantiates a new TransferAuthorizationDecisionRationale object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTransferAuthorizationDecisionRationale(code string, description string) *TransferAuthorizationDecisionRationale {
	this := TransferAuthorizationDecisionRationale{}
	this.Code = code
	this.Description = description
	return &this
}

// NewTransferAuthorizationDecisionRationaleWithDefaults instantiates a new TransferAuthorizationDecisionRationale object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTransferAuthorizationDecisionRationaleWithDefaults() *TransferAuthorizationDecisionRationale {
	this := TransferAuthorizationDecisionRationale{}
	return &this
}

// GetCode returns the Code field value
func (o *TransferAuthorizationDecisionRationale) GetCode() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Code
}

// GetCodeOk returns a tuple with the Code field value
// and a boolean to check if the value has been set.
func (o *TransferAuthorizationDecisionRationale) GetCodeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Code, true
}

// SetCode sets field value
func (o *TransferAuthorizationDecisionRationale) SetCode(v string) {
	o.Code = v
}

// GetDescription returns the Description field value
func (o *TransferAuthorizationDecisionRationale) GetDescription() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Description
}

// GetDescriptionOk returns a tuple with the Description field value
// and a boolean to check if the value has been set.
func (o *TransferAuthorizationDecisionRationale) GetDescriptionOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Description, true
}

// SetDescription sets field value
func (o *TransferAuthorizationDecisionRationale) SetDescription(v string) {
	o.Description = v
}

// MarshalJSON serializes the declared fields plus any AdditionalProperties.
// (Cleaned up from the generated form: the `if true { ... }` wrappers around
// the required fields were dead conditions and have been removed.)
func (o TransferAuthorizationDecisionRationale) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"code":        o.Code,
		"description": o.Description,
	}
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}

// UnmarshalJSON populates the declared fields and collects any remaining
// JSON keys into AdditionalProperties.
// Fix over the generated form: the first Unmarshal error is now returned
// immediately instead of being silently overwritten by the second Unmarshal,
// which could previously yield a nil error with an unpopulated struct.
func (o *TransferAuthorizationDecisionRationale) UnmarshalJSON(bytes []byte) (err error) {
	varTransferAuthorizationDecisionRationale := _TransferAuthorizationDecisionRationale{}
	if err = json.Unmarshal(bytes, &varTransferAuthorizationDecisionRationale); err != nil {
		return err
	}
	*o = TransferAuthorizationDecisionRationale(varTransferAuthorizationDecisionRationale)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err != nil {
		return err
	}
	delete(additionalProperties, "code")
	delete(additionalProperties, "description")
	o.AdditionalProperties = additionalProperties
	return nil
}

// NullableTransferAuthorizationDecisionRationale wraps the value with an
// explicit "set" flag so a JSON null can be distinguished from absence.
type NullableTransferAuthorizationDecisionRationale struct {
	value *TransferAuthorizationDecisionRationale
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableTransferAuthorizationDecisionRationale) Get() *TransferAuthorizationDecisionRationale {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableTransferAuthorizationDecisionRationale) Set(val *TransferAuthorizationDecisionRationale) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableTransferAuthorizationDecisionRationale) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableTransferAuthorizationDecisionRationale) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableTransferAuthorizationDecisionRationale returns a wrapper already
// marked as set.
func NewNullableTransferAuthorizationDecisionRationale(val *TransferAuthorizationDecisionRationale) *NullableTransferAuthorizationDecisionRationale {
	return &NullableTransferAuthorizationDecisionRationale{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullableTransferAuthorizationDecisionRationale) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableTransferAuthorizationDecisionRationale) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
plaid/model_transfer_authorization_decision_rationale.go
0.810929
0.675934
model_transfer_authorization_decision_rationale.go
starcoder
package orderedcodec

import (
	"bytes"
	"errors"
	"math"

	"github.com/matrixorigin/matrixone/pkg/container/types"
)

var (
	// errorDoNotComeHere marks switch arms that must be unreachable for
	// supported value types; EncodeKey panics with it on unknown types.
	errorDoNotComeHere = errors.New("do not come here")
)

const (
	// Actually, the 0x00 represents the NULL in encoded bytes.
	// So, 0x00 escaped to {0x00 0xff}
	byteToBeEscaped         byte = 0x00
	byteEscapedToFirstByte  byte = 0x00
	byteEscapedToSecondByte byte = 0xFF

	// suffix for denoting the end of the bytes
	byteForBytesEnding byte = 0x01
)

// EncodeKey encodes the value into the ordered bytes.
// It dispatches on the dynamic type of value; nil encodes as NULL and an
// unsupported type panics with errorDoNotComeHere.
// NOTE(review): the encoding prefix constants (encodingPrefixFor*, nullEncoding,
// encodingfloat*) are declared elsewhere in this package — the exact byte
// layout depends on their values.
func (oe *OrderedEncoder) EncodeKey(data []byte, value interface{}) ([]byte, *EncodedItem) {
	if value == nil {
		return oe.EncodeNull(data)
	}
	switch v := value.(type) {
	case bool:
		return oe.EncodeBool(data, v)
	case int8:
		return oe.EncodeInt8(data, v)
	case int16:
		return oe.EncodeInt16(data, v)
	case int32:
		return oe.EncodeInt32(data, v)
	case int64:
		return oe.EncodeInt64(data, v)
	case types.Date:
		return oe.EncodeDate(data, v)
	case types.Datetime:
		return oe.EncodeDatetime(data, v)
	case uint8:
		return oe.EncodeUint8(data, v)
	case uint16:
		return oe.EncodeUint16(data, v)
	case uint32:
		return oe.EncodeUint32(data, v)
	case uint64:
		return oe.EncodeUint64(data, v)
	case float32:
		return oe.EncodeFloat32(data, v)
	case float64:
		return oe.EncodeFloat64(data, v)
	case []byte:
		return oe.EncodeBytes(data, v)
	case string:
		return oe.EncodeString(data, v)
	default:
		panic(errorDoNotComeHere)
	}
}

// EncodeNull encodes the NULL and appends the result to the buffer
func (oe *OrderedEncoder) EncodeNull(data []byte) ([]byte, *EncodedItem) {
	return append(data, nullEncoding), nil
}

// EncodeBool encodes a bool as uint64 1 (true) or 0 (false).
func (oe *OrderedEncoder) EncodeBool(data []byte, value bool) ([]byte, *EncodedItem) {
	if value {
		return oe.EncodeUint64(data, 1)
	}
	return oe.EncodeUint64(data, 0)
}

// EncodeInt8 widens to int64 and delegates to EncodeInt64.
func (oe *OrderedEncoder) EncodeInt8(data []byte, value int8) ([]byte, *EncodedItem) {
	return oe.EncodeInt64(data, int64(value))
}

// EncodeInt16 widens to int64 and delegates to EncodeInt64.
func (oe *OrderedEncoder) EncodeInt16(data []byte, value int16) ([]byte, *EncodedItem) {
	return oe.EncodeInt64(data, int64(value))
}

// EncodeInt32 widens to int64 and delegates to EncodeInt64.
func (oe *OrderedEncoder) EncodeInt32(data []byte, value int32) ([]byte, *EncodedItem) {
	return oe.EncodeInt64(data, int64(value))
}

// EncodeInt64 appends an order-preserving variable-length encoding of value.
// Negative values get a length-dependent prefix (encodingPrefixForIntegerMinimum
// + 7 down to + 0 as magnitude grows) followed by the big-endian bytes of the
// two's-complement value; non-negative values delegate to EncodeUint64.
func (oe *OrderedEncoder) EncodeInt64(data []byte, value int64) ([]byte, *EncodedItem) {
	if value < 0 {
		if value >= -0xff {
			return append(data, encodingPrefixForIntegerMinimum+7, byte(value)), nil
		} else if value >= -0xffff {
			return append(data, encodingPrefixForIntegerMinimum+6, byte(value>>8), byte(value)), nil
		} else if value >= -0xffffff {
			return append(data, encodingPrefixForIntegerMinimum+5, byte(value>>16), byte(value>>8), byte(value)), nil
		} else if value >= -0xffffffff {
			return append(data, encodingPrefixForIntegerMinimum+4, byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
		} else if value >= -0xffffffffff {
			return append(data, encodingPrefixForIntegerMinimum+3, byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
		} else if value >= -0xffffffffffff {
			return append(data, encodingPrefixForIntegerMinimum+2, byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
		} else if value >= -0xffffffffffffff {
			return append(data, encodingPrefixForIntegerMinimum+1, byte(value>>48), byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
		} else {
			return append(data, encodingPrefixForIntegerMinimum, byte(value>>56), byte(value>>48), byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
		}
	}
	return oe.EncodeUint64(data, uint64(value))
}

// EncodeDate encodes a types.Date via its underlying int32 representation.
func (oe *OrderedEncoder) EncodeDate(data []byte, value types.Date) ([]byte, *EncodedItem) {
	return oe.EncodeInt32(data, int32(value))
}

// EncodeDatetime encodes a types.Datetime via its underlying int64 representation.
func (oe *OrderedEncoder) EncodeDatetime(data []byte, value types.Datetime) ([]byte, *EncodedItem) {
	return oe.EncodeInt64(data, int64(value))
}

// EncodeUint8 widens to uint64 and delegates to EncodeUint64.
func (oe *OrderedEncoder) EncodeUint8(data []byte, value uint8) ([]byte, *EncodedItem) {
	return oe.EncodeUint64(data, uint64(value))
}

// EncodeUint16 widens to uint64 and delegates to EncodeUint64.
func (oe *OrderedEncoder) EncodeUint16(data []byte, value uint16) ([]byte, *EncodedItem) {
	return oe.EncodeUint64(data, uint64(value))
}

// EncodeUint32 widens to uint64 and delegates to EncodeUint64.
func (oe *OrderedEncoder) EncodeUint32(data []byte, value uint32) ([]byte, *EncodedItem) {
	return oe.EncodeUint64(data, uint64(value))
}

// EncodeUint64 encodes the uint64 into ordered bytes with uvarint encoding
// and appends them to the buffer.
// The variable length is encoded into the first byte: small values (up to
// encodingPrefixForSplit) are folded into a single byte relative to
// encodingPrefixForIntegerZero; larger values use encodingPrefixForIntMax - k
// followed by the big-endian significant bytes.
func (oe *OrderedEncoder) EncodeUint64(data []byte, value uint64) ([]byte, *EncodedItem) {
	if value <= encodingPrefixForSplit {
		return append(data, encodingPrefixForIntegerZero+byte(value)), nil
	} else if value <= 0xff {
		return append(data, encodingPrefixForIntMax-7, byte(value)), nil
	} else if value <= 0xffff {
		return append(data, encodingPrefixForIntMax-6, byte(value>>8), byte(value)), nil
	} else if value <= 0xffffff {
		return append(data, encodingPrefixForIntMax-5, byte(value>>16), byte(value>>8), byte(value)), nil
	} else if value <= 0xffffffff {
		return append(data, encodingPrefixForIntMax-4, byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
	} else if value <= 0xffffffffff {
		return append(data, encodingPrefixForIntMax-3, byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
	} else if value <= 0xffffffffffff {
		return append(data, encodingPrefixForIntMax-2, byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
	} else if value <= 0xffffffffffffff {
		return append(data, encodingPrefixForIntMax-1, byte(value>>48), byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
	} else {
		return append(data, encodingPrefixForIntMax, byte(value>>56), byte(value>>48), byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
	}
}

// EncodeUint64ForFloat appends the fixed eight big-endian bytes of value,
// with no length prefix; used as the payload encoding for floats.
func (oe *OrderedEncoder) EncodeUint64ForFloat(data []byte, value uint64) ([]byte, *EncodedItem) {
	return append(data, byte(value>>56),
		byte(value>>48), byte(value>>40), byte(value>>32), byte(value>>24), byte(value>>16), byte(value>>8), byte(value)), nil
}

// EncodeFloat32 widens to float64 and delegates to EncodeFloat64.
func (oe *OrderedEncoder) EncodeFloat32(data []byte, value float32) ([]byte, *EncodedItem) {
	return oe.EncodeFloat64(data, float64(value))
}

// EncodeFloat64 appends an order-preserving encoding of a float64:
// NaN and zero get dedicated single-byte markers; otherwise negative values
// have all bits flipped (so larger magnitude sorts first) under the
// encodingfloatNeg prefix, and positives keep their bits under
// encodingfloatPos, followed by the eight raw bytes.
func (oe *OrderedEncoder) EncodeFloat64(data []byte, value float64) ([]byte, *EncodedItem) {
	if math.IsNaN(value) {
		return append(data, encodingfloatNaN), nil
	} else if value == 0 {
		return append(data, encodingfloatZero), nil
	}
	x := math.Float64bits(value)
	if x&(1<<63) != 0 {
		x = ^x
		data = append(data, encodingfloatNeg)
	} else {
		data = append(data, encodingfloatPos)
	}
	return oe.EncodeUint64ForFloat(data, x)
}

// EncodeBytes encodes the bytes with escaping and appends them to the buffer.
func (oe *OrderedEncoder) EncodeBytes(data []byte, value []byte) ([]byte, *EncodedItem) {
	data = append(data, encodingPrefixForBytes)
	return oe.encodeBytesWithSuffix(data, value, byteForBytesEnding)
}

// encodeBytesWithSuffix encodes the bytes with escaping and a suffix byte.
// The encoded bytes are appended with the suffix slice {0x00, suffix}.
func (oe *OrderedEncoder) encodeBytesWithSuffix(data []byte, value []byte, suffix byte) ([]byte, *EncodedItem) {
	data, _ = oe.encodeBytes(data, value)
	return append(data, byteToBeEscaped, suffix), nil
}

// encodeBytes encodes the bytes with escaping: every 0x00 in value is
// replaced by the pair {0x00, 0xFF} so that 0x00 can serve as a terminator.
func (oe *OrderedEncoder) encodeBytes(data []byte, value []byte) ([]byte, *EncodedItem) {
	for {
		p := bytes.IndexByte(value, byteToBeEscaped)
		if p == -1 {
			break
		}
		// bytes before the escaped byte
		data = append(data, value[:p]...)
		data = append(data, byteEscapedToFirstByte, byteEscapedToSecondByte)
		// rest bytes
		value = value[p+1:]
	}
	return append(data, value...), nil
}

// EncodeString encodes the string into bytes with escaping and appends them to the buffer.
func (oe *OrderedEncoder) EncodeString(data []byte, value string) ([]byte, *EncodedItem) {
	return oe.EncodeBytes(data, []byte(value))
}

// NewOrderedEncoder returns a fresh, stateless OrderedEncoder.
func NewOrderedEncoder() *OrderedEncoder {
	return &OrderedEncoder{}
}
pkg/vm/engine/tpe/orderedcodec/encoder.go
0.595963
0.516717
encoder.go
starcoder
package main

import (
	"aoc2021/utils"
	"fmt"
	"sort"
)

// Point identifies a grid cell by row and column.
type Point struct {
	row int
	col int
}

// traverse recursively flood-fills the basin containing (x, y) and returns
// the number of cells reachable from it, excluding (x, y) itself. Cells of
// height 9 never belong to a basin, and visitedPoints guards against
// counting a cell twice.
func traverse(x, y int, grid [][]int, visitedPoints utils.Set[Point]) int {
	if visitedPoints.Contains(Point{x, y}) {
		// duplicate! return -1 to offset the +1 that the recursive call adds
		return -1
	}
	// add this point to the visited set
	visitedPoints.Add(Point{x, y})
	sum := 0
	if x != 0 && grid[x-1][y] != 9 && grid[x][y] < grid[x-1][y] {
		sum += traverse(x-1, y, grid, visitedPoints) + 1
	}
	if y != 0 && grid[x][y-1] != 9 && grid[x][y] < grid[x][y-1] {
		sum += traverse(x, y-1, grid, visitedPoints) + 1
	}
	if x != len(grid)-1 && grid[x+1][y] != 9 && grid[x][y] < grid[x+1][y] {
		sum += traverse(x+1, y, grid, visitedPoints) + 1
	}
	if y != len(grid[x])-1 && grid[x][y+1] != 9 && grid[x][y] < grid[x][y+1] {
		sum += traverse(x, y+1, grid, visitedPoints) + 1
	}
	return sum
}

// isLowPoint reports whether grid[i][j] is strictly lower than every
// orthogonal neighbour; neighbours past the grid edge are ignored.
// Extracted so parts 1 and 2 share one definition instead of two
// copy-pasted nested-if ladders.
func isLowPoint(grid [][]int, i, j int) bool {
	num := grid[i][j]
	if i > 0 && grid[i-1][j] <= num {
		return false
	}
	if j > 0 && grid[i][j-1] <= num {
		return false
	}
	if i < len(grid)-1 && grid[i+1][j] <= num {
		return false
	}
	if j < len(grid[i])-1 && grid[i][j+1] <= num {
		return false
	}
	return true
}

func main() {
	lines := utils.ReadFile("input.txt")

	// parse the height map: one digit per cell
	grid := make([][]int, 0, len(lines))
	for _, line := range lines {
		inner := make([]int, 0, len(line))
		for _, num := range line {
			inner = append(inner, int(num-'0'))
		}
		grid = append(grid, inner)
	}

	// part 1: sum of risk levels (height + 1) over all low points; the low
	// points are collected here as well since part 2 seeds basins from them
	lowRiskSum := 0
	lowestPoints := make([]Point, 0)
	for i := range grid {
		for j := range grid[i] {
			if isLowPoint(grid, i, j) {
				lowRiskSum += grid[i][j] + 1
				lowestPoints = append(lowestPoints, Point{i, j})
			}
		}
	}
	fmt.Println(lowRiskSum)

	// part 2: product of the three largest basin sizes (+1 counts the low
	// point itself, which traverse excludes).
	// NOTE: a leftover debug print of each low point was removed here.
	basinSizes := make([]int, 0, len(lowestPoints))
	for _, point := range lowestPoints {
		basinSizes = append(basinSizes, traverse(point.row, point.col, grid, make(utils.Set[Point], 0))+1)
	}
	sort.Sort(sort.Reverse(sort.IntSlice(basinSizes)))
	fmt.Println(basinSizes[0] * basinSizes[1] * basinSizes[2])
}
daphillips/09/day9.go
0.581303
0.534127
day9.go
starcoder
package mat

import (
	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/blas64"
)

// Compile-time checks that *DiagDense satisfies the listed interfaces.
var (
	diagDense *DiagDense
	_         Matrix          = diagDense
	_         Diagonal        = diagDense
	_         MutableDiagonal = diagDense
	_         Triangular      = diagDense
	_         Symmetric       = diagDense
	_         Banded          = diagDense
	_         RawBander       = diagDense
	_         RawSymBander    = diagDense
)

// Diagonal represents a diagonal matrix, that is a square matrix that only
// has non-zero terms on the diagonal.
type Diagonal interface {
	Matrix
	// Diag returns the number of rows/columns in the matrix
	Diag() int
}

// MutableDiagonal is a Diagonal matrix whose elements can be set.
type MutableDiagonal interface {
	Diagonal
	SetDiag(i int, v float64)
}

// DiagDense represents a diagonal matrix in dense storage format.
type DiagDense struct {
	// data holds only the diagonal entries; element i is row i, column i.
	data []float64
}

// NewDiagonal creates a new Diagonal matrix with n rows and n columns.
// The length of data must be n or data must be nil, otherwise NewDiagonal
// will panic.
func NewDiagonal(n int, data []float64) *DiagDense {
	if n < 0 {
		panic("mat: negative dimension")
	}
	if data == nil {
		data = make([]float64, n)
	}
	if len(data) != n {
		panic(ErrShape)
	}
	return &DiagDense{
		data: data,
	}
}

// Diag returns the dimension of the receiver.
func (d *DiagDense) Diag() int {
	return len(d.data)
}

// Dims returns the dimensions of the matrix.
func (d *DiagDense) Dims() (r, c int) {
	return len(d.data), len(d.data)
}

// T returns the transpose of the matrix.
// A diagonal matrix is its own transpose, so the receiver is returned.
func (d *DiagDense) T() Matrix {
	return d
}

// TTri returns the transpose of the matrix. Note that Diagonal matrices are
// Upper by default
func (d *DiagDense) TTri() Triangular {
	return TransposeTri{d}
}

// TBand returns the transpose of the matrix viewed as a banded matrix.
func (d *DiagDense) TBand() Banded {
	return TransposeBand{d}
}

// Bandwidth returns the lower and upper bandwidths, both zero for a
// diagonal matrix.
func (d *DiagDense) Bandwidth() (kl, ku int) {
	return 0, 0
}

// Symmetric implements the Symmetric interface.
func (d *DiagDense) Symmetric() int {
	return len(d.data)
}

// Triangle implements the Triangular interface.
func (d *DiagDense) Triangle() (int, TriKind) {
	return len(d.data), Upper
}

// RawBand returns the underlying data as a zero-bandwidth blas64.Band.
// The returned value shares storage with the receiver.
func (d *DiagDense) RawBand() blas64.Band {
	return blas64.Band{
		Rows:   len(d.data),
		Cols:   len(d.data),
		KL:     0,
		KU:     0,
		Stride: 1,
		Data:   d.data,
	}
}

// RawSymBand returns the underlying data as a zero-bandwidth
// blas64.SymmetricBand. The returned value shares storage with the receiver.
func (d *DiagDense) RawSymBand() blas64.SymmetricBand {
	return blas64.SymmetricBand{
		N:      len(d.data),
		K:      0,
		Stride: 1,
		Uplo:   blas.Upper,
		Data:   d.data,
	}
}
vendor/gonum.org/v1/gonum/mat/diagonal.go
0.829665
0.515864
diagonal.go
starcoder
package missing_cloud_hardening

import (
	"github.com/threagile/threagile/model"
	"sort"
)

// Category describes the "Missing Cloud Hardening" risk rule.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:    "missing-cloud-hardening",
		Title: "Missing Cloud Hardening",
		Description: "Cloud components should be hardened according to the cloud vendor best practices. This affects their " +
			"configuration, auditing, and further areas.",
		Impact:     "If this risk is unmitigated, attackers might access cloud components in an unintended way.",
		ASVS:       "V1 - Architecture, Design and Threat Modeling Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
		Action:     "Cloud Hardening",
		Mitigation: "Apply hardening of all cloud components and services, taking special care to follow the individual risk descriptions (which " +
			"depend on the cloud provider tags in the model). " +
			"<br><br>For <b>Amazon Web Services (AWS)</b>: Follow the <i>CIS Benchmark for Amazon Web Services</i> (see also the automated checks of cloud audit tools like <i>\"PacBot\", \"CloudSploit\", \"CloudMapper\", \"ScoutSuite\", or \"Prowler AWS CIS Benchmark Tool\"</i>). " +
			"<br>For EC2 and other servers running Amazon Linux, follow the <i>CIS Benchmark for Amazon Linux</i> and switch to IMDSv2. " +
			"<br>For S3 buckets follow the <i>Security Best Practices for Amazon S3</i> at <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html\">https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html</a> to avoid accidental leakage. " +
			"<br>Also take a look at some of these tools: <a href=\"https://github.com/toniblyx/my-arsenal-of-aws-security-tools\">https://github.com/toniblyx/my-arsenal-of-aws-security-tools</a> " +
			"<br><br>For <b>Microsoft Azure</b>: Follow the <i>CIS Benchmark for Microsoft Azure</i> (see also the automated checks of cloud audit tools like <i>\"CloudSploit\" or \"ScoutSuite\"</i>)." +
			"<br><br>For <b>Google Cloud Platform</b>: Follow the <i>CIS Benchmark for Google Cloud Computing Platform</i> (see also the automated checks of cloud audit tools like <i>\"CloudSploit\" or \"ScoutSuite\"</i>). " +
			"<br><br>For <b>Oracle Cloud Platform</b>: Follow the hardening best practices (see also the automated checks of cloud audit tools like <i>\"CloudSploit\"</i>).",
		Check:                      "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
		Function:                   model.Operations,
		STRIDE:                     model.Tampering,
		DetectionLogic:             "In-scope cloud components (either residing in cloud trust boundaries or more specifically tagged with cloud provider types).",
		RiskAssessment:             "The risk rating depends on the sensitivity of the technical asset itself and of the data assets processed and stored.",
		FalsePositives: "Cloud components not running parts of the target architecture can be considered " +
			"as false positives after individual review.",
		ModelFailurePossibleReason: false,
		CWE:                        1008,
	}
}

// specificSubtagsAWS lists the AWS service subtags that trigger additional,
// service-specific hardening risks.
var specificSubtagsAWS = []string{"aws:vpc", "aws:ec2", "aws:s3", "aws:ebs", "aws:apigateway", "aws:lambda", "aws:dynamodb", "aws:rds", "aws:sqs", "aws:iam"}

// SupportedTags returns all model tags this rule reacts to: the four cloud
// provider base tags plus the AWS service subtags.
func SupportedTags() []string {
	res := []string{
		"aws",   // Amazon AWS
		"azure", // Microsoft Azure
		"gcp",   // Google Cloud Platform
		"ocp",   // Oracle Cloud Platform
	}
	res = append(res, specificSubtagsAWS...)
	return res
}

// GenerateRisks creates the cloud-hardening risks: vendor-specific risks for
// tagged shared runtimes, trust boundaries, and technical assets; generic
// cloud risks for untagged cloud components; and AWS service-specific risks
// for subtag-tagged assets.
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)

	// Buckets of component IDs. "Unspecific" buckets collect components with
	// generic cloud risks only; the provider buckets collect components with
	// vendor-individual risks. (Size hints of 0 removed: make(map) suffices.)
	sharedRuntimesWithUnspecificCloudRisks := make(map[string]bool)
	trustBoundariesWithUnspecificCloudRisks := make(map[string]bool)
	techAssetsWithUnspecificCloudRisks := make(map[string]bool)

	sharedRuntimeIDsAWS := make(map[string]bool)
	trustBoundaryIDsAWS := make(map[string]bool)
	techAssetIDsAWS := make(map[string]bool)

	sharedRuntimeIDsAzure := make(map[string]bool)
	trustBoundaryIDsAzure := make(map[string]bool)
	techAssetIDsAzure := make(map[string]bool)

	sharedRuntimeIDsGCP := make(map[string]bool)
	trustBoundaryIDsGCP := make(map[string]bool)
	techAssetIDsGCP := make(map[string]bool)

	sharedRuntimeIDsOCP := make(map[string]bool)
	trustBoundaryIDsOCP := make(map[string]bool)
	techAssetIDsOCP := make(map[string]bool)

	techAssetIDsWithSubtagSpecificCloudRisks := make(map[string]bool)

	for _, trustBoundary := range model.ParsedModelRoot.TrustBoundaries {
		taggedOuterTB := trustBoundary.IsTaggedWithAny(SupportedTags()...) // false = generic cloud risks only // true = cloud-individual risks
		if taggedOuterTB || trustBoundary.Type.IsWithinCloud() {
			addTrustBoundaryAccordingToBasetag(trustBoundary, trustBoundariesWithUnspecificCloudRisks,
				trustBoundaryIDsAWS, trustBoundaryIDsAzure, trustBoundaryIDsGCP, trustBoundaryIDsOCP)
			for _, techAssetID := range trustBoundary.RecursivelyAllTechnicalAssetIDsInside() {
				added := false
				tA := model.ParsedModelRoot.TechnicalAssets[techAssetID]
				if tA.IsTaggedWithAny(SupportedTags()...) {
					addAccordingToBasetag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
						techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
					added = true
				} else if taggedOuterTB {
					// inherit the enclosing trust boundary's tags
					addAccordingToBasetag(tA, trustBoundary.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
						techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
					added = true
				}
				if !added {
					techAssetsWithUnspecificCloudRisks[techAssetID] = true
				}
			}
		}
	}

	// now loop over all technical assets, trust boundaries, and shared runtimes model-wide by tag
	for _, tA := range model.TechnicalAssetsTaggedWithAny(SupportedTags()...) {
		addAccordingToBasetag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
			techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
	}
	for _, tB := range model.TrustBoundariesTaggedWithAny(SupportedTags()...) {
		for _, candidateID := range tB.RecursivelyAllTechnicalAssetIDsInside() {
			tA := model.ParsedModelRoot.TechnicalAssets[candidateID]
			if tA.IsTaggedWithAny(SupportedTags()...) {
				addAccordingToBasetag(tA, tA.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
					techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
			} else {
				addAccordingToBasetag(tA, tB.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
					techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
			}
		}
	}
	for _, sR := range model.SharedRuntimesTaggedWithAny(SupportedTags()...) {
		addSharedRuntimeAccordingToBasetag(sR, sharedRuntimesWithUnspecificCloudRisks,
			sharedRuntimeIDsAWS, sharedRuntimeIDsAzure, sharedRuntimeIDsGCP, sharedRuntimeIDsOCP)
		for _, candidateID := range sR.TechnicalAssetsRunning {
			tA := model.ParsedModelRoot.TechnicalAssets[candidateID]
			addAccordingToBasetag(tA, sR.Tags, techAssetIDsWithSubtagSpecificCloudRisks,
				techAssetIDsAWS, techAssetIDsAzure, techAssetIDsGCP, techAssetIDsOCP)
		}
	}

	// remove from sharedRuntimesWithUnspecificCloudRisks all specific tagged assets
	for id := range sharedRuntimeIDsAWS {
		delete(sharedRuntimesWithUnspecificCloudRisks, id)
	}
	for id := range sharedRuntimeIDsAzure {
		delete(sharedRuntimesWithUnspecificCloudRisks, id)
	}
	for id := range sharedRuntimeIDsGCP {
		delete(sharedRuntimesWithUnspecificCloudRisks, id)
	}
	for id := range sharedRuntimeIDsOCP {
		delete(sharedRuntimesWithUnspecificCloudRisks, id)
	}
	// remove from trustBoundariesWithUnspecificCloudRisks all specific tagged assets
	for id := range trustBoundaryIDsAWS {
		delete(trustBoundariesWithUnspecificCloudRisks, id)
	}
	for id := range trustBoundaryIDsAzure {
		delete(trustBoundariesWithUnspecificCloudRisks, id)
	}
	for id := range trustBoundaryIDsGCP {
		delete(trustBoundariesWithUnspecificCloudRisks, id)
	}
	for id := range trustBoundaryIDsOCP {
		delete(trustBoundariesWithUnspecificCloudRisks, id)
	}
	// remove from techAssetsWithUnspecificCloudRisks all specific tagged assets
	for techAssetID := range techAssetIDsWithSubtagSpecificCloudRisks {
		delete(techAssetsWithUnspecificCloudRisks, techAssetID)
	}
	for techAssetID := range techAssetIDsAWS {
		delete(techAssetsWithUnspecificCloudRisks, techAssetID)
	}
	for techAssetID := range techAssetIDsAzure {
		delete(techAssetsWithUnspecificCloudRisks, techAssetID)
	}
	for techAssetID := range techAssetIDsGCP {
		delete(techAssetsWithUnspecificCloudRisks, techAssetID)
	}
	for techAssetID := range techAssetIDsOCP {
		delete(techAssetsWithUnspecificCloudRisks, techAssetID)
	}

	// NOW ACTUALLY CREATE THE RISKS
	addedAWS, addedAzure, addedGCP, addedOCP := false, false, false, false

	// first try to add shared runtimes...
	for id := range sharedRuntimeIDsAWS {
		risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "AWS", "CIS Benchmark for AWS"))
		addedAWS = true
	}
	for id := range sharedRuntimeIDsAzure {
		risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "Azure", "CIS Benchmark for Microsoft Azure"))
		addedAzure = true
	}
	for id := range sharedRuntimeIDsGCP {
		risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
		addedGCP = true
	}
	for id := range sharedRuntimeIDsOCP {
		risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
		addedOCP = true
	}
	for id := range sharedRuntimesWithUnspecificCloudRisks {
		risks = append(risks, createRiskForSharedRuntime(model.ParsedModelRoot.SharedRuntimes[id], "", ""))
	}

	// ... followed by trust boundaries for the generic risks
	for id := range trustBoundaryIDsAWS {
		risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "AWS", "CIS Benchmark for AWS"))
		addedAWS = true
	}
	for id := range trustBoundaryIDsAzure {
		risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "Azure", "CIS Benchmark for Microsoft Azure"))
		addedAzure = true
	}
	for id := range trustBoundaryIDsGCP {
		risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
		addedGCP = true
	}
	for id := range trustBoundaryIDsOCP {
		risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
		addedOCP = true
	}
	for id := range trustBoundariesWithUnspecificCloudRisks {
		risks = append(risks, createRiskForTrustBoundary(model.ParsedModelRoot.TrustBoundaries[id], "", ""))
	}

	// just use the most sensitive asset as an example - to only create one general "AWS cloud hardening" risk, not many
	// (the dead re-assignments of addedAWS/addedAzure/addedGCP/addedOCP that
	// followed each append below were removed: nothing reads them afterwards)
	if !addedAWS {
		mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAWS)
		if !mostRelevantAsset.IsZero() {
			risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "AWS", "CIS Benchmark for AWS"))
		}
	}
	// just use the most sensitive asset as an example - to only create one general "Azure cloud hardening" risk, not many
	if !addedAzure {
		mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsAzure)
		if !mostRelevantAsset.IsZero() {
			risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "Azure", "CIS Benchmark for Microsoft Azure"))
		}
	}
	// just use the most sensitive asset as an example - to only create one general "GCP cloud hardening" risk, not many
	if !addedGCP {
		mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsGCP)
		if !mostRelevantAsset.IsZero() {
			risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "GCP", "CIS Benchmark for Google Cloud Computing Platform"))
		}
	}
	// just use the most sensitive asset as an example - to only create one general "OCP cloud hardening" risk, not many
	// (comment fixed: previously said "GCP" due to copy/paste)
	if !addedOCP {
		mostRelevantAsset := findMostSensitiveTechnicalAsset(techAssetIDsOCP)
		if !mostRelevantAsset.IsZero() {
			risks = append(risks, createRiskForTechnicalAsset(mostRelevantAsset, "OCP", "Vendor Best Practices for Oracle Cloud Platform"))
		}
	}

	// now also add all tech asset specific tag-specific risks, as they are specific to the asset anyway (therefore don't set added to true here)
	for id := range techAssetIDsWithSubtagSpecificCloudRisks {
		tA := model.ParsedModelRoot.TechnicalAssets[id]
		if tA.IsTaggedWithAnyTraversingUp("aws:ec2") {
			risks = append(risks, createRiskForTechnicalAsset(tA, "EC2", "CIS Benchmark for Amazon Linux"))
		}
		if tA.IsTaggedWithAnyTraversingUp("aws:s3") {
			risks = append(risks, createRiskForTechnicalAsset(tA, "S3", "Security Best Practices for AWS S3"))
		}
		// TODO add more tag-specific risks like also for aws:lambda etc. here
	}

	return risks
}

// addTrustBoundaryAccordingToBasetag sorts a tagged trust boundary into the
// per-provider ID buckets, or into the unspecific bucket when it carries no
// supported tag.
func addTrustBoundaryAccordingToBasetag(trustBoundary model.TrustBoundary,
	trustBoundariesWithUnspecificCloudRisks map[string]bool,
	trustBoundaryIDsAWS map[string]bool,
	trustBoundaryIDsAzure map[string]bool,
	trustBoundaryIDsGCP map[string]bool,
	trustBoundaryIDsOCP map[string]bool) {
	if trustBoundary.IsTaggedWithAny(SupportedTags()...) {
		if trustBoundary.IsTaggedWithBaseTag("aws") {
			trustBoundaryIDsAWS[trustBoundary.Id] = true
		}
		if trustBoundary.IsTaggedWithBaseTag("azure") {
			trustBoundaryIDsAzure[trustBoundary.Id] = true
		}
		if trustBoundary.IsTaggedWithBaseTag("gcp") {
			trustBoundaryIDsGCP[trustBoundary.Id] = true
		}
		if trustBoundary.IsTaggedWithBaseTag("ocp") {
			trustBoundaryIDsOCP[trustBoundary.Id] = true
		}
	} else {
		trustBoundariesWithUnspecificCloudRisks[trustBoundary.Id] = true
	}
}

// addSharedRuntimeAccordingToBasetag sorts a tagged shared runtime into the
// per-provider ID buckets, or into the unspecific bucket when it carries no
// supported tag.
func addSharedRuntimeAccordingToBasetag(sharedRuntime model.SharedRuntime,
	sharedRuntimesWithUnspecificCloudRisks map[string]bool,
	sharedRuntimeIDsAWS map[string]bool,
	sharedRuntimeIDsAzure map[string]bool,
	sharedRuntimeIDsGCP map[string]bool,
	sharedRuntimeIDsOCP map[string]bool) {
	if sharedRuntime.IsTaggedWithAny(SupportedTags()...) {
		if sharedRuntime.IsTaggedWithBaseTag("aws") {
			sharedRuntimeIDsAWS[sharedRuntime.Id] = true
		}
		if sharedRuntime.IsTaggedWithBaseTag("azure") {
			sharedRuntimeIDsAzure[sharedRuntime.Id] = true
		}
		if sharedRuntime.IsTaggedWithBaseTag("gcp") {
			sharedRuntimeIDsGCP[sharedRuntime.Id] = true
		}
		if sharedRuntime.IsTaggedWithBaseTag("ocp") {
			sharedRuntimeIDsOCP[sharedRuntime.Id] = true
		}
	} else {
		sharedRuntimesWithUnspecificCloudRisks[sharedRuntime.Id] = true
	}
}

// addAccordingToBasetag sorts a technical asset into the per-provider ID
// buckets according to the given tags (which may be inherited from an
// enclosing trust boundary or shared runtime) and marks assets carrying an
// AWS service subtag for service-specific risks.
func addAccordingToBasetag(techAsset model.TechnicalAsset, tags []string,
	techAssetIDsWithTagSpecificCloudRisks map[string]bool,
	techAssetIDsAWS map[string]bool,
	techAssetIDsAzure map[string]bool,
	techAssetIDsGCP map[string]bool,
	techAssetIDsOCP map[string]bool) {
	if techAsset.IsTaggedWithAny(specificSubtagsAWS...) {
		techAssetIDsWithTagSpecificCloudRisks[techAsset.Id] = true
	}
	if model.IsTaggedWithBaseTag(tags, "aws") {
		techAssetIDsAWS[techAsset.Id] = true
	}
	if model.IsTaggedWithBaseTag(tags, "azure") {
		techAssetIDsAzure[techAsset.Id] = true
	}
	if model.IsTaggedWithBaseTag(tags, "gcp") {
		techAssetIDsGCP[techAsset.Id] = true
	}
	if model.IsTaggedWithBaseTag(tags, "ocp") {
		techAssetIDsOCP[techAsset.Id] = true
	}
}

// findMostSensitiveTechnicalAsset returns the asset with the highest
// sensitivity score among the given IDs; keys are sorted so the choice is
// deterministic across runs. The zero asset is returned for an empty set.
func findMostSensitiveTechnicalAsset(techAssets map[string]bool) model.TechnicalAsset {
	var mostRelevantAsset model.TechnicalAsset
	keys := make([]string, 0, len(techAssets))
	for k := range techAssets {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, id := range keys {
		tA := model.ParsedModelRoot.TechnicalAssets[id]
		if mostRelevantAsset.IsZero() || tA.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
			mostRelevantAsset = tA
		}
	}
	return mostRelevantAsset
}

// createRiskForSharedRuntime builds a risk entry for a shared runtime; the
// impact scales with the runtime's highest CIA ratings.
func createRiskForSharedRuntime(sharedRuntime model.SharedRuntime, prefix, details string) model.Risk {
	if len(prefix) > 0 {
		prefix = " (" + prefix + ")"
	}
	title := "<b>Missing Cloud Hardening" + prefix + "</b> risk at <b>" + sharedRuntime.Title + "</b>"
	if len(details) > 0 {
		title += ": <u>" + details + "</u>"
	}
	impact := model.MediumImpact
	if sharedRuntime.HighestConfidentiality() >= model.Restricted ||
		sharedRuntime.HighestIntegrity() >= model.Critical ||
		sharedRuntime.HighestAvailability() >= model.Critical {
		impact = model.HighImpact
	}
	if sharedRuntime.HighestConfidentiality() == model.Sensitive ||
		sharedRuntime.HighestIntegrity() == model.MissionCritical ||
		sharedRuntime.HighestAvailability() == model.MissionCritical {
		impact = model.VeryHighImpact
	}
	// create risk
	risk := model.Risk{
		Category:                    Category(),
		Severity:                    model.CalculateSeverity(model.Unlikely, impact),
		ExploitationLikelihood:      model.Unlikely,
		ExploitationImpact:          impact,
		Title:                       title,
		MostRelevantSharedRuntimeId: sharedRuntime.Id,
		DataBreachProbability:       model.Probable,
		DataBreachTechnicalAssetIDs: sharedRuntime.TechnicalAssetsRunning,
	}
	risk.SyntheticId = risk.Category.Id + "@" + sharedRuntime.Id
	return risk
}

// createRiskForTrustBoundary builds a risk entry for a trust boundary; the
// impact scales with the highest CIA ratings of the assets inside.
func createRiskForTrustBoundary(trustBoundary model.TrustBoundary, prefix, details string) model.Risk {
	if len(prefix) > 0 {
		prefix = " (" + prefix + ")"
	}
	title := "<b>Missing Cloud Hardening" + prefix + "</b> risk at <b>" + trustBoundary.Title + "</b>"
	if len(details) > 0 {
		title += ": <u>" + details + "</u>"
	}
	impact := model.MediumImpact
	if trustBoundary.HighestConfidentiality() >= model.Restricted ||
		trustBoundary.HighestIntegrity() >= model.Critical ||
		trustBoundary.HighestAvailability() >= model.Critical {
		impact = model.HighImpact
	}
	if trustBoundary.HighestConfidentiality() == model.Sensitive ||
		trustBoundary.HighestIntegrity() == model.MissionCritical ||
		trustBoundary.HighestAvailability() == model.MissionCritical {
		impact = model.VeryHighImpact
	}
	// create risk
	risk := model.Risk{
		Category:                    Category(),
		Severity:                    model.CalculateSeverity(model.Unlikely, impact),
		ExploitationLikelihood:      model.Unlikely,
		ExploitationImpact:          impact,
		Title:                       title,
		MostRelevantTrustBoundaryId: trustBoundary.Id,
		DataBreachProbability:       model.Probable,
		DataBreachTechnicalAssetIDs: trustBoundary.RecursivelyAllTechnicalAssetIDsInside(),
	}
	risk.SyntheticId = risk.Category.Id + "@" + trustBoundary.Id
	return risk
}

// createRiskForTechnicalAsset builds a risk entry for a single technical
// asset; the impact scales with the asset's highest CIA ratings.
func createRiskForTechnicalAsset(technicalAsset model.TechnicalAsset, prefix, details string) model.Risk {
	if len(prefix) > 0 {
		prefix = " (" + prefix + ")"
	}
	title := "<b>Missing Cloud Hardening" + prefix + "</b> risk at <b>" + technicalAsset.Title + "</b>"
	if len(details) > 0 {
		title += ": <u>" + details + "</u>"
	}
	impact := model.MediumImpact
	if technicalAsset.HighestConfidentiality() >= model.Restricted ||
		technicalAsset.HighestIntegrity() >= model.Critical ||
		technicalAsset.HighestAvailability() >= model.Critical {
		impact = model.HighImpact
	}
	if technicalAsset.HighestConfidentiality() == model.Sensitive ||
		technicalAsset.HighestIntegrity() == model.MissionCritical ||
		technicalAsset.HighestAvailability() == model.MissionCritical {
		impact = model.VeryHighImpact
	}
	// create risk
	risk := model.Risk{
		Category:                     Category(),
		Severity:                     model.CalculateSeverity(model.Unlikely, impact),
		ExploitationLikelihood:       model.Unlikely,
		ExploitationImpact:           impact,
		Title:                        title,
		MostRelevantTechnicalAssetId: technicalAsset.Id,
		DataBreachProbability:        model.Probable,
		DataBreachTechnicalAssetIDs:  []string{technicalAsset.Id},
	}
	risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
	return risk
}
risks/built-in/missing-cloud-hardening/missing-cloud-hardening-rule.go
0.61682
0.502441
missing-cloud-hardening-rule.go
starcoder
package poly // Intersects detects if a point intersects another polygon func (p Point) Intersects(exterior Polygon, holes []Polygon) bool { return p.Inside(exterior, holes) } // Intersects detects if a polygon intersects another polygon func (shape Polygon) Intersects(exterior Polygon, holes []Polygon) bool { return shape.doesIntersects(false, exterior, holes) } // LineStringIntersects detects if a polygon intersects a linestring func (shape Polygon) LineStringIntersects(exterior Polygon, holes []Polygon) bool { return shape.doesIntersects(true, exterior, holes) } func (shape Polygon) doesIntersects(isLineString bool, exterior Polygon, holes []Polygon) bool { switch len(shape) { case 0: return false case 1: switch len(exterior) { case 0: return false case 1: return shape[0].X == exterior[0].X && shape[0].Y == shape[0].Y default: return shape[0].Inside(exterior, holes) } default: switch len(exterior) { case 0: return false case 1: return exterior[0].Inside(shape, holes) } } if !shape.Rect().IntersectsRect(exterior.Rect()) { return false } for i := 0; i < len(shape); i++ { for j := 0; j < len(exterior); j++ { if lineintersects( shape[i], shape[(i+1)%len(shape)], exterior[j], exterior[(j+1)%len(exterior)], ) { return true } } } for _, hole := range holes { if shape.Inside(hole, nil) { return false } } if shape.Inside(exterior, nil) { return true } if !isLineString { if exterior.Inside(shape, nil) { return true } } return false } func lineintersects( a, b Point, // segment 1 c, d Point, // segment 2 ) bool { // do the bounding boxes intersect? // the following checks without swapping values. 
if a.Y > b.Y { if c.Y > d.Y { if b.Y > c.Y || a.Y < d.Y { return false } } else { if b.Y > d.Y || a.Y < c.Y { return false } } } else { if c.Y > d.Y { if a.Y > c.Y || b.Y < d.Y { return false } } else { if a.Y > d.Y || b.Y < c.Y { return false } } } if a.X > b.X { if c.X > d.X { if b.X > c.X || a.X < d.X { return false } } else { if b.X > d.X || a.X < c.X { return false } } } else { if c.X > d.X { if a.X > c.X || b.X < d.X { return false } } else { if a.X > d.X || b.X < c.X { return false } } } // the following code is from http://ideone.com/PnPJgb cmpx, cmpy := c.X-a.X, c.Y-a.Y rx, ry := b.X-a.X, b.Y-a.Y cmpxr := cmpx*ry - cmpy*rx if cmpxr == 0 { // Lines are collinear, and so intersect if they have any overlap if !(((c.X-a.X <= 0) != (c.X-b.X <= 0)) || ((c.Y-a.Y <= 0) != (c.Y-b.Y <= 0))) { return false } return true } sx, sy := d.X-c.X, d.Y-c.Y cmpxs := cmpx*sy - cmpy*sx rxs := rx*sy - ry*sx if rxs == 0 { return false // Lines are parallel. } rxsr := 1 / rxs t := cmpxs * rxsr u := cmpxr * rxsr if !((t >= 0) && (t <= 1) && (u >= 0) && (u <= 1)) { return false } return true }
vendor/github.com/tidwall/tile38/geojson/poly/intersects.go
0.781247
0.692444
intersects.go
starcoder
package builders

import (
	"go/ast"
	"go/token"
)

// BinaryOp returns binary expression with given token.
func BinaryOp(x ast.Expr, tok token.Token, y ast.Expr) *ast.BinaryExpr {
	return &ast.BinaryExpr{
		X:  x,
		Op: tok,
		Y:  y,
	}
}

// Arithmetic

// Add returns x + y expression.
func Add(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.ADD, y) }

// Sub returns x - y expression.
func Sub(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.SUB, y) }

// Mul returns x * y expression.
func Mul(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.MUL, y) }

// Div returns x / y expression.
func Div(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.QUO, y) }

// Rem returns x % y expression.
func Rem(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.REM, y) }

// Compare

// Eq returns x == y expression.
func Eq(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.EQL, y) }

// NotEq returns x != y expression.
func NotEq(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.NEQ, y) }

// Greater returns x > y expression.
func Greater(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.GTR, y) }

// GreaterOrEq returns x >= y expression.
func GreaterOrEq(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.GEQ, y) }

// Less returns x < y expression.
func Less(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.LSS, y) }

// LessOrEq returns x <= y expression.
func LessOrEq(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.LEQ, y) }

// Logic

// And returns x && y expression.
func And(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.LAND, y) }

// Or returns x || y expression.
// (comment fixed: previously said "And" due to copy/paste)
func Or(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.LOR, y) }

// Bitwise

// BAnd returns x & y expression.
func BAnd(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.AND, y) }

// BOr returns x | y expression.
func BOr(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.OR, y) }

// Xor returns x ^ y expression.
// (comment fixed: previously said "BOr" due to copy/paste)
func Xor(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.XOR, y) }

// AddNot returns x &^ y (AND NOT) expression.
// NOTE(review): the conventional name for &^ is "AndNot"; renaming the
// exported function would break callers, so only the doc notes it.
func AddNot(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.AND_NOT, y) }

// ShiftLeft returns x << y expression.
func ShiftLeft(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.SHL, y) }

// ShiftRight returns x >> y expression.
func ShiftRight(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.SHR, y) }

// Channel

// Send returns x <- y expression.
func Send(x, y ast.Expr) *ast.BinaryExpr { return BinaryOp(x, token.ARROW, y) }
binary_op.go
0.872524
0.716467
binary_op.go
starcoder
// NOTE(review): the ".gen.go" filename suggests this file is produced by a
// code generator — confirm and edit the template, not this file, for changes.
package gen

// floatConstantValuesSequence emits the same float64 value forever.
type floatConstantValuesSequence struct {
	v float64
}

// NewFloatConstantValuesSequence returns a FloatValuesSequence that fills
// every requested slice with the constant v.
func NewFloatConstantValuesSequence(v float64) FloatValuesSequence {
	return &floatConstantValuesSequence{
		v: v,
	}
}

// Reset is a no-op: a constant sequence has no position to rewind.
func (g *floatConstantValuesSequence) Reset() {
}

// Write fills vs entirely with the constant value.
func (g *floatConstantValuesSequence) Write(vs []float64) {
	for i := 0; i < len(vs); i++ {
		vs[i] = g.v
	}
}

// integerConstantValuesSequence is the int64 analogue of the float constant
// sequence above.
type integerConstantValuesSequence struct {
	v int64
}

// NewIntegerConstantValuesSequence returns an IntegerValuesSequence that
// fills every requested slice with the constant v.
func NewIntegerConstantValuesSequence(v int64) IntegerValuesSequence {
	return &integerConstantValuesSequence{
		v: v,
	}
}

// Reset is a no-op.
func (g *integerConstantValuesSequence) Reset() {
}

// Write fills vs entirely with the constant value.
func (g *integerConstantValuesSequence) Write(vs []int64) {
	for i := 0; i < len(vs); i++ {
		vs[i] = g.v
	}
}

// unsignedConstantValuesSequence is the uint64 analogue.
type unsignedConstantValuesSequence struct {
	v uint64
}

// NewUnsignedConstantValuesSequence returns an UnsignedValuesSequence that
// fills every requested slice with the constant v.
func NewUnsignedConstantValuesSequence(v uint64) UnsignedValuesSequence {
	return &unsignedConstantValuesSequence{
		v: v,
	}
}

// Reset is a no-op.
func (g *unsignedConstantValuesSequence) Reset() {
}

// Write fills vs entirely with the constant value.
func (g *unsignedConstantValuesSequence) Write(vs []uint64) {
	for i := 0; i < len(vs); i++ {
		vs[i] = g.v
	}
}

// stringConstantValuesSequence is the string analogue.
type stringConstantValuesSequence struct {
	v string
}

// NewStringConstantValuesSequence returns a StringValuesSequence that fills
// every requested slice with the constant v.
func NewStringConstantValuesSequence(v string) StringValuesSequence {
	return &stringConstantValuesSequence{
		v: v,
	}
}

// Reset is a no-op.
func (g *stringConstantValuesSequence) Reset() {
}

// Write fills vs entirely with the constant value.
func (g *stringConstantValuesSequence) Write(vs []string) {
	for i := 0; i < len(vs); i++ {
		vs[i] = g.v
	}
}

// booleanConstantValuesSequence is the bool analogue.
type booleanConstantValuesSequence struct {
	v bool
}

// NewBooleanConstantValuesSequence returns a BooleanValuesSequence that fills
// every requested slice with the constant v.
func NewBooleanConstantValuesSequence(v bool) BooleanValuesSequence {
	return &booleanConstantValuesSequence{
		v: v,
	}
}

// Reset is a no-op.
func (g *booleanConstantValuesSequence) Reset() {
}

// Write fills vs entirely with the constant value.
func (g *booleanConstantValuesSequence) Write(vs []bool) {
	for i := 0; i < len(vs); i++ {
		vs[i] = g.v
	}
}

// floatArrayValuesSequence cycles through a fixed slice of float64 values,
// wrapping around at the end; vi is the next read position, preserved across
// Write calls.
type floatArrayValuesSequence struct {
	v  []float64
	vi int
}

// NewFloatArrayValuesSequence returns a FloatValuesSequence cycling over v.
// NOTE(review): an empty v panics on the first non-empty Write (index out of
// range) — confirm callers never pass an empty slice.
func NewFloatArrayValuesSequence(v []float64) FloatValuesSequence {
	return &floatArrayValuesSequence{
		v: v,
	}
}

// Reset rewinds the cursor to the start of the backing slice.
func (g *floatArrayValuesSequence) Reset() {
	g.vi = 0
}

// Write fills vs with successive values from the backing slice, wrapping
// around, and stores the cursor for the next call.
func (g *floatArrayValuesSequence) Write(vs []float64) {
	var (
		v  = g.v
		vi = g.vi
	)
	for i := 0; i < len(vs); i++ {
		if vi >= len(v) {
			vi = 0
		}
		vs[i] = v[vi]
		vi += 1
	}
	g.vi = vi
}

// integerArrayValuesSequence: same cycling pattern as floatArrayValuesSequence,
// for int64.
type integerArrayValuesSequence struct {
	v  []int64
	vi int
}

// NewIntegerArrayValuesSequence returns an IntegerValuesSequence cycling over v.
func NewIntegerArrayValuesSequence(v []int64) IntegerValuesSequence {
	return &integerArrayValuesSequence{
		v: v,
	}
}

// Reset rewinds the cursor to the start of the backing slice.
func (g *integerArrayValuesSequence) Reset() {
	g.vi = 0
}

// Write fills vs with successive values, wrapping around.
func (g *integerArrayValuesSequence) Write(vs []int64) {
	var (
		v  = g.v
		vi = g.vi
	)
	for i := 0; i < len(vs); i++ {
		if vi >= len(v) {
			vi = 0
		}
		vs[i] = v[vi]
		vi += 1
	}
	g.vi = vi
}

// unsignedArrayValuesSequence: same cycling pattern, for uint64.
type unsignedArrayValuesSequence struct {
	v  []uint64
	vi int
}

// NewUnsignedArrayValuesSequence returns an UnsignedValuesSequence cycling over v.
func NewUnsignedArrayValuesSequence(v []uint64) UnsignedValuesSequence {
	return &unsignedArrayValuesSequence{
		v: v,
	}
}

// Reset rewinds the cursor to the start of the backing slice.
func (g *unsignedArrayValuesSequence) Reset() {
	g.vi = 0
}

// Write fills vs with successive values, wrapping around.
func (g *unsignedArrayValuesSequence) Write(vs []uint64) {
	var (
		v  = g.v
		vi = g.vi
	)
	for i := 0; i < len(vs); i++ {
		if vi >= len(v) {
			vi = 0
		}
		vs[i] = v[vi]
		vi += 1
	}
	g.vi = vi
}

// stringArrayValuesSequence: same cycling pattern, for string.
type stringArrayValuesSequence struct {
	v  []string
	vi int
}

// NewStringArrayValuesSequence returns a StringValuesSequence cycling over v.
func NewStringArrayValuesSequence(v []string) StringValuesSequence {
	return &stringArrayValuesSequence{
		v: v,
	}
}

// Reset rewinds the cursor to the start of the backing slice.
func (g *stringArrayValuesSequence) Reset() {
	g.vi = 0
}

// Write fills vs with successive values, wrapping around.
func (g *stringArrayValuesSequence) Write(vs []string) {
	var (
		v  = g.v
		vi = g.vi
	)
	for i := 0; i < len(vs); i++ {
		if vi >= len(v) {
			vi = 0
		}
		vs[i] = v[vi]
		vi += 1
	}
	g.vi = vi
}

// booleanArrayValuesSequence: same cycling pattern, for bool.
type booleanArrayValuesSequence struct {
	v  []bool
	vi int
}

// NewBooleanArrayValuesSequence returns a BooleanValuesSequence cycling over v.
func NewBooleanArrayValuesSequence(v []bool) BooleanValuesSequence {
	return &booleanArrayValuesSequence{
		v: v,
	}
}

// Reset rewinds the cursor to the start of the backing slice.
func (g *booleanArrayValuesSequence) Reset() {
	g.vi = 0
}

// Write fills vs with successive values, wrapping around.
func (g *booleanArrayValuesSequence) Write(vs []bool) {
	var (
		v  = g.v
		vi = g.vi
	)
	for i := 0; i < len(vs); i++ {
		if vi >= len(v) {
			vi = 0
		}
		vs[i] = v[vi]
		vi += 1
	}
	g.vi = vi
}
vend/db/pkg/data/gen/values.gen.go
0.667256
0.52074
values.gen.go
starcoder
package easyga

import (
	"math/rand"
	"sync"
)

// Rand is the package-wide random source; it is seeded in Init and may be
// replaced from outside. All probability draws below consume it in a fixed
// order, so runs with the same seed are reproducible (single-threaded path).
var Rand *rand.Rand

// GeneticAlgorithm is a struct that contains everything of genetic algorithm:
// the run parameters, the user-supplied callback functions, and the current
// population.
type GeneticAlgorithm struct {
	Parameters GeneticAlgorithmParameters
	Functions  GeneticAlgorithmFunctions
	Population GeneticAlgorithmPopulation
}

// Init validates the parameters, seeds Rand, initializes the callbacks and
// the first population, and evaluates its fitness. Returns the validation
// error, if any; on error the receiver is left untouched.
func (ga *GeneticAlgorithm) Init(customParameters GeneticAlgorithmParameters, customFunctions GeneticAlgorithmFunctions) (err error) {
	// Check parameters before initialization
	if err = customParameters.check(); err == nil {
		// Initialize parameters
		ga.Parameters = customParameters
		// Initialize Seed of rand (deterministic for a fixed RandomSeed)
		Rand = rand.New(rand.NewSource(ga.Parameters.RandomSeed))
		// Initialize functions
		ga.Functions = customFunctions
		ga.Functions.Init()
		// Initialize population
		ga.Population.Iteration = 0
		ga.Population.Init(ga.Parameters.ChromosomeLength, ga.Parameters.PopulationSize, ga.Parameters.GenotypeNumber, ga.Functions.ChromosomeInitFunction)
		// Update fitness of first generation
		ga.updateFitness()
	}
	return
}

// Run executes the selection/crossover/mutation loop until the user-supplied
// stop condition is met, then returns the best chromosome found, its fitness,
// and the number of iterations performed.
func (ga *GeneticAlgorithm) Run() (best Chromosome, fitness float64, iteration int) {
	for !ga.checkStop() {
		// Selection - Select parents from population
		parentsPair := ga.selection()
		// Crossover - perform crossover on parents creating population
		ga.crossover(parentsPair)
		// Mutation - perform mutation of population
		ga.mutation()
		// Update fitness
		ga.updateFitness()
		// Update iteration
		ga.Population.Iteration++
		// Export some data for statistic
		ga.statistic()
	}
	bestIndex, bestFitness := ga.Population.FindBest()
	fitness = bestFitness
	iteration = ga.Population.Iteration
	best = ga.Population.Chromosomes[bestIndex]
	return
}

// selection asks the user's SelectFunction for an index list and groups it
// into consecutive parent pairs.
// NOTE(review): this assumes SelectFunction returns at least Population.Size
// indexes and that Size is even — an odd Size reads selectedPopulation out of
// range. Confirm the invariant at the SelectFunction contract.
func (ga *GeneticAlgorithm) selection() (parentsPair [][2]int) {
	selectedPopulation := ga.Functions.SelectFunction(ga)
	selector := 0
	for selector < ga.Population.Size {
		parentIndex1, parentIndex2 := selectedPopulation[selector], selectedPopulation[selector+1]
		selector += 2
		parentsPair = append(parentsPair, [2]int{parentIndex1, parentIndex2})
	}
	return parentsPair
}

// crossover builds the next generation: each parent pair yields two children,
// either by the user's CrossOverFunction (with CrossoverProbability) or by
// copying the parents through unchanged. The receiver's Population is
// replaced wholesale at the end.
func (ga *GeneticAlgorithm) crossover(parentsPair [][2]int) {
	var nextPopulation GeneticAlgorithmPopulation
	// Copy information to next population
	nextPopulation.Size = ga.Population.Size
	nextPopulation.Iteration = ga.Population.Iteration
	for i := 0; i < len(parentsPair); i++ {
		// Get the indexes of parents. These are value copies, so
		// CrossOverFunction receives pointers to copies, not to the live
		// population — it cannot corrupt the current generation.
		parent1 := ga.Population.Chromosomes[parentsPair[i][0]]
		parent2 := ga.Population.Chromosomes[parentsPair[i][1]]
		// Initialize children chromosome
		var child1, child2 *Chromosome
		// Crossover with probability
		if Rand.Float64() < ga.Parameters.CrossoverProbability {
			child1, child2 = ga.Functions.CrossOverFunction(&parent1, &parent2)
			child1.GenotypeNumber = ga.Parameters.GenotypeNumber
			child2.GenotypeNumber = ga.Parameters.GenotypeNumber
		} else {
			child1, child2 = &parent1, &parent2
		}
		// Add child to the next generation population
		nextPopulation.Chromosomes = append(nextPopulation.Chromosomes, *child1, *child2)
	}
	// Update population
	ga.Population = nextPopulation
}

// mutation applies the user's MutateFunction to each chromosome with
// MutationProbability, optionally fanning out to goroutines.
// NOTE(review): when UseRoutine is set, MutateFunction instances run
// concurrently on distinct chromosomes — they must be goroutine-safe and must
// not use the shared Rand (math/rand.Rand is not safe for concurrent use).
// Confirm the callback contract documents this.
func (ga *GeneticAlgorithm) mutation() {
	var routineWait sync.WaitGroup
	for i := 0; i < ga.Population.Size; i++ {
		// Mutate with probability (the draw itself happens serially, keeping
		// the Rand consumption order deterministic)
		if Rand.Float64() < ga.Parameters.MutationProbability {
			if ga.Parameters.UseRoutine {
				routineWait.Add(1)
				go func(index int, counter *sync.WaitGroup) {
					ga.Functions.MutateFunction(&ga.Population.Chromosomes[index])
					counter.Done()
				}(i, &routineWait)
			} else {
				ga.Functions.MutateFunction(&ga.Population.Chromosomes[i])
			}
		}
	}
	routineWait.Wait()
}

// updateFitness evaluates FitnessFunction for every chromosome, optionally in
// parallel. The same concurrency caveat as mutation applies when UseRoutine
// is set.
func (ga *GeneticAlgorithm) updateFitness() {
	var routineWait sync.WaitGroup
	for i := 0; i < ga.Population.Size; i++ {
		if ga.Parameters.UseRoutine {
			routineWait.Add(1)
			go func(index int, counter *sync.WaitGroup) {
				ga.Functions.FitnessFunction(&ga.Population.Chromosomes[index])
				counter.Done()
			}(i, &routineWait)
		} else {
			ga.Functions.FitnessFunction(&ga.Population.Chromosomes[i])
		}
	}
	routineWait.Wait()
}

// checkStop delegates the termination decision to the user callback.
func (ga *GeneticAlgorithm) checkStop() bool {
	return ga.Functions.CheckStopFunction(ga)
}

// statistic gives the user callback a per-iteration hook for reporting.
func (ga *GeneticAlgorithm) statistic() {
	ga.Functions.StatisticFunction(ga)
}
ga.go
0.66356
0.424472
ga.go
starcoder
package funk

import (
	"reflect"
)

// Reduce takes a collection and reduces it to a single value using a reduction
// function (or a valid symbol) and an accumulator value.
//
// reduceFunc may be either a func of two arguments returning one value, or a
// rune ('+' or '*') selecting a built-in sum/product. The result is always
// returned as float64 (returnType below is derived from this signature).
//
// NOTE(review): a nil reduceFunc panics inside reflect.TypeOf — confirm
// callers never pass nil. Helpers redirectValue, IsIteratee, IsFunc,
// sliceElem, Sum and Product come from elsewhere in this package.
func Reduce(arr, reduceFunc, acc interface{}) float64 {
	arrValue := redirectValue(reflect.ValueOf(arr))

	if !IsIteratee(arrValue.Interface()) {
		panic("First parameter must be an iteratee")
	}

	// The declared return type of Reduce itself (float64), used for the final
	// conversion and for interface-element accumulators.
	returnType := reflect.TypeOf(Reduce).Out(0)

	isFunc := IsFunc(reduceFunc, []int{2}, []int{1})
	// A rune literal arrives as int32.
	isRune := reflect.TypeOf(reduceFunc).Kind() == reflect.Int32

	if !(isFunc || isRune) {
		panic("Second argument must be a valid function or rune")
	}

	accValue := reflect.ValueOf(acc)
	sliceElemType := sliceElem(arrValue.Type())

	// Normalize the accumulator's type: for rune mode it must match the
	// element type (or float64 for []interface{}); for func mode it must
	// match the function's first parameter.
	if isRune {
		if arrValue.Kind() == reflect.Slice && sliceElemType.Kind() == reflect.Interface {
			accValue = accValue.Convert(returnType)
		} else {
			accValue = accValue.Convert(sliceElemType)
		}
	} else {
		accValue = accValue.Convert(reflect.TypeOf(reduceFunc).In(0))
	}

	accType := accValue.Type()

	// Generate reduce function if was passed as rune: build a func(acc, elem)
	// that dispatches to Sum or Product via reflect.MakeFunc.
	if isRune {
		reduceSign := reduceFunc.(int32)

		if ok := map[rune]bool{'+': true, '*': true}[reduceSign]; !ok {
			panic("Invalid reduce sign, allowed: '+' and '*'")
		}

		in := []reflect.Type{accType, sliceElemType}
		out := []reflect.Type{accType}
		funcType := reflect.FuncOf(in, out, false)

		reduceFunc = reflect.MakeFunc(funcType, func(args []reflect.Value) []reflect.Value {
			acc := args[0].Interface()
			elem := args[1].Interface()

			var result float64
			params := []interface{}{acc, elem}

			switch reduceSign {
			case '+':
				result = Sum(params)
			case '*':
				result = Product(params)
			}

			return []reflect.Value{reflect.ValueOf(result).Convert(accType)}
		}).Interface()
	}

	funcValue := reflect.ValueOf(reduceFunc)
	funcType := funcValue.Type()

	// Left fold: acc = f(acc, elem) for each element, converting both sides
	// to the function's parameter types when possible.
	for i := 0; i < arrValue.Len(); i++ {
		if accType.ConvertibleTo(funcType.In(0)) {
			accValue = accValue.Convert(funcType.In(0))
		}

		arrElementValue := arrValue.Index(i)
		if sliceElemType.ConvertibleTo(funcType.In(1)) {
			arrElementValue = arrElementValue.Convert(funcType.In(1))
		}

		result := funcValue.Call([]reflect.Value{accValue, arrElementValue})
		accValue = result[0]
	}

	// Final conversion to float64 — panics if the accumulator type is not
	// convertible to float64.
	resultInterface := accValue.Convert(returnType).Interface()
	return resultInterface.(float64)
}
reduce.go
0.675551
0.507263
reduce.go
starcoder
package network

import (
	"bufio"
	"encoding/csv"
	"fmt"
	"io"
	"neuraldeep/utils"
	"os"
	"strconv"
)

const (
	sizeLine = 1 + 784 // label + image pixels
)

// LoadData unzips and reads the MNIST CSV files, returning the 50,000-row
// training set, the 10,000-row validation set (both carved from the train
// file) and the 10,000-row test set. It merges the roles of the classic
// load_data() / load_data_wrapper() helpers into one via the Input type:
// Input.Label holds both the digit and its 10-dimensional one-hot vector.
//
// Fixes over the previous revision: non-EOF CSV errors are now returned
// instead of being silently ignored (which previously led to an index panic
// on a nil record), and rows beyond the expected counts are ignored instead
// of indexing past the pre-allocated datasets.
func LoadData() (training Dataset, validation Dataset, test Dataset, err error) {
	_, err = utils.Unzip("./data/mnist_train.zip", "./data/loaded/")
	if err != nil {
		return
	}
	trainingFile, err := os.Open("./data/loaded/mnist_train.csv")
	if err != nil {
		return
	}
	defer trainingFile.Close()
	r1 := csv.NewReader(bufio.NewReader(trainingFile))
	sizeTraining := 50_000
	training = make(Dataset, sizeTraining)
	sizeValidation := 10_000
	validation = make(Dataset, sizeValidation)
	line := 0
	for {
		record, e := r1.Read()
		if e == io.EOF {
			break
		}
		if e != nil {
			// Malformed CSV row: fail loudly rather than reading a nil record.
			err = e
			return
		}
		if line >= sizeTraining+sizeValidation {
			// More rows than expected: ignore the surplus instead of panicking.
			break
		}
		input, e := readLine(record, 10)
		if e != nil {
			err = e
			return
		}
		if line < sizeTraining {
			// Training data
			training[line] = &input
		} else {
			// Validation data
			validation[line-sizeTraining] = &input
		}
		line++
	}
	// Test data
	_, err = utils.Unzip("./data/mnist_test.zip", "./data/loaded/")
	if err != nil {
		return
	}
	testFile, err := os.Open("./data/loaded/mnist_test.csv")
	if err != nil {
		return
	}
	defer testFile.Close()
	r2 := csv.NewReader(bufio.NewReader(testFile))
	test = make(Dataset, 10_000)
	line = 0
	for {
		record, e := r2.Read()
		if e == io.EOF {
			break
		}
		if e != nil {
			err = e
			return
		}
		if line >= len(test) {
			break
		}
		input, e := readLine(record, 10)
		if e != nil {
			err = e
			return
		}
		test[line] = &input
		line++
	}
	return
}

// readLine parses one CSV record of sizeLine fields into an Input: field 0 is
// the label (expanded to a size-dimensional vector via ToLabel), the
// remaining 784 fields are pixel values.
func readLine(record []string, size int) (input Input, err error) {
	if len(record) < sizeLine {
		// Short rows previously caused an index-out-of-range panic.
		err = fmt.Errorf("mnist record has %d fields, want %d", len(record), sizeLine)
		return
	}
	data := make([]float64, sizeLine)
	for i := 0; i < sizeLine; i++ {
		d, e := strconv.ParseFloat(record[i], 64)
		if e != nil {
			err = e
			return
		}
		data[i] = d
	}
	input = Input{
		Data:  data[1:],
		Label: ToLabel(data[0], size),
	}
	return
}
network/loader.go
0.606032
0.475484
loader.go
starcoder
package main

import (
	"github.com/go-gl/mathgl/mgl32"
	"math"
)

const (
	// MouseSensitivity scales raw cursor deltas before they feed the camera
	// angles.
	MouseSensitivity = 0.7
)

// Camera is a first-person style camera: two Euler angles plus a position,
// from which the view and projection matrices are derived.
type Camera struct {
	xAngle         float32 // pitch, clamped to [-pi/2, pi/2]
	zAngle         float32 // yaw, wrapped to [0, 2*pi)
	cameraPosition mgl32.Vec3
	windowHandler  *WindowHandler
}

// NewCamera returns a camera at a fixed starting orientation and position.
func NewCamera(windowHandler *WindowHandler) *Camera {
	return &Camera{
		xAngle:         float32(0),
		zAngle:         float32(3),
		cameraPosition: mgl32.Vec3{-50, 256, -50},
		windowHandler:  windowHandler,
	}
}

// GetViewMatrix builds the view matrix from the camera angles and position.
func (c *Camera) GetViewMatrix() mgl32.Mat4 {
	matrix := mgl32.Ident4()
	matrix = matrix.Mul4(mgl32.HomogRotate3DX(c.xAngle - mgl32.DegToRad(90)))
	matrix = matrix.Mul4(mgl32.HomogRotate3DZ(c.zAngle))
	matrix = matrix.Mul4(mgl32.Translate3D(c.cameraPosition.X(), c.cameraPosition.Y(), c.cameraPosition.Z()))
	return matrix
}

// GetPerspectiveMatrix returns the projection matrix for the current window
// aspect ratio.
// NOTE(review): mgl32.Perspective expects fovy in radians; the literal 45.0
// reads like degrees — confirm whether mgl32.DegToRad(45.0) was intended
// before changing, since the current value defines the shipped behavior.
func (c *Camera) GetPerspectiveMatrix() mgl32.Mat4 {
	ratio := float64(windowWidth) / float64(windowHeight)
	return mgl32.Perspective(45.0, float32(ratio), 0.1, 4096.0)
}

// UpdateViewMatrix applies WASD movement and mouse look for the current frame.
func (c *Camera) UpdateViewMatrix() {
	// Move the camera around using WASD keys, scaled by frame time.
	speed := float32(200 * c.windowHandler.getTimeSinceLastFrame())
	dir := []float32{0, 0, 0}
	// NOTE(review): the else-if chain honors only one movement key per frame,
	// so diagonal movement (e.g. W+A) is impossible. If diagonals are wanted
	// these should be independent ifs — confirm intent before changing.
	if c.windowHandler.inputHandler.isActive(PLAYER_FORWARD) {
		dir[2] += speed
	} else if c.windowHandler.inputHandler.isActive(PLAYER_BACKWARD) {
		dir[2] -= speed
	} else if c.windowHandler.inputHandler.isActive(PLAYER_LEFT) {
		dir[0] += speed
	} else if c.windowHandler.inputHandler.isActive(PLAYER_RIGHT) {
		dir[0] -= speed
	}

	// Rotate the movement vector into world space with the inverse of the
	// camera rotation, then translate.
	cameraMatrix := mgl32.Ident4()
	cameraMatrix = cameraMatrix.Mul4(mgl32.HomogRotate3DX(c.xAngle - mgl32.DegToRad(90)))
	cameraMatrix = cameraMatrix.Mul4(mgl32.HomogRotate3DZ(c.zAngle))
	cameraMatrix = cameraMatrix.Inv()
	movementDelta := cameraMatrix.Mul4x1(mgl32.Vec4{dir[0], dir[1], dir[2], 0.0})

	c.cameraPosition = c.cameraPosition.Add(mgl32.Vec3{movementDelta.X(), movementDelta.Y(), movementDelta.Z()})

	// Mouse look: cursor deltas drive yaw (zAngle) and pitch (xAngle).
	offset := c.windowHandler.inputHandler.getCursorChange()
	xOffset := float32(offset[0] * MouseSensitivity)
	yOffset := float32(offset[1] * MouseSensitivity)

	c.zAngle += xOffset * 0.025
	// Wrap yaw into [0, 2*pi); a large delta may need several steps.
	for c.zAngle < 0 {
		c.zAngle += math.Pi * 2
	}
	for c.zAngle >= math.Pi*2 {
		c.zAngle -= math.Pi * 2
	}

	c.xAngle += yOffset * 0.025
	// Clamp pitch to [-pi/2, pi/2]. The previous revision used for-loops
	// here; since the bound is assigned directly, a single if is the correct
	// construct and behaves identically.
	if c.xAngle < -math.Pi*0.5 {
		c.xAngle = -math.Pi * 0.5
	}
	if c.xAngle > math.Pi*0.5 {
		c.xAngle = math.Pi * 0.5
	}
}

// GetCameraPosition returns the negated camera position — the translation
// component the view matrix applies.
func (c *Camera) GetCameraPosition() [3]float32 {
	position := c.cameraPosition
	return [3]float32{-position.X(), -position.Y(), -position.Z()}
}
src/camera.go
0.730674
0.468
camera.go
starcoder
package table

import (
	"fmt"
	"sort"
)

// sortDirection selects ascending or descending order for one sort column.
type sortDirection int

const (
	sortDirectionAsc sortDirection = iota
	sortDirectionDesc
)

// sortColumn pairs a column key with a direction; a []sortColumn describes a
// multi-key sort, ordered least-significant first (see getSortedRows).
type sortColumn struct {
	columnKey string

	direction sortDirection
}

// SortByAsc sets the main sorting column to the given key, in ascending order.
// If a previous sort was used, it is replaced by the given column each time
// this function is called. Values are sorted as numbers if possible, or just
// as simple string comparisons if not numbers.
func (m Model) SortByAsc(columnKey string) Model {
	m.sortOrder = []sortColumn{
		{
			columnKey: columnKey,
			direction: sortDirectionAsc,
		},
	}

	return m
}

// SortByDesc sets the main sorting column to the given key, in descending order.
// If a previous sort was used, it is replaced by the given column each time
// this function is called. Values are sorted as numbers if possible, or just
// as simple string comparisons if not numbers.
func (m Model) SortByDesc(columnKey string) Model {
	m.sortOrder = []sortColumn{
		{
			columnKey: columnKey,
			direction: sortDirectionDesc,
		},
	}

	return m
}

// ThenSortByAsc provides a secondary sort after the first, in ascending order.
// Can be chained multiple times, applying to smaller subgroups each time.
// The new column is prepended so that getSortedRows applies it first and the
// primary column last (the last stable sort wins).
func (m Model) ThenSortByAsc(columnKey string) Model {
	m.sortOrder = append([]sortColumn{
		{
			columnKey: columnKey,
			direction: sortDirectionAsc,
		},
	}, m.sortOrder...)

	return m
}

// ThenSortByDesc provides a secondary sort after the first, in descending order.
// Can be chained multiple times, applying to smaller subgroups each time.
// Prepended for the same reason as ThenSortByAsc.
func (m Model) ThenSortByDesc(columnKey string) Model {
	m.sortOrder = append([]sortColumn{
		{
			columnKey: columnKey,
			direction: sortDirectionDesc,
		},
	}, m.sortOrder...)

	return m
}

// sortableTable adapts a row slice plus one sort column to sort.Interface.
type sortableTable struct {
	rows     []Row
	byColumn sortColumn
}

// Len implements sort.Interface.
func (s *sortableTable) Len() int {
	return len(s.rows)
}

// Swap implements sort.Interface.
func (s *sortableTable) Swap(i, j int) {
	old := s.rows[i]
	s.rows[i] = s.rows[j]
	s.rows[j] = old
}

// extractString returns the cell value at (row i, column) rendered as a
// string, unwrapping StyledCell; missing cells compare as "".
func (s *sortableTable) extractString(i int, column string) string {
	iData, exists := s.rows[i].Data[column]

	if !exists {
		return ""
	}

	switch iData := iData.(type) {
	case StyledCell:
		return fmt.Sprintf("%v", iData.Data)

	case string:
		return iData

	default:
		return fmt.Sprintf("%v", iData)
	}
}

// extractNumber returns the cell value as a float64 and whether the
// conversion succeeded.
func (s *sortableTable) extractNumber(i int, column string) (float64, bool) {
	iData, exists := s.rows[i].Data[column]

	if !exists {
		return 0, false
	}

	return asNumber(iData)
}

// Less implements sort.Interface: numeric comparison when both cells parse as
// numbers, falling back to string comparison otherwise; direction flips the
// result.
func (s *sortableTable) Less(first, second int) bool {
	firstNum, firstNumIsValid := s.extractNumber(first, s.byColumn.columnKey)
	secondNum, secondNumIsValid := s.extractNumber(second, s.byColumn.columnKey)

	if firstNumIsValid && secondNumIsValid {
		if s.byColumn.direction == sortDirectionAsc {
			return firstNum < secondNum
		}

		return firstNum > secondNum
	}

	firstVal := s.extractString(first, s.byColumn.columnKey)
	secondVal := s.extractString(second, s.byColumn.columnKey)

	if s.byColumn.direction == sortDirectionAsc {
		return firstVal < secondVal
	}

	return firstVal > secondVal
}

// getSortedRows applies each sort column in order with a stable sort, so the
// last (primary) column dominates while earlier (secondary) orderings survive
// within equal keys.
// NOTE(review): with an empty sortOrder the input slice is returned as-is
// (aliased), while a non-empty sortOrder returns a copy — callers mutating
// the result should be aware of the asymmetry.
func getSortedRows(sortOrder []sortColumn, rows []Row) []Row {
	var sortedRows []Row
	if len(sortOrder) == 0 {
		sortedRows = rows

		return sortedRows
	}

	sortedRows = make([]Row, len(rows))
	copy(sortedRows, rows)

	for _, byColumn := range sortOrder {
		sorted := &sortableTable{
			rows:     sortedRows,
			byColumn: byColumn,
		}

		sort.Stable(sorted)

		sortedRows = sorted.rows
	}

	return sortedRows
}
table/sort.go
0.749729
0.405331
sort.go
starcoder
package main

import (
	// "fmt"
	ui "github.com/gizak/termui"
	"io/ioutil"
	"strconv"
)

// Block wraps a termui block with its color, a breakable flag used in the
// exported level format, and a focus marker for the editor UI.
type Block struct {
	TermBlock *ui.Block
	Color     ui.Attribute
	Breakable int
	Focused   bool
}

// SetPosition moves the block's top-left corner to (x, y).
func (b *Block) SetPosition(x, y int) {
	b.TermBlock.X = x
	b.TermBlock.Y = y
}

// Render draws the block, reversing the border color when focused.
func (b *Block) Render() {
	if b.Focused {
		b.TermBlock.BorderFg = ui.AttrReverse | b.Color
	} else {
		b.TermBlock.BorderFg = b.Color
	}
	ui.Render(b.TermBlock)
}

// Get serializes the block as one line of the level file:
// "Y X Width Height color-1 1\n".
func (b *Block) Get() string {
	output := strconv.Itoa(b.TermBlock.Y) + " "
	output += strconv.Itoa(b.TermBlock.X) + " "
	output += strconv.Itoa(b.TermBlock.Width) + " "
	output += strconv.Itoa(b.TermBlock.Height) + " "
	output += strconv.Itoa(int(b.Color) - 1)
	output += " 1\n"
	return output
}

// Intersects reports whether the cell (x, y) lies inside the block.
func (b *Block) Intersects(x int, y int) bool {
	return (y >= b.TermBlock.Y && y < (b.TermBlock.Y+b.TermBlock.Height) &&
		x >= b.TermBlock.X && x < (b.TermBlock.X+b.TermBlock.Width))
}

// IntersectsBlock reports whether two blocks' rectangles overlap (AABB test).
func (b *Block) IntersectsBlock(other Block) bool {
	aX1 := b.TermBlock.X
	aX2 := b.TermBlock.X + b.TermBlock.Width
	aY1 := b.TermBlock.Y
	aY2 := b.TermBlock.Y + b.TermBlock.Height
	bX1 := other.TermBlock.X
	bX2 := other.TermBlock.X + other.TermBlock.Width
	bY1 := other.TermBlock.Y
	bY2 := other.TermBlock.Y + other.TermBlock.Height
	xOverlap := ValueInRange(aX1, bX1, bX2) || ValueInRange(bX1, aX1, aX2)
	yOverlap := ValueInRange(aY1, bY1, bY2) || ValueInRange(bY1, aY1, aY2)
	return xOverlap && yOverlap
}

// ValueInRange reports min <= value < max (half-open interval).
func ValueInRange(value, min, max int) bool {
	return ((value >= min) && (value < max))
}

// RenderBlocks draws every block in the slice.
func RenderBlocks(blocks []Block) {
	for _, block := range blocks {
		block.Render()
	}
}

// DumpToFile writes all blocks to level1.txt in the level format, panicking
// on write failure.
func DumpToFile(blocks []Block) {
	var blockAttrs string
	for _, block := range blocks {
		blockAttrs += block.Get()
	}
	err := ioutil.WriteFile("level1.txt", []byte(blockAttrs), 0644)
	if err != nil {
		panic(err)
	}
}

// Button is a toggleable labeled control in the sidebar; when inactive its
// border is drawn white.
type Button struct {
	Block  *ui.Par
	Color  ui.Attribute
	Active bool
}

// Toggle flips the active state.
func (b *Button) Toggle() {
	b.Active = !b.Active
}

// Render draws the button, coloring the border only when active.
func (b *Button) Render() {
	if b.Active {
		b.Block.BorderFg = b.Color
	} else {
		b.Block.BorderFg = ui.ColorWhite
	}
	ui.Render(b.Block)
}

// Init lazily constructs the underlying termui Par at the given geometry; a
// second call on an initialized button is a no-op.
func (b *Button) Init(x, y, height, width int, color ui.Attribute, colorName string) {
	if b.Block == nil {
		b.Block = ui.NewPar(colorName)
		b.Block.X = x
		b.Block.Y = y
		b.Block.Height = height
		b.Block.Width = width
		b.Block.BorderFg = color
		b.Color = color
	}
}

// RenderButtons draws every button in the slice.
func RenderButtons(blocks []Button) {
	for _, block := range blocks {
		block.Render()
	}
}

// ActivateButton makes exactly one of the first seven (color) buttons active.
func ActivateButton(buttons []Button, index int) {
	for i := 0; i < 7; i++ {
		if i == index {
			buttons[i].Active = true
		} else {
			buttons[i].Active = false
		}
	}
}

// FindIntersecting returns the index of the first block containing the mouse
// cell, or (-1, false).
func FindIntersecting(blocks []Block, mouseX, mouseY int) (index int, found bool) {
	for i, block := range blocks {
		if block.Intersects(mouseX, mouseY) {
			return i, true
		}
	}
	return -1, false
}

// FindIntersectingBlock returns the index of the first block overlapping
// newBlock, or (-1, false).
func FindIntersectingBlock(blocks []Block, newBlock Block) (index int, found bool) {
	for i, block := range blocks {
		if block.IntersectsBlock(newBlock) {
			return i, true
		}
	}
	return -1, false
}

// main runs a termui-based level editor: left pane is the workspace, right
// pane holds color/mode buttons. Modes: new-block placement vs move/delete,
// toggled with 't'; digits 1-7 pick a color; Enter exports the level; q quits.
func main() {
	if err := ui.Init(); err != nil {
		panic(err)
	}
	defer ui.Close()
	workspace := ui.NewPar("")
	workspace.Height = ui.TermHeight()
	workspace.BorderLabel = "Workspace"
	workspace.BorderLabelFg = ui.ColorWhite
	controls := ui.NewPar("")
	controls.Height = ui.TermHeight()
	controls.BorderLabel = "Controls"
	controls.BorderLabelFg = ui.ColorWhite
	blocks := make([]Block, 0, 100)
	// Buttons 0-6 pick colors; 7 and 8 display the current mode.
	buttons := make([]Button, 9, 9)
	buttons[0].Init(ui.TermWidth()-22, 18, 3, 15, ui.ColorRed, " Red ")
	buttons[1].Init(ui.TermWidth()-22, 21, 3, 15, ui.ColorGreen, " Green ")
	buttons[2].Init(ui.TermWidth()-22, 24, 3, 15, ui.ColorYellow, " Yellow ")
	buttons[3].Init(ui.TermWidth()-22, 27, 3, 15, ui.ColorBlue, " Blue ")
	buttons[4].Init(ui.TermWidth()-22, 30, 3, 15, ui.ColorMagenta, " Magenta ")
	buttons[5].Init(ui.TermWidth()-22, 33, 3, 15, ui.ColorCyan, " Cyan ")
	buttons[6].Init(ui.TermWidth()-22, 36, 3, 15, ui.ColorWhite|ui.AttrBold, " White ")
	buttons[7].Init(ui.TermWidth()-22, 10, 3, 15, ui.ColorRed, " New Block ")
	buttons[8].Init(ui.TermWidth()-22, 13, 3, 15, ui.ColorRed, " Move or Del ")
	buttons[0].Active = true
	buttons[7].Active = true
	newMode := true
	moveMode := false
	var selectedBlock *Block
	// NOTE(review): selectedIndex starts at its zero value 0, not -1; pressing
	// <Delete> before any selection passes the `>= 0` guard and would panic on
	// an empty blocks slice — confirm and consider initializing to -1.
	var selectedIndex int
	selectedColor := ui.ColorRed
	ui.Body.AddRows(
		ui.NewRow(
			ui.NewCol(9, 0, workspace),
			ui.NewCol(3, 0, controls)))
	ui.Body.Align()
	ui.Render(ui.Body)
	RenderButtons(buttons)
	// Mouse click: place a new block (newMode) or select/move one (moveMode),
	// then repaint everything.
	ui.Handle("<MouseLeft>", func(e ui.Event) {
		mouseX := e.Payload.(ui.Mouse).X
		mouseY := e.Payload.(ui.Mouse).Y
		if newMode {
			newBlock := ui.NewBlock()
			newBlock.Height = 3
			newBlock.Width = 6
			newBlock.X = mouseX
			newBlock.Y = mouseY
			newBBlock := Block{
				newBlock,
				selectedColor,
				1,
				false,
			}
			// Only place the block if it does not overlap an existing one.
			_, found := FindIntersectingBlock(blocks, newBBlock)
			if !found {
				blocks = append(blocks, newBBlock)
			}
		} else if moveMode {
			foundIndex, found := FindIntersecting(blocks, mouseX, mouseY)
			if found {
				// Click on a block: focus it (unfocusing the previous one).
				if selectedBlock != nil {
					selectedBlock.Focused = false
				}
				blocks[foundIndex].Focused = true
				selectedBlock = &blocks[foundIndex]
				selectedIndex = foundIndex
			} else if selectedBlock != nil {
				// Click on empty space with a selection: move it there.
				selectedBlock.SetPosition(mouseX, mouseY)
			}
		}
		ui.Clear()
		ui.Render(ui.Body)
		RenderButtons(buttons)
		RenderBlocks(blocks)
	})
	// 't' toggles between new-block mode and move/delete mode, dropping any
	// current selection.
	ui.Handle("t", func(ui.Event) {
		newMode = !newMode
		moveMode = !moveMode
		buttons[7].Toggle()
		buttons[8].Toggle()
		if selectedBlock != nil {
			selectedBlock.Focused = false
			selectedBlock = nil
			selectedIndex = -1
		}
	})
	// <Delete> removes the selected block.
	ui.Handle("<Delete>", func(ui.Event) {
		if selectedIndex >= 0 {
			blocks = append(blocks[:selectedIndex], blocks[selectedIndex+1:]...)
			selectedIndex = -1
			selectedBlock = nil
		}
	})
	// Digits 1-7 select a color; a focused block is recolored immediately.
	ui.Handle("<Keyboard>", func(e ui.Event) {
		i, err := strconv.Atoi(e.ID)
		if err == nil && i >= 1 && i <= 7 {
			selectedColor = buttons[i-1].Color
			ActivateButton(buttons, i-1)
			if selectedBlock != nil {
				selectedBlock.Color = buttons[i-1].Color
			}
		}
		ui.Clear()
		ui.Render(ui.Body)
		RenderButtons(buttons)
		RenderBlocks(blocks)
	})
	ui.Handle("q", func(ui.Event) {
		ui.StopLoop()
	})
	// <Enter> exports the current layout to level1.txt.
	ui.Handle("<Enter>", func(ui.Event) {
		DumpToFile(blocks)
	})
	ui.Loop()
}
main.go
0.602646
0.408454
main.go
starcoder
package primitives

import (
	"github.com/alexandreLamarre/Golang-Ray-Tracing-Renderer/pkg/algebra"
	"github.com/alexandreLamarre/Golang-Ray-Tracing-Renderer/pkg/canvas"
	"math"
)

// Sphere is a 3D sphere primitive. It is stored as a unit sphere at the
// origin; its world placement and size come from the transform matrix.
// NOTE(review): the radius field is always 1 and is not consulted by
// LocalIntersect (which hardcodes the unit-sphere equation) — scaling must be
// expressed through the transform.
type Sphere struct {
	parent    Shape
	origin    *algebra.Vector
	radius    float64
	transform *algebra.Matrix
	material  *canvas.Material
}

// NewSphere creates a new Sphere datatype at origin 0,0,0 with unit radius
// and no ray intersections. A nil or non-4x4 matrix falls back to the 4x4
// identity transform.
func NewSphere(m *algebra.Matrix) *Sphere {
	mat := m
	if m == nil || len(m.Get()) != 4 || len(m.Get()[0]) != 4 {
		mat = algebra.IdentityMatrix(4)
	}
	return &Sphere{origin: algebra.NewPoint(0, 0, 0), radius: 1.0, transform: mat, material: canvas.NewDefaultMaterial(), parent: nil}
}

// NewGlassSphere creates a unit sphere with a fully transparent material of
// the given refractive index (useful for refraction tests). The same nil /
// shape fallback as NewSphere applies to the transform.
func NewGlassSphere(m *algebra.Matrix, refractiveIndex float64) *Sphere {
	matrix := m
	if m == nil || len(m.Get()) != 4 || len(m.Get()[0]) != 4 {
		matrix = algebra.IdentityMatrix(4)
	}
	material := canvas.NewDefaultMaterial()
	material.Transparency = 1.0
	material.RefractiveIndex = refractiveIndex
	return &Sphere{origin: algebra.NewPoint(0, 0, 0), radius: 1.0, transform: matrix, material: material, parent: nil}
}

// Sphere interface Shape Methods

// GetPosition returns the sphere's local-space center (always the origin).
func (s *Sphere) GetPosition() *algebra.Vector {
	return s.origin
}

// SetTransform sets the Sphere's transformation; panics if m is not 4x4.
func (s *Sphere) SetTransform(m *algebra.Matrix) {
	if len(m.Get()) != 4 || len(m.Get()[0]) != 4 {
		panic(algebra.ExpectedDimension(4))
	}
	s.transform = m
}

// GetTransform returns the sphere's transformation matrix.
func (s *Sphere) GetTransform() *algebra.Matrix {
	return s.transform
}

// SetMaterial sets the Sphere's material
func (s *Sphere) SetMaterial(m *canvas.Material) {
	s.material = m
}

// GetMaterial returns the spheres Material
func (s *Sphere) GetMaterial() *canvas.Material {
	return s.material
}

// SetParent Setter for parent shape
func (s *Sphere) SetParent(shape Shape) {
	s.parent = shape
}

// GetParent Getter for parent shape
func (s *Sphere) GetParent() Shape {
	return s.parent
}

// GetBounds returns the axis-aligned bounding box of the unit sphere in
// local space: corners (-1,-1,-1) and (1,1,1).
func (s *Sphere) GetBounds() (*algebra.Vector, *algebra.Vector) {
	return algebra.NewPoint(-1, -1, -1), algebra.NewPoint(1, 1, 1)
}

// LocalNormalAt returns the local-space normal at "point": for a unit sphere
// at the origin this is simply point - origin. The hit parameter is unused.
func (s *Sphere) LocalNormalAt(point *algebra.Vector, hit *Intersection) (*algebra.Vector, error) {
	sphereNormal, err := point.Subtract(algebra.NewPoint(0, 0, 0))
	return sphereNormal, err
}

// LocalIntersect solves the quadratic for a ray against the unit sphere and
// returns the two intersection t-values (possibly equal, for a tangent hit)
// or an empty slice and false when the ray misses. Panics on a degenerate
// (zero-direction) ray or on algebra errors.
func (s *Sphere) LocalIntersect(r *algebra.Ray) ([]*Intersection, bool) {
	got := r.Get()
	origin := got["origin"]
	direction := got["direction"]
	sphereToRay, err := origin.Subtract(s.GetPosition())
	if err != nil {
		panic(err)
	}
	// Quadratic coefficients: a = d.d, b = 2 d.(o-c), c = |o-c|^2 - 1
	a, err := algebra.DotProduct(direction, direction)
	if err != nil {
		panic(err)
	}
	if a == 0 {
		// Zero-length direction would divide by zero below.
		panic(algebra.ZeroDivide(0))
	}
	b, err := algebra.DotProduct(direction, sphereToRay)
	if err != nil {
		panic(err)
	}
	b = 2 * b
	c, err := algebra.DotProduct(sphereToRay, sphereToRay)
	if err != nil {
		panic(err)
	}
	// The -1 here is radius squared for the unit sphere.
	c = c - 1
	discriminant := math.Pow(b, 2) - (4 * a * c)
	if discriminant < 0 { // No rays intersect the sphere
		return []*Intersection{}, false
	}
	t1 := (-b - math.Sqrt(discriminant)) / (2 * a)
	t2 := (-b + math.Sqrt(discriminant)) / (2 * a)
	return []*Intersection{NewIntersection(s, t1), NewIntersection(s, t2)}, true
}
pkg/geometry/primitives/sphere.go
0.854415
0.528229
sphere.go
starcoder
package sparse

import (
	"math"

	"github.com/gonum/floats"
	"gonum.org/v1/gonum/mat"
)

// Cholesky shadows the gonum mat.Cholesky type.
type Cholesky struct {
	// internal representation is CSR in lower triangular form
	chol *CSR
	// some operations use a columnar version
	cholc *CSC
	cond  float64
}

// Dims of the matrix
func (ch *Cholesky) Dims() (r, c int) {
	return ch.chol.Dims()
}

// Symmetric of matrix
func (ch *Cholesky) Symmetric() int {
	r, _ := ch.Dims()
	return r
}

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// At from the matrix
func (ch *Cholesky) At(i, j int) float64 {
	ri := ch.chol.RowView(i).(*Vector)
	rj := ch.chol.RowView(j).(*Vector)
	// FIXME: check types
	// Only entries up to and including min(i,j) can contribute for a
	// lower-triangular factor.
	return dotSparseSparseNoSortBefore(ri, rj, min(i, j)+1)
}

// T is the same as symmetric
func (ch *Cholesky) T() mat.Matrix {
	return ch
}

// newCSR builds an empty r-by-c CSR via COO.
func newCSR(r, c int) *CSR {
	// FIXME: creating a CSR directly leads to panics
	coo := NewCOO(r, c, nil, nil, nil)
	return coo.ToCSR()
}

// newCSC builds an empty r-by-c CSC via COO.
func newCSC(r, c int) *CSC {
	// FIXME: creating a CSC directly leads to panics
	coo := NewCOO(r, c, nil, nil, nil)
	return coo.ToCSC()
}

// Det returns the determinant of the factored matrix
func (ch *Cholesky) Det() float64 {
	return math.Exp(ch.LogDet())
}

// LogDet returns ln(determinant) of the factored matrix
func (ch *Cholesky) LogDet() float64 {
	det := 0.0
	for i := 0; i < ch.Symmetric(); i++ {
		// det(A) = det(L)^2 = prod(diag(L))^2, so ln det = 2*sum(ln diag).
		det += 2 * math.Log(ch.chol.At(i, i))
	}
	return det
}

// Factorize a CSR
// the CSR must be symmetric positive-definite or this won't work
// FIXME: enforce sym positive definite
func (ch *Cholesky) Factorize(a *CSR) {
	r, c := a.Dims()
	if r != c {
		panic(mat.ErrShape)
	}
	ch.chol = newCSR(r, c)
	cholCSR(a, ch.chol)
}

// LTo returns the factored matrix in lower-triangular form as a CSR
func (ch *Cholesky) LTo(dst *CSR) {
	r, c := ch.chol.Dims()
	rDst, cDst := dst.Dims()
	if r != rDst || c != cDst {
		panic(mat.ErrShape)
	}
	ch.chol.DoNonZero(func(i, j int, v float64) {
		dst.Set(i, j, v)
	})
}

// buildCholC lazily materializes the columnar (CSC) copy of the factor,
// needed for scanning down columns during back-substitution.
func (ch *Cholesky) buildCholC() {
	if ch.cholc == nil {
		r := ch.Symmetric()
		ch.cholc = newCSC(r, r)
		ch.chol.DoNonZero(func(i, j int, v float64) {
			ch.cholc.Set(i, j, v)
		})
	}
}

// SolveVecTo shadows Cholesky.SolveVecTo
// dst is Dense as this doesn't make any sense with sparse solutions
func (ch *Cholesky) SolveVecTo(dst *mat.VecDense, b mat.Vector) error {
	r := ch.Symmetric()
	dstLen := dst.Len()
	if r != dstLen {
		panic(mat.ErrShape)
	}
	// we are going to need to scan down columns too
	ch.buildCholC()
	// textbook setup and approach:
	// Ax=b
	// LLtx=b
	// L is ch.chol
	// forward substitute
	// Ly=b
	y := mat.NewVecDense(r, nil)
	for i := 0; i < r; i++ {
		denom := ch.chol.At(i, i)
		k := b.AtVec(i)
		sum := 0.0
		ch.chol.DoRowNonZero(i, func(x, z int, v float64) {
			if z < i {
				sum += y.AtVec(z) * v
			}
		})
		y.SetVec(i, (k-sum)/denom)
	}
	// backward substitute
	// Lt x=y
	for i := r - 1; i >= 0; i-- {
		denom := ch.chol.At(i, i)
		k := y.AtVec(i)
		sum := 0.0
		ch.cholc.DoColNonZero(i, func(x, z int, v float64) {
			if x > i {
				sum += dst.AtVec(x) * v
			}
		})
		dst.SetVec(i, (k-sum)/denom)
	}
	return nil
}

// SolveTo goes column-by-column and applies SolveVecTo
func (ch *Cholesky) SolveTo(dst *mat.Dense, b mat.Matrix) error {
	rows, cols := b.Dims()
	n := ch.Symmetric()
	if dst.IsEmpty() {
		dst.ReuseAs(n, cols)
	}
	bv, bHasColView := b.(mat.ColViewer)
	for c := 0; c < cols; c++ {
		dstView := dst.ColView(c).(*mat.VecDense)
		var cv mat.Vector
		if bHasColView {
			cv = bv.ColView(c)
		} else {
			// BUG FIX: previously a freshly-allocated zero vector was passed
			// here, so matrices without a ColView were solved against Ax=0
			// instead of b's column. Copy the column explicitly.
			col := mat.NewVecDense(rows, nil)
			for r := 0; r < rows; r++ {
				col.SetVec(r, b.At(r, c))
			}
			cv = col
		}
		// BUG FIX: the error return was previously discarded.
		if err := ch.SolveVecTo(dstView, cv); err != nil {
			return err
		}
	}
	return nil
}

// basic textbook "dot product" algo, here for comparison against the
// sparse version
func cholSimple(matrix mat.Matrix, lower *mat.TriDense) {
	r, _ := matrix.Dims()
	for i := 0; i < r; i++ {
		for j := 0; j <= i; j++ {
			var sum float64
			if i == j {
				for k := 0; k < j; k++ {
					sum += math.Pow(lower.At(j, k), 2)
				}
				lower.SetTri(j, j, math.Sqrt(matrix.At(j, j)-sum))
			} else {
				for k := 0; k < j; k++ {
					sum += lower.At(i, k) * lower.At(j, k)
				}
				lower.SetTri(i, j, (matrix.At(i, j)-sum)/lower.At(j, j))
			}
		}
	}
}

// the core sparse factoring algo
// this is simply the textbook "dot product" algo using a sparse dot
func cholCSR(matrix *CSR, lower *CSR) {
	r, _ := matrix.Dims()
	for i := 0; i < r; i++ {
		if matrix.RowNNZ(i) == 0 {
			continue
		}
		for j := 0; j <= i; j++ {
			iRow := lower.RowView(i)
			iRowS, iRowIsSparse := iRow.(*Vector)
			jRow := lower.RowView(j)
			jRowS, jRowIsSparse := jRow.(*Vector)
			if !iRowIsSparse || !jRowIsSparse {
				panic(mat.ErrShape)
			}
			if i == j {
				// Row j of lower holds only entries left of the diagonal at
				// this point, so a plain dense dot over its stored data works.
				sum := floats.Dot(jRowS.data, jRowS.data)
				if sum == 0.0 && matrix.At(i, i) == 0.0 {
					continue
				}
				lower.Set(j, j, math.Sqrt(matrix.At(i, i)-sum))
			} else {
				rowDotSum := dotSparseSparseNoSort(iRowS, jRowS)
				if rowDotSum == 0.0 && matrix.At(i, j) == 0.0 {
					continue
				}
				lower.Set(i, j, (matrix.At(i, j)-rowDotSum)/lower.At(j, j))
			}
		}
	}
}
cholesky.go
0.699049
0.460471
cholesky.go
starcoder
package cucumberexpressions

import (
	"fmt"
	"reflect"
	"regexp"
	"strings"
)

// escapeRegexp matches each regexp metacharacter that must be backslash-escaped
// when literal expression text is embedded into the generated pattern.
var escapeRegexp = regexp.MustCompile(`([\\^\[({$.|?*+})\]])`)

// CucumberExpression compiles a cucumber-expression source string into a
// regular expression (wrapped in a TreeRegexp) and records the parameter types
// encountered, in order, while rewriting.
type CucumberExpression struct {
	source                string
	parameterTypes        []*ParameterType
	treeRegexp            *TreeRegexp
	parameterTypeRegistry *ParameterTypeRegistry
}

// NewCucumberExpression parses the expression, rewrites its AST into a regexp
// pattern and returns the compiled Expression. Errors surface from parsing or
// from rewriting (e.g. undefined parameter types, empty optionals).
func NewCucumberExpression(expression string, parameterTypeRegistry *ParameterTypeRegistry) (Expression, error) {
	result := &CucumberExpression{source: expression, parameterTypeRegistry: parameterTypeRegistry}
	ast, err := parse(expression)
	if err != nil {
		return nil, err
	}
	pattern, err := result.rewriteNodeToRegex(ast)
	if err != nil {
		return nil, err
	}
	result.treeRegexp = NewTreeRegexp(regexp.MustCompile(pattern))
	return result, nil
}

// rewriteNodeToRegex dispatches on the AST node type and returns the regexp
// fragment for that node.
func (c *CucumberExpression) rewriteNodeToRegex(node node) (string, error) {
	switch node.NodeType {
	case textNode:
		return c.processEscapes(node.Token), nil
	case optionalNode:
		return c.rewriteOptional(node)
	case alternationNode:
		return c.rewriteAlternation(node)
	case alternativeNode:
		return c.rewriteAlternative(node)
	case parameterNode:
		return c.rewriteParameter(node)
	case expressionNode:
		return c.rewriteExpression(node)
	default:
		// Can't happen as long as the switch case is exhaustive
		return "", NewCucumberExpressionError(fmt.Sprintf("Could not rewrite %s", c.source))
	}
}

// processEscapes backslash-escapes regexp metacharacters in literal text.
func (c *CucumberExpression) processEscapes(expression string) string {
	return escapeRegexp.ReplaceAllString(expression, `\$1`)
}

// rewriteOptional turns an optional node into a non-capturing "(?:...)?" group,
// first validating that it contains no parameters, no nested optionals, and is
// not empty.
func (c *CucumberExpression) rewriteOptional(node node) (string, error) {
	err := c.assertNoParameters(node, c.createParameterIsNotAllowedInOptional())
	if err != nil {
		return "", err
	}
	err = c.assertNoOptionals(node, c.createOptionalIsNotAllowedInOptional())
	if err != nil {
		return "", err
	}
	err = c.assertNotEmpty(node, c.createOptionalMayNotBeEmpty())
	if err != nil {
		return "", err
	}
	return c.rewriteNodesToRegex(node.Nodes, "", "(?:", ")?")
}

// createParameterIsNotAllowedInOptional adapts the package-level error
// constructor into the single-argument callback shape used by the assert helpers.
func (c *CucumberExpression) createParameterIsNotAllowedInOptional() func(node) error {
	return func(node node) error {
		return createParameterIsNotAllowedInOptional(node, c.source)
	}
}

// createOptionalIsNotAllowedInOptional adapts the corresponding error
// constructor into the assert-helper callback shape.
func (c *CucumberExpression) createOptionalIsNotAllowedInOptional() func(node) error {
	return func(node node) error {
		return createOptionalIsNotAllowedInOptional(node, c.source)
	}
}

// createOptionalMayNotBeEmpty adapts the corresponding error constructor into
// the assert-helper callback shape.
func (c *CucumberExpression) createOptionalMayNotBeEmpty() func(node) error {
	return func(node node) error {
		return createOptionalMayNotBeEmpty(node, c.source)
	}
}

// rewriteAlternation validates each alternative, then joins them with "|" in a
// non-capturing group.
func (c *CucumberExpression) rewriteAlternation(node node) (string, error) {
	// Make sure the alternative parts aren't empty and don't contain parameter types
	for _, alternative := range node.Nodes {
		if len(alternative.Nodes) == 0 {
			return "", createAlternativeMayNotBeEmpty(alternative, c.source)
		}
		err := c.assertNotEmpty(alternative, c.createAlternativeMayNotExclusivelyContainOptionals())
		if err != nil {
			return "", err
		}
	}
	return c.rewriteNodesToRegex(node.Nodes, "|", "(?:", ")")
}

// createAlternativeMayNotExclusivelyContainOptionals adapts the corresponding
// error constructor into the assert-helper callback shape.
func (c *CucumberExpression) createAlternativeMayNotExclusivelyContainOptionals() func(node) error {
	return func(node node) error {
		return createAlternativeMayNotExclusivelyContainOptionals(node, c.source)
	}
}

// rewriteAlternative concatenates the child fragments with no delimiter.
func (c *CucumberExpression) rewriteAlternative(node node) (string, error) {
	return c.rewriteNodesToRegex(node.Nodes, "", "", "")
}

// rewriteParameter resolves the parameter type by name and emits one capture
// group for it. Multiple underlying regexps become "(?:a)|(?:b)" inside the
// single capture group. As a side effect the resolved parameter type is
// appended to c.parameterTypes (in match order).
func (c *CucumberExpression) rewriteParameter(node node) (string, error) {
	buildCaptureRegexp := func(regexps []*regexp.Regexp) string {
		if len(regexps) == 1 {
			return fmt.Sprintf("(%s)", regexps[0].String())
		}
		captureGroups := make([]string, len(regexps))
		for i, r := range regexps {
			captureGroups[i] = fmt.Sprintf("(?:%s)", r.String())
		}
		return fmt.Sprintf("(%s)", strings.Join(captureGroups, "|"))
	}

	typeName := node.text()
	parameterType := c.parameterTypeRegistry.LookupByTypeName(typeName)
	if parameterType == nil {
		return "", createUndefinedParameterType(node, c.source, typeName)
	}
	c.parameterTypes = append(c.parameterTypes, parameterType)
	return buildCaptureRegexp(parameterType.regexps), nil
}

// rewriteExpression anchors the whole expression with ^...$.
func (c *CucumberExpression) rewriteExpression(node node) (string, error) {
	return c.rewriteNodesToRegex(node.Nodes, "", "^", "$")
}

// rewriteNodesToRegex rewrites each child node and joins the fragments with
// delimiter, surrounded by prefix/suffix. Returns the first child error, if any.
func (c *CucumberExpression) rewriteNodesToRegex(nodes []node, delimiter string, prefix string, suffix string) (string, error) {
	builder := strings.Builder{}
	builder.WriteString(prefix)
	for i, node := range nodes {
		if i > 0 {
			builder.WriteString(delimiter)
		}
		s, err := c.rewriteNodeToRegex(node)
		if err != nil {
			return s, err
		}
		builder.WriteString(s)
	}
	builder.WriteString(suffix)
	return builder.String(), nil
}

// assertNotEmpty succeeds if at least one direct child is a text node,
// otherwise returns the supplied error.
func (c *CucumberExpression) assertNotEmpty(node node, createNodeWasNotEmptyError func(node) error) error {
	for _, node := range node.Nodes {
		if node.NodeType == textNode {
			return nil
		}
	}
	return createNodeWasNotEmptyError(node)
}

// assertNoParameters fails with the supplied error if any direct child is a
// parameter node.
func (c *CucumberExpression) assertNoParameters(node node, createParameterIsNotAllowedInOptionalError func(node) error) error {
	for _, node := range node.Nodes {
		if node.NodeType == parameterNode {
			return createParameterIsNotAllowedInOptionalError(node)
		}
	}
	return nil
}

// assertNoOptionals fails with the supplied error if any direct child is an
// optional node.
func (c *CucumberExpression) assertNoOptionals(node node, createOptionalIsNotAllowedInOptionalError func(node) error) error {
	for _, node := range node.Nodes {
		if node.NodeType == optionalNode {
			return createOptionalIsNotAllowedInOptionalError(node)
		}
	}
	return nil
}

// Match runs the compiled regexp against text and builds typed Arguments.
// Anonymous parameter types are de-anonymized using the positional typeHints
// (defaulting to string when no hint is given for that position). The receiver's
// parameterTypes slice is copied so de-anonymization does not mutate state.
func (c *CucumberExpression) Match(text string, typeHints ...reflect.Type) ([]*Argument, error) {
	hintOrDefault := func(i int, typeHints ...reflect.Type) reflect.Type {
		typeHint := reflect.TypeOf("")
		if i < len(typeHints) {
			typeHint = typeHints[i]
		}
		return typeHint
	}

	parameterTypes := make([]*ParameterType, len(c.parameterTypes))
	copy(parameterTypes, c.parameterTypes)
	for i := 0; i < len(parameterTypes); i++ {
		if parameterTypes[i].isAnonymous() {
			typeHint := hintOrDefault(i, typeHints...)
			parameterType, err := parameterTypes[i].deAnonymize(typeHint, c.objectMapperTransformer(typeHint))
			if err != nil {
				return nil, err
			}
			parameterTypes[i] = parameterType
		}
	}
	return BuildArguments(c.treeRegexp, text, parameterTypes), nil
}

// Regexp returns the underlying compiled regular expression.
func (c *CucumberExpression) Regexp() *regexp.Regexp {
	return c.treeRegexp.Regexp()
}

// Source returns the original cucumber-expression string.
func (c *CucumberExpression) Source() string {
	return c.source
}

// objectMapperTransformer wraps the registry's default transformer for a given
// target type. NOTE(review): it only reads args[0] and panics on transform
// failure — confirm callers never pass zero args.
func (c *CucumberExpression) objectMapperTransformer(typeHint reflect.Type) func(args ...*string) interface{} {
	return func(args ...*string) interface{} {
		i, err := c.parameterTypeRegistry.defaultTransformer.Transform(*args[0], typeHint)
		if err != nil {
			panic(err)
		}
		return i
	}
}
cucumber-expressions/go/cucumber_expression.go
0.636805
0.428712
cucumber_expression.go
starcoder
package graph

import (
	"fmt"
	"strings"

	"github.com/gonum/graph/concrete"
)

// FilterPackages receives a graph and a set of packagePrefixes contained within the graph.
// Returns a new graph with the sub-tree for each node matching the packagePrefix collapsed
// into just that node. Relationships among packagePrefixes are kept: edges originating from
// packagePrefix subpackages are re-written to originate from the packagePrefix, and edges
// terminating at packagePrefix subpackages are re-written to terminate at the packagePrefix.
func FilterPackages(g *MutableDirectedGraph, packagePrefixes []string) (*MutableDirectedGraph, error) {
	filtered := NewMutableDirectedGraph(g.rootNodeNames)

	// Pass 1: create exactly one node per collapsed name.
	for _, raw := range g.Nodes() {
		origNode, ok := raw.(*Node)
		if !ok {
			continue
		}
		name := getFilteredNodeName(packagePrefixes, origNode.UniqueName)
		if _, found := filtered.NodeByName(name); found {
			continue
		}
		if err := filtered.AddNode(&Node{
			UniqueName: name,
			Id:         raw.ID(),
			LabelName:  labelNameForNode(name),
		}); err != nil {
			return nil, err
		}
	}

	// Pass 2: re-anchor every edge onto the collapsed endpoints, skipping
	// self-edges produced by the collapse and de-duplicating.
	for _, rawFrom := range g.Nodes() {
		srcNode, ok := rawFrom.(*Node)
		if !ok {
			return nil, fmt.Errorf("expected nodes in graph to be of type *Node")
		}
		srcName := getFilteredNodeName(packagePrefixes, srcNode.UniqueName)
		src, found := filtered.NodeByName(srcName)
		if !found {
			return nil, fmt.Errorf("expected node with name %q to exist in collapsed graph", srcName)
		}

		for _, rawTo := range g.From(rawFrom) {
			dstNode, ok := rawTo.(*Node)
			if !ok {
				return nil, fmt.Errorf("expected nodes in graph to be of type *Node")
			}
			dstName := getFilteredNodeName(packagePrefixes, dstNode.UniqueName)
			if srcName == dstName {
				continue
			}
			dst, found := filtered.NodeByName(dstName)
			if !found {
				return nil, fmt.Errorf("expected node with name %q to exist in collapsed graph", dstName)
			}
			if filtered.HasEdgeFromTo(src, dst) {
				continue
			}
			filtered.SetEdge(concrete.Edge{F: src, T: dst}, 0)
		}
	}

	return filtered, nil
}

// getFilteredNodeName maps packageName to the first collapsed prefix it falls
// under, or returns packageName unchanged when no prefix matches.
func getFilteredNodeName(collapsedPrefixes []string, packageName string) string {
	for _, candidate := range collapsedPrefixes {
		if strings.HasPrefix(packageName, candidate) {
			return candidate
		}
	}
	return packageName
}
kubernetes-model/vendor/github.com/openshift/origin/tools/depcheck/pkg/graph/filter.go
0.723895
0.427038
filter.go
starcoder
package timetogo

import (
	"fmt"
	"time"

	"github.com/dsoprea/go-logging"
	"github.com/google/flatbuffers/go"

	"github.com/dsoprea/time-to-go/protocol/ttgstream"
)

var (
	streamLogger1 = log.NewLogger("timetogo.stream_protocol_1")
)

// StreamIndexedSequenceInfo1 briefly describes all series.
type StreamIndexedSequenceInfo1 struct {
	// uuid uniquely identifies the series
	uuid string

	// headRecordTime is the timestamp of the first record
	headRecordTime time.Time

	// tailRecordTime is the timestamp of the last record
	tailRecordTime time.Time

	// absolutePosition is the absolute position of the boundary marker (NUL)
	absolutePosition int64
}

// NewStreamIndexedSequenceInfo1 returns a sequence-info structure. Both
// timestamps are normalized to UTC.
func NewStreamIndexedSequenceInfo1(uuid string, headRecordTime, tailRecordTime time.Time, absolutePosition int64) *StreamIndexedSequenceInfo1 {
	return &StreamIndexedSequenceInfo1{
		uuid:             uuid,
		headRecordTime:   headRecordTime.UTC(),
		tailRecordTime:   tailRecordTime.UTC(),
		absolutePosition: absolutePosition,
	}
}

// NewStreamIndexedSequenceInfo1WithSeriesFooter returns a summary
// `StreamIndexedSequenceInfo1` struct representing the given
// `SeriesFooter`-compatible struct.
func NewStreamIndexedSequenceInfo1WithSeriesFooter(seriesFooter SeriesFooter, absolutePosition int64) *StreamIndexedSequenceInfo1 {
	return &StreamIndexedSequenceInfo1{
		uuid:             seriesFooter.Uuid(),
		headRecordTime:   seriesFooter.HeadRecordTime().UTC(),
		tailRecordTime:   seriesFooter.TailRecordTime().UTC(),
		absolutePosition: absolutePosition,
	}
}

// Uuid returns the unique identifier of the series.
// (Previous comment incorrectly described this as a timestamp.)
func (sisi StreamIndexedSequenceInfo1) Uuid() string {
	return sisi.uuid
}

// HeadRecordTime is the timestamp of the first record
func (sisi StreamIndexedSequenceInfo1) HeadRecordTime() time.Time {
	return sisi.headRecordTime
}

// TailRecordTime is the timestamp of the last record
func (sisi StreamIndexedSequenceInfo1) TailRecordTime() time.Time {
	return sisi.tailRecordTime
}

// AbsolutePosition is the absolute position of the boundary marker (NUL)
func (sisi StreamIndexedSequenceInfo1) AbsolutePosition() int64 {
	return sisi.absolutePosition
}

// String renders a compact single-line summary for logging.
// NOTE(review): the format string appears to be missing a closing '>' after
// "POSITION=(%d)" — confirm and fix in a code change (not altered here).
func (sisi StreamIndexedSequenceInfo1) String() string {
	return fmt.Sprintf("StreamIndexedSequenceInfo1<UUID=[%s] HEAD=[%s] TAIL=[%s] POSITION=(%d)", sisi.uuid, sisi.headRecordTime, sisi.tailRecordTime, sisi.absolutePosition)
}

// writeStreamFooter writes a block of data that describes the entire stream.
// It serializes the footer with flatbuffers, writes it at the writer's current
// position, then appends the versioned shadow footer. Panics raised by the
// underlying writes are recovered and returned as errors.
func (sw *StreamWriter) writeStreamFooter(streamFooter StreamFooter) (size int, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	sw.b.Reset()

	// Allocate series items.
	sequences := streamFooter.Series()
	sisiOffsets := make([]flatbuffers.UOffsetT, len(sequences))
	for i, sisi := range sequences {
		// Strings must be created before the table start (flatbuffers rule).
		uuidPosition := sw.b.CreateString(sisi.Uuid())

		ttgstream.StreamIndexedSequenceInfoStart(sw.b)
		ttgstream.StreamIndexedSequenceInfoAddUuid(sw.b, uuidPosition)
		ttgstream.StreamIndexedSequenceInfoAddHeadRecordEpoch(sw.b, uint64(sisi.HeadRecordTime().Unix()))
		ttgstream.StreamIndexedSequenceInfoAddTailRecordEpoch(sw.b, uint64(sisi.TailRecordTime().Unix()))
		ttgstream.StreamIndexedSequenceInfoAddAbsolutePosition(sw.b, sisi.AbsolutePosition())
		sisiOffset := ttgstream.StreamIndexedSequenceInfoEnd(sw.b)

		sisiOffsets[i] = sisiOffset
	}

	// Allocate vector. Offsets are prepended in reverse to preserve order.
	seriesCount := len(sequences)
	ttgstream.StreamFooter1StartSeriesVector(sw.b, seriesCount)
	for i := len(sisiOffsets) - 1; i >= 0; i-- {
		sisiOffset := sisiOffsets[i]
		sw.b.PrependUOffsetT(sisiOffset)
	}
	seriesVectorOffset := sw.b.EndVector(seriesCount)

	// Build footer.
	ttgstream.StreamFooter1Start(sw.b)
	ttgstream.StreamFooter1AddSeries(sw.b, seriesVectorOffset)
	sfPosition := ttgstream.StreamFooter1End(sw.b)
	sw.b.Finish(sfPosition)

	data := sw.b.FinishedBytes()
	streamLogger1.Debugf(nil, "Writing (%d) bytes for stream footer.", len(data))

	err = sw.pushStreamMilestone(MtStreamFooterHeadByte, fmt.Sprintf("Stream: %s", streamFooter))
	log.PanicIf(err)

	n, err := sw.w.Write(data)
	log.PanicIf(err)

	sw.bumpPosition(int64(n))

	// NOTE(review): uint16(len(data)) silently truncates footers >= 64 KiB —
	// confirm the shadow-footer format caps the payload size.
	footerVersion := uint16(1)
	shadowSize, err := sw.writeShadowFooter(footerVersion, FtStreamFooter, uint16(len(data)))
	log.PanicIf(err)

	size = len(data) + shadowSize
	return size, nil
}

// writeStreamFooterWithSeriesFooters builds indexed summaries for each series
// footer (paired positionally with offsets) and writes the resulting stream
// footer. Panics from the write path are recovered and returned as errors.
func (sw *StreamWriter) writeStreamFooterWithSeriesFooters(series []SeriesFooter, offsets []int64) (footerSize int, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	indexedSeries := make([]StreamIndexedSequenceInfo, len(series))
	for i, seriesFooter := range series {
		sisi := NewStreamIndexedSequenceInfo1WithSeriesFooter(
			seriesFooter,
			offsets[i])

		indexedSeries[i] = sisi
	}

	streamFooter := NewStreamFooter1FromStreamIndexedSequenceInfoSlice(indexedSeries)

	footerSize, err = sw.writeStreamFooter(streamFooter)
	log.PanicIf(err)

	return footerSize, nil
}

// StreamFooter1 represents the stream footer (version 1) that's encoded in the
// stream.
type StreamFooter1 struct {
	series []StreamIndexedSequenceInfo
}

// String renders a compact summary containing the series count.
func (sf *StreamFooter1) String() string {
	return fmt.Sprintf("StreamFooter1<COUNT=(%d)>", len(sf.Series()))
}

// Series returns a list of all of the summary series information.
func (sf *StreamFooter1) Series() []StreamIndexedSequenceInfo {
	return sf.series
}

// NewStreamFooter1FromStreamIndexedSequenceInfoSlice returns a new
// `StreamFooter`-compatible struct.
func NewStreamFooter1FromStreamIndexedSequenceInfoSlice(series []StreamIndexedSequenceInfo) StreamFooter {
	sf := &StreamFooter1{
		series: series,
	}

	return sf
}

// NewStreamFooter1FromEncoded decodes the given bytes and returns a
// `StreamFooter`-compatible struct. Epoch fields are decoded as UTC times.
// Panics from the flatbuffers layer are recovered and returned as errors.
func NewStreamFooter1FromEncoded(footerBytes []byte) (sf StreamFooter, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	sfEncoded := ttgstream.GetRootAsStreamFooter1(footerBytes, 0)

	seriesCount := sfEncoded.SeriesLength()
	series := make([]StreamIndexedSequenceInfo, seriesCount)
	for i := 0; i < seriesCount; i++ {
		sisiEncoded := ttgstream.StreamIndexedSequenceInfo{}
		found := sfEncoded.Series(&sisiEncoded, i)
		if found == false {
			log.Panicf("could not find series (%d) info in stream info", i)
		}

		headRecordTime := time.Unix(int64(sisiEncoded.HeadRecordEpoch()), 0).In(time.UTC)
		tailRecordTime := time.Unix(int64(sisiEncoded.TailRecordEpoch()), 0).In(time.UTC)

		sisi := &StreamIndexedSequenceInfo1{
			uuid:             string(sisiEncoded.Uuid()),
			headRecordTime:   headRecordTime,
			tailRecordTime:   tailRecordTime,
			absolutePosition: sisiEncoded.AbsolutePosition(),
		}

		series[i] = sisi
	}

	sf = NewStreamFooter1FromStreamIndexedSequenceInfoSlice(series)

	return sf, nil
}
0.727685
0.434881
stream_protocol_1.go
starcoder
package expression

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
)

// parseError carries a parse failure plus the input offsets where it occurred.
// It is thrown via panic and recovered in parser.parse.
type parseError struct {
	error
	offset      int
	tokenOffset int
}

// parser is a recursive-descent parser over the token stream produced by
// scanner. Precedence (lowest to highest): logical-or, logical-and, equality,
// comparison, bitwise-and, operand.
type parser struct {
	input scanner
	mode  ParseMode
	types FieldTypeMap

	// current token
	token       token
	tokenOffset int
	tokenText   string
}

// error aborts parsing by panicking with a parseError at the current offset;
// recovered in parse().
func (p *parser) error(msg string) {
	panic(parseError{
		error:       errors.New(msg),
		offset:      p.tokenOffset,
		tokenOffset: p.tokenOffset,
	})
}

// errorf is the formatted variant of error.
func (p *parser) errorf(msg string, args ...interface{}) {
	panic(parseError{
		error:       fmt.Errorf(msg, args...),
		offset:      p.tokenOffset,
		tokenOffset: p.tokenOffset,
	})
}

// next advances to the next token from the scanner.
func (p *parser) next() {
	p.tokenOffset, p.token, p.tokenText = p.input.nextToken()
}

// makeNativeIntegerType converts the integer i to the native Go integer type
// corresponding to ValueType t, returned as interface{}.
func (p *parser) makeNativeIntegerType(i interface{}, t ValueType) interface{} {
	var toType reflect.Type
	switch t {
	case ValueTypeSignedInt8:
		toType = reflect.TypeOf(int8(0))
	case ValueTypeSignedInt16:
		toType = reflect.TypeOf(int16(0))
	case ValueTypeSignedInt32:
		toType = reflect.TypeOf(int32(0))
	case ValueTypeSignedInt64:
		toType = reflect.TypeOf(int64(0))
	case ValueTypeUnsignedInt8:
		toType = reflect.TypeOf(uint8(0))
	case ValueTypeUnsignedInt16:
		toType = reflect.TypeOf(uint16(0))
	case ValueTypeUnsignedInt32:
		toType = reflect.TypeOf(uint32(0))
	case ValueTypeUnsignedInt64:
		toType = reflect.TypeOf(uint64(0))
	}
	return reflect.ValueOf(i).Convert(toType).Interface()
}

// fixupBinaryExpr enforces the shape "identifier OP literal" and coerces the
// literal's integer type to the identifier's declared type so both sides match.
// Panics (via errorf) on shape or type mismatches.
func (p *parser) fixupBinaryExpr(be *binaryExpr) expr {
	i, iok := be.x.(*identExpr)
	if !iok {
		p.errorf("lhs of %s must be an identifier", binaryOpStrings[be.op])
	}
	v, vok := be.y.(*valueExpr)
	if !vok {
		p.errorf("rhs of %s must be a literal value", binaryOpStrings[be.op])
	}
	if i.Type() == v.Type() {
		return be
	}
	if !i.Type().IsInteger() || !v.Type().IsInteger() {
		p.errorf("type mismatch (%s vs %s)",
			ValueTypeStrings[i.Type()], ValueTypeStrings[v.Type()])
	}
	v.v = p.makeNativeIntegerType(v.v, i.Type())
	return be
}

// parseOperand parses a primary expression: a known field identifier, a string
// or integer literal, a parenthesized expression, unary minus, or NOT.
// Returns nil at EOF.
func (p *parser) parseOperand() expr {
	switch p.token {
	case tokenIdentifier:
		if t, ok := p.types[p.tokenText]; ok {
			e := &identExpr{name: p.tokenText, t: t}
			p.next()
			return e
		}
		p.errorf("unknown field: %q", p.tokenText)
	case tokenString:
		e := &valueExpr{v: p.tokenText}
		p.next()
		return e
	case tokenInteger:
		// Integer literals are always parsed as uint64; fixupBinaryExpr
		// later coerces them to the identifier's type.
		v, err := strconv.ParseUint(p.tokenText, 0, 64)
		if err != nil {
			// This is a sanity check. It should not actually be
			// reachable in production code. If it happens, it
			// points to a bug in scanner.scanNumber
			p.errorf("internal error parsing integer literal %q: %v",
				p.tokenText, err)
		}
		e := &valueExpr{v: v}
		p.next()
		return e
	case tokenLParen:
		p.next()
		e := p.parseLogicalOr()
		if e == nil {
			p.error("illegal operand")
		}
		if p.token != tokenRParen {
			p.error("expected closing paren")
		}
		p.next()
		return e
	case tokenMinus:
		p.next()
		op := p.parseOperand()
		switch e := op.(type) {
		case *valueExpr:
			// Negation flips between the uint64 literal representation
			// and int64. NOTE(review): the int64 branch converts -v back
			// to uint64 (presumably handling double negation) — confirm
			// intended semantics for values near the int64 boundary.
			switch v := e.v.(type) {
			case int64:
				e.v = uint64(-v)
			case uint64:
				e.v = -int64(v)
			default:
				p.error("illegal unary -")
			}
		default:
			p.error("illegal unary -")
		}
		return op
	case tokenNot:
		p.next()
		x := p.parseLogicalOr()
		if x.Type() != ValueTypeBool {
			p.errorf("operand to NOT must be type bool; got %s",
				ValueTypeStrings[x.Type()])
		}
		return &unaryExpr{
			op: unaryOpNot,
			x:  x,
		}
	case tokenEOF:
		return nil
	}
	p.errorf("unexpected token %q", p.tokenText)
	return nil // unreachable
}

// parseBitwiseAnd parses "operand & operand". The result is rewritten into
// "(x & y) != 0" so a bitwise test yields a boolean expression.
func (p *parser) parseBitwiseAnd() expr {
	x := p.parseOperand()
	if x == nil {
		return nil
	}
	if p.token != tokenBitwiseAnd {
		return x
	}
	p.next()
	y := p.parseOperand()
	if y == nil {
		p.error("missing rhs for &")
	}
	e := &binaryExpr{
		op: binaryOpBitwiseAnd,
		x:  x,
		y:  y,
	}
	if !e.x.Type().IsInteger() {
		p.errorf("illegal type for &: %s", ValueTypeStrings[e.x.Type()])
	}
	x = p.fixupBinaryExpr(e)
	x = &binaryExpr{
		op: binaryOpNE,
		x:  x,
		y:  &valueExpr{v: p.makeNativeIntegerType(0, x.Type())},
	}
	return x
}

// parseComparison parses "<", "<=", ">", ">=" over bitwise-and expressions;
// operands must be numeric.
func (p *parser) parseComparison() expr {
	x := p.parseBitwiseAnd()
	if x == nil {
		return nil
	}
	var op binaryOp
	switch p.token {
	case tokenLT:
		op = binaryOpLT
	case tokenLE:
		op = binaryOpLE
	case tokenGT:
		op = binaryOpGT
	case tokenGE:
		op = binaryOpGE
	default:
		return x
	}
	p.next()
	y := p.parseBitwiseAnd()
	if y == nil {
		p.errorf("missing rhs for %s", binaryOpStrings[op])
	}
	e := &binaryExpr{
		op: op,
		x:  x,
		y:  y,
	}
	x = p.fixupBinaryExpr(e)
	if !e.x.Type().IsNumeric() {
		p.errorf("operands for %s comparison must be numeric; got %s",
			binaryOpStrings[op], ValueTypeStrings[x.Type()])
	}
	return x
}

// parseEquality parses "==", "!=", and "~" (like); "~" requires string operands.
func (p *parser) parseEquality() expr {
	x := p.parseComparison()
	if x == nil {
		return nil
	}
	var op binaryOp
	switch p.token {
	case tokenEQ:
		op = binaryOpEQ
	case tokenNE:
		op = binaryOpNE
	case tokenLike:
		op = binaryOpLike
	default:
		return x
	}
	p.next()
	y := p.parseComparison()
	if y == nil {
		p.errorf("missing rhs for %s", binaryOpStrings[op])
	}
	e := &binaryExpr{
		op: op,
		x:  x,
		y:  y,
	}
	x = p.fixupBinaryExpr(e)
	if op == binaryOpLike && e.x.Type() != ValueTypeString {
		p.errorf("operands for ~ must be string; got %s",
			ValueTypeStrings[e.x.Type()])
	}
	return x
}

// parseLogicalAnd parses a left-associative chain of AND over equality
// expressions; both sides must be boolean.
func (p *parser) parseLogicalAnd() expr {
	x := p.parseEquality()
	if x == nil {
		return nil
	}
	for {
		if p.token != tokenLogicalAnd {
			return x
		}
		p.next()
		y := p.parseEquality()
		if y == nil {
			p.error("missing rhs for logical and")
		}
		if x.Type() != ValueTypeBool {
			p.errorf("lhs for logical and must be bool; got %s",
				ValueTypeStrings[x.Type()])
		}
		if y.Type() != ValueTypeBool {
			p.errorf("rhs for logical and must be bool; got %s",
				ValueTypeStrings[y.Type()])
		}
		x = &binaryExpr{
			op: binaryOpLogicalAnd,
			x:  x,
			y:  y,
		}
	}
}

// parseLogicalOr parses a left-associative chain of OR over logical-and
// expressions; both sides must be boolean. This is the grammar's entry rule.
func (p *parser) parseLogicalOr() expr {
	x := p.parseLogicalAnd()
	if x == nil {
		return nil
	}
	for {
		if p.token != tokenLogicalOr {
			return x
		}
		p.next()
		y := p.parseLogicalAnd()
		if y == nil {
			p.error("missing rhs for logical or")
		}
		if x.Type() != ValueTypeBool {
			p.errorf("lhs for logical or must be bool; got %s",
				ValueTypeStrings[x.Type()])
		}
		if y.Type() != ValueTypeBool {
			p.errorf("rhs for logical or must be bool; got %s",
				ValueTypeStrings[y.Type()])
		}
		x = &binaryExpr{
			op: binaryOpLogicalOr,
			x:  x,
			y:  y,
		}
	}
}

// parse drives the parser: it recovers parseError panics into the returned
// error, rejects trailing tokens, and maps an empty input to the constant
// expression "true".
func (p *parser) parse() (e *Expression, err error) {
	defer func() {
		if r := recover(); r != nil {
			if pe, ok := r.(parseError); ok {
				err = pe.error
			} else {
				panic(r)
			}
		}
	}()

	p.next()
	ast := p.parseLogicalOr()
	if p.token != tokenEOF {
		p.errorf("unexpected token %q", p.tokenText)
	}
	if ast == nil {
		ast = &valueExpr{v: true}
	}
	e = &Expression{
		ast:   ast,
		types: p.types,
	}
	return
}
pkg/expression/parser.go
0.584271
0.419707
parser.go
starcoder
package fauxgl

import (
	"math"

	"github.com/fogleman/simplify"
)

// Mesh is a collection of triangles and lines. The axis-aligned bounding box
// is computed lazily and cached in box; any mutation must call dirty() to
// invalidate it.
type Mesh struct {
	Triangles []*Triangle
	Lines     []*Line
	box       *Box // cached bounding box; nil means "not yet computed"
}

// NewEmptyMesh returns a mesh with no triangles and no lines.
func NewEmptyMesh() *Mesh {
	return &Mesh{}
}

// NewMesh returns a mesh containing the given triangles and lines.
func NewMesh(triangles []*Triangle, lines []*Line) *Mesh {
	return &Mesh{triangles, lines, nil}
}

// NewTriangleMesh returns a mesh containing only triangles.
func NewTriangleMesh(triangles []*Triangle) *Mesh {
	return &Mesh{triangles, nil, nil}
}

// NewLineMesh returns a mesh containing only lines.
func NewLineMesh(lines []*Line) *Mesh {
	return &Mesh{nil, lines, nil}
}

// dirty invalidates the cached bounding box.
func (m *Mesh) dirty() {
	m.box = nil
}

// Copy returns a deep copy: each triangle and line struct is copied, so
// mutating the copy does not affect the original.
func (m *Mesh) Copy() *Mesh {
	triangles := make([]*Triangle, len(m.Triangles))
	lines := make([]*Line, len(m.Lines))
	for i, t := range m.Triangles {
		a := *t
		triangles[i] = &a
	}
	for i, l := range m.Lines {
		a := *l
		lines[i] = &a
	}
	return NewMesh(triangles, lines)
}

// Add appends b's triangles and lines to a (sharing the pointers, not copying).
func (a *Mesh) Add(b *Mesh) {
	a.Triangles = append(a.Triangles, b.Triangles...)
	a.Lines = append(a.Lines, b.Lines...)
	a.dirty()
}

// SetColor sets the color of every triangle vertex. Lines are not affected.
func (m *Mesh) SetColor(c Color) {
	for _, t := range m.Triangles {
		t.SetColor(c)
	}
}

// Volume returns the enclosed volume, computed as the absolute value of the
// sum of signed tetrahedron volumes (divergence theorem). Only meaningful for
// a closed, consistently wound mesh.
func (m *Mesh) Volume() float64 {
	var v float64
	for _, t := range m.Triangles {
		p1 := t.V1.Position
		p2 := t.V2.Position
		p3 := t.V3.Position
		// Scalar triple product p1 . (p2 x p3), expanded.
		v += p1.X*(p2.Y*p3.Z-p3.Y*p2.Z) - p2.X*(p1.Y*p3.Z-p3.Y*p1.Z) + p3.X*(p1.Y*p2.Z-p2.Y*p1.Z)
	}
	return math.Abs(v / 6)
}

// SurfaceArea returns the sum of all triangle areas.
func (m *Mesh) SurfaceArea() float64 {
	var a float64
	for _, t := range m.Triangles {
		a += t.Area()
	}
	return a
}

// smoothNormalsThreshold averages only those candidate normals whose angle to
// `normal` is within the threshold (threshold is a cosine: dot >= threshold).
func smoothNormalsThreshold(normal Vector, normals []Vector, threshold float64) Vector {
	result := Vector{}
	for _, x := range normals {
		if x.Dot(normal) >= threshold {
			result = result.Add(x)
		}
	}
	return result.Normalize()
}

// SmoothNormalsThreshold smooths vertex normals across triangles sharing a
// position, but only blends normals within `radians` of each other — this
// preserves hard edges sharper than the given angle.
func (m *Mesh) SmoothNormalsThreshold(radians float64) {
	threshold := math.Cos(radians)
	// Collect all normals per shared vertex position.
	lookup := make(map[Vector][]Vector)
	for _, t := range m.Triangles {
		lookup[t.V1.Position] = append(lookup[t.V1.Position], t.V1.Normal)
		lookup[t.V2.Position] = append(lookup[t.V2.Position], t.V2.Normal)
		lookup[t.V3.Position] = append(lookup[t.V3.Position], t.V3.Normal)
	}
	for _, t := range m.Triangles {
		t.V1.Normal = smoothNormalsThreshold(t.V1.Normal, lookup[t.V1.Position], threshold)
		t.V2.Normal = smoothNormalsThreshold(t.V2.Normal, lookup[t.V2.Position], threshold)
		t.V3.Normal = smoothNormalsThreshold(t.V3.Normal, lookup[t.V3.Position], threshold)
	}
}

// SmoothNormals replaces each vertex normal with the normalized average of
// all normals at that position (full smoothing, no angle threshold).
func (m *Mesh) SmoothNormals() {
	lookup := make(map[Vector]Vector)
	for _, t := range m.Triangles {
		lookup[t.V1.Position] = lookup[t.V1.Position].Add(t.V1.Normal)
		lookup[t.V2.Position] = lookup[t.V2.Position].Add(t.V2.Normal)
		lookup[t.V3.Position] = lookup[t.V3.Position].Add(t.V3.Normal)
	}
	for k, v := range lookup {
		lookup[k] = v.Normalize()
	}
	for _, t := range m.Triangles {
		t.V1.Normal = lookup[t.V1.Position]
		t.V2.Normal = lookup[t.V2.Position]
		t.V3.Normal = lookup[t.V3.Position]
	}
}

// UnitCube centers the mesh inside the axis-aligned cube [-0.5, 0.5]^3 and
// returns the transformation matrix that was applied.
func (m *Mesh) UnitCube() Matrix {
	const r = 0.5
	return m.FitInside(Box{Vector{-r, -r, -r}, Vector{r, r, r}}, Vector{0.5, 0.5, 0.5})
}

// BiUnitCube centers the mesh inside [-1, 1]^3 and returns the applied matrix.
func (m *Mesh) BiUnitCube() Matrix {
	const r = 1
	return m.FitInside(Box{Vector{-r, -r, -r}, Vector{r, r, r}}, Vector{0.5, 0.5, 0.5})
}

// MoveTo translates the mesh so that the bounding-box anchor point lands at
// `position`, returning the translation matrix that was applied.
func (m *Mesh) MoveTo(position, anchor Vector) Matrix {
	matrix := Translate(position.Sub(m.BoundingBox().Anchor(anchor)))
	m.Transform(matrix)
	return matrix
}

// Center moves the bounding-box center to the origin.
func (m *Mesh) Center() Matrix {
	return m.MoveTo(Vector{}, Vector{0.5, 0.5, 0.5})
}

// FitInside uniformly scales and translates the mesh to fit within `box`,
// positioning slack space according to `anchor` (0.5,0.5,0.5 = centered).
// Returns the applied matrix.
func (m *Mesh) FitInside(box Box, anchor Vector) Matrix {
	// Uniform scale limited by the tightest axis.
	scale := box.Size().Div(m.BoundingBox().Size()).MinComponent()
	extra := box.Size().Sub(m.BoundingBox().Size().MulScalar(scale))
	matrix := Identity()
	matrix = matrix.Translate(m.BoundingBox().Min.Negate())
	matrix = matrix.Scale(Vector{scale, scale, scale})
	matrix = matrix.Translate(box.Min.Add(extra.Mul(anchor)))
	m.Transform(matrix)
	return matrix
}

// BoundingBox returns the axis-aligned bounding box of all triangles and
// lines, computing and caching it on first use.
func (m *Mesh) BoundingBox() Box {
	if m.box == nil {
		box := EmptyBox
		for _, t := range m.Triangles {
			box = box.Extend(t.BoundingBox())
		}
		for _, l := range m.Lines {
			box = box.Extend(l.BoundingBox())
		}
		m.box = &box
	}
	return *m.box
}

// Transform applies the matrix to every triangle and line in place.
func (m *Mesh) Transform(matrix Matrix) {
	for _, t := range m.Triangles {
		t.Transform(matrix)
	}
	for _, l := range m.Lines {
		l.Transform(matrix)
	}
	m.dirty()
}

// ReverseWinding flips the vertex order of every triangle (inverting the
// facing direction). The bounding box is unchanged, so no dirty() is needed.
func (m *Mesh) ReverseWinding() {
	for _, t := range m.Triangles {
		t.ReverseWinding()
	}
}

// Simplify reduces the triangle count to roughly `factor` of the original
// using the fogleman/simplify library. Vertex attributes other than position
// (normals, colors, texcoords) are discarded and rebuilt by NewTriangleForPoints.
func (m *Mesh) Simplify(factor float64) {
	st := make([]*simplify.Triangle, len(m.Triangles))
	for i, t := range m.Triangles {
		v1 := simplify.Vector(t.V1.Position)
		v2 := simplify.Vector(t.V2.Position)
		v3 := simplify.Vector(t.V3.Position)
		st[i] = simplify.NewTriangle(v1, v2, v3)
	}
	sm := simplify.NewMesh(st)
	sm = sm.Simplify(factor)
	m.Triangles = make([]*Triangle, len(sm.Triangles))
	for i, t := range sm.Triangles {
		v1 := Vector(t.V1)
		v2 := Vector(t.V2)
		v3 := Vector(t.V3)
		m.Triangles[i] = NewTriangleForPoints(v1, v2, v3)
	}
	m.dirty()
}

// SaveSTL writes the mesh to an STL file at path.
func (m *Mesh) SaveSTL(path string) error {
	return SaveSTL(path, m)
}

// Silhouette returns the silhouette mesh as seen from eye, offset outward
// by `offset`.
func (m *Mesh) Silhouette(eye Vector, offset float64) *Mesh {
	return silhouette(m, eye, offset)
}

// SplitTriangles recursively subdivides triangles until no edge is longer
// than maxEdgeLength. The longest edge is split at its midpoint each pass;
// new vertex attributes are interpolated barycentrically.
func (m *Mesh) SplitTriangles(maxEdgeLength float64) {
	var triangles []*Triangle
	var split func(t *Triangle)
	split = func(t *Triangle) {
		v1 := t.V1
		v2 := t.V2
		v3 := t.V3
		p1 := v1.Position
		p2 := v2.Position
		p3 := v3.Position
		d12 := p1.Distance(p2)
		d23 := p2.Distance(p3)
		d31 := p3.Distance(p1)
		max := math.Max(d12, math.Max(d23, d31))
		if max <= maxEdgeLength {
			// All edges short enough; keep the triangle as-is.
			triangles = append(triangles, t)
		} else if d12 == max {
			// Split edge v1-v2 at its midpoint (barycentric 0.5/0.5/0).
			v := InterpolateVertexes(v1, v2, v3, VectorW{0.5, 0.5, 0, 1})
			t1 := NewTriangle(v3, v1, v)
			t2 := NewTriangle(v2, v3, v)
			split(t1)
			split(t2)
		} else if d23 == max {
			// Split edge v2-v3.
			v := InterpolateVertexes(v1, v2, v3, VectorW{0, 0.5, 0.5, 1})
			t1 := NewTriangle(v1, v2, v)
			t2 := NewTriangle(v3, v1, v)
			split(t1)
			split(t2)
		} else {
			// Split edge v3-v1.
			v := InterpolateVertexes(v1, v2, v3, VectorW{0.5, 0, 0.5, 1})
			t1 := NewTriangle(v2, v3, v)
			t2 := NewTriangle(v1, v2, v)
			split(t1)
			split(t2)
		}
	}
	for _, t := range m.Triangles {
		split(t)
	}
	m.Triangles = triangles
	m.dirty()
}
mesh.go
0.738198
0.609175
mesh.go
starcoder
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package e2e import ( "bytes" "fmt" "io" "os/exec" "strings" "testing" "github.com/pkg/errors" ) type kn struct { namespace string } const ( seperatorHeavy = "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" seperatorLight = "╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍╍" ) // Run the 'kn' CLI with args and opts func (k kn) Run(args ...string) KnRunResult { return RunKn(k.namespace, args) } // Helper methods for calling out to the test cluster type kubectl struct { namespace string } // Run the 'kubectl' CLI with args and opts func (k kubectl) Run(args ...string) (string, error) { return RunKubectl(k.namespace, args...) 
} // Collector for results type KnRunResultCollector struct { results []KnRunResult extraDumps []string t *testing.T } func NewKnRunResultCollector(t *testing.T) *KnRunResultCollector { return &KnRunResultCollector{ results: []KnRunResult{}, t: t, extraDumps: []string{}, } } func (c *KnRunResultCollector) AssertNoError(result KnRunResult) { c.results = append(c.results, result) if result.Error != nil { c.t.Logf("ERROR: %v", result.Stderr) c.t.FailNow() } } func (c *KnRunResultCollector) AssertError(result KnRunResult) { c.results = append(c.results, result) if result.Error == nil { c.t.Log("ERROR: Error expected but no error happened") c.t.FailNow() } } // AddDump adds extra dump information to the collector which is printed // out if an error occurs func (c *KnRunResultCollector) AddDump(kind string, name string, namespace string) { dumpInfo := extractDumpInfoWithName(kind, name, namespace) if dumpInfo != "" { c.extraDumps = append(c.extraDumps, dumpInfo) } } func (c *KnRunResultCollector) DumpIfFailed() { if c.t.Failed() { c.t.Log(c.errorDetails()) } } func (c *KnRunResultCollector) errorDetails() string { var out = bytes.Buffer{} fmt.Fprintln(&out, "=== FAIL: =======================[[ERROR]]========================") c.printCommands(&out) var dumpInfos []string if len(c.results) > 0 { dumpInfo := c.results[len(c.results)-1].DumpInfo if dumpInfo != "" { dumpInfos = append(dumpInfos, dumpInfo) } } dumpInfos = append(dumpInfos, c.extraDumps...) 
for _, d := range dumpInfos { fmt.Fprintln(&out, "--------------------------[[DUMP]]-------------------------------") fmt.Fprintf(&out, d) } fmt.Fprintln(&out, "=================================================================") return out.String() } func (c *KnRunResultCollector) printCommands(out io.Writer) { for i, result := range c.results { c.printCommand(out, result) if i < len(c.results)-1 { fmt.Fprintf(out, "┣━%s\n", seperatorHeavy) } } } func (c *KnRunResultCollector) printCommand(out io.Writer, result KnRunResult) { fmt.Fprintf(out, "🦆 %s\n", result.CmdLine) for _, l := range strings.Split(result.Stdout, "\n") { fmt.Fprintf(out, "┃ %s\n", l) } if result.Stderr != "" { errorPrefix := "🔥" if result.ErrorExpected { errorPrefix = "︙" } for _, l := range strings.Split(result.Stderr, "\n") { fmt.Fprintf(out, "%s %s\n", errorPrefix, l) } } } // ======================================================== // Functions: // Result of a "kn" call type KnRunResult struct { // Command line called CmdLine string // Standard output of command Stdout string // Standard error of command Stderr string // And extra dump informations in case of an unexpected error DumpInfo string // Error occurred during execution Error error // Was an error expected ? 
ErrorExpected bool } // RunKn runs "kn" in a given namespace func RunKn(namespace string, args []string) KnRunResult { if namespace != "" { args = append(args, "--namespace", namespace) } stdout, stderr, err := runCli("kn", args) result := KnRunResult{ CmdLine: cmdCLIDesc("kn", args), Stdout: stdout, Stderr: stderr, Error: err, } if err != nil { command := args[0] if command == "source" && len(args) > 1 { command = "source " + args[1] args = args[1:] } result.DumpInfo = extractDumpInfo(command, args, namespace) } return result } // RunKubectl runs "kubectl" in a given namespace func RunKubectl(namespace string, args ...string) (string, error) { if namespace != "" { args = append(args, "--namespace", namespace) } stdout, stderr, err := runCli("kubectl", args) if err != nil { return stdout, errors.Wrap(err, fmt.Sprintf("stderr: %s", stderr)) } return stdout, nil } func runCli(cli string, args []string) (string, string, error) { var stderr bytes.Buffer var stdout bytes.Buffer cmd := exec.Command(cli, args...) 
cmd.Stderr = &stderr cmd.Stdout = &stdout cmd.Stdin = nil err := cmd.Run() return stdout.String(), stderr.String(), err } type dumpFunc func(namespace string, args []string) string // Dump handler for specific commands ("service", "revision") which should add extra infos // Relies on that argv[1] is the command and argv[3] is the name of the object var dumpHandlers = map[string]dumpFunc{ "service": dumpService, "revision": dumpRevision, "route": dumpRoute, "trigger": dumpTrigger, "source apiserver": dumpApiServerSource, } func extractDumpInfoWithName(command string, name string, namespace string) string { return extractDumpInfo(command, []string{command, "", name}, namespace) } func extractDumpInfo(command string, args []string, namespace string) string { dumpHandler := dumpHandlers[command] if dumpHandler != nil { return dumpHandler(namespace, args) } return "" } func dumpService(namespace string, args []string) string { // For list like operation we don't have a name if len(args) < 3 || args[2] == "" { return "" } name := args[2] var buffer bytes.Buffer // Service info appendResourceInfo(&buffer, "ksvc", name, namespace) fmt.Fprintf(&buffer, "%s\n", seperatorHeavy) // Service's configuration appendResourceInfo(&buffer, "configuration", name, namespace) fmt.Fprintf(&buffer, "%s\n", seperatorHeavy) // Get all revisions for this service appendResourceInfoWithNameSelector(&buffer, "revision", name, namespace, "serving.knative.dev/service") // Get all routes for this service appendResourceInfoWithNameSelector(&buffer, "route", name, namespace, "serving.knative.dev/service") return buffer.String() } func dumpRevision(namespace string, args []string) string { return simpleDump("revision", args, namespace) } func dumpRoute(namespace string, args []string) string { return simpleDump("route", args, namespace) } func dumpApiServerSource(namespace string, args []string) string { return simpleDump("apiserversource", args, namespace) } func dumpTrigger(namespace string, args 
[]string) string { return simpleDump("trigger", args, namespace) } func simpleDump(kind string, args []string, namespace string) string { if len(args) < 3 || args[2] == "" { return "" } var buffer bytes.Buffer appendResourceInfo(&buffer, kind, args[2], namespace) return buffer.String() } func appendResourceInfo(buffer *bytes.Buffer, kind string, name string, namespace string) { appendResourceInfoWithNameSelector(buffer, kind, name, namespace, "") } func appendResourceInfoWithNameSelector(buffer *bytes.Buffer, kind string, name string, namespace string, selector string) { var extra string argsDescribe := []string{"describe", kind} argsGet := []string{"get", "-oyaml", kind} if selector != "" { labelArg := fmt.Sprintf("%s=%s", selector, name) argsDescribe = append(argsDescribe, "--selector", labelArg) argsGet = append(argsGet, "--selector", labelArg) extra = fmt.Sprintf(" --selector %s", labelArg) } else { argsDescribe = append(argsDescribe, name) argsGet = append(argsGet, name) extra = "" } out, err := RunKubectl(namespace, argsDescribe...) appendCLIOutput(buffer, fmt.Sprintf("kubectl describe %s %s --namespace %s%s", kind, name, namespace, extra), out, err) fmt.Fprintf(buffer, "%s\n", seperatorLight) out, err = RunKubectl(namespace, argsGet...) appendCLIOutput(buffer, fmt.Sprintf("kubectl get %s %s --namespace %s -oyaml%s", kind, name, namespace, extra), out, err) } func appendCLIOutput(buffer *bytes.Buffer, desc string, out string, err error) { buffer.WriteString(fmt.Sprintf("==== %s\n", desc)) if err != nil { buffer.WriteString(fmt.Sprintf("%s: %v\n", "!!!! ERROR", err)) } buffer.WriteString(out) } func cmdCLIDesc(cli string, args []string) string { return fmt.Sprintf("%s %s", cli, strings.Join(args, " ")) }
test/e2e/cli.go
0.667798
0.577555
cli.go
starcoder
package feistel

import (
	"bytes"
	"encoding/binary"
	"io"
)

// cbc implements Feistel encryption in cipher-block-chaining mode: each
// 8-byte block is XORed with the previous ciphertext block before (after,
// for decryption) the Feistel rounds. previousBlock acts as the IV.
type cbc struct{}

// CBC contains the Encrypt and Decrypt functions using the CBC algorithm
var CBC cbc

// EncryptReader reads data from a reader and writes the encrypted data to the writer.
// The IV's high 32 bits seed the left half and its low 32 bits the right half.
func (cbc) EncryptReader(r io.Reader, w io.Writer, rounds int, keys []uint32, previousBlock uint64) error {
	var block [8]byte
	prevLeft := uint32(previousBlock >> 32)
	prevRight := uint32(previousBlock)
	for {
		left, right, err := readInt(r, block)
		if err != nil {
			if err == io.EOF {
				return nil // clean end of input
			}
			return err
		}
		// CBC: XOR the plaintext with the previous ciphertext block.
		left ^= prevLeft
		right ^= prevRight
		left, right = Encrypt(left, right, rounds, keys)
		if err := writeInt(w, block, left, right); err != nil {
			return err
		}
		prevLeft = left
		prevRight = right
	}
}

// Encrypt encrypts a provided buffer and returns it
func (cbc) Encrypt(buf []byte, rounds int, keys []uint32, previousBlock uint64) ([]byte, error) {
	var out bytes.Buffer
	if err := CBC.EncryptReader(bytes.NewBuffer(buf), &out, rounds, keys, previousBlock); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// EncryptUInt64 encrypts a provided uint64 and returns it
func (cbc) EncryptUInt64(n uint64, rounds int, keys []uint32, previousBlock uint64) (uint64, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, n)
	buf, err := CBC.Encrypt(buf, rounds, keys, previousBlock)
	if err != nil {
		// Fix: previously the (nil) buffer was decoded even on error,
		// which panicked instead of returning the error.
		return 0, err
	}
	return binary.BigEndian.Uint64(buf), nil
}

// EncryptInt64 encrypts a provided int64 and returns it
func (cbc) EncryptInt64(n int64, rounds int, keys []uint32, previousBlock uint64) (int64, error) {
	i, err := CBC.EncryptUInt64(uint64(n), rounds, keys, previousBlock)
	return int64(i), err
}

// DecryptReader reads data from a reader and writes the decrypted data to the writer.
func (cbc) DecryptReader(r io.Reader, w io.Writer, rounds int, keys []uint32, previousBlock uint64) error {
	var block [8]byte
	var savedLeft uint32
	var savedRight uint32
	prevLeft := uint32(previousBlock >> 32)
	prevRight := uint32(previousBlock)
	for {
		left, right, err := readInt(r, block)
		if err != nil {
			if err == io.EOF {
				return nil // clean end of input
			}
			return err
		}
		// Remember the ciphertext block: it is the chaining value for the
		// next block (must be saved before it is overwritten below).
		savedLeft = left
		savedRight = right
		left, right = Decrypt(left, right, rounds, keys)
		// CBC: XOR the decrypted block with the previous ciphertext block.
		left ^= prevLeft
		right ^= prevRight
		if err := writeInt(w, block, left, right); err != nil {
			return err
		}
		prevLeft = savedLeft
		prevRight = savedRight
	}
}

// Decrypt decrypts a provided buffer and returns it
func (cbc) Decrypt(buf []byte, rounds int, keys []uint32, previousBlock uint64) ([]byte, error) {
	var out bytes.Buffer
	if err := CBC.DecryptReader(bytes.NewBuffer(buf), &out, rounds, keys, previousBlock); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// DecryptUInt64 decrypts a provided uint64 and returns it
func (cbc) DecryptUInt64(n uint64, rounds int, keys []uint32, previousBlock uint64) (uint64, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, n)
	buf, err := CBC.Decrypt(buf, rounds, keys, previousBlock)
	if err != nil {
		// Fix: same nil-buffer panic as EncryptUInt64.
		return 0, err
	}
	return binary.BigEndian.Uint64(buf), nil
}

// DecryptInt64 decrypts a provided int64 and returns it
func (cbc) DecryptInt64(n int64, rounds int, keys []uint32, previousBlock uint64) (int64, error) {
	i, err := CBC.DecryptUInt64(uint64(n), rounds, keys, previousBlock)
	return int64(i), err
}
0.6137
0.435181
cbc.go
starcoder
package blackbox

import (
	"github.com/pkg/errors"
)

const (
	// PredictorZero returns the value unmodified
	PredictorZero = 0
	// PredictorPrevious returns the value subtracted from the interframe
	PredictorPrevious = 1
	// PredictorStraightLine assumes that the slope between the current measurement and the previous one will be similar to the slope between the previous measurement and the one before that.
	// This is common for fields which increase at a steady rate, such as the "time" field. The predictor is `history_age_2 - 2 * history_age_1`.
	PredictorStraightLine = 2
	// PredicatorAverage2 is the average of the two previously logged values of the field (i.e. `(history_age_1 + history_age_2) / 2`).
	// It is used when there is significant random noise involved in the field, which means that the average of the recent history is a better predictor of the next value than the previous value on its own would be (for example, in gyroscope or motor measurements).
	// (Note: the name keeps the original "Predicator" spelling because it is exported.)
	PredicatorAverage2 = 3
	// PredictorMinThrottle subtracts the value of "minthrottle" which is included in the log header.
	// In Cleanflight, motors always lie in the range of `[minthrottle ... maxthrottle]` when the craft is armed, so this predictor is used for the first motor value in intraframes.
	PredictorMinThrottle = 4
	// PredictorMotor0 is set to the value of `motor[0]` which was decoded earlier within the current frame.
	// It is used in intraframes for every motor after the first one, because the motor commands typically lie in a tight grouping.
	PredictorMotor0 = 5
	// PredictorInc assumes that the field will be incremented by 1 unit for every main loop iteration. This is used to predict the `loopIteration` field, which increases by 1 for every loop iteration.
	PredictorInc = 6
	// Predictor1500 is set to a fixed value of 1500.
	// It is preferred for logging servo values in intraframes, since these typically lie close to the midpoint of 1500us.
	Predictor1500 = 8
	// PredictorVbatRef is set to the "vbatref" field written in the log header.
	// It is used when logging intraframe battery voltages in Cleanflight, since these are expected to be broadly similar to the first battery voltage seen during arming.
	PredictorVbatRef = 9
	// PredictorMinMotor returns the value and the minimum motor low output summed
	PredictorMinMotor = 11
)

// ApplyPrediction applies a predictor on a field and returns the resulting value.
// previous/previous2 are the one- and two-frames-ago main frames; predictors
// that need history fall back to the raw value when previous is nil, and
// return an error when they need previous2 but only previous is available.
//
// NOTE(review): PredictorInc is declared above but has no case here, so it
// falls through to the "Unsupported field predictor" error — confirm whether
// the loopIteration field is handled elsewhere by the caller.
func ApplyPrediction(frameDef LogDefinition, values []int64, fieldIndex int, predictor int, value int64, previous *MainFrame, previous2 *MainFrame) (int64, error) {
	// First see if we have a prediction that doesn't require a previous frame as reference:
	switch predictor {
	case PredictorZero:
		// No correction to apply
		break
	case PredictorMinThrottle:
		value += int64(frameDef.Sysconfig.MinThrottle)
	case Predictor1500:
		value += 1500
	case PredictorMotor0:
		// Reference the already-decoded motor[0] value from this frame.
		motor0idx, err := frameDef.GetFieldIndex(FieldMotor0)
		if err != nil {
			return value, err
		}
		value += values[motor0idx]
	case PredictorVbatRef:
		value += int64(frameDef.Sysconfig.Vbatref)
	case PredictorPrevious:
		if previous == nil {
			// No history yet: leave the raw value unchanged.
			break
		}
		value = value + previous.values[fieldIndex]
	case PredictorStraightLine:
		if previous == nil {
			break
		}
		if previous2 == nil {
			return value, errors.New("Not enough frames provided to apply predicate")
		}
		// Extrapolate linearly from the two previous samples.
		value = value + 2*previous.values[fieldIndex] - previous2.values[fieldIndex]
	case PredicatorAverage2:
		if previous == nil {
			break
		}
		if previous2 == nil {
			return value, errors.New("Not enough frames provided to apply predicate")
		}
		// Predict with the mean of the two previous samples.
		value = value + (previous.values[fieldIndex]+previous2.values[fieldIndex])/2
	case PredictorMinMotor:
		value += int64(frameDef.Sysconfig.MotorOutputLow)
	default:
		return value, errors.Errorf("Unsupported field predictor %d", predictor)
	}
	return value, nil
}
src/blackbox/predictor.go
0.854763
0.758018
predictor.go
starcoder
package pegomock

import (
	"reflect"
)

// This file contains generated-style argument-matcher helpers, one set of
// eight per built-in Go type T:
//   EqT / NotEqT       - match (in)equality against a concrete value
//   AnyT               - match any value of the type
//   TThat              - match with a caller-supplied ArgumentMatcher
//   EqTSlice / NotEqTSlice / AnyTSlice / TSliceThat - the []T variants
// Each helper registers its matcher via RegisterMatcher and returns the
// type's zero value (false / 0 / "" / nil); the return value is a
// placeholder for call-site type checking only.

// bool
func EqBool(value bool) bool { RegisterMatcher(&EqMatcher{Value: value}); return false }
func NotEqBool(value bool) bool { RegisterMatcher(&NotEqMatcher{Value: value}); return false }
func AnyBool() bool { RegisterMatcher(NewAnyMatcher(reflect.TypeOf(false))); return false }
func BoolThat(matcher ArgumentMatcher) bool { RegisterMatcher(matcher); return false }
func EqBoolSlice(value []bool) []bool { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqBoolSlice(value []bool) []bool { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyBoolSlice() []bool { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf(false)))); return nil }
func BoolSliceThat(matcher ArgumentMatcher) []bool { RegisterMatcher(matcher); return nil }

// int
func EqInt(value int) int { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqInt(value int) int { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyInt() int { RegisterMatcher(NewAnyMatcher(reflect.TypeOf(0))); return 0 }
func IntThat(matcher ArgumentMatcher) int { RegisterMatcher(matcher); return 0 }
func EqIntSlice(value []int) []int { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqIntSlice(value []int) []int { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyIntSlice() []int { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf(0)))); return nil }
func IntSliceThat(matcher ArgumentMatcher) []int { RegisterMatcher(matcher); return nil }

// int8
func EqInt8(value int8) int8 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqInt8(value int8) int8 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyInt8() int8 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((int8)(0)))); return 0 }
func Int8That(matcher ArgumentMatcher) int8 { RegisterMatcher(matcher); return 0 }
func EqInt8Slice(value []int8) []int8 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInt8Slice(value []int8) []int8 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInt8Slice() []int8 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((int8)(0))))); return nil }
func Int8SliceThat(matcher ArgumentMatcher) []int8 { RegisterMatcher(matcher); return nil }

// int16
func EqInt16(value int16) int16 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqInt16(value int16) int16 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyInt16() int16 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((int16)(0)))); return 0 }
func Int16That(matcher ArgumentMatcher) int16 { RegisterMatcher(matcher); return 0 }
func EqInt16Slice(value []int16) []int16 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInt16Slice(value []int16) []int16 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInt16Slice() []int16 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((int16)(0))))); return nil }
func Int16SliceThat(matcher ArgumentMatcher) []int16 { RegisterMatcher(matcher); return nil }

// int32
func EqInt32(value int32) int32 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqInt32(value int32) int32 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyInt32() int32 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((int32)(0)))); return 0 }
func Int32That(matcher ArgumentMatcher) int32 { RegisterMatcher(matcher); return 0 }
func EqInt32Slice(value []int32) []int32 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInt32Slice(value []int32) []int32 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInt32Slice() []int32 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((int32)(0))))); return nil }
func Int32SliceThat(matcher ArgumentMatcher) []int32 { RegisterMatcher(matcher); return nil }

// int64
func EqInt64(value int64) int64 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqInt64(value int64) int64 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyInt64() int64 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((int64)(0)))); return 0 }
func Int64That(matcher ArgumentMatcher) int64 { RegisterMatcher(matcher); return 0 }
func EqInt64Slice(value []int64) []int64 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInt64Slice(value []int64) []int64 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInt64Slice() []int64 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((int64)(0))))); return nil }
func Int64SliceThat(matcher ArgumentMatcher) []int64 { RegisterMatcher(matcher); return nil }

// uint
func EqUint(value uint) uint { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUint(value uint) uint { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUint() uint { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uint)(0)))); return 0 }
func UintThat(matcher ArgumentMatcher) uint { RegisterMatcher(matcher); return 0 }
func EqUintSlice(value []uint) []uint { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUintSlice(value []uint) []uint { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUintSlice() []uint { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uint)(0))))); return nil }
func UintSliceThat(matcher ArgumentMatcher) []uint { RegisterMatcher(matcher); return nil }

// uint8
func EqUint8(value uint8) uint8 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUint8(value uint8) uint8 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUint8() uint8 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uint8)(0)))); return 0 }
func Uint8That(matcher ArgumentMatcher) uint8 { RegisterMatcher(matcher); return 0 }
func EqUint8Slice(value []uint8) []uint8 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUint8Slice(value []uint8) []uint8 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUint8Slice() []uint8 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uint8)(0))))); return nil }
func Uint8SliceThat(matcher ArgumentMatcher) []uint8 { RegisterMatcher(matcher); return nil }

// uint16
func EqUint16(value uint16) uint16 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUint16(value uint16) uint16 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUint16() uint16 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uint16)(0)))); return 0 }
func Uint16That(matcher ArgumentMatcher) uint16 { RegisterMatcher(matcher); return 0 }
func EqUint16Slice(value []uint16) []uint16 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUint16Slice(value []uint16) []uint16 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUint16Slice() []uint16 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uint16)(0))))); return nil }
func Uint16SliceThat(matcher ArgumentMatcher) []uint16 { RegisterMatcher(matcher); return nil }

// uint32
func EqUint32(value uint32) uint32 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUint32(value uint32) uint32 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUint32() uint32 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uint32)(0)))); return 0 }
func Uint32That(matcher ArgumentMatcher) uint32 { RegisterMatcher(matcher); return 0 }
func EqUint32Slice(value []uint32) []uint32 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUint32Slice(value []uint32) []uint32 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUint32Slice() []uint32 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uint32)(0))))); return nil }
func Uint32SliceThat(matcher ArgumentMatcher) []uint32 { RegisterMatcher(matcher); return nil }

// uint64
func EqUint64(value uint64) uint64 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUint64(value uint64) uint64 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUint64() uint64 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uint64)(0)))); return 0 }
func Uint64That(matcher ArgumentMatcher) uint64 { RegisterMatcher(matcher); return 0 }
func EqUint64Slice(value []uint64) []uint64 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUint64Slice(value []uint64) []uint64 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUint64Slice() []uint64 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uint64)(0))))); return nil }
func Uint64SliceThat(matcher ArgumentMatcher) []uint64 { RegisterMatcher(matcher); return nil }

// uintptr
func EqUintptr(value uintptr) uintptr { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqUintptr(value uintptr) uintptr { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyUintptr() uintptr { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((uintptr)(0)))); return 0 }
func UintptrThat(matcher ArgumentMatcher) uintptr { RegisterMatcher(matcher); return 0 }
func EqUintptrSlice(value []uintptr) []uintptr { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqUintptrSlice(value []uintptr) []uintptr { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyUintptrSlice() []uintptr { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((uintptr)(0))))); return nil }
func UintptrSliceThat(matcher ArgumentMatcher) []uintptr { RegisterMatcher(matcher); return nil }

// float32
func EqFloat32(value float32) float32 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqFloat32(value float32) float32 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyFloat32() float32 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((float32)(0)))); return 0 }
func Float32That(matcher ArgumentMatcher) float32 { RegisterMatcher(matcher); return 0 }
func EqFloat32Slice(value []float32) []float32 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqFloat32Slice(value []float32) []float32 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyFloat32Slice() []float32 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((float32)(0))))); return nil }
func Float32SliceThat(matcher ArgumentMatcher) []float32 { RegisterMatcher(matcher); return nil }

// float64
func EqFloat64(value float64) float64 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqFloat64(value float64) float64 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyFloat64() float64 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((float64)(0)))); return 0 }
func Float64That(matcher ArgumentMatcher) float64 { RegisterMatcher(matcher); return 0 }
func EqFloat64Slice(value []float64) []float64 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqFloat64Slice(value []float64) []float64 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyFloat64Slice() []float64 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((float64)(0))))); return nil }
func Float64SliceThat(matcher ArgumentMatcher) []float64 { RegisterMatcher(matcher); return nil }

// complex64
func EqComplex64(value complex64) complex64 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqComplex64(value complex64) complex64 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyComplex64() complex64 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((complex64)(0)))); return 0 }
func Complex64That(matcher ArgumentMatcher) complex64 { RegisterMatcher(matcher); return 0 }
func EqComplex64Slice(value []complex64) []complex64 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqComplex64Slice(value []complex64) []complex64 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyComplex64Slice() []complex64 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((complex64)(0))))); return nil }
func Complex64SliceThat(matcher ArgumentMatcher) []complex64 { RegisterMatcher(matcher); return nil }

// complex128
func EqComplex128(value complex128) complex128 { RegisterMatcher(&EqMatcher{Value: value}); return 0 }
func NotEqComplex128(value complex128) complex128 { RegisterMatcher(&NotEqMatcher{Value: value}); return 0 }
func AnyComplex128() complex128 { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((complex128)(0)))); return 0 }
func Complex128That(matcher ArgumentMatcher) complex128 { RegisterMatcher(matcher); return 0 }
func EqComplex128Slice(value []complex128) []complex128 { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqComplex128Slice(value []complex128) []complex128 { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyComplex128Slice() []complex128 { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((complex128)(0))))); return nil }
func Complex128SliceThat(matcher ArgumentMatcher) []complex128 { RegisterMatcher(matcher); return nil }

// string
func EqString(value string) string { RegisterMatcher(&EqMatcher{Value: value}); return "" }
func NotEqString(value string) string { RegisterMatcher(&NotEqMatcher{Value: value}); return "" }
func AnyString() string { RegisterMatcher(NewAnyMatcher(reflect.TypeOf(""))); return "" }
func StringThat(matcher ArgumentMatcher) string { RegisterMatcher(matcher); return "" }
func EqStringSlice(value []string) []string { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqStringSlice(value []string) []string { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyStringSlice() []string { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf("")))); return nil }
func StringSliceThat(matcher ArgumentMatcher) []string { RegisterMatcher(matcher); return nil }

// interface{} — AnyInterface uses the pointer-Elem trick to obtain the
// reflect.Type of the empty interface itself.
func EqInterface(value interface{}) interface{} { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInterface(value interface{}) interface{} { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInterface() interface{} { RegisterMatcher(NewAnyMatcher(reflect.TypeOf((*interface{})(nil)).Elem())); return nil }
func InterfaceThat(matcher ArgumentMatcher) interface{} { RegisterMatcher(matcher); return nil }
func EqInterfaceSlice(value []interface{}) []interface{} { RegisterMatcher(&EqMatcher{Value: value}); return nil }
func NotEqInterfaceSlice(value []interface{}) []interface{} { RegisterMatcher(&NotEqMatcher{Value: value}); return nil }
func AnyInterfaceSlice() []interface{} { RegisterMatcher(NewAnyMatcher(reflect.SliceOf(reflect.TypeOf((*interface{})(nil)).Elem()))); return nil }
func InterfaceSliceThat(matcher ArgumentMatcher) []interface{} { RegisterMatcher(matcher); return nil }
matcher_factories.go
0.770551
0.524273
matcher_factories.go
starcoder
package ztable import ( "errors" "fmt" "math" "runtime" "strconv" "gonum.org/v1/gonum/integrate/quad" ) // ZTable is the core z-score table component type ZTable struct { // list of unexported fields zScoreMap map[string]int leafNodes []*LeafNode rootNode *Node } // Options is a config for the NewZTable function which allows bucket size customization. // Default bucket size is 30 type Options struct { BucketSize int // number of table values to group together in tree leaf-node } // Node struct type Node struct { // list of unexported fields value float64 leftNode *Node rightNode *Node index int } // LeafNode struct type LeafNode struct { // list of unexported fields zScore string percentage float64 } // FindPercentage returns the percentage of a given z-score from the table func (zt *ZTable) FindPercentage(zScore float64) float64 { key := fmt.Sprintf(`%.2f`, zScore) if index, ok := zt.zScoreMap[key]; ok { return zt.leafNodes[index].percentage } return 0 } // FindZScore returns the closest z-score given a percentage value func (zt *ZTable) FindZScore(percentage float64) (float64, error) { currNode := zt.rootNode startingIndex := 0 if currNode != nil { for { if percentage > currNode.value && currNode.rightNode != nil { currNode = currNode.rightNode } else if percentage <= currNode.value && currNode.leftNode != nil { currNode = currNode.leftNode } else { startingIndex = currNode.index break } } if startingIndex == 0 && percentage < zt.leafNodes[0].percentage { return strconv.ParseFloat(zt.leafNodes[0].zScore, 64) } for startingIndex < len(zt.leafNodes) { currLeaf := zt.leafNodes[startingIndex] if percentage == currLeaf.percentage || startingIndex+1 >= len(zt.leafNodes) { return strconv.ParseFloat(currLeaf.zScore, 64) } else if nextLeaf := zt.leafNodes[startingIndex+1]; percentage < nextLeaf.percentage { if percentage-currLeaf.percentage <= nextLeaf.percentage-percentage { return strconv.ParseFloat(currLeaf.zScore, 64) } return strconv.ParseFloat(nextLeaf.zScore, 64) } 
startingIndex++ } } return 0, errors.New("Unable to find ZScore given percentage") } // NewZTable creates and returns a new ZTable object. 'options' allows you to set the bucket size of the leaf nodes. // Tuning bucket size will allow you to trade off memory for speed and vice-versa in the z-score lookup, func NewZTable(options *Options) *ZTable { bucketSize := 30 if options != nil && options.BucketSize != 0 { bucketSize = options.BucketSize } zTable := ZTable{ zScoreMap: make(map[string]int), leafNodes: []*LeafNode{}, } zScore := float64(-4) for zScore <= 4 { concurrent := runtime.GOMAXPROCS(0) percentage := quad.Fixed(normalProbabilityDensity, math.Inf(-1), zScore, 1000, nil, concurrent) index := len(zTable.leafNodes) if index%bucketSize == 0 { } zScoreString := fmt.Sprintf(`%.2f`, zScore) zTable.zScoreMap[zScoreString] = index zTable.leafNodes = append(zTable.leafNodes, &LeafNode{zScore: zScoreString, percentage: percentage}) zScore = zScore + 0.01 } initLayer := []*Node{} i := 0 for i < len(zTable.leafNodes) { if i+bucketSize < len(zTable.leafNodes) { initLayer = append(initLayer, &Node{ value: zTable.leafNodes[i+bucketSize-1].percentage, index: i, }) } i = i + bucketSize } rootNodeSlice := buildTree(initLayer) if len(rootNodeSlice) > 0 { zTable.rootNode = rootNodeSlice[0] } return &zTable } func normalProbabilityDensity(x float64) float64 { return (1 / math.Sqrt(2*math.Pi)) * math.Exp((-1*x*x)/2) } func buildTree(layer []*Node) []*Node { currLayer := []*Node{} count := 0 for count < len(layer)-1 { currLayer = append(currLayer, &Node{ value: (layer[count].value + layer[count+1].value) / 2, leftNode: layer[count], rightNode: layer[count+1], }) count = count + 1 } if len(currLayer) <= 1 { return currLayer } return buildTree(currLayer) }
ztable.go
0.662796
0.487002
ztable.go
starcoder
package inertia import ( pr "github.com/StStep/go-test-simulation/internal/physics/prop" fl "gonum.org/v1/gonum/floats" ) type Inertia struct { Prop *pr.Prop // Physics properties to use with math curVelocity [2]float64 // Represents current velocity vector cmdVelocity [2]float64 // Represents commanded velocity vector } // Forward, Backward, Right, Left func NewInertia(prop *pr.Prop) *Inertia { return &Inertia{Prop: prop} } // Turn rate used for setting arc for current direction func (m *Inertia) TurnRate() float64 { return m.Prop.TurnRateAt(fl.Norm(m.curVelocity[:], 2)) } func (m *Inertia) Velocity() [2]float64 { return m.cmdVelocity } func (m *Inertia) Command() ([2]float64, float64) { t := m.cmdVelocity[:] speed := fl.Norm(t, 2) if speed == 0 { return [2]float64{0, 0}, 0 } fl.Scale(1/speed, t) return [2]float64{t[0], t[1]}, speed } // dir[1] > 0 ? Front : Back; dir[0] > 0 ? Right : Left func (m *Inertia) SetCommand(dir [2]float64, speed float64) { // Set to unit vector if not already fl.Scale(1/fl.Norm(dir[:], 2), dir[:]) // Check max horizantal velocity hsp := dir[0] if hsp > 0 { hsp *= m.Prop.MaxVelocity[2] } else { hsp *= m.Prop.MaxVelocity[3] } // Check max vertical max velocity vsp := dir[1] if vsp > 0 { vsp *= m.Prop.MaxVelocity[0] } else { vsp *= m.Prop.MaxVelocity[1] } // Cap based on calc max adjSpeed := speed if mx := fl.Norm([]float64{hsp, vsp}, 2); adjSpeed > mx { adjSpeed = mx } // Set vel to dir scaled by speed copy(m.cmdVelocity[:], dir[:]) fl.Scale(adjSpeed, m.cmdVelocity[:]) } func (m *Inertia) PhyStep(del float64) { var diff [2]float64 fl.SubTo(diff[:], m.curVelocity[:], m.cmdVelocity[:]) // Check horizantal velocity, use right if positive hind := 3 if m.curVelocity[0] > 0 { hind = 2 } else if m.curVelocity[0] < 0 { hind = 3 } else if m.cmdVelocity[0] > 0 { hind = 2 } else { hind = 3 } hdiff := diff[0] if hdiff > 0 { hdiff -= m.Prop.Deceleration[hind] * del if hdiff < 0 { hdiff = 0 } } else if hdiff < 0 { hdiff += m.Prop.Acceleration[hind] * 
del if hdiff > 0 { hdiff = 0 } } else { hdiff = 0 } // Check vertival velocity, use forward if positive vind := 1 if m.curVelocity[1] > 0 { vind = 0 } else if m.curVelocity[1] < 0 { vind = 1 } else if m.cmdVelocity[1] > 0 { vind = 0 } else { vind = 1 } vdiff := diff[1] if vdiff > 0 { vdiff -= m.Prop.Deceleration[vind] * del if vdiff < 0 { vdiff = 0 } } else if vdiff < 0 { vdiff += m.Prop.Acceleration[vind] * del if vdiff > 0 { vdiff = 0 } } else { vdiff = 0 } // PhyStep vel fl.AddTo(m.curVelocity[:], []float64{hdiff, vdiff}, m.cmdVelocity[:]) }
internal/physics/inertia/inertia.go
0.703448
0.6922
inertia.go
starcoder
package vm import ( "time" "github.com/runner-mei/errors" ) func NewArithmeticError(op, left, right string) error { return errors.New("cloudn't '" + left + "' " + op + " '" + right + "'") } func PlusFunc(left, right func(Context) (Value, error)) func(Context) (Value, error) { return func(ctx Context) (Value, error) { leftValue, err := left(ctx) if err != nil { return Null(), err } rightValue, err := right(ctx) if err != nil { return Null(), err } return Plus(leftValue, rightValue) } } func Plus(leftValue, rightValue Value) (Value, error) { switch rightValue.Type { case ValueNull: return Null(), NewArithmeticError("+", leftValue.Type.String(), rightValue.Type.String()) case ValueBool: return Null(), NewArithmeticError("+", leftValue.Type.String(), rightValue.Type.String()) case ValueString: return Null(), NewArithmeticError("+", leftValue.Type.String(), rightValue.Type.String()) case ValueInt64: return plusInt(leftValue, rightValue.Int64) case ValueUint64: return plusUint(leftValue, rightValue.Uint64) case ValueFloat64: return plusFloat(leftValue, rightValue.Float64) case ValueDatetime: return plusDatetime(leftValue, IntToDatetime(rightValue.Int64)) case ValueInterval: return plusInterval(leftValue, IntToInterval(rightValue.Int64)) default: return Null(), NewArithmeticError("+", leftValue.Type.String(), rightValue.Type.String()) } } func plusInt(left Value, right int64) (Value, error) { switch left.Type { case ValueNull: return Null(), NewArithmeticError("+", left.Type.String(), "int") case ValueBool: return Null(), NewArithmeticError("+", left.Type.String(), "int") case ValueString: return Null(), NewArithmeticError("+", left.Type.String(), "int") case ValueInt64: return IntToValue(left.Int64 + right), nil case ValueUint64: if right < 0 { u64 := uint64(-right) if left.Uint64 < u64 { return IntToValue(right + int64(left.Uint64)), nil } return UintToValue(left.Uint64 - u64), nil } return UintToValue(left.Uint64 + uint64(right)), nil case ValueFloat64: return 
FloatToValue(left.Float64 + float64(right)), nil default: return Null(), NewArithmeticError("+", left.Type.String(), "int") } } func plusUint(left Value, right uint64) (Value, error) { switch left.Type { case ValueNull: return Null(), NewArithmeticError("+", left.Type.String(), "uint") case ValueBool: return Null(), NewArithmeticError("+", left.Type.String(), "uint") case ValueString: return Null(), NewArithmeticError("+", left.Type.String(), "uint") case ValueInt64: if left.Int64 < 0 { u64 := uint64(-left.Int64) if u64 > right { return IntToValue(left.Int64 + int64(right)), nil } return UintToValue(right - u64), nil } return IntToValue(left.Int64 + int64(right)), nil case ValueUint64: return UintToValue(left.Uint64 + right), nil case ValueFloat64: return FloatToValue(left.Float64 + float64(right)), nil default: return Null(), NewArithmeticError("+", left.Type.String(), "uint") } } func plusFloat(left Value, right float64) (Value, error) { switch left.Type { case ValueNull: return Null(), NewArithmeticError("+", left.Type.String(), "float") case ValueBool: return Null(), NewArithmeticError("+", left.Type.String(), "float") case ValueString: return Null(), NewArithmeticError("+", left.Type.String(), "float") case ValueInt64: return FloatToValue(float64(left.Int64) + right), nil case ValueUint64: return FloatToValue(float64(left.Uint64) + right), nil case ValueFloat64: return FloatToValue(left.Float64 + float64(right)), nil default: return Null(), NewArithmeticError("+", left.Type.String(), "float") } } func plusDatetime(left Value, right time.Time) (Value, error) { if left.Type != ValueInterval { return Null(), NewArithmeticError("+", left.Type.String(), "datetime") } return DatetimeToValue(right.Add(IntToInterval(left.Int64))), nil } func plusInterval(left Value, right time.Duration) (Value, error) { if left.Type != ValueDatetime { return Null(), NewArithmeticError("+", left.Type.String(), "datetime") } t := IntToDatetime(left.Int64) return 
DatetimeToValue(t.Add(right)), nil }
vm/plus.go
0.759047
0.527438
plus.go
starcoder
package strings // Calculating edit distance between strings of length m and n is O(mn). // In our case we also don't want distance against the full query string, // we only want it against the "best" substring. // The function GetBestMatchPositions below finds the "best" candidate // substrings in time O(m+n). "Best" here just means that they have mostly // the same letters (above some threshold) as the reference string. import ( "math" gostrings "strings" ) const match_float_threshold = 0.67 var exclude_start_chars = make(map[byte]bool) // Given a reference string, match its character counts against sliding windows of the query // string. If the counts match above a certain threshold (0.67) then we'll (separately) do a // Levenshtein match against those windows of the query string. func GetBestMatchPositions(origReference, origQuery string) []int { reference := []byte(ConvertSpecialCharsToSpace(gostrings.ToLower(origReference))) query := []byte(ConvertSpecialCharsToSpace(gostrings.ToLower(origQuery))) intersectionCounts := getIntersections(reference, query) threshold := int(math.Round(match_float_threshold * float64(len(reference)))) return getLocalMaximaAboveThreshold(intersectionCounts, threshold) } func init() { exclude_start_chars[' '] = true } // We'll slide the reference string along the query string, getting intersection counts at each position // as we go. We only need to do the full calculation the first time; subsequent calculations only require // us to drop the old first char and add the new last char. 
func getIntersections(reference, query []byte) (intersectionTracker []int) { refCounts := getCounts(reference) lenRef := len(reference) // doing the 0th iteration of the loop outside b/c there's no dropChar/addChar-handling: startPos := 0 endPos := min2i(len(query), lenRef) windowCounts := getCounts(query[:endPos]) intersection := getIntersection(refCounts, windowCounts) lastIntersection := 0 intersectionTracker = appendCheckedCount(intersectionTracker, intersection, lastIntersection, query, startPos) for startPos, endPos = startPos+1, endPos+1; endPos <= len(query); startPos, endPos = startPos+1, endPos+1 { // sliding the window, so drop one char and add a new one: dropChar := char_map[query[startPos-1]] addChar := char_map[query[endPos-1]] // remove dropped char from intersection: if windowCounts[dropChar] -= 1; windowCounts[dropChar] < refCounts[dropChar] { intersection -= 1 } // add new char to intersection: if windowCounts[addChar] += 1; windowCounts[addChar] <= refCounts[addChar] { intersection += 1 } intersectionTracker = appendCheckedCount(intersectionTracker, intersection, lastIntersection, query, startPos) lastIntersection = intersection } return } func getLocalMaximaAboveThreshold(arr []int, threshold int) []int { positions := make([]int, 0) last := len(arr) - 1 for i := 0; i < len(arr); i++ { if (i == 0 || arr[i] >= arr[i-1]) && (i == last || arr[i] >= arr[i+1]) && (arr[i] >= threshold) { positions = append(positions, i) } } return positions } func getIntersection(refCounts, queryCounts []int) int { intersection := 0 for i := 0; i < len(refCounts); i++ { intersection += min2i(refCounts[i], queryCounts[i]) } return intersection } func appendCheckedCount(intersectionTracker []int, intersection, lastIntersection int, query []byte, startPos int) []int { if startPos < len(query) && !exclude_start_chars[query[startPos]] { return append(intersectionTracker, intersection) } else { return append(intersectionTracker, lastIntersection) } } func getCounts(s []byte) 
[]int { counts := make([]int, alphabet_size) for _, ch := range s { counts[char_map[ch]] += 1 } return counts } func min2i(i, j int) int { if i < j { return i } return j } func min(x ...int) int { minVal := math.MaxInt64 for _, i := range x { if i < minVal { minVal = i } } return minVal } func max(x ...int) int { maxVal := math.MinInt64 for _, i := range x { if i > maxVal { maxVal = i } } return maxVal }
strings/best_match_positions.go
0.687735
0.676119
best_match_positions.go
starcoder
package mathutil import ( "sort" ) // SliceInt represets a slice of integers and provides functions on that slice. type SliceInt struct { Elements []int Stats SliceIntStats } // NewSliceInt creates and returns an empty SliceInt struct. func NewSliceInt() SliceInt { sint := SliceInt{Elements: []int{}} return sint } // Append adds an element to the integer slice. func (sint *SliceInt) Append(num int) { sint.Elements = append(sint.Elements, num) } // Len returns the number of items in the integer slice. func (sint *SliceInt) Len() int { return len(sint.Elements) } // Sort sorts the elements in the integer slice. func (sint *SliceInt) Sort() { sort.Ints(sint.Elements) } // Min returns the minimum element value in the integer slice. func (sint *SliceInt) Min() (int, error) { if len(sint.Elements) == 0 { return 0, ErrEmptyList } if !sort.IntsAreSorted(sint.Elements) { sort.Ints(sint.Elements) } return sint.Elements[0], nil } // Max returns the maximum element value in the integer slice. func (sint *SliceInt) Max() (int, error) { if len(sint.Elements) == 0 { return 0, ErrEmptyList } if !sort.IntsAreSorted(sint.Elements) { sort.Ints(sint.Elements) } return sint.Elements[len(sint.Elements)-1], nil } // Sum returns sum of all the elements in the integer slice. func (sint *SliceInt) Sum() (int, error) { if len(sint.Elements) == 0 { return 0, ErrEmptyList } sum := int(0) for _, num := range sint.Elements { sum += num } return sum, nil } // Average is an alias for Mean. func (sint *SliceInt) Average() (float64, error) { return sint.Mean() } // Mean returns the arithmetic mean of the integer slice. func (sint *SliceInt) Mean() (float64, error) { if len(sint.Elements) == 0 { return 0, ErrEmptyList } sum, err := sint.Sum() if err != nil { return 0, err } return float64(sum) / float64(len(sint.Elements)), nil } // Median returns the median or middle value of the sorted integer slice. 
func (sint *SliceInt) Median() (int, error) { if len(sint.Elements) == 0 { return 0, ErrEmptyList } if !sort.IntsAreSorted(sint.Elements) { sort.Ints(sint.Elements) } mid := int64(float64(len(sint.Elements)) / 2) return sint.Elements[mid], nil } // BuildStats builds a stats struct for current integer slice elements. func (sint *SliceInt) BuildStats() (SliceIntStats, error) { stats := NewSliceIntStats() stats.Len = sint.Len() max, err := sint.Max() if err != nil { return stats, err } stats.Max = max min, err := sint.Min() if err != nil { return stats, err } stats.Min = min mean, err := sint.Mean() if err != nil { return stats, err } stats.Mean = mean median, err := sint.Median() if err != nil { return stats, err } stats.Median = median sum, err := sint.Sum() if err != nil { return stats, err } stats.Sum = sum sint.Stats = stats return stats, nil } // SliceIntStats represents a set of statistics for a set of integers. type SliceIntStats struct { Len int Max int Mean float64 Median int Min int Sum int } // NewSliceIntStats returns a new initialized SliceIntStats struct. func NewSliceIntStats() SliceIntStats { stats := SliceIntStats{ Len: 0, Max: 0, Mean: 0, Median: 0, Min: 0, Sum: 0} return stats }
math/mathutil/sliceint.go
0.837753
0.522568
sliceint.go
starcoder
package trinary import ( "math" "strings" . "github.com/iotaledger/iota.go/consts" "github.com/pkg/errors" ) var ( // TryteToTritsLUT is a Look-up-table for Trytes to Trits conversion. TryteToTritsLUT = [][]int8{ {0, 0, 0}, {1, 0, 0}, {-1, 1, 0}, {0, 1, 0}, {1, 1, 0}, {-1, -1, 1}, {0, -1, 1}, {1, -1, 1}, {-1, 0, 1}, {0, 0, 1}, {1, 0, 1}, {-1, 1, 1}, {0, 1, 1}, {1, 1, 1}, {-1, -1, -1}, {0, -1, -1}, {1, -1, -1}, {-1, 0, -1}, {0, 0, -1}, {1, 0, -1}, {-1, 1, -1}, {0, 1, -1}, {1, 1, -1}, {-1, -1, 0}, {0, -1, 0}, {1, -1, 0}, {-1, 0, 0}, } // Pow27LUT is a Look-up-table for Decoding Trits to int64 Pow27LUT = []int64{1, 27, 729, 19683, 531441, 14348907, 387420489, 10460353203, 282429536481, 7625597484987, 205891132094649, 5559060566555523, 150094635296999136, 4052555153018976256} byteRadix = [5]int8{1, 3, 9, 27, 81} encodedZero = []int8{1, 0, 0, -1} ) // Trits is a slice of int8. You should not use cast, use NewTrits instead to ensure the validity. type Trits = []int8 // ValidTrit returns true if t is a valid trit. func ValidTrit(t int8) bool { if t == -1 || t == 0 || t == 1 { return true } return false } // ValidTrits returns true if t is valid trits. func ValidTrits(t Trits) error { for i, tt := range t { if valid := ValidTrit(tt); !valid { return errors.Wrapf(ErrInvalidTrit, "at index %d", i) } } return nil } // NewTrits casts Trits and checks its validity. func NewTrits(t []int8) (Trits, error) { err := ValidTrits(t) return t, err } // TritsEqual returns true if t and b are equal Trits. func TritsEqual(a Trits, b Trits) (bool, error) { if err := ValidTrits(a); err != nil { return false, err } if err := ValidTrits(b); err != nil { return false, err } if len(a) != len(b) { return false, nil } for i := range a { if a[i] != b[i] { return false, nil } } return true, nil } // MustAbsInt64 returns the absolute value of an int64. 
func MustAbsInt64(n int64) int64 { if n == -1<<63 { panic("value out of range") } y := n >> 63 // y ← x ⟫ 63 return (n ^ y) - y // (x ⨁ y) - y } func nearestGreaterMultipleOfThree(value uint64) uint64 { rem := value % uint64(Radix) if rem == 0 { return value } return value + uint64(Radix) - rem } // MinTrits returns the length of trits needed to encode the value. func MinTrits(value int64) uint64 { var num uint64 = 1 var vp uint64 = 1 valueAbs := uint64(MustAbsInt64(value)) for uint64(valueAbs) > vp { vp = vp*uint64(Radix) + 1 num++ } return num } // EncodedLength returns the length of trits needed to encode the value + encoding information. func EncodedLength(value int64) uint64 { if value == 0 { return uint64(len(encodedZero)) } length := nearestGreaterMultipleOfThree(MinTrits(value)) // trits length + encoding length return length + MinTrits((1<<(length/uint64(Radix)))-1) } // IntToTrytes converts int64 to a slice of trytes. func IntToTrytes(value int64, trytesCnt int) Trytes { remainder := value if value < 0 { remainder = -value } var t Trytes for tryte := 0; tryte < trytesCnt; tryte++ { idx := remainder % 27 remainder /= 27 if idx > 13 { remainder += 1 } if value < 0 && idx != 0 { idx = 27 - idx } t += string(TryteAlphabet[idx]) } return t } // TrytesToInt converts a slice of trytes to int64. func TrytesToInt(t Trytes) int64 { var val int64 for i := len(t) - 1; i >= 0; i-- { idx := strings.Index(TryteAlphabet, string(t[i])) if idx > 13 { idx = idx - 27 } val = val*27 + int64(idx) } return val } // IntToTrits converts int64 to a slice of trits. 
func IntToTrits(value int64) Trits { if value == 0 { return Trits{0} } negative := value < 0 size := MinTrits(value) valueAbs := MustAbsInt64(value) t := make(Trits, size) for i := 0; i < int(size); i++ { if valueAbs == 0 { break } trit := int8((valueAbs+1)%(TrinaryRadix) - 1) if negative { trit = -trit } t[i] = trit valueAbs++ valueAbs /= TrinaryRadix } return t } // TritsToInt converts a slice of trits into an integer and assumes little-endian notation. func TritsToInt(t Trits) int64 { var val int64 for i := len(t) - 1; i >= 0; i-- { val = val*3 + int64(t[i]) } return val } // EncodeInt64 encodes an int64 as a slice of trits with encoding information. func EncodeInt64(value int64) (t Trits, size uint64, err error) { size = EncodedLength(value) if value == 0 { return encodedZero, size, nil } var encoding int64 = 0 index := 0 length := nearestGreaterMultipleOfThree(MinTrits(MustAbsInt64(value))) t = make(Trits, size) copy(t, IntToTrits(value)) for i := 0; i < int(length)-TrinaryRadix; i += TrinaryRadix { if TritsToInt(t[i:i+TrinaryRadix]) >= 0 { encoding |= 1 << uint(index) for j := 0; j < TrinaryRadix; j++ { t[i+j] = -t[i+j] } } index++ } if TritsToInt(t[length-TrinaryRadix:length]) <= 0 { encoding |= 1 << uint(index) for i := 1; i < TrinaryRadix+1; i++ { t[int(length)-i] = -t[int(length)-i] } } copy(t[length:], IntToTrits(encoding)) return t, size, nil } // DecodeInt64 decodes a slice of trits with encoding information as an int64. 
func DecodeInt64(t Trits) (value int64, size uint64, err error) { numTrits := uint64(len(t)) equal, err := TritsEqual(t[0:4], encodedZero) if err != nil { return 0, 0, err } if equal { return 0, EncodedLength(0), nil } value = 0 var encodingStart uint64 = 0 for (encodingStart < numTrits) && (TritsToInt(t[encodingStart:encodingStart+TrinaryRadix]) <= 0) { encodingStart += TrinaryRadix } if encodingStart >= numTrits { return 0, 0, errors.New("encodingStart > numTrits") } encodingStart += TrinaryRadix encodingLength := MinTrits((1 << (encodingStart / TrinaryRadix)) - 1) encoding := TritsToInt(t[encodingStart : encodingStart+encodingLength]) // Bound checking for the lookup table if encodingStart/TrinaryRadix > 13 { return 0, 0, errors.New("encodingStart/TrinaryRadix > 13") } for i := 0; i < int(encodingStart/TrinaryRadix); i++ { tryteValue := TritsToInt(t[i*TrinaryRadix : (i*TrinaryRadix)+TrinaryRadix]) if ((encoding >> uint(i)) & 1) == 1 { tryteValue = -tryteValue } value += Pow27LUT[i] * tryteValue } return value, encodingStart + encodingLength, nil } // CanTritsToTrytes returns true if t can be converted to trytes. func CanTritsToTrytes(trits Trits) bool { if len(trits) == 0 { return false } return len(trits)%3 == 0 } // TrailingZeros returns the number of trailing zeros of the given trits. func TrailingZeros(trits Trits) int64 { z := int64(0) for i := len(trits) - 1; i >= 0 && trits[i] == 0; i-- { z++ } return z } // TritsToTrytes converts a slice of trits into trytes. Returns an error if len(t)%3!=0 func TritsToTrytes(trits Trits) (Trytes, error) { if !CanTritsToTrytes(trits) { return "", errors.Wrap(ErrInvalidTritsLength, "trits slice size must be a multiple of 3") } o := make([]byte, len(trits)/3) for i := 0; i < len(trits)/3; i++ { j := trits[i*3] + trits[i*3+1]*3 + trits[i*3+2]*9 if j < 0 { j += int8(len(TryteAlphabet)) } o[i] = TryteAlphabet[j] } return Trytes(o), nil } // MustTritsToTrytes converts a slice of trits into trytes. 
Panics if len(t)%3!=0 func MustTritsToTrytes(trits Trits) Trytes { trytes, err := TritsToTrytes(trits) if err != nil { panic(err) } return trytes } // CanBeHash returns the validity of the trit length. func CanBeHash(trits Trits) bool { return len(trits) == HashTrinarySize } // TrytesToBytes is only defined for hashes (81 Trytes). It returns 48 bytes. func TrytesToBytes(trytes Trytes) ([]byte, error) { trits, err := TrytesToTrits(trytes) if err != nil { return nil, err } return TritsToBytes(trits), nil } // MustTrytesToBytes is only defined for hashes (81 Trytes). It returns 48 bytes. func MustTrytesToBytes(trytes Trytes) []byte { bytes, err := TrytesToBytes(trytes) if err != nil { panic(err) } return bytes } // BytesToTrytes converts bytes to Trytes. Returns an error if the bytes slice is not 48 in length. func BytesToTrytes(bytes []byte, numTrytes ...int) (Trytes, error) { numTrits := []int{} if len(numTrytes) > 0 { numTrits = append(numTrits, numTrytes[0]*3) } trits, err := BytesToTrits(bytes, numTrits...) if err != nil { return "", err } trits = PadTrits(trits, int(nearestGreaterMultipleOfThree(uint64(len(trits))))) return TritsToTrytes(trits) } // MustBytesToTrytes converts bytes to Trytes. func MustBytesToTrytes(bytes []byte, numTrytes ...int) Trytes { trytes, err := BytesToTrytes(bytes, numTrytes...) 
if err != nil { panic(err) } return trytes } // TritsToBytes packs an array of trits into an array of bytes (5 packed trits in 1 byte) func TritsToBytes(trits Trits) (bytes []byte) { tritsLength := len(trits) bytesLength := (tritsLength + NumberOfTritsInAByte - 1) / NumberOfTritsInAByte bytes = make([]byte, bytesLength) tritIdx := bytesLength * NumberOfTritsInAByte for byteNum := bytesLength - 1; byteNum >= 0; byteNum-- { var value int8 = 0 for i := 0; i < NumberOfTritsInAByte; i++ { tritIdx-- if tritIdx < tritsLength { value = value*Radix + trits[tritIdx] } } bytes[byteNum] = byte(value) } return bytes } // BytesToTrits unpacks an array of bytes into an array of trits func BytesToTrits(bytes []byte, numTrits ...int) (trits Trits, err error) { bytesLength := len(bytes) tritsLength := bytesLength * NumberOfTritsInAByte if len(numTrits) > 0 { tritsLength = numTrits[0] minTritLength := (bytesLength-1)*NumberOfTritsInAByte + 1 maxTritLength := bytesLength * NumberOfTritsInAByte if tritsLength < minTritLength || tritsLength > maxTritLength { return nil, errors.Wrapf(ErrInvalidTritsLength, "must be %d-%d in size", minTritLength, maxTritLength) } } trits = make(Trits, tritsLength) for byteNum := 0; byteNum < bytesLength; byteNum++ { value := int8(bytes[byteNum]) tritOffset := byteNum * NumberOfTritsInAByte for tritNum := NumberOfTritsInAByte - 1; tritNum >= 0; tritNum-- { var trit int8 = 0 tritIdx := tritOffset + tritNum if tritIdx < tritsLength { byteRadixHalf := byteRadix[tritNum] >> 1 if value > byteRadixHalf { value -= byteRadix[tritNum] trit = 1 } else if value < (-byteRadixHalf) { value += byteRadix[tritNum] trit = -1 } trits[tritIdx] = trit } } } return trits, nil } // ReverseTrits reverses the given trits. func ReverseTrits(trits Trits) Trits { for left, right := 0, len(trits)-1; left < right; left, right = left+1, right-1 { trits[left], trits[right] = trits[right], trits[left] } return trits } // Trytes is a string of trytes. 
Use NewTrytes() instead of typecasting. type Trytes = string // Hash represents a trinary hash type Hash = Trytes // Hashes is a slice of Hash. type Hashes = []Hash // ValidTrytes returns true if t is made of valid trytes. func ValidTrytes(trytes Trytes) error { if trytes == "" { return ErrInvalidTrytes } for _, runeVal := range trytes { if (runeVal < 'A' || runeVal > 'Z') && runeVal != '9' { return ErrInvalidTrytes } } return nil } // ValidTryte returns the validity of a tryte (must be rune A-Z or 9) func ValidTryte(t rune) error { return ValidTrytes(string(t)) } // NewTrytes casts to Trytes and checks its validity. func NewTrytes(s string) (Trytes, error) { err := ValidTrytes(s) return s, err } // TrytesToTrits converts a slice of trytes into trits. func TrytesToTrits(trytes Trytes) (Trits, error) { if err := ValidTrytes(trytes); err != nil { return nil, err } trits := make(Trits, len(trytes)*3) for i := range trytes { idx := strings.Index(TryteAlphabet, string(trytes[i:i+1])) copy(trits[i*3:i*3+3], TryteToTritsLUT[idx]) } return trits, nil } // MustTrytesToTrits converts a slice of trytes into trits. func MustTrytesToTrits(trytes Trytes) Trits { trits, err := TrytesToTrits(trytes) if err != nil { panic(err) } return trits } // Pad pads the given trytes with 9s up to the given size. func Pad(trytes Trytes, size int) Trytes { if len(trytes) >= size { return trytes } out := make([]byte, size) copy(out, []byte(trytes)) for i := len(trytes); i < size; i++ { out[i] = '9' } return Trytes(out) } // PadTrits pads the given trits with 0 up to the given size. func PadTrits(trits Trits, size int) Trits { if len(trits) >= size { return trits } sized := make(Trits, size) for i := 0; i < size; i++ { if len(trits) > i { sized[i] = trits[i] continue } sized[i] = 0 } return sized } // Sum returns the sum of two trits. 
func Sum(a int8, b int8) int8 { s := a + b switch s { case 2: return -1 case -2: return 1 default: return s } } func cons(a int8, b int8) int8 { if a == b { return a } return 0 } func any(a int8, b int8) int8 { s := a + b if s > 0 { return 1 } if s < 0 { return -1 } return 0 } func fullAdd(a int8, b int8, c int8) [2]int8 { sA := Sum(a, b) cA := cons(a, b) cB := cons(sA, c) cOut := any(cA, cB) sOut := Sum(sA, c) return [2]int8{sOut, cOut} } // AddTrits adds a to b. func AddTrits(a Trits, b Trits) Trits { maxLen := int64(math.Max(float64(len(a)), float64(len(b)))) if maxLen == 0 { return Trits{0} } out := make(Trits, maxLen) var aI, bI, carry int8 for i := 0; i < len(out); i++ { if i < len(a) { aI = a[i] } else { aI = 0 } if i < len(b) { bI = b[i] } else { bI = 0 } fA := fullAdd(aI, bI, carry) out[i] = fA[0] carry = fA[1] } return out }
trinary/trinary.go
0.648244
0.434461
trinary.go
starcoder
package levenshtein

import (
	"fmt"
)

// stateLimit is the maximum number of states allowed.
var stateLimit = 10000

// StateLimit is the maximum number of states allowed.
func StateLimit() int { return stateLimit }

// SetStateLimit sets the maximum number of states allowed.
// NOTE(review): mutates package-level state; not safe to call concurrently
// with automaton construction — confirm callers serialize this.
func SetStateLimit(v int) { stateLimit = v }

// errTooManyStates is returned if you attempt to build a Levenshtein
// automaton which requires too many states.
func errTooManyStates() error {
	return fmt.Errorf("dfa contains more than %d states", StateLimit())
}

// Levenshtein implements the vellum.Automaton interface for matching
// terms within the specified Levenshtein edit-distance of the queried
// term. This automaton recognizes utf-8 encoded bytes and computes
// the edit distance on the result code-points, not on the raw bytes.
type Levenshtein struct {
	prog *dynamicLevenshtein // the dynamic (NFA-like) program the DFA was built from
	dfa  *dfa                // the compiled DFA driven by Start/Accept/IsMatch
}

// New creates a new Levenshtein automaton for the specified
// query string and edit distance.
func New(query string, distance int) (*Levenshtein, error) {
	lev := &dynamicLevenshtein{
		query:    query,
		distance: uint(distance),
	}
	dfabuilder := newDfaBuilder(lev)
	dfa, err := dfabuilder.build()
	if err != nil {
		return nil, err
	}
	return &Levenshtein{
		prog: lev,
		dfa:  dfa,
	}, nil
}

// Start returns the start state of this automaton.
// State 0 is reserved as the dead/fail state, so the start state is 1.
func (l *Levenshtein) Start() int {
	return 1
}

// IsMatch returns if the specified state is a matching state.
// Out-of-range states are never matching.
func (l *Levenshtein) IsMatch(s int) bool {
	if s < len(l.dfa.states) {
		return l.dfa.states[s].match
	}
	return false
}

// CanMatch returns if the specified state can ever transition to a matching
// state. Any valid state other than the dead state 0 can.
func (l *Levenshtein) CanMatch(s int) bool {
	if s < len(l.dfa.states) && s > 0 {
		return true
	}
	return false
}

// WillAlwaysMatch returns if the specified state will always end in a
// matching state. This automaton never guarantees that.
func (l *Levenshtein) WillAlwaysMatch(s int) bool {
	return false
}

// Accept returns the new state, resulting from the transite byte b
// when currently in the state s.
func (l *Levenshtein) Accept(s int, b byte) int { if s < len(l.dfa.states) { return l.dfa.states[s].next[b] } return 0 }
levenshtein/levenshtein.go
0.753557
0.447521
levenshtein.go
starcoder
package lamda // MapPredicate function type MapPredicate func(interface{}) interface{} // MapStringPredicate function type MapStringPredicate func(string) string // MapBytePredicate function type MapBytePredicate func(byte) byte // MapIntPredicate function type MapIntPredicate func(int) int // MapInt16Predicate function type MapInt16Predicate func(int16) int16 // MapInt32Predicate function type MapInt32Predicate func(int32) int32 // MapInt64Predicate function type MapInt64Predicate func(int64) int64 // MapFloat32Predicate function type MapFloat32Predicate func(float32) float32 // MapFloat64Predicate function type MapFloat64Predicate func(float64) float64 // MapBoolPredicate function type MapBoolPredicate func(bool) bool // Map creates a new array populated with the results of calling // a provided function on every element in the calling array func Map(array []interface{}, pred MapPredicate) []interface{} { result := make([]interface{}, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapString creates a new array populated with the results of calling // a provided function on every element in the calling array func MapString(array []string, pred MapStringPredicate) []string { result := make([]string, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapByte creates a new array populated with the results of calling // a provided function on every element in the calling array func MapByte(array []byte, pred MapBytePredicate) []byte { result := make([]byte, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapInt creates a new array populated with the results of calling // a provided function on every element in the calling array func MapInt(array []int, pred MapIntPredicate) []int { result := make([]int, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapInt16 creates a new array populated with the results of 
calling // a provided function on every element in the calling array func MapInt16(array []int16, pred MapInt16Predicate) []int16 { result := make([]int16, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapInt32 creates a new array populated with the results of calling // a provided function on every element in the calling array func MapInt32(array []int32, pred MapInt32Predicate) []int32 { result := make([]int32, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapInt64 creates a new array populated with the results of calling // a provided function on every element in the calling array func MapInt64(array []int64, pred MapInt64Predicate) []int64 { result := make([]int64, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapFloat32 creates a new array populated with the results of calling // a provided function on every element in the calling array func MapFloat32(array []float32, pred MapFloat32Predicate) []float32 { result := make([]float32, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapFloat64 creates a new array populated with the results of calling // a provided function on every element in the calling array func MapFloat64(array []float64, pred MapFloat64Predicate) []float64 { result := make([]float64, 0) for _, item := range array { result = append(result, pred(item)) } return result } // MapBool creates a new array populated with the results of calling // a provided function on every element in the calling array func MapBool(array []bool, pred MapBoolPredicate) []bool { result := make([]bool, 0) for _, item := range array { result = append(result, pred(item)) } return result }
lamda/map.go
0.736116
0.405419
map.go
starcoder
package mpl3115a2 import ( "encoding/binary" "errors" "time" i2c "github.com/d2r2/go-i2c" ) // Register map const ( // Alias for DR_STATUS or F_STATUS STATUS = 0x00 // 20-bit realtime pressure sample OUT_PRES_MSB_CSB_LSB = 0x01 OUT_PRES_BYTES = 3 // 12-bit realtime temperature sample OUT_TEMP_MSB_LSB = 0x04 OUT_TEMP_BYTES = 2 // Data ready status information DR_STATUS = 0x06 // 20-bit pressure change data OUT_PRES_DELTA_MSB_CSB_LSB = 0x07 OUT_PRES_DELTA_BYTES = 3 // 12-bit temperature change data OUT_TEMP_DELTA_MSB_LSB = 0x0A OUT_TEMP_DELTA_BYTES = 2 // Fixed device ID number WHO_AM_I = 0x0C // FIFO status: no FIFO event detected F_STATUS = 0x0D // FIFO 8-bit data access F_DATA = 0x0E // FIFO setup F_SETUP = 0x0F // Time since FIFO overflow TIME_DLY = 0x10 // Current system mode SYSMOD = 0x11 // Interrupt status INT_SOURCE = 0x12 // Data event flag configuration PT_DATA_CFG = 0x13 // Barometric input for altitude calculation BAR_IN_MSB_LSB = 0x14 BAR_IN_BYTES = 2 // Pressure/altitude target PRES_TGT_MSB_LSB = 0x16 PRES_TGT_BYTES = 2 // Temperature target value T_TGT = 0x18 // Pressure/altitude window PRES_WND_MSB_LSB = 0x19 PRES_WND_BYTES = 2 // Temperature window TEMP_WND = 0x1B // Minimum pressure/altitude PRES_MIN_MSB_CSB_LSB = 0x1C PRES_MIN_BYTES = 3 // Minimum temperature TEMP_MIN_MSB_LSB = 0x1E TEMP_MIN_BYTES = 2 // Maximum pressure/altitude PRES_MAX_MSB_CSB_LSB = 0x21 PRES_MAX_BYTES = 3 // Maximum temperature TEMP_MAX_MSB_LSB = 0x24 TEMP_MAX_BYTES = 2 // Control register: Modes, oversampling CTRL_REG1 = 0x26 // Control register: Acquisition time step CTRL_REG2 = 0x27 // Control register: Interrupt pin configuration CTRL_REG3 = 0x28 // Control register: Interrupt enables CTRL_REG4 = 0x29 // Control register: Interrupt output pin assignment CTRL_REG5 = 0x2A // Pressure data offset OFF_PRES = 0x2B // Temperature data offset OFF_TEMP = 0x2C // Altitude data offset OFF_H = 0x2D ) // Flag can keep any sensor register specific bit flags. 
type Flag byte

const (
	// DR_STATUS flag: Pressure/altitude or temperature data ready.
	PRES_TEMP_DATA_READY Flag = 0x8
	// DR_STATUS flag: Pressure/altitude new data available.
	PRES_DATA_READY Flag = 0x4
	// DR_STATUS flag: Temperature new data available.
	TEMP_DATA_READY Flag = 0x2
)

// PressureType signify which type of
// pressure measurement in use.
type PressureType int

const (
	// Measure pressure in Pa
	Barometer PressureType = iota + 1
	// Measure altitude in m
	Altimeter
)

// RawPressure keeps raw pressure data received from sensor
// (three big-endian register bytes OUT_P_MSB/CSB/LSB).
type RawPressure struct {
	PRES_MSB byte
	PRES_CSB byte
	PRES_LSB byte
}

// ConvertToSignedQ16Dot4 convert raw data to signed Q16.4,
// where integer and fraction parts returned in separate fields.
// The fraction is the upper nibble of the LSB, in 1/16 units.
// Used for altimeter mode.
func (v *RawPressure) ConvertToSignedQ16Dot4() (int16, uint8) {
	presFrac := (v.PRES_LSB & 0xF0) >> 4
	presInt := int16((uint16(v.PRES_MSB) << 8) | uint16(v.PRES_CSB))
	return presInt, presFrac
}

// ConvertToUnsignedQ18Dot2 convert raw data to unsigned Q18.2,
// where integer and fraction parts returned in separate fields.
// The top two bits of the LSB nibble join the integer part; the
// remaining two bits are the fraction in 1/4 units.
// Used for barometer mode.
func (v *RawPressure) ConvertToUnsignedQ18Dot2() (uint32, uint8) {
	presFrac := (v.PRES_LSB & 0xF0) >> 4
	presInt := (uint32(v.PRES_MSB) << 10) | (uint32(v.PRES_CSB) << 2) |
		uint32(presFrac>>2)
	presFrac &= 0x3
	return presInt, presFrac
}

// RawTemperature keeps raw temperature data received from sensor
// (two register bytes OUT_T_MSB/LSB).
type RawTemperature struct {
	TEMP_MSB byte
	TEMP_LSB byte
}

// ConvertToSignedQ8Dot4 convert raw data to signed Q8.4,
// where integer and fraction parts returned in separate fields.
// The fraction is the upper nibble of the LSB, in 1/16 units.
func (v *RawTemperature) ConvertToSignedQ8Dot4() (int8, uint8) {
	tempFrac := (v.TEMP_LSB & 0xF0) >> 4
	tempInt := int8(v.TEMP_MSB)
	return tempInt, tempFrac
}

// MPL3115A2 keeps sensor itself. It is stateless; every method takes
// the I2C connection explicitly.
type MPL3115A2 struct {
}

// NewMPL3115A2 return new sensor instance.
func NewMPL3115A2() *MPL3115A2 {
	v := &MPL3115A2{}
	return v
}

// Oversample ratio should be in range [0..7]; the effective
// oversampling factor is 2^osr.
func (v *MPL3115A2) encodeCtrlOverSampleRatio(oversample int) (byte, error) {
	if oversample < 0 || oversample > 7 {
		return 0, errors.New("oversample ratio should be in range [0..7]")
	}
	// OS[2:0] occupy bits 3..5 of CTRL_REG1.
	b := byte(oversample) << 3
	return b, nil
}

// Define measure for altimeter/barometer mode.
// Altimeter mode return "pressure" value in meters,
// barometer mode in Pascals. Returns the ALT bit (bit 7) of CTRL_REG1.
func (v *MPL3115A2) encodeCtrlAltimeterMode(altimeterMode bool) (byte, error) {
	if altimeterMode {
		return 0x80, nil
	}
	return 0, nil
}

// Activate/deactivate reset bit (RST, bit 2 of CTRL_REG1).
func (v *MPL3115A2) encodeCtrlResetBit(activateReset bool) (byte, error) {
	if activateReset {
		return 0x4, nil
	}
	return 0, nil
}

// Put sensor in ACTIVE/STANDBY mode (SBYB, bit 0 of CTRL_REG1).
func (v *MPL3115A2) encodeCtrlActiveStatus(activateSensor bool) (byte, error) {
	if activateSensor {
		return 0x1, nil
	}
	return 0, nil
}

// Read STATUS register and return its data-ready flags.
func (v *MPL3115A2) readStatusReg(i2c *i2c.I2C) (Flag, error) {
	status, err := i2c.ReadRegU8(STATUS)
	if err != nil {
		return 0, err
	}
	return Flag(status), nil
}

// Write CTRL_REG1 register.
func (v *MPL3115A2) writeCtrlReg1(i2c *i2c.I2C, value byte) error {
	err := i2c.WriteRegU8(CTRL_REG1, value)
	if err != nil {
		return err
	}
	return nil
}

// Write PT_DATA_CFG register to define which data-ready events are raised.
// Bit 0 = temperature event, bit 1 = pressure event, bit 2 = master
// data-ready event (enabled whenever either sub-event is on).
func (v *MPL3115A2) writeEventMode(i2c *i2c.I2C, temperatureEvent, preasureEvent bool) error {
	var flags byte
	if temperatureEvent {
		flags |= 0x1
	}
	if preasureEvent {
		flags |= 0x2
	}
	if temperatureEvent || preasureEvent {
		flags |= 0x4
	}
	err := i2c.WriteRegU8(PT_DATA_CFG, flags)
	if err != nil {
		return err
	}
	return nil
}

// MeasureAltitude measure altitude in meters with specific
// precision defined by oversample ratio.
func (v *MPL3115A2) MeasureAltitude(i2c *i2c.I2C, oversampleRatio int) (float32, float32, error) { up, ut, err := v.measureRaw(i2c, oversampleRatio, Altimeter) if err != nil { return 0, 0, err } presInt, presFrac := up.ConvertToSignedQ16Dot4() tempInt, tempFrac := ut.ConvertToSignedQ8Dot4() alt := float32(presInt) + float32(presFrac)/(1<<4) t := float32(tempInt) + float32(tempFrac)/(1<<4) return alt, t, nil } // MeasurePressure measure pressure in Pa with specific // precision defined by oversample ratio. func (v *MPL3115A2) MeasurePressure(i2c *i2c.I2C, oversampleRation int) (float32, float32, error) { up, ut, err := v.measureRaw(i2c, oversampleRation, Barometer) if err != nil { return 0, 0, err } presInt, presFrac := up.ConvertToUnsignedQ18Dot2() tempInt, tempFrac := ut.ConvertToSignedQ8Dot4() pres := float32(presInt) + float32(presFrac)/(1<<2) t := float32(tempInt) + float32(tempFrac)/(1<<4) return pres, t, nil } // Initialize sensor and made raw measurement // to read uncompensated pressure and temperature. 
func (v *MPL3115A2) measureRaw(i2c *i2c.I2C, overampleRatio int, pressureType PressureType) (*RawPressure, *RawTemperature, error) { lg.Debug("Measurement pressure and temperature...") // enable Altimeter mode var barometerType bool if pressureType == Altimeter { barometerType = true } flags, err := v.encodeCtrlAltimeterMode(barometerType) if err != nil { return nil, nil, err } // define Oversample Ratio to 2^oversampleRatio b, err := v.encodeCtrlOverSampleRatio(overampleRatio) if err != nil { return nil, nil, err } flags |= b // activate Altimeter mode and set Oversample Ratio err = v.writeCtrlReg1(i2c, flags) if err != nil { return nil, nil, err } // enable events for temperature and pressure err = v.writeEventMode(i2c, true, true) if err != nil { return nil, nil, err } // get activate sensor bit b, err = v.encodeCtrlActiveStatus(true) if err != nil { return nil, nil, err } flags |= b // activate sensor err = v.writeCtrlReg1(i2c, flags) if err != nil { return nil, nil, err } // read status until measurement is done for { var n time.Duration = 1 // n = 1 << overampleRatio time.Sleep(time.Millisecond * 2 * n) status, err := v.readStatusReg(i2c) if err != nil { return nil, nil, err } if status&PRES_TEMP_DATA_READY != 0 { break } } up, ut, err := v.readRawPressureTemperature(i2c) if err != nil { return nil, nil, err } return up, ut, nil } // Read uncompensated temperature and pressure sensor measurement. func (v *MPL3115A2) readRawPressureTemperature(i2c *i2c.I2C) (*RawPressure, *RawTemperature, error) { _, err := i2c.WriteBytes([]byte{STATUS}) if err != nil { return nil, nil, err } var data struct { STATUS byte RawPressure RawTemperature } err = readDataToStruct(i2c, 1+OUT_PRES_BYTES+OUT_TEMP_BYTES, binary.LittleEndian, &data) if err != nil { return nil, nil, err } // lg.Debugf("Data = %+v", data) return &data.RawPressure, &data.RawTemperature, nil } // ModifySeaLevelPressure call allow to change default sea level value 101326 Pa to custom one. 
func (v *MPL3115A2) ModifySeaLevelPressure(i2c *i2c.I2C, pressureAtSeeLevel uint32) error {
	// divide by 2: the BAR_IN register stores pressure in 2 Pa units
	pressureAtSeeLevel = pressureAtSeeLevel / 2
	b := []byte{BAR_IN_MSB_LSB, byte(pressureAtSeeLevel >> 8),
		byte(pressureAtSeeLevel & 0xFF)}
	_, err := i2c.WriteBytes(b)
	if err != nil {
		return err
	}
	return nil
}

// GetDefaultSeaLevelPressure return average barometric pressure
// on the sea level.
// NOTE(review): the standard atmosphere is 101325 Pa, but this returns
// 101326 — consistent with the sensor's BAR_IN register default of
// 50663 * 2 Pa; confirm which value is intended before changing.
func (v *MPL3115A2) GetDefaultSeaLevelPressure() uint32 {
	return 101326
}

// Reset reboot sensor and initialize some sensor registers.
func (v *MPL3115A2) Reset(i2c *i2c.I2C) error {
	lg.Debug("Reset sensor...")
	flags, err := v.encodeCtrlResetBit(true)
	if err != nil {
		return err
	}
	// activate reset bit
	err = v.writeCtrlReg1(i2c, flags)
	// ignore error, since sensor terminates i2c-connection
	return nil
}

// CompensateAltitude shift altitude from -128 to +127 meters.
// Default value is 0. Can be used for sensor calibration.
// Writes the signed offset directly to the OFF_H register.
func (v *MPL3115A2) CompensateAltitude(i2c *i2c.I2C, shiftM int8) error {
	b := []byte{OFF_H, byte(shiftM)}
	_, err := i2c.WriteBytes(b)
	if err != nil {
		return err
	}
	return nil
}

// CompensatePressure shift pressure from -512 to +508 Pascal.
// Default value is 0. Can be used for sensor calibration.
// The OFF_P register stores the offset in 4 Pa units.
func (v *MPL3115A2) CompensatePressure(i2c *i2c.I2C, shiftPa int16) error {
	if shiftPa > 508 || shiftPa < -512 {
		return errors.New("pressure compensation exceed range [-512..+508]")
	}
	// divide by 4: register unit is 4 Pa; result fits in a signed byte
	shiftPa = shiftPa / 4
	b := []byte{OFF_PRES, byte(shiftPa)}
	_, err := i2c.WriteBytes(b)
	if err != nil {
		return err
	}
	return nil
}

// CompensateTemperature shift temperature from -8 to +7.9375 *C.
// Default value is 0. Can be used for sensor calibration.
func (v *MPL3115A2) CompensateTemperature(i2c *i2c.I2C, shiftTemp float32) error { if shiftTemp > 7.9375 || shiftTemp < -8 { return errors.New("temperature compensation exceed range [-8..+7.9375]") } // multiply by 16 shiftTemp = shiftTemp * 16 b := []byte{OFF_PRES, byte(shiftTemp)} _, err := i2c.WriteBytes(b) if err != nil { return err } return nil }
mpl3115a2.go
0.525856
0.408631
mpl3115a2.go
starcoder
package main

import (
	"math"

	"github.com/unixpickle/model3d/model3d"
)

const (
	StarThickness       = 1.0
	StarPointRadius     = 2.0
	StarRingRadius      = 0.2
	StarHolderRadius    = 0.4
	StarHolderLength    = 2.0
	StarHolderThickness = 0.05
	StarHolderOffset    = 0.4
	StarNumPoints       = 6
)

// CreateStarSolid builds the complete ornament solid: the star body, a
// cone-shaped holder along +X, and a small torus ring at -X for hanging.
func CreateStarSolid() model3d.Solid {
	baseMesh := CreateStarMesh()
	return model3d.JoinedSolid{
		model3d.NewColliderSolid(model3d.MeshToCollider(baseMesh)),
		CreateHolder(model3d.X(StarPointRadius - StarHolderOffset)),
		&model3d.Torus{
			Axis:        model3d.Z(1),
			Center:      model3d.X(-StarPointRadius),
			InnerRadius: 0.05,
			OuterRadius: StarRingRadius,
		},
	}
}

// CreateStarMesh builds a closed star mesh with StarNumPoints points:
// triangles fan from each outer point to its neighboring inner vertices and
// an apex above the XY plane, then the half is mirrored through z=0.
func CreateStarMesh() *model3d.Mesh {
	midPoint := model3d.Z(StarThickness / 2)
	mesh := model3d.NewMesh()
	for i := 0; i < StarNumPoints*2; i += 2 {
		theta0 := float64(i-1) / float64(StarNumPoints*2) * math.Pi * 2
		theta1 := float64(i) / float64(StarNumPoints*2) * math.Pi * 2
		theta2 := float64(i+1) / float64(StarNumPoints*2) * math.Pi * 2
		// Inner vertices lie on the unit circle; only the point vertex is
		// scaled out to StarPointRadius.
		p1 := model3d.XY(math.Cos(theta0), math.Sin(theta0))
		p2 := model3d.XY(math.Cos(theta1), math.Sin(theta1)).Scale(StarPointRadius)
		p3 := model3d.XY(math.Cos(theta2), math.Sin(theta2))
		mesh.Add(&model3d.Triangle{p2, p1, midPoint})
		mesh.Add(&model3d.Triangle{p2, p3, midPoint})
	}
	// Mirror the upper half through the z=0 plane to close the bottom.
	mesh.AddMesh(mesh.MapCoords(model3d.XYZ(1, 1, -1).Mul))
	// We created the mesh in a lazy way, so we must
	// fix holes and normals.
	mesh = mesh.Repair(1e-5)
	mesh, _ = mesh.RepairNormals(1e-5)
	return mesh
}

// CreateHolder builds a hollow cone-like holder whose tip sits at `tip`
// and which extends StarHolderLength along +X.
func CreateHolder(tip model3d.Coord3D) model3d.Solid {
	// conePoint parameterizes the surface: t in [0,1] along the axis,
	// theta around it; radius grows as sqrt(t) away from the tip.
	conePoint := func(t, theta float64) model3d.Coord3D {
		r := StarHolderRadius * math.Sqrt(t)
		x := t*StarHolderLength + tip.X
		return model3d.XYZ(x, math.Cos(theta)*r, math.Sin(theta)*r)
	}
	surfaceMesh := model3d.NewMesh()
	dTheta := math.Pi * 2 / 100.0
	dT := 1.0 / 100.0
	for t := 0.0; t < 1.0; t += dT {
		for theta := 0.0; theta < math.Pi*2; theta += dTheta {
			p1 := conePoint(t, theta)
			p2 := conePoint(t, theta+dTheta)
			p3 := conePoint(t+dT, theta+dTheta)
			p4 := conePoint(t+dT, theta)
			surfaceMesh.AddQuad(p1, p2, p3, p4)
		}
	}
	// Thicken the open surface into a solid shell.
	return model3d.NewColliderSolidHollow(model3d.MeshToCollider(surfaceMesh), StarHolderThickness)
}
examples/decoration/tree_ornament/star.go
0.750187
0.430207
star.go
starcoder
package git

import (
	"errors"
	"strconv"
	"strings"

	"github.com/github/git-sizer/counts"
)

// Tree represents a Git tree object.
type Tree struct {
	data string
}

// ParseTree parses the tree object whose contents are contained in
// `data`. `oid` is currently unused.
func ParseTree(oid OID, data []byte) (*Tree, error) {
	return &Tree{string(data)}, nil
}

// Size returns the size of the tree object.
func (tree Tree) Size() counts.Count32 {
	return counts.NewCount32(uint64(len(tree.data)))
}

// TreeEntry represents an entry in a Git tree object. Note that Name
// shares memory with the tree data that were originally read; i.e.,
// retaining a pointer to Name keeps the tree data reachable.
type TreeEntry struct {
	Name     string
	OID      OID
	Filemode uint
}

// TreeIter is an iterator over the entries in a Git tree object.
type TreeIter struct {
	// The as-yet-unread part of the tree's data.
	data string
}

// Iter returns an iterator over the entries in `tree`.
func (tree *Tree) Iter() *TreeIter {
	return &TreeIter{data: tree.data}
}

// NextEntry returns either the next entry in a Git tree, or a `false`
// boolean value if there are no more entries.
func (iter *TreeIter) NextEntry() (TreeEntry, bool, error) {
	if len(iter.data) == 0 {
		return TreeEntry{}, false, nil
	}

	// Each entry is "<octal mode> SP <name> NUL <20-byte binary OID>".
	sp := strings.IndexByte(iter.data, ' ')
	if sp < 0 {
		return TreeEntry{}, false, errors.New("failed to find SP after mode")
	}
	mode, err := strconv.ParseUint(iter.data[:sp], 8, 32)
	if err != nil {
		return TreeEntry{}, false, err
	}
	iter.data = iter.data[sp+1:]

	nul := strings.IndexByte(iter.data, 0)
	if nul < 0 {
		return TreeEntry{}, false, errors.New("failed to find NUL after filename")
	}
	name := iter.data[:nul]
	iter.data = iter.data[nul+1:]

	if len(iter.data) < 20 {
		return TreeEntry{}, false, errors.New("tree entry ends unexpectedly")
	}
	entry := TreeEntry{Name: name, Filemode: uint(mode)}
	copy(entry.OID.v[0:20], iter.data[0:20])
	iter.data = iter.data[20:]

	return entry, true, nil
}
git/tree.go
0.84075
0.458167
tree.go
starcoder
package cartesiantree

import (
	"fmt"
	"math/rand"
)

// CTree is a node of a Cartesian tree (treap): keys x form a binary
// search tree while random priorities y form a max-heap.
type CTree struct {
	x     uint32 // key
	y     float32 // heap priority (random)
	left  *CTree
	right *CTree
	init  bool // false until the node holds a value
}

// NewCartesianTree returns an empty (uninitialized) tree root.
func NewCartesianTree() *CTree {
	t := &CTree{}
	t.init = false
	return t
}

// newCartesianTree builds an initialized node, attaching only
// children that are themselves initialized.
func newCartesianTree(x uint32, y float32, left *CTree, right *CTree) *CTree {
	t := &CTree{}
	t.x = x
	t.y = y
	t.left = nil
	t.right = nil
	if left != nil && left.init {
		t.left = left
	}
	if right != nil && right.init {
		t.right = right
	}
	t.init = true
	return t
}

// merge joins two treaps l and r (all keys in l <= all keys in r),
// choosing the root by the larger priority y. Allocates new nodes
// rather than relinking in place.
func (t CTree) merge(l *CTree, r *CTree) *CTree {
	if l == nil {
		return r
	}
	if r == nil {
		return l
	}
	if r.y > l.y {
		newTree := t.merge(l, r.left)
		return newCartesianTree(r.x, r.y, newTree, r.right)
	}
	newTree := t.merge(l.right, r)
	return newCartesianTree(l.x, l.y, l.left, newTree)
}

// split partitions t by key x: keys <= x go into *l, keys > x into *r.
// NOTE(review): the `r = nil` / `l = nil` branches assign only the local
// parameter copies and have no effect on the caller's trees — the caller's
// result then stays a zero-value CTree with init=false, which
// newCartesianTree later discards. Confirm this masking is intended.
func (t *CTree) split(x uint32, l *CTree, r *CTree) {
	var newTree CTree
	if t.x <= x {
		if t.right == nil {
			r = nil
		} else {
			t.right.split(x, &newTree, r)
		}
		*l = *newCartesianTree(t.x, t.y, t.left, &newTree)
	} else {
		if t.left == nil {
			l = nil
		} else {
			t.left.split(x, l, &newTree)
		}
		*r = *newCartesianTree(t.x, t.y, &newTree, t.right)
	}
}

// Add inserts key x with a random priority, splitting at x and
// merging back with the new singleton node in between.
func (t *CTree) Add(x uint32) {
	if !t.init {
		// First insertion: initialize the root in place.
		t.init = true
		t.x = x
		t.y = rand.Float32()
		return
	}
	var l, r CTree
	t.split(x, &l, &r)
	m := newCartesianTree(x, rand.Float32(), nil, nil)
	ttmp := t.merge(&l, m)
	tmp := t.merge(ttmp, &r)
	t.x = tmp.x
	t.y = tmp.y
	t.left = tmp.left
	t.right = tmp.right
}

// Remove deletes key x by splitting out the range (x-1, x] and
// merging the remainder.
// NOTE(review): x-1 underflows to MaxUint32 when x == 0 (uint32
// wraparound) — confirm key 0 is never removed or handle it explicitly.
func (t *CTree) Remove(x uint32) {
	var l, m, r CTree
	t.split(x-1, &l, &r)
	r.split(x, &m, &r)
	tmp := t.merge(&l, &r)
	t.x = tmp.x
	t.y = tmp.y
	t.left = tmp.left
	t.right = tmp.right
}

// GetSortedArray appends all keys to *res in ascending order
// (in-order traversal).
func (t *CTree) GetSortedArray(res *[]uint32) {
	if t == nil {
		return
	}
	t.left.GetSortedArray(res)
	*res = append(*res, t.x)
	t.right.GetSortedArray(res)
}

// print dumps "key:priority" pairs in order; just for fun...
func (t *CTree) print() {
	if t == nil {
		return
	}
	t.left.print()
	fmt.Printf("%d:%f\n", t.x, t.y)
	t.right.print()
}
cartesiantree/cartesiantree.go
0.586286
0.523116
cartesiantree.go
starcoder
package internal

import (
	"fmt"
	"math"
)

// grotskyNumber is the interpreter's numeric type, backed by float64.
type grotskyNumber float64

// applyOpToNums applies a binary float64 operation to two arguments.
// The first argument is assumed to be a grotskyNumber (the receiver);
// errExpectedNumber is returned when the second is not a number.
func applyOpToNums(op func(x, y float64) interface{}, arguments ...interface{}) (interface{}, error) {
	x := arguments[0].(grotskyNumber)
	y, ok := arguments[1].(grotskyNumber)
	if !ok {
		return nil, errExpectedNumber
	}
	return op(float64(x), float64(y)), nil
}

// numberBinaryOperations maps each binary operator to its float64
// implementation. Arithmetic ops return grotskyNumber; comparisons
// return grotskyBool.
// NOTE(review): opMod truncates both operands to int64 and will panic on
// a zero divisor; opDiv yields +/-Inf for x/0 — confirm intended.
var numberBinaryOperations = map[operator]func(x, y float64) interface{}{
	opAdd: func(x, y float64) interface{} {
		return grotskyNumber(x + y)
	},
	opSub: func(x, y float64) interface{} {
		return grotskyNumber(x - y)
	},
	opDiv: func(x, y float64) interface{} {
		return grotskyNumber(x / y)
	},
	opMod: func(x, y float64) interface{} {
		return grotskyNumber(int64(x) % int64(y))
	},
	opMul: func(x, y float64) interface{} {
		return grotskyNumber(x * y)
	},
	opPow: func(x, y float64) interface{} {
		return grotskyNumber(math.Pow(x, y))
	},
	opEq: func(x, y float64) interface{} {
		return grotskyBool(x == y)
	},
	opNeq: func(x, y float64) interface{} {
		return grotskyBool(x != y)
	},
	opLt: func(x, y float64) interface{} {
		return grotskyBool(x < y)
	},
	opLte: func(x, y float64) interface{} {
		return grotskyBool(x <= y)
	},
	opGt: func(x, y float64) interface{} {
		return grotskyBool(x > y)
	},
	opGte: func(x, y float64) interface{} {
		return grotskyBool(x >= y)
	},
}

// get reports a runtime error: numbers have no properties.
func (n grotskyNumber) get(state *interpreterState, tk *token) interface{} {
	state.runtimeErr(errUndefinedProp, tk)
	return nil
}

// set reports a runtime error: numbers are read-only.
func (n grotskyNumber) set(state *interpreterState, name *token, value interface{}) {
	state.runtimeErr(errReadOnly, name)
}

// getOperator returns the implementation for op with the receiver bound
// as the left operand. Any operator not found in numberBinaryOperations
// is assumed to be unary negation (op == opNeg).
func (n grotskyNumber) getOperator(op operator) (operatorApply, error) {
	if apply, ok := numberBinaryOperations[op]; ok {
		return func(arguments ...interface{}) (interface{}, error) {
			return applyOpToNums(apply, append([]interface{}{n}, arguments...)...)
		}, nil
	}
	// op == opNeg
	return func(arguments ...interface{}) (interface{}, error) {
		return grotskyNumber(-n), nil
	}, nil
}

// String renders the number using Go's default float formatting.
func (n grotskyNumber) String() string {
	return fmt.Sprintf("%v", float64(n))
}
internal/grotskyNumber.go
0.68721
0.421254
grotskyNumber.go
starcoder
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	"strconv"
	"strings"

	"gitlab.com/NebulousLabs/Sia/encoding"
	"gitlab.com/NebulousLabs/Sia/types"
	"gitlab.com/NebulousLabs/errors"
)

var (
	// errUnableToParseSize is returned when the input is unable to be parsed
	// into a file size unit
	errUnableToParseSize = errors.New("unable to parse size")

	// errUnableToParseTimeout is returned when the input is unable to be parsed
	// into a timeout unit
	errUnableToParseTimeout = errors.New("unable to parse timeout")

	// errUnableToParseRateLimit is returned when the input is unable to be
	// parsed into a rate limit unit
	errUnableToParseRateLimit = errors.New("unable to parse ratelimit")
)

// parseFilesize converts strings of form 10GB to a size in bytes. Fractional
// sizes are truncated at the byte size. Uses big.Rat so arbitrarily large
// integral sizes stay exact.
func parseFilesize(strSize string) (string, error) {
	units := []struct {
		suffix     string
		multiplier int64
	}{
		{"kb", 1e3},
		{"mb", 1e6},
		{"gb", 1e9},
		{"tb", 1e12},
		{"kib", 1 << 10},
		{"mib", 1 << 20},
		{"gib", 1 << 30},
		{"tib", 1 << 40},
		{"b", 1}, // must be after others else it'll match on them all
	}
	strSize = strings.ToLower(strSize)
	for _, unit := range units {
		if strings.HasSuffix(strSize, unit.suffix) {
			r, ok := new(big.Rat).SetString(strings.TrimSuffix(strSize, unit.suffix))
			if !ok {
				return "", errUnableToParseSize
			}
			r.Mul(r, new(big.Rat).SetInt(big.NewInt(unit.multiplier)))
			if !r.IsInt() {
				// Fractional byte count: truncate to whole bytes.
				f, _ := r.Float64()
				return fmt.Sprintf("%d", int64(f)), nil
			}
			return r.RatString(), nil
		}
	}
	return "", errUnableToParseSize
}

// periodUnits turns a period in terms of blocks to a number of weeks.
func periodUnits(blocks types.BlockHeight) string {
	return fmt.Sprint(blocks / 1008) // 1008 blocks per week
}

// parsePeriod converts a duration specified in blocks, hours, or weeks to a
// number of blocks.
func parsePeriod(period string) (string, error) { units := []struct { suffix string multiplier float64 }{ {"b", 1}, // blocks {"block", 1}, // blocks {"blocks", 1}, // blocks {"h", 6}, // hours {"hour", 6}, // hours {"hours", 6}, // hours {"d", 144}, // days {"day", 144}, // days {"days", 144}, // days {"w", 1008}, // weeks {"week", 1008}, // weeks {"weeks", 1008}, // weeks } period = strings.ToLower(period) for _, unit := range units { if strings.HasSuffix(period, unit.suffix) { var base float64 _, err := fmt.Sscan(strings.TrimSuffix(period, unit.suffix), &base) if err != nil { return "", errUnableToParseSize } blocks := int(base * unit.multiplier) return fmt.Sprint(blocks), nil } } return "", errUnableToParseSize } // parseTimeout converts a duration specified in seconds, hours, days or weeks // to a number of seconds func parseTimeout(duration string) (string, error) { units := []struct { suffix string multiplier float64 }{ {"s", 1}, // seconds {"second", 1}, // seconds {"seconds", 1}, // seconds {"h", 3600}, // hours {"hour", 3600}, // hours {"hours", 3600}, // hours {"d", 86400}, // days {"day", 86400}, // days {"days", 86400}, // days {"w", 604800}, // weeks {"week", 604800}, // weeks {"weeks", 604800}, // weeks } duration = strings.ToLower(duration) for _, unit := range units { if strings.HasSuffix(duration, unit.suffix) { var base float64 _, err := fmt.Sscan(strings.TrimSuffix(duration, unit.suffix), &base) if err != nil { return "", errUnableToParseTimeout } seconds := int(base * unit.multiplier) return fmt.Sprint(seconds), nil } } return "", errUnableToParseTimeout } // currencyUnits converts a types.Currency to a string with human-readable // units. The unit used will be the largest unit that results in a value // greater than 1. The value is rounded to 4 significant digits. 
func currencyUnits(c types.Currency) string {
	// Anything below one pico-siacoin is shown in raw hastings.
	pico := types.SiacoinPrecision.Div64(1e12)
	if c.Cmp(pico) < 0 {
		return c.String() + " H"
	}
	// iterate until we find a unit greater than c
	mag := pico
	unit := ""
	for _, unit = range []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} {
		if c.Cmp(mag.Mul64(1e3)) < 0 {
			break
		} else if unit != "TS" {
			// don't want to perform this multiply on the last iter; that
			// would give us 1.235 TS instead of 1235 TS
			mag = mag.Mul64(1e3)
		}
	}
	// res = c / mag, computed exactly in rationals then rounded for display.
	num := new(big.Rat).SetInt(c.Big())
	denom := new(big.Rat).SetInt(mag.Big())
	res, _ := new(big.Rat).Mul(num, denom.Inv(denom)).Float64()
	return fmt.Sprintf("%.4g %s", res, unit)
}

// parseCurrency converts a siacoin amount to base units (hastings).
// The trailing unit determines the power-of-ten multiplier; a bare "H"
// suffix means the value is already in hastings.
func parseCurrency(amount string) (string, error) {
	units := []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"}
	for i, unit := range units {
		if strings.HasSuffix(amount, unit) {
			// scan into big.Rat
			r, ok := new(big.Rat).SetString(strings.TrimSuffix(amount, unit))
			if !ok {
				return "", errors.New("malformed amount")
			}
			// convert units: SC (i==4) is 10^24 hastings; each step is 10^3
			exp := 24 + 3*(int64(i)-4)
			mag := new(big.Int).Exp(big.NewInt(10), big.NewInt(exp), nil)
			r.Mul(r, new(big.Rat).SetInt(mag))
			// r must be an integer at this point
			if !r.IsInt() {
				return "", errors.New("non-integer number of hastings")
			}
			return r.RatString(), nil
		}
	}
	// check for hastings separately
	if strings.HasSuffix(amount, "H") {
		return strings.TrimSuffix(amount, "H"), nil
	}
	return "", errors.New("amount is missing units; run 'wallet --help' for a list of units")
}

// parseRatelimit converts a ratelimit input string of to an int64 representing
// the bytes per second ratelimit.
func parseRatelimit(rateLimitStr string) (int64, error) {
	// Check for 0 values signifying that the no limit is being set
	if rateLimitStr == "0" {
		return 0, nil
	}
	// Create struct of rates. Have to start at the high end so that B/s is
	// checked last, otherwise it would return false positives.
	// The "bps" units (bits per second) divide by 8 to get bytes.
	rates := []struct {
		unit   string
		factor float64
	}{
		{"TB/s", 1e12},
		{"GB/s", 1e9},
		{"MB/s", 1e6},
		{"KB/s", 1e3},
		{"B/s", 1e0},
		{"Tbps", 1e12 / 8},
		{"Gbps", 1e9 / 8},
		{"Mbps", 1e6 / 8},
		{"Kbps", 1e3 / 8},
		{"Bps", 1e0 / 8},
	}
	for _, rate := range rates {
		if !strings.HasSuffix(rateLimitStr, rate.unit) {
			continue
		}
		// trim units and spaces
		rateLimitStr = strings.TrimSuffix(rateLimitStr, rate.unit)
		rateLimitStr = strings.TrimSpace(rateLimitStr)
		// Check for empty string meaning only the units were provided
		if rateLimitStr == "" {
			return 0, errUnableToParseRateLimit
		}
		// convert string to float for exponation
		rateLimitFloat, err := strconv.ParseFloat(rateLimitStr, 64)
		if err != nil {
			return 0, errors.Compose(errUnableToParseRateLimit, err)
		}
		// Check for Bps to make sure it is greater than 8 Bps meaning that it is at
		// least 1 B/s
		if rateLimitFloat < 8 && rate.unit == "Bps" {
			return 0, errors.AddContext(errUnableToParseRateLimit, "Bps rate limit cannot be < 8 Bps")
		}
		// Determine factor and convert to in64 for bps
		rateLimit := int64(rateLimitFloat * rate.factor)
		return rateLimit, nil
	}
	return 0, errUnableToParseRateLimit
}

// ratelimitUnits converts an int64 to a string with human-readable ratelimit
// units. The unit used will be the largest unit that results in a value greater
// than 1. The value is rounded to 4 significant digits.
func ratelimitUnits(ratelimit int64) string { // Check for bps if ratelimit < 1e3 { return fmt.Sprintf("%v %s", ratelimit, "B/s") } // iterate until we find a unit greater than c mag := 1e3 unit := "" for _, unit = range []string{"KB/s", "MB/s", "GB/s", "TB/s"} { if float64(ratelimit) < mag*1e3 { break } else if unit != "TB/s" { // don't want to perform this multiply on the last iter; that // would give us 1.235 tbps instead of 1235 tbps mag = mag * 1e3 } } return fmt.Sprintf("%.4g %s", float64(ratelimit)/mag, unit) } // yesNo returns "Yes" if b is true, and "No" if b is false. func yesNo(b bool) string { if b { return "Yes" } return "No" } // parseTxn decodes a transaction from s, which can be JSON, base64, or a path // to a file containing either encoding. func parseTxn(s string) (types.Transaction, error) { // first assume s is a file txnBytes, err := ioutil.ReadFile(s) if os.IsNotExist(err) { // assume s is a literal encoding txnBytes = []byte(s) } else if err != nil { return types.Transaction{}, errors.New("could not read transaction file: " + err.Error()) } // txnBytes now contains either s or the contents of the file, so it is // either JSON or base64 var txn types.Transaction if json.Valid(txnBytes) { if err := json.Unmarshal(txnBytes, &txn); err != nil { return types.Transaction{}, errors.New("could not decode JSON transaction: " + err.Error()) } } else { bin, err := base64.StdEncoding.DecodeString(string(txnBytes)) if err != nil { return types.Transaction{}, errors.New("argument is not valid JSON, base64, or filepath") } if err := encoding.Unmarshal(bin, &txn); err != nil { return types.Transaction{}, errors.New("could not decode binary transaction: " + err.Error()) } } return txn, nil }
cmd/siac/parse.go
0.532911
0.4953
parse.go
starcoder
package num import "math" // NormalizeAngle takes an angle in radians and scales it to [-2PI,2PI]. func NormalizeAngle(theta float64) float64 { if -2*math.Pi <= theta && theta <= 2*math.Pi { return theta } f := theta / (2 * math.Pi) if f < 0 { return 2 * math.Pi * (f - math.Ceil(f)) } return 2 * math.Pi * (f - math.Floor(f)) } // NormalizeAngleDeg takes an angle in degrees and scales it to [-360, 360]. func NormalizeAngleDeg(theta float64) float64 { if -360 <= theta && theta <= 360 { return theta } f := theta / 360 if f < 0 { return 360 * (f - math.Ceil(f)) } return 360 * (f - math.Floor(f)) } // UnitLerp does a linear interpolation of x to between toMin and toMax, with the // assumption that x is in the range [0.0, 1,0]. Essentially, a special case // of general linear interpolation. func UnitLerp(x, toMin, toMax float64) float64 { return toMin + x*(toMax-toMin) } // ReverseUnitLerp interpolates an x in the range [xMin, xMax] to the range [0,1]. func ReverseUnitLerp(x, xMin, xMax float64) float64 { return (x - xMax) / (xMax - xMin) } // Lerp does a linear interpolation of x to between toMin and toMax where xMin // and xMax are the lower and upper bounds of x. func Lerp(x, xMin, xMax, toMin, toMax float64) float64 { return toMin + (toMax-toMin)*((xMax-x)/(xMax-xMin)) } // SmoothStep uses a 3rd order polynomial to produce a smooth interpolation // of x to the range [0,1] when x is also in the range [0,1]. To interpolate // any x into a suitable argument for this function, use ReverseUnitLerp() first. // See: https://en.wikipedia.org/wiki/Smoothstep func SmoothStep(x float64) float64 { if x <= 0 { return 0 } if 1 <= x { return 1 } return x * x * (3 - 2*x) } // SmootherStep uses a 5th order polynomial to produce a smooth interpolation // of x to the range [0,1] when x is also in the range [0,1]. 
// See: https://en.wikipedia.org/wiki/Smoothstep func SmootherStep(x float64) float64 { if x <= 0 { return 0 } if 1 <= x { return 1 } return x * x * x * (x*(x*6-15) + 10) } // SmoothestStep uses a 7th order polynomial to produce a smooth interpolation // of x to the range [0,1] when x is also in the range [0,1]. // See: https://en.wikipedia.org/wiki/Smoothstep func SmoothestStep(x float64) float64 { if x <= 0 { return 0 } if 1 <= x { return 1 } return x * x * x * x * (x*(x*(x*-20+70)-84) + 35) } // Sigmoid returns the interpolation of x to the range [0,1] according to the // logistic function S(x) = 1 / (1 + e^-x). // See: https://en.wikipedia.org/wiki/Sigmoid_function func Sigmoid(x float64) float64 { return 1 / (1 + math.Exp(-x)) } // ClampFloat clamps x between min and max. func ClampFloat(x, min, max float64) float64 { if x <= min { return min } if x >= max { return max } return x }
num/interpolate.go
0.899652
0.806396
interpolate.go
starcoder
package hyperloglog import ( "errors" "hash" "hash/fnv" "math" ) // HyperLogLog probabilistic data struct for cardinality estimation. type HyperLogLog struct { registers []uint8 // registers bucket m uint // number of registers b uint32 // number of bits to find registers bucket number alpha float64 // bias-correction constant hash hash.Hash32 // hash function } const ( exp32 float64 = 4294967296 negexp32 float64 = -4294967296 alpha16 float64 = 0.673 alpha32 float64 = 0.697 alpha64 float64 = 0.709 ) // New creates a new HyperLogLog with `m` registers bucket. // `m` should be a power of two. func New(m uint) (*HyperLogLog, error) { if (m & (m - 1)) != 0 { m = adjustM(m) } return &HyperLogLog{ registers: make([]uint8, m), m: m, b: uint32(math.Ceil(math.Log2(float64(m)))), alpha: calculateAlpha(m), hash: fnv.New32(), }, nil } // NewGuess creates a new HyperLogLog within the given standard error. func NewGuess(stdErr float64) (*HyperLogLog, error) { m := math.Pow(1.04/stdErr, 2) return New(uint(math.Pow(2, math.Ceil(math.Log2(m))))) } // Add adds the data to the set. func (h *HyperLogLog) Add(data []byte) { var ( hash = h.calculateHash(data) k = 32 - h.b r = calculateConsecutiveZeros(hash, k) j = hash >> uint(k) ) if r > h.registers[j] { h.registers[j] = r } } // Count returns the estimated cardinality of the set. func (h *HyperLogLog) Count() uint64 { sum, m := 0.0, float64(h.m) for _, rv := range h.registers { sum += 1.0 / math.Pow(2.0, float64(rv)) } estimate := h.alpha * m * m / sum if estimate <= 5.0/2.0*m { // Small range correction v := 0 for _, r := range h.registers { if r == 0 { v++ } } if v > 0 { estimate = m * math.Log(m/float64(v)) } } else if estimate > 1.0/30.0*exp32 { // Large range correction estimate = negexp32 * math.Log(1-estimate/exp32) } return uint64(estimate) } // Merge combines the HyperLogLog with the other. 
func (h *HyperLogLog) Merge(other *HyperLogLog) error { if h.m != other.m { return errors.New("registers bucket number must match") } for j, r := range other.registers { if r > h.registers[j] { h.registers[j] = r } } return nil } // Reset restores the HyperLogLog to its original state. func (h *HyperLogLog) Reset() { h.registers = make([]uint8, h.m) } // SetHash sets the hashing function. func (h *HyperLogLog) SetHash(hasher hash.Hash32) { h.hash = hasher } func (h *HyperLogLog) calculateHash(data []byte) uint32 { h.hash.Reset() h.hash.Write(data) sum := h.hash.Sum32() return sum } func calculateAlpha(m uint) float64 { var a float64 switch m { case 16: a = alpha16 case 32: a = alpha32 case 64: a = alpha64 default: a = 0.7213 / (1.0 + 1.079/float64(m)) } return a } // calculateConsecutiveZeros calculates the position of the rightmost 1-bit. func calculateConsecutiveZeros(val, max uint32) uint8 { r := uint32(1) for val&1 == 0 && r <= max { r++ val >>= 1 } return uint8(r) } func adjustM(x uint) uint { m := uint(1) for m < x { m <<= 1 } return m }
pkg/hyperloglog/hyperloglog.go
0.77806
0.433622
hyperloglog.go
starcoder
package memmetrics import ( "math" "sort" "time" ) // SplitRatios provides simple anomaly detection for requests latencies. // it splits values into good or bad category based on the threshold and the median value. // If all values are not far from the median, it will return all values in 'good' set. // Precision is the smallest value to consider, e.g. if set to millisecond, microseconds will be ignored. func SplitLatencies(values []time.Duration, precision time.Duration) (good map[time.Duration]bool, bad map[time.Duration]bool) { // Find the max latency M and then map each latency L to the ratio L/M and then call SplitFloat64 v2r := map[float64]time.Duration{} ratios := make([]float64, len(values)) m := maxTime(values) for i, v := range values { ratio := float64(v/precision+1) / float64(m/precision+1) // +1 is to avoid division by 0 v2r[ratio] = v ratios[i] = ratio } good, bad = make(map[time.Duration]bool), make(map[time.Duration]bool) // Note that multiplier makes this function way less sensitive than ratios detector, this is to avoid noise. vgood, vbad := SplitFloat64(2, 0, ratios) for r, _ := range vgood { good[v2r[r]] = true } for r, _ := range vbad { bad[v2r[r]] = true } return good, bad } // SplitRatios provides simple anomaly detection for ratio values, that are all in the range [0, 1] // it splits values into good or bad category based on the threshold and the median value. // If all values are not far from the median, it will return all values in 'good' set. func SplitRatios(values []float64) (good map[float64]bool, bad map[float64]bool) { return SplitFloat64(1.5, 0, values) } // SplitFloat64 provides simple anomaly detection for skewed data sets with no particular distribution. // In essence it applies the formula if(v > median(values) + threshold * medianAbsoluteDeviation) -> anomaly // There's a corner case where there are just 2 values, so by definition there's no value that exceeds the threshold. 
// This case is solved by introducing additional value that we know is good, e.g. 0. That helps to improve the detection results // on such data sets. func SplitFloat64(threshold, sentinel float64, values []float64) (good map[float64]bool, bad map[float64]bool) { good, bad = make(map[float64]bool), make(map[float64]bool) var newValues []float64 if len(values)%2 == 0 { newValues = make([]float64, len(values)+1) copy(newValues, values) // Add a sentinel endpoint so we can distinguish outliers better newValues[len(newValues)-1] = sentinel } else { newValues = values } m := median(newValues) mAbs := medianAbsoluteDeviation(newValues) for _, v := range values { if v > (m+mAbs)*threshold { bad[v] = true } else { good[v] = true } } return good, bad } func median(values []float64) float64 { vals := make([]float64, len(values)) copy(vals, values) sort.Float64s(vals) l := len(vals) if l%2 != 0 { return vals[l/2] } return (vals[l/2-1] + vals[l/2]) / 2.0 } func medianAbsoluteDeviation(values []float64) float64 { m := median(values) distances := make([]float64, len(values)) for i, v := range values { distances[i] = math.Abs(v - m) } return median(distances) } func maxTime(vals []time.Duration) time.Duration { val := vals[0] for _, v := range vals { if v > val { val = v } } return val }
vendor/github.com/vulcand/oxy/memmetrics/anomaly.go
0.730866
0.597461
anomaly.go
starcoder
package cast

import "strconv"

// AsFloat64 to convert as a float64
func AsFloat64(v interface{}) (float64, bool) {
	switch x := indirect(v).(type) {
	case float64:
		return x, true
	case float32:
		return float64(x), true
	case int:
		return float64(x), true
	case int8:
		return float64(x), true
	case int16:
		return float64(x), true
	case int32:
		return float64(x), true
	case int64:
		return float64(x), true
	case uint:
		return float64(x), true
	case uint8:
		return float64(x), true
	case uint16:
		return float64(x), true
	case uint32:
		return float64(x), true
	case uint64:
		return float64(x), true
	case bool:
		if x {
			return 1, true
		}
		return 0, true
	case string:
		n, err := strconv.ParseFloat(x, 64)
		if err != nil {
			return 0, false
		}
		return n, true
	}
	return 0, false
}

// AsFloat64Slice to convert as a slice of float64
func AsFloat64Slice(values ...interface{}) ([]float64, bool) {
	out := make([]float64, len(values))
	ok := true
	for i, v := range values {
		f, converted := AsFloat64(v)
		ok = ok && converted
		out[i] = f
	}
	return out, ok
}

// AsFloat32 to convert as a float32
func AsFloat32(v interface{}) (float32, bool) {
	switch x := indirect(v).(type) {
	case float32:
		return x, true
	case float64:
		return float32(x), true
	case int:
		return float32(x), true
	case int8:
		return float32(x), true
	case int16:
		return float32(x), true
	case int32:
		return float32(x), true
	case int64:
		return float32(x), true
	case uint:
		return float32(x), true
	case uint8:
		return float32(x), true
	case uint16:
		return float32(x), true
	case uint32:
		return float32(x), true
	case uint64:
		return float32(x), true
	case bool:
		if x {
			return 1, true
		}
		return 0, true
	case string:
		// bitSize 32 so out-of-range values fail here rather than being
		// narrowed after a 64-bit parse.
		n, err := strconv.ParseFloat(x, 32)
		if err != nil {
			return 0, false
		}
		return float32(n), true
	}
	return 0, false
}

// AsFloat32Slice to convert as a slice of float32
func AsFloat32Slice(values ...interface{}) ([]float32, bool) {
	out := make([]float32, len(values))
	ok := true
	for i, v := range values {
		f, converted := AsFloat32(v)
		ok = ok && converted
		out[i] = f
	}
	return out, ok
}
float.go
0.693369
0.504028
float.go
starcoder
package main

import "chaincode/errors"

// TrainingTask is a node of a ComputeDAG. It represents a training task
// (i.e. a Traintuple, a CompositeTraintuple or an Aggregatetuple)
type TrainingTask struct {
	ID          string   // unique task identifier within the compute plan
	InModelsIDs []string // IDs of the tasks producing this task's input models
	InputIndex  int      // index of the task in its original input list
	Depth       int      // distance from the DAG roots, filled in by sort()
	TaskType    AssetType
}

// ComputeDAG is a Directed Acyclic Graph (DAG)
// used for compute plans
type ComputeDAG struct {
	OrderTasks    []TrainingTask        // tasks in topological order after sort()
	IDToTrainTask map[string]TrainTask  // per-ID bookkeeping shared with the caller
}

// Create a Directed Acyclic Graph (DAG) from a compute plan.
// Tasks of all three tuple kinds are flattened into a single node list
// (composite tuples contribute their head and trunk model IDs as inputs),
// then topologically sorted; a cycle or missing dependency yields an error.
func createComputeDAG(cp inputComputePlan, IDToTrainTask map[string]TrainTask) (ComputeDAG, error) {
	DAG := ComputeDAG{}
	for i, traintuple := range cp.Traintuples {
		task := TrainingTask{
			ID:          traintuple.ID,
			InModelsIDs: traintuple.InModelsIDs,
			InputIndex:  i,
			TaskType:    TraintupleType,
		}
		DAG.OrderTasks = append(DAG.OrderTasks, task)
	}
	for i, traintuple := range cp.CompositeTraintuples {
		task := TrainingTask{
			ID:          traintuple.ID,
			InModelsIDs: []string{traintuple.InHeadModelID, traintuple.InTrunkModelID},
			InputIndex:  i,
			TaskType:    CompositeTraintupleType,
		}
		DAG.OrderTasks = append(DAG.OrderTasks, task)
	}
	for i, traintuple := range cp.Aggregatetuples {
		task := TrainingTask{
			ID:          traintuple.ID,
			InModelsIDs: traintuple.InModelsIDs,
			InputIndex:  i,
			TaskType:    AggregatetupleType,
		}
		DAG.OrderTasks = append(DAG.OrderTasks, task)
	}
	DAG.IDToTrainTask = IDToTrainTask
	err := DAG.sort()
	if err != nil {
		return DAG, err
	}
	return DAG, nil
}

// Sort the DAG's task list, or return an error if there is a cyclic dependency in inModelIDs.
//
// This is a worklist topological sort: it repeatedly scans the `current`
// list, moving every task whose inputs are all resolved (present in
// IDToTrainTask, possibly from a previous compute plan) to `final` and
// recording its Depth = 1 + max(parent depths). Unready tasks are deferred
// to `temp` for the next pass. If a full pass defers everything, the
// remainder is cyclic or references unknown IDs.
func (dag *ComputeDAG) sort() error {
	current := dag.OrderTasks
	var temp, final []TrainingTask
	if dag.IDToTrainTask == nil {
		dag.IDToTrainTask = make(map[string]TrainTask)
	}
	for i := 0; len(current) != 0; {
		depth := 0
		ready := true
		for _, ID := range current[i].InModelsIDs {
			// Empty IDs mark absent optional inputs and resolve trivially.
			if ID == "" {
				continue
			}
			parent, ok := dag.IDToTrainTask[ID]
			ready = ready && ok
			if !ok {
				break
			}
			depth = max(depth, parent.Depth+1)
		}
		if ready {
			current[i].Depth = depth
			final = append(final,
				current[i])
			// NOTE(review): the duplicate-ID check runs after the task was
			// already appended to final; on error the partial state is
			// discarded by the caller, so this appears harmless — confirm.
			if _, ok := dag.IDToTrainTask[current[i].ID]; ok {
				return errors.BadRequest("compute plan error: Duplicate training task ID: %s", current[i].ID)
			}
			dag.IDToTrainTask[current[i].ID] = TrainTask{Depth: current[i].Depth}
		} else {
			temp = append(temp, current[i])
		}
		if i != len(current)-1 {
			i++
			continue
		}
		// End of a pass: if nothing became ready, the rest cannot be ordered.
		if len(temp) == len(current) {
			var errorIDs []string
			for _, c := range current {
				errorIDs = append(errorIDs, c.ID)
			}
			return errors.BadRequest("compute plan error: Cyclic or missing dependency among inModels IDs: %v", errorIDs)
		}
		i = 0
		current = temp
		temp = []TrainingTask{}
	}
	dag.OrderTasks = final
	return nil
}

// max returns the larger of two ints.
func max(x, y int) int {
	if x < y {
		return y
	}
	return x
}
chaincode/compute_plan_dag.go
0.554953
0.598576
compute_plan_dag.go
starcoder
package unbounded

import (
	"github.com/cnotch/algo/container/queue"
)

// Value is the type of values stored in the tree.
type Value = interface{}

// Tree is a rooted tree with an unbounded number of branches per node,
// stored in left-child / right-sibling representation.
type Tree struct {
	// parent node
	parent *Tree
	// leftmost child
	leftChild *Tree
	// next sibling to the right
	rightBrother *Tree
	// value stored at this tree node
	Value Value
}

// New creates a Tree holding v.
func New(v Value) *Tree {
	return &Tree{Value: v}
}

// IsRoot reports whether t is a root node.
func (t *Tree) IsRoot() bool {
	return t.parent == nil
}

// IsParentOf reports whether t is the parent of x.
func (t *Tree) IsParentOf(x *Tree) bool {
	return x.parent != nil && t == x.parent
}

// IsChildOf reports whether t is a child of x.
func (t *Tree) IsChildOf(x *Tree) bool {
	return t.parent != nil && t.parent == x
}

// IsAncestorOf reports whether t is an ancestor of x.
func (t *Tree) IsAncestorOf(x *Tree) bool {
	for p := x.parent; p != nil; p = p.parent {
		if p == t {
			return true
		}
	}
	return false
}

// IsDescendantOf reports whether t is a descendant of x.
func (t *Tree) IsDescendantOf(x *Tree) bool {
	return x.IsAncestorOf(t)
}

// IsBrotherOf reports whether t is a sibling of x (same non-nil parent).
func (t *Tree) IsBrotherOf(x *Tree) bool {
	return t.parent != nil && x.parent == t.parent
}

// Level returns the depth level of t (0 means t is a root).
func (t *Tree) Level() int {
	lvl := -1
	p := t
	for p != nil {
		lvl++
		p = p.parent
	}
	return lvl
}

// Count returns the number of children of t; when includeSubTrees is true
// it counts all descendants instead.
func (t *Tree) Count(includeSubTrees bool) int {
	num := 0
	if includeSubTrees {
		t.count(&num)
		return num
	}
	for p := t.leftChild; p != nil; p = p.rightBrother {
		num++
	}
	return num
}

// count recursively accumulates the number of descendants into num.
func (t *Tree) count(num *int) {
	for p := t.leftChild; p != nil; p = p.rightBrother {
		*num++
		p.count(num)
	}
}

// Root returns the root of the tree containing t.
func (t *Tree) Root() *Tree {
	r := t
	for p := t.parent; p != nil; p = p.parent {
		r = p
	}
	return r
}

// Parent returns the parent of t (nil for a root).
func (t *Tree) Parent() *Tree {
	return t.parent
}

// Next returns the next sibling branch, or nil.
// Time complexity O(1).
func (t *Tree) Next() *Tree {
	if p := t.rightBrother; t.parent != nil {
		return p
	}
	return nil
}

// Prev returns the previous sibling branch, or nil.
// Time complexity O(n), where n is the number of t's siblings.
func (t *Tree) Prev() *Tree {
	if t.parent == nil {
		return nil
	}
	var prev *Tree
	for p := t.parent.leftChild; p != nil; prev, p = p, p.rightBrother {
		if p == t {
			break
		}
	}
	return prev
}

// FirstChild returns the first child branch, or nil.
// Time complexity O(1).
func (t *Tree) FirstChild() *Tree {
	return t.leftChild
}

// LastChild returns the last child branch, or nil.
// Time complexity O(n), where n is the number of t's children.
func (t *Tree) LastChild() *Tree {
	var last *Tree
	for p := t.leftChild; p != nil; last, p = p, p.rightBrother {
	}
	return last
}

// Remove detaches the descendant branch x and returns its stored value x.Value.
// x must be non-nil.
// The operation only unhooks x from its parent: x itself, its value and its
// sub-branches remain valid, and x may keep being used or be inserted into
// another tree. If x is not a descendant of t, only x.Value is returned.
// Time complexity O(n), where n is the number of x's siblings.
func (t *Tree) Remove(x *Tree) Value {
	if t.IsAncestorOf(x) {
		x.parent.remove(x) // perform the actual unlink inside x's parent
	}
	return x.Value
}

// AddFirst adds a first child holding v and returns the new branch.
// Time complexity O(1).
func (t *Tree) AddFirst(v Value) *Tree {
	return t.insertValue(v, nil)
}

// AddLast adds a last child holding v and returns the new branch.
// Time complexity O(n), where n is the number of t's children.
func (t *Tree) AddLast(v Value) *Tree {
	return t.insertValue(v, t.LastChild())
}

// Add inserts a sibling branch holding v after at and returns it.
// at must be non-nil.
// If at is not a descendant of t, nil is returned.
// Time complexity O(1).
func (t *Tree) Add(v Value, at *Tree) *Tree {
	if t.IsAncestorOf(at) {
		return at.parent.insertValue(v, at)
	}
	return nil
}

// InsertFirst inserts x as the first child of t and returns x, or nil.
// x must be non-nil.
// If x.parent != nil or x == t.Root(), nil is returned.
// Time complexity O(1).
func (t *Tree) InsertFirst(x *Tree) *Tree {
	if x.parent != nil || x == t.Root() {
		return nil
	}
	return t.insert(x, nil)
}

// InsertLast inserts x as the last child of t and returns x, or nil.
// x must be non-nil.
// If x.parent != nil or x == t.Root(), nil is returned.
// Time complexity O(n), where n is the number of t's children.
func (t *Tree) InsertLast(x *Tree) *Tree {
	if x.parent != nil || x == t.Root() {
		return nil
	}
	return t.insert(x, t.LastChild())
}

// Insert inserts the sibling branch x after at and returns x, or nil.
// x != nil && at != nil is required.
// If x.parent != nil or x == t.Root(), nil is returned;
// if at is not a descendant of t, nil is returned.
// Time complexity O(1).
func (t *Tree) Insert(x, at *Tree) *Tree {
	if x.parent != nil || x == t.Root() || !t.IsAncestorOf(at) {
		return nil
	}
	return at.parent.insert(x, at)
}

// MoveToFirst moves x to be the first child of t.
// x must be non-nil.
// If x is not a descendant of t, this is a no-op.
// Time complexity O(n), where n is the number of x's siblings.
func (t *Tree) MoveToFirst(x *Tree) {
	if !t.IsAncestorOf(x) {
		return
	}
	if t == x.parent {
		t.move(x, nil)
	} else {
		x.parent.remove(x)
		t.insert(x, nil)
	}
}

// MoveToLast moves x to be the last child of t.
// x must be non-nil.
// If x is not a descendant of t, this is a no-op.
// Time complexity O(n+m), where n is the number of x's siblings and m is
// the number of t's children.
func (t *Tree) MoveToLast(x *Tree) {
	if !t.IsAncestorOf(x) {
		return
	}
	if t == x.parent {
		t.move(x, t.LastChild())
	} else {
		x.parent.remove(x)
		t.insert(x, t.LastChild())
	}
}

// Move moves x to the position after at.
// x != nil && at != nil is required.
// If x or at is not a branch of t, or x == at, this is a no-op.
// Time complexity O(n), where n is the number of x's siblings.
func (t *Tree) Move(x, at *Tree) {
	if x == at || !t.IsAncestorOf(x) || !t.IsAncestorOf(at) {
		return
	}
	if x.parent == at.parent {
		at.parent.move(x, at)
	} else {
		x.parent.remove(x)
		at.parent.insert(x, at)
	}
}

// insert links x after at (or as first child when at is nil) and returns x.
func (t *Tree) insert(x, at *Tree) *Tree {
	if at == nil {
		x.rightBrother = t.leftChild
		t.leftChild = x
	} else {
		x.rightBrother = at.rightBrother
		at.rightBrother = x
	}
	x.parent = t
	return x
}

// insertValue is a convenience wrapper around insert.
func (t *Tree) insertValue(v Value, at *Tree) *Tree {
	return t.insert(&Tree{Value: v}, at)
}

// remove unlinks the child branch x and returns x.
func (t *Tree) remove(x *Tree) *Tree {
	prev := x.Prev()
	if prev == nil {
		t.leftChild = x.rightBrother
	} else {
		prev.rightBrother = x.rightBrother
	}
	x.rightBrother = nil // avoid memory leaks
	x.parent = nil
	return x
}

// move relinks the child x after at (or to the front when at is nil)
// within the same parent, and returns x.
func (t *Tree) move(x, at *Tree) *Tree {
	if x == at {
		return x
	}
	if at == nil {
		if x == t.leftChild {
			return x
		}
		prev := x.Prev()
		prev.rightBrother = x.rightBrother
		x.rightBrother = t.leftChild
		t.leftChild = x
	} else {
		prev := x.Prev()
		if prev == nil {
			t.leftChild = x.rightBrother
		} else {
			prev.rightBrother = x.rightBrother
		}
		x.rightBrother = at.rightBrother
		at.rightBrother = x
	}
	return x
}

// Clear removes all child branches of t, recursively unlinking them.
func (t *Tree) Clear() {
	if t.leftChild == nil {
		return
	}
	var next *Tree
	for p := t.leftChild; p != nil; p = next {
		next = p.rightBrother
		p.Clear()
		p.parent = nil // avoid memory leaks
		p.rightBrother = nil
	}
	t.leftChild = nil
}

// DepthTravel performs a depth-first (pre-order) traversal.
func (t *Tree) DepthTravel(visit func(t *Tree)) {
	visit(t)
	for p := t.leftChild; p != nil; p = p.rightBrother {
		p.DepthTravel(visit)
	}
}

// BreadthTravel performs a breadth-first (level-order) traversal.
func (t *Tree) BreadthTravel(visit func(t *Tree)) {
	var q queue.Queue
	q.Enqueue(t)
	for q.Len() > 0 {
		i, _ := q.Dequeue()
		curr := i.(*Tree)
		visit(curr)
		for p := curr.leftChild; p != nil; p = p.rightBrother {
			q.Enqueue(p)
		}
	}
}
container/tree/unbounded/tree.go
0.558327
0.410047
tree.go
starcoder
package scanner import ( "regexp" "strings" ) // Token is the structure containing a scanned token. type Token struct { Data string } // IsComment returns if the token is a comment. func (t *Token) IsComment() bool { return strings.HasPrefix(t.Data, "#") } // IsString returns if the current token begins a string. func (t *Token) IsString() bool { strTest := regexp.MustCompile(`(^""".*$)|(^".*$)|(^'.*$)|(^[^"""]*"""[,|\)]*$)|(^[^"]*"[,|\)]*$)|(^[^']*'[,|\)]*$)`) return strTest.MatchString(t.Data) } // IsBracket returns if the current token begins a bracketed set func (t *Token) IsBracket() bool { return strings.HasPrefix(t.Data, "[") || strings.HasSuffix(t.Data, "]") } // IsClass returns if the current token is begins a class definition. func (t *Token) IsClass() bool { return strings.ToLower(t.Data) == "class" } // IsHomepage returns if the current token begins with a homepage variable declaration. func (t *Token) IsHomepage() bool { return strings.ToLower(t.Data) == "homepage" } // IsURL returns if the current token begins with a valid URL header. func (t *Token) IsURL() bool { return strings.HasPrefix(t.Data, "url") } // IsGitURL returns if the current token begins with a valid GitURL header. func (t *Token) IsGitURL() bool { return strings.HasPrefix(t.Data, "git") } //IsChecksum returns uf the current token begins with a valid checksum header. func (t *Token) IsChecksum() bool { return strings.HasPrefix(strings.ToLower(t.Data), "sha") } // IsVersion returns if the current token begins with a valid version header. func (t *Token) IsVersion() bool { return strings.HasPrefix(t.Data, "version(") } // IsDependency returns if the current token begins with a valid version header. func (t *Token) IsDependency() bool { return strings.HasPrefix(strings.ToLower(t.Data), "depends_on(") } // IsFunction returns if the current token begins a function. 
func (t *Token) IsFunction() bool { return strings.ToLower(t.Data) == "def" || strings.ToLower(t.Data) == "for" } // IsBranch returns if the current token defines a branch keyword. func (t *Token) IsBranch() bool { return strings.HasPrefix(strings.ToLower(t.Data), "branch") } // IsBoolean returns if the current token is a boolean. func (t *Token) IsBoolean() bool { return strings.HasPrefix(strings.ToLower(t.Data), "true") || strings.HasPrefix(strings.ToLower(t.Data), "false") } // IsSubmodule returns if the current token is a submodule keyword. func (t *Token) IsSubmodule() bool { return strings.HasPrefix(strings.ToLower(t.Data), "submodules") } // IsExpand returns if the current token is an expand keyword. func (t *Token) IsExpand() bool { return strings.HasPrefix(strings.ToLower(t.Data), "expand") } // IsCommit returns if the current token is an commit keyword. func (t *Token) IsCommit() bool { return strings.HasPrefix(strings.ToLower(t.Data), "commit") } // IsMaintainers returns if the current token is an maintainers keyword. func (t *Token) IsMaintainers() bool { return strings.HasPrefix(strings.ToLower(t.Data), "maintainers") } // IsList returns if the current token is an list declaration. func (t *Token) IsList() bool { return strings.HasPrefix(strings.ToLower(t.Data), "[") || strings.HasSuffix(strings.ToLower(t.Data), "]") } // IsExtension returns if the current token is an extension keyword. func (t *Token) IsExtension() bool { return strings.HasPrefix(strings.ToLower(t.Data), "extension") } // IsTag returns if the current token is an tag keyword. func (t *Token) IsTag() bool { return strings.HasPrefix(strings.ToLower(t.Data), "tag") }
scanner/token.go
0.798226
0.586464
token.go
starcoder
package indicators

import (
	"math"

	"github.com/dellosaneil/stocktracking-backend/util"
)

// RelativeStrengthIndex computes the RSI series for a list of prices using
// Wilder's smoothing over the given period: RSI = 100 - 100/(1 + RS), with
// RS = averageGain / averageLoss. One RSI value is produced per smoothed
// average, i.e. len(prices) - period values.
// Assumes len(prices) > period — TODO confirm callers guarantee this.
func RelativeStrengthIndex(prices []float64, period int) []float64 {
	var rsi []float64
	priceChange := calculatePriceChange(prices)
	averageGains, averageLosses := getAverages(priceChange, period)
	// First RSI value from the initial simple averages.
	// NOTE(review): an averageLosses[0] of 0 makes rs +Inf, which yields
	// RSI = 100 — presumably the intended saturation; verify.
	rs := (averageGains[0] / averageLosses[0])
	r := calculateRSI(rs)
	rsi = append(rsi, r)
	for index := 1; index < len(averageGains); index++ {
		rs = averageGains[index] / averageLosses[index]
		rsi = append(rsi, calculateRSI(rs))
	}
	return rsi
}

// calculateRSI maps a relative-strength ratio to the 0..100 RSI scale,
// rounded to 4 decimal places via util.RoundPrecision.
func calculateRSI(relativeStrength float64) float64 {
	return util.RoundPrecision(float64(100)-(float64(100)/(float64(1)+relativeStrength)), 4)
}

// calculatePriceChange returns the consecutive price differences
// (prices[i] - prices[i-1]); the result has len(prices)-1 elements.
func calculatePriceChange(prices []float64) []float64 {
	var priceChange []float64
	previousPrice := prices[0]
	for index := 1; index < len(prices); index++ {
		change := prices[index] - previousPrice
		priceChange = append(priceChange, change)
		previousPrice = prices[index]
	}
	return priceChange
}

// getAverages produces the Wilder-smoothed average gain and average loss
// series: a simple average over the first `period` changes, then
// avg' = (avg*(period-1) + current) / period for each subsequent change.
func getAverages(priceChange []float64, period int) ([]float64, []float64) {
	var gains []float64
	var losses []float64
	var averageGains []float64
	var averageLosses []float64
	// Seed averages: split the first `period` changes into gains and losses
	// (zero changes count toward losses with magnitude 0).
	for index := 0; index < period; index++ {
		if priceChange[index] > 0 {
			gains = append(gains, priceChange[index])
		} else {
			losses = append(losses, math.Abs(priceChange[index]))
		}
	}
	firstGainAverage := util.Sum(gains) / float64(period)
	firstLossAverage := util.Sum(losses) / float64(period)
	averageGains = append(averageGains, firstGainAverage)
	averageLosses = append(averageLosses, firstLossAverage)
	for index := period; index < len(priceChange); index++ {
		// index-period maps change index back to the previous smoothed
		// average: at index == period that is element 0, and each later
		// iteration reads the element appended on the previous pass.
		previousAverageGain := averageGains[index-period]
		previousAverageLoss := averageLosses[index-period]
		currentPrice := priceChange[index]
		aGain := previousAverageGain * float64(period-1)
		aLoss := previousAverageLoss * float64(period-1)
		if currentPrice > 0 {
			aGain += currentPrice
		} else {
			aLoss += math.Abs(currentPrice)
		}
		newAverageGain := aGain / float64(period)
		// math.Abs here is redundant (aLoss is a sum of non-negative terms)
		// but preserved as written.
		newAverageLoss := math.Abs(aLoss) / float64(period)
		averageGains = append(averageGains, newAverageGain)
		averageLosses = append(averageLosses, newAverageLoss)
	}
	return averageGains, averageLosses
}
indicators/RelativeStrengthIndex.go
0.701509
0.561034
RelativeStrengthIndex.go
starcoder
package movers

import (
	"math"

	"github.com/wieku/danser-go/beatmap/objects"
	"github.com/wieku/danser-go/settings"
	"github.com/wieku/danser-go/bmath"
	"github.com/wieku/danser-go/bmath/curves"
)

// AngleOffsetMover builds a Bezier cursor path between two consecutive hit
// objects, placing control points at an angular offset from the straight
// line ("flower" style movement).
type AngleOffsetMover struct {
	lastAngle          float64        // angle (radians) the cursor left the previous object at
	lastPoint          bmath.Vector2d // end position of the previous pair's first object
	bz                 *curves.Bezier // path section currently being traced
	startTime, endTime int64          // time window covered by bz
	invert             float64        // +1 or -1; flips the offset side to alternate curvature
}

// NewAngleOffsetMover returns a mover with a neutral angle and no inversion.
func NewAngleOffsetMover() MultiPointMover {
	return &AngleOffsetMover{lastAngle: 0, invert: 1}
}

// Reset restores the mover to its initial state.
func (bm *AngleOffsetMover) Reset() {
	bm.lastAngle = 0
	bm.invert = 1
	bm.lastPoint = bmath.NewVec2d(0, 0)
}

// SetObjects prepares the Bezier section from the end of objs[0] to the
// start of objs[1]. Control-point placement depends on whether either
// object is a slider (slider angles take priority over the running angle).
func (bm *AngleOffsetMover) SetObjects(objs []objects.BaseObject) {
	end := objs[0]
	start := objs[1]

	endPos := end.GetBasicData().EndPos
	endTime := end.GetBasicData().EndTime
	startPos := start.GetBasicData().StartPos
	startTime := start.GetBasicData().StartTime

	distance := endPos.Dst(startPos)

	s1, ok1 := end.(*objects.Slider)
	s2, ok2 := start.(*objects.Slider)

	var points []bmath.Vector2d

	scaledDistance := distance * settings.Dance.Flower.DistanceMult
	newAngle := settings.Dance.Flower.AngleOffset * math.Pi / 180.0

	// Long time gaps scale the control-point distance by elapsed time
	// instead of spatial distance.
	if end.GetBasicData().StartTime > 0 && settings.Dance.Flower.LongJump >= 0 && (startTime-endTime) > settings.Dance.Flower.LongJump {
		scaledDistance = float64(startTime-endTime) * settings.Dance.Flower.LongJumpMult
	}

	if endPos == startPos {
		// Both objects share a position: either loop around them or move in
		// a straight (degenerate) line.
		if settings.Dance.Flower.LongJumpOnEqualPos {
			scaledDistance = float64(startTime-endTime) * settings.Dance.Flower.LongJumpMult

			if math.Abs(float64(startTime-endTime)) > 1 {
				bm.lastAngle += math.Pi
			}

			pt1 := bmath.NewVec2dRad(bm.lastAngle, scaledDistance).Add(endPos)
			if ok1 {
				pt1 = bmath.NewVec2dRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
			}

			if !ok2 {
				angle := bm.lastAngle - newAngle*bm.invert
				pt2 := bmath.NewVec2dRad(angle, scaledDistance).Add(startPos)
				if math.Abs(float64(startTime-endTime)) > 1 {
					bm.lastAngle = angle
				}
				points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
			} else {
				pt2 := bmath.NewVec2dRad(s2.GetStartAngle(), scaledDistance).Add(startPos)
				points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
			}
		} else {
			points = []bmath.Vector2d{endPos, startPos}
		}
	} else if ok1 && ok2 {
		// Slider to slider: both control points follow the slider angles.
		bm.invert = -1 * bm.invert

		pt1 := bmath.NewVec2dRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
		pt2 := bmath.NewVec2dRad(s2.GetStartAngle(), scaledDistance).Add(startPos)

		points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
	} else if ok1 {
		// Slider to circle.
		bm.invert = -1 * bm.invert

		if math.Abs(float64(startTime-endTime)) > 1 {
			bm.lastAngle = endPos.AngleRV(startPos) - newAngle*bm.invert
		} else {
			bm.lastAngle = s1.GetEndAngle() + math.Pi
		}

		pt1 := bmath.NewVec2dRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
		pt2 := bmath.NewVec2dRad(bm.lastAngle, scaledDistance).Add(startPos)

		points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
	} else if ok2 {
		// Circle to slider.
		if math.Abs(float64(startTime-endTime)) > 1 {
			bm.lastAngle += math.Pi
		}

		pt1 := bmath.NewVec2dRad(bm.lastAngle, scaledDistance).Add(endPos)
		pt2 := bmath.NewVec2dRad(s2.GetStartAngle(), scaledDistance).Add(startPos)

		points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
	} else {
		// Circle to circle: choose the offset angle from stream settings.
		if settings.Dance.Flower.UseNewStyle {
			if math.Abs(float64(startTime-endTime)) > 1 && bmath.AngleBetween(endPos, bm.lastPoint, startPos) >= settings.Dance.Flower.AngleOffset*math.Pi/180.0 {
				bm.invert = -1 * bm.invert
				newAngle = settings.Dance.Flower.StreamAngleOffset * math.Pi / 180.0
			}
		} else if startTime-endTime < settings.Dance.Flower.StreamTrigger {
			newAngle = settings.Dance.Flower.StreamAngleOffset * math.Pi / 180.0
		}

		angle := endPos.AngleRV(startPos) - newAngle*bm.invert
		// For (near-)simultaneous objects, keep the previous angle.
		if math.Abs(float64(startTime-endTime)) <= 1 {
			angle = bm.lastAngle
		}

		pt1 := bmath.NewVec2dRad(bm.lastAngle+math.Pi, scaledDistance).Add(endPos)
		pt2 := bmath.NewVec2dRad(angle, scaledDistance).Add(startPos)

		if scaledDistance > 2 {
			bm.lastAngle = angle
		}

		if !settings.Dance.Flower.UseNewStyle && startTime-endTime < settings.Dance.Flower.StreamTrigger && !(start.GetBasicData().SliderPoint && end.GetBasicData().SliderPoint) {
			bm.invert = -1 * bm.invert
		}

		points = []bmath.Vector2d{endPos, pt1, pt2, startPos}
	}

	bm.bz = curves.NewBezier(points)
	bm.endTime = endTime
	bm.startTime = startTime
	bm.lastPoint = endPos
}

// Update returns the cursor position at the given time, clamped to the
// current section's time window and interpolated along the Bezier.
func (bm *AngleOffsetMover) Update(time int64) bmath.Vector2d {
	t := float64(time-bm.endTime) / float64(bm.startTime-bm.endTime)
	t = math.Max(0.0, math.Min(1.0, t))
	return bm.bz.NPointAt(t)
}

// GetEndTime returns the time at which the current section finishes
// (the start time of the second object).
func (bm *AngleOffsetMover) GetEndTime() int64 {
	return bm.startTime
}
dance/movers/angleoffset.go
0.646014
0.488527
angleoffset.go
starcoder
package trace

import (
	"fmt"
	"math"
	"math/rand"
	"reflect"
	"strconv"

	"github.com/Bredgren/gotracer/trace/options"
	"github.com/Bredgren/gotracer/trace/ray"
	"github.com/Bredgren/gotracer/trace/vec"
	"github.com/go-gl/mathgl/mgl64"
)

// Camera is a viewpoint into a scene and is able to generate Camera rays.
type Camera struct {
	Position       mgl64.Vec3
	ViewDir        mgl64.Vec3
	UseDof         bool    // whether depth-of-field sampling is enabled
	FocalDistance  float64 // distance at which objects are in perfect focus
	ApertureRadius float64 // radius of the lens aperture used for DoF jitter
	u              mgl64.Vec3 // screen-space horizontal axis, scaled to plane width
	v              mgl64.Vec3 // screen-space vertical axis (points down), scaled to plane height
}

// NewCamera initializes a Camera based on the options and returns it.
func NewCamera(opts *options.Camera, aspectRatio float64) *Camera {
	// Clamp the field of view to the range declared in the option's struct
	// tags, then convert to radians.
	fovMin, fovMax := fovMinMax(opts)
	fov := mgl64.Clamp(opts.Fov, fovMin, fovMax) * math.Pi / 180

	// Convert option vectors
	lookAt := mgl64.Vec3{opts.LookAt.X, opts.LookAt.Y, opts.LookAt.Z}
	position := mgl64.Vec3{opts.Position.X, opts.Position.Y, opts.Position.Z}
	upDir := vec.Normalize(mgl64.Vec3{opts.UpDir.X, opts.UpDir.Y, opts.UpDir.Z}, vec.Y, 1)

	// Camera space transform
	m := mgl64.LookAtV(position, lookAt, upDir).Inv()
	viewDir := mgl64.TransformNormal(mgl64.Vec3{0, 0, -1}, m).Normalize()

	// Assumes distance to camera plane is 1
	normalizedHeight := math.Abs(2 * math.Tan(fov/2))
	u := mgl64.TransformNormal(mgl64.Vec3{1, 0, 0}.Mul(normalizedHeight*aspectRatio), m)
	v := mgl64.TransformNormal(mgl64.Vec3{0, -1, 0}.Mul(normalizedHeight), m)

	return &Camera{
		Position:       position,
		ViewDir:        viewDir,
		UseDof:         opts.Dof.Enabled,
		FocalDistance:  opts.Dof.FocalDistance,
		ApertureRadius: opts.Dof.ApertureRadius,
		u:              u,
		v:              v,
	}
}

// fovMinMax reads the allowed FOV range from the "min"/"max" struct tags on
// the Fov field of options.Camera.
func fovMinMax(opts *options.Camera) (min, max float64) {
	// Look up the Fov field by reflection so the bounds live next to the
	// option definition as struct tags.
	t := reflect.TypeOf(*opts)
	fovF, ok := t.FieldByName("Fov")
	if !ok {
		panic(fmt.Errorf("Field Fov not found in *cameraOpts"))
	}
	minTag := fovF.Tag.Get("min")
	min, e := strconv.ParseFloat(minTag, 64)
	if e != nil {
		panic(fmt.Errorf("Struct Tag 'min' for field Fov of *cameraOpts: %v", e))
	}
	maxTag := fovF.Tag.Get("max")
	max, e = strconv.ParseFloat(maxTag, 64)
	if e != nil {
		panic(fmt.Errorf("Struct Tag 'max' for field Fov of *cameraOpts: %v", e))
	}
	return min, max
}

// RayThrough takes normalized window coordinates and returns the ray that goes
// through that point starting from the camera. (0.5, 0.5) is the view center.
func (c *Camera) RayThrough(nx, ny float64, r *ray.Ray) {
	r.Origin = c.Position
	r.Dir = c.ViewDir.Add(c.u.Mul(nx - 0.5)).Add(c.v.Mul(ny - 0.5)).Normalize()
}

// DofRayThrough takes a center ray (which is not modified) and modifies r to be
// a randomized ray, according to depth of field settings, whose origin is
// jittered within the aperture but which still passes through the focal point
// of the center ray.
func (c *Camera) DofRayThrough(center, r *ray.Ray) {
	focalPoint := center.At(c.FocalDistance)
	// Jitter the origin uniformly within a square of half-width ApertureRadius.
	offsetU := c.u.Mul(rand.Float64()*c.ApertureRadius*2 - c.ApertureRadius)
	offsetV := c.v.Mul(rand.Float64()*c.ApertureRadius*2 - c.ApertureRadius)
	offsetPosition := c.Position.Add(offsetU).Add(offsetV)
	r.Origin = offsetPosition
	r.Dir = focalPoint.Sub(offsetPosition).Normalize()
}
trace/camera.go
0.818628
0.487246
camera.go
starcoder
package oak

import (
	"github.com/oakmound/oak/v4/alg/intgeom"
	"github.com/oakmound/oak/v4/event"
)

// Viewport describes a movable window onto the game world: a position plus
// optional bounds that the position is constrained to.
type Viewport struct {
	Position       intgeom.Point2
	Bounds         intgeom.Rect2
	BoundsEnforced bool
}

// ShiftViewport shifts the viewport by x,y
func (w *Window) ShiftViewport(delta intgeom.Point2) {
	w.SetViewport(w.viewPos.Add(delta))
}

// SetViewport positions the viewport to be at x,y
func (w *Window) SetViewport(pt intgeom.Point2) {
	if w.useViewBounds {
		// Accept pt on each axis only if the whole screen fits inside the
		// bounds at that position; otherwise pin to the nearest edge.
		// Min takes priority when the bounds are narrower than the screen.
		if w.viewBounds.Min.X() <= pt.X() && w.viewBounds.Max.X() >= pt.X()+w.ScreenWidth {
			w.viewPos[0] = pt.X()
		} else if w.viewBounds.Min.X() > pt.X() {
			w.viewPos[0] = w.viewBounds.Min.X()
		} else if w.viewBounds.Max.X() < pt.X()+w.ScreenWidth {
			w.viewPos[0] = w.viewBounds.Max.X() - w.ScreenWidth
		}
		if w.viewBounds.Min.Y() <= pt.Y() && w.viewBounds.Max.Y() >= pt.Y()+w.ScreenHeight {
			w.viewPos[1] = pt.Y()
		} else if w.viewBounds.Min.Y() > pt.Y() {
			w.viewPos[1] = w.viewBounds.Min.Y()
		} else if w.viewBounds.Max.Y() < pt.Y()+w.ScreenHeight {
			w.viewPos[1] = w.viewBounds.Max.Y() - w.ScreenHeight
		}
	} else {
		w.viewPos = pt
	}
	// Notify listeners of the (possibly clamped) new position.
	event.TriggerOn(w.eventHandler, ViewportUpdate, w.viewPos)
}

// ViewportBounds returns the boundary of this window's viewport, or the rectangle
// that the viewport is not allowed to exit as it moves around. It often represents
// the total size of the world within a given scene. If bounds are not enforced, ok will
// be false.
func (w *Window) ViewportBounds() (rect intgeom.Rect2, ok bool) {
	return w.viewBounds, w.useViewBounds
}

// RemoveViewportBounds removes restrictions on the viewport's movement. It will not
// cause the viewport to update immediately.
func (w *Window) RemoveViewportBounds() {
	w.useViewBounds = false
}

// SetViewportBounds sets the minimum and maximum position of the viewport, including
// screen dimensions
func (w *Window) SetViewportBounds(rect intgeom.Rect2) {
	// Bounds can never be smaller than the screen itself.
	if rect.Max[0] < w.ScreenWidth {
		rect.Max[0] = w.ScreenWidth
	}
	if rect.Max[1] < w.ScreenHeight {
		rect.Max[1] = w.ScreenHeight
	}
	w.useViewBounds = true
	w.viewBounds = rect

	// If the current position now lies outside the new bounds, move it back in.
	newView := rect.Clamp(w.viewPos)
	if newView != w.viewPos {
		w.SetViewport(newView)
	}
}

// Viewport returns the viewport's position. Its width and height are the window's
// width and height. This position plus width/height cannot exceed ViewportBounds.
func (w *Window) Viewport() intgeom.Point2 {
	return w.viewPos
}
viewport.go
0.739422
0.424591
viewport.go
starcoder
package duration

import (
	"fmt"
	"math"
	"os"
	"sort"

	"github.com/kshedden/statmodel/statmodel"
)

// SurvfuncRight uses the method of Kaplan and Meier to estimate the
// survival distribution based on (possibly) right censored data.
// Construct values with NewSurvfuncRight, then call Fit.
type SurvfuncRight struct {
	// Dataset columns, one slice per variable.
	data [][]float64

	// Column position of the variable holding the minimum of the
	// event time and censoring time.
	timepos int

	// Column position of the status indicator, which is 1 if the event
	// occurred at the time given by the time variable, and 0 otherwise.
	statuspos int

	// Column position of the case weights; -1 when not configured.
	weightpos int

	// Column position of the entry times; -1 when not configured.
	entrypos int

	// Times at which events occur, sorted.
	times []float64

	// Number of events at each time in Times.
	nEvents []float64

	// Number of people at risk just before each time in times
	nRisk []float64

	// The estimated survival function evaluated at each time in Times
	survProb []float64

	// The standard errors for the estimates in SurvProb.
	survProbSE []float64

	// Working accumulators keyed by time, filled by scanData.
	events map[float64]float64
	total  map[float64]float64
	entry  map[float64]float64
}

// SurvfuncRightConfig holds the optional column names for a survival
// function fit.
type SurvfuncRightConfig struct {
	WeightVar string
	EntryVar  string
}

// NewSurvfuncRight creates a new value for fitting a survival function.
func NewSurvfuncRight(data statmodel.Dataset, time, status string, config *SurvfuncRightConfig) (*SurvfuncRight, error) { pos := make(map[string]int) for i, v := range data.Names() { pos[v] = i } timepos, ok := pos[time] if !ok { msg := fmt.Sprintf("Time variable '%s' not found in dataset\n", time) return nil, fmt.Errorf(msg) } statuspos, ok := pos[status] if !ok { msg := fmt.Sprintf("Status variable '%s' not found in dataset\n", status) return nil, fmt.Errorf(msg) } getpos := func(cfg *SurvfuncRightConfig, field string) int { if cfg == nil { return -1 } var vn string switch field { case "weight": vn = config.WeightVar case "entry": vn = config.EntryVar default: panic("!!") } if vn == "" { return -1 } loc, ok := pos[vn] if !ok { msg := fmt.Sprintf("'%s' not found\n", vn) panic(msg) } return loc } return &SurvfuncRight{ data: data.Data(), timepos: timepos, statuspos: statuspos, weightpos: getpos(config, "weight"), entrypos: getpos(config, "entry"), }, nil } func (sf *SurvfuncRight) Fit() { sf.init() sf.scanData() sf.eventstats() sf.compress() sf.fit() } // NumRisk returns the number of people at risk at each time point // where the survival function changes. func (sf *SurvfuncRight) NumRisk() []float64 { return sf.nRisk } // SurvProb returns the estimated survival probabilities at the points // where the survival function changes. func (sf *SurvfuncRight) SurvProb() []float64 { return sf.survProb } // SurvProbSE returns the standard errors of the estimated survival // probabilities at the points where the survival function changes. 
func (sf *SurvfuncRight) SurvProbSE() []float64 {
	return sf.survProbSE
}

// init allocates the per-time accumulator maps.
func (sf *SurvfuncRight) init() {
	sf.events = make(map[float64]float64)
	sf.total = make(map[float64]float64)
	sf.entry = make(map[float64]float64)
}

// scanData accumulates the (weighted) event counts, totals, and entry
// counts per distinct time value.
func (sf *SurvfuncRight) scanData() {

	var weight []float64
	if sf.weightpos != -1 {
		weight = sf.data[sf.weightpos]
	}

	var status []float64
	if sf.statuspos != -1 {
		status = sf.data[sf.statuspos]
	}

	var entry []float64
	if sf.entrypos != -1 {
		entry = sf.data[sf.entrypos]
	}

	for i, t := range sf.data[sf.timepos] {
		w := 1.0
		if sf.weightpos != -1 {
			w = weight[i]
		}
		if status == nil || status[i] == 1 {
			sf.events[t] += w
		}
		sf.total[t] += w

		if sf.entrypos != -1 {
			// Each subject's entry time must be strictly before its
			// event/censoring time.
			// NOTE(review): the condition rejects entry >= t, but the
			// message says "is before" — the wording looks inverted;
			// confirm the intended message. Exiting the process from a
			// library scan is also worth revisiting.
			if entry[i] >= t {
				msg := fmt.Sprintf("Entry time %d is before the event/censoring time\n", i)
				os.Stderr.WriteString(msg)
				os.Exit(1)
			}
			sf.entry[entry[i]] += w
		}
	}
}

// Time returns the sorted distinct event/censoring times.
func (sf *SurvfuncRight) Time() []float64 {
	return sf.times
}

// rollback converts x in-place into a reverse cumulative sum, so x[i]
// becomes the sum of x[i:].
func rollback(x []float64) {
	var z float64
	for i := len(x) - 1; i >= 0; i-- {
		z += x[i]
		x[i] = z
	}
}

// eventstats derives the sorted time grid, per-time event counts, and
// risk-set sizes from the accumulator maps.
func (sf *SurvfuncRight) eventstats() {

	// Get the sorted distinct times (event or censoring)
	sf.times = make([]float64, len(sf.total))
	var i int
	for t := range sf.total {
		sf.times[i] = t
		i++
	}
	sort.Float64s(sf.times)

	// Get the weighted event count and risk set size at each time
	// point (in same order as Times).
	sf.nEvents = make([]float64, len(sf.times))
	sf.nRisk = make([]float64, len(sf.times))
	for i, t := range sf.times {
		sf.nEvents[i] = sf.events[t]
		sf.nRisk[i] = sf.total[t]
	}
	rollback(sf.nRisk)

	// Adjust for entry times
	if sf.entrypos != -1 {
		entry := make([]float64, len(sf.times))
		for t, w := range sf.entry {
			// NOTE(review): this indexing relies on every entry time being
			// strictly smaller than some element of sf.times (enforced in
			// scanData); otherwise SearchFloat64s could return len(sf.times)
			// and the sf.times[ii] access would panic — confirm the
			// invariant holds for all call paths.
			ii := sort.SearchFloat64s(sf.times, t)
			if t < sf.times[ii] {
				ii--
			}
			if ii >= 0 {
				entry[ii] += w
			}
		}
		rollback(entry)
		for i := 0; i < len(sf.nRisk); i++ {
			sf.nRisk[i] -= entry[i]
		}
	}
}

// compress removes times where no events occurred.
func (sf *SurvfuncRight) compress() {

	var ix []int
	for i := 0; i < len(sf.times); i++ {
		// Only retain events, except for the last point,
		// which is retained even if there are no events.
		if sf.nEvents[i] > 0 || i == len(sf.times)-1 {
			ix = append(ix, i)
		}
	}

	if len(ix) < len(sf.times) {
		// Shift the retained entries to the front, then truncate.
		for i, j := range ix {
			sf.times[i] = sf.times[j]
			sf.nEvents[i] = sf.nEvents[j]
			sf.nRisk[i] = sf.nRisk[j]
		}
		sf.times = sf.times[0:len(ix)]
		sf.nEvents = sf.nEvents[0:len(ix)]
		sf.nRisk = sf.nRisk[0:len(ix)]
	}
}

// fit computes the Kaplan-Meier product-limit estimate and its standard
// errors over the compressed time grid.
func (sf *SurvfuncRight) fit() {

	// Product-limit estimate: S(t) = prod(1 - d_i/n_i) over times <= t.
	sf.survProb = make([]float64, len(sf.times))
	x := float64(1)
	for i := range sf.times {
		x *= 1 - sf.nEvents[i]/sf.nRisk[i]
		sf.survProb[i] = x
	}

	sf.survProbSE = make([]float64, len(sf.times))
	x = 0
	if sf.weightpos == -1 {
		// Unweighted case: Greenwood's formula,
		// SE = S(t) * sqrt(sum d/(n*(n-d))).
		for i := range sf.times {
			d := sf.nEvents[i]
			n := sf.nRisk[i]
			x += d / (n * (n - d))
			sf.survProbSE[i] = math.Sqrt(x) * sf.survProb[i]
		}
	} else {
		// Weighted case.
		// NOTE(review): unlike the unweighted branch, this SE accumulates
		// d/n^2 and is not scaled by survProb — confirm this is the
		// intended weighted variance estimator rather than an omission.
		for i := range sf.times {
			d := sf.nEvents[i]
			n := sf.nRisk[i]
			x += d / (n * n)
			sf.survProbSE[i] = math.Sqrt(x)
		}
	}
}
duration/survfunc.go
0.640861
0.531574
survfunc.go
starcoder
package synthesizer

import (
	"errors"
)

// LookupOscillator is an oscillator that is gentler on the CPU than direct
// synthesis: it generates the waveform by looking values up in a wavetable.
type LookupOscillator struct {
	Oscillator
	Table      *Gtable
	SizeOverSr float64 // convenience variable: table length / sample rate
}

// NewLookupOscillator creates a new oscillator which performs a
// table-lookup to generate the required waveform. phase is a normalized
// [0,1) starting offset into the table.
func NewLookupOscillator(sr int, t *Gtable, phase float64) (*LookupOscillator, error) {
	if t == nil || len(t.data) == 0 {
		return nil, errors.New("Invalid table provided for lookup oscillator")
	}

	return &LookupOscillator{
		Oscillator: Oscillator{
			curfreq:  0.0,
			curphase: float64(Len(t)) * phase,
			incr:     0.0,
		},
		Table:      t,
		SizeOverSr: float64(Len(t)) / float64(sr),
	}, nil
}

// TruncateTick performs a lookup and truncates the value
// index down (if the index for lookup = 10.5, return index 10)
func (l *LookupOscillator) TruncateTick(freq float64) float64 {
	return l.BatchTruncateTick(freq, 1)[0]
}

// BatchTruncateTick returns a slice of samples from the oscillator of the requested length
func (l *LookupOscillator) BatchTruncateTick(freq float64, nframes int) []float64 {
	out := make([]float64, nframes)
	for i := 0; i < nframes; i++ {
		// Sample at the current phase, then advance it for the next frame.
		index := l.curphase
		if l.curfreq != freq {
			l.curfreq = freq
			l.incr = l.SizeOverSr * l.curfreq
		}
		curphase := l.curphase
		curphase += l.incr
		// Wrap the phase back into the table range.
		// NOTE(review): the wrap uses '>' so the phase may land exactly on
		// Len(l.Table); the subsequent l.Table.data[int(index)] access is
		// only safe if the table carries a guard point at that index —
		// confirm the Gtable layout.
		for curphase > float64(Len(l.Table)) {
			curphase -= float64(Len(l.Table))
		}
		for curphase < 0.0 {
			curphase += float64(Len(l.Table))
		}
		l.curphase = curphase
		out[i] = l.Table.data[int(index)]
	}
	return out
}

// InterpolateTick performs a lookup but interpolates the value if the
// requested index does not appear in the table.
func (l *LookupOscillator) InterpolateTick(freq float64) float64 {
	return l.BatchInterpolateTick(freq, 1)[0]
}

// BatchInterpolateTick performs a lookup for N frames, and interpolates the value if the
// requested index does not appear in the table.
func (l *LookupOscillator) BatchInterpolateTick(freq float64, nframes int) []float64 { out := make([]float64, nframes) for i := 0; i < nframes; i++ { baseIndex := int(l.curphase) nextIndex := baseIndex + 1 if l.curfreq != freq { l.curfreq = freq l.incr = l.SizeOverSr * l.curfreq } curphase := l.curphase frac := curphase - float64(baseIndex) val := l.Table.data[baseIndex] slope := l.Table.data[nextIndex] - val val += frac * slope curphase += l.incr for curphase > float64(Len(l.Table)) { curphase -= float64(Len(l.Table)) } for curphase < 0.0 { curphase += float64(Len(l.Table)) } l.curphase = curphase out[i] = val } return out }
synthesizer/lookuposcil.go
0.784649
0.471771
lookuposcil.go
starcoder
package models

import (
    i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)

// SimulationReportOverview models the overview section of an attack
// simulation and training campaign report. (Kiota-generated model.)
type SimulationReportOverview struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // List of recommended actions for a tenant to improve its security posture based on the attack simulation and training campaign attack type.
    recommendedActions []RecommendedActionable
    // Number of valid users in the attack simulation and training campaign.
    resolvedTargetsCount *int32
    // Summary of simulation events in the attack simulation and training campaign.
    simulationEventsContent SimulationEventsContentable
    // Summary of assigned trainings in the attack simulation and training campaign.
    trainingEventsContent TrainingEventsContentable
}

// NewSimulationReportOverview instantiates a new simulationReportOverview and sets the default values.
func NewSimulationReportOverview()(*SimulationReportOverview) {
    m := &SimulationReportOverview{
    }
    m.SetAdditionalData(make(map[string]interface{}));
    return m
}

// CreateSimulationReportOverviewFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateSimulationReportOverviewFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewSimulationReportOverview(), nil
}

// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SimulationReportOverview) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    } else {
        return m.additionalData
    }
}

// GetFieldDeserializers the deserialization information for the current model
func (m *SimulationReportOverview) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    // Collection field: each element is downcast to RecommendedActionable.
    res["recommendedActions"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateRecommendedActionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]RecommendedActionable, len(val))
            for i, v := range val {
                res[i] = v.(RecommendedActionable)
            }
            m.SetRecommendedActions(res)
        }
        return nil
    }
    res["resolvedTargetsCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetResolvedTargetsCount(val)
        }
        return nil
    }
    res["simulationEventsContent"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateSimulationEventsContentFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSimulationEventsContent(val.(SimulationEventsContentable))
        }
        return nil
    }
    res["trainingEventsContent"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateTrainingEventsContentFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetTrainingEventsContent(val.(TrainingEventsContentable))
        }
        return nil
    }
    return res
}

// GetRecommendedActions gets the recommendedActions property value. List of recommended actions for a tenant to improve its security posture based on the attack simulation and training campaign attack type.
func (m *SimulationReportOverview) GetRecommendedActions()([]RecommendedActionable) {
    if m == nil {
        return nil
    } else {
        return m.recommendedActions
    }
}

// GetResolvedTargetsCount gets the resolvedTargetsCount property value. Number of valid users in the attack simulation and training campaign.
func (m *SimulationReportOverview) GetResolvedTargetsCount()(*int32) {
    if m == nil {
        return nil
    } else {
        return m.resolvedTargetsCount
    }
}

// GetSimulationEventsContent gets the simulationEventsContent property value. Summary of simulation events in the attack simulation and training campaign.
func (m *SimulationReportOverview) GetSimulationEventsContent()(SimulationEventsContentable) {
    if m == nil {
        return nil
    } else {
        return m.simulationEventsContent
    }
}

// GetTrainingEventsContent gets the trainingEventsContent property value. Summary of assigned trainings in the attack simulation and training campaign.
func (m *SimulationReportOverview) GetTrainingEventsContent()(TrainingEventsContentable) {
    if m == nil {
        return nil
    } else {
        return m.trainingEventsContent
    }
}

// Serialize serializes information the current object
func (m *SimulationReportOverview) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if m.GetRecommendedActions() != nil {
        // Upcast the collection to Parsable for the writer.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetRecommendedActions()))
        for i, v := range m.GetRecommendedActions() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err := writer.WriteCollectionOfObjectValues("recommendedActions", cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("resolvedTargetsCount", m.GetResolvedTargetsCount())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("simulationEventsContent", m.GetSimulationEventsContent())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("trainingEventsContent", m.GetTrainingEventsContent())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}

// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SimulationReportOverview) SetAdditionalData(value map[string]interface{})() {
    if m != nil {
        m.additionalData = value
    }
}

// SetRecommendedActions sets the recommendedActions property value. List of recommended actions for a tenant to improve its security posture based on the attack simulation and training campaign attack type.
func (m *SimulationReportOverview) SetRecommendedActions(value []RecommendedActionable)() {
    if m != nil {
        m.recommendedActions = value
    }
}

// SetResolvedTargetsCount sets the resolvedTargetsCount property value. Number of valid users in the attack simulation and training campaign.
func (m *SimulationReportOverview) SetResolvedTargetsCount(value *int32)() {
    if m != nil {
        m.resolvedTargetsCount = value
    }
}

// SetSimulationEventsContent sets the simulationEventsContent property value. Summary of simulation events in the attack simulation and training campaign.
func (m *SimulationReportOverview) SetSimulationEventsContent(value SimulationEventsContentable)() {
    if m != nil {
        m.simulationEventsContent = value
    }
}

// SetTrainingEventsContent sets the trainingEventsContent property value. Summary of assigned trainings in the attack simulation and training campaign.
func (m *SimulationReportOverview) SetTrainingEventsContent(value TrainingEventsContentable)() {
    if m != nil {
        m.trainingEventsContent = value
    }
}
models/simulation_report_overview.go
0.674479
0.55923
simulation_report_overview.go
starcoder
package day20

import (
	"strings"

	"github.com/chigley/advent2020"
)

// Picture is a square grid of tiles being assembled into one image.
type Picture struct {
	// We keep state on what the tiles look like _and_ their IDs. Tile IDs alone
	// aren't sufficient because they can't tell us which translations were
	// performed.
	tiles   [][]Tile
	tileIDs [][]int

	// Width/height measured in number of tiles
	size int

	// Width/height of each individual tile
	tileSize int
}

// NewPicture returns an empty Picture of size x size tiles, each tile being
// tileSize x tileSize cells.
func NewPicture(size, tileSize int) Picture {
	tiles := make([][]Tile, size)
	for i := 0; i < size; i++ {
		tiles[i] = make([]Tile, size)
	}

	tileIDs := make([][]int, size)
	for i := 0; i < size; i++ {
		tileIDs[i] = make([]int, size)
	}

	return Picture{
		tiles:    tiles,
		tileIDs:  tileIDs,
		size:     size,
		tileSize: tileSize,
	}
}

// We fill in the picture from left to right, top to bottom. Moving in this
// direction, NextEmptySquare returns the position of the next square to be
// filled, or nil if it is already full.
func (p Picture) NextEmptySquare() *advent2020.XY {
	for y := 0; y < p.size; y++ {
		for x := 0; x < p.size; x++ {
			if p.tiles[y][x] == nil {
				return &advent2020.XY{X: x, Y: y}
			}
		}
	}
	return nil
}

// Fits reports whether tile t can be placed at pos: its left edge must match
// the right edge of the left neighbour and its top edge the bottom edge of
// the top neighbour (missing neighbours always match).
func (p Picture) Fits(t Tile, pos advent2020.XY) bool {
	leftNeighbour := p.TileAt(pos.Add(advent2020.XY{X: -1, Y: 0}))
	topNeighbour := p.TileAt(pos.Add(advent2020.XY{X: 0, Y: -1}))
	return (leftNeighbour == nil || leftNeighbour.RightSideMatchesLeftOf(t)) &&
		(topNeighbour == nil || topNeighbour.BottomSideMatchesTopOf(t))
}

// Clone returns a deep copy of the picture's tile and ID grids.
func (p Picture) Clone() Picture {
	tiles := make([][]Tile, p.size)
	for y := 0; y < p.size; y++ {
		tiles[y] = make([]Tile, p.size)
		copy(tiles[y], p.tiles[y])
	}

	tileIDs := make([][]int, p.size)
	for y := 0; y < p.size; y++ {
		tileIDs[y] = make([]int, p.size)
		copy(tileIDs[y], p.tileIDs[y])
	}

	return Picture{
		tiles:    tiles,
		tileIDs:  tileIDs,
		size:     p.size,
		tileSize: p.tileSize,
	}
}

// Place returns a copy of the picture with tile t (and its ID) placed at pos.
// The receiver is not modified.
func (p Picture) Place(t Tile, tileID int, pos advent2020.XY) Picture {
	ret := p.Clone()
	ret.tiles[pos.Y][pos.X] = t
	ret.tileIDs[pos.Y][pos.X] = tileID
	return ret
}

// InBounds reports whether pos lies inside the tile grid.
func (p Picture) InBounds(pos advent2020.XY) bool {
	return 0 <= pos.X && pos.X < p.size && 0 <= pos.Y && pos.Y < p.size
}

// TileAt returns the tile at pos, or nil when pos is out of bounds or empty.
func (p Picture) TileAt(pos advent2020.XY) Tile {
	if !p.InBounds(pos) {
		return nil
	}
	return p.tiles[pos.Y][pos.X]
}

// String renders the picture including tile borders and separators.
func (p Picture) String() string {
	return p.ImageString(true)
}

// ImageString renders the assembled picture. With includeBorders, each tile
// keeps its border rows/columns and tiles are separated by spaces and blank
// lines; otherwise the borders are stripped and tiles abut seamlessly.
// Unfilled squares are rendered as '?'.
func (p Picture) ImageString(includeBorders bool) string {
	var (
		minY, maxY int
		size       int
	)
	if includeBorders {
		minY, maxY = 0, p.tileSize
		size = p.tileSize
	} else {
		// Skip the first and last row/column of every tile.
		minY, maxY = 1, p.tileSize-1
		size = p.tileSize - 2
	}

	var b strings.Builder
	for tileY := 0; tileY < p.size; tileY++ {
		for y := minY; y < maxY; y++ {
			for tileX := 0; tileX < p.size; tileX++ {
				t := p.tiles[tileY][tileX]
				if t == nil {
					b.WriteString(strings.Repeat("?", size))
				} else {
					if includeBorders {
						b.WriteString(string(t[y]))
					} else {
						row := string(t[y])
						b.WriteString(row[1 : len(row)-1])
					}
				}
				if includeBorders && tileX != p.size-1 {
					b.WriteRune(' ')
				}
			}
			// No trailing newline after the very last rendered row.
			if !(tileY == p.size-1 && y == maxY-1) {
				b.WriteRune('\n')
			}
		}
		if includeBorders && tileY != p.size-1 {
			b.WriteRune('\n')
		}
	}
	return b.String()
}
day20/picture.go
0.620966
0.479321
picture.go
starcoder
package strftime import ( "bytes" "fmt" "io" "regexp" "time" ) const ( WEEK = time.Hour * 24 * 7 ) type FormatFunc func(t time.Time) string func weekNumberFormatter(t time.Time) string { start := time.Date(t.Year(), time.January, 1, 23, 0, 0, 0, time.UTC) week := 0 for start.Before(t) { week += 1 start = start.Add(WEEK) } return fmt.Sprintf("%02d", week) } // See http://docs.python.org/2/library/time.html#time.strftime var formats = map[string]FormatFunc{ "%a": func(t time.Time) string { // Locale’s abbreviated weekday name return t.Format("Mon") }, "%A": func(t time.Time) string { // Locale’s full weekday name return t.Format("Monday") }, "%b": func(t time.Time) string { // Locale’s abbreviated month name return t.Format("Jan") }, "%B": func(t time.Time) string { // Locale’s full month name return t.Format("January") }, "%c": func(t time.Time) string { // Locale’s appropriate date and time representation return t.Format(time.RFC1123) }, "%d": func(t time.Time) string { // Day of the month as a decimal number [01,31] return t.Format("02") }, "%H": func(t time.Time) string { // Hour (24-hour clock) as a decimal number [00,23] return t.Format("15") }, "%I": func(t time.Time) string { // Hour (12-hour clock) as a decimal number [01,12] return t.Format("3") }, "%j": func(t time.Time) string { start := time.Date(t.Year(), time.January, 1, 0, 0, 0, 0, time.UTC) day := int(t.Sub(start).Hours()/24) + 1 return fmt.Sprintf("%03d", day) }, "%m": func(t time.Time) string { // Month as a decimal number [01,12] return t.Format("01") }, "%M": func(t time.Time) string { // Minute as a decimal number [00,59] return t.Format("04") }, "%p": func(t time.Time) string { // Locale’s equivalent of either AM or PM return t.Format("PM") }, "%S": func(t time.Time) string { // Second as a decimal number [00,61] return t.Format("05") }, "%U": weekNumberFormatter, // Week number of the year "%W": weekNumberFormatter, // Week number of the year "%w": func(t time.Time) string { // Weekday as a 
decimal number return fmt.Sprintf("%d", t.Weekday()) }, "%x": func(t time.Time) string { // Locale’s appropriate date representation return t.Format("01/02/06") }, "%X": func(t time.Time) string { // Locale’s appropriate time representation return t.Format("15:04:05") }, "%y": func(t time.Time) string { // Year without century as a decimal number [00,99] return t.Format("06") }, "%Y": func(t time.Time) string { // Year with century as a decimal number return t.Format("2006") }, "%Z": func(t time.Time) string { // Time zone name (no characters if no time zone exists) return t.Format("MST") }, } var ( // fmtRe = regexp.MustCompile("%([%aAbBcdHIjmMpSUwWxXyYZ]|[1-9]n)") fmtRe = initFormatRegexp() fmtBackquoteRe = initFormatBackquoteRegexp() ) func initFormatRegexp() *regexp.Regexp { var buf bytes.Buffer buf.WriteString("%([%") for format, _ := range formats { buf.WriteString(regexp.QuoteMeta(format[1:])) } buf.WriteString("]|[1-9]n)") re := buf.String() return regexp.MustCompile(re) } func initFormatBackquoteRegexp() *regexp.Regexp { var buf bytes.Buffer buf.WriteString("%([^") for format, _ := range formats { buf.WriteString(regexp.QuoteMeta(format[1:])) } buf.WriteString("1-9]|[1-9][^n])") re := buf.String() return regexp.MustCompile(re) } // A load from pkg/time/format.go of golang source code. // formatNano appends a fractional second, as nanoseconds, to b // and returns the result. 
func formatNano(nanosec uint, n int, trim bool) []byte { u := nanosec var buf [9]byte for start := len(buf); start > 0; { start-- buf[start] = byte(u%10 + '0') u /= 10 } if n > 9 { n = 9 } if trim { for n > 0 && buf[n-1] == '0' { n-- } if n == 0 { return buf[:0] } } return buf[:n] } func formatNanoForMatch(match string, t time.Time) string { // format nanosecond for a match format %[1-9]n size := int(match[1] - '0') return string(formatNano(uint(t.Nanosecond()), size, false)) } // repl replaces % directives with right time func repl(match string, t time.Time) string { if match == "%%" { return "%" } formatFunc, ok := formats[match] if ok { return formatFunc(t) } return formatNanoForMatch(match, t) } // Format return string with % directives expanded. // Will return error on unknown directive. func Format(format string, t time.Time) string { fn := func(match string) string { return repl(match, t) } return fmtRe.ReplaceAllStringFunc(format, fn) } func FormatTo(w io.Writer, format string, t time.Time) (n int, err error) { result := Format(format, t) return w.Write([]byte(result)) } type Formatter struct { format string strFormat string formatFunc func(t time.Time) []interface{} } func NewFormatter(format string) *Formatter { f := func(match string) string { if match == "%%" { return match } return "%" + match } strFormat := fmtBackquoteRe.ReplaceAllStringFunc(format, f) size := 0 f1 := func(match string) string { if match == "%%" { return match } size++ return "%s" } strFormat = fmtRe.ReplaceAllStringFunc(strFormat, f1) funs := make([]FormatFunc, 0, size) f2 := func(match string) string { if match == "%%" { return match } f, ok := formats[match] if ok { funs = append(funs, f) } else { f := func(t time.Time) string { return formatNanoForMatch(match, t) } funs = append(funs, f) } return match } fmtRe.ReplaceAllStringFunc(format, f2) formatFunc := func(t time.Time) []interface{} { result := make([]interface{}, 0, len(funs)) for _, f := range funs { result = 
append(result, f(t)) } return result } return &Formatter{ format: format, strFormat: strFormat, formatFunc: formatFunc, } } func (self *Formatter) Format(t time.Time) string { return fmt.Sprintf(self.strFormat, self.formatFunc(t)...) } func (self *Formatter) FormatTo(w io.Writer, t time.Time) (n int, err error) { return fmt.Fprintf(w, self.strFormat, self.formatFunc(t)...) }
vendor/github.com/hhkbp2/go-strftime/strftime.go
0.610105
0.478224
strftime.go
starcoder
package gotest import "reflect" type Assert interface { ShouldBeEqualTo(expected interface{}) ShouldNotBeEqualTo(expected interface{}) ShouldBeTrue() ShouldBeFalse() ShouldBeNil() ShouldNotBeNil() } type assert struct { objectName string actual interface{} s *scenario } // ShouldBeEqualTo checks if the func (a *assert) ShouldBeEqualTo(expected interface{}) { a.s.Logf("Then I expect %s should be equal to %v\n", a.objectName, expected) if !reflect.DeepEqual(expected, a.actual) { logWithCaller(2, "Assertion failed: expected value %v and actual value %v are not equal\n", expected, a.actual) a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) ShouldNotBeEqualTo(expected interface{}) { a.s.Logf("Then I expect %s should not be equal to %v\n", a.objectName, expected) if reflect.DeepEqual(expected, a.actual) { logWithCaller(2, "Assertion failed: expected value %v and actual value %v are equal\n", expected, a.actual) a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) ShouldBeTrue() { a.s.Logf("Then I expect %s should be True\n", a.objectName) b, ok := a.actual.(bool) if !ok { logWithCaller(2, "Assertion failed: the actual value passed is not boolean, but %T\n", a.actual) } if !b { logWithCaller(2, "Assertion failed: expected 'true' but it is 'false'\n") a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) ShouldBeFalse() { a.s.Logf("Then I expect %s should be False\n", a.objectName) b, ok := a.actual.(bool) if !ok { logWithCaller(2, "Assertion failed: the actual value passed is not boolean, but %T\n", a.actual) } if b { logWithCaller(2, "Assertion failed: expected 'false' but it is 'true'\n") a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) ShouldBeNil() { a.s.Logf("Then I expect %s should be Nil\n", a.objectName) if !a.isNil(a.actual) { logWithCaller(2, 
"Assertion failed: expected 'nil' value but it is not 'nil'\n") a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) ShouldNotBeNil() { a.s.Logf("Then I expect %s should not be Nil\n", a.objectName) if a.isNil(a.actual) { logWithCaller(2, "Assertion failed: expected 'not nil' value but it is a 'nil'\n") a.s.t.Fail() if !a.s.continueOnAssertionFailed { panic("assertion failed") } return } } func (a *assert) isNil(v interface{}) bool { if v == nil { return true } switch reflect.TypeOf(v).Kind() { case reflect.Map, reflect.Chan, reflect.Slice, reflect.Ptr, reflect.Array, reflect.Interface, reflect.Func: return reflect.ValueOf(v).IsNil() default: logWithCaller(3, "passed object can not be tested for nil'ness due to its type: %T\n", v) if !a.s.continueOnAssertionFailed { panic("assertion failed") } } return false }
assert.go
0.682679
0.648355
assert.go
starcoder
// Package matcher provides a fluent, boolean-combinable matching API on
// top of an iNamik lexer.
package matcher

import (
	"github.com/iNamik/go_container/stack"
	"github.com/iNamik/go_lexer"
)

// Matcher is the entry point of the fluent matching API. Match* methods
// consume input from the underlying lexer and record a boolean result that
// is combined via the MatcherOperator returned from each call.
type Matcher interface {
	// MatchZeroOrOneBytes consumes the next rune if it matches, always returning true
	MatchZeroOrOneBytes([]byte) MatcherOperator

	// MatchZeroOrOneRunes consumes the next rune if it matches, always returning true
	MatchZeroOrOneRunes([]rune) MatcherOperator

	// MatchZeroOrOneRune consumes the next rune if it matches, always returning true
	MatchZeroOrOneRune(rune) MatcherOperator

	// MatchZeroOrOneFunc consumes the next rune if it matches, always returning true
	MatchZeroOrOneFunc(lexer.MatchFn) MatcherOperator

	// MatchZeroOrMoreBytes consumes a run of matching runes, always returning true
	MatchZeroOrMoreBytes([]byte) MatcherOperator

	// MatchZeroOrMoreRunes consumes a run of matching runes, always returning true
	MatchZeroOrMoreRunes([]rune) MatcherOperator

	// MatchZeroOrMoreFunc consumes a run of matching runes, always returning true
	MatchZeroOrMoreFunc(lexer.MatchFn) MatcherOperator

	// MatchOneBytes consumes the next rune if its in the list of bytes
	MatchOneBytes([]byte) MatcherOperator

	// MatchOneRunes consumes the next rune if its in the list of bytes
	MatchOneRunes([]rune) MatcherOperator

	// MatchOneRune consumes the next rune if it matches
	MatchOneRune(rune) MatcherOperator

	// MatchOneFunc consumes the next rune if it matches
	MatchOneFunc(lexer.MatchFn) MatcherOperator

	// MatchOneOrMoreBytes consumes a run of matching runes
	MatchOneOrMoreBytes([]byte) MatcherOperator

	// MatchOneOrMoreRunes consumes a run of matching runes
	MatchOneOrMoreRunes([]rune) MatcherOperator

	// MatchOneOrMoreFunc consumes a run of matching runes
	MatchOneOrMoreFunc(lexer.MatchFn) MatcherOperator

	// MatchMinMaxBytes consumes a specified run of matching runes
	MatchMinMaxBytes([]byte, int, int) MatcherOperator

	// MatchMinMaxRunes consumes a specified run of matching runes
	MatchMinMaxRunes([]rune, int, int) MatcherOperator

	// MatchMinMaxFunc consumes a specified run of matching runes
	MatchMinMaxFunc(lexer.MatchFn, int, int) MatcherOperator

	// NonMatchZeroOrOneBytes consumes the next rune if it does not match, always returning true
	NonMatchZeroOrOneBytes([]byte) MatcherOperator

	// NonMatchZeroOrOneRunes consumes the next rune if it does not match, always returning true
	NonMatchZeroOrOneRunes([]rune) MatcherOperator

	// NonMatchZeroOrOneFunc consumes the next rune if it does not match, always returning true
	NonMatchZeroOrOneFunc(lexer.MatchFn) MatcherOperator

	// NonMatchZeroOrMoreBytes consumes a run of non-matching runes, always returning true
	NonMatchZeroOrMoreBytes([]byte) MatcherOperator

	// NonMatchZeroOrMoreRunes consumes a run of non-matching runes, always returning true
	NonMatchZeroOrMoreRunes([]rune) MatcherOperator

	// NonMatchZeroOrMoreFunc consumes a run of non-matching runes, always returning true
	NonMatchZeroOrMoreFunc(lexer.MatchFn) MatcherOperator

	// NonMatchOneBytes consumes the next rune if its NOT in the list of bytes
	NonMatchOneBytes([]byte) MatcherOperator

	// NonMatchOneRunes consumes the next rune if its NOT in the list of bytes
	NonMatchOneRunes([]rune) MatcherOperator

	// NonMatchOneFunc consumes the next rune if it does NOT match
	NonMatchOneFunc(lexer.MatchFn) MatcherOperator

	// NonMatchOneOrMoreBytes consumes a run of non-matching runes
	NonMatchOneOrMoreBytes([]byte) MatcherOperator

	// NonMatchOneOrMoreRunes consumes a run of non-matching runes
	NonMatchOneOrMoreRunes([]rune) MatcherOperator

	// NonMatchOneOrMoreFunc consumes a run of non-matching runes
	NonMatchOneOrMoreFunc(lexer.MatchFn) MatcherOperator

	// MatchEOF tries to match the next rune against RuneEOF
	MatchEOF() MatcherOperator

	// Begin begins a new grouping that is expected to match (i.e required)
	Begin() Matcher

	// End ends a grouping. NOTE You are expected to call one of the MatcherEnd
	// functions in order to apply the result of the grouping to your current result.
	End() MatcherEnd

	// EndMatchOne performs End(), followed by MatchOne()
	EndMatchOne() MatcherOperator

	// EndMatchZeroOrOne performs End(), followed by MatchZeroOrOne
	EndMatchZeroOrOne() MatcherOperator

	// Result returns the final result of the matcher, resetting the
	// matcher state if the result is false.
	Result() bool

	// Reset resets the state of the matcher
	Reset() Matcher
}

// MatcherEnd applies the result of a just-closed grouping to the current
// matcher state.
type MatcherEnd interface {
	// MatchOne
	MatchOne() MatcherOperator

	// MatchZeroOrOne
	MatchZeroOrOne() MatcherOperator
}

// MatcherOperator combines the current matcher state with the next operand.
type MatcherOperator interface {
	// And Performs a logical 'and' between the current matcher state and the
	// next operand. Short-circuit logic is performed, whereby the next operand
	// will not actually be executed if the current matcher state is already
	// false
	And() Matcher

	// Or Performs a logical 'or' between the current matcher result and the
	// next operand. Short-circuit logic is performed, whereby the next operand
	// will not actually be executed if the current matcher state is already
	// true
	Or() Matcher

	// AndBegin performs an And(), followed by a Begin()
	AndBegin() Matcher

	// OrBegin performs an Or(), followed by a Begin()
	OrBegin() Matcher

	// End ends a grouping. NOTE You are expected to call one of the MatcherEnd
	// functions in order to apply the result of the grouping to your current result.
	End() MatcherEnd

	// EndMatchOne performs End(), followed by MatchOne()
	EndMatchOne() MatcherOperator

	// EndMatchZeroOrOne performs End(), followed by MatchZeroOrOne
	EndMatchZeroOrOne() MatcherOperator

	// Result returns the final result of the matcher, resetting the
	// matcher state if the result is false.
	Result() bool
}

// New creates a new Matcher against the specified Lexer.
// (matcher and matcherState are unexported types defined elsewhere in
// this package.)
func New(l lexer.Lexer) Matcher {
	m := &matcher{
		lexer: l,
		stack: stack.New(4), // 4 is just a nice number that seems appropriate
		state: &matcherState{},
	}
	m.Reset()
	return m
}
matcher.go
0.613352
0.494812
matcher.go
starcoder
package data type Data struct { data map[string][]string } func New() *Data { return &Data{ data: make(map[string][]string), } } // Keys returns all data names // Don't rely on it's order! func (d *Data) Keys() []string { keys := []string{} for k := range d.data { keys = append(keys, k) } return keys } func (d *Data) Exists(name string) bool { if d.data == nil { return false } _, ok := d.data[name] return ok } func (d *Data) GetAll(name string) []string { if d.Exists(name) { return d.data[name] } return []string{} } func (d *Data) Get(name string) string { if d.Exists(name) { if len(d.data[name]) == 1 { return d.data[name][0] } } return "" } func (d *Data) IsTrue(name string) bool { return d.Get(name) == "true" } func (d *Data) IsFalse(name string) bool { return d.Get(name) == "false" } // PickAll gets element and deletes it afterwards func (d *Data) PickAll(name string) []string { defer d.Delete(name) return d.GetAll(name) } // Pick gets element and deletes it afterwards func (d *Data) Pick(name string) string { defer d.Delete(name) return d.Get(name) } // Set sets (overwrites) values for name func (d *Data) Set(name string, value ...string) *Data { d.data[name] = value return d } // Add adds values for name func (d *Data) Add(name string, value ...string) *Data { for _, v := range value { d.data[name] = append(d.data[name], v) } return d } func (d *Data) SetTrue(name string) *Data { d.Set(name, "true") return d } func (d *Data) SetFalse(name string) *Data { d.Set(name, "false") return d } func (d *Data) Delete(name string) { if d.Exists(name) { delete(d.data, name) } } func (d *Data) Raw() map[string][]string { return d.data } func (d *Data) RawEnhanced() map[string]interface{} { n := make(map[string]interface{}) for k, v := range d.data { if len(v) == 1 { switch v[0] { case "true": n[k] = true case "false": n[k] = false default: n[k] = v[0] } } else { n[k] = v } } return n } // Merge merges p2 into p.data. // Later values overwrite previous ones. 
func (d *Data) Merge(p2 ...*Data) { for _, pp := range p2 { if pp != nil && pp.data != nil { for k, v := range pp.data { d.Set(k, v...) } } } } // Filter filters f from d.data func (d *Data) Filter(f interface{}) { for k := range d.data { keep := false switch f.(type) { case string: if k == f.(string) { keep = true } case []string: for _, k2 := range f.([]string) { if k2 == k { keep = true break } } case *Data: for _, k2 := range f.(*Data).Keys() { if k2 == k { keep = true break } } default: panic("unsupported type") } if !keep { d.Delete(k) } } } func Filter(d *Data, f interface{}) *Data { dn := New() dn.Merge(d) dn.Filter(f) return dn } // Merge merges data objects. // Later values overwrite previous ones. func Merge(data ...*Data) *Data { newData := New() for _, p := range data { if p != nil && p.data != nil { for k, v := range p.data { newData.Set(k, v...) } } } return newData } func ToData(p map[string][]string) *Data { np := New() for k, v := range p { np.Set(k, v...) } return np }
Godeps/_workspace/src/github.com/mattes/go-collect/data/data.go
0.659076
0.455441
data.go
starcoder
package vial import ( "sync" "strconv" "strings" "github.com/google/uuid" ) var once sync.Once var pathParamMatcherMutex = new(sync.RWMutex) var pathParamMatchers = map[string]*PathParamMatcher{} // PathParamCoercer is a function which takes a raw path param string value and // converts it to the expected value. type PathParamCoercer func(string) (interface{}, error) // PathParamsMatcher is a matcher for a path parameter specified in the // <type:var> part of a route. type PathParamMatcher struct { // Identifiers is a list of all the aliases that this path parameter, // matches in a route definition such as <int:id> where int is an // identifier. Identifiers []string // RegexString is a string containing a regex that matches this type of // PathParam. RegexString string // Coercer as specified above takes a string value and returns it's // coerced value. Coercer PathParamCoercer } func (self PathParamMatcher) prefix() string { return strings.ToLower(self.Identifiers[0]) } // AddPathParamMatcher adds a new path param matcher to the global registry. func AddPathParamMatcher(newMatcher *PathParamMatcher) { pathParamMatcherMutex.Lock() defer pathParamMatcherMutex.Unlock() for _, identifier := range newMatcher.Identifiers { pathParamMatchers[strings.ToLower(identifier)] = newMatcher } } // GetPathParamMatcher retrieves a PathParamMatcher for a given identifier. func GetPathParamMatcher(identifier string) (*PathParamMatcher, bool) { pathParamMatcherMutex.RLock() defer pathParamMatcherMutex.RUnlock() in, ok := pathParamMatchers[identifier] return in, ok } // StringPathParamMatcher is the most basic PathParamMatcher that simply // matches any non-forward-slash character. It is also the default behaviour. var StringPathParamMatcher = &PathParamMatcher{ Identifiers: []string{"string", ""}, RegexString: `[^\/\\]+`, Coercer: func(stringVal string) (interface{}, error) { return stringVal, nil }, } // IntPathParamMatcher matches only whole integers. 
var IntPathParamMatcher = &PathParamMatcher{ Identifiers: []string{"int", "integer"}, RegexString: `[0-9]+`, Coercer: func(stringVal string) (interface{}, error) { return strconv.Atoi(stringVal) }, } // FloatPathParamMatcher matches only whole floats numbers. Floats are defined // as having at least a decimal value. var FloatPathParamMatcher = &PathParamMatcher{ Identifiers: []string{"float"}, RegexString: `[0-9]*\.[0-9]+`, Coercer: func(stringVal string) (interface{}, error) { return strconv.ParseFloat(stringVal, 64) }, } // UUIDPathParamMatcher matches the generic UUID format and uses google's UUID // library to convert the uuid string to a uuid.UUID. var UUIDPathParamMatcher = &PathParamMatcher{ Identifiers: []string{"uuid"}, RegexString: `[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?` + `[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}`, Coercer: func(stringVal string) (interface{}, error) { return uuid.Parse(stringVal) }, } func init() { once.Do(func() { AddPathParamMatcher(StringPathParamMatcher) AddPathParamMatcher(IntPathParamMatcher) AddPathParamMatcher(FloatPathParamMatcher) AddPathParamMatcher(UUIDPathParamMatcher) }) }
path_param_matcher.go
0.708918
0.44083
path_param_matcher.go
starcoder
package useful import ( "io/ioutil" "strconv" "strings" ) // Min : returns minimum of integers a and b func Min(a, b int) int { if a < b { return a } return b } // Max : returns maximum of integers a and b func Max(a, b int) int { if a > b { return a } return b } // Abs : returns absolute value of integer a func Abs(a int) int { if a >= 0 { return a } return -a } // Reverse : takes pointer to array of integers, reverse part between start and end func Reverse(input *[]int, start, end int) { for i, j := start, end-1; i < j; i, j = i+1, j-1 { (*input)[i], (*input)[j] = (*input)[j], (*input)[i] } } // CircularShift : takes pointer to array of integers, performs circular shift for 'shift' positions to the left func CircularShift(input *[]int, shift int) { shift = shift % len(*input) *input = append((*input)[shift:], (*input)[0:shift]...) } // CircularShiftRunes : takes pointer to array of runes, performs circular shift for 'shift' positions to the left func CircularShiftRunes(input *[]rune, shift int) { shift = shift % len(*input) *input = append((*input)[shift:], (*input)[0:shift]...) 
} // FileToString : reads file into a string func FileToString(fileName string) (inputStr string) { input, _ := ioutil.ReadFile(fileName) inputStr = strings.TrimSuffix(string(input), "\n") return } // StringToLines : Split string on \n, returns a list of strings func StringToLines(input string) (inputSplit []string) { inputSplit = strings.Split(input, "\n") return } // SplitOnWhitespace : Converts string to list of words func SplitOnWhitespace(input string) (inputSplit []string) { inputSplit = strings.Fields(input) return } // StringTo2DArray : creates a list of lists of strings from a string (splits on whitespaces) func StringTo2DArray(input string) (array2D [][]string) { lines := StringToLines(input) array2D = make([][]string, len(lines)) for i, l := range lines { array2D[i] = SplitOnWhitespace(l) } return } // StringsToIntsArr1D : accepts list of strings and returns list of integers func StringsToIntsArr1D(input []string) (output []int) { output = make([]int, len(input)) for i, v := range input { output[i], _ = strconv.Atoi(v) } return } // StringsToIntsArr2D : accepts 2D array of strings and returns 2D array of integers func StringsToIntsArr2D(input [][]string) (output [][]int) { output = make([][]int, len(input)) for i, l := range input { output[i] = make([]int, len(l)) for j, v := range l { output[i][j], _ = strconv.Atoi(v) } } return } // SliceIndex : returns index of first element in a slice with property 'predicate' in range [start : end] func SliceIndex(start, end int, predicate func(i int) bool) int { for i := start; i < end; i++ { if predicate(i) { return i } } return -1 }
useful/useful.go
0.765067
0.407392
useful.go
starcoder
package aoc2021

import (
	"fmt"
	"strings"

	"github.com/simonski/aoc/utils"
)

/*
--- Day 6: Lanternfish ---

Each lanternfish is modelled as a timer counting down the days until it
spawns: a fish at 0 resets to 6 and emits a new fish with timer 8 (the
newborn needs two extra days for its first cycle). Part 1 asks for the
population after 80 days, part 2 after 256 days — too many for the naive
simulation, hence the memoized counting in algo/count_children.
(Full puzzle text: https://adventofcode.com/2021/day/6)
*/

// Y2021D06P1 runs part 1 (80 days) against the test data using the naive
// per-fish simulation.
func (app *Application) Y2021D06P1() {
	bruteForceAttempt(80, DAY_2021_06_TEST_DATA)
	// algoAttempt(80, DAY_2021_06_TEST_DATA)
	// attempt1(256, DAY_2021_06_TEST_DATA)
}

// bruteForceAttempt simulates every fish individually for the given number
// of days and returns the final population. Memory grows with the
// population, so this is only viable for small day counts (part 1).
func bruteForceAttempt(days int, dataStr string) int {
	s := strings.ReplaceAll(dataStr, " ", "")
	data := utils.SplitDataToListOfInts(s, ",")
	fish := data
	fmt.Printf("hi, fish=%v\n", fish)
	for day := 0; day < days; day++ {
		new_fish := 0
		// Tick every existing fish; a timer going below 0 resets to 6 and
		// schedules one newborn (timer 8) for the end of the day.
		for index := 0; index < len(fish); index++ {
			f := fish[index]
			f -= 1
			fish[index] = f
			if f < 0 {
				new_fish += 1
				fish[index] = 6
			}
		}
		for index := 0; index < new_fish; index++ {
			fish = append(fish, 8)
		}
		fmt.Printf("Day[%v/%v] = %v\n", day, days, len(fish))
	}
	return len(fish)
}

// algo counts the final population without simulating: for each starting
// fish it counts descendants via the memoized recursion, then adds the
// original fish themselves.
func algo(days int, data []int) {
	total := 0
	depth := 0
	cache := make(map[int]int)
	for _, value := range data {
		// days-(value+1) is the number of days remaining after this fish's
		// first spawn.
		count := count_children(cache, depth, days-(value+1)) // 1 is this fish itself
		total += count
	}
	total += len(data)
	fmt.Printf("algo total=%v\n", total)
}

// count_children returns the number of fish (including the fish itself)
// produced by a fish created with `days` days remaining. Results are
// memoized per `days` value.
//
// NOTE(review): the cache uses 0 as its "absent" sentinel
// (`cache[days] != 0`). That is safe here because every cacheable result
// is >= 1 (days < 0 returns before the cache is written), but it would
// break if a zero count ever became cacheable — confirm before reusing
// this pattern.
func count_children(cache map[int]int, depth int, days int) int {
	if cache[days] != 0 {
		return cache[days]
	}
	if days < 0 {
		return 0
	}
	if days < 7 {
		// Not enough time left to spawn, so the only fish is this one.
		return 1
	}
	total := 0
	// Walk backwards over this fish's spawn days (every 7 days); each
	// spawn contributes itself plus its own descendants, which start
	// counting 9 days later (6-day reset + 2 extra + the spawn day).
	for test_day := days; test_day >= 0; test_day -= 7 {
		total += 1
		if test_day >= 0 {
			total += count_children(cache, depth+1, test_day-9)
		}
	}
	cache[days] = total
	return total
}

// Y2021D06P2 runs part 2 (256 days) against the real data using the
// memoized counting algorithm.
func (app *Application) Y2021D06P2() {
	s := strings.ReplaceAll(DAY_2021_06_DATA, " ", "")
	data := utils.SplitDataToListOfInts(s, ",")
	algo(256, data)
}

// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y2021D06P1Render() {
// }

// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y2021D06P2Render() {
// }

// Y2021D06 is the reflected entry point that runs both parts.
func (app *Application) Y2021D06() {
	app.Y2021D06P1()
	app.Y2021D06P2()
}
0.61878
0.636918
aoc2021_06.go
starcoder
package types

import (
	"time"

	"github.com/antihax/optional"
)

// IngestBudget is an ingest budget as returned by the v1 API, including
// server-assigned metadata and current usage.
type IngestBudget struct {
	// Creation timestamp in UTC in [RFC3339](https://tools.ietf.org/html/rfc3339) format.
	CreatedAt     time.Time `json:"createdAt"`
	CreatedByUser *UserInfo `json:"createdByUser"`
	// Last modification timestamp in UTC in [RFC3339](https://tools.ietf.org/html/rfc3339) format.
	ModifiedAt     time.Time `json:"modifiedAt"`
	ModifiedByUser *UserInfo `json:"modifiedByUser"`
	// Display name of the ingest budget.
	Name string `json:"name"`
	// Custom field value that is used to assign Collectors to the ingest budget.
	FieldValue string `json:"fieldValue"`
	// Capacity of the ingest budget, in bytes. It takes a few minutes for Collectors to stop collecting when capacity is reached. We recommend setting a soft limit that is lower than your needed hard limit.
	CapacityBytes int64 `json:"capacityBytes"`
	// Time zone of the reset time for the ingest budget. Follow the format in the [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
	// Reset time of the ingest budget in HH:MM format.
	ResetTime string `json:"resetTime"`
	// Description of the ingest budget.
	Description string `json:"description,omitempty"`
	// Action to take when ingest budget's capacity is reached. All actions are audited. Supported values are: * `stopCollecting` * `keepCollecting`
	Action string `json:"action"`
	// The threshold as a percentage of when an ingest budget's capacity usage is logged in the Audit Index.
	AuditThreshold int32 `json:"auditThreshold,omitempty"`
	// Unique identifier for the ingest budget.
	Id string `json:"id"`
	// Current usage since the last reset, in bytes.
	UsageBytes int64 `json:"usageBytes,omitempty"`
	// Status of the current usage. Can be `Normal`, `Approaching`, `Exceeded`, or `Unknown` (unable to retrieve usage).
	UsageStatus string `json:"usageStatus,omitempty"`
	// Number of collectors assigned to the ingest budget.
	NumberOfCollectors int64 `json:"numberOfCollectors,omitempty"`
}

// IngestBudgetDefinition is the client-supplied portion of an ingest
// budget, used when creating or updating one.
type IngestBudgetDefinition struct {
	// Display name of the ingest budget.
	Name string `json:"name"`
	// Custom field value that is used to assign Collectors to the ingest budget.
	FieldValue string `json:"fieldValue"`
	// Capacity of the ingest budget, in bytes. It takes a few minutes for Collectors to stop collecting when capacity is reached. We recommend setting a soft limit that is lower than your needed hard limit.
	CapacityBytes int64 `json:"capacityBytes"`
	// Time zone of the reset time for the ingest budget. Follow the format in the [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
	// Reset time of the ingest budget in HH:MM format.
	ResetTime string `json:"resetTime"`
	// Description of the ingest budget.
	Description string `json:"description,omitempty"`
	// Action to take when ingest budget's capacity is reached. All actions are audited. Supported values are: * `stopCollecting` * `keepCollecting`
	Action string `json:"action"`
	// The threshold as a percentage of when an ingest budget's capacity usage is logged in the Audit Index.
	AuditThreshold int32 `json:"auditThreshold,omitempty"`
}

// ListIngestBudgetsResponse is one page of ingest budgets.
type ListIngestBudgetsResponse struct {
	// List of ingest budgets.
	Data []IngestBudget `json:"data"`
	// Next continuation token.
	Next string `json:"next,omitempty"`
}

// ListIngestBudgetV1Opts holds optional paging parameters for listing
// ingest budgets.
type ListIngestBudgetV1Opts struct {
	Limit optional.Int32
	Token optional.String
}
service/cip/types/ingest_budget_v1_types.go
0.776114
0.408159
ingest_budget_v1_types.go
starcoder
package gosmonaut import ( "math" "sort" ) // binaryNodeEntityMap uses a binary search table for storing // entites. It is well suited in this case since we only have to sort it once // between the reads and writes. Also we avoid the memory overhead of storing // the IDs twice and instead just read them from the struct. The (fake-)generic // solution performs much better than it would using the OSMEntity interface. type binaryNodeEntityMap struct { buckets [][]Node n uint64 } func newBinaryNodeEntityMap(n int) *binaryNodeEntityMap { // Calculate the number of buckets, exponent defines max number of lookups nb := n / int(math.Pow(2, 20)) if nb < 1 { nb = 1 } // Calculate the bucket sizes. Leave a small array overhead since the // distribution is not completely even. var bs int if nb == 1 { bs = n } else { bs = int(float64(n/nb) * 1.05) } // Create the buckets buckets := make([][]Node, nb) for i := 0; i < nb; i++ { buckets[i] = make([]Node, 0, bs) } return &binaryNodeEntityMap{ buckets: buckets, n: uint64(nb), } } func (m *binaryNodeEntityMap) hash(id int64) uint64 { return uint64(id) % m.n } // Must not be called after calling prepare() func (m *binaryNodeEntityMap) add(e Node) { h := m.hash(e.ID) m.buckets[h] = append(m.buckets[h], e) } // Must be called between the last write and the first read func (m *binaryNodeEntityMap) prepare() { // Sort buckets for _, b := range m.buckets { sort.Slice(b, func(i, j int) bool { return b[i].ID < b[j].ID }) } } // Must not be called before calling prepare() func (m *binaryNodeEntityMap) get(id int64) (Node, bool) { h := m.hash(id) bucket := m.buckets[h] // Binary search (we can't use sort.Search as we use int64) lo := 0 hi := len(bucket) - 1 for lo <= hi { mid := (lo + hi) / 2 midID := bucket[mid].ID if midID < id { lo = mid + 1 } else if midID > id { hi = mid - 1 } else { return bucket[mid], true } } return Node{}, false } // binaryWayEntityMap uses a binary search table for storing // entites. 
It is well suited in this case since we only have to sort it once // between the reads and writes. Also we avoid the memory overhead of storing // the IDs twice and instead just read them from the struct. The (fake-)generic // solution performs much better than it would using the OSMEntity interface. type binaryWayEntityMap struct { buckets [][]Way n uint64 } func newBinaryWayEntityMap(n int) *binaryWayEntityMap { // Calculate the number of buckets, exponent defines max number of lookups nb := n / int(math.Pow(2, 20)) if nb < 1 { nb = 1 } // Calculate the bucket sizes. Leave a small array overhead since the // distribution is not completely even. var bs int if nb == 1 { bs = n } else { bs = int(float64(n/nb) * 1.05) } // Create the buckets buckets := make([][]Way, nb) for i := 0; i < nb; i++ { buckets[i] = make([]Way, 0, bs) } return &binaryWayEntityMap{ buckets: buckets, n: uint64(nb), } } func (m *binaryWayEntityMap) hash(id int64) uint64 { return uint64(id) % m.n } // Must not be called after calling prepare() func (m *binaryWayEntityMap) add(e Way) { h := m.hash(e.ID) m.buckets[h] = append(m.buckets[h], e) } // Must be called between the last write and the first read func (m *binaryWayEntityMap) prepare() { // Sort buckets for _, b := range m.buckets { sort.Slice(b, func(i, j int) bool { return b[i].ID < b[j].ID }) } } // Must not be called before calling prepare() func (m *binaryWayEntityMap) get(id int64) (Way, bool) { h := m.hash(id) bucket := m.buckets[h] // Binary search (we can't use sort.Search as we use int64) lo := 0 hi := len(bucket) - 1 for lo <= hi { mid := (lo + hi) / 2 midID := bucket[mid].ID if midID < id { lo = mid + 1 } else if midID > id { hi = mid - 1 } else { return bucket[mid], true } } return Way{}, false }
binary_entity_map.go
0.678966
0.477981
binary_entity_map.go
starcoder
package schemer import ( "fmt" "reflect" "strconv" "time" "github.com/BrobridgeOrg/schemer/types" ) func getStandardValue(data interface{}) interface{} { v := reflect.ValueOf(data) switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return v.Uint() case reflect.Float32, reflect.Float64: return v.Float() } return data } func getValue(def *Definition, data interface{}) interface{} { if !def.NotNull && data == nil { return nil } v := getStandardValue(data) // According to definition to convert value to what we want switch def.Type { case TYPE_INT64: return getIntegerValue(def, v) case TYPE_UINT64: return getUnsignedIntegerValue(def, v) case TYPE_FLOAT64: return getFloatValue(def, v) case TYPE_BOOLEAN: return getBoolValue(def, v) case TYPE_STRING: return getStringValue(def, v) case TYPE_TIME: return def.Info.(*types.Time).GetValue(v) case TYPE_BINARY: return getBinaryValue(def, v) } // Unknown type return v } func getIntegerValue(def *Definition, data interface{}) int64 { switch d := data.(type) { case int64: return d case uint64: return int64(d) case string: result, err := strconv.ParseInt(d, 10, 64) if err != nil { return 0 } return result case bool: if d { return int64(1) } else { return int64(0) } case float64: return int64(d) case time.Time: return d.Unix() } return 0 } func getUnsignedIntegerValue(def *Definition, data interface{}) uint64 { switch d := data.(type) { case int64: if d > 0 { return uint64(d) } return 0 case uint64: return d case string: result, err := strconv.ParseUint(d, 10, 64) if err != nil { return 0 } return result case bool: if d { return uint64(1) } else { return uint64(0) } case float64: return uint64(d) case time.Time: return uint64(d.Unix()) } return 0 } func getFloatValue(def *Definition, data interface{}) float64 { switch d := data.(type) { case int64: return float64(d) case uint64: return float64(d) 
case string: result, err := strconv.ParseFloat(d, 64) if err != nil { return 0 } return result case bool: if d { return float64(1) } else { return float64(0) } case float64: return d case time.Time: return float64(d.Unix()) } return 0 } func getBoolValue(def *Definition, data interface{}) bool { switch d := data.(type) { case int64: if d > 0 { return true } else { return false } case uint64: if d > 0 { return true } else { return false } case string: result, err := strconv.ParseBool(d) if err != nil { return false } return result case bool: return d case float64: if d > 0 { return true } else { return false } case time.Time: return true } return false } func getStringValue(def *Definition, data interface{}) string { switch d := data.(type) { case int64: return fmt.Sprintf("%d", d) case uint64: return fmt.Sprintf("%d", d) case string: return d case bool: return fmt.Sprintf("%t", d) case float64: return strconv.FormatFloat(d, 'f', -1, 64) case time.Time: return d.UTC().Format(time.RFC3339Nano) default: return fmt.Sprintf("%v", d) } } func getBinaryValue(def *Definition, data interface{}) []byte { switch d := data.(type) { case []byte: return d case string: return []byte(d) default: arr, ok := data.([]interface{}) if !ok { return []byte("") } val := make([]byte, len(arr)) for i, v := range arr { val[i] = byte(getUnsignedIntegerValue(def, v)) } return val } } func convert(sourceDef *Definition, destDef *Definition, data interface{}) interface{} { srcData := getValue(sourceDef, data) return getValue(destDef, srcData) }
convert.go
0.533641
0.437042
convert.go
starcoder
package arithmetic import ( "errors" "fmt" "gopkg.in/guregu/null.v3" "math" "math/rand" "time" ) type IntSlice []int type ISlice []interface{} // IsPrime determines whether a given number is prime (P31). func IsPrime(number int) bool { upperDivisor := int(math.Sqrt(float64(number))) + 1 for divisor := 2; divisor < upperDivisor; divisor++ { if number%divisor == 0 { return false } } return true } // Gcd determines the greatest common divisor of two positive integer numbers (P32). func Gcd(a int, b int) (int, error) { if a < 0 || b < 0 { return -1, errors.New("The given numbers are not positive.") } // Euclid's Algorithm if b == 0 { return a, nil } else { return Gcd(b, a%b) } } // IsCoprime determines if two positive integers are coprime (P33). func IsCoprime(a int, b int) (null.Bool, error) { if a < 0 || b < 0 { return null.BoolFromPtr(nil), errors.New("The given numbers are not positive.") } gcd, _ := Gcd(a, b) return null.BoolFrom(gcd == 1), nil } // TotientPhi calculates Euler's totient function phi(m) (P34). func TotientPhi(m int) (int, error) { if m < 1 { return -1, errors.New("The given number is not positive") } if m == 1 { return 1, nil } if IsPrime(m) { return m - 1, nil } coPrimes := 1 for i := 2; i < int(m/2)+1; i++ { if m%i != 0 { if coPrime, _ := IsCoprime(i, m); coPrime.ValueOrZero() { coPrimes++ } } } for i := int(m/2) + 1; i < m; i++ { if coPrime, _ := IsCoprime(i, m); coPrime.ValueOrZero() { coPrimes++ } } return coPrimes, nil } // PrimeFactors determine the prime factors of a given positive number (P35). 
func PrimeFactors(number int) IntSlice { if IsPrime(number) { return IntSlice{number} } primeFactors := IntSlice{} for number%2 == 0 { primeFactors = append(primeFactors, 2) number = int(number / 2) } for i := 3; i < int(math.Sqrt(float64(number)))+1; i += 2 { for number%i == 0 { primeFactors = append(primeFactors, i) number = int(number / i) } } if number > 2 { primeFactors = append(primeFactors, number) } return primeFactors } // PrimeFactorsMult returns the length-encoded prime factor list (P36). func PrimeFactorsMult(number int) []IntSlice { primeFactors := PrimeFactors(number) encoded, current, count := []IntSlice{}, primeFactors[0], 1 for _, factor := range primeFactors[1:] { if current != factor { encoded = append(encoded, IntSlice{count, current}) current, count = factor, 1 continue } count++ } encoded = append(encoded, IntSlice{count, current}) return encoded } // TotientPhi2 calculates Euler's totient function effectively (P37). func TotientPhi2(m int) (int, error) { if m < 1 { return -1, errors.New("The given number is not positive") } if m == 1 { return 1, nil } phi := 1.0 primeFactorsEncoded := PrimeFactorsMult(m) for _, elem := range primeFactorsEncoded { m, p := elem[0], elem[1] phi *= float64(p-1) * math.Pow(float64(p), float64(m-1)) } return int(phi), nil } // CompareTotient compare the two methods of calculating Euler's totient function (P38). 
func CompareTotient(numberOfTests int) { rand.Seed(time.Now().UnixNano()) m := 0 fmt.Println("--------------------------------------------") fmt.Printf("%s\t%10s\t%10s\n", "Argument", "TotientPhi", "TotientPhi2") fmt.Println("============================================") for i := 0; i < numberOfTests; i++ { m = rand.Intn(20000) // TotientPhi startTotientPhi := time.Now() TotientPhi(m) elapsedTotientPhi := time.Since(startTotientPhi) // TotientPhi2 startTotientPhi2 := time.Now() TotientPhi2(m) elapsedTotientPhi2 := time.Since(startTotientPhi2) // Print time fmt.Printf("%6v\t\t%10v\t%11v\n", m, elapsedTotientPhi, elapsedTotientPhi2) } fmt.Println("--------------------------------------------") } // PrimeList returns a list of prime numbers (P39). func PrimeList(lower int, upper int) IntSlice { // Sieve of Eratosthenes. array := make([]int, upper+1) for i := 2; i < int(math.Sqrt(float64(upper)))+1; i++ { if array[i] == 0 { j := i * i for j <= upper { array[j] = 1 j += i } } } primeList := IntSlice{} for i := lower - 1; i < upper+1; i++ { if array[i] == 0 { primeList = append(primeList, i) } } return primeList } // Goldbach computes Goldbach's Conjecture (P40). func Goldbach(number int) (IntSlice, error) { if number <= 2 { return nil, errors.New("The given number is smaller than 2.") } if number%2 == 1 { return nil, errors.New("The given number is odd.") } var prime int primeList := PrimeList(3, int(number/2)) for _, prime = range primeList { if IsPrime(number - prime) { break } } return IntSlice{prime, number - prime}, nil } // GoldbachList returns a list of Goldbach compositions (P41). func GoldbachList(lower int, upper int) ([]IntSlice, error) { goldbachList := []IntSlice{} if lower%2 == 1 { lower += 1 } for i := lower; i < upper+1; i = i + 2 { numbers, err := Goldbach(i) if err != nil { return nil, err } else { goldbachList = append(goldbachList, numbers) } } return goldbachList, nil }
Golang/pkg/arithmetic/arithmetic.go
0.708112
0.450541
arithmetic.go
starcoder
package cache import ( "github.com/disgoorg/disgo/discord" "github.com/disgoorg/snowflake/v2" "golang.org/x/exp/slices" ) // PolicyNone returns a policy that will never cache anything. func PolicyNone[T any](_ T) bool { return false } // PolicyAll returns a policy that will cache all entities. func PolicyAll[T any](_ T) bool { return true } // PolicyDefault returns the default cache policy. func PolicyDefault[T any](t T) bool { return PolicyAll(t) } // PolicyMembersInclude returns a policy that will only cache members of the given guilds. func PolicyMembersInclude(guildIDs ...snowflake.ID) Policy[discord.Member] { return func(member discord.Member) bool { return slices.Contains(guildIDs, member.GuildID) } } // PolicyMembersPending is a policy that will only cache members that are pending. func PolicyMembersPending(member discord.Member) bool { return member.Pending } // PolicyMembersInVoice returns a policy that will only cache members that are connected to an audio channel. func PolicyMembersInVoice(caches Caches) Policy[discord.Member] { return func(member discord.Member) bool { _, ok := caches.VoiceStates().Get(member.GuildID, member.User.ID) return ok } } // PolicyChannelInclude returns a policy that will only cache channels of the given types. func PolicyChannelInclude(channelTypes ...discord.ChannelType) Policy[discord.Channel] { return func(channel discord.Channel) bool { return slices.Contains(channelTypes, channel.Type()) } } // PolicyChannelExclude returns a policy that will not cache channels of the given types. func PolicyChannelExclude(channelTypes ...discord.ChannelType) Policy[discord.Channel] { return func(channel discord.Channel) bool { return !slices.Contains(channelTypes, channel.Type()) } } // Policy can be used to define your own policy for when entities should be cached. 
type Policy[T any] func(entity T) bool // Or allows you to combine the CachePolicy with another, meaning either of them needs to be true func (p Policy[T]) Or(policy Policy[T]) Policy[T] { return func(entity T) bool { return p(entity) || policy(entity) } } // And allows you to require both CachePolicy(s) to be true for the entity to be cached func (p Policy[T]) And(policy Policy[T]) Policy[T] { return func(entity T) bool { return p(entity) && policy(entity) } } // AnyPolicy is a shorthand for CachePolicy.Or(CachePolicy).Or(CachePolicy) etc. func AnyPolicy[T any](policies ...Policy[T]) Policy[T] { var policy Policy[T] for _, p := range policies { if policy == nil { policy = p continue } policy = policy.Or(p) } return policy } // AllPolicies is a shorthand for CachePolicy.And(CachePolicy).And(CachePolicy) etc. func AllPolicies[T any](policies ...Policy[T]) Policy[T] { var policy Policy[T] for _, p := range policies { if policy == nil { policy = p continue } policy = policy.And(p) } return policy }
cache/cache_policy.go
0.782247
0.402245
cache_policy.go
starcoder
package data import ( "bytes" "encoding/json" "fmt" "time" ) // Array is an array of Values. It can be assigned to Value interface. type Array []Value // Type returns TypeID of Array. It's always TypeArray. func (a Array) Type() TypeID { return TypeArray } func (a Array) asBool() (bool, error) { return false, castError(a.Type(), TypeBool) } func (a Array) asInt() (int64, error) { return 0, castError(a.Type(), TypeInt) } func (a Array) asFloat() (float64, error) { return 0, castError(a.Type(), TypeFloat) } func (a Array) asString() (string, error) { return "", castError(a.Type(), TypeString) } func (a Array) asBlob() ([]byte, error) { return nil, castError(a.Type(), TypeBlob) } func (a Array) asTimestamp() (time.Time, error) { return time.Time{}, castError(a.Type(), TypeTimestamp) } func (a Array) asArray() (Array, error) { return a, nil } func (a Array) asMap() (Map, error) { return nil, castError(a.Type(), TypeMap) } func (a Array) clone() Value { return a.Copy() } // String returns JSON representation of an Array. func (a Array) String() string { // the String return value is defined via the // default JSON serialization bytes, err := json.Marshal(a) if err != nil { return fmt.Sprintf("(unserializable array: %v)", err) } return string(bytes) } // UnmarshalJSON reconstructs an Array from JSON. func (a *Array) UnmarshalJSON(data []byte) error { var j []interface{} dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() if err := dec.Decode(&j); err != nil { return err } newArray, err := NewArray(j) if err != nil { return err } *a = newArray return nil } // Copy performs deep copy of an Array. The Array returned from this method can // safely be modified without affecting the original. func (a Array) Copy() Array { out := make(Array, len(a)) for idx, val := range a { out[idx] = val.clone() } return out } // Get returns value(s) from an array as addressed by the given path expression. // See Map.Get for details. 
func (a Array) Get(path Path) (Value, error) { return path.evaluate(a) } // TODO: support Set
data/array.go
0.661814
0.409221
array.go
starcoder
package main func main() { } /** * 双端队列 结构 队头、队尾均可入队 */ type MyCircularDeque struct { front, rear *node len, cap int } //节点 value 前后指针 type node struct { value int pre, next *node } /** Initialize your data structure here. Set the size of the deque to be k. */ func Constructor(k int) MyCircularDeque { return MyCircularDeque{ cap: k, } } /** Adds an item at the front of Deque. Return true if the operation is successful. */ func (this *MyCircularDeque) InsertFront(value int) bool { if this.len == this.cap { return false } n := &node{ value: value, } //如果长度为0 头、尾指向该元素 if this.len == 0 { this.front = n this.rear = n } else { n.next = this.front this.front.pre = n this.front = n } this.len++ return true } /** Adds an item at the rear of Deque. Return true if the operation is successful. */ func (this *MyCircularDeque) InsertLast(value int) bool { if this.len == this.cap { return false } n := &node{ value: value, } if this.len == 0 { this.front = n this.rear = n } else { n.pre = this.rear this.rear.next = n this.rear = n } this.len++ return true } /** Deletes an item from the front of Deque. Return true if the operation is successful. */ func (this *MyCircularDeque) DeleteFront() bool { if this.len == 0 { return false } if this.len == 1 { this.front, this.rear = nil, nil } else { this.front = this.front.next this.front.pre = nil } this.len-- return true } /** Deletes an item from the rear of Deque. Return true if the operation is successful. */ func (this *MyCircularDeque) DeleteLast() bool { if this.len == 0 { return false } if this.len == 1 { this.front, this.rear = nil, nil } else { this.rear = this.rear.pre this.rear.next = nil } this.len-- return true } /** Get the front item from the deque. */ func (this *MyCircularDeque) GetFront() int { if this.len == 0 { return -1 } return this.front.value } /** Get the last item from the deque. 
*/ func (this *MyCircularDeque) GetRear() int { if this.len == 0 { return -1 } return this.rear.value } /** Checks whether the circular deque is empty or not. */ func (this *MyCircularDeque) IsEmpty() bool { return this.len == 0 } /** Checks whether the circular deque is full or not. */ func (this *MyCircularDeque) IsFull() bool { return this.len == this.cap } /** * Your MyCircularDeque object will be instantiated and called as such: * obj := Constructor(k); * param_1 := obj.InsertFront(value); * param_2 := obj.InsertLast(value); * param_3 := obj.DeleteFront(); * param_4 := obj.DeleteLast(); * param_5 := obj.GetFront(); * param_6 := obj.GetRear(); * param_7 := obj.IsEmpty(); * param_8 := obj.IsFull(); */
Week 1/id_090/LeetCode_641_090.go
0.582135
0.435841
LeetCode_641_090.go
starcoder
package vehicle import ( "encoding/json" "fmt" "strconv" "strings" "time" "github.com/mitchellh/hashstructure" ) // List contains vehicles that were found during parsing. type List map[uint64]Vehicle // RegCountry represents a country of registration for a vehicle. type RegCountry int // List of allowed registration countries. const ( DK RegCountry = iota NO ) // String returns the string representation of the RegCountry. func (reg RegCountry) String() string { for key, val := range regCountryMap { if val == reg { return key } } return "DK" // Default. } // Type represents the overall type of vehicle, ie. car, trailer, van etc. type Type int // List of supported vehicle types. const ( Unknown Type = iota Car Bus Van Truck Trailer ) // String returns the string representation of the vehicle type. func (t Type) String() string { switch t { case Car: return "Car" case Bus: return "Bus" case Van: return "Van" case Truck: return "Truck" case Trailer: return "Trailer" default: return "Unknown" } } // TypeFromString returns the Type that matches the given string (case insensitive match). // If TypeFromString does not find a direct match, Type.Unknown is returned. func TypeFromString(str string) Type { switch strings.ToLower(str) { case "car": return Car case "bus": return Bus case "van": return Van case "truck": return Truck case "trailer": return Trailer default: return Unknown } } // Meta contains metadata for each vehicle. type Meta struct { Hash uint64 Source string Country RegCountry Ident uint64 LastUpdated time.Time Disabled bool } // Vehicle contains the core vehicle data that Autobot manages. // As vehicles are persisted in Redis / Google Memory Store, they should not contain pointers. type Vehicle struct { MetaData Meta `hash:"ignore"` Type Type RegNr string VIN string Brand string Model string FuelType string Variant string FirstRegDate time.Time } // Marshal converts the given Vehicle to a string using JSON encoding. 
func (v *Vehicle) Marshal() (string, error) { b, err := json.Marshal(v) if err != nil { return "", err } return string(b), nil } // Unmarshal converts the given string to a Vehicle using JSON decoding. func (v *Vehicle) Unmarshal(str string) error { return json.Unmarshal([]byte(str), v) } var regCountryMap = map[string]RegCountry{ "DK": DK, "NO": NO, } // RegCountryFromString takes a string and returns the matching country of registration. func RegCountryFromString(reg string) RegCountry { elem, ok := regCountryMap[reg] if ok { return elem } return DK // Default. } // GenHash generates a unique hash value of the vehicle. The hash is stored in the vehicle metadata. func (v *Vehicle) GenHash() error { v.MetaData.Hash, _ = hashstructure.Hash(v, nil) // hashstructure.Hash() does not cause errors. return nil } // String returns a stringified representation of the Vehicle data structure. func (v Vehicle) String() string { return v.FlexString("", " ") } // FlexString returns a stringified multi-line representation of the Vehicle data structure. func (v Vehicle) FlexString(lb, leftPad string) string { var txt strings.Builder fmt.Fprintf(&txt, "#%d (%s)%s", v.MetaData.Hash, DisabledAsString(v.MetaData.Disabled), lb) fmt.Fprintf(&txt, "%sCountry: %s%s", leftPad, v.MetaData.Country.String(), lb) fmt.Fprintf(&txt, "%sIdent: %d%s", leftPad, v.MetaData.Ident, lb) fmt.Fprintf(&txt, "%sRegNr: %s%s", leftPad, v.RegNr, lb) fmt.Fprintf(&txt, "%sVIN: %s%s", leftPad, v.VIN, lb) fmt.Fprintf(&txt, "%sBrand: %s%s", leftPad, v.Brand, lb) fmt.Fprintf(&txt, "%sModel: %s%s", leftPad, v.Model, lb) fmt.Fprintf(&txt, "%sVariant: %s%s", leftPad, v.Variant, lb) fmt.Fprintf(&txt, "%sFuelType: %s%s", leftPad, v.FuelType, lb) fmt.Fprintf(&txt, "%sRegDate: %s%s", leftPad, v.FirstRegDate.Format("2006-01-02"), lb) return txt.String() } // Slice returns most properties from Vehicle as a slice of strings, intended for use in CSV conversions. 
func (v Vehicle) Slice() [10]string { hash := strconv.FormatUint(v.MetaData.Hash, 10) country := v.MetaData.Country.String() ident := strconv.FormatUint(v.MetaData.Ident, 10) firstReg := v.FirstRegDate.Format("2006-01-02") props := [10]string{hash, country, ident, v.RegNr, v.VIN, v.Brand, v.Model, v.Variant, v.FuelType, firstReg} return props } // PrettyBrandName titles-cases the given brand name unless its length is 3 or below, in which case everything is // uppercased. This should handle most cases. func PrettyBrandName(brand string) string { if len(brand) <= 3 { return strings.ToUpper(brand) } return strings.Title(strings.ToLower(brand)) } // PrettyFuelType normalizes fuel-type by capitalizing the first letter only. func PrettyFuelType(ft string) string { return strings.Title(strings.ToLower(ft)) } // HashAsKey converts the given hash value into a string that can be used as key in the vehicle store. func HashAsKey(hash uint64) string { return strconv.FormatUint(hash, 10) } // DisabledAsString returns a stringified version of the Disabled field. func DisabledAsString(status bool) string { if status { return "Disabled" } return "Active" }
vehicle/vehicle.go
0.78572
0.429489
vehicle.go
starcoder
// Package jtypes (golint) package jtypes import ( "reflect" ) // Resolve (golint) func Resolve(v reflect.Value) reflect.Value { for { switch v.Kind() { case reflect.Interface, reflect.Ptr: if !v.IsNil() { v = v.Elem() break } fallthrough default: return v } } } // IsBool (golint) func IsBool(v reflect.Value) bool { return v.Kind() == reflect.Bool || resolvedKind(v) == reflect.Bool } // IsString (golint) func IsString(v reflect.Value) bool { return v.Kind() == reflect.String || resolvedKind(v) == reflect.String } // IsNumber (golint) func IsNumber(v reflect.Value) bool { return isFloat(v) || isInt(v) || isUint(v) } // IsCallable (golint) func IsCallable(v reflect.Value) bool { v = Resolve(v) return v.IsValid() && (v.Type().Implements(TypeCallable) || reflect.PtrTo(v.Type()).Implements(TypeCallable)) } // IsArray (golint) func IsArray(v reflect.Value) bool { return isArrayKind(v.Kind()) || isArrayKind(resolvedKind(v)) } func isArrayKind(k reflect.Kind) bool { return k == reflect.Slice || k == reflect.Array } // IsArrayOf (golint) func IsArrayOf(v reflect.Value, hasType func(reflect.Value) bool) bool { if !IsArray(v) { return false } v = Resolve(v) for i := 0; i < v.Len(); i++ { if !hasType(v.Index(i)) { return false } } return true } // IsMap (golint) func IsMap(v reflect.Value) bool { return resolvedKind(v) == reflect.Map } // IsStruct (golint) func IsStruct(v reflect.Value) bool { return resolvedKind(v) == reflect.Struct } // AsBool (golint) func AsBool(v reflect.Value) (bool, bool) { v = Resolve(v) switch { case IsBool(v): return v.Bool(), true default: return false, false } } // AsString (golint) func AsString(v reflect.Value) (string, bool) { v = Resolve(v) switch { case IsString(v): return v.String(), true default: return "", false } } // AsNumber (golint) func AsNumber(v reflect.Value) (float64, bool) { v = Resolve(v) switch { case isFloat(v): return v.Float(), true case isInt(v), isUint(v): return v.Convert(typeFloat64).Float(), true default: return 0, false 
} } // AsCallable (golint) func AsCallable(v reflect.Value) (Callable, bool) { v = Resolve(v) if v.IsValid() && v.Type().Implements(TypeCallable) && v.CanInterface() { return v.Interface().(Callable), true } if v.IsValid() && reflect.PtrTo(v.Type()).Implements(TypeCallable) && v.CanAddr() && v.Addr().CanInterface() { return v.Addr().Interface().(Callable), true } return nil, false } func isInt(v reflect.Value) bool { return isIntKind(v.Kind()) || isIntKind(resolvedKind(v)) } func isIntKind(k reflect.Kind) bool { switch k { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return true default: return false } } func isUint(v reflect.Value) bool { return isUintKind(v.Kind()) || isUintKind(resolvedKind(v)) } func isUintKind(k reflect.Kind) bool { switch k { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return true default: return false } } func isFloat(v reflect.Value) bool { return isFloatKind(v.Kind()) || isFloatKind(resolvedKind(v)) } func isFloatKind(k reflect.Kind) bool { switch k { case reflect.Float32, reflect.Float64: return true default: return false } } func resolvedKind(v reflect.Value) reflect.Kind { return Resolve(v).Kind() }
jtypes/funcs.go
0.611614
0.458531
funcs.go
starcoder
package types import ( "fmt" "github.com/ethereum/go-ethereum/common/hexutil" ) // SfcConfig defines the current SFC contract configuration. type SfcConfig struct { // minValidatorStake is the minimal amount of tokens required // to register a validator account with the default self stake. MinValidatorStake hexutil.Big // maxDelegatedRatio is the maximal ratio between a validator self stake // and the sum of all the received stakes of the validator. // The value is provided as a multiplier number with 18 decimals. MaxDelegatedRatio hexutil.Big // minLockupDuration is the lowest possible number of seconds // a delegation can be locked for. MinLockupDuration hexutil.Big // maxLockupDuration is the highest possible number of seconds // a delegation can be locked for. MaxLockupDuration hexutil.Big // withdrawalPeriodEpochs is the minimal number of epochs // between an un-delegation and corresponding withdraw request. // The delay is enforced on withdraw call. WithdrawalPeriodEpochs hexutil.Big // withdrawalPeriodTime is the minimal number of seconds // between an un-delegation and corresponding withdraw request. // The delay is enforced on withdraw call. WithdrawalPeriodTime hexutil.Big } // Marshal encodes the config into bytes slice. func (sc *SfcConfig) Marshal() ([]byte, error) { // we have 6x256bit numbers here buf := make([]byte, 6*32) // copy the bytes sc.MinValidatorStake.ToInt().FillBytes(buf[:32]) sc.MaxDelegatedRatio.ToInt().FillBytes(buf[32:64]) sc.MinLockupDuration.ToInt().FillBytes(buf[64:96]) sc.MaxLockupDuration.ToInt().FillBytes(buf[96:128]) sc.WithdrawalPeriodEpochs.ToInt().FillBytes(buf[128:160]) sc.WithdrawalPeriodTime.ToInt().FillBytes(buf[160:]) return buf, nil } // Unmarshal decodes the buffer into the config set. 
func (sc *SfcConfig) Unmarshal(buf []byte) error { // check for the buffer length, we expect 6*32 bytes if len(buf) != 192 { return fmt.Errorf("expected 192 bytes, %d received", len(buf)) } // copy the data sc.MinValidatorStake.ToInt().SetBytes(buf[:32]) sc.MaxDelegatedRatio.ToInt().SetBytes(buf[32:64]) sc.MinLockupDuration.ToInt().SetBytes(buf[64:96]) sc.MaxLockupDuration.ToInt().SetBytes(buf[96:128]) sc.WithdrawalPeriodEpochs.ToInt().SetBytes(buf[128:160]) sc.WithdrawalPeriodTime.ToInt().SetBytes(buf[160:]) return nil }
internal/types/sfc_config.go
0.687105
0.432363
sfc_config.go
starcoder
package hdrimage

import (
	"encoding/binary"
	"fmt"
	"image"
	"image/color"
	"io"

	"github.com/DexterLB/traytor/hdrcolour"
)

// Image is a stuct which will display images via its 2D colour array, wich represents the screen
type Image struct {
	// Pixels is indexed [x][y], i.e. [Width][Height].
	Pixels        [][]hdrcolour.Colour
	Width, Height int
	// Divisor scales pixel values down on read (used for averaging samples).
	Divisor int
}

// New will set the screen to the given width and height,
// with all pixels initialised to black and Divisor set to 1.
func New(width, height int) *Image {
	pixels := make([][]hdrcolour.Colour, width)
	for i := range pixels {
		pixels[i] = make([]hdrcolour.Colour, height)
		for j := range pixels[i] {
			pixels[i][j] = *hdrcolour.New(0, 0, 0)
		}
	}
	return &Image{Pixels: pixels, Width: width, Height: height, Divisor: 1}
}

// Decode reads data in the simple traytor_hdr format and produces an
// image. The format is a little-endian header of two uint16 (width, height)
// followed by rows of RGBA float32 quadruples, bottom row first.
// The alpha component is read but ignored.
func Decode(reader io.Reader) (*Image, error) {
	size := [2]uint16{}
	err := binary.Read(reader, binary.LittleEndian, &size)
	if err != nil {
		return nil, fmt.Errorf("cannot read header from image data: %s", err)
	}
	im := New(int(size[0]), int(size[1]))

	var rgba [4]float32
	// rows are stored bottom-up, so iterate y from Height-1 down to 0
	for i := im.Height - 1; i >= 0; i-- {
		for j := 0; j < im.Width; j++ {
			err = binary.Read(reader, binary.LittleEndian, &rgba)
			if err != nil {
				return nil, fmt.Errorf("cannot read image data: %s", err)
			}
			im.Pixels[j][i].R = rgba[0]
			im.Pixels[j][i].G = rgba[1]
			im.Pixels[j][i].B = rgba[2]
		}
	}
	return im, nil
}

// Encode writes data in the simple traytor_hdr format into a writer.
// The alpha component is always written as 0 and carries no information.
func (im *Image) Encode(writer io.Writer) error {
	size := [2]uint16{
		uint16(im.Width),
		uint16(im.Height),
	}
	err := binary.Write(writer, binary.LittleEndian, size)
	if err != nil {
		return fmt.Errorf("cannot write header to image: %s", err)
	}

	var rgba [4]float32
	// mirror Decode: rows bottom-up, pixels scaled by the divisor
	for i := im.Height - 1; i >= 0; i-- {
		for j := 0; j < im.Width; j++ {
			pixel := im.AtHDR(j, i)
			rgba[0] = pixel.R
			rgba[1] = pixel.G
			rgba[2] = pixel.B
			err = binary.Write(writer, binary.LittleEndian, rgba)
			if err != nil {
				return fmt.Errorf("cannot write image data: %s", err)
			}
		}
	}
	return nil
}

// String returns a string which is the representation of image:
// {r, g, b}, ... {r, g, b}\n ...\n {r, g, b},...{r, g, b}
// One line per column of Pixels (i.e. per x index).
func (im *Image) String() string {
	representation := ""
	for i := 0; i < im.Width; i++ {
		for j := 0; j < im.Height; j++ {
			representation += im.Pixels[i][j].String()
			// FIX: the separator check must use the inner loop's bound
			// (Height); it previously compared against Width, which broke
			// the output for non-square images.
			if j != im.Height-1 {
				representation += ", "
			}
		}
		representation += "\n"
	}
	return representation
}

// Add adds another image to this one, pixel by pixel.
// The other image is assumed to have the same dimensions.
func (im *Image) Add(other *Image) {
	for i := 0; i < im.Width; i++ {
		for j := 0; j < im.Height; j++ {
			(&im.Pixels[i][j]).Add(&other.Pixels[i][j])
		}
	}
}

// Add returns a new image which is the sum of the given ones
func Add(a *Image, b *Image) *Image {
	sum := New(a.Width, a.Height)
	sum.Add(a)
	sum.Add(b)
	return sum
}

// AtHDR returns the Colour of the pixel at [x][y] (scaled by the divisor).
// A zero divisor yields white rather than dividing by zero.
func (im *Image) AtHDR(x, y int) *hdrcolour.Colour {
	if im.Divisor == 0 {
		return hdrcolour.New(1, 1, 1)
	}
	return im.Pixels[x][y].Scaled(1 / float32(im.Divisor))
}

// At returns the sRGB Colour of the pixel at [x][y] (scaled by the divisor)
func (im *Image) At(x, y int) color.Color {
	return im.AtHDR(x, y).To32Bit()
}

// ColorModel returns the image's color model (as used by Go's image interface)
func (im *Image) ColorModel() color.Model {
	return color.RGBAModel
}

// Bounds returns a rectangle as big as the image
func (im *Image) Bounds() image.Rectangle {
	return image.Rect(0, 0, im.Width, im.Height)
}

// FromSRGB constructs an Image from an sRGB image
func FromSRGB(im image.Image) *Image {
	width := im.Bounds().Max.X - im.Bounds().Min.X
	height := im.Bounds().Max.Y - im.Bounds().Min.Y
	extractedImage := New(width, height)
	for i := 0; i < width; i++ {
		for j := 0; j < height; j++ {
			extractedImage.Pixels[i][j].Add(
				hdrcolour.FromColor(im.At(im.Bounds().Min.X+i, im.Bounds().Min.Y+j)),
			)
		}
	}
	return extractedImage
}
hdrimage/image.go
0.754101
0.417153
image.go
starcoder
package day14

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var (
	// MaskInstruction matches lines like "mask = X10X...".
	// The "$" anchor sits outside the capture group so the group holds
	// exactly the mask characters.
	MaskInstruction = regexp.MustCompile(`^mask = ([X01]+)$`)
	// MemoryInstruction matches lines like "mem[8] = 11".
	MemoryInstruction = regexp.MustCompile(`^mem\[(\d+)\] = (\d+)$`)
)

// BitMask is a strategy for applying a 36-bit mask to memory writes.
type BitMask interface {
	// Applies the bitmap to the instruction and stores the result in the memory
	apply(*Instruction, *Memory)

	// Parses a bit mask string and sets up state so future calls to apply()
	// will use the mask
	parse(string) error
}

// BitMaskV1 masks the written value: X bits pass through, 0/1 bits force.
type BitMaskV1 struct {
	andMask uint64
	orMask  uint64
}

// BitMaskV2 masks the address: 1 bits force, X bits float over both values.
type BitMaskV2 struct {
	orMask        uint64
	floatingMasks []uint64
}

// Memory is a sparse 64-bit addressed value store.
type Memory struct {
	memory map[uint64]uint64
}

// Instruction is a single decoded "mem[Address] = Value" line.
type Instruction struct {
	Address uint64
	Value   uint64
}

// RunDockingProgram runs the docking code instructions using a bitmask
// and returns the sum of all values left in memory.
// The bitmask can either be BitMaskV1 or BitMaskV2.
func RunDockingProgram(instructions []string, bitMask BitMask) (uint64, error) {
	memory := &Memory{
		memory: make(map[uint64]uint64),
	}

	for i, instructionString := range instructions {
		maskMatches := MaskInstruction.FindStringSubmatch(instructionString)
		if maskMatches != nil {
			err := bitMask.parse(maskMatches[1])
			if err != nil {
				// FIX: never splice err.Error() into the format string —
				// a "%" inside the error text would corrupt the output.
				// Wrap with %w instead.
				return 0, fmt.Errorf("Error parsing bitmask on line %v: %w", i+1, err)
			}
			continue
		}

		instruc, err := parseInstruction(instructionString)
		if err != nil {
			return 0, fmt.Errorf("Error parsing instruction on line %v: %w", i+1, err)
		}
		bitMask.apply(instruc, memory)
	}

	sum := uint64(0)
	for _, v := range memory.memory {
		sum += v
	}
	return sum, nil
}

// parseInstruction decodes a "mem[addr] = value" line.
func parseInstruction(instruction string) (*Instruction, error) {
	memMatches := MemoryInstruction.FindStringSubmatch(instruction)
	if memMatches == nil {
		return nil, fmt.Errorf("Failed to match instruction")
	}

	memAddress, err := strconv.ParseUint(memMatches[1], 10, 64)
	if err != nil {
		return nil, err
	}
	value, err := strconv.ParseUint(memMatches[2], 10, 64)
	if err != nil {
		return nil, err
	}

	return &Instruction{
		Address: memAddress,
		Value:   value,
	}, nil
}

// apply stores the value with forced bits applied (part 1 semantics).
func (b *BitMaskV1) apply(instruction *Instruction, memory *Memory) {
	memory.memory[instruction.Address] = (instruction.Value & b.andMask) | b.orMask
}

// parse builds the AND mask (X -> 1, clears forced-zero bits) and the
// OR mask (X -> 0, sets forced-one bits) from the 36-character mask string.
func (b *BitMaskV1) parse(bitMask string) error {
	var err error
	andMaskString := strings.ReplaceAll(bitMask, "X", "1")
	b.andMask, err = strconv.ParseUint(andMaskString, 2, 36)
	if err != nil {
		return err
	}
	b.orMask, err = parseOrMask(bitMask)
	if err != nil {
		return err
	}
	return nil
}

// apply writes the value to every address reachable by floating the X bits
// of the masked address (part 2 semantics).
func (b *BitMaskV2) apply(instruction *Instruction, memory *Memory) {
	newAddress := instruction.Address | b.orMask
	addresses := applyFloatingMasks(b.floatingMasks, newAddress)
	for _, address := range addresses {
		memory.memory[address] = instruction.Value
	}
}

// applyFloatingMasks calculates all possible transformations of a value by
// applying the list of floating masks (each mask has exactly one bit set).
func applyFloatingMasks(floatingMasks []uint64, value uint64) []uint64 {
	if len(floatingMasks) == 0 {
		return []uint64{value}
	}

	// Get the two variations of the value setting the first mask's bit to 0 and 1
	oneValue := value | floatingMasks[0]
	zeroValue := value & ^floatingMasks[0]

	// If there's only the one floating mask we can return the two values
	if len(floatingMasks) == 1 {
		return []uint64{oneValue, zeroValue}
	}

	// Recursively apply all remaining masks and collect all the possible values
	allResults := make([]uint64, 0)
	nextMasks := floatingMasks[1:]
	allResults = append(allResults, applyFloatingMasks(nextMasks, oneValue)...)
	allResults = append(allResults, applyFloatingMasks(nextMasks, zeroValue)...)
	return allResults
}

// parse builds the OR mask plus one single-bit mask per X (floating) position.
func (b *BitMaskV2) parse(bitMask string) error {
	var err error
	b.orMask, err = parseOrMask(bitMask)
	if err != nil {
		return err
	}

	b.floatingMasks = make([]uint64, 0)
	for i, v := range bitMask {
		if v == 'X' {
			mask := uint64(1) << (len(bitMask) - i - 1)
			b.floatingMasks = append(b.floatingMasks, mask)
		}
	}
	return nil
}

// parseOrMask parses the mask with X treated as 0, yielding the bits that
// must be forced to one.
func parseOrMask(bitMask string) (uint64, error) {
	orMaskString := strings.ReplaceAll(bitMask, "X", "0")
	orMask, err := strconv.ParseUint(orMaskString, 2, 36)
	if err != nil {
		return 0, err
	}
	return orMask, nil
}
day14/day14.go
0.632503
0.41117
day14.go
starcoder
package hsl // RGBtoHSL : Convert RGB to HSL // rgb values are 0-255 // h is between 0.0 - 360.0 // s & l values are 0.0 - 1.0 func RGBtoHSL(r, g, b uint8) (h, s, l float64) { var rgb [3]float64 rgb[0] = float64(r) / 255.0 rgb[1] = float64(g) / 255.0 rgb[2] = float64(b) / 255.0 max := 0.0 for _, v := range rgb { if v > max { max = v } } min := 2.0 for _, v := range rgb { if v < min { min = v } } l = (max + min) / 2.0 if min == max { h = 0 s = 0 } else { d := max - min if l <= 0.5 { s = d / (max + min) } else { s = d / (2.0 - d) } if rgb[0] == max { h = (rgb[1] - rgb[2]) / d } if rgb[1] == max { h = 2.0 + (rgb[2]-rgb[0])/d } if rgb[2] == max { h = 4.0 + (rgb[0]-rgb[1])/d } h *= 60 if h < 0 { h += 360.0 } } return } // HSLtoRGB : Convert HSL to RGB format // h is between 0 - 360 // s and l are between 0.0 - 1.0 // RGB is between 0-255 func HSLtoRGB(h, s, l float64) (r, g, b uint8) { var ttr, ttg, ttb float64 if s == 0 { r = uint8(l * 255.0) g = uint8(l * 255.0) b = uint8(l * 255.0) return } var t1 float64 if l < 0.5 { t1 = l * (1.0 + s) } else { t1 = l + s - l*s } t2 := 2*l - t1 h /= 360.0 var tr, tb, tg float64 tr = h + 0.3333333333333333 tg = h tb = h - 0.3333333333333333 if tr < 0 { tr += 1 } if tg < 0 { tg += 1 } if tb < 0 { tb += 1 } if tr > 1 { tr -= 1 } if tg > 1 { tg -= 1 } if tb > 1 { tb -= 1 } if 6*tr < 1 { ttr = t2 + (t1-t2)*6*tr } else if 2*tr < 1 { ttr = t1 } else if 3*tr < 2 { ttr = t2 + (t1-t2)*(0.66666-tr)*6 } else { ttr = t2 } if 6*tg < 1 { ttg = t2 + (t1-t2)*6*tg } else if 2*tg < 1 { ttg = t1 } else if 3*tg < 2 { ttg = t2 + (t1-t2)*(0.66666-tg)*6 } else { ttg = t2 } if 6*tb < 1 { ttb = t2 + (t1-t2)*6*tb } else if 2*tb < 1 { ttb = t1 } else if 3*tb < 2 { ttb = t2 + (t1-t2)*(0.66666-tb)*6 } else { ttb = t2 } r = uint8(ttr * 255.0) g = uint8(ttg * 255.0) b = uint8(ttb * 255.0) return }
hsl/hsl.go
0.664867
0.422981
hsl.go
starcoder
package evaluator

import (
	"fmt"

	"github.com/yuzuy/yoru/ast"
	"github.com/yuzuy/yoru/object"
	"github.com/yuzuy/yoru/token"
)

// Shared singletons: booleans and null are compared by pointer identity
// throughout this file (see isTruthy and evalBangOperatorExpression), so
// every evaluation path MUST return these instances, never fresh ones.
var (
	Null  = &object.Null{}
	True  = &object.Boolean{Value: true}
	False = &object.Boolean{Value: false}
)

// Eval evaluates an AST node in the given environment and returns the
// resulting object, or an *object.Error on failure.
func Eval(node ast.Node, env *object.Environment) object.Object {
	switch node := node.(type) {
	case *ast.Program:
		return evalProgram(node, env)
	case *ast.ExpressionStatement:
		return Eval(node.Expression, env)
	case *ast.LetStatement:
		val := Eval(node.Value, env)
		if isError(val) {
			return val
		}
		env.Set(node.Name.Value, val)
	case *ast.ReturnStatement:
		val := Eval(node.ReturnValue, env)
		if isError(val) {
			return val
		}
		return &object.ReturnValue{Value: val}
	case *ast.BlockStatement:
		return evalBlockStatement(node, env)
	case *ast.Identifier:
		return evalIdentifier(node, env)
	case *ast.IntegerLiteral:
		return &object.Integer{Value: node.Value}
	case *ast.StringLiteral:
		return &object.String{Value: node.Value}
	case *ast.Boolean:
		return nativeBoolToBooleanObject(node.Value)
	case *ast.Null:
		return Null
	case *ast.ArrayLiteral:
		elements := evalExpressions(node.Elements, env)
		if len(elements) == 1 && isError(elements[0]) {
			return elements[0]
		}
		return &object.Array{Elements: elements}
	case *ast.HashLiteral:
		return evalHashLiteral(node, env)
	case *ast.IndexExpression:
		left := Eval(node.Left, env)
		if isError(left) {
			return left
		}
		index := Eval(node.Index, env)
		if isError(index) {
			return index
		}
		return evalIndexExpression(left, index)
	case *ast.PrefixExpression:
		right := Eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalPrefixExpression(node.Operator, right)
	case *ast.InfixExpression:
		left := Eval(node.Left, env)
		if isError(left) {
			return left
		}
		right := Eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalInfixExpression(node.Operator, left, right)
	case *ast.IfExpression:
		return evalIfExpression(node, env)
	case *ast.SwitchStatement:
		return evalSwitchStatement(node, env)
	case *ast.FunctionLiteral:
		params := node.Parameters
		body := node.Body
		return &object.Function{Parameters: params, Body: body, Env: env}
	case *ast.CallExpression:
		function := Eval(node.Function, env)
		if isError(function) {
			return function
		}
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return applyFunction(function, args)
	}

	return nil
}

// newError builds an *object.Error with a formatted message.
func newError(format string, a ...interface{}) *object.Error {
	return &object.Error{Message: fmt.Sprintf(format, a...)}
}

// isError reports whether obj is an evaluation error.
func isError(obj object.Object) bool {
	if obj != nil {
		return obj.Type() == object.ErrorObj
	}
	return false
}

// evalProgram evaluates the statements of a program, unwrapping a return
// value and stopping early on errors.
func evalProgram(program *ast.Program, env *object.Environment) object.Object {
	var result object.Object

	for _, stmt := range program.Statements {
		result = Eval(stmt, env)

		switch result := result.(type) {
		case *object.ReturnValue:
			return result.Value
		case *object.Error:
			return result
		}
	}

	return result
}

// nativeBoolToBooleanObject maps a Go bool to the shared True/False singletons.
func nativeBoolToBooleanObject(input bool) *object.Boolean {
	if input {
		return True
	}
	return False
}

func evalPrefixExpression(operator string, right object.Object) object.Object {
	switch operator {
	case "!":
		return evalBangOperatorExpression(right)
	case "-":
		return evalMinusPrefixOperatorExpression(right)
	default:
		return newError("unknown operator: %s%s", operator, right.Type())
	}
}

// evalBangOperatorExpression relies on pointer identity with the singletons.
func evalBangOperatorExpression(right object.Object) object.Object {
	switch right {
	case True:
		return False
	case False:
		return True
	case Null:
		return True
	default:
		return False
	}
}

func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
	if right.Type() != object.IntObj {
		return newError("unknown operator: -%s", right.Type())
	}

	value := right.(*object.Integer).Value
	return &object.Integer{Value: -value}
}

func evalInfixExpression(operator string, left, right object.Object) object.Object {
	switch {
	case left.Type() == object.IntObj && right.Type() == object.IntObj:
		return evalIntegerInfixExpression(operator, left, right)
	case left.Type() == object.StringObj && right.Type() == object.StringObj:
		return evalStringInfixExpression(operator, left, right)
	case operator == "==":
		return nativeBoolToBooleanObject(left == right)
	case operator == "!=":
		return nativeBoolToBooleanObject(left != right)
	case left.Type() != right.Type():
		return newError("type mismatch: %s %s %s", left.Type(), operator, right.Type())
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}

func evalIntegerInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.Integer).Value
	rightVal := right.(*object.Integer).Value

	switch operator {
	case "+":
		return &object.Integer{Value: leftVal + rightVal}
	case "-":
		return &object.Integer{Value: leftVal - rightVal}
	case "*":
		return &object.Integer{Value: leftVal * rightVal}
	case "/":
		return &object.Integer{Value: leftVal / rightVal}
	case "%":
		return &object.Integer{Value: leftVal % rightVal}
	case "==":
		return nativeBoolToBooleanObject(leftVal == rightVal)
	case "!=":
		return nativeBoolToBooleanObject(leftVal != rightVal)
	case "<":
		return nativeBoolToBooleanObject(leftVal < rightVal)
	case ">":
		return nativeBoolToBooleanObject(leftVal > rightVal)
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}

func evalStringInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.String).Value
	rightVal := right.(*object.String).Value

	switch operator {
	case "+":
		return &object.String{Value: leftVal + rightVal}
	case "==":
		// FIX: must return the shared singletons via
		// nativeBoolToBooleanObject. Returning a fresh &object.Boolean
		// broke isTruthy and "!" for string comparisons, because both
		// compare against True/False by pointer identity (a fresh
		// Boolean{Value: false} was treated as truthy).
		return nativeBoolToBooleanObject(leftVal == rightVal)
	case "!=":
		return nativeBoolToBooleanObject(leftVal != rightVal)
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}

func evalIfExpression(ie *ast.IfExpression, env *object.Environment) object.Object {
	condition := Eval(ie.Condition, env)
	if isError(condition) {
		return condition
	}

	if isTruthy(condition) {
		return Eval(ie.Consequence, env)
	} else if ie.Alternative != nil {
		return Eval(ie.Alternative, env)
	} else {
		return Null
	}
}

// evalSwitchStatement evaluates each case by comparing the switch target
// against the case condition with "==", in case order.
// NOTE(review): iteration is 1-based (i from 1 to len inclusive), which
// presumes ss.Cases is keyed from 1 — confirm against the ast package.
func evalSwitchStatement(ss *ast.SwitchStatement, env *object.Environment) object.Object {
	for i := 1; i <= len(ss.Cases); i++ {
		// synthesize "target == caseCondition" and evaluate it
		comparative := &ast.InfixExpression{
			Token:    token.Token{Type: token.EQ, Literal: "=="},
			Left:     ss.Target,
			Operator: "==",
			Right:    ss.Cases[i].Condition,
		}
		condition := Eval(comparative, env)
		if isError(condition) {
			return condition
		}
		if isTruthy(condition) {
			return evalBlockStatement(&ast.BlockStatement{
				Statements: ss.Cases[i].Block,
			}, env)
		}
	}
	if ss.Default != nil {
		return evalBlockStatement(&ast.BlockStatement{
			Statements: ss.Default,
		}, env)
	}
	return Null
}

// isTruthy: only the Null and False singletons are falsy.
func isTruthy(obj object.Object) bool {
	switch obj {
	case Null:
		return false
	case True:
		return true
	case False:
		return false
	default:
		return true
	}
}

func evalIndexExpression(left, index object.Object) object.Object {
	switch {
	case left.Type() == object.ArrayObj && index.Type() == object.IntObj:
		return evalArrayIndexExpression(left, index)
	case left.Type() == object.HashObj:
		return evalHashIndexExpression(left, index)
	default:
		return newError("index operator not supported. %s[%s]", left.Type(), index.Type())
	}
}

func evalHashLiteral(hash *ast.HashLiteral, env *object.Environment) object.Object {
	pairs := make(map[object.HashKey]object.HashPair)

	for k, v := range hash.Pairs {
		key := Eval(k, env)
		if isError(key) {
			return key
		}
		hashKey, ok := key.(object.Hashable)
		if !ok {
			return newError("unusable as hash key: %s", key.Type())
		}

		value := Eval(v, env)
		if isError(value) {
			return value
		}

		hashed := hashKey.HashKey()
		pairs[hashed] = object.HashPair{Key: key, Value: value}
	}

	return &object.Hash{Pairs: pairs}
}

func evalHashIndexExpression(hash, index object.Object) object.Object {
	hashObj := hash.(*object.Hash)

	key, ok := index.(object.Hashable)
	if !ok {
		return newError("unusable as hash key: %s", index.Type())
	}

	pair, ok := hashObj.Pairs[key.HashKey()]
	if !ok {
		return Null
	}

	return pair.Value
}

func evalArrayIndexExpression(array, index object.Object) object.Object {
	arrayObj := array.(*object.Array)
	idx := index.(*object.Integer).Value
	max := int64(len(arrayObj.Elements) - 1)

	// out-of-range indexing yields Null rather than an error
	if idx < 0 || idx > max {
		return Null
	}

	return arrayObj.Elements[idx]
}

// evalBlockStatement evaluates statements without unwrapping return values,
// so returns propagate out of nested blocks.
func evalBlockStatement(bs *ast.BlockStatement, env *object.Environment) object.Object {
	var result object.Object

	for _, stmt := range bs.Statements {
		result = Eval(stmt, env)

		if result != nil {
			t := result.Type()
			if t == object.ReturnValueObj || t == object.ErrorObj {
				return result
			}
		}
	}

	return result
}

// evalIdentifier resolves a name from the environment, then built-ins.
func evalIdentifier(i *ast.Identifier, env *object.Environment) object.Object {
	if val, ok := env.Get(i.Value); ok {
		return val
	}
	if builtIn, ok := builtIns[i.Value]; ok {
		return builtIn
	}
	return newError("identifier not found: " + i.Value)
}

func applyFunction(fn object.Object, args []object.Object) object.Object {
	switch fn := fn.(type) {
	case *object.Function:
		if len(fn.Parameters) != len(args) {
			return newError("function requires %d arguments. got=%d", len(fn.Parameters), len(args))
		}
		extendedEnv := extendFunctionEnv(fn, args)
		evaluated := Eval(fn.Body, extendedEnv)
		return unwrapReturnValue(evaluated)
	case *object.BuiltIn:
		return fn.Fn(args...)
	default:
		return newError("not a function: %s", fn.Type())
	}
}

// extendFunctionEnv binds call arguments to parameter names in a new
// environment enclosing the function's closure environment.
func extendFunctionEnv(fn *object.Function, args []object.Object) *object.Environment {
	env := object.NewEnclosedEnvironment(fn.Env)

	for i, v := range fn.Parameters {
		env.Set(v.Value, args[i])
	}

	return env
}

func unwrapReturnValue(obj object.Object) object.Object {
	if rv, ok := obj.(*object.ReturnValue); ok {
		return rv.Value
	}
	return obj
}

// evalExpressions evaluates each expression in order, short-circuiting to a
// single-element slice containing the first error encountered.
func evalExpressions(exps []ast.Expression, env *object.Environment) []object.Object {
	var result []object.Object

	for _, e := range exps {
		evaluated := Eval(e, env)
		if isError(evaluated) {
			return []object.Object{evaluated}
		}
		result = append(result, evaluated)
	}

	return result
}
evaluator/evaluator.go
0.569853
0.470858
evaluator.go
starcoder
package transport const ( // types of encoding over the wire. // EncodingNone place holder. EncodingNone byte = 0x00 // EncodingProtobuf uses protobuf as coding format. EncodingProtobuf byte = 0x10 ) const ( // types of compression over the wire. // CompressionNone does not apply compression on the payload. CompressionNone byte = 0 // CompressionSnappy apply snappy compression on payload. CompressionSnappy = 1 // CompressionGzip apply gzip compression on payload. CompressionGzip = 2 // CompressionBzip2 apply bzip2 compression on the payload. CompressionBzip2 = 3 ) // TransportFlag tell packet encoding and compression formats. type TransportFlag uint16 // GetCompression returns the compression bits from flags func (flags TransportFlag) GetCompression() byte { return byte(flags & TransportFlag(0x000F)) } // SetSnappy will set packet compression to snappy func (flags TransportFlag) SetSnappy() TransportFlag { return (flags & TransportFlag(0xFFF0)) | TransportFlag(CompressionSnappy) } // SetGzip will set packet compression to Gzip func (flags TransportFlag) SetGzip() TransportFlag { return (flags & TransportFlag(0xFFF0)) | TransportFlag(CompressionGzip) } // SetBzip2 will set packet compression to bzip2 func (flags TransportFlag) SetBzip2() TransportFlag { return (flags & TransportFlag(0xFFF0)) | TransportFlag(CompressionBzip2) } // GetEncoding will get the encoding bits from flags func (flags TransportFlag) GetEncoding() byte { return byte(flags & TransportFlag(0x00F0)) } // SetProtobuf will set packet encoding to protobuf func (flags TransportFlag) SetProtobuf() TransportFlag { return (flags & TransportFlag(0xFF0F)) | TransportFlag(EncodingProtobuf) } // GetChecksum will get the checksum from flags func (flags TransportFlag) GetChecksum() byte { return byte((flags & TransportFlag(0x7F00)) >> 8) } // SetChecksum will set the checksum in flags func (flags TransportFlag) SetChecksum(c byte) TransportFlag { return (flags & TransportFlag(0x80FF)) | (TransportFlag(c) << 8) 
} func (flags TransportFlag) IsValidEncoding() bool { enc := flags.GetEncoding() if enc == EncodingProtobuf { return true } return false }
secondary/transport/transport_flags.go
0.738103
0.444565
transport_flags.go
starcoder
// Package compact provides compact Merkle tree data structures.
package compact

import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"math/bits"

	log "github.com/golang/glog"
	"github.com/google/trillian/merkle/hashers"
)

// RootHashMismatchError indicates a unexpected root hash value.
type RootHashMismatchError struct {
	ExpectedHash []byte
	ActualHash   []byte
}

// Error implements the error interface.
func (r RootHashMismatchError) Error() string {
	return fmt.Sprintf("root hash mismatch got: %v expected: %v", r.ActualHash, r.ExpectedHash)
}

// Tree is a compact Merkle tree representation.
// Uses log(n) nodes to represent the current on-disk tree.
type Tree struct {
	hasher hashers.LogHasher
	root   []byte
	// the list of "dangling" left-hand nodes, NOTE: index 0 is the leaf, not the root.
	// nodes[i] is non-nil iff bit i of size is set.
	nodes [][]byte
	size  int64
}

// isPerfectTree reports whether x is a power of two, i.e. whether a tree of
// x leaves is perfectly balanced (represented by a single root node).
func isPerfectTree(x int64) bool {
	return x != 0 && (x&(x-1) == 0)
}

// GetNodeFunc is a function prototype which can look up particular nodes within a non-compact Merkle tree.
// Used by the compact Tree to populate itself with correct state when starting up with a non-empty tree.
type GetNodeFunc func(depth int, index int64) ([]byte, error)

// NewTreeWithState creates a new compact Tree for the passed in |size|.
// This can fail if the nodes required to recreate the tree state cannot be fetched or the calculated
// root hash after population does not match the value we expect.
// |f| will be called a number of times with the co-ordinates of internal MerkleTree nodes whose hash values are
// required to initialize the internal state of the compact Tree. |expectedRoot| is the known-good tree root
// of the tree at |size|, and is used to verify the correct initial state of the compact Tree after initialisation.
func NewTreeWithState(hasher hashers.LogHasher, size int64, f GetNodeFunc, expectedRoot []byte) (*Tree, error) {
	sizeBits := bits.Len64(uint64(size))

	r := Tree{
		hasher: hasher,
		nodes:  make([][]byte, sizeBits),
		root:   hasher.EmptyRoot(),
		size:   size,
	}

	if isPerfectTree(size) {
		log.V(1).Info("Is perfect tree.")
		// A perfect tree is fully described by its root alone; no fetches needed.
		r.root = append(make([]byte, 0, len(expectedRoot)), expectedRoot...)
		r.nodes[sizeBits-1] = r.root
	} else {
		// Pull in the nodes we need to repopulate our compact tree and verify the root
		// Each set bit of size corresponds to one dangling left-hand node.
		for depth := 0; depth < sizeBits; depth++ {
			if size&1 == 1 {
				index := size - 1
				log.V(1).Infof("fetching d: %d i: %d, leaving size %d", depth, index, size)
				h, err := f(depth, index)
				if err != nil {
					log.Warningf("Failed to fetch node depth %d index %d: %s", depth, index, err)
					return nil, err
				}
				r.nodes[depth] = h
			}
			size >>= 1
		}
		// Recompute the root from the fetched nodes; the callback is a no-op
		// because nothing new needs to be persisted during startup.
		r.recalculateRoot(func(depth int, index int64, hash []byte) error {
			return nil
		})
	}
	if !bytes.Equal(r.root, expectedRoot) {
		log.Warningf("Corrupt state, expected root %s, got %s", hex.EncodeToString(expectedRoot[:]), hex.EncodeToString(r.root[:]))
		return nil, RootHashMismatchError{ActualHash: r.root, ExpectedHash: expectedRoot}
	}
	log.V(1).Infof("Resuming at size %d, with root: %s", r.size, base64.StdEncoding.EncodeToString(r.root[:]))
	return &r, nil
}

// NewTree creates a new compact Tree with size zero. This always succeeds.
func NewTree(hasher hashers.LogHasher) *Tree {
	r := Tree{
		hasher: hasher,
		root:   hasher.EmptyRoot(),
		nodes:  make([][]byte, 0),
		size:   0,
	}
	return &r
}

// CurrentRoot returns the current root hash.
func (t *Tree) CurrentRoot() []byte {
	return t.root
}

// DumpNodes logs the internal state of the compact Tree, and is used for debugging.
func (t *Tree) DumpNodes() {
	log.Infof("Tree Nodes @ %d", t.size)
	mask := int64(1)
	numBits := bits.Len64(uint64(t.size))
	for bit := 0; bit < numBits; bit++ {
		if t.size&mask != 0 {
			log.Infof("%d: %s", bit, base64.StdEncoding.EncodeToString(t.nodes[bit][:]))
		} else {
			log.Infof("%d: -", bit)
		}
		mask <<= 1
	}
}

// setNodeFunc is a callback used to persist a freshly computed node hash at
// the given (depth, index) MerkleTree coordinates.
type setNodeFunc func(depth int, index int64, hash []byte) error

// recalculateRoot recombines the dangling left-hand nodes (lowest first)
// into the tree root, reporting newly formed internal nodes via f.
func (t *Tree) recalculateRoot(f setNodeFunc) error {
	if t.size == 0 {
		return nil
	}

	index := t.size

	var newRoot []byte
	first := true
	mask := int64(1)
	numBits := bits.Len64(uint64(t.size))
	for bit := 0; bit < numBits; bit++ {
		index >>= 1
		if t.size&mask != 0 {
			if first {
				// lowest set bit: this node seeds the running hash
				newRoot = t.nodes[bit]
				first = false
			} else {
				newRoot = t.hasher.HashChildren(t.nodes[bit], newRoot)
				if err := f(bit+1, index, newRoot); err != nil {
					return err
				}
			}
		}
		mask <<= 1
	}
	t.root = newRoot
	return nil
}

// AddLeaf calculates the leafhash of |data| and appends it to the tree.
// |f| is a callback which will be called multiple times with the full MerkleTree coordinates of nodes whose hash should be updated.
func (t *Tree) AddLeaf(data []byte, f setNodeFunc) (int64, []byte, error) {
	h, err := t.hasher.HashLeaf(data)
	if err != nil {
		return 0, nil, err
	}
	seq, err := t.AddLeafHash(h, f)
	if err != nil {
		return 0, nil, err
	}
	return seq, h, err
}

// AddLeafHash adds the specified |leafHash| to the tree.
// |f| is a callback which will be called multiple times with the full MerkleTree coordinates of nodes whose hash should be updated.
// Returns the sequence number assigned to the new leaf.
func (t *Tree) AddLeafHash(leafHash []byte, f setNodeFunc) (int64, error) {
	defer func() {
		t.size++
		// TODO(al): do this lazily
		// TODO(pavelkalinnikov): Handle recalculateRoot errors.
		t.recalculateRoot(f)
	}()

	assignedSeq := t.size
	index := assignedSeq

	if err := f(0, index, leafHash); err != nil {
		return 0, err
	}

	if t.size == 0 {
		// new tree
		t.nodes = append(t.nodes, leafHash)
		return assignedSeq, nil
	}

	// Initialize our running hash value to the leaf hash
	hash := leafHash
	bit := 0
	// Iterate over the bits in our tree size
	for mask := t.size; mask > 0; mask >>= 1 {
		index >>= 1
		if mask&1 == 0 {
			// Just store the running hash here; we're done.
			t.nodes[bit] = hash
			// Don't re-write the leaf hash node (we've done it above already)
			if bit > 0 {
				// Store the leaf hash node
				if err := f(bit, index, hash); err != nil {
					return 0, err
				}
			}
			return assignedSeq, nil
		}
		// The bit is set so we have a node at that position in the nodes list so hash it with our running hash:
		hash = t.hasher.HashChildren(t.nodes[bit], hash)
		// Store the resulting parent hash.
		if err := f(bit+1, index, hash); err != nil {
			return 0, err
		}
		// Now, clear this position in the nodes list as the hash it formerly contained will be propagated upwards.
		t.nodes[bit] = nil
		// Figure out if we're done:
		if bit+1 >= len(t.nodes) {
			// If we're extending the node list then add a new entry with our
			// running hash, and we're done.
			t.nodes = append(t.nodes, hash)
			return assignedSeq, nil
		} else if mask&0x02 == 0 {
			// If the node above us is unused at this tree size, then store our
			// running hash there, and we're done.
			t.nodes[bit+1] = hash
			return assignedSeq, nil
		}
		// Otherwise, go around again.
		bit++
	}
	// We should never get here, because that'd mean we had a running hash which
	// we've not stored somewhere.
	return 0, fmt.Errorf("AddLeaf failed running hash not cleared: h: %v seq: %d", leafHash, assignedSeq)
}

// Size returns the current size of the tree, that is, the number of leaves ever added to the tree.
func (t *Tree) Size() int64 {
	return t.size
}

// Hashes returns a copy of the set of node hashes that comprise the compact representation of the tree.
// For a perfect tree the root alone describes the tree, so nil is returned.
func (t *Tree) Hashes() [][]byte {
	if isPerfectTree(t.size) {
		return nil
	}
	n := make([][]byte, len(t.nodes))
	copy(n, t.nodes)
	return n
}

// Depth returns the number of levels in the tree.
func (t *Tree) Depth() int {
	if t.size == 0 {
		return 0
	}
	return bits.Len64(uint64(t.size - 1))
}
merkle/compact/tree.go
0.695545
0.706652
tree.go
starcoder
package bst import "github.com/kgantsov/data_structures_and_algorithms/data_structures/queue" type Node struct { value int left *Node right *Node } func NewNode(value int) *Node { node := new(Node) node.value = value return node } type BinarySearchTree struct { root *Node } func NewBinarySearchTree() *BinarySearchTree { bst := new(BinarySearchTree) bst.root = nil return bst } func addNode(value int, node *Node) *Node { if value < node.value { if node.left == nil { node.left = NewNode(value) return nil } else { return addNode(value, node.left) } } else if value > node.value { if value > node.value { if node.right == nil { node.right = NewNode(value) return nil } else { return addNode(value, node.right) } } } else { return nil } return nil } func removeNode(value int, node *Node) *Node { if node == nil { return nil } if value == node.value { if node.left == nil && node.right == nil { return nil } if node.left == nil { return node.right } if node.right == nil { return node.left } tempNode := node.right for tempNode.left != nil { tempNode = tempNode.left } node.value = tempNode.value node.right = removeNode(node.value, node.right) return node } else if value < node.value { node.left = removeNode(value, node.left) return node } else { node.right = removeNode(value, node.right) return node } return nil } func findMinHeight(node *Node) int { if node == nil { return -1 } left := findMinHeight(node.left) right := findMinHeight(node.right) if left < right { return left + 1 } else { return right + 1 } } func findMaxHeight(node *Node) int { if node == nil { return -1 } left := findMaxHeight(node.left) right := findMaxHeight(node.right) if left > right { return left + 1 } else { return right + 1 } } func inOrderTraversal(node *Node) []int { var res []int if node.left != nil { res = append(res, inOrderTraversal(node.left)...) } res = append(res, node.value) if node.right != nil { res = append(res, inOrderTraversal(node.right)...) 
} return res } func preOrderTraversal(node *Node) []int { var res []int res = append(res, node.value) if node.left != nil { res = append(res, preOrderTraversal(node.left)...) } if node.right != nil { res = append(res, preOrderTraversal(node.right)...) } return res } func postOrderTraversal(node *Node) []int { var res []int if node.left != nil { res = append(res, postOrderTraversal(node.left)...) } if node.right != nil { res = append(res, postOrderTraversal(node.right)...) } res = append(res, node.value) return res } func (bst *BinarySearchTree) Add(value int) { node := bst.root if node == nil { bst.root = NewNode(value) return } else { addNode(value, node) return } } func (bst *BinarySearchTree) Min() int { node := bst.root if node == nil { return 0 } for node.left != nil { node = node.left } return node.value } func (bst *BinarySearchTree) Max() int { node := bst.root if node == nil { return 0 } for node.right != nil { node = node.right } return node.value } func (bst *BinarySearchTree) Find(value int) *Node { node := bst.root if node == nil { return nil } for value != node.value { if value < node.value { node = node.left } else { node = node.right } if node == nil { return nil } } return node } func (bst *BinarySearchTree) IsPresent(value int) bool { node := bst.root if node == nil { return false } for value != node.value { if value < node.value { node = node.left } else { node = node.right } if node == nil { return false } } return true } func (bst *BinarySearchTree) Remove(value int) { bst.root = removeNode(value, bst.root) } func (bst *BinarySearchTree) FindMinHeight() int { return findMinHeight(bst.root) } func (bst *BinarySearchTree) FindMaxHeight() int { return findMaxHeight(bst.root) } func (bst *BinarySearchTree) IsBalanced() bool { min := bst.FindMinHeight() max := bst.FindMaxHeight() if min >= max - 1 { return true } else { return false } } func (bst *BinarySearchTree) InOrderTraversal() []int { values := inOrderTraversal(bst.root) return values } func 
(bst *BinarySearchTree) PreOrderTraversal() []int { values := preOrderTraversal(bst.root) return values } func (bst *BinarySearchTree) PostOrderTraversal() []int { values := postOrderTraversal(bst.root) return values } func (bst *BinarySearchTree) LevelOrderTraversal() []int { var values []int var node *Node q := queue.NewQueue() if bst.root != nil { q.Enqueue(bst.root) for q.Empty() != true { v, ok := q.Dequeue() if ok { node = v.(*Node) values = append(values, node.value) if node.left != nil { q.Enqueue(node.left) } if node.right != nil { q.Enqueue(node.right) } } } } return values }
data_structures/bst/bst.go
0.69451
0.446676
bst.go
starcoder
package dynamic import ( "fmt" "math" "github.com/rasaford/algorithms/internal/helper" ) // CutRod takes a list of prices for various lengths of rods and returns // the maximum achievable price for a rod of length n if all cuts are free. // The prices table has the length of the rod as an index into the array and // the corresponding price as the value for that index. func CutRod(prices []int, length int) int { if length == 0 { return 0 } q := math.MinInt32 // -infinity for i := 1; i <= length; i++ { q = helper.Max(q, prices[i]+CutRod(prices, length-i)) } return q } // CutRod takes a list of prices for various lengths of rods and returns // the maximum achievable price for a rod of length n if all cuts are free. // The prices table has the length of the rod as an index into the array and // the corresponding price as the value for that index. func CutRodMemoized(prices []int, length int) int { store := make([]int, length+1) for i := range store { store[i] = math.MinInt32 } return CutRodMemAux(prices, store, length) } func CutRodMemAux(prices, store []int, length int) int { if store[length] >= 0 { return store[length] } q := 0 if length != 0 { q = math.MinInt32 for i := 1; i <= length; i++ { q = helper.Max(q, prices[i]+CutRodMemAux(prices, store, length-i)) } } store[length] = q return q } func CutRodBottomUp(prices []int, length int) int { store := make([]int, length+1) for i := 1; i <= length; i++ { q := math.MinInt32 for j := 1; j <= i; j++ { q = helper.Max(q, prices[j]+store[i-j]) } store[i] = q } return store[length] } func CutRodBottomUpExtended(prices []int, length int) ([]int, []int) { store, cuts := make([]int, length+1), make([]int, length+1) for i := 1; i <= length; i++ { q := math.MinInt32 for j := 1; j <= i; j++ { if r := prices[j] + store[i-j]; q < r { q = r cuts[i] = j } } store[i] = q } return store, cuts } func CutRodPrint(prices []int, length int) int { store, cuts := CutRodBottomUpExtended(prices, length) n := length for n > 0 { fmt.Printf("%d ", 
cuts[n]) n -= cuts[n] } fmt.Println() return store[length] }
dynamic/rodCutting.go
0.680454
0.498291
rodCutting.go
starcoder
package questions import ( "container/list" "fmt" "math" "math/bits" "math/rand" "sort" "strconv" "strings" ) // QuestionOne performs addition without using arithmetic operators func QuestionOne(x, y int64) int64 { /* No arithmetic, so binary operations required addition: 0 + 0 = 00 0 + 1 = 01 1 + 0 = 01 1 + 1 = 10 can split addition into adding the result without carrying one with the result of carrying the one, without the addition this is adding an XOR of the two numbers with and AND shifted by one bit 101 + 011 = 110 + 010 = 100 + 100 = 000 + 1000 Alternatively you can iterate through the bits in increasing order of significance and perform AND, carrying the one to the next bit if required. */ if y == 0 { return x } addition := x ^ y carry := (x & y) << 1 return QuestionOne(addition, carry) } const ( Spade = iota Club Diamond Heart ) type Card struct { suit int value int } func (c Card) Print() string { switch c.suit { case Spade: return fmt.Sprintf("S%d ", c.value) case Club: return fmt.Sprintf("C%d ", c.value) case Diamond: return fmt.Sprintf("D%d ", c.value) case Heart: return fmt.Sprintf("H%d ", c.value) default: return "nope" } } // QuestionTwo performs a shuffle of a deck of cards, using a perfect random number generator func QuestionTwo(deck []*Card, position int) []*Card { for i := len(deck) - 1; i > 0; i-- { k := random(i) temp := deck[k] deck[k] = deck[i] deck[i] = temp } return deck } func initDeck() []*Card { deck := []*Card{} for _, suit := range []int{Spade, Club, Diamond, Heart} { for i := 1; i < 14; i++ { deck = append(deck, &Card{suit: suit, value: i}) } } return deck } // random returns a random int between the 0 and x func random(x int) int64 { return rand.Int63n(int64(x)) } // QuestionThree generates a random subset of the provided set func QuestionThree(set []int, count int) []int { subset := make([]int, count) copy(subset, set[:count]) for i := count; i < len(set); i++ { random := random(len(set)) if random < int64(count) { subset[random] 
= set[i] } } return subset } // Question Four, func QuestionFour(list []*IntMod, column int64) int64 { /* An array contains integers from 0 to N, except one is missing the integers cannot be accessed directly, but only by the jth bit of array[i] find the missing integer in O(n) time the missing number will be revealed based on */ if column >= bits.UintSize { return 0 } zeroes := []*IntMod{} ones := []*IntMod{} for _, i := range list { if i.Get(column) { // column bit is 1 ones = append(ones, i) } else { zeroes = append(zeroes, i) } } if len(zeroes) > len(ones) { return (QuestionFour(ones, column+1) << 1) | 1 } else { return (QuestionFour(zeroes, column+1) << 1) | 0 } } type IntMod struct { value int64 } func (i *IntMod) Get(j int64) bool { return bits.OnesCount(uint((1<<j)&i.value)) > 0 } /* QuestionFive Given an array of A and B, build the longest sublist containing an equal number of letters and numbers */ func QuestionFive(list []string) []string { /* Sub string must be even in length brute force by checking all subarrays, with some optimisations to allow for early exit complexity: N3 */ var subArray []string differences := map[int]int{0: -1} aCount := 0 bCount := 0 for i, element := range list { if element == "A" { aCount++ } else { bCount++ } difference := aCount - bCount marker, ok := differences[difference] if !ok { differences[difference] = i } else { subLength := i - marker if len(subArray) < subLength { subArray = list[marker+1 : i+1] } } } return subArray } /* QuestionSix Write a method to count the total number of 2s between 0 and N inclusive i.e. 
22 -> 2, 12, 20, 21, 22 -> 6 */ func QuestionSix(N int) int { count := 0 for i := 0; i < len(strconv.Itoa(N)); i++ { count = count + CountTwosAtDigit(N, i) } return count } func CountTwosAtDigit(N, d int) int { power := int(math.Pow10(d)) next := power * 10 right := N % power roundDown := N - N%next roundUp := roundDown + next digit := (N / power) % 10 if digit < 2 { return roundDown / 10 } else if digit == 2 { return roundDown/10 + right + 1 } else { return roundUp / 10 } } /* QuestionSeven Given a list of keys and frequencies, and a separate list of synonym key lists construct a true frequency list */ func QuestionSeven(freq map[string]int, synonyms [][]string) map[string]int { /* key to this is the data structure we use for converting from synonyms */ g := InitGraph(freq) AddSynonyms(g, synonyms) return g.CountFrequencies() } func InitGraph(freq map[string]int) *FrequencyGraph { g := &FrequencyGraph{nodes: map[string]*FrequencyNode{}} for k, v := range freq { g.AddNode(&FrequencyNode{ name: k, freq: v, }) } return g } func AddSynonyms(graph *FrequencyGraph, synonyms [][]string) { for _, pair := range synonyms { graph.AddEdge(pair[0], pair[1]) } } type FrequencyGraph struct { nodes map[string]*FrequencyNode } func (g *FrequencyGraph) AddNode(node *FrequencyNode) { g.nodes[node.name] = node } func (g *FrequencyGraph) AddEdge(a, b string) { aNode, ok := g.nodes[a] if !ok { aNode = &FrequencyNode{ name: a, freq: 0, } g.AddNode(aNode) } bNode, ok := g.nodes[b] if !ok { bNode = &FrequencyNode{ name: b, freq: 0, } g.AddNode(bNode) } aNode.AddChild(bNode) } func (g *FrequencyGraph) Print() string { nodes := []string{} for k, node := range g.nodes { nodes = append(nodes, fmt.Sprintf("%s: %s", k, node.Print())) } return strings.Join(nodes, "\n") } func (g *FrequencyGraph) CountFrequencies() map[string]int { fmt.Println(g.Print()) counts := map[string]int{} for _, node := range g.nodes { fmt.Println(node.visited, node.name, node.freq) if !node.visited { counts[node.name] = 
node.CountFrequencies() } } return counts } type FrequencyNode struct { name string freq int children []*FrequencyNode visited bool } func (n *FrequencyNode) AddChild(node *FrequencyNode) { n.children = append(n.children, node) node.children = append(node.children, n) } func (n *FrequencyNode) CountFrequencies() int { n.visited = true count := n.freq for _, child := range n.children { if !child.visited { count = count + child.CountFrequencies() } } return count } func (n *FrequencyNode) Print() string { names := []string{} for _, child := range n.children { names = append(names, child.name) } return fmt.Sprintf("%s %d %t children: %s", n.name, n.freq, n.visited, strings.Join(names, ",")) } /* QuestionEight Given a set of people with height and weight, build the highest tower of people lighter and shorter than the people below */ func QuestionEight(staff []*CircusPerson) []*CircusPerson { /* Could start by sorting the set by height and weight, then incrementing across both lists. Might need to increment weight first and hieght first to avoid edge cases */ sort.Slice(staff, func(i, j int) bool { return staff[i].weight < staff[j].weight }) return LongestSubTower(staff, []*CircusPerson{}, 0) } func LongestSubTower(array []*CircusPerson, sequence []*CircusPerson, index int) []*CircusPerson { if index >= len(array) { return sequence } bestWith := []*CircusPerson{} if canAppend(sequence, array[index]) { sequenceWith := append(sequence, array[index]) bestWith = LongestSubTower(array, sequenceWith, index+1) } bestWithout := LongestSubTower(array, sequence, index+1) if len(bestWith) > len(bestWithout) { return bestWith } return bestWithout } func canAppend(solution []*CircusPerson, person *CircusPerson) bool { if len(solution) == 0 { return true } return solution[len(solution)-1].isSmaller(person) } type CircusPerson struct { weight int height int } func (c *CircusPerson) isSmaller(p *CircusPerson) bool { return c.height < p.height && c.weight < p.weight } /* QuestionNine 
Design an algorithm to find the kth number such that the only prime factors are 3, 5, 7. 3, 5, 7 need not be factors but no other prime */ /* func QuestionNine(k int) []int { results, primes := []int{}, []int{} factor := 1 for i := 0; i < k; i++ { factor = GetNextNumber(factor, &primes) results = append(results, factor) factor++ } return results } */ func QuestionNine(k int) []int { if k < 1 { return []int{} } values := []int{} val := 0 queue3 := list.New() queue5 := list.New() queue7 := list.New() queue3.PushBack(1) maxElement := list.Element{Value: int(^uint(0) >> 1)} for i := 0; i < k; i++ { v3 := queue3.Front() v5 := queue5.Front() v7 := queue7.Front() if v5 == nil { v5 = &maxElement } if v7 == nil { v7 = &maxElement } if v7.Value.(int) < v5.Value.(int) { if v7.Value.(int) < v3.Value.(int) { val = v7.Value.(int) } else { val = v3.Value.(int) } } else { if v5.Value.(int) < v3.Value.(int) { val = v5.Value.(int) } else { val = v3.Value.(int) } } if val == v3.Value { queue3.Remove(v3) queue3.PushBack(3 * val) queue5.PushBack(5 * val) } else if val == v5.Value { queue5.Remove(v5) queue5.PushBack(5 * val) } else if val == v7.Value { queue7.Remove(v7) } queue7.PushBack(7 * val) values = append(values, val) } return values } func GetNextNumber(factor int, primes *[]int) int { for true { prime := CheckFactor(factor, primes) if prime { fmt.Println(factor, "prime") return factor } factor++ } return 0 } func CheckFactor(i int, primes *[]int) bool { fmt.Println(i, primes) for _, p := range *primes { if i%p == 0 { return false } } if i == 1 || i%3 == 0 || i%5 == 0 || i%7 == 0 { return true } *primes = append(*primes, i) return false } /* QuestionTen Find the majority element in an array given array of positive integers, find majority element in O(N) time and O(1) space */ func QuestionTen(array []int) int { candidate := getCandidate(array) return validateCandidate(candidate, array) } func getCandidate(array []int) int { candidate, count := 0, 0 for _, elem := range array { 
if count == 0 { candidate = elem } if elem == candidate { count++ } else { count-- } } return candidate } func validateCandidate(candidate int, array []int) int { count := 0 for _, elem := range array { if elem == candidate { count++ } } if count > len(array)/2 { return candidate } return -1 } /* QuestionEleven Given a list of words, and a candidate word, find the shortest distance between two instances of that word. If needed to do repeatedly, could use an approach similar to my first method */ func QuestionEleven(words []string, candidate1, candidate2 string) int { var pos1, pos2 int var min float64 for i, word := range words { if word == candidate1 { pos1 = i } else if word == candidate2 { pos2 = i } else { continue } delta := math.Abs(float64(pos1 - pos2)) if delta < min || min == 0 { min = delta } } return int(min) } /* QuestionTwelve given a binode struture, representing a binary tree, convert to a doubly linked list */ func QuestionTwelve(head *BiNode) *BiNode { return convert(head) } func convert(node *BiNode) *BiNode { if node == nil { return nil } left := convert(node.nodeLess) right := convert(node.nodeMore) fmt.Println(node.data, node.nodeLess, node.nodeMore, left, right) if left != nil { attach(getTail(left), node) } if right != nil { attach(node, right) } if left == nil { return node } return left } func attach(a, b *BiNode) { a.nodeMore = b b.nodeLess = a } func getTail(node *BiNode) *BiNode { if node == nil { return nil } n := node for n.nodeMore != nil { n = n.nodeMore } return n } type BiNode struct { nodeLess *BiNode nodeMore *BiNode data int } func (node *BiNode) print() string { entries := []string{strconv.Itoa(node.data)} n := node.nodeLess for n != nil { entries = append([]string{strconv.Itoa(n.data)}, entries...) 
n = n.nodeLess } n = node.nodeMore for n != nil { entries = append(entries, strconv.Itoa(n.data)) n = n.nodeMore } return strings.Join(entries, " ") } func (node *BiNode) insert(n *BiNode) { if node.data < n.data { if node.nodeMore != nil { node.nodeMore.insert(n) } else { node.nodeMore = n } return } if node.nodeLess != nil { node.nodeLess.insert(n) } else { node.nodeLess = n } return } func newNode(data int) *BiNode { return &BiNode{ nodeLess: nil, nodeMore: nil, data: data, } } /* QuestionThriteen, given a string of text without spaces, reinsert the spaces to minimise the amount of invalid characters, which are not recognised */ func QuestionThirteen(text string, dict map[string]bool) (string, int) { return bestResult(text, dict, 0) } func bestResult(text string, dict map[string]bool, start int) (string, int) { if start >= len(text) { return "", 0 } maxInvalid := 20000 best := "" partial := []byte{} i := start for i < len(text) { char := text[i] partial := append(partial, char) word := string(partial) _, ok := dict[word] var invalid int if ok { invalid = 0 } else { invalid = len(partial) } if invalid < maxInvalid { textResult, invalidResult := bestResult(text, dict, i+1) if (invalid + invalidResult) < maxInvalid { maxInvalid = invalidResult + invalid best = fmt.Sprintf("%s %s", partial, textResult) if maxInvalid == 0 { break } } } i++ } return best, maxInvalid } /* QuestionFourteen find the k smallest numbers in array */ func QuestionFourteen(array []int, k int) []int { // obvious approach sort.Slice(array, func(i, j int) bool { return array[i] < array[j] }) return array[:k] } /* QuestionFifteen given list of words, find longest word that consists of other words */ func QuestionFifteen(array []string) string { words := map[string]bool{} for _, word := range array { words[word] = true } sort.Slice(array, func(i, j int) bool { return len(array[i]) > len(array[j]) }) for _, word := range array { fmt.Println(word, words) if canBuildWord(word, true, words) { return 
word } } return "" } func canBuildWord(word string, original bool, dict map[string]bool) bool { res, ok := dict[word] if ok && !original { return res } for i := 0; i < len(word); i++ { left := string(word[0:i]) right := string(word[i:]) lRes, lOK := dict[left] if lOK && lRes && canBuildWord(right, false, dict) { return true } } dict[word] = false return false } /* QuestionSixteen Given a sequence of appointemnets , find the highest non adjacent number of minutes */ func QuestionSixteen(array []int) int { oneAway := 0 twoAway := 0 for i := len(array) - 1; i >= 0; i-- { bestWith := array[i] + twoAway bestWithout := oneAway var current int if bestWith > bestWithout { current = bestWith } else { current = bestWithout } twoAway = oneAway oneAway = current } return oneAway }
exercises/questions/questions.go
0.645567
0.431644
questions.go
starcoder
package glinkedlist import "fmt" //Stack tracks the linked list head and tail type Stack struct { Head *Node Tail *Node Count int Debug bool } // Node is a linked list item type Node struct { Data string Pointer *Node } // Pop takes the top node from the linked list and returns func (s *Stack) Pop() Node { // Get a reference to the Tail node nodePopped := *s.Tail ptr := s.Head // Loop through the linkedList to find the preceding pointer reference for { node := *ptr if node.Pointer == s.Tail { s.Tail = &node break } ptr = node.Pointer } // Reduce the counter by 1 s.Count-- return nodePopped } // Push generates a new node for the linked list and assigns a pointer // reference if necessary func (s *Stack) Push(value string) { // Create a new node newNode := Node{value, nil} if s.Head == nil { // First iteration set the head to the newNode s.Head = &newNode } else if s.Count == 1 { // Second iteration we set the head nodes pointer to the newNode s.Head.Pointer = &newNode } else { // Subsequent iterations we set the tail nodes pointer to the newNode s.Tail.Pointer = &newNode } // Set the tail to the newNode s.Tail = &newNode // Increment the counter s.Count++ // Output debug if needed if s.Debug { fmt.Printf("Pushing:\nPointer to n: %p\t%v\n", &newNode, newNode) fmt.Printf("Current Stack: %v\n", s) } } // Remove takes a string value and removes the node with that value // re-assigning the node pointers before and after the node being removed func (s *Stack) Remove(value string) (returnNode Node) { // Start at the head ptr := s.Head node := *ptr var previousNodePtr *Node for { if node.Data == value { if ptr == s.Head { // Am I removing the Head s.Head = node.Pointer } else if ptr == s.Tail { // Am I removing the Tail s.Tail = previousNodePtr } else { // Am I removing anything else if previousNodePtr != nil { previousNodePtr.Pointer = node.Pointer } } // Remove any pointer reference returnNode = node returnNode.Pointer = nil s.Count-- return } // Break out of the loop if 
don't find the string if node.Pointer == nil { return } // Assign the previous node to the current node previousNodePtr = ptr // Get the next node in the list if node.Pointer != nil { ptr = node.Pointer node = *ptr } } } //Iterate loops through the stack of linked nodes func (s *Stack) Iterate(f func(n *Node)) { node := *s.Head for { if f != nil { f(&node) } if node.Pointer == nil { break } else { node = *node.Pointer } } }
gLinkedList.go
0.555194
0.418162
gLinkedList.go
starcoder
// Given a (singly) linked list with head node root, write a function to split the linked list into k consecutive linked list "parts". // The length of each part should be as equal as possible: no two parts should have a size differing by more than 1. This may lead to some parts being null. // The parts should be in order of occurrence in the input list, and parts occurring earlier should always have a size greater than or equal parts occurring later. // Return a List of ListNode's representing the linked list parts that are formed. // Examples 1->2->3->4, k = 5 // 5 equal parts [ [1], [2], [3], [4], null ] // Example 1: // Input: // root = [1, 2, 3], k = 5 // Output: [[1],[2],[3],[],[]] // Explanation: // The input and each element of the output are ListNodes, not arrays. // For example, the input root has root.val = 1, root.next.val = 2, \root.next.next.val = 3, and root.next.next.next = null. // The first element output[0] has output[0].val = 1, output[0].next = null. // The last element output[4] is null, but it's string representation as a ListNode is []. // Example 2: // Input: // root = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], k = 3 // Output: [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]] // Explanation: // The input has been split into consecutive parts with size difference at most 1, and earlier parts are a larger size than the later parts. // Note: // - The length of root will be in the range [0, 1000]. // - Each value of a node in the input will be an integer in the range [0, 999]. // - k will be an integer in the range [1, 50]. package leetcode // ListNode is a node of a linked list. 
type ListNode struct { Val int Next *ListNode } func splitListToParts(root *ListNode, k int) []*ListNode { n := count(root) a, b := n/k, n%k parts := make([]*ListNode, k) prev := new(ListNode) for i := 0; i < k; i++ { prev.Next = nil parts[i] = root for j := 0; j < a; j++ { prev = root root = root.Next } if i < b { prev = root root = root.Next } } return parts } func count(root *ListNode) int { var cnt int for root != nil { root = root.Next cnt++ } return cnt }
0725/code.go
0.764452
0.713619
code.go
starcoder
// Package systems contains the ECS systems driving the game loop.
package systems

import (
	"fmt"

	gc "github.com/x-hgg-x/space-invaders-go/lib/components"
	"github.com/x-hgg-x/space-invaders-go/lib/resources"

	ecs "github.com/x-hgg-x/goecs/v2"
	ec "github.com/x-hgg-x/goecsengine/components"
	"github.com/x-hgg-x/goecsengine/utils"
	w "github.com/x-hgg-x/goecsengine/world"
)

// CollisionSystem manages collisions.
//
// Each pass below joins a set of components, visits the matching entities and
// resolves one collision category: player bullets vs screen top, enemy bullets
// vs screen bottom, player bullets vs aliens, player bullets vs enemy bullets,
// any bullets vs bunkers, and enemy bullets vs the player. Score/life changes
// are appended to gameResources.Events rather than applied directly.
//
// NOTE(review): entities are deleted while their join is being visited; this
// assumes the ECS manager tolerates deletion during iteration — confirm.
func CollisionSystem(world w.World) {
	gameComponents := world.Components.Game.(*gc.Components)
	gameResources := world.Resources.Game.(*resources.Game)
	gameEvents := &gameResources.Events
	audioPlayers := *world.Resources.AudioPlayers
	screenHeight := float64(world.Resources.ScreenDimensions.Height)

	// Player bullet explosion at the top of the screen.
	// The bullet component is swapped for a Deleted marker and an explosion
	// animation; the translation is snapped so the explosion sprite touches
	// the top edge.
	world.Manager.Join(gameComponents.Player, gameComponents.Bullet, world.Components.Engine.SpriteRender, world.Components.Engine.Transform).Visit(ecs.Visit(func(playerBulletEntity ecs.Entity) {
		playerBullet := gameComponents.Bullet.Get(playerBulletEntity).(*gc.Bullet)
		playerBulletSprite := world.Components.Engine.SpriteRender.Get(playerBulletEntity).(*ec.SpriteRender)
		playerBulletTranslation := &world.Components.Engine.Transform.Get(playerBulletEntity).(*ec.Transform).Translation

		if playerBulletTranslation.Y >= screenHeight-playerBullet.Height/2 {
			animation := playerBulletSprite.SpriteSheet.Animations[resources.PlayerBulletExplosionAnimation]
			firstSprite := playerBulletSprite.SpriteSheet.Sprites[animation.SpriteNumber[0]]
			playerBulletTranslation.Y = screenHeight - float64(firstSprite.Height)/2

			playerBulletEntity.
				RemoveComponent(gameComponents.Bullet).
				AddComponent(gameComponents.Deleted, &gc.Deleted{}).
				AddComponent(world.Components.Engine.AnimationControl, &ec.AnimationControl{
					Animation:      animation,
					Command:        ec.AnimationCommand{Type: ec.AnimationCommandStart},
					RateMultiplier: 1,
				})
		}
	}))

	// Remove enemy bullet at the bottom of the screen.
	world.Manager.Join(gameComponents.Enemy, gameComponents.Bullet, world.Components.Engine.Transform).Visit(ecs.Visit(func(enemyBulletEntity ecs.Entity) {
		enemyBullet := gameComponents.Bullet.Get(enemyBulletEntity).(*gc.Bullet)
		enemyBulletTranslation := &world.Components.Engine.Transform.Get(enemyBulletEntity).(*ec.Transform).Translation

		if enemyBulletTranslation.Y <= -enemyBullet.Height/2 {
			world.Manager.DeleteEntity(enemyBulletEntity)
		}
	}))

	// Collision between player bullets and aliens.
	// The inner visitor is a raw func(index) (skip bool) so that it can stop
	// after the first hit: one bullet kills at most one alien.
	world.Manager.Join(gameComponents.Player, gameComponents.Bullet, world.Components.Engine.Transform).Visit(ecs.Visit(func(playerBulletEntity ecs.Entity) {
		playerBullet := gameComponents.Bullet.Get(playerBulletEntity).(*gc.Bullet)
		playerBulletTranslation := world.Components.Engine.Transform.Get(playerBulletEntity).(*ec.Transform).Translation

		world.Manager.Join(gameComponents.Alien, world.Components.Engine.AnimationControl).Visit(
			func(index int) (skip bool) {
				alienEntity := ecs.Entity(index)
				alien := gameComponents.Alien.Get(alienEntity).(*gc.Alien)
				alienSprite := world.Components.Engine.SpriteRender.Get(alienEntity).(*ec.SpriteRender)
				alienTranslation := world.Components.Engine.Transform.Get(alienEntity).(*ec.Transform).Translation
				alienAnimationControl := world.Components.Engine.AnimationControl.Get(alienEntity).(*ec.AnimationControl)

				if !rectangleCollision(alienTranslation.X, alienTranslation.Y, alien.Width, alien.Height, playerBulletTranslation.X, playerBulletTranslation.Y, playerBullet.Width, playerBullet.Height) {
					// Check next alien
					return false
				}

				// Only one alien is killed for each bullet
				world.Manager.DeleteEntity(playerBulletEntity)

				// The alien's current loop animation identifies its type; map
				// it to the matching death animation and score value.
				var newAlienAnimation *ec.Animation
				for key := range alienSprite.SpriteSheet.Animations {
					if alienSprite.SpriteSheet.Animations[key] == alienAnimationControl.Animation {
						switch key {
						case resources.AlienLoop1Animation:
							newAlienAnimation = alienSprite.SpriteSheet.Animations[resources.AlienDeath1Animation]
							gameEvents.ScoreEvents = append(gameEvents.ScoreEvents, resources.ScoreEvent{Score: 100})
						case resources.AlienLoop2Animation:
							newAlienAnimation = alienSprite.SpriteSheet.Animations[resources.AlienDeath2Animation]
							gameEvents.ScoreEvents = append(gameEvents.ScoreEvents, resources.ScoreEvent{Score: 200})
						case resources.AlienLoop3Animation:
							newAlienAnimation = alienSprite.SpriteSheet.Animations[resources.AlienDeath3Animation]
							gameEvents.ScoreEvents = append(gameEvents.ScoreEvents, resources.ScoreEvent{Score: 300})
						case resources.AlienMasterLoopAnimation:
							newAlienAnimation = alienSprite.SpriteSheet.Animations[resources.AlienMasterDeathAnimation]
							gameEvents.ScoreEvents = append(gameEvents.ScoreEvents, resources.ScoreEvent{Score: 1000})
						default:
							utils.LogError(fmt.Errorf("unknown animation name: '%s'", key))
						}
						break
					}
				}
				if newAlienAnimation == nil {
					utils.LogError(fmt.Errorf("unable to find animation"))
				}

				// Play the death animation, then mark the alien for removal.
				*alienAnimationControl = ec.AnimationControl{
					Animation:      newAlienAnimation,
					Command:        ec.AnimationCommand{Type: ec.AnimationCommandStart},
					RateMultiplier: 1,
				}
				alienEntity.RemoveComponent(gameComponents.Alien).AddComponent(gameComponents.Deleted, &gc.Deleted{})

				audioPlayers["killed"].Rewind()
				audioPlayers["killed"].Play()

				// Skip other aliens
				return true
			})
	}))

	// Collision between player bullets and enemy bullets: both are destroyed.
	world.Manager.Join(gameComponents.Player, gameComponents.Bullet, world.Components.Engine.Transform).Visit(ecs.Visit(func(playerBulletEntity ecs.Entity) {
		playerBullet := gameComponents.Bullet.Get(playerBulletEntity).(*gc.Bullet)
		playerBulletTranslation := world.Components.Engine.Transform.Get(playerBulletEntity).(*ec.Transform).Translation

		world.Manager.Join(gameComponents.Enemy, gameComponents.Bullet, world.Components.Engine.Transform).Visit(ecs.Visit(func(enemyBulletEntity ecs.Entity) {
			enemyBullet := gameComponents.Bullet.Get(enemyBulletEntity).(*gc.Bullet)
			enemyBulletTranslation := world.Components.Engine.Transform.Get(enemyBulletEntity).(*ec.Transform).Translation

			if rectangleCollision(enemyBulletTranslation.X, enemyBulletTranslation.Y, enemyBullet.Width, enemyBullet.Height, playerBulletTranslation.X, playerBulletTranslation.Y, playerBullet.Width, playerBullet.Height) {
				world.Manager.DeleteEntity(playerBulletEntity)
				world.Manager.DeleteEntity(enemyBulletEntity)
			}
		}))
	}))

	// Collision between bullets and bunkers.
	// Bunkers are made of pixel entities; each destroyed pixel also chips
	// away at the bullet's health, so a bullet dies after enough pixels.
	world.Manager.Join(gameComponents.Bullet, world.Components.Engine.Transform).Visit(ecs.Visit(func(bulletEntity ecs.Entity) {
		bullet := gameComponents.Bullet.Get(bulletEntity).(*gc.Bullet)
		bulletTranslation := world.Components.Engine.Transform.Get(bulletEntity).(*ec.Transform).Translation

		world.Manager.Join(gameComponents.Bunker, world.Components.Engine.Transform).Visit(ecs.Visit(func(bunkerEntity ecs.Entity) {
			bunkerPixelSize := float64(gameComponents.Bunker.Get(bunkerEntity).(*gc.Bunker).PixelSize)
			bunkerTranslation := world.Components.Engine.Transform.Get(bunkerEntity).(*ec.Transform).Translation

			if rectangleCollision(bunkerTranslation.X, bunkerTranslation.Y, bunkerPixelSize, bunkerPixelSize, bulletTranslation.X, bulletTranslation.Y, bullet.Width, bullet.Height) {
				world.Manager.DeleteEntity(bunkerEntity)
				bullet.Health -= bunkerPixelSize * bunkerPixelSize
			}
		}))

		if bullet.Health <= 0 {
			world.Manager.DeleteEntity(bulletEntity)
		}
	}))

	// Collision between player and enemy bullets: the player loses a life and
	// 1000 points. The raw visitor stops after the first hit.
	world.Manager.Join(gameComponents.Player, gameComponents.Controllable, world.Components.Engine.SpriteRender, world.Components.Engine.Transform).Visit(ecs.Visit(func(playerEntity ecs.Entity) {
		playerControllable := gameComponents.Controllable.Get(playerEntity).(*gc.Controllable)
		playerTranslation := world.Components.Engine.Transform.Get(playerEntity).(*ec.Transform).Translation

		world.Manager.Join(gameComponents.Enemy, gameComponents.Bullet, world.Components.Engine.Transform).Visit(
			func(index int) (skip bool) {
				enemyBulletEntity := ecs.Entity(index)
				enemyBullet := gameComponents.Bullet.Get(enemyBulletEntity).(*gc.Bullet)
				enemyBulletTranslation := world.Components.Engine.Transform.Get(enemyBulletEntity).(*ec.Transform).Translation

				if !rectangleCollision(playerTranslation.X, playerTranslation.Y, playerControllable.Width, playerControllable.Height, enemyBulletTranslation.X, enemyBulletTranslation.Y, enemyBullet.Width, enemyBullet.Height) {
					return false
				}

				world.Manager.DeleteEntity(enemyBulletEntity)
				gameEvents.LifeEvents = append(gameEvents.LifeEvents, resources.LifeEvent{})
				gameEvents.ScoreEvents = append(gameEvents.ScoreEvents, resources.ScoreEvent{Score: -1000})

				audioPlayers["explosion"].Rewind()
				audioPlayers["explosion"].Play()
				return true
			})
	}))

	// Finish level if no alien are left (the alien master does not count).
	if world.Manager.Join(gameComponents.Alien, gameComponents.AlienMaster.Not()).Empty() {
		gameResources.StateEvent = resources.StateEventLevelComplete
	}
}

// rectangleCollision reports whether two axis-aligned rectangles overlap.
// Each rectangle is given by its center point and its width/height.
func rectangleCollision(r1X, r1Y, r1Width, r1Height, r2X, r2Y, r2Width, r2Height float64) bool {
	return r1X-r1Width/2-r2Width/2 <= r2X && r2X <= r1X+r1Width/2+r2Width/2 &&
		r1Y-r1Height/2-r2Height/2 <= r2Y && r2Y <= r1Y+r1Height/2+r2Height/2
}
lib/systems/collision.go
0.615203
0.409575
collision.go
starcoder
package ts

// ArrayPkt implements Pkt interface and represents array of bytes that
// contains one MPEG-TS packet.
type ArrayPkt [PktLen]byte

// Slice returns a reference to the content of p as SlicePkt.
func (p *ArrayPkt) Slice() SlicePkt {
	return SlicePkt(p[:])
}

// Bytes returns the raw packet bytes (shared with p, not a copy).
func (p *ArrayPkt) Bytes() []byte {
	return p[:]
}

// Copy overwrites p with the content of pkt.
func (p *ArrayPkt) Copy(pkt Pkt) {
	copy(p[:], pkt.Bytes())
}

// SyncOK reports whether the first byte is the TS sync byte 0x47.
func (p *ArrayPkt) SyncOK() bool {
	return p[0] == 0x47
}

// SetSync writes the TS sync byte 0x47 into the first byte.
func (p *ArrayPkt) SetSync() {
	p[0] = 0x47
}

// Pid returns the 13-bit packet identifier stored in the low 5 bits of
// byte 1 and all of byte 2.
func (p *ArrayPkt) Pid() int16 {
	return int16(p[1]&0x1f)<<8 | int16(p[2])
}

// SetPid stores the 13-bit packet identifier. It panics if pid is outside
// [0, 8191] (a negative pid also panics, via the uint conversion).
func (p *ArrayPkt) SetPid(pid int16) {
	if uint(pid) > 8191 {
		panic("Bad PID")
	}
	p[1] = p[1]&0xe0 | byte(pid>>8)
	p[2] = byte(pid)
}

// CC returns the 4-bit continuity counter (low nibble of byte 3).
func (p *ArrayPkt) CC() int8 {
	return int8(p[3] & 0xf)
}

// SetCC stores the 4-bit continuity counter; only the low 4 bits of b are used.
func (p *ArrayPkt) SetCC(b int8) {
	p[3] = p[3]&0xf0 | byte(b)&0x0f
}

// IncCC increments the continuity counter, wrapping modulo 16.
func (p *ArrayPkt) IncCC() {
	b := p[3]
	p[3] = b&0xf0 | (b+1)&0x0f
}

// Flags packs the header flag bits: the high 3 bits of byte 1 (error, payload
// unit start, priority) and the high nibble of byte 3 (scrambling control,
// AF present, payload present) shifted into the low nibble.
func (p *ArrayPkt) Flags() PktFlags {
	return PktFlags(p[1]&0xe0 | (p[3] >> 4))
}

// SetFlags unpacks f back into bytes 1 and 3 (inverse of Flags).
func (p *ArrayPkt) SetFlags(f PktFlags) {
	p[1] = p[1]&0x1f | byte(f&0xf0)
	p[3] = p[3]&0x0f | byte(f<<4)
}

// AF returns the adaptation field, delegating to the slice representation.
func (p *ArrayPkt) AF() AF {
	return p.Slice().AF()
}

// Payload returns the payload bytes, delegating to the slice representation.
func (p *ArrayPkt) Payload() []byte {
	return p.Slice().Payload()
}

// ContainsError reports the transport error indicator (bit 7 of byte 1).
func (p *ArrayPkt) ContainsError() bool {
	return p[1]&0x80 != 0
}

// SetContainsError sets or clears the transport error indicator.
func (p *ArrayPkt) SetContainsError(b bool) {
	if b {
		p[1] |= 0x80
	} else {
		p[1] &^= 0x80
	}
}

// PayloadUnitStart reports the payload unit start indicator (bit 6 of byte 1).
func (p *ArrayPkt) PayloadUnitStart() bool {
	return p[1]&0x40 != 0
}

// SetPayloadUnitStart sets or clears the payload unit start indicator.
func (p *ArrayPkt) SetPayloadUnitStart(b bool) {
	if b {
		p[1] |= 0x40
	} else {
		p[1] &^= 0x40
	}
}

// Prio reports the transport priority flag (bit 5 of byte 1).
func (p *ArrayPkt) Prio() bool {
	return p[1]&0x20 != 0
}

// SetPrio sets or clears the transport priority flag.
func (p *ArrayPkt) SetPrio(b bool) {
	if b {
		p[1] |= 0x20
	} else {
		p[1] &^= 0x20
	}
}

// ScramblingCtrl returns the 2-bit scrambling control field (bits 7-6 of byte 3).
func (p *ArrayPkt) ScramblingCtrl() PktScramblingCtrl {
	return PktScramblingCtrl((p[3] >> 6) & 3)
}

// SetScramblingCtrl stores the 2-bit scrambling control field.
func (p *ArrayPkt) SetScramblingCtrl(sc PktScramblingCtrl) {
	p[3] = p[3]&0x3f | byte(sc&3)<<6
}

// ContainsAF reports whether an adaptation field is present (bit 5 of byte 3).
func (p *ArrayPkt) ContainsAF() bool {
	return p[3]&0x20 != 0
}

// SetContainsAF sets or clears the adaptation field present flag.
func (p *ArrayPkt) SetContainsAF(b bool) {
	if b {
		p[3] |= 0x20
	} else {
		p[3] &^= 0x20
	}
}

// ContainsPayload reports whether a payload is present (bit 4 of byte 3).
func (p *ArrayPkt) ContainsPayload() bool {
	return p[3]&0x10 != 0
}

// SetContainsPayload sets or clears the payload present flag.
func (p *ArrayPkt) SetContainsPayload(b bool) {
	if b {
		p[3] |= 0x10
	} else {
		p[3] &^= 0x10
	}
}
ts/arraypkt.go
0.685107
0.612715
arraypkt.go
starcoder
// Package geojson converts geom geometries to their GeoJSON representation.
package geojson

import (
	"encoding/json"
	"reflect"

	"github.com/ctessum/geom"
)

// pointCoordinates converts a single point to its GeoJSON [x, y] pair.
func pointCoordinates(point geom.Point) []float64 {
	return []float64{point.X, point.Y}
}

// pointsCoordinates converts a slice of points to GeoJSON coordinate pairs.
func pointsCoordinates(points []geom.Point) [][]float64 {
	coordinates := make([][]float64, len(points))
	for i, point := range points {
		coordinates[i] = pointCoordinates(point)
	}
	return coordinates
}

// pointssCoordinates converts a slice of paths (rings/lines) to nested
// GeoJSON coordinates.
func pointssCoordinates(pointss []geom.Path) [][][]float64 {
	coordinates := make([][][]float64, len(pointss))
	for i, points := range pointss {
		coordinates[i] = pointsCoordinates(points)
	}
	return coordinates
}

// pointsssCoordinates converts a slice of path groups (polygons) to the
// four-level coordinate nesting used by GeoJSON MultiPolygons.
func pointsssCoordinates(pointsss [][]geom.Path) [][][][]float64 {
	coordinates := make([][][][]float64, len(pointsss))
	for i, points := range pointsss {
		coordinates[i] = pointssCoordinates(points)
	}
	return coordinates
}

// ToGeoJSON converts g to its GeoJSON geometry representation.
// It returns an UnsupportedGeometryError for geometry types it does not
// recognize.
//
// The type switch binds g to the concrete type in each case, replacing the
// original's repeated `g.(T)` assertions with direct use of the typed value.
func ToGeoJSON(g geom.Geom) (*Geometry, error) {
	switch g := g.(type) {
	case geom.Point:
		return &Geometry{
			Type:        "Point",
			Coordinates: pointCoordinates(g),
		}, nil
	case geom.MultiPoint:
		return &Geometry{
			Type:        "MultiPoint",
			Coordinates: pointsCoordinates(g),
		}, nil
	case geom.LineString:
		return &Geometry{
			Type:        "LineString",
			Coordinates: pointsCoordinates(g),
		}, nil
	case geom.MultiLineString:
		// Re-type each line as a Path so the shared helper can be used.
		paths := make([]geom.Path, len(g))
		for i, line := range g {
			paths[i] = geom.Path(line)
		}
		return &Geometry{
			Type:        "MultiLineString",
			Coordinates: pointssCoordinates(paths),
		}, nil
	case geom.Polygon:
		return &Geometry{
			Type:        "Polygon",
			Coordinates: pointssCoordinates(g),
		}, nil
	case geom.MultiPolygon:
		pathsList := make([][]geom.Path, len(g))
		for i, poly := range g {
			pathsList[i] = []geom.Path(poly)
		}
		return &Geometry{
			Type:        "MultiPolygon",
			Coordinates: pointsssCoordinates(pathsList),
		}, nil
	default:
		return nil, &UnsupportedGeometryError{reflect.TypeOf(g).String()}
	}
}

// Encode converts g to GeoJSON and marshals it to JSON bytes.
func Encode(g geom.Geom) ([]byte, error) {
	object, err := ToGeoJSON(g)
	if err != nil {
		return nil, err
	}
	return json.Marshal(object)
}
encoding/geojson/encode.go
0.665193
0.543227
encode.go
starcoder
package ioutil

import (
	"io"
)

// A DataInput provides helpers for reading bytes from a binary stream and interprets the data for any primitive Go
// type. A DataInput is always tied to a specific endianness. A DataInput should not be considered thread safe.
// As soon as any error occurred, any call is a no-op and will result in the same error state.
type DataInput interface {
	// ReadBlob reads a prefixed byte slice
	ReadBlob(p IntSize) []byte

	// ReadUTF8 reads a prefixed unmodified utf8 string sequence
	ReadUTF8(p IntSize) string

	// ReadBool reads one byte and returns 0 if the byte is zero, otherwise true
	ReadBool() bool

	// ReadUint8 reads one byte
	ReadUint8() uint8

	// ReadBytes just reads a bunch of bytes into a newly allocated buffer
	ReadBytes(len int) []byte

	// ReadUint16 reads 2 bytes and interprets them as unsigned
	ReadUint16() uint16

	// ReadUint24 reads 3 bytes and interprets them as unsigned
	ReadUint24() uint32

	// ReadUint32 reads 4 bytes and interprets them as unsigned
	ReadUint32() uint32

	// ReadUint40 reads 5 bytes and interprets them as unsigned
	ReadUint40() uint64

	// ReadUint48 reads 6 bytes and interprets them as unsigned
	ReadUint48() uint64

	// ReadUint56 reads 7 bytes and interprets them as unsigned
	ReadUint56() uint64

	// ReadUint64 reads 8 bytes and interprets them as unsigned
	ReadUint64() uint64

	// ReadInt8 reads one byte
	ReadInt8() int8

	// ReadInt16 reads 2 bytes and interprets them as signed
	ReadInt16() int16

	// ReadInt24 reads 3 bytes and interprets them as signed
	ReadInt24() int32

	// ReadInt32 reads 4 bytes and interprets them as signed
	ReadInt32() int32

	// ReadInt40 reads 5 bytes and interprets them as signed
	ReadInt40() int64

	// ReadInt48 reads 6 bytes and interprets them as signed
	ReadInt48() int64

	// ReadInt56 reads 7 bytes and interprets them as signed
	ReadInt56() int64

	// ReadInt64 reads 8 bytes and interprets them as signed
	ReadInt64() int64

	// ReadUvarint reads a variable length unsigned integer, up to 10 bytes,
	// in protobuf varint encoding (zig-zag applies to the signed ReadVarint).
	ReadUvarint() uint64

	// ReadVarint reads a variable length and signed integer, up to 10 bytes using zig-zag protobuf encoding.
	ReadVarint() int64

	// ReadFloat32 reads 4 bytes and interprets them as a float32 IEEE 754 4 byte bit sequence.
	ReadFloat32() float32

	// ReadFloat64 reads 8 bytes and interprets them as a float64 IEEE 754 8 byte bit sequence.
	ReadFloat64() float64

	// ReadComplex64 reads two float32 IEEE 754 4 byte bit sequences for the real and imaginary parts.
	ReadComplex64() complex64

	// ReadComplex128 reads two float64 IEEE 754 8 byte bit sequences for the real and imaginary parts.
	ReadComplex128() complex128

	// ReadFull reads exactly len(b) bytes. If an error occurs returns the number of read bytes.
	ReadFull(b []byte) int

	// Error returns the first occurred error. Each call to any Read* method may cause an error.
	Error() error

	io.Reader
	io.ByteReader
}

// NewDataInput creates a new DataInput instance according to the given byte order
func NewDataInput(order ByteOrder, reader io.Reader) DataInput {
	return dataInputImpl{decoder: NewDecoder(reader, true), order: order}
}

// Compile-time check that dataInputImpl satisfies DataInput.
var _ DataInput = (*dataInputImpl)(nil)

// dataInputImpl delegates every call to an underlying Decoder, supplying the
// configured byte order where needed.
type dataInputImpl struct {
	order   ByteOrder
	decoder *Decoder
}

func (d dataInputImpl) ReadBytes(len int) []byte {
	return d.decoder.ReadBytes(len)
}

func (d dataInputImpl) ReadUint16() uint16 {
	return d.decoder.ReadUint16(d.order)
}

func (d dataInputImpl) ReadUint24() uint32 {
	return d.decoder.ReadUint24(d.order)
}

func (d dataInputImpl) ReadUint32() uint32 {
	return d.decoder.ReadUint32(d.order)
}

func (d dataInputImpl) ReadUint40() uint64 {
	return d.decoder.ReadUint40(d.order)
}

func (d dataInputImpl) ReadUint48() uint64 {
	return d.decoder.ReadUint48(d.order)
}

func (d dataInputImpl) ReadUint56() uint64 {
	return d.decoder.ReadUint56(d.order)
}

func (d dataInputImpl) ReadUint64() uint64 {
	return d.decoder.ReadUint64(d.order)
}

func (d dataInputImpl) ReadComplex64() complex64 {
	return d.decoder.ReadComplex64(d.order)
}

func (d dataInputImpl) ReadComplex128() complex128 {
	return d.decoder.ReadComplex128(d.order)
}

func (d dataInputImpl) ReadInt40() int64 {
	return d.decoder.ReadInt40(d.order)
}

func (d dataInputImpl) ReadFloat32() float32 {
	return d.decoder.ReadFloat32(d.order)
}

func (d dataInputImpl) ReadFloat64() float64 {
	return d.decoder.ReadFloat64(d.order)
}

func (d dataInputImpl) ReadUint8() uint8 {
	return d.decoder.ReadUint8()
}

func (d dataInputImpl) ReadByte() (byte, error) {
	return d.decoder.ReadByte()
}

func (d dataInputImpl) ReadInt8() int8 {
	return d.decoder.ReadInt8()
}

func (d dataInputImpl) ReadBlob(p IntSize) []byte {
	return d.decoder.ReadBlob(d.order, p)
}

func (d dataInputImpl) ReadUTF8(p IntSize) string {
	return d.decoder.ReadUTF8(d.order, p)
}

func (d dataInputImpl) ReadBool() bool {
	return d.decoder.ReadBool()
}

// NOTE(review): the following ReadUInt* methods (capital "I") duplicate the
// ReadUint* methods above and are not part of the DataInput interface.
// They look like dead leftovers from a rename — consider removing after
// confirming no package-internal caller uses them.

func (d dataInputImpl) ReadUInt16() uint16 {
	return d.decoder.ReadUint16(d.order)
}

func (d dataInputImpl) ReadUInt24() uint32 {
	return d.decoder.ReadUint24(d.order)
}

func (d dataInputImpl) ReadUInt32() uint32 {
	return d.decoder.ReadUint32(d.order)
}

func (d dataInputImpl) ReadUInt64() uint64 {
	return d.decoder.ReadUint64(d.order)
}

func (d dataInputImpl) ReadInt16() int16 {
	return d.decoder.ReadInt16(d.order)
}

func (d dataInputImpl) ReadInt24() int32 {
	return d.decoder.ReadInt24(d.order)
}

func (d dataInputImpl) ReadInt32() int32 {
	return d.decoder.ReadInt32(d.order)
}

func (d dataInputImpl) ReadInt48() int64 {
	return d.decoder.ReadInt48(d.order)
}

func (d dataInputImpl) ReadInt56() int64 {
	return d.decoder.ReadInt56(d.order)
}

func (d dataInputImpl) ReadInt64() int64 {
	return d.decoder.ReadInt64(d.order)
}

func (d dataInputImpl) ReadUvarint() uint64 {
	return d.decoder.ReadUvarint()
}

func (d dataInputImpl) ReadVarint() int64 {
	return d.decoder.ReadVarint()
}

func (d dataInputImpl) Read(buf []byte) (int, error) {
	return d.decoder.Read(buf)
}

func (d dataInputImpl) ReadFull(b []byte) int {
	return d.decoder.ReadFull(b)
}

func (d dataInputImpl) Error() error {
	return d.decoder.Error()
}
datainput.go
0.663996
0.748915
datainput.go
starcoder
package mvcc import ( "github.com/jrife/flock/storage/kv" "github.com/jrife/flock/storage/kv/keys" ) // NamespaceView returns a view that will prefix // all keys with ns func NamespaceView(view View, ns []byte) View { if len(ns) == 0 { return view } return &namespacedView{MapReader: kv.NamespaceMapReader(view, ns), view: view, ns: ns} } var _ (View) = (*namespacedView)(nil) type namespacedView struct { kv.MapReader view View ns []byte } func (nsView *namespacedView) Next() (View, error) { next, err := nsView.view.Next() if err != nil { return nil, err } return &namespacedView{ MapReader: nsView.MapReader, view: next, ns: nsView.ns, }, nil } func (nsView *namespacedView) Prev() (View, error) { prev, err := nsView.view.Prev() if err != nil { return nil, err } return &namespacedView{ MapReader: nsView.MapReader, view: prev, ns: nsView.ns, }, nil } func (nsView *namespacedView) Changes(keys keys.Range) (DiffIterator, error) { iterator, err := nsView.view.Changes(keys.Namespace(nsView.ns)) if err != nil { return nil, err } return &namespacedDiffIterator{DiffIterator: iterator, ns: nsView.ns}, nil } func (nsView *namespacedView) Revision() int64 { return nsView.view.Revision() } type namespacedDiffIterator struct { DiffIterator key keys.Key ns keys.Key } func (nsIter *namespacedDiffIterator) Next() bool { if !nsIter.DiffIterator.Next() { nsIter.key = nil return false } // strip the namespace prefix nsIter.key = nsIter.DiffIterator.Key()[len(nsIter.ns):] return true } func (nsIter *namespacedDiffIterator) Key() []byte { return nsIter.key } // NamespaceRevision returns a revision that will // prefix all keys with ns func NamespaceRevision(revision Revision, ns []byte) Revision { if len(ns) == 0 { return revision } return &namespacedRevision{MapUpdater: kv.NamespaceMapUpdater(revision, ns), View: NamespaceView(revision, ns)} } var _ (Revision) = (*namespacedRevision)(nil) type namespacedRevision struct { kv.MapUpdater View }
storage/mvcc/namespace.go
0.720663
0.402627
namespace.go
starcoder
package export import "github.com/prometheus/client_golang/prometheus" // IngestionKafkaExporter contains all the Prometheus metrics that are possible to gather from the Jetty service type IngestionKafkaExporter struct { Lag *prometheus.GaugeVec `description:"total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute"` MaxLag *prometheus.GaugeVec `description:"max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute"` AVGLag *prometheus.GaugeVec `description:"average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute"` } // NewIngestionKafkaExporter returns a new Jetty exporter object func NewIngestionKafkaExporter() *IngestionKafkaExporter { ik := &IngestionKafkaExporter{ Lag: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "druid", Subsystem: "realtime", Name: "ingest_kafka_lag", Help: "total lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute", }, []string{"dataSource"}), MaxLag: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "druid", Subsystem: "realtime", Name: "ingest_kafka_max_lag", Help: "max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute", }, []string{"dataSource"}), AVGLag: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "druid", Subsystem: "realtime", Name: "ingest_kafka_avg_lag", Help: "average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute", }, []string{"dataSource"}), } // register all the prometheus metrics prometheus.MustRegister(ik.Lag) prometheus.MustRegister(ik.MaxLag) prometheus.MustRegister(ik.AVGLag) return ik } // SetLag . func (ik *IngestionKafkaExporter) SetLag(source string, val float64) { ik.Lag.With(prometheus.Labels{"dataSource": source}).Add(val) } // SetMaxLag . func (ik *IngestionKafkaExporter) SetMaxLag(source string, val float64) { ik.MaxLag.With(prometheus.Labels{"dataSource": source}).Add(val) } // SetAVGLag . func (ik *IngestionKafkaExporter) SetAVGLag(source string, val float64) { ik.AVGLag.With(prometheus.Labels{"dataSource": source}).Add(val) }
pkg/export/ingestion_kafka.go
0.841435
0.425784
ingestion_kafka.go
starcoder
package vector // vector encapsulates a slice and provides the algorithms (push, pop, for_each, map...) // that are missing from the slices // it offers the method to reserve and shrink the memory it occupies, improving the // performance // note the Go type reference suggests: type Vector[T any] []T // this looks short and handy but has a few flaws: // - pop/try-pop is O(n) instead of O(1) (since I have to resort to slice manipulation) // - calling the "normal" functions such as len/append/cap etc. requires a type parameter // - the growth factor is not explicitly set const vectorGrowthFactor int = 2 type Vector[T any] struct { xs []T capacity int size int } func (vec *Vector[T]) Capacity() int { return vec.capacity } func (vec *Vector[T]) Size() int { return vec.size } func (vec *Vector[T]) Empty() bool { return vec.size == 0 } func (vec *Vector[T]) Reserve(additional int) { if additional > 0 { vec.reallocate(vec.capacity + additional) } } func (vec *Vector[T]) reallocate(newCapacity int) { if newCapacity <= vec.capacity { return } xs := make([]T, newCapacity) copy(xs, vec.xs) vec.xs = xs vec.capacity = newCapacity } func (vec *Vector[T]) ShrinkToFit() { vec.reallocate(vec.size) } func (vec Vector[T]) ToSlice() []T { return vec.xs[:vec.size] } // Get does not perform boundary check func (vec *Vector[T]) Get(i int) T { return vec.xs[i] } // Set does not perform boundary check func (vec *Vector[T]) Set(i int, x T) { vec.xs[i] = x } // Back does not perform boundary check func (vec *Vector[T]) Back() T { return vec.xs[vec.size-1] } // Swap does not perform boundary check func (vec *Vector[T]) Swap(i int, j int) { tmp := vec.Get(i) vec.xs[i] = vec.xs[j] vec.xs[j] = tmp } // ResetSize resets the vector size to 0 (effectively making all the elements unavailable) // It will not cause deallocation (i.e., it is faster than deallocation, aka Clear()) // To trigger the deletion of all the elements (GC), use Clear() func (vec *Vector[T]) ResetSize() { vec.size = 0 } // Clear 
deallocate all the elements and reset the vector size to 0; // Slower than ResetSize, but sometimes can be the desirable option. func (vec *Vector[T]) Clear() { xs := make([]T, vec.capacity) vec.xs = xs vec.size = 0 }
pkg/vector/vector.go
0.719186
0.604545
vector.go
starcoder
// Package amd64: three-argument instruction lowering (dst = a OP b) for the
// gomacro JIT assembler.
package amd64

import (
	"errors"
)

// ============================================================================
// tree-arg instruction

// Op3 emits dst = a OP b and returns the assembler for chaining.
// dst = a OP b
func (arch Amd64) Op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) *Asm {
	arch.op3(asm, op, a, b, dst)
	return asm
}

var op3KindError = errors.New("Amd64.op3: arguments a, b, dst must have the same kind")

// op3 validates the operands, tries peephole optimization, dispatches the
// special-cased operations (mul/div/rem/indexing) and finally lowers the
// generic case to a two-argument op, spilling through a scratch register
// when dst aliases b.
func (arch Amd64) op3(asm *Asm, op Op3, a Arg, b Arg, dst Arg) Amd64 {
	// validate kinds
	switch op {
	case SHL3, SHR3:
		// shift amount may have a different (unsigned) kind than the value
		assert(a.Kind() == dst.Kind())
		assert(!b.Kind().Signed())
	case SETIDX, GETIDX:
		// the base address must be a full 8-byte pointer-sized value
		assert(a.Kind().Size() == 8)
	default:
		if a.Kind() != dst.Kind() || b.Kind() != dst.Kind() {
			panic(op3KindError)
			// assert(a.Kind() == dst.Kind())
			// assert(b.Kind() == dst.Kind())
		}
	}
	// validate dst
	switch dst.(type) {
	case Reg, Mem:
		break
	case Const:
		// SETIDX writes *into* memory addressed by a[b]; dst then holds the
		// constant value to store, so a Const dst is legal only there.
		if op != SETIDX {
			errorf("destination cannot be a constant: %v %v, %v, %v", op, a, b, dst)
		}
	default:
		errorf("unknown destination type %T, expecting Reg or Mem: %v %v, %v, %v", dst, op, a, b, dst)
	}
	if asm.Optimize3(op, a, b, dst) {
		return arch
	}
	switch op {
	case MUL3:
		return arch.mul3(asm, a, b, dst)
	case DIV3, REM3:
		return arch.divrem(asm, op, a, b, dst)
	case GETIDX, SETIDX:
		return arch.index(asm, op, a, b, dst)
	}
	// generic lowering: reduce to the two-operand form dst OP= x
	op2 := Op2(op)
	if a == dst {
		arch.op2(asm, op2, b, dst)
	} else if op.IsCommutative() && b == dst {
		arch.op2(asm, op2, a, dst)
	} else if r, ok := dst.(Reg); ok && r.RegId() != b.RegId() {
		arch.mov(asm, a, dst).op2(asm, op2, b, dst)
	} else {
		// dst aliases b (or is memory): compute in a scratch register first
		r := asm.RegAlloc(dst.Kind())
		arch.mov(asm, a, r).op2(asm, op2, b, r).mov(asm, r, dst)
		asm.RegFree(r)
	}
	return arch
}

// index emits either a[b] = val (SETIDX) or val = a[b] (GETIDX).
// It normalizes a, b and val into registers (allocating scratch registers as
// needed), takes several constant-offset fast paths, then delegates to the
// Reg/Reg/Reg or Reg/Reg/Const encoders.
// either a[b] = val or val = a[b]
func (arch Amd64) index(asm *Asm, op Op3, a Arg, b Arg, val Arg) Amd64 {
	var ra, rb, rval Reg
	var rconst bool
	switch val := val.(type) {
	case Reg:
		rval = val
	case Const:
		// only SETIDX
		cval := val.Val()
		if cval == int64(int32(cval)) {
			// fits in a 32-bit immediate: can be stored directly
			rconst = true
		} else {
			rval = asm.RegAlloc(val.Kind())
			defer asm.RegFree(rval)
		}
	case Mem:
		rval = asm.RegAlloc(Uint64)
		defer asm.RegFree(rval)
	}
	switch a := a.(type) {
	case Reg:
		ra = a
	case Mem:
		ra = asm.RegAlloc(Uint64)
		arch.load(asm, a, ra)
		defer asm.RegFree(ra)
	case Const:
		// depending on b's type, could be optimized as MOV Mem Reg
		ra = asm.RegAlloc(Uint64)
		arch.movConstReg(asm, a, ra)
		defer asm.RegFree(ra)
	}
	// b.Kind().Size() could be < 8
	switch b := b.(type) {
	case Reg:
		// widen the index register to 64 bits in place
		rbx := b
		rb = MakeReg(rbx.RegId(), Uint64)
		arch.cast(asm, rbx, rb)
	case Mem:
		rb = asm.RegAlloc(Uint64)
		arch.cast(asm, b, rb)
		defer asm.RegFree(rb)
	case Const:
		// constant index: if base+offset fits in 32 bits, fold it into a
		// plain MOV with displacement instead of a SIB-indexed access
		k := val.Kind()
		idx := b.Val()
		off := idx * int64(k.Size())
		if idx == int64(int32(idx)) && off == int64(int32(off)) {
			if op == GETIDX {
				// optimize as MOV Mem Reg
				arch.load(asm, MakeMem(int32(off), ra.RegId(), k), rval)
				arch.mov(asm, rval, val)
			} else if rconst {
				// optimize as MOV Const Mem
				arch.movConstMem(asm, val.(Const), MakeMem(int32(off), ra.RegId(), k))
			} else {
				// optimize as MOV Reg Mem
				arch.mov(asm, val, rval)
				arch.store(asm, rval, MakeMem(int32(off), ra.RegId(), k))
			}
			return arch
		}
		rb = asm.RegAlloc(Uint64)
		arch.movConstReg(asm, b, rb)
		defer asm.RegFree(rb)
	}
	switch op {
	case SETIDX:
		if rconst {
			return arch.indexRegRegConst(asm, ra, rb, val.(Const))
		}
		// NOTE(review): mov(rval, rval) is a self-move; presumably this was
		// meant to be mov(val, rval) to load the value into the scratch
		// register before the store — confirm against the Mem/large-Const
		// val paths above.
		arch.mov(asm, rval, rval)
		arch.indexRegRegReg(asm, op, ra, rb, rval)
	case GETIDX:
		arch.indexRegRegReg(asm, op, ra, rb, rval)
		arch.mov(asm, rval, val)
	}
	return arch
}

// indexRegRegReg encodes MOV between val and the memory operand [ra+rb*size]
// (opcode 0x88/0x89 for stores, 0x8A/0x8B for loads), emitting the REX-style
// prefix, ModRM, SIB byte and a zero displacement whose width offlen() chose.
// either a[b] = val or val = a[b]
func (arch Amd64) indexRegRegReg(asm *Asm, op Op3, a Reg, b Reg, val Reg) Amd64 {
	// RSP cannot be used as SIB index register
	assert(b.RegId() != RSP)
	alo, ahi := lohi(a)
	blo, bhi := lohi(b)
	vlo, vhi := lohi(val)
	hi := vhi<<2 | bhi<<1 | ahi
	kind := val.Kind()
	size := kind.Size()
	// SIB scale bits select element size 1/2/4/8
	scalebit := map[Size]uint8{1: 0x00, 2: 0x40, 4: 0x80, 8: 0xC0}[size]
	offlen, offbit := offlen(MakeMem(0, a.RegId(), kind), a.RegId())
	op_ := uint8(0x88) // MOV r8 -> m8 (store)
	if op == GETIDX {
		op_ = 0x8A // MOV m8 -> r8 (load)
	}
	switch size {
	case 1:
		if hi == 0 {
			asm.Bytes(op_, offbit|vlo<<3|0x04, scalebit|blo<<3|alo)
		} else {
			asm.Bytes(0x40|hi, op_, offbit|vlo<<3|0x04, scalebit|blo<<3|alo)
		}
	case 2:
		// operand-size override prefix, then fall into the 4-byte encoding
		asm.Byte(0x66)
		fallthrough
	case 4:
		if hi == 0 {
			asm.Bytes(op_|1, offbit|vlo<<3|0x04, scalebit|blo<<3|alo)
		} else {
			asm.Bytes(0x40|hi, op_|1, offbit|vlo<<3|0x04, scalebit|blo<<3|alo)
		}
	case 8:
		// REX.W prefix for 64-bit operand size
		asm.Bytes(0x48|hi, op_|1, offbit|vlo<<3|0x04, scalebit|blo<<3|alo)
	}
	// displacement (always zero here; width chosen by offlen)
	switch offlen {
	case 1:
		asm.Int8(0)
	case 4:
		asm.Int32(0)
	}
	return arch
}

// indexRegRegConst encodes the store of an immediate into [a+b*size]
// (opcode 0xC6/0xC7, MOV imm -> mem), followed by the displacement and the
// immediate itself (8 bytes still take a sign-extended 32-bit immediate).
// a[b] = const
func (arch Amd64) indexRegRegConst(asm *Asm, a Reg, b Reg, c Const) Amd64 {
	// RSP cannot be used as SIB index register
	assert(b.RegId() != RSP)
	alo, ahi := lohi(a)
	blo, bhi := lohi(b)
	hi := bhi<<1 | ahi
	kind := c.Kind()
	size := kind.Size()
	scalebit := map[Size]uint8{1: 0x00, 2: 0x40, 4: 0x80, 8: 0xC0}[size]
	offlen, offbit := offlen(MakeMem(0, a.RegId(), kind), a.RegId())
	switch size {
	case 1:
		if hi == 0 {
			asm.Bytes(0xC6, offbit|0x04, scalebit|blo<<3|alo)
		} else {
			asm.Bytes(0x40|hi, 0xC6, offbit|0x04, scalebit|blo<<3|alo)
		}
	case 2:
		asm.Byte(0x66)
		fallthrough
	case 4:
		if hi == 0 {
			asm.Bytes(0xC7, offbit|0x04, scalebit|blo<<3|alo)
		} else {
			asm.Bytes(0x40|hi, 0xC7, offbit|0x04, scalebit|blo<<3|alo)
		}
	case 8:
		asm.Bytes(0x48|hi, 0xC7, offbit|0x04, scalebit|blo<<3|alo)
	}
	switch offlen {
	case 1:
		asm.Int8(0)
	case 4:
		asm.Int32(0)
	}
	// immediate operand; 64-bit stores use a sign-extended 32-bit immediate
	switch size {
	case 1:
		asm.Int8(int8(c.Val()))
	case 2:
		asm.Int16(int16(c.Val()))
	case 4, 8:
		asm.Int32(int32(c.Val()))
	}
	return arch
}
0.518059
0.425009
op3.go
starcoder
// Package govalidator: validator function types and the tag registries that
// map struct-tag names to validator implementations.
package govalidator

import (
	"reflect"
	"regexp"
	"sort"
	"sync"
)

// Validator is a wrapper for a validator function that returns bool and accepts string.
type Validator func(str string) bool

// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
// Third param is the HTTP request, if any related to this request
type CustomTypeValidator func(params *CustomValidatorParams) bool

// ParamValidator is a wrapper for validator functions that accept additional parameters.
type ParamValidator func(str string, params ...string) bool

// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value
type InterfaceParamValidator func(in interface{}, params ...string) bool

// tagOptionsMap maps a validator tag name to its parsed option, preserving
// the declaration order via tagOption.order.
type tagOptionsMap map[string]tagOption

// orderedKeys returns the tag names sorted by their declaration order, so
// validators run in the order they appear in the struct tag.
func (t tagOptionsMap) orderedKeys() []string {
	var keys []string
	for k := range t {
		keys = append(keys, k)
	}

	sort.Slice(keys, func(a, b int) bool {
		return t[keys[a]].order < t[keys[b]].order
	})

	return keys
}

// tagOption holds one parsed validator tag: its name, an optional custom
// error message, and its position within the tag string.
type tagOption struct {
	name               string
	customErrorMessage string
	order              int
}

// UnsupportedTypeError is a wrapper for reflect.Type
type UnsupportedTypeError struct {
	Type reflect.Type
}

// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value

// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value
var InterfaceParamTagMap = map[string]InterfaceParamValidator{
	"type": IsType,
}

// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
	"type": regexp.MustCompile(`^type\((.*)\)$`),
}

// ParamTagMap is a map of functions accept variants parameters
var ParamTagMap = map[string]ParamValidator{
	"length":          ByteLength,
	"range":           Range,
	"runelength":      RuneLength,
	"stringlength":    StringLength,
	"matches":         StringMatches,
	"in":              IsInRaw,
	"rsapub":          IsRsaPub,
	"minstringlength": MinStringLength,
	"maxstringlength": MaxStringLength,
}

// ParamTagRegexMap maps param tags to their respective regexes.
var ParamTagRegexMap = map[string]*regexp.Regexp{
	"range":           regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
	"length":          regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
	"runelength":      regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
	"stringlength":    regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
	"in":              regexp.MustCompile(`^in\((.*)\)`),
	"matches":         regexp.MustCompile(`^matches\((.+)\)$`),
	"rsapub":          regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
	"minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
	"maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
}

// customTypeTagMap is a registry of custom-type validators guarded by an
// RWMutex, since user code may register validators concurrently with
// validation lookups.
type customTypeTagMap struct {
	validators map[string]CustomTypeValidator

	sync.RWMutex
}

// Get returns the validator registered under name and whether it exists.
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
	tm.RLock()
	defer tm.RUnlock()
	v, ok := tm.validators[name]
	return v, ok
}

// Set registers (or replaces) the validator under name.
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
	tm.Lock()
	defer tm.Unlock()
	tm.validators[name] = ctv
}

// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
// `type UUID [16]byte` (this would be handled as an array of bytes).
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}

// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
var TagMap = map[string]Validator{ "email": IsEmail, "url": IsURL, "dialstring": IsDialString, "requrl": IsRequestURL, "requri": IsRequestURI, "alpha": IsAlpha, "utfletter": IsUTFLetter, "alphanum": IsAlphanumeric, "utfletternum": IsUTFLetterNumeric, "numeric": IsNumeric, "utfnumeric": IsUTFNumeric, "utfdigit": IsUTFDigit, "hexadecimal": IsHexadecimal, "hexcolor": IsHexcolor, "rgbcolor": IsRGBcolor, "lowercase": IsLowerCase, "uppercase": IsUpperCase, "int": IsInt, "float": IsFloat, "null": IsNull, "notnull": IsNotNull, "uuid": IsUUID, "uuidv3": IsUUIDv3, "uuidv4": IsUUIDv4, "uuidv5": IsUUIDv5, "creditcard": IsCreditCard, "isbn10": IsISBN10, "isbn13": IsISBN13, "json": IsJSON, "multibyte": IsMultibyte, "ascii": IsASCII, "printableascii": IsPrintableASCII, "fullwidth": IsFullWidth, "halfwidth": IsHalfWidth, "variablewidth": IsVariableWidth, "base64": IsBase64, "datauri": IsDataURI, "ip": IsIP, "port": IsPort, "ipv4": IsIPv4, "ipv6": IsIPv6, "dns": IsDNSName, "host": IsHost, "mac": IsMAC, "latitude": IsLatitude, "longitude": IsLongitude, "ssn": IsSSN, "semver": IsSemver, "rfc3339": IsRFC3339, "rfc3339WithoutZone": IsRFC3339WithoutZone, "ISO3166Alpha2": IsISO3166Alpha2, "ISO3166Alpha3": IsISO3166Alpha3, "ISO4217": IsISO4217, "IMEI": IsIMEI, "ulid": IsULID, } // ISO3166Entry stores country codes type ISO3166Entry struct { EnglishShortName string FrenchShortName string Alpha2Code string Alpha3Code string Numeric string } //ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" var ISO3166List = []ISO3166Entry{ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, {"Albania", "Albanie (l')", "AL", "ALB", "008"}, {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, {"Andorra", "Andorre (l')", "AD", "AND", "020"}, {"Angola", "Angola (l')", "AO", "AGO", "024"}, {"Antigua and Barbuda", "Antigua-et-Barbuda", 
"AG", "ATG", "028"}, {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, {"Australia", "Australie (l')", "AU", "AUS", "036"}, {"Austria", "Autriche (l')", "AT", "AUT", "040"}, {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, {"Canada", "Canada (le)", "CA", "CAN", "124"}, {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, {"Chad", "Tchad (le)", "TD", "TCD", "148"}, {"Chile", "Chili 
(le)", "CL", "CHL", "152"}, {"China", "Chine (la)", "CN", "CHN", "156"}, {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, {"Colombia", "Colombie (la)", "CO", "COL", "170"}, {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, {"Mayotte", "Mayotte", "YT", "MYT", "175"}, {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, {"Cuba", "Cuba", "CU", "CUB", "192"}, {"Cyprus", "Chypre", "CY", "CYP", "196"}, {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, {"El Salvador", "El Salvador", "SV", "SLV", "222"}, {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, {"Estonia", "Estonie (l')", "EE", "EST", "233"}, {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, {"Finland", "Finlande (la)", "FI", "FIN", "246"}, {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, {"France", "France (la)", "FR", "FRA", "250"}, {"French Guiana", 
"Guyane française (la )", "GF", "GUF", "254"}, {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, {"Kiribati", "Kiribati", "KI", "KIR", "296"}, {"Greece", "Grèce (la)", "GR", "GRC", "300"}, {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, {"Guam", "Guam", "GU", "GUM", "316"}, {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, {"Haiti", "Haïti", "HT", "HTI", "332"}, {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, {"Honduras", "Honduras (le)", "HN", "HND", "340"}, {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, {"Iceland", "Islande (l')", "IS", "ISL", "352"}, {"India", "Inde (l')", "IN", "IND", "356"}, {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, {"Israel", "Israël", "IL", "ISR", "376"}, {"Italy", "Italie (l')", "IT", "ITA", "380"}, {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, {"Japan", "Japon (le)", "JP", "JPN", "392"}, {"Kazakhstan", "Kazakhstan (le)", 
"KZ", "KAZ", "398"}, {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, {"Libya", "Libye (la)", "LY", "LBY", "434"}, {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, {"Macao", "Macao", "MO", "MAC", "446"}, {"Madagascar", "Madagascar", "MG", "MDG", "450"}, {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, {"Mali", "Mali (le)", "ML", "MLI", "466"}, {"Malta", "Malte", "MT", "MLT", "470"}, {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, {"Mauritius", "Maurice", "MU", "MUS", "480"}, {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, {"Monaco", "Monaco", "MC", "MCO", "492"}, {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, {"Montserrat", "Montserrat", "MS", "MSR", "500"}, {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, {"Oman", "Oman", "OM", "OMN", "512"}, {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, {"Nauru", "Nauru", "NR", "NRU", "520"}, {"Nepal", "Népal (le)", "NP", "NPL", 
"524"}, {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, {"Curaçao", "Curaçao", "CW", "CUW", "531"}, {"Aruba", "Aruba", "AW", "ABW", "533"}, {"<NAME> (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, {"Niue", "Niue", "NU", "NIU", "570"}, {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, {"Norway", "Norvège (la)", "NO", "NOR", "578"}, {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, {"Palau", "Palaos (les)", "PW", "PLW", "585"}, {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, {"Panama", "Panama (le)", "PA", "PAN", "591"}, {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, {"Peru", "Pérou (le)", "PE", "PER", "604"}, {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, {"Poland", "Pologne (la)", "PL", "POL", "616"}, {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, {"Réunion", "Réunion (La)", "RE", "REU", "638"}, {"Romania", "Roumanie (la)", "RO", "ROU", 
"642"}, {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, {"<NAME>", "Saint-Barthélemy", "BL", "BLM", "652"}, {"Saint Helena, Ascension and <NAME>", "Sainte-Hélène, Ascension et T<NAME>", "SH", "SHN", "654"}, {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, {"Anguilla", "Anguilla", "AI", "AIA", "660"}, {"<NAME>", "Sainte-Lucie", "LC", "LCA", "662"}, {"<NAME> (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, {"<NAME>", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, {"<NAME> and <NAME>", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, {"<NAME>", "Saint-Marin", "SM", "SMR", "674"}, {"<NAME>", "<NAME>-et-Principe", "ST", "STP", "678"}, {"<NAME>", "Ar<NAME> (l')", "SA", "SAU", "682"}, {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, {"Si<NAME>", "Sierra Leone (la)", "SL", "SLE", "694"}, {"Singapore", "Singapour", "SG", "SGP", "702"}, {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, {"Spain", "Espagne (l')", "ES", "ESP", "724"}, {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, {"Sweden", "Suède (la)", "SE", "SWE", "752"}, {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, {"Tajikistan", 
"Tadjikistan (le)", "TJ", "TJK", "762"}, {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, {"Togo", "Togo (le)", "TG", "TGO", "768"}, {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, {"Tonga", "Tonga (les)", "TO", "TON", "776"}, {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, {"Guernsey", "Guernesey", "GG", "GGY", "831"}, {"Jersey", "Jersey", "JE", "JEY", "832"}, {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, } // ISO4217List is the list of ISO currency codes var 
ISO4217List = []string{ "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", "DJF", "DKK", "DOP", "DZD", "EGP", "ERN", "ETB", "EUR", "FJD", "FKP", "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR", "IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", "VEF", "VES", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", "YER", "ZAR", "ZMW", "ZWL", } // ISO693Entry stores ISO language codes type ISO693Entry struct { Alpha3bCode string Alpha2Code string English string } //ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json var ISO693List = []ISO693Entry{ {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, {Alpha3bCode: "arg", Alpha2Code: "an", 
English: "Aragonese"}, {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, {Alpha3bCode: "dzo", Alpha2Code: "dz", English: 
"Dzongkha"}, {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, {Alpha3bCode: 
"ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, {Alpha3bCode: "mac", Alpha2Code: "mk", English: 
"Macedonian"}, {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, {Alpha3bCode: 
"roh", Alpha2Code: "rm", English: "Romansh"}, {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, {Alpha3bCode: "tsn", Alpha2Code: 
"tn", English: "Tswana"}, {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, } type CustomValidatorParams struct { Field string Value interface{} Context interface{} Extra interface{} } func NewCustomValidatorParams(field string, value interface{}, context interface{}, extra interface{}) *CustomValidatorParams { return &CustomValidatorParams{Field: field, Value: value, Context: context, Extra: extra} } func (cvp CustomValidatorParams) GetValueString() (strPtr *string) { switch cvp.Value.(type) { case string: str := cvp.Value.(string) if len(str) > 0 { strPtr = &str } case *string: if strPtr = cvp.Value.(*string); strPtr != nil && len(*strPtr) == 0 { strPtr = nil } } return } func (cvp CustomValidatorParams) GetValueInt() (intPtr *int) { switch cvp.Value.(type) { case int: newInt := cvp.Value.(int) intPtr = &newInt case *int: intPtr = cvp.Value.(*int) } return } func (cvp CustomValidatorParams) 
GetValueFloat() (floatPtr *float64) { switch cvp.Value.(type) { case float64: newFloat := cvp.Value.(float64) floatPtr = &newFloat case *float64: floatPtr = cvp.Value.(*float64) case float32: newFloat := float64(cvp.Value.(float32)) floatPtr = &newFloat case *float32: floatPtr32 := cvp.Value.(*float32) if floatPtr32 != nil { newFloat := float64(*floatPtr32) floatPtr = &newFloat } } return } func (cvp CustomValidatorParams) GetValueStringSlice() (strPtr []string) { if strSlice, ok := cvp.Value.([]string); ok { return strSlice } return nil } func (cvp CustomValidatorParams) GetExtraString() string { switch cvp.Extra.(type) { case string: return cvp.Extra.(string) case *string: if strPtr := cvp.Extra.(*string); strPtr != nil { return *strPtr } } return "" }
types.go
0.698946
0.431944
types.go
starcoder
package csg // Node is a node from a BSP tree type Node struct { plane *Plane front *Node back *Node polygons []*Polygon } // NewNodeFromPolygons constructs a node from a slice of polygons func NewNodeFromPolygons(p []*Polygon) *Node { n := &Node{} n.Build(p) return n } // Clone will clone this node and it's children func (n *Node) Clone() *Node { r := &Node{} if n.plane != nil { r.plane = n.plane.Clone() } if n.front != nil { r.front = n.front.Clone() } if n.back != nil { r.back = n.back.Clone() } r.polygons = make([]*Polygon, 0) for _, p := range n.polygons { r.polygons = append(r.polygons, p) } return r } // Invert flips all the normals of the polygons in this node and it's children func (n *Node) Invert() { for _, p := range n.polygons { p.Flip() } n.plane.Flip() if n.front != nil { n.front.Invert() } if n.back != nil { n.back.Invert() } temp := n.front n.front = n.back n.back = temp } func (n *Node) clipPolygons(splitter IPolygonSplitter, polygons []*Polygon) []*Polygon { if n.plane == nil { p := make([]*Polygon, 0) p = append(p, polygons...) return p } front := make([]*Polygon, 0, len(polygons)/5) back := make([]*Polygon, 0, len(polygons)/5) splitter.SplitPolygons(n.plane, polygons, &front, &back, &front, &back) if n.front != nil { front = n.front.clipPolygons(splitter, front) } if n.back != nil { back = n.back.clipPolygons(splitter, back) return append(front, back...) 
} return front } // ClipPolygons will clip the slice of polygons to this node and it's children func (n *Node) ClipPolygons(polygons []*Polygon) []*Polygon { return n.clipPolygons(n.getPolygonSplitter(len(polygons)), polygons) } // ClipTo will clip the node to this node and vice versa func (n *Node) ClipTo(bsp *Node) { n.polygons = bsp.ClipPolygons(n.polygons) if n.front != nil { n.front.ClipTo(bsp) } if n.back != nil { n.back.ClipTo(bsp) } } // AllPolygons will return all the polygons associated with this node and it's children func (n *Node) AllPolygons() []*Polygon { polygons := make([]*Polygon, 0) polygons = append(polygons, n.polygons...) if n.front != nil { polygons = append(polygons, n.front.AllPolygons()...) } if n.back != nil { polygons = append(polygons, n.back.AllPolygons()...) } return polygons } func (n *Node) build(splitter IPolygonSplitter, polygons []*Polygon) { if len(polygons) == 0 { return } if n.plane == nil { n.plane = polygons[0].Plane.Clone() } front := make([]*Polygon, 0) back := make([]*Polygon, 0) splitter.SplitPolygons(n.plane, polygons, &n.polygons, &n.polygons, &front, &back) if len(front) > 0 { if n.front == nil { n.front = &Node{} } n.front.Build(front) } if len(back) > 0 { if n.back == nil { n.back = &Node{} } n.back.Build(back) } } // getPolygonSplitter will use different polygon splitter implementations depending upon the // number of polygons to split, so the trade off here is managing multiple goroutines vs // a single go routine for a small number of polygons func (n *Node) getPolygonSplitter(numPolys int) IPolygonSplitter { var splitter IPolygonSplitter if numPolys > 1000 { splitter = &MultiCorePolygonSplitter{Target: &BasicPolygonSplitter{}} } else { splitter = &BasicPolygonSplitter{} } return splitter } //Build constructs a BSP tree for the given slice of polygons func (n *Node) Build(polygons []*Polygon) { n.build(n.getPolygonSplitter(len(polygons)), polygons) }
csg/node.go
0.709724
0.445047
node.go
starcoder
package types

import (
	"io"
	"strconv"

	"github.com/lyraproj/pcore/px"
)

// CallableType represents the Pcore Callable type: a parameter tuple, an
// optional return type, and an optional block type.
type CallableType struct {
	paramsType px.Type
	returnType px.Type
	blockType  px.Type // Callable or Optional[Callable]
}

// CallableMetaType is the Pcore meta type for Callable; assigned in init.
var CallableMetaType px.ObjectType

func init() {
	CallableMetaType = newObjectType(`Pcore::CallableType`, `Pcore::AnyType { attributes => { param_types => { type => Optional[Type[Tuple]], value => undef }, block_type => { type => Optional[Type[Callable]], value => undef }, return_type => { type => Optional[Type], value => undef } } }`, func(ctx px.Context, args []px.Value) px.Value {
		return newCallableType2(args...)
	})
}

// DefaultCallableType returns the default Callable type (all three parts nil),
// which accepts any callable.
func DefaultCallableType() *CallableType {
	return callableTypeDefault
}

// NewCallableType creates a CallableType from its three parts; any part may be nil.
func NewCallableType(paramsType px.Type, returnType px.Type, blockType px.Type) *CallableType {
	return &CallableType{paramsType, returnType, blockType}
}

func newCallableType2(args ...px.Value) *CallableType {
	return newCallableType3(WrapValues(args))
}

// newCallableType3 parses the Callable[...] argument list. Accepted shapes:
// (), (Tuple[, block[, return]]), ([params..., block?], return?) or a flat
// (params..., block?) list.
//
// NOTE: the `ok` flag is deliberately threaded through successive type
// assertions; its final value decides whether a trailing block argument was
// recognized. Statement order matters here — do not reorder.
func newCallableType3(args px.List) *CallableType {
	argc := args.Len()
	if argc == 0 {
		return DefaultCallableType()
	}
	first := args.At(0)
	if tv, ok := first.(*TupleType); ok {
		// Tuple form: Callable[Tuple, block?, return?]
		var returnType px.Type
		var blockType px.Type
		if argc > 1 {
			blockType, ok = args.At(1).(px.Type)
			if argc > 2 {
				returnType, ok = args.At(2).(px.Type)
			}
		}
		if ok {
			return &CallableType{tv, returnType, blockType}
		}
	}
	var (
		rt    px.Type
		block px.Type
		ok    bool
	)
	if argc == 1 || argc == 2 {
		// check for [[params, block], return]
		var iv px.List
		if iv, ok = first.(px.List); ok {
			if argc == 2 {
				if rt, ok = args.At(1).(px.Type); !ok {
					panic(illegalArgumentType(`Callable[]`, 1, `Type`, args.At(1)))
				}
			}
			// Descend into the inner list for the parameter/block scan below.
			argc = iv.Len()
			args = iv
		}
	}
	// A trailing Callable (or Optional[Callable]) is the block type.
	last := args.At(argc - 1)
	block, ok = last.(*CallableType)
	if !ok {
		block = nil
		var ob *OptionalType
		if ob, ok = last.(*OptionalType); ok {
			if _, ok = ob.typ.(*CallableType); ok {
				block = ob
			}
		}
	}
	if ok {
		// Strip the recognized block argument from the parameter list.
		argc--
		args = args.Slice(0, argc)
	}
	return NewCallableType(tupleFromArgs(true, args), rt, block)
}

// BlockType returns the block type, or untyped nil when none is set.
func (t *CallableType) BlockType() px.Type {
	if t.blockType == nil {
		return nil // Return untyped nil
	}
	return t.blockType
}

// CallableWith reports whether this callable can be invoked with the given
// arguments and (possibly nil) block.
func (t *CallableType) CallableWith(args []px.Value, block px.Lambda) bool {
	if block != nil {
		cb := t.blockType
		switch ca := cb.(type) {
		case nil:
			// Block given but this callable declares none.
			return false
		case *OptionalType:
			cb = ca.ContainedType()
		}
		if block.PType() == nil {
			return false
		}
		if !isAssignable(block.PType(), cb) {
			return false
		}
	} else if t.blockType != nil && !isAssignable(t.blockType, anyTypeDefault) {
		// Required block but none provided
		return false
	}
	if pt, ok := t.paramsType.(*TupleType); ok {
		return pt.IsInstance3(args, nil)
	}
	return true
}

// Accept visits this type and any non-nil component types.
func (t *CallableType) Accept(v px.Visitor, g px.Guard) {
	v(t)
	if t.paramsType != nil {
		t.paramsType.Accept(v, g)
	}
	if t.blockType != nil {
		t.blockType.Accept(v, g)
	}
	if t.returnType != nil {
		t.returnType.Accept(v, g)
	}
}

// BlockName returns the conventional name of the block parameter.
func (t *CallableType) BlockName() string {
	return `block`
}

// CanSerializeAsString reports whether all component types serialize as strings.
func (t *CallableType) CanSerializeAsString() bool {
	return canSerializeAsString(t.paramsType) && canSerializeAsString(t.blockType) && canSerializeAsString(t.returnType)
}

func (t *CallableType) SerializationString() string {
	return t.String()
}

func (t *CallableType) Default() px.Type {
	return callableTypeDefault
}

// Equals reports only whether o is a *CallableType; component types are not
// compared. NOTE(review): this treats all Callable variants as equal — verify
// that is the intended semantics for this type system.
func (t *CallableType) Equals(o interface{}, g px.Guard) bool {
	_, ok := o.(*CallableType)
	return ok
}

func (t *CallableType) Generic() px.Type {
	return callableTypeDefault
}

// Get exposes the attributes declared in the meta type; nil parts map to Undef.
func (t *CallableType) Get(key string) (px.Value, bool) {
	switch key {
	case `param_types`:
		if t.paramsType == nil {
			return px.Undef, true
		}
		return t.paramsType, true
	case `return_type`:
		if t.returnType == nil {
			return px.Undef, true
		}
		return t.returnType, true
	case `block_type`:
		if t.blockType == nil {
			return px.Undef, true
		}
		return t.blockType, true
	default:
		return nil, false
	}
}

// IsAssignable reports whether a value of type o can be used where this
// callable type is expected. Parameter/block checks run contravariantly.
func (t *CallableType) IsAssignable(o px.Type, g px.Guard) bool {
	oc, ok := o.(*CallableType)
	if !ok {
		return false
	}
	if t.returnType == nil && t.paramsType == nil && t.blockType == nil {
		// Default Callable accepts every callable.
		return true
	}
	if t.returnType != nil {
		or := oc.returnType
		if or == nil {
			or = anyTypeDefault
		}
		if !isAssignable(t.returnType, or) {
			return false
		}
	}
	// NOTE: these tests are made in reverse as it is calling the callable that is constrained
	// (it's lower bound), not its upper bound
	if oc.paramsType != nil && (t.paramsType == nil || !isAssignable(oc.paramsType, t.paramsType)) {
		return false
	}
	if t.blockType == nil {
		return oc.blockType == nil
	}
	if oc.blockType == nil {
		return false
	}
	return isAssignable(oc.blockType, t.blockType)
}

// IsInstance reports whether o is a Lambda whose type is assignable to t.
func (t *CallableType) IsInstance(o px.Value, g px.Guard) bool {
	if l, ok := o.(px.Lambda); ok {
		return isAssignable(t, l.PType())
	}
	// TODO: Maybe check Go func using reflection
	return false
}

func (t *CallableType) MetaType() px.ObjectType {
	return CallableMetaType
}

func (t *CallableType) Name() string {
	return `Callable`
}

// ParameterNames returns 1-based positional names ("1", "2", ...) for the
// tuple parameters, or an empty slice when there is no tuple.
func (t *CallableType) ParameterNames() []string {
	if pt, ok := t.paramsType.(*TupleType); ok {
		n := len(pt.types)
		r := make([]string, 0, n)
		for i := 0; i < n; {
			i++
			r = append(r, strconv.Itoa(i))
		}
		return r
	}
	return []string{}
}

// Parameters returns the type parameters used when rendering Callable[...].
func (t *CallableType) Parameters() (params []px.Value) {
	if *t == *callableTypeDefault {
		return px.EmptyValues
	}
	if pt, ok := t.paramsType.(*TupleType); ok {
		tupleParams := pt.Parameters()
		if len(tupleParams) == 0 {
			params = make([]px.Value, 0)
		} else {
			// Unit-typed entries are filtered out of the rendered list.
			params = px.Select(tupleParams, func(p px.Value) bool { _, ok := p.(*UnitType); return !ok })
		}
	} else {
		params = make([]px.Value, 0)
	}
	if t.blockType != nil {
		params = append(params, t.blockType)
	}
	if t.returnType != nil {
		// With a return type, parameters nest as [[params...], return].
		params = []px.Value{WrapValues(params), t.returnType}
	}
	return params
}

// ParametersType returns the parameter tuple type, or untyped nil when unset.
func (t *CallableType) ParametersType() px.Type {
	if t.paramsType == nil {
		return nil // Return untyped nil
	}
	return t.paramsType
}

// Resolve resolves the component types in place and returns the receiver.
func (t *CallableType) Resolve(c px.Context) px.Type {
	if t.paramsType != nil {
		t.paramsType = resolve(c, t.paramsType).(*TupleType)
	}
	if t.returnType != nil {
		t.returnType = resolve(c, t.returnType)
	}
	if t.blockType != nil {
		t.blockType = resolve(c, t.blockType)
	}
	return t
}

func (t *CallableType) ReturnType() px.Type {
	return t.returnType
}

func (t *CallableType) String() string {
	return px.ToString2(t, None)
}

func (t *CallableType) PType() px.Type {
	return &TypeType{t}
}

func (t *CallableType) ToString(b io.Writer, s px.FormatContext, g px.RDetect) {
	TypeToString(t, b, s, g)
}

// callableTypeDefault is the shared default Callable instance.
var callableTypeDefault = &CallableType{paramsType: nil, blockType: nil, returnType: nil}
types/callabletype.go
0.580352
0.522872
callabletype.go
starcoder
// NOTE(review): the repetitive per-type dispatch below (and the apparent
// *.gen.go origin) suggests this file is machine-generated — confirm and edit
// the generator, not this file, if changes are needed.
package values

import (
	"github.com/apache/arrow/go/v7/arrow/memory"

	fluxarray "github.com/influxdata/flux/array"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/internal/errors"
	"github.com/influxdata/flux/semantic"
)

// vectorAdd performs element-wise addition of two vectors, dispatching on the
// LEFT operand's element nature only (r is assumed to have a matching array
// type — TODO confirm callers guarantee this). Supports Int, UInt, Float and
// String; anything else is an Invalid error.
func vectorAdd(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntAdd(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicInt), nil
	case semantic.UInt:
		x, err := fluxarray.UintAdd(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicUint), nil
	case semantic.Float:
		x, err := fluxarray.FloatAdd(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	case semantic.String:
		x, err := fluxarray.StringAdd(l.Arr().(*fluxarray.String), r.Arr().(*fluxarray.String), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicString), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Add: %v", l.ElementType())
	}
}

// vectorSub performs element-wise subtraction. Unlike Add, strings are not
// supported: only Int, UInt and Float.
func vectorSub(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntSub(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicInt), nil
	case semantic.UInt:
		x, err := fluxarray.UintSub(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicUint), nil
	case semantic.Float:
		x, err := fluxarray.FloatSub(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Sub: %v", l.ElementType())
	}
}

// vectorMul performs element-wise multiplication for Int, UInt and Float.
func vectorMul(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntMul(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicInt), nil
	case semantic.UInt:
		x, err := fluxarray.UintMul(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicUint), nil
	case semantic.Float:
		x, err := fluxarray.FloatMul(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Mul: %v", l.ElementType())
	}
}

// vectorDiv performs element-wise division for Int, UInt and Float.
// Division-by-zero handling is delegated to the fluxarray implementation.
func vectorDiv(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntDiv(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicInt), nil
	case semantic.UInt:
		x, err := fluxarray.UintDiv(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicUint), nil
	case semantic.Float:
		x, err := fluxarray.FloatDiv(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Div: %v", l.ElementType())
	}
}

// vectorMod performs element-wise modulo for Int, UInt and Float.
func vectorMod(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntMod(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicInt), nil
	case semantic.UInt:
		x, err := fluxarray.UintMod(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicUint), nil
	case semantic.Float:
		x, err := fluxarray.FloatMod(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Mod: %v", l.ElementType())
	}
}

// vectorPow performs element-wise exponentiation. Note that every supported
// input nature (Int, UInt, Float) produces a Float result vector.
func vectorPow(l, r Vector, mem memory.Allocator) (Value, error) {
	switch l.ElementType().Nature() {
	case semantic.Int:
		x, err := fluxarray.IntPow(l.Arr().(*fluxarray.Int), r.Arr().(*fluxarray.Int), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	case semantic.UInt:
		x, err := fluxarray.UintPow(l.Arr().(*fluxarray.Uint), r.Arr().(*fluxarray.Uint), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	case semantic.Float:
		x, err := fluxarray.FloatPow(l.Arr().(*fluxarray.Float), r.Arr().(*fluxarray.Float), mem)
		if err != nil {
			return nil, err
		}
		return NewVectorValue(x, semantic.BasicFloat), nil
	default:
		return nil, errors.Newf(codes.Invalid, "unsupported type for vector Pow: %v", l.ElementType())
	}
}
values/binary.gen.go
0.717408
0.609234
binary.gen.go
starcoder
package cron

import "time"

// SpecCron specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
// Each field is a bitmask: bit i is set when value i is allowed (e.g. bit 30
// of Minute means minute 30 matches). Bit 63 (starBit) additionally records
// that the field was given as "*" — see dayMatches for why that matters.
type SpecCron struct {
	Second, Minute, Hour, Dom, Month, Dow uint64
}

// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
	min, max uint
	names    map[string]uint
}

// The bounds for each field.
var (
	seconds = bounds{0, 59, nil}
	minutes = bounds{0, 59, nil}
	hours   = bounds{0, 23, nil}
	dom     = bounds{1, 31, nil}
	months  = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	dow = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)

const (
	// Set the top bit if a star was included in the expression.
	starBit = 1 << 63
)

// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecCron) Next(t time.Time) time.Time {
	// General approach:
	// For Month, Day, Hour, Minute, Second:
	// Check if the time value matches. If yes, continue to the next field.
	// If the field doesn't match the schedule, then increment the field until it matches.
	// While incrementing the field, a wrap-around brings it back to the beginning
	// of the field list (since it is necessary to re-verify previous field
	// values)

	// Start at the earliest possible time (the upcoming second).
	// Subtracting the nanoseconds truncates to a whole second before adding 1s.
	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)

	// This flag indicates whether a field has been incremented.
	// Once set, lower-order fields are reset to their minimum, since the
	// original sub-field values are no longer relevant.
	added := false

	// If no time is found within five years, return zero.
	yearLimit := t.Year() + 5

WRAP:
	if t.Year() > yearLimit {
		return time.Time{}
	}

	// Find the first applicable month.
	// If it's this month, then do nothing.
	for 1<<uint(t.Month())&s.Month == 0 {
		// If we have to add a month, reset the other parts to 0.
		if !added {
			added = true
			// Otherwise, set the date at the beginning (since the current time is irrelevant).
			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
		}
		t = t.AddDate(0, 1, 0)

		// Wrapped around.
		if t.Month() == time.January {
			goto WRAP
		}
	}

	// Now get a day in that month.
	for !dayMatches(s, t) {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
		}
		t = t.AddDate(0, 0, 1)

		// Rolling into day 1 means the month wrapped; re-verify from the top.
		if t.Day() == 1 {
			goto WRAP
		}
	}

	for 1<<uint(t.Hour())&s.Hour == 0 {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
		}
		// NOTE(review): Add(1h) works on absolute time; around DST transitions
		// the wall-clock hour sequence may skip/repeat — confirm this matches
		// the intended scheduling semantics.
		t = t.Add(1 * time.Hour)

		if t.Hour() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Minute())&s.Minute == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Minute)
		}
		t = t.Add(1 * time.Minute)

		if t.Minute() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Second())&s.Second == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Second)
		}
		t = t.Add(1 * time.Second)

		if t.Second() == 0 {
			goto WRAP
		}
	}

	return t
}

// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
// Per crontab convention: when either field was "*" (starBit set) the two
// restrictions are ANDed; when both were explicitly restricted they are ORed.
func dayMatches(s *SpecCron, t time.Time) bool {
	var (
		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
	)

	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
		return domMatch && dowMatch
	}
	return domMatch || dowMatch
}
cron/spec.go
0.601125
0.535888
spec.go
starcoder
package main import "fmt" type recommend struct { Risk string `json:"risk,omitempty"` Recommendation string `json:"recommendation,omitempty"` } func getRecommend(category, plugin string) recommend { return recommendMap[fmt.Sprintf("%s/%s", category, plugin)] } // recommendMap maps risk and recommendation details to plugins. // The recommendations are based on https://github.com/aquasecurity/cloudsploit/tree/master/plugins/aws // key: category/plugin, value: recommend{} var recommendMap = map[string]recommend{ categoryACM + "/acmCertificateExpiry": { Risk: `ACM Certificate Expiry - Detect upcoming expiration of ACM certificates - Certificates that have expired will trigger warnings in all major browsers. AWS will attempt to automatically renew the certificate but may be unable to do so if email or DNS validation cannot be confirmed.`, Recommendation: `Ensure AWS is able to renew the certificate via email or DNS validation of the domain. - https://docs.aws.amazon.com/acm/latest/userguide/managed-renewal.html`, }, categoryACM + "/acmValidation": { Risk: `ACM Certificate Validation - ACM certificates should be configured to use DNS validation. - With DNS validation, ACM will automatically renew certificates before they expire, as long as the DNS CNAME record is in place.`, Recommendation: `Configure ACM managed certificates to use DNS validation. - https://aws.amazon.com/blogs/security/easier-certificate-validation-using-dns-with-aws-certificate-manager/ - https://cloudsploit.com/remediations/aws/acm/acm-certificate-validation`, }, categoryAPIGateway + "/apigatewayWafEnabled": { Risk: `API Gateway WAF Enabled - Ensures that API Gateway APIs are associated with a Web Application Firewall. 
- API Gateway APIs should be associated with a Web Application Firewall to ensure API security.`, Recommendation: `Associate API Gateway API with Web Application Firewall - https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-control-access-aws-waf.html`, }, categoryAthena + "/workgroupEncrypted": { Risk: `Workgroup Encrypted - Ensures Athena workgroups are configured to encrypt all data at rest. - Athena workgroups support full server-side encryption for all data at rest which should be enabled.`, Recommendation: `Enable encryption at rest for all Athena workgroups. - https://docs.aws.amazon.com/athena/latest/ug/encryption.html`, }, categoryAthena + "/workgroupEnforceConfiguration": { Risk: `Workgroup Enforce Configuration - Ensures Athena workgroups do not allow clients to override configuration options. - Athena workgroups support the ability for clients to override configuration options, including encryption requirements. This setting should be disabled to enforce encryption mandates.`, Recommendation: `Disable the ability for clients to override Athena workgroup configuration options. - https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings.html`, }, categoryAutoScaling + "/appTierAsgCloudwatchLogs": { Risk: `App-Tier Auto Scaling Group CloudWatch Logs Enabled - Ensures that App-Tier Auto Scaling Groups are using CloudWatch logs agent. - EC2 instance available within app-tier Auto Scaling Group (ASG) should use an AWS CloudWatch Logs agent to monitor, store and access log files.`, Recommendation: `Update app-tier Auto Scaling Group to use CloudWatch Logs agent - https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Install-CloudWatch-Agent.html'`, }, categoryAutoScaling + "/appTierIamRole": { Risk: `App-Tier Launch Configurations IAM Roles - Ensures that App-Tier Auto Scaling launch configuration is configured to use a customer created IAM role. 
- App-Tier Auto Scaling launch configuration should have a customer created App-Tier IAM role to provide necessary credentials to access AWS services.`, Recommendation: `Update App-Tier Auto Scaling launch configuration and attach a customer created App-Tier IAM role - https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html`, }, categoryAutoScaling + "/asgActiveNotifications": { Risk: `Auto Scaling Notifications Active - Ensures auto scaling groups have notifications active. - Notifications can be sent to an SNS endpoint when scaling actions occur, which should be set to ensure all scaling activity is recorded.`, Recommendation: `Add a notification endpoint to the auto scaling group. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html`, }, categoryAutoScaling + "/asgMissingELB": { Risk: `Auto Scaling Group Missing ELB - Ensures all Auto Scaling groups are referencing active load balancers. - Each Auto Scaling group with a load balancer configured should reference an active ELB.`, Recommendation: `Ensure that the Auto Scaling group load balancer has not been deleted. If so, remove it from the ASG. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html`, }, categoryAutoScaling + "/asgMissingSecurityGroups": { Risk: `Launch Configuration Referencing Missing Security Groups - Ensures that Auto Scaling launch configurations are not utilizing missing security groups. - Auto Scaling launch configuration should utilize an active security group to ensure safety of managed instances.`, Recommendation: `Ensure that the launch configuration security group has not been deleted. If so, remove it from launch configurations - https://docs.aws.amazon.com/autoscaling/ec2/userguide/GettingStartedTutorial.html`, }, categoryAutoScaling + "/asgMultiAz": { Risk: `ASG Multiple AZ - Ensures that ASGs are created to be cross-AZ for high availability. 
- ASGs can easily be configured to allow instances to launch in multiple availability zones. This ensures that the ASG can continue to scale, even when AWS is experiencing downtime in one or more zones.`, Recommendation: `Modify the autoscaling instance to enable scaling across multiple availability zones. - http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroup.html`, }, categoryAutoScaling + "/asgSuspendedProcesses": { Risk: `Suspended AutoScaling Groups - Ensures that there are no Amazon AutoScaling groups with suspended processes. - AutoScaling groups should not have any suspended processes to avoid disrupting the AutoScaling workflow.`, Recommendation: `Update the AutoScaling group to resume the suspended processes. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html`, }, categoryAutoScaling + "/elbHealthCheckActive": { Risk: `ELB Health Check Active - Ensures all Auto Scaling groups have ELB health check active. - Auto Scaling groups should have ELB health checks active to replace unhealthy instances in time.`, Recommendation: `Enable ELB health check for the Auto Scaling groups. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-elb-healthcheck.html`, }, categoryAutoScaling + "/emptyASG": { Risk: `Empty AutoScaling Group - Ensures all autoscaling groups contain at least 1 instance. - AutoScaling groups that are no longer in use should be deleted to prevent accidental use.`, Recommendation: `Delete the unused AutoScaling group. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html`, }, categoryAutoScaling + "/sameAzElb": { Risk: `AutoScaling ELB Same Availability Zone - Ensures all autoscaling groups with attached ELBs are operating in the same availability zone. 
- To work properly and prevent orphaned instances, ELBs must be created in the same availability zones as the backend instances in the autoscaling group.`, Recommendation: `Update the ELB to use the same availability zones as the autoscaling group. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html`, }, categoryAutoScaling + "/webTierAsgAssociatedElb": { Risk: `Web-Tier Auto Scaling Group Associated ELB - Ensures that Web-Tier Auto Scaling Group has an associated Elastic Load Balancer - Web-Tier Auto Scaling groups should have an ELB associated to distribute incoming traffic across EC2 instances.`, Recommendation: `Update Web-Tier Auto Scaling group to associate ELB to distribute incoming traffic. - https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html`, }, categoryAutoScaling + "/webTierAsgCloudwatchLogs": { Risk: `Web-Tier Auto Scaling Group CloudWatch Logs Enabled - Ensures that Web-Tier Auto Scaling Groups are using CloudWatch Logs agent. - EC2 instance available within web-tier Auto Scaling Group (ASG) should use an AWS CloudWatch Logs agent to monitor, store and access log files.`, Recommendation: `Update web-tier Auto Scaling Group to use CloudWatch Logs agent - https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Install-CloudWatch-Agent.html`, }, categoryAutoScaling + "/webTierIamRole": { Risk: `Web-Tier Launch Configurations IAM Roles - Ensures that Web-Tier Auto Scaling launch configuration is configured to use a customer created IAM role. 
- Web-Tier Auto Scaling launch configuration should have a customer created Web-Tier IAM role to provide necessary credentials to access AWS services.`, Recommendation: `Update Web-Tier Auto Scaling launch configuration and attach a customer created Web-Tier IAM role - https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html`, }, categoryCloudFormation + "/plainTextParameters": { Risk: `CloudFormation Plaintext Parameters - Ensures CloudFormation parameters that reference sensitive values are configured to use NoEcho. - CloudFormation supports the NoEcho property for sensitive values, which should be used to ensure secrets are not exposed in the CloudFormation UI and APIs.`, Recommendation: `Update the sensitive parameters to use the NoEcho property. - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html`, }, categoryCloudFront + "/cloudfrontHttpsOnly": { Risk: `CloudFront HTTPS Only - Ensures CloudFront distributions are configured to redirect non-HTTPS traffic to HTTPS. - For maximum security, CloudFront distributions can be configured to only accept HTTPS connections or to redirect HTTP connections to HTTPS.`, Recommendation: `Remove HTTP-only listeners from distributions. - http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/CloudFront.html`, }, categoryCloudFront + "/cloudfrontLoggingEnabled": { Risk: `CloudFront Logging Enabled - Ensures CloudFront distributions have request logging enabled. - Logging requests to CloudFront distributions is a helpful way of detecting and investigating potential attacks, malicious activity, or misuse of backend resources. Logs can be sent to S3 and processed for further analysis.`, Recommendation: `Enable CloudFront request logging. - http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html`, }, categoryCloudFront + "/cloudfrontWafEnabled": { Risk: `CloudFront WAF Enabled - Ensures CloudFront distributions have WAF enabled. 
- Enabling WAF allows control over requests to the Cloudfront Distribution, allowing or denying traffic based off rules in the Web ACL`, Recommendation: `1. Enter the WAF service. 2. Enter Web ACLs and filter by global. 3. If no Web ACL is found, Create a new global Web ACL and in Resource type to associate with web ACL, select the Cloudfront Distribution. - https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-cloudfront-distribution.html`, }, categoryCloudFront + "/insecureProtocols": { Risk: `Insecure CloudFront Protocols - Detects the use of insecure HTTPS SSL/TLS protocols for use with HTTPS traffic between viewers and CloudFront - CloudFront supports SSLv3 and TLSv1 protocols for use with HTTPS traffic, but only TLSv1.1 or higher should be used unless there is a valid business justification to support the older, insecure SSLv3.`, Recommendation: `Ensure that traffic sent between viewers and CloudFront is passed over HTTPS and uses TLSv1.1 or higher. - http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html`, }, categoryCloudFront + "/publicS3Origin": { Risk: `Public S3 CloudFront Origin - Detects the use of an S3 bucket as a CloudFront origin without an origin access identity - When S3 is used as an origin for a CloudFront bucket, the contents should be kept private and an origin access identity should allow CloudFront access. This prevents someone from bypassing the caching benefits that CloudFront provides, repeatedly loading objects directly from S3, and amassing a large access bill.`, Recommendation: `Create an origin access identity for CloudFront, then make the contents of the S3 bucket private. - http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html`, }, categoryCloudFront + "/secureOrigin": { Risk: `Secure CloudFront Origin - Detects the use of secure web origins with secure protocols for CloudFront. 
- Traffic passed between the CloudFront edge nodes and the backend resource should be sent over HTTPS with modern protocols for all web-based origins.`, Recommendation: `Ensure that traffic sent between CloudFront and its origin is passed over HTTPS and uses TLSv1.1 or higher. Do not use the match-viewer option. - http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web.html`, }, categoryCloudTrail + "/cloudtrailBucketAccessLogging": { Risk: `CloudTrail Bucket Access Logging - Ensures CloudTrail logging bucket has access logging enabled to detect tampering of log files - CloudTrail buckets should utilize access logging for an additional layer of auditing. If the log files are deleted or modified in any way, the additional access logs can help determine who made the changes.`, Recommendation: `Enable access logging on the CloudTrail bucket from the S3 console - http://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html`, }, categoryCloudTrail + "/cloudtrailBucketDelete": { Risk: `CloudTrail Bucket Delete Policy - Ensures CloudTrail logging bucket has a policy to prevent deletion of logs without an MFA token - To provide additional security, CloudTrail logging buckets should require an MFA token to delete objects`, Recommendation: `Enable MFA delete on the CloudTrail bucket - http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete`, }, categoryCloudTrail + "/cloudtrailBucketPrivate": { Risk: `CloudTrail Bucket Private - Ensures CloudTrail logging bucket is not publicly accessible - CloudTrail buckets contain large amounts of sensitive account data and should only be accessible by logged in users.`, Recommendation: `Set the S3 bucket access policy for all CloudTrail buckets to only allow known users to access its files. 
- http://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html`, }, categoryCloudTrail + "/cloudtrailDataEvents": { Risk: `CloudTrail Data Events - Ensure Data events are included into Amazon CloudTrail trails configuration. - AWS CloudTrail trails should be configured to enable Data Events in order to log S3 object-level API operations.`, Recommendation: `Update CloudTrail to enable data events. - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html`, }, categoryCloudTrail + "/cloudtrailDeliveryFailing": { Risk: `CloudTrail Delivery Failing - Ensures that Amazon CloudTrail trail log files are delivered to destination S3 bucket. - Amazon CloudTrail trail logs should be delivered to destination S3 bucket to be used for security audits.`, Recommendation: `Modify CloudTrail trail configurations so that logs are being delivered - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html`, }, categoryCloudTrail + "/cloudtrailEnabled": { Risk: `CloudTrail Enabled - Ensures CloudTrail is enabled for all regions within an account - CloudTrail should be enabled for all regions in order to detect suspicious activity in regions that are not typically used.`, Recommendation: `Enable CloudTrail for all regions and ensure that at least one region monitors global service events - http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-getting-started.html`, }, categoryCloudTrail + "/cloudtrailEncryption": { Risk: `CloudTrail Encryption - Ensures CloudTrail encryption at rest is enabled for logs - CloudTrail log files contain sensitive information about an account and should be encrypted at rest for additional protection.`, Recommendation: `Enable CloudTrail log encryption through the CloudTrail console or API - http://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html`, }, categoryCloudTrail + "/cloudtrailFileValidation": { Risk: 
`CloudTrail File Validation - Ensures CloudTrail file validation is enabled for all regions within an account - CloudTrail file validation is essentially a hash of the file which can be used to ensure its integrity in the case of an account compromise.`, Recommendation: `Enable CloudTrail file validation for all regions - http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html`, }, categoryCloudTrail + "/cloudtrailObjectLock": { Risk: `Object Lock Enabled - Ensures that AWS CloudTrail S3 buckets use Object Lock for data protection and regulatory compliance.' - CloudTrail buckets should be configured to have object lock enabled. You can use it to prevent an object from being deleted or overwritten for a fixed amount of time or indefinitely.`, Recommendation: `Edit trail to use a bucket with object locking enabled. - https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-managing.html`, }, categoryCloudTrail + "/cloudtrailS3Bucket": { Risk: `CloudTrail S3 Bucket - Ensure that AWS CloudTrail trail uses the designated Amazon S3 bucket. 
- Ensure that your Amazon CloudTrail trail is configured to use the appropriated S3 bucket in order to meet regulatory compliance requirements within your organization.`, Recommendation: `Modify ClouTrail trails to configure designated S3 bucket - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-update-a-trail-console.html`, }, categoryCloudTrail + "/cloudtrailToCloudwatch": { Risk: `CloudTrail To CloudWatch - Ensures CloudTrail logs are being properly delivered to CloudWatch - Sending CloudTrail logs to CloudWatch enables easy integration with AWS CloudWatch alerts, as well as an additional backup log storage location.`, Recommendation: `Enable CloudTrail CloudWatch integration for all regions - http://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html`, }, categoryCloudTrail + "/globalLoggingDuplicated": { Risk: `CloudTrail Global Services Logging Duplicated - Ensures that AWS CloudTrail trails are not duplicating global services events in log files. - Only one trail should have Include Global Services feature enabled to avoid duplication of global services events in log files.`, Recommendation: `Update CloudTrail trails to log global services events enabled for only one trail - https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html`, }, categoryCloudWatchLogs + "/monitoringMetrics": { Risk: `CloudWatch Monitoring Metrics - Ensures metric filters are setup for CloudWatch logs to detect security risks from CloudTrail. - Sending CloudTrail logs to CloudWatch is only useful if metrics are setup to detect risky activity from those logs. There are numerous metrics that should be used. For the exact filter patterns, please see this plugin on GitHub: https://github.com/cloudsploit/scans/blob/master/plugins/aws/cloudwatchlogs/monitoringMetrics.js`, Recommendation: `Enable metric filters to detect malicious activity in CloudTrail logs sent to CloudWatch. 
- http://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html`, }, categoryComprehend + "/outputResultEncryption": { Risk: `Amazon Comprehend Output Result Encryption - Ensures the Comprehend service is using encryption for all result output. - Comprehend supports using KMS keys to result output, which should be enabled.`, Recommendation: `Enable output result encryption for the Comprehend job - https://docs.aws.amazon.com/comprehend/latest/dg/kms-in-comprehend.html`, }, categoryComprehend + "/volumeEncryption": { Risk: `Amazon Comprehend Volume Encryption - Ensures the Comprehend service is using encryption for all volumes storing data at rest. - Comprehend supports using KMS keys to encrypt data at rest, which should be enabled.`, Recommendation: `Enable volume encryption for the Comprehend job - https://docs.aws.amazon.com/comprehend/latest/dg/kms-in-comprehend.html`, }, categoryConfigService + "/configServiceEnabled": { Risk: `Config Service Enabled - Ensures the AWS Config Service is enabled to detect changes to account resources - The AWS Config Service tracks changes to a number of resources in an AWS account and is invaluable in determining how account changes affect other resources and in recovery in the event of an account intrusion or accidental configuration change.`, Recommendation: `Enable the AWS Config Service for all regions and resources in an account. Ensure that it is properly recording and delivering logs. - https://aws.amazon.com/config/details/`, }, categoryDMS + "/dmsEncryptionEnabled": { Risk: `DMS Encryption Enabled - Ensures DMS encryption is enabled using a CMK - Data sent through the data migration service is encrypted using KMS. Encryption is enabled by default, but it is recommended to use customer managed keys.`, Recommendation: `Enable encryption using KMS CMKs for all DMS replication instances. 
- https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html`, }, categoryDynamoDB + "/daxClusterEncryption": { Risk: `DynamoDB Accelerator Cluster Encryption - Ensures DynamoDB Cluster Accelerator DAX clusters have encryption enabled. - DynamoDB Clusters Accelerator DAX clusters should have encryption at rest enabled to secure data from unauthorized access.`, Recommendation: `Enable encryption for DAX cluster. - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAXEncryptionAtRest.html`, }, categoryDynamoDB + "/dynamoKmsEncryption": { Risk: `DynamoDB KMS Encryption - Ensures DynamoDB tables are encrypted using a customer-owned KMS key. - DynamoDB tables can be encrypted using AWS-owned or customer-owned KMS keys. Customer keys should be used to ensure control over the encryption seed data.`, Recommendation: `Create a new DynamoDB table using a CMK KMS key. - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EncryptionAtRest.html`, }, categoryEC2 + "/allowedCustomPorts": { Risk: `Allowed Custom Ports - Ensures that security groups does not allow public access to any port. - Security groups should be used to restrict access to ports from known networks.`, Recommendation: `Modify the security group to ensure the ports are not exposed publicly - https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html`, }, categoryEC2 + "/appTierInstanceIamRole": { Risk: `App-Tier EC2 Instance IAM Role - Ensure IAM roles attached with App-Tier EC2 instances have IAM policies attached. 
- EC2 instances should have IAM roles configured with necessary permission to access other AWS services`, Recommendation: `Modify EC2 instances to attach IAM roles with required IAM policies - https://aws.amazon.com/blogs/security/new-attach-an-aws-iam-role-to-an-existing-amazon-ec2-instance-by-using-the-aws-cli/`, }, categoryEC2 + "/classicInstances": { Risk: `Detect EC2 Classic Instances - Ensures AWS VPC is being used for instances instead of EC2 Classic - VPCs are the latest and more secure method of launching AWS resources. EC2 Classic should not be used.`, Recommendation: `Migrate instances from EC2 Classic to VPC - http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Introduction.html`, }, categoryEC2 + "/crossVpcPublicPrivate": { Risk: `Cross VPC Public Private Communication - Ensures communication between public and private VPC tiers is not enabled. - Communication between the public tier of one VPC and the private tier of other VPCs should never be allowed. Instead, VPC peerings with proper NACLs and gateways should be used`, Recommendation: `Remove the NACL rules allowing communication between the public and private tiers of different VPCs - https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html`, }, categoryEC2 + "/defaultSecurityGroup": { Risk: `Default Security Group - Ensure the default security groups block all traffic by default - The default security group is often used for resources launched without a defined security group. For this reason, the default rules should be to block all traffic to prevent an accidental exposure.`, Recommendation: `Update the rules for the default security group to deny all traffic by default - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#default-security-group`, }, categoryEC2 + "/defaultVpcExists": { Risk: `Default VPC Exists - Determines whether the default VPC exists. 
- The default VPC should not be used in order to avoid launching multiple services in the same network which may not require connectivity. Each application, or network tier, should use its own VPC.`, Recommendation: `Move resources from the default VPC to a new VPC created for that application or resource group. - http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html`, }, categoryEC2 + "/defaultVpcInUse": { Risk: `Default VPC In Use - Determines whether the default VPC is being used for launching EC2 instances. - The default VPC should not be used in order to avoid launching multiple services in the same network which may not require connectivity. Each application, or network tier, should use its own VPC.`, Recommendation: `Move resources from the default VPC to a new VPC created for that application or resource group. - http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html`, }, categoryEC2 + "/ebsEncryptedSnapshots": { Risk: `EBS Encrypted Snapshots - Ensures EBS snapshots are encrypted at rest - EBS snapshots should have at-rest encryption enabled through AWS using KMS. If the volume was not encrypted and a snapshot was taken the snapshot will be unencrypted.`, Recommendation: `Configure volume encryption and delete unencrypted EBS snapshots. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html#encryption-support`, }, categoryEC2 + "/ebsEncryptionEnabled": { Risk: `EBS Encryption Enabled - Ensures EBS volumes are encrypted at rest - EBS volumes should have at-rest encryption enabled through AWS using KMS. If the volume is used for a root volume, the instance must be launched from an AMI that has been encrypted as well.`, Recommendation: `Enable encryption for EBS volumes. - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html`, }, categoryEC2 + "/ebsOldSnapshots": { Risk: `EBS Volumes Too Old Snapshots - Ensure that EBS volume snapshots are deleted after defined time period. 
- EBS volume snapshots older than indicated should be deleted after defined time period for cost optimization.`, Recommendation: `Delete the EBS snapshots past their defined expiration date - https://docs.amazonaws.cn/en_us/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html`, }, categoryEC2 + "/ebsSnapshotLifecycle": { Risk: `Automate EBS Snapshot Lifecycle - Ensure DLM is used to automate EBS volume snapshots management. - Amazon Data Lifecycle Manager (DLM) service enables you to manage the lifecycle of EBS volume snapshots. - Using DLM helps in enforcing regular backup schedule, retaining backups, deleting outdated EBS snapshots`, Recommendation: `Create lifecycle policy for EBS volumes. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshot-lifecycle.html`, }, categoryEC2 + "/ebsSnapshotPrivate": { Risk: `EBS Volume Snapshot Public - Ensures EBS volume snapshots are private - EBS volumes often contain sensitive data from running EC2 instances and should be set to private so they cannot be accidentally shared with other accounts.`, Recommendation: `Ensure that each EBS snapshot has its permissions set to private. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html`, }, categoryEC2 + "/ebsSnapshotPublic": { Risk: `Amazon EBS Public Snapshots - Ensure that Amazon EBS volume snapshots are not shared to all AWS accounts. - AWS Elastic Block Store (EBS) volume snapshots should not be not publicly shared with other AWS account to avoid data exposure.`, Recommendation: `Modify the permissions of public snapshots to remove public access. 
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html`, }, categoryEC2 + "/ebsUnusedVolumes": { Risk: `Unused EBS Volumes - Ensures EBS volumes are in use and attached to EC2 instances - EBS volumes should be deleted if the parent instance has been deleted to prevent accidental exposure of data.`, Recommendation: `Delete the unassociated EBS volume. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html`, }, categoryEC2 + "/ec2MetadataOptions": { Risk: `Insecure EC2 Metadata Options - Ensures EC2 instance metadata is updated to require HttpTokens or disable HttpEndpoint - The new EC2 metadata service prevents SSRF attack escalations from accessing the sensitive instance metadata endpoints.`, Recommendation: `Update instance metadata options to use IMDSv2 - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html#configuring-instance-metadata-service`, }, categoryEC2 + "/elasticIpLimit": { Risk: `Elastic IP Limit - Determine if the number of allocated EIPs is close to the AWS per-account limit - AWS limits accounts to certain numbers of resources. Exceeding those limits could prevent resources from launching.`, Recommendation: `Contact AWS support to increase the number of EIPs available - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#using-instance-addressing-limit`, }, categoryEC2 + "/encryptedAmi": { Risk: `Encrypted AMI - Ensures EBS-backed AMIs are configured to use encryption - AMIs with unencrypted data volumes can be used to launch unencrypted instances that place data at risk.`, Recommendation: `Ensure all AMIs have encrypted EBS volumes. 
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html`, }, categoryEC2 + "/excessiveSecurityGroups": { Risk: `Excessive Security Groups - Determine if there are an excessive number of security groups in the account - Keeping the number of security groups to a minimum helps reduce the attack surface of an account. - Rather than creating new groups with the same rules for each project, common rules should be grouped under the same security groups. For example, instead of adding port 22 from a known IP to every group, create a single "SSH" security group which can be used on multiple instances.`, Recommendation: `Limit the number of security groups to prevent accidental authorizations - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/flowLogsEnabled": { Risk: `VPC Flow Logs Enabled - Ensures VPC flow logs are enabled for traffic logging - VPC flow logs record all traffic flowing in to and out of a VPC. These logs are critical for auditing and review after security incidents.`, Recommendation: `Enable VPC flow logs for each VPC - http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html`, }, categoryEC2 + "/instanceIamRole": { Risk: `Instance IAM Role - Ensures EC2 instances are using an IAM role instead of hard-coded AWS credentials - IAM roles should be assigned to all instances to enable them to access AWS resources. 
Using an IAM role is more secure than hard-coding AWS access keys into application code.`, Recommendation: `Attach an IAM role to the EC2 instance - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html`, }, categoryEC2 + "/instanceKeyBasedLogin": { Risk: `EC2 Instance Key Based Login - Ensures EC2 instances have associated keys for password-less SSH login - AWS allows EC2 instances to be launched with a specified PEM key for SSH login which should be used instead of user and password login.`, Recommendation: `Ensure each EC2 instance has an associated SSH key and disable password login. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html`, }, categoryEC2 + "/instanceLimit": { Risk: `Instance Limit - Determine if the number of EC2 instances is close to the AWS per-account limit - AWS limits accounts to certain numbers of resources. Exceeding those limits could prevent resources from launching.`, Recommendation: `Contact AWS support to increase the number of instances available - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#using-instance-addressing-limit`, }, categoryEC2 + "/instanceMaxCount": { Risk: `EC2 Max Instances - Ensures the total number of EC2 instances does not exceed a set threshold. - The number of running EC2 instances should be carefully audited, especially in unused regions, to ensure only approved applications are consuming compute resources. - Many compromised AWS accounts see large numbers of EC2 instances launched.`, Recommendation: `Ensure that the number of running EC2 instances matches the expected count. - If instances are launched above the threshold, investigate to ensure they are legitimate. - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring_ec2.html`, }, categoryEC2 + "/instanceVcpusLimit": { Risk: `Instance vCPU On-Demand Based Limits - Determine if the number of EC2 On-Demand instances is close to the regional vCPU based limit. 
- AWS limits accounts to certain numbers of resources per region. Exceeding those limits could prevent resources from launching.`, Recommendation: `EC2 automatically increases On Demand Instance limits based on usage, limit increases can be requested via the Limits Page on Amazon EC2 console, the EC2 service page on the Service Quotas console, or the Service Quotas API/CLI. - https://aws.amazon.com/ec2/faqs/#EC2_On-Demand_Instance_limits`, }, categoryEC2 + "/launchWizardSecurityGroups": { Risk: `EC2 LaunchWizard Security Groups - Ensures security groups created by the EC2 launch wizard are not used - The EC2 launch wizard frequently creates insecure security groups that are exposed publicly. - These groups should not be used and custom security groups should be created instead.`, Recommendation: `Delete the launch wizard security group and replace it with a custom security group. - https://docs.aws.amazon.com/launchwizard/latest/userguide/launch-wizard-sap-security-groups.html`, }, categoryEC2 + "/managedNatGateway": { Risk: `Managed NAT Gateway In Use - Ensure AWS VPC Managed NAT (Network Address Translation) Gateway service is enabled for high availability (HA). - VPCs should use highly available Managed NAT Gateways in order to enable EC2 instances to connect to the internet or with other AWS components.`, Recommendation: `Update VPCs to use Managed NAT Gateways instead of NAT instances - https://aws.amazon.com/blogs/aws/new-managed-nat-network-address-translation-gateway-for-aws/`, }, categoryEC2 + "/multipleSubnets": { Risk: `VPC Multiple Subnets - Ensures that VPCs have multiple subnets to provide a layered architecture - VPCs should be designed to have separate public and private subnets, ideally across availability zones, enabling a DMZ-style architecture.`, Recommendation: `Create at least two subnets in each VPC, utilizing one for public traffic and the other for private traffic. 
- https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html#SubnetSecurity`, }, categoryEC2 + "/natMultiAz": { Risk: `NAT Multiple AZ - Ensures managed NAT instances exist in at least 2 AZs for availability purposes - Creating NAT instances in a single AZ creates a single point of failure for all systems in the VPC. - All managed NAT instances should be created in multiple AZs to ensure proper failover.`, Recommendation: `Launch managed NAT instances in multiple AZs. - http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html`, }, categoryEC2 + "/openAllPortsProtocols": { Risk: `Open All Ports Protocols - Determine if security group has all ports or protocols open to the public - Security groups should be created on a per-service basis and avoid allowing all ports or protocols.`, Recommendation: `Modify the security group to specify a specific port and protocol to allow. - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openCIFS": { Risk: `Open CIFS - Determine if UDP port 445 for CIFS is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as CIFS should be restricted to known IP addresses.`, Recommendation: `Restrict UDP port 445 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openCustomPorts": { Risk: `Open Custom Ports - Ensure that defined custom ports are not open to public. 
- Security groups should restrict access to ports from known networks.`, Recommendation: `Modify the security group to ensure the defined custom ports are not exposed publicly - https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html`, }, categoryEC2 + "/openDNS": { Risk: `Open DNS - Determine if TCP or UDP port 53 for DNS is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as DNS should be restricted to known IP addresses.`, Recommendation: `Restrict TCP and UDP port 53 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openDocker": { Risk: `Open Docker - Determine if Docker port 2375 or 2376 is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Docker should be restricted to known IP addresses.`, Recommendation: `Restrict TCP ports 2375 and 2376 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openElasticsearch": { Risk: `Open Elasticsearch - Determine if TCP port 9200 for Elasticsearch is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Elasticsearch should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 9200 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openFTP": { Risk: `Open FTP - Determine if TCP port 20 or 21 for FTP is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as FTP should be restricted to known IP addresses.`, Recommendation: `Restrict TCP ports 20 
and 21 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openHadoopNameNode": { Risk: `Open Hadoop HDFS NameNode Metadata Service - Determine if TCP port 8020 for HDFS NameNode metadata service is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Hadoop/HDFS should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 8020 to known IP addresses for Hadoop/HDFS - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openHadoopNameNodeWebUI": { Risk: `Open Hadoop HDFS NameNode WebUI - Determine if TCP port 50070 and 50470 for Hadoop/HDFS NameNode WebUI service is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Hadoop/HDFS should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 50070 and 50470 to known IP addresses for Hadoop/HDFS - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openKibana": { Risk: `Open Kibana - Determine if TCP port 5601 for Kibana is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Kibana should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 5601 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openMySQL": { Risk: `Open MySQL - Determine if TCP port 4333 or 3306 for MySQL is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as MySQL should be restricted to known IP addresses.`, 
Recommendation: `Restrict TCP ports 4333 and 3306 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openNetBIOS": { Risk: `Open NetBIOS - Determine if UDP port 137 or 138 for NetBIOS is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as NetBIOS should be restricted to known IP addresses.`, Recommendation: `Restrict UDP ports 137 and 138 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openOracle": { Risk: `Open Oracle - Determine if TCP port 1521 for Oracle is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Oracle should be restricted to known IP addresses.`, Recommendation: `Restrict TCP ports 1521 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openOracleAutoDataWarehouse": { Risk: `Open Oracle Auto Data Warehouse - Determine if TCP port 1522 for Oracle Auto Data Warehouse is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Oracle Auto Data Warehouse should be restricted to known IP addresses.`, Recommendation: `Restrict TCP ports 1522 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openPostgreSQL": { Risk: `Open PostgreSQL - Determine if TCP port 5432 for PostgreSQL is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as PostgreSQL should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 5432 to 
known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openRDP": { Risk: `Open RDP - Determine if TCP port 3389 for RDP is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as RDP should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 3389 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openRPC": { Risk: `Open RPC - Determine if TCP port 135 for RPC is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as RPC should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 135 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openSalt": { Risk: `Open Salt - Determine if TCP ports 4505 or 4506 for the Salt master are open to the public - Active Salt vulnerabilities, CVE-2020-11651 and CVE-2020-11652 are exploiting Salt instances exposed to the internet. 
These ports should be closed immediately.`, Recommendation: `Restrict TCP ports 4505 and 4506 to known IP addresses - https://help.saltstack.com/hc/en-us/articles/360043056331-New-SaltStack-Release-Critical-Vulnerability`, }, categoryEC2 + "/openSMBoTCP": { Risk: `Open SMBoTCP - Determine if TCP port 445 for Windows SMB over TCP is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SMB should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 445 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openSMTP": { Risk: `Open SMTP - Determine if TCP port 25 for SMTP is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SMTP should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 25 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openSQLServer": { Risk: `Open SQL Server - Determine if TCP port 1433 or UDP port 1434 for SQL Server is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SQL server should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 1433 and UDP port 1434 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openSSH": { Risk: `Open SSH - Determine if TCP port 22 for SSH is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as SSH should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 22 to known IP addresses - 
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openTelnet": { Risk: `Open Telnet - Determine if TCP port 23 for Telnet is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as Telnet should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 23 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openVNCClient": { Risk: `Open VNC Client - Determine if TCP port 5500 for VNC Client is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as VNC Client should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 5500 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/openVNCServer": { Risk: `Open VNC Server - Determine if TCP port 5900 for VNC Server is open to the public - While some ports such as HTTP and HTTPS are required to be open to the public to function properly, more sensitive services such as VNC Server should be restricted to known IP addresses.`, Recommendation: `Restrict TCP port 5900 to known IP addresses - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/overlappingSecurityGroups": { Risk: `Overlapping Security Groups - Determine if EC2 instances have security groups that share the same rules - Overlapping security group rules make managing EC2 instance access much more difficult. 
- If a rule is removed from one security group, the access may still remain in another resulting in unintended access to the instance.`, Recommendation: `Structure security groups to provide a single category of access and do not duplicate rules across groups used by the same instances. - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html`, }, categoryEC2 + "/publicAmi": { Risk: `Public AMI - Checks for publicly shared AMIs - Accidentally sharing AMIs allows any AWS user to launch an EC2 instance using the image as a base. This can potentially expose sensitive information stored on the host.`, Recommendation: `Convert the public AMI to a private image. - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html`, }, categoryEC2 + "/publicIpAddress": { Risk: `Public IP Address EC2 Instances - Ensures that EC2 instances do not have public IP address attached. - EC2 instances should not have a public IP address attached in order to block public access to the instances.`, Recommendation: `Remove the public IP address from the EC2 instances to block public access to the instance - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html`, }, categoryEC2 + "/securityGroupRfc1918": { Risk: `Open RFC 1918 - Ensures EC2 security groups are configured to deny inbound traffic from RFC-1918 CIDRs - RFC-1918 IP addresses are considered reserved private addresses and should not be used in security groups.`, Recommendation: `Modify the security group to deny private reserved addresses for inbound traffic - https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html`, }, categoryEC2 + "/subnetIpAvailability": { Risk: `Subnet IP Availability - Determine if a subnet is at risk of running out of IP addresses - Subnets have finite IP addresses. Running out of IP addresses could prevent resources from launching.`, Recommendation: `Add a new subnet with larger CIDR block and migrate resources. 
- http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html`, }, categoryEC2 + "/unassociatedElasticIp": { Risk: `Unassociated Elastic IP Addresses - Ensures all EIPs are allocated to a resource to avoid accidental usage or reuse and to save costs - EIPs should be deleted if they are not in use to avoid extra charges.`, Recommendation: `Delete the unassociated Elastic IP - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html`, }, categoryEC2 + "/unusedAmi": { Risk: `Unused Amazon Machine Images - Ensures that all Amazon Machine Images are in use to ensure cost optimization. - All unused/deregistered Amazon Machine Images should be deleted to avoid extraneous cost.`, Recommendation: `Delete the unused/deregistered AMIs - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html`, }, categoryEC2 + "/unusedEni": { Risk: `Unused Elastic Network Interfaces - Ensures that unused AWS Elastic Network Interfaces (ENIs) are removed. - Unused AWS ENIs should be removed to follow best practices and to avoid reaching the service limit.`, Recommendation: `Delete the unused AWS Elastic Network Interfaces - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html`, }, categoryEC2 + "/unusedVirtualPrivateGateway": { Risk: `Unused Virtual Private Gateway - Ensures that unused Virtual Private Gateways (VGWs) are removed. - Unused VGWs should be removed to follow best practices and to avoid reaching the service limit.`, Recommendation: `Remove the unused Virtual Private Gateways (VGWs) - https://docs.aws.amazon.com/vpn/latest/s2svpn/delete-vpn.html`, }, categoryEC2 + "/unusedVpcInternetGateways": { Risk: `Unused VPC Internet Gateways - Ensures that unused VPC Internet Gateways and Egress-Only Internet Gateways are removed. 
- Unused VPC Internet Gateways and Egress-Only Internet Gateways must be removed to avoid reaching the internet gateway limit.`, Recommendation: `Remove the unused/detached Internet Gateways and Egress-Only Internet Gateways - https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html`, }, categoryEC2 + "/vpcElasticIpLimit": { Risk: `VPC Elastic IP Limit - Determine if the number of allocated VPC EIPs is close to the AWS per-account limit - AWS limits accounts to certain numbers of resources. Exceeding those limits could prevent resources from launching.`, Recommendation: `Contact AWS support to increase the number of EIPs available - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html#using-instance-addressing-limit`, }, categoryEC2 + "/vpcEndpointAcceptance": { Risk: `VPC PrivateLink Endpoint Acceptance Required - Ensures VPC PrivateLink endpoints require acceptance - VPC PrivateLink endpoints should be configured to require acceptance so that access to the endpoint is controlled on a case-by-case basis.`, Recommendation: `Update the VPC PrivateLink endpoint to require acceptance - https://docs.aws.amazon.com/vpc/latest/userguide/accept-reject-endpoint-requests.html`, }, categoryEC2 + "/vpcEndpointExposed": { Risk: `VPC Endpoint Exposed - Ensure Amazon VPC endpoints are not publicly exposed. - VPC endpoints should not be publicly accessible in order to avoid any unsigned requests made to the services inside VPC.`, Recommendation: `Update VPC endpoint access policy in order to stop any unsigned requests - https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html`, }, categoryEC2 + "/webTierInstanceIamRole": { Risk: `Web-Tier EC2 Instance IAM Role - Ensure IAM roles attached with Web-Tier EC2 instances have IAM policies attached. 
- EC2 instances should have IAM roles configured with necessary permission to access other AWS services`, Recommendation: `Modify EC2 instances to attach IAM roles with required IAM policies - https://aws.amazon.com/blogs/security/new-attach-an-aws-iam-role-to-an-existing-amazon-ec2-instance-by-using-the-aws-cli/`, }, categoryECR + "/ecrRepositoryPolicy": { Risk: `ECR Repository Policy - Ensures ECR repository policies do not enable global or public access to images - ECR repository policies should limit access to images to known IAM entities and AWS accounts and avoid the use of account-level wildcards.`, Recommendation: `Update the repository policy to limit access to known IAM entities. - https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicyExamples.html`, }, categoryECR + "/ecrRepositoryTagImmutability": { Risk: `ECR Repository Tag Immutability - Ensures ECR repository image tags cannot be overwritten - ECR repositories should be configured to prevent overwriting of image tags to avoid potentially-malicious images from being deployed to live environments.`, Recommendation: `Update ECR registry configurations to ensure image tag mutability is set to immutable. - https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html`, }, categoryEFS + "/efsCmkEncrypted": { Risk: `EFS CMK Encrypted - Ensure EFS file systems are encrypted using Customer Master Keys (CMKs). - EFS file systems should use KMS Customer Master Keys (CMKs) instead of AWS managed keys for encryption in order to have full control over data encryption and decryption.`, Recommendation: `Encryption at rest key can only be configured during file system creation. Encryption of data in transit is configured when mounting your file system. 1. Backup your data in not encrypted efs 2. Recreate the EFS and use KMS CMK for encryption of data at rest. 
- https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html`, }, categoryEFS + "/efsEncryptionEnabled": { Risk: `EFS Encryption Enabled - Ensures that EFS volumes are encrypted at rest - EFS offers data at rest encryption using keys managed through AWS Key Management Service (KMS).`, Recommendation: `Encryption of data at rest can only be enabled during file system creation. Encryption of data in transit is configured when mounting your file system. 1. Backup your data in not encrypted efs 2. Recreate the EFS and select 'Enable encryption of data at rest'`, }, categoryEKS + "/eksKubernetesVersion": { Risk: `EKS Kubernetes Version - Ensures the latest version of Kubernetes is installed on EKS clusters - EKS supports provisioning clusters from several versions of Kubernetes. Clusters should be kept up to date to ensure Kubernetes security patches are applied.`, Recommendation: `Upgrade the version of Kubernetes on all EKS clusters to the latest available version. - https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html`, }, categoryEKS + "/eksLoggingEnabled": { Risk: `EKS Logging Enabled - Ensures all EKS cluster logs are being sent to CloudWatch - EKS supports routing of cluster event and audit logs to CloudWatch, including control plane logs. - All logs should be sent to CloudWatch for security analysis.`, Recommendation: `Enable all EKS cluster logs to be sent to CloudWatch with proper log retention limits. - https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html`, }, categoryEKS + "/eksPrivateEndpoint": { Risk: `EKS Private Endpoint - Ensures the private endpoint setting is enabled for EKS clusters - EKS private endpoints can be used to route all traffic between the Kubernetes worker and control plane nodes over a private VPC endpoint rather than across the public internet.`, Recommendation: `Enable the private endpoint setting for all EKS clusters. 
- https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html`, }, categoryEKS + "/eksSecurityGroups": { Risk: `EKS Security Groups - Ensures the EKS control plane only allows inbound traffic on port 443. - The EKS control plane only requires port 443 access. Security groups for the control plane should not add additional port access.`, Recommendation: `Configure security groups for the EKS control plane to allow access only on port 443. - https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html`, }, categoryElasticBeanstalk + "/managedPlatformUpdates": { Risk: `ElasticBeanstalk Managed Platform Updates - Ensures ElasticBeanstalk applications are configured to use managed updates. - Environments for an application should be configured to allow platform managed updates.`, Recommendation: `Update the environment to enable managed updates. - https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-platform-update-managed.html`, }, categoryELB + "/elbHttpsOnly": { Risk: `ELB HTTPS Only - Ensures ELBs are configured to only accept connections on HTTPS ports. - For maximum security, ELBs can be configured to only accept HTTPS connections. - Standard HTTP connections will be blocked. - This should only be done if the client application is configured to query HTTPS directly and not rely on a redirect from HTTP.`, Recommendation: `Remove non-HTTPS listeners from load balancer. - http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-options.html`, }, categoryELB + "/elbLoggingEnabled": { Risk: `ELB Logging Enabled - Ensures load balancers have request logging enabled. - Logging requests to ELB endpoints is a helpful way of detecting and investigating potential attacks, malicious activity, or misuse of backend resources. 
- Logs can be sent to S3 and processed for further analysis.`, Recommendation: `Enable ELB request logging - http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html`, }, categoryELB + "/elbNoInstances": { Risk: `ELB No Instances - Detects ELBs that have no backend instances attached - All ELBs should have backend server resources. - Those without any are consuming costs without providing any functionality. Additionally, old ELBs with no instances present a security concern if new instances are accidentally attached.`, Recommendation: `Delete old ELBs that no longer have backend resources. - http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-backend-instances.html`, }, categoryELB + "/insecureCiphers": { Risk: `Insecure Ciphers - Detect use of insecure ciphers on ELBs - Various security vulnerabilities have rendered several ciphers insecure. Only the recommended ciphers should be used.`, Recommendation: `Update your ELBs to use the recommended cipher suites - http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-options.html`, }, categoryELBv2 + "/elbv2DeletionProtection": { Risk: `ELBv2 Deletion Protection - Ensures ELBv2 load balancers are configured with deletion protection. - ELBv2 load balancers should be configured with deletion protection to prevent accidental deletion of live resources in production environments.`, Recommendation: `Update ELBv2 load balancers to use deletion protection to prevent accidental deletion - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection`, }, categoryELBv2 + "/elbv2HttpsOnly": { Risk: `ELBv2 HTTPS Only - Ensures ELBs are configured to only accept connections on HTTPS ports. - For maximum security, ELBs can be configured to only accept HTTPS connections. - Standard HTTP connections will be blocked. 
This should only be done if the client application is configured to query HTTPS directly and not rely on a redirect from HTTP.`, Recommendation: `Remove non-HTTPS listeners from load balancer. - http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-options.html`, }, categoryELBv2 + "/elbv2LoggingEnabled": { Risk: `ELBv2 Logging Enabled - Ensures load balancers have request logging enabled. - Logging requests to ELB endpoints is a helpful way of detecting and investigating potential attacks, malicious activity, or misuse of backend resources. - Logs can be sent to S3 and processed for further analysis.`, Recommendation: `Enable ELB request logging - http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html`, }, categoryELBv2 + "/elbv2MinimumTargetInstances": { Risk: `ELBv2 Minimum Number of EC2 Target Instances - Ensures that there is a minimum number of two healthy target instances associated with each AWS ELBv2 load balancer. - There should be a minimum number of two healthy target instances associated with each AWS ELBv2 load balancer to ensure fault tolerance.`, Recommendation: `Associate at least two healthy target instances to AWS ELBv2 load balancer - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html`, }, categoryELBv2 + "/elbv2NlbListenerSecurity": { Risk: `ELBv2 NLB Listener Security - Ensures that AWS Network Load Balancers have secured listener configured. - AWS Network Load Balancer should have TLS protocol listener configured to terminate TLS traffic.`, Recommendation: `Attach TLS listener to AWS Network Load Balancer - https://docs.amazonaws.cn/en_us/elasticloadbalancing/latest/network/create-tls-listener.html`, }, categoryELBv2 + "/elbv2NoInstances": { Risk: `ELBv2 No Instances - Detects ELBs that have no target groups attached - All ELBs should have backend server resources. 
- Those without any are consuming costs without providing any functionality. - Additionally, old ELBs with no target groups present a security concern if new target groups are accidentally attached.`, Recommendation: `Delete old ELBs that no longer have backend resources. - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html`, }, categoryELBv2 + "/elbv2WafEnabled": { Risk: `ELBv2 WAF Enabled - Ensure that all Application Load Balancers have WAF enabled. - Enabling WAF allows control over requests to the load balancer, allowing or denying traffic based off rules in the Web ACL.`, Recommendation: `1. Enter the WAF service. 2. Enter Web ACLs and filter by the region the Application Load Balancer is in. 3. If no Web ACL is found, Create a new Web ACL in the region the ALB resides and in Resource type to associate with web ACL, select the Load Balancer. - https://aws.amazon.com/blogs/aws/aws-web-application-firewall-waf-for-application-load-balancers/`, }, categoryEMR + "/emrClusterLogging": { Risk: `EMR Cluster Logging - Ensure AWS Elastic MapReduce (EMR) clusters capture detailed log data to Amazon S3. - EMR cluster logging should be enabled to save log files for troubleshooting purposes.`, Recommendation: `Modify EMR clusters to enable cluster logging - https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-debugging.html`, }, categoryEMR + "/emrEncryptionAtRest": { Risk: `EMR Encryption At Rest - Ensures encryption at rest for local disks is enabled for EMR clusters - EMR clusters should be configured to enable encryption at rest for local disks.`, Recommendation: `Update security configuration associated with EMR cluster to enable encryption at rest for local disks. 
- https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-data-encryption-options.html`, }, categoryEMR + "/emrEncryptionInTransit": { Risk: `EMR Encryption In Transit - Ensures encryption in transit is enabled for EMR clusters - EMR clusters should be configured to enable encryption in transit.`, Recommendation: `Update security configuration associated with EMR cluster to enable encryption in transit. - https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-data-encryption-options.html`, }, categoryES + "/esAccessFromIps": { Risk: `ElasticSearch Access From IP Addresses - Ensure only whitelisted IP addresses can access Amazon Elasticsearch domains. - ElasticSearch domains should be accessible only from whitelisted IP addresses to avoid unauthorized access.`, Recommendation: `Modify Elasticsearch domain access policy to allow only known/whitelisted IP addresses. - https://aws.amazon.com/blogs/security/how-to-control-access-to-your-amazon-elasticsearch-service-domain/`, }, categoryES + "/esEncryptedDomain": { Risk: `ElasticSearch Encrypted Domain - Ensures ElasticSearch domains are encrypted with KMS - ElasticSearch domains should be encrypted to ensure data at rest is secured.`, Recommendation: `Ensure encryption-at-rest is enabled for all ElasticSearch domains. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html`, }, categoryES + "/esExposedDomain": { Risk: `ElasticSearch Exposed Domain - Ensures ElasticSearch domains are not publicly exposed to all AWS accounts - ElasticSearch domains should not be publicly exposed to all AWS accounts.`, Recommendation: `Update elasticsearch domain to set access control. 
- https://aws.amazon.com/blogs/database/set-access-control-for-amazon-elasticsearch-service/`, }, categoryES + "/esHttpsOnly": { Risk: `ElasticSearch HTTPS Only - Ensures ElasticSearch domains are configured to enforce HTTPS connections - ElasticSearch domains should be configured to enforce HTTPS connections for all clients to ensure encryption of data in transit.`, Recommendation: `Ensure HTTPS connections are enforced for all ElasticSearch domains. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html`, }, categoryES + "/esLoggingEnabled": { Risk: `ElasticSearch Logging Enabled - Ensures ElasticSearch domains are configured to log data to CloudWatch - ElasticSearch domains should be configured with logging enabled with logs sent to CloudWatch for analysis and long-term storage.`, Recommendation: `Ensure logging is enabled and a CloudWatch log group is specified for each ElasticSearch domain. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-slow-logs`, }, categoryES + "/esNodeToNodeEncryption": { Risk: `ElasticSearch Node To Node Encryption - Ensures ElasticSearch domain traffic is encrypted in transit between nodes - ElasticSearch domains should use node-to-node encryption to ensure data in transit remains encrypted using TLS 1.2.`, Recommendation: `Ensure node-to-node encryption is enabled for all ElasticSearch domains. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/ntn.html`, }, categoryES + "/esPublicEndpoint": { Risk: `ElasticSearch Public Service Domain - Ensures ElasticSearch domains are created with private VPC endpoint options - ElasticSearch domains can either be created with a public endpoint or with a VPC configuration that enables internal VPC communication. 
- Domains should be created without a public endpoint to prevent potential public access to the domain.`, Recommendation: `Configure the ElasticSearch domain to use a VPC endpoint for secure VPC communication. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html`, }, categoryES + "/esRequireIAMAuth": { Risk: `ElasticSearch IAM Authentication - Ensures ElasticSearch domains require IAM Authentication - ElasticSearch domains can allow access without IAM authentication by having a policy that does not specify the principal or has a wildcard principal`, Recommendation: `Configure the ElasticSearch domain to have an access policy without a global principal or no principal - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html`, }, categoryES + "/esUpgradeAvailable": { Risk: `ElasticSearch Upgrade Available - Ensures ElasticSearch domains are running the latest service software - ElasticSearch domains should be configured to run the latest service software which often contains security updates.`, Recommendation: `Ensure each ElasticSearch domain is running the latest service software and update out-of-date domains. - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-version-migration.html`, }, categoryFirehose + "/firehoseEncrypted": { Risk: `Firehose Delivery Streams Encrypted - Ensures Firehose Delivery Stream encryption is enabled - Data sent through Firehose Delivery Streams can be encrypted using KMS server-side encryption. - Existing delivery streams can be modified to add encryption with minimal overhead.`, Recommendation: `Enable encryption using KMS for all Firehose Delivery Streams. - https://docs.aws.amazon.com/firehose/latest/dev/encryption.html`, }, categoryGlue + "/bookmarkEncryptionEnabled": { Risk: `AWS Glue Job Bookmark Encryption Enabled - Ensures that AWS Glue job bookmark encryption is enabled. 
- AWS Glue security configuration should have job bookmark encryption enabled in order to encrypt the bookmark data before it is sent to Amazon S3.`, Recommendation: `Recreate Glue security configurations and enable job bookmark encryption - https://docs.aws.amazon.com/glue/latest/dg/console-security-configurations.html`, }, categoryGuardDuty + "/guardDutyEnabled": { Risk: `GuardDuty is Enabled - GuardDuty provides threat intelligence by analyzing several AWS data sources for security risks and should be enabled in all accounts.`, Recommendation: `Enable GuardDuty for all AWS accounts. - https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html`, }, categoryGuardDuty + "/guardDutyMaster": { Risk: `GuardDuty Master Account - Ensures GuardDuty master account is correct - Organizations with large numbers of AWS accounts should configure GuardDuty findings from all member accounts to be sent to a consistent master account.`, Recommendation: `Configure the member account to send GuardDuty findings to a known master account. - https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_accounts.html#guardduty_master`, }, categoryIAM + "/accessKeysExtra": { Risk: `Access Keys Extra - Detects the use of more than one access key by any single user - Having more than one access key for a single user increases the chance of accidental exposure. - Each account should only have one key that defines the users permissions.`, Recommendation: `Remove the extra access key for the specified user. - http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html`, }, categoryIAM + "/accessKeysLastUsed": { Risk: `Access Keys Last Used - Detects access keys that have not been used for a period of time and that should be decommissioned - Having numerous, unused access keys extends the attack surface. Access keys should be removed if they are no longer being used.`, Recommendation: `Log into the IAM portal and remove the offending access key. 
- http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html`, }, categoryIAM + "/accessKeysRotated": { Risk: `Access Keys Rotated - Ensures access keys are not older than 180 days in order to reduce accidental exposures - Access keys should be rotated frequently to avoid having them accidentally exposed.`, Recommendation: `To rotate an access key, first create a new key, replace the key and secret throughout your app or scripts, then set the previous key to disabled. - Once you ensure that no services are broken, then fully delete the old key. - http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html`, }, categoryIAM + "/canaryKeysUsed": { Risk: `Canary Keys Used - Detects when a special canary-token access key has been used - Canary access keys can be created with limited permissions and then used to detect when a potential breach occurs.`, Recommendation: `Create a canary access token and provide its user to CloudSploit. - If CloudSploit detects that the account is in use, it will trigger a failure. - https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html`, }, categoryIAM + "/certificateExpiry": { Risk: `Certificate Expiry - Detect upcoming expiration of certificates used with ELBs - Certificates that have expired will trigger warnings in all major browsers`, Recommendation: `Update your certificates before the expiration date - http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-update-ssl-cert.html`, }, categoryIAM + "/crossAccountMfaExtIdAccess": { Risk: `Cross-Account Access External ID and MFA - Ensures that either MFA or external IDs are used to access AWS roles. - IAM roles should be configured to require either a shared external ID or use an MFA device when assuming the role.`, Recommendation: `Update the IAM role to either require MFA or use an external ID. 
- https://aws.amazon.com/blogs/aws/mfa-protection-for-cross-account-access/`, }, categoryIAM + "/emptyGroups": { Risk: `Empty Groups - Ensures all groups have at least one member - While having empty groups does not present a direct security risk, it does broaden the management landscape which could potentially introduce risks in the future.`, Recommendation: `Remove unused groups without users - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html`, }, categoryIAM + "/groupInlinePolicies": { Risk: `Group Inline Policies - Ensures that groups do not have any inline policies - Managed Policies are recommended over inline policies.`, Recommendation: `Remove inline policies attached to groups - https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html`, }, categoryIAM + "/iamRoleLastUsed": { Risk: `IAM Role Last Used - Ensures IAM roles that have not been used within the given time frame are deleted. - IAM roles that have not been used for a long period may contain old access policies that could allow unintended access to resources if accidentally attached to new services. - These roles should be deleted.`, Recommendation: `Delete IAM roles that have not been used within the expected time frame. - https://aws.amazon.com/about-aws/whats-new/2019/11/identify-unused-iam-roles-easily-and-remove-them-confidently-by-using-the-last-used-timestamp/`, }, categoryIAM + "/iamRolePolicies": { Risk: `IAM Role Policies - Ensures IAM role policies are properly scoped with specific permissions - Policies attached to IAM roles should be scoped to least-privileged access and avoid the use of wildcards.`, Recommendation: `Ensure that all IAM roles are scoped to specific services and API calls. 
- https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html`, }, categoryIAM + "/iamUserAdmins": { Risk: `IAM User Admins - Ensures the number of IAM admins in the account are minimized - While at least two IAM admin users should be configured, the total number of admins should be kept to a minimum.`, Recommendation: `Keep two users with admin permissions but ensure other IAM users have more limited permissions. - http://docs.aws.amazon.com/IAM/latest/UserGuide/getting-started_create-admin-group.html`, }, categoryIAM + "/iamUserNameRegex": { Risk: `IAM Username Matches Regex - Ensures all IAM user names match the given regex - Many organizational policies require IAM user names to follow a common naming convention. - This check ensures these conventions are followed.`, Recommendation: `Rename the IAM user name to match the provided regex. - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html`, }, categoryIAM + "/iamUserUnauthorizedToEdit": { Risk: `IAM User Unauthorized to Edit - Ensures AWS IAM users that are not authorized to edit IAM access policies are decommissioned. - Only authorized IAM users should have permission to edit IAM access policies to prevent any unauthorized requests.`, Recommendation: `Update unauthorized IAM users to remove permissions to edit IAM access policies. 
- Update unauthorized IAM users to remove permissions to edit IAM access policies.`, }, categoryIAM + "/maxPasswordAge": { Risk: `Maximum Password Age - Ensures password policy requires passwords to be reset every 180 days - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Decrease the maximum allowed age of passwords for the password policy - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/minPasswordLength": { Risk: `Minimum Password Length - Ensures password policy requires a password of at least a minimum number of characters - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Increase the minimum length requirement for the password policy - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/noUserIamPolicies": { Risk: `No User IAM Policies - Ensures IAM policies are not connected directly to IAM users - To reduce management complexity, IAM permissions should only be assigned to roles and groups. Users can then be added to those groups. - Policies should not be applied directly to a user.`, Recommendation: `Create groups with the required policies, move the IAM users to the applicable groups, and then remove the inline and directly attached policies from the IAM user. 
- http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#use-groups-for-permissions`, }, categoryIAM + "/passwordExpiration": { Risk: `Password Expiration - Ensures password policy enforces a password expiration - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Enable password expiration for the account - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/passwordRequiresLowercase": { Risk: `Password Requires Lowercase - Ensures password policy requires at least one lowercase letter - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Update the password policy to require the use of lowercase letters - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/passwordRequiresNumbers": { Risk: `Password Requires Numbers - Ensures password policy requires the use of numbers - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Update the password policy to require the use of numbers - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/passwordRequiresSymbols": { Risk: `Password Requires Symbols - Ensures password policy requires the use of symbols - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Update the password policy to require the use of symbols - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/passwordRequiresUppercase": { Risk: `Password Requires Uppercase - Ensures password policy requires at least one uppercase letter - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Update the password policy to require the use of uppercase letters - 
http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/passwordReusePrevention": { Risk: `Password Reuse Prevention - Ensures password policy prevents previous password reuse - A strong password policy enforces minimum length, expirations, reuse, and symbol usage`, Recommendation: `Increase the minimum previous passwords that can be reused to 24. - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/rootAccessKeys": { Risk: `Root Access Keys - Ensures the root account is not using access keys - The root account should avoid using access keys. - Since the root account has full permissions across the entire account, creating access keys for it only increases the chance that they are compromised. - Instead, create IAM users with predefined roles.`, Recommendation: `Remove access keys for the root account and setup IAM users with limited permissions instead - http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html`, }, categoryIAM + "/rootAccountInUse": { Risk: `Root Account In Use - Ensures the root account is not being actively used - The root account should not be used for day-to-day account management. - IAM users, roles, and groups should be used instead.`, Recommendation: `Create IAM users with appropriate group-level permissions for account access. - Create an MFA token for the root account, and store its password and token generation QR codes in a secure place. 
- http://docs.aws.amazon.com/general/latest/gr/root-vs-iam.html`, }, categoryIAM + "/rootHardwareMfa": { Risk: `Root Hardware MFA - Ensures the root account is using a hardware MFA device - The root account should use a hardware MFA device for added security, rather than a virtual device which could be more easily compromised.`, Recommendation: `Enable a hardware MFA device for the root account and disable any virtual devices - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html`, }, categoryIAM + "/rootMfaEnabled": { Risk: `Root MFA Enabled - Ensures a multi-factor authentication device is enabled for the root account - The root account should have an MFA device setup to enable two-factor authentication.`, Recommendation: `Enable an MFA device for the root account and then use an IAM user for managing services - http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html`, }, categoryIAM + "/rootSigningCertificate": { Risk: `Root Account Active Signing Certificates - Ensures the root user is not using x509 signing certificates - AWS supports using x509 signing certificates for API access, but these should not be attached to the root user, which has full access to the account.`, Recommendation: `Delete the x509 certificates associated with the root account. - https://docs.aws.amazon.com/whitepapers/latest/aws-overview-security-processes/x.509-certificates.html`, }, categoryIAM + "/sshKeysRotated": { Risk: `SSH Keys Rotated - Ensures SSH keys are not older than 180 days in order to reduce accidental exposures - SSH keys should be rotated frequently to avoid having them accidentally exposed.`, Recommendation: `To rotate an SSH key, first create a new public-private key pair, then upload the public key to AWS and delete the old key. 
- http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_ssh-keys.html`, }, categoryIAM + "/usersMfaEnabled": { Risk: `Users MFA Enabled - Ensures a multi-factor authentication device is enabled for all users within the account - User accounts should have an MFA device setup to enable two-factor authentication`, Recommendation: `Enable an MFA device for the user account - http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html`, }, categoryIAM + "/usersPasswordAndKeys": { Risk: `Users Password And Keys - Detects whether users with a console password are also using access keys - Access keys should only be assigned to machine users and should not be used for accounts that have console password access.`, Recommendation: `Remove access keys from all users with console access. - http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html`, }, categoryIAM + "/usersPasswordLastUsed": { Risk: `Users Password Last Used - Detects users with password logins that have not been used for a period of time and that should be decommissioned - Having numerous, unused user accounts extends the attack surface. - If users do not log into their accounts for more than the defined period of time, the account should be deleted.`, Recommendation: `Delete old user accounts that allow password-based logins and have not been used recently. - http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html`, }, categoryKinesis + "/kinesisEncrypted": { Risk: `Kinesis Streams Encrypted - Ensures Kinesis Streams encryption is enabled - Data sent to Kinesis Streams can be encrypted using KMS server-side encryption. - Existing streams can be modified to add encryption with minimal overhead.`, Recommendation: `Enable encryption using KMS for all Kinesis Streams. 
- https://docs.aws.amazon.com/streams/latest/dev/server-side-encryption.html`, }, categoryKMS + "/kmsAppTierCmk": { Risk: `App-Tier KMS Customer Master Key (CMK) - Ensures that there is one Amazon KMS Customer Master Key (CMK) present in the account for App-Tier resources. - Amazon KMS should have Customer Master Key (CMK) for App-Tier to protect data in transit.`, Recommendation: `Create a Customer Master Key (CMK) with App-Tier tag - https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html`, }, categoryKMS + "/kmsDefaultKeyUsage": { Risk: `KMS Default Key Usage - Checks AWS services to ensure the default KMS key is not being used - It is recommended not to use the default key to avoid encrypting disparate sets of data with the same key. - Each application should have its own customer-managed KMS key`, Recommendation: `Avoid using the default KMS key - http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html`, }, categoryKMS + "/kmsKeyPolicy": { Risk: `KMS Key Policy - Validates the KMS key policy to ensure least-privilege access. - KMS key policies should be designed to limit the number of users who can perform encrypt and decrypt operations. - Each application should use its own key to avoid over exposure.`, Recommendation: `Modify the KMS key policy to remove any wildcards and limit the number of users and roles that can perform encrypt and decrypt operations using the key. - http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html`, }, categoryKMS + "/kmsKeyRotation": { Risk: `KMS Key Rotation - Ensures KMS keys are set to rotate on a regular schedule - All KMS keys should have key rotation enabled. 
- AWS will handle the rotation of the encryption key itself, as well as storage of previous keys, so previous data does not need to be re-encrypted before the rotation occurs.`, Recommendation: `Enable yearly rotation for the KMS key - http://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html`, }, categoryKMS + "/kmsScheduledDeletion": { Risk: `KMS Scheduled Deletion - Detects KMS keys that are scheduled for deletion - Deleting a KMS key will permanently prevent all data encrypted using that key from being decrypted. - Avoid deleting keys unless no encrypted data is in use.`, Recommendation: `Disable the key deletion before the scheduled deletion time. - http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html`, }, categoryLambda + "/lambdaLogGroups": { Risk: `Lambda Log Groups - Ensures each Lambda function has a valid log group attached to it - Every Lambda function created should automatically have a CloudWatch log group generated to handle its log streams.`, Recommendation: `Update the Lambda function permissions to allow CloudWatch logging. - https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html`, }, categoryLambda + "/lambdaOldRuntimes": { Risk: `Lambda Old Runtimes - Ensures Lambda functions are not using out-of-date runtime environments. - Lambda runtimes should be kept current with recent versions of the underlying codebase. - Deprecated runtimes should not be used.`, Recommendation: `Upgrade the Lambda function runtime to use a more current version. - http://docs.aws.amazon.com/lambda/latest/dg/current-supported-versions.html`, }, categoryLambda + "/lambdaPublicAccess": { Risk: `Lambda Public Access - Ensures Lambda functions are not accessible globally - The Lambda function execution policy should not allow public invocation of the function.`, Recommendation: `Update the Lambda policy to prevent access from the public. 
- https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html`, }, categoryLambda + "/lambdaVpcConfig": { Risk: `Lambda VPC Config - Ensures Lambda functions are created in a VPC. - Lambda functions should be created in an AWS VPC to avoid exposure to the Internet and to enable communication with VPC resources through NACLs and security groups.`, Recommendation: `Update the Lambda function with a VPC configuration. - https://docs.aws.amazon.com/lambda/latest/dg/vpc.html`, }, categoryOrganizations + "/enableAllFeatures": { Risk: `Enable All Organization Features - Ensures all Organization features are enabled - All AWS Organizations should be enabled to take advantage of all shared security controls and policies across all member accounts.`, Recommendation: `Enable all AWS Organizations features. - https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html?icmpid=docs_orgs_console`, }, categoryOrganizations + "/organizationInvite": { Risk: `Organization Invite - Ensure all Organization invites are accepted - AWS Organizations invites should be accepted or rejected quickly so that member accounts can take advantage of all Organization features.`, Recommendation: `Enable all AWS Organizations features - https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html?icmpid=docs_orgs_console`, }, categoryRDS + "/rdsAutomatedBackups": { Risk: `RDS Automated Backups - Ensures automated backups are enabled for RDS instances - AWS provides a simple method of backing up RDS instances at a regular interval. 
- This should be enabled to provide an option for restoring data in the event of a database compromise or hardware failure.`, Recommendation: `Enable automated backups for the RDS instance - http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html`, }, categoryRDS + "/rdsCmkEncryptionEnabled": { Risk: `RDS CMK Encryption - Ensures RDS instances are encrypted with KMS Customer Master Keys(CMKs). - RDS instances should be encrypted with Customer Master Keys in order to have full control over data encryption and decryption.`, Recommendation: `RDS does not currently allow modifications to encryption after the instance has been launched, so a new instance will need to be created with KMS CMK encryption enabled. - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html`, }, categoryRDS + "/rdsEncryptionEnabled": { Risk: `RDS Encryption Enabled - Ensures at-rest encryption is setup for RDS instances - AWS provides at-rest encryption for RDS instances which should be enabled to ensure the integrity of data stored within the databases.`, Recommendation: `RDS does not currently allow modifications to encryption after the instance has been launched, so a new instance will need to be created with encryption enabled. - http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html`, }, categoryRDS + "/rdsLoggingEnabled": { Risk: `RDS Logging Enabled - Ensures logging is configured for RDS instances - Logging database level events enables teams to analyze events for the purpose of diagnostics as well as audit tracking for compliance purposes.`, Recommendation: `Modify the RDS instance to enable logging as required. 
- https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html`, }, categoryRDS + "/rdsMinorVersionUpgrade": { Risk: `RDS DocumentDB Minor Version Upgrade - Ensures Auto Minor Version Upgrade is enabled on RDS and DocumentDB databases - RDS supports automatically upgrading the minor version of the database, which should be enabled to ensure security fixes are quickly deployed.`, Recommendation: `Enable automatic minor version upgrades on RDS and DocumentDB databases - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html#USER_UpgradeDBInstance.Upgrading.AutoMinorVersionUpgrades`, }, categoryRDS + "/rdsMultiAz": { Risk: `RDS Multiple AZ - Ensures that RDS instances are created to be cross-AZ for high availability. - Creating RDS instances in a single AZ creates a single point of failure for all systems relying on that database. - All RDS instances should be created in multiple AZs to ensure proper failover.`, Recommendation: `Modify the RDS instance to enable scaling across multiple availability zones. - http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html`, }, categoryRDS + "/rdsPubliclyAccessible": { Risk: `RDS Publicly Accessible - Ensures RDS instances are not launched into the public cloud - Unless there is a specific business requirement, RDS instances should not have a public endpoint and should be accessed from within a VPC only.`, Recommendation: `Remove the public endpoint from the RDS instance - http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html`, }, categoryRDS + "/rdsRestorable": { Risk: `RDS Restorable - Ensures RDS instances can be restored to a recent point - AWS will maintain a point to which the database can be restored. - This point should not drift too far into the past, or else the risk of irrecoverable data loss may occur.`, Recommendation: `Ensure the instance is running and configured properly. 
- If the time drifts too far, consider opening a support ticket with AWS. - http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html`, }, categoryRDS + "/rdsSnapshotEncryption": { Risk: `RDS Snapshot Encryption - Ensures encryption is enabled for RDS snapshots to ensure encryption of data at rest. - AWS provides encryption for RDS snapshots which should be enabled to ensure that all data at rest is encrypted.`, Recommendation: `Copy the snapshot to a new snapshot that is encrypted and delete the old snapshot. - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html`, }, categoryRDS + "/rdsTransportEncryption": { Risk: `RDS Transport Encryption Enabled - Ensures RDS SQL Server instances have Transport Encryption enabled. - Parameter group associated with the RDS instance should have transport encryption enabled to handle encryption and decryption`, Recommendation: `Update the parameter group associated with the RDS instance to have rds.force_ssl set to true - https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html`, }, categoryRDS + "/sqlServerTLSVersion": { Risk: `SQL Server TLS Version - Ensures RDS SQL Servers do not allow outdated TLS certificate versions - TLS 1.2 or higher should be used for all TLS connections to RDS. - A parameter group can be used to enforce this connection type.`, Recommendation: `Create a parameter group that contains the TLS version restriction and limit access to TLS 1.2 or higher - https://aws.amazon.com/about-aws/whats-new/2020/07/amazon-rds-for-sql-server-supports-disabling-old-versions-of-tls-and-ciphers/`, }, categoryRedshift + "/auditLoggingEnabled": { Risk: `Redshift Cluster Audit Logging Enabled - Ensure audit logging is enabled for Redshift clusters for security and troubleshooting purposes. 
- Redshift clusters should be configured to enable audit logging to log cluster usage information.`, Recommendation: `Modify Redshift clusters to enable audit logging - https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing-console.html`, }, categoryRedshift + "/redshiftAllowVersionUpgrade": { Risk: `Redshift Cluster Allow Version Upgrade - Ensure that version upgrade is enabled for Redshift clusters to automatically receive upgrades during the maintenance window. - Redshift clusters should be configured to allow version upgrades to get the newest features, bug fixes or the latest security patches released.`, Recommendation: `Modify Redshift clusters to allow version upgrade - https://docs.amazonaws.cn/en_us/redshift/latest/mgmt/redshift-mgmt.pdf`, }, categoryRedshift + "/redshiftClusterCmkEncrypted": { Risk: `Redshift Cluster CMK Encryption - Ensures Redshift clusters are encrypted using KMS customer master keys (CMKs) - KMS CMKs should be used to encrypt redshift clusters in order to have full control over data encryption and decryption.`, Recommendation: `Update Redshift clusters encryption configuration to use KMS CMKs instead of AWS managed-keys. - http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html`, }, categoryRedshift + "/redshiftEncryptionEnabled": { Risk: `Redshift Encryption Enabled - Ensures at-rest encryption is setup for Redshift clusters - AWS provides at-rest encryption for Redshift clusters which should be enabled to ensure the integrity of data stored within the cluster.`, Recommendation: `Redshift does not currently allow modifications to encryption after the cluster has been launched, so a new cluster will need to be created with encryption enabled. 
- http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html`, }, categoryRedshift + "/redshiftPubliclyAccessible": { Risk: `Redshift Publicly Accessible - Ensures Redshift clusters are not launched into the public cloud - Unless there is a specific business requirement, Redshift clusters should not have a public endpoint and should be accessed from within a VPC only.`, Recommendation: `Remove the public endpoint from the Redshift cluster - http://docs.aws.amazon.com/redshift/latest/mgmt/getting-started-cluster-in-vpc.html`, }, categoryRedshift + "/redshiftSSLEnabled": { Risk: `Redshift Parameter Group SSL Required - Ensures AWS Redshift non-default parameter group associated with Redshift cluster require SSL connection. - Redshift parameter group associated with Redshift cluster should be configured to require SSL to secure data in transit.`, Recommendation: `Update Redshift parameter groups to have require-ssl parameter set to true. - https://docs.aws.amazon.com/redshift/latest/mgmt/connecting-ssl-support.html`, }, categoryRedshift + "/userActivityLoggingEnabled": { Risk: `Redshift User Activity Logging Enabled - Ensure that user activity logging is enabled for your Amazon Redshift clusters. - Redshift clusters associated parameter groups should have user activity logging enabled in order to log user activities performed.`, Recommendation: `Update Redshift parameter groups to enable user activity logging - https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging`, }, categoryRoute53 + "/danglingDnsRecords": { Risk: `Route53 Dangling DNS Records - Ensures that AWS Route53 DNS records are not pointing to invalid/deleted EIPs. 
- AWS Route53 DNS records should not point to invalid/deleted EIPs to prevent malicious activities.`, Recommendation: `Delete invalid/dangling AWS Route53 DNS records - https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-aws-resources.html`, }, categoryRoute53 + "/domainAutoRenew": { Risk: `Domain Auto Renew - Ensures domains are set to auto renew through Route53 - Domains purchased through Route53 should be set to auto renew. - Domains that are not renewed can quickly be acquired by a third-party and cause loss of access for customers.`, Recommendation: `Enable auto renew for the domain - http://docs.aws.amazon.com/Route53/latest/APIReference/api-enable-domain-auto-renew.html`, }, categoryRoute53 + "/domainExpiry": { Risk: `Domain Expiry - Ensures domains are not expiring too soon - Expired domains can be lost and reregistered by a third-party.`, Recommendation: `Reregister the expiring domain - http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar.html`, }, categoryRoute53 + "/domainTransferLock": { Risk: `Domain Transfer Lock - Ensures domains have the transfer lock set - To avoid having a domain maliciously transferred to a third-party, all domains should enable the transfer lock unless actively being transferred.`, Recommendation: `Enable the transfer lock for the domain - http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-transfer-from-route-53.html`, }, categoryS3 + "/bucketAllUsersAcl": { Risk: `S3 Bucket All Users Policy - Ensures S3 bucket policies do not allow global write, delete, or read permissions - S3 buckets can be configured to allow the global principal to access the bucket via the bucket policy. - This policy should be restricted only to known users or accounts.`, Recommendation: `Remove wildcard principals from the bucket policy statements. 
- https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html`, }, categoryS3 + "/bucketAllUsersPolicy": { Risk: `S3 Bucket Encryption - Ensures object encryption is enabled on S3 buckets - S3 object encryption provides fully-managed encryption of all objects uploaded to an S3 bucket.`, Recommendation: `Enable CMK KMS-based encryption for all S3 buckets. - https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html`, }, categoryS3 + "/bucketEncryption": { Risk: `S3 Bucket Encryption In Transit - Ensures S3 buckets have bucket policy statements that deny insecure transport - S3 bucket policies can be configured to deny access to the bucket over HTTP.`, Recommendation: `Add statements to the bucket policy that deny all S3 actions when SecureTransport is false. - Resources must be list of bucket ARN and bucket ARN with wildcard. - https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/`, }, categoryS3 + "/bucketEncryptionInTransit": { Risk: `S3 Bucket Encryption In Transit - Ensures S3 buckets have bucket policy statements that deny insecure transport - S3 bucket policies can be configured to deny access to the bucket over HTTP.`, Recommendation: `Add statements to the bucket policy that deny all S3 actions when SecureTransport is false. - Resources must be list of bucket ARN and bucket ARN with wildcard. - https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/`, }, categoryS3 + "/bucketEnforceEncryption": { Risk: `S3 Bucket Enforce Object Encryption - Ensures S3 bucket policies do not allow uploads of unencrypted objects - S3 bucket policies can be configured to block uploads of objects that are not encrypted.`, Recommendation: `Set the S3 bucket policy to deny uploads of unencrypted objects. 
- https://aws.amazon.com/blogs/security/how-to-prevent-uploads-of-unencrypted-objects-to-amazon-s3/`, }, categoryS3 + "/bucketLogging": { Risk: `S3 Bucket Logging - Ensures S3 bucket logging is enabled for S3 buckets - S3 bucket logging helps maintain an audit trail of access that can be used in the event of a security incident.`, Recommendation: `Enable bucket logging for each S3 bucket. - http://docs.aws.amazon.com/AmazonS3/latest/dev/Logging.html`, }, categoryS3 + "/bucketPublicAccessBlock": { Risk: `S3 Bucket Public Access Block - Ensures S3 public access block is enabled on all buckets or for AWS account - Blocking S3 public access at the account level or bucket-level ensures objects are not accidentally exposed.`, Recommendation: `Enable the S3 public access block on all S3 buckets or for AWS account. - https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html`, }, categoryS3 + "/bucketSecureTransportEnabled": { Risk: `S3 Secure Transport Enabled - Ensure AWS S3 buckets enforce SSL to secure data in transit - S3 buckets should be configured to strictly require SSL connections to deny unencrypted HTTP requests when dealing with sensitive data.`, Recommendation: `Update S3 bucket policy to enforse SSL to secure data in transit. - https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/`, }, categoryS3 + "/bucketVersioning": { Risk: `S3 Bucket Versioning - Ensures object versioning is enabled on S3 buckets - Object versioning can help protect against the overwriting of objects or data loss in the event of a compromise.`, Recommendation: `Enable object versioning for buckets with sensitive contents at a minimum and for all buckets ideally. 
- http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html`, }, categoryS3 + "/bucketWebsiteEnabled": { Risk: `S3 Bucket Website Enabled - Ensures S3 buckets are not configured with static website hosting - S3 buckets should not be configured with static website hosting with public objects. - Instead, a CloudFront distribution should be configured with an origin access identity.`, Recommendation: `Disable S3 bucket static website hosting in favor or CloudFront distributions. - https://aws.amazon.com/premiumsupport/knowledge-center/cloudfront-https-requests-s3/`, }, categoryS3 + "/s3Encryption": { Risk: `S3 Bucket Encryption Enforcement - All statements in all S3 bucket policies must have a condition that requires encryption at a certain level - S3 buckets support numerous types of encryption, including AES-256, KMS using a default key, KMS with a CMK, or via HSM-based key.`, Recommendation: `Configure a bucket policy to enforce encryption. - https://aws.amazon.com/blogs/security/how-to-prevent-uploads-of-unencrypted-objects-to-amazon-s3/`, }, categorySageMaker + "/notebookDataEncrypted": { Risk: `Notebook Data Encrypted - Ensure Notebook data is encrypted - An optional encryption key can be supplied during Notebook Instance creation.`, Recommendation: `An existing KMS key should be supplied during Notebook Instance creation. - https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateNotebookInstance.html#API_CreateNotebookInstance_RequestSyntax`, }, categorySageMaker + "/notebookDirectInternetAccess": { Risk: `Notebook Direct Internet Access - Ensure Notebook Instance is not publicly available. - SageMaker notebooks should not be exposed to the Internet. Public availability can be configured via the DirectInternetAccess attribute.`, Recommendation: `Disable DirectInternetAccess for each SageMaker notebook. 
- https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access`, }, categorySES + "/dkimEnabled": { Risk: `Email DKIM Enabled - Ensures DomainKeys Identified Mail (DKIM) is enabled for domains and addresses in SES. - DKIM is a security feature that allows recipients of an email to veriy that the sender domain has authorized the message and that it has not been spoofed.`, Recommendation: `Enable DKIM for all domains and addresses in all regions used to send email through SES. - http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html`, }, categoryShield + "/shieldAdvancedEnabled": { Risk: `Shield Advanced Enabled - Ensures AWS Shield Advanced is setup and properly configured - AWS Shield Advanced provides enhanced DDOS protection for all enrolled services within a subscribed account. Subscriptions should be active.`, Recommendation: `Enable AWS Shield Advanced for the account. - https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html#ddos-advanced`, }, categoryShield + "/shieldEmergencyContacts": { Risk: `Shield Emergency Contacts - Ensures AWS Shield emergency contacts are configured - AWS Shield Emergency contacts should be configured so that AWS can contact an account representative in the event of a DDOS event.`, Recommendation: `Configure emergency contacts within AWS Shield for the account. - https://docs.aws.amazon.com/waf/latest/developerguide/ddos-edit-drt.html`, }, categoryShield + "/shieldProtections": { Risk: `Shield Protections - Ensures AWS Shield Advanced is configured to protect account resources - Once AWS Shield Advanced is enabled, it can be applied to resources within the account including ELBs, CloudFront.`, Recommendation: `Enable AWS Shield Advanced on resources within the account. 
- https://docs.aws.amazon.com/waf/latest/developerguide/configure-new-protection.html`, }, categorySNS + "/topicCmkEncrypted": { Risk: `SNS Topic CMK Encryption - Ensures Amazon SNS topics are encrypted with KMS Customer Master Keys (CMKs). - AWS SNS topics should be encrypted with KMS Customer Master Keys (CMKs) instead of AWS managed-keys in order to have a more granular control over the SNS data-at-rest encryption and decryption process.`, Recommendation: `Update SNS topics to use Customer Master Keys (CMKs) for Server-Side Encryption. - https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html`, }, categorySNS + "/topicEncrypted": { Risk: `SNS Topic Encrypted - Ensures that Amazon SNS topics enforce Server-Side Encryption (SSE) - SNS topics should enforce Server-Side Encryption (SSE) to secure data at rest. - SSE protects the contents of messages in Amazon SNS topics using keys managed in AWS Key Management Service (AWS KMS).`, Recommendation: `Enable Server-Side Encryption to protect the content of SNS topic messages. - https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html`, }, categorySNS + "/topicPolicies": { Risk: `SNS Topic Policies - Ensures SNS topics do not allow global send or subscribe. - SNS policies should not be configured to allow any AWS user to subscribe or send messages. - This could result in data leakage or financial DDoS.`, Recommendation: `Adjust the topic policy to only allow authorized AWS users in known accounts to subscribe. - http://docs.aws.amazon.com/sns/latest/dg/AccessPolicyLanguage.html`, }, categorySQS + "/sqsCrossAccount": { Risk: `SQS Cross Account Access - Ensures SQS policies disallow cross-account access - SQS policies should be carefully restricted to prevent publishing or reading from the queue from unexpected sources. - Queue policies can be used to limit these privileges.`, Recommendation: `Update the SQS policy to prevent access from external accounts. 
- http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html`, }, categorySQS + "/sqsEncrypted": { Risk: `SQS Encrypted - Ensures SQS encryption is enabled - Messages sent to SQS queues can be encrypted using KMS server-side encryption. - Existing queues can be modified to add encryption with minimal overhead.`, Recommendation: `Enable encryption using KMS for all SQS queues. - http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html`, }, categorySQS + "/sqsPublicAccess": { Risk: `SQS Public Access - Ensures that SQS queues are not publicly accessible - SQS queues should be not be publicly accessible to prevent unauthorized actions.`, Recommendation: `Update the SQS queue policy to prevent public access. - http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html`, }, categorySSM + "/ssmActiveOnAllInstances": { Risk: `SSM Agent Active All Instances - Ensures SSM agents are installed and active on all servers - SSM allows for centralized monitoring of all servers and should be activated on all EC2 instances.`, Recommendation: `Install SSM on all servers and ensure it is active. - https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html`, }, categorySSM + "/ssmAgentAutoUpdateEnabled": { Risk: `SSM Agent Auto Update Enabled - Ensures the SSM agent is configured to automatically update to new versions - To ensure the latest version of the SSM agent is installed, it should be configured to consume automatic updates.`, Recommendation: `Update the SSM agent configuration for all managed instances to use automatic updates. 
- https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-agent-automatic-updates.html`, }, categorySSM + "/ssmAgentLatestVersion": { Risk: `SSM Agent Latest Version - Ensures SSM agents installed on Linux hosts are running the latest version - SSM agent software provides sensitive access to servers and should be kept up-to-date.`, Recommendation: `Update the SSM agent on all Linux hosts to the latest version. - https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-agent-automatic-updates.html`, }, categorySSM + "/ssmEncryptedParameters": { Risk: `SSM Encrypted Parameters - Ensures SSM Parameters are encrypted - SSM Parameters should be encrypted. - This allows their values to be used by approved systems, while restricting access to other users of the account.`, Recommendation: `Recreate unencrypted SSM Parameters with Type set to SecureString. - https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-about.html#sysman-paramstore-securestring`, }, categoryTransfer + "/transferLoggingEnabled": { Risk: `Transfer Logging Enabled - Ensures AWS Transfer servers have CloudWatch logging enabled. - AWS Transfer servers can log activity to CloudWatch if a proper IAM service role is provided. - This role should be configured for all servers to ensure proper access logging.`, Recommendation: `Provide a valid IAM service role for AWS Transfer servers. 
- https://docs.aws.amazon.com/transfer/latest/userguide/monitoring.html`, }, categoryWorkspaces + "/workspacesIpAccessControl": { Risk: `Workspaces IP Access Control - Ensures enforced IP Access Control on Workspaces - Checking the existence of IP Access control on Workspaces and ensuring that no Workspaces are open`, Recommendation: `Enable proper IP Access Controls for all workspaces - https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-ip-access-control-groups.html`, }, categoryXRay + "/xrayEncryptionEnabled": { Risk: `XRay Encryption Enabled - Ensures CMK-based encryption is enabled for XRay traces. - AWS XRay supports default encryption based on an AWS-managed KMS key as well as encryption using a customer managed key (CMK). - For maximum security, the CMK-based encryption should be used.`, Recommendation: `Update XRay encryption configuration to use a CMK. - https://docs.aws.amazon.com/xray/latest/devguide/xray-console-encryption.html`, }, }
src/cloudsploit/recommend.go
0.787196
0.446374
recommend.go
starcoder
MAix Go Bezel https://www.sipeed.com https://wiki.sipeed.com/en/maix/board/go.html https://www.seeedstudio.com/Sipeed-MAix-GO-Suit-for-RISC-V-AI-IoT-p-2874.html */ //----------------------------------------------------------------------------- package main import . "github.com/deadsy/sdfx/sdf" //----------------------------------------------------------------------------- // material shrinkage var shrink = 1.0 / 0.999 // PLA ~0.1% //var shrink = 1.0/0.995; // ABS ~0.5% //----------------------------------------------------------------------------- var baseThickness = 3.0 //----------------------------------------------------------------------------- func boardStandoffs() SDF3 { pillarHeight := 14.0 zOfs := 0.5 * (pillarHeight + baseThickness) // standoffs with screw holes k := &StandoffParms{ PillarHeight: pillarHeight, PillarDiameter: 4.5, HoleDepth: 11.0, HoleDiameter: 2.6, // #4 screw NumberWebs: 2, WebHeight: 10, WebDiameter: 12, WebWidth: 3.5, } x := 82.0 y := 54.0 x0 := -34.0 y0 := -0.5 * y positions := V3Set{ {x0, y0, zOfs}, {x0 + x, y0, zOfs}, {x0, y0 + y, zOfs}, {x0 + x, y0 + y, zOfs}, } return Multi3D(Standoff3D(k), positions) } //----------------------------------------------------------------------------- func bezelStandoffs() SDF3 { pillarHeight := 22.0 zOfs := 0.5 * (pillarHeight + baseThickness) // standoffs with screw holes k := &StandoffParms{ PillarHeight: pillarHeight, PillarDiameter: 6.0, HoleDepth: 11.0, HoleDiameter: 2.4, // #4 screw } x := 140.0 y := 55.0 x0 := -0.5 * x y0 := -0.5 * y positions := V3Set{ {x0, y0, zOfs}, {x0 + x, y0, zOfs}, {x0, y0 + y, zOfs}, {x0 + x, y0 + y, zOfs}, } return Multi3D(Standoff3D(k), positions) } //----------------------------------------------------------------------------- func speakerHoles(d float64, ofs V2) SDF2 { holeRadius := 1.7 s0 := Circle2D(holeRadius) s1 := MakeBoltCircle2D(holeRadius, d*0.3, 6) return Transform2D(Union2D(s0, s1), Translate2d(ofs)) } func speakerHolder(d float64, ofs V2) SDF3 { 
thickness := 3.0 zOfs := 0.5 * (thickness + baseThickness) k := WasherParms{ Thickness: thickness, InnerRadius: 0.5 * d, OuterRadius: 0.5 * (d + 4.0), Remove: 0.3, } s := Washer3D(&k) s = Transform3D(s, RotateZ(Pi)) return Transform3D(s, Translate3d(V3{ofs.X, ofs.Y, zOfs})) } //----------------------------------------------------------------------------- func bezel() SDF3 { speakerOfs := V2{60, 14} speakerDiameter := 20.3 // bezel bezel := V2{150, 65} b0 := Box2D(bezel, 2) // lcd cutout lcd := V2{60, 46} l0 := Box2D(lcd, 2) // camera cutout c0 := Circle2D(7.25) c0 = Transform2D(c0, Translate2d(V2{42, 0})) // led hole cutout c1 := Circle2D(2) c1 = Transform2D(c1, Translate2d(V2{44, -20})) // speaker holes cutout c2 := speakerHoles(speakerDiameter, speakerOfs) // extrude the bezel s0 := Extrude3D(Difference2D(b0, Union2D(l0, c0, c1, c2)), baseThickness) // add the board standoffs s0 = Union3D(s0, boardStandoffs()) // add the bezel standoffs (with foot rounding) s1 := Union3D(s0, bezelStandoffs()) s1.(*UnionSDF3).SetMin(PolyMin(3.0)) // speaker holder s3 := speakerHolder(speakerDiameter, speakerOfs) return Union3D(s1, s3) } //----------------------------------------------------------------------------- func main() { RenderSTL(ScaleUniform3D(bezel(), shrink), 330, "bezel.stl") } //-----------------------------------------------------------------------------
examples/maixgo/main.go
0.597843
0.492737
main.go
starcoder
// Package-level consistency checks for fixture-backed unit tests: each check
// verifies that a model's denormalized counter columns (NumRepos, NumStars,
// ...) agree with the actual rows in the related tables.
package unittest

import (
	"reflect"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models/db"

	"github.com/stretchr/testify/assert"
	"xorm.io/builder"
)

const (
	// these const values are copied from `models` package to prevent from cycle-import
	modelsUserTypeOrganization = 1
	modelsRepoWatchModeDont    = 2
	modelsCommentTypeComment   = 0
)

// consistencyCheckMap maps a database table name to the function that checks
// one bean loaded from that table. It is populated in init below.
var consistencyCheckMap = make(map[string]func(t assert.TestingT, bean interface{}))

// CheckConsistencyFor test that all matching database entries are consistent
//
// For each bean value it loads every row of the bean's table and runs the
// table's registered consistency check on each row.
func CheckConsistencyFor(t assert.TestingT, beansToCheck ...interface{}) {
	for _, bean := range beansToCheck {
		// Build a *[]T via reflection so xorm's Find can fill it with all
		// rows of the bean's table.
		sliceType := reflect.SliceOf(reflect.TypeOf(bean))
		sliceValue := reflect.MakeSlice(sliceType, 0, 10)

		ptrToSliceValue := reflect.New(sliceType)
		ptrToSliceValue.Elem().Set(sliceValue)

		assert.NoError(t, db.GetEngine(db.DefaultContext).Table(bean).Find(ptrToSliceValue.Interface()))
		sliceValue = ptrToSliceValue.Elem()

		// Check every loaded row individually.
		for i := 0; i < sliceValue.Len(); i++ {
			entity := sliceValue.Index(i).Interface()
			checkForConsistency(t, entity)
		}
	}
}

// checkForConsistency dispatches a single bean to the check registered for
// its table name, failing the test if no check is registered.
func checkForConsistency(t assert.TestingT, bean interface{}) {
	tb, err := db.TableInfo(bean)
	assert.NoError(t, err)
	f := consistencyCheckMap[tb.Name]
	if f == nil {
		assert.Fail(t, "unknown bean type: %#v", bean)
		return
	}
	f(t, bean)
}

// init registers one consistency-check closure per table name.
// NOTE(review): reflectionWrap, AssertCountByCond, GetCountByCond and
// AssertExistsAndLoadMap are defined elsewhere in this package — presumably
// reflectionWrap exposes struct fields by name via reflection; verify there.
func init() {
	// Lenient parsers: a parse failure yields the zero value (false / 0),
	// matching the ignored-error style used below.
	parseBool := func(v string) bool {
		b, _ := strconv.ParseBool(v)
		return b
	}
	parseInt := func(v string) int {
		i, _ := strconv.Atoi(v)
		return i
	}

	// user: counter columns vs repository/star/org_user/team/follow tables.
	checkForUserConsistency := func(t assert.TestingT, bean interface{}) {
		user := reflectionWrap(bean)
		AssertCountByCond(t, "repository", builder.Eq{"owner_id": user.int("ID")}, user.int("NumRepos"))
		AssertCountByCond(t, "star", builder.Eq{"uid": user.int("ID")}, user.int("NumStars"))
		AssertCountByCond(t, "org_user", builder.Eq{"org_id": user.int("ID")}, user.int("NumMembers"))
		AssertCountByCond(t, "team", builder.Eq{"org_id": user.int("ID")}, user.int("NumTeams"))
		AssertCountByCond(t, "follow", builder.Eq{"user_id": user.int("ID")}, user.int("NumFollowing"))
		AssertCountByCond(t, "follow", builder.Eq{"follow_id": user.int("ID")}, user.int("NumFollowers"))
		if user.int("Type") != modelsUserTypeOrganization {
			// only organizations may have members and teams
			assert.EqualValues(t, 0, user.int("NumMembers"))
			assert.EqualValues(t, 0, user.int("NumTeams"))
		}
	}

	// repository: name casing plus star/milestone/fork/watch/issue/pull counters.
	checkForRepoConsistency := func(t assert.TestingT, bean interface{}) {
		repo := reflectionWrap(bean)
		assert.Equal(t, repo.str("LowerName"), strings.ToLower(repo.str("Name")), "repo: %+v", repo)
		AssertCountByCond(t, "star", builder.Eq{"repo_id": repo.int("ID")}, repo.int("NumStars"))
		AssertCountByCond(t, "milestone", builder.Eq{"repo_id": repo.int("ID")}, repo.int("NumMilestones"))
		AssertCountByCond(t, "repository", builder.Eq{"fork_id": repo.int("ID")}, repo.int("NumForks"))
		if repo.bool("IsFork") {
			// a fork must reference an existing parent repository
			AssertExistsAndLoadMap(t, "repository", builder.Eq{"id": repo.int("ForkID")})
		}

		// "don't watch" mode rows are excluded from the watch count
		actual := GetCountByCond(t, "watch", builder.Eq{"repo_id": repo.int("ID")}.
			And(builder.Neq{"mode": modelsRepoWatchModeDont}))
		assert.EqualValues(t, repo.int("NumWatches"), actual,
			"Unexpected number of watches for repo %+v", repo)

		// issues and pulls share the "issue" table, split by is_pull
		actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": false, "repo_id": repo.int("ID")})
		assert.EqualValues(t, repo.int("NumIssues"), actual,
			"Unexpected number of issues for repo %+v", repo)

		actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": false, "is_closed": true, "repo_id": repo.int("ID")})
		assert.EqualValues(t, repo.int("NumClosedIssues"), actual,
			"Unexpected number of closed issues for repo %+v", repo)

		actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": true, "repo_id": repo.int("ID")})
		assert.EqualValues(t, repo.int("NumPulls"), actual,
			"Unexpected number of pulls for repo %+v", repo)

		actual = GetCountByCond(t, "issue", builder.Eq{"is_pull": true, "is_closed": true, "repo_id": repo.int("ID")})
		assert.EqualValues(t, repo.int("NumClosedPulls"), actual,
			"Unexpected number of closed pulls for repo %+v", repo)

		actual = GetCountByCond(t, "milestone", builder.Eq{"is_closed": true, "repo_id": repo.int("ID")})
		assert.EqualValues(t, repo.int("NumClosedMilestones"), actual,
			"Unexpected number of closed milestones for repo %+v", repo)
	}

	// issue: comment counter, and index agreement with its pull_request row.
	checkForIssueConsistency := func(t assert.TestingT, bean interface{}) {
		issue := reflectionWrap(bean)
		typeComment := modelsCommentTypeComment
		// `type` is backquoted because it is a reserved word in some databases
		actual := GetCountByCond(t, "comment", builder.Eq{"`type`": typeComment, "issue_id": issue.int("ID")})
		assert.EqualValues(t, issue.int("NumComments"), actual, "Unexpected number of comments for issue %+v", issue)
		if issue.bool("IsPull") {
			prRow := AssertExistsAndLoadMap(t, "pull_request", builder.Eq{"issue_id": issue.int("ID")})
			assert.EqualValues(t, parseInt(prRow["index"]), issue.int("Index"))
		}
	}

	// pull_request: must point at an is_pull issue with a matching index.
	checkForPullRequestConsistency := func(t assert.TestingT, bean interface{}) {
		pr := reflectionWrap(bean)
		issueRow := AssertExistsAndLoadMap(t, "issue", builder.Eq{"id": pr.int("IssueID")})
		assert.True(t, parseBool(issueRow["is_pull"]))
		assert.EqualValues(t, parseInt(issueRow["index"]), pr.int("Index"))
	}

	// milestone: issue counters and the derived Completeness percentage.
	checkForMilestoneConsistency := func(t assert.TestingT, bean interface{}) {
		milestone := reflectionWrap(bean)
		AssertCountByCond(t, "issue", builder.Eq{"milestone_id": milestone.int("ID")}, milestone.int("NumIssues"))

		actual := GetCountByCond(t, "issue", builder.Eq{"is_closed": true, "milestone_id": milestone.int("ID")})
		assert.EqualValues(t, milestone.int("NumClosedIssues"), actual, "Unexpected number of closed issues for milestone %+v", milestone)

		// Completeness = closed / total, as an integer percentage (0 when empty)
		completeness := 0
		if milestone.int("NumIssues") > 0 {
			completeness = milestone.int("NumClosedIssues") * 100 / milestone.int("NumIssues")
		}
		assert.Equal(t, completeness, milestone.int("Completeness"))
	}

	// label: issue counters derived from the issue_label join table.
	checkForLabelConsistency := func(t assert.TestingT, bean interface{}) {
		label := reflectionWrap(bean)
		issueLabels, err := db.GetEngine(db.DefaultContext).Table("issue_label").
			Where(builder.Eq{"label_id": label.int("ID")}).
			Query()
		assert.NoError(t, err)
		assert.EqualValues(t, label.int("NumIssues"), len(issueLabels), "Unexpected number of issue for label %+v", label)

		// Query() returns raw column bytes; convert issue ids to ints.
		issueIDs := make([]int, len(issueLabels))
		for i, issueLabel := range issueLabels {
			issueIDs[i], _ = strconv.Atoi(string(issueLabel["issue_id"]))
		}

		// builder.In with an empty list would be invalid SQL, so only count
		// closed issues when there is at least one labelled issue.
		expected := int64(0)
		if len(issueIDs) > 0 {
			expected = GetCountByCond(t, "issue", builder.In("id", issueIDs).And(builder.Eq{"is_closed": true}))
		}
		assert.EqualValues(t, expected, label.int("NumClosedIssues"), "Unexpected number of closed issues for label %+v", label)
	}

	// team: member and repository counters vs their join tables.
	checkForTeamConsistency := func(t assert.TestingT, bean interface{}) {
		team := reflectionWrap(bean)
		AssertCountByCond(t, "team_user", builder.Eq{"team_id": team.int("ID")}, team.int("NumMembers"))
		AssertCountByCond(t, "team_repo", builder.Eq{"team_id": team.int("ID")}, team.int("NumRepos"))
	}

	// action: privacy flag must mirror the referenced repository's flag.
	checkForActionConsistency := func(t assert.TestingT, bean interface{}) {
		action := reflectionWrap(bean)
		repoRow := AssertExistsAndLoadMap(t, "repository", builder.Eq{"id": action.int("RepoID")})
		assert.Equal(t, parseBool(repoRow["is_private"]), action.bool("IsPrivate"), "action: %+v", action)
	}

	consistencyCheckMap["user"] = checkForUserConsistency
	consistencyCheckMap["repository"] = checkForRepoConsistency
	consistencyCheckMap["issue"] = checkForIssueConsistency
	consistencyCheckMap["pull_request"] = checkForPullRequestConsistency
	consistencyCheckMap["milestone"] = checkForMilestoneConsistency
	consistencyCheckMap["label"] = checkForLabelConsistency
	consistencyCheckMap["team"] = checkForTeamConsistency
	consistencyCheckMap["action"] = checkForActionConsistency
}
models/unittest/consistency.go
0.639624
0.578329
consistency.go
starcoder