// Package commands defines the suggestion data (top-level docker commands and
// their per-command flag suggestions) used to drive go-prompt completion.
package commands
import (
"github.com/c-bata/go-prompt"
)
// Commands holds the static completion data served to the prompt.
type Commands struct {
// DockerSuggestions lists the top-level docker commands offered at the root prompt.
DockerSuggestions []prompt.Suggest
// DockerSubSuggestions maps a top-level command name (e.g. "build", "run")
// to the flag or subcommand suggestions offered after that command is typed.
DockerSubSuggestions map[string][]prompt.Suggest
}
func New() Commands {
return Commands{
DockerSuggestions: []prompt.Suggest{
{Text: "attach", Description: "Attach local standard input, output, and error streams to a running container"},
{Text: "build", Description: "Build an image from a Dockerfile"},
{Text: "builder", Description: "Manage builds"},
{Text: "checkpoint", Description: "Manage checkpoints"},
{Text: "commit", Description: "Create a new image from a container’s changes"},
{Text: "config", Description: "Manage Docker configs"},
{Text: "container", Description: "Manage containers"},
{Text: "context", Description: "Manage contexts"},
{Text: "cp", Description: "Copy files/folders between a container and the local filesystem"},
{Text: "create", Description: "Create a new container"},
{Text: "diff", Description: "Inspect changes to files or directories on a container’s filesystem"},
{Text: "events", Description: "Get real time events from the server"},
{Text: "exec", Description: "Run a command in a running container"},
{Text: "export", Description: "Export a container’s filesystem as a tar archive"},
{Text: "history", Description: "Show the history of an image"},
{Text: "image", Description: "Manage images"},
{Text: "images", Description: "List images"},
{Text: "import", Description: "Import the contents from a tarball to create a filesystem image"},
{Text: "info", Description: "Display system-wide information"},
{Text: "inspect", Description: "Return low-level information on Docker objects"},
{Text: "kill", Description: "Kill one or more running containers"},
{Text: "load", Description: "Load an image from a tar archive or STDIN"},
{Text: "login", Description: "Log in to a Docker registry"},
{Text: "logout", Description: "Log out from a Docker registry"},
{Text: "logs", Description: "Fetch the logs of a container"},
{Text: "manifest", Description: "Manage Docker image manifests and manifest lists"},
{Text: "network", Description: "Manage networks"},
{Text: "node", Description: "Manage Swarm nodes"},
{Text: "pause", Description: "Pause all processes within one or more containers"},
{Text: "plugin", Description: "Manage plugins"},
{Text: "port", Description: "List port mappings or a specific mapping for the container"},
{Text: "ps", Description: "List containers"},
{Text: "pull", Description: "Pull an image or a repository from a registry"},
{Text: "push", Description: "Push an image or a repository to a registry"},
{Text: "rename", Description: "Rename a container"},
{Text: "restart", Description: "Restart one or more containers"},
{Text: "rm", Description: "Remove one or more containers"},
{Text: "rmi", Description: "Remove one or more images"},
{Text: "run", Description: "Run a command in a new container"},
{Text: "save", Description: "Save one or more images to a tar archive (streamed to STDOUT by default)"},
{Text: "search", Description: "Search the Docker Hub for images"},
{Text: "secret", Description: "Manage Docker secrets"},
{Text: "service", Description: "Manage services"},
{Text: "stack", Description: "Manage Docker stacks"},
{Text: "start", Description: "Start one or more stopped containers"},
{Text: "stats", Description: "Display a live stream of container(s) resource usage statistics"},
{Text: "stop", Description: "Stop one or more running containers"},
{Text: "swarm", Description: "Manage Swarm"},
{Text: "system", Description: "Manage Docker"},
{Text: "tag", Description: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE"},
{Text: "top", Description: "Display the running processes of a container"},
{Text: "trust", Description: "Manage trust on Docker images"},
{Text: "unpause", Description: "Unpause all processes within one or more containers"},
{Text: "update", Description: "Update configuration of one or more containers"},
{Text: "version", Description: "Show the Docker version information"},
{Text: "volume", Description: "Manage volumes"},
{Text: "wait", Description: "Block until one or more containers stop, then print their exit codes"},
{Text: "exit", Description: "Exit command prompt"},
},
DockerSubSuggestions: map[string][]prompt.Suggest{
"attach": {
prompt.Suggest{Text: "--detach-keys", Description: "Override the key sequence for detaching a container"},
prompt.Suggest{Text: "--no-stdin", Description: "Do not attach STDIN"},
prompt.Suggest{Text: "--sig-proxy", Description: "Proxy all received signals to the process"},
},
"build": {
prompt.Suggest{Text: "--add-host", Description: "Add a custom host-to-IP mapping (host:ip)"},
prompt.Suggest{Text: "--build-arg", Description: "Set build-time variables"},
prompt.Suggest{Text: "--cache-from", Description: "Images to consider as cache sources"},
prompt.Suggest{Text: "--cgroup-parent", Description: "Optional parent cgroup for the container"},
prompt.Suggest{Text: "--compress", Description: "Compress the build context using gzip"},
prompt.Suggest{Text: "--cpu-period", Description: "Limit the CPU CFS (Completely Fair Scheduler) period"},
prompt.Suggest{Text: "--cpu-quota", Description: "Limit the CPU CFS (Completely Fair Scheduler) quota"},
prompt.Suggest{Text: "--cpu-shares", Description: "CPU shares (relative weight)"},
prompt.Suggest{Text: "--cpuset-cpus", Description: "CPUs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--cpuset-mems", Description: "MEMs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--disable-content-trust", Description: "Skip image verification"},
prompt.Suggest{Text: "--file", Description: "Name of the Dockerfile (Default is ‘PATH/Dockerfile’)"},
prompt.Suggest{Text: "--force-rm", Description: "Always remove intermediate containers"},
prompt.Suggest{Text: "--iidfile", Description: "Write the image ID to the file"},
prompt.Suggest{Text: "--isolation", Description: "Container isolation technology"},
prompt.Suggest{Text: "--label", Description: "Set metadata for an image"},
prompt.Suggest{Text: "--memory", Description: "Memory limit"},
prompt.Suggest{Text: "--memory-swap", Description: "Swap limit equal to memory plus swap: ‘-1’ to enable unlimited swap"},
prompt.Suggest{Text: "--network", Description: ""},
prompt.Suggest{Text: "--no-cache", Description: "Do not use cache when building the image"},
prompt.Suggest{Text: "--output", Description: ""},
prompt.Suggest{Text: "--platform", Description: ""},
prompt.Suggest{Text: "--progress", Description: "Set type of progress output (auto, plain, tty). Use plain to show container output"},
prompt.Suggest{Text: "--pull", Description: "Always attempt to pull a newer version of the image"},
prompt.Suggest{Text: "--quiet", Description: "Suppress the build output and print image ID on success"},
prompt.Suggest{Text: "--rm", Description: "Remove intermediate containers after a successful build"},
prompt.Suggest{Text: "--secret", Description: ""},
prompt.Suggest{Text: "--security-opt", Description: "Security options"},
prompt.Suggest{Text: "--shm-size", Description: "Size of /dev/shm"},
prompt.Suggest{Text: "--squash", Description: ""},
prompt.Suggest{Text: "--ssh", Description: ""},
prompt.Suggest{Text: "--stream", Description: ""},
prompt.Suggest{Text: "--tag", Description: "Name and optionally a tag in the ‘name:tag’ format"},
prompt.Suggest{Text: "--target", Description: "Set the target build stage to build."},
prompt.Suggest{Text: "--ulimit", Description: "Ulimit options"},
},
"commit": {
prompt.Suggest{Text: "--author", Description: "Author (e.g., “<NAME> Smith "},
prompt.Suggest{Text: "--change", Description: "Apply Dockerfile instruction to the created image"},
prompt.Suggest{Text: "--message", Description: "Commit message"},
prompt.Suggest{Text: "--pause", Description: "Pause container during commit"},
},
"cp": {
prompt.Suggest{Text: "--archive", Description: "Archive mode (copy all uid/gid information)"},
prompt.Suggest{Text: "--follow-link", Description: "Always follow symbol link in SRC_PATH"},
},
"create": {
prompt.Suggest{Text: "--add-host", Description: "Add a custom host-to-IP mapping (host:ip)"},
prompt.Suggest{Text: "--attach", Description: "Attach to STDIN, STDOUT or STDERR"},
prompt.Suggest{Text: "--blkio-weight", Description: "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)"},
prompt.Suggest{Text: "--blkio-weight-device", Description: "Block IO weight (relative device weight)"},
prompt.Suggest{Text: "--cap-add", Description: "Add Linux capabilities"},
prompt.Suggest{Text: "--cap-drop", Description: "Drop Linux capabilities"},
prompt.Suggest{Text: "--cgroup-parent", Description: "Optional parent cgroup for the container"},
prompt.Suggest{Text: "--cidfile", Description: "Write the container ID to the file"},
prompt.Suggest{Text: "--cpu-count", Description: "CPU count (Windows only)"},
prompt.Suggest{Text: "--cpu-percent", Description: "CPU percent (Windows only)"},
prompt.Suggest{Text: "--cpu-period", Description: "Limit CPU CFS (Completely Fair Scheduler) period"},
prompt.Suggest{Text: "--cpu-quota", Description: "Limit CPU CFS (Completely Fair Scheduler) quota"},
prompt.Suggest{Text: "--cpu-rt-period", Description: ""},
prompt.Suggest{Text: "--cpu-rt-runtime", Description: ""},
prompt.Suggest{Text: "--cpu-shares", Description: "CPU shares (relative weight)"},
prompt.Suggest{Text: "--cpus", Description: ""},
prompt.Suggest{Text: "--cpuset-cpus", Description: "CPUs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--cpuset-mems", Description: "MEMs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--device", Description: "Add a host device to the container"},
prompt.Suggest{Text: "--device-cgroup-rule", Description: "Add a rule to the cgroup allowed devices list"},
prompt.Suggest{Text: "--device-read-bps", Description: "Limit read rate (bytes per second) from a device"},
prompt.Suggest{Text: "--device-read-iops", Description: "Limit read rate (IO per second) from a device"},
prompt.Suggest{Text: "--device-write-bps", Description: "Limit write rate (bytes per second) to a device"},
prompt.Suggest{Text: "--device-write-iops", Description: "Limit write rate (IO per second) to a device"},
prompt.Suggest{Text: "--disable-content-trust", Description: "Skip image verification"},
prompt.Suggest{Text: "--dns", Description: "Set custom DNS servers"},
prompt.Suggest{Text: "--dns-opt", Description: "Set DNS options"},
prompt.Suggest{Text: "--dns-option", Description: "Set DNS options"},
prompt.Suggest{Text: "--dns-search", Description: "Set custom DNS search domains"},
prompt.Suggest{Text: "--domainname", Description: "Container NIS domain name"},
prompt.Suggest{Text: "--entrypoint", Description: "Overwrite the default ENTRYPOINT of the image"},
prompt.Suggest{Text: "--env", Description: "Set environment variables"},
prompt.Suggest{Text: "--env-file", Description: "Read in a file of environment variables"},
prompt.Suggest{Text: "--expose", Description: "Expose a port or a range of ports"},
prompt.Suggest{Text: "--gpus", Description: ""},
prompt.Suggest{Text: "--group-add", Description: "Add additional groups to join"},
prompt.Suggest{Text: "--health-cmd", Description: "Command to run to check health"},
prompt.Suggest{Text: "--health-interval", Description: "Time between running the check (ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--health-retries", Description: "Consecutive failures needed to report unhealthy"},
prompt.Suggest{Text: "--health-start-period", Description: ""},
prompt.Suggest{Text: "--health-timeout", Description: "Maximum time to allow one check to run (ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--help", Description: "Print usage"},
prompt.Suggest{Text: "--hostname", Description: "Container host name"},
prompt.Suggest{Text: "--init", Description: ""},
prompt.Suggest{Text: "--interactive", Description: "Keep STDIN open even if not attached"},
prompt.Suggest{Text: "--io-maxbandwidth", Description: "Maximum IO bandwidth limit for the system drive (Windows only)"},
prompt.Suggest{Text: "--io-maxiops", Description: "Maximum IOps limit for the system drive (Windows only)"},
prompt.Suggest{Text: "--ip", Description: "IPv4 address (e.g., 172.30.100.104)"},
prompt.Suggest{Text: "--ip6", Description: "IPv6 address (e.g., 2001:db8::33)"},
prompt.Suggest{Text: "--ipc", Description: "IPC mode to use"},
prompt.Suggest{Text: "--isolation", Description: "Container isolation technology"},
prompt.Suggest{Text: "--kernel-memory", Description: "Kernel memory limit"},
prompt.Suggest{Text: "--label", Description: "Set meta data on a container"},
prompt.Suggest{Text: "--label-file", Description: "Read in a line delimited file of labels"},
prompt.Suggest{Text: "--link", Description: "Add link to another container"},
prompt.Suggest{Text: "--link-local-ip", Description: "Container IPv4/IPv6 link-local addresses"},
prompt.Suggest{Text: "--log-driver", Description: "Logging driver for the container"},
prompt.Suggest{Text: "--log-opt", Description: "Log driver options"},
prompt.Suggest{Text: "--mac-address", Description: "Container MAC address (e.g., 92:d0:c6:0a:29:33)"},
prompt.Suggest{Text: "--memory", Description: "Memory limit"},
prompt.Suggest{Text: "--memory-reservation", Description: "Memory soft limit"},
prompt.Suggest{Text: "--memory-swap", Description: "Swap limit equal to memory plus swap: ‘-1’ to enable unlimited swap"},
prompt.Suggest{Text: "--memory-swappiness", Description: "Tune container memory swappiness (0 to 100)"},
prompt.Suggest{Text: "--mount", Description: "Attach a filesystem mount to the container"},
prompt.Suggest{Text: "--name", Description: "Assign a name to the container"},
prompt.Suggest{Text: "--net", Description: "Connect a container to a network"},
prompt.Suggest{Text: "--net-alias", Description: "Add network-scoped alias for the container"},
prompt.Suggest{Text: "--network", Description: "Connect a container to a network"},
prompt.Suggest{Text: "--network-alias", Description: "Add network-scoped alias for the container"},
prompt.Suggest{Text: "--no-healthcheck", Description: "Disable any container-specified HEALTHCHECK"},
prompt.Suggest{Text: "--oom-kill-disable", Description: "Disable OOM Killer"},
prompt.Suggest{Text: "--oom-score-adj", Description: "Tune host’s OOM preferences (-1000 to 1000)"},
prompt.Suggest{Text: "--pid", Description: "PID namespace to use"},
prompt.Suggest{Text: "--pids-limit", Description: "Tune container pids limit (set -1 for unlimited)"},
prompt.Suggest{Text: "--platform", Description: ""},
prompt.Suggest{Text: "--privileged", Description: "Give extended privileges to this container"},
prompt.Suggest{Text: "--publish", Description: "Publish a container’s port(s) to the host"},
prompt.Suggest{Text: "--publish-all", Description: "Publish all exposed ports to random ports"},
prompt.Suggest{Text: "--read-only", Description: "Mount the container’s root filesystem as read only"},
prompt.Suggest{Text: "--restart", Description: "Restart policy to apply when a container exits"},
prompt.Suggest{Text: "--rm", Description: "Automatically remove the container when it exits"},
prompt.Suggest{Text: "--runtime", Description: "Runtime to use for this container"},
prompt.Suggest{Text: "--security-opt", Description: "Security Options"},
prompt.Suggest{Text: "--shm-size", Description: "Size of /dev/shm"},
prompt.Suggest{Text: "--stop-signal", Description: "Signal to stop a container"},
prompt.Suggest{Text: "--stop-timeout", Description: ""},
prompt.Suggest{Text: "--storage-opt", Description: "Storage driver options for the container"},
prompt.Suggest{Text: "--sysctl", Description: "Sysctl options"},
prompt.Suggest{Text: "--tmpfs", Description: "Mount a tmpfs directory"},
prompt.Suggest{Text: "--tty", Description: "Allocate a pseudo-TTY"},
prompt.Suggest{Text: "--ulimit", Description: "Ulimit options"},
prompt.Suggest{Text: "--user", Description: "Username or UID (format: <name|uid>[:<group|gid>])"},
prompt.Suggest{Text: "--userns", Description: "User namespace to use"},
prompt.Suggest{Text: "--uts", Description: "UTS namespace to use"},
prompt.Suggest{Text: "--volume", Description: "Bind mount a volume"},
prompt.Suggest{Text: "--volume-driver", Description: "Optional volume driver for the container"},
prompt.Suggest{Text: "--volumes-from", Description: "Mount volumes from the specified container(s)"},
prompt.Suggest{Text: "--workdir", Description: "Working directory inside the container"},
},
"events": {
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Format the output using the given Go template"},
prompt.Suggest{Text: "--since", Description: "Show all events created since timestamp"},
prompt.Suggest{Text: "--until", Description: "Stream events until this timestamp"},
},
"exec": {
prompt.Suggest{Text: "--detach", Description: "Detached mode: run command in the background"},
prompt.Suggest{Text: "--detach-keys", Description: "Override the key sequence for detaching a container"},
prompt.Suggest{Text: "--env", Description: ""},
prompt.Suggest{Text: "--interactive", Description: "Keep STDIN open even if not attached"},
prompt.Suggest{Text: "--privileged", Description: "Give extended privileges to the command"},
prompt.Suggest{Text: "--tty", Description: "Allocate a pseudo-TTY"},
prompt.Suggest{Text: "--user", Description: "Username or UID (format: <name|uid>[:<group|gid>])"},
prompt.Suggest{Text: "--workdir", Description: ""},
},
"export": {
prompt.Suggest{Text: "--output", Description: "Write to a file, instead of STDOUT"},
},
"history": {
prompt.Suggest{Text: "--format", Description: "Pretty-print images using a Go template"},
prompt.Suggest{Text: "--human", Description: "Print sizes and dates in human readable format"},
prompt.Suggest{Text: "--no-trunc", Description: "Don’t truncate output"},
prompt.Suggest{Text: "--quiet", Description: "Only show numeric IDs"},
},
"images": {
prompt.Suggest{Text: "--all", Description: "Show all images (default hides intermediate images)"},
prompt.Suggest{Text: "--digests", Description: "Show digests"},
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Pretty-print images using a Go template"},
prompt.Suggest{Text: "--no-trunc", Description: "Don’t truncate output"},
prompt.Suggest{Text: "--quiet", Description: "Only show numeric IDs"},
},
"import": {
prompt.Suggest{Text: "--change", Description: "Apply Dockerfile instruction to the created image"},
prompt.Suggest{Text: "--message", Description: "Set commit message for imported image"},
prompt.Suggest{Text: "--platform", Description: ""},
},
"info": {
prompt.Suggest{Text: "--format", Description: "Format the output using the given Go template"},
},
"inspect": {
prompt.Suggest{Text: "--format", Description: "Format the output using the given Go template"},
prompt.Suggest{Text: "--size", Description: "Display total file sizes if the type is container"},
prompt.Suggest{Text: "--type", Description: "Return JSON for specified type"},
},
"kill": {
prompt.Suggest{Text: "--signal", Description: "Signal to send to the container"},
},
"load": {
prompt.Suggest{Text: "--input", Description: "Read from tar archive file, instead of STDIN"},
prompt.Suggest{Text: "--quiet", Description: "Suppress the load output"},
},
"login": {
prompt.Suggest{Text: "--password", Description: "Password"},
prompt.Suggest{Text: "--password-stdin", Description: "Take the password from stdin"},
prompt.Suggest{Text: "--username", Description: "Username"},
},
"logs": {
prompt.Suggest{Text: "--details", Description: "Show extra details provided to logs"},
prompt.Suggest{Text: "--follow", Description: "Follow log output"},
prompt.Suggest{Text: "--since", Description: "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)"},
prompt.Suggest{Text: "--tail", Description: "Number of lines to show from the end of the logs"},
prompt.Suggest{Text: "--timestamps", Description: "Show timestamps"},
prompt.Suggest{Text: "--until", Description: ""},
},
"ps": {
prompt.Suggest{Text: "--all", Description: "Show all containers (default shows just running)"},
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Pretty-print containers using a Go template"},
prompt.Suggest{Text: "--last", Description: "Show n last created containers (includes all states)"},
prompt.Suggest{Text: "--latest", Description: "Show the latest created container (includes all states)"},
prompt.Suggest{Text: "--no-trunc", Description: "Don’t truncate output"},
prompt.Suggest{Text: "--quiet", Description: "Only display numeric IDs"},
prompt.Suggest{Text: "--size", Description: "Display total file sizes"},
},
"pull": {
prompt.Suggest{Text: "--all-tags", Description: "Download all tagged images in the repository"},
prompt.Suggest{Text: "--disable-content-trust", Description: "Skip image verification"},
prompt.Suggest{Text: "--platform", Description: ""},
prompt.Suggest{Text: "--quiet", Description: "Suppress verbose output"},
},
"push": {
prompt.Suggest{Text: "--disable-content-trust", Description: "Skip image signing"},
},
"restart": {
prompt.Suggest{Text: "--time", Description: "Seconds to wait for stop before killing the container"},
},
"rm": {
prompt.Suggest{Text: "--force", Description: "Force the removal of a running container (uses SIGKILL)"},
prompt.Suggest{Text: "--link", Description: "Remove the specified link"},
prompt.Suggest{Text: "--volumes", Description: "Remove the volumes associated with the container"},
},
"rmi": {
prompt.Suggest{Text: "--force", Description: "Force removal of the image"},
prompt.Suggest{Text: "--no-prune", Description: "Do not delete untagged parents"},
},
"run": {
prompt.Suggest{Text: "--add-host", Description: "Add a custom host-to-IP mapping (host:ip)"},
prompt.Suggest{Text: "--attach", Description: "Attach to STDIN, STDOUT or STDERR"},
prompt.Suggest{Text: "--blkio-weight", Description: "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)"},
prompt.Suggest{Text: "--blkio-weight-device", Description: "Block IO weight (relative device weight)"},
prompt.Suggest{Text: "--cap-add", Description: "Add Linux capabilities"},
prompt.Suggest{Text: "--cap-drop", Description: "Drop Linux capabilities"},
prompt.Suggest{Text: "--cgroup-parent", Description: "Optional parent cgroup for the container"},
prompt.Suggest{Text: "--cidfile", Description: "Write the container ID to the file"},
prompt.Suggest{Text: "--cpu-count", Description: "CPU count (Windows only)"},
prompt.Suggest{Text: "--cpu-percent", Description: "CPU percent (Windows only)"},
prompt.Suggest{Text: "--cpu-period", Description: "Limit CPU CFS (Completely Fair Scheduler) period"},
prompt.Suggest{Text: "--cpu-quota", Description: "Limit CPU CFS (Completely Fair Scheduler) quota"},
prompt.Suggest{Text: "--cpu-rt-period", Description: ""},
prompt.Suggest{Text: "--cpu-rt-runtime", Description: ""},
prompt.Suggest{Text: "--cpu-shares", Description: "CPU shares (relative weight)"},
prompt.Suggest{Text: "--cpus", Description: ""},
prompt.Suggest{Text: "--cpuset-cpus", Description: "CPUs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--cpuset-mems", Description: "MEMs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--detach", Description: "Run container in background and print container ID"},
prompt.Suggest{Text: "--detach-keys", Description: "Override the key sequence for detaching a container"},
prompt.Suggest{Text: "--device", Description: "Add a host device to the container"},
prompt.Suggest{Text: "--device-cgroup-rule", Description: "Add a rule to the cgroup allowed devices list"},
prompt.Suggest{Text: "--device-read-bps", Description: "Limit read rate (bytes per second) from a device"},
prompt.Suggest{Text: "--device-read-iops", Description: "Limit read rate (IO per second) from a device"},
prompt.Suggest{Text: "--device-write-bps", Description: "Limit write rate (bytes per second) to a device"},
prompt.Suggest{Text: "--device-write-iops", Description: "Limit write rate (IO per second) to a device"},
prompt.Suggest{Text: "--disable-content-trust", Description: "Skip image verification"},
prompt.Suggest{Text: "--dns", Description: "Set custom DNS servers"},
prompt.Suggest{Text: "--dns-opt", Description: "Set DNS options"},
prompt.Suggest{Text: "--dns-option", Description: "Set DNS options"},
prompt.Suggest{Text: "--dns-search", Description: "Set custom DNS search domains"},
prompt.Suggest{Text: "--domainname", Description: "Container NIS domain name"},
prompt.Suggest{Text: "--entrypoint", Description: "Overwrite the default ENTRYPOINT of the image"},
prompt.Suggest{Text: "--env", Description: "Set environment variables"},
prompt.Suggest{Text: "--env-file", Description: "Read in a file of environment variables"},
prompt.Suggest{Text: "--expose", Description: "Expose a port or a range of ports"},
prompt.Suggest{Text: "--gpus", Description: ""},
prompt.Suggest{Text: "--group-add", Description: "Add additional groups to join"},
prompt.Suggest{Text: "--health-cmd", Description: "Command to run to check health"},
prompt.Suggest{Text: "--health-interval", Description: "Time between running the check (ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--health-retries", Description: "Consecutive failures needed to report unhealthy"},
prompt.Suggest{Text: "--health-start-period", Description: ""},
prompt.Suggest{Text: "--health-timeout", Description: "Maximum time to allow one check to run (ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--help", Description: "Print usage"},
prompt.Suggest{Text: "--hostname", Description: "Container host name"},
prompt.Suggest{Text: "--init", Description: ""},
prompt.Suggest{Text: "--interactive", Description: "Keep STDIN open even if not attached"},
prompt.Suggest{Text: "--io-maxbandwidth", Description: "Maximum IO bandwidth limit for the system drive (Windows only)"},
prompt.Suggest{Text: "--io-maxiops", Description: "Maximum IOps limit for the system drive (Windows only)"},
prompt.Suggest{Text: "--ip", Description: "IPv4 address (e.g., 172.30.100.104)"},
prompt.Suggest{Text: "--ip6", Description: "IPv6 address (e.g., 2001:db8::33)"},
prompt.Suggest{Text: "--ipc", Description: "IPC mode to use"},
prompt.Suggest{Text: "--isolation", Description: "Container isolation technology"},
prompt.Suggest{Text: "--kernel-memory", Description: "Kernel memory limit"},
prompt.Suggest{Text: "--label", Description: "Set meta data on a container"},
prompt.Suggest{Text: "--label-file", Description: "Read in a line delimited file of labels"},
prompt.Suggest{Text: "--link", Description: "Add link to another container"},
prompt.Suggest{Text: "--link-local-ip", Description: "Container IPv4/IPv6 link-local addresses"},
prompt.Suggest{Text: "--log-driver", Description: "Logging driver for the container"},
prompt.Suggest{Text: "--log-opt", Description: "Log driver options"},
prompt.Suggest{Text: "--mac-address", Description: "Container MAC address (e.g., 92:d0:c6:0a:29:33)"},
prompt.Suggest{Text: "--memory", Description: "Memory limit"},
prompt.Suggest{Text: "--memory-reservation", Description: "Memory soft limit"},
prompt.Suggest{Text: "--memory-swap", Description: "Swap limit equal to memory plus swap: ‘-1’ to enable unlimited swap"},
prompt.Suggest{Text: "--memory-swappiness", Description: "Tune container memory swappiness (0 to 100)"},
prompt.Suggest{Text: "--mount", Description: "Attach a filesystem mount to the container"},
prompt.Suggest{Text: "--name", Description: "Assign a name to the container"},
prompt.Suggest{Text: "--net", Description: "Connect a container to a network"},
prompt.Suggest{Text: "--net-alias", Description: "Add network-scoped alias for the container"},
prompt.Suggest{Text: "--network", Description: "Connect a container to a network"},
prompt.Suggest{Text: "--network-alias", Description: "Add network-scoped alias for the container"},
prompt.Suggest{Text: "--no-healthcheck", Description: "Disable any container-specified HEALTHCHECK"},
prompt.Suggest{Text: "--oom-kill-disable", Description: "Disable OOM Killer"},
prompt.Suggest{Text: "--oom-score-adj", Description: "Tune host’s OOM preferences (-1000 to 1000)"},
prompt.Suggest{Text: "--pid", Description: "PID namespace to use"},
prompt.Suggest{Text: "--pids-limit", Description: "Tune container pids limit (set -1 for unlimited)"},
prompt.Suggest{Text: "--platform", Description: ""},
prompt.Suggest{Text: "--privileged", Description: "Give extended privileges to this container"},
prompt.Suggest{Text: "--publish", Description: "Publish a container’s port(s) to the host"},
prompt.Suggest{Text: "--publish-all", Description: "Publish all exposed ports to random ports"},
prompt.Suggest{Text: "--read-only", Description: "Mount the container’s root filesystem as read only"},
prompt.Suggest{Text: "--restart", Description: "Restart policy to apply when a container exits"},
prompt.Suggest{Text: "--rm", Description: "Automatically remove the container when it exits"},
prompt.Suggest{Text: "--runtime", Description: "Runtime to use for this container"},
prompt.Suggest{Text: "--security-opt", Description: "Security Options"},
prompt.Suggest{Text: "--shm-size", Description: "Size of /dev/shm"},
prompt.Suggest{Text: "--sig-proxy", Description: "Proxy received signals to the process"},
prompt.Suggest{Text: "--stop-signal", Description: "Signal to stop a container"},
prompt.Suggest{Text: "--stop-timeout", Description: ""},
prompt.Suggest{Text: "--storage-opt", Description: "Storage driver options for the container"},
prompt.Suggest{Text: "--sysctl", Description: "Sysctl options"},
prompt.Suggest{Text: "--tmpfs", Description: "Mount a tmpfs directory"},
prompt.Suggest{Text: "--tty", Description: "Allocate a pseudo-TTY"},
prompt.Suggest{Text: "--ulimit", Description: "Ulimit options"},
prompt.Suggest{Text: "--user", Description: "Username or UID (format: <name|uid>[:<group|gid>])"},
prompt.Suggest{Text: "--userns", Description: "User namespace to use"},
prompt.Suggest{Text: "--uts", Description: "UTS namespace to use"},
prompt.Suggest{Text: "--volume", Description: "Bind mount a volume"},
prompt.Suggest{Text: "--volume-driver", Description: "Optional volume driver for the container"},
prompt.Suggest{Text: "--volumes-from", Description: "Mount volumes from the specified container(s)"},
prompt.Suggest{Text: "--workdir", Description: "Working directory inside the container"},
},
"save": {
prompt.Suggest{Text: "--output", Description: "Write to a file, instead of STDOUT"},
},
"search": {
prompt.Suggest{Text: "--automated", Description: ""},
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Pretty-print search using a Go template"},
prompt.Suggest{Text: "--limit", Description: "Max number of search results"},
prompt.Suggest{Text: "--no-trunc", Description: "Don’t truncate output"},
prompt.Suggest{Text: "--stars", Description: ""},
},
"stack": {
prompt.Suggest{Text: "--kubeconfig", Description: ""},
prompt.Suggest{Text: "--orchestrator", Description: "Orchestrator to use (swarm|kubernetes|all)"},
},
"start": {
prompt.Suggest{Text: "--attach", Description: "Attach STDOUT/STDERR and forward signals"},
prompt.Suggest{Text: "--checkpoint", Description: ""},
prompt.Suggest{Text: "--checkpoint-dir", Description: ""},
prompt.Suggest{Text: "--detach-keys", Description: "Override the key sequence for detaching a container"},
prompt.Suggest{Text: "--interactive", Description: "Attach container’s STDIN"},
},
"stats": {
prompt.Suggest{Text: "--all", Description: "Show all containers (default shows just running)"},
prompt.Suggest{Text: "--format", Description: "Pretty-print images using a Go template"},
prompt.Suggest{Text: "--no-stream", Description: "Disable streaming stats and only pull the first result"},
prompt.Suggest{Text: "--no-trunc", Description: "Do not truncate output"},
},
"stop": {
prompt.Suggest{Text: "--time", Description: "Seconds to wait for stop before killing it"},
},
"update": {
prompt.Suggest{Text: "--blkio-weight", Description: "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)"},
prompt.Suggest{Text: "--cpu-period", Description: "Limit CPU CFS (Completely Fair Scheduler) period"},
prompt.Suggest{Text: "--cpu-quota", Description: "Limit CPU CFS (Completely Fair Scheduler) quota"},
prompt.Suggest{Text: "--cpu-rt-period", Description: ""},
prompt.Suggest{Text: "--cpu-rt-runtime", Description: ""},
prompt.Suggest{Text: "--cpu-shares", Description: "CPU shares (relative weight)"},
prompt.Suggest{Text: "--cpus", Description: ""},
prompt.Suggest{Text: "--cpuset-cpus", Description: "CPUs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--cpuset-mems", Description: "MEMs in which to allow execution (0-3, 0,1)"},
prompt.Suggest{Text: "--kernel-memory", Description: "Kernel memory limit"},
prompt.Suggest{Text: "--memory", Description: "Memory limit"},
prompt.Suggest{Text: "--memory-reservation", Description: "Memory soft limit"},
prompt.Suggest{Text: "--memory-swap", Description: "Swap limit equal to memory plus swap: ‘-1’ to enable unlimited swap"},
prompt.Suggest{Text: "--pids-limit", Description: ""},
prompt.Suggest{Text: "--restart", Description: "Restart policy to apply when a container exits"},
},
"version": {
prompt.Suggest{Text: "--format", Description: "Format the output using the given Go template"},
prompt.Suggest{Text: "--kubeconfig", Description: ""},
},
"service": {
{Text: "create", Description: "Create a new service"},
{Text: "inspect", Description: "Display detailed information on one or more services"},
{Text: "logs", Description: "Fetch the logs of a service or task"},
{Text: "ls", Description: "List services"},
{Text: "ps", Description: "List the tasks of one or more services"},
{Text: "rm", Description: "Remove one or more services"},
{Text: "rollback", Description: "Revert changes to a service’s configuration"},
{Text: "scale", Description: "Scale one or multiple replicated services"},
{Text: "update", Description: "Update a service"},
},
"service create": {
prompt.Suggest{Text: "--config", Description: "Specify configurations to expose to the service"},
prompt.Suggest{Text: "--constraint", Description: "Placement constraints"},
prompt.Suggest{Text: "--container-label", Description: "Container labels"},
prompt.Suggest{Text: "--credential-spec", Description: "Credential spec for managed service account (Windows only)"},
prompt.Suggest{Text: "--detach", Description: "Exit immediately instead of waiting for the service to converge"},
prompt.Suggest{Text: "--dns", Description: "Set custom DNS servers"},
prompt.Suggest{Text: "--dns-option", Description: "Set DNS options"},
prompt.Suggest{Text: "--dns-search", Description: "Set custom DNS search domains"},
prompt.Suggest{Text: "--endpoint-mode", Description: "Endpoint mode (vip or dnsrr)"},
prompt.Suggest{Text: "--entrypoint", Description: "Overwrite the default ENTRYPOINT of the image"},
prompt.Suggest{Text: "--env", Description: "Set environment variables"},
prompt.Suggest{Text: "--env-file", Description: "Read in a file of environment variables"},
prompt.Suggest{Text: "--generic-resource", Description: "User defined resources"},
prompt.Suggest{Text: "--group", Description: "Set one or more supplementary user groups for the container"},
prompt.Suggest{Text: "--health-cmd", Description: "Command to run to check health"},
prompt.Suggest{Text: "--health-interval", Description: "Time between running the check (ms|s|m|h)"},
prompt.Suggest{Text: "--health-retries", Description: "Consecutive failures needed to report unhealthy"},
prompt.Suggest{Text: "--health-start-period", Description: "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)"},
prompt.Suggest{Text: "--health-timeout", Description: "Maximum time to allow one check to run (ms|s|m|h)"},
prompt.Suggest{Text: "--host", Description: "Set one or more custom host-to-IP mappings (host:ip)"},
prompt.Suggest{Text: "--hostname", Description: "Container hostname"},
prompt.Suggest{Text: "--init", Description: "Use an init inside each service container to forward signals and reap processes"},
prompt.Suggest{Text: "--isolation", Description: "Service container isolation mode"},
prompt.Suggest{Text: "--label", Description: "Service labels"},
prompt.Suggest{Text: "--limit-cpu", Description: "Limit CPUs"},
prompt.Suggest{Text: "--limit-memory", Description: "Limit Memory"},
prompt.Suggest{Text: "--log-driver", Description: "Logging driver for service"},
prompt.Suggest{Text: "--log-opt", Description: "Logging driver options"},
prompt.Suggest{Text: "--mode", Description: "Service mode (replicated or global)"},
prompt.Suggest{Text: "--mount", Description: "Attach a filesystem mount to the service"},
prompt.Suggest{Text: "--name", Description: "Service name"},
prompt.Suggest{Text: "--network", Description: "Network attachments"},
prompt.Suggest{Text: "--no-healthcheck", Description: "Disable any container-specified HEALTHCHECK"},
prompt.Suggest{Text: "--no-resolve-image", Description: "Do not query the registry to resolve image digest and supported platforms"},
prompt.Suggest{Text: "--placement-pref", Description: "Add a placement preference"},
prompt.Suggest{Text: "--publish", Description: "Publish a port as a node port"},
prompt.Suggest{Text: "--quiet", Description: "Suppress progress output"},
prompt.Suggest{Text: "--read-only", Description: "Mount the container’s root filesystem as read only"},
prompt.Suggest{Text: "--replicas", Description: "Number of tasks"},
prompt.Suggest{Text: "--replicas-max-per-node", Description: "Maximum number of tasks per node (default 0 = unlimited)"},
prompt.Suggest{Text: "--reserve-cpu", Description: "Reserve CPUs"},
prompt.Suggest{Text: "--reserve-memory", Description: "Reserve Memory"},
prompt.Suggest{Text: "--restart-condition", Description: "Restart when condition is met (“none”|”on-failure”|”any”) (default “any”)"},
prompt.Suggest{Text: "--restart-delay", Description: "Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)"},
prompt.Suggest{Text: "--restart-max-attempts", Description: "Maximum number of restarts before giving up"},
prompt.Suggest{Text: "--restart-window", Description: "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--rollback-delay", Description: "Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--rollback-failure-action", Description: "Action on rollback failure (“pause”|”continue”) (default “pause”)"},
prompt.Suggest{Text: "--rollback-max-failure-ratio", Description: "Failure rate to tolerate during a rollback (default 0)"},
prompt.Suggest{Text: "--rollback-monitor", Description: "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)"},
prompt.Suggest{Text: "--rollback-order", Description: "Rollback order (“start-first”|”stop-first”) (default “stop-first”)"},
prompt.Suggest{Text: "--rollback-parallelism", Description: "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)"},
prompt.Suggest{Text: "--secret", Description: "Specify secrets to expose to the service"},
prompt.Suggest{Text: "--stop-grace-period", Description: "Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)"},
prompt.Suggest{Text: "--stop-signal", Description: "Signal to stop the container"},
prompt.Suggest{Text: "--sysctl", Description: "Sysctl options"},
prompt.Suggest{Text: "--tty", Description: "Allocate a pseudo-TTY"},
prompt.Suggest{Text: "--update-delay", Description: "Delay between updates (ns|us|ms|s|m|h) (default 0s)"},
prompt.Suggest{Text: "--update-failure-action", Description: "Action on update failure (“pause”|”continue”|”rollback”) (default “pause”)"},
prompt.Suggest{Text: "--update-max-failure-ratio", Description: "Failure rate to tolerate during an update (default 0)"},
prompt.Suggest{Text: "--update-monitor", Description: "Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s)"},
prompt.Suggest{Text: "--update-order", Description: "Update order (“start-first”|”stop-first”) (default “stop-first”)"},
prompt.Suggest{Text: "--update-parallelism", Description: "Maximum number of tasks updated simultaneously (0 to update all at once)"},
prompt.Suggest{Text: "--user", Description: "Username or UID (format: <name|uid>[:<group|gid>])"},
prompt.Suggest{Text: "--with-registry-auth", Description: "Send registry authentication details to swarm agents"},
prompt.Suggest{Text: "--workdir", Description: "Working directory inside the container"},
},
"service inspect": {
prompt.Suggest{Text: "--format", Description: "Format the output using the given Go template"},
prompt.Suggest{Text: "--pretty", Description: "Print the information in a human friendly format"},
},
"service logs": {
prompt.Suggest{Text: "--details", Description: "Show extra details provided to logs"},
prompt.Suggest{Text: "--follow", Description: "Follow log output"},
prompt.Suggest{Text: "--no-resolve", Description: "Do not map IDs to Names in output"},
prompt.Suggest{Text: "--no-task-ids", Description: "Do not include task IDs in output"},
prompt.Suggest{Text: "--no-trunc", Description: "Do not truncate output"},
prompt.Suggest{Text: "--raw", Description: "Do not neatly format logs"},
prompt.Suggest{Text: "--since", Description: "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)"},
prompt.Suggest{Text: "--tail", Description: "Number of lines to show from the end of the logs"},
prompt.Suggest{Text: "--timestamps", Description: "Show timestamps"},
},
"service ls": {
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Pretty-print services using a Go template"},
prompt.Suggest{Text: "--quiet", Description: "Only display IDs"},
},
"service ps": {
prompt.Suggest{Text: "--filter", Description: "Filter output based on conditions provided"},
prompt.Suggest{Text: "--format", Description: "Pretty-print tasks using a Go template"},
prompt.Suggest{Text: "--no-resolve", Description: "Do not map IDs to Names"},
prompt.Suggest{Text: "--no-trunc", Description: "Do not truncate output"},
prompt.Suggest{Text: "--quiet", Description: "Only display task IDs"},
},
"service rollback": {
prompt.Suggest{Text: "--detach", Description: "Exit immediately instead of waiting for the service to converge"},
prompt.Suggest{Text: "--quiet", Description: "Suppress progress output"},
},
"service scale": {
prompt.Suggest{Text: "--detach", Description: "Exit immediately instead of waiting for the service to converge"},
},
"service update": {
prompt.Suggest{Text: "--args", Description: "Service command args"},
prompt.Suggest{Text: "--config-add", Description: "Add or update a config file on a service"},
prompt.Suggest{Text: "--config-rm", Description: "Remove a configuration file"},
prompt.Suggest{Text: "--constraint-add", Description: "Add or update a placement constraint"},
prompt.Suggest{Text: "--constraint-rm", Description: "Remove a constraint"},
prompt.Suggest{Text: "--container-label-add", Description: "Add or update a container label"},
prompt.Suggest{Text: "--container-label-rm", Description: "Remove a container label by its key"},
prompt.Suggest{Text: "--credential-spec", Description: "Credential spec for managed service account (Windows only)"},
prompt.Suggest{Text: "--detach", Description: "Exit immediately instead of waiting for the service to converge"},
prompt.Suggest{Text: "--dns-add", Description: "Add or update a custom DNS server"},
prompt.Suggest{Text: "--dns-option-add", Description: "Add or update a DNS option"},
prompt.Suggest{Text: "--dns-option-rm", Description: "Remove a DNS option"},
prompt.Suggest{Text: "--dns-rm", Description: "Remove a custom DNS server"},
prompt.Suggest{Text: "--dns-search-add", Description: "Add or update a custom DNS search domain"},
prompt.Suggest{Text: "--dns-search-rm", Description: "Remove a DNS search domain"},
prompt.Suggest{Text: "--endpoint-mode", Description: "Endpoint mode (vip or dnsrr)"},
prompt.Suggest{Text: "--entrypoint", Description: "Overwrite the default ENTRYPOINT of the image"},
prompt.Suggest{Text: "--env-add", Description: "Add or update an environment variable"},
prompt.Suggest{Text: "--env-rm", Description: "Remove an environment variable"},
prompt.Suggest{Text: "--force", Description: "Force update even if no changes require it"},
prompt.Suggest{Text: "--generic-resource-add", Description: "Add a Generic resource"},
prompt.Suggest{Text: "--generic-resource-rm", Description: "Remove a Generic resource"},
prompt.Suggest{Text: "--group-add", Description: "Add an additional supplementary user group to the container"},
prompt.Suggest{Text: "--group-rm", Description: "Remove a previously added supplementary user group from the container"},
prompt.Suggest{Text: "--health-cmd", Description: "Command to run to check health"},
prompt.Suggest{Text: "--health-interval", Description: "Time between running the check (ms|s|m|h)"},
prompt.Suggest{Text: "--health-retries", Description: "Consecutive failures needed to report unhealthy"},
prompt.Suggest{Text: "--health-start-period", Description: "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)"},
prompt.Suggest{Text: "--health-timeout", Description: "Maximum time to allow one check to run (ms|s|m|h)"},
prompt.Suggest{Text: "--host-add", Description: "Add a custom host-to-IP mapping (host:ip)"},
prompt.Suggest{Text: "--host-rm", Description: "Remove a custom host-to-IP mapping (host:ip)"},
prompt.Suggest{Text: "--hostname", Description: "Container hostname"},
prompt.Suggest{Text: "--image", Description: "Service image tag"},
prompt.Suggest{Text: "--init", Description: "Use an init inside each service container to forward signals and reap processes"},
prompt.Suggest{Text: "--isolation", Description: "Service container isolation mode"},
prompt.Suggest{Text: "--label-add", Description: "Add or update a service label"},
prompt.Suggest{Text: "--label-rm", Description: "Remove a label by its key"},
prompt.Suggest{Text: "--limit-cpu", Description: "Limit CPUs"},
prompt.Suggest{Text: "--limit-memory", Description: "Limit Memory"},
prompt.Suggest{Text: "--log-driver", Description: "Logging driver for service"},
prompt.Suggest{Text: "--log-opt", Description: "Logging driver options"},
prompt.Suggest{Text: "--mount-add", Description: "Add or update a mount on a service"},
prompt.Suggest{Text: "--mount-rm", Description: "Remove a mount by its target path"},
prompt.Suggest{Text: "--network-add", Description: "Add a network"},
prompt.Suggest{Text: "--network-rm", Description: "Remove a network"},
prompt.Suggest{Text: "--no-healthcheck", Description: "Disable any container-specified HEALTHCHECK"},
prompt.Suggest{Text: "--no-resolve-image", Description: "Do not query the registry to resolve image digest and supported platforms"},
prompt.Suggest{Text: "--placement-pref-add", Description: "Add a placement preference"},
prompt.Suggest{Text: "--placement-pref-rm", Description: "Remove a placement preference"},
prompt.Suggest{Text: "--publish-add", Description: "Add or update a published port"},
prompt.Suggest{Text: "--publish-rm", Description: "Remove a published port by its target port"},
prompt.Suggest{Text: "--quiet", Description: "Suppress progress output"},
prompt.Suggest{Text: "--read-only", Description: "Mount the container’s root filesystem as read only"},
prompt.Suggest{Text: "--replicas", Description: "Number of tasks"},
prompt.Suggest{Text: "--replicas-max-per-node", Description: "Maximum number of tasks per node (default 0 = unlimited)"},
prompt.Suggest{Text: "--reserve-cpu", Description: "Reserve CPUs"},
prompt.Suggest{Text: "--reserve-memory", Description: "Reserve Memory"},
prompt.Suggest{Text: "--restart-condition", Description: "Restart when condition is met (“none”|”on-failure”|”any”)"},
prompt.Suggest{Text: "--restart-delay", Description: "Delay between restart attempts (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--restart-max-attempts", Description: "Maximum number of restarts before giving up"},
prompt.Suggest{Text: "--restart-window", Description: "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--rollback", Description: "Rollback to previous specification"},
prompt.Suggest{Text: "--rollback-delay", Description: "Delay between task rollbacks (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--rollback-failure-action", Description: "Action on rollback failure (“pause”|”continue”)"},
prompt.Suggest{Text: "--rollback-max-failure-ratio", Description: "Failure rate to tolerate during a rollback"},
prompt.Suggest{Text: "--rollback-monitor", Description: "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--rollback-order", Description: "Rollback order (“start-first”|”stop-first”)"},
prompt.Suggest{Text: "--rollback-parallelism", Description: "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)"},
prompt.Suggest{Text: "--secret-add", Description: "Add or update a secret on a service"},
prompt.Suggest{Text: "--secret-rm", Description: "Remove a secret"},
prompt.Suggest{Text: "--stop-grace-period", Description: "Time to wait before force killing a container (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--stop-signal", Description: "Signal to stop the container"},
prompt.Suggest{Text: "--sysctl-add", Description: "Add or update a Sysctl option"},
prompt.Suggest{Text: "--sysctl-rm", Description: "Remove a Sysctl option"},
prompt.Suggest{Text: "--tty", Description: "Allocate a pseudo-TTY"},
prompt.Suggest{Text: "--update-delay", Description: "Delay between updates (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--update-failure-action", Description: "Action on update failure (“pause”|”continue”|”rollback”)"},
prompt.Suggest{Text: "--update-max-failure-ratio", Description: "Failure rate to tolerate during an update"},
prompt.Suggest{Text: "--update-monitor", Description: "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"},
prompt.Suggest{Text: "--update-order", Description: "Update order (“start-first”|”stop-first”)"},
prompt.Suggest{Text: "--update-parallelism", Description: "Maximum number of tasks updated simultaneously (0 to update all at once)"},
prompt.Suggest{Text: "--user", Description: "Username or UID (format: <name|uid>[:<group|gid>])"},
prompt.Suggest{Text: "--with-registry-auth", Description: "Send registry authentication details to swarm agents"},
prompt.Suggest{Text: "--workdir", Description: "Working directory inside the container"},
},
},
}
}
// GetDockerSuggestions returns the suggestions for the top-level docker
// commands.
func (c *Commands) GetDockerSuggestions() []prompt.Suggest {
	return c.DockerSuggestions
}
// GetDockerSubSuggestions returns the flag and sub-command suggestions,
// keyed by the docker command (e.g. "run", "service update") they apply to.
func (c *Commands) GetDockerSubSuggestions() map[string][]prompt.Suggest {
	return c.DockerSubSuggestions
}
// IsDockerCommand reports whether kw is one of the known top-level docker
// commands.
func (c *Commands) IsDockerCommand(kw string) bool {
	for i := range c.DockerSuggestions {
		if c.DockerSuggestions[i].Text == kw {
			return true
		}
	}
	return false
}
func (c *Commands) IsDockerSubCommand(kw string) ([]prompt.Suggest, bool) {
val, ok := c.DockerSubSuggestions[kw]
return val, ok
} | lib/commands.go | 0.530723 | 0.438845 | commands.go | starcoder |
package common
import (
"image/color"
"math"
"math/rand"
)
// HSV is a color expressed as hue, saturation and value. The components
// carry no fixed scale; the maxima are supplied when converting to RGB.
type HSV struct {
	H, S, V int
}

// ToRGB converts a HSV color mode to RGB mode.
// mh, ms and mv give the maximum values of the H, S and V components;
// components above their maximum are clamped to it. Note that the alpha
// channel of the result is left at 0.
func (hs HSV) ToRGB(mh, ms, mv int) color.RGBA {
	// Clamp each component to its caller-supplied maximum.
	if hs.H > mh {
		hs.H = mh
	}
	if hs.S > ms {
		hs.S = ms
	}
	if hs.V > mv {
		hs.V = mv
	}
	// Normalize each component to [0, 1].
	hue := float64(hs.H) / float64(mh)
	sat := float64(hs.S) / float64(ms)
	val := float64(hs.V) / float64(mv)

	var r, g, b float64
	if sat == 0 {
		// Achromatic: every channel carries the value component.
		r, g, b = val*255, val*255, val*255
	} else {
		hue = hue * 6
		if hue == 6 {
			hue = 0 // hue must stay strictly below 6 sectors
		}
		sector := math.Floor(hue)
		p := val * (1 - sat)
		q := val * (1 - sat*(hue-sector))
		u := val * (1 - sat*(1-(hue-sector)))
		switch sector {
		case 0:
			r, g, b = val, u, p
		case 1:
			r, g, b = q, val, p
		case 2:
			r, g, b = p, val, u
		case 3:
			r, g, b = p, q, val
		case 4:
			r, g, b = u, p, val
		default:
			r, g, b = val, p, q
		}
		// Scale the normalized channels to the 0-255 byte range.
		r, g, b = r*255, g*255, b*255
	}
	return color.RGBA{
		R: uint8(r),
		G: uint8(g),
		B: uint8(b),
		A: 0,
	}
}
// ConvertCartesianToPixel converts cartesian coordinates to actual pixels in
// an image of h rows and w columns. The origin maps to the image center and
// xaixs/yaixs give the half-extent of each axis.
func ConvertCartesianToPixel(x, y, xaixs, yaixs float64, h, w int) (int, int) {
	i := w/2 + int(float64(w)/2*(x/xaixs))
	j := h/2 + int(float64(h)/2*(y/yaixs))
	return i, j
}
// ConvertCartesianToPolarPixel maps a cartesian point to image pixels by
// first converting it to polar coordinates.
func ConvertCartesianToPolarPixel(x, y, xaixs, yaixs float64, h, w int) (int, int) {
	radius, angle := ConvertCartesianToPolar(x, y)
	return ConvertPolarToPixel(radius, angle, xaixs, yaixs, h, w)
}
// ConvertCartesianToPolar converts a point from cartesian coordinates to
// polar coordinates, returning the radius and the angle in radians.
//
// Bug fix: the angle must be computed with math.Atan2(y, x). The previous
// math.Atanh (hyperbolic arctangent) returned NaN whenever |y/x| >= 1 and
// ignored the quadrant entirely. math.Hypot also avoids overflow/underflow
// in the radius for extreme components.
func ConvertCartesianToPolar(x, y float64) (float64, float64) {
	r := math.Hypot(x, y)
	theta := math.Atan2(y, x)
	return r, theta
}
// ConvertPolarToPixel converts polar coordinates to actual pixels in an
// image of h rows and w columns, with the origin mapped to the image center.
func ConvertPolarToPixel(r, theta, xaixs, yaixs float64, h, w int) (int, int) {
	x := r * math.Cos(theta)
	y := r * math.Sin(theta)
	i := w/2 + int(float64(w)/2*(x/xaixs))
	j := h/2 + int(float64(h)/2*(y/yaixs))
	return i, j
}
// Distance returns the Euclidean distance between two points on the 2D plane.
// math.Hypot is the idiomatic stdlib call and avoids the intermediate
// overflow/underflow that explicitly squaring the components can cause.
func Distance(x1, y1, x2, y2 float64) float64 {
	return math.Hypot(x1-x2, y1-y2)
}
// RandomRangeInt returns a pseudo-random integer in the half-open interval
// [min, max). When max <= min it returns min instead of panicking
// (rand.Intn panics for non-positive arguments).
func RandomRangeInt(min, max int) int {
	if max <= min {
		return min
	}
	return min + rand.Intn(max-min)
}
// RandomRangeFloat64 returns a pseudo-random float64 in the half-open
// interval [min, max).
func RandomRangeFloat64(min, max float64) float64 {
	span := max - min
	return min + span*rand.Float64()
}
// RandomGaussian returns a normally distributed pseudo-random float64 with
// the given mean and standard deviation.
func RandomGaussian(mean, std float64) float64 {
	return mean + std*rand.NormFloat64()
}
package dymessage
import "fmt"
type (
	// Registry represents a collection of message definitions. The
	// messages defined in the registry may refer only to messages that
	// are also defined in the same registry.
	Registry struct {
		// A collection of message definitions at the positions by
		// which these definitions are referenced from other ones and
		// outside.
		Defs []*MessageDef
	}
	// MessageDef represents a definition of the message structure.
	MessageDef struct {
		Namespace string // An optional namespace of the message definition
		Name string // Name of the message definition
		Registry *Registry // A registry this definition belongs to
		DataType DataType // An entity data type represented by this instance
		// A collection of fields that belong to the message.
		Fields []*MessageFieldDef
		// Number of bytes taken by primitive values. These don't
		// include the repeated values, which are represented by a
		// separate entity.
		DataBufLength int
		// Number of entities referenced by the root. The collections
		// of entities and repeated primitive values are represented
		// by a single entity.
		EntityBufLength int
	}
	// MessageFieldDef represents a single field of a message.
	MessageFieldDef struct {
		// Embedded collection of extensions which alter the
		// serialization and deserialization behavior of the current
		// field.
		Extensions
		Name string // A name of the field unique in bounds of the message definition
		DataType DataType // Data type of the message field
		Tag uint64 // A tag unique in bounds of the message definition
		Repeated bool // Indicates whether the field contains a collection of items
		// Offset of the field in the array of bytes if the field is of
		// a primitive type and not repeated. Elsewhere, an index in the
		// array of entities.
		Offset int
	}
)
// -----------------------------------------------------------------------------
// Implementation
// GetMessageDef gets the message definition by its data type, panicking
// when the registry holds no definition at the corresponding position.
func (r *Registry) GetMessageDef(dt DataType) *MessageDef {
	id, n := int(dt&^DtEntity), len(r.Defs)
	if id < n {
		return r.Defs[id]
	}
	panic(fmt.Sprintf(
		"expected message definition at %d, but got only %d definitions", id, n))
}
// NewEntity creates a new entity whose data and entity buffers are sized to
// hold this definition's primitive and reference fields respectively.
func (md *MessageDef) NewEntity() *Entity {
	e := &Entity{
		DataType: md.DataType,
		Data:     make([]byte, md.DataBufLength),
	}
	e.Entities = make([]*Entity, md.EntityBufLength)
	return e
}
// TryGetField gets the field with the specified tag from the message
// definition. The second return value reports whether the field was found.
func (md *MessageDef) TryGetField(tag uint64) (*MessageFieldDef, bool) {
	// For a small number of fields (up to ~30, the vast majority of
	// cases) a linear scan beats a map lookup.
	for i := range md.Fields {
		if f := md.Fields[i]; f.Tag == tag {
			return f, true
		}
	}
	return nil, false
}
// GetField gets the field with the specified tag from the message
// definition, panicking when no such field exists.
func (md *MessageDef) GetField(tag uint64) *MessageFieldDef {
	def, ok := md.TryGetField(tag)
	if !ok {
		panic(fmt.Sprintf("entity doesn't contain the field with tag %d", tag))
	}
	return def
}
// TryGetFieldByName gets the field with the specified name from the message
// definition. The second return value reports whether the field was found.
func (md *MessageDef) TryGetFieldByName(name string) (*MessageFieldDef, bool) {
	for i := range md.Fields {
		if md.Fields[i].Name == name {
			return md.Fields[i], true
		}
	}
	return nil, false
}
// GetFieldByName gets the field with specified name from the message
// definition. If field doesn't exist, the method panics.
func (md *MessageDef) GetFieldByName(name string) *MessageFieldDef {
if def, ok := md.TryGetFieldByName(name); ok {
return def
}
panic(fmt.Sprintf("entity doesn't contain the field with name %q", name))
} | registry.go | 0.725843 | 0.476032 | registry.go | starcoder |
package ui
import (
"regexp"
"strconv"
"strings"
)
// Expand expands a format string using `git log` message syntax,
// substituting placeholders from values and, when colorize is set, emitting
// ANSI color escapes for %C placeholders.
func Expand(format string, values map[string]string, colorize bool) string {
	e := expander{values: values, colorize: colorize}
	return e.Expand(format)
}
// An expander is a stateful helper to expand a format string.
type expander struct {
	// formatted holds the parts of the string that have already been formatted.
	formatted []string
	// values maps placeholder names to their expansions.
	values map[string]string
	// colorize is a flag to indicate whether %C placeholders emit ANSI colors.
	colorize bool
	// skipNext is true when the part that follows belongs to a literal "%%"
	// and must be passed through without expansion.
	skipNext bool
	// padNext, when non-nil, is applied to pad the next expanded placeholder.
	padNext *padder
}
// Expand runs the expansion over format. The string is split on '%', so
// every part after the first starts with a potential placeholder.
func (f *expander) Expand(format string) string {
	parts := strings.Split(format, "%")
	f.formatted = make([]string, 0, len(parts))
	f.append(parts[0])
	for i := 1; i < len(parts); i++ {
		expanded, rest := f.expandOneVar(parts[i])
		f.append(expanded, rest)
	}
	return f.crush()
}
// append adds already-formatted fragments to the output under construction.
func (f *expander) append(formattedText ...string) {
	for _, fragment := range formattedText {
		f.formatted = append(f.formatted, fragment)
	}
}
// crush flattens the accumulated fragments into a single string and resets
// the accumulator.
func (f *expander) crush() string {
	result := strings.Join(f.formatted, "")
	f.formatted = nil
	return result
}
// colorMap maps the color names accepted after %C to their ANSI SGR codes
// ("reset" maps to the empty code, i.e. "\033[m").
var colorMap = map[string]string{
	"black": "30",
	"red": "31",
	"green": "32",
	"yellow": "33",
	"blue": "34",
	"magenta": "35",
	"cyan": "36",
	"white": "37",
	"reset": "",
}
// expandOneVar expands the placeholder at the start of format (the text that
// followed a '%' in the original string). It returns the expanded value and
// the remainder of format that was not consumed.
func (f *expander) expandOneVar(format string) (expand string, untouched string) {
	// The previous part was a "%%" escape, so this part is plain text and
	// must be passed through verbatim.
	if f.skipNext {
		f.skipNext = false
		return "", format
	}
	// An empty part means the '%' was immediately followed by another '%'
	// (or ended the string): emit a literal '%' and skip the next part.
	if format == "" {
		f.skipNext = true
		return "", "%"
	}
	// A pending padding specifier (e.g. "%<(16)") applies to this
	// placeholder: expand it first, then pad the result.
	if f.padNext != nil {
		p := f.padNext
		f.padNext = nil
		e, u := f.expandOneVar(format)
		return f.pad(e, p), u
	}
	// Fixed placeholders such as %n, %Cred, %x00, %+x, % x, %-x, %<(...).
	if e, u, ok := f.expandSpecialChar(format[0], format[1:]); ok {
		return e, u
	}
	// Fall back to the values map, matching the shortest key that prefixes
	// the remaining text (the loop returns on the first hit).
	if f.values != nil {
		for i := 1; i <= len(format); i++ {
			if v, exists := f.values[format[0:i]]; exists {
				return v, format[i:]
			}
		}
	}
	// Unknown placeholder: reproduce it verbatim, '%' included.
	return "", "%" + format
}
// expandSpecialChar handles the fixed placeholders. firstChar is the byte
// that immediately followed '%'; format is everything after it. wasExpanded
// reports whether the character was recognized — when false, the caller
// falls back to the values map.
func (f *expander) expandSpecialChar(firstChar byte, format string) (expand string, untouched string, wasExpanded bool) {
	switch firstChar {
	case 'n':
		// %n: newline.
		return "\n", format, true
	case 'C':
		// %C<color>: ANSI escape; dropped entirely when colorize is off.
		for k, v := range colorMap {
			if strings.HasPrefix(format, k) {
				if f.colorize {
					return "\033[" + v + "m", format[len(k):], true
				}
				return "", format[len(k):], true
			}
		}
		// TODO: Add custom color as specified in color.branch.* options.
		// TODO: Handle auto-coloring.
	case 'x':
		// %xNN: the character whose code point is the hex value NN.
		if len(format) >= 2 {
			if v, err := strconv.ParseInt(format[:2], 16, 32); err == nil {
				return string(v), format[2:], true
			}
		}
	case '+':
		// %+x: prepend a newline, but only if x expands to something.
		if e, u := f.expandOneVar(format); e != "" {
			return "\n" + e, u, true
		} else {
			return "", u, true
		}
	case ' ':
		// % x: prepend a space, but only if x expands to something.
		if e, u := f.expandOneVar(format); e != "" {
			return " " + e, u, true
		} else {
			return "", u, true
		}
	case '-':
		// %-x: when x expands to nothing, strip trailing newlines from
		// the output produced so far.
		if e, u := f.expandOneVar(format); e != "" {
			return e, u, true
		} else {
			f.append(strings.TrimRight(f.crush(), "\n"))
			return "", u, true
		}
	case '<', '>':
		// Padding specifier such as %<(16,trunc): parse it and remember
		// it for the next placeholder.
		if m := paddingPattern.FindStringSubmatch(string(firstChar) + format); len(m) == 7 {
			if p := padderFromConfig(m[1], m[2], m[3], m[4], m[5]); p != nil {
				f.padNext = p
				return "", m[6], true
			}
		}
	}
	return "", "", false
}
// pad applies padder p to the expanded string s, honoring column-relative
// sizes, optionally consuming previously emitted spaces, and truncating when
// s is too long for the requested width.
func (f *expander) pad(s string, p *padder) string {
	size := int(p.size)
	if p.sizeAsColumn {
		// The "|" flag makes size an absolute column, so subtract the
		// width of the current (last) output line.
		previous := f.crush()
		f.append(previous)
		size -= len(previous) - strings.LastIndex(previous, "\n") - 1
	}
	numPadding := size - len(s)
	if numPadding == 0 {
		return s
	}
	if numPadding < 0 {
		// s is wider than the requested size.
		if p.usePreviousSpace {
			// The ">>" form may absorb spaces already emitted before
			// falling back to truncation.
			previous := f.crush()
			noBlanks := strings.TrimRight(previous, " ")
			f.append(noBlanks)
			numPadding += len(previous) - len(noBlanks)
		}
		if numPadding <= 0 {
			return p.truncate(s, -numPadding)
		}
	}
	switch p.orientation {
	case padLeft:
		return strings.Repeat(" ", numPadding) + s
	case padMiddle:
		// Odd padding puts the extra space on the right.
		return strings.Repeat(" ", numPadding/2) + s + strings.Repeat(" ", (numPadding+1)/2)
	}
	// Pad right by default.
	return s + strings.Repeat(" ", numPadding)
}
// paddingOrientation selects which side of the placeholder receives the
// padding spaces.
type paddingOrientation int

const (
	padRight paddingOrientation = iota
	padLeft
	padMiddle
)

// truncingMethod selects how an over-long value is shortened by
// (*padder).truncate.
type truncingMethod int

const (
	noTrunc truncingMethod = iota
	truncLeft
	truncRight
	truncMiddle
)
// padder carries the parsed contents of one padding specifier (e.g.
// "%<(16,trunc)") until the next placeholder is expanded.
type padder struct {
	orientation paddingOrientation // side that receives the padding spaces
	size int64 // requested width, or target column when sizeAsColumn is set
	sizeAsColumn bool // "|" flag: size is an absolute column on the current line
	usePreviousSpace bool // may consume spaces already emitted (">>" form)
	truncing truncingMethod // how an over-long value is shortened
}
// paddingPattern matches a padding specifier: an optional extra '>'
// (consume preceding spaces), the orientation '<' or '>', an optional '|'
// (column-relative size), the width, and an optional truncation mode.
var paddingPattern = regexp.MustCompile(`^(>)?([><])(\|)?\((\d+)(,[rm]?trunc)?\)(.*)$`)
// padderFromConfig builds a padder from the capture groups of
// paddingPattern. It returns nil when size is not a valid integer.
func padderFromConfig(alsoLeft, orientation, asColumn, size, trunc string) *padder {
	width, err := strconv.ParseInt(size, 10, 64)
	if err != nil {
		return nil
	}
	p := &padder{size: width, sizeAsColumn: asColumn != ""}
	switch {
	case orientation == ">":
		p.orientation = padLeft
	case alsoLeft == "":
		p.orientation = padRight
	default:
		p.orientation = padMiddle
	}
	// Only the ">>" form (extra '>' plus left padding) may consume spaces
	// already emitted.
	p.usePreviousSpace = alsoLeft != "" && p.orientation == padLeft
	switch trunc {
	case ",trunc":
		p.truncing = truncLeft
	case ",rtrunc":
		p.truncing = truncRight
	case ",mtrunc":
		p.truncing = truncMiddle
	}
	return p
}
func (p *padder) truncate(s string, numReduce int) string {
if numReduce == 0 {
return s
}
numLeft := len(s) - numReduce - 2
if numLeft < 0 {
numLeft = 0
}
switch p.truncing {
case truncRight:
return ".." + s[len(s)-numLeft:]
case truncMiddle:
return s[:numLeft/2] + ".." + s[len(s)-(numLeft+1)/2:]
}
// Trunc left by default.
return s[:numLeft] + ".."
} | ui/format.go | 0.598312 | 0.422266 | format.go | starcoder |
package validpositions

// A Tree represents a tree of valid array positions. It's a data structure very specifically designed to store
// valid Bleve array positions, and computing their intersections.
// An array position is a []uint64 that denotes the array positions of a certain match
// Example: if we have a field deployment.containers.volumes.name
// and this is matched a volume name "vol1",
// then an array position of []uint64{1, 2} denotes that the match was on the object corresponding to
// deployment.GetContainers()[1].GetVolumes()[2].GetName()
// This tree data structure helps us to match only fields that have the same array positions.
type Tree struct {
	// root is the sentinel node; its children are keyed by the first element
	// of each stored position.
	root *node
	// maxLength is the length of the longest position added so far.
	maxLength int
	// nonEmpty records whether Add has ever been called on this tree.
	nonEmpty bool
}

// node is one level of the tree, keyed by the position value at that depth.
type node struct {
	children map[uint64]*node
}
// NewTreeFromValues returns a new tree containing every given array position.
func NewTreeFromValues(valueSlices ...[]uint64) *Tree {
	t := NewTree()
	for _, vs := range valueSlices {
		t.Add(vs)
	}
	return t
}

// NewTree returns an empty, ready-to-use tree.
func NewTree() *Tree {
	return &Tree{root: newNode()}
}

// newNode allocates a node with an initialized (empty) child map.
func newNode() *node {
	return &node{children: map[uint64]*node{}}
}
// Empty returns whether the tree is empty. A nil tree is empty.
func (t *Tree) Empty() bool {
	return t == nil || !t.nonEmpty
}

// Add adds the given array position to the tree.
// The zero value of Tree is usable: a nil root is initialized lazily
// (previously this panicked on a Tree not created via NewTree).
func (t *Tree) Add(values []uint64) {
	if t.root == nil {
		t.root = newNode()
	}
	t.nonEmpty = true
	if len(values) > t.maxLength {
		t.maxLength = len(values)
	}
	t.root.add(values)
}
// add inserts the path described by values below n, creating intermediate
// nodes as needed. An empty slice is a no-op.
func (n *node) add(values []uint64) {
	for _, v := range values {
		child, ok := n.children[v]
		if !ok {
			child = newNode()
			n.children[v] = child
		}
		n = child
	}
}
// Merge merges the two trees. It essentially computes their intersection, only leaving behind
// paths that exist in both the trees.
// The tree is modified in-place. There are no guarantees that the "other" will not be touched.
func (t *Tree) Merge(other *Tree) {
	switch {
	case t.Empty():
		// Intersecting anything with our empty set leaves it empty.
	case other.Empty():
		// Intersecting with an empty set: clear this tree.
		t.root = newNode()
		t.nonEmpty = false
	default:
		t.root.merge(other.root)
	}
}
// merge intersects the subtree rooted at n with the one rooted at other,
// in place.
func (n *node) merge(other *node) {
	// If the other doesn't have any children, then its length is simply shorter than this one.
	// This is fine, and doesn't affect our intersection.
	// For example, if one element has array positions [1, 2] and the other has array positions [1, 2, 3]
	// then they _are_ matching -- it's just that one element is more nested than the other.
	if len(other.children) == 0 {
		return
	}
	// In this case, this tree is shorter than the other tree, so we copy the other's children over.
	// NOTE: this aliases other's subtree rather than deep-copying it, which is
	// permitted by Merge's documented contract.
	if len(n.children) == 0 {
		n.children = other.children
		return
	}
	// Keep only values present in both nodes, and recurse into shared ones.
	// (Deleting from a map while ranging over it is safe in Go.)
	for val, child := range n.children {
		otherChild, exists := other.children[val]
		if !exists {
			delete(n.children, val)
			continue
		}
		child.merge(otherChild)
	}
}
// Contains returns whether the tree contains the given set of values.
// A nil tree contains nothing; an empty values slice is contained in any
// non-nil tree.
func (t *Tree) Contains(values []uint64) bool {
	if t == nil {
		return false
	}
	return t.root.contains(values)
}

// contains walks the path described by values, reporting whether every
// element is present at successive depths.
func (n *node) contains(values []uint64) bool {
	if len(values) == 0 {
		return true
	}
	child, ok := n.children[values[0]]
	if !ok {
		return false
	}
	return child.contains(values[1:])
} | pkg/search/blevesearch/validpositions/tree.go | 0.882592 | 0.807309 | tree.go | starcoder |
package mat64
import (
"github.com/gonum/blas"
"github.com/gonum/blas/blas64"
"github.com/gonum/lapack/lapack64"
)
// badTriangle is the panic message used when a Triangular matrix has an
// Uplo that is neither blas.Upper nor blas.Lower.
const badTriangle = "mat64: invalid triangle"

// Cholesky calculates the Cholesky decomposition of the matrix A and returns
// whether the matrix is positive definite. The returned matrix is either a
// lower triangular matrix such that A = L * L^T or an upper triangular matrix
// such that A = U^T * U depending on the upper parameter.
//
// It panics when a non-zero receiver has the wrong size or the wrong
// triangle orientation for the requested factor.
func (t *TriDense) Cholesky(a Symmetric, upper bool) (ok bool) {
	n := a.Symmetric()
	// A zero receiver is lazily allocated as an n×n triangular matrix whose
	// orientation matches the requested factor.
	if t.isZero() {
		t.mat = blas64.Triangular{
			N:      n,
			Stride: n,
			Diag:   blas.NonUnit,
			Data:   use(t.mat.Data, n*n),
		}
		if upper {
			t.mat.Uplo = blas.Upper
		} else {
			t.mat.Uplo = blas.Lower
		}
	} else {
		if n != t.mat.N {
			panic(ErrShape)
		}
		if (upper && t.mat.Uplo != blas.Upper) || (!upper && t.mat.Uplo != blas.Lower) {
			panic(ErrTriangle)
		}
	}
	copySymIntoTriangle(t, a)
	// Potrf modifies the data in place
	_, ok = lapack64.Potrf(
		blas64.Symmetric{
			N:      t.mat.N,
			Stride: t.mat.Stride,
			Data:   t.mat.Data,
			Uplo:   t.mat.Uplo,
		})
	return ok
}
// SolveCholesky finds the matrix m that solves A * m = b where A = L * L^T or
// A = U^T * U, and U or L are represented by t, placing the result in the
// receiver.
//
// It panics when the sizes of t and b do not match, or with badTriangle when
// t is neither upper nor lower triangular.
func (m *Dense) SolveCholesky(t Triangular, b Matrix) {
	_, n := t.Dims()
	bm, bn := b.Dims()
	if n != bm {
		panic(ErrShape)
	}
	m.reuseAs(bm, bn)
	if b != m {
		m.Copy(b)
	}
	// TODO(btracey): Implement an algorithm that doesn't require a copy into
	// a blas64.Triangular.
	ta := getBlasTriangular(t)
	// Two triangular solves recover m: for the upper factor solve
	// U^T y = b then U m = y; for the lower factor solve L y = b then
	// L^T m = y.
	switch ta.Uplo {
	case blas.Upper:
		blas64.Trsm(blas.Left, blas.Trans, 1, ta, m.mat)
		blas64.Trsm(blas.Left, blas.NoTrans, 1, ta, m.mat)
	case blas.Lower:
		blas64.Trsm(blas.Left, blas.NoTrans, 1, ta, m.mat)
		blas64.Trsm(blas.Left, blas.Trans, 1, ta, m.mat)
	default:
		panic(badTriangle)
	}
}
// SolveCholeskyVec finds the vector v that solves A * v = b where A = L * L^T or
// A = U^T * U, and U or L are represented by t, placing the result in the
// receiver.
//
// It panics when the sizes of t and b do not match, or with badTriangle when
// t is neither upper nor lower triangular.
func (v *Vector) SolveCholeskyVec(t Triangular, b *Vector) {
	_, n := t.Dims()
	vn := b.Len()
	if vn != n {
		panic(ErrShape)
	}
	v.reuseAs(n)
	if v != b {
		v.CopyVec(b)
	}
	ta := getBlasTriangular(t)
	// Two triangular solves: U^T y = b then U v = y (upper), or
	// L y = b then L^T v = y (lower).
	switch ta.Uplo {
	case blas.Upper:
		blas64.Trsv(blas.Trans, ta, v.mat)
		blas64.Trsv(blas.NoTrans, ta, v.mat)
	case blas.Lower:
		blas64.Trsv(blas.NoTrans, ta, v.mat)
		blas64.Trsv(blas.Trans, ta, v.mat)
	default:
		panic(badTriangle)
	}
}
// SolveTri finds the matrix x that solves op(A) * X = B where A is a triangular
// matrix and op is specified by trans (identity when false, transpose when
// true). The result is stored in the receiver.
//
// It panics when the sizes of a and b do not match, or with badTriangle when
// a is neither upper nor lower triangular.
func (m *Dense) SolveTri(a Triangular, trans bool, b Matrix) {
	n, _ := a.Triangle()
	bm, bn := b.Dims()
	if n != bm {
		panic(ErrShape)
	}
	m.reuseAs(bm, bn)
	if b != m {
		m.Copy(b)
	}
	// TODO(btracey): Implement an algorithm that doesn't require a copy into
	// a blas64.Triangular.
	ta := getBlasTriangular(a)
	t := blas.NoTrans
	if trans {
		t = blas.Trans
	}
	// A single triangular solve handles both orientations; only an invalid
	// Uplo is rejected.
	switch ta.Uplo {
	case blas.Upper, blas.Lower:
		blas64.Trsm(blas.Left, t, 1, ta, m.mat)
	default:
		panic(badTriangle)
	}
} | vendor/github.com/gonum/matrix/mat64/cholesky.go | 0.627837 | 0.560974 | cholesky.go | starcoder |
package math
import (
"math"
)
// Delta represents a move between two pixel positions.
type Delta struct {
	DX, DY int
}

// Norm0 returns the L∞ (Chebyshev) norm: max(|DX|, |DY|).
func (d Delta) Norm0() int {
	ax, ay := d.DX, d.DY
	if ax < 0 {
		ax = -ax
	}
	if ay < 0 {
		ay = -ay
	}
	if ax > ay {
		return ax
	}
	return ay
}

// Norm1 returns the L1 (Manhattan) norm: |DX| + |DY|.
func (d Delta) Norm1() int {
	ax, ay := d.DX, d.DY
	if ax < 0 {
		ax = -ax
	}
	if ay < 0 {
		ay = -ay
	}
	return ax + ay
}

// Length2 returns the squared Euclidean length as an int64.
func (d Delta) Length2() int64 {
	dx, dy := int64(d.DX), int64(d.DY)
	return dx*dx + dy*dy
}

// Length returns the Euclidean length.
func (d Delta) Length() float64 {
	return math.Sqrt(float64(d.Length2()))
}
// LengthFixed returns the Euclidean length as a fixed-point value, computed
// as the fixed-point square root of Length2.
func (d Delta) LengthFixed() Fixed {
	return NewFixedInt64(d.Length2()).Sqrt()
}
// Add returns the component-wise sum d + d2.
func (d Delta) Add(d2 Delta) Delta {
	d.DX += d2.DX
	d.DY += d2.DY
	return d
}

// Sub returns the component-wise difference d - d2.
func (d Delta) Sub(d2 Delta) Delta {
	d.DX -= d2.DX
	d.DY -= d2.DY
	return d
}

// Mul scales both components by n.
func (d Delta) Mul(n int) Delta {
	return d.Mul2(n, n)
}

// Mul2 scales DX by mx and DY by my independently.
func (d Delta) Mul2(mx, my int) Delta {
	d.DX *= mx
	d.DY *= my
	return d
}

// Div divides both components by m using the package's Div helper.
func (d Delta) Div(m int) Delta {
	d.DX = Div(d.DX, m)
	d.DY = Div(d.DY, m)
	return d
}
// MulFixed scales both components by the fixed-point factor f, rounding each
// result to the nearest int.
func (d Delta) MulFixed(f Fixed) Delta {
	return Delta{DX: NewFixed(d.DX).Mul(f).Rint(), DY: NewFixed(d.DY).Mul(f).Rint()}
}

// MulFracFixed scales both components by the fixed-point fraction num/denom,
// rounding each result to the nearest int.
func (d Delta) MulFracFixed(num, denom Fixed) Delta {
	return Delta{DX: NewFixed(d.DX).MulFrac(num, denom).Rint(), DY: NewFixed(d.DY).MulFrac(num, denom).Rint()}
}

// WithLengthFixed rescales d to have length f. The zero delta is returned
// unchanged, as its direction is undefined.
func (d Delta) WithLengthFixed(f Fixed) Delta {
	n := d.LengthFixed()
	if n == 0 {
		return d
	}
	return d.MulFracFixed(f, n)
}

// WithMaxLengthFixed rescales d down to length f when it is longer than f;
// shorter deltas are returned unchanged.
func (d Delta) WithMaxLengthFixed(f Fixed) Delta {
	n := d.LengthFixed()
	if n <= f {
		return d
	}
	return d.MulFracFixed(f, n)
}
// North returns the unit delta Delta{0, -1}.
func North() Delta {
	return Delta{DY: -1}
}

// East returns the unit delta Delta{1, 0}.
func East() Delta {
	return Delta{DX: 1}
}

// South returns the unit delta Delta{0, 1}.
func South() Delta {
	return Delta{DY: 1}
}

// West returns the unit delta Delta{-1, 0}.
func West() Delta {
	return Delta{DX: -1}
}

// Dot returns the dot product of d and d2.
func (d Delta) Dot(d2 Delta) int {
	return d.DX*d2.DX + d.DY*d2.DY
}

// IsZero reports whether both components are zero.
func (d Delta) IsZero() bool {
	return d.DX == 0 && d.DY == 0
} | internal/math/delta.go | 0.777933 | 0.561455 | delta.go | starcoder |
package DG2D
import (
"fmt"
"github.com/notargets/gocfd/utils"
)
// LagrangeElement2D is a nodal Lagrange reference element on the 2D simplex
// (triangle).
type LagrangeElement2D struct {
	// N is the polynomial order; Nfp = N+1 face points,
	// Np = (N+1)(N+2)/2 nodal points, NFaces = 3 triangle faces.
	N, Nfp, Np, NFaces int
	// R, S are the nodal point coordinates on the reference triangle.
	R, S utils.Vector
	// Dr, Ds are the differentiation matrices with respect to r and s.
	Dr, Ds utils.Matrix
	MassMatrix utils.Matrix
	// Cub holds the attached cubature rule; nil until NewCube2D is called.
	Cub *Cubature
	JB2D *JacobiBasis2D
}

// Cubature stores a 2D cubature (multidimensional quadrature) rule together
// with matrices derived from it.
type Cubature struct {
	r, s, w utils.Vector
	W utils.Matrix
	V, Dr, Ds, VT, DrT, DsT utils.Matrix
	x, y, rx, sx, ry, sy, J utils.Matrix
	mm, mmCHOL utils.Matrix
}

// NodeType selects the nodal point distribution used by NewLagrangeElement2D.
type NodeType string

const (
	Epsilon   = "Epsilon"
	Hesthaven = "Hesthaven"
)
// NewLagrangeElement2D constructs a 2D Lagrange element of polynomial order N
// using the requested nodal distribution.
// It panics when N is negative or when nodeType is not a known distribution
// (previously an unknown nodeType silently left R and S unset).
func NewLagrangeElement2D(N int, nodeType NodeType) (el *LagrangeElement2D) {
	// Validate before doing any work.
	if N < 0 {
		panic(fmt.Errorf("Polynomial order must be >= 0, have %d", N))
	}
	el = &LagrangeElement2D{
		N:      N,
		Nfp:    N + 1,
		Np:     (N + 1) * (N + 2) / 2,
		NFaces: 3,
	}
	// Compute nodal set
	switch nodeType {
	case Epsilon:
		el.R, el.S = NodesEpsilon(el.N)
	case Hesthaven:
		el.R, el.S = XYtoRS(Nodes2D(el.N))
	default:
		panic(fmt.Errorf("unknown node type %q", nodeType))
	}
	// Build reference element matrices
	el.JB2D = NewJacobiBasis2D(el.N, el.R, el.S)
	// Mass matrix M = Vinv^T * Vinv.
	el.MassMatrix = el.JB2D.Vinv.Transpose().Mul(el.JB2D.Vinv)
	// Initialize the (r,s) differentiation matrices on the simplex, evaluated
	// at (r,s) at order N.
	el.Dr, el.Ds = el.GetDerivativeMatrices(el.R, el.S)
	// Mark fields read only
	el.MassMatrix.SetReadOnly("MassMatrix")
	el.Dr.SetReadOnly("Dr")
	el.Ds.SetReadOnly("Ds")
	return
}
// GetDerivativeMatrices returns the (r,s) differentiation matrices evaluated
// at the nodal points given by R and S.
func (el *LagrangeElement2D) GetDerivativeMatrices(R, S utils.Vector) (Dr, Ds utils.Matrix) {
	Vr, Vs := el.JB2D.GradVandermonde2D(el.N, R, S)
	Dr = Vr.Mul(el.JB2D.Vinv)
	Ds = Vs.Mul(el.JB2D.Vinv)
	return
}
// NewCube2D attaches a cubature (multidimensional quadrature) rule to the
// element, able to integrate polynomials up to order COrder exactly.
// Orders above 28 are clamped to 28, the highest tabulated rule.
//
// The previous implementation re-tested COrder <= 28 after the clamp, leaving
// an unreachable error branch; that dead code has been removed.
func (el *LagrangeElement2D) NewCube2D(COrder int) {
	// Only rules up to order 28 are tabulated; clamp higher requests.
	if COrder > 28 {
		COrder = 28
	}
	// Tabulated data is a flat (r, s, w) triple per cubature point.
	cub2d := getCub(COrder)
	nr := len(cub2d) / 3
	cubMat := utils.NewMatrix(nr, 3, cub2d)
	el.Cub = &Cubature{
		r: cubMat.Col(0),
		s: cubMat.Col(1),
		w: cubMat.Col(2),
	}
	return
} | DG2D/LagrangeElement.go | 0.621311 | 0.581303 | LagrangeElement.go | starcoder |
package awsemfexporter
import (
"time"
"go.opentelemetry.io/collector/model/pdata"
"go.uber.org/zap"
aws "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics"
)
// deltaMetricCalculator tracks previously seen float values so cumulative
// metrics can be converted into deltas.
var deltaMetricCalculator = aws.NewFloat64DeltaCalculator()

// summaryMetricCalculator does the same for summary (sum, count) pairs.
var summaryMetricCalculator = aws.NewMetricCalculator(calculateSummaryDelta)

// calculateSummaryDelta computes the (sum, count) delta of a summary metric
// against its previously observed value. The first observation only seeds the
// baseline and is reported as not retained (false).
func calculateSummaryDelta(prev *aws.MetricValue, val interface{}, timestampMs time.Time) (interface{}, bool) {
	entry := val.(summaryMetricEntry)
	if prev == nil {
		return summaryMetricEntry{entry.sum, entry.count}, false
	}
	prevEntry := prev.RawValue.(summaryMetricEntry)
	return summaryMetricEntry{entry.sum - prevEntry.sum, entry.count - prevEntry.count}, true
}
// dataPoint represents a processed metric data point
type dataPoint struct {
	// value holds either a float64 (number metrics) or a *cWMetricStats
	// (histogram and summary metrics).
	value interface{}
	// labels are the merged dimension labels for this point.
	labels map[string]string
	// timestampMs is the point's timestamp in milliseconds.
	timestampMs int64
}

// dataPoints is a wrapper interface for:
//   - pdata.NumberDataPointSlice
//   - pdata.histogramDataPointSlice
//   - pdata.summaryDataPointSlice
type dataPoints interface {
	Len() int
	// At gets the adjusted datapoint from the DataPointSlice at i-th index.
	// dataPoint: the adjusted data point
	// retained: indicates whether the data point is valid for further process
	// NOTE: It is an expensive call as it calculates the metric value.
	At(i int) (dataPoint dataPoint, retained bool)
}
// deltaMetricMetadata contains the metadata required to perform rate/delta calculation.
type deltaMetricMetadata struct {
	// adjustToDelta is true when cumulative values must be converted to deltas.
	adjustToDelta bool
	metricName    string
	timestampMs   int64
	namespace     string
	logGroup      string
	logStream     string
}

// mergeLabels returns the user labels merged with the delta-calculation
// identity labels (namespace, log group, log stream). User labels override
// the identity labels on key collision.
func mergeLabels(m deltaMetricMetadata, labels map[string]string) map[string]string {
	// Pre-size for the three identity labels plus the user labels.
	result := make(map[string]string, len(labels)+3)
	result["namespace"] = m.namespace
	result["logGroup"] = m.logGroup
	result["logStream"] = m.logStream
	for k, v := range labels {
		result[k] = v
	}
	return result
}
// numberDataPointSlice is a wrapper for pdata.NumberDataPointSlice
type numberDataPointSlice struct {
	instrumentationLibraryName string
	deltaMetricMetadata
	pdata.NumberDataPointSlice
}

// histogramDataPointSlice is a wrapper for pdata.histogramDataPointSlice
type histogramDataPointSlice struct {
	instrumentationLibraryName string
	pdata.HistogramDataPointSlice
}

// summaryDataPointSlice is a wrapper for pdata.summaryDataPointSlice
type summaryDataPointSlice struct {
	instrumentationLibraryName string
	deltaMetricMetadata
	pdata.SummaryDataPointSlice
}

// summaryMetricEntry is the raw (sum, count) pair tracked for summary metrics
// so that deltas can be computed between observations.
type summaryMetricEntry struct {
	sum   float64
	count uint64
}
// At retrieves the NumberDataPoint at the given index and performs rate/delta calculation if necessary.
func (dps numberDataPointSlice) At(i int) (dataPoint, bool) {
	metric := dps.NumberDataPointSlice.At(i)
	labels := createLabels(metric.Attributes(), dps.instrumentationLibraryName)
	timestampMs := unixNanoToMilliseconds(metric.Timestamp())
	// Both int and double points are normalized to float64.
	var metricVal float64
	switch metric.Type() {
	case pdata.MetricValueTypeDouble:
		metricVal = metric.DoubleVal()
	case pdata.MetricValueTypeInt:
		metricVal = float64(metric.IntVal())
	}
	retained := true
	// Cumulative metrics are converted to deltas; the first observation only
	// seeds the calculator and is dropped (retained == false).
	if dps.adjustToDelta {
		var deltaVal interface{}
		deltaVal, retained = deltaMetricCalculator.Calculate(dps.metricName, mergeLabels(dps.deltaMetricMetadata, labels),
			metricVal, metric.Timestamp().AsTime())
		if !retained {
			return dataPoint{}, retained
		}
		// It should not happen in practice that the previous metric value is smaller than the current one.
		// If it happens, we assume that the metric is reset for some reason.
		if deltaVal.(float64) >= 0 {
			metricVal = deltaVal.(float64)
		}
	}
	return dataPoint{
		value:       metricVal,
		labels:      labels,
		timestampMs: timestampMs,
	}, retained
}

// At retrieves the HistogramDataPoint at the given index.
// Histogram points are always retained; no delta conversion is applied.
func (dps histogramDataPointSlice) At(i int) (dataPoint, bool) {
	metric := dps.HistogramDataPointSlice.At(i)
	labels := createLabels(metric.Attributes(), dps.instrumentationLibraryName)
	timestamp := unixNanoToMilliseconds(metric.Timestamp())
	return dataPoint{
		value: &cWMetricStats{
			Count: metric.Count(),
			Sum:   metric.Sum(),
		},
		labels:      labels,
		timestampMs: timestamp,
	}, true
}

// At retrieves the SummaryDataPoint at the given index, converting cumulative
// (sum, count) pairs to deltas when adjustToDelta is set.
func (dps summaryDataPointSlice) At(i int) (dataPoint, bool) {
	metric := dps.SummaryDataPointSlice.At(i)
	labels := createLabels(metric.Attributes(), dps.instrumentationLibraryName)
	timestampMs := unixNanoToMilliseconds(metric.Timestamp())
	sum := metric.Sum()
	count := metric.Count()
	retained := true
	// As for numbers, the first observation only seeds the calculator.
	if dps.adjustToDelta {
		var delta interface{}
		delta, retained = summaryMetricCalculator.Calculate(dps.metricName, mergeLabels(dps.deltaMetricMetadata, labels),
			summaryMetricEntry{metric.Sum(), metric.Count()}, metric.Timestamp().AsTime())
		if !retained {
			return dataPoint{}, retained
		}
		summaryMetricDelta := delta.(summaryMetricEntry)
		sum = summaryMetricDelta.sum
		count = summaryMetricDelta.count
	}
	metricVal := &cWMetricStats{
		Count: count,
		Sum:   sum,
	}
	// NOTE(review): assumes quantile values are ordered so the first entry is
	// the minimum and the last the maximum — confirm upstream guarantees this.
	if quantileValues := metric.QuantileValues(); quantileValues.Len() > 0 {
		metricVal.Min = quantileValues.At(0).Value()
		metricVal.Max = quantileValues.At(quantileValues.Len() - 1).Value()
	}
	return dataPoint{
		value:       metricVal,
		labels:      labels,
		timestampMs: timestampMs,
	}, retained
}
// createLabels converts OTel AttributesMap attributes to a map
// and optionally adds in the OTel instrumentation library name
// (stored under the package-level oTellibDimensionKey).
func createLabels(attributes pdata.AttributeMap, instrLibName string) map[string]string {
	// Pre-size for the attributes plus the optional library-name label.
	labels := make(map[string]string, attributes.Len()+1)
	attributes.Range(func(k string, v pdata.AttributeValue) bool {
		labels[k] = v.AsString()
		return true
	})
	// Add OTel instrumentation lib name as an additional label if it is defined
	if instrLibName != noInstrumentationLibraryName {
		labels[oTellibDimensionKey] = instrLibName
	}
	return labels
}
// getDataPoints retrieves data points from OT Metric.
// It returns nil for a nil metric or for an unhandled data type (which is
// logged as a warning).
func getDataPoints(pmd *pdata.Metric, metadata cWMetricMetadata, logger *zap.Logger) (dps dataPoints) {
	if pmd == nil {
		return
	}
	// Delta conversion is off by default; individual data types enable it
	// below where appropriate.
	adjusterMetadata := deltaMetricMetadata{
		false,
		pmd.Name(),
		metadata.timestampMs,
		metadata.namespace,
		metadata.logGroup,
		metadata.logStream,
	}
	switch pmd.DataType() {
	case pdata.MetricDataTypeGauge:
		metric := pmd.Gauge()
		dps = numberDataPointSlice{
			metadata.instrumentationLibraryName,
			adjusterMetadata,
			metric.DataPoints(),
		}
	case pdata.MetricDataTypeSum:
		metric := pmd.Sum()
		// Only cumulative sums need conversion to deltas.
		adjusterMetadata.adjustToDelta = metric.AggregationTemporality() == pdata.AggregationTemporalityCumulative
		dps = numberDataPointSlice{
			metadata.instrumentationLibraryName,
			adjusterMetadata,
			metric.DataPoints(),
		}
	case pdata.MetricDataTypeHistogram:
		metric := pmd.Histogram()
		dps = histogramDataPointSlice{
			metadata.instrumentationLibraryName,
			metric.DataPoints(),
		}
	case pdata.MetricDataTypeSummary:
		metric := pmd.Summary()
		// For summaries coming from the prometheus receiver, the sum and count are cumulative, whereas for summaries
		// coming from other sources, e.g. SDK, the sum and count are delta by being accumulated and reset periodically.
		// In order to ensure metrics are sent as deltas, we check the receiver attribute (which can be injected by
		// attribute processor) from resource metrics. If it exists, and equals to prometheus, the sum and count will be
		// converted.
		// For more information: https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/prometheusreceiver/DESIGN.md#summary
		adjusterMetadata.adjustToDelta = metadata.receiver == prometheusReceiver
		dps = summaryDataPointSlice{
			metadata.instrumentationLibraryName,
			adjusterMetadata,
			metric.DataPoints(),
		}
	default:
		logger.Warn("Unhandled metric data type.",
			zap.String("DataType", pmd.DataType().String()),
			zap.String("Name", pmd.Name()),
			zap.String("Unit", pmd.Unit()),
		)
	}
	return
} | exporter/awsemfexporter/datapoint.go | 0.774328 | 0.572364 | datapoint.go | starcoder |
package values
import (
"fmt"
"reflect"
"github.com/ppapapetrou76/go-testing/types"
)
// AnyValue is a struct that holds any type of value.
type AnyValue struct {
	// value is the wrapped value; it may be nil or of any dynamic type.
	value interface{}
}
// IsEqualTo returns true if the value is equal to the expected value, else false.
// The expected value's type selects a typed comparator; anything not covered
// by a case falls back to reflect.DeepEqual.
// NOTE(review): `case struct{}:` matches only the empty struct type struct{},
// and `case []string:` only string slices — all other structs and slices take
// the DeepEqual fallback. Confirm this dispatch is intentional.
func (s AnyValue) IsEqualTo(expected interface{}) bool {
	var comparable types.Comparable
	// Values of different dynamic kinds are never equal.
	if reflect.ValueOf(s.Value()).Kind() != reflect.ValueOf(expected).Kind() {
		return false
	}
	switch expected.(type) {
	case string:
		comparable = NewStringValue(s.value)
	case int, int8, int16, int32, int64:
		comparable = NewIntValue(s.value)
	case uint, uint8, uint16, uint32, uint64:
		comparable = NewUIntValue(s.value)
	case bool:
		comparable = NewBoolValue(s.value)
	case struct{}:
		comparable = NewStructValue(s.value)
	case []string:
		comparable = NewSliceValue(s.value)
	default:
		return reflect.DeepEqual(s.value, expected)
	}
	return comparable.IsEqualTo(expected)
}
// Value returns the actual value of the structure.
func (s AnyValue) Value() interface{} {
	return s.value
}
// compareOrdered compares the held value with expected and reports their
// ordering (-1, 0, +1). ok is false when the two dynamic kinds differ or the
// kind is not an ordered type (signed/unsigned integers, floats, strings).
//
// The previous implementations of the four comparison methods below all
// returned `s.value != expected`, contradicting their documented semantics.
func (s AnyValue) compareOrdered(expected interface{}) (order int, ok bool) {
	a := reflect.ValueOf(s.value)
	b := reflect.ValueOf(expected)
	if a.Kind() != b.Kind() {
		return 0, false
	}
	switch a.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		av, bv := a.Int(), b.Int()
		switch {
		case av < bv:
			return -1, true
		case av > bv:
			return 1, true
		}
		return 0, true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		av, bv := a.Uint(), b.Uint()
		switch {
		case av < bv:
			return -1, true
		case av > bv:
			return 1, true
		}
		return 0, true
	case reflect.Float32, reflect.Float64:
		av, bv := a.Float(), b.Float()
		switch {
		case av < bv:
			return -1, true
		case av > bv:
			return 1, true
		}
		return 0, true
	case reflect.String:
		av, bv := a.String(), b.String()
		switch {
		case av < bv:
			return -1, true
		case av > bv:
			return 1, true
		}
		return 0, true
	default:
		return 0, false
	}
}

// IsGreaterThan returns true if the value is greater than the expected value, else false.
// It also returns false when the values are not of the same ordered kind.
func (s AnyValue) IsGreaterThan(expected interface{}) bool {
	c, ok := s.compareOrdered(expected)
	return ok && c > 0
}

// IsGreaterOrEqualTo returns true if the value is greater than or equal to the expected value, else false.
// It also returns false when the values are not of the same ordered kind.
func (s AnyValue) IsGreaterOrEqualTo(expected interface{}) bool {
	c, ok := s.compareOrdered(expected)
	return ok && c >= 0
}

// IsLessThan returns true if the value is less than the expected value, else false.
// It also returns false when the values are not of the same ordered kind.
func (s AnyValue) IsLessThan(expected interface{}) bool {
	c, ok := s.compareOrdered(expected)
	return ok && c < 0
}

// IsLessOrEqualTo returns true if the value is less than or equal to the expected value, else false.
// It also returns false when the values are not of the same ordered kind.
func (s AnyValue) IsLessOrEqualTo(expected interface{}) bool {
	c, ok := s.compareOrdered(expected)
	return ok && c <= 0
}
// IsNil returns true if the value is nil, else false. Typed nil pointers,
// maps, chans, slices, funcs and unsafe pointers held in the interface are
// also reported as nil.
//
// The previous case list included reflect.Array, on which reflect.Value.IsNil
// panics, and omitted reflect.Func/reflect.UnsafePointer, so nil funcs were
// misreported as non-nil.
func (s AnyValue) IsNil() bool {
	if s.value == nil {
		return true
	}
	// Only nil-able kinds may be passed to Value.IsNil.
	// nolint:exhaustive //covered by default case
	switch reflect.TypeOf(s.value).Kind() {
	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice, reflect.Func, reflect.UnsafePointer:
		return reflect.ValueOf(s.value).IsNil()
	default:
		return false
	}
}

// IsNotNil returns true if the value is not nil, else false.
func (s AnyValue) IsNotNil() bool {
	return !s.IsNil()
}
// HasTypeOf returns true if the value is of the given type else false.
// The comparison is exact reflect.Type identity; assignability or interface
// satisfaction is not considered.
func (s AnyValue) HasTypeOf(t reflect.Type) bool {
	return reflect.TypeOf(s.value) == t
}
// NewAnyValue creates and returns an AnyValue struct initialized with the given value.
// NOTE(review): the `case interface{}` arm matches every non-nil value, so the
// default arm (and its panic) is unreachable — the function always succeeds.
func NewAnyValue(value interface{}) AnyValue {
	switch v := value.(type) {
	case nil:
		return AnyValue{value: v}
	case interface{}:
		return AnyValue{value: v}
	default:
		// Unreachable: kept so the existing fmt import stays in use.
		panic(fmt.Sprintf("expected interface{} value type but got %T type", v))
	}
} | internal/pkg/values/any_value.go | 0.853806 | 0.607838 | any_value.go | starcoder |
package cryptolib
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math/big"
"strconv"
)
// GetBlockSubsidyForHeight returns the block reward in satoshis at the given
// block height: the initial 50 BTC subsidy halves every 210,000 blocks and is
// zero once 64 halvings have occurred.
func GetBlockSubsidyForHeight(height uint64) uint64 {
	const halvingInterval = 210000
	halvings := height / halvingInterval
	// After 64 halvings the right shift below would be undefined, and the
	// reward is simply zero.
	if halvings >= 64 {
		return 0
	}
	return uint64(50*1e8) >> halvings
}
// DifficultyFromBits returns the mining difficulty from the nBits field in the
// block header. bits must be the 4-byte big-endian compact target as an
// 8-character hex string; e.g. "1d00ffff" (the genesis target) yields 1.0.
//
// The previous implementation ignored the hex-decode error and panicked in
// binary.BigEndian.Uint32 on inputs shorter than 4 bytes.
func DifficultyFromBits(bits string) (float64, error) {
	b, err := hex.DecodeString(bits)
	if err != nil {
		return 0, fmt.Errorf("invalid bits %q: %v", bits, err)
	}
	if len(b) != 4 {
		return 0, fmt.Errorf("invalid bits %q: expected 4 bytes, got %d", bits, len(b))
	}
	ib := binary.BigEndian.Uint32(b)
	return targetToDifficulty(toCompactSize(ib))
}

// toCompactSize expands the 4-byte compact (nBits) representation into the
// full target: mantissa (low 3 bytes) * 2^(8*(exponent-3)).
func toCompactSize(bits uint32) *big.Int {
	t := big.NewInt(int64(bits % 0x01000000))
	t.Mul(t, big.NewInt(2).Exp(big.NewInt(2), big.NewInt(8*(int64(bits/0x01000000)-3)), nil))
	return t
}

// targetToDifficulty returns genesisTarget / target, the conventional
// difficulty measure.
func targetToDifficulty(target *big.Int) (float64, error) {
	a := float64(0xFFFF0000000000000000000000000000000000000000000000000000) // genesis difficulty
	b, err := strconv.ParseFloat(target.String(), 64)
	if err != nil {
		return 0.0, err
	}
	return a / b, nil
}
// GetLittleEndianBytes returns a byte array of length l whose first 4 bytes
// hold v in little-endian order; any remaining bytes are zero.
// NOTE(review): PutUint32 panics when l < 4 — confirm callers always pass
// l >= 4, or add a guard.
func GetLittleEndianBytes(v uint32, l uint32) []byte {
	// TODO: is v hex encoded?
	buf := make([]byte, l)
	binary.LittleEndian.PutUint32(buf, v)
	return buf
}
// VarInt takes an unsigned integer and returns a byte array in VarInt format.
// Values below 0xfd fit in one byte; larger values get a 0xfd/0xfe/0xff
// marker followed by a 2-, 4- or 8-byte little-endian payload.
// See http://learnmeabitcoin.com/glossary/varint
func VarInt(i uint64) []byte {
	switch {
	case i < 0xfd:
		return []byte{byte(i)}
	case i < 0x10000:
		b := make([]byte, 3)
		b[0] = 0xfd
		binary.LittleEndian.PutUint16(b[1:], uint16(i))
		return b
	case i < 0x100000000:
		b := make([]byte, 5)
		b[0] = 0xfe
		binary.LittleEndian.PutUint32(b[1:], uint32(i))
		return b
	default:
		b := make([]byte, 9)
		b[0] = 0xff
		binary.LittleEndian.PutUint64(b[1:], i)
		return b
	}
}
// DecodeVarInt takes a byte array in VarInt format and returns the decoded
// unsigned integer value and its encoded size in bytes.
// See http://learnmeabitcoin.com/glossary/varint
func DecodeVarInt(b []byte) (result uint64, size int) {
	switch b[0] {
	case 0xff:
		return binary.LittleEndian.Uint64(b[1:9]), 9
	case 0xfe:
		return uint64(binary.LittleEndian.Uint32(b[1:5])), 5
	case 0xfd:
		return uint64(binary.LittleEndian.Uint16(b[1:3])), 3
	default:
		// A leading byte below 0xfd is the value itself.
		return uint64(b[0]), 1
	}
}
// EncodeParts takes a slice of slices and returns a single slice with the appropriate OP_PUSH commands embedded.
// Each part is prefixed with the smallest push opcode able to carry its
// length; parts longer than 0xFFFFFFFF bytes yield an error.
func EncodeParts(parts [][]byte) ([]byte, error) {
	out := make([]byte, 0)
	for i, part := range parts {
		n := len(part)
		switch {
		case n <= 75:
			// Direct push: the opcode itself encodes the length.
			out = append(out, byte(n))
		case n <= 0xFF:
			out = append(out, 0x4c, byte(n)) // OP_PUSHDATA1
		case n <= 0xFFFF:
			lenBuf := make([]byte, 2)
			binary.LittleEndian.PutUint16(lenBuf, uint16(n))
			out = append(out, 0x4d) // OP_PUSHDATA2
			out = append(out, lenBuf...)
		case int64(n) <= 0xFFFFFFFF:
			lenBuf := make([]byte, 4)
			binary.LittleEndian.PutUint32(lenBuf, uint32(n))
			out = append(out, 0x4e) // OP_PUSHDATA4
			out = append(out, lenBuf...)
		default:
			return nil, fmt.Errorf("Part %d is too big", i)
		}
		out = append(out, part...)
	}
	return out, nil
}
// DecodeStringParts hex-decodes s and splits the result with DecodeParts.
func DecodeStringParts(s string) ([][]byte, error) {
	raw, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	return DecodeParts(raw)
}
// DecodeParts splits a serialized script into its data parts: each PUSHDATA
// opcode yields the pushed bytes, plain pushes (opcode 0x01-0x4b) yield their
// payload, and any other single byte becomes a one-byte part of its own.
// Truncated input returns the parts decoded so far together with an error.
func DecodeParts(b []byte) ([][]byte, error) {
	var r [][]byte
	for len(b) > 0 {
		// Handle OP codes
		switch b[0] {
		case OpPUSHDATA1:
			// A 1-byte length follows the opcode.
			if len(b) < 2 {
				return r, errors.New("Not enough data")
			}
			l := uint64(b[1])
			if len(b) < int(2+l) {
				return r, errors.New("Not enough data")
			}
			part := b[2 : 2+l]
			r = append(r, part)
			b = b[2+l:]
		case OpPUSHDATA2:
			// A 2-byte little-endian length follows the opcode.
			if len(b) < 3 {
				return r, errors.New("Not enough data")
			}
			l := binary.LittleEndian.Uint16(b[1:])
			if len(b) < int(3+l) {
				return r, errors.New("Not enough data")
			}
			part := b[3 : 3+l]
			r = append(r, part)
			b = b[3+l:]
		case OpPUSHDATA4:
			// A 4-byte little-endian length follows the opcode.
			if len(b) < 5 {
				return r, errors.New("Not enough data")
			}
			l := binary.LittleEndian.Uint32(b[1:])
			if len(b) < int(5+l) {
				return r, errors.New("Not enough data")
			}
			part := b[5 : 5+l]
			r = append(r, part)
			b = b[5+l:]
		default:
			// Plain push: the opcode itself is the payload length. (The
			// PUSHDATA opcodes never reach here; they are handled above.)
			if b[0] >= 0x01 && b[0] <= 0x4e {
				l := uint8(b[0])
				if len(b) < int(1+l) {
					return r, errors.New("Not enough data")
				}
				part := b[1 : l+1]
				r = append(r, part)
				b = b[1+l:]
			} else {
				// Anything else (e.g. non-push opcodes) becomes a
				// single-byte part.
				r = append(r, []byte{b[0]})
				b = b[1:]
			}
		}
	}
	return r, nil
} | vendor/bitbucket.org/simon_ordish/cryptolib/utils.go | 0.59561 | 0.416441 | utils.go | starcoder |
// Package rla provides an implementation of RLA (Recurrent Linear Attention).
// See: "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention" by Katharopoulos et al., 2020.
// TODO: support arbitrary mapping functions
package rla
import (
"encoding/gob"
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/ml/ag"
"github.com/nlpodyssey/spago/pkg/ml/nn"
"log"
)
// Compile-time assertion that *Model implements the nn.Model interface.
var (
	_ nn.Model = &Model{}
)
// Config provides configuration settings for a RLA Model.
type Config struct {
	// InputSize is the dimensionality of each input vector; keys, values and
	// queries all share this size.
	InputSize int
}

// Model contains the serializable parameters for an RLA neural network.
type Model struct {
	nn.BaseModel
	Config
	Wk nn.Param `spago:"type:weights"`
	Bk nn.Param `spago:"type:biases"`
	Wv nn.Param `spago:"type:weights"`
	Bv nn.Param `spago:"type:biases"`
	Wq nn.Param `spago:"type:weights"`
	Bq nn.Param `spago:"type:biases"`
	// States collects one State per processed input, oldest first.
	States []*State `spago:"scope:processor"`
}

// State represent a state of the RLA recurrent network.
type State struct {
	// S is the running accumulator of mapped-key/value outer products.
	S ag.Node
	// Z is the running sum of mapped keys (the attention normalizer).
	Z ag.Node
	// Y is the output emitted at this step.
	Y ag.Node
}
// init registers Model with gob so models can be (de)serialized.
func init() {
	gob.Register(&Model{})
}
// New returns a new RLA Model, initialized according to the given configuration.
// All weight matrices are square (InputSize×InputSize) and all biases are
// vectors of length InputSize.
func New(config Config) *Model {
	in := config.InputSize
	return &Model{
		Config: config,
		Wk:     nn.NewParam(mat.NewEmptyDense(in, in)),
		Bk:     nn.NewParam(mat.NewEmptyVecDense(in)),
		Wv:     nn.NewParam(mat.NewEmptyDense(in, in)),
		Bv:     nn.NewParam(mat.NewEmptyVecDense(in)),
		Wq:     nn.NewParam(mat.NewEmptyDense(in, in)),
		Bq:     nn.NewParam(mat.NewEmptyVecDense(in)),
	}
}
// SetInitialState sets the initial state of the recurrent network.
// It panics if one or more states are already present.
func (m *Model) SetInitialState(state *State) {
	if len(m.States) > 0 {
		// The message previously said "lstm:", a copy-paste leftover from the
		// LSTM implementation; this is the rla package.
		log.Fatal("rla: the initial state must be set before any input")
	}
	m.States = append(m.States, state)
}
// Forward performs the forward step for each input node and returns the
// outputs, one per input, appending the produced states to m.States.
func (m *Model) Forward(xs ...ag.Node) []ag.Node {
	ys := make([]ag.Node, len(xs))
	for i := range xs {
		state := m.forward(xs[i])
		m.States = append(m.States, state)
		ys[i] = state.Y
	}
	return ys
}
// LastState returns the most recent state of the recurrent network, or nil
// when no input has been processed yet.
func (m *Model) LastState() *State {
	if len(m.States) == 0 {
		return nil
	}
	return m.States[len(m.States)-1]
}
// forward runs one recurrence step for input x and returns the new state.
// With φ the mapping function (see defaultMappingFunction):
//
//	S_t = S_{t-1} + φ(k_t) v_t^T
//	Z_t = Z_{t-1} + φ(k_t)
//	y_t = (φ(q_t)^T S_t)^T / (φ(q_t)·Z_t + 1e-12)
//
// where the small constant guards against division by zero.
func (m *Model) forward(x ag.Node) (s *State) {
	g := m.Graph()
	s = new(State)
	key := nn.Affine(g, m.Bk, m.Wk, x)
	value := nn.Affine(g, m.Bv, m.Wv, x)
	query := nn.Affine(g, m.Bq, m.Wq, x)
	attKey := defaultMappingFunction(g, key)
	attQuery := defaultMappingFunction(g, query)
	// The first step has no previous state: start the accumulators fresh.
	if prevState := m.LastState(); prevState != nil {
		s.S = g.Add(prevState.S, g.Mul(attKey, g.T(value)))
		s.Z = g.Add(prevState.Z, attKey)
	} else {
		s.S = g.Mul(attKey, g.T(value))
		s.Z = attKey
	}
	s.Y = g.DivScalar(g.T(g.Mul(g.T(attQuery), s.S)), g.AddScalar(g.Dot(attQuery, s.Z), g.Constant(1e-12)))
	return
}
// defaultMappingFunction applies the feature map φ(x) = ELU(x) + 1, which
// keeps all features strictly positive.
func defaultMappingFunction(g *ag.Graph, x ag.Node) ag.Node {
	return g.PositiveELU(x)
} | pkg/ml/nn/recurrent/rla/rla.go | 0.646237 | 0.50708 | rla.go | starcoder |
package main
import (
"crypto/rsa"
"fmt"
)
// PresentDto announces this peer's address to the network.
type PresentDto struct {
	Type string
	// name of the server
	Data string
}

// MakePresent wraps a server address in its "present" wire envelope.
func MakePresent(address string) PresentDto {
	dto := PresentDto{Type: "present"}
	dto.Data = address
	return dto
}
// PeerList carries the known peer addresses together with the sequencer's
// public RSA key.
type PeerList struct {
	Peers []string
	SequencerPk rsa.PublicKey
}

// PeersListDto is the wire envelope for a PeerList.
type PeersListDto struct {
	Type string
	// names of the servers
	Data PeerList
}

// MakePeersList wraps a PeerList in its "peers-list" wire envelope.
func MakePeersList(data PeerList) PeersListDto {
	dto := PeersListDto{Type: "peers-list"}
	dto.Data = data
	return dto
}
// PeersRequestDto asks a peer for its list of known peers.
type PeersRequestDto struct {
	Type string
}

// MakePeersRequest builds the "peers-request" wire envelope.
func MakePeersRequest() PeersRequestDto {
	var dto PeersRequestDto
	dto.Type = "peers-request"
	return dto
}
// SignedSequencerBlockDto is the wire envelope for a SignedSequencerBlock.
type SignedSequencerBlockDto struct {
	Type string
	Data SignedSequencerBlock
}

// MakeSignedSequencerBlockDto wraps a signed block in its "signed-block"
// wire envelope.
func MakeSignedSequencerBlockDto(data SignedSequencerBlock) SignedSequencerBlockDto {
	dto := SignedSequencerBlockDto{Type: "signed-block"}
	dto.Data = data
	return dto
}

// SignedSequencerBlock pairs a sequencer block with its signature bytes.
type SignedSequencerBlock struct {
	Signature []byte
	Block SequencerBlock
}

// SequencerBlock orders a batch of transaction IDs at a given block number.
type SequencerBlock struct {
	BlockNumber int
	TransactionIds []string
}
// AccountSetupDto is the wire envelope for an AccountSetup.
type AccountSetupDto struct {
	Type string
	Data AccountSetup
}

// MakeAccountSetupDto wraps an AccountSetup in its "account-setup" envelope.
func MakeAccountSetupDto(setup AccountSetup) AccountSetupDto {
	dto := AccountSetupDto{Type: "account-setup"}
	dto.Data = setup
	return dto
}

// AccountSetup seeds an account with an initial balance.
type AccountSetup struct {
	AccountId string
	Amount int
}
// TransactionDto is the wire envelope for a SignedTransaction.
type TransactionDto struct {
	Type string
	Data SignedTransaction
}

// MakeTransactionDto copies the given transaction into its "transaction"
// wire envelope.
func MakeTransactionDto(transaction *SignedTransaction) TransactionDto {
	dto := TransactionDto{Type: "transaction"}
	dto.Data = *transaction
	return dto
}

// SignedTransaction is a transfer order between two accounts.
type SignedTransaction struct {
	ID string // Any string
	From string // A verification key coded as a string
	To string // A verification key coded as a string
	Amount int // Amount to transfer
	Signature string // Potential signature coded as string
}
// DoSignedTransaction validates t and, when valid, moves t.Amount from the
// sender's account to the recipient's, all under the ledger lock.
// A transaction is rejected (logged via PrintStatus, not applied) when the
// signature is invalid, the amount is negative, or it exceeds the sender's
// balance.
// NOTE(review): nothing here prevents the same transaction ID from being
// applied twice — confirm replay protection happens elsewhere.
func (l *Ledger) DoSignedTransaction(t *SignedTransaction) {
	l.lock.Lock()
	defer l.lock.Unlock()
	if !t.IsSignatureCorrect() {
		PrintStatus("Transaction " + t.ID + " has incorrect signature!")
	} else if t.Amount < 0 {
		PrintStatus("Transaction " + t.ID + " has incorrect negative amount!")
	} else if t.Amount > l.Accounts[t.From] {
		PrintStatus("Transaction " + t.ID + ": the transaction amount is higher than the account balance of the sender! ")
	} else {
		PrintStatus(fmt.Sprintf("Sender Balance: %d", l.Accounts[t.From]))
		PrintStatus(fmt.Sprintf("Recipient Balance: %d", l.Accounts[t.To]))
		l.Accounts[t.From] -= t.Amount
		l.Accounts[t.To] += t.Amount
		PrintStatus(fmt.Sprintf("Transaction %s performed - amount: %d", t.ID, t.Amount))
		PrintStatus(fmt.Sprintf("Sender Balance: %d", l.Accounts[t.From]))
		PrintStatus(fmt.Sprintf("Recipient Balance: %d", l.Accounts[t.To]))
	}
} | handins/8/requests.go | 0.560734 | 0.413181 | requests.go | starcoder |
package types
import (
"time"
"github.com/spf13/pflag"
)
// The following types are used internally in problem detector. In the future this could be the
// interface between node problem detector and other problem daemons.
// We added these types because:
// 1) The kubernetes api packages are too heavy.
// 2) We want to make the interface independent with kubernetes api change.
// Severity is the severity of the problem event. Now we only have 2 severity levels: Info and Warn,
// which are corresponding to the current kubernetes event types. We may want to add more severity
// levels in the future.
type Severity string
const (
// Info is translated to a normal event.
Info Severity = "info"
// Warn is translated to a warning event.
Warn Severity = "warn"
)
// ConditionStatus is the status of the condition. It mirrors the kubernetes
// node-condition status values without importing the kubernetes api packages.
type ConditionStatus string

const (
	// True means the condition status is true.
	True ConditionStatus = "True"
	// False means the condition status is false.
	False ConditionStatus = "False"
	// Unknown means the condition status is unknown.
	Unknown ConditionStatus = "Unknown"
)
// Condition is the node condition used internally by problem detector.
type Condition struct {
	// Type is the condition type. It should describe the condition of node in problem. For example
	// KernelDeadlock, OutOfResource etc.
	Type string `json:"type"`
	// Status indicates whether the node is in the condition or not.
	Status ConditionStatus `json:"status"`
	// Transition is the time when the node transitions to this condition.
	Transition time.Time `json:"transition"`
	// Reason is a short reason of why node goes into this condition.
	Reason string `json:"reason"`
	// Message is a human readable message of why node goes into this condition.
	Message string `json:"message"`
	// TaintEnabled is a boolean flag to taint the node on specific node problems.
	TaintEnabled bool `json:"taintEnabled"`
	// TaintKey is the key of the taint applied when TaintEnabled is true and the relevant problem occurred.
	TaintKey string `json:"taintKey"`
	// TaintValue is the value of the taint applied when TaintEnabled is true and the relevant problem occurred.
	TaintValue string `json:"taintValue"`
	// TaintEffect is the effect of the taint applied when TaintEnabled is true and the relevant problem occurred.
	TaintEffect string `json:"taintEffect"`
}
// Event is the event used internally by node problem detector to report a
// temporary problem occurrence.
type Event struct {
	// Severity is the severity level of the event.
	Severity Severity `json:"severity"`
	// Timestamp is the time when the event is generated.
	Timestamp time.Time `json:"timestamp"`
	// Reason is a short reason of why the event is generated.
	Reason string `json:"reason"`
	// Message is a human readable message of why the event is generated.
	Message string `json:"message"`
}
// Status is the status other problem daemons should report to node problem detector.
type Status struct {
	// Source is the name of the problem daemon.
	Source string `json:"source"`
	// Events are temporary node problem events. If the status is only a condition update,
	// this field could be nil. Notice that the events should be sorted from oldest to newest.
	Events []Event `json:"events"`
	// Conditions are the permanent node conditions. The problem daemon should always report the
	// newest node conditions in this field.
	Conditions []Condition `json:"conditions"`
}
// Type is the type of the problem: temporary problems surface as events,
// permanent problems as node condition changes.
type Type string

const (
	// Temp means the problem is temporary, only need to report an event.
	Temp Type = "temporary"
	// Perm means the problem is permanent, need to change the node condition.
	Perm Type = "permanent"
)
// Monitor monitors the system and reports problems and metrics according to the rules.
type Monitor interface {
	// Start starts the monitor.
	// The returned Status channel is used to report problems. If the Monitor does not report any
	// problem (i.e. metrics reporting only), the channel should be set to nil.
	Start() (<-chan *Status, error)
	// Stop stops the monitor.
	Stop()
}
// Exporter exports machine health data to a certain control plane.
type Exporter interface {
	// ExportProblems exports problems to the control plane.
	ExportProblems(*Status)
}
// ProblemDaemonType is the type of the problem daemon.
// One type of problem daemon may be used to initialize multiple problem daemon instances.
type ProblemDaemonType string

// ProblemDaemonConfigPathMap represents configurations on all types of problem daemons:
// 1) Each key represents a type of problem daemon.
// 2) Each value represents the config file paths to that type of problem daemon.
type ProblemDaemonConfigPathMap map[ProblemDaemonType]*[]string

// ProblemDaemonHandler represents the initialization handler for a type of problem daemon.
type ProblemDaemonHandler struct {
	// CreateProblemDaemonOrDie initializes a problem daemon from a config file path; panics if an error occurs.
	CreateProblemDaemonOrDie func(string) Monitor
	// CmdOptionDescription explains how to configure the problem daemon from command line arguments.
	CmdOptionDescription string
}
// ExporterType is the type of the exporter.
type ExporterType string

// ExporterHandler represents the initialization handler for a type of exporter.
type ExporterHandler struct {
	// CreateExporterOrDie initializes an exporter from its command line options; panics if an error occurs.
	CreateExporterOrDie func(CommandLineOptions) Exporter
	// Options holds the exporter's command line options, used to configure it from command line arguments.
	Options CommandLineOptions
}
type CommandLineOptions interface {
SetFlags(*pflag.FlagSet)
} | pkg/types/types.go | 0.579162 | 0.441312 | types.go | starcoder |
package goavro
import (
"fmt"
"sort"
"strconv"
"strings"
)
// pcfProcessor is a function type that, given a parsed JSON value, returns
// its Parsing Canonical Form according to the Avro specification.
type pcfProcessor func(s interface{}) (string, error)
// parsingCanonicalForm returns the "Parsing Canonical Form" (PCF) for a parsed
// JSON structure of a valid Avro schema, or an error describing the schema
// error. It dispatches on the dynamic type produced by encoding/json.
func parsingCanonicalForm(schema interface{}, parentNamespace string, typeLookup map[string]string) (string, error) {
	switch val := schema.(type) {
	case map[string]interface{}:
		// JSON objects are decoded as a map of strings to empty interfaces
		return pcfObject(val, parentNamespace, typeLookup)
	case []interface{}:
		// JSON arrays are decoded as a slice of empty interfaces
		return pcfArray(val, parentNamespace, typeLookup)
	case string:
		// JSON string values are decoded as a Go string
		return pcfString(val, typeLookup)
	case float64:
		// JSON numerical values are decoded as Go float64
		return pcfNumber(val)
	default:
		return "", fmt.Errorf("cannot parse schema with invalid schema type; ought to be map[string]interface{}, []interface{}, string, or float64; received: %T: %v", schema, schema)
	}
}
// pcfNumber returns the parsing canonical form for a numerical value: the
// shortest 'g'-format decimal representation that round-trips as a float64.
func pcfNumber(val float64) (string, error) {
	formatted := strconv.FormatFloat(val, 'g', -1, 64)
	return formatted, nil
}
// pcfString returns the parsing canonical form for a string value: the value
// wrapped in double quotes, after resolving it through typeLookup to its
// fully-qualified canonical name when such a mapping exists.
func pcfString(val string, typeLookup map[string]string) (string, error) {
	name := val
	if canonical, ok := typeLookup[val]; ok {
		name = canonical
	}
	return `"` + name + `"`, nil
}
// pcfArray returns the parsing canonical form for a JSON array: the canonical
// forms of its elements, comma-joined and wrapped in square brackets. The
// first element error, if any, aborts the whole array.
func pcfArray(val []interface{}, parentNamespace string, typeLookup map[string]string) (string, error) {
	var sb strings.Builder
	sb.WriteByte('[')
	for i, el := range val {
		pcf, err := parsingCanonicalForm(el, parentNamespace, typeLookup)
		if err != nil {
			return "", err
		}
		if i > 0 {
			sb.WriteByte(',')
		}
		sb.WriteString(pcf)
	}
	sb.WriteByte(']')
	return sb.String(), nil
}
// pcfObject returns the parsing canonical form for a JSON object (an Avro
// record, enum, fixed, array, map, or union wrapper). It:
//   - strips attributes irrelevant to the canonical form (doc, aliases, namespace, ...),
//   - fully qualifies unqualified names with the accumulated namespace,
//   - normalizes fixed sizes given as strings to numbers, and
//   - orders the kept attributes per the Avro specification.
func pcfObject(jsonMap map[string]interface{}, parentNamespace string, typeLookup map[string]string) (string, error) {
	pairs := make(stringPairs, 0, len(jsonMap))
	// Remember the namespace to fully qualify names later
	var namespace string
	if namespaceJSON, ok := jsonMap["namespace"]; ok {
		if namespaceStr, ok := namespaceJSON.(string); ok {
			// and its value is a string (otherwise invalid schema)
			if parentNamespace == "" {
				namespace = namespaceStr
			} else {
				namespace = parentNamespace + "." + namespaceStr
			}
			parentNamespace = namespace
		}
	} else if objectType, ok := jsonMap["type"]; ok && objectType == "record" {
		// Records without their own namespace inherit the parent's.
		namespace = parentNamespace
	}
	for k, v := range jsonMap {
		// Reduce primitive schemas to their simple form.
		if len(jsonMap) == 1 && k == "type" {
			if t, ok := v.(string); ok {
				return "\"" + t + "\"", nil
			}
		}
		// Only keep relevant attributes (strip 'doc', 'alias', 'namespace')
		if _, ok := fieldOrder[k]; !ok {
			continue
		}
		// Add namespace to a non-qualified name.
		if k == "name" && namespace != "" {
			// Check if the name isn't already qualified.
			if t, ok := v.(string); ok && !strings.ContainsRune(t, '.') {
				v = namespace + "." + t
				// Remember the qualified name so later references resolve to it.
				typeLookup[t] = v.(string)
			}
		}
		// Only fixed type allows size, and we must convert a string size to a
		// float.
		if k == "size" {
			if s, ok := v.(string); ok {
				s, err := strconv.ParseUint(s, 10, 0)
				if err != nil {
					// should never get here because already validated schema
					return "", fmt.Errorf("Fixed size ought to be number greater than zero: %v", s)
				}
				v = float64(s)
			}
		}
		pk, err := parsingCanonicalForm(k, parentNamespace, typeLookup)
		if err != nil {
			return "", err
		}
		pv, err := parsingCanonicalForm(v, parentNamespace, typeLookup)
		if err != nil {
			return "", err
		}
		pairs = append(pairs, stringPair{k, pk + ":" + pv})
	}
	// Sort keys by their order in specification.
	sort.Sort(byAvroFieldOrder(pairs))
	return "{" + strings.Join(pairs.Bs(), ",") + "}", nil
}
// stringPair represents a pair of string values.
type stringPair struct {
	A string
	B string
}

// stringPairs is a sortable slice of pairs of strings.
type stringPairs []stringPair

// Bs returns the second value of every pair, preserving order.
func (sp *stringPairs) Bs() []string {
	bs := make([]string, 0, len(*sp))
	for _, pair := range *sp {
		bs = append(bs, pair.B)
	}
	return bs
}
// fieldOrder defines the fields that show up in a canonical schema and
// specifies their precedence (lower sorts first). Keys absent from this map
// are stripped from the canonical form entirely (see pcfObject).
var fieldOrder = map[string]int{
	"name":    1,
	"type":    2,
	"fields":  3,
	"symbols": 4,
	"items":   5,
	"values":  6,
	"size":    7,
}
// byAvroFieldOrder is equipped with a sort order of fields according to the
// specification.
type byAvroFieldOrder []stringPair
func (s byAvroFieldOrder) Len() int {
return len(s)
}
func (s byAvroFieldOrder) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byAvroFieldOrder) Less(i, j int) bool {
return fieldOrder[s[i].A] < fieldOrder[s[j].A]
} | vendor/github.com/linkedin/goavro/v2/canonical.go | 0.804598 | 0.453443 | canonical.go | starcoder |
As a way to play with functions and loops, let's implement a square root function: given a number x, we want to find the number z for which z² is most nearly x.
Computers typically compute the square root of x using a loop. Starting with some guess z, we can adjust z based on how close z² is to x, producing a better guess:
z -= (z*z - x) / (2*z)
Repeating this adjustment makes the guess better and better until we reach an answer that is as close to the actual square root as can be.
Implement this in the func Sqrt provided. A decent starting guess for z is 1, no matter what the input. To begin with, repeat the calculation 10 times and print each z along the way. See how close you get to the answer for various values of x (1, 2, 3, ...) and how quickly the guess improves.
Hint: To declare and initialize a floating point value, give it floating point syntax or use a conversion:
z := 1.0
z := float64(1)
Next, change the loop condition to stop once the value has stopped changing (or only changes by a very small amount). See if that's more or fewer than 10 iterations. Try other initial guesses for z, like x, or x/2. How close are your function's results to the math.Sqrt in the standard library?
(Note: If you are interested in the details of the algorithm, the z² − x above is how far away z² is from where it needs to be (x), and the division by 2z is the derivative of z², to scale how much we adjust z by how quickly z² is changing. This general approach is called Newton's method. It works well for many functions but especially well for square root.)
*/
// Start With
package main
import (
"fmt"
)
// Sqrt should return the square root of x computed with Newton's method,
// per the exercise description above.
// TODO: implement. Note: as written this stub does not compile, because the
// function body returns nothing.
func Sqrt(x float64) float64 {
}
// main exercises the (to-be-implemented) Sqrt with a sample input.
func main() {
	fmt.Println(Sqrt(2))
}
// A Solution Is
package main
import (
"fmt"
"math"
)
// Sqrt approximates the square root of x with Newton's method, starting from
// an initial guess of 1 and refining for at most 100 iterations. Each refined
// guess is rounded to 3 decimal places and printed; iteration stops — and the
// rounded value is returned — as soon as two consecutive rounded guesses
// agree. If 100 iterations pass without convergence, the raw (unrounded)
// guess is returned instead.
func Sqrt(x float64) float64 {
	guess := 1.0
	prev := 0.0
	for iter := 1; iter <= 100; iter++ {
		// Newton step: move guess toward the root of z² − x.
		guess -= (guess*guess - x) / (2 * guess)
		rounded := math.Round(guess*1000) / 1000
		if rounded == prev {
			return prev
		}
		prev = rounded
		fmt.Println("Guess", iter, "is", rounded)
	}
	return guess
}
func main() {
x := 273.06
fmt.Println("Best guess:", Sqrt(x))
fmt.Println("Real answer:", math.Sqrt(x))
} | interview_questions/byos.go | 0.843444 | 0.974508 | byos.go | starcoder |
package ast
import (
"bytes"
"fmt"
"strings"
)
// IntegerLiteral contains the node expression and its integer value.
type IntegerLiteral struct {
	*BaseNode
	Value int
}

// expressionNode marks IntegerLiteral as an Expression node.
func (il *IntegerLiteral) expressionNode() {}

// TokenLiteral returns the literal text of the integer token.
func (il *IntegerLiteral) TokenLiteral() string {
	return il.Token.Literal
}

// String renders the integer literal exactly as its source token.
func (il *IntegerLiteral) String() string {
	return il.Token.Literal
}
// FloatLiteral contains the node expression and its floating-point value.
type FloatLiteral struct {
	*BaseNode
	Value float64
}

// expressionNode marks FloatLiteral as an Expression node.
func (il *FloatLiteral) expressionNode() {}

// TokenLiteral returns the literal text of the float token.
func (il *FloatLiteral) TokenLiteral() string {
	return il.Token.Literal
}

// String renders the float literal exactly as its source token.
func (il *FloatLiteral) String() string {
	return il.Token.Literal
}
// StringLiteral contains the node expression and its string value.
type StringLiteral struct {
	*BaseNode
	Value string
}

// expressionNode marks StringLiteral as an Expression node.
func (sl *StringLiteral) expressionNode() {}

// TokenLiteral returns the literal text of the string token (unquoted).
func (sl *StringLiteral) TokenLiteral() string {
	return sl.Token.Literal
}

// String renders the string literal wrapped in double quotes.
// Note: the token literal is not escaped, so embedded quotes pass through.
func (sl *StringLiteral) String() string {
	var out bytes.Buffer
	out.WriteString("\"")
	out.WriteString(sl.Token.Literal)
	out.WriteString("\"")
	return out.String()
}
// ArrayExpression defines an array literal, holding the expressions for its
// elements in source order.
type ArrayExpression struct {
	*BaseNode
	Elements []Expression
}

// expressionNode marks ArrayExpression as an Expression node.
func (ae *ArrayExpression) expressionNode() {}

// TokenLiteral returns the literal text of the array's opening token.
func (ae *ArrayExpression) TokenLiteral() string {
	return ae.Token.Literal
}

// String renders the array as "[e1, e2, ...]"; an empty array renders as "[]".
func (ae *ArrayExpression) String() string {
	var out bytes.Buffer
	out.WriteString("[")
	if len(ae.Elements) == 0 {
		out.WriteString("]")
		return out.String()
	}
	// First element has no leading separator; the rest are comma-prefixed.
	out.WriteString(ae.Elements[0].String())
	for _, elem := range ae.Elements[1:] {
		out.WriteString(", ")
		out.WriteString(elem.String())
	}
	out.WriteString("]")
	return out.String()
}
// ArgumentPairExpression represents a key/value pair in method parameters or arguments.
type ArgumentPairExpression struct {
	*BaseNode
	Key   Expression
	Value Expression
}

// expressionNode marks ArgumentPairExpression as an Expression node.
func (ape *ArgumentPairExpression) expressionNode() {}

// TokenLiteral returns the literal text of the pair's token.
func (ape *ArgumentPairExpression) TokenLiteral() string {
	return ape.Token.Literal
}

// String renders the pair as "key: value", or "key:" when no value is set.
func (ape *ArgumentPairExpression) String() string {
	if ape.Value == nil {
		return fmt.Sprintf("%s:", ape.Key.String())
	}
	return fmt.Sprintf("%s: %s", ape.Key.String(), ape.Value.String())
}
// HashExpression defines a hash literal, mapping string keys to their value
// expressions.
type HashExpression struct {
	*BaseNode
	Data map[string]Expression
}

// expressionNode marks HashExpression as an Expression node.
func (he *HashExpression) expressionNode() {}

// TokenLiteral returns the literal text of the hash's opening token.
func (he *HashExpression) TokenLiteral() string {
	return he.Token.Literal
}

// String renders the hash as "{k1: v1, k2: v2}".
// NOTE(review): pairs come from ranging over a map, so their order is
// nondeterministic between calls — confirm callers don't rely on it.
func (he *HashExpression) String() string {
	var out bytes.Buffer
	var pairs []string
	for key, value := range he.Data {
		pairs = append(pairs, fmt.Sprintf("%s: %s", key, value.String()))
	}
	out.WriteString("{")
	out.WriteString(strings.Join(pairs, ", "))
	out.WriteString("}")
	return out.String()
}
// PrefixExpression represents an expression preceded by a prefix operator,
// e.g. "!x" or "-n".
type PrefixExpression struct {
	*BaseNode
	Operator string
	Right    Expression
}

// expressionNode marks PrefixExpression as an Expression node.
func (pe *PrefixExpression) expressionNode() {}

// TokenLiteral returns the literal text of the prefix operator token.
func (pe *PrefixExpression) TokenLiteral() string {
	return pe.Token.Literal
}

// String renders the operator immediately followed by its operand, unparenthesized.
func (pe *PrefixExpression) String() string {
	var out bytes.Buffer
	out.WriteString(pe.Operator)
	out.WriteString(pe.Right.String())
	return out.String()
}
// InfixExpression represents a binary expression with an infix operator,
// e.g. "a + b".
type InfixExpression struct {
	*BaseNode
	Left     Expression
	Operator string
	Right    Expression
}

// expressionNode marks InfixExpression as an Expression node.
func (ie *InfixExpression) expressionNode() {}

// TokenLiteral returns the literal text of the infix operator token.
func (ie *InfixExpression) TokenLiteral() string {
	return ie.Token.Literal
}

// String renders the expression fully parenthesized: "(left op right)".
func (ie *InfixExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(ie.Left.String())
	out.WriteString(" ")
	out.WriteString(ie.Operator)
	out.WriteString(" ")
	out.WriteString(ie.Right.String())
	out.WriteString(")")
	return out.String()
}
// AssignExpression represents variable assignment in Goby, possibly to
// multiple targets at once ("a, b = expr").
type AssignExpression struct {
	*BaseNode
	Variables []Expression
	Value     Expression
	// Optioned attribute is only used when infix expression is local assignment in params.
	// For example: `foo(x = 10)`'s `x = 10` is an optioned assign expression
	// TODO: Remove this when we can put metadata inside bytecode.
	Optioned int
}

// expressionNode marks AssignExpression as an Expression node.
func (ae *AssignExpression) expressionNode() {}

// TokenLiteral returns the literal text of the assignment token.
func (ae *AssignExpression) TokenLiteral() string {
	return ae.Token.Literal
}

// String renders the assignment as "v1, v2 = value".
func (ae *AssignExpression) String() string {
	var out bytes.Buffer
	var variables []string
	for _, v := range ae.Variables {
		variables = append(variables, v.String())
	}
	out.WriteString(strings.Join(variables, ", "))
	out.WriteString(" = ")
	out.WriteString(ae.Value.String())
	return out.String()
}
// BooleanExpression defines a boolean literal and its value.
type BooleanExpression struct {
	*BaseNode
	Value bool
}

// expressionNode marks BooleanExpression as an Expression node.
func (b *BooleanExpression) expressionNode() {}

// TokenLiteral returns the literal text of the boolean token ("true"/"false").
func (b *BooleanExpression) TokenLiteral() string {
	return b.Token.Literal
}

// String renders the boolean literal exactly as its source token.
func (b *BooleanExpression) String() string {
	return b.Token.Literal
}
// NilExpression represents a nil literal node.
type NilExpression struct {
	*BaseNode
}

// expressionNode marks NilExpression as an Expression node.
func (n *NilExpression) expressionNode() {}

// TokenLiteral returns the literal text of the nil token.
func (n *NilExpression) TokenLiteral() string {
	return n.Token.Literal
}

// String always renders the fixed text "nil".
func (n *NilExpression) String() string {
	return "nil"
}
// IfExpression represents an "if" expression: an ordered chain of
// if/elsif conditional branches with an optional else block.
type IfExpression struct {
	*BaseNode
	Conditionals []*ConditionalExpression
	Alternative  *BlockStatement
}

// expressionNode marks IfExpression as an Expression node.
func (ie *IfExpression) expressionNode() {}

// TokenLiteral returns the literal text of the "if" token.
func (ie *IfExpression) TokenLiteral() string {
	return ie.Token.Literal
}

// String renders the whole chain: "if ..." for the first branch, "elsif ..."
// for the rest, an optional "else" block, and a closing "end".
func (ie *IfExpression) String() string {
	var out bytes.Buffer
	for i, c := range ie.Conditionals {
		if i == 0 {
			out.WriteString("if")
			out.WriteString(" ")
		} else {
			out.WriteString("elsif")
			out.WriteString(" ")
		}
		out.WriteString(c.String())
	}
	if ie.Alternative != nil {
		out.WriteString("\n")
		out.WriteString("else\n")
		out.WriteString(ie.Alternative.String())
	}
	out.WriteString("\nend")
	return out.String()
}
// ConditionalExpression represents one if/elsif branch: a condition plus the
// block executed when it holds.
type ConditionalExpression struct {
	*BaseNode
	Condition   Expression
	Consequence *BlockStatement
}

// expressionNode marks ConditionalExpression as an Expression node.
func (ce *ConditionalExpression) expressionNode() {}

// TokenLiteral returns the literal text of the branch's token (`if` or `elsif`).
func (ce *ConditionalExpression) TokenLiteral() string {
	return ce.Token.Literal
}

// String renders the condition followed by the consequence block on the next line.
func (ce *ConditionalExpression) String() string {
	var out bytes.Buffer
	out.WriteString(ce.Condition.String())
	out.WriteString("\n")
	out.WriteString(ce.Consequence.String())
	return out.String()
}
// CallExpression represents a method call on a receiver, with positional
// arguments and an optional trailing block (with its own parameters).
type CallExpression struct {
	*BaseNode
	Receiver       Expression
	Method         string
	Arguments      []Expression
	Block          *BlockStatement
	BlockArguments []*Identifier
}

// expressionNode marks CallExpression as an Expression node.
func (tce *CallExpression) expressionNode() {}

// TokenLiteral returns the literal text of the call's token.
func (tce *CallExpression) TokenLiteral() string {
	return tce.Token.Literal
}

// String renders the call as "receiver.method(args)" and, when a block is
// attached, appends " do |params|\n<body>\nend" (the |params| part only when
// the block declares parameters).
func (tce *CallExpression) String() string {
	var out bytes.Buffer
	out.WriteString(tce.Receiver.String())
	out.WriteString(".")
	out.WriteString(tce.Method)
	var args = []string{}
	for _, arg := range tce.Arguments {
		args = append(args, arg.String())
	}
	out.WriteString("(")
	out.WriteString(strings.Join(args, ", "))
	out.WriteString(")")
	if tce.Block != nil {
		var blockArgs []string
		out.WriteString(" do")
		if len(tce.BlockArguments) > 0 {
			for _, arg := range tce.BlockArguments {
				blockArgs = append(blockArgs, arg.String())
			}
			out.WriteString(" |")
			out.WriteString(strings.Join(blockArgs, ", "))
			out.WriteString("|")
		}
		out.WriteString("\n")
		out.WriteString(tce.Block.String())
		out.WriteString("\nend")
	}
	return out.String()
}
// SelfExpression represents a "self" expression node.
type SelfExpression struct {
	*BaseNode
}

// expressionNode marks SelfExpression as an Expression node.
func (se *SelfExpression) expressionNode() {}

// TokenLiteral returns the literal text of the "self" token.
func (se *SelfExpression) TokenLiteral() string {
	return se.Token.Literal
}

// String always renders the fixed text "self".
func (se *SelfExpression) String() string {
	return "self"
}
// YieldExpression represents a "yield" expression with the arguments passed
// to the surrounding method's block.
type YieldExpression struct {
	*BaseNode
	Arguments []Expression
}

// expressionNode marks YieldExpression as an Expression node.
func (ye *YieldExpression) expressionNode() {}

// TokenLiteral returns the literal text of the "yield" token.
func (ye *YieldExpression) TokenLiteral() string {
	return ye.Token.Literal
}

// String renders the yield as "yield(arg1, arg2, ...)".
func (ye *YieldExpression) String() string {
	var out bytes.Buffer
	var args []string
	for _, arg := range ye.Arguments {
		args = append(args, arg.String())
	}
	out.WriteString(ye.TokenLiteral())
	out.WriteString("(")
	out.WriteString(strings.Join(args, ", "))
	out.WriteString(")")
	return out.String()
}
// GetBlockExpression represents a `get_block` call in the AST.
type GetBlockExpression struct {
	*BaseNode
}

// expressionNode marks GetBlockExpression as an Expression node.
func (gbe *GetBlockExpression) expressionNode() {}

// TokenLiteral returns the literal text of the "get_block" token.
func (gbe *GetBlockExpression) TokenLiteral() string {
	return gbe.Token.Literal
}

// String renders the node as its token literal.
func (gbe *GetBlockExpression) String() string {
	return gbe.TokenLiteral()
}
// RangeExpression defines the range expression literal which contains the node expression and its start/end value
type RangeExpression struct {
*BaseNode
Start Expression
End Expression
}
func (re *RangeExpression) expressionNode() {}
// TokenLiteral is a polymorphic function to return a token literal for Range
func (re *RangeExpression) TokenLiteral() string {
return re.Token.Literal
}
// RangeExpression.String gets the string format of the Range type token
func (re *RangeExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(re.Start.String())
out.WriteString("..")
out.WriteString(re.End.String())
out.WriteString(")")
return out.String()
} | compiler/ast/expressions.go | 0.776029 | 0.591369 | expressions.go | starcoder |
package poset
import (
"bytes"
"sort"
"github.com/Fantom-foundation/go-lachesis/hash"
"github.com/Fantom-foundation/go-lachesis/inter"
"github.com/Fantom-foundation/go-lachesis/inter/idx"
)
// frameConsensusTime returns the consensus timestamp reached at the end of
// the given frame. Frame 0 has no own frame info, so it falls back to the
// previous epoch's time.
func (p *Poset) frameConsensusTime(frame idx.Frame) inter.Timestamp {
	if frame == 0 {
		return p.PrevEpoch.Time
	}
	return p.store.GetFrameInfo(p.EpochN, frame).
		LastConsensusTime
}
// fareTimestamps calculates time ratio & time offset for the new frame
func (p *Poset) fareTimestamps(
frame idx.Frame,
atropos hash.Event,
highestLamport idx.Lamport,
lowestLamport idx.Lamport,
) (
frameInfo FrameInfo,
) {
lastConsensusTime := p.frameConsensusTime(frame - 1)
// calculate difference between highest and lowest period
frameLamportPeriod := idx.MaxLamport(highestLamport-lowestLamport+1, 1)
// calculate difference between Atropos's median time and previous Atropos's consensus time (almost the same as previous median time)
nowMedianTime := p.GetEventHeader(p.EpochN, atropos).MedianTime
frameTimePeriod := inter.MaxTimestamp(nowMedianTime-lastConsensusTime, 1)
if lastConsensusTime > nowMedianTime {
frameTimePeriod = 1
}
// Calculate time ratio & time offset
timeRatio := inter.MaxTimestamp(frameTimePeriod/inter.Timestamp(frameLamportPeriod), 1)
lowestConsensusTime := lastConsensusTime + timeRatio
timeOffset := int64(lowestConsensusTime) - int64(lowestLamport)*int64(timeRatio)
// Calculate consensus timestamp of an event with highestLamport (it's always Atropos)
lastConsensusTime = inter.Timestamp(int64(highestLamport)*int64(timeRatio) + timeOffset)
// Save new timeRatio & timeOffset to frame
frameInfo = FrameInfo{
TimeOffset: timeOffset,
TimeRatio: timeRatio,
LastConsensusTime: lastConsensusTime,
}
return
}
// fareOrdering orders the events
func (p *Poset) fareOrdering(
unordered []*inter.EventHeaderData,
) (
ids hash.Events,
) {
// sort by lamport timestamp & hash
sort.Slice(unordered, func(i, j int) bool {
a, b := unordered[i], unordered[j]
if a.Lamport != b.Lamport {
return a.Lamport < b.Lamport
}
return bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()) < 0
})
ordered := unordered
ids = make(hash.Events, len(ordered))
for i, e := range ordered {
ids[i] = e.Hash()
}
return
} | poset/event_ordering.go | 0.752013 | 0.448909 | event_ordering.go | starcoder |
package image
import (
"encoding/binary"
"math"
colorExt "github.com/chai2010/image/color"
)
// pGrayAt decodes an 8-bit gray pixel from the start of pix.
func pGrayAt(pix []byte) colorExt.Gray {
	return colorExt.Gray{
		Y: pix[1*0],
	}
}

// pSetGray encodes an 8-bit gray pixel into the start of pix.
func pSetGray(pix []byte, c colorExt.Gray) {
	pix[1*0] = c.Y
}

// pGray16At decodes a 16-bit gray pixel; the channel is stored big-endian.
func pGray16At(pix []byte) colorExt.Gray16 {
	return colorExt.Gray16{
		Y: binary.BigEndian.Uint16(pix[2*0:]),
	}
}

// pSetGray16 encodes a 16-bit gray pixel, big-endian.
func pSetGray16(pix []byte, c colorExt.Gray16) {
	binary.BigEndian.PutUint16(pix[2*0:], c.Y)
}

// pGray32iAt decodes a 32-bit signed-integer gray pixel (big-endian bits reinterpreted as int32).
func pGray32iAt(pix []byte) colorExt.Gray32i {
	return colorExt.Gray32i{
		Y: int32(binary.BigEndian.Uint32(pix[4*0:])),
	}
}

// pSetGray32i encodes a 32-bit signed-integer gray pixel, big-endian.
func pSetGray32i(pix []byte, c colorExt.Gray32i) {
	binary.BigEndian.PutUint32(pix[4*0:], uint32(c.Y))
}

// pGray32fAt decodes a 32-bit float gray pixel (big-endian IEEE-754 bits).
func pGray32fAt(pix []byte) colorExt.Gray32f {
	return colorExt.Gray32f{
		Y: math.Float32frombits(binary.BigEndian.Uint32(pix[4*0:])),
	}
}

// pSetGray32f encodes a 32-bit float gray pixel, big-endian IEEE-754 bits.
func pSetGray32f(pix []byte, c colorExt.Gray32f) {
	binary.BigEndian.PutUint32(pix[4*0:], math.Float32bits(c.Y))
}

// pGray64iAt decodes a 64-bit signed-integer gray pixel, big-endian.
func pGray64iAt(pix []byte) colorExt.Gray64i {
	return colorExt.Gray64i{
		Y: int64(binary.BigEndian.Uint64(pix[8*0:])),
	}
}

// pSetGray64i encodes a 64-bit signed-integer gray pixel, big-endian.
func pSetGray64i(pix []byte, c colorExt.Gray64i) {
	binary.BigEndian.PutUint64(pix[8*0:], uint64(c.Y))
}

// pGray64fAt decodes a 64-bit float gray pixel (big-endian IEEE-754 bits).
func pGray64fAt(pix []byte) colorExt.Gray64f {
	return colorExt.Gray64f{
		Y: math.Float64frombits(binary.BigEndian.Uint64(pix[8*0:])),
	}
}

// pSetGray64f encodes a 64-bit float gray pixel, big-endian IEEE-754 bits.
func pSetGray64f(pix []byte, c colorExt.Gray64f) {
	binary.BigEndian.PutUint64(pix[8*0:], math.Float64bits(c.Y))
}
// pGrayAAt decodes an 8-bit gray+alpha pixel (channel order Y, A).
func pGrayAAt(pix []byte) colorExt.GrayA {
	return colorExt.GrayA{
		Y: pix[1*0],
		A: pix[1*1],
	}
}

// pSetGrayA encodes an 8-bit gray+alpha pixel (channel order Y, A).
func pSetGrayA(pix []byte, c colorExt.GrayA) {
	pix[1*0] = c.Y
	pix[1*1] = c.A
}

// pGrayA32At decodes a 16-bit-per-channel gray+alpha pixel, big-endian.
func pGrayA32At(pix []byte) colorExt.GrayA32 {
	return colorExt.GrayA32{
		Y: binary.BigEndian.Uint16(pix[2*0:]),
		A: binary.BigEndian.Uint16(pix[2*1:]),
	}
}

// pSetGrayA32 encodes a 16-bit-per-channel gray+alpha pixel, big-endian.
func pSetGrayA32(pix []byte, c colorExt.GrayA32) {
	binary.BigEndian.PutUint16(pix[2*0:], c.Y)
	binary.BigEndian.PutUint16(pix[2*1:], c.A)
}

// pGrayA64iAt decodes a 32-bit signed-integer gray+alpha pixel, big-endian.
func pGrayA64iAt(pix []byte) colorExt.GrayA64i {
	return colorExt.GrayA64i{
		Y: int32(binary.BigEndian.Uint32(pix[4*0:])),
		A: int32(binary.BigEndian.Uint32(pix[4*1:])),
	}
}

// pSetGrayA64i encodes a 32-bit signed-integer gray+alpha pixel, big-endian.
func pSetGrayA64i(pix []byte, c colorExt.GrayA64i) {
	binary.BigEndian.PutUint32(pix[4*0:], uint32(c.Y))
	binary.BigEndian.PutUint32(pix[4*1:], uint32(c.A))
}

// pGrayA64fAt decodes a 32-bit float gray+alpha pixel (big-endian IEEE-754 bits).
func pGrayA64fAt(pix []byte) colorExt.GrayA64f {
	return colorExt.GrayA64f{
		Y: math.Float32frombits(binary.BigEndian.Uint32(pix[4*0:])),
		A: math.Float32frombits(binary.BigEndian.Uint32(pix[4*1:])),
	}
}

// pSetGrayA64f encodes a 32-bit float gray+alpha pixel, big-endian IEEE-754 bits.
func pSetGrayA64f(pix []byte, c colorExt.GrayA64f) {
	binary.BigEndian.PutUint32(pix[4*0:], math.Float32bits(c.Y))
	binary.BigEndian.PutUint32(pix[4*1:], math.Float32bits(c.A))
}

// pGrayA128iAt decodes a 64-bit signed-integer gray+alpha pixel, big-endian.
func pGrayA128iAt(pix []byte) colorExt.GrayA128i {
	return colorExt.GrayA128i{
		Y: int64(binary.BigEndian.Uint64(pix[8*0:])),
		A: int64(binary.BigEndian.Uint64(pix[8*1:])),
	}
}

// pSetGrayA128i encodes a 64-bit signed-integer gray+alpha pixel, big-endian.
func pSetGrayA128i(pix []byte, c colorExt.GrayA128i) {
	binary.BigEndian.PutUint64(pix[8*0:], uint64(c.Y))
	binary.BigEndian.PutUint64(pix[8*1:], uint64(c.A))
}

// pGrayA128fAt decodes a 64-bit float gray+alpha pixel (big-endian IEEE-754 bits).
func pGrayA128fAt(pix []byte) colorExt.GrayA128f {
	return colorExt.GrayA128f{
		Y: math.Float64frombits(binary.BigEndian.Uint64(pix[8*0:])),
		A: math.Float64frombits(binary.BigEndian.Uint64(pix[8*1:])),
	}
}

// pSetGrayA128f encodes a 64-bit float gray+alpha pixel, big-endian IEEE-754 bits.
func pSetGrayA128f(pix []byte, c colorExt.GrayA128f) {
	binary.BigEndian.PutUint64(pix[8*0:], math.Float64bits(c.Y))
	binary.BigEndian.PutUint64(pix[8*1:], math.Float64bits(c.A))
}
// pRGBAt decodes an 8-bit RGB pixel (channel order R, G, B).
func pRGBAt(pix []byte) colorExt.RGB {
	return colorExt.RGB{
		R: pix[1*0],
		G: pix[1*1],
		B: pix[1*2],
	}
}

// pSetRGB encodes an 8-bit RGB pixel (channel order R, G, B).
func pSetRGB(pix []byte, c colorExt.RGB) {
	pix[1*0] = c.R
	pix[1*1] = c.G
	pix[1*2] = c.B
}

// pRGB48At decodes a 16-bit-per-channel RGB pixel, big-endian.
func pRGB48At(pix []byte) colorExt.RGB48 {
	return colorExt.RGB48{
		R: binary.BigEndian.Uint16(pix[2*0:]),
		G: binary.BigEndian.Uint16(pix[2*1:]),
		B: binary.BigEndian.Uint16(pix[2*2:]),
	}
}

// pSetRGB48 encodes a 16-bit-per-channel RGB pixel, big-endian.
func pSetRGB48(pix []byte, c colorExt.RGB48) {
	binary.BigEndian.PutUint16(pix[2*0:], c.R)
	binary.BigEndian.PutUint16(pix[2*1:], c.G)
	binary.BigEndian.PutUint16(pix[2*2:], c.B)
}

// pRGB96iAt decodes a 32-bit signed-integer RGB pixel, big-endian.
func pRGB96iAt(pix []byte) colorExt.RGB96i {
	return colorExt.RGB96i{
		R: int32(binary.BigEndian.Uint32(pix[4*0:])),
		G: int32(binary.BigEndian.Uint32(pix[4*1:])),
		B: int32(binary.BigEndian.Uint32(pix[4*2:])),
	}
}

// pSetRGB96i encodes a 32-bit signed-integer RGB pixel, big-endian.
func pSetRGB96i(pix []byte, c colorExt.RGB96i) {
	binary.BigEndian.PutUint32(pix[4*0:], uint32(c.R))
	binary.BigEndian.PutUint32(pix[4*1:], uint32(c.G))
	binary.BigEndian.PutUint32(pix[4*2:], uint32(c.B))
}

// pRGB96fAt decodes a 32-bit float RGB pixel (big-endian IEEE-754 bits).
func pRGB96fAt(pix []byte) colorExt.RGB96f {
	return colorExt.RGB96f{
		R: math.Float32frombits(binary.BigEndian.Uint32(pix[4*0:])),
		G: math.Float32frombits(binary.BigEndian.Uint32(pix[4*1:])),
		B: math.Float32frombits(binary.BigEndian.Uint32(pix[4*2:])),
	}
}

// pSetRGB96f encodes a 32-bit float RGB pixel, big-endian IEEE-754 bits.
func pSetRGB96f(pix []byte, c colorExt.RGB96f) {
	binary.BigEndian.PutUint32(pix[4*0:], math.Float32bits(c.R))
	binary.BigEndian.PutUint32(pix[4*1:], math.Float32bits(c.G))
	binary.BigEndian.PutUint32(pix[4*2:], math.Float32bits(c.B))
}

// pRGB192iAt decodes a 64-bit signed-integer RGB pixel, big-endian.
func pRGB192iAt(pix []byte) colorExt.RGB192i {
	return colorExt.RGB192i{
		R: int64(binary.BigEndian.Uint64(pix[8*0:])),
		G: int64(binary.BigEndian.Uint64(pix[8*1:])),
		B: int64(binary.BigEndian.Uint64(pix[8*2:])),
	}
}

// pSetRGB192i encodes a 64-bit signed-integer RGB pixel, big-endian.
func pSetRGB192i(pix []byte, c colorExt.RGB192i) {
	binary.BigEndian.PutUint64(pix[8*0:], uint64(c.R))
	binary.BigEndian.PutUint64(pix[8*1:], uint64(c.G))
	binary.BigEndian.PutUint64(pix[8*2:], uint64(c.B))
}

// pRGB192fAt decodes a 64-bit float RGB pixel (big-endian IEEE-754 bits).
func pRGB192fAt(pix []byte) colorExt.RGB192f {
	return colorExt.RGB192f{
		R: math.Float64frombits(binary.BigEndian.Uint64(pix[8*0:])),
		G: math.Float64frombits(binary.BigEndian.Uint64(pix[8*1:])),
		B: math.Float64frombits(binary.BigEndian.Uint64(pix[8*2:])),
	}
}

// pSetRGB192f encodes a 64-bit float RGB pixel, big-endian IEEE-754 bits.
func pSetRGB192f(pix []byte, c colorExt.RGB192f) {
	binary.BigEndian.PutUint64(pix[8*0:], math.Float64bits(c.R))
	binary.BigEndian.PutUint64(pix[8*1:], math.Float64bits(c.G))
	binary.BigEndian.PutUint64(pix[8*2:], math.Float64bits(c.B))
}
func pRGBAAt(pix []byte) colorExt.RGBA {
return colorExt.RGBA{
R: pix[1*0],
G: pix[1*1],
B: pix[1*2],
A: pix[1*3],
}
}
func pSetRGBA(pix []byte, c colorExt.RGBA) {
pix[1*0] = c.R
pix[1*1] = c.G
pix[1*2] = c.B
pix[1*3] = c.A
}
func pRGBA64At(pix []byte) colorExt.RGBA64 {
return colorExt.RGBA64{
R: binary.BigEndian.Uint16(pix[2*0:]),
G: binary.BigEndian.Uint16(pix[2*1:]),
B: binary.BigEndian.Uint16(pix[2*2:]),
A: binary.BigEndian.Uint16(pix[2*3:]),
}
}
func pSetRGBA64(pix []byte, c colorExt.RGBA64) {
binary.BigEndian.PutUint16(pix[2*0:], c.R)
binary.BigEndian.PutUint16(pix[2*1:], c.G)
binary.BigEndian.PutUint16(pix[2*2:], c.B)
binary.BigEndian.PutUint16(pix[2*3:], c.A)
}
// pRGBA128iAt reads four big-endian int32 channel values (R, G, B, A;
// 16 bytes) from pix.
func pRGBA128iAt(pix []byte) colorExt.RGBA128i {
	return colorExt.RGBA128i{
		R: int32(binary.BigEndian.Uint32(pix[4*0:])),
		G: int32(binary.BigEndian.Uint32(pix[4*1:])),
		B: int32(binary.BigEndian.Uint32(pix[4*2:])),
		A: int32(binary.BigEndian.Uint32(pix[4*3:])),
	}
}

// pSetRGBA128i is the inverse of pRGBA128iAt.
func pSetRGBA128i(pix []byte, c colorExt.RGBA128i) {
	binary.BigEndian.PutUint32(pix[4*0:], uint32(c.R))
	binary.BigEndian.PutUint32(pix[4*1:], uint32(c.G))
	binary.BigEndian.PutUint32(pix[4*2:], uint32(c.B))
	binary.BigEndian.PutUint32(pix[4*3:], uint32(c.A))
}

// pRGBA128fAt reads four big-endian IEEE-754 float32 channel values
// (R, G, B, A; 16 bytes) from pix.
func pRGBA128fAt(pix []byte) colorExt.RGBA128f {
	return colorExt.RGBA128f{
		R: math.Float32frombits(binary.BigEndian.Uint32(pix[4*0:])),
		G: math.Float32frombits(binary.BigEndian.Uint32(pix[4*1:])),
		B: math.Float32frombits(binary.BigEndian.Uint32(pix[4*2:])),
		A: math.Float32frombits(binary.BigEndian.Uint32(pix[4*3:])),
	}
}

// pSetRGBA128f is the inverse of pRGBA128fAt.
func pSetRGBA128f(pix []byte, c colorExt.RGBA128f) {
	binary.BigEndian.PutUint32(pix[4*0:], math.Float32bits(c.R))
	binary.BigEndian.PutUint32(pix[4*1:], math.Float32bits(c.G))
	binary.BigEndian.PutUint32(pix[4*2:], math.Float32bits(c.B))
	binary.BigEndian.PutUint32(pix[4*3:], math.Float32bits(c.A))
}

// pRGBA256iAt reads four big-endian int64 channel values (R, G, B, A;
// 32 bytes) from pix.
func pRGBA256iAt(pix []byte) colorExt.RGBA256i {
	return colorExt.RGBA256i{
		R: int64(binary.BigEndian.Uint64(pix[8*0:])),
		G: int64(binary.BigEndian.Uint64(pix[8*1:])),
		B: int64(binary.BigEndian.Uint64(pix[8*2:])),
		A: int64(binary.BigEndian.Uint64(pix[8*3:])),
	}
}

// pSetRGBA256i is the inverse of pRGBA256iAt.
func pSetRGBA256i(pix []byte, c colorExt.RGBA256i) {
	binary.BigEndian.PutUint64(pix[8*0:], uint64(c.R))
	binary.BigEndian.PutUint64(pix[8*1:], uint64(c.G))
	binary.BigEndian.PutUint64(pix[8*2:], uint64(c.B))
	binary.BigEndian.PutUint64(pix[8*3:], uint64(c.A))
}

// pRGBA256fAt reads four big-endian IEEE-754 float64 channel values
// (R, G, B, A; 32 bytes) from pix.
func pRGBA256fAt(pix []byte) colorExt.RGBA256f {
	return colorExt.RGBA256f{
		R: math.Float64frombits(binary.BigEndian.Uint64(pix[8*0:])),
		G: math.Float64frombits(binary.BigEndian.Uint64(pix[8*1:])),
		B: math.Float64frombits(binary.BigEndian.Uint64(pix[8*2:])),
		A: math.Float64frombits(binary.BigEndian.Uint64(pix[8*3:])),
	}
}

// pSetRGBA256f is the inverse of pRGBA256fAt.
func pSetRGBA256f(pix []byte, c colorExt.RGBA256f) {
	binary.BigEndian.PutUint64(pix[8*0:], math.Float64bits(c.R))
	binary.BigEndian.PutUint64(pix[8*1:], math.Float64bits(c.G))
	binary.BigEndian.PutUint64(pix[8*2:], math.Float64bits(c.B))
	binary.BigEndian.PutUint64(pix[8*3:], math.Float64bits(c.A))
}
package geojson
import (
"encoding/json"
"errors"
"fmt"
)
// A GeometryType serves to enumerate the different GeoJSON geometry types.
type GeometryType string

// The geometry types supported by GeoJSON 1.0
const (
	GeometryPoint           GeometryType = "Point"
	GeometryMultiPoint      GeometryType = "MultiPoint"
	GeometryLineString      GeometryType = "LineString"
	GeometryMultiLineString GeometryType = "MultiLineString"
	GeometryPolygon         GeometryType = "Polygon"
	GeometryMultiPolygon    GeometryType = "MultiPolygon"
	GeometryCollection      GeometryType = "GeometryCollection"
)

// A Geometry correlates to a GeoJSON geometry object.
// Type acts as a discriminator: only the payload field matching Type
// (Point, MultiPoint, ..., Geometries) is used when (un)marshaling.
type Geometry struct {
	Type            GeometryType `json:"type"`
	BoundingBox     []float64    `json:"bbox,omitempty"`
	Point           []float64
	MultiPoint      [][]float64
	LineString      [][]float64
	MultiLineString [][][]float64
	Polygon         [][][]float64
	MultiPolygon    [][][][]float64
	Geometries      []*Geometry
	CRS             map[string]interface{} `json:"crs,omitempty"` // Coordinate Reference System Objects are not currently supported
}
// NewPointGeometry creates and initializes a point geometry with the give coordinate.
// The coordinate slice is retained, not copied.
func NewPointGeometry(coordinate []float64) *Geometry {
	return &Geometry{
		Type:  GeometryPoint,
		Point: coordinate,
	}
}

// NewMultiPointGeometry creates and initializes a multi-point geometry with the given coordinates.
// The coordinate slices are retained, not copied.
func NewMultiPointGeometry(coordinates ...[]float64) *Geometry {
	return &Geometry{
		Type:       GeometryMultiPoint,
		MultiPoint: coordinates,
	}
}

// NewLineStringGeometry creates and initializes a line string geometry with the given coordinates.
func NewLineStringGeometry(coordinates [][]float64) *Geometry {
	return &Geometry{
		Type:       GeometryLineString,
		LineString: coordinates,
	}
}

// NewMultiLineStringGeometry creates and initializes a multi-line string geometry with the given lines.
func NewMultiLineStringGeometry(lines ...[][]float64) *Geometry {
	return &Geometry{
		Type:            GeometryMultiLineString,
		MultiLineString: lines,
	}
}

// NewPolygonGeometry creates and initializes a polygon geometry with the given polygon.
func NewPolygonGeometry(polygon [][][]float64) *Geometry {
	return &Geometry{
		Type:    GeometryPolygon,
		Polygon: polygon,
	}
}

// NewMultiPolygonGeometry creates and initializes a multi-polygon geometry with the given polygons.
func NewMultiPolygonGeometry(polygons ...[][][]float64) *Geometry {
	return &Geometry{
		Type:         GeometryMultiPolygon,
		MultiPolygon: polygons,
	}
}

// NewCollectionGeometry creates and initializes a geometry collection geometry with the given geometries.
func NewCollectionGeometry(geometries ...*Geometry) *Geometry {
	return &Geometry{
		Type:       GeometryCollection,
		Geometries: geometries,
	}
}
// MarshalJSON converts the geometry object into the correct JSON.
// This fulfills the json.Marshaler interface. The payload field matching
// g.Type is emitted under "coordinates" (or "geometries" for collections).
// NOTE(review): g.CRS is never copied into the output struct, so "crs" is
// always omitted — presumably intentional given the "not currently
// supported" note on the field; confirm before relying on it.
func (g *Geometry) MarshalJSON() ([]byte, error) {
	// defining a struct here lets us define the order of the JSON elements.
	type geometry struct {
		Type        GeometryType           `json:"type"`
		BoundingBox []float64              `json:"bbox,omitempty"`
		Coordinates interface{}            `json:"coordinates,omitempty"`
		Geometries  interface{}            `json:"geometries,omitempty"`
		CRS         map[string]interface{} `json:"crs,omitempty"`
	}
	geo := &geometry{
		Type: g.Type,
	}
	// len() of a nil slice is 0, so the former explicit nil check was redundant.
	if len(g.BoundingBox) > 0 {
		geo.BoundingBox = g.BoundingBox
	}
	switch g.Type {
	case GeometryPoint:
		geo.Coordinates = g.Point
	case GeometryMultiPoint:
		geo.Coordinates = g.MultiPoint
	case GeometryLineString:
		geo.Coordinates = g.LineString
	case GeometryMultiLineString:
		geo.Coordinates = g.MultiLineString
	case GeometryPolygon:
		geo.Coordinates = g.Polygon
	case GeometryMultiPolygon:
		geo.Coordinates = g.MultiPolygon
	case GeometryCollection:
		geo.Geometries = g.Geometries
	}
	return json.Marshal(geo)
}
// UnmarshalGeometry decodes the data into a GeoJSON geometry.
// Alternately one can call json.Unmarshal(g) directly for the same result.
func UnmarshalGeometry(data []byte) (*Geometry, error) {
	g := &Geometry{}
	err := json.Unmarshal(data, g)
	if err != nil {
		return nil, err
	}
	return g, nil
}

// UnmarshalJSON decodes the data into a GeoJSON geometry.
// This fulfills the json.Unmarshaler interface.
// It first decodes into a generic map and then dispatches on the
// "type" property via decodeGeometry.
func (g *Geometry) UnmarshalJSON(data []byte) error {
	var object map[string]interface{}
	err := json.Unmarshal(data, &object)
	if err != nil {
		return err
	}
	return decodeGeometry(g, object)
}
// Scan implements the sql.Scanner interface allowing
// geometry structs to be passed into rows.Scan(...interface{})
// The columns must be received as GeoJSON Geometry.
// When using PostGIS a spatial column would need to be wrapped in ST_AsGeoJSON.
func (g *Geometry) Scan(value interface{}) error {
	switch v := value.(type) {
	case string:
		return g.UnmarshalJSON([]byte(v))
	case []byte:
		return g.UnmarshalJSON(v)
	default:
		return errors.New("unable to parse this type into geojson")
	}
}
// decodeGeometry populates g from a generic JSON object map.
// It requires a string "type" property and then decodes the matching
// coordinate structure; an unknown type leaves g's payload fields empty
// and returns nil.
func decodeGeometry(g *Geometry, object map[string]interface{}) error {
	t, ok := object["type"]
	if !ok {
		return errors.New("type property not defined")
	}
	if s, ok := t.(string); ok {
		g.Type = GeometryType(s)
	} else {
		return errors.New("type property not string")
	}
	var err error
	switch g.Type {
	case GeometryPoint:
		g.Point, err = decodePosition(object["coordinates"])
	case GeometryMultiPoint:
		g.MultiPoint, err = decodePositionSet(object["coordinates"])
	case GeometryLineString:
		g.LineString, err = decodePositionSet(object["coordinates"])
	case GeometryMultiLineString:
		g.MultiLineString, err = decodePathSet(object["coordinates"])
	case GeometryPolygon:
		g.Polygon, err = decodePathSet(object["coordinates"])
	case GeometryMultiPolygon:
		g.MultiPolygon, err = decodePolygonSet(object["coordinates"])
	case GeometryCollection:
		g.Geometries, err = decodeGeometries(object["geometries"])
	}
	return err
}
// decodePosition converts a decoded JSON array of numbers into a
// position ([]float64). It fails if data is not an array or if any
// element is not a JSON number (float64 after json.Unmarshal).
func decodePosition(data interface{}) ([]float64, error) {
	coords, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("not a valid position, got %v", data)
	}
	position := make([]float64, 0, len(coords))
	for _, c := range coords {
		f, ok := c.(float64)
		if !ok {
			return nil, fmt.Errorf("not a valid coordinate, got %v", c)
		}
		position = append(position, f)
	}
	return position, nil
}
// decodePositionSet converts a JSON array of positions into [][]float64
// (used for MultiPoint and LineString coordinates).
func decodePositionSet(data interface{}) ([][]float64, error) {
	points, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("not a valid set of positions, got %v", data)
	}
	result := make([][]float64, 0, len(points))
	for _, point := range points {
		if p, err := decodePosition(point); err == nil {
			result = append(result, p)
		} else {
			return nil, err
		}
	}
	return result, nil
}

// decodePathSet converts a JSON array of position sets into [][][]float64
// (used for MultiLineString and Polygon coordinates).
func decodePathSet(data interface{}) ([][][]float64, error) {
	sets, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("not a valid path, got %v", data)
	}
	result := make([][][]float64, 0, len(sets))
	for _, set := range sets {
		if s, err := decodePositionSet(set); err == nil {
			result = append(result, s)
		} else {
			return nil, err
		}
	}
	return result, nil
}

// decodePolygonSet converts a JSON array of polygons into [][][][]float64
// (used for MultiPolygon coordinates).
func decodePolygonSet(data interface{}) ([][][][]float64, error) {
	polygons, ok := data.([]interface{})
	if !ok {
		return nil, fmt.Errorf("not a valid polygon, got %v", data)
	}
	result := make([][][][]float64, 0, len(polygons))
	for _, polygon := range polygons {
		if p, err := decodePathSet(polygon); err == nil {
			result = append(result, p)
		} else {
			return nil, err
		}
	}
	return result, nil
}
// decodeGeometries converts a JSON array of geometry objects into a slice
// of *Geometry. A non-map element breaks out of the loop early; the final
// length comparison then detects that case and falls through to the
// generic error at the bottom.
func decodeGeometries(data interface{}) ([]*Geometry, error) {
	if vs, ok := data.([]interface{}); ok {
		geometries := make([]*Geometry, 0, len(vs))
		for _, v := range vs {
			g := &Geometry{}
			vmap, ok := v.(map[string]interface{})
			if !ok {
				break
			}
			err := decodeGeometry(g, vmap)
			if err != nil {
				return nil, err
			}
			geometries = append(geometries, g)
		}
		if len(geometries) == len(vs) {
			return geometries, nil
		}
	}
	return nil, fmt.Errorf("not a valid set of geometries, got %v", data)
}
// IsPoint returns true when the geometry object is a Point type.
func (g *Geometry) IsPoint() bool {
	return g.Type == GeometryPoint
}

// IsMultiPoint returns true when the geometry object is a MultiPoint type.
func (g *Geometry) IsMultiPoint() bool {
	return g.Type == GeometryMultiPoint
}

// IsLineString returns true when the geometry object is a LineString type.
func (g *Geometry) IsLineString() bool {
	return g.Type == GeometryLineString
}

// IsMultiLineString returns true when the geometry object is a MultiLineString type.
func (g *Geometry) IsMultiLineString() bool {
	return g.Type == GeometryMultiLineString
}

// IsPolygon returns true when the geometry object is a Polygon type.
func (g *Geometry) IsPolygon() bool {
	return g.Type == GeometryPolygon
}

// IsMultiPolygon returns true when the geometry object is a MultiPolygon type.
func (g *Geometry) IsMultiPolygon() bool {
	return g.Type == GeometryMultiPolygon
}

// IsCollection returns true when the geometry object is a GeometryCollection type.
func (g *Geometry) IsCollection() bool {
	return g.Type == GeometryCollection
}
// Package leb128 provides functions for reading integer values encoded in the
// Little Endian Base 128 (LEB128) format: https://en.wikipedia.org/wiki/LEB128
package leb128
import (
"io"
)
// ReadVarUint32Size reads a LEB128 encoded unsigned 32-bit integer from r.
// It returns the integer value, the size of the encoded value (in bytes), and
// the error (if any).
func ReadVarUint32Size(r io.Reader) (res uint32, size uint, err error) {
b := make([]byte, 1)
var shift uint
for {
if _, err = io.ReadFull(r, b); err != nil {
return
}
size++
cur := uint32(b[0])
res |= (cur & 0x7f) << (shift)
if cur&0x80 == 0 {
return res, size, nil
}
shift += 7
}
}
// ReadVarUint32 reads a LEB128 encoded unsigned 32-bit integer from r, and
// returns the integer value, and the error (if any).
func ReadVarUint32(r io.Reader) (uint32, error) {
	n, _, err := ReadVarUint32Size(r)
	return n, err
}

// ReadVarint32Size reads a LEB128 encoded signed 32-bit integer from r, and
// returns the integer value, the size of the encoded value, and the error
// (if any). It decodes via the 64-bit routine and truncates the result.
func ReadVarint32Size(r io.Reader) (res int32, size uint, err error) {
	res64, size, err := ReadVarint64Size(r)
	res = int32(res64)
	return
}

// ReadVarint32 reads a LEB128 encoded signed 32-bit integer from r, and
// returns the integer value, and the error (if any).
func ReadVarint32(r io.Reader) (int32, error) {
	n, _, err := ReadVarint32Size(r)
	return n, err
}
// ReadVarint64Size reads a LEB128 encoded signed 64-bit integer from r, and
// returns the integer value, the size of the encoded value, and the error
// (if any)
func ReadVarint64Size(r io.Reader) (res int64, size uint, err error) {
var shift uint
var sign int64 = -1
b := make([]byte, 1)
for {
if _, err = io.ReadFull(r, b); err != nil {
return
}
size++
cur := int64(b[0])
res |= (cur & 0x7f) << shift
shift += 7
sign <<= 7
if cur&0x80 == 0 {
break
}
}
if ((sign >> 1) & res) != 0 {
res |= sign
}
return res, size, nil
}
// ReadVarint64 reads a LEB128 encoded signed 64-bit integer from r, and
// returns the integer value, and the error (if any).
func ReadVarint64(r io.Reader) (int64, error) {
	n, _, err := ReadVarint64Size(r)
	return n, err
}
package symexpr
import (
"sort"
// "fmt"
)
// Leaf node comparison methods. Each node type implements:
//   AmILess       — strict ordering: first by ExprType() rank, then by payload.
//   AmIEqual      — structural equality including payload values.
//   AmISame       — structural equality; Constant/ConstantF compare equal
//                   regardless of their value.
//   AmIAlmostSame — like AmISame for leaves (looser rules exist on Mul/Add).
//   Sort          — canonical child ordering; a no-op for leaves.

func (n *Time) AmILess(r Expr) bool { return TIME < r.ExprType() }
func (n *Time) AmIEqual(r Expr) bool { return r.ExprType() == TIME }
func (n *Time) AmISame(r Expr) bool { return r.ExprType() == TIME }
func (n *Time) AmIAlmostSame(r Expr) bool { return r.ExprType() == TIME }
func (n *Time) Sort() { return }

// Var nodes order/compare by their variable index P.
func (v *Var) AmILess(r Expr) bool {
	if VAR < r.ExprType() {
		return true
	}
	if VAR > r.ExprType() {
		return false
	}
	return v.P < r.(*Var).P
}
func (v *Var) AmIEqual(r Expr) bool { return r.ExprType() == VAR && r.(*Var).P == v.P }
func (v *Var) AmISame(r Expr) bool { return r.ExprType() == VAR && r.(*Var).P == v.P }
func (v *Var) AmIAlmostSame(r Expr) bool { return r.ExprType() == VAR && r.(*Var).P == v.P }
func (v *Var) Sort() { return }

// Constant (indexed coefficient): AmIEqual compares the index P, while
// AmISame/AmIAlmostSame treat any two Constants as interchangeable.
func (c *Constant) AmILess(r Expr) bool {
	if CONSTANT < r.ExprType() {
		return true
	}
	if CONSTANT > r.ExprType() {
		return false
	}
	return c.P < r.(*Constant).P
}
func (c *Constant) AmIEqual(r Expr) bool { return r.ExprType() == CONSTANT && r.(*Constant).P == c.P }
func (c *Constant) AmISame(r Expr) bool { return r.ExprType() == CONSTANT }
func (c *Constant) AmIAlmostSame(r Expr) bool { return r.ExprType() == CONSTANT }
func (c *Constant) Sort() { return }

// ConstantF (literal float): AmIEqual compares the value F, while
// AmISame/AmIAlmostSame treat any two ConstantFs as interchangeable.
func (c *ConstantF) AmILess(r Expr) bool {
	if CONSTANTF < r.ExprType() {
		return true
	}
	if CONSTANTF > r.ExprType() {
		return false
	}
	return c.F < r.(*ConstantF).F
}
func (c *ConstantF) AmIEqual(r Expr) bool {
	return r.ExprType() == CONSTANTF && r.(*ConstantF).F == c.F
}
func (c *ConstantF) AmISame(r Expr) bool { return r.ExprType() == CONSTANTF }
func (c *ConstantF) AmIAlmostSame(r Expr) bool { return r.ExprType() == CONSTANTF }
func (c *ConstantF) Sort() { return }

// System nodes order/compare by their index P.
func (s *System) AmILess(r Expr) bool {
	if SYSTEM < r.ExprType() {
		return true
	}
	if SYSTEM > r.ExprType() {
		return false
	}
	return s.P < r.(*System).P
}
func (s *System) AmIEqual(r Expr) bool { return r.ExprType() == SYSTEM && r.(*System).P == s.P }
func (s *System) AmISame(r Expr) bool { return r.ExprType() == SYSTEM && r.(*System).P == s.P }
func (s *System) AmIAlmostSame(r Expr) bool { return r.ExprType() == SYSTEM && r.(*System).P == s.P }
func (s *System) Sort() { return }
// Unary node comparison methods (Neg, Abs, Sqrt, Sin, Cos, Tan, Exp, Log).
// All follow the same pattern: order first by ExprType() rank, then recurse
// into the single child C; equality/sameness requires matching type and a
// matching child; Sort recurses into the child.

func (u *Neg) AmILess(r Expr) bool {
	if NEG < r.ExprType() {
		return true
	}
	if NEG > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Neg).C)
}
func (u *Neg) AmIEqual(r Expr) bool { return r.ExprType() == NEG && u.C.AmIEqual(r.(*Neg).C) }
func (u *Neg) AmISame(r Expr) bool { return r.ExprType() == NEG && u.C.AmISame(r.(*Neg).C) }
func (u *Neg) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == NEG && u.C.AmIAlmostSame(r.(*Neg).C)
}
func (u *Neg) Sort() { u.C.Sort() }

func (u *Abs) AmILess(r Expr) bool {
	if ABS < r.ExprType() {
		return true
	}
	if ABS > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Abs).C)
}
func (u *Abs) AmIEqual(r Expr) bool { return r.ExprType() == ABS && u.C.AmIEqual(r.(*Abs).C) }
func (u *Abs) AmISame(r Expr) bool { return r.ExprType() == ABS && u.C.AmISame(r.(*Abs).C) }
func (u *Abs) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == ABS && u.C.AmIAlmostSame(r.(*Abs).C)
}
func (u *Abs) Sort() { u.C.Sort() }

func (u *Sqrt) AmILess(r Expr) bool {
	if SQRT < r.ExprType() {
		return true
	}
	if SQRT > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Sqrt).C)
}
func (u *Sqrt) AmIEqual(r Expr) bool { return r.ExprType() == SQRT && u.C.AmIEqual(r.(*Sqrt).C) }
func (u *Sqrt) AmISame(r Expr) bool { return r.ExprType() == SQRT && u.C.AmISame(r.(*Sqrt).C) }
func (u *Sqrt) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == SQRT && u.C.AmIAlmostSame(r.(*Sqrt).C)
}
func (u *Sqrt) Sort() { u.C.Sort() }

func (u *Sin) AmILess(r Expr) bool {
	if SIN < r.ExprType() {
		return true
	}
	if SIN > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Sin).C)
}
func (u *Sin) AmIEqual(r Expr) bool { return r.ExprType() == SIN && u.C.AmIEqual(r.(*Sin).C) }
func (u *Sin) AmISame(r Expr) bool { return r.ExprType() == SIN && u.C.AmISame(r.(*Sin).C) }
func (u *Sin) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == SIN && u.C.AmIAlmostSame(r.(*Sin).C)
}
func (u *Sin) Sort() { u.C.Sort() }

func (u *Cos) AmILess(r Expr) bool {
	if COS < r.ExprType() {
		return true
	}
	if COS > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Cos).C)
}
func (u *Cos) AmIEqual(r Expr) bool { return r.ExprType() == COS && u.C.AmIEqual(r.(*Cos).C) }
func (u *Cos) AmISame(r Expr) bool { return r.ExprType() == COS && u.C.AmISame(r.(*Cos).C) }
func (u *Cos) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == COS && u.C.AmIAlmostSame(r.(*Cos).C)
}
func (u *Cos) Sort() { u.C.Sort() }

func (u *Tan) AmILess(r Expr) bool {
	if TAN < r.ExprType() {
		return true
	}
	if TAN > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Tan).C)
}
func (u *Tan) AmIEqual(r Expr) bool { return r.ExprType() == TAN && u.C.AmIEqual(r.(*Tan).C) }
func (u *Tan) AmISame(r Expr) bool { return r.ExprType() == TAN && u.C.AmISame(r.(*Tan).C) }
func (u *Tan) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == TAN && u.C.AmIAlmostSame(r.(*Tan).C)
}
func (u *Tan) Sort() { u.C.Sort() }

func (u *Exp) AmILess(r Expr) bool {
	if EXP < r.ExprType() {
		return true
	}
	if EXP > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Exp).C)
}
func (u *Exp) AmIEqual(r Expr) bool { return r.ExprType() == EXP && u.C.AmIEqual(r.(*Exp).C) }
func (u *Exp) AmISame(r Expr) bool { return r.ExprType() == EXP && u.C.AmISame(r.(*Exp).C) }
func (u *Exp) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == EXP && u.C.AmIAlmostSame(r.(*Exp).C)
}
func (u *Exp) Sort() { u.C.Sort() }

func (u *Log) AmILess(r Expr) bool {
	if LOG < r.ExprType() {
		return true
	}
	if LOG > r.ExprType() {
		return false
	}
	return u.C.AmILess(r.(*Log).C)
}
func (u *Log) AmIEqual(r Expr) bool { return r.ExprType() == LOG && u.C.AmIEqual(r.(*Log).C) }
func (u *Log) AmISame(r Expr) bool { return r.ExprType() == LOG && u.C.AmISame(r.(*Log).C) }
func (u *Log) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == LOG && u.C.AmIAlmostSame(r.(*Log).C)
}
func (u *Log) Sort() { u.C.Sort() }
// Power node comparison methods. PowI/PowF carry a scalar Power payload
// (int/float); PowE carries a full expression Power. Ordering compares the
// Base first and only consults Power when the bases are mutually not-less
// (i.e. equivalent under AmILess).

func (u *PowI) AmILess(r Expr) bool {
	if POWI < r.ExprType() {
		return true
	}
	if POWI > r.ExprType() {
		return false
	}
	if u.Base.AmILess(r.(*PowI).Base) {
		return true
	}
	if r.(*PowI).Base.AmILess(u.Base) {
		return false
	}
	return u.Power < r.(*PowI).Power
}
func (u *PowI) AmIEqual(r Expr) bool {
	return r.ExprType() == POWI && r.(*PowI).Power == u.Power && u.Base.AmIEqual(r.(*PowI).Base)
}
func (u *PowI) AmISame(r Expr) bool {
	return r.ExprType() == POWI && r.(*PowI).Power == u.Power && u.Base.AmISame(r.(*PowI).Base)
}
func (u *PowI) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == POWI && r.(*PowI).Power == u.Power && u.Base.AmIAlmostSame(r.(*PowI).Base)
}
func (u *PowI) Sort() { u.Base.Sort() }

func (u *PowF) AmILess(r Expr) bool {
	if POWF < r.ExprType() {
		return true
	}
	if POWF > r.ExprType() {
		return false
	}
	if u.Base.AmILess(r.(*PowF).Base) {
		return true
	}
	if r.(*PowF).Base.AmILess(u.Base) {
		return false
	}
	return u.Power < r.(*PowF).Power
}
func (u *PowF) AmIEqual(r Expr) bool {
	return r.ExprType() == POWF && r.(*PowF).Power == u.Power && u.Base.AmIEqual(r.(*PowF).Base)
}
func (u *PowF) AmISame(r Expr) bool {
	return r.ExprType() == POWF && r.(*PowF).Power == u.Power && u.Base.AmISame(r.(*PowF).Base)
}
func (u *PowF) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == POWF && r.(*PowF).Power == u.Power && u.Base.AmIAlmostSame(r.(*PowF).Base)
}
func (u *PowF) Sort() { u.Base.Sort() }

func (n *PowE) AmILess(r Expr) bool {
	if POWE < r.ExprType() {
		return true
	}
	if POWE > r.ExprType() {
		return false
	}
	if n.Base.AmILess(r.(*PowE).Base) {
		return true
	}
	if r.(*PowE).Base.AmILess(n.Base) {
		return false
	}
	return n.Power.AmILess(r.(*PowE).Power)
}
func (n *PowE) AmIEqual(r Expr) bool {
	return r.ExprType() == POWE && n.Base.AmIEqual(r.(*PowE).Base) && n.Power.AmIEqual(r.(*PowE).Power)
}
func (n *PowE) AmISame(r Expr) bool {
	return r.ExprType() == POWE && n.Base.AmISame(r.(*PowE).Base) && n.Power.AmISame(r.(*PowE).Power)
}
func (n *PowE) AmIAlmostSame(r Expr) bool {
	return r.ExprType() == POWE && n.Base.AmIAlmostSame(r.(*PowE).Base) && n.Power.AmIAlmostSame(r.(*PowE).Power)
}
func (n *PowE) Sort() { n.Base.Sort(); n.Power.Sort() }
// AmILess orders Div nodes by type rank, then by numerator, then by
// denominator. Unlike the other node types it tolerates a nil r.
func (n *Div) AmILess(r Expr) bool {
	if r == nil {
		return false
	}
	if DIV < r.ExprType() {
		return true
	}
	if DIV > r.ExprType() {
		return false
	}
	rp := r.(*Div)
	if n.Numer.AmILess(rp.Numer) {
		return true
	}
	if rp.Numer.AmILess(n.Numer) {
		return false
	}
	return n.Denom.AmILess(r.(*Div).Denom)
}
// AmIEqual reports whether r is a Div structurally equal to n.
// Bug fix: the original final comparison was rp.Denom.AmIEqual(rp.Denom) —
// the right-hand denominator compared against itself — so any two Divs with
// equal numerators compared equal regardless of their denominators.
// The nil checks below also now short-circuit the child comparisons, which
// additionally avoids invoking a method on a nil child interface.
func (n *Div) AmIEqual(r Expr) bool {
	if r == nil || r.ExprType() != DIV {
		return false
	}
	rp := r.(*Div)
	if (n.Numer != nil && rp.Numer == nil) || (n.Numer == nil && rp.Numer != nil) {
		return false
	}
	if (n.Denom != nil && rp.Denom == nil) || (n.Denom == nil && rp.Denom != nil) {
		return false
	}
	if n.Numer != nil && !n.Numer.AmIEqual(rp.Numer) {
		return false
	}
	if n.Denom != nil && !n.Denom.AmIEqual(rp.Denom) {
		return false
	}
	return true
}
// AmISame reports whether r is a Div with the same structure as n,
// using the value-insensitive AmISame on both children. Both children
// must match in nil-ness before the recursive comparison.
func (n *Div) AmISame(r Expr) bool {
	if r == nil || r.ExprType() != DIV {
		return false
	}
	rp := r.(*Div)
	if (n.Numer != nil && rp.Numer == nil) || (n.Numer == nil && rp.Numer != nil) {
		return false
	}
	if (n.Denom != nil && rp.Denom == nil) || (n.Denom == nil && rp.Denom != nil) {
		return false
	}
	return r.ExprType() == DIV && n.Numer.AmISame(rp.Numer) && n.Denom.AmISame(rp.Denom)
}

// AmIAlmostSame is AmISame with the looser AmIAlmostSame recursion on
// both children.
func (n *Div) AmIAlmostSame(r Expr) bool {
	if r == nil || r.ExprType() != DIV {
		return false
	}
	rp := r.(*Div)
	if (n.Numer != nil && rp.Numer == nil) || (n.Numer == nil && rp.Numer != nil) {
		return false
	}
	if (n.Denom != nil && rp.Denom == nil) || (n.Denom == nil && rp.Denom != nil) {
		return false
	}
	return r.ExprType() == DIV && n.Numer.AmIAlmostSame(rp.Numer) && n.Denom.AmIAlmostSame(rp.Denom)
}

// Sort canonically orders each non-nil child subtree.
func (n *Div) Sort() {
	if n.Numer != nil {
		n.Numer.Sort()
	}
	if n.Denom != nil {
		n.Denom.Sort()
	}
}
// AmILess orders Add nodes by type rank, then by child count, then
// lexicographically over the (already sorted) child lists.
func (n *Add) AmILess(r Expr) bool {
	if ADD < r.ExprType() {
		return true
	}
	if ADD > r.ExprType() {
		return false
	}
	m := r.(*Add)
	ln := len(n.CS)
	lm := len(m.CS)
	if ln < lm {
		return true
	}
	if lm < ln {
		return false
	}
	for i, C := range n.CS {
		if C.AmILess(m.CS[i]) {
			return true
		}
		if m.CS[i].AmILess(C) {
			return false
		}
	}
	return false
}

// AmIEqual requires matching child counts and pairwise AmIEqual children.
func (n *Add) AmIEqual(r Expr) bool {
	if r.ExprType() != ADD {
		return false
	}
	m := r.(*Add)
	if len(n.CS) != len(m.CS) {
		return false
	}
	for i, C := range n.CS {
		if !C.AmIEqual(m.CS[i]) {
			return false
		}
		// if m.CS[i].AmILess( C ) { return false }
	}
	return true
}

// AmISame requires matching child counts and pairwise AmISame children.
func (n *Add) AmISame(r Expr) bool {
	if r.ExprType() != ADD {
		return false
	}
	m := r.(*Add)
	if len(n.CS) != len(m.CS) {
		return false
	}
	for i, C := range n.CS {
		if !C.AmISame(m.CS[i]) {
			return false
		}
		// if m.CS[i].AmILess( C ) { return false }
	}
	return true
}
// AmIAlmostSame requires matching child counts and pairwise AmIAlmostSame
// children. Cleanup: the original declared a `same := true` accumulator
// that was never written to and returned it at the end — dead state;
// returning true directly is equivalent.
func (n *Add) AmIAlmostSame(r Expr) bool {
	if r.ExprType() != ADD {
		return false
	}
	m := r.(*Add)
	if len(n.CS) != len(m.CS) {
		return false
	}
	for i, C := range n.CS {
		if !C.AmIAlmostSame(m.CS[i]) {
			return false
		}
	}
	return true
}
// Sort recursively sorts each non-nil child, orders the child list via
// ExprArray, then trims trailing nil children (nils sort to the end).
func (n *Add) Sort() {
	for _, C := range n.CS {
		if C != nil {
			C.Sort()
		}
	}
	sort.Sort(ExprArray(n.CS))
	i := len(n.CS) - 1
	for i >= 0 && n.CS[i] == nil {
		n.CS = n.CS[:i]
		i = len(n.CS) - 1
	}
	return
}
// AmILess orders Mul nodes, ignoring a leading Constant/ConstantF
// coefficient on either side so that c*x and x compare as equivalent
// factors. A non-Mul r is considered greater when n is a two-child
// product whose second child is the same as r.
func (n *Mul) AmILess(r Expr) bool {
	if r.ExprType() != MUL {
		if len(n.CS) == 2 {
			if n.CS[0].ExprType() == CONSTANT && n.CS[1].AmISame(r) {
				return true
			}
			if n.CS[0].ExprType() == CONSTANTF && n.CS[1].AmISame(r) {
				return true
			}
		}
		return false
	}
	m := r.(*Mul)
	ln, lm := len(n.CS), len(m.CS)
	// sn/sm skip a leading constant coefficient on each side.
	sn, sm := 0, 0
	if n.CS[0].ExprType() == CONSTANT || n.CS[0].ExprType() == CONSTANTF {
		sn++
	}
	if m.CS[0].ExprType() == CONSTANT || m.CS[0].ExprType() == CONSTANTF {
		sm++
	}
	if ln-sn != lm-sm {
		return ln-sn < lm-sm
	}
	for i, j := sn, sm; i < ln && j < lm; {
		if n.CS[i].AmILess(m.CS[j]) {
			return true
		}
		if m.CS[j].AmILess(n.CS[i]) {
			return false
		}
		i++
		j++
	}
	return false
}

// AmIEqual requires matching child counts (coefficients included) and
// pairwise AmIEqual children.
func (n *Mul) AmIEqual(r Expr) bool {
	if r.ExprType() != MUL {
		return false
	}
	m := r.(*Mul)
	if len(n.CS) != len(m.CS) {
		return false
	}
	for i, C := range n.CS {
		if !C.AmIEqual(m.CS[i]) {
			return false
		}
		// if m.CS[i].AmILess( C ) { return false }
	}
	return true
}

// AmISame requires matching child counts and pairwise AmISame children.
func (n *Mul) AmISame(r Expr) bool {
	if r.ExprType() != MUL {
		return false
	}
	m := r.(*Mul)
	if len(n.CS) != len(m.CS) {
		return false
	}
	for i, C := range n.CS {
		if !C.AmISame(m.CS[i]) {
			return false
		}
		// if m.CS[i].AmILess( C ) { return false }
	}
	return true
}
// AmIAlmostSame compares Mul nodes while ignoring a leading
// Constant/ConstantF coefficient on either side, so c1*x and c2*x (and
// even c*x vs plain x) count as "almost same". The non-Mul branch handles
// the coefficient-times-single-factor case.
func (n *Mul) AmIAlmostSame(r Expr) bool {
	if r.ExprType() != MUL {
		// fmt.Printf("~MUL: %v %v\n", n, r)
		if len(n.CS) == 2 {
			if n.CS[0].ExprType() == CONSTANT && n.CS[1].AmIAlmostSame(r) {
				return true
			}
			if n.CS[0].ExprType() == CONSTANTF && n.CS[1].AmIAlmostSame(r) {
				return true
			}
		}
		return false
	}
	// fmt.Printf("MUL: %v %v\n", n, r)
	m := r.(*Mul)
	ln, lm := len(n.CS), len(m.CS)
	// sn/sm skip a leading constant coefficient on each side.
	sn, sm := 0, 0
	if n.CS[0].ExprType() == CONSTANT || n.CS[0].ExprType() == CONSTANTF {
		sn++
	}
	if m.CS[0].ExprType() == CONSTANT || m.CS[0].ExprType() == CONSTANTF {
		sm++
	}
	// fmt.Printf("lens: %d %d %d %d\n", sn, ln, sm, lm)
	if ln-sn != lm-sm {
		return false
	}
	for i, j := sn, sm; i < ln && j < lm; {
		// fmt.Printf("COMPARE: %v %v", n.CS[i], m.CS[j])
		if !n.CS[i].AmIAlmostSame(m.CS[j]) {
			// fmt.Println("SAME")
			return false
		}
		i++
		j++
	}
	// fmt.Printf("\n")
	return true
}
// Sort recursively sorts each non-nil child, orders the child list via
// ExprArray, then trims trailing nil children (nils sort to the end).
func (n *Mul) Sort() {
	for _, C := range n.CS {
		if C != nil {
			C.Sort()
		}
	}
	sort.Sort(ExprArray(n.CS))
	i := len(n.CS) - 1
	for i >= 0 && n.CS[i] == nil {
		n.CS = n.CS[:i]
		i = len(n.CS) - 1
	}
	return
}
package tak
import "fmt"
// positionN embeds a Position together with fixed-size backing arrays for
// an N×N board. Allocating one of these values gives the Position and all
// of its Height/Stacks/Groups storage in a single allocation; alloc()
// points the Position's slices at the embedded arrays.
type position3 struct {
	Position
	alloc struct {
		Height [3 * 3]uint8
		Stacks [3 * 3]uint64
		Groups [6]uint64
	}
}

type position4 struct {
	Position
	alloc struct {
		Height [4 * 4]uint8
		Stacks [4 * 4]uint64
		Groups [8]uint64
	}
}

type position5 struct {
	Position
	alloc struct {
		Height [5 * 5]uint8
		Stacks [5 * 5]uint64
		Groups [10]uint64
	}
}

type position6 struct {
	Position
	alloc struct {
		Height [6 * 6]uint8
		Stacks [6 * 6]uint64
		Groups [12]uint64
	}
}

type position7 struct {
	Position
	alloc struct {
		Height [7 * 7]uint8
		Stacks [7 * 7]uint64
		Groups [14]uint64
	}
}

type position8 struct {
	Position
	alloc struct {
		Height [8 * 8]uint8
		Stacks [8 * 8]uint64
		Groups [16]uint64
	}
}
// alloc clones tpl into a freshly allocated, size-specific positionN value
// so the Position header and its Height/Stacks/Groups storage share one
// allocation. Height and Stacks are copied from tpl; WhiteGroups starts
// empty (length 0) over the embedded Groups array. Panics when tpl.Size()
// is outside [3, 8].
func alloc(tpl *Position) *Position {
	var (
		p      *Position
		height []uint8
		stacks []uint64
		groups []uint64
	)
	switch tpl.Size() {
	case 3:
		a := &position3{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	case 4:
		a := &position4{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	case 5:
		a := &position5{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	case 6:
		a := &position6{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	case 7:
		a := &position7{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	case 8:
		a := &position8{Position: *tpl}
		p, height, stacks, groups = &a.Position, a.alloc.Height[:], a.alloc.Stacks[:], a.alloc.Groups[:]
	default:
		panic(fmt.Sprintf("illegal size: %d", tpl.Size()))
	}
	p.Height = height
	p.Stacks = stacks
	p.analysis.WhiteGroups = groups[:0]
	copy(p.Height, tpl.Height)
	copy(p.Stacks, tpl.Stacks)
	return p
}
// copyPosition copies p into out while reusing out's existing Height,
// Stacks, and WhiteGroups backing storage (WhiteGroups is truncated to
// length 0, keeping its capacity). out must already have slices of
// sufficient length for p's board size.
func copyPosition(p *Position, out *Position) {
	h := out.Height
	s := out.Stacks
	g := out.analysis.WhiteGroups
	*out = *p
	out.Height = h
	out.Stacks = s
	out.analysis.WhiteGroups = g[:0]
	copy(out.Height, p.Height)
	copy(out.Stacks, p.Stacks)
}
// Alloc returns a fresh Position for a size×size board, backed by a single
// allocation (see alloc). Panics for sizes outside [3, 8].
func Alloc(size int) *Position {
	p := Position{cfg: &Config{Size: size}}
	return alloc(&p)
}
package http
import (
"fmt"
"net/http"
"strconv"
"strings"
)
// Range represents an RFC7233 (suffix) byte range spec. See https://tools.ietf.org/html/rfc7233#page-7
type Range struct {
// FirstBytePos is -1 if a suffix-byte-range-spec is represented.
// Otherwise, FirstBytePos is the offset of the first byte in the range.
FirstBytePos int64
// LastBytePos has two interpretations, depending on the value of FirstBytePos.
// If FirstBytePos >= 0 then
// If LastBytePos is -1 then the range includes all bytes with offset >= FirstBytePos.
// Otherwise, if LastBytePos >= 0, LastBytePos is the offset of the last byte in the range (inclusive).
// Otherwise, if FirstBytePos < 0 then
// The range includes only the last -LastBytePos (note that LastBytePos is negative) bytes of the
// requested resource.
LastBytePos int64
}
func ParseRange(req *http.Request) ([]Range, error) {
// See https://tools.ietf.org/html/rfc7233#section-2 and https://tools.ietf.org/html/rfc7230#section-7
headerValues := req.Header.Values("Range")
if len(headerValues) > 1 {
return nil, fmt.Errorf("multiple headers named Range are not supported")
}
if len(headerValues) == 0 {
return nil, nil
}
ranges, err := parseRangeHeaderValue(headerValues[0])
if err != nil {
return nil, fmt.Errorf("the header named Range has an invalid value: %w", err)
}
return ranges, nil
}
func parseRangeHeaderValue(headerValue string) (ranges []Range, err error) {
remainder := headerValue
const b = "bytes="
if !strings.HasPrefix(remainder, b) {
return nil, fmt.Errorf("value does not start with %#v", b)
}
remainder = remainder[len(b):]
for {
commaPos := strings.IndexByte(remainder, ',')
byteRangeSpec := remainder
if commaPos >= 0 {
byteRangeSpec = byteRangeSpec[:commaPos]
}
hyphenPos := strings.IndexByte(byteRangeSpec, '-')
if hyphenPos < 0 {
return nil, fmt.Errorf("value contains a byte-range-spec that contains no hyphen")
}
if hyphenPos == 0 {
// suffix range spec
suffixLength, err := strconv.ParseInt(byteRangeSpec, 10, 64)
if err != nil {
return nil, fmt.Errorf("value contains a suffix-byte-range-spec with an invalid or too large suffix-length: %w", err)
}
ranges = append(ranges, Range{
FirstBytePos: -1,
LastBytePos: suffixLength,
})
} else {
firstBytePos, err := strconv.ParseInt(byteRangeSpec[:hyphenPos], 10, 64)
if err != nil {
return nil, fmt.Errorf("value contains a byte-range-spec with an invalid or too large first-byte-pos: %w", err)
}
if hyphenPos+1 < len(byteRangeSpec) {
lastBytePos, err := strconv.ParseInt(byteRangeSpec[hyphenPos+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("value contains a byte-range-spec with an invalid or too large last-byte-pos: %w", err)
}
ranges = append(ranges, Range{
FirstBytePos: firstBytePos,
LastBytePos: lastBytePos,
})
} else {
ranges = append(ranges, Range{
FirstBytePos: firstBytePos,
LastBytePos: -1,
})
}
}
if commaPos < 0 {
break
}
owsEnd := commaPos + 1
for {
if owsEnd == len(remainder) || (remainder[owsEnd] != ' ' && remainder[owsEnd] != '\t') {
break
}
owsEnd++
}
remainder = remainder[owsEnd:]
}
return
} | http/range.go | 0.582729 | 0.429609 | range.go | starcoder |
package tdigest
import (
"fmt"
"math"
"sort"
)
// centroid is a single t-digest centroid: the weighted mean of the
// samples merged into it, its sample count, and its position within the
// owning summary.
type centroid struct {
	mean float64
	count uint32
	index int
}

// isValid reports whether c holds real data; invalidCentroid fails both
// checks.
func (c centroid) isValid() bool {
	return !math.IsNaN(c.mean) && c.count > 0
}

// Update merges `weight` samples of value x into the centroid using the
// incremental weighted-mean formula.
func (c *centroid) Update(x float64, weight uint32) {
	c.count += weight
	c.mean += float64(weight) * (x - c.mean) / float64(c.count)
}

// invalidCentroid is the sentinel returned by lookups that find nothing.
var invalidCentroid = centroid{mean: math.NaN(), count: 0}
// summary stores centroid means and their counts as two parallel,
// mean-sorted slices.
type summary struct {
	keys []float64
	counts []uint32
}

// newSummary returns an empty summary whose backing slices are
// pre-allocated to hold initialCapacity entries.
func newSummary(initialCapacity uint) *summary {
	s := summary{
		keys:   make([]float64, 0, initialCapacity),
		counts: make([]uint32, 0, initialCapacity),
	}
	return &s
}
// Len returns the number of centroids in the summary.
func (s summary) Len() int {
	return len(s.keys)
}

// Add inserts a centroid with the given mean (key) and count, keeping
// keys sorted. Adding to an already-present mean merges into that
// centroid instead of inserting.
func (s *summary) Add(key float64, value uint32) error {
	if math.IsNaN(key) {
		return fmt.Errorf("Key must not be NaN")
	}
	if value == 0 {
		return fmt.Errorf("Count must be >0")
	}
	idx := s.FindIndex(key)
	if s.meanAtIndexIs(idx, key) {
		s.updateAt(idx, key, value)
		return nil
	}
	// Grow both slices by one element, then shift the tail right to open
	// a slot at idx for the new entry.
	s.keys = append(s.keys, math.NaN())
	s.counts = append(s.counts, 0)
	copy(s.keys[idx+1:], s.keys[idx:])
	copy(s.counts[idx+1:], s.counts[idx:])
	s.keys[idx] = key
	s.counts[idx] = value
	return nil
}

// Find returns the centroid whose mean is exactly x, or invalidCentroid
// if no such centroid exists.
func (s summary) Find(x float64) centroid {
	idx := s.FindIndex(x)
	if idx < s.Len() && s.keys[idx] == x {
		return centroid{x, s.counts[idx], idx}
	}
	return invalidCentroid
}

// FindIndex returns the index of the first centroid whose mean is >= x
// (i.e. the insertion point for x). Small summaries use a linear scan.
func (s summary) FindIndex(x float64) int {
	// FIXME When is linear scan better than binsearch()?
	// should I even bother?
	if len(s.keys) < 30 {
		for i, item := range s.keys {
			if item >= x {
				return i
			}
		}
		return len(s.keys)
	}
	return sort.Search(len(s.keys), func(i int) bool {
		return s.keys[i] >= x
	})
}
// At returns the centroid at index, or invalidCentroid when index is
// out of range.
func (s summary) At(index int) centroid {
	if s.Len()-1 < index || index < 0 {
		return invalidCentroid
	}
	return centroid{s.keys[index], s.counts[index], index}
}

// Iterate calls f for each centroid in ascending mean order, stopping
// early if f returns false.
func (s summary) Iterate(f func(c centroid) bool) {
	for i := 0; i < s.Len(); i++ {
		if !f(centroid{s.keys[i], s.counts[i], i}) {
			break
		}
	}
}

// Min returns the centroid with the smallest mean (invalidCentroid when
// empty).
func (s summary) Min() centroid {
	return s.At(0)
}

// Max returns the centroid with the largest mean (invalidCentroid when
// empty).
func (s summary) Max() centroid {
	return s.At(s.Len() - 1)
}

// Data returns a copy of all centroids in ascending mean order.
func (s summary) Data() []centroid {
	data := make([]centroid, 0, s.Len())
	s.Iterate(func(c centroid) bool {
		data = append(data, c)
		return true
	})
	return data
}

// successorAndPredecessorItems returns the centroids just after and just
// before the insertion point of mean (either may be invalidCentroid).
func (s summary) successorAndPredecessorItems(mean float64) (centroid, centroid) {
	idx := s.FindIndex(mean)
	return s.At(idx + 1), s.At(idx - 1)
}

// ceilingAndFloorItems returns the smallest centroid with mean >= mean
// and the largest with mean <= mean (either may be invalidCentroid).
func (s summary) ceilingAndFloorItems(mean float64) (centroid, centroid) {
	idx := s.FindIndex(mean)
	// Case 1: item is greater than all items in the summary
	if idx == s.Len() {
		return invalidCentroid, s.Max()
	}
	item := s.At(idx)
	// Case 2: item exists in the summary
	if item.isValid() && mean == item.mean {
		return item, item
	}
	// Case 3: item is smaller than all items in the summary
	if idx == 0 {
		return s.Min(), invalidCentroid
	}
	return item, s.At(idx - 1)
}

// sumUntilMean returns the total count of all centroids whose mean is
// strictly less than mean.
func (s summary) sumUntilMean(mean float64) uint32 {
	var cumSum uint32
	for i := range s.keys {
		if s.keys[i] < mean {
			cumSum += s.counts[i]
		} else {
			break
		}
	}
	return cumSum
}
// updateAt merges (mean, count) into the centroid at index. Since the
// merge moves the centroid's mean, the entry may need to bubble left or
// right to keep the keys slice sorted.
func (s *summary) updateAt(index int, mean float64, count uint32) {
	c := centroid{s.keys[index], s.counts[index], index}
	c.Update(mean, count)
	oldMean := s.keys[index]
	s.keys[index] = c.mean
	s.counts[index] = c.count
	if c.mean > oldMean {
		s.adjustRight(index)
	} else if c.mean < oldMean {
		s.adjustLeft(index)
	}
}

// adjustRight bubbles the entry at index to the right until the keys
// slice is sorted again (counts move in lockstep).
func (s *summary) adjustRight(index int) {
	for i := index + 1; i < len(s.keys) && s.keys[i-1] > s.keys[i]; i++ {
		s.keys[i-1], s.keys[i] = s.keys[i], s.keys[i-1]
		s.counts[i-1], s.counts[i] = s.counts[i], s.counts[i-1]
	}
}

// adjustLeft bubbles the entry at index to the left until the keys
// slice is sorted again (counts move in lockstep).
func (s *summary) adjustLeft(index int) {
	for i := index - 1; i >= 0 && s.keys[i] > s.keys[i+1]; i-- {
		s.keys[i], s.keys[i+1] = s.keys[i+1], s.keys[i]
		s.counts[i], s.counts[i+1] = s.counts[i+1], s.counts[i]
	}
}

// meanAtIndexIs reports whether the centroid at index (if any) has
// exactly the given mean.
func (s summary) meanAtIndexIs(index int, mean float64) bool {
	return index < len(s.keys) && s.keys[index] == mean
} | src/toolkits/go-tdigest/summary.go | 0.66454 | 0.417331 | summary.go | starcoder |
package kmeans
import (
"fmt"
"math/rand"
"time"
)
// Kmeans configuration/option struct
type Kmeans struct {
	// when a plotter is set, Plot gets called after each iteration
	plotter Plotter
	// deltaThreshold (fraction between 0.0 and 1.0) aborts processing if
	// less than that fraction of data points shifted clusters in the last
	// iteration
	deltaThreshold float64
	// iterationThreshold aborts processing when the specified amount of
	// algorithm iterations was reached
	iterationThreshold int
}

// NewWithOptions returns a Kmeans configuration struct with custom settings.
// The plotter may be nil; deltaThreshold must lie strictly between 0 and 1.
func NewWithOptions(deltaThreshold float64, plotter Plotter) (Kmeans, error) {
	if deltaThreshold <= 0.0 || deltaThreshold >= 1.0 {
		return Kmeans{}, fmt.Errorf("threshold is out of bounds (must be >0.0 and <1.0, in percent)")
	}
	return Kmeans{
		plotter: plotter,
		deltaThreshold: deltaThreshold,
		iterationThreshold: 96, // hard cap on iterations regardless of convergence
	}, nil
}

// New returns a Kmeans configuration struct with default settings
// (1% delta threshold, no plotter). The NewWithOptions error is ignored
// because the fixed arguments are known to be valid.
func New() Kmeans {
	m, _ := NewWithOptions(0.01, nil)
	return m
}
// randomizeClusters creates k clusters whose centers are drawn uniformly
// at random from [0,1) in every dimension of the dataset.
func randomizeClusters(k int, dataset Points) (Clusters, error) {
	var c Clusters
	if len(dataset) == 0 || len(dataset[0]) == 0 {
		return c, fmt.Errorf("there must be at least one dimension in the data set")
	}
	if k == 0 {
		return c, fmt.Errorf("k must be greater than 0")
	}
	// NOTE(review): rand.Seed is deprecated since Go 1.20, and reseeding on
	// every call can repeat sequences when invoked in quick succession —
	// consider seeding once at program startup instead.
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < k; i++ {
		var p Point
		for j := 0; j < len(dataset[0]); j++ {
			p = append(p, rand.Float64())
		}
		c = append(c, Cluster{
			Center: p,
		})
	}
	return c, nil
}
// Partition executes the k-means algorithm on the given dataset and
// partitions it into k clusters. It alternates assignment and update
// steps until fewer than deltaThreshold of the points change cluster,
// or iterationThreshold iterations have run.
func (m Kmeans) Partition(dataset Points, k int) (Clusters, error) {
	if k > len(dataset) {
		return Clusters{}, fmt.Errorf("the size of the data set must at least equal k")
	}
	clusters, err := randomizeClusters(k, dataset)
	if err != nil {
		return Clusters{}, err
	}
	// points[p] remembers which cluster data point p currently belongs to,
	// so each iteration can count how many points switched clusters.
	points := make([]int, len(dataset))
	changes := 1
	for i := 0; changes > 0; i++ {
		changes = 0
		clusters.reset()
		// Assignment step: attach every point to its nearest cluster center.
		for p, point := range dataset {
			ci := clusters.Nearest(point)
			clusters[ci].Points = append(clusters[ci].Points, point)
			if points[p] != ci {
				points[p] = ci
				changes++
			}
		}
		for ci := 0; ci < len(clusters); ci++ {
			if len(clusters[ci].Points) == 0 {
				// During the iterations, if any of the cluster centers has no
				// data points associated with it, assign a random data point
				// to it.
				// Also see: http://user.ceng.metu.edu.tr/~tcan/ceng465_f1314/Schedule/KMeansEmpty.html
				var ri int
				for {
					// find a cluster with at least two data points, otherwise
					// we're just emptying one cluster to fill another
					ri = rand.Intn(len(dataset))
					if len(clusters[points[ri]].Points) > 1 {
						break
					}
				}
				clusters[ci].Points = append(clusters[ci].Points, dataset[ri])
				points[ri] = ci
			}
		}
		// Update step: recompute each center from its new members.
		if changes > 0 {
			clusters.recenter()
		}
		if m.plotter != nil {
			m.plotter.Plot(clusters, i)
		}
		if i == m.iterationThreshold ||
			changes < int(float64(len(dataset))*m.deltaThreshold) {
			// fmt.Println("Aborting:", changes, int(float64(len(dataset))*m.TerminationThreshold))
			break
		}
	}
	return clusters, nil
} | vendor/github.com/muesli/kmeans/kmeans.go | 0.729712 | 0.536313 | kmeans.go | starcoder |
package grue
import "math"
// Types here are abstracted from specific graphics library.
// Although pixel has same types, we need not to create dependency
// on pixel.
// Vec describes a point or vector on a surface.
type Vec struct {
	X, Y float64
}

// Add returns the sum of the vectors v and u.
func (v Vec) Add(u Vec) Vec {
	return Vec{v.X + u.X, v.Y + u.Y}
}

// Sub returns the result of subtracting u from v.
func (v Vec) Sub(u Vec) Vec {
	return Vec{v.X - u.X, v.Y - u.Y}
}

// Len returns the length of the vector v.
func (v Vec) Len() float64 {
	return math.Hypot(v.X, v.Y)
}

// Half returns the vector scaled by one half.
func (v Vec) Half() Vec {
	return Vec{
		X: v.X / 2,
		// BUG FIX: Y previously used v.X / 2, discarding the Y component.
		Y: v.Y / 2,
	}
}

// ZR returns a zero-size rectangle centered at the point.
func (v Vec) ZR() Rect {
	return Rect{
		Min: v,
		Max: v,
	}
}

// Rect describes a rectangular area on a surface.
type Rect struct {
	Min, Max Vec
}

// Size returns the width and height of the rectangle as a Vec.
func (r Rect) Size() Vec {
	return Vec{
		X: r.Max.X - r.Min.X,
		Y: r.Max.Y - r.Min.Y,
	}
}

// Center returns the center point of the rectangle.
func (r Rect) Center() Vec {
	return Vec{
		X: (r.Max.X + r.Min.X) / 2,
		Y: (r.Max.Y + r.Min.Y) / 2,
	}
}

// W calculates the width of the rectangle.
func (r Rect) W() float64 {
	return r.Max.X - r.Min.X
}

// H calculates the height of the rectangle.
func (r Rect) H() float64 {
	return r.Max.Y - r.Min.Y
}

// Expanded returns the rectangle expanded by d pixels on every side.
// If d is negative, the rectangle is shrunk instead.
func (r Rect) Expanded(d float64) Rect {
	r.Min.X -= d
	r.Min.Y -= d
	r.Max.X += d
	r.Max.Y += d
	return r
}

// Extended returns the rectangle expanded by the given per-side
// distances in pixels. Negative distances shrink that side instead.
func (r Rect) Extended(left, bottom, right, top float64) Rect {
	r.Min.X -= left
	r.Min.Y -= bottom
	r.Max.X += right
	r.Max.Y += top
	return r
}

// Moved returns the Rect moved (both Min and Max) by the given vector delta.
func (r Rect) Moved(delta Vec) Rect {
	return Rect{
		Min: r.Min.Add(delta),
		Max: r.Max.Add(delta),
	}
}

// SetCenter returns a Rect of the same size as r whose center is at pos.
func (r Rect) SetCenter(pos Vec) Rect {
	// BUG FIX: previously the full width/height was used here, producing
	// a rectangle twice the size of r.
	w2 := r.W() / 2
	h2 := r.H() / 2
	return Rect{
		Min: V(pos.X-w2, pos.Y-h2),
		Max: V(pos.X+w2, pos.Y+h2),
	}
}

// Contains checks whether a vector u is contained within this Rect (including it's borders).
func (r Rect) Contains(u Vec) bool {
	return r.Min.X <= u.X && u.X <= r.Max.X && r.Min.Y <= u.Y && u.Y <= r.Max.Y
}

// V returns an initialized Vec.
func V(x, y float64) Vec {
	return Vec{x, y}
}

// R returns an initialized Rect.
func R(x1, y1, x2, y2 float64) Rect {
	return Rect{
		Min: Vec{x1, y1},
		Max: Vec{x2, y2},
	}
}

// R0 creates a Rect with Min at (0, 0).
func R0(w, h float64) Rect {
	return Rect{Max: V(w, h)}
}
// Align defines object alignment relative to parent.
type Align int

const (
	// AlignDefault is default align (center in most cases)
	AlignDefault Align = 0
	// AlignLeft ...
	AlignLeft Align = 1
	// AlignRight ...
	AlignRight Align = 2
	// AlignTop ...
	AlignTop Align = 3
	// AlignBottom ...
	AlignBottom Align = 4
	// AlignTopLeft ...
	AlignTopLeft Align = 5
	// AlignTopRight ...
	AlignTopRight Align = 6
	// AlignBottomLeft ...
	AlignBottomLeft Align = 7
	// AlignBottomRight ...
	AlignBottomRight Align = 8
	// AlignCenter is explicit center alignment
	AlignCenter Align = 10
)

// AlignToRect returns the Vec that Rect r has to be moved by in order
// to be aligned relative to dst Rect with alignment al.
func (r Rect) AlignToRect(dst Rect, al Align) Vec {
	// Start from dst's center (the default/center alignment), then nudge
	// horizontally and/or vertically for edge and corner alignments.
	delta := dst.Center()
	dx := (dst.W() - r.W()) / 2
	dy := (dst.H() - r.H()) / 2
	switch al {
	case AlignRight, AlignTopRight, AlignBottomRight:
		delta.X += dx
	case AlignLeft, AlignTopLeft, AlignBottomLeft:
		delta.X -= dx
	}
	switch al {
	case AlignTop, AlignTopLeft, AlignTopRight:
		delta.Y += dy
	case AlignBottom, AlignBottomLeft, AlignBottomRight:
		delta.Y -= dy
	}
	return delta
}
// AlignToPoint returns the Vec that Rect r has to be moved by in order
// to be aligned relative to the point dst with alignment al. It is
// equivalent to aligning against a zero-size Rect located at dst.
func (r Rect) AlignToPoint(dst Vec, al Align) Vec {
	return r.AlignToRect(Rect{dst, dst}, al)
} | geometry.go | 0.92597 | 0.604574 | geometry.go | starcoder |
package magi
import (
"image"
"image/color"
"github.com/golang/freetype/raster"
)
type RepeatOp int
const (
RepeatBoth RepeatOp = iota
RepeatX
RepeatY
RepeatNone
)
type Pattern interface {
ColorAt(x, y int) color.Color
}
// Solid Pattern
type solidPattern struct {
color color.Color
}
func (p *solidPattern) ColorAt(x, y int) color.Color {
return p.color
}
func NewSolidPattern(color color.Color) Pattern {
return &solidPattern{color: color}
}
// Surface Pattern
type surfacePattern struct {
im image.Image
op RepeatOp
}
func (p *surfacePattern) ColorAt(x, y int) color.Color {
b := p.im.Bounds()
switch p.op {
case RepeatX:
if y >= b.Dy() {
return color.Transparent
}
case RepeatY:
if x >= b.Dx() {
return color.Transparent
}
case RepeatNone:
if x >= b.Dx() || y >= b.Dy() {
return color.Transparent
}
}
x = x%b.Dx() + b.Min.X
y = y%b.Dy() + b.Min.Y
return p.im.At(x, y)
}
func NewSurfacePattern(im image.Image, op RepeatOp) Pattern {
return &surfacePattern{im: im, op: op}
}
// patternPainter is a raster.Painter that fills rasterized spans with
// colors taken from a Pattern, optionally modulated by an alpha mask.
type patternPainter struct {
	im *image.RGBA
	mask *image.Alpha
	p Pattern
}

// Paint satisfies the Painter interface.
// Each span is clipped to the image bounds; every covered pixel is then
// blended with the pattern color, weighted by the span's coverage alpha
// and (when a mask is set) the mask's alpha at that pixel.
func (r *patternPainter) Paint(ss []raster.Span, done bool) {
	b := r.im.Bounds()
	for _, s := range ss {
		// Clip the span vertically and horizontally to the image.
		if s.Y < b.Min.Y {
			continue
		}
		if s.Y >= b.Max.Y {
			return
		}
		if s.X0 < b.Min.X {
			s.X0 = b.Min.X
		}
		if s.X1 > b.Max.X {
			s.X1 = b.Max.X
		}
		if s.X0 >= s.X1 {
			continue
		}
		const m = 1<<16 - 1
		y := s.Y - r.im.Rect.Min.Y
		x0 := s.X0 - r.im.Rect.Min.X
		// RGBAPainter.Paint() in $GOPATH/src/github.com/golang/freetype/raster/paint.go
		i0 := (s.Y-r.im.Rect.Min.Y)*r.im.Stride + (s.X0-r.im.Rect.Min.X)*4
		i1 := i0 + (s.X1-s.X0)*4
		for i, x := i0, x0; i < i1; i, x = i+4, x+1 {
			ma := s.Alpha
			if r.mask != nil {
				// Scale the span coverage by the mask's alpha; fully
				// masked-out pixels are skipped entirely.
				ma = ma * uint32(r.mask.AlphaAt(x, y).A) / 255
				if ma == 0 {
					continue
				}
			}
			c := r.p.ColorAt(x, y)
			cr, cg, cb, ca := c.RGBA()
			dr := uint32(r.im.Pix[i+0])
			dg := uint32(r.im.Pix[i+1])
			db := uint32(r.im.Pix[i+2])
			da := uint32(r.im.Pix[i+3])
			// 16-bit fixed-point blend of source over destination,
			// truncated back to 8 bits per channel.
			a := (m - (ca * ma / m)) * 0x101
			r.im.Pix[i+0] = uint8((dr*a + cr*ma) / m >> 8)
			r.im.Pix[i+1] = uint8((dg*a + cg*ma) / m >> 8)
			r.im.Pix[i+2] = uint8((db*a + cb*ma) / m >> 8)
			r.im.Pix[i+3] = uint8((da*a + ca*ma) / m >> 8)
		}
	}
}

// newPatternPainter returns a painter drawing into im with pattern p
// and an optional alpha mask (mask may be nil).
func newPatternPainter(im *image.RGBA, mask *image.Alpha, p Pattern) *patternPainter {
	return &patternPainter{im, mask, p}
} | pattern.go | 0.701202 | 0.409044 | pattern.go | starcoder |
package benchstat
import (
"fmt"
"strings"
)
// A Scaler is a function that scales and formats a measurement.
// All measurements within a given table row are formatted
// using the same scaler, so that the units are consistent
// across the row.
type Scaler func(float64) string

// NewScaler returns a Scaler appropriate for formatting
// the measurement val, which has the given unit.
// The breakpoints (99.5, 9.95, 0.995 of each scale) are chosen so the
// printed value keeps roughly three significant digits after rounding
// at the selected precision (e.g. 99.5 prints as "100").
func NewScaler(val float64, unit string) Scaler {
	if hasBaseUnit(unit, "ns/op") || hasBaseUnit(unit, "ns/GC") {
		return timeScaler(val)
	}
	var format string
	var scale float64
	var suffix string
	// MB/s measurements are converted to B/s before choosing an SI prefix.
	prescale := 1.0
	if hasBaseUnit(unit, "MB/s") {
		prescale = 1e6
	}
	switch x := val * prescale; {
	case x >= 99500000000000:
		format, scale, suffix = "%.0f", 1e12, "T"
	case x >= 9950000000000:
		format, scale, suffix = "%.1f", 1e12, "T"
	case x >= 995000000000:
		format, scale, suffix = "%.2f", 1e12, "T"
	case x >= 99500000000:
		format, scale, suffix = "%.0f", 1e9, "G"
	case x >= 9950000000:
		format, scale, suffix = "%.1f", 1e9, "G"
	case x >= 995000000:
		format, scale, suffix = "%.2f", 1e9, "G"
	case x >= 99500000:
		format, scale, suffix = "%.0f", 1e6, "M"
	case x >= 9950000:
		format, scale, suffix = "%.1f", 1e6, "M"
	case x >= 995000:
		format, scale, suffix = "%.2f", 1e6, "M"
	case x >= 99500:
		format, scale, suffix = "%.0f", 1e3, "k"
	case x >= 9950:
		format, scale, suffix = "%.1f", 1e3, "k"
	case x >= 995:
		format, scale, suffix = "%.2f", 1e3, "k"
	case x >= 99.5:
		format, scale, suffix = "%.0f", 1, ""
	case x >= 9.95:
		format, scale, suffix = "%.1f", 1, ""
	default:
		format, scale, suffix = "%.2f", 1, ""
	}
	// Append the conventional unit suffix, if any.
	if hasBaseUnit(unit, "B/op") || hasBaseUnit(unit, "bytes/op") || hasBaseUnit(unit, "bytes") {
		suffix += "B"
	}
	if hasBaseUnit(unit, "MB/s") {
		suffix += "B/s"
	}
	scale /= prescale
	return func(val float64) string {
		return fmt.Sprintf(format+suffix, val/scale)
	}
}
// timeScaler returns a Scaler for durations measured in nanoseconds,
// selecting the unit (s, ms, µs, ns) and precision that keeps roughly
// three significant digits for the reference value ns.
func timeScaler(ns float64) Scaler {
	var format string
	var scale float64
	switch x := ns / 1e9; {
	case x >= 99.5:
		format, scale = "%.0fs", 1
	case x >= 9.95:
		format, scale = "%.1fs", 1
	case x >= 0.995:
		format, scale = "%.2fs", 1
	case x >= 0.0995:
		format, scale = "%.0fms", 1000
	case x >= 0.00995:
		format, scale = "%.1fms", 1000
	case x >= 0.000995:
		format, scale = "%.2fms", 1000
	case x >= 0.0000995:
		format, scale = "%.0fµs", 1000*1000
	case x >= 0.00000995:
		format, scale = "%.1fµs", 1000*1000
	case x >= 0.000000995:
		format, scale = "%.2fµs", 1000*1000
	case x >= 0.0000000995:
		format, scale = "%.0fns", 1000*1000*1000
	case x >= 0.00000000995:
		format, scale = "%.1fns", 1000*1000*1000
	default:
		format, scale = "%.2fns", 1000*1000*1000
	}
	return func(ns float64) string {
		return fmt.Sprintf(format, ns/1e9*scale)
	}
}

// hasBaseUnit reports whether s has unit unit.
// For now, it reports whether s == unit or s ends in -unit.
func hasBaseUnit(s, unit string) bool {
	return s == unit || strings.HasSuffix(s, "-"+unit)
} | benchstat/scaler.go | 0.778733 | 0.522811 | scaler.go | starcoder |
package cipher
var (
upperCase = [26]byte{'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
lowerCase = [26]byte{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}
)
type (
newShiftStruct struct {
distance int
}
newVigenereStruct struct {
key string
}
)
// NewCaesar returns a Caesar cipher, which is a shift cipher of distance 3.
func NewCaesar() Cipher {
	return NewShift(3)
}

// NewShift returns a shift cipher of a given distance.
// Valid distances are 1..25 and -25..-1; anything else (including 0)
// yields nil.
func NewShift(distance int) Cipher {
	if (distance < 1 || distance > 25) && (distance < -25 || distance > -1) {
		return nil
	}
	return newShiftStruct{distance}
}

// NewVigenere returns a Vigenère cipher with a given key.
// The key must consist only of lowercase letters a-z and contain at
// least one letter other than 'a'; otherwise (including for an empty
// key) nil is returned.
func NewVigenere(key string) Cipher {
	// counter tracks how many key characters are exactly 'a'.
	counter := 0
	for i := 0; i < len(key); i++ {
		if key[i] == 'a' {
			counter++
		} else if key[i] < 'a' || key[i] > 'z' {
			return nil
		}
	}
	// An all-'a' key (or empty key) would encode every text to itself.
	if counter == len(key) {
		return nil
	}
	return newVigenereStruct{key}
}
// Encode shifts every letter of s by the cipher's distance, lowercasing
// the result and dropping any non-letter characters.
func (n newShiftStruct) Encode(s string) string {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c >= 'A' && c <= 'Z':
			out = append(out, lowerCase[(searchIndex(c, upperCase)+(26+n.distance))%26])
		case c >= 'a' && c <= 'z':
			out = append(out, lowerCase[(searchIndex(c, lowerCase)+(26+n.distance))%26])
		}
	}
	return string(out)
}

// Decode reverses Encode; it assumes s contains only lowercase letters.
func (n newShiftStruct) Decode(s string) string {
	out := []byte(s)
	for i, c := range out {
		out[i] = lowerCase[(searchIndex(c, lowerCase)+(26-n.distance))%26]
	}
	return string(out)
}

// Encode applies the Vigenère cipher to s using the struct's key. Text
// and key are first stripped of non-letters (with the key repeated to
// cover the text); output is lowercase.
func (n newVigenereStruct) Encode(s string) string {
	text, key := reshape(s, n.key)
	out := []byte(text)
	for i := 0; i < len(text); i++ {
		c := text[i]
		switch {
		case c >= 'A' && c <= 'Z':
			out[i] = lowerCase[(searchIndex(c, upperCase)+searchIndex(key[i], lowerCase))%26]
		case c >= 'a' && c <= 'z':
			out[i] = lowerCase[(searchIndex(c, lowerCase)+searchIndex(key[i], lowerCase))%26]
		}
	}
	return string(out)
}

// Decode reverses the Vigenère Encode; it assumes s contains only
// lowercase letters.
func (n newVigenereStruct) Decode(s string) string {
	text, key := reshape(s, n.key)
	out := []byte(text)
	for i := range out {
		out[i] = lowerCase[(searchIndex(out[i], lowerCase)+(26-searchIndex(key[i], lowerCase)))%26]
	}
	return string(out)
}
// reshape strips all non-letter characters from both s and key, then
// repeats key cyclically until it is at least as long as s.
// NOTE(review): the character removal rebuilds the string per deletion,
// which is O(n²) for inputs with many non-letters.
func reshape(s, key string) (string, string) {
	for i := len(s) - 1; i >= 0; i-- {
		if (s[i] < 'A' || s[i] > 'Z') && (s[i] < 'a' || s[i] > 'z') {
			s = s[:i] + s[i+1:]
		}
	}
	for i := len(key) - 1; i >= 0; i-- {
		if (key[i] < 'A' || key[i] > 'Z') && (key[i] < 'a' || key[i] > 'z') {
			key = key[:i] + key[i+1:]
		}
	}
	if len(s) > len(key) {
		keyByte := []byte(key)
		for i := 0; len(s) > len(keyByte); i++ {
			keyByte = append(keyByte, key[i%len(key)])
		}
		key = string(keyByte)
	}
	return s, key
}

// searchIndex returns the position of b within array.
// NOTE(review): this panics (index out of range) if b is not present;
// callers must guarantee b is one of the 26 letters in array.
func searchIndex(b byte, array [26]byte) (index int) {
	for {
		if b == array[index] {
			return index
		}
		index++
	}
} | Extra exercises/Easy/simple-cipher/simple_cipher.go | 0.734596 | 0.470189 | simple_cipher.go | starcoder |
package main
import (
"fmt"
"math"
)
// ResultTable stores the results after applying the Dijkstra algorithm:
// the origin row plus one row per graph vertex.
type ResultTable struct {
	origin *Row
	table []*Row
}

// Exported methods

// Print writes the result for each vertex to the console.
func (t *ResultTable) Print() {
	for _, row := range t.table {
		fmt.Println(row)
	}
}

// ApplyDijkstra applies the Dijkstra algorithm starting from vertex v:
// it relaxes every outgoing edge of v, marks v visited, and recurses
// into the cheapest unvisited vertex.
func (t *ResultTable) ApplyDijkstra(edges []edge, v string) {
	// Look for edges with from equals to the given vertex
	var vertexEdges []edge
	for _, edge := range edges {
		if edge.from == v {
			vertexEdges = append(vertexEdges, edge)
		}
	}
	for _, edge := range vertexEdges {
		from := edge.from
		fromRow := t.findRowByVertice(from)
		fromCost := fromRow.cost
		to := edge.to
		toRow := t.findRowByVertice(to)
		toCost := toRow.cost
		newCost := fromCost + edge.cost
		// Update the cost if we find less cost than the current one
		if newCost < toCost {
			toRow.SetCost(newCost)
		}
	}
	// Mark the vertex as visited for the next iterations
	t.findRowByVertice(v).SetAsVisited()
	// Look for next vertex
	t.nextVertex(edges)
}

// NewResultTable creates a new table with the origin vertex called S
// and the rest of the vertices from the graph. The origin starts with
// cost 0; every other vertex starts at the maximum cost and unvisited.
func NewResultTable(vertices []string) ResultTable {
	o := &Row{vertex: "S", cost: 0, visited: false}
	resultTable := ResultTable{
		origin: o,
		table:  []*Row{o},
	}
	resultTable.buildVerticesRows(vertices)
	return resultTable
}

// Unexported methods

// buildVerticesRows appends a max-cost, unvisited row for each vertex.
func (t *ResultTable) buildVerticesRows(vertices []string) {
	for _, v := range vertices {
		t.table = append(t.table, &Row{vertex: v, cost: math.MaxInt64, visited: false})
	}
}

// findRowByVertice returns the row for vertex v.
// NOTE(review): returns nil when v is unknown (callers dereference the
// result without checking) and the last match if v appears twice.
func (t *ResultTable) findRowByVertice(v string) *Row {
	var r *Row
	for _, row := range t.table {
		if row.vertex == v {
			r = row
		}
	}
	return r
}
// nextVertex selects the unvisited vertex with the lowest tentative
// cost and continues the algorithm from it. It is the recursion step of
// ApplyDijkstra; when every vertex has been visited it simply returns.
func (t *ResultTable) nextVertex(edges []edge) {
	var next *Row
	var unvisited []*Row
	// Collect the vertices that have not been processed yet.
	for _, row := range t.table {
		if !row.visited {
			unvisited = append(unvisited, row)
		}
	}
	// Pick the unvisited vertex with the lowest tentative cost.
	if len(unvisited) > 0 {
		lowestCost := math.MaxInt64
		for _, row := range unvisited {
			if row.cost < lowestCost {
				// BUG FIX: lowestCost was never updated, so the last row
				// with cost < MaxInt64 was chosen instead of the cheapest
				// one, breaking Dijkstra's greedy invariant.
				lowestCost = row.cost
				next = row
			}
		}
	}
	// Recursive call to apply the algorithm to the selected vertex.
	if next != nil {
		t.ApplyDijkstra(edges, next.vertex)
	}
} | graphs/dijkstras_algorithm/results_table.go | 0.644896 | 0.498108 | results_table.go | starcoder |
package chess
const (
	// numOfSquaresInBoard is the total number of squares on a chess board.
	numOfSquaresInBoard = 64
	// numOfSquaresInRow is the number of squares per rank (and per file).
	numOfSquaresInRow = 8
)

// A Square is one of the 64 rank and file combinations that make up a chess board.
// Squares are numbered 0 (a1) through 63 (h8), file-major within each rank.
type Square int8

// File returns the square's file.
func (sq Square) File() File {
	return File(int(sq) % numOfSquaresInRow)
}

// Rank returns the square's rank.
func (sq Square) Rank() Rank {
	return Rank(int(sq) / numOfSquaresInRow)
}

// String returns the square in algebraic notation, e.g. "e4".
func (sq Square) String() string {
	return sq.File().String() + sq.Rank().String()
}

// color returns the color of the square: squares whose rank parity
// equals their file parity (a1, c1, b2, ...) are the dark (Black) ones.
func (sq Square) color() Color {
	if ((sq / 8) % 2) == (sq % 2) {
		return Black
	}
	return White
}

// getSquare builds a Square from a file and a rank.
func getSquare(f File, r Rank) Square {
	return Square((int(r) * 8) + int(f))
}
const (
NoSquare Square = iota - 1
A1
B1
C1
D1
E1
F1
G1
H1
A2
B2
C2
D2
E2
F2
G2
H2
A3
B3
C3
D3
E3
F3
G3
H3
A4
B4
C4
D4
E4
F4
G4
H4
A5
B5
C5
D5
E5
F5
G5
H5
A6
B6
C6
D6
E6
F6
G6
H6
A7
B7
C7
D7
E7
F7
G7
H7
A8
B8
C8
D8
E8
F8
G8
H8
)
const (
fileChars = "abcdefgh"
rankChars = "12345678"
)
// A Rank is the rank of a square.
type Rank int8
const (
Rank1 Rank = iota
Rank2
Rank3
Rank4
Rank5
Rank6
Rank7
Rank8
)
func (r Rank) String() string {
return rankChars[r : r+1]
}
// A File is the file of a square.
type File int8
const (
FileA File = iota
FileB
FileC
FileD
FileE
FileF
FileG
FileH
)
func (f File) String() string {
return fileChars[f : f+1]
}
// StrToSquare converts algebraic notation such as "e4" into a Square,
// returning NoSquare for strings that name no square.
func StrToSquare(squareStr string) (square Square) {
	sq, ok := StrToSquareMap[squareStr]
	if !ok {
		return NoSquare
	}
	return sq
}
var (
StrToSquareMap = map[string]Square{
"a1": A1, "a2": A2, "a3": A3, "a4": A4, "a5": A5, "a6": A6, "a7": A7, "a8": A8,
"b1": B1, "b2": B2, "b3": B3, "b4": B4, "b5": B5, "b6": B6, "b7": B7, "b8": B8,
"c1": C1, "c2": C2, "c3": C3, "c4": C4, "c5": C5, "c6": C6, "c7": C7, "c8": C8,
"d1": D1, "d2": D2, "d3": D3, "d4": D4, "d5": D5, "d6": D6, "d7": D7, "d8": D8,
"e1": E1, "e2": E2, "e3": E3, "e4": E4, "e5": E5, "e6": E6, "e7": E7, "e8": E8,
"f1": F1, "f2": F2, "f3": F3, "f4": F4, "f5": F5, "f6": F6, "f7": F7, "f8": F8,
"g1": G1, "g2": G2, "g3": G3, "g4": G4, "g5": G5, "g6": G6, "g7": G7, "g8": G8,
"h1": H1, "h2": H2, "h3": H3, "h4": H4, "h5": H5, "h6": H6, "h7": H7, "h8": H8,
}
) | square.go | 0.823506 | 0.407039 | square.go | starcoder |
package dmap
import (
"bytes"
"fmt"
"math"
)
// Point is a representation of a point on a map
type Point interface {
GetXY() (int, int)
}
// Map is a map to calculate dmaps with. All methods should be linear
// time, as the algorithm will call them a lot. Also your map should
// be statically sized; if the map is dynamically sized, a new dmap
// will need to be created for it whenever it changes size.
type Map interface {
SizeX() int
SizeY() int
IsPassable(int, int) bool
OOB(int, int) bool
}
// Rank is the rank of a tile - lower is closer to the target
type Rank uint16
// RankMax is the highest possible rank. It takes a value a little
// below its implementations maximum to prevent overflow
const RankMax = math.MaxUint16 - 10
// DijkstraMap is a representation of a Brogue-style 'Dijkstra'
// map. To reach a target, an AI should try to minimize the rank of
// the tile it's standing on (targets have a value of zero)
type DijkstraMap struct {
Points [][]Rank
M Map
NeigbourFunc func(d *DijkstraMap, x, y int) []WeightedPoint
}
// WeightedPoint is a Point that also has a rank
type WeightedPoint struct {
X int
Y int
Val Rank
}
// GetXY implements the Point interface
func (d *WeightedPoint) GetXY() (int, int) {
return d.X, d.Y
}
// BlankDMap creates a blank Dijkstra map to be used with the map passed
// to it. Every cell starts at RankMax; Calc/Recalc then lower cells
// toward the targets.
func BlankDMap(m Map, neigbourfunc func(d *DijkstraMap, x, y int) []WeightedPoint) *DijkstraMap {
	ret := make([][]Rank, m.SizeX())
	for i := range ret {
		ret[i] = make([]Rank, m.SizeY())
		for j := range ret[i] {
			ret[i][j] = RankMax
		}
	}
	return &DijkstraMap{ret, m, neigbourfunc}
}
// ManhattanNeighbours returns the neighbours of the block x, y to the
// north, south, east, and west (4-connectivity).
func ManhattanNeighbours(d *DijkstraMap, x, y int) []WeightedPoint {
	return []WeightedPoint{
		d.GetValPoint(x+1, y),
		d.GetValPoint(x-1, y),
		d.GetValPoint(x, y-1),
		d.GetValPoint(x, y+1),
	}
}

// DiagonalNeighbours returns the neighbours of the block x, y to the
// north, south, east, west, NE, SE, NW, and SW (8-connectivity).
func DiagonalNeighbours(d *DijkstraMap, x, y int) []WeightedPoint {
	return []WeightedPoint{
		d.GetValPoint(x+1, y),
		d.GetValPoint(x-1, y),
		d.GetValPoint(x, y-1),
		d.GetValPoint(x, y+1),
		d.GetValPoint(x+1, y+1),
		d.GetValPoint(x+1, y-1),
		d.GetValPoint(x-1, y+1),
		d.GetValPoint(x-1, y-1),
	}
}
// Calc calculates the Dijkstra map with points given as targets. You
// need to blank the map before using this method. It's recommended to
// use this one initially, but to use a Recalc instead for subsequent
// moves, since Recalc, unlike BlankDMap, doesn't allocate memory.
func (d *DijkstraMap) Calc(points ...Point) {
	// Targets get rank 0; all other cells keep their current (blanked) rank.
	for _, point := range points {
		x, y := point.GetXY()
		d.Points[x][y] = 0
	}
	// Relax every passable cell to (lowest neighbour + 1) until a full
	// sweep makes no change. Each pass also visits the mirrored cell
	// (x1, y1) so ranks propagate from both corners of the grid at once.
	mademutation := true
	for mademutation {
		mademutation = false
		for x := range d.Points {
			for y := range d.Points[x] {
				if d.M.IsPassable(x, y) {
					ln := d.LowestNeighbour(x, y).Val
					if d.Points[x][y] > ln+1 {
						d.Points[x][y] = ln + 1
						mademutation = true
					}
				}
				x1, y1 := (d.M.SizeX()-1)-x, (d.M.SizeY()-1)-y
				if d.M.IsPassable(x1, y1) {
					ln := d.LowestNeighbour(x1, y1).Val
					if d.Points[x1][y1] > ln+1 {
						d.Points[x1][y1] = ln + 1
						mademutation = true
					}
				}
			}
		}
	}
}
// Recalc recalculates the Dijkstra map with points given as
// targets. It's essentially equivalent to a blank followed by a calc,
// but should be a bit faster because it doesn't reallocate the
// memory. As per the note for DijkstraMap, don't use this method if
// your map is dynamically sized; you'll just have to use BlankDMap
// and Calc as if creating a new dmap every update.
func (d *DijkstraMap) Recalc(points ...Point) {
	// Reset every cell to the maximum rank, reusing the existing slices.
	for i := range d.Points {
		for j := range d.Points[i] {
			d.Points[i][j] = RankMax
		}
	}
	d.Calc(points...)
}

// GetValPoint gets the weighted point at X, Y of the Dijkstra
// map. Points that are out of bounds count as maximum rank (so
// shouldn't be targeted)
func (d *DijkstraMap) GetValPoint(x, y int) WeightedPoint {
	if d.M.OOB(x, y) {
		return WeightedPoint{x, y, RankMax}
	}
	return WeightedPoint{x, y, d.Points[x][y]}
}

// LowestNeighbour returns the neighbour of the point at x, y with the
// lowest rank; ties are resolved in favour of the earliest neighbour
// returned by NeigbourFunc.
func (d *DijkstraMap) LowestNeighbour(x, y int) WeightedPoint {
	vals := d.NeigbourFunc(d, x, y)
	var lv Rank = RankMax
	ret := vals[0]
	for _, val := range vals {
		if val.Val < lv {
			lv = val.Val
			ret = val
		}
	}
	return ret
}
// String returns a string representation of a Dijkstra Map.
// Each output line corresponds to one value of the first (x) index.
func (d *DijkstraMap) String() string {
	buf := bytes.Buffer{}
	for x := range d.Points {
		for y := range d.Points[x] {
			buf.WriteString(fmt.Sprintf("%6d", d.Points[x][y]))
			buf.WriteString(", ")
		}
		buf.WriteRune('\n')
	}
	return buf.String()
} | dijkstra.go | 0.743354 | 0.548553 | dijkstra.go | starcoder |
package epb
// Regulator represents an energy regulator.
type Regulator string

// Energy regulators
const (
	RegulatorBrussels Regulator = "brussels"
	RegulatorFlanders Regulator = "flanders"
	RegulatorFrance   Regulator = "france"
	RegulatorWallonia Regulator = "wallonia"
)

// energyBand pairs an upper consumption bound (inclusive, kWh/m².year)
// with its energy class label.
type energyBand struct {
	max   float64
	class string
}

// energyBands holds each regulator's class table, ordered by increasing
// upper bound; a consumption above every bound falls into class "G".
// Flanders has no table yet, so it is deliberately absent.
var energyBands = map[Regulator][]energyBand{
	RegulatorBrussels: {
		{0, "A++"}, {15, "A+"}, {30, "A"}, {45, "A-"},
		{62, "B+"}, {78, "B"}, {95, "B-"},
		{113, "C+"}, {132, "C"}, {150, "C-"},
		{170, "D+"}, {190, "D"}, {210, "D-"},
		{232, "E+"}, {253, "E"}, {275, "E-"},
		{345, "F"},
	},
	RegulatorFrance: {
		{50, "A"}, {90, "B"}, {150, "C"}, {230, "D"}, {330, "E"}, {450, "F"},
	},
	RegulatorWallonia: {
		{0, "A++"}, {45, "A+"}, {85, "A"}, {170, "B"},
		{255, "C"}, {340, "D"}, {425, "E"}, {510, "F"},
	},
}

// EnergyClass returns the energy class of a building (like `A++`, `A+`, `A`, `B`...)
// following the regulator rules, given its energy consumption (in kWh/m².year).
// If the energy class cannot be calculated, an empty string is returned.
func EnergyClass(reg Regulator, cons float64) string {
	bands, ok := energyBands[reg]
	if !ok {
		// Unknown regulator, or a regulator (Flanders) with no rules yet.
		return ""
	}
	for _, b := range bands {
		if cons <= b.max {
			return b.class
		}
	}
	return "G"
}
// TotalConsumption returns the total consumption of a building in a
// year (kWh/year), given its energy consumption (in kWh/m².year) and
// its total area (in m²).
func TotalConsumption(cons, area float64) float64 {
	return cons * area
} | epb.go | 0.666714 | 0.719421 | epb.go | starcoder |
package lib
import (
"fmt"
"math/big"
)
// This library implements basic float functions using big.Float objects.
// This is necessary in order to ensure interoperability across different
// machines. If we instead were to use float64's for our computations
// naively, then machines with different rounding rules or different
// precision for intermediate values could produce different results that
// would cause blockchain forks. Having our own library ensures not only
// that such forks can't occur but also makes it so that implementing a
// node in another language is fairly straightforward because all of the
// operations are implemented in software.
const (
FloatPrecision uint = 53
)
func NewFloat() *big.Float {
// We force all calculations be done at a particular precision. This keeps
// all nodes in sync and avoids consensus issues around one node using a
// different precision than another node. We also force the same rounding
// mode for all calculations.
return big.NewFloat(0.0).SetPrec(FloatPrecision).SetMode(big.ToNearestEven)
}
func IntSub(a *big.Int, b *big.Int) *big.Int {
// TODO(performance): We should do this without creating an int copy, but
// this is easier to understand and deal with for now.
return big.NewInt(0).Sub(a, b)
}
func IntMul(a *big.Int, b *big.Int) *big.Int {
// TODO(performance): We should do this without creating an int copy, but
// this is easier to understand and deal with for now.
return big.NewInt(0).Mul(a, b)
}
func IntDiv(a *big.Int, b *big.Int) *big.Int {
// TODO(performance): We should do this without creating an int copy, but
// this is easier to understand and deal with for now.
return big.NewInt(0).Quo(a, b)
}
func IntAdd(a *big.Int, b *big.Int) *big.Int {
// TODO(performance): We should do this without creating an int copy, but
// this is easier to understand and deal with for now.
return big.NewInt(0).Add(a, b)
}
// Sub returns a - b in a freshly allocated float at the package precision.
// (Perf note retained from the original: the per-call NewFloat allocation is
// a known, accepted cost for readability.)
func Sub(a *big.Float, b *big.Float) *big.Float {
	result := NewFloat()
	return result.Sub(a, b)
}

// Mul returns a * b in a freshly allocated float at the package precision.
func Mul(a *big.Float, b *big.Float) *big.Float {
	result := NewFloat()
	return result.Mul(a, b)
}

// Div returns the quotient a / b in a freshly allocated float at the package
// precision.
func Div(a *big.Float, b *big.Float) *big.Float {
	result := NewFloat()
	return result.Quo(a, b)
}

// Add returns a + b in a freshly allocated float at the package precision.
func Add(a *big.Float, b *big.Float) *big.Float {
	result := NewFloat()
	return result.Add(a, b)
}
var (
	// Constants for BigFloatLog.
	// The values and hex annotations mirror the ln(2) split and minimax
	// polynomial coefficients used by the natural-log algorithm below.
	bigLn2Hi = NewFloat().SetFloat64(6.93147180369123816490e-01) /* 3fe62e42 fee00000 */
	bigLn2Lo = NewFloat().SetFloat64(1.90821492927058770002e-10) /* 3dea39ef 35793c76 */
	bigL1    = NewFloat().SetFloat64(6.666666666666735130e-01)   /* 3FE55555 55555593 */
	bigL2    = NewFloat().SetFloat64(3.999999999940941908e-01)   /* 3FD99999 9997FA04 */
	bigL3    = NewFloat().SetFloat64(2.857142874366239149e-01)   /* 3FD24924 94229359 */
	bigL4    = NewFloat().SetFloat64(2.222219843214978396e-01)   /* 3FCC71C5 1D8E78AF */
	bigL5    = NewFloat().SetFloat64(1.818357216161805012e-01)   /* 3FC74664 96CB03DE */
	bigL6    = NewFloat().SetFloat64(1.531383769920937332e-01)   /* 3FC39A09 D078C69F */
	bigL7    = NewFloat().SetFloat64(1.479819860511658591e-01)   /* 3FC2F112 DF3E5244 */
	bigSqrt2 = NewFloat().SetFloat64(1.41421356237309504880168872420969807856967187537694807317667974)
	bigHalf  = NewFloat().SetFloat64(.5)
	// NOTE(review): despite its name, bigNegativeOneOne holds +1 and is not
	// referenced anywhere in this file — confirm intent before relying on it.
	bigNegativeOneOne = NewFloat().SetUint64(1)
	bigOne            = NewFloat().SetUint64(1)
	bigTwo            = NewFloat().SetUint64(2)
	bigSqrt2Over2     = NewFloat().Quo(bigSqrt2, bigTwo)
	// Constants for BigFloatExpMulti: minimax polynomial coefficients for the
	// exp() approximation.
	bigP1 = NewFloat().SetFloat64(1.66666666666666657415e-01)  /* 0x3FC55555; 0x55555555 */
	bigP2 = NewFloat().SetFloat64(-2.77777777770155933842e-03) /* 0xBF66C16C; 0x16BEBD93 */
	bigP3 = NewFloat().SetFloat64(6.61375632143793436117e-05)  /* 0x3F11566A; 0xAF25DE2C */
	bigP4 = NewFloat().SetFloat64(-1.65339022054652515390e-06) /* 0xBEBBBD41; 0xC5D26BF1 */
	bigP5 = NewFloat().SetFloat64(4.13813679705723846039e-08)  /* 0x3E663769; 0x72BEA4D0 */
	// Constants for BigFloatExp.
	bigZero = NewFloat().SetUint64(0)
	// bigLog2e is log2(e), used to pick the power-of-two reduction in exp.
	bigLog2e = NewFloat().SetFloat64(1.44269504088896338700e+00)
)
// BigFloatLog returns the natural logarithm of x.
// It panics on infinite or non-positive input. The structure (argument
// reduction to [sqrt(2)/2, sqrt(2)) plus a rational polynomial in
// s = f/(2+f)) appears to follow the standard float64 log algorithm,
// re-expressed with the package's fixed-precision big.Float helpers.
func BigFloatLog(x *big.Float) *big.Float {
	// special cases
	// TODO: We should make the special cases work at some point.
	switch {
	case x.IsInf():
		panic(fmt.Sprintf("BigFloatLog: Cannot take log of an infinite number: %v", x))
	case x.Sign() <= 0:
		panic(fmt.Sprintf("BigFloatLog: Cannot take log of a number <= 0: %v", x))
	}
	// Reduce: write x = 2**ki * f1 with f1 in [1/2, 1), then renormalize so
	// f1 lies in [sqrt(2)/2, sqrt(2)), adjusting the exponent to match.
	f1 := NewFloat()
	ki := x.MantExp(f1)
	if f1.Cmp(bigSqrt2Over2) < 0 {
		f1 = Mul(f1, bigTwo)
		ki--
	}
	// f = f1 - 1, so log(x) = ki*ln(2) + log(1+f).
	f := Sub(f1, bigOne)
	k := NewFloat().SetInt64(int64(ki))
	// Compute log(1+f) via the polynomial R(s) in s = f/(2+f); t1/t2 split
	// the odd and even coefficients.
	twoPlusF := Add(bigTwo, f)
	s := Div(f, twoPlusF)
	s2 := Mul(s, s)
	s4 := Mul(s2, s2)
	t1 := Mul(s2, (Add(bigL1, Mul(s4, (Add(bigL3, Mul(s4, (Add(bigL5, Mul(s4, bigL7))))))))))
	t2 := Mul(s4, Add(bigL2, Mul(s4, Add(bigL4, Mul(s4, bigL6)))))
	R := Add(t1, t2)
	hfsq := Mul(bigHalf, NewFloat().Mul(f, f))
	// Recombine: k*ln2hi - ((hfsq - (s*(hfsq+R) + k*ln2lo)) - f), using the
	// hi/lo split of ln(2) to limit rounding error.
	return Sub(Mul(k, bigLn2Hi), Sub(Sub(hfsq, (Add(Mul(s, Add(hfsq, R)), Mul(k, bigLn2Lo)))), f))
}
// BigFloatLog2 returns the binary logarithm of x, computed as
// log(x) / log(2) so rounding behavior matches the rest of the package.
// The special cases are the same as for BigFloatLog.
func BigFloatLog2(x *big.Float) *big.Float {
	// Use a distinctly named local rather than shadowing the package-level
	// constant bigTwo, which the original did; the numeric result is
	// unchanged (log(2) is still recomputed each call, preserving exact
	// rounding for consensus).
	two := NewFloat().SetUint64(2)
	return Div(BigFloatLog(x), BigFloatLog(two))
}
// BigFloatExpMulti computes exp(hi - lo) * 2**k, where hi - lo is the
// reduced argument produced by BigFloatExp. The polynomial in bigP1..bigP5
// appears to mirror the standard float64 expmulti helper — the final
// SetMantExp plays the role of Ldexp, scaling the result by 2**k.
func BigFloatExpMulti(hi, lo *big.Float, k int64) *big.Float {
	r := Sub(hi, lo)
	t := Mul(r, r)
	// c approximates r - (exp(r) - 1 - r) via the minimax polynomial in t = r*r.
	c := Sub(r, Mul(t, (Add(bigP1, Mul(t, (Add(bigP2, Mul(t, (Add(bigP3, Mul(t, (Add(bigP4, Mul(t, bigP5))))))))))))))
	y := Sub(bigOne, (Sub((Sub(lo, Div((Mul(r, c)), (Sub(bigTwo, c))))), hi)))
	// TODO: make sure Ldexp can handle boundary k
	return NewFloat().SetMantExp(y, int(k))
}
// BigFloatExp returns a big.Float representation of exp(z).
// It panics on infinite input. The argument is reduced to z = k*ln(2) + r
// with r represented as hi - lo for extra precision, then recombined by
// BigFloatExpMulti as exp(r) * 2**k.
func BigFloatExp(z *big.Float) *big.Float {
	if z.IsInf() {
		panic("BigFloatExp: Cannot call exp with infinity")
	}
	// reduce; computed as r = hi - lo for extra precision.
	// k = round(z / ln 2), computed as z*log2(e) +/- 0.5 truncated, with the
	// sign of the rounding bias chosen to match z's sign. z == 0 leaves k = 0.
	var k int64
	switch {
	case z.Cmp(bigZero) < 0:
		k, _ = Sub(Mul(bigLog2e, z), bigHalf).Int64()
	case z.Cmp(bigZero) > 0:
		k, _ = Add(Mul(bigLog2e, z), bigHalf).Int64()
	}
	// hi - lo = z - k*ln(2), using the hi/lo split of ln(2) to limit
	// cancellation error.
	hi := Sub(z, Mul(NewFloat().SetInt64(k), bigLn2Hi))
	lo := Mul(NewFloat().SetInt64(k), bigLn2Lo)
	// compute
	return BigFloatExpMulti(hi, lo, k)
}
// Pow returns a big.Float representation of z**w.
func BigFloatPow(z *big.Float, w *big.Float) *big.Float {
if z.Sign() < 0 {
panic("Pow: negative base")
}
if z.Cmp(bigZero) == 0 {
return bigZero
}
// Pow(z, 0) = 1.0
if w.Sign() == 0 {
return bigOne
}
// Pow(z, 1) = z
// Pow(+Inf, n) = +Inf
if w.Cmp(bigOne) == 0 || z.IsInf() {
return NewFloat().Copy(z)
}
// Pow(z, -w) = 1 / Pow(z, w)
if w.Sign() < 0 {
x := NewFloat()
zExt := NewFloat().Copy(z).SetPrec(z.Prec() + 64)
wNeg := NewFloat().Neg(w)
return x.Quo(bigOne, BigFloatPow(zExt, wNeg)).SetPrec(z.Prec())
}
// compute w**z as exp(z log(w))
x := NewFloat().SetPrec(z.Prec() + 64)
logZ := BigFloatLog(NewFloat().Copy(z).SetPrec(z.Prec() + 64))
x.Mul(w, logZ)
x = BigFloatExp(x)
return x.SetPrec(z.Prec())
} | lib/bitclout_math.go | 0.541651 | 0.487795 | bitclout_math.go | starcoder |
package types
import (
"time"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// Delegation represents a single delegation made from a delegator
// to a specific validator at a specific height (and timestamp)
// containing a given amount of tokens
type Delegation struct {
	DelegatorAddress string   // address of the delegating account
	ValidatorAddress string   // address of the validator receiving the delegation
	Amount           sdk.Coin // delegated token amount (denom + quantity)
	Shares           string   // delegation shares, kept as the raw string value
	Height           int64    // block height at which this delegation was recorded
}
// NewDelegation creates a new Delegation instance containing
// the given data.
func NewDelegation(
	delegator string, validatorAddress string, amount sdk.Coin, shares string, height int64,
) Delegation {
	var delegation Delegation
	delegation.DelegatorAddress = delegator
	delegation.ValidatorAddress = validatorAddress
	delegation.Amount = amount
	delegation.Shares = shares
	delegation.Height = height
	return delegation
}
// _________________________________________________________
// UnbondingDelegation represents a single unbonding delegation
type UnbondingDelegation struct {
	DelegatorAddress    string    // address of the delegating account
	ValidatorAddress    string    // address of the validator being unbonded from
	Amount              sdk.Coin  // token amount being unbonded
	CompletionTimestamp time.Time // time at which the unbonding completes
	Height              int64     // block height at which the unbonding was recorded
}
// NewUnbondingDelegation allows to create a new UnbondingDelegation instance.
func NewUnbondingDelegation(
	delegator string, validator string, amount sdk.Coin, completionTimestamp time.Time,
	height int64,
) UnbondingDelegation {
	var unbonding UnbondingDelegation
	unbonding.DelegatorAddress = delegator
	unbonding.ValidatorAddress = validator
	unbonding.Amount = amount
	unbonding.CompletionTimestamp = completionTimestamp
	unbonding.Height = height
	return unbonding
}
// _________________________________________________________
// Redelegation represents a single re-delegation
type Redelegation struct {
	DelegatorAddress string    // address of the delegating account
	SrcValidator     string    // validator the tokens are moved away from
	DstValidator     string    // validator the tokens are moved to
	Amount           sdk.Coin  // token amount being redelegated
	CompletionTime   time.Time // time at which the redelegation completes
	Height           int64     // block height at which the redelegation was recorded
}
// NewRedelegation builds a new Redelegation object.
func NewRedelegation(
	delegator string, srcValidator, dstValidator string, amount sdk.Coin, completionTime time.Time, height int64,
) Redelegation {
	var redelegation Redelegation
	redelegation.DelegatorAddress = delegator
	redelegation.SrcValidator = srcValidator
	redelegation.DstValidator = dstValidator
	redelegation.Amount = amount
	redelegation.CompletionTime = completionTime
	redelegation.Height = height
	return redelegation
}
// DelegationShare stores a delegator's share of a validator's delegations at
// a given block height.
type DelegationShare struct {
	ValidatorAddress string    // address of the validator
	DelegatorAddress string    // address of the delegating account
	Shares           float64   // delegation shares held by the delegator
	Height           int64     // block height at which the shares were observed
	Timestamp        time.Time // time of the observation
}
//NewDelegationShare get a new instance of modify self Delegation
func NewDelegationShare(
validatorAddress string, delegatorAddress string, shares float64,
height int64, timestamp time.Time,
) DelegationShare {
return DelegationShare{
ValidatorAddress: validatorAddress,
DelegatorAddress: delegatorAddress,
Shares: shares,
Height: height,
Timestamp: timestamp,
}
} | x/staking/types/delegations.go | 0.738198 | 0.417806 | delegations.go | starcoder |
package iso20022
// Set of elements used to provide information on the settlement of the instruction.
// Each field maps to an ISO 20022 element; the xml tag gives the abbreviated
// element name used on the wire, and omitempty marks optional elements.
type SettlementInformation13 struct {
	// Method used to settle the (batch of) payment instructions.
	SettlementMethod *SettlementMethod1Code `xml:"SttlmMtd"`
	// A specific purpose account used to post debit and credit entries as a result of the transaction.
	SettlementAccount *CashAccount16 `xml:"SttlmAcct,omitempty"`
	// Specification of a pre-agreed offering between clearing agents or the channel through which the payment instruction is processed.
	ClearingSystem *ClearingSystemIdentification3Choice `xml:"ClrSys,omitempty"`
	// Agent through which the instructing agent will reimburse the instructed agent.
	//
	// Usage: If InstructingAgent and InstructedAgent have the same reimbursement agent, then only InstructingReimbursementAgent must be used.
	InstructingReimbursementAgent *BranchAndFinancialInstitutionIdentification4 `xml:"InstgRmbrsmntAgt,omitempty"`
	// Unambiguous identification of the account of the instructing reimbursement agent account at its servicing agent in the payment chain.
	InstructingReimbursementAgentAccount *CashAccount16 `xml:"InstgRmbrsmntAgtAcct,omitempty"`
	// Agent at which the instructed agent will be reimbursed.
	// Usage: If InstructedReimbursementAgent contains a branch of the InstructedAgent, then the party in InstructedAgent will claim reimbursement from that branch/will be paid by that branch.
	// Usage: If InstructingAgent and InstructedAgent have the same reimbursement agent, then only InstructingReimbursementAgent must be used.
	InstructedReimbursementAgent *BranchAndFinancialInstitutionIdentification4 `xml:"InstdRmbrsmntAgt,omitempty"`
	// Unambiguous identification of the account of the instructed reimbursement agent account at its servicing agent in the payment chain.
	InstructedReimbursementAgentAccount *CashAccount16 `xml:"InstdRmbrsmntAgtAcct,omitempty"`
	// Agent at which the instructed agent will be reimbursed.
	// Usage: If ThirdReimbursementAgent contains a branch of the InstructedAgent, then the party in InstructedAgent will claim reimbursement from that branch/will be paid by that branch.
	ThirdReimbursementAgent *BranchAndFinancialInstitutionIdentification4 `xml:"ThrdRmbrsmntAgt,omitempty"`
	// Unambiguous identification of the account of the third reimbursement agent account at its servicing agent in the payment chain.
	ThirdReimbursementAgentAccount *CashAccount16 `xml:"ThrdRmbrsmntAgtAcct,omitempty"`
}
func (s *SettlementInformation13) SetSettlementMethod(value string) {
s.SettlementMethod = (*SettlementMethod1Code)(&value)
}
func (s *SettlementInformation13) AddSettlementAccount() *CashAccount16 {
s.SettlementAccount = new(CashAccount16)
return s.SettlementAccount
}
func (s *SettlementInformation13) AddClearingSystem() *ClearingSystemIdentification3Choice {
s.ClearingSystem = new(ClearingSystemIdentification3Choice)
return s.ClearingSystem
}
func (s *SettlementInformation13) AddInstructingReimbursementAgent() *BranchAndFinancialInstitutionIdentification4 {
s.InstructingReimbursementAgent = new(BranchAndFinancialInstitutionIdentification4)
return s.InstructingReimbursementAgent
}
func (s *SettlementInformation13) AddInstructingReimbursementAgentAccount() *CashAccount16 {
s.InstructingReimbursementAgentAccount = new(CashAccount16)
return s.InstructingReimbursementAgentAccount
}
func (s *SettlementInformation13) AddInstructedReimbursementAgent() *BranchAndFinancialInstitutionIdentification4 {
s.InstructedReimbursementAgent = new(BranchAndFinancialInstitutionIdentification4)
return s.InstructedReimbursementAgent
}
func (s *SettlementInformation13) AddInstructedReimbursementAgentAccount() *CashAccount16 {
s.InstructedReimbursementAgentAccount = new(CashAccount16)
return s.InstructedReimbursementAgentAccount
}
func (s *SettlementInformation13) AddThirdReimbursementAgent() *BranchAndFinancialInstitutionIdentification4 {
s.ThirdReimbursementAgent = new(BranchAndFinancialInstitutionIdentification4)
return s.ThirdReimbursementAgent
}
func (s *SettlementInformation13) AddThirdReimbursementAgentAccount() *CashAccount16 {
s.ThirdReimbursementAgentAccount = new(CashAccount16)
return s.ThirdReimbursementAgentAccount
} | SettlementInformation13.go | 0.723016 | 0.451689 | SettlementInformation13.go | starcoder |
package sqlb
import r "reflect"
/*
Variant of `[]interface{}` conforming to the `ArgDict` interface. Supports only
ordinal parameters, not named parameters. Used for `StrQ`. See the `ListQ`
shortcut.
*/
type List []interface{}

// IsEmpty implements part of the `ArgDict` interface.
func (self List) IsEmpty() bool { return self.Len() == 0 }

// Len implements part of the `ArgDict` interface.
func (self List) Len() int { return len(self) }

// GotOrdinal implements part of the `ArgDict` interface. It returns the
// argument at `key` and true, or nil and false when `key` is out of range.
func (self List) GotOrdinal(key int) (interface{}, bool) {
	if key >= 0 && key < len(self) {
		return self[key], true
	}
	return nil, false
}

// GotNamed implements part of the `ArgDict` interface. Always returns
// `nil, false`: lists carry no named arguments.
func (self List) GotNamed(string) (interface{}, bool) { return nil, false }

// RangeOrdinal implements `OrdinalRanger` to automatically validate
// used/unused arguments. It invokes `fun` once per index, in order.
func (self List) RangeOrdinal(fun func(int)) {
	if fun != nil {
		// Iterate the indexes directly instead of round-tripping through the
		// `counter` helper; behavior is identical without the indirection.
		for ind := 0; ind < len(self); ind++ {
			fun(ind)
		}
	}
}
/*
Dict is a variant of `map[string]interface{}` conforming to the `ArgDict`
interface. It supports only named parameters, not ordinal parameters. Used
for `StrQ`. See the `DictQ` shortcut.
*/
type Dict map[string]interface{}

// IsEmpty implements part of the `ArgDict` interface.
func (self Dict) IsEmpty() bool { return self.Len() == 0 }

// Len implements part of the `ArgDict` interface.
func (self Dict) Len() int { return len(self) }

// GotOrdinal implements part of the `ArgDict` interface. Always returns
// `nil, false`: dicts carry no ordinal arguments.
func (self Dict) GotOrdinal(int) (interface{}, bool) { return nil, false }

// GotNamed implements part of the `ArgDict` interface.
func (self Dict) GotNamed(key string) (interface{}, bool) {
	value, found := self[key]
	return value, found
}

// RangeNamed implements `NamedRanger` to automatically validate used/unused
// arguments. It invokes `fun` with every key (iteration order is undefined).
func (self Dict) RangeNamed(fun func(string)) {
	if fun == nil {
		return
	}
	for name := range self {
		fun(name)
	}
}
/*
Implements `ArgDict` by reading struct fields and methods by name. Supports only
named parameters, not ordinal parameters. The inner value must be either
invalid or a struct. Compared to `Dict`, a struct is way faster to construct,
but reading fields by name is way slower. Used for `StrQ`. See the `StructQ`
shortcut.
*/
type StructDict [1]r.Value

// Implement part of the `ArgDict` interface. The dict is empty when it holds
// no value, or when its struct type exposes nothing usable (as decided by
// the package helper isStructTypeEmpty).
func (self StructDict) IsEmpty() bool {
	return !self[0].IsValid() || isStructTypeEmpty(self[0].Type())
}

// Implement part of the `ArgDict` interface. Always returns 0.
func (self StructDict) Len() int { return 0 }

// Implement part of the `ArgDict` interface. Always returns `nil, false`.
func (self StructDict) GotOrdinal(int) (interface{}, bool) { return nil, false }

// Implement part of the `ArgDict` interface. The lookup first tries a struct
// field, then a getter method, resolving `key` through a cached path map.
func (self StructDict) GotNamed(key string) (interface{}, bool) {
	/**
	(Tested in Go 1.17.)
	In our benchmarks, making a struct dict is about 15 times faster than a normal
	dict (100ns vs 1500ns for 12 fields and 12 methods), but accessing various
	fields and methods is about 25 times slower on average (5000ns vs 200ns for
	12 fields and 12 methods). When using only fields without methods, the
	access time numbers are much closer (700ns vs 100ns for 12 fields). The
	total numbers are close enough, and small enough, to justify both, depending
	on the use case.
	Compared to using `reflect.Value.FieldByName` and `reflect.Value.MethodByName`
	every time, using a cached dict with field and method indexes improves
	average access performance by about 3 times in our benchmarks.
	*/
	// valueDeref (package helper) presumably unwraps pointers to reach the
	// underlying struct value — an invalid result means no value to read.
	val := valueDeref(self[0])
	if !val.IsValid() {
		return nil, false
	}
	// loadStructPathMap caches, per struct type, the field/method path for
	// each addressable name.
	path, ok := loadStructPathMap(val.Type())[key]
	if !ok {
		return nil, false
	}
	if path.FieldIndex != nil {
		return val.FieldByIndex(path.FieldIndex).Interface(), true
	}
	meth := val.Method(path.MethodIndex)
	if meth.IsValid() {
		// reqGetter validates the method's shape before it is invoked as a
		// no-argument, single-result getter.
		reqGetter(val.Type(), meth.Type(), key)
		return meth.Call(nil)[0].Interface(), true
	}
	return nil, false
}
package dominance
// Float64Vector is a candidate represented as a vector of float64 objective
// values.
type Float64Vector []float64

// Compile-time check that *Float64Vector satisfies the Candidate interface.
var _ Candidate = &Float64Vector{}

// NewFloat64 allocates a zero-valued vector of the given size and returns a
// pointer to it.
func NewFloat64(size int) *Float64Vector {
	v := make(Float64Vector, size)
	return &v
}
// IsComparable reports whether otherCandidate can be compared against v for
// dominance: it must be a *Float64Vector of the same length.
func (v *Float64Vector) IsComparable(otherCandidate Candidate) bool {
	if !isFloat64Vector(otherCandidate) {
		return false
	}
	return vectorLengthsMatch(v, asFloat64Vector(otherCandidate))
}
// isFloat64Vector reports whether the candidate's concrete type is
// *Float64Vector. Uses the idiomatic comma-ok type assertion in place of the
// original's two-case type switch.
func isFloat64Vector(otherCandidate Candidate) bool {
	_, ok := otherCandidate.(*Float64Vector)
	return ok
}
// asFloat64Vector downcasts the candidate to *Float64Vector.
// It panics when the concrete type differs; callers are expected to check
// with isFloat64Vector or IsComparable first.
func asFloat64Vector(otherCandidate Candidate) *Float64Vector {
	return otherCandidate.(*Float64Vector)
}

// vectorLengthsMatch reports whether the two vectors have the same number of
// components.
func vectorLengthsMatch(firstVector *Float64Vector, secondVector *Float64Vector) bool {
	return len(*firstVector) == len(*secondVector)
}
// Dominates reports whether v strictly dominates otherCandidate: every
// component of v is strictly less than the matching component (lower values
// win under this definition). Assumes the candidates are comparable.
func (v *Float64Vector) Dominates(otherCandidate Candidate) bool {
	// [Engrand et.al, 1992]:
	// V dominates O <=> forAll 0 <= j < |V|, it holds that, V[j] < O[j]
	return v.allLessThanValuesIn(otherCandidate)
}
// allLessThanValuesIn reports whether every component of v is strictly less
// than the matching component of otherCandidate.
func (v *Float64Vector) allLessThanValuesIn(otherCandidate Candidate) bool {
	other := *asFloat64Vector(otherCandidate)
	for index, value := range *v {
		if value >= other[index] {
			return false
		}
	}
	return true
}
// anyLessThanValuesIn reports whether at least one component of v is strictly
// less than the matching component of otherCandidate.
func (v *Float64Vector) anyLessThanValuesIn(otherCandidate Candidate) bool {
	other := *asFloat64Vector(otherCandidate)
	for index, value := range *v {
		if value < other[index] {
			return true
		}
	}
	return false
}
// allEqualOrLessThanValuesIn reports whether every component of v is less
// than or equal to the matching component of otherCandidate.
func (v *Float64Vector) allEqualOrLessThanValuesIn(otherCandidate Candidate) bool {
	other := *asFloat64Vector(otherCandidate)
	this := *v
	for index := range other {
		// The negated form is kept deliberately: for NaN components,
		// !(a <= b) is not equivalent to a > b.
		if !(this[index] <= other[index]) {
			return false
		}
	}
	return true
}
// IsDominatedBy reports whether otherCandidate dominates v.
func (v *Float64Vector) IsDominatedBy(otherCandidate Candidate) bool {
	return otherCandidate.Dominates(v)
}

// DominancePresent reports whether either candidate dominates the other.
func (v *Float64Vector) DominancePresent(otherCandidate Candidate) bool {
	return v.Dominates(otherCandidate) || otherCandidate.Dominates(v)
}

// NoDominancePresent reports whether neither candidate dominates the other.
func (v *Float64Vector) NoDominancePresent(otherCandidate Candidate) bool {
	return !(v.DominancePresent(otherCandidate))
}
package evm
// Each memory* helper below computes the memory extent (in bytes) an opcode
// touches, from offset and size operands on the stack; stack.Back(i) is the
// i-th operand from the top. The boolean result reports uint64 overflow of
// the computed size. calcMemSize64 and calcMemSize64WithUint are defined
// elsewhere in this package.

// memorySha3 sizes the region for SHA3 from operands 0 (offset) and 1 (size).
func memorySha3(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(1))
}

// memoryCallDataCopy sizes the destination region for CALLDATACOPY from
// operands 0 (memory offset) and 2 (length).
func memoryCallDataCopy(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(2))
}

// memoryReturnDataCopy sizes the destination region for RETURNDATACOPY from
// operands 0 (memory offset) and 2 (length).
func memoryReturnDataCopy(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(2))
}

// memoryCodeCopy sizes the destination region for CODECOPY from operands 0
// (memory offset) and 2 (length).
func memoryCodeCopy(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(2))
}

// memoryExtCodeCopy sizes the destination region for EXTCODECOPY from
// operands 1 (memory offset) and 3 (length); operand 0 is the address.
func memoryExtCodeCopy(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(1), stack.Back(3))
}

// memoryMLoad sizes the 32-byte word read by MLOAD at operand 0.
func memoryMLoad(stack *Stack) (uint64, bool) {
	return calcMemSize64WithUint(stack.Back(0), 32)
}

// memoryMStore8 sizes the single byte written by MSTORE8 at operand 0.
func memoryMStore8(stack *Stack) (uint64, bool) {
	return calcMemSize64WithUint(stack.Back(0), 1)
}

// memoryMStore sizes the 32-byte word written by MSTORE at operand 0.
func memoryMStore(stack *Stack) (uint64, bool) {
	return calcMemSize64WithUint(stack.Back(0), 32)
}

// memoryCreate sizes the init-code region for CREATE from operands 1
// (offset) and 2 (length).
func memoryCreate(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(1), stack.Back(2))
}

// memoryCreate2 sizes the init-code region for CREATE2 from operands 1
// (offset) and 2 (length).
func memoryCreate2(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(1), stack.Back(2))
}
// memoryCall reports the memory extent needed by CALL: the larger of its
// return-data region (operands 5 and 6) and its argument region (operands 3
// and 4). The boolean result reports uint64 overflow.
func memoryCall(stack *Stack) (uint64, bool) {
	ret, overflow := calcMemSize64(stack.Back(5), stack.Back(6))
	if overflow {
		return 0, true
	}
	args, overflow := calcMemSize64(stack.Back(3), stack.Back(4))
	if overflow {
		return 0, true
	}
	// Memory must be expanded to cover whichever region ends later.
	if args > ret {
		return args, false
	}
	return ret, false
}
// memoryDelegateCall reports the memory extent needed by DELEGATECALL: the
// larger of its return-data region (operands 4 and 5) and its argument
// region (operands 2 and 3). The boolean result reports uint64 overflow.
func memoryDelegateCall(stack *Stack) (uint64, bool) {
	ret, overflow := calcMemSize64(stack.Back(4), stack.Back(5))
	if overflow {
		return 0, true
	}
	args, overflow := calcMemSize64(stack.Back(2), stack.Back(3))
	if overflow {
		return 0, true
	}
	// Memory must be expanded to cover whichever region ends later.
	if args > ret {
		return args, false
	}
	return ret, false
}
// memoryStaticCall reports the memory extent needed by STATICCALL: the
// larger of its return-data region (operands 4 and 5) and its argument
// region (operands 2 and 3). The boolean result reports uint64 overflow.
func memoryStaticCall(stack *Stack) (uint64, bool) {
	ret, overflow := calcMemSize64(stack.Back(4), stack.Back(5))
	if overflow {
		return 0, true
	}
	args, overflow := calcMemSize64(stack.Back(2), stack.Back(3))
	if overflow {
		return 0, true
	}
	// Memory must be expanded to cover whichever region ends later.
	if args > ret {
		return args, false
	}
	return ret, false
}
// memoryReturn sizes the region returned by RETURN from operands 0 (offset)
// and 1 (length).
func memoryReturn(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(1))
}

// memoryRevert sizes the region returned by REVERT from operands 0 (offset)
// and 1 (length).
func memoryRevert(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(1))
}

// memoryLog sizes the data region for the LOG opcodes from operands 0
// (offset) and 1 (length).
func memoryLog(stack *Stack) (uint64, bool) {
	return calcMemSize64(stack.Back(0), stack.Back(1))
}
package mod
import (
"image"
"image/color"
"math"
"github.com/oakmound/oak/v2/dlog"
)
// A Filter modifies an input image in place. This is useful notably for modifying
// a screen buffer, as they will refuse to be modified in any other way. This cannot
// change the dimensions of the underlying image.
type Filter func(*image.RGBA)
// AndFilter combines multiple filters into one.
func AndFilter(fs ...Filter) Filter {
return func(rgba *image.RGBA) {
for _, f := range fs {
f(rgba)
}
}
}
// ConformToPallete returns a Filter that converts every pixel of the image
// to the given color model in place — the Filter analogue of a
// palette-conversion Mod, without allocating a new *image.RGBA.
func ConformToPallete(p color.Model) Filter {
	return func(rgba *image.RGBA) {
		bounds := rgba.Bounds()
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are converted correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				rgba.Set(x, y, p.Convert(rgba.At(x, y)))
			}
		}
	}
}
// Fade returns a Filter that reduces the alpha of every pixel. alpha is on a
// 0-255 scale; it is multiplied by 257 to map onto the 16-bit (0-65535)
// channel range returned by RGBA().
func Fade(alpha int) Filter {
	return func(rgba *image.RGBA) {
		bounds := rgba.Bounds()
		a2 := uint32(alpha * 257)
		var a3 uint16
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are faded correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				r, g, b, a := rgba.At(x, y).RGBA()
				// Clamp at zero instead of underflowing.
				if a2 > a {
					a3 = 0
				} else {
					a3 = uint16(a - a2)
				}
				// NOTE(review): r, g, b come back alpha-premultiplied from
				// RGBA() but are stored via the non-premultiplied NRGBA64
				// type, which darkens partially transparent pixels — confirm
				// this side effect is intended.
				rgba.Set(x, y, color.NRGBA64{
					uint16(r),
					uint16(g),
					uint16(b),
					a3})
			}
		}
	}
}
// ApplyMask returns a Filter that mixes the rgba values of img into the
// filtered image, weighting each channel by the two images' alpha levels.
// The resulting alpha is the larger of the two.
func ApplyMask(img image.RGBA) Filter {
	return func(rgba *image.RGBA) {
		// NOTE(review): iteration covers the mask's bounds; confirm both
		// images are expected to share dimensions.
		bounds := img.Bounds()
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are handled correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				r1, g1, b1, a1 := img.At(x, y).RGBA()
				r2, g2, b2, a2 := rgba.At(x, y).RGBA()
				a3 := uint64(a1) + uint64(a2)
				if a3 == 0 {
					rgba.Set(x, y, color.RGBA64{0, 0, 0, 0})
					continue
				}
				// Blend in uint64: each alpha*channel product can approach
				// 2^32, so the sum of two products overflows uint32.
				rgba.Set(x, y, color.RGBA64{
					uint16((uint64(a1)*uint64(r1) + uint64(a2)*uint64(r2)) / a3),
					uint16((uint64(a1)*uint64(g1) + uint64(a2)*uint64(g2)) / a3),
					uint16((uint64(a1)*uint64(b1) + uint64(a2)*uint64(b2)) / a3),
					uint16(math.Max(float64(a1), float64(a2)))})
			}
		}
	}
}
// ApplyColor returns a Filter that mixes the given color into every pixel,
// weighting each channel by the color's and the pixel's alpha levels. The
// pixel's alpha is preserved; fully transparent pixels stay transparent.
func ApplyColor(c color.Color) Filter {
	return func(rgba *image.RGBA) {
		r1, g1, b1, a1 := c.RGBA()
		bounds := rgba.Bounds()
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are handled correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				r2, g2, b2, a2 := rgba.At(x, y).RGBA()
				if a2 == 0 {
					rgba.Set(x, y, color.RGBA{0, 0, 0, 0})
					continue
				}
				// Blend in uint64: each alpha*channel product can approach
				// 2^32, so the sum of two products overflows uint32. a3 > 0
				// here because a2 > 0.
				a3 := uint64(a1) + uint64(a2)
				rgba.Set(x, y, color.RGBA64{
					uint16((uint64(a1)*uint64(r1) + uint64(a2)*uint64(r2)) / a3),
					uint16((uint64(a1)*uint64(g1) + uint64(a2)*uint64(g2)) / a3),
					uint16((uint64(a1)*uint64(b1) + uint64(a2)*uint64(b2)) / a3),
					uint16(a2)})
			}
		}
	}
}
// FillMask returns a Filter that replaces fully transparent (alpha 0) pixels
// in the filtered image with the corresponding pixels from img; all other
// pixels are left with their existing values.
func FillMask(img image.RGBA) Filter {
	return func(rgba *image.RGBA) {
		// NOTE(review): iteration covers the mask's bounds; confirm both
		// images are expected to share dimensions.
		bounds := img.Bounds()
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are handled correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				r1, g1, b1, a1 := rgba.At(x, y).RGBA()
				if a1 == 0 {
					// Transparent in the target: take the mask's pixel.
					r2, g2, b2, a2 := img.At(x, y).RGBA()
					rgba.Set(x, y, color.RGBA64{
						uint16(r2),
						uint16(g2),
						uint16(b2),
						uint16(a2),
					})
				} else {
					rgba.Set(x, y, color.RGBA64{
						uint16(r1),
						uint16(g1),
						uint16(b1),
						uint16(a1),
					})
				}
			}
		}
	}
}
// InPlace converts a Mod to a Filter by running the Mod and copying its
// result back over the original image's pixels.
func InPlace(m Mod) Filter {
	return func(rgba *image.RGBA) {
		modified := m(rgba)
		bounds := rgba.Bounds()
		// Iterate from bounds.Min rather than 0 so sub-images with a
		// non-zero origin are handled correctly.
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
				rgba.Set(x, y, modified.At(x, y))
			}
		}
	}
}
// StripOuterAlpha returns a Filter that, for each of the four image edges,
// walks inward and copies the source image m's pixels into the filtered
// image for every run of border pixels whose alpha is at or below level
// (0-255); each scan stops at the first pixel above the threshold.
// Note that this was implemented for ease of implementation but not speed.
// We could use the image lib or a real depth-first search to do fewer checks,
// but this is easier.
func StripOuterAlpha(m *image.RGBA, level int) Filter {
	l := uint8(level)
	return func(rgba *image.RGBA) {
		if m == nil {
			dlog.Warn("Invalid rgba provided to stripouteralpha")
			return
		}
		mr := m
		// Scans below use the source image's bounds.
		// NOTE(review): iteration starts at 0 and uses bounds.Max only,
		// assuming a zero-origin image — confirm sub-images are never passed.
		bounds := mr.Bounds()
		w := bounds.Max.X
		h := bounds.Max.Y
		// Scan each column top-down until the first pixel above the threshold.
		for x := 0; x < w; x++ {
			for y := 0; y < h; y++ {
				r := mr.RGBAAt(x, y)
				if r.A <= l {
					// Treating as transparent
					rgba.Set(int(x), int(y), mr.At(x, y))
				} else {
					break
				}
			}
		}
		// Scan each row left-to-right until the first pixel above the threshold.
		for y := 0; y < h; y++ {
			for x := 0; x < w; x++ {
				r := mr.RGBAAt(x, y)
				if r.A <= l {
					// Treating as transparent
					rgba.Set(int(x), int(y), mr.At(x, y))
				} else {
					break
				}
			}
		}
		// Scan each column bottom-up until the first pixel above the threshold.
		for x := 0; x < w; x++ {
			for y := h - 1; y >= 0; y-- {
				r := mr.RGBAAt(x, y)
				if r.A <= l {
					// Treating as transparent
					rgba.Set(int(x), int(y), mr.At(x, y))
				} else {
					break
				}
			}
		}
		// Scan each row right-to-left until the first pixel above the threshold.
		for y := 0; y < h; y++ {
			for x := w - 1; x >= 0; x-- {
				r := mr.RGBAAt(x, y)
				if r.A <= l {
					// Treating as transparent
					rgba.Set(int(x), int(y), mr.At(x, y))
				} else {
					break
				}
			}
		}
		// apply image onto m
	}
}
// There is no function to convert a Filter to a Mod, to discourage doing so:
// Mods are significantly less efficient than Filters.
package kata
/*
Introduction
The wave (known as the Mexican wave in the English-speaking world outside North America) is an example of metachronal rhythm achieved in a packed stadium when successive groups of spectators briefly stand, yell, and raise their arms. Immediately upon stretching to full height, the spectator returns to the usual seated position.
The result is a wave of standing spectators that travels through the crowd, even though individual spectators never move away from their seats. In many large arenas the crowd is seated in a contiguous circuit all the way around the sport field, and so the wave is able to travel continuously around the arena; in discontiguous seating arrangements, the wave can instead reflect back and forth through the crowd. When the gap in seating is narrow, the wave can sometimes pass through it. Usually only one wave crest will be present at any given time in an arena, although simultaneous, counter-rotating waves have been produced. (Source Wikipedia)
Task
In this simple Kata your task is to create a function that turns a string into a Mexican Wave. You will be passed a string and you must return that string in an array where an uppercase letter is a person standing up.
Rules
1. The input string will always be lower case but maybe empty.
2. If the character in the string is whitespace then pass over it as if it was an empty seat
Example
wave("hello") => []string{"Hello", "hEllo", "heLlo", "helLo", "hellO"}
Good luck and enjoy!
*/
import (
"strings"
"unicode"
)
func Wave(word string) []string {
var result []string = []string{}
var lenWord = len(word)
if lenWord == 0 {
return result
}
var gapLowerToUpper rune = 32
for i := 0; i < lenWord; i++ {
if word[i] == 32 {
continue
}
var charList []rune
for j, item := range word {
if j == i {
item -= gapLowerToUpper
}
charList = append(charList, item)
}
result = append(result, string(charList))
}
return result
}
func Wave2(words string) (wave []string) {
wave = []string{} // leaving the array uninitialised would be better practice
for i, c := range words {
if unicode.IsSpace(c) {
continue
}
upperC := string(c - 'a' + 'A')
wave = append(wave, words[:i]+upperC+words[i+1:])
}
return
}
func Wave3(words string) []string {
var result []string
result = make([]string, 0, len(words))
for i := 0; i < len(words); i++ {
if !(unicode.IsSpace(rune(words[i]))) {
// one int index ==> single ==> rune
// curStr := strings.ToUpper(string(words[i]))
// two int index ==> range ==> string
curStr := strings.ToUpper(words[i : i+1])
result = append(result, words[:i]+curStr+words[i+1:])
}
}
return result
}
func Wave4(s string) []string {
resArr := []string{}
for i, c := range s {
// POINT: from string to rune
tmpArr := []rune(s)
if c != 32 {
tmpArr[i] = c - 32
resArr = append(resArr, string(tmpArr))
}
}
return resArr
} | katas/kyu6/mexican_wave/wave.go | 0.625209 | 0.655164 | wave.go | starcoder |
package iso20022
// Completion of a securities settlement instruction, wherein securities are delivered/debited from a securities account and received/credited to the designated securities account.
type Transfer7 struct {
// Unique and unambiguous identifier for a group of individual transfers as assigned by the instructing party. This identifier links the individual transfers together.
MasterReference *Max35Text `xml:"MstrRef,omitempty"`
// Unique and unambiguous identifier for a transfer execution, as assigned by a confirming party.
TransferConfirmationReference *Max35Text `xml:"TrfConfRef"`
// Reference that identifies the transfer in transaction.
TransferReference *Max35Text `xml:"TrfRef"`
// Unique and unambiguous investor's identification of a transfer. This reference can typically be used in a hub scenario to give the reference of the transfer as assigned by the underlying client.
ClientReference *Max35Text `xml:"ClntRef,omitempty"`
// Date and time at which the transfer was executed.
EffectiveTransferDate *DateAndDateTimeChoice `xml:"FctvTrfDt"`
// Date and time at which a transaction is completed and cleared, ie, securities are delivered.
TradeDate *DateAndDateTimeChoice `xml:"TradDt"`
// Total quantity of securities settled.
TotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"TtlUnitsNb"`
// Information about the units to be transferred.
UnitsDetails []*Unit3 `xml:"UnitsDtls,omitempty"`
// Indicates whether the transfer results in a change of beneficial owner.
OwnAccountTransferIndicator *YesNoIndicator `xml:"OwnAcctTrfInd,omitempty"`
// Value of a security, as booked in an account. Book value is often different from the current market value of the security.
AveragePrice *ActiveOrHistoricCurrencyAnd13DecimalAmount `xml:"AvrgPric,omitempty"`
// Additional specific settlement information for non-regulated traded funds.
NonStandardSettlementInformation *Max350Text `xml:"NonStdSttlmInf,omitempty"`
}
func (t *Transfer7) SetMasterReference(value string) {
t.MasterReference = (*Max35Text)(&value)
}
func (t *Transfer7) SetTransferConfirmationReference(value string) {
t.TransferConfirmationReference = (*Max35Text)(&value)
}
func (t *Transfer7) SetTransferReference(value string) {
t.TransferReference = (*Max35Text)(&value)
}
func (t *Transfer7) SetClientReference(value string) {
t.ClientReference = (*Max35Text)(&value)
}
func (t *Transfer7) AddEffectiveTransferDate() *DateAndDateTimeChoice {
t.EffectiveTransferDate = new(DateAndDateTimeChoice)
return t.EffectiveTransferDate
}
func (t *Transfer7) AddTradeDate() *DateAndDateTimeChoice {
t.TradeDate = new(DateAndDateTimeChoice)
return t.TradeDate
}
func (t *Transfer7) AddTotalUnitsNumber() *FinancialInstrumentQuantity1 {
t.TotalUnitsNumber = new(FinancialInstrumentQuantity1)
return t.TotalUnitsNumber
}
func (t *Transfer7) AddUnitsDetails() *Unit3 {
newValue := new (Unit3)
t.UnitsDetails = append(t.UnitsDetails, newValue)
return newValue
}
func (t *Transfer7) SetOwnAccountTransferIndicator(value string) {
t.OwnAccountTransferIndicator = (*YesNoIndicator)(&value)
}
func (t *Transfer7) SetAveragePrice(value, currency string) {
t.AveragePrice = NewActiveOrHistoricCurrencyAnd13DecimalAmount(value, currency)
}
func (t *Transfer7) SetNonStandardSettlementInformation(value string) {
t.NonStandardSettlementInformation = (*Max350Text)(&value)
} | Transfer7.go | 0.821044 | 0.413477 | Transfer7.go | starcoder |
package change
import "math"
type Stats struct {
N int
Mean float64
Variance float64
}
func (s Stats) Stddev() float64 { return math.Sqrt(s.Variance) }
// cohen computes Cohen's d effect size between two means.
func cohen(s1, s2 Stats) float64 {
return (s1.Mean - s2.Mean) / pooledStddev(s1, s2)
}
// pooledVariance computes the pooled variance over two samples.
func pooledVariance(s1, s2 Stats) float64 {
n1 := float64(s1.N - 1)
n2 := float64(s2.N - 1)
return (n1*s1.Variance + n2*s2.Variance) / (n1 + n2)
}
// pooledStddev computes the pooled standard deviation over two samples.
func pooledStddev(s1, s2 Stats) float64 {
return math.Sqrt(pooledVariance(s1, s2))
}
// windows assists with computing statistics for windows in a sequence.
type windows struct {
n int
cumlx []float64 // cumlx[i] = sum of x[j] for j < i
cumlx2 []float64 // cumlx2[i] = sum of x[j]^2 for j < i
}
// newwindows initializes an empty windows sequence.
func newwindows() *windows {
return &windows{
n: 0,
cumlx: []float64{0},
cumlx2: []float64{0},
}
}
// push values at the end of the sequence.
func (w *windows) push(xs ...float64) {
for _, x := range xs {
w.cumlx = append(w.cumlx, w.cumlx[w.n]+x)
w.cumlx2 = append(w.cumlx2, w.cumlx2[w.n]+x*x)
w.n++
}
}
// sum of window x[l:r].
func (w *windows) sum(l, r int) float64 {
return w.cumlx[r] - w.cumlx[l]
}
// sumsq returns sum of squares in window x[l:r].
func (w *windows) sumsq(l, r int) float64 {
return w.cumlx2[r] - w.cumlx2[l]
}
// mean of the window x[l:r].
func (w *windows) mean(l, r int) float64 {
return w.sum(l, r) / float64(r-l)
}
// sampvar returns the sample variance of the window x[l:r].
func (w *windows) sampvar(l, r int) float64 {
sumsq := w.sumsq(l, r)
sum := w.sum(l, r)
n := float64(r - l)
return (sumsq - sum*sum/n) / (n - 1)
}
// stats for the window x[l:r].
func (w *windows) stats(l, r int) Stats {
return Stats{
N: r - l,
Mean: w.mean(l, r),
Variance: w.sampvar(l, r),
}
} | app/change/stats.go | 0.877962 | 0.510069 | stats.go | starcoder |
package testutil
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"regexp"
"sort"
"testing"
"github.com/go-test/deep"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/diff"
)
// AssertNoErr fails the test immediately when err is non-nil, wrapping it
// with the formatted message for context.
func AssertNoErr(t *testing.T, err error, msg string, args ...interface{}) {
	t.Helper()
	if err != nil {
		// Print via %s so a '%' inside the wrapped error text is not
		// re-interpreted as a format verb (flagged by go vet's printf check).
		t.Fatalf("%s", errors.Wrapf(err, msg, args...).Error())
	}
}
// AssertErr fails the test immediately when err is nil. msg is a
// Printf-style format applied to args.
func AssertErr(t *testing.T, err error, msg string, args ...interface{}) {
	t.Helper()
	if err == nil {
		t.Fatalf(msg, args...)
	}
}

// AssertTrue fails the test immediately when b is false. msg is a
// Printf-style format applied to args.
func AssertTrue(t *testing.T, b bool, msg string, args ...interface{}) {
	t.Helper()
	if !b {
		t.Fatalf(msg, args...)
	}
}

// AssertFalse fails the test immediately when b is true. msg is a
// Printf-style format applied to args.
func AssertFalse(t *testing.T, b bool, msg string, args ...interface{}) {
	t.Helper()
	if b {
		t.Fatalf(msg, args...)
	}
}
// AssertStringsMatch reports a failure unless expected and actual contain
// the same strings, ignoring order. Neither input slice is modified.
func AssertStringsMatch(t *testing.T, expected []string, actual []string, msg string, args ...interface{}) {
	t.Helper()
	want := append([]string(nil), expected...)
	got := append([]string(nil), actual...)
	sort.Strings(want)
	sort.Strings(got)
	if !reflect.DeepEqual(want, got) {
		t.Errorf(msg, args...)
	}
}
// AssertRegexpMatch checks that actual matches the regular expression
// expectedRegexp; the pattern must compile or the test panics.
func AssertRegexpMatch(t *testing.T, expectedRegexp, actual string) {
	// t.Helper added for consistency with the other assertions here, so
	// failures are attributed to the caller's line.
	t.Helper()
	rx := regexp.MustCompile(expectedRegexp)
	if !rx.MatchString(actual) {
		t.Errorf("Expected regexp %q to match %q", expectedRegexp, actual)
	}
}
// AssertEqual reports a failure unless expected and actual are deeply equal.
func AssertEqual(t *testing.T, expected, actual interface{}) {
	t.Helper()
	if reflect.DeepEqual(expected, actual) {
		return
	}
	t.Errorf("Expected: %v actual: %v", expected, actual)
}

// AssertEqualMsg is AssertEqual with a caller-supplied Printf-style prefix.
func AssertEqualMsg(t *testing.T, expected, actual interface{}, msg string, args ...interface{}) {
	t.Helper()
	if reflect.DeepEqual(expected, actual) {
		return
	}
	t.Errorf("%v: expected: %v actual: %v", fmt.Sprintf(msg, args...), expected, actual)
}
// AssertObjectsEqual reports a failure with a field-level diff and a
// side-by-side dump when expected and actual are not deeply equal.
func AssertObjectsEqual(t *testing.T, expected, actual interface{}) {
	t.Helper()
	if df := deep.Equal(expected, actual); df != nil {
		t.Errorf("Objects are not equal.\n\nDiff: expected\tactual\n %v\n\nSide-by-side: %v", df, diff.ObjectGoPrintSideBySide(expected, actual))
	}
}

// AssertObjectsEqualMsg is AssertObjectsEqual with a caller-supplied
// Printf-style prefix on the failure message.
func AssertObjectsEqualMsg(t *testing.T, expected, actual interface{}, msg string, args ...interface{}) {
	t.Helper()
	if df := deep.Equal(expected, actual); df != nil {
		t.Errorf("%v: %v",
			fmt.Sprintf(msg, args...),
			fmt.Sprintf("objects are not equal.\n\nDiff:\n %v\n\nSide-by-side:\n%v", df, diff.ObjectGoPrintSideBySide(expected, actual)))
	}
}
// AssertPathExists reports a failure when path cannot be stat'ed.
func AssertPathExists(t *testing.T, path string) {
	t.Helper()
	if _, err := os.Stat(path); err != nil {
		t.Errorf("path %v does not exist", path)
	}
}

// AssertPathNotExists reports a failure when path can be stat'ed.
func AssertPathNotExists(t *testing.T, path string) {
	t.Helper()
	if _, err := os.Stat(path); err == nil {
		t.Errorf("path %v does exist", path)
	}
}
func AssertFileContains(t *testing.T, path string, expectedContent string) {
t.Helper()
AssertPathExists(t, path)
actual, err := ioutil.ReadFile(path)
AssertNoErr(t, err, "Expected no error reading %v", path)
AssertEqual(t, expectedContent, string(actual))
} | testutil/assertions.go | 0.53437 | 0.447038 | assertions.go | starcoder |
package cios
import (
"encoding/json"
)
// DataStoreObjectLocation struct for DataStoreObjectLocation
type DataStoreObjectLocation struct {
Latitude *float32 `json:"latitude,omitempty"`
Longitude *float32 `json:"longitude,omitempty"`
Altitude *float32 `json:"altitude,omitempty"`
}
// NewDataStoreObjectLocation instantiates a new DataStoreObjectLocation object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewDataStoreObjectLocation() *DataStoreObjectLocation {
this := DataStoreObjectLocation{}
return &this
}
// NewDataStoreObjectLocationWithDefaults instantiates a new DataStoreObjectLocation object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewDataStoreObjectLocationWithDefaults() *DataStoreObjectLocation {
this := DataStoreObjectLocation{}
return &this
}
// GetLatitude returns the Latitude field value if set, zero value otherwise.
func (o *DataStoreObjectLocation) GetLatitude() float32 {
if o == nil || o.Latitude == nil {
var ret float32
return ret
}
return *o.Latitude
}
// GetLatitudeOk returns a tuple with the Latitude field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *DataStoreObjectLocation) GetLatitudeOk() (*float32, bool) {
if o == nil || o.Latitude == nil {
return nil, false
}
return o.Latitude, true
}
// HasLatitude returns a boolean if a field has been set.
func (o *DataStoreObjectLocation) HasLatitude() bool {
if o != nil && o.Latitude != nil {
return true
}
return false
}
// SetLatitude gets a reference to the given float32 and assigns it to the Latitude field.
func (o *DataStoreObjectLocation) SetLatitude(v float32) {
o.Latitude = &v
}
// GetLongitude returns the Longitude field value if set, zero value otherwise.
func (o *DataStoreObjectLocation) GetLongitude() float32 {
if o == nil || o.Longitude == nil {
var ret float32
return ret
}
return *o.Longitude
}
// GetLongitudeOk returns a tuple with the Longitude field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *DataStoreObjectLocation) GetLongitudeOk() (*float32, bool) {
if o == nil || o.Longitude == nil {
return nil, false
}
return o.Longitude, true
}
// HasLongitude returns a boolean if a field has been set.
func (o *DataStoreObjectLocation) HasLongitude() bool {
if o != nil && o.Longitude != nil {
return true
}
return false
}
// SetLongitude gets a reference to the given float32 and assigns it to the Longitude field.
func (o *DataStoreObjectLocation) SetLongitude(v float32) {
o.Longitude = &v
}
// GetAltitude returns the Altitude field value if set, zero value otherwise.
func (o *DataStoreObjectLocation) GetAltitude() float32 {
if o == nil || o.Altitude == nil {
var ret float32
return ret
}
return *o.Altitude
}
// GetAltitudeOk returns a tuple with the Altitude field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *DataStoreObjectLocation) GetAltitudeOk() (*float32, bool) {
if o == nil || o.Altitude == nil {
return nil, false
}
return o.Altitude, true
}
// HasAltitude returns a boolean if a field has been set.
func (o *DataStoreObjectLocation) HasAltitude() bool {
if o != nil && o.Altitude != nil {
return true
}
return false
}
// SetAltitude gets a reference to the given float32 and assigns it to the Altitude field.
func (o *DataStoreObjectLocation) SetAltitude(v float32) {
o.Altitude = &v
}
// MarshalJSON serializes only the coordinate fields that have been set,
// so unset optional fields are omitted from the output entirely.
func (o DataStoreObjectLocation) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Latitude != nil {
		toSerialize["latitude"] = o.Latitude
	}
	if o.Longitude != nil {
		toSerialize["longitude"] = o.Longitude
	}
	if o.Altitude != nil {
		toSerialize["altitude"] = o.Altitude
	}
	return json.Marshal(toSerialize)
}
type NullableDataStoreObjectLocation struct {
value *DataStoreObjectLocation
isSet bool
}
func (v NullableDataStoreObjectLocation) Get() *DataStoreObjectLocation {
return v.value
}
func (v *NullableDataStoreObjectLocation) Set(val *DataStoreObjectLocation) {
v.value = val
v.isSet = true
}
func (v NullableDataStoreObjectLocation) IsSet() bool {
return v.isSet
}
func (v *NullableDataStoreObjectLocation) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableDataStoreObjectLocation(val *DataStoreObjectLocation) *NullableDataStoreObjectLocation {
return &NullableDataStoreObjectLocation{value: val, isSet: true}
}
func (v NullableDataStoreObjectLocation) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableDataStoreObjectLocation) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | cios/model_data_store_object_location.go | 0.818302 | 0.510435 | model_data_store_object_location.go | starcoder |
package bresenham
// 2016-10-22, <NAME>
// * Do not use in production. It's just an exercise
import (
"image/color"
"image/draw"
)
// Bresenham_1: naive slope-intercept variant using floating point
// (y = a*x + b, recomputed per column).
// NOTE(review): a = dy/dx divides by zero for vertical lines (x1 == x2),
// and only x1 <= x2 with a shallow slope is drawn correctly — this is
// demonstration code only (see the file-header warning).
func Bresenham_1(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
	dx, dy := x2-x1, y2-y1
	a := float64(dy) / float64(dx)
	b := int(float64(y1) - a*float64(x1))
	img.Set(x1, y1, col)
	for x := x1 + 1; x <= x2; x++ {
		y := int(a*float64(x)) + b
		img.Set(x, y, col)
	}
}
// Floating point with error accumulator
func Bresenham_2(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := x2-x1, y2-y1
a := float64(dy) / float64(dx)
e, e_max, e_sub := 0.0, 0.5, 1.0
y := y1
img.Set(x1, y1, col)
for x := x1 + 1; x <= x2; x++ {
img.Set(x, y, col)
e += a
if e > e_max {
y += 1
e -= e_sub
}
}
}
// Integer float -> float * dx -> integer
func Bresenham_3(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := x2-x1, y2-y1
// e, e_max, e_sub := 0*dx, dx/2, dx
e, e_max, e_sub := dx, dx>>1, dx
y := y1
img.Set(x1, y1, col)
for x := x1 + 1; x <= x2; x++ {
img.Set(x, y, col)
e += dy // <= dy/dx * dx
if e > e_max {
y += 1
e -= e_sub
}
}
}
// Integer; remove comparison (cmp -> bit test); remove variables; float -> float * 2 * dx -> integer
func Bresenham_4(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := x2-x1, 2*(y2-y1)
e, slope := dx, 2*dx
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
e -= dy
if e < 0 {
y1++
e += slope
}
}
}
// dx > dy; x1 < x2; y1 < y2
func BresenhamDxXRYD(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := x2-x1, 2*(y2-y1)
e, slope := dx, 2*dx
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
e -= dy
if e < 0 {
y1++
e += slope
}
}
}
// dy > dx; x1 < x2; y1 < y2
func BresenhamDyXRYD(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := 2*(x2-x1), y2-y1
e, slope := dy, 2*dy
for ; dy != 0; dy-- {
img.Set(x1, y1, col)
y1++
e -= dx
if e < 0 {
x1++
e += slope
}
}
}
// dx > dy; x1 < x2; y1 > y2
func BresenhamDxXRYU(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
dx, dy := x2-x1, 2*(y1-y2)
e, slope := dx, 2*dx
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
e -= dy
if e < 0 {
y1--
e += slope
}
}
}
// BresenhamDyXRYU draws the steep upward case.
// Preconditions (by analogy with its documented siblings, and implied by
// dy := y1-y2): dy > dx; x1 < x2; y1 > y2.
func BresenhamDyXRYU(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
	dx, dy := 2*(x2-x1), y1-y2
	e, slope := dy, 2*dy
	for ; dy != 0; dy-- {
		img.Set(x1, y1, col)
		y1--
		e -= dx
		if e < 0 {
			x1++
			e += slope
		}
	}
}
// Generalized with integer
func Bresenham(img draw.Image, x1, y1, x2, y2 int, col color.Color) {
var dx, dy, e, slope int
// Because drawing p1 -> p2 is equivalent to draw p2 -> p1,
// I sort points in x-axis order to handle only half of possible cases.
if x1 > x2 {
x1, y1, x2, y2 = x2, y2, x1, y1
}
dx, dy = x2-x1, y2-y1
// Because point is x-axis ordered, dx cannot be negative
if dy < 0 {
dy = -dy
}
switch {
// Is line a point ?
case x1 == x2 && y1 == y2:
img.Set(x1, y1, col)
// Is line an horizontal ?
case y1 == y2:
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
}
img.Set(x1, y1, col)
// Is line a vertical ?
case x1 == x2:
if y1 > y2 {
y1, y2 = y2, y1
}
for ; dy != 0; dy-- {
img.Set(x1, y1, col)
y1++
}
img.Set(x1, y1, col)
// Is line a diagonal ?
case dx == dy:
if y1 < y2 {
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
y1++
}
} else {
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
y1--
}
}
img.Set(x1, y1, col)
// wider than high ?
case dx > dy:
if y1 < y2 {
// BresenhamDxXRYD(img, x1, y1, x2, y2, col)
dy, e, slope = 2*dy, dx, 2*dx
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
e -= dy
if e < 0 {
y1++
e += slope
}
}
} else {
// BresenhamDxXRYU(img, x1, y1, x2, y2, col)
dy, e, slope = 2*dy, dx, 2*dx
for ; dx != 0; dx-- {
img.Set(x1, y1, col)
x1++
e -= dy
if e < 0 {
y1--
e += slope
}
}
}
img.Set(x2, y2, col)
// higher than wide.
default:
if y1 < y2 {
// BresenhamDyXRYD(img, x1, y1, x2, y2, col)
dx, e, slope = 2*dx, dy, 2*dy
for ; dy != 0; dy-- {
img.Set(x1, y1, col)
y1++
e -= dx
if e < 0 {
x1++
e += slope
}
}
} else {
// BresenhamDyXRYU(img, x1, y1, x2, y2, col)
dx, e, slope = 2*dx, dy, 2*dy
for ; dy != 0; dy-- {
img.Set(x1, y1, col)
y1--
e -= dx
if e < 0 {
x1++
e += slope
}
}
}
img.Set(x2, y2, col)
}
} | bresenham/line.go | 0.538983 | 0.563018 | line.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"email": "<EMAIL>"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/geospatialdata": {
"get": {
"description": "Extract geospatial data from a HMS model given an s3 key",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Extract geospatial data",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/handlers.SimpleResponse"
}
}
}
}
},
"/index": {
"get": {
"description": "Extract metadata from a HMS model given an s3 key",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Index a HMS model",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/tools.Model"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/handlers.SimpleResponse"
}
}
}
}
},
"/isamodel": {
"get": {
"description": "Check if the given key is a HMS model",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Check if the given key is a HMS model",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "boolean"
}
}
}
}
},
"/isgeospatial": {
"get": {
"description": "Check if the HMS model has geospatial information",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Check if the HMS model has geospatial information",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "boolean"
}
}
}
}
},
"/modeltype": {
"get": {
"description": "Extract the model type given an s3 key",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Extract the model type",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "HMS",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/handlers.SimpleResponse"
}
}
}
}
},
"/modelversion": {
"get": {
"description": "Extract the HMS model version given an s3 key",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"MCAT"
],
"summary": "Extract the HMS model version",
"parameters": [
{
"type": "string",
"description": "/models/hms/Truckee_River/Truckee_River.hms",
"name": "definition_file",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "4.0",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/handlers.SimpleResponse"
}
}
}
}
},
"/ping": {
"get": {
"description": "Check which services are operational",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Health Check"
],
"summary": "Status Check",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/handlers.SimpleResponse"
}
}
}
}
}
},
"definitions": {
"handlers.SimpleResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"status": {
"type": "integer"
}
}
},
"tools.ControlFiles": {
"type": "object",
"properties": {
"data": {
"description": "placeholder",
"type": "object",
"additionalProperties": true
},
"paths": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"tools.ForcingFiles": {
"type": "object",
"properties": {
"data": {
"description": "placeholder",
"type": "object",
"additionalProperties": true
},
"paths": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"tools.GeometryFiles": {
"type": "object",
"properties": {
"featuresProperties": {
"description": "placeholder",
"type": "object",
"additionalProperties": true
},
"georeference": {
"description": "placeholder",
"type": "object"
},
"paths": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"tools.InputFiles": {
"type": "object",
"properties": {
"controlFiles": {
"$ref": "#/definitions/tools.ControlFiles"
},
"forcingFiles": {
"$ref": "#/definitions/tools.ForcingFiles"
},
"geometryFiles": {
"$ref": "#/definitions/tools.GeometryFiles"
},
"localVariables": {
"description": "placeholder",
"type": "object"
},
"simulationVariables": {
"description": "placeholder",
"type": "object"
}
}
},
"tools.Model": {
"type": "object",
"properties": {
"definitionFile": {
"type": "string"
},
"files": {
"$ref": "#/definitions/tools.ModelFiles"
},
"type": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"tools.ModelFiles": {
"type": "object",
"properties": {
"inputFiles": {
"$ref": "#/definitions/tools.InputFiles"
},
"outputFiles": {
"$ref": "#/definitions/tools.OutputFiles"
},
"supplementalFiles": {
"$ref": "#/definitions/tools.SupplementalFiles"
}
}
},
"tools.OutputFiles": {
"type": "object",
"properties": {
"modelPrediction": {
"description": "placeholder",
"type": "object"
},
"paths": {
"type": "array",
"items": {
"type": "string"
}
},
"runFiles": {
"type": "array",
"items": {
"type": "string"
}
},
"runLogs": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"tools.SupplementalFiles": {
"type": "object",
"properties": {
"observationalData": {
"description": "placeholder",
"type": "object"
},
"paths": {
"type": "array",
"items": {
"type": "string"
}
},
"visulizations": {
"description": "placeholder",
"type": "object"
}
}
}
}
}`
// swaggerInfo holds the values substituted into the doc template when the
// swagger document is rendered.
type swaggerInfo struct {
	Version     string
	Host        string
	BasePath    string
	Schemes     []string
	Title       string
	Description string
}

// SwaggerInfo holds exported Swagger Info so clients can modify it before
// the document is rendered by ReadDoc.
var SwaggerInfo = swaggerInfo{
	Version:     "1.0",
	Host:        "localhost:5900",
	BasePath:    "",
	Schemes:     []string{},
	Title:       "HMS MCAT API",
	Description: "API for the HMS MCAT",
}
type s struct{}
func (s *s) ReadDoc() string {
sInfo := SwaggerInfo
sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
t, err := template.New("swagger_info").Funcs(template.FuncMap{
"marshal": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}).Parse(doc)
if err != nil {
return doc
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, sInfo); err != nil {
return doc
}
return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
} | docs/docs.go | 0.616474 | 0.453383 | docs.go | starcoder |
package hmath
import (
"fmt"
"github.com/barnex/fmath"
)
// Mat4 is a 4x4 float32 matrix stored as a flat array of 16 elements.
// String and the Mat4* constructors in this package treat the storage as
// column-major (OpenGL style: the translation lives in elements 12-14).
type Mat4 [16]float32

// Pointer exposes the backing array, e.g. for handing to a graphics API.
func (mat4 *Mat4) Pointer() *[16]float32 { return (*[16]float32)(mat4) }

// Slice returns the 16 elements as a []float32 view over the same storage.
func (mat4 *Mat4) Slice() []float32 { return mat4[:] }

// String renders the matrix as four text rows, reading the storage
// column-major (the first printed row is elements 0, 4, 8, 12).
func (mat4 *Mat4) String() string {
	return fmt.Sprintf("[%f,%f,%f,%f,\n %f,%f,%f,%f,\n %f,%f,%f,%f,\n %f,%f,%f,%f]",
		mat4[0], mat4[4], mat4[8], mat4[12], mat4[1], mat4[5], mat4[9], mat4[13], mat4[2], mat4[6], mat4[10], mat4[14], mat4[3], mat4[7], mat4[11], mat4[15])
}

// SetAt stores value at the given row/column.
// NOTE(review): the switch computes index row*4+column (row-major), while
// String reads the storage column-major — SetAt/GetAt therefore address
// the transpose of what String prints. Rows outside 0-3 silently fall
// through to index `column`. Confirm callers expect these conventions.
func (mat4 *Mat4) SetAt(row int, column int, value float32) {
	selection := 0
	switch row {
	case 0:
		selection = 0
	case 1:
		selection = 4
	case 2:
		selection = 8
	case 3:
		selection = 12
	}
	selection = selection + column
	mat4[selection] = value
}

// GetAt reads the element at the given row/column (same indexing caveat
// as SetAt).
func (mat4 *Mat4) GetAt(row int, column int) float32 {
	selection := 0
	switch row {
	case 0:
		selection = 0
	case 1:
		selection = 4
	case 2:
		selection = 8
	case 3:
		selection = 12
	}
	selection = selection + column
	return mat4[selection]
}
func Mat4Identity() Mat4 {
return Mat4{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1}
}
func Mat4Translate(v Vec3) Mat4 {
return Mat4{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
v[0], v[1], v[2], 1}
}
func Mat4Scale(v Vec3) Mat4 {
return Mat4{
v[0], 0, 0, 0,
0, v[1], 0, 0,
0, 0, v[2], 0,
0, 0, 0, 1}
}
func Mat4Rotate(axis Vec3, radians float32) Mat4 {
x, y, z := axis.Norm().XYZ()
c := fmath.Cos(radians)
s := fmath.Sin(radians)
return Mat4{
x*x*(1-c) + c, y*x*(1-c) + z*s, x*z*(1-c) - y*s, 0,
x*y*(1-c) - z*s, y*y*(1-c) + c, y*z*(1-c) + x*s, 0,
x*z*(1-c) + y*s, y*z*(1-c) - x*s, z*z*(1-c) + c, 0,
0, 0, 0, 1}
}
// Mat4LookAt builds a view matrix looking from eye towards center with the
// given up vector.
// NOTE(review): the side vector s = f x norm(up) is not re-normalized here,
// whereas gluLookAt normalizes it; if f and up are not close to orthogonal
// unit vectors the resulting basis is skewed — confirm callers always pass
// suitable input.
func Mat4LookAt(eye, center, up Vec3) Mat4 {
	f := center.Sub(eye).Norm()
	s := f.Cross(up.Norm())
	u := s.Cross(f)
	return Mat4{
		s[0], u[0], -f[0], 0,
		s[1], u[1], -f[1], 0,
		s[2], u[2], -f[2], 0,
		-s.Dot(eye), -u.Dot(eye), f.Dot(eye), 1}
}
func Mat4Frustum(left, right, bottom, top, zNear, zFar float32) Mat4 {
width := right - left
height := top - bottom
depth := zFar - zNear
return Mat4{
(zNear * 2.0) / width, 0, 0, 0,
0, (zNear * 2.0) / height, 0, 0,
(left + right) / width, (bottom + top) / height, -(zNear + zFar) / depth, -1,
0, 0, -(zNear * zFar * 2.0) / depth, 0}
}
func Mat4Perspective(fovY, aspect, zNear, zFar float32) Mat4 {
f := 1.0 / fmath.Tan(fovY/2.0)
d := zNear - zFar
return Mat4{
f / aspect, 0, 0, 0,
0, f, 0, 0,
0, 0, (zFar + zNear) / d, -1,
0, 0, (2 * zFar * zNear) / d, 0}
}
func (mat4 Mat4) Mul(m2 Mat4) Mat4 {
return Mat4{
mat4[0]*m2[0] + mat4[1]*m2[4] + mat4[2]*m2[8] + mat4[3]*m2[12],
mat4[0]*m2[1] + mat4[1]*m2[5] + mat4[2]*m2[9] + mat4[3]*m2[13],
mat4[0]*m2[2] + mat4[1]*m2[6] + mat4[2]*m2[10] + mat4[3]*m2[14],
mat4[0]*m2[3] + mat4[1]*m2[7] + mat4[2]*m2[11] + mat4[3]*m2[15],
mat4[4]*m2[0] + mat4[5]*m2[4] + mat4[6]*m2[8] + mat4[7]*m2[12],
mat4[4]*m2[1] + mat4[5]*m2[5] + mat4[6]*m2[9] + mat4[7]*m2[13],
mat4[4]*m2[2] + mat4[5]*m2[6] + mat4[6]*m2[10] + mat4[7]*m2[14],
mat4[4]*m2[3] + mat4[5]*m2[7] + mat4[6]*m2[11] + mat4[7]*m2[15],
mat4[8]*m2[0] + mat4[9]*m2[4] + mat4[10]*m2[8] + mat4[11]*m2[12],
mat4[8]*m2[1] + mat4[9]*m2[5] + mat4[10]*m2[9] + mat4[11]*m2[13],
mat4[8]*m2[2] + mat4[9]*m2[6] + mat4[10]*m2[10] + mat4[11]*m2[14],
mat4[8]*m2[3] + mat4[9]*m2[7] + mat4[10]*m2[11] + mat4[11]*m2[15],
mat4[12]*m2[0] + mat4[13]*m2[4] + mat4[14]*m2[8] + mat4[15]*m2[12],
mat4[12]*m2[1] + mat4[13]*m2[5] + mat4[14]*m2[9] + mat4[15]*m2[13],
mat4[12]*m2[2] + mat4[13]*m2[6] + mat4[14]*m2[10] + mat4[15]*m2[14],
mat4[12]*m2[3] + mat4[13]*m2[7] + mat4[14]*m2[11] + mat4[15]*m2[15]}
}
func (mat4 Mat4) Invert() Mat4 {
var s, c [6]float32
s[0] = mat4[0]*mat4[5] - mat4[4]*mat4[1]
s[1] = mat4[0]*mat4[6] - mat4[4]*mat4[2]
s[2] = mat4[0]*mat4[7] - mat4[4]*mat4[3]
s[3] = mat4[1]*mat4[6] - mat4[5]*mat4[2]
s[4] = mat4[1]*mat4[7] - mat4[5]*mat4[3]
s[5] = mat4[2]*mat4[7] - mat4[6]*mat4[3]
c[0] = mat4[8]*mat4[13] - mat4[12]*mat4[9]
c[1] = mat4[8]*mat4[14] - mat4[12]*mat4[10]
c[2] = mat4[8]*mat4[15] - mat4[12]*mat4[11]
c[3] = mat4[9]*mat4[14] - mat4[13]*mat4[10]
c[4] = mat4[9]*mat4[15] - mat4[13]*mat4[11]
c[5] = mat4[10]*mat4[15] - mat4[14]*mat4[11]
// assumes it is invertible
identity := 1.0 / (s[0]*c[5] - s[1]*c[4] + s[2]*c[3] + s[3]*c[2] - s[4]*c[1] + s[5]*c[0])
return Mat4{
(mat4[5]*c[5] - mat4[6]*c[4] + mat4[7]*c[3]) * identity,
(-mat4[1]*c[5] + mat4[2]*c[4] - mat4[3]*c[3]) * identity,
(mat4[13]*s[5] - mat4[14]*s[4] + mat4[15]*s[3]) * identity,
(-mat4[9]*s[5] + mat4[10]*s[4] - mat4[11]*s[3]) * identity,
(-mat4[4]*c[5] + mat4[6]*c[2] - mat4[7]*c[1]) * identity,
(mat4[0]*c[5] - mat4[2]*c[2] + mat4[3]*c[1]) * identity,
(-mat4[12]*s[5] + mat4[14]*s[2] - mat4[15]*s[1]) * identity,
(mat4[8]*s[5] - mat4[10]*s[2] + mat4[11]*s[1]) * identity,
(mat4[4]*c[4] - mat4[5]*c[2] + mat4[7]*c[0]) * identity,
(-mat4[0]*c[4] + mat4[1]*c[2] - mat4[3]*c[0]) * identity,
(mat4[12]*s[4] - mat4[13]*s[2] + mat4[15]*s[0]) * identity,
(-mat4[8]*s[4] + mat4[9]*s[2] - mat4[11]*s[0]) * identity,
(-mat4[4]*c[3] + mat4[5]*c[1] - mat4[6]*c[0]) * identity,
(mat4[0]*c[3] - mat4[1]*c[1] + mat4[2]*c[0]) * identity,
(-mat4[12]*s[3] + mat4[13]*s[1] - mat4[14]*s[0]) * identity,
(mat4[8]*s[3] - mat4[9]*s[1] + mat4[10]*s[0]) * identity}
} | code/pkg/hmath/mat4.go | 0.658747 | 0.6372 | mat4.go | starcoder |
package geojson
import (
"encoding/json"
"errors"
"github.com/go-courier/geography"
"github.com/go-courier/geography/coordstransform"
"github.com/go-courier/geography/maptile"
)
// FeatureCollection is a GeoJSON "FeatureCollection" document. An optional
// coordinate transform, when installed, is applied as features are added
// from map tiles.
type FeatureCollection struct {
	coordsTransform *coordstransform.CoordsTransform
	Type            string                 `json:"type"`
	Features        []*Feature             `json:"features"`
	CRS             map[string]interface{} `json:"crs,omitempty"`
}

// NewFeatureCollection returns an empty collection with Type pre-set and a
// non-nil (empty) Features slice.
func NewFeatureCollection() *FeatureCollection {
	return &FeatureCollection{
		Type:     "FeatureCollection",
		Features: make([]*Feature, 0),
	}
}

// SetCoordsTransform installs the transform used by AddMapTileFeature.
func (fc *FeatureCollection) SetCoordsTransform(coordsTransform *coordstransform.CoordsTransform) {
	fc.coordsTransform = coordsTransform
}

// AddMapTileFeature converts each given maptile feature to GeoJSON and
// appends it; fc is returned for chaining.
func (fc *FeatureCollection) AddMapTileFeature(features ...maptile.Feature) *FeatureCollection {
	for _, v := range features {
		fc.addMapTileFeature(v)
	}
	return fc
}
// addMapTileFeature converts a single maptile.Feature into a GeoJSON
// Feature (projecting coordinates when a transform is installed) and
// appends it to fc.Features; fc is returned for chaining.
func (fc *FeatureCollection) addMapTileFeature(feature maptile.Feature) *FeatureCollection {
	feat := feature.ToGeom()
	geo := &Geometry{
		Type: feat.Type(),
	}
	if fc.coordsTransform != nil {
		feat = feat.Project(fc.coordsTransform.ToMars)
	}
	// Redundant "break" statements removed: Go switch cases never fall
	// through. Failed type assertions are still deliberately ignored — a
	// mismatched concrete type yields the zero value, matching the
	// previous behavior.
	switch feat.Type() {
	case "Point":
		point, _ := feat.(geography.Point)
		geo.Point = &point
	case "MultiPoint":
		point, _ := feat.(geography.MultiPoint)
		geo.MultiPoint = &point
	case "LineString":
		line, _ := feat.(geography.LineString)
		geo.LineString = &line
	case "MultiLineString":
		line, _ := feat.(geography.MultiLineString)
		geo.MultiLineString = &line
	case "Polygon":
		polygon, _ := feat.(geography.Polygon)
		geo.Polygon = &polygon
	case "MultiPolygon":
		polygon, _ := feat.(geography.MultiPolygon)
		geo.MultiPolygon = &polygon
	}
	fe := &Feature{
		Type:       "Feature",
		Geometry:   geo,
		Properties: feature.Properties(),
	}
	// Preserve a numeric feature ID when the source exposes one.
	if fid, ok := feature.(interface {
		ID() uint64
	}); ok {
		fe.ID = fid.ID()
	}
	fc.Features = append(fc.Features, fe)
	return fc
}
// MarshalJSON encodes the collection, forcing Type to "FeatureCollection",
// guaranteeing a non-null "features" array and omitting an empty CRS.
// NOTE(review): MarshalJSON/ToJSON use a pointer receiver while
// MarshalText below uses a value receiver — confirm the mixed receiver
// types are intentional.
func (fc *FeatureCollection) MarshalJSON() ([]byte, error) {
	type featureCollection FeatureCollection
	fcol := &featureCollection{
		Type: "FeatureCollection",
	}
	fcol.Features = fc.Features
	if fcol.Features == nil {
		fcol.Features = make([]*Feature, 0)
	}
	if fc.CRS != nil && len(fc.CRS) != 0 {
		fcol.CRS = fc.CRS
	}
	return json.Marshal(fcol)
}

// ToJSON returns the JSON encoding of the collection (alias of MarshalJSON).
func (fc *FeatureCollection) ToJSON() ([]byte, error) {
	return fc.MarshalJSON()
}

// MarshalText implements encoding.TextMarshaler via the JSON encoding.
func (fc FeatureCollection) MarshalText() ([]byte, error) {
	return fc.ToJSON()
}
func (fc *FeatureCollection) UnmarshalText(data []byte) error {
var object map[string]interface{}
err := json.Unmarshal(data, &object)
if err != nil {
return err
}
return decodeFeatureCollection(fc, object)
}
func decodeFeatureCollection(fc *FeatureCollection, object map[string]interface{}) error {
t, ok := object["type"]
if !ok {
return errors.New("type property not defined")
}
if str, ok := t.(string); ok {
fc.Type = str
} else {
return errors.New("type property not string")
}
crs, ok := object["crs"]
if ok {
if c, ok := crs.(map[string]interface{}); ok {
fc.CRS = c
}
}
features, ok := object["features"]
if !ok {
return errors.New("features property not defined")
}
feas, ok := features.([]interface{})
if !ok {
return errors.New("type property not features")
}
for _, fea := range feas {
if f, ok := fea.(map[string]interface{}); ok {
fea := &Feature{}
err := decodeFeature(fea, f)
if err != nil {
return err
}
fc.Features = append(fc.Features, fea)
} else {
return errors.New("type property not features")
}
}
return nil
} | encoding/geojson/feature_collection.go | 0.644449 | 0.412885 | feature_collection.go | starcoder |
package main
import (
"math"
)
// Sphere is the renderable mesh (vertices, normals, indices and texture
// coordinates) generated for a spherical object; it aliases ObjectElement.
type Sphere ObjectElement
// generateSphereFromObject builds the sphere mesh for the given object,
// using its radius, polar compression factor and whether it emits light.
func generateSphereFromObject(object *Object) (Sphere) {
	return generateSphere(object.Radius, object.Compression, object.Radiate)
}
// generateSphere builds a UV-sphere mesh of the given radius.
//
// compression scales the Y axis (flattening at the poles). When radiate is
// true the object is itself the light source, so vertex normals are inverted
// to avoid any self-shadow effect.
//
// The sphere is tessellated from latitude -90° up to
// ConfigObjectTexturePhiMax and longitude 0° to ConfigObjectTextureThetaMax
// using the configured angular steps; two triangles are emitted per quad.
func generateSphere(radius float32, compression float32, radiate bool) Sphere {
	var sphere Sphere

	// Total vertex count (rows * columns) and quad count ((rows-1)*(cols-1)).
	unarySizeFull := (2*ConfigObjectTexturePhiMax/ConfigObjectTextureStepLatitude + 1) * (ConfigObjectTextureThetaMax/ConfigObjectTextureStepLongitude + 1)
	unarySizeShort := (2 * ConfigObjectTexturePhiMax / ConfigObjectTextureStepLatitude) * (ConfigObjectTextureThetaMax / ConfigObjectTextureStepLongitude)

	sphere.Vertices = make([]float32, 3*unarySizeFull)
	sphere.VerticeNormals = make([]float32, 3*unarySizeFull)
	sphere.Indices = make([]int32, 6*unarySizeShort)
	sphere.TextureCoords = make([]float32, 2*unarySizeFull)

	// Write cursors into the vertex, normal, index and texcoord slices.
	i, j, k, l := 0, 0, 0, 0

	radiusN := normalizeObjectSize(radius)

	var nbVertices int32
	// Number of vertices per ring of constant latitude.
	resLongitude := int32(float32(ConfigObjectTextureThetaMax)/float32(ConfigObjectTextureStepLongitude) + 1.0)

	// Normal is -1 if sun, which is the light source, to avoid any
	// self-shadow effect.
	var normalDirection float32 = 1.0
	if radiate {
		normalDirection = -1.0
	}

	// Map sphere data.
	for latitude := -90; latitude <= ConfigObjectTexturePhiMax; latitude += ConfigObjectTextureStepLatitude {
		for longitude := 0; longitude <= ConfigObjectTextureThetaMax; longitude += ConfigObjectTextureStepLongitude {
			// Convert latitude & longitude to radians.
			longitudeRF := float64(ConfigMathDegreeToRadian) * float64(longitude)
			latitudeRF := float64(ConfigMathDegreeToRadian) * float64(latitude)

			// Unit-sphere position; Y is scaled by the polar compression.
			vertexPositionX := float32(math.Sin(longitudeRF) * math.Cos(latitudeRF))
			vertexPositionY := float32(math.Sin(latitudeRF)) * compression
			vertexPositionZ := float32(math.Cos(latitudeRF) * math.Cos(longitudeRF))

			// Bind sphere vertices.
			sphere.Vertices[i] = radiusN * vertexPositionX
			sphere.Vertices[i+1] = radiusN * vertexPositionY
			sphere.Vertices[i+2] = radiusN * vertexPositionZ
			i += 3

			// Bind sphere vertice normals.
			sphere.VerticeNormals[j] = normalDirection * vertexPositionX
			sphere.VerticeNormals[j+1] = normalDirection * vertexPositionY
			sphere.VerticeNormals[j+2] = normalDirection * vertexPositionZ
			j += 3

			// Bind sphere indices: two triangles per quad, skipped past the
			// last longitude column and the last latitude row.
			if longitude != ConfigObjectTextureThetaMax && latitude < ConfigObjectTexturePhiMax {
				sphere.Indices[k] = nbVertices
				sphere.Indices[k+1] = nbVertices + 1
				sphere.Indices[k+2] = nbVertices + 1 + resLongitude
				sphere.Indices[k+3] = nbVertices
				sphere.Indices[k+4] = nbVertices + 1 + resLongitude
				sphere.Indices[k+5] = nbVertices + resLongitude
				k += 6
			}
			nbVertices++

			// Bind sphere texture coordinates (V axis is flipped).
			sphere.TextureCoords[l] = float32(longitude) / float32(ConfigObjectTextureThetaMax)
			sphere.TextureCoords[l+1] = -1.0 * float32(90.0+latitude) / float32(90.0+ConfigObjectTexturePhiMax)
			l += 2
		}
	}
	return sphere
} | sphere.go | 0.836521 | 0.572603 | sphere.go | starcoder
package agg
import (
"fmt"
"strings"
"github.com/emer/etable/etable"
"github.com/goki/ki/kit"
)
// Aggs is a list of different standard aggregation functions, which can be
// used to choose an aggregation function.
type Aggs int

const (
	// AggCount is the count of the number of elements.
	AggCount Aggs = iota
	// AggSum is the sum of elements.
	AggSum
	// AggProd is the product of elements.
	AggProd
	// AggMin is the minimum value.
	AggMin
	// AggMax is the maximum value.
	AggMax
	// AggMean is the mean value.
	AggMean
	// AggVar is the sample variance (squared diffs from mean, divided by n-1).
	AggVar
	// AggStd is the sample standard deviation (sqrt of Var).
	AggStd
	// AggSem is the sample standard error of the mean (Std divided by sqrt(n)).
	AggSem
	// AggVarPop is the population variance (squared diffs from mean, divided by n).
	AggVarPop
	// AggStdPop is the population standard deviation (sqrt of VarPop).
	AggStdPop
	// AggSemPop is the population standard error of the mean (StdPop divided by sqrt(n)).
	AggSemPop
	// AggMedian is the middle value in sorted ordering.
	AggMedian
	// AggQ1 is the first quartile = 25%ile value = .25 quantile value.
	AggQ1
	// AggQ3 is the third quartile = 75%ile value = .75 quantile value.
	AggQ3
	// AggSumSq is the sum of squares.
	AggSumSq

	// AggsN is the total number of aggregation types.
	AggsN
)
//go:generate stringer -type=Aggs

// KiT_Aggs registers the Aggs enum with the ki type registry (GUI support).
var KiT_Aggs = kit.Enums.AddEnum(AggsN, kit.NotBitFlag, nil)

// MarshalJSON encodes the enum value by its stringer name.
func (ev Aggs) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }

// UnmarshalJSON decodes the enum value from its stringer name.
func (ev *Aggs) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
// AggsName returns the name of the Aggs variable without the "Agg" prefix
// (e.g. AggMean -> "Mean").
func AggsName(ag Aggs) string {
	return strings.TrimPrefix(ag.String(), "Agg")
}
// AggIdx applies the aggregation ag to all non-Null, non-NaN elements in the
// given IdxView indexed view of an etable.Table, for the given column index.
// The returned slice has one entry per cell element: length 1 for scalar 1D
// columns and N for higher-dimensional columns. An unrecognized ag yields nil.
func AggIdx(ix *etable.IdxView, colIdx int, ag Aggs) []float64 {
	// Pick the concrete aggregator, then invoke it once.
	var fun func(*etable.IdxView, int) []float64
	switch ag {
	case AggCount:
		fun = CountIdx
	case AggSum:
		fun = SumIdx
	case AggProd:
		fun = ProdIdx
	case AggMin:
		fun = MinIdx
	case AggMax:
		fun = MaxIdx
	case AggMean:
		fun = MeanIdx
	case AggVar:
		fun = VarIdx
	case AggStd:
		fun = StdIdx
	case AggSem:
		fun = SemIdx
	case AggVarPop:
		fun = VarPopIdx
	case AggStdPop:
		fun = StdPopIdx
	case AggSemPop:
		fun = SemPopIdx
	case AggQ1:
		fun = Q1Idx
	case AggMedian:
		fun = MedianIdx
	case AggQ3:
		fun = Q3Idx
	case AggSumSq:
		fun = SumSqIdx
	default:
		return nil
	}
	return fun(ix, colIdx)
}
// Agg applies the aggregation ag to all non-Null, non-NaN elements in the
// given IdxView indexed view of an etable.Table, for the given column name.
// If the column name is not found, nil is returned -- use AggTry for an
// error message. The returned slice has one entry per cell element: length 1
// for scalar 1D columns and N for higher-dimensional columns.
func Agg(ix *etable.IdxView, colNm string, ag Aggs) []float64 {
	ci := ix.Table.ColIdx(colNm)
	if ci == -1 {
		return nil
	}
	return AggIdx(ix, ci, ag)
}
// AggTry applies the aggregation ag to all non-Null, non-NaN elements in the
// given IdxView indexed view of an etable.Table, for the given column name.
// Unlike Agg, an unknown column name or unrecognized aggregation type is
// reported as an error. The returned slice has one entry per cell element.
func AggTry(ix *etable.IdxView, colNm string, ag Aggs) ([]float64, error) {
	ci, err := ix.Table.ColIdxTry(colNm)
	if err != nil {
		return nil, err
	}
	res := AggIdx(ix, ci, ag)
	if res == nil {
		return nil, fmt.Errorf("etable agg.AggTry: agg type: %v not recognized", ag)
	}
	return res, nil
} | agg/aggs.go | 0.730866 | 0.422743 | aggs.go | starcoder
package xmobilebackend
import (
"image"
"image/color"
"unsafe"
"golang.org/x/mobile/gl"
)
// GetImageData returns an RGBA image of the current image
//
// The requested rectangle is clamped to the canvas, the full GL viewport is
// read back as RGB into a reusable scratch buffer, and rows are flipped
// (GL stores them bottom-up) while copying. Alpha is always 255.
func (b *XMobileBackend) GetImageData(x, y, w, h int) *image.RGBA {
	b.activate()
	// Clamp the rectangle's origin, shrinking width/height accordingly.
	if x < 0 {
		w += x
		x = 0
	}
	if y < 0 {
		h += y
		y = 0
	}
	// NOTE(review): this clamps w/h to the canvas size but ignores the x/y
	// offset, so x+w may still exceed b.w — confirm callers stay in range.
	if w > b.w {
		w = b.w
	}
	if h > b.h {
		h = b.h
	}
	var vp [4]int32
	b.glctx.GetIntegerv(vp[:], gl.VIEWPORT)
	// 3 bytes per pixel (RGB) for the whole viewport; grow the scratch
	// buffer only when needed.
	size := int(vp[2] * vp[3] * 3)
	if len(b.imageBuf) < size {
		b.imageBuf = make([]byte, size)
	}
	b.glctx.ReadPixels(b.imageBuf[0:], int(vp[0]), int(vp[1]), int(vp[2]), int(vp[3]), gl.RGB, gl.UNSIGNED_BYTE)
	rgba := image.NewRGBA(image.Rect(x, y, x+w, y+h))
	for cy := y; cy < y+h; cy++ {
		// Byte offset of this row in the bottom-up GL buffer.
		bp := (int(vp[3])-h+cy)*int(vp[2])*3 + x*3
		for cx := x; cx < x+w; cx++ {
			// y+h-1-cy flips the row order into top-down image space.
			rgba.SetRGBA(cx, y+h-1-cy, color.RGBA{R: b.imageBuf[bp], G: b.imageBuf[bp+1], B: b.imageBuf[bp+2], A: 255})
			bp += 3
		}
	}
	return rgba
}
// PutImageData puts the given image at the given x/y coordinates
//
// The pixels are uploaded into a lazily created scratch texture and drawn
// as a textured quad with the image-rendering shader at full alpha.
func (b *XMobileBackend) PutImageData(img *image.RGBA, x, y int) {
	b.activate()
	b.glctx.ActiveTexture(gl.TEXTURE0)
	// Lazily create and configure the scratch upload texture on first use.
	if b.imageBufTex.Value == 0 {
		b.imageBufTex = b.glctx.CreateTexture()
		b.glctx.BindTexture(gl.TEXTURE_2D, b.imageBufTex)
		b.glctx.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
		b.glctx.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
		b.glctx.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
		b.glctx.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
	} else {
		b.glctx.BindTexture(gl.TEXTURE_2D, b.imageBufTex)
	}
	w, h := img.Bounds().Dx(), img.Bounds().Dy()
	// Fast path: tightly packed rows upload directly; otherwise compact the
	// rows into a contiguous buffer first.
	if img.Stride == img.Bounds().Dx()*4 {
		b.glctx.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, w, h, gl.RGBA, gl.UNSIGNED_BYTE, img.Pix[0:])
	} else {
		data := make([]uint8, 0, w*h*4)
		for cy := 0; cy < h; cy++ {
			start := cy * img.Stride
			end := start + w*4
			data = append(data, img.Pix[start:end]...)
		}
		b.glctx.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, w, h, gl.RGBA, gl.UNSIGNED_BYTE, data[0:])
	}
	dx, dy := float32(x), float32(y)
	dw, dh := float32(w), float32(h)
	b.glctx.BindBuffer(gl.ARRAY_BUFFER, b.buf)
	// First 8 floats: quad corner positions; last 8: texture coordinates.
	data := [16]float32{dx, dy, dx + dw, dy, dx + dw, dy + dh, dx, dy + dh,
		0, 0, 1, 0, 1, 1, 0, 1}
	b.glctx.BufferData(gl.ARRAY_BUFFER, byteSlice(unsafe.Pointer(&data[0]), len(data)*4), gl.STREAM_DRAW)
	b.glctx.UseProgram(b.ir.ID)
	b.glctx.Uniform1i(b.ir.Image, 0)
	b.glctx.Uniform2f(b.ir.CanvasSize, float32(b.fw), float32(b.fh))
	b.glctx.Uniform1f(b.ir.GlobalAlpha, 1)
	b.glctx.VertexAttribPointer(b.ir.Vertex, 2, gl.FLOAT, false, 0, 0)
	b.glctx.VertexAttribPointer(b.ir.TexCoord, 2, gl.FLOAT, false, 0, 8*4)
	b.glctx.EnableVertexAttribArray(b.ir.Vertex)
	b.glctx.EnableVertexAttribArray(b.ir.TexCoord)
	b.glctx.DrawArrays(gl.TRIANGLE_FAN, 0, 4)
	b.glctx.DisableVertexAttribArray(b.ir.Vertex)
	b.glctx.DisableVertexAttribArray(b.ir.TexCoord)
} | backend/xmobilebackend/imagedata.go | 0.61659 | 0.433502 | imagedata.go | starcoder
package bn256
import (
"errors"
"math/big"
)
// G2 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G2 struct {
	p *twistPoint
}

// String implements fmt.Stringer, prefixing the inner point's representation.
func (e *G2) String() string {
	return "bn256.G2" + e.p.String()
}
// Base sets e to g, where g is the generator of the group, and then returns e.
func (e *G2) Base() *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Set(twistGen)
	return e
}

// Zero sets e to the null element (the point at infinity) and then returns e.
func (e *G2) Zero() *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.SetInfinity()
	return e
}

// IsZero returns whether e is zero or not. The uninitialized zero value of
// G2 (nil inner point) counts as zero.
func (e *G2) IsZero() bool {
	if e.p == nil {
		return true
	}
	return e.p.IsInfinity()
}

// ScalarBaseMult sets e to g*k, where g is the generator of the group, and
// then returns e.
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Mul(twistGen, k)
	return e
}

// ScalarMult sets e to a*k and then returns e.
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Mul(a.p, k)
	return e
}

// Add sets e to a+b and then returns e.
func (e *G2) Add(a, b *G2) *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Add(a.p, b.p)
	return e
}

// Neg sets e to -a and then returns e.
func (e *G2) Neg(a *G2) *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Neg(a.p)
	return e
}

// Set sets e to a and then returns e.
func (e *G2) Set(a *G2) *G2 {
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.Set(a.p)
	return e
}
// Marshal converts e into a byte slice.
//
// The encoding is the affine coordinates (x.x, x.y, y.x, y.y), each as a
// 256-bit value decoded out of Montgomery form. The point at infinity
// encodes as all zero bytes.
func (e *G2) Marshal() []byte {
	// Each value is a 256-bit number.
	const numBytes = 256 / 8
	if e.p == nil {
		e.p = &twistPoint{}
	}
	// Normalize to affine coordinates before serializing.
	e.p.MakeAffine()
	ret := make([]byte, numBytes*4)
	if e.p.IsInfinity() {
		return ret
	}
	temp := &gfP{}
	montDecode(temp, &e.p.x.x)
	temp.Marshal(ret)
	montDecode(temp, &e.p.x.y)
	temp.Marshal(ret[numBytes:])
	montDecode(temp, &e.p.y.x)
	temp.Marshal(ret[2*numBytes:])
	montDecode(temp, &e.p.y.y)
	temp.Marshal(ret[3*numBytes:])
	return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back
// into a group element, reporting whether decoding succeeded.
//
// Input is four 32-byte values (x.x, x.y, y.x, y.y). All-zero coordinates
// decode as the point at infinity; any other point is rejected unless it
// lies on the twist curve.
func (e *G2) Unmarshal(m []byte) error {
	// Each value is a 256-bit number.
	const numBytes = 256 / 8
	if len(m) != 4*numBytes {
		return errors.New("bn256.G2: incorrect data length")
	}
	if e.p == nil {
		e.p = &twistPoint{}
	}
	e.p.x.x.Unmarshal(m)
	e.p.x.y.Unmarshal(m[numBytes:])
	e.p.y.x.Unmarshal(m[2*numBytes:])
	e.p.y.y.Unmarshal(m[3*numBytes:])
	// Convert back into Montgomery form for internal arithmetic.
	montEncode(&e.p.x.x, &e.p.x.x)
	montEncode(&e.p.x.y, &e.p.x.y)
	montEncode(&e.p.y.x, &e.p.y.x)
	montEncode(&e.p.y.y, &e.p.y.y)
	if e.p.x.IsZero() && e.p.y.IsZero() {
		// This is the point at infinity.
		e.p.SetInfinity()
	} else {
		e.p.z.SetOne()
		e.p.t.SetOne()
		if !e.p.IsOnCurve() {
			return errors.New("bn256.G2: malformed point")
		}
	}
	return nil
} | bn256/bn256g2.go | 0.754825 | 0.495117 | bn256g2.go | starcoder
package spatial
import (
"fmt"
"github.com/qoeg/aoc2019/util"
)
// Grid represents a two dimensional grid of cells
type Grid [][]Cell
// Draw updates cells in the Grid with new cells
func (g Grid) Draw(line []Cell) {
for i := range line {
coord := line[i].Pos()
if i > 0 && g[coord.X][coord.Y].Mark() != '.' {
if found, _ := util.Contains(line, line[i]); !found {
line[i].mark = 'X'
}
}
g[coord.X][coord.Y] = line[i]
}
}
// NewGrid initializes a new Grid with the specified dimensions
func NewGrid(width, height int) (g Grid) {
g = make([][]Cell, width)
for x := 0; x < width; x++ {
g[x] = make([]Cell, height)
for y := 0; y < height; y++ {
g[x][y] = NewCell('.', Coordinate{x, y})
}
}
return g
}
// Clone creates a new clone of the Grid
func (g Grid) Clone() Grid {
clone := [][]Cell{}
for x := 0; x < len(g); x++ {
clone = append(clone, []Cell{})
for y := 0; y < len(g[x]); y++ {
clone[x] = append(clone[x], g[x][y])
}
}
return clone
}
// Neighbors finds the four adjacent cells for the Coordinate.
// The optional include parameter is a rune whitelist for if only certain runes are wanted.
func (g Grid) Neighbors(c Coordinate, include ...rune) []Cell {
n := []Cell{}
if len(g) == 0 || len(g[0]) == 0 {
fmt.Println("grid.Neighbors(): the grid is empty")
return n
}
pos := c.Pos()
sides := make([]Cell, 4)
if pos.Y != 0 {
sides[0] = g[pos.X][pos.Y-1]
}
if pos.X != 0 {
sides[1] = g[pos.X-1][pos.Y]
}
if pos.X != len(g)-1 {
sides[2] = g[pos.X+1][pos.Y]
}
if pos.Y != len(g[pos.X])-1 {
sides[3] = g[pos.X][pos.Y+1]
}
for _, s := range sides {
if len(include) == 0 {
n = append(n, s)
continue
}
for _, in := range include {
if s.mark == in {
n = append(n, s)
break
}
}
}
return n
}
// Print will print the grid to the console, and
// replace any cells with the provided cell masks
func (g Grid) Print(masks ...Cell) {
for y := 0; y < len(g[0]); y++ {
for x := 0; x < len(g); x++ {
// print any objects for the location
masked := false
for i, o := range masks {
if (o.Pos() == Coordinate{x, y}) {
fmt.Print(o)
masks = append(masks[:i], masks[i+1:]...)
masked = true
break
}
}
if !masked {
fmt.Print(string(g[x][y].mark))
}
}
fmt.Println()
}
}
// With creates a clone of the Grid with the provided cells as replacements
func (g Grid) With(cells []Cell) Grid {
clone := g.Clone()
for _, c := range cells {
clone[c.Pos().X][c.Pos().Y] = c
}
return clone
}
// Parse creates a two dimensional slice of Cells from a grid string
func Parse(input string) (grid [][]Cell) {
return ParseOut(input, nil)
}
// ParseOut creates a two dimensional slice of Cells from a grid string.
// The replace function parameter can be used to replace certain cells with a new mark.
// The replace parameter can be nil.
func ParseOut(input string, replace func(Cell) rune) (grid [][]Cell) {
var x, y int
for _, r := range input {
if r == rune(10) {
y++
x = 0
continue
}
c := NewCell(r, Coordinate{X:x, Y:y})
if replace != nil {
c.mark = replace(c)
}
if y == 0 {
grid = append(grid, []Cell{})
}
grid[x] = append(grid[x], c)
x++
}
return grid
} | spatial/grid.go | 0.730866 | 0.524943 | grid.go | starcoder |
package object
import (
"math"
"github.com/carlosroman/aun-otra-ray-tracer/go/internal/ray"
)
// Pattern is a procedural color pattern evaluated in pattern space.
type Pattern struct {
	Transform ray.Matrix // pattern-space transform
	TransformInverse ray.Matrix // cached inverse of Transform, used by AtObj
	At func(point ray.Vector) RGB // color at a pattern-space point
	IsNotEmpty bool // true for real patterns; false for the empty/no-op one
}
// AtObj returns the pattern color at the given world-space point on obj:
// the point is converted to object space, then into pattern space via the
// inverse pattern transform, before being handed to At.
func (p Pattern) AtObj(obj Object, worldPoint ray.Vector) RGB {
	objPoint := obj.WorldToObject(worldPoint)
	patternPoint := p.TransformInverse.MultiplyByVector(objPoint)
	return p.At(patternPoint)
}
// SetTransform sets the pattern transform and caches its inverse.
// NOTE(review): when the transform is not invertible the error is returned,
// but Transform and TransformInverse have already been assigned — confirm
// callers check the error before using the pattern.
func (p *Pattern) SetTransform(transform ray.Matrix) error {
	p.Transform = transform
	inverse, err := transform.Inverse()
	p.TransformInverse = inverse
	return err
}
// StripePattern embeds Pattern with two stripe colors.
// NOTE(review): NewStripePattern returns a plain Pattern, so this type
// appears unused from within this file — confirm against other callers.
type StripePattern struct {
	Pattern
	A, B RGB
}
// EmptyPattern returns the no-op pattern: identity transforms, an At that is
// always Black, and IsNotEmpty left at its false zero value.
func EmptyPattern() Pattern {
	p := Pattern{
		Transform:        ray.DefaultIdentityMatrix(),
		TransformInverse: ray.DefaultIdentityMatrixInverse(),
	}
	p.At = func(_ ray.Vector) RGB { return Black }
	return p
}
// NewStripePattern returns a pattern of unit-wide stripes along the X axis:
// color a where floor(x) is even, color b where it is odd.
func NewStripePattern(a, b RGB) (p Pattern) {
	p = NewTestPattern()
	p.At = func(point ray.Vector) RGB {
		// math.Mod for consistency with the ring/checker patterns below; on
		// the integer results of Floor it agrees with the previous
		// math.Remainder (both map evens to 0 and odds to ±1).
		if math.Mod(math.Floor(point.GetX()), 2) == 0 {
			return a
		}
		return b
	}
	return p
}
// NewTestPattern returns a debug pattern whose color encodes the
// pattern-space point itself (R=x, G=y, B=z). It also serves as the common
// base for the other constructors: identity transform, IsNotEmpty set.
func NewTestPattern() (p Pattern) {
	p = Pattern{
		IsNotEmpty: true,
	}
	// The identity matrix always inverts cleanly, so the error is ignored.
	_ = p.SetTransform(ray.DefaultIdentityMatrix())
	p.At = func(point ray.Vector) RGB {
		return RGB{
			R: point.GetX(),
			G: point.GetY(),
			B: point.GetZ(),
		}
	}
	return p
}
// NewGradientPattern returns a pattern that blends linearly from a to b as
// the fractional part of the pattern-space X coordinate goes from 0 to 1.
func NewGradientPattern(a, b RGB) (p Pattern) {
	p = NewTestPattern()
	delta := b.Subtract(a)
	p.At = func(point ray.Vector) RGB {
		x := point.GetX()
		frac := x - math.Floor(x)
		return a.Add(delta.MultiplyBy(frac))
	}
	return p
}
// NewRingPattern returns a pattern of concentric unit-wide rings around the
// Y axis in the XZ plane, alternating color a (even rings) and b (odd).
func NewRingPattern(a, b RGB) (p Pattern) {
	p = NewTestPattern()
	p.At = func(point ray.Vector) RGB {
		x2 := math.Pow(point.GetX(), 2)
		z2 := math.Pow(point.GetZ(), 2)
		ring := math.Floor(math.Sqrt(x2 + z2))
		if math.Mod(ring, 2) != 0 {
			return b
		}
		return a
	}
	return p
}
// NewCheckerPattern returns a 3D checkerboard: unit cubes colored a or b
// depending on the parity of floor(x)+floor(y)+floor(z).
func NewCheckerPattern(a, b RGB) (p Pattern) {
	p = NewTestPattern()
	p.At = func(point ray.Vector) RGB {
		sum := math.Floor(point.GetX()) + math.Floor(point.GetY()) + math.Floor(point.GetZ())
		if math.Mod(sum, 2) != 0 {
			return b
		}
		return a
	}
	return p
} | go/internal/object/pattern.go | 0.829181 | 0.606848 | pattern.go | starcoder
package math
import "math"
// Box2 is an axis-aligned 2D bounding box defined by Min/Max corner points.
// NOTE(review): corners are shared pointers — most methods assume Min/Max
// are non-nil (as produced by NewBox2) and will panic on a zero-valued Box2.
type Box2 struct {
	Min *Vector2
	Max *Vector2
}

// NewBox2 returns an "empty" box with Min at +Inf and Max at -Inf, so that
// any subsequent ExpandByPoint tightens it around the point.
func NewBox2() *Box2 {
	b := &Box2{
		Min:&Vector2{X:math.Inf(0), Y:math.Inf(0)},
		Max:&Vector2{X:math.Inf(-1), Y:math.Inf(-1)},
	}
	return b
}

// Set copies min/max values into the box's existing corner vectors and
// returns the box.
func (b *Box2) Set(min, max *Vector2) *Box2 {
	b.Min.Copy( min )
	b.Max.Copy( max )
	return b
}

// SetFromPoint resets the box and expands it to enclose all given points.
func (b *Box2) SetFromPoint( points []Vector2 ) *Box2 {
	b.MakeEmpty()
	for i := 0 ; i < len(points); i++ {
		b.ExpandByPoint( &points[i] )
	}
	return b
}

// SetFromCenterAndSize sets the box to be centered on center with the given
// width/height (size) and returns it.
func (b *Box2) SetFromCenterAndSize( center, size *Vector2) *Box2 {
	v1 := &Vector2{}
	halfSize := v1.Copy( size ).MultiplyScalar( 0.5 )
	b.Min.Copy( center ).Sub( halfSize )
	b.Max.Copy( center ).Add( halfSize )
	return b
}
// Clone returns a new, independent copy of the box.
//
// Bug fix: the previous version created the copy with &Box2{}, whose Min/Max
// are nil, and then called Set — which copies into those nil vectors and
// panics. Allocating via NewBox2 gives Set valid corner vectors to fill.
func (b *Box2) Clone() *Box2 {
	b1 := NewBox2()
	b1.Set( b.Min, b.Max )
	return b1
}
// Copy copies box's corner values into b and returns b.
func (b *Box2) Copy( box *Box2 ) *Box2 {
	b.Min.Copy( box.Min )
	b.Max.Copy( box.Max )
	return b
}

// MakeEmpty resets the box to the inverted-infinite "empty" state.
func (b *Box2) MakeEmpty() *Box2 {
	b.Min.X = math.Inf(0)
	b.Min.Y = math.Inf(0)
	b.Max.X = math.Inf(-1)
	b.Max.Y = math.Inf(-1)
	return b
}

// IsEmpty reports whether the box contains no points (Max < Min on an axis).
func (b *Box2) IsEmpty() bool {
	return ( b.Max.X < b.Min.X ) || ( b.Max.Y < b.Min.Y )
}

// SizeTarget writes the box dimensions (Max - Min) into optionalTarget.
func (b *Box2) SizeTarget( optionalTarget *Vector2 ) *Vector2 {
	return optionalTarget.SubVectors( b.Max, b.Min )
}

// Size returns the box dimensions (Max - Min) in a new vector.
func (b *Box2) Size() *Vector2 {
	result := &Vector2{}
	return result.SubVectors( b.Max, b.Min )
}

// ExpandByPoint grows the box just enough to include point.
func (b *Box2) ExpandByPoint( point *Vector2 ) *Box2 {
	b.Min.Min( point )
	b.Max.Max( point )
	return b
}

// ExpandByVector grows the box by vector on both corners.
func (b *Box2) ExpandByVector( vector *Vector2) *Box2 {
	b.Min.Sub( vector )
	b.Max.Add( vector )
	return b
}

// ExpandByScalar grows (or, if negative, shrinks) the box by scalar on all sides.
func (b *Box2) ExpandByScalar( scalar float64 ) *Box2 {
	b.Min.AddScalar( -scalar )
	b.Max.AddScalar( scalar)
	return b
}

// ContainsPoint reports whether point lies within the box (inclusive bounds).
func (b *Box2) ContainsPoint( point *Vector2) bool {
	if point.X < b.Min.X || point.X > b.Max.X || point.Y < b.Min.Y || point.Y > b.Max.Y {
		return false
	}
	return true
}
// ContainsBox reports whether box lies entirely within b (inclusive bounds).
//
// Bug fix: the Y-axis upper-bound test previously compared box.Max.X against
// b.Max.Y, so containment along Y was checked with the wrong coordinate.
func (b *Box2) ContainsBox(box *Box2) bool {
	if (b.Min.X <= box.Min.X) && (box.Max.X <= b.Max.X) &&
		(b.Min.Y <= box.Min.Y) && (box.Max.Y <= b.Max.Y) {
		return true
	}
	return false
}
// GetParamaeter returns point expressed in normalized box coordinates
// (0 at Min, 1 at Max, per axis). NOTE(review): the "Paramaeter" spelling
// (sic) is kept because external callers depend on the name.
func (b *Box2) GetParamaeter( point *Vector2 ) *Vector2 {
	result := &Vector2{}
	return result.Set(
		( point.X - b.Min.X ) / ( b.Max.X - b.Min.X ),
		( point.Y - b.Min.Y ) / ( b.Max.Y - b.Min.Y ))
}

// GetParamaeterTarget is GetParamaeter writing into optionalTarget.
func (b *Box2) GetParamaeterTarget( point, optionalTarget *Vector2) *Vector2 {
	return optionalTarget.Set(
		( point.X - b.Min.X ) / ( b.Max.X - b.Min.X ),
		( point.Y - b.Min.Y ) / ( b.Max.Y - b.Min.Y ))
}

// IntersectsBox reports whether box overlaps b (touching edges count).
func (b *Box2) IntersectsBox( box *Box2 ) bool {
	if box.Max.X < b.Min.X || box.Min.X > b.Max.X || box.Max.Y < b.Min.Y || box.Min.Y > b.Max.Y {
		return false
	}
	return true
}

// ClampPoint returns a new vector with point clamped into the box.
func (b *Box2) ClampPoint( point *Vector2 ) *Vector2 {
	result := &Vector2{}
	return result.Copy( point ).Clamp( b.Min, b.Max )
}

// ClampPointTarget clamps point into the box, writing into optionalTarget.
func (b *Box2) ClampPointTarget( point, optionalTarget *Vector2 ) *Vector2 {
	return optionalTarget.Copy( point ).Clamp( b.Min, b.Max )
}

// DistanceToPoint returns the distance from point to the nearest point on
// the box (0 when point is inside).
func (b *Box2) DistanceToPoint( point *Vector2 ) float64 {
	v1 := &Vector2{}
	clampPoint := v1.Copy( point ).Clamp( b.Min, b.Max)
	return clampPoint.Sub( point ).Length()
}

// Intersect shrinks b, in place, to the intersection of b and box.
func (b *Box2) Intersect( box *Box2 ) *Box2 {
	b.Min.Max( box.Min )
	b.Max.Min( box.Max )
	return b
}

// Union grows b, in place, to the union of b and box.
func (b *Box2) Union( box *Box2 ) *Box2 {
	b.Min.Min( box.Min )
	b.Max.Max( box.Max )
	return b
}

// Translate shifts both corners of the box by offset, in place.
func (b *Box2) Translate( offset *Vector2 ) *Box2 {
	b.Min.Add( offset )
	b.Max.Add( offset )
	return b
}

// Equals reports whether box has the same corner values as b.
func (b *Box2) Equals( box *Box2 ) bool {
	return box.Min.Equals( b.Min ) && box.Max.Equals( b.Max )
} | math/box2.go | 0.830732 | 0.558929 | box2.go | starcoder
package data
import (
"math/big"
"time"
)
// NewUnboxed packs args into the unboxed (native-element) slice type that
// corresponds to flag. Every argument must already be the Native value type
// selected by flag; a mismatch panics on the type assertion.
//
// NOTE(review): BigInt and BigFlt values are re-created via Int64()/Float64(),
// which silently truncates values outside those ranges. A flag with no
// matching case returns a nil Sliceable.
func NewUnboxed(flag TyNat, args ...Native) Sliceable {
	var d Sliceable
	switch flag {
	case Nil:
		d = NilVec{}
		// Only the count matters for Nil; `for range` replaces `for _, _ =`.
		for range args {
			d = append(d.(NilVec), struct{}{})
		}
	case Bool:
		d = BoolVec{}
		for _, dat := range args {
			d = append(d.(BoolVec), bool(dat.(BoolVal)))
		}
	case Int:
		d = IntVec{}
		for _, dat := range args {
			d = append(d.(IntVec), int(dat.(IntVal)))
		}
	case Int8:
		d = Int8Vec{}
		for _, dat := range args {
			d = append(d.(Int8Vec), int8(dat.(Int8Val)))
		}
	case Int16:
		d = Int16Vec{}
		for _, dat := range args {
			d = append(d.(Int16Vec), int16(dat.(Int16Val)))
		}
	case Int32:
		d = Int32Vec{}
		for _, dat := range args {
			d = append(d.(Int32Vec), int32(dat.(Int32Val)))
		}
	case Uint:
		d = UintVec{}
		for _, dat := range args {
			d = append(d.(UintVec), uint(dat.(UintVal)))
		}
	case Uint8:
		d = Uint8Vec{}
		for _, dat := range args {
			d = append(d.(Uint8Vec), uint8(dat.(Uint8Val)))
		}
	case Uint16:
		d = Uint16Vec{}
		for _, dat := range args {
			d = append(d.(Uint16Vec), uint16(dat.(Uint16Val)))
		}
	case Uint32:
		d = Uint32Vec{}
		for _, dat := range args {
			d = append(d.(Uint32Vec), uint32(dat.(Uint32Val)))
		}
	case Float:
		d = FltVec{}
		for _, dat := range args {
			d = append(d.(FltVec), float64(dat.(FltVal)))
		}
	case Flt32:
		d = Flt32Vec{}
		for _, dat := range args {
			d = append(d.(Flt32Vec), float32(dat.(Flt32Val)))
		}
	case Imag:
		d = ImagVec{}
		for _, dat := range args {
			d = append(d.(ImagVec), complex128(dat.(ImagVal)))
		}
	case Imag64:
		d = Imag64Vec{}
		for _, dat := range args {
			d = append(d.(Imag64Vec), complex64(dat.(Imag64Val)))
		}
	case Byte:
		d = ByteVec{}
		for _, dat := range args {
			d = append(d.(ByteVec), byte(dat.(ByteVal)))
		}
	case Rune:
		d = RuneVec{}
		for _, dat := range args {
			d = append(d.(RuneVec), rune(dat.(RuneVal)))
		}
	case Bytes:
		d = BytesVec{}
		for _, dat := range args {
			d = append(d.(BytesVec), []byte(dat.(BytesVal)))
		}
	case String:
		d = StrVec{}
		for _, dat := range args {
			d = append(d.(StrVec), string(dat.(StrVal)))
		}
	case BigInt:
		d = BigIntVec{}
		for _, dat := range args {
			// Re-allocated through Int64, so arbitrary precision is lost.
			bi := dat.(BigIntVal)
			d = append(d.(BigIntVec), big.NewInt(((*big.Int)(&bi)).Int64()))
		}
	case BigFlt:
		d = BigFltVec{}
		for _, dat := range args {
			// Re-allocated through Float64, so arbitrary precision is lost.
			bf := dat.(BigFltVal)
			f, _ := ((*big.Float)(&bf)).Float64()
			d = append(d.(BigFltVec), big.NewFloat(f))
		}
	case Ratio:
		d = RatioVec{}
		for _, dat := range args {
			rat := dat.(RatioVal)
			d = append(d.(RatioVec), big.NewRat(
				((*big.Rat)(&rat)).Num().Int64(),
				((*big.Rat)(&rat)).Denom().Int64(),
			))
		}
	case Time:
		d = TimeVec{}
		for _, dat := range args {
			d = append(d.(TimeVec), time.Time(dat.(TimeVal)))
		}
	case Duration:
		d = DuraVec{}
		for _, dat := range args {
			d = append(d.(DuraVec), time.Duration(dat.(DuraVal)))
		}
	case Error:
		d = ErrorVec{}
		for _, dat := range args {
			d = append(d.(ErrorVec), error(dat.(ErrorVal).E))
		}
	}
	return d
}
// Bytes returns the vector reinterpreted as a BytesVal (no copy).
func (v ByteVec) Bytes() BytesVal { return []byte(v) }

// Set overwrites the byte at index i in place.
func (v *ByteVec) Set(i int, b byte) { (*v)[i] = b }

// Insert inserts byte b at index i, shifting the tail right by one.
// NOTE(review): the j parameter is unused — confirm against the interface
// this signature is meant to satisfy.
func (v *ByteVec) Insert(i, j int, b byte) {
	var s = []byte(*v)
	// Grow by one, shift [i:] right, then drop b into the gap.
	s = append(s, byte(0))
	copy(s[i+1:], s[i:])
	s[i] = b
	*v = ByteVec(s)
}

// InsertSlice inserts the bytes b at index i.
// NOTE(review): j is unused here too. Also, when a caller expands an
// existing slice with `b...`, the inner append may write into that caller's
// backing array if it has spare capacity — confirm this aliasing is intended.
func (v *ByteVec) InsertSlice(i, j int, b ...byte) {
	var s = []byte(*v)
	*v = ByteVec(append(s[:i], append(b, s[i:]...)...))
}

// Cut removes the half-open index range [i, j), zeroing the vacated tail
// bytes so no stale data is retained by the backing array.
func (v *ByteVec) Cut(i, j int) {
	var s = []byte(*v)
	copy(s[i:], s[j:])
	// to prevent a possib. mem leak
	for k, n := len(s)-j+i, len(s); k < n; k++ {
		s[k] = byte(0)
	}
	*v = ByteVec(s[:len(s)-j+i])
}

// Delete removes the single byte at index i, zeroing the vacated last slot.
func (v *ByteVec) Delete(i int) {
	var s = []byte(*v)
	copy(s[i:], s[i+1:])
	s[len(s)-1] = byte(0)
	*v = ByteVec(s[:len(s)-1])
}
// GetInt returns the element at (plain int) index i, boxed in the matching
// Native value type. InterfaceSlice returns the raw interface{} instead.
func (v InterfaceSlice) GetInt(i int) interface{} { return v[i] }
func (v NilVec) GetInt(i int) Native { return NilVal(v[i]) }
func (v BoolVec) GetInt(i int) Native { return BoolVal(v[i]) }
func (v IntVec) GetInt(i int) Native { return IntVal(v[i]) }
func (v Int8Vec) GetInt(i int) Native { return Int8Val(v[i]) }
func (v Int16Vec) GetInt(i int) Native { return Int16Val(v[i]) }
func (v Int32Vec) GetInt(i int) Native { return Int32Val(v[i]) }
func (v UintVec) GetInt(i int) Native { return UintVal(v[i]) }
func (v Uint8Vec) GetInt(i int) Native { return Uint8Val(v[i]) }
func (v Uint16Vec) GetInt(i int) Native { return Uint16Val(v[i]) }
func (v Uint32Vec) GetInt(i int) Native { return Uint32Val(v[i]) }
func (v FltVec) GetInt(i int) Native { return FltVal(v[i]) }
func (v Flt32Vec) GetInt(i int) Native { return Flt32Val(v[i]) }
func (v ImagVec) GetInt(i int) Native { return ImagVal(v[i]) }
func (v Imag64Vec) GetInt(i int) Native { return Imag64Val(v[i]) }
func (v ByteVec) GetInt(i int) Native { return ByteVal(v[i]) }
func (v RuneVec) GetInt(i int) Native { return RuneVal(v[i]) }
func (v BytesVec) GetInt(i int) Native { return BytesVal(v[i]) }
func (v StrVec) GetInt(i int) Native { return StrVal(v[i]) }
func (v BigIntVec) GetInt(i int) Native { return BigIntVal((*(*big.Int)(v[i]))) }
func (v BigFltVec) GetInt(i int) Native { return BigFltVal((*(*big.Float)(v[i]))) }
func (v RatioVec) GetInt(i int) Native { return RatioVal((*(*big.Rat)(v[i]))) }
func (v TimeVec) GetInt(i int) Native { return TimeVal(v[i]) }
func (v DuraVec) GetInt(i int) Native { return DuraVal(v[i]) }
func (v ErrorVec) GetInt(i int) Native { return ErrorVal{v[i]} }
func (v FlagSet) GetInt(i int) Native { return BitFlag(v[i]) }
// Get is the Native-indexed variant of GetInt: the index is asserted to be
// an IntVal and converted via Idx(), so a non-IntVal index panics.
func (v InterfaceSlice) Get(i Native) interface{} { return v[i.(IntVal).Idx()] }
func (v NilVec) Get(i Native) Native { return NilVal(v[i.(IntVal).Idx()]) }
func (v BoolVec) Get(i Native) Native { return BoolVal(v[i.(IntVal).Idx()]) }
func (v IntVec) Get(i Native) Native { return IntVal(v[i.(IntVal).Idx()]) }
func (v Int8Vec) Get(i Native) Native { return Int8Val(v[i.(IntVal).Idx()]) }
func (v Int16Vec) Get(i Native) Native { return Int16Val(v[i.(IntVal).Idx()]) }
func (v Int32Vec) Get(i Native) Native { return Int32Val(v[i.(IntVal).Idx()]) }
func (v UintVec) Get(i Native) Native { return UintVal(v[i.(IntVal).Idx()]) }
func (v Uint8Vec) Get(i Native) Native { return Uint8Val(v[i.(IntVal).Idx()]) }
func (v Uint16Vec) Get(i Native) Native { return Uint16Val(v[i.(IntVal).Idx()]) }
func (v Uint32Vec) Get(i Native) Native { return Uint32Val(v[i.(IntVal).Idx()]) }
func (v FltVec) Get(i Native) Native { return FltVal(v[i.(IntVal).Idx()]) }
func (v Flt32Vec) Get(i Native) Native { return Flt32Val(v[i.(IntVal).Idx()]) }
func (v ImagVec) Get(i Native) Native { return ImagVal(v[i.(IntVal).Idx()]) }
func (v Imag64Vec) Get(i Native) Native { return Imag64Val(v[i.(IntVal).Idx()]) }
func (v ByteVec) Get(i Native) Native { return ByteVal(v[i.(IntVal).Idx()]) }
func (v RuneVec) Get(i Native) Native { return RuneVal(v[i.(IntVal).Idx()]) }
func (v BytesVec) Get(i Native) Native { return BytesVal(v[i.(IntVal).Idx()]) }
func (v StrVec) Get(i Native) Native { return StrVal(v[i.(IntVal).Idx()]) }
func (v BigIntVec) Get(i Native) Native { return BigIntVal((*(*big.Int)(v[i.(IntVal).Idx()]))) }
func (v BigFltVec) Get(i Native) Native { return BigFltVal((*(*big.Float)(v[i.(IntVal).Idx()]))) }
func (v RatioVec) Get(i Native) Native { return RatioVal((*(*big.Rat)(v[i.(IntVal).Idx()]))) }
func (v TimeVec) Get(i Native) Native { return TimeVal(v[i.(IntVal).Idx()]) }
func (v DuraVec) Get(i Native) Native { return DuraVal(v[i.(IntVal).Idx()]) }
func (v ErrorVec) Get(i Native) Native { return ErrorVal{v[i.(IntVal).Idx()]} }
func (v FlagSet) Get(i Native) Native { return BitFlag(v[i.(IntVal).Idx()]) }
// Range returns the subslice [i:j) as a Sliceable of the same concrete type.
// The result shares the backing array with v.
func (v NilVec) Range(i, j int) Sliceable { return NilVec(v[i:j]) }
func (v BoolVec) Range(i, j int) Sliceable { return BoolVec(v[i:j]) }
func (v IntVec) Range(i, j int) Sliceable { return IntVec(v[i:j]) }
func (v Int8Vec) Range(i, j int) Sliceable { return Int8Vec(v[i:j]) }
func (v Int16Vec) Range(i, j int) Sliceable { return Int16Vec(v[i:j]) }
func (v Int32Vec) Range(i, j int) Sliceable { return Int32Vec(v[i:j]) }
func (v UintVec) Range(i, j int) Sliceable { return UintVec(v[i:j]) }
func (v Uint8Vec) Range(i, j int) Sliceable { return Uint8Vec(v[i:j]) }
func (v Uint16Vec) Range(i, j int) Sliceable { return Uint16Vec(v[i:j]) }
func (v Uint32Vec) Range(i, j int) Sliceable { return Uint32Vec(v[i:j]) }
func (v FltVec) Range(i, j int) Sliceable { return FltVec(v[i:j]) }
func (v Flt32Vec) Range(i, j int) Sliceable { return Flt32Vec(v[i:j]) }
func (v ImagVec) Range(i, j int) Sliceable { return ImagVec(v[i:j]) }
func (v Imag64Vec) Range(i, j int) Sliceable { return Imag64Vec(v[i:j]) }
func (v ByteVec) Range(i, j int) Sliceable { return ByteVec(v[i:j]) }
func (v RuneVec) Range(i, j int) Sliceable { return RuneVec(v[i:j]) }
func (v BytesVec) Range(i, j int) Sliceable { return BytesVec(v[i:j]) }
func (v StrVec) Range(i, j int) Sliceable { return StrVec(v[i:j]) }
func (v BigIntVec) Range(i, j int) Sliceable { return BigIntVec(v[i:j]) }
func (v BigFltVec) Range(i, j int) Sliceable { return BigFltVec(v[i:j]) }
func (v RatioVec) Range(i, j int) Sliceable { return RatioVec(v[i:j]) }
func (v TimeVec) Range(i, j int) Sliceable { return TimeVec(v[i:j]) }
func (v DuraVec) Range(i, j int) Sliceable { return DuraVec(v[i:j]) }
func (v ErrorVec) Range(i, j int) Sliceable { return ErrorVec(v[i:j]) }
func (v FlagSet) Range(i, j int) Sliceable { return FlagSet(v[i:j]) }
// Native returns the raw Go value at index i, without any Native boxing.
// NOTE(review): the InterfaceSlice variant is the unexported `nat`, breaking
// the naming symmetry of the other receivers — confirm this is intentional.
func (v InterfaceSlice) nat(i int) interface{} { return v[i] }
func (v NilVec) Native(i int) struct{} { return v[i] }
func (v BoolVec) Native(i int) bool { return v[i] }
func (v IntVec) Native(i int) int { return v[i] }
func (v Int8Vec) Native(i int) int8 { return v[i] }
func (v Int16Vec) Native(i int) int16 { return v[i] }
func (v Int32Vec) Native(i int) int32 { return v[i] }
func (v UintVec) Native(i int) uint { return v[i] }
func (v Uint8Vec) Native(i int) uint8 { return v[i] }
func (v Uint16Vec) Native(i int) uint16 { return v[i] }
func (v Uint32Vec) Native(i int) uint32 { return v[i] }
func (v FltVec) Native(i int) float64 { return v[i] }
func (v Flt32Vec) Native(i int) float32 { return v[i] }
func (v ImagVec) Native(i int) complex128 { return v[i] }
func (v Imag64Vec) Native(i int) complex64 { return v[i] }
func (v ByteVec) Native(i int) byte { return v[i] }
func (v RuneVec) Native(i int) rune { return v[i] }
func (v BytesVec) Native(i int) []byte { return v[i] }
func (v StrVec) Native(i int) string { return v[i] }
func (v BigIntVec) Native(i int) *big.Int { return v[i] }
func (v BigFltVec) Native(i int) *big.Float { return v[i] }
func (v RatioVec) Native(i int) *big.Rat { return v[i] }
func (v TimeVec) Native(i int) time.Time { return v[i] }
func (v DuraVec) Native(i int) time.Duration { return v[i] }
func (v FlagSet) Native(i int) BitFlag { return v[i] }
func (v ErrorVec) Native(i int) struct{ e error } { return struct{ e error }{v[i]} }
// RangeNative returns the half-open sub-slice [i:j) as a plain Go slice of
// the element type (the returned slice shares backing storage with v).
func (v NilVec) RangeNative(i, j int) []struct{} { return NilVec(v[i:j]) }
func (v BoolVec) RangeNative(i, j int) []bool { return BoolVec(v[i:j]) }
func (v IntVec) RangeNative(i, j int) []int { return IntVec(v[i:j]) }
func (v Int8Vec) RangeNative(i, j int) []int8 { return Int8Vec(v[i:j]) }
func (v Int16Vec) RangeNative(i, j int) []int16 { return Int16Vec(v[i:j]) }
func (v Int32Vec) RangeNative(i, j int) []int32 { return Int32Vec(v[i:j]) }
func (v UintVec) RangeNative(i, j int) []uint { return UintVec(v[i:j]) }
func (v Uint8Vec) RangeNative(i, j int) []uint8 { return Uint8Vec(v[i:j]) }
func (v Uint16Vec) RangeNative(i, j int) []uint16 { return Uint16Vec(v[i:j]) }
func (v Uint32Vec) RangeNative(i, j int) []uint32 { return Uint32Vec(v[i:j]) }
func (v FltVec) RangeNative(i, j int) []float64 { return FltVec(v[i:j]) }
func (v Flt32Vec) RangeNative(i, j int) []float32 { return Flt32Vec(v[i:j]) }
func (v ImagVec) RangeNative(i, j int) []complex128 { return ImagVec(v[i:j]) }
func (v Imag64Vec) RangeNative(i, j int) []complex64 { return Imag64Vec(v[i:j]) }
func (v ByteVec) RangeNative(i, j int) []byte { return ByteVec(v[i:j]) }
func (v RuneVec) RangeNative(i, j int) []rune { return RuneVec(v[i:j]) }
func (v BytesVec) RangeNative(i, j int) [][]byte { return BytesVec(v[i:j]) }
func (v StrVec) RangeNative(i, j int) []string { return StrVec(v[i:j]) }
func (v BigIntVec) RangeNative(i, j int) []*big.Int { return BigIntVec(v[i:j]) }
func (v BigFltVec) RangeNative(i, j int) []*big.Float { return BigFltVec(v[i:j]) }
func (v RatioVec) RangeNative(i, j int) []*big.Rat { return RatioVec(v[i:j]) }
func (v TimeVec) RangeNative(i, j int) []time.Time { return TimeVec(v[i:j]) }
func (v DuraVec) RangeNative(i, j int) []time.Duration { return DuraVec(v[i:j]) }
func (v ErrorVec) RangeNative(i, j int) []error { return ErrorVec(v[i:j]) }
func (v FlagSet) RangeNative(i, j int) []BitFlag { return FlagSet(v[i:j]) }
// Type reports every unboxed vector as the Unboxed native type; the
// per-element type is exposed separately via TypeElem.
func (v NilVec) Type() TyNat { return Unboxed }
func (v BoolVec) Type() TyNat { return Unboxed }
func (v IntVec) Type() TyNat { return Unboxed }
func (v Int8Vec) Type() TyNat { return Unboxed }
func (v Int16Vec) Type() TyNat { return Unboxed }
func (v Int32Vec) Type() TyNat { return Unboxed }
func (v UintVec) Type() TyNat { return Unboxed }
func (v Uint8Vec) Type() TyNat { return Unboxed }
func (v Uint16Vec) Type() TyNat { return Unboxed }
func (v Uint32Vec) Type() TyNat { return Unboxed }
func (v FltVec) Type() TyNat { return Unboxed }
func (v Flt32Vec) Type() TyNat { return Unboxed }
func (v ImagVec) Type() TyNat { return Unboxed }
func (v Imag64Vec) Type() TyNat { return Unboxed }
func (v ByteVec) Type() TyNat { return Unboxed }
func (v RuneVec) Type() TyNat { return Unboxed }
func (v BytesVec) Type() TyNat { return Unboxed }
func (v StrVec) Type() TyNat { return Unboxed }
func (v BigIntVec) Type() TyNat { return Unboxed }
func (v BigFltVec) Type() TyNat { return Unboxed }
func (v RatioVec) Type() TyNat { return Unboxed }
func (v TimeVec) Type() TyNat { return Unboxed }
func (v DuraVec) Type() TyNat { return Unboxed }
func (v ErrorVec) Type() TyNat { return Unboxed }
func (v FlagSet) Type() TyNat { return Unboxed }
// Slice boxes every element into the Native interface and returns the
// resulting []Native. The output slice is preallocated to len(v) capacity
// so the append loop never reallocates (the originals grew from an empty
// literal, reallocating O(log n) times).
func (v NilVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, NilVal(nat))
	}
	return slice
}
func (v BoolVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, BoolVal(nat))
	}
	return slice
}
func (v IntVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, IntVal(nat))
	}
	return slice
}
func (v Int8Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Int8Val(nat))
	}
	return slice
}
func (v Int16Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Int16Val(nat))
	}
	return slice
}
func (v Int32Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Int32Val(nat))
	}
	return slice
}
func (v UintVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, UintVal(nat))
	}
	return slice
}
func (v Uint8Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Uint8Val(nat))
	}
	return slice
}
func (v Uint16Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Uint16Val(nat))
	}
	return slice
}
func (v Uint32Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Uint32Val(nat))
	}
	return slice
}
func (v FltVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, FltVal(nat))
	}
	return slice
}
func (v Flt32Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Flt32Val(nat))
	}
	return slice
}
func (v ImagVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, ImagVal(nat))
	}
	return slice
}
func (v Imag64Vec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, Imag64Val(nat))
	}
	return slice
}
func (v ByteVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, ByteVal(nat))
	}
	return slice
}
func (v RuneVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, RuneVal(nat))
	}
	return slice
}
func (v BytesVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, BytesVal(nat))
	}
	return slice
}
func (v StrVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, StrVal(nat))
	}
	return slice
}
// The big.Int/big.Float/big.Rat variants convert the pointer itself, so the
// boxed values alias the originals.
func (v BigIntVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, (*BigIntVal)(nat))
	}
	return slice
}
func (v BigFltVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, (*BigFltVal)(nat))
	}
	return slice
}
func (v RatioVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, (*RatioVal)(nat))
	}
	return slice
}
func (v TimeVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, TimeVal(nat))
	}
	return slice
}
func (v DuraVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, DuraVal(nat))
	}
	return slice
}
func (v ErrorVec) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, NewError(nat))
	}
	return slice
}
func (v FlagSet) Slice() []Native {
	slice := make([]Native, 0, len(v))
	for _, nat := range v {
		slice = append(slice, BitFlag(nat))
	}
	return slice
}
// TypeElem returns the native type flag of the vector's element type.
func (v NilVec) TypeElem() Typed { return Nil }
func (v BoolVec) TypeElem() Typed { return Bool }
func (v IntVec) TypeElem() Typed { return Int }
func (v Int8Vec) TypeElem() Typed { return Int8 }
func (v Int16Vec) TypeElem() Typed { return Int16 }
func (v Int32Vec) TypeElem() Typed { return Int32 }
func (v UintVec) TypeElem() Typed { return Uint }
func (v Uint8Vec) TypeElem() Typed { return Uint8 }
func (v Uint16Vec) TypeElem() Typed { return Uint16 }
func (v Uint32Vec) TypeElem() Typed { return Uint32 }
func (v FltVec) TypeElem() Typed { return Float }
func (v Flt32Vec) TypeElem() Typed { return Flt32 }
func (v ImagVec) TypeElem() Typed { return Imag }
func (v Imag64Vec) TypeElem() Typed { return Imag64 }
func (v ByteVec) TypeElem() Typed { return Byte }
func (v RuneVec) TypeElem() Typed { return Rune }
func (v BytesVec) TypeElem() Typed { return Bytes }
func (v StrVec) TypeElem() Typed { return String }
func (v BigIntVec) TypeElem() Typed { return BigInt }
func (v BigFltVec) TypeElem() Typed { return BigFlt }
func (v RatioVec) TypeElem() Typed { return Ratio }
func (v TimeVec) TypeElem() Typed { return Time }
func (v DuraVec) TypeElem() Typed { return Duration }
func (v ErrorVec) TypeElem() Typed { return Error }
func (v FlagSet) TypeElem() Typed { return Type }
// Null returns a fresh, empty (zero-length, non-nil) vector of the same kind.
func (v NilVec) Null() Native { return NilVec([]struct{}{}) }
func (v BoolVec) Null() Native { return BoolVec([]bool{}) }
func (v IntVec) Null() Native { return IntVec([]int{}) }
func (v Int8Vec) Null() Native { return Int8Vec([]int8{}) }
func (v Int16Vec) Null() Native { return Int16Vec([]int16{}) }
func (v Int32Vec) Null() Native { return Int32Vec([]int32{}) }
func (v UintVec) Null() Native { return UintVec([]uint{}) }
func (v Uint8Vec) Null() Native { return Uint8Vec([]uint8{}) }
func (v Uint16Vec) Null() Native { return Uint16Vec([]uint16{}) }
func (v Uint32Vec) Null() Native { return Uint32Vec([]uint32{}) }
func (v FltVec) Null() Native { return FltVec([]float64{}) }
func (v Flt32Vec) Null() Native { return Flt32Vec([]float32{}) }
func (v ImagVec) Null() Native { return ImagVec([]complex128{}) }
func (v Imag64Vec) Null() Native { return Imag64Vec([]complex64{}) }
func (v ByteVec) Null() Native { return ByteVec([]byte{}) }
func (v RuneVec) Null() Native { return RuneVec([]rune{}) }
func (v BytesVec) Null() Native { return BytesVec([][]byte{}) }
func (v StrVec) Null() Native { return StrVec([]string{}) }
func (v BigIntVec) Null() Native { return BigIntVec([]*big.Int{}) }
func (v BigFltVec) Null() Native { return BigFltVec([]*big.Float{}) }
func (v RatioVec) Null() Native { return RatioVec([]*big.Rat{}) }
func (v TimeVec) Null() Native { return TimeVec([]time.Time{}) }
func (v DuraVec) Null() Native { return DuraVec([]time.Duration{}) }
func (v ErrorVec) Null() Native { return ErrorVec([]error{}) }
func (v FlagSet) Null() Native { return FlagSet([]BitFlag{}) }
// Len returns the number of elements in the vector.
func (v NilVec) Len() int { return len(v) }
func (v BoolVec) Len() int { return len(v) }
func (v IntVec) Len() int { return len(v) }
func (v Int8Vec) Len() int { return len(v) }
func (v Int16Vec) Len() int { return len(v) }
func (v Int32Vec) Len() int { return len(v) }
func (v UintVec) Len() int { return len(v) }
func (v Uint8Vec) Len() int { return len(v) }
func (v Uint16Vec) Len() int { return len(v) }
func (v Uint32Vec) Len() int { return len(v) }
func (v FltVec) Len() int { return len(v) }
func (v Flt32Vec) Len() int { return len(v) }
func (v ImagVec) Len() int { return len(v) }
func (v Imag64Vec) Len() int { return len(v) }
func (v ByteVec) Len() int { return len(v) }
func (v RuneVec) Len() int { return len(v) }
func (v BytesVec) Len() int { return len(v) }
func (v StrVec) Len() int { return len(v) }
func (v BigIntVec) Len() int { return len(v) }
func (v BigFltVec) Len() int { return len(v) }
func (v RatioVec) Len() int { return len(v) }
func (v TimeVec) Len() int { return len(v) }
func (v DuraVec) Len() int { return len(v) }
func (v ErrorVec) Len() int { return len(v) }
func (v FlagSet) Len() int { return len(v) }
// Empty reports whether the vector holds no elements. The originals used
// the `if cond { return true } return false` anti-pattern; returning the
// comparison directly is the idiomatic equivalent.
func (v NilVec) Empty() bool { return v.Len() == 0 }
func (v BoolVec) Empty() bool { return v.Len() == 0 }
func (v IntVec) Empty() bool { return v.Len() == 0 }
func (v Int8Vec) Empty() bool { return v.Len() == 0 }
func (v Int16Vec) Empty() bool { return v.Len() == 0 }
func (v Int32Vec) Empty() bool { return v.Len() == 0 }
func (v UintVec) Empty() bool { return v.Len() == 0 }
func (v Uint8Vec) Empty() bool { return v.Len() == 0 }
func (v Uint16Vec) Empty() bool { return v.Len() == 0 }
func (v Uint32Vec) Empty() bool { return v.Len() == 0 }
func (v FltVec) Empty() bool { return v.Len() == 0 }
func (v Flt32Vec) Empty() bool { return v.Len() == 0 }
func (v ImagVec) Empty() bool { return v.Len() == 0 }
func (v Imag64Vec) Empty() bool { return v.Len() == 0 }
func (v ByteVec) Empty() bool { return v.Len() == 0 }
func (v RuneVec) Empty() bool { return v.Len() == 0 }
func (v BytesVec) Empty() bool { return v.Len() == 0 }
func (v StrVec) Empty() bool { return v.Len() == 0 }
func (v BigIntVec) Empty() bool { return v.Len() == 0 }
func (v BigFltVec) Empty() bool { return v.Len() == 0 }
func (v RatioVec) Empty() bool { return v.Len() == 0 }
func (v TimeVec) Empty() bool { return v.Len() == 0 }
func (v DuraVec) Empty() bool { return v.Len() == 0 }
func (v ErrorVec) Empty() bool { return v.Len() == 0 }
func (v FlagSet) Empty() bool { return v.Len() == 0 }
func (v NilVec) Copy() Native {
var d = NilVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v BoolVec) Copy() Native {
var d = BoolVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v IntVec) Copy() Native {
var d = IntVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Int8Vec) Copy() Native {
var d = Int8Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Int16Vec) Copy() Native {
var d = Int16Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Int32Vec) Copy() Native {
var d = Int32Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v UintVec) Copy() Native {
var d = UintVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Uint8Vec) Copy() Native {
var d = Uint8Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Uint16Vec) Copy() Native {
var d = Uint16Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Uint32Vec) Copy() Native {
var d = Uint32Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v FltVec) Copy() Native {
var d = FltVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Flt32Vec) Copy() Native {
var d = Flt32Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v ImagVec) Copy() Native {
var d = ImagVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v Imag64Vec) Copy() Native {
var d = Imag64Vec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v ByteVec) Copy() Native {
var d = ByteVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v RuneVec) Copy() Native {
var d = RuneVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v BytesVec) Copy() Native {
var d = BytesVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v StrVec) Copy() Native {
var d = StrVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v BigIntVec) Copy() Native {
var d = BigIntVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v BigFltVec) Copy() Native {
var d = BigFltVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v RatioVec) Copy() Native {
var d = RatioVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v TimeVec) Copy() Native {
var d = TimeVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v DuraVec) Copy() Native {
var d = DuraVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v ErrorVec) Copy() Native {
var d = ErrorVec{}
for _, val := range v {
d = append(d, val)
}
return d
}
func (v FlagSet) Copy() Native {
var d = FlagSet{}
for _, val := range v {
d = append(d, val)
}
return d
} | data/unboxedArray.go | 0.534612 | 0.425665 | unboxedArray.go | starcoder |
package qrad
import (
"fmt"
"math"
"math/rand"
"time"
)
// init seeds the package-global math/rand source so measurement draws
// (Measure, MeasureQubit) differ from run to run.
func init() {
	rand.Seed(time.Now().Unix())
}
// Vector is a complex state vector; Elements[i] holds the amplitude of
// basis state i.
type Vector struct {
	Elements []Complex
}
// Log2Size returns log2 of the element count, i.e. the number of qubits
// this state vector represents.
func (v Vector) Log2Size() int {
	n := float64(len(v.Elements))
	return int(math.Log2(n))
}
// At returns the amplitude at index i.
// Panics with a descriptive message when i is out of range. The previous
// guard used `i > len(...)`, so i == len slipped past it (and then hit the
// runtime's generic index panic); negative indices are now caught too.
func (c Vector) At(i int) Complex {
	if i < 0 || i >= len(c.Elements) {
		panic("Invalid offset")
	}
	return c.Elements[i]
}
// Set stores amplitude e at index i, panicking when i is past the end.
func (c *Vector) Set(i int, e Complex) {
	if i >= len(c.Elements) {
		panic("Invalid offset")
	}
	c.Elements[i] = e
}
// Resize discards the current contents, installs a zeroed backing slice of
// length n, and returns the receiver for chaining.
func (c *Vector) Resize(n int) *Vector {
	c.Elements = make([]Complex, n)
	return c
}
// Size returns the number of amplitudes in the vector.
func (c Vector) Size() int {
	return len(c.Elements)
}
// NewVector returns an empty vector with a non-nil, zero-length amplitude slice.
func NewVector() *Vector {
	return &Vector{Elements: make([]Complex, 0)}
}
// NewQubit returns the single-qubit basis state |0> when i == 0,
// and |1> for any other value of i.
func NewQubit(i int) *Vector {
	if i == 0 {
		return &Vector{Elements: []Complex{complex(1, 0), complex(0, 0)}}
	}
	return &Vector{Elements: []Complex{complex(0, 0), complex(1, 0)}}
}
// NewVectorFromElements wraps the given amplitude slice in a Vector.
// The slice is not copied; the vector aliases the caller's storage.
func NewVectorFromElements(elements []Complex) *Vector {
	return &Vector{Elements: elements}
}
// Add stores the element-wise sum a + b in the receiver, panicking when the
// operands differ in length. Safe when the receiver aliases an operand:
// the operands are value copies holding the pre-Resize backing slice.
func (c *Vector) Add(a, b Vector) {
	if a.Size() != b.Size() {
		panic("Invalid vector lengths")
	}
	c.Resize(a.Size())
	for i := range c.Elements {
		c.Elements[i] = a.At(i) + b.At(i)
	}
}
// Sub stores the element-wise difference a - b in the receiver, panicking
// when the operands differ in length.
func (c *Vector) Sub(a, b Vector) {
	if a.Size() != b.Size() {
		panic("Invalid vector lengths")
	}
	c.Resize(a.Size())
	for i := range c.Elements {
		c.Elements[i] = a.At(i) - b.At(i)
	}
}
// MulScalar stores scalar*v in the receiver. It is safe to call with
// v == *c (as Normalize does): v is a value copy that keeps the old
// backing slice, so reads survive the receiver's Resize.
func (c *Vector) MulScalar(scalar Complex, v Vector) {
	c.Resize(v.Size())
	for i := 0; i < v.Size(); i++ {
		c.Set(i, scalar*v.At(i))
	}
}
// MulMatrix stores the matrix-vector product of m and v in the receiver.
// NOTE(review): only v.Size() == m.Width is checked and the result is sized
// to v.Size() rather than m.Height, so this is only correct for square
// matrices (the usual case for quantum gates) — confirm that assumption.
func (c *Vector) MulMatrix(v Vector, m Matrix) {
	if v.Size() != m.Width {
		panic("Invalid dimensions")
	}
	c.Resize(v.Size())
	for h := 0; h < c.Size(); h++ {
		sum := Complex(complex(0, 0))
		for w := 0; w < m.Width; w++ {
			sum += v.At(w) * m.At(w, h)
		}
		c.Set(h, sum)
	}
}
// TensorProduct stores the tensor (Kronecker) product of a and b in the
// receiver and returns the receiver. An empty operand acts as the identity.
// NOTE(review): in the empty-operand cases the receiver's Elements slice
// aliases the other operand's backing array (a re-slice, not a copy), so
// later mutations are shared — confirm that is intended.
func (c *Vector) TensorProduct(a, b Vector) *Vector {
	if a.Size() == 0 {
		c.Elements = b.Elements[:]
		return c
	}
	if b.Size() == 0 {
		c.Elements = a.Elements[:]
		return c
	}
	c.Resize(a.Size() * b.Size())
	// Output index ch interleaves: block ah of size b.Size(), offset bh.
	for ah := 0; ah < a.Size(); ah++ {
		for bh := 0; bh < b.Size(); bh++ {
			ch := ah*b.Size() + bh
			c.Set(ch, a.At(ah)*b.At(bh))
		}
	}
	return c
}
// Matrix returns the vector as a 1-column (ket) matrix of height Size().
func (c Vector) Matrix() *Matrix {
	m := NewMatrix()
	m.Resize(1, c.Size())
	for i, e := range c.Elements {
		m.Set(0, i, e)
	}
	return m
}
// IsNormalized reports whether the vector's Norm (sum of squared moduli)
// is approximately 1 according to NearEqual.
func (c Vector) IsNormalized() bool {
	return NearEqual(c.Norm(), 1.0)
}
// Normalize scales the vector in place to unit Euclidean length.
// NOTE(review): a zero vector has Length 0, making 1/l infinite and the
// elements NaN/Inf — confirm callers never normalize a zero vector.
func (c *Vector) Normalize() {
	l := c.Length()
	c.MulScalar(NewComplex(1/l, 0), *c)
}
// Length returns the Euclidean length of the vector: the square root of
// the sum of the amplitudes' squared moduli.
func (c *Vector) Length() float64 {
	var sum float64
	for _, e := range c.Elements {
		m := e.Modulus()
		sum += m * m
	}
	return math.Sqrt(sum)
}
// Norm returns the modulus of the inner product <v|v> — the sum of the
// amplitudes' squared moduli (i.e. the squared Euclidean length).
// Fixes from review: a dead `bra.Elements[0].Modulus()` statement was
// removed, and the sanity check used `&&` where `||` was clearly meant
// (the product of a 1xN bra and an Nx1 ket must be exactly 1x1).
func (c Vector) Norm() float64 {
	// | < p | p > |
	bra := NewMatrix().Adjoint(*c.Matrix())
	ket := c.Matrix()
	innerProduct := bra.MulMatrix(*bra, *ket)
	if innerProduct.Width != 1 || innerProduct.Height != 1 {
		panic("invalid inner product")
	}
	return innerProduct.At(0, 0).Modulus()
}
// Probabilities returns, for each basis-state index, its measurement
// probability |e|^2 normalized by the vector's Norm.
// NOTE(review): a zero vector yields norm 0 and NaN entries — confirm
// callers guard against that.
func (c Vector) Probabilities() map[int]float64 {
	out := make(map[int]float64)
	norm := c.Norm()
	for i, e := range c.Elements {
		out[i] = e.Modulus() * e.Modulus() / norm
	}
	return out
}
// Measure samples a basis-state index according to the amplitudes' squared
// moduli and returns it with its bit order reversed (see ReverseEndianness).
// The receiver is a value copy, so the Normalize call affects only the
// local copy used for sampling, not the caller's vector.
func (c Vector) Measure() int {
	c.Normalize()
	guess := rand.Float64()
	// Subtract each state's probability until the uniform draw is exhausted.
	for i, e := range c.Elements {
		guess -= (e.Modulus() * e.Modulus())
		if guess < 0 {
			return ReverseEndianness(i, c.Log2Size())
		}
	}
	// There's like a super super super super small chance of this happening...
	panic("the numbers mason, what do they mean?")
}
// Equals reports whether b has the same length and element-wise equal
// amplitudes (per Complex.Equals).
func (c Vector) Equals(b Vector) bool {
	if c.Size() != b.Size() {
		return false
	}
	for i := 0; i < c.Size(); i++ {
		if !c.At(i).Equals(b.At(i)) {
			return false
		}
	}
	return true
}
// MeasureQubit measures the single qubit at position index (index 0 maps to
// the highest-order bit of the basis-state label via the qubits-1-index
// shift), collapses the state by zeroing incompatible amplitudes,
// renormalizes in place, and returns the observed 0 or 1.
func (c *Vector) MeasureQubit(index int) int {
	norm := c.Norm()
	guess := rand.Float64()
	isOne := false
	qubits := int(math.Log2(float64(len(c.Elements))))
	// so, we have to reverse the qubit order, i'm not 100% sure why
	// first determine if this qubit is collapsing to zero or one
	for i, e := range c.Elements {
		// fmt.Println(i, qubits-1-index, i&(qubits-1-index), e.Modulus()*e.Modulus()/norm)
		// fmt.Printf("|%02b> %s\n", i, e)
		// Only states whose measured bit is 1 consume probability mass;
		// if the draw survives them all, the outcome is 0.
		if i&(1<<(qubits-1-index)) == 0 {
			continue
		} else {
			// fmt.Println("YAS")
		}
		guess -= (e.Modulus() * e.Modulus() / norm)
		if guess < 0 {
			isOne = true
			break
		}
	}
	// Collapse: zero every amplitude inconsistent with the measured value.
	for i := range c.Elements {
		// find all the zero elements and set them to zero
		if isOne {
			if i&(1<<(qubits-1-index)) == 0 {
				c.Elements[i] = NewComplex(0, 0)
			}
		}
		if !isOne {
			if i&(1<<(qubits-1-index)) != 0 {
				c.Elements[i] = NewComplex(0, 0)
			}
		}
	}
	c.Normalize()
	if isOne {
		return 1
	}
	return 0
}
// PrintProbabilities prints every basis-state index (decimal and 8-bit
// binary) with its measurement probability to standard output.
func (c Vector) PrintProbabilities() {
	probs := c.Probabilities()
	for i := 0; i < c.Size(); i++ {
		fmt.Printf("%2d %08b %.2f\n", i, i, probs[i])
	}
}
// PrintChance aggregates probability mass over the top `bits` of `total`
// qubits into buckets.
// NOTE(review): the output loop's print statement is commented out, so this
// function currently computes `chances` and prints nothing — confirm
// whether the printing should be restored or the function removed.
func (c Vector) PrintChance(bits, total int) {
	norm := c.Norm()
	chances := make(map[int]float64)
	for i, e := range c.Elements {
		// Bucket by the leading `bits` bits of the state index.
		bucket := i >> uint(total-bits)
		// fmt.Println("bucket", bucket, i, total-bits)
		chances[bucket] += (e.Modulus() * e.Modulus() / norm)
	}
	for i := 0; i < 1<<uint(bits); i++ {
		// fmt.Printf("%04b %.02f\n", i, chances[i])
	}
}
// ReverseEndianness mirrors the low `bits` bits of a: bit 0 becomes bit
// bits-1, bit 1 becomes bits-2, and so on. Bits above position bits-1 are
// discarded.
func ReverseEndianness(a, bits int) int {
	out := 0
	for i := 0; i < bits; i++ {
		out = out<<1 | a&1
		a >>= 1
	}
	return out
}
package plru
import (
	"math"
	"math/bits"
)
const (
	// blockSize is the number of PLRU bits packed into one uint64 block.
	blockSize = 64
	// blockMask extracts a bit's offset within its block.
	blockMask = blockSize - 1
	// blockFull marks a block whose bits are all set (capacity reached).
	blockFull = math.MaxUint64
)
// Policy is a pseudo-LRU (PLRU) replacement policy that tracks one recency
// bit per entry, packed 64 entries per uint64 block, with a round-robin
// counter over the blocks for eviction scans.
type Policy struct {
	blocks []uint64
	counter uint64
}
// NewPolicy returns an empty Policy where size is the number of entries to
// track. The size param is rounded up to the next multiple of 64.
func NewPolicy(size uint64) *Policy {
	// Round the block count up so every entry gets a bit. The previous
	// pre-rounding step `((size+32)>>6)<<6` rounded to the *nearest* 64,
	// which produced zero blocks for size < 32 and under-allocated for
	// sizes such as 65, contradicting the doc comment above.
	return &Policy{
		blocks: make([]uint64, (size+blockMask)/blockSize),
	}
}
// Has returns true if the bit is set (1) and false if not (0).
func (p *Policy) Has(bit uint64) bool {
	word := p.blocks[bit/blockSize]
	return word&(1<<(bit&blockMask)) != 0
}
// Hit sets the bit to 1; if that saturates the whole 64-bit block, every
// other bit in the block is cleared so relative recency can keep accruing.
func (p *Policy) Hit(bit uint64) {
	mask := uint64(1) << (bit & blockMask)
	block := &p.blocks[bit/blockSize]
	if *block |= mask; *block == blockFull {
		*block = mask
	}
}
// Del sets the bit to 0.
func (p *Policy) Del(bit uint64) {
	// AND-NOT clears only the target bit. The previous `&= 0 << n`
	// expression masked with a constant zero and wiped the entire
	// 64-bit block, deleting recency for 63 unrelated entries.
	p.blocks[bit/blockSize] &^= 1 << (bit & blockMask)
}
// Evict returns a LRU bit that you can later pass to Hit. It scans the
// blocks round-robin, advancing the counter (with wraparound) each call.
func (p *Policy) Evict() uint64 {
	index := p.counter
	block := &p.blocks[index]
	if p.counter++; p.counter >= uint64(len(p.blocks)) {
		p.counter = 0
	}
	// ^b & (b+1) isolates the lowest zero bit of b as a one-hot mask;
	// bitLookup converts that mask to its bit position.
	return (index * blockSize) + bitLookup(^*block&(*block+1))
}
// bitLookup returns the zero-based position of the single set bit in num.
// num must be an exact power of two; any other value — including zero —
// panics, preserving the contract of the 64-case switch this replaces.
func bitLookup(num uint64) uint64 {
	if num == 0 || num&(num-1) != 0 {
		panic("invalid bit lookup")
	}
	return uint64(bits.TrailingZeros64(num))
}
package tilemap
import (
"image"
"math/rand"
"os"
_ "image/png"
"github.com/faiface/pixel"
"../config"
"../hex"
)
var (
	// spritesheet holds the tileset picture (intended to be populated by init).
	spritesheet pixel.Picture
	// sprites maps each TileType to its sub-sprite of the sheet.
	sprites = make([]*pixel.Sprite, Max)
	// Batch accumulates tile draws against the spritesheet for batched rendering.
	Batch *pixel.Batch
)
// Tile is one hex cell of the map: its axial coordinate (embedded hex.Hex),
// terrain type, cached sprite, and pixel-space rectangle.
type Tile struct {
	hex.Hex
	Type TileType
	Sprite *pixel.Sprite
	Rect pixel.Rect
}
// New builds a Tile at axial coordinate (q, r) with terrain type t and
// computes its pixel rectangle from the tileset dimensions.
func New(q int, r int, t TileType) Tile {
	tile := Tile{
		hex.Hex{Q: q, R: r},
		t,
		sprites[t],
		pixel.Rect{},
	}
	// c is the tile's position in pixel space (from the embedded hex coord).
	c := tile.ToPixel()
	// Width is centered on c; the top extends by FullH - Height/2 —
	// presumably FullH includes vertical overlap art above the logical
	// hex height; confirm against the tileset.
	tile.Rect = pixel.R(
		c.X-config.Tileset.Width/2.0,
		c.Y-config.Tileset.Height/2.0,
		c.X+config.Tileset.Width/2.0,
		c.Y+config.Tileset.FullH-config.Tileset.Height/2.0,
	)
	return tile
}
// RandTile returns a Tile at (q, r) with a uniformly random type in
// [1, Max), i.e. any type except None.
func RandTile(q, r int) Tile {
	return New(q, r, TileType(rand.Intn(int(Max-1))+1))
}
// Draw renders the tile's sprite translated to its rectangle's lower-left
// corner, after applying matrix.
func (t Tile) Draw(target pixel.Target, matrix pixel.Matrix) {
	t.Sprite.Draw(target, matrix.Moved(t.Rect.Min))
}
// Map is a rectangular grid of hex tiles stored row-major as tiles[row][col].
type Map struct {
	tiles [][]Tile
	height int
	width int
}
// MakeMap builds a height x width map filled with random tiles, converting
// each offset coordinate (col, row) to the axial (q, r) the tiles store.
func MakeMap(height, width int) *Map {
	m := &Map{height: height, width: width}
	m.tiles = make([][]Tile, height)
	for row := 0; row < height; row++ {
		m.tiles[row] = make([]Tile, width)
		for col := 0; col < width; col++ {
			q, r := hex.ToAxial(col, row)
			m.tiles[row][col] = RandTile(q, r)
		}
	}
	return m
}
// At returns the tile at axial coordinate (q, r).
func (m *Map) At(q, r int) Tile {
	col, row := hex.ToOffset(q, r)
	return m.tiles[row][col]
}
// SetTileType changes the tile at axial (q, r) to type t and swaps in the
// matching cached sprite so the two fields stay consistent.
func (m *Map) SetTileType(q, r int, t TileType) {
	col, row := hex.ToOffset(q, r)
	m.tiles[row][col].Type = t
	m.tiles[row][col].Sprite = sprites[t]
}
// Draw renders the map from the highest row down. Within each row, even
// columns are drawn before odd ones — presumably so overlapping hex
// sprites layer correctly; confirm against the tileset's overlap.
func (m *Map) Draw(t pixel.Target, matrix pixel.Matrix) {
	for row := m.height - 1; row >= 0; row-- {
		for col := 0; col < m.width; col += 2 {
			m.tiles[row][col].Draw(t, matrix)
		}
		for col := 1; col < m.width; col += 2 {
			m.tiles[row][col].Draw(t, matrix)
		}
	}
}
// init loads the tileset picture, slices it into one sprite per TileType,
// and creates the shared draw Batch. Panics if the tileset cannot be loaded.
func init() {
	// Assign to the package-level spritesheet. The previous `:=` declared
	// a new local that shadowed the package variable, leaving the exported
	// package state with a nil picture.
	var err error
	spritesheet, err = loadPicture(config.Tileset.Filename)
	if err != nil {
		panic(err)
	}
	// None has no artwork; give it a zero-rect sprite.
	sprites[None] = pixel.NewSprite(spritesheet, pixel.ZR)
	// Walk the sheet top-to-bottom, left-to-right, assigning one sub-rect
	// per tile type starting at Grass, stopping once every type has one.
	var t TileType = Grass
outer:
	for y := spritesheet.Bounds().Max.Y; y > spritesheet.Bounds().Min.Y; y -= config.Tileset.FullH {
		for x := spritesheet.Bounds().Min.X; x < spritesheet.Bounds().Max.X; x += config.Tileset.Width {
			sprites[t] = pixel.NewSprite(spritesheet, pixel.R(x, y-config.Tileset.FullH, x+config.Tileset.Width, y))
			t++
			if t >= Max {
				break outer
			}
		}
	}
	Batch = pixel.NewBatch(&pixel.TrianglesData{}, spritesheet)
}
// Sprite returns the cached sprite for tile type t.
func Sprite(t TileType) *pixel.Sprite {
	return sprites[t]
}
// Random returns the sprite of a uniformly random tile type in [1, Max),
// i.e. never None.
func Random() *pixel.Sprite {
	return Sprite(TileType(rand.Intn(int(Max-1)) + 1))
}
func loadPicture(path string) (pixel.Picture, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
img, _, err := image.Decode(file)
if err != nil {
return nil, err
}
return pixel.PictureDataFromImage(img), nil
} | tilemap/tilemap.go | 0.600071 | 0.407687 | tilemap.go | starcoder |
package xtesting
import (
"fmt"
"path"
"reflect"
"runtime"
"testing"
)
// failTest prints a "file:line message" failure report (suppressed when
// skip < 0), marks the test as failed, and always returns false so callers
// can `return failTest(...)` directly. skip counts stack frames above this
// function when locating the caller; assertion helpers pass 1.
func failTest(t testing.TB, skip int, msg string, msgAndArgs ...interface{}) bool {
	if skip >= 0 {
		_, file, line, _ := runtime.Caller(skip + 1)
		out := fmt.Sprintf("%s:%d %s", path.Base(file), line, msg)
		if extra := messageFromMsgAndArgs(msgAndArgs...); len(extra) > 0 {
			out += extra
		}
		fmt.Println(out)
	}
	t.Fail()
	return false
}
// Equal asserts that two objects are equal.
func Equal(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if err := validateEqualArgs(expected, actual); err != nil {
		return failTest(t, 1, fmt.Sprintf("Equal: invalid operation `%#v` == `%#v` (%v)", expected, actual, err), msgAndArgs...)
	}
	if IsObjectEqual(expected, actual) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("Equal: expected `%#v`, actual `%#v`", expected, actual), msgAndArgs...)
}

// NotEqual asserts that the specified values are Not equal.
func NotEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if err := validateEqualArgs(expected, actual); err != nil {
		return failTest(t, 1, fmt.Sprintf("NotEqual: invalid operation `%#v` != `%#v` (%v)", expected, actual, err), msgAndArgs...)
	}
	if !IsObjectEqual(expected, actual) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("NotEqual: expected not to be `%#v`", actual), msgAndArgs...)
}
// EqualValue asserts that two objects are equal or convertible to the same types and equal.
func EqualValue(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if IsObjectValueEqual(expected, actual) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("EqualValue: expected `%#v`, actual `%#v`", expected, actual), msgAndArgs...)
}

// NotEqualValue asserts that two objects are not equal even when converted to the same type.
func NotEqualValue(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if !IsObjectValueEqual(expected, actual) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("NotEqualValue: expected not to be `%#v`", actual), msgAndArgs...)
}
// SamePointer asserts that two pointers reference the same object.
func SamePointer(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if !IsPointerSame(expected, actual) {
		return failTest(t, 1, fmt.Sprintf("SamePointer: expected `%#v` (%p), actual `%#v` (%p)", expected, expected, actual, actual), msgAndArgs...)
	}
	return true
}

// NotSamePointer asserts that two pointers do not reference the same object.
func NotSamePointer(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	if IsPointerSame(expected, actual) {
		// The message previously said "SamePointer:" (copy-paste slip);
		// every helper prefixes its own name, so use the correct one.
		return failTest(t, 1, fmt.Sprintf("NotSamePointer: expected not to be `%#v` (%p)", actual, actual), msgAndArgs...)
	}
	return true
}
// Nil asserts that the specified object is nil.
func Nil(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if !IsObjectNil(object) {
		return failTest(t, 1, fmt.Sprintf("Nil: expected `nil`, actual `%#v`", object), msgAndArgs...)
	}
	return true
}

// NotNil asserts that the specified object is not nil.
func NotNil(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if IsObjectNil(object) {
		// Message fixed to match the "Name: ..." convention used by every
		// other helper (it previously read "NotNil, expected ...").
		return failTest(t, 1, fmt.Sprintf("NotNil: expected not to be `nil`, actual `%#v`", object), msgAndArgs...)
	}
	return true
}
// True asserts that the specified value is true.
func True(t testing.TB, value bool, msgAndArgs ...interface{}) bool {
	if value {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("True: expected `true`, actual `%#v`", value), msgAndArgs...)
}

// False asserts that the specified value is false.
func False(t testing.TB, value bool, msgAndArgs ...interface{}) bool {
	if !value {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("False: expected to be `false`, actual `%#v`", value), msgAndArgs...)
}
// Zero asserts that the specified object is the zero value for its type.
func Zero(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if IsObjectZero(object) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("Zero: expected to be zero value, actual `%#v`", object), msgAndArgs...)
}

// NotZero asserts that the specified object is not the zero value for its type.
func NotZero(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if !IsObjectZero(object) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("NotZero: expected not to be zero value, actual `%#v`", object), msgAndArgs...)
}
// Empty asserts that the specified object is empty.
func Empty(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if IsObjectEmpty(object) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("Empty: expected to be empty value, actual `%#v`", object), msgAndArgs...)
}

// NotEmpty asserts that the specified object is not empty.
func NotEmpty(t testing.TB, object interface{}, msgAndArgs ...interface{}) bool {
	if !IsObjectEmpty(object) {
		return true
	}
	return failTest(t, 1, fmt.Sprintf("NotEmpty: expected not to be empty value, actual `%#v`", object), msgAndArgs...)
}
// Contain asserts that the specified container contains the specified substring or element.
// Support string, array, slice or map.
func Contain(t testing.TB, container, object interface{}, msgAndArgs ...interface{}) bool {
	ok, found := includeElement(container, object)
	switch {
	case !ok:
		return failTest(t, 1, fmt.Sprintf("Contain: invalid operator len(`%#v`)", container), msgAndArgs...)
	case !found:
		return failTest(t, 1, fmt.Sprintf("Contain: `%#v` is expected to contain `%#v`", container, object), msgAndArgs...)
	}
	return true
}

// NotContain asserts that the specified container does not contain the specified substring or element.
// Support string, array, slice or map.
func NotContain(t testing.TB, container, object interface{}, msgAndArgs ...interface{}) bool {
	ok, found := includeElement(container, object)
	switch {
	case !ok:
		return failTest(t, 1, fmt.Sprintf("NotContain: invalid operator len(`%#v`)", container), msgAndArgs...)
	case found:
		return failTest(t, 1, fmt.Sprintf("NotContain: `%#v` is expected not to contain `%#v`", container, object), msgAndArgs...)
	}
	return true
}
// ElementMatch asserts that the specified listA is equal to specified listB ignoring the order of the elements.
// If there are duplicate elements, the number of appearances of each of them in both lists should match.
func ElementMatch(t testing.TB, listA, listB interface{}, msgAndArgs ...interface{}) bool {
	// Two empty (or nil) lists trivially match.
	if IsObjectEmpty(listA) && IsObjectEmpty(listB) {
		return true
	}
	if err := validateArgIsList(listA, listB); err != nil {
		return failTest(t, 1, fmt.Sprintf("ElementMatch: invalid operator: `%#v` <-> `%#v` (%v)", listA, listB, err), msgAndArgs...)
	}
	// extraA/extraB hold elements present in one list but not the other.
	extraA, extraB := diffLists(listA, listB)
	if len(extraA) != 0 || len(extraB) != 0 {
		return failTest(t, 1, fmt.Sprintf("ElementMatch: `%#v` and `%#v` are expected to match each other", listA, listB), msgAndArgs...)
	}
	return true
}
// InDelta asserts that the two numerals are within delta (eps) of each other.
func InDelta(t testing.TB, expected, actual interface{}, eps float64, msgAndArgs ...interface{}) bool {
	// calcDeltaInEps reports whether the difference is within eps and what the
	// actual difference was; a non-nil err signals the delta could not be computed.
	in, actualEps, err := calcDeltaInEps(expected, actual, eps)
	if err != nil {
		return failTest(t, 1, fmt.Sprintf("InDelta: invalid operation (%v)", err), msgAndArgs...)
	}
	if !in {
		return failTest(t, 1, fmt.Sprintf("InDelta: max difference between `%#v` and `%#v` allowed is `%#v`, but difference was `%#v`", expected, actual, eps, actualEps), msgAndArgs...)
	}
	return true
}

// NotInDelta asserts that the two numerals are not within delta (eps) of each other.
func NotInDelta(t testing.TB, expected, actual interface{}, eps float64, msgAndArgs ...interface{}) bool {
	in, actualEps, err := calcDeltaInEps(expected, actual, eps)
	if err != nil {
		return failTest(t, 1, fmt.Sprintf("NotInDelta: invalid operation (%v)", err), msgAndArgs...)
	}
	if in {
		return failTest(t, 1, fmt.Sprintf("NotInDelta: max difference between `%#v` and `%#v` is not allowed in `%#v`, but difference was `%#v`", expected, actual, eps, actualEps), msgAndArgs...)
	}
	return true
}
// Implements asserts that object's dynamic type implements the interface
// pointed to by interfaceObject (pass a pointer such as (*io.Reader)(nil)).
// A nil object always fails the assertion.
func Implements(t testing.TB, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
	// Check object first: reflect.TypeOf(object) below would be nil for a nil
	// object, and the original post-hoc check never guarded that call path.
	if object == nil {
		// Constant message: fmt.Sprintf with no verbs is redundant (staticcheck S1039).
		return failTest(t, 1, "Implements: invalid operation for `nil`", msgAndArgs...)
	}
	// Elem() extracts the interface type from the *interface pointer.
	interfaceType := reflect.TypeOf(interfaceObject).Elem()
	if !reflect.TypeOf(object).Implements(interfaceType) {
		// Typo fix in message: "implment" -> "implement".
		return failTest(t, 1, fmt.Sprintf("Implements: %T expected to implement `%v`, actual not implement.", object, interfaceObject), msgAndArgs...)
	}
	return true
}
// IsType asserts that the specified objects are of the same type.
func IsType(t testing.TB, expected interface{}, object interface{}, msgAndArgs ...interface{}) bool {
	objectType := reflect.TypeOf(object)
	expectedType := reflect.TypeOf(expected)
	if objectType != expectedType {
		// Use %v rather than calling .String(): reflect.TypeOf(nil) returns a
		// nil reflect.Type, and the original .String() call panicked when
		// either argument was an untyped nil.
		return failTest(t, 1, fmt.Sprintf("IsType: expected to be of type `%v`, actual was `%v`", expectedType, objectType), msgAndArgs...)
	}
	return true
}
// Panic asserts that the code inside the specified function panics.
func Panic(t testing.TB, f func(), msgAndArgs ...interface{}) bool {
	isPanic, _ := didPanic(f)
	if !isPanic {
		return failTest(t, 1, fmt.Sprintf("Panic: function (%p) is expected to panic, actual does not panic", f), msgAndArgs...)
	}
	return true
}

// NotPanic asserts that the code inside the specified function does not panic.
func NotPanic(t testing.TB, f func(), msgAndArgs ...interface{}) bool {
	// value is the recovered panic value, reported on failure.
	isPanic, value := didPanic(f)
	if isPanic {
		// Typo fix in message: "acutal" -> "actual".
		return failTest(t, 1, fmt.Sprintf("NotPanic: function (%p) is expected not to panic, actual panic with `%v`", f, value), msgAndArgs...)
	}
	return true
}
// PanicWithValue asserts that the code inside the specified function panics, and the recovered value equals the expected value.
// Equality of the recovered value is delegated to IsObjectEqual.
func PanicWithValue(t testing.TB, expected interface{}, f func(), msgAndArgs ...interface{}) bool {
	isPanic, value := didPanic(f)
	if !isPanic {
		return failTest(t, 1, fmt.Sprintf("PanicWithValue: function (%p) is expected to panic with `%#v`, actual does not panic", f, expected), msgAndArgs...)
	}
	if !IsObjectEqual(value, expected) {
		return failTest(t, 1, fmt.Sprintf("PanicWithValue: function (%p) is expected to panic with `%#v`, actual with `%#v`", f, expected, value), msgAndArgs...)
	}
	return true
}
/*
// Exit asserts that the code inside the specified function exits.
func Exit(t testing.TB, f func(), msgAndArgs ...interface{}) bool {
// 1. Create a temp code file, use exec.Command to run and get exit code => need to write code file manually
// https://github.com/sirupsen/logrus/blob/master/alt_exit_test.go#L75
// https://github.com/sirupsen/logrus/blob/master/alt_exit.go#L49
// https://stackoverflow.com/questions/10385551/get-exit-code-go
// 2. Use a stub function and replace os.Exit when test => need to replace all os.Exit and only for internal
// https://github.com/uber-go/zap/blob/a68efdbdd15b7816de33cdbe7e6def2a559bdf64/internal/exit/exit.go#L44
// https://github.com/uber-go/zap/blob/a68efdbdd1/zapcore/entry_test.go#L124
// https://github.com/uber-go/zap/blob/a68efdbdd15b7816de33cdbe7e6def2a559bdf64/zapcore/entry.go#L236
// 3. Use exec.Command and rerun the test with an argument => gracefullest and recommend
// https://talks.golang.org/2014/testing.slide#23
// 4. Replace os.Exit to other function (patch), and restore it later => unsafe when run os.Exec in concurrency and difficult
// https://stackoverflow.com/questions/26225513/how-to-test-os-exit-scenarios-in-go
// https://github.com/bouk/monkey/blob/master/monkey.go#L67
// https://github.com/bouk/monkey/blob/master/monkey.go#L119
return true
}
// ExitWithCode asserts that the code inside the specified function exits with a code which not equals the expected code.
func ExitWithCode(t testing.TB, code int, f func(), msgAndArgs ...interface{}) bool {
return true
}
*/
package parser
import (
"fmt"
"strconv"
"github.com/robotii/lito/compiler/ast"
"github.com/robotii/lito/compiler/parser/errors"
"github.com/robotii/lito/compiler/parser/precedence"
"github.com/robotii/lito/compiler/token"
)
// parseIntegerLiteral parses an integer from the current token.
// Base 0 in ParseInt means base prefixes in the literal are honoured.
// On failure, a type-parsing error is recorded on the parser and nil is returned.
// NOTE(review): the 64-bit parse result is truncated by int(value) on 32-bit
// platforms -- confirm that is acceptable.
func (p *Parser) parseIntegerLiteral() ast.Expression {
	value, err := strconv.ParseInt(p.curToken.Literal, 0, 64)
	if err != nil {
		p.error = errors.NewTypeParsingError(p.curToken.Literal, "integer", p.curToken.Line)
		return nil
	}
	return &ast.IntegerLiteral{BaseNode: &ast.BaseNode{Token: p.curToken}, Value: int(value)}
}
// parseFloatLiteral parses a float literal given the already-parsed integer
// part. The parser is advanced to the fractional-part token, and the two
// pieces are joined with "." to form the full literal. On failure, a
// type-parsing error is recorded on the parser and nil is returned.
func (p *Parser) parseFloatLiteral(integerPart ast.Expression) ast.Expression {
	// Get the fractional part of the token.
	p.nextToken()
	floatTok := token.Token{
		Type:    token.Float,
		Literal: fmt.Sprintf("%s.%s", integerPart.String(), p.curToken.Literal),
		Line:    p.curToken.Line,
	}
	value, err := strconv.ParseFloat(floatTok.Literal, 64)
	if err != nil {
		p.error = errors.NewTypeParsingError(floatTok.Literal, "float", p.curToken.Line)
		return nil
	}
	// value is already a float64; the original float64(value) conversion was redundant.
	return &ast.FloatLiteral{BaseNode: &ast.BaseNode{Token: floatTok}, Value: value}
}
// parseStringLiteral wraps the current string token in a StringLiteral node.
func (p *Parser) parseStringLiteral() ast.Expression {
	return &ast.StringLiteral{BaseNode: &ast.BaseNode{Token: p.curToken}, Value: p.curToken.Literal}
}

// parseBooleanLiteral parses a boolean from the current token via
// strconv.ParseBool, recording a type-parsing error (and returning nil)
// when the literal is not a recognised boolean form.
func (p *Parser) parseBooleanLiteral() ast.Expression {
	value, err := strconv.ParseBool(p.curToken.Literal)
	if err != nil {
		p.error = errors.NewTypeParsingError(p.curToken.Literal, "boolean", p.curToken.Line)
		return nil
	}
	return &ast.BooleanExpression{BaseNode: &ast.BaseNode{Token: p.curToken}, Value: value}
}

// parseNilExpression wraps the current token in a NilExpression node.
func (p *Parser) parseNilExpression() ast.Expression {
	return &ast.NilExpression{BaseNode: &ast.BaseNode{Token: p.curToken}}
}
// parseHashExpression parses a hash literal starting at the current `{` token.
func (p *Parser) parseHashExpression() ast.Expression {
	return &ast.HashExpression{BaseNode: &ast.BaseNode{Token: p.curToken}, Data: p.parseHashPairs()}
}

// parseHashPairs parses zero or more comma-separated key/value pairs up to
// the closing `}`. Returns nil if the closing brace is missing.
func (p *Parser) parseHashPairs() map[string]ast.Expression {
	pairs := map[string]ast.Expression{}
	// Empty hash: `{}`.
	if p.peekTokenIs(token.RBrace) {
		p.nextToken()
		return pairs
	}
	p.parseHashPair(pairs)
	for p.peekTokenIs(token.Comma) {
		p.nextToken()
		p.parseHashPair(pairs)
	}
	if !p.expectPeek(token.RBrace) {
		return nil
	}
	return pairs
}
// parseHashPair parses a single `key: value` pair into pairs. Keys may be
// constants, identifiers or string literals; any other token records a
// type-parsing error. On malformed input the pair is silently dropped
// (pairs is left unchanged).
func (p *Parser) parseHashPair(pairs map[string]ast.Expression) {
	var key string
	var value ast.Expression
	p.nextToken()
	switch p.curToken.Type {
	case token.Constant, token.Ident:
		key = p.parseIdentifier().(ast.Variable).ReturnValue()
	case token.String:
		key = p.curToken.Literal
	default:
		p.error = errors.NewTypeParsingError(p.curToken.Literal, "hash key", p.curToken.Line)
		return
	}
	if !p.expectPeek(token.Colon) {
		return
	}
	p.nextToken()
	value = p.parseExpression(precedence.Normal)
	pairs[key] = value
}
// parseArrayExpression parses an array literal starting at the current `[` token.
func (p *Parser) parseArrayExpression() ast.Expression {
	return &ast.ArrayExpression{BaseNode: &ast.BaseNode{Token: p.curToken}, Elements: p.parseArrayElements()}
}

// parseArrayElements parses zero or more comma-separated expressions up to
// the closing `]`. Returns nil if the closing bracket is missing.
func (p *Parser) parseArrayElements() []ast.Expression {
	elems := []ast.Expression{}
	// Empty array: `[]`.
	if p.peekTokenIs(token.RBracket) {
		p.nextToken()
		return elems
	}
	p.nextToken() // start of first expression
	elems = append(elems, p.parseExpression(precedence.Normal))
	for p.peekTokenIs(token.Comma) {
		p.nextToken() // ","
		p.nextToken() // start of next expression
		elems = append(elems, p.parseExpression(precedence.Normal))
	}
	if !p.expectPeek(token.RBracket) {
		return nil
	}
	return elems
}
func (p *Parser) parseRangeExpression(left ast.Expression) ast.Expression {
exp := &ast.RangeExpression{
BaseNode: &ast.BaseNode{Token: p.curToken},
Start: left,
Exclusive: p.curToken.Type == token.RangeExcl,
}
prec := p.curPrecedence()
p.nextToken()
exp.End = p.parseExpression(prec)
return exp
} | compiler/parser/data_type_parsing.go | 0.640411 | 0.437463 | data_type_parsing.go | starcoder |
package ent
import (
"fmt"
"strings"
"github.com/facebook/ent/dialect/sql"
"github.com/tennashi/ent-sample/ent/namespace"
)
// Namespace is the model entity for the Namespace schema.
type Namespace struct {
	config `json:"-"`
	// ID of the ent.
	ID string `json:"id,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the NamespaceQuery when eager-loading is set.
	Edges NamespaceEdges `json:"edges"`
}

// NamespaceEdges holds the relations/edges for other nodes in the graph.
type NamespaceEdges struct {
	// Members holds the value of the members edge.
	Members []*User
	// Admins holds the value of the admins edge.
	Admins []*User
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 tracks the members edge, index 1 the admins edge.
	loadedTypes [2]bool
}

// MembersOrErr returns the Members value or an error if the edge
// was not loaded in eager-loading.
func (e NamespaceEdges) MembersOrErr() ([]*User, error) {
	if e.loadedTypes[0] {
		return e.Members, nil
	}
	return nil, &NotLoadedError{edge: "members"}
}

// AdminsOrErr returns the Admins value or an error if the edge
// was not loaded in eager-loading.
func (e NamespaceEdges) AdminsOrErr() ([]*User, error) {
	if e.loadedTypes[1] {
		return e.Admins, nil
	}
	return nil, &NotLoadedError{edge: "admins"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*Namespace) scanValues() []interface{} {
	return []interface{}{
		&sql.NullString{}, // id
		&sql.NullString{}, // name
	}
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Namespace fields.
func (n *Namespace) assignValues(values ...interface{}) error {
	// NOTE: the short variable n here deliberately shadows the receiver n for
	// the scope of this length check only.
	if m, n := len(values), len(namespace.Columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	if value, ok := values[0].(*sql.NullString); !ok {
		return fmt.Errorf("unexpected type %T for field id", values[0])
	} else if value.Valid {
		n.ID = value.String
	}
	values = values[1:]
	if value, ok := values[0].(*sql.NullString); !ok {
		return fmt.Errorf("unexpected type %T for field name", values[0])
	} else if value.Valid {
		n.Name = value.String
	}
	return nil
}
// QueryMembers queries the members edge of the Namespace.
func (n *Namespace) QueryMembers() *UserQuery {
	return (&NamespaceClient{config: n.config}).QueryMembers(n)
}

// QueryAdmins queries the admins edge of the Namespace.
func (n *Namespace) QueryAdmins() *UserQuery {
	return (&NamespaceClient{config: n.config}).QueryAdmins(n)
}

// Update returns a builder for updating this Namespace.
// Note that, you need to call Namespace.Unwrap() before calling this method, if this Namespace
// was returned from a transaction, and the transaction was committed or rolled back.
func (n *Namespace) Update() *NamespaceUpdateOne {
	return (&NamespaceClient{config: n.config}).UpdateOne(n)
}

// Unwrap unwraps the entity that was returned from a transaction after it was closed,
// so that all next queries will be executed through the driver which created the transaction.
// It panics if the entity's driver is not a transaction driver.
func (n *Namespace) Unwrap() *Namespace {
	tx, ok := n.config.driver.(*txDriver)
	if !ok {
		panic("ent: Namespace is not a transactional entity")
	}
	n.config.driver = tx.drv
	return n
}
// String implements the fmt.Stringer.
func (n *Namespace) String() string {
var builder strings.Builder
builder.WriteString("Namespace(")
builder.WriteString(fmt.Sprintf("id=%v", n.ID))
builder.WriteString(", name=")
builder.WriteString(n.Name)
builder.WriteByte(')')
return builder.String()
}
// Namespaces is a parsable slice of Namespace.
type Namespaces []*Namespace
func (n Namespaces) config(cfg config) {
for _i := range n {
n[_i].config = cfg
}
} | ent/namespace.go | 0.63409 | 0.415907 | namespace.go | starcoder |
package api
import (
"fmt"
"strconv"
"strings"
)
/*AssertValidType returns whether a given value is of the expected type.
If the initial conversion fails, it falls back to interpreting string values
where appropriate (e.g. "true", "42", "3.14"). */
func AssertValidType(value interface{}, expectedType string) bool {
	// Switch on the value itself rather than chained equality cases.
	switch expectedType {
	case "boolean":
		if _, ok := value.(bool); ok {
			return true
		}
		strValue, ok := value.(string)
		if !ok {
			return false
		}
		lower := strings.ToLower(strValue)
		return lower == "true" || lower == "false"
	case "integer":
		/* raw ints arriving via JSON decode as float64 - conversion to int from
		there is always possible, so a float64 is accepted as-is */
		if _, ok := value.(float64); ok {
			return true
		}
		strValue, ok := value.(string)
		if !ok {
			return false
		}
		_, err := strconv.Atoi(strValue)
		return err == nil
	case "string":
		_, ok := value.(string)
		return ok
	case "float":
		if _, ok := value.(float64); ok {
			return true
		}
		// BUG FIX: the original asserted value.(string) unconditionally here
		// and panicked for any non-float64, non-string value (e.g. a bool).
		strValue, ok := value.(string)
		if !ok {
			return false
		}
		_, err := strconv.ParseFloat(strValue, 64)
		return err == nil
	case "array":
		_, ok := value.([]interface{})
		return ok
	case "object":
		_, ok := value.(map[string]interface{})
		return ok
	default:
		// Unknown expected types never validate.
		return false
	}
}
// AssertValidTypeFromPath goes down a given path for a given inputData JSON, and asserts the expected valid type when at the expected level for the given path
func AssertValidTypeFromPath(path, expectedType string, inputData interface{}) error {
if splitPath := strings.Split(path, "."); len(splitPath) >= 1 {
if len(splitPath) > 1 {
if value, err := strconv.Atoi(splitPath[0]); err == nil {
nextInputData := inputData.([]interface{})[value]
return AssertValidTypeFromPath(strings.Join(splitPath[1:], "."), expectedType, nextInputData)
} else if body, valid := inputData.(map[string]interface{}); valid {
nextInputData := body[splitPath[0]]
return AssertValidTypeFromPath(strings.Join(splitPath[1:], "."), expectedType, nextInputData)
}
return fmt.Errorf("Invalid Path Item %s", splitPath[0])
}
var valueToEvaluate interface{}
if value, err := strconv.Atoi(splitPath[0]); err == nil {
valueToEvaluate = inputData.([]interface{})[value]
} else if body, valid := inputData.(map[string]interface{}); valid {
valueToEvaluate = body[splitPath[0]]
} else {
valueToEvaluate = inputData
}
if !AssertValidType(valueToEvaluate, expectedType) {
return fmt.Errorf("%s Is Invalid Expected Type %s", path, expectedType)
}
return nil
}
return fmt.Errorf("Empty Path Given For assertValidTypeFromPath")
} | pkg/api/typeUtils.go | 0.66454 | 0.540378 | typeUtils.go | starcoder |
package metric
import (
"fmt"
"sort"
)
// average is a helper for calculating an average value of something.
type average struct {
	Sum   float64 // running total of all recorded samples
	Count int     // number of samples recorded
}

// Calculate divides Sum by Count. It returns 0 when no samples have been
// recorded, avoiding the NaN the original produced on division by zero.
func (a average) Calculate() float64 {
	if a.Count == 0 {
		return 0
	}
	return a.Sum / float64(a.Count)
}
// averageMap is a map which contains an average value for each dev
type averageMap map[string]*average

// toList converts the map to a sortable list of AverageItem
func (a *averageMap) toList() averageList {
	var d averageList
	for name, average := range *a {
		d = append(d, averageItem{name, average.Calculate()})
	}
	return d
}

// reset replaces the map with a fresh, empty one.
func (a *averageMap) reset() {
	(*a) = averageMap{}
}

// add folds value into the running average for dev, creating the entry on
// first use.
func (a *averageMap) add(value float64, dev string) {
	if _, ok := (*a)[dev]; !ok {
		(*a)[dev] = &average{}
	}
	av := (*a)[dev]
	av.Sum += value
	av.Count++
}

// setCount overrides the sample count of every entry, so averages are
// computed over a fixed denominator i rather than the number of samples added.
func (a *averageMap) setCount(i int) {
	for key := range *a {
		(*a)[key].Count = i
	}
}
// string renders the per-dev averages plus the overall average, highest
// first, using the given unit label. The original duplicated the body of
// averageList.string line-for-line; delegating keeps the two formatters
// from drifting apart.
func (a *averageMap) string(unit string) string {
	return a.toList().string(unit)
}
// averageItem is a simple container for dev's name and some average value
type averageItem struct {
	Name  string
	Value float64
}

// averageList is used to sort a slice of AverageItems by Value
// (implements sort.Interface, ascending by Value).
type averageList []averageItem

func (a averageList) Len() int {
	return len(a)
}

func (a averageList) Swap(x, y int) {
	a[x], a[y] = a[y], a[x]
}

func (a averageList) Less(x, y int) bool {
	return a[x].Value < a[y].Value
}
// string renders the list: an overall average line followed by one line per
// dev, sorted from highest to lowest average, using the given unit label.
// Note: the sort mutates the receiver's element order.
func (a averageList) string(unit string) string {
	averageTotal := 0.0
	for _, dev := range a {
		averageTotal += dev.Value
	}
	result := ""
	result += fmt.Sprintf("Total average: %.2f %s\n", averageTotal/float64(len(a)), unit)
	sort.Sort(sort.Reverse(a))
	for _, dev := range a {
		result += fmt.Sprintf("Average for %s: %.2f %s\n", dev.Name, dev.Value, unit)
	}
	return result
}
// counterMap maps a name to its counter.
type counterMap map[string]*counter

// string renders one "For <name>: <count>" line per entry, sorted from the
// highest count to the lowest (sorting also gives deterministic output
// despite random map-iteration order).
func (c counterMap) string() string {
	var cs counters
	for _, i := range c {
		cs = append(cs, *i)
	}
	sort.Sort(sort.Reverse(cs))
	result := ""
	for _, i := range cs {
		result += fmt.Sprintf("For %s: %d\n", i.Name, i.Count)
	}
	return result
}
// counter pairs a name with an occurrence count.
// (This edit also removes dataset-metadata junk appended to the original
// closing line of the segment.)
type counter struct {
	Name  string
	Count int
}

// counters implements sort.Interface, ordering by Count ascending.
type counters []counter

func (a counters) Len() int {
	return len(a)
}

func (a counters) Swap(x, y int) {
	a[x], a[y] = a[y], a[x]
}

func (a counters) Less(x, y int) bool {
	return a[x].Count < a[y].Count
}
// Package frames describes the Frame interface.
// A set of standard frames are also defined in this package. These are: Fixed, Window, Wild and WildMin.
package frames
import (
"errors"
"strconv"
"github.com/richardlehane/siegfried/pkg/core/bytematcher/patterns"
"github.com/richardlehane/siegfried/pkg/core/signature"
)
// Frame encapsulates a pattern with offset information, mediating between the pattern and the bytestream.
type Frame interface {
	Match([]byte) (bool, []int)  // Match the byte sequence in a L-R direction. Return a boolean to indicate success. If true, return an offset for where a successive match by a related frame should begin.
	MatchR([]byte) (bool, []int) // Match the byte sequence in a reverse (R-L) direction. Return a boolean to indicate success. If true, return an offset for where a successive match by a related frame should begin.
	Equals(Frame) bool
	String() string
	Linked(Frame, int, int) bool // Is a frame linked to a preceding frame (by a preceding or succeeding relationship) with an offset and range that is less than the supplied ints?
	Min() int                    // minimum offset
	Max() int                    // maximum offset. Return -1 for no limit (wildcard, *)
	Pat() patterns.Pattern
	Save(*signature.LoadSaver)
	// The following methods are inherited from the enclosed OffType
	Orientation() OffType
	SwitchOff() OffType
	// The following methods are inherited from the enclosed pattern
	Length() (int, int) // min and max lengths of the enclosed pattern
	NumSequences() int  // the number of simple sequences that the enclosed pattern can be represented by. Return 0 if the pattern cannot be represented by a defined number of simple sequences (e.g. for an indirect offset pattern) or, if in your opinion, the number of sequences is unreasonably large.
	Sequences() []patterns.Sequence
}
// Loader is a function that reconstructs a Frame from its persisted form.
type Loader func(*signature.LoadSaver) Frame

// Identifiers for the built-in frame types, written as the first byte of a
// saved frame so Load can dispatch to the matching Loader.
const (
	fixedLoader byte = iota
	windowLoader
	wildLoader
	wildMinLoader
)

// loaders maps a frame-type identifier to its Loader; the nil slots are free
// for registration via Register.
var loaders = [8]Loader{loadFixed, loadWindow, loadWild, loadWildMin, nil, nil, nil, nil}

// Register installs a custom frame Loader under the given identifier.
func Register(id byte, l Loader) {
	loaders[int(id)] = l
}

// Load reads a frame-type identifier and dispatches to the matching Loader.
// If no Loader is registered for the identifier, an error is recorded on the
// LoadSaver and nil is returned.
func Load(ls *signature.LoadSaver) Frame {
	id := ls.LoadByte()
	l := loaders[int(id)]
	if l == nil {
		if ls.Err == nil {
			ls.Err = errors.New("bad frame loader")
		}
		return nil
	}
	return l(ls)
}
// OffType describes the reference point a frame's offset is measured from.
type OffType uint8

const (
	BOF  OffType = iota // beginning of file offset
	PREV                // offset from previous frame
	SUCC                // offset from successive frame
	EOF                 // end of file offset
)

// OffString gives short display names for the OffType values, indexed by type.
var OffString = [...]string{"B", "P", "S", "E"}

// Orientation returns the offset type of the frame which must be either BOF, PREV, SUCC or EOF
func (o OffType) Orientation() OffType {
	return o
}

// SwitchOff returns a new offset type according to a given set of rules. These are:
// - PREV -> SUCC
// - SUCC and EOF -> PREV
// (BOF is returned unchanged.)
// This is helpful when changing the orientation of a frame (for example to allow right-left searching)
func (o OffType) SwitchOff() OffType {
	switch o {
	case PREV:
		return SUCC
	case SUCC, EOF:
		return PREV
	default:
		return o
	}
}
// NewFrame generates Fixed, Window, Wild and WildMin frames. The offsets
// argument controls which frame type is created:
//   - no offsets, or max < 0 with min < 1: Wild
//   - one positive offset, or max < 0 with min > 0: WildMin
//   - two equal offsets >= 0: Fixed
//   - two unequal offsets >= 0: Window
func NewFrame(typ OffType, pat patterns.Pattern, offsets ...int) Frame {
	switch len(offsets) {
	case 0:
		return Wild{typ, pat}
	case 1:
		if offsets[0] > 0 {
			return WildMin{typ, offsets[0], pat}
		}
		return Wild{typ, pat}
	}
	// Two (or more) offsets: only the first two are significant.
	if offsets[1] < 0 {
		// No upper bound: wild, with a minimum if one was given.
		if offsets[0] > 0 {
			return WildMin{typ, offsets[0], pat}
		}
		return Wild{typ, pat}
	}
	if offsets[0] < 0 {
		offsets[0] = 0
	}
	if offsets[0] == offsets[1] {
		return Fixed{typ, offsets[0], pat}
	}
	return Window{typ, offsets[0], offsets[1], pat}
}
// SwitchFrame returns a new frame with a different orientation (for example to allow right-left searching)
func SwitchFrame(f Frame, p patterns.Pattern) Frame {
	return NewFrame(f.SwitchOff(), p, f.Min(), f.Max())
}

// BMHConvert converts the patterns within a slice of frames to BMH sequences if possible.
// The rev flag selects reversed (R-L) conversion.
func BMHConvert(fs []Frame, rev bool) []Frame {
	nfs := make([]Frame, len(fs))
	for i, f := range fs {
		nfs[i] = NewFrame(f.Orientation(), patterns.BMH(f.Pat(), rev), f.Min(), f.Max())
	}
	return nfs
}

// NonZero reports false if any of the pattern's simple sequences consists
// entirely of 0 bytes (including an empty sequence); otherwise it reports true.
func NonZero(f Frame) bool {
	for _, seq := range f.Sequences() {
		allzeros := true
		for _, b := range seq {
			if b != 0 {
				allzeros = false
			}
		}
		if allzeros {
			return false
		}
	}
	return true
}

// TotalLength is the sum of the maximum length of the enclosed pattern and the maximum offset.
func TotalLength(f Frame) int {
	_, l := f.Length()
	return l + f.Max()
}
// Fixed frames are at a fixed offset e.g. 0 or 10 from the BOF, EOF or a preceding or succeeding frame.
type Fixed struct {
	OffType
	Off int // the exact offset at which the pattern must occur
	patterns.Pattern
}

// Match tests the pattern at exactly Off bytes into b (L-R). On success the
// returned slice holds the single offset at which a successive frame's match
// should begin.
func (f Fixed) Match(b []byte) (bool, []int) {
	if f.Off >= len(b) {
		return false, nil
	}
	if success, length := f.Test(b[f.Off:]); success {
		return true, []int{f.Off + length}
	}
	return false, nil
}

// MatchR is the reverse-direction (R-L) equivalent of Match: it tests the
// pattern ending Off bytes before the end of b.
func (f Fixed) MatchR(b []byte) (bool, []int) {
	if f.Off >= len(b) {
		return false, nil
	}
	if success, length := f.TestR(b[:len(b)-f.Off]); success {
		return true, []int{f.Off + length}
	}
	return false, nil
}

// Equals reports whether frame is a Fixed frame with the same offset type,
// offset and pattern.
func (f Fixed) Equals(frame Frame) bool {
	f1, ok := frame.(Fixed)
	if ok {
		if f.OffType == f1.OffType && f.Off == f1.Off {
			return f.Pattern.Equals(f1.Pattern)
		}
	}
	return false
}
// String renders the frame as "F <offset-type>:<offset> <pattern>".
func (f Fixed) String() string {
	return "F " + OffString[f.OffType] + ":" + strconv.Itoa(f.Off) + " " + f.Pattern.String()
}

// Min returns the minimum offset (identical to Max for a fixed frame).
func (f Fixed) Min() int {
	return f.Off
}

// Max returns the maximum offset (identical to Min for a fixed frame).
func (f Fixed) Max() int {
	return f.Off
}

// Linked reports whether this frame is closely tied to the preceding frame:
// a PREV frame must sit within maxDistance; a SUCC/EOF frame relies on the
// previous frame being a bounded SUCC frame within maxDistance and maxRange.
// BOF frames are never linked.
func (f Fixed) Linked(prev Frame, maxDistance, maxRange int) bool {
	switch f.OffType {
	case PREV:
		if f.Off > maxDistance {
			return false
		}
		return true
	case SUCC, EOF:
		if prev.Orientation() != SUCC || prev.Max() < 0 || prev.Max() > maxDistance || prev.Max()-prev.Min() > maxRange {
			return false
		}
		return true
	default:
		return false
	}
}

// Pat exposes the enclosed pattern.
func (f Fixed) Pat() patterns.Pattern {
	return f.Pattern
}

// Save persists the frame: type id, offset type, offset, then the pattern.
func (f Fixed) Save(ls *signature.LoadSaver) {
	ls.SaveByte(fixedLoader)
	ls.SaveByte(byte(f.OffType))
	ls.SaveInt(f.Off)
	f.Pattern.Save(ls)
}

// loadFixed mirrors Fixed.Save, reconstructing the frame from the stream.
func loadFixed(ls *signature.LoadSaver) Frame {
	return Fixed{
		OffType(ls.LoadByte()),
		ls.LoadInt(),
		patterns.Load(ls),
	}
}
// Window frames are at a range of offsets e.g. e.g. 1-1500 from the BOF, EOF or a preceding or succeeding frame.
type Window struct {
	OffType
	MinOff int
	MaxOff int
	patterns.Pattern
}

// Match slides over b from MinOff up to MaxOff (L-R), collecting an offset
// for every position where the pattern matches. Returns (false, nil) when
// there is no match at all.
func (w Window) Match(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := w.MinOff, w.MaxOff
	// Extend the slice limit by the pattern's maximum length so a match that
	// starts at MaxOff can still complete.
	_, m := w.Length()
	max += m
	if max > len(b) {
		max = len(b)
	}
	for min < max {
		success, length := w.Test(b[min:max])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			// A zero skip means the pattern can no longer match; stop.
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}

// MatchR is the reverse-direction (R-L) equivalent of Match.
func (w Window) MatchR(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := w.MinOff, w.MaxOff
	_, m := w.Length()
	max += m
	if max > len(b) {
		max = len(b)
	}
	for min < max {
		success, length := w.TestR(b[len(b)-max : len(b)-min])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}
// Equals reports whether frame is a Window frame with the same offset type,
// offset range and pattern.
func (w Window) Equals(frame Frame) bool {
	w1, ok := frame.(Window)
	if ok {
		if w.OffType == w1.OffType && w.MinOff == w1.MinOff && w.MaxOff == w1.MaxOff {
			return w.Pattern.Equals(w1.Pattern)
		}
	}
	return false
}

// String renders the frame as "WW <offset-type>:<min>-<max> <pattern>".
func (w Window) String() string {
	return "WW " + OffString[w.OffType] + ":" + strconv.Itoa(w.MinOff) + "-" + strconv.Itoa(w.MaxOff) + " " + w.Pattern.String()
}

// Min returns the minimum offset of the window.
func (w Window) Min() int {
	return w.MinOff
}

// Max returns the maximum offset of the window.
func (w Window) Max() int {
	return w.MaxOff
}

// Linked reports whether this frame is closely tied to the preceding frame:
// a PREV frame must have its offsets within maxDistance/maxRange; a SUCC/EOF
// frame relies on the previous frame being a bounded SUCC frame. BOF frames
// are never linked.
func (w Window) Linked(prev Frame, maxDistance, maxRange int) bool {
	switch w.OffType {
	case PREV:
		if w.MaxOff > maxDistance || w.MaxOff-w.MinOff > maxRange {
			return false
		}
		return true
	case SUCC, EOF:
		if prev.Orientation() != SUCC || prev.Max() < 0 || prev.Max() > maxDistance || prev.Max()-prev.Min() > maxRange {
			return false
		}
		return true
	default:
		return false
	}
}

// Pat exposes the enclosed pattern.
func (w Window) Pat() patterns.Pattern {
	return w.Pattern
}

// Save persists the frame: type id, offset type, min and max offsets, pattern.
func (w Window) Save(ls *signature.LoadSaver) {
	ls.SaveByte(windowLoader)
	ls.SaveByte(byte(w.OffType))
	ls.SaveInt(w.MinOff)
	ls.SaveInt(w.MaxOff)
	w.Pattern.Save(ls)
}

// loadWindow mirrors Window.Save, reconstructing the frame from the stream.
func loadWindow(ls *signature.LoadSaver) Frame {
	return Window{
		OffType(ls.LoadByte()),
		ls.LoadInt(),
		ls.LoadInt(),
		patterns.Load(ls),
	}
}
// Wild frames can be at any offset (i.e. 0 to the end of the file) relative to the BOF, EOF or a preceding or succeeding frame.
type Wild struct {
	OffType
	patterns.Pattern
}

// Match scans the whole of b (L-R), collecting an offset for every position
// where the pattern matches. Returns (false, nil) when there is no match.
func (w Wild) Match(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := 0, len(b)
	for min < max {
		success, length := w.Test(b[min:])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			// A zero skip means the pattern can no longer match; stop.
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}

// MatchR is the reverse-direction (R-L) equivalent of Match.
func (w Wild) MatchR(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := 0, len(b)
	for min < max {
		success, length := w.TestR(b[:len(b)-min])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}
// Equals reports whether frame is a Wild frame with the same offset type and
// pattern.
func (w Wild) Equals(frame Frame) bool {
	w1, ok := frame.(Wild)
	if ok {
		if w.OffType == w1.OffType {
			return w.Pattern.Equals(w1.Pattern)
		}
	}
	return false
}

// String renders the frame as "WL <offset-type> <pattern>".
func (w Wild) String() string {
	return "WL " + OffString[w.OffType] + " " + w.Pattern.String()
}

// Min returns 0: a wild frame has no minimum offset.
func (w Wild) Min() int {
	return 0
}

// Max returns -1: a wild frame has no maximum offset.
func (w Wild) Max() int {
	return -1
}

// Linked reports whether this frame is closely tied to the preceding frame.
// Only SUCC/EOF wild frames can be linked, and only when the previous frame
// is a bounded SUCC frame within maxDistance and maxRange.
func (w Wild) Linked(prev Frame, maxDistance, maxRange int) bool {
	switch w.OffType {
	case SUCC, EOF:
		if prev.Orientation() != SUCC || prev.Max() < 0 || prev.Max() > maxDistance || prev.Max()-prev.Min() > maxRange {
			return false
		}
		return true
	default:
		return false
	}
}

// Pat exposes the enclosed pattern.
func (w Wild) Pat() patterns.Pattern {
	return w.Pattern
}

// Save persists the frame: type id, offset type, then the pattern.
func (w Wild) Save(ls *signature.LoadSaver) {
	ls.SaveByte(wildLoader)
	ls.SaveByte(byte(w.OffType))
	w.Pattern.Save(ls)
}

// loadWild mirrors Wild.Save, reconstructing the frame from the stream.
func loadWild(ls *signature.LoadSaver) Frame {
	return Wild{
		OffType(ls.LoadByte()),
		patterns.Load(ls),
	}
}
// WildMin frames have a minimum but no maximum offset (e.g. 200-*) relative to the BOF, EOF or a preceding or succeeding frame.
type WildMin struct {
	OffType
	MinOff int
	patterns.Pattern
}

// Match scans b from MinOff to the end (L-R), collecting an offset for every
// position where the pattern matches. Returns (false, nil) when there is no
// match.
func (w WildMin) Match(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := w.MinOff, len(b)
	for min < max {
		success, length := w.Test(b[min:])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			// A zero skip means the pattern can no longer match; stop.
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}

// MatchR is the reverse-direction (R-L) equivalent of Match.
func (w WildMin) MatchR(b []byte) (bool, []int) {
	ret := make([]int, 0, 1)
	min, max := w.MinOff, len(b)
	for min < max {
		success, length := w.TestR(b[:len(b)-min])
		if success {
			ret = append(ret, min+length)
			min++
		} else {
			if length == 0 {
				break
			}
			min += length
		}
	}
	if len(ret) > 0 {
		return true, ret
	}
	return false, nil
}
func (w WildMin) Equals(frame Frame) bool {
w1, ok := frame.(WildMin)
if ok {
if w.OffType == w1.OffType && w.MinOff == w1.MinOff {
return w.Pattern.Equals(w1.Pattern)
}
}
return false
}
func (w WildMin) String() string {
return "WM " + OffString[w.OffType] + ":" + strconv.Itoa(w.MinOff) + " " + w.Pattern.String()
}
func (w WildMin) Min() int {
return w.MinOff
}
func (w WildMin) Max() int {
return -1
}
func (w WildMin) Linked(prev Frame, maxDistance, maxRange int) bool {
switch w.OffType {
case SUCC, EOF:
if prev.Orientation() != SUCC || prev.Max() < 0 || prev.Max() > maxDistance || prev.Max()-prev.Min() > maxRange {
return false
}
return true
default:
return false
}
}
func (w WildMin) Pat() patterns.Pattern {
return w.Pattern
}
func (w WildMin) Save(ls *signature.LoadSaver) {
ls.SaveByte(wildMinLoader)
ls.SaveByte(byte(w.OffType))
ls.SaveInt(w.MinOff)
w.Pattern.Save(ls)
}
func loadWildMin(ls *signature.LoadSaver) Frame {
return WildMin{
OffType(ls.LoadByte()),
ls.LoadInt(),
patterns.Load(ls),
}
} | pkg/core/bytematcher/frames/frames.go | 0.839898 | 0.474936 | frames.go | starcoder |
package main
import (
"fmt"
"image"
"math/rand"
"github.com/emer/emergent/env"
"github.com/emer/etable/etensor"
)
// ExEnv is an example environment, that sets a single input point in a 2D
// input state and two output states as the X and Y coordinates of point.
// It can be used as a starting point for writing your own Env, without
// having much existing code to rewrite.
type ExEnv struct {
	Nm    string          `desc:"name of this environment"`
	Dsc   string          `desc:"description of this environment"`
	Size  int             `desc:"size of each dimension in 2D input"`
	Point image.Point     `desc:"X,Y coordinates of point"`
	Input etensor.Float32 `desc:"input state, 2D Size x Size"`
	X     etensor.Float32 `desc:"X as a one-hot state 1D Size"`
	Y     etensor.Float32 `desc:"Y as a one-hot state 1D Size"`
	Run   env.Ctr         `view:"inline" desc:"current run of model as provided during Init"`
	Epoch env.Ctr         `view:"inline" desc:"number of times through Seq.Max number of sequences"`
	Trial env.Ctr         `view:"inline" desc:"trial increments over input states -- could add Event as a lower level"`
}
// Name returns the environment's name.
func (ev *ExEnv) Name() string { return ev.Nm }

// Desc returns the environment's description.
func (ev *ExEnv) Desc() string { return ev.Dsc }
// Config prepares the environment: sz is the side length of the square 2D
// input, ntrls the number of trials per epoch. It allocates the Input grid
// and the one-hot X and Y state tensors.
func (ev *ExEnv) Config(sz int, ntrls int) {
	ev.Size = sz
	ev.Trial.Max = ntrls
	ev.X.SetShape([]int{sz}, nil, []string{"X"})
	ev.Y.SetShape([]int{sz}, nil, []string{"Y"})
	ev.Input.SetShape([]int{sz, sz}, nil, []string{"Y", "X"})
}
// Validate returns an error when the environment has not yet been
// configured (Size is still zero); nil otherwise.
func (ev *ExEnv) Validate() error {
	if ev.Size != 0 {
		return nil
	}
	return fmt.Errorf("ExEnv: %v has size == 0 -- need to Config", ev.Nm)
}
// Counters lists the time scales this environment tracks, from outermost
// (Run) to innermost (Trial).
func (ev *ExEnv) Counters() []env.TimeScales {
	return []env.TimeScales{env.Run, env.Epoch, env.Trial}
}
// States describes the state elements this environment exposes and their
// shapes: the 2D Input grid plus the one-hot X and Y coordinate vectors.
func (ev *ExEnv) States() env.Elements {
	return env.Elements{
		{"Input", []int{ev.Size, ev.Size}, []string{"Y", "X"}},
		{"X", []int{ev.Size}, []string{"X"}},
		{"Y", []int{ev.Size}, []string{"Y"}},
	}
}
// State returns the named state tensor ("Input", "X" or "Y"), or nil if
// the element name is unknown.
func (ev *ExEnv) State(element string) etensor.Tensor {
	switch element {
	case "Input":
		return &ev.Input
	case "X":
		return &ev.X
	case "Y":
		return &ev.Y
	}
	return nil
}
// Actions returns nil: this environment accepts no actions (see the
// no-op Action method).
func (ev *ExEnv) Actions() env.Elements {
	return nil
}
// String returns the current state as a string, e.g. "Pt_3_7" for the
// current point's X and Y coordinates.
func (ev *ExEnv) String() string {
	return fmt.Sprintf("Pt_%d_%d", ev.Point.X, ev.Point.Y)
}
// Init restarts the environment for the given run number, resetting every
// counter. Trial is left at -1 so the first Step lands on trial 0.
func (ev *ExEnv) Init(run int) {
	ev.Run.Scale = env.Run
	ev.Run.Init()
	ev.Run.Cur = run
	ev.Epoch.Scale = env.Epoch
	ev.Epoch.Init()
	ev.Trial.Scale = env.Trial
	ev.Trial.Init()
	ev.Trial.Cur = -1 // so the first Step() increments to 0
}
// NewPoint draws a uniformly random point in [0,Size) x [0,Size) and
// writes the corresponding Input grid cell and one-hot X/Y states.
func (ev *ExEnv) NewPoint() {
	x, y := rand.Intn(ev.Size), rand.Intn(ev.Size)
	ev.Point.X, ev.Point.Y = x, y
	ev.Input.SetZeros()
	ev.Input.SetFloat([]int{y, x}, 1) // Input is indexed [Y, X]
	ev.X.SetZeros()
	ev.X.SetFloat([]int{x}, 1)
	ev.Y.SetZeros()
	ev.Y.SetFloat([]int{y}, 1)
}
// Step is called to advance the environment state: it generates a fresh
// random point each trial and increments Epoch whenever Trial wraps.
// It always returns true -- this environment never terminates.
func (ev *ExEnv) Step() bool {
	ev.Epoch.Same() // good idea to just reset all non-inner-most counters at start
	ev.NewPoint()
	if ev.Trial.Incr() { // true if wraps around Max back to 0
		ev.Epoch.Incr()
	}
	return true
}
// Action is a no-op: this environment ignores external actions.
func (ev *ExEnv) Action(element string, input etensor.Tensor) {
	// nop
}
// Counter reports the current value, previous value, and change flag of
// the counter at the given time scale (via env.Ctr.Query). It returns
// (-1, -1, false) for scales this environment does not track.
func (ev *ExEnv) Counter(scale env.TimeScales) (cur, prv int, chg bool) {
	switch scale {
	case env.Run:
		return ev.Run.Query()
	case env.Epoch:
		return ev.Epoch.Query()
	case env.Trial:
		return ev.Trial.Query()
	}
	return -1, -1, false
}
// Compile-time check that implements Env interface
var _ env.Env = (*ExEnv)(nil) | examples/env/env.go | 0.660282 | 0.439928 | env.go | starcoder |
package rel
import (
"reflect"
)
// mapLiteral is an implementation of Relation using a map
type mapLiteral struct {
	// the map of tuples in the relation, with tuples in the key
	// (only the keys are read -- see TupleChan -- so the map acts as a set)
	rbody reflect.Value // should always hold a map
	// set of candidate keys
	cKeys CandKeys
	// the type of the tuples contained within the relation
	zero interface{}
	// first error encountered during construction or during evaluation
	err error
}
// TupleChan sends each tuple in the relation to a channel
// t must be a channel whose element type matches the relation's tuple
// type; it is closed once all tuples are sent. The returned channel
// cancels the send when closed by the caller.
func (r1 *mapLiteral) TupleChan(t interface{}) chan<- struct{} {
	cancel := make(chan struct{})
	// reflect on the channel
	chv := reflect.ValueOf(t)
	err := EnsureChan(chv.Type(), r1.zero)
	if err != nil {
		r1.err = err
		return cancel
	}
	if r1.err != nil {
		// a previous error: send nothing, just close the results channel
		chv.Close()
		return cancel
	}
	go func(res reflect.Value) {
		// output channel
		resSel := reflect.SelectCase{Dir: reflect.SelectSend, Chan: res}
		canSel := reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(cancel)}
		for _, tup := range r1.rbody.MapKeys() {
			resSel.Send = tup
			// block until either the consumer takes the tuple or the
			// caller closes cancel
			chosen, _, _ := reflect.Select([]reflect.SelectCase{canSel, resSel})
			if chosen == 0 {
				// cancel was closed: stop sending (results channel is
				// deliberately left open)
				return
			}
		}
		chv.Close()
	}(chv)
	return cancel
}
// Zero returns the zero value of the relation (a blank tuple), which
// carries the relation's tuple type.
func (r1 *mapLiteral) Zero() interface{} {
	return r1.zero
}
// CKeys is the set of candidate keys in the relation
func (r1 *mapLiteral) CKeys() CandKeys {
	return r1.cKeys
}
// GoString returns a text representation of the Relation as a
// tab-formatted table of its tuples.
func (r1 *mapLiteral) GoString() string {
	return goStringTabTable(r1)
}
// String returns a text representation of the Relation showing only its
// heading, e.g. "Relation(a, b)".
func (r1 *mapLiteral) String() string {
	return "Relation(" + HeadingString(r1) + ")"
}
// Project creates a new relation with less than or equal degree
// t2 has to be a new type which is a subdomain of r.
// Only the derived relation is constructed here; no tuples are evaluated.
func (r1 *mapLiteral) Project(z2 interface{}) Relation {
	return NewProject(r1, z2)
}
// Restrict creates a new relation with less than or equal cardinality
// p has to be a func(tup T) bool where tup is a subdomain of the input r.
// Only the derived relation is constructed here; no tuples are evaluated.
func (r1 *mapLiteral) Restrict(p Predicate) Relation {
	return NewRestrict(r1, p)
}
// Rename creates a new relation with new column names
// z2 has to be a struct with the same number of fields as the input relation
func (r1 *mapLiteral) Rename(z2 interface{}) Relation {
	return NewRename(r1, z2)
}
// Union creates a new relation by unioning the bodies of both inputs
func (r1 *mapLiteral) Union(r2 Relation) Relation {
	return NewUnion(r1, r2)
}
// Diff creates a new relation by set minusing the two inputs (r1 - r2)
func (r1 *mapLiteral) Diff(r2 Relation) Relation {
	return NewDiff(r1, r2)
}
// Join creates a new relation by performing a natural join on the inputs
// zero is a blank tuple with the result's type.
func (r1 *mapLiteral) Join(r2 Relation, zero interface{}) Relation {
	return NewJoin(r1, r2, zero)
}
// GroupBy creates a new relation by grouping and applying a user defined func
// t2 is a blank tuple of the result type; gfcn is the aggregation function.
func (r1 *mapLiteral) GroupBy(t2, gfcn interface{}) Relation {
	return NewGroupBy(r1, t2, gfcn)
}
// Map creates a new relation by applying a function to tuples in the source
// ckeystr lists the candidate keys of the result.
func (r1 *mapLiteral) Map(mfcn interface{}, ckeystr [][]string) Relation {
	return NewMap(r1, mfcn, ckeystr)
}
// Err returns an error encountered during construction or computation
// (nil when no error has occurred).
func (r1 *mapLiteral) Err() error {
	return r1.err
} | mapliteral.go | 0.720467 | 0.458591 | mapliteral.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// WorkbookTableRow represents a single row of a workbook table, extending
// the base Entity with the row's index and raw cell values.
type WorkbookTableRow struct {
    Entity
    // Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only.
    index *int32
    // Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string.
    values Jsonable
}
// NewWorkbookTableRow instantiates a new workbookTableRow and sets the default values.
// index and values start out nil until deserialized or set explicitly.
func NewWorkbookTableRow()(*WorkbookTableRow) {
    m := &WorkbookTableRow{
        Entity: *NewEntity(),
    }
    return m
}
// CreateWorkbookTableRowFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
// The error return is always nil here; the factory signature requires it.
func CreateWorkbookTableRowFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewWorkbookTableRow(), nil
}
// GetFieldDeserializers the deserialization information for the current model
// It extends Entity's deserializer map with handlers for the index and
// values properties; nil parsed values are skipped rather than assigned.
func (m *WorkbookTableRow) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := m.Entity.GetFieldDeserializers()
    res["index"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetIndex(val)
        }
        return nil
    }
    res["values"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateJsonFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetValues(val.(Jsonable))
        }
        return nil
    }
    return res
}
// GetIndex gets the index property value. Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only.
// A nil receiver yields nil, matching the guard style used across these models.
func (m *WorkbookTableRow) GetIndex()(*int32) {
    // Guard clause instead of else-after-return (idiomatic Go).
    if m == nil {
        return nil
    }
    return m.index
}
// GetValues gets the values property value. Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string.
// A nil receiver yields nil, matching the guard style used across these models.
func (m *WorkbookTableRow) GetValues()(Jsonable) {
    // Guard clause instead of else-after-return (idiomatic Go).
    if m == nil {
        return nil
    }
    return m.values
}
// Serialize serializes information the current object
// The base Entity fields are written first, then index and values.
func (m *WorkbookTableRow) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    {
        err = writer.WriteInt32Value("index", m.GetIndex())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("values", m.GetValues())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetIndex sets the index property value. Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only.
// A nil receiver is a silent no-op, matching the guard style of the getters.
func (m *WorkbookTableRow) SetIndex(value *int32)() {
    // Guard clause instead of wrapping the assignment in an if (idiomatic Go).
    if m == nil {
        return
    }
    m.index = value
}
// SetValues sets the values property value. Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string.
// A nil receiver is a silent no-op, matching the guard style of the getters.
func (m *WorkbookTableRow) SetValues(value Jsonable)() {
    // Guard clause instead of wrapping the assignment in an if (idiomatic Go).
    if m == nil {
        return
    }
    m.values = value
} | models/workbook_table_row.go | 0.743168 | 0.484136 | workbook_table_row.go | starcoder |
// D55 illuminant conversion functions
package white
// D55_A functions
// d55Apply multiplies the column vector (xs, ys, zs) by the 3x3 chromatic
// adaptation matrix m, returning the adapted tristimulus values (xd, yd, zd).
// Every D55_* converter below is this one kernel with a different constant
// matrix, so the matrix-vector arithmetic lives in a single place.
func d55Apply(m [3][3]float64, xs, ys, zs float64) (xd, yd, zd float64) {
	xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
	yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
	zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
	return
}

// D55_A_Bradford adapts XYZ values from illuminant D55 to A (Bradford method).
func D55_A_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.1802853, 0.0968577, -0.1385564},
		{0.1334256, 0.9182995, -0.0498798},
		{-0.0216899, 0.0327363, 0.3731642}}, xs, ys, zs)
}

// D55_A_vonKries adapts XYZ values from illuminant D55 to A (von Kries method).
func D55_A_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0600958, 0.2095703, -0.1360743},
		{0.0230198, 0.9822541, -0.0046445},
		{0.0000000, 0.0000000, 0.3861681}}, xs, ys, zs)
}

// D55_A_Xyz adapts XYZ values from illuminant D55 to A (direct XYZ scaling).
func D55_A_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.1480738, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.3861681}}, xs, ys, zs)
}

// D55_B functions

// D55_B_Bradford adapts XYZ values from illuminant D55 to B (Bradford method).
func D55_B_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0341856, 0.0188903, -0.0192078},
		{0.0269143, 0.9810324, -0.0073626},
		{-0.0023832, 0.0030997, 0.9239498}}, xs, ys, zs)
}

// D55_B_vonKries adapts XYZ values from illuminant D55 to B (von Kries method).
func D55_B_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0112458, 0.0392165, -0.0174465},
		{0.0043077, 0.9966794, -0.0008694},
		{0.0000000, 0.0000000, 0.9248391}}, xs, ys, zs)
}

// D55_B_Xyz adapts XYZ values from illuminant D55 to B (direct XYZ scaling).
func D55_B_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0354299, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.9248391}}, xs, ys, zs)
}

// D55_C functions

// D55_C_Bradford adapts XYZ values from illuminant D55 to C (Bradford method).
func D55_C_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9821687, -0.0067531, 0.0518013},
		{-0.0044921, 0.9893393, 0.0162333},
		{0.0114719, -0.0199953, 1.2928395}}, xs, ys, zs)
}

// D55_C_vonKries adapts XYZ values from illuminant D55 to C (von Kries method).
func D55_C_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9932501, -0.0235426, 0.0585151},
		{-0.0025860, 1.0019947, 0.0005205},
		{0.0000000, 0.0000000, 1.2830524}}, xs, ys, zs)
}

// D55_C_Xyz adapts XYZ values from illuminant D55 to C (direct XYZ scaling).
func D55_C_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0249995, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.2830524}}, xs, ys, zs)
}

// D55_D50 functions

// D55_D50_Bradford adapts XYZ values from illuminant D55 to D50 (Bradford method).
func D55_D50_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0184567, 0.0093864, -0.0213199},
		{0.0120291, 0.9951460, -0.0072228},
		{-0.0039673, 0.0064899, 0.8925936}}, xs, ys, zs)
}

// D55_D50_vonKries adapts XYZ values from illuminant D55 to D50 (von Kries method).
func D55_D50_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0063032, 0.0219819, -0.0223692},
		{0.0024146, 0.9981384, -0.0004869},
		{0.0000000, 0.0000000, 0.8955170}}, xs, ys, zs)
}

// D55_D50_Xyz adapts XYZ values from illuminant D55 to D50 (direct XYZ scaling).
func D55_D50_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0077340, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.8955170}}, xs, ys, zs)
}

// D55_D65 functions

// D55_D65_Bradford adapts XYZ values from illuminant D55 to D65 (Bradford method).
func D55_D65_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9726856, -0.0135482, 0.0361731},
		{-0.0167463, 1.0049102, 0.0120598},
		{0.0070026, -0.0116372, 1.1869548}}, xs, ys, zs)
}

// D55_D65_vonKries adapts XYZ values from illuminant D55 to D65 (von Kries method).
func D55_D65_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9905739, -0.0328729, 0.0385701},
		{-0.0036109, 1.0027841, 0.0007280},
		{0.0000000, 0.0000000, 1.1815972}}, xs, ys, zs)
}

// D55_D65_Xyz adapts XYZ values from illuminant D55 to D65 (direct XYZ scaling).
func D55_D65_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9933634, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.1815972}}, xs, ys, zs)
}

// D55_D75 functions

// D55_D75_Bradford adapts XYZ values from illuminant D55 to D75 (Bradford method).
func D55_D75_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9535040, -0.0227860, 0.0653011},
		{-0.0276552, 1.0065257, 0.0216339},
		{0.0128323, -0.0214482, 1.3408176}}, xs, ys, zs)
}

// D55_D75_vonKries adapts XYZ values from illuminant D55 to D75 (von Kries method).
func D55_D75_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9838756, -0.0562333, 0.0700620},
		{-0.0061768, 1.0047626, 0.0012453},
		{0.0000000, 0.0000000, 1.3308663}}, xs, ys, zs)
}

// D55_D75_Xyz adapts XYZ values from illuminant D55 to D75 (direct XYZ scaling).
func D55_D75_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9925796, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.3308663}}, xs, ys, zs)
}

// D55_E functions

// D55_E_Bradford adapts XYZ values from illuminant D55 to E (Bradford method).
func D55_E_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0209581, 0.0132500, 0.0107183},
		{0.0216398, 0.9773060, 0.0021580},
		{0.0040457, -0.0079939, 1.0896730}}, xs, ys, zs)
}

// D55_E_vonKries adapts XYZ values from illuminant D55 to E (von Kries method).
func D55_E_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0064186, 0.0223804, 0.0159070},
		{0.0024583, 0.9981057, -0.0004969},
		{0.0000000, 0.0000000, 1.0851990}}, xs, ys, zs)
}

// D55_E_Xyz adapts XYZ values from illuminant D55 to E (direct XYZ scaling).
func D55_E_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0451287, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.0851990}}, xs, ys, zs)
}

// D55_F2 functions

// D55_F2_Bradford adapts XYZ values from illuminant D55 to F2 (Bradford method).
func D55_F2_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0591726, 0.0309362, -0.0569879},
		{0.0411624, 0.9788474, -0.0197857},
		{-0.0099371, 0.0158080, 0.7245114}}, xs, ys, zs)
}

// D55_F2_vonKries adapts XYZ values from illuminant D55 to F2 (von Kries method).
func D55_F2_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0199680, 0.0696350, -0.0582760},
		{0.0076489, 0.9941031, -0.0015429},
		{0.0000000, 0.0000000, 0.7313481}}, xs, ys, zs)
}

// D55_F2_Xyz adapts XYZ values from illuminant D55 to F2 (direct XYZ scaling).
func D55_F2_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0366213, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.7313481}}, xs, ys, zs)
}

// D55_F7 functions

// D55_F7_Bradford adapts XYZ values from illuminant D55 to F7 (Bradford method).
func D55_F7_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9728260, -0.0134855, 0.0358942},
		{-0.0166815, 1.0049307, 0.0119703},
		{0.0069439, -0.0115365, 1.1854306}}, xs, ys, zs)
}

// D55_F7_vonKries adapts XYZ values from illuminant D55 to F7 (von Kries method).
func D55_F7_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9906243, -0.0326972, 0.0382619},
		{-0.0035916, 1.0027692, 0.0007241},
		{0.0000000, 0.0000000, 1.1801213}}, xs, ys, zs)
}

// D55_F7_Xyz adapts XYZ values from illuminant D55 to F7 (direct XYZ scaling).
func D55_F7_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{0.9933007, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.1801213}}, xs, ys, zs)
}

// D55_F11 functions

// D55_F11_Bradford adapts XYZ values from illuminant D55 to F11 (Bradford method).
func D55_F11_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0762891, 0.0404531, -0.0658152},
		{0.0548187, 0.9689618, -0.0232379},
		{-0.0109370, 0.0170126, 0.6912198}}, xs, ys, zs)
}

// D55_F11_vonKries adapts XYZ values from illuminant D55 to F11 (von Kries method).
func D55_F11_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return d55Apply([3][3]float64{
		{1.0255820, 0.0892121, -0.0660772},
		{0.0097993, 0.9924455, -0.0019769},
		{0.0000000, 0.0000000, 0.6983255}}, xs, ys, zs)
}
// D55_F11_Xyz adapts XYZ values from illuminant D55 to F11 by direct
// scaling in XYZ space (a diagonal matrix).
func D55_F11_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	m := [3][3]float64{
		{1.0551828, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.6983255}}
	xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
	yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
	zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
	return
} | f64/white/d55.go | 0.52074 | 0.583945 | d55.go | starcoder |
package main
/*****************************************************************************************************
*
* Given two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two
* sorted arrays.
*
* Follow up: The overall run time complexity should be O(log (m+n)).
*
* Example 1:
*
* Input: nums1 = [1,3], nums2 = [2]
* Output: 2.00000
* Explanation: merged array = [1,2,3] and median is 2.
*
* Example 2:
*
* Input: nums1 = [1,2], nums2 = [3,4]
* Output: 2.50000
* Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.
*
* Example 3:
*
* Input: nums1 = [0,0], nums2 = [0,0]
* Output: 0.00000
*
* Example 4:
*
* Input: nums1 = [], nums2 = [1]
* Output: 1.00000
*
* Example 5:
*
* Input: nums1 = [2], nums2 = []
* Output: 2.00000
*
* Constraints:
*
* nums1.length == m
* nums2.length == n
* 0 <= m <= 1000
* 0 <= n <= 1000
* 1 <= m + n <= 2000
* -106 <= nums1[i], nums2[i] <= 106
******************************************************************************************************/
// Approach: reduce the median to finding the k-th smallest element of two
// sorted arrays.
// TODO: revisit and clean up this solution.
// findMedianSortedArrays returns the median of two sorted int slices in
// O(log(m+n)) time by reducing the problem to two k-th smallest queries.
// For an odd total length both queries return the same element.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	n := len(nums1)
	m := len(nums2)
	// 1-based positions of the two middle elements; for an odd total
	// these coincide, so averaging them still yields the middle value.
	left := (n + m + 1) / 2
	right := (n + m + 2) / 2
	lo := getKth(nums1, 0, n-1, nums2, 0, m-1, left)
	hi := getKth(nums1, 0, n-1, nums2, 0, m-1, right)
	return float64(lo+hi) * 0.5
}

// getKth returns the k-th smallest (1-based) element of the merged view of
// nums1[start1:end1+1] and nums2[start2:end2+1]. Each recursion discards
// roughly k/2 candidates from one of the two ranges.
func getKth(nums1 []int, start1, end1 int, nums2 []int, start2, end2, k int) int {
	len1 := end1 - start1 + 1
	len2 := end2 - start2 + 1
	// Keep the first range the shorter one, so only it can run empty.
	if len1 > len2 {
		return getKth(nums2, start2, end2, nums1, start1, end1, k)
	}
	if len1 == 0 {
		return nums2[start2+k-1]
	}
	if k == 1 {
		// The smaller of the two heads is the overall minimum.
		if nums1[start1] < nums2[start2] {
			return nums1[start1]
		}
		return nums2[start2]
	}
	// Probe at most k/2 elements into each range (capped at its length).
	half1 := k / 2
	if half1 > len1 {
		half1 = len1
	}
	half2 := k / 2
	if half2 > len2 {
		half2 = len2
	}
	i := start1 + half1 - 1
	j := start2 + half2 - 1
	if nums1[i] > nums2[j] {
		// nums2[start2..j] cannot contain the k-th smallest: discard it.
		return getKth(nums1, start1, end1, nums2, j+1, end2, k-(j-start2+1))
	}
	// Otherwise nums1[start1..i] cannot contain the k-th smallest.
	return getKth(nums1, i+1, end1, nums2, start2, end2, k-(i-start1+1))
}
// min returns the smaller of two ints.
func min(x, y int) int {
	if x < y {
		return x
	}
	return y
} | basic/Algorithm/array/4.median_of_two_sorted_arrays/4.MedianofTwoSortedArrays_zmillionaire.go | 0.536313 | 0.686954 | 4.MedianofTwoSortedArrays_zmillionaire.go | starcoder |
package state
import (
"bytes"
"errors"
"fmt"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------
// Validate block
// validateBlock checks a proposed block against the current state: basic
// well-formedness, header fields (version, chain ID, height, previous
// block ID, app/consensus/result/validator hashes), the LastCommit
// signatures, the block time, and all included evidence. It returns the
// first validation failure encountered, or nil when the block is valid.
func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, blockStore BlockStore, state State, block *types.Block) error {
	// Validate internal consistency.
	if err := block.ValidateBasic(); err != nil {
		return err
	}
	// Validate basic info.
	if block.Version != state.Version.Consensus {
		return fmt.Errorf("wrong Block.Header.Version. Expected %v, got %v",
			state.Version.Consensus,
			block.Version,
		)
	}
	if block.ChainID != state.ChainID {
		return fmt.Errorf("wrong Block.Header.ChainID. Expected %v, got %v",
			state.ChainID,
			block.ChainID,
		)
	}
	if block.Height != state.LastBlockHeight+1 {
		return fmt.Errorf("wrong Block.Header.Height. Expected %v, got %v",
			state.LastBlockHeight+1,
			block.Height,
		)
	}
	// Validate prev block info.
	if !block.LastBlockID.Equals(state.LastBlockID) {
		return fmt.Errorf("wrong Block.Header.LastBlockID. Expected %v, got %v",
			state.LastBlockID,
			block.LastBlockID,
		)
	}
	// Validate app info
	if !bytes.Equal(block.AppHash, state.AppHash) {
		return fmt.Errorf("wrong Block.Header.AppHash. Expected %X, got %v",
			state.AppHash,
			block.AppHash,
		)
	}
	if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) {
		return fmt.Errorf("wrong Block.Header.ConsensusHash. Expected %X, got %v",
			state.ConsensusParams.Hash(),
			block.ConsensusHash,
		)
	}
	if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) {
		return fmt.Errorf("wrong Block.Header.LastResultsHash. Expected %X, got %v",
			state.LastResultsHash,
			block.LastResultsHash,
		)
	}
	if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) {
		return fmt.Errorf("wrong Block.Header.ValidatorsHash. Expected %X, got %v",
			state.Validators.Hash(),
			block.ValidatorsHash,
		)
	}
	if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) {
		return fmt.Errorf("wrong Block.Header.NextValidatorsHash. Expected %X, got %v",
			state.NextValidators.Hash(),
			block.NextValidatorsHash,
		)
	}
	// Validate block LastCommit.
	if block.Height == 1 {
		// The first block has no previous block to commit.
		if len(block.LastCommit.Signatures) != 0 {
			return errors.New("block at height 1 can't have LastCommit signatures")
		}
	} else {
		if len(block.LastCommit.Signatures) != state.LastValidators.Size() {
			return types.NewErrInvalidCommitSignatures(state.LastValidators.Size(), len(block.LastCommit.Signatures))
		}
		err := state.LastValidators.VerifyCommit(
			state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit)
		if err != nil {
			return err
		}
	}
	// Validate block Time
	if block.Height > 1 {
		if !block.Time.After(state.LastBlockTime) {
			return fmt.Errorf("block time %v not greater than last block time %v",
				block.Time,
				state.LastBlockTime,
			)
		}
		// The block time must exactly match the MedianTime derived from the
		// LastCommit votes and the last validator set.
		medianTime := MedianTime(block.LastCommit, state.LastValidators)
		if !block.Time.Equal(medianTime) {
			return fmt.Errorf("invalid block time. Expected %v, got %v",
				medianTime,
				block.Time,
			)
		}
	} else if block.Height == 1 {
		genesisTime := state.LastBlockTime
		if !block.Time.Equal(genesisTime) {
			return fmt.Errorf("block time %v is not equal to genesis time %v",
				block.Time,
				genesisTime,
			)
		}
	}
	// Limit the amount of evidence
	maxNumEvidence, _ := types.MaxEvidencePerBlock(state.ConsensusParams.Block.MaxBytes)
	numEvidence := int64(len(block.Evidence.Evidence))
	if numEvidence > maxNumEvidence {
		return types.NewErrEvidenceOverflow(maxNumEvidence, numEvidence)
	}
	// Validate all evidence.
	for _, ev := range block.Evidence.Evidence {
		if _, err := VerifyEvidence(stateDB, blockStore, state, ev); err != nil {
			return types.NewErrEvidenceInvalid(ev, err)
		}
		// Reject evidence already committed in an earlier block.
		if evidencePool != nil && evidencePool.IsCommitted(ev) {
			return types.NewErrEvidenceInvalid(ev, errors.New("evidence was already committed"))
		}
	}
	// NOTE: We can't actually verify it's the right proposer because we dont
	// know what round the block was first proposed. So just check that it's
	// a legit address and a known validator.
	if len(block.ProposerAddress) != crypto.AddressSize ||
		!state.Validators.HasAddress(block.ProposerAddress) {
		return fmt.Errorf("block.Header.ProposerAddress, %X, is not a validator",
			block.ProposerAddress,
		)
	}
	return nil
}
// VerifyEvidence verifies the evidence fully by checking:
// - it is sufficiently recent (MaxAge)
// - it is from a key who was a validator at the given height
// - it is internally consistent
// - it was properly signed by the alleged equivocator
// - returns voting power of validator accused of misbehaviour
func VerifyEvidence(stateDB dbm.DB, blockStore BlockStore, state State, evidence types.Evidence) (int64, error) {
	// General validation of evidence age
	var (
		height         = state.LastBlockHeight
		evidenceParams = state.ConsensusParams.Evidence
		ageDuration    = state.LastBlockTime.Sub(evidence.Time())
		ageNumBlocks   = height - evidence.Height()
	)
	// Evidence is rejected only when it exceeds BOTH age limits, so it
	// survives as long as either the duration or block-count window allows.
	if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks {
		return 0, fmt.Errorf(
			"evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be older than %v",
			evidence.Height(),
			evidence.Time(),
			height-evidenceParams.MaxAgeNumBlocks,
			state.LastBlockTime.Add(evidenceParams.MaxAgeDuration),
		)
	}
	// Validation that is evidence type dependent
	switch evType := evidence.(type) {
	case *types.DuplicateVoteEvidence:
		return verifyDuplicateVoteEvidence(stateDB, state.ChainID, evType)
	case *types.BeaconInactivityEvidence:
		return verifyBeaconInactivityEvidence(stateDB, blockStore, state.ChainID, evType)
	case *types.DKGEvidence:
		return verifyDKGEvidence(stateDB, blockStore, state.ChainID, evType)
	case types.MockEvidence:
		return verifyMockEvidence(stateDB, evidence)
	case types.MockRandomEvidence:
		return verifyMockEvidence(stateDB, evidence)
	default:
		return 0, fmt.Errorf("VerifyEvidence: evidence is not recognized: %T", evType)
	}
}
// verifyDuplicateVoteEvidence checks that the accused address was an active
// validator at the evidence height and that the evidence verifies against
// that validator's public key, returning the validator's voting power.
func verifyDuplicateVoteEvidence(stateDB dbm.DB, chainID string, evidence *types.DuplicateVoteEvidence) (int64, error) {
	valset, err := LoadValidators(stateDB, evidence.ValidatorHeight())
	if err != nil {
		// TODO: if err is just that we cant find it cuz we pruned, ignore.
		// TODO: if its actually bad evidence, punish peer
		return 0, err
	}
	// The address must have been an active validator at the height.
	// NOTE: we will ignore evidence from H if the key was not a validator
	// at H, even if it is a validator at some nearby H'
	// XXX: this makes lite-client bisection as is unsafe
	// See https://github.com/tendermint/tendermint/issues/3244
	ev := evidence
	height, addr := ev.ValidatorHeight(), ev.Address()
	_, val := valset.GetByAddress(addr)
	if val == nil {
		return 0, fmt.Errorf("address %X was not a validator at height %d", addr, height)
	}
	if err := evidence.Verify(chainID, val.PubKey); err != nil {
		return 0, err
	}
	return val.VotingPower, nil
}
// verifyBeaconInactivityEvidence checks beacon inactivity evidence against
// the entropy of the aeon-start block and the DKG validator set at the
// evidence height, returning the accused validator's voting power.
func verifyBeaconInactivityEvidence(stateDB dbm.DB, blockStore BlockStore, chainID string, evidence *types.BeaconInactivityEvidence) (int64, error) {
	blockMeta := blockStore.LoadBlockMeta(evidence.AeonStart)
	if blockMeta == nil {
		return 0, fmt.Errorf("could not retrieve block header for height %v", evidence.AeonStart)
	}
	valset, err := LoadDKGValidators(stateDB, evidence.ValidatorHeight())
	if err != nil {
		return 0, err
	}
	params, err := LoadConsensusParams(stateDB, evidence.ValidatorHeight())
	if err != nil {
		return 0, err
	}
	if err := evidence.Verify(chainID, blockMeta.Header.Entropy, valset, params.Entropy); err != nil {
		return 0, err
	}
	_, val := valset.GetByAddress(evidence.Address())
	// Guard against a nil validator: previously this dereferenced val
	// unconditionally and would panic if the address was not in the DKG
	// validator set (verifyDuplicateVoteEvidence has the same guard).
	if val == nil {
		return 0, fmt.Errorf("address %X was not a validator at height %d", evidence.Address(), evidence.ValidatorHeight())
	}
	return val.VotingPower, nil
}
// verifyDKGEvidence checks DKG evidence against the entropy of the block at
// the evidence height and the DKG validator set at that height, returning
// the accused validator's voting power.
func verifyDKGEvidence(stateDB dbm.DB, blockStore BlockStore, chainID string, evidence *types.DKGEvidence) (int64, error) {
	blockMeta := blockStore.LoadBlockMeta(evidence.ValidatorHeight())
	if blockMeta == nil {
		return 0, fmt.Errorf("could not retrieve block header for height %v", evidence.ValidatorHeight())
	}
	valset, err := LoadDKGValidators(stateDB, evidence.ValidatorHeight())
	if err != nil {
		return 0, err
	}
	params, err := LoadConsensusParams(stateDB, evidence.ValidatorHeight())
	if err != nil {
		return 0, err
	}
	if err := evidence.Verify(chainID, blockMeta.Header.Entropy, valset, params.Entropy); err != nil {
		return 0, err
	}
	_, val := valset.GetByAddress(evidence.Address())
	// Guard against a nil validator: previously this dereferenced val
	// unconditionally and would panic if the address was not in the DKG
	// validator set (verifyDuplicateVoteEvidence has the same guard).
	if val == nil {
		return 0, fmt.Errorf("address %X was not a validator at height %d", evidence.Address(), evidence.ValidatorHeight())
	}
	return val.VotingPower, nil
}
// verifyMockEvidence validates mock evidence (the Mock* types, presumably
// test-only): it only checks the address was a validator at the evidence
// height -- there is no signature verification here.
func verifyMockEvidence(stateDB dbm.DB, evidence types.Evidence) (int64, error) {
	valset, err := LoadValidators(stateDB, evidence.ValidatorHeight())
	if err != nil {
		return 0, err
	}
	ev := evidence
	height, addr := ev.ValidatorHeight(), ev.Address()
	_, val := valset.GetByAddress(addr)
	if val == nil {
		return 0, fmt.Errorf("address %X was not a validator at height %d", addr, height)
	}
	return val.VotingPower, nil
} | state/validation.go | 0.596903 | 0.430207 | validation.go | starcoder |
package common
import (
"math"
"sort"
"github.com/m3db/m3/src/query/graphite/ts"
)
// Range distills down a set of inputs into the range of the series.
// For each step it emits max-min over all series with a value at that
// step; steps where no series has a value keep the initial (unset) value.
func Range(ctx *Context, series ts.SeriesList, renamer SeriesListRenamer) (*ts.Series, error) {
	numSeries := series.Len()
	if numSeries == 0 {
		return nil, ErrEmptySeriesList
	}
	// Bring all series onto a common start/end/step before comparing values.
	normalized, start, end, millisPerStep, err := Normalize(ctx, series)
	if err != nil {
		return nil, err
	}
	numSteps := ts.NumSteps(start, end, millisPerStep)
	vals := ts.NewValues(ctx, millisPerStep, numSteps)
	nan := math.NaN()
	for i := 0; i < numSteps; i++ {
		minVal, maxVal := nan, nan
		for j := 0; j < numSeries; j++ {
			v := normalized.Values[j].ValueAt(i)
			if math.IsNaN(v) {
				continue
			}
			// NaN min/max markers mean "no value seen yet".
			if math.IsNaN(minVal) || minVal > v {
				minVal = v
			}
			if math.IsNaN(maxVal) || maxVal < v {
				maxVal = v
			}
		}
		if !math.IsNaN(minVal) && !math.IsNaN(maxVal) {
			vals.SetValueAt(i, maxVal-minVal)
		}
	}
	// The renamer derives the output series name from the normalized input.
	name := renamer(normalized)
	return ts.NewSeries(ctx, name, start, vals), nil
}
// SafeAggregationFn is a safe aggregation function: it reduces a slice of
// samples to (result, number of NaNs skipped, ok). ok is false when the
// input is empty or contains only NaNs, in which case the result is invalid.
type SafeAggregationFn func(input []float64) (float64, int, bool)

// SafeAggregationFns maps graphite aggregation names (including the "avg"
// and "average" aliases) to their implementations.
var SafeAggregationFns = map[string]SafeAggregationFn{
	"sum":      SafeSum,
	"avg":      SafeAverage,
	"average":  SafeAverage,
	"max":      SafeMax,
	"min":      SafeMin,
	"median":   SafeMedian,
	"diff":     SafeDiff,
	"stddev":   SafeStddev,
	"range":    SafeRange,
	"multiply": SafeMul,
	"last":     SafeLast,
	"count":    SafeCount,
}
// SafeSort sorts input in place in increasing order (sort.Float64s orders
// NaN values before all others) and returns how many NaNs the slice holds.
func SafeSort(input []float64) int {
	nanCount := 0
	for _, v := range input {
		if math.IsNaN(v) {
			nanCount++
		}
	}
	sort.Float64s(input)
	return nanCount
}
// SafeSum returns the sum of the non-NaN values in input along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs, in which case the other results are invalid.
func SafeSum(input []float64) (float64, int, bool) {
	var (
		total float64
		nans  int
	)
	for _, v := range input {
		if math.IsNaN(v) {
			nans++
			continue
		}
		total += v
	}
	if nans == len(input) {
		return 0, 0, false // Either no elements or all nans.
	}
	return total, nans, true
}
// SafeAverage returns the mean of the non-NaN values in input along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs.
func SafeAverage(input []float64) (float64, int, bool) {
	sum, nans, ok := SafeSum(input)
	if !ok {
		// Empty or all-NaN input; SafeSum already detected this, so the
		// previous redundant len(input) == nans re-check was dead code.
		return 0, 0, false
	}
	count := len(input) - nans // number of valid (non-NaN) samples; > 0 here
	return sum / float64(count), nans, true
}
// SafeMax returns the maximum of the non-NaN values in input along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs.
func SafeMax(input []float64) (float64, int, bool) {
	nans := 0
	// Seed with -Inf rather than -math.MaxFloat64 so inputs consisting only
	// of -Inf values report -Inf instead of the largest finite float.
	max := math.Inf(-1)
	for _, v := range input {
		if math.IsNaN(v) {
			nans++
			continue
		}
		if v > max {
			max = v
		}
	}
	if len(input) == nans {
		return 0, 0, false // Either no elements or all nans.
	}
	return max, nans, true
}
// SafeMin returns the minimum of the non-NaN values in input along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs.
func SafeMin(input []float64) (float64, int, bool) {
	nans := 0
	// Seed with +Inf rather than math.MaxFloat64 so inputs consisting only
	// of +Inf values report +Inf instead of the largest finite float.
	min := math.Inf(1)
	for _, v := range input {
		if math.IsNaN(v) {
			nans++
			continue
		}
		if v < min {
			min = v
		}
	}
	if len(input) == nans {
		return 0, 0, false // Either no elements or all nans.
	}
	return min, nans, true
}
// SafeMedian returns the median of the non-NaN values in input along with
// the number of NaNs seen. The boolean result is false when input is empty
// or contains only NaNs. The median itself is computed by ts.Median over the
// filtered values.
func SafeMedian(input []float64) (float64, int, bool) {
	safeValues, nans, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	return ts.Median(safeValues, len(safeValues)), nans, true
}
// SafeDiff returns the first non-NaN value minus every subsequent non-NaN
// value, along with the number of NaNs seen. The boolean result is false
// when input is empty or contains only NaNs.
func SafeDiff(input []float64) (float64, int, bool) {
	valid, nans, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	result := valid[0]
	for _, v := range valid[1:] {
		result -= v
	}
	return result, nans, true
}
// SafeStddev returns the population standard deviation of the non-NaN values
// in input along with the number of NaNs seen. The boolean result is false
// when input is empty or contains only NaNs. It makes two passes: one for
// the mean (via SafeAverage) and one over the filtered values.
func SafeStddev(input []float64) (float64, int, bool) {
	safeAvg, nans, ok := SafeAverage(input)
	if !ok {
		return 0, 0, false
	}
	safeValues, _, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	// Sum of squared deviations from the mean.
	sum := 0.0
	for _, v := range safeValues {
		sum += (v - safeAvg) * (v - safeAvg)
	}
	// Population (not sample) standard deviation: divide by N, not N-1.
	return math.Sqrt(sum / float64(len(safeValues))), nans, true
}
// SafeRange returns max minus min of the non-NaN values in input along with
// the number of NaNs seen. The boolean result is false when input is empty
// or contains only NaNs.
func SafeRange(input []float64) (float64, int, bool) {
	maxVal, nans, ok := SafeMax(input)
	if !ok {
		return 0, 0, false
	}
	minVal, _, ok := SafeMin(input)
	if !ok {
		return 0, 0, false
	}
	return maxVal - minVal, nans, true
}
// SafeMul returns the product of the non-NaN values in input along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs.
func SafeMul(input []float64) (float64, int, bool) {
	valid, nans, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	result := 1.0
	for _, v := range valid {
		result *= v
	}
	return result, nans, true
}
// SafeLast returns the last non-NaN value in input along with the number of
// NaNs seen. The boolean result is false when input is empty or contains
// only NaNs.
func SafeLast(input []float64) (float64, int, bool) {
	valid, nans, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	return valid[len(valid)-1], nans, true
}
// SafeCount returns how many non-NaN values input holds along with the
// number of NaNs seen. The boolean result is false when input is empty or
// contains only NaNs.
func SafeCount(input []float64) (float64, int, bool) {
	valid, nans, ok := safeValues(input)
	if !ok {
		return 0, 0, false
	}
	return float64(len(valid)), nans, true
}
// safeValues filters the NaNs out of input, returning the remaining values,
// the number of NaNs removed, and false when input is empty or all NaNs.
func safeValues(input []float64) ([]float64, int, bool) {
	valid := make([]float64, 0, len(input))
	nans := 0
	for _, v := range input {
		if math.IsNaN(v) {
			nans++
			continue
		}
		valid = append(valid, v)
	}
	if nans == len(input) {
		return nil, 0, false // Either no elements or all nans.
	}
	return valid, nans, true
}
package clusterhealth
import (
"log"
"github.com/monitoring-tools/prom-elasticsearch-exporter/elasticsearch"
"github.com/monitoring-tools/prom-elasticsearch-exporter/elasticsearch/model"
"github.com/monitoring-tools/prom-elasticsearch-exporter/metrics"
"github.com/prometheus/client_golang/prometheus"
)
// clusterStatuses maps Elasticsearch health colors to the numeric values
// exported by the status metrics (1 = green, 2 = yellow, 3 = red). Unknown
// colors map to the zero value 0.
var clusterStatuses = map[string]float64{
	"green":  1,
	"yellow": 2,
	"red":    3,
}

var (
	// Label sets and metric subsystems shared by all cluster-health metrics.
	labelsClusterHealth    = []string{"cluster"}
	labelsIndexHealth      = []string{"cluster", "index"}
	subsystemClusterHealth = "cluster_health"
	subsystemIndexHealth   = "cluster_health_index"
)
// clusterHealthMetric pairs a metric descriptor with the function that
// extracts its value from a cluster-health response.
type clusterHealthMetric struct {
	*metrics.Metric
	Value func(clusterHealth *model.ClusterHealth) float64
}

// indexHealthMetric pairs a metric descriptor with the function that
// extracts its value from a single index's health entry.
type indexHealthMetric struct {
	*metrics.Metric
	Value func(indexHealth model.ClusterHealthIndex) float64
}

// clusterHealthStatusMetric describes the cluster status gauge.
// NOTE(review): the Labels field is never set or read in this file — confirm
// whether it is used elsewhere or can be removed.
type clusterHealthStatusMetric struct {
	*metrics.Metric
	Value  func(clusterHealth *model.ClusterHealth) float64
	Labels func(clusterName, color string) []string
}

// Collector is an cluster health metrics collector
type Collector struct {
	esClient       elasticsearch.Client
	metrics        []*clusterHealthMetric
	statusMetric   *clusterHealthStatusMetric
	indicesMetrics []*indexHealthMetric
}
// newClusterHealthMetric builds a cluster-level gauge carrying the standard
// cluster-health labels.
func newClusterHealthMetric(name, help string, value func(*model.ClusterHealth) float64) *clusterHealthMetric {
	m := metrics.New(prometheus.GaugeValue, subsystemClusterHealth, name, help, labelsClusterHealth)
	return &clusterHealthMetric{Metric: m, Value: value}
}

// newIndexHealthMetric builds a per-index gauge carrying the standard
// cluster/index labels.
func newIndexHealthMetric(name, help string, value func(model.ClusterHealthIndex) float64) *indexHealthMetric {
	m := metrics.New(prometheus.GaugeValue, subsystemIndexHealth, name, help, labelsIndexHealth)
	return &indexHealthMetric{Metric: m, Value: value}
}
// NewCollector returns a new cluster health collector backed by esClient.
// It pre-registers every cluster-level gauge, every per-index gauge, and the
// cluster status gauge; the Value closures extract each figure from the
// cluster-health API response at collection time.
func NewCollector(esClient elasticsearch.Client) *Collector {
	return &Collector{
		esClient: esClient,
		metrics: []*clusterHealthMetric{
			newClusterHealthMetric(
				"active_primary_shards", "The number of primary shards in your cluster. This is an aggregate total across all indices.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.ActivePrimaryShards) },
			),
			newClusterHealthMetric(
				"active_shards", "Aggregate total of all shards across all indices, which includes replica shards.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.ActiveShards) },
			),
			newClusterHealthMetric(
				"delayed_unassigned_shards", "Shards delayed to reduce reallocation overhead",
				func(clusterHealth *model.ClusterHealth) float64 {
					return float64(clusterHealth.DelayedUnassignedShards)
				},
			),
			newClusterHealthMetric(
				"initializing_shards", "Count of shards that are being freshly created.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.InitializingShards) },
			),
			newClusterHealthMetric(
				"number_of_data_nodes", "Number of data nodes in the cluster.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.NumberOfDataNodes) },
			),
			newClusterHealthMetric(
				"number_of_in_flight_fetch", "The number of ongoing shard info requests.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.NumberOfInFlightFetch) },
			),
			newClusterHealthMetric(
				"number_of_nodes", "Number of nodes in the cluster.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.NumberOfNodes) },
			),
			newClusterHealthMetric(
				"number_of_pending_tasks", "Cluster level changes which have not yet been executed",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.NumberOfPendingTasks) },
			),
			newClusterHealthMetric(
				"relocating_shards", "The number of shards that are currently moving from one node to another node.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.RelocatingShards) },
			),
			newClusterHealthMetric(
				"timed_out", "Number of cluster health checks timed out",
				func(clusterHealth *model.ClusterHealth) float64 {
					// Boolean flag exported as a 0/1 gauge.
					if clusterHealth.TimedOut {
						return 1
					}
					return 0
				},
			),
			newClusterHealthMetric(
				"unassigned_shards", "The number of shards that exist in the cluster state, but cannot be found in the cluster itself.",
				func(clusterHealth *model.ClusterHealth) float64 { return float64(clusterHealth.UnassignedShards) },
			),
		},
		statusMetric: &clusterHealthStatusMetric{
			Metric: metrics.New(prometheus.GaugeValue, subsystemClusterHealth, "status", "Cluster status. 1 = green, 2 = yellow, 3 = red", labelsClusterHealth),
			Value: func(clusterHealth *model.ClusterHealth) float64 {
				return clusterStatuses[clusterHealth.Status]
			},
		},
		indicesMetrics: []*indexHealthMetric{
			newIndexHealthMetric(
				"status", "Index status. 1 = green, 2 = yellow, 3 = red",
				func(i model.ClusterHealthIndex) float64 { return clusterStatuses[i.Status] },
			),
			newIndexHealthMetric(
				"number_of_shards", "The number of shards that used by index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.NumberOfShards) },
			),
			newIndexHealthMetric(
				"number_of_replicas", "The number of replicas of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.NumberOfReplicas) },
			),
			newIndexHealthMetric(
				"active_primary_shards", "The number of active primary shards of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.ActivePrimaryShards) },
			),
			newIndexHealthMetric(
				"active_shards", "The number of active shards of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.ActiveShards) },
			),
			newIndexHealthMetric(
				"relocating_shards", "The number of relocating shards of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.RelocatingShards) },
			),
			newIndexHealthMetric(
				"initializing_shards", "The number of initializing shards of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.InitializingShards) },
			),
			newIndexHealthMetric(
				"unassigned_shards", "The number of unassigned shards of index",
				func(i model.ClusterHealthIndex) float64 { return float64(i.UnassignedShards) },
			),
		},
	}
}
// Describe implements the prometheus.Collector interface by sending the
// descriptor of every metric this collector can emit.
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
	for _, m := range c.metrics {
		ch <- m.Desc()
	}
	for _, m := range c.indicesMetrics {
		ch <- m.Desc()
	}
	ch <- c.statusMetric.Desc()
}
// Collect writes data to metrics channel
func (c *Collector) Collect(clusterName string, ch chan<- prometheus.Metric) {
resp, err := c.esClient.ClusterHealth(elasticsearch.LevelIndices)
if err != nil {
log.Println("ERROR: failed to fetch cluster health: ", err)
return
}
for _, metric := range c.metrics {
ch <- prometheus.MustNewConstMetric(
metric.Desc(),
metric.Type(),
metric.Value(resp),
clusterName,
)
}
for name, index := range resp.Indices {
for _, metric := range c.indicesMetrics {
ch <- prometheus.MustNewConstMetric(
metric.Desc(),
metric.Type(),
metric.Value(index),
clusterName, name,
)
}
}
ch <- prometheus.MustNewConstMetric(
c.statusMetric.Desc(),
c.statusMetric.Type(),
c.statusMetric.Value(resp),
clusterName,
)
} | collector/clusterhealth/collector.go | 0.651466 | 0.40645 | collector.go | starcoder |
package medtronic
import (
"fmt"
"log"
)
// Carbs represents a carb value as either grams or 10x exchanges
// (exchange counts are stored scaled by ten to keep the type integral).
type Carbs int

// CarbUnitsType represents the pump's carb unit type (grams or exchanges).
type CarbUnitsType byte

//go:generate stringer -type CarbUnitsType
const (
	// Grams represents the pump's use of grams for carb units.
	Grams CarbUnitsType = 1
	// Exchanges represents the pump's use of exchanges for carb units.
	Exchanges CarbUnitsType = 2
)
// Glucose represents a glucose value as either mg/dL or μmol/L,
// so all conversions must include a GlucoseUnitsType parameter.
type Glucose int

// GlucoseUnitsType represents the pump's glucose unit type (mg/dL or mmol/L).
type GlucoseUnitsType byte

const (
	// MgPerDeciLiter represents the pump's use of mg/dL for glucose units.
	MgPerDeciLiter GlucoseUnitsType = 1
	// MMolPerLiter represents the pump's use of mmol/L for glucose units.
	MMolPerLiter GlucoseUnitsType = 2
)

// String renders the unit name, panicking (via log.Panicf) on an unknown value.
func (u GlucoseUnitsType) String() string {
	switch u {
	case MgPerDeciLiter:
		return "mg/dL"
	case MMolPerLiter:
		return "μmol/L"
	default:
		log.Panicf("unknown glucose unit %d", u)
		panic("unreachable")
	}
}
// whichUnits executes cmd on the pump and extracts the units byte from the
// response. On a communication error it returns 0; on a short or malformed
// response it reports it via pump.BadResponse and returns 0.
// NOTE(review): data[0] is required to be 1 — presumably a payload-length
// byte; confirm against the pump protocol documentation.
func (pump *Pump) whichUnits(cmd Command) byte {
	data := pump.Execute(cmd)
	if pump.Error() != nil {
		return 0
	}
	if len(data) < 2 || data[0] != 1 {
		pump.BadResponse(cmd, data)
		return 0
	}
	return data[1]
}
// intToGlucose converts a raw pump value to a Glucose in the given units.
// mg/dL values are used as-is; mmol/L values arrive scaled by 10 (0.1 mmol/L
// resolution) and are stored as μmol/L, hence the multiplication by 100.
// Panics (via log.Panicf) on an unknown unit type.
func intToGlucose(n int, t GlucoseUnitsType) Glucose {
	switch t {
	case MgPerDeciLiter:
		return Glucose(n)
	case MMolPerLiter:
		// Convert 10x mmol/L to μmol/L
		return Glucose(n) * 100
	default:
		log.Panicf("unknown glucose unit %d", t)
	}
	panic("unreachable")
}

// byteToGlucose is intToGlucose for a single raw byte.
func byteToGlucose(n byte, t GlucoseUnitsType) Glucose {
	return intToGlucose(int(n), t)
}
// CarbUnits returns the pump's carb units (Grams or Exchanges), or 0 when
// the underlying command failed.
func (pump *Pump) CarbUnits() CarbUnitsType {
	return CarbUnitsType(pump.whichUnits(carbUnits))
}

// GlucoseUnits returns the pump's glucose units (MgPerDeciLiter or
// MMolPerLiter), or 0 when the underlying command failed.
func (pump *Pump) GlucoseUnits() GlucoseUnitsType {
	return GlucoseUnitsType(pump.whichUnits(glucoseUnits))
}
// Insulin represents quantities and rates of insulin delivery, in milliunits.
type Insulin int

// String renders the value in whole units with three decimal places,
// e.g. Insulin(1234) -> "1.234".
func (r Insulin) String() string {
	units := float64(r) / 1000
	return fmt.Sprintf("%.3f", units)
}
// milliUnitsPerStroke returns the pump's insulin resolution in milliunits
// per stroke: families numbered 22 and below use 0.1 U (100 mU) strokes,
// later families use 0.025 U (25 mU) strokes.
func milliUnitsPerStroke(family Family) Insulin {
	if family <= 22 {
		return 100
	}
	return 25
}
// intToInsulin converts a stroke count to milliunits for the given pump family.
func intToInsulin(strokes int, family Family) Insulin {
	return Insulin(strokes) * milliUnitsPerStroke(family)
}

// byteToInsulin is intToInsulin for a single raw byte.
func byteToInsulin(strokes uint8, family Family) Insulin {
	return intToInsulin(int(strokes), family)
}

// twoByteInsulin converts a two-byte stroke count (decoded by twoByteInt;
// presumably big-endian — see its definition) to milliunits.
func twoByteInsulin(data []byte, family Family) Insulin {
	return intToInsulin(twoByteInt(data), family)
}
func twoByteInsulinLE(data []byte) Insulin {
return intToInsulin(twoByteIntLE(data), 23)
} | units.go | 0.721056 | 0.547706 | units.go | starcoder |
package mock
import (
"github.com/pkg/errors"
D "github.com/vikneshwara-r-b/chaosmonkey/deploy"
)
// cloudProvider is the provider name this mock reports for every account.
const cloudProvider = "aws"
// Dep returns a mock implementation of deploy.Deployment
// Dep has 4 apps: foo, bar, baz, quux
// Each app runs in 1 account:
// foo, bar, baz run in prod
// quux runs in test
// Each app has one cluster: foo-prod, bar-prod, baz-prod
// Each cluster runs in one region: us-east-1
// Each cluster contains 1 AZ with two instances
// The instance IDs below are arbitrary fixture values.
func Dep() D.Deployment {
	prod := D.AccountName("prod")
	test := D.AccountName("test")
	usEast1 := D.RegionName("us-east-1")
	return &Deployment{map[string]D.AppMap{
		"foo":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"foo-prod": {usEast1: {"foo-prod-v001": []D.InstanceID{"i-d3e3d611", "i-63f52e25"}}}}}},
		"bar":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"bar-prod": {usEast1: {"bar-prod-v011": []D.InstanceID{"i-d7f06d45", "i-ce433cf1"}}}}}},
		"baz":  {prod: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"baz-prod": {usEast1: {"baz-prod-v004": []D.InstanceID{"i-25b86646", "i-573d46d5"}}}}}},
		"quux": {test: D.AccountInfo{CloudProvider: cloudProvider, Clusters: D.ClusterMap{"quux-test": {usEast1: {"quux-test-v004": []D.InstanceID{"i-25b866ab", "i-892d46d5"}}}}}},
	}}
}
// NewDeployment returns a mock implementation of deploy.Deployment
// built from the caller-supplied app map. Pass in a deploy.AppMap, for example:
// map[string]deploy.AppMap{
//    "foo": deploy.AppMap{"prod": {"foo-prod": {"us-east-1": {"foo-prod-v001": []string{"i-d3e3d611", "i-63f52e25"}}}}},
//    "bar": deploy.AppMap{"prod": {"bar-prod": {"us-east-1": {"bar-prod-v011": []string{"i-d7f06d45", "i-ce433cf1"}}}}},
//    "baz": deploy.AppMap{"prod": {"baz-prod": {"us-east-1": {"baz-prod-v004": []string{"i-25b86646", "i-573d46d5"}}}}},
//    "quux": deploy.AppMap{"test": {"quux-test": {"us-east-1": {"quux-test-v004": []string{"i-25b866ab", "i-892d46d5"}}}}},
// }
func NewDeployment(apps map[string]D.AppMap) D.Deployment {
	return &Deployment{apps}
}

// Deployment implements deploy.Deployment interface
// by serving everything out of an in-memory app map.
type Deployment struct {
	AppMap map[string]D.AppMap
}
// Apps implements deploy.Deployment.Apps: it streams every known app to c
// and closes the channel when done.
// NOTE(review): the apps filter parameter is ignored and all apps are always
// emitted — confirm callers of this mock expect unfiltered results.
func (d Deployment) Apps(c chan<- *D.App, apps []string) {
	defer close(c)
	for name, appmap := range d.AppMap {
		c <- D.NewApp(name, appmap)
	}
}
// GetClusterNames implements deploy.Deployment.GetClusterNames. It returns
// the cluster names for the given app/account in map-iteration (random)
// order; unknown app/account combinations yield an empty slice and no error.
func (d Deployment) GetClusterNames(app string, account D.AccountName) ([]D.ClusterName, error) {
	clusters := d.AppMap[app][account].Clusters
	result := make([]D.ClusterName, 0, len(clusters)) // preallocate: size is known
	for cluster := range clusters {
		result = append(result, cluster)
	}
	return result, nil
}

// GetRegionNames implements deploy.Deployment.GetRegionNames. It returns
// the region names for the given cluster in map-iteration (random) order;
// unknown combinations yield an empty slice and no error.
func (d Deployment) GetRegionNames(app string, account D.AccountName, cluster D.ClusterName) ([]D.RegionName, error) {
	regions := d.AppMap[app][account].Clusters[cluster]
	result := make([]D.RegionName, 0, len(regions)) // preallocate: size is known
	for region := range regions {
		result = append(result, region)
	}
	return result, nil
}
// AppNames implements deploy.Deployment.AppNames. It returns every app name
// in map-iteration (random) order.
func (d Deployment) AppNames() ([]string, error) {
	// Preallocate capacity and append, instead of the previous
	// make(len, len) plus manual index bookkeeping (gofmt -s / S1019).
	result := make([]string, 0, len(d.AppMap))
	for app := range d.AppMap {
		result = append(result, app)
	}
	return result, nil
}
// GetApp implements deploy.Deployment.GetApp.
// NOTE(review): an unknown name yields an app built from a nil app map
// rather than an error — confirm callers handle that.
func (d Deployment) GetApp(name string) (*D.App, error) {
	return D.NewApp(name, d.AppMap[name]), nil
}

// CloudProvider implements deploy.Deployment.CloudProvider; this mock
// always reports "aws" regardless of the account argument.
func (d Deployment) CloudProvider(account string) (string, error) {
	return cloudProvider, nil
}
// GetInstanceIDs implements deploy.Deployment.GetInstanceIDs
func (d Deployment) GetInstanceIDs(app string, account D.AccountName, cloudProvider string, region D.RegionName, cluster D.ClusterName) (D.ASGName, []D.InstanceID, error) {
// Return an error if the cluster doesn't exist in the region
appInfo, ok := d.AppMap[app]
if !ok {
return "", nil, errors.Errorf("no app %s", app)
}
accountInfo, ok := appInfo[account]
if !ok {
return "", nil, errors.Errorf("app %s not deployed in account %s", app, account)
}
clusterInfo, ok := accountInfo.Clusters[cluster]
if !ok {
return "", nil, errors.Errorf("no cluster %s in app:%s, account:%s", cluster, app, account)
}
asgs, ok := clusterInfo[region]
if !ok {
return "", nil, errors.Errorf("cluster %s in account %s not deployed in region %s", cluster, account, region)
}
instances := make([]D.InstanceID, 0)
// We assume there's only one asg, and retrieve the instances
var asg D.ASGName
var ids []D.InstanceID
for asg, ids = range asgs {
for _, id := range ids {
instances = append(instances, id)
}
}
return asg, instances, nil
} | mock/deployment.go | 0.587352 | 0.414662 | deployment.go | starcoder |
package popcount
// PopCount32 returns the number of set bits in v. It is the classic
// branch-free "bithacks" popcount:
//
//  1. v - ((v>>1) & 0x55555555) leaves each 2-bit pair holding the pop
//     count of that pair (00->00, 01->01, 10->01, 11->10).
//  2. Adding adjacent pairs masked with 0x33333333 widens to 4-bit counts
//     per nibble.
//  3. (v + v>>4) & 0x0F0F0F0F sums nibble pairs into the low nibble of
//     each byte; the mask clears the garbage left in the high nibbles.
//  4. Multiplying by 0x01010101 accumulates all four byte counts into the
//     top byte, which the final shift by 24 extracts.
//
// Note: Go's operator precedence differs from C's (binary & binds tighter
// than +), hence the explicit parentheses.
func PopCount32(v uint32) int {
	v -= (v >> 1) & 0x55555555
	v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
	v = (v + (v >> 4)) & 0x0F0F0F0F
	return int((v * 0x01010101) >> 24)
}
// PopCount64 returns the number of set bits in v using the same parallel
// bit-summing trick as PopCount32, widened to 64 bits (see the derivation
// there). The multiply gathers per-byte counts into the top byte, which the
// shift by 56 extracts.
func PopCount64(v uint64) int {
	v -= (v >> 1) & 0x5555555555555555
	v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333)
	v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0F
	return int((v * 0x0101010101010101) >> 56)
}
// PopCountData sums the set bits of data using the pure-Go PopCount64 and
// PopCount32 routines, consuming 8-byte words first, then 4-byte words,
// then single bytes. This is primarily for benchmarking; use PopCount() if
// SSSE3 is desired.
func PopCountData(data []byte) int {
	total := 0
	i := 0
	for ; len(data)-i >= 8; i += 8 {
		var word uint64
		for k := uint(0); k < 8; k++ {
			word |= uint64(data[i+int(k)]) << (8 * k)
		}
		total += PopCount64(word)
	}
	for ; len(data)-i >= 4; i += 4 {
		var word uint32
		for k := uint(0); k < 4; k++ {
			word |= uint32(data[i+int(k)]) << (8 * k)
		}
		total += PopCount32(word)
	}
	for ; i < len(data); i++ {
		total += PopCount32(uint32(data[i]))
	}
	return total
}
func popCountGeneric(data []byte) int {
sum := 0
for _, v := range data {
sum += PopCount32(uint32(v))
}
return sum
} | popcount.go | 0.642096 | 0.598342 | popcount.go | starcoder |
package xcounter
import (
"math"
"sync"
"sync/atomic"
"time"
"github.com/m3db/m3x/clock"
)
// frequencyBucket holds the event count for one time interval of the ring.
// The embedded RWMutex guards timestamp; count is additionally bumped with
// atomics so concurrent recorders can all proceed under the shared read lock.
type frequencyBucket struct {
	sync.RWMutex
	timestamp time.Time // start of the interval this bucket currently covers
	count     int64     // events recorded for that interval
}
// record records the frequency value n for a given time.
// If the frequency value is too old, it's discarded.
//
// It first tries under the shared read lock — the common case where the
// bucket already covers interval t and the count can be bumped atomically.
// When the bucket is stale it upgrades to the write lock, re-checks (another
// goroutine may have rotated the bucket in between), and otherwise resets
// the bucket to interval t with count n.
func (b *frequencyBucket) record(t time.Time, n int64) {
	b.RLock()
	if processed := b.recordWithLock(t, n); processed {
		b.RUnlock()
		return
	}
	b.RUnlock()
	// Upgrade to the exclusive lock and re-check to avoid racing with a
	// concurrent writer that already rotated this bucket.
	b.Lock()
	if processed := b.recordWithLock(t, n); processed {
		b.Unlock()
		return
	}
	// Bucket is stale: repurpose it for interval t.
	b.timestamp = t
	b.count = n
	b.Unlock()
}
// countFor returns the frequency value if the bucket
// timestamp matches the given time, 0 otherwise (i.e. the bucket has been
// reused for a different interval).
func (b *frequencyBucket) countFor(t time.Time) int64 {
	b.RLock()
	if b.timestamp != t {
		b.RUnlock()
		return 0
	}
	count := b.count
	b.RUnlock()
	return count
}

// recordWithLock attempts to record n for time t while some lock (read or
// write) on b is already held. It returns true if the value is processed
// (recorded or deliberately discarded), false if the caller must take the
// write lock and reset the bucket.
func (b *frequencyBucket) recordWithLock(t time.Time, n int64) bool {
	// The information is too old to keep
	if t.Before(b.timestamp) {
		return true
	}
	// The timestamp matches so it's safe to record
	if t.Equal(b.timestamp) {
		// NB(xichen): use atomics here so multiple goroutines
		// can record at the same time (all holding the shared read lock).
		atomic.AddInt64(&b.count, n)
		return true
	}
	// Otherwise this bucket is stale, don't record
	return false
}
// FrequencyCounter keeps track of event counts over a rolling window using a
// fixed ring of interval-aligned buckets: each bucket covers one interval,
// and a bucket is lazily repurposed when its slot's time comes around again.
type FrequencyCounter struct {
	nowFn    clock.NowFn
	interval time.Duration     // width of each bucket
	buckets  []frequencyBucket // circular buffer indexed by bucketIdx
}
// NewFrequencyCounter creates a new frequency counter using the interval and
// bucket count from opts, and the real wall clock.
func NewFrequencyCounter(opts Options) FrequencyCounter {
	return FrequencyCounter{
		nowFn:    time.Now,
		interval: opts.Interval(),
		buckets:  make([]frequencyBucket, opts.NumBuckets()),
	}
}

// Record records a frequency value in the bucket covering the current
// interval.
func (c *FrequencyCounter) Record(n int64) {
	now := c.nowFn().Truncate(c.interval)
	bucketIdx := c.bucketIdx(now)
	c.buckets[bucketIdx].record(now, n)
}

// Count returns the frequency count between now - dur and now, summed over
// whole buckets only. The current (possibly incomplete) bucket is excluded,
// and dur is capped at the ring's span.
func (c *FrequencyCounter) Count(dur time.Duration) int64 {
	if dur <= 0 {
		return 0
	}
	var (
		// Number of whole buckets covered by dur, capped at the ring size.
		n       = int(math.Min(float64(dur/c.interval), float64(len(c.buckets))))
		now     = c.nowFn().Truncate(c.interval)
		currIdx = c.bucketIdx(now)
		currTime = now
		total   int64
	)
	// NB(xichen): we discount the current bucket because it may be incomplete
	for i := 0; i < n; i++ {
		// Step one interval back, wrapping around the ring.
		currTime = currTime.Add(-c.interval)
		currIdx--
		if currIdx < 0 {
			currIdx += len(c.buckets)
		}
		// countFor returns 0 for buckets that no longer cover currTime.
		total += c.buckets[currIdx].countFor(currTime)
	}
	return total
}
func (c *FrequencyCounter) bucketIdx(now time.Time) int {
return int((now.UnixNano() / int64(c.interval)) % int64(len(c.buckets)))
} | src/dbnode/x/xcounter/frequency_counter.go | 0.712932 | 0.410047 | frequency_counter.go | starcoder |
package z80
import (
"fmt"
"strings"
"github.com/blackchip-org/pac8/pkg/memory"
"github.com/blackchip-org/pac8/pkg/proc"
"github.com/blackchip-org/pac8/pkg/util/bits"
)
// ReaderZ80 decodes the instruction at the cursor position into a statement.
// The first opcode byte selects a handler from dasmTable; the handler
// fetches any further bytes (prefixes, operands) itself.
func ReaderZ80(e proc.Eval) proc.Statement {
	e.Statement.Address = e.Cursor.Pos
	opcode := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, opcode)
	dasmTable[opcode](e)
	return *e.Statement
}

// FormatterZ80 returns a formatter that renders statements with an
// 11-column, left-aligned byte field.
func FormatterZ80() proc.CodeFormatter {
	options := proc.FormatOptions{
		BytesFormat: "%-11s",
	}
	return func(s proc.Statement) string {
		return proc.Format(s, options)
	}
}

// NewDisassembler creates a Z80 disassembler over mem.
func NewDisassembler(mem memory.Memory) *proc.Disassembler {
	return proc.NewDisassembler(mem, ReaderZ80, FormatterZ80())
}
// op1 renders a disassembled statement from mnemonic parts, fetching operand
// bytes from the instruction stream as the placeholder tokens require:
//
//	&4546    8-bit displacement relative to the current program counter
//	&0000    16-bit immediate address (little endian)
//	(&0000)  16-bit indirect address
//	&00      8-bit immediate
//	(&00)    8-bit indirect
//	(ix+0)   IX-indexed, displacement fetched
//	(iy+0)   IY-indexed, displacement fetched
//
// parts[0] is the mnemonic; parts[1] and parts[2] are operands, joined with
// a space and a comma respectively.
func op1(e proc.Eval, parts ...string) {
	var out strings.Builder
	for i, part := range parts {
		v := part
		switch {
		case i == 0:
			// Mnemonic, left-padded to a fixed width.
			v = fmt.Sprintf("%-4s", part)
		case parts[0] == "rst" && i == 1:
			// Reset statements have the argment encoded in the opcode. Change
			// the hex notation from & to $ in the second part
			v = "$" + v[1:]
		case part == "&4546":
			// This is an address that is a 8-bit displacement from the
			// current program counter
			delta := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, delta)
			addr := bits.Displace(e.Statement.Address+2, delta)
			v = fmt.Sprintf("$%04x", addr)
		case part == "&0000":
			// 16-bit immediate, low byte first.
			lo := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, lo)
			hi := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, hi)
			addr := bits.Join(hi, lo)
			v = fmt.Sprintf("$%04x", addr)
		case part == "(&0000)":
			// 16-bit indirect address, low byte first.
			lo := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, lo)
			hi := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, hi)
			addr := bits.Join(hi, lo)
			v = fmt.Sprintf("($%04x)", addr)
		case part == "&00":
			arg := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, arg)
			v = fmt.Sprintf("$%02x", arg)
		case part == "(&00)":
			arg := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, arg)
			v = fmt.Sprintf("($%02x)", arg)
		case part == "(ix+0)":
			delta := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, delta)
			v = fmt.Sprintf("(ix+$%02x)", delta)
		case part == "(iy+0)":
			delta := e.Cursor.Fetch()
			e.Statement.Bytes = append(e.Statement.Bytes, delta)
			v = fmt.Sprintf("(iy+$%02x)", delta)
		}
		if i == 1 {
			out.WriteString(" ")
		}
		if i == 2 {
			out.WriteString(",")
		}
		out.WriteString(v)
	}
	e.Statement.Op = strings.TrimSpace(out.String())
}
// op2 renders a statement for the DDCB/FDCB-style encodings where the
// indexed displacement byte was already fetched before the opcode, so it is
// read back from the recorded Statement.Bytes (second-to-last byte) instead
// of the cursor.
func op2(e proc.Eval, parts ...string) {
	var out strings.Builder
	for i, part := range parts {
		v := part
		switch {
		case i == 0:
			// Mnemonic, left-padded to a fixed width.
			v = fmt.Sprintf("%-4s", part)
		case part == "(ix+0)":
			delta := e.Statement.Bytes[len(e.Statement.Bytes)-2]
			v = fmt.Sprintf("(ix+$%02x)", delta)
		case part == "(iy+0)":
			delta := e.Statement.Bytes[len(e.Statement.Bytes)-2]
			v = fmt.Sprintf("(iy+$%02x)", delta)
		}
		if i == 1 {
			out.WriteString(" ")
		}
		if i == 2 {
			out.WriteString(",")
		}
		out.WriteString(v)
	}
	e.Statement.Op = strings.TrimSpace(out.String())
}
// opDD decodes a DD-prefixed (IX) opcode. A DD/ED/FD byte immediately after
// the prefix is an invalid chain and is reported as "?dd" without consuming
// the second prefix; DD CB sequences are routed to opDDCB.
func opDD(e proc.Eval) {
	next := e.Cursor.Peek()
	if next == 0xdd || next == 0xed || next == 0xfd {
		e.Statement.Op = "?dd"
		return
	}
	opcode := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, opcode)
	if opcode == 0xcb {
		opDDCB(e)
		return
	}
	dasmTableDD[opcode](e)
}

// opFD decodes an FD-prefixed (IY) opcode, with the same invalid-chain
// handling as opDD; FD CB sequences are routed to opFDCB.
func opFD(e proc.Eval) {
	next := e.Cursor.Peek()
	if next == 0xdd || next == 0xed || next == 0xfd {
		e.Statement.Op = "?fd"
		return
	}
	opcode := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, opcode)
	if opcode == 0xcb {
		opFDCB(e)
		return
	}
	dasmTableFD[opcode](e)
}
// opCB decodes a CB-prefixed opcode via dasmTableCB.
func opCB(e proc.Eval) {
	op := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, op)
	dasmTableCB[op](e)
}

// opED decodes an ED-prefixed (extended) opcode via dasmTableED.
func opED(e proc.Eval) {
	op := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, op)
	dasmTableED[op](e)
}
// opFDCB decodes an FD CB-prefixed opcode. In this encoding the displacement
// byte precedes the final opcode byte, so it is fetched first.
func opFDCB(e proc.Eval) {
	delta := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, delta)
	opcode := e.Cursor.Fetch()
	e.Statement.Bytes = append(e.Statement.Bytes, opcode)
	dasmTableFDCB[opcode](e)
}
func opDDCB(e proc.Eval) {
delta := e.Cursor.Fetch()
e.Statement.Bytes = append(e.Statement.Bytes, delta)
opcode := e.Cursor.Fetch()
e.Statement.Bytes = append(e.Statement.Bytes, opcode)
dasmTableDDCB[opcode](e)
} | pkg/z80/reader.go | 0.503662 | 0.400251 | reader.go | starcoder |
package manual
func future() string {
return `Expected in next chapter
- Semicolon statement terminator
- Void assignments
- Native variable deletion
- Spell returns
- Spells
- @Args() number
- @Exists(identifier) bool
- @Len(value) number
- @Str(value) string
- @Panic(exitcode, message)
Assignments
Some of the next features will be native variable deletions and void
assignment targets to replace the @Del spell. Void assignment targets
can be used to ignore the result of an expression.
_ := 1
x := _
Types
I hope to add additional default types such as 'list', 'map', and 'file'
but these won't be intrinsic to Scarlet; they can be modified or replaced
at leisure by any inquisitive programmer.
'List' and its accompanying spells allow a list of values to be stored
in an ordered manner and operated on through random access or
sequentially.
'Map' and its accompanying spells create and store a mapping between
two values. One will represent the key and the other the mapped value.
Maps will probably not be ordered but spells might be provided by
default to return an ordered set of keys.
'File' will likely only be accessible through special spells. These
spells will accept a filename along with a function that accepts a
'File' variable. Upon invocation the file will be opened and the
function called with the 'File' as a value which can be used to
perform IO. Upon function exit or error the file is automatically
closed before the spell terminates.
Guards
Guards will probably allow inline expressions since ease and conciseness
are good properties of scripting tool. However, the inline expression
must appear on the same line as the guard condition.
[x == 0] @Println("x is 0")
Loops
Loops will likely get an optional initialiser assignment before the
loop condition where a variable can be initialised and only accessible
within the loop.
loop i := 0 [i < 5] {
i := i + 1
}
Incrementors & Decrmenetors
Number increment and decrement operations are very probable, however,
they will only be allowed as statements. This is because usage within
expressions produces difficult to read code and subtle errors that are
hard to debug.
i++
i--
Spells
Spells will be able to return multiple values soon. The results being
assignable to variables.
x := @Len(s)
I hope to add blocks as parameters to enable code to be run with the
callers scope and variables. Here is an example spell with block parameter:
@If(true, {
x := 1
})
In future I hope to add some very common but completely removable and
modifiable spells to get users started. However, many of these depend on
language features not yet implemented:
x := @Args() Get the program arguments
x := @Exists("variable_name") Does a variable exist?
x := @Len(value) Find the length of lengthed value
x := @Str(value) Stringify a value of any type
@Panic(exitCode, message) Exit the script with an error message
e := @Catch({ Catch any panics and return as an error
...
})
@str. 'Str' spells
@list. 'List' type & spells
@map. 'Map' type & spells
@fmt. 'Template' type & spells
@io. Basic input and output spells`
} | _manual/future.go | 0.709523 | 0.498596 | future.go | starcoder |
package sac
import (
"github.com/seqsense/pcgol/mat"
"github.com/seqsense/pcgol/pc"
"github.com/seqsense/pcgol/pc/storage/voxelgrid"
)
// voxelGridSurfaceModel fits a plane (surface) model to points stored in a
// voxel grid, for use with sample-consensus (SAC) segmentation.
type voxelGridSurfaceModel struct {
	vg *voxelgrid.VoxelGrid  // voxel grid holding the candidate points
	ra pc.Vec3RandomAccessor // random access to the raw point coordinates
	vgMin, vgMax, vgSize mat.Vec3 // bounding box of the grid and its extent
}

const (
	sqrt3   = 1.732050808 // √3: longest diagonal of a unit voxel, used to size the scan step
	epsilon = 0.01        // tolerance for near-zero comparisons
)
// NewVoxelGridSurfaceModel creates a surface model over the given voxel grid
// and point accessor, caching the grid's bounding box and its extent.
func NewVoxelGridSurfaceModel(vg *voxelgrid.VoxelGrid, ra pc.Vec3RandomAccessor) *voxelGridSurfaceModel {
	vgMin, vgMax := vg.MinMax()
	vgSize := vgMax.Sub(vgMin)
	return &voxelGridSurfaceModel{
		vg:     vg,
		ra:     ra,
		vgMin:  vgMin,
		vgMax:  vgMax,
		vgSize: vgSize,
	}
}
// NumRange returns the minimum and maximum number of samples required to fit
// the model: exactly three points define a plane.
func (voxelGridSurfaceModel) NumRange() (min, max int) {
	return 3, 3
}
// Fit fits a plane through the three sampled points identified by ids and
// returns its coefficients. It fails (false) when the wrong number of samples
// is given, the points are (near-)collinear, or the plane does not cut the
// grid's bounding box. All geometry below is computed in the grid-local frame
// (coordinates shifted by m.vgMin).
func (m *voxelGridSurfaceModel) Fit(ids []int) (ModelCoefficients, bool) {
	if len(ids) != 3 {
		return nil, false
	}
	p0, p1, p2 := m.ra.Vec3At(ids[0]).Sub(m.vgMin), m.ra.Vec3At(ids[1]).Sub(m.vgMin), m.ra.Vec3At(ids[2]).Sub(m.vgMin)
	v1, v2 := p1.Sub(p0), p2.Sub(p0)
	// Calculate normal vector of the surface made by given three points.
	norm := v1.Cross(v2)
	if nearZeroSq(norm.NormSq()) {
		// Degenerate sample: points are (near-)collinear.
		return nil, false
	}
	// Plane equation: norm[0]*x + norm[1]*y + norm[2]*z = d
	norm = norm.Normalized()
	d := norm.Dot(p0)
	// Calculate edges on the voxelgrid boundary made by the plane.
	// nValid[k] marks axes whose normal component is large enough to divide by.
	nValid := [3]bool{!nearZero(norm[0]), !nearZero(norm[1]), !nearZero(norm[2])}
	var o [3][4]mat.Vec3
	vgn := norm.ElementMul(m.vgSize)
	// List all crossing points of the plane and boundary box.
	if nValid[0] {
		o[0][0] = mat.Vec3{(d - vgn[1] - vgn[2]) / norm[0], m.vgSize[1], m.vgSize[2]} // y+z+
		o[0][1] = mat.Vec3{(d - vgn[1]) / norm[0], m.vgSize[1], 0}                    // y+z-
		o[0][2] = mat.Vec3{(d - vgn[2]) / norm[0], 0, m.vgSize[2]}                    // y-z+
		o[0][3] = mat.Vec3{d / norm[0], 0, 0}                                         // y-z-
	}
	if nValid[1] {
		o[1][0] = mat.Vec3{m.vgSize[0], (d - vgn[0] - vgn[2]) / norm[1], m.vgSize[2]} // x+z+
		o[1][1] = mat.Vec3{m.vgSize[0], (d - vgn[0]) / norm[1], 0}                    // x+z-
		o[1][2] = mat.Vec3{0, (d - vgn[2]) / norm[1], m.vgSize[2]}                    // x-z+
		o[1][3] = mat.Vec3{0, d / norm[1], 0}                                         // x-z-
	}
	if nValid[2] {
		o[2][0] = mat.Vec3{m.vgSize[0], m.vgSize[1], (d - vgn[0] - vgn[1]) / norm[2]} // x+y+
		o[2][1] = mat.Vec3{m.vgSize[0], 0, (d - vgn[0]) / norm[2]}                    // x+y-
		o[2][2] = mat.Vec3{0, m.vgSize[1], (d - vgn[1]) / norm[2]}                    // x-y+
		o[2][3] = mat.Vec3{0, 0, d / norm[2]}                                         // x-y-
	}
	// point indexes a crossing point as o[a][i].
	type point struct {
		a, i int
	}
	var edge [3][4][]point
	isInside := func(p mat.Vec3) bool {
		return !(p[0] < 0 || m.vgSize[0] < p[0] || p[1] < 0 || m.vgSize[1] < p[1] || p[2] < 0 || m.vgSize[2] < p[2])
	}
	// List all edges of the plane cut by boundary box.
	for _, l := range []struct {
		a0, i0, a1, i1 int
	}{
		// Commented elements are the duplication of A->B and B->A.
		{0, 0, 1, 0}, {0, 0, 1, 2}, {0, 0, 2, 0}, {0, 0, 2, 2}, // (y+z+)<=>(x+z+), (y+z+)<=>(x-z+), (y+z+)<=>(x+y+), (y+z+)<=>(x-y+)
		{0, 1, 1, 1}, {0, 1, 1, 3}, {0, 1, 2, 0}, {0, 1, 2, 2}, // (y+z-)<=>(x+z-), (y+z-)<=>(x-z-), (y+z-)<=>(x+y+), (y+z-)<=>(x-y+)
		{0, 2, 1, 0}, {0, 2, 1, 2}, {0, 2, 2, 1}, {0, 2, 2, 3}, // (y-z+)<=>(x+z+), (y-z+)<=>(x-z+), (y-z+)<=>(x+y-), (y-z+)<=>(x-y-)
		{0, 3, 1, 1}, {0, 3, 1, 3}, {0, 3, 2, 1}, {0, 3, 2, 3}, // (y-z-)<=>(x+z-), (y-z-)<=>(x-z-), (y-z-)<=>(x+y-), (y-z-)<=>(x-y-)
		/*{1, 0, 0, 0}, {1, 0, 0, 2},*/ {1, 0, 2, 0}, {1, 0, 2, 1}, // (x+z+)<=>(y+z+), (x+z+)<=>(y-z+), (x+z+)<=>(x+y+), (x+z+)<=>(x+y-)
		/*{1, 1, 0, 1}, {1, 1, 0, 3},*/ {1, 1, 2, 0}, {1, 1, 2, 1}, // (x+z-)<=>(y+z-), (x+z-)<=>(y-z-), (x+z-)<=>(x+y+), (x+z-)<=>(x+y-)
		/*{1, 2, 0, 0}, {1, 2, 0, 2},*/ {1, 2, 2, 2}, {1, 2, 2, 3}, // (x-z+)<=>(y+z+), (x-z+)<=>(y-z+), (x-z+)<=>(x-y+), (x-z+)<=>(x-y-)
		/*{1, 3, 0, 1}, {1, 3, 0, 3},*/ {1, 3, 2, 2}, {1, 3, 2, 3}, // (x-z-)<=>(y+z-), (x-z-)<=>(y-z-), (x-z-)<=>(x-y+), (x-z-)<=>(x-y-)
		/*{2, 0, 0, 0}, {2, 0, 0, 1}, {2, 0, 1, 0}, {2, 0, 1, 1},*/ // (x+y+)<=>(y+z+), (x+y+)<=>(y+z-), (x+y+)<=>(x+z+), (x+y+)<=>(x+z-)
		/*{2, 1, 0, 2}, {2, 1, 0, 3}, {2, 1, 1, 0}, {2, 1, 1, 1},*/ // (x+y-)<=>(y-z+), (x+y-)<=>(y-z-), (x+y-)<=>(x+z+), (x+y-)<=>(x+z-)
		/*{2, 2, 0, 0}, {2, 2, 0, 1}, {2, 2, 1, 2}, {2, 2, 1, 3},*/ // (x-y+)<=>(y+z+), (x-y+)<=>(y+z-), (x-y+)<=>(x-z+), (x-y+)<=>(x-z-)
		/*{2, 3, 0, 2}, {2, 3, 0, 3}, {2, 3, 1, 2}, {2, 3, 1, 3},*/ // (x-y-)<=>(y-z+), (x-y-)<=>(y-z-), (x-y-)<=>(x-z+), (x-y-)<=>(x-z-)
		{0, 0, 0, 2}, {0, 0, 0, 1}, {0, 1, 0, 3}, {0, 3, 0, 2}, // (y+z+)<=>(y-z+), (y+z+)<=>(y+z-), (y+z-)<=>(y-z-), (y-z-)<=>(y-z+)
		{1, 0, 1, 2}, {1, 0, 1, 1}, {1, 1, 1, 3}, {1, 3, 1, 2}, // (x+z+)<=>(x-z+), (x+z+)<=>(x+z-), (x+z-)<=>(x-z-), (x-z-)<=>(x-z+)
		{2, 0, 2, 2}, {2, 0, 2, 1}, {2, 1, 2, 3}, {2, 3, 2, 2}, // (x+y+)<=>(x-y+), (x+y+)<=>(x+y-), (x+y-)<=>(x-y-), (x-y-)<=>(x-y+)
	} {
		if !nValid[l.a0] || !nValid[l.a1] {
			continue
		}
		// Keep only edges whose endpoints both lie on the box and which
		// have non-zero length.
		if isInside(o[l.a0][l.i0]) && isInside(o[l.a1][l.i1]) && !nearZeroSq(o[l.a0][l.i0].Sub(o[l.a1][l.i1]).NormSq()) {
			edge[l.a0][l.i0] = append(edge[l.a0][l.i0], point{l.a1, l.i1})
			edge[l.a1][l.i1] = append(edge[l.a1][l.i1], point{l.a0, l.i0})
		}
	}
	// Remove duplication of the edges.
	for a := 0; a < 3; a++ {
		for i := 0; i < 4; i++ {
			es := edge[a][i]
			es2 := make([]point, 0, len(es))
			for j, e := range es {
				ok := true
				for k := j + 1; k < len(es); k++ {
					if nearZeroSq(o[e.a][e.i].Sub(o[es[k].a][es[k].i]).NormSq()) {
						ok = false
						break
					}
				}
				if ok {
					es2 = append(es2, e)
				}
			}
			edge[a][i] = es2
		}
	}
	// Find largest two connected edges of the cut section of the boundary box.
	// Voxels on the surface can be approximately scanned by weighted average of the two edges.
	var aO, iO int
	var maxLenSq float32
	for a := 0; a < 3; a++ {
		for i := 0; i < 4; i++ {
			es := edge[a][i]
			if len(es) != 2 {
				continue
			}
			var l float32
			for _, e := range es {
				l += o[a][i].Sub(o[e.a][e.i]).NormSq()
			}
			if l > maxLenSq {
				maxLenSq = l
				aO, iO = a, i
			}
		}
	}
	if maxLenSq == 0 {
		// Surface not found
		return nil, false
	}
	es := edge[aO][iO]
	// o1 is the shared vertex; ov1/ov2 span the plane patch inside the box.
	o0, o1, o2 := o[es[0].a][es[0].i], o[aO][iO], o[es[1].a][es[1].i]
	ov1, ov2 := o0.Sub(o1), o2.Sub(o1)
	// Set a voxel scan interval to make it visited at least once per voxel.
	r := m.vg.Resolution() / sqrt3
	return &voxelGridSurfaceModelCoefficients{
		model:  m,
		origin: o1.Add(m.vgMin),
		v1:     ov1,
		v2:     ov2,
		l1:     r / ov1.Norm(),
		l2:     r / ov2.Norm(),
		norm:   norm,
		d:      d,
	}, true
}
// nearZero reports whether a lies strictly within epsilon of zero.
func nearZero(a float32) bool {
	if a < 0 {
		a = -a
	}
	return a < epsilon
}

// nearZeroSq reports whether the squared quantity a is below epsilon².
func nearZeroSq(a float32) bool {
	const epsilonSq = epsilon * epsilon
	return a < epsilonSq
}
// voxelGridSurfaceModelCoefficients holds a fitted plane and the scan
// parameters produced by Fit.
type voxelGridSurfaceModelCoefficients struct {
	model  *voxelGridSurfaceModel
	origin mat.Vec3  // shared vertex of the plane patch, in world coordinates
	v1, v2 mat.Vec3  // vectors spanning the plane patch inside the bounding box
	l1, l2 float32   // normalized scan steps along v1/v2 (one voxel per step)
	norm   mat.Vec3  // unit plane normal (grid-local frame)
	d      float32   // plane offset: norm·p = d for p on the plane (grid-local)
}
// Evaluate scans the plane patch in steps of l1/l2 along v1/v2 and returns
// the total number of points in the voxels the patch passes through.
// Each voxel is counted at most once (tracked via the added bitmap).
func (c *voxelGridSurfaceModelCoefficients) Evaluate() int {
	added := make([]bool, c.model.vg.Len())
	var cnt int
	for a := float32(0); a <= 1; a += c.l1 {
		for b := float32(0); b <= 1; b += c.l2 {
			// Sample position on the patch: origin + a*v1 + b*v2.
			p := c.origin.Add(c.v1.Mul(a)).Add(c.v2.Mul(b))
			addr, ok := c.model.vg.Addr(p)
			if !ok {
				continue
			}
			if !added[addr] {
				added[addr] = true
				cnt += len(c.model.vg.GetByAddr(addr))
			}
		}
	}
	return cnt
}
// Inliers returns the indices of all points whose distance from the fitted
// plane is strictly less than d.
func (c *voxelGridSurfaceModelCoefficients) Inliers(d float32) []int {
	base := c.model.vgMin
	total := c.model.ra.Len()
	out := make([]int, 0, total)
	for i := 0; i < total; i++ {
		// Signed distance from the plane, in the grid-local frame.
		dist := c.norm.Dot(c.model.ra.Vec3At(i).Sub(base)) - c.d
		if -d < dist && dist < d {
			out = append(out, i)
		}
	}
	return out
}
// IsIn reports whether point p lies within distance d of the fitted plane.
func (c *voxelGridSurfaceModelCoefficients) IsIn(p mat.Vec3, d float32) bool {
	// Signed distance from the plane, in the grid-local frame.
	dd := c.norm.Dot(p.Sub(c.model.vgMin)) - c.d
	return -d < dd && dd < d
} | pc/sac/surface.go | 0.619701 | 0.404949 | surface.go | starcoder |
package bst
// T is the internal representation of a binary search tree with int keys.
// Duplicate keys are rejected on insertion. The zero value is an empty tree.
type T struct {
	root  *node // root of the tree; nil when the tree is empty
	count int   // number of nodes currently stored; sizes Traverse's channel
}

// node is the internal representation of a binary tree node.
type node struct {
	key  int
	val  interface{}
	l, r *node // left and right children
}

// TraversalType represents one of the three known traversals.
type TraversalType int

const (
	InOrder TraversalType = iota // left subtree, node, right subtree
	PreOrder                     // node, left subtree, right subtree
	PostOrder                    // left subtree, right subtree, node
)
// Insert adds a given key+value to the tree and returns true if it was added.
// Inserting an existing key is a no-op and returns false.
// Average: O(log(n)) Worst: O(n)
func (t *T) Insert(k int, v interface{}) (added bool) {
	t.root, added = insert(t.root, k, v)
	if added {
		// Track the size so Traverse can buffer its channel exactly.
		t.count++
	}
	return
}
// insert recursively adds a key+value below n, returning the (possibly new)
// subtree root and whether a node was created. Existing keys are left as-is.
func insert(n *node, k int, v interface{}) (*node, bool) {
	if n == nil {
		return &node{key: k, val: v}, true
	}
	var added bool
	switch {
	case k < n.key:
		n.l, added = insert(n.l, k, v)
	case k > n.key:
		n.r, added = insert(n.r, k, v)
	}
	return n, added
}
// Delete removes a given key from the tree and returns true if it was removed.
// Average: O(log(n)) Worst: O(n)
func (t *T) Delete(k int) (deleted bool) {
	n, deleted := delete(t.root, k)
	if deleted {
		// Handling the case of root deletion.
		// delete returns the replacement subtree root; only when the root
		// itself was physically removed (its key still equals k) must the
		// replacement be installed. In the two-children case the root node
		// is reused with its key overwritten, so no reassignment is needed.
		if t.root.key == k {
			t.root = n
		}
		t.count--
	}
	return deleted
}
// delete recursively deletes a key from the subtree rooted at n.
// It returns the (possibly replaced) subtree root and whether a node was
// removed.
func delete(n *node, k int) (r *node, deleted bool) {
	if r = n; n == nil {
		return nil, false
	}
	if k < n.key {
		r.l, deleted = delete(n.l, k)
	} else if k > n.key {
		r.r, deleted = delete(n.r, k)
	} else {
		if n.l != nil && n.r != nil {
			// Two children: copy the in-order predecessor (the right-most
			// element of the left subtree) into n, then remove that element
			// from the left subtree.
			s := n.l
			for s.r != nil {
				s = s.r
			}
			r.key = s.key
			r.val = s.val
			// Bug fix: the removal must descend from the subtree root n.l,
			// not start at s itself. The old call delete(s, s.key) replaced
			// n's whole left subtree with s.l, discarding every node on the
			// path between n.l and s.
			r.l, deleted = delete(n.l, s.key)
		} else if n.l != nil {
			// One (left) child: splice it in place of n.
			r = n.l
			deleted = true
		} else if n.r != nil {
			// One (right) child: splice it in place of n.
			r = n.r
			deleted = true
		} else {
			// Leaf: simply drop it.
			r = nil
			deleted = true
		}
	}
	return
}
// Find returns the value found at the given key, or nil when the key is
// not present.
// Average: O(log(n)) Worst: O(n)
func (t *T) Find(k int) interface{} {
	return find(t.root, k)
}
// find walks the subtree rooted at n and returns the value stored under k,
// or nil when k is absent.
func find(n *node, k int) interface{} {
	for n != nil {
		switch {
		case k < n.key:
			n = n.l
		case k > n.key:
			n = n.r
		default:
			return n.val
		}
	}
	return nil
}
// Clear removes all the nodes from the tree, leaving it empty.
// O(n)
func (t *T) Clear() {
	t.root = clear(t.root)
	t.count = 0
}
// clear recursively detaches every child link in the subtree so the garbage
// collector can reclaim the nodes, and always returns nil.
func clear(n *node) *node {
	if n != nil {
		n.l = clear(n.l)
		n.r = clear(n.r)
	}
	return nil
}
// Traverse provides an iterator over the tree in the given traversal order.
// The channel is buffered with the tree's size, so the traversal goroutine
// never blocks and terminates even if the consumer stops reading early.
// O(n)
func (t *T) Traverse(tt TraversalType) <-chan interface{} {
	c := make(chan interface{}, t.count)
	go func() {
		switch tt {
		case InOrder:
			inOrder(t.root, c)
		case PreOrder:
			preOrder(t.root, c)
		case PostOrder:
			postOrder(t.root, c)
		}
		close(c)
	}()
	return c
}
// inOrder sends the values of the subtree rooted at n in left, parent, right
// order, using an explicit stack instead of recursion.
func inOrder(n *node, c chan interface{}) {
	var stack []*node
	for n != nil || len(stack) > 0 {
		for n != nil {
			stack = append(stack, n)
			n = n.l
		}
		n = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		c <- n.val
		n = n.r
	}
}
// preOrder sends the values of the subtree rooted at n in parent, left, right
// order, using an explicit stack instead of recursion.
func preOrder(n *node, c chan interface{}) {
	if n == nil {
		return
	}
	stack := []*node{n}
	for len(stack) > 0 {
		cur := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		c <- cur.val
		// Push right first so the left subtree is emitted first.
		if cur.r != nil {
			stack = append(stack, cur.r)
		}
		if cur.l != nil {
			stack = append(stack, cur.l)
		}
	}
}
// postOrder returns the left, right, parent nodes.
func postOrder(n *node, c chan interface{}) {
	if n == nil {
		return
	}
	postOrder(n.l, c)
	postOrder(n.r, c)
	// Emit the node only after both subtrees.
	c <- n.val
} | bst/bst.go | 0.822296 | 0.548674 | bst.go | starcoder |
package expression
import (
"strconv"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/trace_util_0"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// Vectorizable checks whether a list of expressions can employ vectorized
// execution. Expressions containing SetVar/GetVar must be evaluated row by
// row, so their presence disables vectorization.
// NOTE: the trace_util_0.Count calls are coverage instrumentation emitted by
// a tool; their counter IDs are fixed and must not be altered.
func Vectorizable(exprs []Expression) bool {
	trace_util_0.Count(_chunk_executor_00000, 0)
	for _, expr := range exprs {
		trace_util_0.Count(_chunk_executor_00000, 2)
		if HasGetSetVarFunc(expr) {
			trace_util_0.Count(_chunk_executor_00000, 3)
			return false
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 1)
	return true
}
// HasGetSetVarFunc checks whether an expression contains a SetVar/GetVar
// function, searching the whole scalar-function argument tree recursively.
func HasGetSetVarFunc(expr Expression) bool {
	trace_util_0.Count(_chunk_executor_00000, 4)
	scalaFunc, ok := expr.(*ScalarFunction)
	if !ok {
		trace_util_0.Count(_chunk_executor_00000, 9)
		// Columns, constants etc. cannot contain SetVar/GetVar.
		return false
	}
	trace_util_0.Count(_chunk_executor_00000, 5)
	if scalaFunc.FuncName.L == ast.SetVar {
		trace_util_0.Count(_chunk_executor_00000, 10)
		return true
	}
	trace_util_0.Count(_chunk_executor_00000, 6)
	if scalaFunc.FuncName.L == ast.GetVar {
		trace_util_0.Count(_chunk_executor_00000, 11)
		return true
	}
	trace_util_0.Count(_chunk_executor_00000, 7)
	for _, arg := range scalaFunc.GetArgs() {
		trace_util_0.Count(_chunk_executor_00000, 12)
		if HasGetSetVarFunc(arg) {
			trace_util_0.Count(_chunk_executor_00000, 13)
			return true
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 8)
	return false
}
// VectorizedExecute evaluates a list of expressions column by column and
// appends their results to "output" Chunk. Evaluation stops at the first
// expression that fails.
func VectorizedExecute(ctx sessionctx.Context, exprs []Expression, iterator *chunk.Iterator4Chunk, output *chunk.Chunk) error {
	trace_util_0.Count(_chunk_executor_00000, 14)
	for colID, expr := range exprs {
		trace_util_0.Count(_chunk_executor_00000, 16)
		err := evalOneColumn(ctx, expr, iterator, output, colID)
		if err != nil {
			trace_util_0.Count(_chunk_executor_00000, 17)
			return err
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 15)
	return nil
}
// evalOneColumn evaluates expr for every row of the iterator and appends the
// results to column colID of output. It dispatches once on the expression's
// eval type so each per-row loop calls a single type-specific helper.
func evalOneColumn(ctx sessionctx.Context, expr Expression, iterator *chunk.Iterator4Chunk, output *chunk.Chunk, colID int) (err error) {
	trace_util_0.Count(_chunk_executor_00000, 18)
	switch fieldType, evalType := expr.GetType(), expr.GetType().EvalType(); evalType {
	case types.ETInt:
		trace_util_0.Count(_chunk_executor_00000, 20)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 27)
			err = executeToInt(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETReal:
		trace_util_0.Count(_chunk_executor_00000, 21)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 28)
			err = executeToReal(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETDecimal:
		trace_util_0.Count(_chunk_executor_00000, 22)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 29)
			err = executeToDecimal(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETDatetime, types.ETTimestamp:
		trace_util_0.Count(_chunk_executor_00000, 23)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 30)
			err = executeToDatetime(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETDuration:
		trace_util_0.Count(_chunk_executor_00000, 24)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 31)
			err = executeToDuration(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETJson:
		trace_util_0.Count(_chunk_executor_00000, 25)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 32)
			err = executeToJSON(ctx, expr, fieldType, row, output, colID)
		}
	case types.ETString:
		trace_util_0.Count(_chunk_executor_00000, 26)
		for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 33)
			err = executeToString(ctx, expr, fieldType, row, output, colID)
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 19)
	return err
}
// evalOneCell evaluates expr for a single row and appends the result to
// column colID of output, dispatching on the expression's eval type.
func evalOneCell(ctx sessionctx.Context, expr Expression, row chunk.Row, output *chunk.Chunk, colID int) (err error) {
	trace_util_0.Count(_chunk_executor_00000, 34)
	switch fieldType, evalType := expr.GetType(), expr.GetType().EvalType(); evalType {
	case types.ETInt:
		trace_util_0.Count(_chunk_executor_00000, 36)
		err = executeToInt(ctx, expr, fieldType, row, output, colID)
	case types.ETReal:
		trace_util_0.Count(_chunk_executor_00000, 37)
		err = executeToReal(ctx, expr, fieldType, row, output, colID)
	case types.ETDecimal:
		trace_util_0.Count(_chunk_executor_00000, 38)
		err = executeToDecimal(ctx, expr, fieldType, row, output, colID)
	case types.ETDatetime, types.ETTimestamp:
		trace_util_0.Count(_chunk_executor_00000, 39)
		err = executeToDatetime(ctx, expr, fieldType, row, output, colID)
	case types.ETDuration:
		trace_util_0.Count(_chunk_executor_00000, 40)
		err = executeToDuration(ctx, expr, fieldType, row, output, colID)
	case types.ETJson:
		trace_util_0.Count(_chunk_executor_00000, 41)
		err = executeToJSON(ctx, expr, fieldType, row, output, colID)
	case types.ETString:
		trace_util_0.Count(_chunk_executor_00000, 42)
		err = executeToString(ctx, expr, fieldType, row, output, colID)
	}
	trace_util_0.Count(_chunk_executor_00000, 35)
	return err
}
// executeToInt evaluates expr at row and appends the integer result to column
// colID of output. BIT columns are appended as decimal-string bytes; columns
// with the unsigned flag are appended as uint64; everything else as int64.
func executeToInt(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 43)
	res, isNull, err := expr.EvalInt(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 48)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 44)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 49)
		output.AppendNull(colID)
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 45)
	if fieldType.Tp == mysql.TypeBit {
		trace_util_0.Count(_chunk_executor_00000, 50)
		output.AppendBytes(colID, strconv.AppendUint(make([]byte, 0, 8), uint64(res), 10))
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 46)
	if mysql.HasUnsignedFlag(fieldType.Flag) {
		trace_util_0.Count(_chunk_executor_00000, 51)
		output.AppendUint64(colID, uint64(res))
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 47)
	output.AppendInt64(colID, res)
	return nil
}
// executeToReal evaluates expr at row and appends the floating-point result
// to column colID of output; FLOAT columns are narrowed to float32.
func executeToReal(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 52)
	res, isNull, err := expr.EvalReal(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 56)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 53)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 57)
		output.AppendNull(colID)
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 54)
	if fieldType.Tp == mysql.TypeFloat {
		trace_util_0.Count(_chunk_executor_00000, 58)
		output.AppendFloat32(colID, float32(res))
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 55)
	output.AppendFloat64(colID, res)
	return nil
}
// executeToDecimal evaluates expr at row and appends the decimal result
// (or NULL) to column colID of output.
func executeToDecimal(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 59)
	res, isNull, err := expr.EvalDecimal(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 62)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 60)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 63)
		output.AppendNull(colID)
		return nil
	}
	trace_util_0.Count(_chunk_executor_00000, 61)
	output.AppendMyDecimal(colID, res)
	return nil
}
// executeToDatetime evaluates expr at row and appends the time result
// (or NULL) to column colID of output.
func executeToDatetime(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 64)
	res, isNull, err := expr.EvalTime(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 67)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 65)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 68)
		output.AppendNull(colID)
	} else {
		trace_util_0.Count(_chunk_executor_00000, 69)
		{
			output.AppendTime(colID, res)
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 66)
	return nil
}
// executeToDuration evaluates expr at row and appends the duration result
// (or NULL) to column colID of output.
func executeToDuration(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 70)
	res, isNull, err := expr.EvalDuration(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 73)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 71)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 74)
		output.AppendNull(colID)
	} else {
		trace_util_0.Count(_chunk_executor_00000, 75)
		{
			output.AppendDuration(colID, res)
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 72)
	return nil
}
// executeToJSON evaluates expr at row and appends the JSON result
// (or NULL) to column colID of output.
func executeToJSON(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 76)
	res, isNull, err := expr.EvalJSON(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 79)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 77)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 80)
		output.AppendNull(colID)
	} else {
		trace_util_0.Count(_chunk_executor_00000, 81)
		{
			output.AppendJSON(colID, res)
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 78)
	return nil
}
// executeToString evaluates expr at row and appends the string result to
// column colID of output. ENUM/SET columns are wrapped into their named
// representation with a zero Value (only the name is known here).
func executeToString(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error {
	trace_util_0.Count(_chunk_executor_00000, 82)
	res, isNull, err := expr.EvalString(ctx, row)
	if err != nil {
		trace_util_0.Count(_chunk_executor_00000, 85)
		return err
	}
	trace_util_0.Count(_chunk_executor_00000, 83)
	if isNull {
		trace_util_0.Count(_chunk_executor_00000, 86)
		output.AppendNull(colID)
	} else {
		trace_util_0.Count(_chunk_executor_00000, 87)
		if fieldType.Tp == mysql.TypeEnum {
			trace_util_0.Count(_chunk_executor_00000, 88)
			val := types.Enum{Value: uint64(0), Name: res}
			output.AppendEnum(colID, val)
		} else {
			trace_util_0.Count(_chunk_executor_00000, 89)
			if fieldType.Tp == mysql.TypeSet {
				trace_util_0.Count(_chunk_executor_00000, 90)
				val := types.Set{Value: uint64(0), Name: res}
				output.AppendSet(colID, val)
			} else {
				trace_util_0.Count(_chunk_executor_00000, 91)
				{
					output.AppendString(colID, res)
				}
			}
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 84)
	return nil
}
// VectorizedFilter applies a list of filters to a Chunk and
// returns a bool slice, which indicates whether a row is passed the filters.
// Filters is executed vectorized: each filter is evaluated over all rows,
// and rows already rejected by an earlier filter are skipped.
func VectorizedFilter(ctx sessionctx.Context, filters []Expression, iterator *chunk.Iterator4Chunk, selected []bool) ([]bool, error) {
	trace_util_0.Count(_chunk_executor_00000, 92)
	// Start with every row selected; the caller's slice is reused.
	selected = selected[:0]
	for i, numRows := 0, iterator.Len(); i < numRows; i++ {
		trace_util_0.Count(_chunk_executor_00000, 95)
		selected = append(selected, true)
	}
	trace_util_0.Count(_chunk_executor_00000, 93)
	for _, filter := range filters {
		trace_util_0.Count(_chunk_executor_00000, 96)
		isIntType := true
		if filter.GetType().EvalType() != types.ETInt {
			trace_util_0.Count(_chunk_executor_00000, 98)
			isIntType = false
		}
		trace_util_0.Count(_chunk_executor_00000, 97)
		for row := iterator.Begin(); row != iterator.End(); row = iterator.Next() {
			trace_util_0.Count(_chunk_executor_00000, 99)
			if !selected[row.Idx()] {
				trace_util_0.Count(_chunk_executor_00000, 101)
				continue
			}
			trace_util_0.Count(_chunk_executor_00000, 100)
			if isIntType {
				trace_util_0.Count(_chunk_executor_00000, 102)
				filterResult, isNull, err := filter.EvalInt(ctx, row)
				if err != nil {
					trace_util_0.Count(_chunk_executor_00000, 104)
					return nil, err
				}
				trace_util_0.Count(_chunk_executor_00000, 103)
				// NULL and zero both reject the row.
				selected[row.Idx()] = selected[row.Idx()] && !isNull && (filterResult != 0)
			} else {
				trace_util_0.Count(_chunk_executor_00000, 105)
				{
					// TODO: should rewrite the filter to `cast(expr as SIGNED) != 0` and always use `EvalInt`.
					bVal, _, err := EvalBool(ctx, []Expression{filter}, row)
					if err != nil {
						trace_util_0.Count(_chunk_executor_00000, 107)
						return nil, err
					}
					trace_util_0.Count(_chunk_executor_00000, 106)
					selected[row.Idx()] = selected[row.Idx()] && bVal
				}
			}
		}
	}
	trace_util_0.Count(_chunk_executor_00000, 94)
	return selected, nil
}
var _chunk_executor_00000 = "expression/chunk_executor.go" | expression/chunk_executor.go | 0.506103 | 0.412589 | chunk_executor.go | starcoder |
package bls
import (
"encoding/binary"
"errors"
"fmt"
"math/big"
"math/bits"
)
//go:generate go run asm/asm.go -out primitivefuncs_amd64.s
// FQRepr represents a uint384. The least significant bits are first.
type FQRepr [6]uint64

// IsOdd checks if the FQRepr is odd.
func (f FQRepr) IsOdd() bool {
	return f[0]&1 != 0
}

// IsEven checks if the FQRepr is even.
func (f FQRepr) IsEven() bool {
	return f[0]&1 == 0
}

// IsZero checks if the FQRepr is zero.
func (f FQRepr) IsZero() bool {
	// OR all limbs together: the value is zero iff every limb is zero.
	return f[0]|f[1]|f[2]|f[3]|f[4]|f[5] == 0
}

// NewFQRepr creates a new number given a uint64.
func NewFQRepr(n uint64) FQRepr {
	return FQRepr{n}
}
// Rsh shifts the FQRepr right by a certain number of bits.
// Shifting by 384 bits or more clears the value to zero.
func (f *FQRepr) Rsh(n uint) {
	if n >= 64*6 {
		f[0] = 0
		f[1] = 0
		f[2] = 0
		f[3] = 0
		f[4] = 0
		f[5] = 0
		return
	}
	// First shift down by whole 64-bit limbs (each pass moves every limb
	// one slot toward the least significant end and zeroes the top limb).
	for n >= 64 {
		t := uint64(0)
		for i := 5; i >= 0; i-- {
			t, f[i] = f[i], t
		}
		n -= 64
	}
	// Then shift the remaining 0–63 bits, carrying the low bits of each
	// limb into the limb below it.
	if n > 0 {
		t := uint64(0)
		for i := 5; i >= 0; i-- {
			t2 := f[i] << (64 - n)
			f[i] >>= n
			f[i] |= t
			t = t2
		}
	}
}
// Div2 divides the FQRepr by 2 (a one-bit right shift across all limbs).
func (f *FQRepr) Div2() {
	var carry uint64
	for i := 5; i >= 0; i-- {
		w := f[i]
		f[i] = w>>1 | carry
		// The dropped low bit becomes the top bit of the next lower limb.
		carry = w << 63
	}
}
// Mul2 multiplies the FQRepr by 2 (a one-bit left shift across all limbs).
// Overflow out of the top limb is discarded.
func (f *FQRepr) Mul2() {
	var carry uint64
	for i := 0; i < 6; i++ {
		w := f[i]
		f[i] = w<<1 | carry
		// The dropped high bit becomes the low bit of the next higher limb.
		carry = w >> 63
	}
}
// Lsh shifts the FQRepr left by a certain number of bits.
// Shifting by 384 bits or more clears the value; overflow is discarded.
func (f *FQRepr) Lsh(n uint) {
	if n >= 64*6 {
		f[0] = 0
		f[1] = 0
		f[2] = 0
		f[3] = 0
		f[4] = 0
		f[5] = 0
		return
	}
	// First shift up by whole 64-bit limbs (each pass moves every limb one
	// slot toward the most significant end and zeroes the bottom limb).
	for n >= 64 {
		t := uint64(0)
		for i := 0; i < 6; i++ {
			t, f[i] = f[i], t
		}
		n -= 64
	}
	// Then shift the remaining 0–63 bits, carrying the high bits of each
	// limb into the limb above it.
	if n > 0 {
		t := uint64(0)
		for i := 0; i < 6; i++ {
			t2 := f[i] >> (64 - n)
			f[i] <<= n
			f[i] |= t
			t = t2
		}
	}
}
// AddNoCarry adds two FQReprs to another and does not handle
// carry. It delegates to the package-level AddNoCarry primitive
// (presumably generated/assembly-backed via the go:generate directive
// above — not visible in this file).
func (f *FQRepr) AddNoCarry(g FQRepr) {
	*f = AddNoCarry(*f, g)
}

// SubNoBorrow subtracts two FQReprs from another and does not handle
// borrow. It delegates to the package-level SubNoBorrow primitive.
func (f *FQRepr) SubNoBorrow(g FQRepr) {
	*f = SubNoBorrow(*f, g)
}
// Equals checks if two FQRepr's are equal.
func (f *FQRepr) Equals(g FQRepr) bool {
	for i := range f {
		if f[i] != g[i] {
			return false
		}
	}
	return true
}

// Cmp compares two FQRepr's, returning 1, -1 or 0 when f is greater than,
// less than, or equal to g. Limbs are compared from most significant down.
func (f *FQRepr) Cmp(g FQRepr) int {
	for i := 5; i >= 0; i-- {
		switch {
		case f[i] > g[i]:
			return 1
		case f[i] < g[i]:
			return -1
		}
	}
	return 0
}
// Copy copies a FQRepr to a new instance and returns it.
// (FQRepr is an array, so dereferencing already yields a copy.)
func (f *FQRepr) Copy() FQRepr {
	return *f
}

// String converts the FQRepr to a fixed-width (96 hex digits) big-endian
// hexadecimal string.
func (f FQRepr) String() string {
	return fmt.Sprintf("%016x%016x%016x%016x%016x%016x", f[5], f[4], f[3], f[2], f[1], f[0])
}
// BitLen counts the number of bits the number is (the index of the highest
// set bit plus one, or 0 for zero).
func (f FQRepr) BitLen() uint {
	for i := 5; i >= 0; i-- {
		if f[i] != 0 {
			return uint(64*i + bits.Len64(f[i]))
		}
	}
	return 0
}
// FQReprFromBytes gets a new FQRepr from big-endian bytes: b[0:8] holds the
// most significant limb, which is stored last in the little-endian limb array.
func FQReprFromBytes(b [48]byte) FQRepr {
	m0 := binary.BigEndian.Uint64(b[0:8])
	m1 := binary.BigEndian.Uint64(b[8:16])
	m2 := binary.BigEndian.Uint64(b[16:24])
	m3 := binary.BigEndian.Uint64(b[24:32])
	m4 := binary.BigEndian.Uint64(b[32:40])
	m5 := binary.BigEndian.Uint64(b[40:48])
	return FQRepr{m5, m4, m3, m2, m1, m0}
}

// Bytes gets the big-endian byte representation of an FQRepr
// (the inverse of FQReprFromBytes).
func (f FQRepr) Bytes() [48]byte {
	var out [48]byte
	binary.BigEndian.PutUint64(out[0:8], f[5])
	binary.BigEndian.PutUint64(out[8:16], f[4])
	binary.BigEndian.PutUint64(out[16:24], f[3])
	binary.BigEndian.PutUint64(out[24:32], f[2])
	binary.BigEndian.PutUint64(out[32:40], f[1])
	binary.BigEndian.PutUint64(out[40:48], f[0])
	return out
}
// Bit checks if a bit is set (little-endian): n == 0 is the least
// significant bit of the value.
func (f FQRepr) Bit(n uint) bool {
	return f[n/64]&(1<<(n%64)) != 0
}
// FQReprFromString creates a FQRepr from a string in base b, failing on
// unparsable, negative or over-384-bit input.
func FQReprFromString(s string, b uint) (FQRepr, error) {
	out, valid := new(big.Int).SetString(s, int(b))
	if !valid {
		return FQRepr{}, errors.New("FQRepr not valid")
	}
	return FQReprFromBigInt(out)
}

// fqReprFromHexUnchecked creates a FQRepr from a hex string,
// ignoring parse and range errors. For internal, trusted constants only.
func fqReprFromHexUnchecked(s string) FQRepr {
	out, _ := new(big.Int).SetString(s, 16)
	return fqReprFromBigIntUnchecked(out)
}

// fqReprFromStringUnchecked creates a FQRepr from a string in base b,
// ignoring parse and range errors. For internal, trusted constants only.
func fqReprFromStringUnchecked(s string, b uint) FQRepr {
	out, _ := new(big.Int).SetString(s, int(b))
	return fqReprFromBigIntUnchecked(out)
}
// ToBig gets the big.Int representation of the FQRepr by accumulating limbs
// from most to least significant, shifting 64 bits between each.
func (f FQRepr) ToBig() *big.Int {
	out := big.NewInt(0)
	for i := 5; i >= 0; i-- {
		out.Add(out, new(big.Int).SetUint64(f[i]))
		if i != 0 {
			out.Lsh(out, 64)
		}
	}
	return out
}
// Cached big.Int constants for the conversions below.
var bigIntZero = big.NewInt(0)
var oneLsh64MinusOne = new(big.Int).SetUint64(0xffffffffffffffff) // 64-bit limb mask

// FQReprFromBigInt create a FQRepr from a big.Int.
// It fails when n is negative or wider than 384 bits.
func FQReprFromBigInt(n *big.Int) (FQRepr, error) {
	if n.BitLen() > 384 || n.Sign() == -1 {
		return FQRepr{}, errors.New("invalid input string")
	}
	out := new(big.Int).Set(n)
	newf := NewFQRepr(0)
	i := 0
	// Peel off one 64-bit limb at a time, least significant first.
	for out.Cmp(bigIntZero) != 0 {
		o := new(big.Int).And(out, oneLsh64MinusOne)
		newf[i] = o.Uint64()
		i++
		out.Rsh(out, 64)
	}
	return newf, nil
}
// fqReprFromBigIntUnchecked creates a FQRepr from a big.Int without the
// range/sign validation performed by FQReprFromBigInt. Intended for
// internal, trusted constants only.
func fqReprFromBigIntUnchecked(n *big.Int) FQRepr {
	out := new(big.Int).Set(n)
	newf := NewFQRepr(0)
	i := 0
	// Peel off one 64-bit limb at a time, least significant first.
	for out.Cmp(bigIntZero) != 0 {
		o := new(big.Int).And(out, oneLsh64MinusOne)
		newf[i] = o.Uint64()
		i++
		out.Rsh(out, 64)
	}
	return newf
} | fqrepr.go | 0.596668 | 0.505066 | fqrepr.go | starcoder |
// Package remap handles tracking the locations of Go tokens in a source text
// across a rewrite by the Go formatter.
package remap
import (
"fmt"
"go/scanner"
"go/token"
)
// A Location represents a span of byte offsets in the source text.
type Location struct {
	Pos, End int // End is exclusive
}

// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location

// Find reports whether the specified span is recorded by m, and if so returns
// the new location it was mapped to. If the input span was not found, the
// returned location is the same as the input.
func (m Map) Find(pos, end int) (Location, bool) {
	in := Location{Pos: pos, End: end}
	out, ok := m[in]
	if !ok {
		return in, false
	}
	return out, true
}

// add records a mapping from the input span [opos, oend) to the output span
// [npos, nend).
func (m Map) add(opos, oend, npos, nend int) {
	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}
// Compute constructs a location mapping from input to output. An error is
// reported if any of the tokens of output cannot be mapped.
// Both texts must scan to the same sequence of token types; only the
// positions may differ (e.g. the same Go source before and after gofmt).
func Compute(input, output []byte) (Map, error) {
	itok := tokenize(input)
	otok := tokenize(output)
	if len(itok) != len(otok) {
		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
	}
	m := make(Map)
	for i, ti := range itok {
		to := otok[i]
		if ti.Token != to.Token {
			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
		}
		m.add(ti.pos, ti.end, to.pos, to.end)
	}
	return m, nil
}
// tokinfo records the span and type of a source token.
type tokinfo struct {
	pos, end int // 0-based byte offsets into the source
	token.Token
}

// tokenize scans src as Go source (including comments) and returns one
// tokinfo per token, ending with the EOF token. SEMICOLON tokens are
// skipped entirely, as their positions do not survive reformatting.
func tokenize(src []byte) []tokinfo {
	fs := token.NewFileSet()
	var s scanner.Scanner
	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
	var info []tokinfo
	for {
		pos, next, lit := s.Scan()
		switch next {
		case token.SEMICOLON:
			continue
		}
		info = append(info, tokinfo{
			// token.Pos is 1-based (file base); convert to 0-based offsets.
			pos:   int(pos - 1),
			end:   int(pos + token.Pos(len(lit)) - 1),
			Token: next,
		})
		if next == token.EOF {
			break
		}
	}
	return info
} | vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go | 0.743354 | 0.485295 | remap.go | starcoder |
package fq12over6over2
// Fq2FallBack is the template text for the pure-Go fallbacks of the e2
// arithmetic primitives, used where no accelerated (ADX) backend applies;
// the square/mul ADX entry points deliberately panic in this variant.
const Fq2FallBack = `

func addE2(z, x, y *e2) {
	z.A0.Add(&x.A0, &y.A0)
	z.A1.Add(&x.A1, &y.A1)
}

func subE2(z, x, y *e2) {
	z.A0.Sub(&x.A0, &y.A0)
	z.A1.Sub(&x.A1, &y.A1)
}

func doubleE2(z, x *e2) {
	z.A0.Double(&x.A0)
	z.A1.Double(&x.A1)
}

func negE2(z, x *e2) {
	z.A0.Neg(&x.A0)
	z.A1.Neg(&x.A1)
}

func squareAdxE2(z, x *e2) {
	panic("not implemented")
}

func mulAdxE2(z, x, y *e2) {
	panic("not implemented")
}

`
// Fq2Common is the Go template for the portable (platform-independent)
// part of the e2 degree-2 extension field implementation.
//
// Fixes: several typos in the generated doc comments ("fasle" -> "false",
// "wether" -> "whether", "elmt" -> "element", missing "x" in the Sqrt
// comments) and the IsZero comment, which wrongly described Equal.
const Fq2Common = `
import (
	"math/big"
	"github.com/consensys/gurvy/{{toLower .CurveName}}/fp"
)
// e2 is a degree two finite field extension of fp.Element
type e2 struct {
	A0, A1 fp.Element
}
// Equal returns true if z equals x, false otherwise
func (z *e2) Equal(x *e2) bool {
	return z.A0.Equal(&x.A0) && z.A1.Equal(&x.A1)
}
// SetString sets a e2 element from strings
func (z *e2) SetString(s1, s2 string) *e2 {
	z.A0.SetString(s1)
	z.A1.SetString(s2)
	return z
}
// SetZero sets an e2 element to zero
func (z *e2) SetZero() *e2 {
	z.A0.SetZero()
	z.A1.SetZero()
	return z
}
// Set sets an e2 from x
func (z *e2) Set(x *e2) *e2 {
	z.A0 = x.A0
	z.A1 = x.A1
	return z
}
// SetOne sets z to 1 in Montgomery form and returns z
func (z *e2) SetOne() *e2 {
	z.A0.SetOne()
	z.A1.SetZero()
	return z
}
// SetRandom sets a0 and a1 to random values
func (z *e2) SetRandom() *e2 {
	z.A0.SetRandom()
	z.A1.SetRandom()
	return z
}
// IsZero returns true if z is zero, false otherwise
func (z *e2) IsZero() bool {
	return z.A0.IsZero() && z.A1.IsZero()
}
// Add adds two elements of e2
func (z *e2) Add(x, y *e2) *e2 {
	addE2(z, x, y)
	return z
}
// Sub two elements of e2
func (z *e2) Sub(x, y *e2) *e2 {
	subE2(z, x, y)
	return z
}
// Double doubles an e2 element
func (z *e2) Double(x *e2) *e2 {
	doubleE2(z, x)
	return z
}
// Neg negates an e2 element
func (z *e2) Neg(x *e2) *e2 {
	negE2(z, x)
	return z
}
// String implements Stringer interface for fancy printing
func (z *e2) String() string {
	return (z.A0.String() + "+" + z.A1.String() + "*u")
}
// ToMont converts to mont form
func (z *e2) ToMont() *e2 {
	z.A0.ToMont()
	z.A1.ToMont()
	return z
}
// FromMont converts from mont form
func (z *e2) FromMont() *e2 {
	z.A0.FromMont()
	z.A1.FromMont()
	return z
}
// MulByElement multiplies an element in e2 by an element in fp
func (z *e2) MulByElement(x *e2, y *fp.Element) *e2 {
	var yCopy fp.Element
	yCopy.Set(y)
	z.A0.Mul(&x.A0, &yCopy)
	z.A1.Mul(&x.A1, &yCopy)
	return z
}
// Conjugate conjugates an element in e2
func (z *e2) Conjugate(x *e2) *e2 {
	z.A0 = x.A0
	z.A1.Neg(&x.A1)
	return z
}
// Legendre returns the Legendre symbol of z
func (z *e2) Legendre() int {
	var n fp.Element
	z.norm(&n)
	return n.Legendre()
}
// Exp sets z=x**e and returns it
func (z *e2) Exp(x *e2, e big.Int) *e2 {
	var res e2
	res.SetOne()
	b := e.Bytes()
	for i := range b {
		w := b[i]
		mask := byte(0x80)
		for j := 7; j >= 0; j-- {
			res.Square(&res)
			if (w&mask)>>j != 0 {
				res.Mul(&res, x)
			}
			mask = mask >> 1
		}
	}
	z.Set(&res)
	return z
}
{{if eq .PMod4 3 }}
// Sqrt sets z to the square root of x and returns z
// The function does not test whether the square root
// exists or not, it's up to the caller to call
// Legendre beforehand.
// cf https://eprint.iacr.org/2012/685.pdf (algo 9)
func (z *e2) Sqrt(x *e2) *e2 {
	var a1, alpha, b, x0, minusone e2
	var e big.Int
	minusone.SetOne().Neg(&minusone)
	q := fp.Modulus()
	tmp := big.NewInt(3)
	e.Set(q).Sub(&e, tmp).Rsh(&e, 2)
	a1.Exp(x, e)
	alpha.Square(&a1).
		Mul(&alpha, x)
	x0.Mul(x, &a1)
	if alpha.Equal(&minusone) {
		var c fp.Element
		c.Set(&x0.A0)
		z.A0.Neg(&x0.A1)
		z.A1.Set(&c)
		return z
	}
	a1.SetOne()
	b.Add(&a1, &alpha)
	tmp.SetUint64(1)
	e.Set(q).Sub(&e, tmp).Rsh(&e, 1)
	b.Exp(&b, e).Mul(&x0, &b)
	z.Set(&b)
	return z
}
{{else }}
// Sqrt sets z to the square root of x and returns z
// The function does not test whether the square root
// exists or not, it's up to the caller to call
// Legendre beforehand.
// cf https://eprint.iacr.org/2012/685.pdf (algo 10)
func (z *e2) Sqrt(x *e2) *e2 {
	// precomputation
	var b, c, d, e, f, x0 e2
	var _b, o fp.Element
	c.SetOne()
	for c.Legendre() == 1 {
		c.SetRandom()
	}
	q := fp.Modulus()
	var exp, one big.Int
	one.SetUint64(1)
	exp.Set(q).Sub(&exp, &one).Rsh(&exp, 1)
	d.Exp(&c, exp)
	e.Mul(&d, &c).Inverse(&e)
	f.Mul(&d, &c).Square(&f)
	// computation
	exp.Rsh(&exp, 1)
	b.Exp(x, exp)
	b.norm(&_b)
	o.SetOne()
	if _b.Equal(&o) {
		x0.Square(&b).Mul(&x0, x)
		_b.Set(&x0.A0).Sqrt(&_b)
		z.Conjugate(&b).MulByElement(z, &_b)
		return z
	}
	x0.Square(&b).Mul(&x0, x).Mul(&x0, &f)
	_b.Set(&x0.A0).Sqrt(&_b)
	z.Conjugate(&b).MulByElement(z, &_b).Mul(z, &e)
	return z
}
{{end}}
`
// Fq2Amd64 is the Go template for the amd64 declarations of the e2
// arithmetic: ADX capability detection, the modulus constants consumed by
// the assembly, and the //go:noescape stubs. It was the only template
// const in this file lacking a doc comment; also fixes the "montgommery"
// typo in a generated comment.
const Fq2Amd64 = `
import "golang.org/x/sys/cpu"
// supportAdx will be set only on amd64 that has MULX and ADDX instructions
var (
	supportAdx = cpu.X86.HasADX && cpu.X86.HasBMI2
	_ = supportAdx // used in asm
)
// q (modulus)
var qe2 = [{{.NbWords}}]uint64{
	{{- range $i := .NbWordsIndexesFull}}
	{{index $.Q $i}},{{end}}
}
// q'[0], see Montgomery multiplication algorithm
var (
	qe2Inv0 uint64 = {{index $.QInverse 0}}
	_ = qe2Inv0 // used in asm
)
//go:noescape
func addE2(res,x,y *e2)
//go:noescape
func subE2(res,x,y *e2)
//go:noescape
func doubleE2(res,x *e2)
//go:noescape
func negE2(res,x *e2)
{{if eq .CurveName "bn256"}}
//go:noescape
func mulNonResE2(res, x *e2)
//go:noescape
func squareAdxE2(res, x *e2)
//go:noescape
func mulAdxE2(res, x, y *e2)
// MulByNonResidue multiplies a e2 by (9,1)
func (z *e2) MulByNonResidue(x *e2) *e2 {
	mulNonResE2(z, x)
	return z
}
// Mul sets z to the e2-product of x,y, returns z
func (z *e2) Mul(x, y *e2) *e2 {
	mulAdxE2(z, x, y)
	return z
}
// Square sets z to the e2-product of x,x, returns z
func (z *e2) Square(x *e2) *e2 {
	squareAdxE2(z, x)
	return z
}
{{end}}
`
package cmd
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/cobra"
)
// queryFPRCmd implements `utils query-fpr`: it evaluates the Chernoff
// upper bound on a query's false positive rate and prints it.
var queryFPRCmd = &cobra.Command{
	Use:   "query-fpr",
	Short: "Compute the maximal false positive rate of a query",
	Long: `Compute the maximal false positive rate of a query
Solomon and Kingsford apply a Chernoff bound and show that the
false positive probability for a query is:
fpr ≤ exp( -n(t-f)^2 / (2(1-f)) )
Where:
f, the false positive rate of the bloom filters
t, the minimal proportion of matched k-mers and unique k-mers of a query
n, the number of unique k-mers of the query
Reference:
1. SBT: https://doi.org/10.1038/nbt.3442
2. COBS: https://arxiv.org/abs/1905.09624v2
`,
	Run: func(cmd *cobra.Command, args []string) {
		opt := getOptions(cmd)

		// Read and validate the flags.
		fpRate := getFlagPositiveFloat64(cmd, "false-positive-rate")
		if fpRate >= 1 {
			checkError(fmt.Errorf("value of -f/--false-positive-rate too big: %f", fpRate))
		}
		minQCov := getFlagFloat64(cmd, "min-query-cov")
		if minQCov < 0 || minQCov > 1 {
			checkError(fmt.Errorf("value of -t/--min-query-cov should be in range [0, 1]"))
		}
		numKmers := getFlagPositiveInt(cmd, "num-kmers")
		outPath := getFlagString(cmd, "out-prefix")

		outfh, gw, w, err := outStream(outPath, strings.HasSuffix(strings.ToLower(outPath), ".gz"), opt.CompressionLevel)
		checkError(err)
		defer func() {
			outfh.Flush()
			if gw != nil {
				gw.Close()
			}
			w.Close()
		}()

		/*
			p, fpr of single bloom filter.
			k, threshold of query coverage.
			l, number of k-mers.
			import math
			fpr = lambda p,k,l: math.exp(-l * (k - p) * (k - p) / 2 / (1 - p))
			fpr(0.3, 0.8, 60)
		*/
		fmt.Fprintf(outfh, "%s\n", strconv.FormatFloat(maxFPR(fpRate, minQCov, numKmers), 'e', 4, 64))
	},
}
func init() {
utilsCmd.AddCommand(queryFPRCmd)
queryFPRCmd.Flags().StringP("out-prefix", "o", "-", formatFlagUsage(`Out file prefix ("-" for stdout).`))
queryFPRCmd.Flags().Float64P("false-positive-rate", "f", 0.3,
formatFlagUsage(`False positive rate of the bloom filters in the database. range: (0, 1)`))
queryFPRCmd.Flags().Float64P("min-query-cov", "t", 0.55,
formatFlagUsage(`Minimal query coverage, i.e., proportion of matched k-mers and unique k-mers of a query. range: [0, 1]`))
queryFPRCmd.Flags().IntP("num-kmers", "n", 80, formatFlagUsage("Number of unique k-mers of the query."))
} | kmcp/cmd/query-fpr.go | 0.675122 | 0.403273 | query-fpr.go | starcoder |
package vfilter
import (
"image"
"sync"
"github.com/emer/etable/etensor"
"github.com/emer/vision/nproc"
)
// Conv performs convolution of filter over img into out.
// img *must* have border (padding) so that filters are
// applied without any bounds checking -- wrapping etc is all
// done in the padding process, which is much more efficient.
// Computation is parallel over the different filter types
// (outer dim of flt) as that is most memory efficient.
// img must be a 2D tensor of image values (convert RGB to grey first).
// Everything must be organized row major as etensor default.
// Out shape dims are: Y, X, Polarity (2), Angle
// where the 2 polarities (on, off) hold the positive and
// negative filter responses, respectively.
func Conv(geom *Geom, flt *etensor.Float32, img, out *etensor.Float32, gain float32) {
	numFilt := flt.Dim(0)
	filtY := flt.Dim(1)
	filtX := flt.Dim(2)
	geom.FiltSz = image.Point{filtX, filtY}
	geom.UpdtFilt()

	imgSz := image.Point{img.Dim(1), img.Dim(0)}
	geom.SetSize(imgSz)
	outShape := []int{int(geom.Out.Y), int(geom.Out.X), 2, numFilt}
	if !etensor.EqualInts(outShape, out.Shp) {
		out.SetShape(outShape, nil, []string{"Y", "X", "Polarity", "Angle"})
	}

	// Split the filters across worker goroutines.
	ncpu := nproc.NumCPU()
	nthrs, nper, rmdr := nproc.ThreadNs(ncpu, numFilt)
	var wg sync.WaitGroup
	for th := 0; th < nthrs; th++ {
		wg.Add(1)
		go convThr(&wg, geom, th*nper, nper, flt, img, out, gain)
	}
	if rmdr > 0 {
		wg.Add(1)
		go convThr(&wg, geom, nthrs*nper, rmdr, flt, img, out, gain)
	}
	wg.Wait()
}
// convThr is per-thread implementation
func convThr(wg *sync.WaitGroup, geom *Geom, fno, nf int, flt *etensor.Float32, img, out *etensor.Float32, gain float32) {
ist := geom.Border.Sub(geom.FiltLt)
fsz := int(geom.FiltSz.Y) * int(geom.FiltSz.X)
for fi := 0; fi < nf; fi++ {
f := fno + fi
fst := f * fsz
for y := 0; y < geom.Out.Y; y++ {
iy := int(ist.Y + y*geom.Spacing.Y)
for x := 0; x < geom.Out.X; x++ {
ix := ist.X + x*geom.Spacing.X
sum := float32(0)
fi := 0
for fy := 0; fy < geom.FiltSz.Y; fy++ {
for fx := 0; fx < geom.FiltSz.X; fx++ {
iv := img.Value([]int{iy + fy, ix + fx})
fv := flt.Values[fst+fi]
sum += iv * fv
fi++
}
}
sum *= gain
if sum > 0 {
out.Set([]int{y, x, 0, f}, sum)
out.Set([]int{y, x, 1, f}, float32(0))
} else {
out.Set([]int{y, x, 0, f}, float32(0))
out.Set([]int{y, x, 1, f}, -sum)
}
}
}
}
wg.Done()
} | vfilter/conv.go | 0.592902 | 0.426799 | conv.go | starcoder |
package models
import (
"math"
"time"
)
// Position contains positional data of a vehicle.
//
// NOTE(review): the field comments below quote a MAVLink-style wire
// scaling (deg * 1E7, millimeters, m/s * 100, deg * 100), but the
// accessors contradict it: LatInt/LonInt multiply by 1e7 (so Lat/Lon are
// stored in plain degrees), SetAlt documents meters, and Translate adds
// meters directly to Alt/RelAlt. Confirm which convention callers rely on.
type Position struct {
	Lat float64 `json:"lat"` // Latitude in degrees * 1E7
	Lon float64 `json:"lon"` // Longitude in degrees * 1E7
	Alt float64 `json:"alt"` // Altitude in millimeters
	RelAlt float64 `json:"relAlt"` // Altitude above ground in millimeters
	Vx float64 `json:"vx"` // Ground X Speed (Latitude, positive north) in m/s * 100
	Vy float64 `json:"vy"` // Ground Y Speed (Longitude, positive east) in m/s * 100
	Vz float64 `json:"vz"` // Ground Z Speed (Altitude, positive down) in m/s * 100
	Hdg float64 `json:"hdg"` // Vehicle heading (yaw angle) in degrees * 100. If unknown, set to: UINT16_MAX
	Timestamp time.Time `json:"timestamp"` // time the sample was created (set by the constructors)
}
// NewPosition creates and returns a new position model, stamped with the
// current time.
func NewPosition(lat, lon, alt, relAlt, vx, vy, vz, hdg float64) Position {
	return Position{
		Lat:       lat,
		Lon:       lon,
		Alt:       alt,
		RelAlt:    relAlt,
		Vx:        vx,
		Vy:        vy,
		Vz:        vz,
		Hdg:       hdg,
		Timestamp: time.Now(),
	}
}
// NewPoint creates a position carrying only lat, lon and relAlt, stamped
// with the current time; all other fields stay zero.
func NewPoint(lat, lon, relAlt float64) Position {
	p := Position{Lat: lat, Lon: lon, RelAlt: relAlt}
	p.Timestamp = time.Now()
	return p
}
// LatInt returns the position's latitude value as an int32 in deg * 1e7
func (pos Position) LatInt() int32 {
	return int32(pos.Lat * 1e7)
}
// SetLat sets the position's latitude value in deg
func (pos *Position) SetLat(val float64) {
	pos.Lat = val
}
// LonInt returns the position's longitude value as an int32 in deg * 1e7
func (pos Position) LonInt() int32 {
	return int32(pos.Lon * 1e7)
}
// SetLon sets the position's longitude value in deg
func (pos *Position) SetLon(val float64) {
	pos.Lon = val
}
// SetAlt sets the position's altitude value in m
func (pos *Position) SetAlt(val float64) {
	pos.Alt = val
}
// SetRelAlt sets the position's relative altitude to the ground in m
func (pos *Position) SetRelAlt(val float64) {
	pos.RelAlt = val
}
// SetVx sets the position's latitudal speed in m/s
func (pos *Position) SetVx(val float64) {
	pos.Vx = val
}
// SetVy sets the position's longitudal speed in m/s
func (pos *Position) SetVy(val float64) {
	pos.Vy = val
}
// SetVz sets the position's vertical speed in m/s
func (pos *Position) SetVz(val float64) {
	pos.Vz = val
}
// SetHdg sets the position's heading value in deg
func (pos *Position) SetHdg(val float64) {
	pos.Hdg = val
}
// Translate returns a copy of the position shifted by x meters east
// (longitude), y meters north (latitude) and z meters up (altitude),
// using a small-offset spherical-earth approximation.
func (pos Position) Translate(x float64, y float64, z float64) Position {
	result := pos
	if x != 0 || y != 0 {
		rEarth := float64(6371000) // mean earth radius in meters
		rad := math.Pi / 180
		deg := 180 / math.Pi
		result.Lat = pos.Lat + ((y / rEarth) * deg)
		latRad := result.Lat * rad
		result.Lon = pos.Lon + ((x / rEarth) * deg / math.Cos(latRad))
	}
	if z != 0 {
		result.Alt = pos.Alt + z
		result.RelAlt = pos.RelAlt + z
	}
	return result
}
// Diff returns the (x, y, z) offset in meters from pos to p2: x east
// (from the longitude difference), y north (from the latitude
// difference), z up (altitude difference), using the same spherical-earth
// approximation as Translate.
//
// Bug fix: the original guarded the y (north) component with `dx != 0`
// and the x (east) component with `dy != 0` -- the conditions were
// swapped, so a purely-north or purely-east offset reported 0 for the
// axis that actually moved. Both components are now always computed;
// when the corresponding delta is 0 the result is 0 anyway.
func (pos Position) Diff(p2 Position) (float64, float64, float64) {
	dLon := p2.Lon - pos.Lon
	dLat := p2.Lat - pos.Lat
	dz := p2.Alt - pos.Alt

	rEarth := float64(6371000) // mean earth radius in meters
	rad := math.Pi / 180

	y := dLat * rad * rEarth
	x := dLon * math.Cos(pos.Lat*rad) * rad * rEarth
	return x, y, dz
}
// Distance calculates the distance between two positions
func (pos Position) Distance(p2 Position) float64 {
x, y, z := pos.Diff(p2)
return math.Sqrt(math.Sqrt(x*x+y*y) + z*z)
} | models/position.go | 0.873552 | 0.633793 | position.go | starcoder |
package bow
import (
"github.com/apache/arrow/go/arrow"
)
// Type enumerates the data types a bow column can hold.
type Type int
// How to add a Type:
// - Seek corresponding arrow.DataType and add it in `mapArrowToBowTypes`
// - add a convert function with desired logic and add case in other conversion func
// - add necessary case in buffer file
// - complete GetValue bow method
const (
	// Unknown is placed first so that it is the zero value
	// when allocating Type or []Type
	Unknown = Type(iota)
	// Float64 and the following types are native arrow types supported by bow
	Float64
	Int64
	Boolean
	String
	// InputDependent is used in transformations like aggregation
	// when the output type is inferred from the input type
	InputDependent
	// IteratorDependent is used in transformations like aggregation
	// when the output type is inferred from the iterator type
	IteratorDependent
)
var (
	// mapArrowToBowTypes is the single source of truth linking arrow
	// data types to bow types; the other maps below are derived from it.
	mapArrowToBowTypes = map[arrow.DataType]Type{
		arrow.PrimitiveTypes.Float64:    Float64,
		arrow.PrimitiveTypes.Int64:      Int64,
		arrow.FixedWidthTypes.Boolean:   Boolean,
		arrow.BinaryTypes.String:        String,
	}
	// mapBowToArrowTypes is the inverse of mapArrowToBowTypes,
	// built once at package init.
	mapBowToArrowTypes = func() map[Type]arrow.DataType {
		res := make(map[Type]arrow.DataType)
		for arrowDataType, bowType := range mapArrowToBowTypes {
			res[bowType] = arrowDataType
		}
		return res
	}()
	// mapArrowNameToBowTypes indexes bow types by the arrow type name.
	mapArrowNameToBowTypes = func() map[string]Type {
		res := make(map[string]Type)
		for arrowDataType, bowType := range mapArrowToBowTypes {
			res[arrowDataType.Name()] = bowType
		}
		return res
	}()
	// allType caches the concrete types (every Type strictly between
	// Unknown and InputDependent, i.e. Float64..String).
	allType = func() []Type {
		res := make([]Type, InputDependent-1)
		for typ := Type(1); typ < InputDependent; typ++ {
			res[typ-1] = typ
		}
		return res
	}()
)
// ArrowType returns the arrow.DataType corresponding to t,
// or nil when t has no concrete arrow equivalent.
func (t Type) ArrowType() arrow.DataType {
	return mapBowToArrowTypes[t]
}
// Convert coerces i to the concrete Go representation of t, returning
// nil when the conversion is impossible or t is not a concrete type.
func (t Type) Convert(i interface{}) interface{} {
	switch t {
	case Float64:
		if v, ok := ToFloat64(i); ok {
			return v
		}
	case Int64:
		if v, ok := ToInt64(i); ok {
			return v
		}
	case Boolean:
		if v, ok := ToBoolean(i); ok {
			return v
		}
	case String:
		if v, ok := ToString(i); ok {
			return v
		}
	}
	return nil
}
// IsSupported reports whether t maps to a concrete arrow type currently
// handled by Bow (i.e. is convertible to a concrete Go value).
func (t Type) IsSupported() bool {
	_, supported := mapBowToArrowTypes[t]
	return supported
}
// String implements fmt.Stringer; unsupported types print as "undefined".
func (t Type) String() string {
	if dt, ok := mapBowToArrowTypes[t]; ok {
		return dt.Name()
	}
	return "undefined"
}
// getBowTypeFromArrowName maps an arrow type name to the matching bow
// Type; unrecognized names yield Unknown.
func getBowTypeFromArrowName(arrowName string) Type {
	// Unknown is the zero value of Type, so a missed map lookup
	// already produces Unknown.
	return mapArrowNameToBowTypes[arrowName]
}

// getBowTypeFromArrowType maps an arrow.DataType to the matching bow
// Type; unrecognized types yield Unknown (the zero value of Type).
func getBowTypeFromArrowType(arrowType arrow.DataType) Type {
	return mapArrowToBowTypes[arrowType]
}
func GetAllTypes() []Type {
res := make([]Type, len(allType))
copy(res, allType)
return res
} | bowtypes.go | 0.671686 | 0.434821 | bowtypes.go | starcoder |
// SPDX-License-Identifier: MIT
// see https://spdx.org/licenses/
package tapfile
import (
"math"
)
/*
TAP file format definitions.
The TAP file contains an arbitrary number of data blocks. The data is organized into a data block preceded
by a header of an appropriate type. Each header and data block is preceded by a Block_Length block.
Example for a BASIC program:
Block_Length
Block_BASICHeader
Block_Length
Block_Data (containing the BASIC program)
Example for a machine code program:
Block_Length
Block_ByteHeader
Block_Length
Block_Data (containing the machine code, e.g. the contents of a .bin-file)
*/
// Values of the `datatype` byte in a header block.
const tapBASICHeader uint8 = 0
const tapNumArrayHeader uint8 = 1
const tapStringArrayHeader uint8 = 2
const tapBytesHeader uint8 = 3
// Values of the `flag` byte: 0 marks a standard header block, 0xff a data block.
const tapHeaderBlock uint8 = 0
const tapDataBlock uint8 = 0xff
// tapBlockMaxLength is the largest payload a block can carry; the 16-bit
// length leaves 2 bytes of headroom (presumably for the flag and checksum
// bytes -- TODO confirm).
const tapBlockMaxLength uint16 = math.MaxUint16 - 2
// tapUnused is the filler value written into unused 16-bit header fields.
const tapUnused uint16 = 32768
// Block_Length is the length block that precedes every header and data
// block, containing the length of the following block.
type Block_Length struct {
	length uint16 // length of the following data block, not counting this block
}
// Block_BASICHeader is a program header (or program autostart header)
// for storing BASIC programs.
type Block_BASICHeader struct {
	flag uint8 // always 0. Byte indicating a standard ROM loading header
	datatype uint8 // always 0: Byte indicating a program header
	filename [10]byte // loading name of the program. filled with spaces (CHR$(32))
	datalength uint16 // length of the following data (after the header) = length of BASIC program + variables
	autostartline uint16 // LINE parameter of SAVE command. Value 32768 means "no auto-loading"; 0..9999 are valid line numbers
	programlength uint16 // length of BASIC program; remaining bytes ([data length] - [program length]) = offset of variables
	checksum uint8 // simply all bytes (including flag byte) XORed
}
// Block_NumArrayHeader is a numeric data array header, for storing
// numeric arrays.
type Block_NumArrayHeader struct {
	flag uint8 // always 0. Byte indicating a standard ROM loading header
	datatype uint8 // always 1: Byte indicating a numeric array
	filename [10]byte // loading name of the program. filled with spaces (CHR$(32))
	datalength uint16 // length of the following data (after the header) = length of number array * 5 +3
	unused1 uint8
	variablename uint8 // (1..26 meaning A..Z) +128
	unused2 uint16 // = 32768
	checksum uint8 // simply all bytes (including flag byte) XORed
}
// Block_StringArrayHeader is an alphanumeric data array header, for
// storing string arrays.
type Block_StringArrayHeader struct {
	flag uint8 // always 0. Byte indicating a standard ROM loading header
	datatype uint8 // always 2: Byte indicating an alphanumeric array
	filename [10]byte // loading name of the program. filled with spaces (CHR$(32))
	datalength uint16 // length of the following data (after the header) = length of string array +3
	unused1 uint8
	variablename uint8 // (1..26 meaning A..Z) +192
	unused2 uint16 // = 32768
	checksum uint8 // simply all bytes (including flag byte) XORed
}
// Block_BytesHeader is a bytes header, for storing machine code or a
// SCREEN$ dump.
type Block_BytesHeader struct {
	flag uint8 // always 0. Byte indicating a standard ROM loading header
	datatype uint8 // always 3: Byte indicating a bytes header
	filename [10]byte // loading name of the program. filled with spaces (CHR$(32))
	datalength uint16 // length of the following data (after the header), in case of a SCREEN$ header = 6912
	startaddress uint16 // start address of the code in the Z80 address space, in case of a SCREEN$ header = 16384
	unused uint16 // = 32768
	checksum uint8 // simply all bytes (including flag byte) XORed
}
// Block_Data carries the payload following a header block.
type Block_Data struct {
	flag uint8 // always 255 indicating a standard ROM loading data block or any other value to build a custom data block
	datablock []byte // the essential data (may be empty)
	checksum uint8 // simply all bytes (including flag byte) XORed
}
type TAP_BIN_File struct {
headerlength Block_Length
header Block_BytesHeader
bindatalength Block_Length
bindata Block_Data
} | tapfile/tapfile.go | 0.584153 | 0.477189 | tapfile.go | starcoder |
package rui
// ResizeEvent is the constant for the "resize-event" property tag.
// The "resize-event" is fired when the view changes its size.
// The main listener format:
//
//	func(View, Frame)
//
// The additional listener formats:
//
//	func(Frame), func(View), and func()
const ResizeEvent = "resize-event"
// onResize records the view's new geometry in view.frame and then
// notifies every registered "resize-event" listener with it.
func (view *viewData) onResize(self View, x, y, width, height float64) {
	frame := &view.frame
	frame.Left = x
	frame.Top = y
	frame.Width = width
	frame.Height = height
	for _, fn := range GetResizeListeners(view, "") {
		fn(self, view.frame)
	}
}
// onItemResize is intentionally a no-op for a plain view; presumably
// item-aware container types handle per-item geometry themselves --
// verify against the container implementations.
func (view *viewData) onItemResize(self View, index int, x, y, width, height float64) {
}
// setFrameListener normalizes value into the canonical listener slice
// []func(View, Frame) and stores it in view.properties under tag.
//
// Accepted values: a single listener or a slice of listeners with any of
// the signatures func(View, Frame), func(Frame), func(View) or func();
// a []interface{} may mix these forms. A nil value or an empty slice
// removes the property. On an incompatible value (or a nil slice
// element) notCompatibleType is called and false is returned.
//
// NOTE(review): the []func(View, Frame) case does not reject nil
// elements, unlike every other slice case -- a stored nil listener would
// panic when invoked. Confirm whether that asymmetry is intentional.
func (view *viewData) setFrameListener(tag string, value interface{}) bool {
	if value == nil {
		delete(view.properties, tag)
		return true
	}

	switch value := value.(type) {
	case func(View, Frame):
		// Already canonical: store as a one-element slice.
		view.properties[tag] = []func(View, Frame){value}

	case []func(View, Frame):
		if len(value) > 0 {
			view.properties[tag] = value
		} else {
			delete(view.properties, tag)
			return true
		}

	case func(Frame):
		// Wrap to the canonical signature, dropping the View argument.
		fn := func(view View, frame Frame) {
			value(frame)
		}
		view.properties[tag] = []func(View, Frame){fn}

	case []func(Frame):
		count := len(value)
		if count == 0 {
			delete(view.properties, tag)
			return true
		}
		listeners := make([]func(View, Frame), count)
		for i, val := range value {
			if val == nil {
				notCompatibleType(tag, val)
				return false
			}
			listeners[i] = func(view View, frame Frame) {
				val(frame)
			}
		}
		view.properties[tag] = listeners

	case func(View):
		// Wrap to the canonical signature, dropping the Frame argument.
		fn := func(view View, frame Frame) {
			value(view)
		}
		view.properties[tag] = []func(View, Frame){fn}

	case []func(View):
		count := len(value)
		if count == 0 {
			delete(view.properties, tag)
			return true
		}
		listeners := make([]func(View, Frame), count)
		for i, val := range value {
			if val == nil {
				notCompatibleType(tag, val)
				return false
			}
			listeners[i] = func(view View, frame Frame) {
				val(view)
			}
		}
		view.properties[tag] = listeners

	case func():
		// Wrap to the canonical signature, dropping both arguments.
		fn := func(view View, frame Frame) {
			value()
		}
		view.properties[tag] = []func(View, Frame){fn}

	case []func():
		count := len(value)
		if count == 0 {
			delete(view.properties, tag)
			return true
		}
		listeners := make([]func(View, Frame), count)
		for i, val := range value {
			if val == nil {
				notCompatibleType(tag, val)
				return false
			}
			listeners[i] = func(view View, frame Frame) {
				val()
			}
		}
		view.properties[tag] = listeners

	case []interface{}:
		// Heterogeneous slice: each element must be one of the four
		// accepted listener signatures.
		count := len(value)
		if count == 0 {
			delete(view.properties, tag)
			return true
		}
		listeners := make([]func(View, Frame), count)
		for i, val := range value {
			if val == nil {
				notCompatibleType(tag, val)
				return false
			}
			switch val := val.(type) {
			case func(View, Frame):
				listeners[i] = val
			case func(Frame):
				listeners[i] = func(view View, frame Frame) {
					val(frame)
				}
			case func(View):
				listeners[i] = func(view View, frame Frame) {
					val(view)
				}
			case func():
				listeners[i] = func(view View, frame Frame) {
					val()
				}
			default:
				notCompatibleType(tag, val)
				return false
			}
		}
		view.properties[tag] = listeners

	default:
		notCompatibleType(tag, value)
		return false
	}
	return true
}
// setNoResizeEvent suppresses resize-event delivery for this view.
func (view *viewData) setNoResizeEvent() {
	view.noResizeEvent = true
}
// isNoResizeEvent reports whether resize events are suppressed for this
// view.
func (view *viewData) isNoResizeEvent() bool {
	return view.noResizeEvent
}
// isNoResizeEvent reports whether resize events are suppressed for this
// container or for any of its ancestors.
func (container *viewsContainerData) isNoResizeEvent() bool {
	if container.noResizeEvent {
		return true
	}
	parent := container.Parent()
	if parent == nil {
		return false
	}
	return parent.isNoResizeEvent()
}
// Frame returns the view's current viewport geometry as last recorded by
// onResize.
func (view *viewData) Frame() Frame {
	return view.frame
}
// GetViewFrame returns the size and location of the view's viewport.
// If subviewID is not empty, the subview with that ID is used instead;
// a zero Frame is returned when no view can be resolved.
func GetViewFrame(view View, subviewID string) Frame {
	target := view
	if subviewID != "" {
		target = ViewByID(view, subviewID)
	}
	if target == nil {
		return Frame{}
	}
	return target.Frame()
}
// GetResizeListeners returns the list of "resize-event" listeners. If there are no listeners then the empty list is returned
// If the second argument (subviewID) is "" then the listeners list of the first argument (view) is returned
func GetResizeListeners(view View, subviewID string) []func(View, Frame) {
if subviewID != "" {
view = ViewByID(view, subviewID)
}
if view != nil {
if value := view.Get(ResizeEvent); value != nil {
if result, ok := value.([]func(View, Frame)); ok {
return result
}
}
}
return []func(View, Frame){}
} | resizeEvent.go | 0.639398 | 0.448245 | resizeEvent.go | starcoder |
package server
import (
"errors"
"image"
)
// maxInt is the largest value representable by int on this platform.
const maxInt = int(^uint(0) >> 1)
// MaxRectsBin packs 2D rectangles into a fixed Width x Height area using
// the MAXRECTS heuristic: free space is tracked as a list of maximal
// free rectangles that are split around each insertion and pruned.
type MaxRectsBin struct {
	Width, Height int
	Padding int // extra pixels reserved on the right/bottom of each inserted rect
	usedRectangles []image.Rectangle // committed placements
	freeRectangles []image.Rectangle // maximal free rectangles (may overlap each other)
}
// NewBin returns an empty bin of the given size; the whole area starts
// out as a single free rectangle.
func NewBin(width, height, padding int) *MaxRectsBin {
	bin := &MaxRectsBin{
		Width:   width,
		Height:  height,
		Padding: padding,
	}
	bin.usedRectangles = make([]image.Rectangle, 0, 1)
	bin.freeRectangles = []image.Rectangle{image.Rect(0, 0, width, height)}
	return bin
}
// Insert places rect (enlarged by Padding) into the bin using the
// best-short-side-fit heuristic and returns its final position; an error
// is returned when the rectangle does not fit.
func (this *MaxRectsBin) Insert(rect image.Rectangle) (image.Rectangle, error) {
	w := rect.Dx() + this.Padding
	h := rect.Dy() + this.Padding
	placed, _, _ := this.FindPositionForNewNodeBestShortSideFit(w, h)
	if placed.Dx() == 0 {
		return placed, errors.New("Not enough space in atlas.")
	}
	this.placeRect(placed)
	// Strip the padding off again before reporting the position.
	placed.Max.X -= this.Padding
	placed.Max.Y -= this.Padding
	return placed, nil
}
// InsertArray packs all rects into the bin, greedily choosing at every
// round the rectangle with the globally best short-side fit. The result
// slice is index-aligned with rects. An error is returned as soon as
// some rectangle cannot be placed.
func (this *MaxRectsBin) InsertArray(rects []image.Rectangle) ([]image.Rectangle, error) {
	placed := make([]image.Rectangle, len(rects))
	for remaining := len(rects); remaining > 0; remaining-- {
		bestShort, bestLong := maxInt, maxInt
		bestIdx := -1
		var bestNode image.Rectangle

		for i, rect := range rects {
			if placed[i] != image.ZR {
				continue // already positioned in an earlier round
			}
			node, short, long := this.FindPositionForNewNodeBestShortSideFit(rect.Dx()+this.Padding, rect.Dy()+this.Padding)
			if short < bestShort || (short == bestShort && long < bestLong) {
				bestShort, bestLong = short, long
				bestNode = node
				bestIdx = i
			}
		}

		if bestIdx == -1 {
			return nil, errors.New("Not enough space in atlas.")
		}
		this.placeRect(bestNode)
		bestNode.Max.X -= this.Padding
		bestNode.Max.Y -= this.Padding
		placed[bestIdx] = bestNode
	}
	return placed, nil
}
// placeRect commits r: every pre-existing free rectangle intersecting r
// is split around it (the fragments are appended and not re-examined),
// redundant free rectangles are pruned, and r is recorded as used.
func (this *MaxRectsBin) placeRect(r image.Rectangle) {
	count := len(this.freeRectangles) // only scan the rects present before splitting
	for i := 0; i < count; i++ {
		if this.SplitFreeNode(this.freeRectangles[i], r) {
			this.freeRectangles = append(this.freeRectangles[:i], this.freeRectangles[i+1:]...)
			i--
			count--
		}
	}
	this.PruneFreeList()
	this.usedRectangles = append(this.usedRectangles, r)
}
// Occupancy returns the fraction of the bin's surface covered by used
// rectangles.
func (this *MaxRectsBin) Occupancy() float32 {
	var used uint64
	for _, r := range this.usedRectangles {
		used += uint64(r.Dx()) * uint64(r.Dy())
	}
	total := float64(this.Width * this.Height)
	return float32(float64(used) / total)
}
// PruneFreeList removes every free rectangle that is fully contained in
// another free rectangle, via O(n^2) pairwise containment checks.
func (this *MaxRectsBin) PruneFreeList() {
	/*
	/// Would be nice to do something like this, to avoid a Theta(n^2) loop through each pair.
	/// But unfortunately it doesn't quite cut it, since we also want to detect containment.
	/// Perhaps there's another way to do this faster than Theta(n^2).
	if (freeRectangles.size() > 0)
	clb::sort::QuickSort(&freeRectangles[0], freeRectangles.size(), NodeSortCmp);
	for(size_t i = 0; i < freeRectangles.size()-1; ++i)
	if (freeRectangles[i].x == freeRectangles[i+1].x &&
	freeRectangles[i].y == freeRectangles[i+1].y &&
	freeRectangles[i].width == freeRectangles[i+1].width &&
	freeRectangles[i].height == freeRectangles[i+1].height)
	{
	freeRectangles.erase(freeRectangles.begin() + i);
	--i;
	}
	*/
	/// Go through each pair and remove any rectangle that is redundant.
	for i := 0; i < len(this.freeRectangles); i++ {
		for j := i + 1; j < len(this.freeRectangles); j++ {
			// If rect i is contained in rect j, drop i and restart the
			// inner scan for the element that slid into slot i.
			if this.freeRectangles[i].In(this.freeRectangles[j]) {
				this.freeRectangles = append(this.freeRectangles[:i], this.freeRectangles[i+1:]...)
				i--
				break
			}
			// If rect j is contained in rect i, drop j and re-check the
			// element that slid into slot j.
			if this.freeRectangles[j].In(this.freeRectangles[i]) {
				this.freeRectangles = append(this.freeRectangles[:j], this.freeRectangles[j+1:]...)
				j--
			}
		}
	}
}
// SplitFreeNode splits freeNode around usedNode, appending up to four
// remainder rectangles (top/bottom/left/right of usedNode) to
// freeRectangles. It reports whether the two rectangles intersect, i.e.
// whether the caller must remove freeNode from the free list.
func (this *MaxRectsBin) SplitFreeNode(freeNode, usedNode image.Rectangle) bool {
	// Test with SAT if the rectangles even intersect.
	if usedNode.Min.X >= freeNode.Max.X || usedNode.Max.X <= freeNode.Min.X ||
		usedNode.Min.Y >= freeNode.Max.Y || usedNode.Max.Y <= freeNode.Min.Y {
		return false
	}
	if usedNode.Min.X < freeNode.Max.X && usedNode.Max.X > freeNode.Min.X {
		// New node at the top side of the used node.
		if usedNode.Min.Y > freeNode.Min.Y && usedNode.Min.Y < freeNode.Max.Y {
			newNode := freeNode
			newNode.Max.Y = usedNode.Min.Y
			this.freeRectangles = append(this.freeRectangles, newNode)
		}
		// New node at the bottom side of the used node.
		if usedNode.Max.Y < freeNode.Max.Y {
			newNode := freeNode
			newNode.Min.Y = usedNode.Max.Y
			this.freeRectangles = append(this.freeRectangles, newNode)
		}
	}
	if usedNode.Min.Y < freeNode.Max.Y && usedNode.Max.Y > freeNode.Min.Y {
		// New node at the left side of the used node.
		if usedNode.Min.X > freeNode.Min.X && usedNode.Min.X < freeNode.Max.X {
			newNode := freeNode
			newNode.Max.X = usedNode.Min.X
			this.freeRectangles = append(this.freeRectangles, newNode)
		}
		// New node at the right side of the used node.
		if usedNode.Max.X < freeNode.Max.X {
			newNode := freeNode
			newNode.Min.X = usedNode.Max.X
			this.freeRectangles = append(this.freeRectangles, newNode)
		}
	}
	return true
}
// abs returns the absolute value of x.
// Note: like any two's-complement abs, it overflows for the minimum int.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}

// min returns the smaller of x and y.
func min(x, y int) int {
	if x <= y {
		return x
	}
	return y
}

// max returns the larger of x and y.
func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}
// String renders the bin as ASCII art: one row per line, '#' for used
// cells and '0' for free ones.
func (this *MaxRectsBin) String() string {
	stride := this.Width + 1 // +1 for the newline terminating each row
	grid := make([]rune, stride*this.Height)
	for y := 0; y < this.Height; y++ {
		for x := 0; x < this.Width; x++ {
			grid[y*stride+x] = '0'
		}
		grid[y*stride+this.Width] = '\n'
	}
	for _, r := range this.usedRectangles {
		for y := r.Min.Y; y < r.Max.Y; y++ {
			for x := r.Min.X; x < r.Max.X; x++ {
				grid[y*stride+x] = '#'
			}
		}
	}
	return string(grid)
}
func (this *MaxRectsBin) FindPositionForNewNodeBestShortSideFit(width, height int) (bestNode image.Rectangle, bestShortSideFit, bestLongSideFit int) {
bestShortSideFit = maxInt
for _, r := range this.freeRectangles {
rW := r.Dx()
rH := r.Dy()
// Try to place the rectangle in upright (non-flipped) orientation.
if rW >= width && rH >= height {
leftoverHoriz := abs(rW - width)
leftoverVert := abs(rH - height)
shortSideFit := min(leftoverHoriz, leftoverVert)
longSideFit := max(leftoverHoriz, leftoverVert)
if shortSideFit < bestShortSideFit || (shortSideFit == bestShortSideFit && longSideFit < bestLongSideFit) {
bestNode.Min = r.Min
bestNode.Max.X = bestNode.Min.X + width
bestNode.Max.Y = bestNode.Min.Y + height
bestShortSideFit = shortSideFit
bestLongSideFit = longSideFit
}
}
/* Disable rotation
if (rW >= height && rH >= width)
{
flippedLeftoverHoriz := abs(rW - height);
flippedLeftoverVert := abs(rH - width);
flippedShortSideFit := min(flippedLeftoverHoriz, flippedLeftoverVert);
flippedLongSideFit := max(flippedLeftoverHoriz, flippedLeftoverVert);
if (flippedShortSideFit < bestShortSideFit || (flippedShortSideFit == bestShortSideFit && flippedLongSideFit < bestLongSideFit)) {
bestNode.x = freeRectangles[i].x;
bestNode.y = freeRectangles[i].y;
bestNode.width = height;
bestNode.height = width;
bestShortSideFit = flippedShortSideFit;
bestLongSideFit = flippedLongSideFit;
}
}
*/
}
return
} | server/MaxRectsBin.go | 0.718397 | 0.70797 | MaxRectsBin.go | starcoder |
package stats
import (
"fmt"
"golina/matrix"
"math"
)
// Independent Component Analysis
// https://en.wikipedia.org/wiki/Independent_component_analysis
// The following explanation is from wiki.
// Two assumptions:
// 1. The source signals are independent of each other.
// 2. The values in each source signal have non-Gaussian distributions.
// Three effects of mixing source signals:
// 1. Independence: As per assumption 1, the source signals are independent; however, their signal mixtures are not.
// This is because the signal mixtures share the same source signals.
// 2. Normality: According to the Central Limit Theorem, the distribution of a sum of independent random variables
// with finite variance tends towards a Gaussian distribution. Loosely speaking, a sum of two independent random
// variables usually has a distribution that is closer to Gaussian than any of the two original variables.
// Here we consider the value of each signal as the random variable.
// 3. Complexity: The temporal complexity of any signal mixture is greater than that of its simplest constituent
// source signal.
// FastICA (https://en.wikipedia.org/wiki/FastICA)
//
// FastICA runs the FastICA algorithm on dataSet (M samples x N observed
// signals; transposed internally to N x M).
//
//	C             - number of components to extract (must satisfy 0 <= C <= N;
//	                NOTE(review): C == 0 passes the guard and yields empty
//	                results - confirm whether that is intended)
//	tol, maxIter  - per-component convergence tolerance / iteration cap
//	whitening     - when true, the data is centered and whitened first
//	nonLinearFunc - contrast-function update step (e.g. FuncLogcosh)
//
// Returns the un-mixing matrix W and the estimated sources S (M x C);
// K (whitening matrix) and X (whitened data) are non-nil only when
// whitening is enabled.
func FastICA(C int, tol float64, maxIter int, whitening bool, nonLinearFunc func(w *matrix.Vector, X *matrix.Matrix) (wp *matrix.Vector), dataSet *matrix.Matrix) (W, S, K, X *matrix.Matrix) {
	dataSet = dataSet.T() // M x N -> N x M
	N, _ := dataSet.Dims()
	if C > N || C < 0 {
		panic("independent components should be less or equal to observations")
	}
	if whitening {
		// X: C x M, K: C x N
		X, K = PreWhitening(C, dataSet)
		// W: C x C
		W = CalW(C, tol, maxIter, nonLinearFunc, X)
		// S: M x C
		S = W.Mul(K).Mul(dataSet).T()
	} else {
		// W: C x N
		W = CalW(C, tol, maxIter, nonLinearFunc, dataSet)
		// S: M x C
		S = W.Mul(dataSet).T()
		K = nil
		X = nil
	}
	return
}
// PreWhitening centers dataSet (N observations x M samples) by subtracting
// each row's mean, then whitens it via the SVD of the centered data:
// K scales the first C right-singular directions by the inverse singular
// values, and X = K * data * sqrt(M) is the C x M whitened matrix
// (the sqrt(M) factor presumably normalizes variance — verify against the
// golina SVD convention).
func PreWhitening(C int, dataSet *matrix.Matrix) (X, K *matrix.Matrix) {
	// step 1: centering — remove the per-observation mean.
	N, M := dataSet.Dims()
	data := dataSet.Sub(dataSet.Mean(1).Tile(1, M))
	// step 2: whitening — build K from the SVD factors V (directions)
	// and D (singular values).
	_, D, V := matrix.SVD(data.T())
	K = matrix.ZeroMatrix(C, N)
	for i := range K.Data {
		for j := range K.Data[i] {
			K.Data[i][j] = V.Data[i][j] / D.Data[j][j]
		}
	}
	X = K.Mul(data).MulNum(math.Sqrt(float64(M)))
	return
}
// CalW estimates the C x N unmixing matrix W via FastICA's fixed-point
// iteration: each row starts random, is normalized to unit length, and is
// iterated with the supplied contrast-function step until its direction
// stops changing (|wp.w| close to 1) or maxIter is reached.
//
// NOTE(review): the decorrelation loop below runs j from 1 to i-2. Standard
// FastICA deflation subtracts the projections onto ALL previously extracted
// rows (j = 0 .. i-1), so this looks like an off-by-one — verify. Also the
// result of s.Add(...) is discarded; if golina's Vector.Add returns a new
// vector instead of mutating s in place, the decorrelation accumulates
// nothing — verify against the golina/matrix API.
func CalW(C int, tol float64, maxIter int, nonLinearFunc func(w *matrix.Vector, X *matrix.Matrix) (wp *matrix.Vector), dataSet *matrix.Matrix) *matrix.Matrix {
	// dataSet: N x M
	N, _ := dataSet.Dims()
	// w: 1 x N
	// W: C x N
	W := matrix.GenerateRandomMatrix(C, N)
	iter := make([]int, C)
	for i := 0; i < C; i++ {
		cnt := 0
		w := W.Row(i)
		w = w.MulNum(1. / w.Norm()) // normalize to unit length
		wp := w
		for {
			wp = nonLinearFunc(w, dataSet)
			// Decorrelate against previously extracted rows (see NOTE above).
			s := make(matrix.Vector, N)
			for j := 1; j < i-1; j++ {
				s.Add(W.Row(j).MulNum(wp.Dot(W.Row(j))))
			}
			wp = wp.Sub(&s)
			wp = wp.MulNum(1. / wp.Norm())
			// Convergence measure: direction change of the weight vector.
			lim := math.Abs(math.Abs(wp.Dot(w)) - 1)
			w = wp
			cnt++
			if lim < tol || cnt >= maxIter {
				iter[i] = cnt
				break
			}
		}
		W.Data[i] = *wp
	}
	fmt.Println("iteration times for each component: ", iter)
	return W
}
// FuncLogcosh is the standard FastICA update step using the log cosh
// contrast function. w is the current 1 x N weight row and X the N x M
// data; the returned (un-normalized) update is
// wp = ( X*g(w'X) - w*sum(g'(w'X)) ) / M, the sample estimate of
// E[X g(w'X)] - E[g'(w'X)] w.
func FuncLogcosh(w *matrix.Vector, X *matrix.Matrix) (wp *matrix.Vector) {
	N, M := X.Dims()
	// wtx: projection of each of the M samples onto w.
	wtx := w.ToMatrix(1, N).Mul(X).Row(0)
	// g / gg: first and second derivatives of log cosh at each sample.
	g, gg := make(matrix.Vector, M), make(matrix.Vector, M)
	for i := 0; i < M; i++ {
		g[i], gg[i] = logcosh(wtx.At(i))
	}
	wp = X.Mul(g.ToMatrix(M, 1)).Col(0).Sub(w.MulNum(gg.Sum())).MulNum(1. / float64(M))
	return
}
// logcosh evaluates the derivatives of the FastICA contrast
// f(u) = log(cosh(u)): g is the first derivative tanh(u) and gg the
// second derivative 1 - tanh(u)^2.
func logcosh(u float64) (g, gg float64) {
	t := math.Tanh(u)
	return t, 1 - t*t
}
// FuncExp is the FastICA update step using the Gaussian-based contrast
// f(u) = -exp(-u^2/2), generally the more robust choice. w is the current
// 1 x N weight row and X the N x M data; the returned (un-normalized)
// update is wp = ( X*g(w'X) - w*sum(g'(w'X)) ) / M.
func FuncExp(w *matrix.Vector, X *matrix.Matrix) (wp *matrix.Vector) {
	N, M := X.Dims()
	// wtx: projection of each of the M samples onto w.
	wtx := w.ToMatrix(1, N).Mul(X).Row(0)
	// g / gg: first and second derivatives of the contrast at each sample.
	g, gg := make(matrix.Vector, M), make(matrix.Vector, M)
	for i := 0; i < M; i++ {
		g[i], gg[i] = exp(wtx.At(i))
	}
	wp = X.Mul(g.ToMatrix(M, 1)).Col(0).Sub(w.MulNum(gg.Sum())).MulNum(1. / float64(M))
	return
}
// exp evaluates the derivatives of the robust FastICA contrast
// f(u) = -exp(-u^2/2): g is the first derivative u*exp(-u^2/2) and gg the
// second derivative (1-u^2)*exp(-u^2/2).
func exp(u float64) (g, gg float64) {
	e := math.Exp(-u * u / 2)
	return u * e, (1 - u*u) * e
}
package fib
// FibRecursive computes the nth Fibonacci number with the naive
// doubly-recursive definition. Runtime grows exponentially with n (two
// recursive calls per level), which is why benchmarks stop around n = 32;
// it exists only as a slow baseline for the other implementations.
func FibRecursive(n int) int {
	if n >= 2 {
		return FibRecursive(n-1) + FibRecursive(n-2)
	}
	return n
}
// FibRecursiveCache computes the nth Fibonacci number in linear time by
// memoizing intermediate values in a slice cache. The recursive helper walks
// down to the base cases and fills the cache on the way back up; the answer
// is the entry at index n.
//
// Fix: the original panicked for n == 0 because the helper unconditionally
// writes cache[1], which is out of range when the cache has length 1. The
// base cases are now answered directly before the cache is built.
func FibRecursiveCache(n int) int {
	if n < 2 {
		return n
	}
	cache := make([]int, n+1)
	fibRecursiveCache(n, &cache)
	return cache[n]
}

// fibRecursiveCache fills cache[0..n] with Fibonacci numbers, recursing down
// to the base cases first so each entry can be computed from the two below
// it. Requires len(*cache) >= n+1 and len(*cache) >= 2.
func fibRecursiveCache(n int, cache *[]int) {
	if n < 2 {
		(*cache)[0] = 0
		(*cache)[1] = 1
		return
	}
	fibRecursiveCache(n-1, cache)
	(*cache)[n] = (*cache)[n-1] + (*cache)[n-2]
}
// FibTailRecursive computes the nth Fibonacci number in linear time without
// a cache: the two running sequence values are threaded through the calls as
// accumulator arguments instead of being recomputed.
func FibTailRecursive(n int) int {
	return fibTailRecursive(n, 0, 1)
}

// fibTailRecursive advances the window (curr, next) one Fibonacci step per
// call while counting n down; when n reaches 0, curr is the answer.
func fibTailRecursive(n, curr, next int) int {
	if n == 0 {
		return curr
	}
	return fibTailRecursive(n-1, next, curr+next)
}
// FibIterative computes the nth Fibonacci number iteratively in linear time
// and constant space by sliding a two-value window along the sequence.
//
// Fix: the original returned 1 for n == 0 (the loop never ran and the
// initial `second` was returned), disagreeing with Fib(0) == 0 and with the
// other implementations in this file (FibRecursive, FibPowerMatrix).
func FibIterative(n int) int {
	if n == 0 {
		return 0
	}
	first, second := 0, 1
	for i := 0; i < n-1; i++ {
		first, second = second, first+second
	}
	return second
}
// FibPowerMatrix computes the nth Fibonacci number via the 2x2 Q-matrix
// [[1,1],[1,0]]: the top-left entry of Q^(n-1) is Fib(n). The power is
// built by repeated multiplication, so this runs in linear time.
func FibPowerMatrix(n int) int {
	if n == 0 {
		return 0
	}
	q := [2][2]int{
		{1, 1},
		{1, 0},
	}
	fibPower(&q, n-1)
	return q[0][0]
}

// fibPower raises *F (assumed to start as the Q-matrix) to the nth power by
// multiplying in one Q-matrix per loop iteration (n-1 multiplications).
func fibPower(F *[2][2]int, n int) {
	q := [2][2]int{
		{1, 1},
		{1, 0},
	}
	for i := 2; i <= n; i++ {
		fibMultiply(F, &q)
	}
}

// fibMultiply stores the matrix product (*F) x (*M) back into *F. Both
// operands are copied before any write, so calling it with F == M
// (squaring) is safe.
func fibMultiply(F *[2][2]int, M *[2][2]int) {
	a, b := *F, *M
	F[0][0] = a[0][0]*b[0][0] + a[0][1]*b[1][0]
	F[0][1] = a[0][0]*b[0][1] + a[0][1]*b[1][1]
	F[1][0] = a[1][0]*b[0][0] + a[1][1]*b[1][0]
	F[1][1] = a[1][0]*b[0][1] + a[1][1]*b[1][1]
}
func FibPowerMatrixRecursive(n int) int {
F := [2][2]int{
[2]int{1, 1},
[2]int{1, 0},
}
if n == 0 {
return 0
}
fibPowerRecursive(&F, n-1)
return F[0][0]
}
func fibPowerRecursive(F *[2][2]int, n int) {
if n == 0 || n == 1 {
return
}
M := [2][2]int{
[2]int{1, 1},
[2]int{1, 0},
}
fibPowerRecursive(F, n/2)
fibMultiply(F, F)
if n%2 != 0 {
fibMultiply(F, &M)
}
} | fib.go | 0.74158 | 0.481576 | fib.go | starcoder |
package date
import (
"strings"
"time"
)
const (
DefaultDateTime = "Y-m-d h:i:s"
DefaultDate = "Y-m-d"
)
// formatReplace pairs one PHP-style date token with its Go reference-time
// equivalent.
type formatReplace struct {
	from string
	to   string
}

// formatFromTo maps PHP date() tokens onto Go time layout fragments.
// Tokens whose replacement is empty have no Go layout equivalent and are
// simply stripped. NOTE(review): the substitutions are applied sequentially,
// so a replacement whose text contains a later token (e.g. "D" -> "Mon"
// followed by "M" -> "Jan") can be corrupted; the default formats above are
// unaffected, but exotic token combinations should be verified.
var formatFromTo = [...]formatReplace{
	{"d", "02"}, // Day: Day of the month, 2 digits with leading zeros. Eg: 01 to 31.
	{"D", "Mon"}, // Day: A textual representation of a day, three letters. Eg: Mon through Sun.
	{"w", "Monday"}, // Day: Numeric representation of the day of the week. Eg: 0 (for Sunday) through 6 (for Saturday).
	{"N", "Monday"}, // Day: ISO-8601 numeric representation of the day of the week. Eg: 1 (for Monday) through 7 (for Sunday).
	{"S", "02"}, // Day: English ordinal suffix for the day of the month, 2 characters. Eg: st, nd, rd or th. Works well with j.
	{"l", "Monday"}, // Day: A full textual representation of the day of the week. Eg: Sunday through Saturday.
	{"z", ""}, // Day: The day of the year (starting from 0). Eg: 0 through 365.
	{"W", ""}, // Week: ISO-8601 week number of year, weeks starting on Monday. Eg: 42 (the 42nd week in the year).
	{"F", "January"}, // Month: A full textual representation of a month, such as January or March. Eg: January through December.
	{"m", "01"}, // Month: Numeric representation of a month, with leading zeros. Eg: 01 through 12.
	{"M", "Jan"}, // Month: A short textual representation of a month, three letters. Eg: Jan through Dec.
	{"n", "1"}, // Month: Numeric representation of a month, without leading zeros. Eg: 1 through 12.
	{"t", ""}, // Month: Number of days in the given month. Eg: 28 through 31.
	{"Y", "2006"}, // Year: A full numeric representation of a year, 4 digits. Eg: 1999 or 2003.
	{"y", "06"}, // Year: A two digit representation of a year. Eg: 99 or 03.
	{"a", "pm"}, // Time: Lowercase Ante meridiem and Post meridiem. Eg: am or pm.
	{"A", "PM"}, // Time: Uppercase Ante meridiem and Post meridiem. Eg: AM or PM.
	{"g", "3"}, // Time: 12-hour format of an hour without leading zeros. Eg: 1 through 12.
	{"h", "03"}, // Time: 12-hour format of an hour with leading zeros. Eg: 01 through 12.
	{"H", "15"}, // Time: 24-hour format of an hour with leading zeros. Eg: 00 through 23.
	{"i", "04"}, // Time: Minutes with leading zeros. Eg: 00 to 59.
	{"s", "05"}, // Time: Seconds with leading zeros. Eg: 00 through 59.
	{"U", ""}, // Time: Seconds since the Unix Epoch (January 1 1970 00:00:00 GMT).
	{"O", "-0700"}, // Zone: Difference to Greenwich time (GMT) in hours. Eg: +0200.
	{"P", "-07:00"}, // Zone: Difference to Greenwich time (GMT) with colon between hours and minutes. Eg: +02:00.
	{"T", "MST"}, // Zone: Timezone abbreviation. Eg: UTC, EST, MDT ...
	{"c", "2006-01-02T15:04:05-07:00"}, // Format: ISO 8601 date. Eg: 2004-02-12T15:19:21+00:00.
	{"r", "Mon, 02 Jan 06 15:04 MST"}, // Format: RFC 2822 formatted date. Eg: Thu, 21 Dec 2000 16:01:07 +0200.
}

// Replace translates a PHP-style format string into a Go time layout by
// applying each token substitution in table order.
func Replace(format string) string {
	for _, r := range formatFromTo {
		format = strings.ReplaceAll(format, r.from, r.to)
	}
	return format
}
// Format renders t using a PHP-style format string, translating it to a Go
// layout first via Replace.
func Format(format string, t time.Time) string {
	return t.Format(Replace(format))
}
// Parse parses date according to a PHP-style format string, translating the
// format to a Go layout first via Replace.
func Parse(format string, date string) (time.Time, error) {
	return time.Parse(Replace(format), date)
}
// Now formats the current time using a PHP-style format string.
func Now(format string) string {
	return Format(format, time.Now())
}
// IsLeapYear reports whether t falls in a Gregorian leap year: years
// divisible by 4 are leap, except century years, which are leap only when
// divisible by 400 (2000 was a leap year; 1900 and 2100 are not).
//
// Fix: the original returned true for every year divisible by 100, wrongly
// classifying 1900, 2100, ... as leap years.
func IsLeapYear(t time.Time) bool {
	y := t.Year()
	switch {
	case y%400 == 0:
		return true
	case y%100 == 0:
		return false
	case y%4 == 0:
		return true
	}
	return false
}
// Days returns the number of days in the month containing t. Adding one
// month while subtracting the current day-of-month lands on "day 0" of the
// next month, which Go normalizes to the last day of t's month, so its
// Day() is the month length.
func Days(t time.Time) int {
	lastOfMonth := t.AddDate(0, 1, -t.Day())
	return lastOfMonth.Day()
}
package op
import(
"fmt"
"math"
)
// wrap1 applies the unary float64 function f to any builtin numeric value,
// converting the value to float64 first. The boxed float64 result is
// returned; a non-numeric argument prints "Invalid type" and yields nil,
// matching the original contract.
//
// Improvement: dispatch uses a type switch instead of formatting the
// dynamic type name with fmt.Sprintf("%T") and switching on the string,
// avoiding a per-call allocation and reflection-based formatting.
func wrap1(a any, f func(float64) float64) any {
	switch v := a.(type) {
	case int:
		return f(float64(v))
	case int8:
		return f(float64(v))
	case int16:
		return f(float64(v))
	case int32:
		return f(float64(v))
	case int64:
		return f(float64(v))
	case uint:
		return f(float64(v))
	case uint8:
		return f(float64(v))
	case uint16:
		return f(float64(v))
	case uint32:
		return f(float64(v))
	case uint64:
		return f(float64(v))
	case float32:
		return f(float64(v))
	case float64:
		return f(v)
	default:
		fmt.Println("Invalid type")
	}
	return nil
}
// numToFloat64 converts any builtin numeric value to float64, reporting
// whether the conversion was possible.
func numToFloat64(x any) (float64, bool) {
	switch v := x.(type) {
	case int:
		return float64(v), true
	case int8:
		return float64(v), true
	case int16:
		return float64(v), true
	case int32:
		return float64(v), true
	case int64:
		return float64(v), true
	case uint:
		return float64(v), true
	case uint8:
		return float64(v), true
	case uint16:
		return float64(v), true
	case uint32:
		return float64(v), true
	case uint64:
		return float64(v), true
	case float32:
		return float64(v), true
	case float64:
		return v, true
	}
	return 0, false
}

// wrap2 applies the binary float64 function f to two builtin numeric
// values, converting both to float64 first. A non-numeric first operand
// prints "Invalid type" and yields nil; a non-numeric second operand prints
// "Invalid type" but is treated as 0 (preserving the original behavior).
//
// Improvement: dispatch uses type switches (via numToFloat64) instead of
// switching on fmt.Sprintf("%T") strings, avoiding per-call allocations.
func wrap2(a, b any, f func(float64, float64) float64) any {
	b2, ok := numToFloat64(b)
	if !ok {
		// Match the original: an invalid second operand is reported but
		// falls through as 0 rather than aborting the call.
		fmt.Println("Invalid type")
	}
	a2, ok := numToFloat64(a)
	if !ok {
		fmt.Println("Invalid type")
		return nil
	}
	return f(a2, b2)
}
// Abs returns |a| as a boxed float64 (nil if a is not numeric) and adds the
// configured "abs" operation cost to the State's counters.
func (self *State)Abs(a any)any{
	self.IncOperations(self.coeff["abs"]+self.off["abs"])
	return wrap1(a,math.Abs)
}
// Pow returns a^b as a boxed float64 (nil if a is not numeric) and charges
// the "pow" operation cost.
func (self *State)Pow(a,b any)any{
	self.IncOperations(self.coeff["pow"]+self.off["pow"])
	return wrap2(a,b,math.Pow)
}
// Sqrt returns the square root of a and charges the "sqrt" operation cost.
func (self *State)Sqrt(a any)any{
	self.IncOperations(self.coeff["sqrt"]+self.off["sqrt"])
	return wrap1(a,math.Sqrt)
}
// Ceil returns the smallest integer value >= a and charges the "ceil"
// operation cost.
func (self *State)Ceil(a any)any{
	self.IncOperations(self.coeff["ceil"]+self.off["ceil"])
	return wrap1(a,math.Ceil)
}
// Floor returns the largest integer value <= a and charges the "floor"
// operation cost.
func (self *State)Floor(a any)any{
	self.IncOperations(self.coeff["floor"]+self.off["floor"])
	return wrap1(a,math.Floor)
}
// Round rounds a to the nearest integer value and charges the "round"
// operation cost.
func (self *State)Round(a any)any{
	self.IncOperations(self.coeff["round"]+self.off["round"])
	return wrap1(a,math.Round)
}
// Min returns the smaller of a and b and charges the "min" operation cost.
func (self *State)Min(a,b any)any{
	self.IncOperations(self.coeff["min"]+self.off["min"])
	return wrap2(a,b,math.Min)
}
// Max returns the larger of a and b and charges the "max" operation cost.
func (self *State)Max(a,b any)any{
	self.IncOperations(self.coeff["max"]+self.off["max"])
	return wrap2(a,b,math.Max)
}
// Sin returns sin(a) as a boxed float64 (nil if a is not numeric) and
// charges the "sin" operation cost. The same contract applies to every
// wrapper below: the argument is converted to float64, the result is a
// boxed float64, and the named operation cost is added to the State.
func (self *State)Sin(a any)any{
	self.IncOperations(self.coeff["sin"]+self.off["sin"])
	return wrap1(a,math.Sin)
}
// Cos returns cos(a) and charges the "cos" operation cost.
func (self *State)Cos(a any)any{
	self.IncOperations(self.coeff["cos"]+self.off["cos"])
	return wrap1(a,math.Cos)
}
// Asin returns arcsin(a) and charges the "asin" operation cost.
func (self *State)Asin(a any)any{
	self.IncOperations(self.coeff["asin"]+self.off["asin"])
	return wrap1(a,math.Asin)
}
// Acos returns arccos(a) and charges the "acos" operation cost.
func (self *State)Acos(a any)any{
	self.IncOperations(self.coeff["acos"]+self.off["acos"])
	return wrap1(a,math.Acos)
}
// Tan returns tan(a) and charges the "tan" operation cost.
func (self *State)Tan(a any)any{
	self.IncOperations(self.coeff["tan"]+self.off["tan"])
	return wrap1(a,math.Tan)
}
// Atan returns arctan(a) and charges the "atan" operation cost.
func (self *State)Atan(a any)any{
	self.IncOperations(self.coeff["atan"]+self.off["atan"])
	return wrap1(a,math.Atan)
}
// Sinh returns the hyperbolic sine of a and charges the "sinh" cost.
func (self *State)Sinh(a any)any{
	self.IncOperations(self.coeff["sinh"]+self.off["sinh"])
	return wrap1(a,math.Sinh)
}
// Cosh returns the hyperbolic cosine of a and charges the "cosh" cost.
func (self *State)Cosh(a any)any{
	self.IncOperations(self.coeff["cosh"]+self.off["cosh"])
	return wrap1(a,math.Cosh)
}
// Asinh returns the inverse hyperbolic sine of a and charges the "asinh" cost.
func (self *State)Asinh(a any)any{
	self.IncOperations(self.coeff["asinh"]+self.off["asinh"])
	return wrap1(a,math.Asinh)
}
// Acosh returns the inverse hyperbolic cosine of a and charges the "acosh" cost.
func (self *State)Acosh(a any)any{
	self.IncOperations(self.coeff["acosh"]+self.off["acosh"])
	return wrap1(a,math.Acosh)
}
// Tanh returns the hyperbolic tangent of a and charges the "tanh" cost.
func (self *State)Tanh(a any)any{
	self.IncOperations(self.coeff["tanh"]+self.off["tanh"])
	return wrap1(a,math.Tanh)
}
// Atanh returns the inverse hyperbolic tangent of a and charges the "atanh" cost.
func (self *State)Atanh(a any)any{
	self.IncOperations(self.coeff["atanh"]+self.off["atanh"])
	return wrap1(a,math.Atanh)
}
// Log returns the natural logarithm of a and charges the "log" cost.
func (self *State)Log(a any)any{
	self.IncOperations(self.coeff["log"]+self.off["log"])
	return wrap1(a,math.Log)
}
// Log2 returns the base-2 logarithm of a and charges the "log2" cost.
func (self *State)Log2(a any)any{
	self.IncOperations(self.coeff["log2"]+self.off["log2"])
	return wrap1(a,math.Log2)
}
// Log10 returns the base-10 logarithm of a and charges the "log10" cost.
func (self *State)Log10(a any)any{
	self.IncOperations(self.coeff["log10"]+self.off["log10"])
	return wrap1(a,math.Log10)
}
// Exp returns e^a and charges the "exp" operation cost.
func (self *State)Exp(a any)any{
	self.IncOperations(self.coeff["exp"]+self.off["exp"])
	return wrap1(a,math.Exp)
}
// Exp2 returns 2^a and charges the "exp2" operation cost.
func (self *State)Exp2(a any)any{
	self.IncOperations(self.coeff["exp2"]+self.off["exp2"])
	return wrap1(a,math.Exp2)
}
package snomed
import (
"golang.org/x/text/language"
"unicode"
)
// DefinitionStatusID indicates whether a concept is fully defined or primitive.
// A concept may be primitive if:
// a) it is inherently impossible to define the concept by defining relationships with other concepts
// b) attributes that would distinguish the concept from another concept are not (yet) in the concept model
// c) the concept is not in a directly clinical domain but in one of the supporting domains used to define clinical concepts
// d) modeling of defining relationships is a continuing process and in some cases is incomplete
type DefinitionStatusID int64
// Definition statuses available at the time of writing.
// The RF2 uses SNOMED concepts to populate enums, which means the set could
// conceivably be extended in future releases; code inferring from these
// values should tolerate unknown identifiers.
const (
	Primitive DefinitionStatusID = 900000000000074008 // a primitive concept lacks sufficient defining relationships to computably distinguish it from more general concepts (supertypes)
	Defined DefinitionStatusID = 900000000000073002 // a concept with a formal logic definition sufficient to distinguish its meaning from other similar concepts
)
// IsSufficientlyDefined reports whether the concept's definition status is
// Defined, i.e. it has a formal logic definition sufficient to distinguish
// its meaning from other similar concepts.
func (c *Concept) IsSufficientlyDefined() bool {
	return c.DefinitionStatusId == int64(Defined)
}
// DescriptionTypeID gives the type this description represents for the concept.
type DescriptionTypeID int64
// Description type identifiers available at the time of writing.
// A synonym carries a term used to represent the concept in the language
// given by Description.languageCode; the preferred term in a given language
// or dialect is itself a synonym, with preference and acceptability
// indicated by a Language refset.
const (
	Definition DescriptionTypeID = 900000000000550004 // A term representing the associated concept in the language indicated by Description .languageCode.
	FullySpecifiedName DescriptionTypeID = 900000000000003001 // A term unique among active descriptions in SNOMED CT that names the meaning of a concept code in a manner that is intended to be unambiguous and stable across multiple contexts.
	Synonym DescriptionTypeID = 900000000000013009 // A term that is an acceptable way to express a the meaning of a SNOMED CT concept in a particular language.
)
// LanguageTag returns the BCP 47 language tag parsed from this
// description's language code.
func (d *Description) LanguageTag() language.Tag {
	return language.Make(d.LanguageCode)
}
// IsFullySpecifiedName reports whether this description is a fully
// specified name.
func (d *Description) IsFullySpecifiedName() bool {
	return d.TypeId == int64(FullySpecifiedName)
}
// IsSynonym reports whether this description is a synonym (an acceptable
// way to express the concept in a particular language).
func (d *Description) IsSynonym() bool {
	return d.TypeId == int64(Synonym)
}
// IsDefinition reports whether this description is a definition-type
// description rather than a name or synonym.
func (d *Description) IsDefinition() bool {
	return d.TypeId == int64(Definition)
}
// CaseSignificanceID describes how much of a description's term is
// case-sensitive, which determines whether the term may be re-capitalized.
type CaseSignificanceID int64
// Case significance identifiers available at the time of writing.
const (
	EntireTermCaseInsensitive CaseSignificanceID = 900000000000448009
	EntireTermCaseSensitive CaseSignificanceID = 900000000000017005
	InitialCharacterCaseSensitive CaseSignificanceID = 900000000000020002
)
// Uncapitalized returns the term with its first character lowercased, when
// case significance permits. Terms are capitalized by default, but a term
// whose entire text or initial character is case-sensitive must be returned
// unchanged.
//
// Fix: the original sliced the remainder as d.Term[i+1:] with i == 0, i.e.
// it always skipped exactly one byte; for a multi-byte leading rune (e.g.
// "Édème") this split the rune and corrupted the string. Converting through
// []rune lowers the first rune regardless of its encoded width. An empty
// term still yields "".
func (d *Description) Uncapitalized() string {
	sig := d.CaseSignificance
	if sig == int64(EntireTermCaseSensitive) || sig == int64(InitialCharacterCaseSensitive) {
		return d.Term
	}
	runes := []rune(d.Term)
	if len(runes) == 0 {
		return ""
	}
	runes[0] = unicode.ToLower(runes[0])
	return string(runes)
}
// Valid relationship characteristic type identifiers.
const (
	additionalRelationship int64 = 900000000000227009
	definingRelationship int64 = 900000000000006009 // NB: has children inferred and stated
	inferredRelationship int64 = 900000000000011006 // NB: IS-A defining
	statedRelationship int64 = 900000000000010007 // NB: IS-A defining
	qualifyingRelationship int64 = 900000000000225001
)
// IsAdditionalRelationship reports whether this is a relationship to a
// target concept that is additional to the core.
func (r *Relationship) IsAdditionalRelationship() bool {
	return r.CharacteristicTypeId == additionalRelationship
}
// IsDefiningRelationship reports whether this relationship is always
// necessarily true of any instance of the source concept; inferred and
// stated relationships are both subtypes of defining.
func (r *Relationship) IsDefiningRelationship() bool {
	t := r.CharacteristicTypeId
	return t == definingRelationship || t == inferredRelationship || t == statedRelationship
}
// IsQualifyingRelationship reports whether this is an attribute-value
// relationship indicating that it may be applied to refine the meaning of
// the code. Qualifying relationships provide syntactically correct values
// for postcoordination; since the RF2 (2012) they are no longer part of the
// standard distributed release, the Machine Readable Concept Model being
// the more flexible replacement.
func (r *Relationship) IsQualifyingRelationship() bool {
	return r.CharacteristicTypeId == qualifyingRelationship
}
// Reference set type identifiers, mapped to the Go types that represent them.
const (
	rootRefset int64 = 900000000000455006 // root concept for all reference set types
	refSetDescriptorRefset int64 = 900000000000456007 // represented by RefsetDescriptorReferenceSet
	simpleRefset int64 = 446609009 // represented by SimpleReferenceSet
	languageRefset int64 = 900000000000506000 // represented by LanguageReferenceSet
	simpleMapRefset int64 = 900000000000496009 // represented by SimpleMapReferenceSet
	complexMapRefset int64 = 447250001 // represented by ComplexMapReferenceSet
	extendedMapRefset int64 = 609331003 // represented by ComplexMapReferenceSet
)
// Acceptability identifiers. A term that is neither acceptable nor
// preferred is unacceptable in the language.
const (
	acceptable int64 = 900000000000549004
	preferred int64 = 900000000000548007
)
// IsAcceptable reports whether the referenced description is acceptable for
// this concept in this language refset; a nil receiver is treated as not
// acceptable.
func (lrs *LanguageReferenceSet) IsAcceptable() bool {
	if lrs != nil {
		return lrs.AcceptabilityId == acceptable
	}
	return false
}
// IsPreferred reports whether the referenced description is the preferred
// term for this concept in this language refset; a nil receiver is treated
// as not preferred.
func (lrs *LanguageReferenceSet) IsPreferred() bool {
	if lrs != nil {
		return lrs.AcceptabilityId == preferred
	}
	return false
}
// IsUnacceptable reports whether the referenced description is unacceptable
// for this concept in this language refset, i.e. neither preferred nor
// acceptable; a nil receiver counts as unacceptable.
func (lrs *LanguageReferenceSet) IsUnacceptable() bool {
	if lrs != nil {
		return lrs.AcceptabilityId != preferred && lrs.AcceptabilityId != acceptable
	}
	return true
}
package main
import (
"math"
. "github.com/jakecoffman/cp"
"github.com/jakecoffman/cp/examples"
)
// Shared simulation state used by the motor controller: the balancing torso
// body, the accumulated lean offset integrated by motorPreSolve, and the
// wheel body.
var balanceBody *Body
var balanceSin float64
var wheelBody *Body
// biasCoef converts a per-second error-retention ratio into the equivalent
// per-step correction coefficient for a step of length dt: 1 - errorBias^dt.
func biasCoef(errorBias, dt float64) float64 {
	retained := math.Pow(errorBias, dt)
	return 1.0 - retained
}
// motorPreSolve is the per-step controller for the unicycle's wheel motor.
// It steers the balance body toward the mouse's X position by computing a
// clamped target velocity, integrating a lean offset, converting that into
// a target lean angle, and finally setting the SimpleMotor's rate from the
// angular-velocity error.
func motorPreSolve(motor *Constraint, space *Space) {
	dt := space.TimeStep()
	// The mouse X coordinate is the position target; draw it as a red line.
	targetX := examples.Mouse.X
	examples.DrawSegment(Vector{targetX, -10000}, Vector{targetX, 1000}, FColor{1, 0, 0, 1})
	// Target horizontal velocity toward the mouse, clamped to +/-500.
	maxV := 500.0
	targetV := Clamp(biasCoef(0.5, dt/1.2)*(targetX-balanceBody.Position().X)/dt, -maxV, maxV)
	errorV := targetV - balanceBody.Velocity().X
	targetSin := 3.0e-3 * biasCoef(0.1, dt) * errorV / dt
	// Integrate the lean offset (balanceSin) from the velocity error, with
	// the lean limited to about +/-0.6 rad.
	maxSin := math.Sin(0.6)
	balanceSin = Clamp(balanceSin-6.0e-5*biasCoef(0.2, dt)*errorV/dt, -maxSin, maxSin)
	targetA := math.Asin(Clamp(-targetSin+balanceSin, -maxSin, maxSin))
	// Angular difference between the body's current rotation and the target
	// lean angle, turned into a target angular velocity.
	angularDiff := math.Asin(balanceBody.Rotation().Cross(ForAngle(targetA)))
	targetW := biasCoef(0.1, dt/0.4) * angularDiff / dt
	// Drive the motor at the clamped angular-velocity error.
	// NOTE(review): rate is already clamped to +/-maxRate on the previous
	// line, so the second Clamp is redundant (harmless).
	maxRate := 50.0
	rate := Clamp(wheelBody.AngularVelocity()+balanceBody.AngularVelocity()-targetW, -maxRate, maxRate)
	motor.Class.(*SimpleMotor).Rate = Clamp(rate, -maxRate, maxRate)
}
// main builds the unicycle demo scene: static terrain, a wheel, a balancing
// torso connected to the wheel by a groove joint, a damped spring, and a
// motor whose rate is driven each step by motorPreSolve, plus one box
// obstacle. It then hands the space to the example runner.
func main() {
	space := NewSpace()
	space.Iterations = 30
	space.SetGravity(Vector{0, -500})
	// Static terrain: a long floor plus two short ramp segments around x=0.
	{
		shape := space.AddShape(NewSegment(space.StaticBody, Vector{-3200, -240}, Vector{3200, -240}, 0))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
		shape = space.AddShape(NewSegment(space.StaticBody, Vector{0, -200}, Vector{240, -240}, 0))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
		shape = space.AddShape(NewSegment(space.StaticBody, Vector{-240, -240}, Vector{0, -200}, 0))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
	}
	// The wheel: a circle body resting on the floor.
	{
		radius := 20.0
		mass := 1.0
		moment := MomentForCircle(mass, 0, radius, Vector{})
		wheelBody = space.AddBody(NewBody(mass, moment))
		wheelBody.SetPosition(Vector{0, -160 + radius})
		shape := space.AddShape(NewCircle(wheelBody, radius, Vector{}))
		shape.SetFriction(0.7)
		shape.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
	}
	// The balancing torso: two boxes (a tall post and a crossbar) with the
	// centre of gravity offset above the wheel.
	{
		cogOffset := 30.0
		bb1 := BB{-5, -cogOffset, 5, cogOffset*1.2 - cogOffset}
		bb2 := BB{-25, bb1.T, 25, bb1.T + 10}
		mass := 3.0
		moment := MomentForBox2(mass, bb1) + MomentForBox2(mass, bb2)
		balanceBody = space.AddBody(NewBody(mass, moment))
		balanceBody.SetPosition(Vector{0, wheelBody.Position().Y + cogOffset})
		shape := space.AddShape(NewBox2(balanceBody, bb1, 0))
		shape.SetFriction(1)
		shape.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
		shape = space.AddShape(NewBox2(balanceBody, bb2, 0))
		shape.SetFriction(1)
		shape.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
	}
	// Constrain the wheel to slide in a short vertical groove under the
	// torso, soften it with a damped spring, and add the drive motor whose
	// rate is set each step by motorPreSolve.
	anchorA := balanceBody.WorldToLocal(wheelBody.Position())
	grooveA := anchorA.Add(Vector{0, 30})
	grooveB := anchorA.Add(Vector{0, -10})
	space.AddConstraint(NewGrooveJoint(balanceBody, wheelBody, grooveA, grooveB, Vector{}))
	space.AddConstraint(NewDampedSpring(balanceBody, wheelBody, anchorA, Vector{}, 0, 6.0e2, 30))
	motor := space.AddConstraint(NewSimpleMotor(wheelBody, balanceBody, 0))
	motor.PreSolve = motorPreSolve
	// A loose box for the unicycle to bump into.
	{
		width := 100.0
		height := 20.0
		mass := 3.0
		boxBody := space.AddBody(NewBody(mass, MomentForBox(mass, width, height)))
		boxBody.SetPosition(Vector{200, -100})
		shape := space.AddShape(NewBox(boxBody, width, height, 0))
		shape.SetFriction(0.7)
	}
	examples.Main(space, 1.0/60.0, update, examples.DefaultDraw)
}
// update advances the physics simulation by one fixed step and draws the
// on-screen instructions.
func update(space *Space, dt float64) {
	space.Step(dt)
	examples.DrawString(Vector{-250, 100}, "This unicycle is completely driven and balanced by a single cpSimpleMotor.\nMove the mouse to make the unicycle follow it.")
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.