Dataset columns: code (string, lengths 114-1.05M) | path (string, lengths 3-312) | quality_prob (float64, 0.5-0.99) | learning_prob (float64, 0.2-1) | filename (string, lengths 3-168) | kind (1 string class: starcoder)
defmodule Envy do
@key_value_delimiter "="
@moduledoc """
Provides explicit and auto loading of env files.
## Example
The following will set the `FOO` environment variable with the value of `bar`.
```
foo=bar
```
You can define comments with `#` but can't use `#` in values without wrapping
the value in double quotes.
```
foo="#bar" # Comment
```
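A minimal usage sketch (assuming the `foo=bar` example above lives in a
`.env` file in the project root; note that keys are upcased before being set):
```
Envy.auto_load()
System.get_env("FOO")
#=> "bar"
```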
"""
@doc """
Loads the `.env` and the `Mix.env` specific env file.
e.g. if `Mix.env` is `test`, Envy will attempt to load `.env.test`.
"""
def auto_load do
Application.ensure_started(:mix)
current_env = Mix.env |> to_string |> String.downcase
[".env.#{current_env}", ".env"] |> load
end
@doc """
Loads a list of env files.
"""
def load(env_files) do
for path <- env_files do
if File.exists?(path) do
File.read!(path) |> parse
end
end
end
@doc """
Reloads `config/config.exs`. This function can be used to reload configuration
that relies on environment variables set by Envy.
This workaround is necessary since config files don't have
access to dependencies.
"""
def reload_config do
Mix.Config.read!("config/config.exs") |> Mix.Config.persist
end
@doc """
Parses env formatted file.
"""
def parse(content) do
content |> get_pairs |> load_env
end
defp get_pairs(content) do
content
|> String.split("\n")
|> Enum.reject(&blank_entry?/1)
|> Enum.reject(&comment_entry?/1)
|> Enum.map(&parse_line/1)
end
defp parse_line(line) do
[key, value] = line
|> String.trim
|> String.split(@key_value_delimiter, parts: 2)
[key, parse_value(value)]
end
defp parse_value(value) do
if String.starts_with?(value, "\"") do
unquote_string(value)
else
value |> String.split("#", parts: 2) |> List.first
end
end
defp unquote_string(value) do
value
|> String.split(~r{(?<!\\)"}, parts: 3)
|> Enum.drop(1)
|> List.first
|> String.replace(~r{\\"}, ~S("))
end
defp load_env(pairs) when is_list(pairs) do
Enum.each(pairs, fn([key, value]) ->
key = String.upcase(key)
if System.get_env(key) == nil do
System.put_env(key, value)
end
end)
end
defp blank_entry?(string) do
string == ""
end
defp comment_entry?(string) do
String.match?(string, ~r(^\s*#))
end
end
path: lib/envy.ex | quality_prob: 0.804943 | learning_prob: 0.849285 | filename: envy.ex | kind: starcoder
defmodule ControlNode.Namespace do
@moduledoc false
@supervisor ControlNode.ReleaseSupervisor
use GenServer
require Logger
alias ControlNode.{Host, Registry}
defmodule Spec do
@typedoc """
`Namespace.Spec` defines a spec with the following attributes,
* `:tag` : Tag for the namespace (eg: `:testing`)
* `:hosts` : List of hosts where the release will be deployed.
* `:registry_spec` : Defines the registry from where the release tar will
be retrieved for rolling out deployments in this namespace
* `:release_cookie` : Release cookie used by the release in this given
namespace. This cookie will be used by control node to connect to the
release nodes
* `:control_mode` : Configures the mode for the given namespace, default
`"MANAGE"`. Other possible values are `"OBSERVE"` and `"CONNECT"`. In
`"OBSERVE"` mode the user is only allowed to deploy and observe a
release, i.e. no failover mechanisms are available. In `"CONNECT"` mode the
control node just connects to the release nodes and no other operations
(like deploy or failover) are executed.
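A hypothetical spec (field values are illustrative):

    # values are illustrative
    %ControlNode.Namespace.Spec{
      tag: :testing,
      hosts: [],
      registry_spec: nil,
      release_cookie: :my_cookie,
      control_mode: "OBSERVE"
    }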
"""
@type t :: %__MODULE__{
tag: atom,
hosts: [Host.SSH.t()],
registry_spec: Registry.Local.t(),
deployment_type: atom,
release_management: atom,
release_cookie: atom,
control_mode: String.t()
}
defstruct tag: nil,
hosts: nil,
registry_spec: nil,
deployment_type: :incremental_replace,
release_management: :replica,
release_cookie: nil,
control_mode: "MANAGE"
end
def deploy(namespace_pid, version) do
GenServer.cast(namespace_pid, {:deploy, version})
end
def current_version(namespace_pid) do
GenServer.call(namespace_pid, :current_version)
end
def start_link(namespace_spec, release_mod) do
name = :"#{namespace_spec.tag}_#{release_mod.release_name}"
Logger.debug("Starting namespace with name #{name}")
GenServer.start_link(__MODULE__, [namespace_spec, release_mod], name: name)
end
@impl true
def init([namespace_spec, release_mod]) do
Logger.metadata(namespace: namespace_spec.tag, release: release_mod.release_name())
state = %{spec: namespace_spec, release_mod: release_mod}
{:ok, state, {:continue, :start_release_fsm}}
end
@impl true
def handle_continue(:start_release_fsm, state) do
Logger.info("Initializing namespace manager")
%{spec: namespace_spec, release_mod: release_mod} = state
ensure_started_releases(namespace_spec, release_mod)
{:noreply, state}
end
@impl true
def handle_call(
:current_version,
_from,
%{spec: namespace_spec, release_mod: release_mod} = state
) do
version_list =
Enum.map(namespace_spec.hosts, fn host_spec ->
with {:ok, vsn} <- release_mod.current_version(namespace_spec, host_spec) do
%{host: host_spec.host, version: vsn}
end
end)
{:reply, {:ok, version_list}, state}
end
@impl true
def handle_cast({:deploy, version}, %{spec: namespace_spec, release_mod: release_mod} = state) do
Enum.map(namespace_spec.hosts, fn host_spec ->
release_mod.deploy(namespace_spec, host_spec, version)
end)
{:noreply, state}
end
defp ensure_started_releases(namespace_spec, release_mod) do
Enum.map(namespace_spec.hosts, fn host_spec ->
start_release(release_mod, namespace_spec, host_spec)
end)
end
defp start_release(release_mod, namespace_spec, host_spec) do
spec = child_spec(release_mod, namespace_spec, host_spec)
case DynamicSupervisor.start_child(@supervisor, spec) do
{:ok, _pid} ->
:ok
{:ok, _pid, _info} ->
:ok
{:error, {:already_started, _pid}} ->
Logger.info("Release already running")
{:error, :already_running}
error ->
Logger.error(
"Failed to release with args: #{inspect(host_spec)}, error: #{inspect(error)}"
)
{:error, error}
end
end
defp child_spec(release_mod, namespace_spec, host_spec) do
%{id: release_mod, start: {release_mod, :start_link, [namespace_spec, host_spec]}}
end
end
path: lib/control_node/namespace.ex | quality_prob: 0.80871 | learning_prob: 0.47926 | filename: namespace.ex | kind: starcoder
defmodule Membrane.File.Sink do
@moduledoc """
Element that creates a file and stores incoming buffers there (in binary format).
When `Membrane.File.SeekEvent` is received, the element starts writing buffers starting
from `position`. By default, it overwrites previously stored bytes. You can set the `insert?`
field of the event to `true` to start inserting new buffers without overwriting previous ones.
Please note that inserting requires rewriting the file, which negatively impacts performance.
For more information refer to `Membrane.File.SeekEvent` moduledoc.
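A minimal sketch of adding the sink to a pipeline (the child-spec shape and
the companion `Membrane.File.Source` element are assumptions here):

    children = [
      # Membrane.File.Source is assumed from the same plugin
      source: %Membrane.File.Source{location: "input.bin"},
      sink: %Membrane.File.Sink{location: "/tmp/output.bin"}
    ]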
"""
use Membrane.Sink
import Mockery.Macro
alias Membrane.File.{CommonFile, Error, SeekEvent}
def_options location: [
spec: Path.t(),
description: "Path of the output file"
]
def_input_pad :input, demand_unit: :buffers, caps: :any
@impl true
def handle_init(%__MODULE__{location: location}) do
{:ok,
%{
location: Path.expand(location),
temp_location: Path.expand(location <> ".tmp"),
fd: nil,
temp_fd: nil
}}
end
@impl true
def handle_stopped_to_prepared(_ctx, %{location: location} = state) do
with {:ok, fd} <- mockable(CommonFile).open(location, [:read, :write]),
:ok <- mockable(CommonFile).truncate(fd) do
{:ok, %{state | fd: fd}}
else
error -> Error.wrap(error, :open, state)
end
end
@impl true
def handle_prepared_to_playing(_ctx, state) do
{{:ok, demand: :input}, state}
end
@impl true
def handle_write(:input, buffer, _ctx, %{fd: fd} = state) do
case mockable(CommonFile).write(fd, buffer) do
:ok -> {{:ok, demand: :input}, state}
error -> Error.wrap(error, :write, state)
end
end
@impl true
def handle_event(:input, %SeekEvent{insert?: insert?, position: position}, _ctx, state) do
if insert? do
split_file(position, state)
else
seek_file(position, state)
end
end
def handle_event(pad, event, ctx, state), do: super(pad, event, ctx, state)
@impl true
def handle_prepared_to_stopped(_ctx, %{fd: fd} = state) do
with {:ok, state} <- maybe_merge_temporary(state),
:ok <- mockable(CommonFile).close(fd) do
{:ok, %{state | fd: nil}}
else
error -> Error.wrap(error, :close, state)
end
end
defp seek_file(position, %{fd: fd} = state) do
with {:ok, state} <- maybe_merge_temporary(state),
{:ok, _position} <- mockable(CommonFile).seek(fd, position) do
{:ok, state}
else
error -> Error.wrap(error, :seek_file, state)
end
end
defp split_file(position, %{fd: fd} = state) do
with {:ok, state} <- seek_file(position, state),
{:ok, state} <- open_temporary(state),
:ok <- mockable(CommonFile).split(fd, state.temp_fd) do
{:ok, state}
else
error -> Error.wrap(error, :split_file, state)
end
end
defp maybe_merge_temporary(%{temp_fd: nil} = state), do: {:ok, state}
defp maybe_merge_temporary(%{fd: fd, temp_fd: temp_fd} = state) do
# TODO: Consider improving performance for multi-insertion scenarios by using
# multiple temporary files and merging them only once on `handle_prepared_to_stopped/2`.
with {:ok, _bytes_copied} <- mockable(CommonFile).copy(temp_fd, fd),
{:ok, state} <- remove_temporary(state) do
{:ok, state}
else
error -> Error.wrap(error, :merge_temporary, state)
end
end
defp open_temporary(%{temp_fd: nil, temp_location: temp_location} = state) do
case mockable(CommonFile).open(temp_location, [:read, :exclusive]) do
{:ok, temp_fd} -> {:ok, %{state | temp_fd: temp_fd}}
error -> Error.wrap(error, :open_temporary, state)
end
end
defp remove_temporary(%{temp_fd: temp_fd, temp_location: temp_location} = state) do
with :ok <- mockable(CommonFile).close(temp_fd),
:ok <- mockable(CommonFile).rm(temp_location) do
{:ok, %{state | temp_fd: nil}}
else
error -> Error.wrap(error, :remove_temporary, state)
end
end
end
path: lib/membrane_file/sink.ex | quality_prob: 0.601242 | learning_prob: 0.508971 | filename: sink.ex | kind: starcoder
defmodule Grizzly.ZWave.Commands.LearnModeSet do
@moduledoc """
This command is used to allow a node to be added to (or removed from) the network. When a node is
added to the network, the node is assigned a valid Home ID and NodeID.
Params:
* `:seq_number` - a command sequence number
* `:return_interview_status` - This field is used to request that the receiving node returns an additional Learn Mode Set Status
Command when the node interview is completed. It is set to either :on or :off. (optional - defaults to :off)
* `:mode` - sets the operation mode to one of `:disable`, `:direct_range_only` (immediate-range inclusions only), or `:allow_routed` (accept routed inclusions)
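A hypothetical round trip through this command (param values are illustrative):

    {:ok, cmd} =
      Grizzly.ZWave.Commands.LearnModeSet.new(
        seq_number: 0x10,
        return_interview_status: :on,
        mode: :direct_range_only
      )

    Grizzly.ZWave.Commands.LearnModeSet.encode_params(cmd)
    #=> <<0x10, 0x01, 0x01>>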
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.NetworkManagementBasicNode
@type mode :: :disable | :direct_range_only | :allow_routed
@type param ::
{:seq_number, ZWave.seq_number()}
| {:return_interview_status, :on | :off}
| {:mode, mode}
@impl true
def new(params) do
command = %Command{
name: :learn_mode_set,
command_byte: 0x01,
command_class: NetworkManagementBasicNode,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
def encode_params(command) do
seq_number = Command.param!(command, :seq_number)
return_interview_status_bit =
Command.param(command, :return_interview_status, :off) |> encode_return_interview_status()
mode_byte = Command.param!(command, :mode) |> encode_mode()
<<seq_number, 0x00::size(7), return_interview_status_bit::size(1), mode_byte>>
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(
<<seq_number, 0x00::size(7), return_interview_status_bit::size(1), mode_byte>>
) do
return_interview_status = decode_return_interview_status(return_interview_status_bit)
with {:ok, mode} <- decode_mode(mode_byte) do
{:ok,
[seq_number: seq_number, return_interview_status: return_interview_status, mode: mode]}
else
{:error, %DecodeError{}} = error ->
error
end
end
defp encode_return_interview_status(:on), do: 1
defp encode_return_interview_status(:off), do: 0
defp encode_mode(:disable), do: 0x00
defp encode_mode(:direct_range_only), do: 0x01
defp encode_mode(:allow_routed), do: 0x02
defp decode_return_interview_status(0), do: :off
defp decode_return_interview_status(1), do: :on
defp decode_mode(0x00), do: {:ok, :disable}
defp decode_mode(0x01), do: {:ok, :direct_range_only}
defp decode_mode(0x02), do: {:ok, :allow_routed}
defp decode_mode(byte),
do: {:error, %DecodeError{value: byte, param: :mode, command: :learn_mode_set}}
end
path: lib/grizzly/zwave/commands/learn_mode_set.ex | quality_prob: 0.903975 | learning_prob: 0.523542 | filename: learn_mode_set.ex | kind: starcoder
defmodule Figlet.Parser.FontFileParser do
@moduledoc """
This module parses figlet font files identified by the path to the file.
A FIGlet file has 3 main parts:
- Headerline
- Comments
- Character Data
## [FIGcharacter Data](http://www.jave.de/figlet/figfont.html#figcharacterdata)
The FIGcharacter data begins on the next line after the comments and continues
to the end of the file.
## Basic Data Structure
The sub-characters in the file are given exactly as they should be output, with
two exceptions:
1. Hardblanks should be the hardblank character specified in the header line, not a blank (space).
2. Every line has one or two endmark characters, whose column locations define the width of each FIGcharacter.
In most FIGfonts, the endmark character is either "@" or "#". The FIGdriver will
eliminate the last block of consecutive equal characters from each line of
sub-characters when the font is read in. By convention, the last line of a
FIGcharacter has two endmarks, while all the rest have one. This makes it easy
to see where FIGcharacters begin and end. No line should have more than two endmarks.
## See Also
Building a parser for Figlet files would be practically impossible without a useful spec for
reference. Relevant links for the Figlet font file spec:
- http://www.jave.de/docs/figfont.txt
- https://github.com/Marak/asciimo/issues/3
"""
alias Figlet.{Char, Font, Meta}
alias Figlet.Parser.HeaderlineParser
@reqd_codepoints Enum.concat(32..126, [196, 214, 220, 228, 246, 252, 223])
defmodule Error do
@moduledoc false
defexception message: "parser error"
end
@doc """
Parses the Figlet font file at the given absolute `filepath`, returning a `%Figlet.Font{}`
struct.
Given:
`flf2a 4 3 8 15 11 0 10127`
The 6th character (the invisible one after the `a`) is a unicode value for "hardBlank"
All FIGlet fonts must contain chars 32-126, 196, 214, 220, 228, 246, 252, 223
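A hypothetical call (the font path is illustrative):

    {:ok, font} = Figlet.Parser.FontFileParser.parse("fonts/standard.flf") # hypothetical path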
"""
def parse(filepath, opts \\ [])
def parse(filepath, _opts) when is_binary(filepath) do
Logger.metadata(font: filepath)
case File.exists?(filepath) do
true ->
filepath
|> File.stream!([], :line)
|> Enum.reduce({:headerline, %Font{source: filepath}}, &parse_line/2)
|> case do
{:chardata, _, font, _, _} -> {:ok, font}
_other -> {:error, "unexpected parser state"}
end
false ->
{:error, "File not found: #{inspect(filepath)}"}
end
rescue
error in Error -> {:error, "#{filepath}: #{inspect(error)}"}
end
# acc: {<task>, tmp_acc, %Font{}, line_i}
# acc: {:comments, tmp_acc, font, line_i}
defp parse_line(header, {:headerline, font}) do
case HeaderlineParser.parse(header) do
{:ok, meta} ->
{:comments, "", Map.put(font, :meta, meta), 2}
{:error, error} ->
raise Error, message: error
end
end
# After the headerline, accumulate comments
defp parse_line(
line,
{:comments, comment_acc, %Font{meta: %Meta{comment_lines: comment_lines}} = font, line_i}
)
when line_i <= comment_lines do
{:comments, comment_acc <> line, font, line_i + 1}
end
# Final comment line / transition to chardata
defp parse_line(
line,
{:comments, comment_acc, %Font{} = font, _line_i}
) do
[codepoint | rem_codepoints] = @reqd_codepoints
{:chardata, %Char{codepoint: codepoint}, Map.put(font, :comments, comment_acc <> line),
rem_codepoints, 1}
end
# After the comments, accumulate the chardata
# http://www.jave.de/figlet/figfont.html#figcharacterdata
defp parse_line(
line,
{:chardata, char, %Font{meta: %{height: height}} = font, rem_codepoints, line_i}
)
when line_i < height do
updated_char =
update_in(char.slices, fn slices -> Map.put(slices, line_i, trim_endmarks(line)) end)
{:chardata, updated_char, font, rem_codepoints, line_i + 1}
end
# last line of the char
defp parse_line(
line,
{:chardata, char, %Font{meta: %{height: height}} = font,
[new_codepoint | rem_codepoints], line_i}
)
when line_i == height do
line_data = trim_endmarks(line)
updated_char =
update_in(char.slices, fn slices -> Map.put(slices, line_i, line_data) end)
|> Map.put(:width, String.length(line_data))
updated_font =
update_in(font.char_map, fn char_map -> Map.put(char_map, char.codepoint, updated_char) end)
{:chardata, %Char{codepoint: new_codepoint}, updated_font, rem_codepoints, 1}
end
# TODO: ad-hoc chars
defp parse_line(_line, acc) do
acc
end
# In most FIGfonts, the endmark character is either "@" or "#", but
# we assume it is the character that _precedes_ the newline.
defp trim_endmarks(line_acc) do
line_acc = String.trim_trailing(line_acc, "\n")
endmark = String.last(line_acc)
String.trim_trailing(line_acc, endmark)
end
end
path: lib/figlet/parser/font_file_parser.ex | quality_prob: 0.535827 | learning_prob: 0.613439 | filename: font_file_parser.ex | kind: starcoder
defmodule ExAws.S3.Crypto.AESGCMCipher do
@moduledoc """
This module wraps the logic necessary to encrypt / decrypt using [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)
[GCM](https://en.wikipedia.org/wiki/Galois/Counter_Mode).
See the Erlang docs for [crypto_one_time_aead](https://erlang.org/doc/man/crypto.html#crypto_one_time_aead-7) for more info.
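A minimal round trip (AES-256-GCM requires a 32-byte key):

    key = :crypto.strong_rand_bytes(32)
    {:ok, {encrypted, iv}} = ExAws.S3.Crypto.AESGCMCipher.encrypt(key, "hello")
    {:ok, "hello"} = ExAws.S3.Crypto.AESGCMCipher.decrypt(key, encrypted, iv)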
"""
@auth_data ""
@tag_size 16
# "12? Why 12?" you ask. Because that's what Go uses by default.
@iv_size 12
@doc """
Encrypt the given contents with the supplied key.
"""
@spec encrypt(key :: bitstring, contents :: binary) ::
{:ok, {encrypted_result :: bitstring, initialization_vector :: bitstring}}
| {:error, reason :: String.t()}
def encrypt(key, contents) when is_binary(contents) do
iv = :crypto.strong_rand_bytes(@iv_size)
try do
{ciphertext, ciphertag} =
:crypto.crypto_one_time_aead(:aes_256_gcm, key, iv, contents, @auth_data, @tag_size, true)
{:ok, {ciphertext <> ciphertag, iv}}
rescue
e in ErlangError ->
%ErlangError{original: {_tag, _location, desc}} = e
{:error, desc}
end
end
@doc """
Decrypt the given contents with the supplied key and initialization vector.
"""
@spec decrypt(key :: bitstring, contents :: bitstring, iv :: bitstring) ::
{:ok, unencrypted_result :: binary} | {:error, reason :: String.t()}
def decrypt(key, contents, iv)
when byte_size(contents) > @tag_size and byte_size(iv) == @iv_size do
textsize = (byte_size(contents) - @tag_size) * 8
<<ciphertext::bitstring-size(textsize), ciphertag::bitstring>> = contents
:aes_256_gcm
|> :crypto.crypto_one_time_aead(key, iv, ciphertext, @auth_data, ciphertag, false)
|> case do
:error ->
{:error, "Could not decrypt contents"}
result ->
{:ok, result}
end
end
def decrypt(_key, _contents, _iv),
do:
{:error,
"Encrypted contents must be at least #{@tag_size} bytes and iv must be #{@iv_size} bytes"}
end
path: lib/ex_aws_s3_crypto/aes_gcm_cipher.ex | quality_prob: 0.788502 | learning_prob: 0.617195 | filename: aes_gcm_cipher.ex | kind: starcoder
defmodule Telegraf.Serializer.LineProtocol do
@moduledoc """
Serializer for the InfluxDB Line Protocol output data format.
InfluxData recommends this data format unless another format is required for interoperability.
https://docs.influxdata.com/telegraf/v1.18/data_formats/output/influx/
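A sketch of the output for a single metric (the `%Telegraf.Metric{}` field
names follow their usage below):

    [%Telegraf.Metric{
       name: "weather",
       tag_set: %{location: "us-midwest"},
       field_set: %{temperature: 82},
       timestamp: 1_465_839_830_100_400_200
     }]
    |> Telegraf.Serializer.LineProtocol.serialize()
    #=> "weather,location=us-midwest temperature=82i 1465839830100400200\n"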
"""
@behaviour Telegraf.Serializer
alias Telegraf.Metric
@impl Telegraf.Serializer
def serialize(metrics) when is_list(metrics) do
metrics
|> Enum.map(fn %Metric{} = metric ->
[
encode_name(metric.name),
metric.tag_set |> reject_nils() |> encode_tag_set(),
metric.field_set |> reject_nils() |> encode_field_set(),
encode_timestamp(metric.timestamp),
?\n
]
end)
|> IO.iodata_to_binary()
end
defp encode_name(name) when is_binary(name) do
Regex.replace(~r/[\s,]/, name, &~s/\\#{&1}/)
end
defp encode_tag_set([]), do: []
defp encode_tag_set(tag_set) do
tags =
tag_set
|> Enum.map(fn {k, v} -> [escape(to_string(k)), ?=, escape(to_string(v))] end)
|> Enum.intersperse(?,)
[?,, tags]
end
defp encode_field_set([]), do: []
defp encode_field_set(field_set) do
fields =
field_set
|> Enum.map(fn {k, v} -> [escape(to_string(k)), ?=, encode_field_value(v)] end)
|> Enum.intersperse(?,)
[?\s, fields]
end
defp encode_timestamp(nil), do: []
defp encode_timestamp(timestamp) when is_integer(timestamp) do
[?\s, to_string(timestamp)]
end
defp encode_field_value(value) when is_integer(value), do: [to_string(value), ?i]
defp encode_field_value(value) when is_float(value), do: to_string(value)
defp encode_field_value(value) when is_boolean(value), do: to_string(value)
defp encode_field_value(value) when is_binary(value) do
[?", String.replace(value, ~S("), ~S(\")), ?"]
end
defp escape(value) do
Regex.replace(~r/[\s,=]/, value, &~s/\\#{&1}/)
end
defp reject_nils(map) when is_map(map) do
Enum.reject(map, &match?({_k, nil}, &1))
end
end
path: lib/telegraf/serializer/line_protocol.ex | quality_prob: 0.793786 | learning_prob: 0.463444 | filename: line_protocol.ex | kind: starcoder
defmodule Annex.Examples.Iris do
alias Annex.{
Data.List1D,
Dataset,
Layer.Sequence,
Utils
}
require Logger
@floats ~w(sepal_length sepal_width petal_length petal_width)a
@strings ~w(species)a
@keys @floats ++ @strings
NimbleCSV.define(IrisParser, separator: ",", escape: "\0")
def cast({k, v}) when k in @floats do
{k, String.to_float(v)}
end
def cast({k, v}) when k in @strings do
{k, v}
end
def line_to_map(line) do
@keys
|> Utils.zip(line)
|> Map.new(fn kv -> cast(kv) end)
end
def load do
"./examples/iris.csv"
|> File.stream!()
|> IrisParser.parse_stream()
|> Stream.drop(1)
|> Stream.map(&line_to_map/1)
end
def species_to_label("setosa"), do: [1.0, 0.0, 0.0]
def species_to_label("versicolor"), do: [0.0, 1.0, 0.0]
def species_to_label("virginica"), do: [0.0, 0.0, 1.0]
def prep_data(%Stream{} = data) do
data
|> normalize_by_name(:sepal_length)
|> normalize_by_name(:sepal_width)
|> normalize_by_name(:petal_length)
|> normalize_by_name(:petal_width)
|> Enum.map(&prep_row/1)
|> Dataset.split(0.30)
# |> case do
# {trains, tests} -> {Enum.unzip(trains), Enum.unzip(tests)}
# end
end
def normalize_by_name(dataset, name) do
dataset
|> Enum.map(fn item -> item[name] end)
|> List1D.normalize()
|> Enum.zip(dataset)
|> Enum.map(fn {normalized, item} ->
Map.put(item, name, normalized)
end)
end
def prep_row(%{} = flower) do
data = [
flower.sepal_length,
flower.sepal_width,
flower.petal_length,
flower.petal_width
]
labels = species_to_label(flower.species)
{data, labels}
end
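# Trains a small dense network (4 inputs -> 10 hidden -> 3 outputs) on the
# iris dataset and logs prediction accuracy on the held-out test split.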
def run do
%Stream{} = flower_data = load()
{training_dataset, test_dataset} = prep_data(flower_data)
{%Sequence{} = seq, _output} =
Annex.sequence([
# Annex.dropout(0.001),
Annex.dense(10, 4),
Annex.activation(:tanh),
Annex.dense(3, 10),
Annex.activation(:sigmoid)
])
|> Annex.train(training_dataset,
name: :iris,
learning_rate: 0.17,
halt_condition: {:epochs, 20_000}
)
Logger.debug(fn ->
"""
Done - :iris
"""
end)
result =
test_dataset
|> Enum.map(fn {datum, label} ->
pred = Annex.predict(seq, datum)
norm = List1D.normalize(pred)
correct? =
norm
|> Enum.zip(label)
|> Enum.any?(fn {a, b} -> a == 1.0 and b == 1.0 end)
Logger.debug(fn ->
"""
TEST PRED:
- datum: #{inspect(datum)}
- label: #{inspect(label)}
- pred: #{inspect(pred)}
- norm: #{inspect(norm)}
- correct?: #{inspect(correct?)}
"""
end)
if correct?, do: 1, else: 0
end)
total = length(result)
corrects = Enum.sum(result)
wrongs = total - corrects
Logger.debug(fn ->
"""
TEST RESULTS:
accuracy: #{corrects / total}
total: #{total}
corrects: #{corrects}
wrongs: #{wrongs}
"""
end)
end
end
path: examples/iris.ex | quality_prob: 0.573917 | learning_prob: 0.45175 | filename: iris.ex | kind: starcoder
defmodule TrademarkFreeStrategicLandWarfare.Board do
@derive Jason.Encoder
@enforce_keys [:rows, :lookup]
defstruct rows: nil, lookup: nil
alias TrademarkFreeStrategicLandWarfare.Piece
@empty_board [
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, :lake, :lake, nil, nil, :lake, :lake, nil, nil],
[nil, nil, :lake, :lake, nil, nil, :lake, :lake, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil],
[nil, nil, nil, nil, nil, nil, nil, nil, nil, nil]
]
@piece_name_counts %{
flag: 1,
bomb: 6,
spy: 1,
scout: 8,
miner: 5,
sergeant: 4,
lieutenant: 4,
captain: 4,
major: 3,
colonel: 2,
general: 1,
marshall: 1
}
@type t() :: %__MODULE__{
rows: list(list()),
lookup: map()
}
@type coordinate() :: {integer(), integer()}
@type player_number() :: 1 | 2
@type direction :: :north | :east | :south | :west
@spec piece_name_counts() :: map()
def piece_name_counts() do
@piece_name_counts
end
@spec new() :: %__MODULE__{rows: [List.t()], lookup: map()}
def new() do
%__MODULE__{
rows: @empty_board,
lookup: %{}
}
end
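# A hypothetical flow (Piece.new/2 arity taken from create_pieces/2 below;
# the coordinate is illustrative):
#
#     board = Board.new()
#     piece = Piece.new(:scout, 1)
#     {:ok, board} = Board.place_piece(board, piece, {0, 9})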
@spec init_pieces(%__MODULE__{}, list(list(atom())), player_number()) ::
{:ok, %__MODULE__{rows: list(list()), lookup: map()}} | {:error, binary()}
def init_pieces(
%__MODULE__{} = board,
pieces,
player
) do
with :ok <- validate_piece_counts(pieces),
:ok <- validate_row_dimensions(pieces),
rows_of_pieces <- create_pieces(pieces, player) do
populate_board(board, rows_of_pieces, player)
else
{:error, x} -> {:error, "piece configuration is incorrect: #{inspect(x)}"}
end
end
@spec lookup_by_uuid(%__MODULE__{}, binary()) ::
nil | {coordinate(), %TrademarkFreeStrategicLandWarfare.Piece{}}
def lookup_by_uuid(board, uuid) do
case board.lookup[uuid] do
nil ->
nil
{_, _} = coord ->
{coord, lookup_by_coord(board, coord)}
end
end
@spec lookup_by_coord(%__MODULE__{}, coordinate()) ::
nil | %TrademarkFreeStrategicLandWarfare.Piece{}
def lookup_by_coord(board, {x, y}) do
if x >= 0 and x < 10 and y >= 0 and y < 10 do
get_in(board.rows, [Access.at(y), Access.at(x)])
else
nil
end
end
@spec remove_pieces(%__MODULE__{}, list(%TrademarkFreeStrategicLandWarfare.Piece{})) ::
%__MODULE__{}
def remove_pieces(%__MODULE__{} = board, pieces) do
Enum.reduce(pieces, board, fn piece_to_remove, acc ->
remove_piece(acc, piece_to_remove)
end)
end
@spec remove_piece(%__MODULE__{}, %TrademarkFreeStrategicLandWarfare.Piece{}) :: %__MODULE__{}
def remove_piece(%__MODULE__{} = board, %Piece{uuid: uuid}) do
remove_piece(board, uuid)
end
@spec remove_piece(%__MODULE__{}, binary()) :: %__MODULE__{}
def remove_piece(%__MODULE__{} = board, uuid) do
case lookup_by_uuid(board, uuid) do
nil ->
board
{{current_x, current_y}, _} ->
%__MODULE__{
rows:
board.rows
|> put_in([Access.at(current_y), Access.at(current_x)], nil),
lookup: Map.delete(board.lookup, uuid)
}
end
end
@spec place_piece(%__MODULE__{}, %TrademarkFreeStrategicLandWarfare.Piece{}, coordinate()) ::
{:error, binary()} | {:ok, %__MODULE__{}}
def place_piece(_, %Piece{}, {x, y}) when x in [2, 3, 6, 7] and y in [4, 5] do
{:error, "can't place a piece where a lake is"}
end
def place_piece(_, %Piece{}, {x, y}) when x < 0 or x >= 10 or y < 0 or y >= 10 do
{:error, "can't place a piece out of bounds"}
end
def place_piece(%__MODULE__{} = board, %Piece{} = piece, {x, y}) do
{:ok,
board
|> remove_piece(piece)
|> put_in([Access.key(:rows), Access.at(y), Access.at(x)], piece)
|> put_in([Access.key(:lookup), piece.uuid], {x, y})}
end
@spec move(%__MODULE__{}, player_number(), binary(), direction(), integer()) ::
{:error, binary()} | {:ok, %__MODULE__{}}
def move(board, player, uuid, direction, count) do
case lookup_by_uuid(board, uuid) do
nil ->
{:error, "no piece with that name"}
{{x, y}, %Piece{player: ^player} = piece} ->
advance(board, piece, {x, y}, direction, count)
_ ->
{:error, "you cannot move the other player's piece"}
end
end
# These maybe_* functions are for when you choose to flip the board (for
# player 2) so that both players can reason about playing from the bottom
# to the top, rather than player 2 playing from the top down.
@spec maybe_flip(%__MODULE__{}, player_number()) :: no_return
def maybe_flip(%__MODULE__{}, _) do
raise "not implemented for boards, only board rows"
end
@spec maybe_flip(list(list()), player_number()) :: list(list())
def maybe_flip(rows, 1), do: rows
def maybe_flip(rows, 2) do
rows
|> Enum.reverse()
|> Enum.map(&Enum.reverse/1)
end
@spec maybe_invert_player_direction(direction(), player_number()) :: direction()
def maybe_invert_player_direction(direction, 1), do: direction
def maybe_invert_player_direction(:north, 2), do: :south
def maybe_invert_player_direction(:south, 2), do: :north
def maybe_invert_player_direction(:west, 2), do: :east
def maybe_invert_player_direction(:east, 2), do: :west
@spec maybe_translate_coord(coordinate(), player_number()) :: coordinate()
def maybe_translate_coord(coord, 1), do: coord
def maybe_translate_coord({x, y}, 2), do: {9 - x, 9 - y}
@spec new_coordinate(coordinate(), direction()) :: nil | coordinate()
def new_coordinate({x, y}, direction) do
{new_x, new_y} =
case direction do
:north -> {x, y - 1}
:south -> {x, y + 1}
:west -> {x - 1, y}
:east -> {x + 1, y}
end
if new_x >= 0 and new_x < 10 and new_y >= 0 and new_y < 10 do
{new_x, new_y}
else
nil
end
end
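# e.g. new_coordinate({4, 4}, :east)  #=> {5, 4}
#      new_coordinate({0, 0}, :north) #=> nil (off the board)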
@spec mask_board(%__MODULE__{}, player_number()) :: %__MODULE__{}
def mask_board(board, player) do
new_rows =
Enum.map(board.rows, fn row ->
Enum.map(row, fn column ->
Piece.maybe_mask(column, player)
end)
end)
%__MODULE__{board | rows: new_rows}
end
defp populate_board(%__MODULE__{} = board, rows_of_pieces, player) do
{row_range, column_range} =
if player == 1 do
{6..9, 0..9}
else
{3..0, 9..0}
end
new_board =
rows_of_pieces
|> Enum.zip(row_range)
|> Enum.reduce(board, fn {row, y}, row_acc ->
row
|> Enum.zip(column_range)
|> Enum.reduce(row_acc, fn {piece, x}, column_acc ->
{:ok, new_board} = place_piece(column_acc, piece, {x, y})
new_board
end)
end)
{:ok, new_board}
end
defp validate_piece_counts(pieces) do
player_piece_counts =
pieces
|> List.flatten()
|> Enum.reduce(
%{},
fn name, acc -> Map.update(acc, name, 1, &(&1 + 1)) end
)
if @piece_name_counts == player_piece_counts do
:ok
else
{:error,
"invalid piece counts. see Board.piece_name_counts() for individual named piece counts, and the total must add to 40"}
end
end
defp validate_row_dimensions(pieces) do
if length(pieces) == 4 and [10, 10, 10, 10] == Enum.map(pieces, &length(&1)) do
:ok
else
{:error, "invalid row dimensions. must give a list of 4 with 10 names each"}
end
end
defp create_pieces(pieces, player) do
for row <- pieces do
for piece <- row do
Piece.new(piece, player)
end
end
end
defp advance(_, %Piece{name: :bomb}, _, _, _) do
{:error, "bombs cannot move"}
end
defp advance(_, %Piece{name: :flag}, _, _, _) do
{:error, "flags cannot move"}
end
# advance until you hit something in case of scout, and reveal piece if move more than 1
defp advance(board, %Piece{name: :scout} = piece, {x, y}, direction, count) when count > 1 do
with new_coord = {new_x, new_y} when is_integer(new_x) and is_integer(new_y) <-
new_coordinate({x, y}, direction) do
case lookup_by_coord(board, new_coord) do
nil ->
# This space is empty and we are advancing by at least two squares, so
# the other player now knows this piece is a scout; reveal it before
# continuing the advance.
piece = Piece.reveal(piece)
advance(board, piece, new_coord, direction, count - 1)
_ ->
# we hit something prematurely, either a lake, or a piece,
# so scout multiple square advancement ends here, and we'll let
# the "normal" advance by 1 logic version of this function
# catch any errors, move, or attack
advance(board, piece, {x, y}, direction, 1)
end
else
nil -> {:error, "attempt to move out of bounds isn't allowed"}
end
end
# have to update the lookups after moving
defp advance(board, %Piece{player: player} = piece, {x, y}, direction, 1) do
with new_coord = {new_x, new_y} when is_integer(new_x) and is_integer(new_y) <-
new_coordinate({x, y}, direction) do
case lookup_by_coord(board, new_coord) do
%Piece{player: ^player} ->
{:error, "you can't run into your own team's piece"}
%Piece{} = defender ->
attack(board, piece, defender, new_coord)
_ ->
place_piece(board, piece, new_coord)
end
else
nil -> {:error, "attempt to move out of bounds isn't allowed"}
end
end
defp advance(_, _, _, _, n) when n != 1 do
{:error, "all pieces except the scout can only advance 1"}
end
defp attack(board, attacker, defender, coord) do
case Piece.attack(attacker, defender) do
{:ok, actions} when is_list(actions) ->
pieces_to_remove = actions[:remove]
maybe_piece_to_place = [attacker.uuid, defender.uuid] -- pieces_to_remove
board_with_removed_pieces = remove_pieces(board, pieces_to_remove)
case maybe_piece_to_place do
[uuid] ->
{_, piece} = lookup_by_uuid(board, uuid)
# attack happened, so piece type had to be announced, so revealing
# it to other player for future turns.
piece = Piece.reveal(piece)
place_piece(board_with_removed_pieces, piece, coord)
[] ->
# equivalent ranks, so both pieces are removed
{:ok, board_with_removed_pieces}
end
{:ok, :win} ->
{:ok, board_with_flag_removed} =
board
|> remove_piece(defender)
|> place_piece(attacker, coord)
{:ok, :win, board_with_flag_removed}
other ->
# an {:error, reason}
other
end
end
end
path: lib/trademark_free_strategic_land_warfare/board.ex | quality_prob: 0.796372 | learning_prob: 0.544075 | filename: board.ex | kind: starcoder
defmodule Aph.TTS do
@moduledoc """
TTS generation and alignment functions.
This module takes care of everything related to TTS (text-to-speech)
generation and parsing/aligning. It's also the module that introduces the most
side effects in the entire app, because it has to shell out to various programs
and make HTTP calls.
"""
# This is the language map for the Google TTS API
@g_lang_map %{
ar: "ar-XA",
nl: "nl-NL",
en: "en-US",
fr: "fr-FR",
de: "de-DE",
hi: "hi-IN",
id: "id-ID",
it: "it-IT",
ja: "ja-JP",
ko: "ko-KR",
zh: "cmn-CN",
nb: "nb-NO",
pl: "pl-PL",
pt: "pt-PT",
ru: "ru-RU",
tr: "tr-TR",
vi: "vi-VN"
}
# This is the language map for `aeneas`
@a_lang_map %{
ar: :ara,
nl: :nld,
en: :eng,
fr: :fra,
de: :deu,
hi: :hin,
id: :ind,
it: :ita,
ja: :jpn,
ko: :kor,
zh: :cmn,
nb: :nor,
pl: :pol,
pt: :por,
ru: :rus,
tr: :tur,
vi: :vie
}
@doc """
Generates a TTS message.
Takes a database entity, a file prefix and an `Aph.Main.Avatar`.
The database entity can be generic, but must at least have an ID and a `content`
field. The file prefix is used to prevent filename collisions, since IDs can
be the same across multiple database tables.
First creates a temporary directory under `gentts/`, then, depending on the
`:tts` configuration option, synthesizes the audio with either the Google
TTS API or espeak and saves it in the temporary directory. Then calls `align/4`.
"""
def synthesize(entity, prefix, av) do
File.mkdir_p!("gentts/#{prefix}-#{entity.id}")
File.mkdir_p!("priv/static/tts/")
if Application.get_env(:aph, :tts) == "google" do
synthesize_google(entity, prefix, av)
else
synthesize_espeak(entity, prefix, av)
end
end
# Synthesize TTS with Google Text-to-Speech API.
defp synthesize_google(entity, prefix, av) do
api_key = Application.get_env(:aph, :google_key)
lang = @g_lang_map[String.to_atom(av.language)]
gender_num =
cond do
# en-US has no Standard-A voice for some reason
lang == "en-US" and av.gender == "FEMALE" -> "C"
lang == "en-US" and av.gender == "MALE" -> "B"
av.gender == "FEMALE" -> "A"
true -> "B"
end
body =
Jason.encode!(%{
input: %{text: entity.content},
voice: %{languageCode: lang, name: "#{lang}-Standard-#{gender_num}"},
audioConfig: %{
audioEncoding: "OGG_OPUS",
pitch: av.pitch || 0,
speakingRate: av.speed || 1.0
}
})
headers = [{"content-type", "application/json"}]
with {:ok, res} <-
HTTPoison.post(
"https://texttospeech.googleapis.com/v1/text:synthesize?key=#{api_key}",
body,
headers
),
{:ok, json} <- Jason.decode(res.body),
{:ok, content} <- Base.decode64(json["audioContent"]),
:ok <- File.write("gentts/#{prefix}-#{entity.id}/temp.ogg", content),
:ok <- align(entity.id, entity.content, prefix, av.language) do
:ok
else
{:error, err} -> {:tts_error, err}
end
end
# Synthesizes TTS using espeak.
defp synthesize_espeak(entity, prefix, av) do
# Since espeak doesn't accept the same values that the Google TTS api does,
# we have to convert them from one scale to another.
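# The formulas below map Google's pitch range (-20.0..20.0) to espeak's
# 0..99 and Google's speaking rate (0.25..4.0) to espeak's ~80..450 wpm.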
scale_pitch = (av.pitch + 20) / 40 * 99
scale_speed = floor((av.speed - 0.25) / 3.75 * 370.0 + 80.0)
with {_, 0} <-
System.cmd("espeak", [
"-p",
to_string(scale_pitch),
"-s",
to_string(scale_speed),
"-w",
"gentts/#{prefix}-#{entity.id}/temp.wav",
entity.content
]),
{_, 0} <-
System.cmd("ffmpeg", [
"-i",
"gentts/#{prefix}-#{entity.id}/temp.wav",
"-c:a",
"libopus",
"-b:a",
"96K",
"gentts/#{prefix}-#{entity.id}/temp.ogg"
]),
:ok <- align(entity.id, entity.content, prefix, av.language) do
:ok
else
{_error, 1} -> {:tts_error, "espeak failed to create audio!"}
end
end
@doc """
Removes temporary directory and moves files to a permanent location.
Takes the name of the temporary directory.
"""
def clean(name) do
with :ok <- File.cp("gentts/#{name}/out.json", "priv/static/tts/#{name}.json"),
:ok <- File.cp("gentts/#{name}/temp.ogg", "priv/static/tts/#{name}.ogg"),
{:ok, _} <- File.rm_rf("gentts/#{name}") do
:ok
else
e -> {:tts_error, e}
end
end
@doc """
Forcibly aligns an existing TTS message.
Takes the name/ID, the TTS text, the file prefix, and the language.
This shells out to `aeneas` and obtains a JSON file that contains timestamps
of when in the audio file which word is said.
"""
def align(name, text, prefix, lang) do
lang = @a_lang_map[String.to_atom(lang)]
with :ok <-
File.write(
"gentts/#{prefix}-#{name}/temp.txt",
text |> String.split(" ") |> Enum.join("\n")
),
{_, 0} <-
System.cmd("python3", [
"-m",
"aeneas.tools.execute_task",
"gentts/#{prefix}-#{name}/temp.ogg",
"gentts/#{prefix}-#{name}/temp.txt",
"task_language=#{Atom.to_string(lang)}|os_task_file_format=json|is_text_type=plain",
"gentts/#{prefix}-#{name}/out.json"
]) do
:ok
else
{:error, err} -> {:error, err}
{err, 1} -> {:error, err}
end
end
end
path: lib/aph/tts.ex | quality_prob: 0.6508 | learning_prob: 0.417212 | filename: tts.ex | kind: starcoder
defmodule Coxir.Struct.User do
@moduledoc """
Defines methods used to interact with Discord users.
Refer to [this](https://discordapp.com/developers/docs/resources/user#user-object)
for a list of fields and a broader documentation.
In addition, the following fields are also embedded.
- `voice` - a channel object
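A couple of hypothetical calls (the snowflake ID is illustrative):

    Coxir.Struct.User.get("@me")
    Coxir.Struct.User.create_dm("290677096882306048") # illustrative ID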
"""
@type user :: String.t() | map
use Coxir.Struct
alias Coxir.Struct.{Channel}
def pretty(struct) do
struct
|> replace(:voice_id, &Channel.get/1)
end
def get(user \\ "@me")
def get(%{id: id}),
do: get(id)
def get(user) do
super(user)
|> case do
nil ->
API.request(:get, "users/#{user}")
|> pretty
user -> user
end
end
@doc """
Modifies the local user.
Returns a user object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `username` - the user's username
- `avatar` - the user's avatar
Refer to [this](https://discordapp.com/developers/docs/resources/user#modify-current-user)
for a broader explanation on the fields and their defaults.
"""
@spec edit(Enum.t()) :: map
def edit(params) do
API.request(:patch, "users/@me", params)
end
@doc """
Fetches a list of connections for the local user.
Refer to [this](https://discordapp.com/developers/docs/resources/user#get-user-connections)
for more information.
"""
@spec get_connections() :: list | map
def get_connections do
API.request(:get, "users/@me/connections")
end
@doc """
Fetches a list of guilds for the local user.
Returns a list of partial guild objects
or a map containing error information.
#### Query
Must be a keyword list with the fields listed below.
- `before` - get guilds before this guild ID
- `after` - get guilds after this guild ID
- `max` - max number of guilds to return
Refer to [this](https://discordapp.com/developers/docs/resources/user#get-current-user-guilds)
for a broader explanation on the fields and their defaults.
"""
@spec get_guilds(Keyword.t()) :: list | map
def get_guilds(query \\ []) do
API.request(:get, "users/@me/guilds", "", params: query)
end
@doc """
Creates a DM channel with a given user.
Returns a channel object upon success
or a map containing error information.
"""
@spec create_dm(user) :: map
def create_dm(%{id: id}),
do: create_dm(id)
def create_dm(recipient) do
API.request(:post, "users/@me/channels", %{recipient_id: recipient})
|> Channel.pretty
end
@doc """
Creates a group DM channel.
Returns a channel object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `access_tokens` - access tokens of users
- `nicks` - a map of user ids and their respective nicknames
Refer to [this](https://discordapp.com/developers/docs/resources/user#create-group-dm)
for a broader explanation on the fields and their defaults.
"""
@spec create_group(Enum.t()) :: map
def create_group(params) do
API.request(:post, "users/@me/channels", params)
|> Channel.pretty
end
@doc """
Fetches a list of DM channels for the local user.
Returns a list of channel objects upon success
or a map containing error information.
"""
@spec get_dms() :: list
def get_dms do
API.request(:get, "users/@me/channels")
|> case do
list when is_list(list) ->
for channel <- list do
Channel.pretty(channel)
end
error -> error
end
end
end
path: lib/coxir/struct/user.ex | quality_prob: 0.86342 | learning_prob: 0.455744 | filename: user.ex | kind: starcoder
defmodule EZCalendar do
@moduledoc """
Functions for building calendars.
To use the Repo shortcut functions, add `EZCalendar` to your repo.
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
use EZCalendar
end
The following functions and their bang! variants then become available.
Repo.calendar(query, calendar_module, params, opts \\\\ [])
Repo.day_calendar(query, params, opts \\\\ [])
Repo.week_calendar(query, params, opts \\\\ [])
Repo.month_calendar(query, params, opts \\\\ [])
Repo.biweekly_calendar(query, params, opts \\\\ [])
You can use a keyword list for multiple queries. Each query will use
the default field(s) unless provided in the query list or in the options,
and the results will be accessible by their key in each date's data field.
[
shifts: [Shift, [:starting, :ending]],
events: [Event, :starting_at],
meetings: Meeting,
deliveries: Delivery
]
|> Repo.month_calendar({2016, 11}, field: :scheduled_for)
Params can be a Date, DateTime, erl style tuple or map containing a day month and year.
The day can be omitted from the params; doing so will result in the first of the month being used.
Tuple and map values will be cast as integers for easy integration with Phoenix.
The timezone and query field can be set with `:tz` and `:field` as options.
If using a biweekly calendar the `biweekly_start` can also be set.
"""
alias EZCalendar.ParamParser
defmacro __using__(_) do
quote do
def calendar(query, calendar_type, params, opts \\ []) do
EZCalendar.build_calendar(calendar_type, __MODULE__, query, params, opts)
end
def calendar!(query, calendar_type, params, opts \\ []) do
EZCalendar.build_calendar!(calendar_type, __MODULE__, query, params, opts)
end
def month_calendar(query, params, opts \\ []) do
EZCalendar.build_calendar(EZCalendar.MonthCalendar, __MODULE__, query, params, opts)
end
def month_calendar!(query, params, opts \\ []) do
EZCalendar.build_calendar!(EZCalendar.MonthCalendar, __MODULE__, query, params, opts)
end
def week_calendar(query, params, opts \\ []) do
EZCalendar.build_calendar(EZCalendar.WeekCalendar, __MODULE__, query, params, opts)
end
def week_calendar!(query, params, opts \\ []) do
EZCalendar.build_calendar!(EZCalendar.WeekCalendar, __MODULE__, query, params, opts)
end
def day_calendar(query, params, opts \\ []) do
EZCalendar.build_calendar(EZCalendar.DayCalendar, __MODULE__, query, params, opts)
end
def day_calendar!(query, params, opts \\ []) do
EZCalendar.build_calendar!(EZCalendar.DayCalendar, __MODULE__, query, params, opts)
end
def biweekly_calendar(query, params, opts \\ []) do
EZCalendar.build_calendar(EZCalendar.BiweeklyCalendar, __MODULE__, query, params, opts)
end
def biweekly_calendar!(query, params, opts \\ []) do
EZCalendar.build_calendar!(EZCalendar.BiweeklyCalendar, __MODULE__, query, params, opts)
end
end
end
@doc """
Builds a calendar struct. Returns a tuple containing `:ok` and the calendar or `:error` and the reason.
Takes a calendar module, repo, query, params and options as its arguments.
The params provided in the args are parsed into a date erl
and passed to the calendar modules `date_range` function.
The timezone and query field can be set with `:tz` and `:field` as options
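For example (`MyApp.Repo` and `Event` are hypothetical):

    # MyApp.Repo and Event are hypothetical names
    {:ok, calendar} =
      EZCalendar.build_calendar(EZCalendar.MonthCalendar, MyApp.Repo, Event, {2016, 11}, tz: "UTC")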
"""
def build_calendar(calendar_module, repo, query, params, opts \\ []) do
case ParamParser.to_erl(params) do
{:ok, date} ->
{:ok, do_build_calendar(calendar_module, repo, query, date, opts)}
error -> error
end
end
@doc """
Builds a calendar struct. Returns a calendar or raises an error
Arguments and options are the same as `build_calendar/5`
"""
def build_calendar!(calendar_module, repo, query, params, opts \\ []) do
{:ok, date} = ParamParser.to_erl(params)
do_build_calendar(calendar_module, repo, query, date, opts)
end
defp do_build_calendar(calendar_module, repo, query, date, opts) do
{start_date, end_date} = calendar_module.date_range(date, opts)
EZCalendar.CalendarBuilder.build(query, repo, start_date, end_date, opts)
|> calendar_module.build(date)
end
end
path: lib/ez_calendar.ex | quality_prob: 0.834845 | learning_prob: 0.481698 | filename: ez_calendar.ex | kind: starcoder
defmodule FileSize.Byte do
@moduledoc """
A struct that represents a file size in bytes, the lowest possible unit:
a chunk of 8 bits.
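A small arithmetic sketch (`FileSize.new/2` and `FileSize.add/2` are assumed
from the library's top-level API, which the protocol implementations below back):

    # FileSize.new/2 and FileSize.add/2 assumed from the top-level API
    FileSize.new(1, :kb)
    |> FileSize.add(FileSize.new(512, :b))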
"""
use FileSize.Size, normalized_key: :bytes, default_unit: :b
@typedoc """
A type defining the available IEC units.
"""
@type iec_unit ::
:b
| :kib
| :mib
| :gib
| :tib
| :pib
| :eib
| :zib
| :yib
@typedoc """
A type defining the available SI units.
"""
@type si_unit ::
:b
| :kb
| :mb
| :gb
| :tb
| :pb
| :eb
| :zb
| :yb
@typedoc """
A union type combining the available IEC and SI units.
"""
@type unit :: iec_unit | si_unit
@typedoc """
The byte type.
"""
@type t :: %__MODULE__{value: number, unit: unit, bytes: number}
defimpl FileSize.Calculable do
alias FileSize.Bit
alias FileSize.Byte
def add(size, %Bit{} = other_size) do
size
|> FileSize.convert(:bit)
|> FileSize.add(other_size)
|> FileSize.convert(other_size.unit)
end
def add(size, %Byte{} = other_size) do
FileSize.from_bytes(size.bytes + other_size.bytes, size.unit)
end
def subtract(size, %Bit{} = other_size) do
size
|> FileSize.convert(:bit)
|> FileSize.subtract(other_size)
|> FileSize.convert(size.unit)
end
def subtract(size, %Byte{} = other_size) do
FileSize.from_bytes(size.bytes - other_size.bytes, size.unit)
end
end
defimpl FileSize.Comparable do
alias FileSize.Bit
alias FileSize.Utils
def compare(size, %Bit{} = other_size) do
size = FileSize.convert(size, :bit)
Utils.compare(size.bits, other_size.bits)
end
def compare(size, other_size) do
Utils.compare(size.bytes, other_size.bytes)
end
end
defimpl FileSize.Convertible do
alias FileSize.Bit
alias FileSize.Units.Info, as: UnitInfo
def normalized_value(size), do: size.bytes
def convert(%{unit: unit} = size, %{name: unit}), do: size
def convert(size, unit_info) do
value = UnitInfo.denormalize_value(unit_info, size.bytes)
value
|> convert_between_types(unit_info.mod)
|> FileSize.new(unit_info)
end
defp convert_between_types(value, Bit), do: trunc(value * 8)
defp convert_between_types(value, _), do: value
end
end
path: lib/file_size/byte.ex | quality_prob: 0.896705 | learning_prob: 0.579698 | filename: byte.ex | kind: starcoder
defmodule Prometheus.Metric.Boolean do
@moduledoc """
Boolean metric, to report booleans and flags.
Boolean is a non-standard metric that uses untyped metric underneath.
A Boolean is typically used as a flag i.e. enabled/disabled, online/offline.
Example:
```
-module(my_fuse_instrumenter).
-export([setup/0,
fuse_event/2]).
setup() ->
prometheus_boolean:declare([{name, app_fuse_state},
{labels, [name]}, %% fuse name
{help, "State of various app fuses."}]).
fuse_event(Fuse, Event) ->
case Event of
ok -> prometheus_boolean:set(app_fuse_state, [Fuse], true);
blown -> prometheus_boolean:set(app_fuse_state, [Fuse], false);
_ -> ok
end.
```
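The same setup in Elixir using this module (a sketch; the spec keys and the
label value mirror the Erlang example above):
```
Prometheus.Metric.Boolean.declare(
  name: :app_fuse_state,
  labels: [:name],
  help: "State of various app fuses."
)

# :database stands in for a fuse name
Prometheus.Metric.Boolean.set([name: :app_fuse_state, labels: [:database]], true)
```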
"""
use Prometheus.Erlang, :prometheus_boolean
@doc """
Creates a boolean using `spec`.
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidLabelNameError` if label name is invalid.<br>
Raises `Prometheus.MFAlreadyExistsError` if a boolean with
the same `spec` already exists.
"""
delegate new(spec)
@doc """
Creates a boolean using `spec`.
If a boolean with the same `spec` exists returns `false`.
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidLabelNameError` if label name is invalid.
"""
delegate declare(spec)
@doc """
Sets the boolean identified by `spec` to `value`.
Valid "truthy" values:
- `true`;
- `false`;
- `0` -> false;
- `number > 0` -> true;
- `[]` -> false;
- `non-empty list` -> true;
- `:undefined` -> undefined.
Other values will generate `Prometheus.InvalidValueError` error.
Raises `Prometheus.InvalidValueError` exception if `value` isn't
a boolean or `:undefined`.<br>
Raises `Prometheus.UnknownMetricError` exception if a boolean for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric set(spec, value)
@doc """
Toggles the boolean identified by `spec` to `value`.
Raises `Prometheus.InvalidValueError` exception if boolean is `:undefined`.<br>
Raises `Prometheus.UnknownMetricError` exception if a boolean for `spec`
can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric toggle(spec)
@doc """
Removes boolean series identified by spec.
Raises `Prometheus.UnknownMetricError` exception if a boolean
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric remove(spec)
@doc """
Resets the value of the boolean identified by `spec`.
Raises `Prometheus.UnknownMetricError` exception if a boolean
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric reset(spec)
@doc """
Returns the value of the boolean identified by `spec`. If there is no boolean for
given labels combination, returns `:undefined`.
Raises `Prometheus.UnknownMetricError` exception if a boolean
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric value(spec)
end
path: astreu/deps/prometheus_ex/lib/prometheus/metric/boolean.ex | quality_prob: 0.949412 | learning_prob: 0.857231 | filename: boolean.ex | kind: starcoder
defmodule RayTracer.Intersection do
@moduledoc """
This module is responsible for computing ray intersections with various shapes
"""
alias RayTracer.Shape
alias RayTracer.Ray
alias RayTracer.RTuple
alias RayTracer.World
import RayTracer.Constants
@type t :: %__MODULE__{
t: number,
object: Shape.t
}
@type intersections :: list(t)
@type computation :: %{
t: number,
object: Shape.t,
point: RTuple.point,
eyev: RTuple.vector,
normalv: RTuple.vector,
inside: boolean,
under_point: RTuple.point,
over_point: RTuple.point,
reflectv: RTuple.vector,
n1: number,
n2: number
}
defstruct [:t, :object]
@doc """
Builds an intersection of ray with shape `s` at distance `t`
"""
@spec new(number, Shape.t) :: t
def new(t, s) do
%__MODULE__{t: t, object: s}
end
@doc """
Builds intersections
"""
@spec intersections(list({number, Shape.t})) :: intersections
def intersections(xs) do
xs |> Enum.map(
fn ({t, s}) -> new(t, s) end
)
end
@doc """
Computes the intersections of `ray` with `shape`, transforming the ray into object space first
"""
@spec intersect(Shape.t, Ray.t) :: intersections
def intersect(shape, ray) do
object_space_ray = ray |> Ray.transform(shape.inv_transform)
Shape.local_intersect(shape, object_space_ray)
end
@doc """
Computes all intersections of `ray` with the objects in world `w`, sorted by `t`
"""
@spec intersect_world(World.t, Ray.t) :: intersections
def intersect_world(world, ray) do
world.objects
|> Enum.flat_map(&intersect(&1, ray))
|> Enum.sort_by(&(&1.t))
end
@doc """
Returns a first non-negative intersection from intersections list.
"""
@spec hit(intersections) :: t | nil
def hit(intersections) do
intersections
|> Enum.reject(fn(i) -> i.t < 0 end)
|> Enum.min_by(&(&1.t), fn -> nil end)
end
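# e.g. hit(intersections([{-1, s}, {1, s}, {2, s}])) returns the t: 1
# intersection; negative t values lie behind the ray origin.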
@doc """
Prepares data for shading computations
"""
@spec prepare_computations(t, Ray.t, intersections) :: computation
def prepare_computations(intersection, ray) do
prepare_computations(intersection, ray, [intersection])
end
def prepare_computations(intersection, ray, xs) do
p = ray |> Ray.position(intersection.t)
eyev = RTuple.negate(ray.direction)
normalv = intersection.object |> Shape.normal_at(p)
inside = inside?(normalv, eyev)
final_normal_v = (if inside, do: RTuple.negate(normalv), else: normalv)
{n1, n2} = intersection |> comp_n1_n2(xs)
over_under_diff = final_normal_v |> RTuple.mul(epsilon())
%{
t: intersection.t,
object: intersection.object,
point: p,
eyev: eyev,
normalv: final_normal_v,
inside: inside,
over_point: p |> RTuple.add(over_under_diff),
under_point: p |> RTuple.sub(over_under_diff),
reflectv: ray |> Ray.reflect(final_normal_v),
n1: n1,
n2: n2
}
end
@doc """
Computes reflectance (the amount of reflected light)
Returns Schlick's approximation of Fresnel's equation
"""
@spec schlick(computation) :: number
def schlick(comps) do
cos = RTuple.dot(comps.eyev, comps.normalv)
# Total internal reflection can occur only if n1 > n2
if comps.n1 > comps.n2 do
n = comps.n1 / comps.n2
sin2_t = n * n * (1.0 - cos * cos)
if sin2_t > 1.0 do
1.0
else
cos_t = :math.sqrt(1 - sin2_t)
# When n1 > n2 use cos(theta_t) instead
calc_schlick(comps, cos_t)
end
else
calc_schlick(comps, cos)
end
end
defp calc_schlick(comps, cos) do
r0 = ((comps.n1 - comps.n2) / (comps.n1 + comps.n2)) |> :math.pow(2)
r0 + (1 - r0) * :math.pow(1 - cos, 5)
end
# Tests if the intersection occurred inside the object
@spec inside?(RTuple.vector, RTuple.vector) :: boolean
defp inside?(normalv, eyev) do
RTuple.dot(normalv, eyev) < 0
end
# Given a hit and a list of intersections computes refractive indices of the
# materials on either side of a ray-object intersection with:
# n1 - belonging to the material being exited
# n2 - belonging to the material being entered
@spec comp_n1_n2(t, intersections) :: {number, number}
defp comp_n1_n2(intersection, xs) do
comp_n1_n2(intersection, xs, 1, 1, [])
end
defp comp_n1_n2(_intersection, [], n1, n2, _containers), do: {n1, n2}
defp comp_n1_n2(intersection, [i | rest_xs], n1, n2, containers) do
n1_new = calc_new_n(n1, i, intersection, containers)
containers_new = append_or_remove(containers, i.object)
n2_new = calc_new_n(n2, i, intersection, containers_new)
if i == intersection do
# Break here, we don't need to compute any more n values
{n1_new, n2_new}
else
comp_n1_n2(intersection, rest_xs, n1_new, n2_new, containers_new)
end
end
defp calc_new_n(current_n, i, intersection, containers) do
if i == intersection do
if containers |> Enum.empty? do
1
else
Enum.at(containers, -1).material.refractive_index
end
else
current_n
end
end
defp append_or_remove(containers, object) do
index = containers |> Enum.find_index(fn x -> x == object end)
if index do
containers |> List.delete_at(index)
else
containers |> List.insert_at(-1, object)
end
end
end
path: lib/intersection.ex | quality_prob: 0.90635 | learning_prob: 0.624336 | filename: intersection.ex | kind: starcoder
defmodule RpiFbCapture do
use GenServer
@moduledoc """
Capture the Raspberry Pi's frame buffer
"""
@type option ::
{:width, non_neg_integer()}
| {:height, non_neg_integer()}
| {:display, non_neg_integer()}
@type format :: :ppm | :rgb24 | :rgb565 | :mono | :mono_column_scan
defmodule State do
@moduledoc false
defstruct port: nil,
width: 0,
height: 0,
display_width: 0,
display_height: 0,
display_id: 0,
request: nil
end
@doc """
Start up the capture process
NOTE: The Raspberry Pi capture hardware has limitations on the window
size. In general, capturing the whole display is fine. Keeping the width
as a multiple of 16 appears to be good.
Options:
* `:width` - the width of the capture window (0 for the display width)
* `:height` - the height of the capture window (0 for the display height)
* `:display` - which display to capture (defaults to 0)
"""
@spec start_link([option()]) :: :ignore | {:error, any()} | {:ok, pid()}
def start_link(args \\ []) when is_list(args) do
GenServer.start_link(__MODULE__, args)
end
@doc """
Stop the capture process
"""
@spec stop(GenServer.server()) :: :ok
def stop(server) do
GenServer.stop(server)
end
@doc """
Capture the screen in the specified format.
Formats include:
* `:ppm` - PPM-formatted data
* `:rgb24` - Raw 24-bit RGB data 8-bits R, G, then B
* `:rgb565` - Raw 16-bit data 5-bits R, 6-bits G, 5-bits B
* `:mono` - Raw 1-bpp data
* `:mono_column_scan` - Raw 1-bpp data, but scanned down columns
"""
@spec capture(GenServer.server(), format()) ::
{:ok, RpiFbCapture.Capture.t()} | {:error, atom()}
def capture(server, format) do
GenServer.call(server, {:capture, format})
end
@doc """
Adjust the value that pixels are on for monochromatic conversion.
The threshold should be 8-bits. The capture buffer is rgb565, so the
threshold will be reduced to 5 or 6 bits for the actual comparisons.
"""
@spec set_mono_threshold(GenServer.server(), byte()) :: :ok | {:error, atom()}
def set_mono_threshold(server, threshold) do
GenServer.call(server, {:mono_threshold, threshold})
end
@doc """
Helper method for saving a screen capture to a file
Example:
```elixir
iex> {:ok, cap} = RpiFbCapture.start_link()
iex> RpiFbCapture.save(cap, "/tmp/capture.ppm")
:ok
```
"""
@spec save(GenServer.server(), Path.t(), format()) :: :ok | {:error, atom()}
def save(server, path, format \\ :ppm) do
with {:ok, cap} <- capture(server, format) do
File.write(path, cap.data)
end
end
# Server (callbacks)
@impl true
def init(args) do
executable = Application.app_dir(:rpi_fb_capture, ["priv", "rpi_fb_capture"])
width = Keyword.get(args, :width, 0)
height = Keyword.get(args, :height, 0)
display = Keyword.get(args, :display, 0)
port =
Port.open({:spawn_executable, to_charlist(executable)}, [
{:args, [to_string(display), to_string(width), to_string(height)]},
{:packet, 4},
:use_stdio,
:binary,
:exit_status
])
state = %State{port: port, width: width, height: height}
{:ok, state}
end
@impl true
def handle_call({:capture, format}, from, state) do
case state.request do
nil ->
new_state = start_capture(state, from, format)
{:noreply, new_state}
_outstanding_request ->
{:reply, {:error, :only_one_capture_at_a_time}, state}
end
end
@impl true
def handle_call({:mono_threshold, threshold}, _from, state) do
Port.command(state.port, port_cmd(:mono_threshold, threshold))
{:reply, :ok, state}
end
@impl true
def handle_info({port, {:data, data}}, %{port: port} = state) do
handle_port(state, data)
end
@impl true
def handle_info({port, {:exit_status, _status}}, %{port: port} = state) do
if state.request do
{from, _format} = state.request
GenServer.reply(from, {:error, :port_crashed})
end
{:stop, :port_crashed, state}
end
defp handle_port(
state,
<<display_id::native-32, display_width::native-32, display_height::native-32,
capture_width::native-32, capture_height::native-32>>
) do
# Capture information is 20 bytes - framebuffers are safely
# larger, so there's no chance of an accident here.
new_state = %{
state
| width: capture_width,
height: capture_height,
display_width: display_width,
display_height: display_height,
display_id: display_id
}
{:noreply, new_state}
end
defp handle_port(%{request: {from, format}} = state, data) do
result_data = process_response(state, format, data)
result = %RpiFbCapture.Capture{
data: result_data,
width: state.width,
height: state.height,
format: format
}
GenServer.reply(from, {:ok, result})
{:noreply, %{state | request: nil}}
end
defp start_capture(state, from, format) do
Port.command(state.port, port_cmd(:capture, format))
%{state | request: {from, format}}
end
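# :ppm and :rgb24 share the raw RGB capture command; the PPM header is
# prepended later in process_response/3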
defp port_cmd(:capture, :ppm), do: <<2>>
defp port_cmd(:capture, :rgb24), do: <<2>>
defp port_cmd(:capture, :rgb565), do: <<3>>
defp port_cmd(:capture, :mono), do: <<4>>
defp port_cmd(:capture, :mono_column_scan), do: <<5>>
defp port_cmd(:mono_threshold, value), do: <<6, value>>
defp process_response(state, :ppm, data) do
["P6 #{state.width} #{state.height} 255\n", data]
end
defp process_response(_state, _format, data), do: data
end
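# Hedged sketch: unpacking one :rgb565 pixel from a capture into 8-bit
# channels. Native byte order is an assumption, mirroring the
# <<...::native-32>> header parsing above; the sample pixel is arbitrary.
import Bitwise

<<pixel::native-16>> = <<0xF800::native-16>>
r = (pixel >>> 11 &&& 0x1F) <<< 3
g = (pixel >>> 5 &&& 0x3F) <<< 2
b = (pixel &&& 0x1F) <<< 3
{r, g, b}
# 0xF800 has all five red bits set, so this yields {248, 0, 0}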
|
lib/rpi_fb_capture.ex
| 0.845369
| 0.784567
|
rpi_fb_capture.ex
|
starcoder
|
defmodule Statifier.Statechart do
@moduledoc """
Represents a reactive system. Contains full specification.
This corresponds to the <scxml> element defined here:
https://www.w3.org/TR/scxml/#scxml
A conformant SCXML document must have at least one <state>, <parallel> or
<final> child. At system initialization time, the SCXML Processor must enter
the states specified by the 'initial' attribute, if it is present. If it is
not present, the Processor must enter the first state in document order.
Platforms should document their default data model.
"""
alias Statifier.StateDef
alias __MODULE__
# The name of this Statechart. It is for purely informational purposes.
@type name :: String.t()
# The string of initial StateIDs as it is contained in the source
@type initial_string :: String.t()
@type states :: [StateDef.t()]
@type t :: %__MODULE__{
name: name() | nil,
conformant: boolean,
initial: initial_string | nil,
states: states
}
defstruct name: nil,
conformant: false,
initial: nil,
states: []
def build(input) do
%Statechart{}
|> put_name(input)
|> put_initial(input)
|> put_states(input)
end
def validate(%Statechart{states: [_required_state | _rest]} = statechart) do
%Statechart{statechart | conformant: true}
end
def validate(%Statechart{} = statechart) do
%Statechart{statechart | conformant: false}
end
# Incoming values will be keyed by strings not atoms
defp put_name(%Statechart{} = statechart, %{"name" => name})
when is_binary(name) do
%Statechart{statechart | name: name}
end
defp put_name(%Statechart{} = statechart, %{}), do: statechart
defp put_initial(%Statechart{} = statechart, %{"initial" => initial})
when is_binary(initial) do
%Statechart{statechart | initial: initial}
end
defp put_initial(%Statechart{} = statechart, %{}), do: statechart
defp put_states(%Statechart{} = statechart, %{"states" => states})
     when is_list(states) do
%Statechart{statechart | states: Enum.map(states, &StateDef.build/1)}
end
defp put_states(%Statechart{} = statechart, %{}), do: statechart
end
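# Usage sketch: building from string-keyed input (see put_name/2 above) and
# validating. A chart without states is marked non-conformant rather than
# raising.
statechart =
  %{"name" => "traffic_light", "initial" => "red", "states" => []}
  |> Statifier.Statechart.build()
  |> Statifier.Statechart.validate()

statechart.conformant
#=> false (a conformant document needs at least one state)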
|
statifier-ex/lib/statifier/statechart.ex
| 0.770249
| 0.442034
|
statechart.ex
|
starcoder
|
defmodule Norm.Contract do
@moduledoc """
Design by Contract with Norm.
This module provides a `@contract` macro that can be used to define specs for arguments and the
return value of a given function.
To use contracts, call `use Norm` which also imports all `Norm` functions.
Sometimes you may want to turn off contracts checking. For example, to skip contracts in production,
set: `config :norm, enable_contracts: Mix.env != :prod`.
## Examples
defmodule Colors do
use Norm
def rgb(), do: spec(is_integer() and &(&1 in 0..255))
def hex(), do: spec(is_binary() and &String.starts_with?(&1, "#"))
@contract rgb_to_hex(r :: rgb(), g :: rgb(), b :: rgb()) :: hex()
def rgb_to_hex(r, g, b) do
# ...
end
end
"""
@doc false
def __before_compile__(env) do
contracts = Module.get_attribute(env.module, :norm_contracts)
definitions = Module.definitions_in(env.module)
for {fun, arity} <- contracts do
unless {:"__#{fun}_without_contract__", arity} in definitions do
raise ArgumentError, "contract for undefined function #{fun}/#{arity}"
end
end
end
@doc false
defmacro __using__(_) do
quote do
import Kernel, except: [@: 1, def: 2]
import Norm.Contract
Module.register_attribute(__MODULE__, :norm_contracts, accumulate: true)
@before_compile Norm.Contract
end
end
@doc false
defmacro def(call, expr) do
quote do
if unquote(fa(call)) in @norm_contracts do
unless Module.defines?(__MODULE__, unquote(fa(call))) do
Kernel.def(unquote(wrapper_call(call)), do: unquote(wrapper_body(call)))
end
Kernel.def(unquote(call_without_contract(call)), unquote(expr))
else
Kernel.def(unquote(call), unquote(expr))
end
end
end
@doc false
defmacro @{:contract, _, expr} do
defcontract(expr)
end
defmacro @other do
quote do
Kernel.@(unquote(other))
end
end
## Internals
defp defcontract(expr) do
if Application.get_env(:norm, :enable_contracts, true) do
do_defcontract(expr)
end
end
defp do_defcontract(expr) do
{call, result_spec} =
case expr do
[{:"::", _, [call, result_spec]}] ->
{call, result_spec}
_ ->
actual = Macro.to_string({:@, [], [{:contract, [], expr}]})
raise ArgumentError,
"contract must be in the form " <>
"`@contract function(arg :: spec) :: result_spec`, got: `#{actual}`"
end
{name, call_meta, arg_specs} = call
arg_vars =
for arg_spec <- arg_specs do
case arg_spec do
{:"::", _, [{arg_name, _, _}, _spec]} ->
Macro.var(arg_name, nil)
_ ->
raise ArgumentError,
"argument spec must be in the form `arg :: spec`, " <>
"got: `#{Macro.to_string(arg_spec)}`"
end
end
conform_args =
for {:"::", _, [{arg_name, _, _}, spec]} <- arg_specs do
arg = Macro.var(arg_name, nil)
quote do
Norm.conform!(unquote(arg), unquote(spec))
end
end
conform_args = {:__block__, [], conform_args}
result = Macro.var(:result, nil)
call = {name, call_meta, arg_vars}
quote do
@norm_contracts unquote(fa(call))
def unquote(call_with_contract(call)) do
unquote(conform_args)
unquote(result) = unquote(call_without_contract(call))
Norm.conform!(unquote(result), unquote(result_spec))
unquote(result)
end
end
end
## Utilities
defp wrapper_call(call) do
{name, meta, args} = call
args = for {_, index} <- Enum.with_index(args), do: Macro.var(:"arg#{index}", nil)
{name, meta, args}
end
defp wrapper_body(call) do
{name, meta, args} = call
args = for {_, index} <- Enum.with_index(args), do: Macro.var(:"arg#{index}", nil)
{:"__#{name}_with_contract__", meta, args}
end
defp call_with_contract(call) do
{name, meta, args} = call
{:"__#{name}_with_contract__", meta, args}
end
defp call_without_contract(call) do
{name, meta, args} = call
{:"__#{name}_without_contract__", meta, args}
end
defp fa(call) do
{name, _meta, args} = call
{name, length(args)}
end
end
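# Hedged sketch exercising the Colors contract from the moduledoc; the
# rgb_to_hex body and the raise-on-mismatch behavior of Norm.conform!/2 are
# our own illustration, not part of this module.
defmodule Colors do
  use Norm

  def rgb(), do: spec(is_integer() and &(&1 in 0..255))
  def hex(), do: spec(is_binary() and &String.starts_with?(&1, "#"))

  @contract rgb_to_hex(r :: rgb(), g :: rgb(), b :: rgb()) :: hex()
  def rgb_to_hex(r, g, b) do
    "#" <> Base.encode16(<<r, g, b>>)
  end
end

Colors.rgb_to_hex(255, 0, 0)
#=> "#FF0000"
# Colors.rgb_to_hex(256, 0, 0) raises, since 256 fails the rgb() spec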
|
lib/norm/contract.ex
| 0.843734
| 0.495361
|
contract.ex
|
starcoder
|
defmodule Tensorflow.DeviceProperties.EnvironmentEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: :string)
end
defmodule Tensorflow.DeviceProperties do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
type: String.t(),
vendor: String.t(),
model: String.t(),
frequency: integer,
num_cores: integer,
environment: %{String.t() => String.t()},
num_registers: integer,
l1_cache_size: integer,
l2_cache_size: integer,
l3_cache_size: integer,
shared_memory_size_per_multiprocessor: integer,
memory_size: integer,
bandwidth: integer
}
defstruct [
:type,
:vendor,
:model,
:frequency,
:num_cores,
:environment,
:num_registers,
:l1_cache_size,
:l2_cache_size,
:l3_cache_size,
:shared_memory_size_per_multiprocessor,
:memory_size,
:bandwidth
]
field(:type, 1, type: :string)
field(:vendor, 2, type: :string)
field(:model, 3, type: :string)
field(:frequency, 4, type: :int64)
field(:num_cores, 5, type: :int64)
field(:environment, 6,
repeated: true,
type: Tensorflow.DeviceProperties.EnvironmentEntry,
map: true
)
field(:num_registers, 7, type: :int64)
field(:l1_cache_size, 8, type: :int64)
field(:l2_cache_size, 9, type: :int64)
field(:l3_cache_size, 10, type: :int64)
field(:shared_memory_size_per_multiprocessor, 11, type: :int64)
field(:memory_size, 12, type: :int64)
field(:bandwidth, 13, type: :int64)
end
defmodule Tensorflow.NamedDevice do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
properties: Tensorflow.DeviceProperties.t() | nil
}
defstruct [:name, :properties]
field(:name, 1, type: :string)
field(:properties, 2, type: Tensorflow.DeviceProperties)
end
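# Hedged round-trip sketch; encode/1 and decode/1 being generated on the
# module is an assumption about the Elixir protobuf library backing
# `use Protobuf`.
props = %Tensorflow.DeviceProperties{
  type: "GPU",
  vendor: "NVIDIA",
  num_cores: 80,
  environment: %{"cuda" => "10.0"}
}

binary = Tensorflow.DeviceProperties.encode(props)
Tensorflow.DeviceProperties.decode(binary)
# Should equal `props`, with the environment map carried via EnvironmentEntry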
|
lib/tensorflow/core/protobuf/device_properties.pb.ex
| 0.790328
| 0.418905
|
device_properties.pb.ex
|
starcoder
|
defmodule Xema.Builder do
@moduledoc """
This module contains some convenience functions to generate schemas.
## Examples
iex> import Xema.Builder
...> schema = Xema.new integer(minimum: 1)
...> Xema.valid?(schema, 6)
true
...> Xema.valid?(schema, 0)
false
"""
@funs ~w(
any
atom
boolean
float
integer
keyword
list
map
number
string
tuple
)a
Enum.each(@funs, fn fun ->
@doc """
Returns a tuple of `:#{fun}` and the given keyword list.
## Examples
iex> Xema.Builder.#{fun}(key: 42)
{:#{fun}, [key: 42]}
"""
@spec unquote(fun)() :: unquote(fun)
def unquote(fun)() do
unquote(fun)
end
@spec unquote(fun)(keyword) :: {unquote(fun), keyword}
def unquote(fun)(keywords) when is_list(keywords) do
{unquote(fun), keywords}
end
end)
@doc """
Returns the tuple `{:ref, ref}`.
"""
def ref(ref) when is_binary(ref), do: {:ref, ref}
@doc """
Returns `:struct`.
"""
@spec strux :: :struct
def strux, do: :struct
@doc """
Returns a tuple of `:struct` and the given keyword list.
"""
@spec strux(keyword) :: {:struct, keyword}
def strux(keywords) when is_list(keywords), do: {:struct, keywords}
@doc """
Returns the tuple `{:struct, module: module}`.
"""
@spec strux(atom) :: {:struct, module: module}
def strux(module) when is_atom(module), do: strux(module: module)
def strux(module, keywords) when is_atom(module),
do: keywords |> Keyword.put(:module, module) |> strux()
@doc """
Creates a `schema`.
"""
defmacro xema(do: schema) do
quote do
xema :default do
unquote(schema)
end
end
end
@doc """
Creates a `schema` with the given name.
"""
defmacro xema(name, do: schema) do
quote do
if Module.get_attribute(__MODULE__, :multi) == nil do
raise "Use `use Xema` to to use the `xema/2` macro."
end
Module.register_attribute(__MODULE__, :xemas, accumulate: true)
if !@multi && length(@xemas) > 0 do
raise "Use `use Xema, multi: true` to setup multiple schema in a module."
end
Module.put_attribute(
__MODULE__,
:xemas,
{unquote(name), Xema.new(unquote(schema))}
)
def valid?(unquote(name), data),
do: Xema.valid?(@xemas[unquote(name)], data)
def validate(unquote(name), data),
do: Xema.validate(@xemas[unquote(name)], data)
def validate!(unquote(name), data),
do: Xema.validate!(@xemas[unquote(name)], data)
def cast(unquote(name), data),
do: Xema.cast(@xemas[unquote(name)], data)
def cast!(unquote(name), data),
do: Xema.cast!(@xemas[unquote(name)], data)
if Module.get_attribute(__MODULE__, :default) || !@multi do
Module.put_attribute(__MODULE__, :default, false)
def valid?(data),
do: Xema.valid?(@xemas[unquote(name)], data)
def validate(data),
do: Xema.validate(@xemas[unquote(name)], data)
def validate!(data),
do: Xema.validate!(@xemas[unquote(name)], data)
def cast(data),
do: Xema.cast(@xemas[unquote(name)], data)
def cast!(data),
do: Xema.cast!(@xemas[unquote(name)], data)
end
end
end
end
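# Hedged sketch of the xema/2 macro in a module; `use Xema, multi: true` is
# taken from the raise messages above, while the map/properties schema shape
# is an assumption about the wider Xema API.
defmodule MySchemas do
  use Xema, multi: true

  xema :person do
    map(properties: %{name: string(), age: integer(minimum: 0)})
  end
end

MySchemas.valid?(:person, %{name: "Ada", age: 36})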
|
lib/xema/builder.ex
| 0.867864
| 0.429998
|
builder.ex
|
starcoder
|
defmodule CodeFrame do
alias IO.ANSI
@moduledoc """
Generate an excerpt of a string and highlight a specific line
"""
@doc """
Generate the code frame with the given string.
## Examples
Without coloring (`line_pos` is a zero-based line index; printed gutter numbers are one-based):
```bash
iex> CodeFrame.build(my_string, 4, colors: false)
  4 | something
> 5 | highlighted line
    | ^^^^^^^^^^^^^^^^
  6 | something
```
```
"""
@type options :: [lines_before: integer, lines_after: integer, colors: boolean]
@spec build(String.t(), integer, options) :: String.t()
def build(input, line_pos, options \\ []) do
lines = input |> String.split("\n")
count = length(lines)
# Parse Options
show_colors = Keyword.get(options, :colors, ANSI.enabled?())
l_before = max(line_pos - Keyword.get(options, :lines_before, 2), 0)
l_after = min(line_pos + Keyword.get(options, :lines_after, 2), count)
lines
|> Enum.with_index()
|> Enum.filter(fn {_, i} -> i >= l_before and i <= l_after end)
|> Enum.map(fn {x, i} -> print_column(x, i + 1, i == line_pos, show_colors, count) end)
|> Enum.join("\n")
end
defp print_column(content, line, active, show_colors, total) do
diff = length(Integer.digits(total, 10)) - length(Integer.digits(line, 10))
indent = String.duplicate(" ", diff)
if show_colors do
print_column_colors(content, line, active, indent)
else
print_column_raw(content, line, active, indent)
end
end
defp print_column_colors(content, line, active, indent) do
if active do
# Highlighted code line
prefix = ANSI.format([:bright, :red, "> ", indent, to_string(line), " | "])
code = ANSI.format([:light_red_background, :black, " ", content, " ", ANSI.reset()])
[prefix, code, ANSI.reset()]
else
# Two leading spaces keep the gutter aligned with the "> " marker
prefix = ANSI.format([:light_black, "  ", indent, to_string(line), " | "])
ANSI.format([prefix, :default_color, content, ANSI.reset()])
end
end
defp print_column_raw(content, line, active, indent) do
if active do
# Highlighted code line plus a caret row underlining it
underline = [" | ", String.duplicate("^", String.length(content))]
indent2 = String.duplicate(" ", length(Integer.digits(line, 10)))
["> ", indent, to_string(line), " | ", content, "\n  ", indent, indent2, underline]
else
["  ", indent, to_string(line), " | ", content]
end
end
end
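# Usage sketch: `line_pos` is a zero-based index into the split lines, while
# the printed gutter numbers are one-based (`i + 1` in build/3 above).
"alpha\nbeta\ngamma"
|> CodeFrame.build(1, colors: false)
|> IO.puts()
#   1 | alpha
# > 2 | beta
#     | ^^^^
#   3 | gamma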
|
lib/code_frame.ex
| 0.828072
| 0.75611
|
code_frame.ex
|
starcoder
|
defmodule Gyx.Environments.Gym do
@moduledoc """
This module is an API for accessing
Python OpenAI Gym methods
"""
alias Gyx.Helpers.Python
alias Gyx.Core.{Env, Exp}
alias Gyx.Core.Spaces.{Discrete, Box, Tuple}
import Gyx.Gym.Utils, only: [gyx_space: 1]
use Env
use GenServer
require Logger
defstruct env: nil,
current_state: nil,
session: nil,
action_space: nil,
observation_space: nil
@type space :: Discrete.t() | Box.t() | Tuple.t()
@type t :: %__MODULE__{
env: any(),
current_state: any(),
session: pid(),
action_space: space(),
observation_space: space()
}
@impl true
def init(reference_name) do
python_session = Python.start()
name =
case reference_name do
nil -> inspect(self())
name -> ":#{name}"
end
Logger.warn(inspect(self()))
Logger.warn("Gym environment not associated yet with current #{__MODULE__} process")
Logger.warn("In order to assign a Gym environment to this process,
please use #{__MODULE__}.make(#{name}, \"ENVIRONMENTNAME\")\n")
{:ok,
%__MODULE__{
env: nil,
current_state: nil,
session: python_session,
action_space: nil,
observation_space: nil
}}
end
def start_link(_, opts) do
GenServer.start_link(__MODULE__, opts[:name], opts)
end
def render(environment) do
GenServer.call(environment, {:render, :python})
end
def render(environment, output_device) do
GenServer.call(environment, {:render, output_device})
end
def render(environment, output_device, opts) do
GenServer.call(environment, {:render, output_device, opts})
end
def make(environment, environment_name) do
GenServer.call(environment, {:make, environment_name})
end
@impl true
def reset(environment) do
GenServer.call(environment, :reset)
end
def getRGB(environment) do
GenServer.call(environment, :get_rgb)
end
def handle_call(
{:make, environment_name},
_from,
%{session: session}
) do
Logger.info("Starting OpenAI Gym environment: " <> environment_name, ansi_color: :magenta)
{env, initial_state, action_space, observation_space} =
Python.call(
session,
:gym_interface,
:make,
[environment_name]
)
Logger.info("Environment created on Python process: " <> inspect(session),
ansi_color: :magenta
)
{:reply, :ok,
%__MODULE__{
env: env,
current_state: initial_state,
session: session,
action_space: gyx_space(action_space),
observation_space: gyx_space(observation_space)
}}
end
def handle_call({:act, action}, _from, state) do
{next_env, {gym_state, reward, done, info}} =
Python.call(
state.session,
:gym_interface,
:step,
[state.env, action]
)
experience = %Exp{
state: state.current_state,
action: action,
next_state: gym_state,
reward: reward,
done: done,
info: %{gym_info: info}
}
{:reply, experience, %{state | env: next_env, current_state: gym_state}}
end
@impl true
def handle_call(:reset, _from, state) do
{env, initial_state, action_space, observation_space} =
Python.call(state.session, :gym_interface, :reset, [state.env])
{:reply, %Exp{},
%{
state
| env: env,
current_state: initial_state,
action_space: gyx_space(action_space),
observation_space: gyx_space(observation_space)
}}
end
def handle_call({:render, :python}, _from, state) do
Python.call(state.session, :gym_interface, :render, [state.env])
{:reply, :ok, state}
end
def handle_call({:render, :terminal}, _from, state) do
with rgb <- get_rgb(state.session, state.env) do
rgb
|> Matrex.resize(0.5)
|> Matrex.heatmap(:color8)
|> (fn _ -> :ok end).()
end
{:reply, :ok, state}
end
def handle_call({:render, :terminal, [scale: scale]}, _from, state) do
with rgb <- get_rgb(state.session, state.env) do
rgb
|> Matrex.resize(scale)
|> Matrex.heatmap(:color8)
|> (fn _ -> :ok end).()
end
{:reply, :ok, state}
end
def handle_call(:get_rgb, _from, state) do
screen_rgb = get_rgb(state.session, state.env)
{:reply, screen_rgb, state}
end
def handle_call(:observe, _from, state), do: {:reply, state.current_state, state}
defp get_rgb(python_session, env) do
with rgb_matrix <- Python.call(python_session, :gym_interface, :getScreenRGB2, [env]) do
rgb_matrix
|> Matrex.new()
end
end
end
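# Hedged driving sketch, following the hint logged in init/1; the
# "CartPole-v1" environment name and the direct {:act, action} call are
# assumptions (there is no public act/2 wrapper in this module).
{:ok, env} = Gyx.Environments.Gym.start_link(nil, name: :cartpole)
:ok = Gyx.Environments.Gym.make(env, "CartPole-v1")
%Gyx.Core.Exp{} = Gyx.Environments.Gym.reset(env)
experience = GenServer.call(env, {:act, 0})
experience.reward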
|
lib/environments/gym/environment.ex
| 0.798029
| 0.482124
|
environment.ex
|
starcoder
|
defmodule GoogleMaps.Geocode do
@moduledoc """
Perform geocoding-related lookups against the Google Maps API.
"""
use RepoCache, ttl: :timer.hours(24)
alias GoogleMaps.Geocode.Address
alias GoogleMaps.Geocode.Input
require Logger
@type t :: {:ok, nonempty_list(Address.t())} | {:error, :zero_results | :internal_error}
# Bounding box for the service area, keyed by true compass direction
# (north/south are latitudes, east/west are longitudes)
@bounds %{
south: 41.3193,
west: -71.9380,
north: 42.8266,
east: -69.6189
}
@http_pool Application.get_env(:google_maps, :http_pool)
@path "/maps/api/geocode/json"
# Visits to the "Transit Near Me" page from Algolia search results already have lat/lng geocoding, but use
# different parameters for the address. We track "address" as one of our analytics
# parameters for Algolia search results, but the Phoenix form helper used in the
# /transit-near-me template requires that we use a nested "locations"["address"] data structure.
# This helper function simply looks for the address in one of those two values and falls
# back to using the lat/lng if neither can be found.
# Also used in the "Proposed Sales Location" page to identify whether an address is formatted or a String containing coordinates
def formatted_address(%{"address" => address}, _options), do: address
def formatted_address(%{"location" => %{"address" => address}}, _options), do: address
def formatted_address(%{"latitude" => lat, "longitude" => lng}, options) do
{parsed_lat, _} = Float.parse(lat)
{parsed_lng, _} = Float.parse(lng)
case options.reverse_geocode_fn.(parsed_lat, parsed_lng) do
{:ok, [first | _]} ->
first.formatted
_ ->
"#{lat}, #{lng}"
end
end
@spec calculate_position(
map(),
(String.t() -> GoogleMaps.Geocode.Address.t())
) :: {GoogleMaps.Geocode.Address.t(), String.t()}
def calculate_position(%{"latitude" => lat_str, "longitude" => lng_str} = params, geocode_fn) do
case {Float.parse(lat_str), Float.parse(lng_str)} do
{{lat, ""}, {lng, ""}} ->
addr = %GoogleMaps.Geocode.Address{
latitude: lat,
longitude: lng,
formatted: lat_str <> "," <> lng_str
}
parse_geocode_response({:ok, [addr]})
_ ->
params
|> Map.delete("latitude")
|> Map.delete("longitude")
|> calculate_position(geocode_fn)
end
end
def calculate_position(%{"location" => %{"address" => address}}, geocode_fn) do
address
|> geocode_fn.()
|> parse_geocode_response()
end
def calculate_position(_params, _geocode_fn) do
{%{}, ""}
end
defp parse_geocode_response({:ok, [location | _]}) do
{location, location.formatted}
end
defp parse_geocode_response(_) do
{%{}, ""}
end
@spec check_address(String.t(), map) :: String.t()
def check_address(address, opts) do
# address can be a String containing "lat,lon" so we check for that case
[lat, lon] =
case String.split(address, ",", parts: 2) do
[lat, lon] -> [lat, lon]
_ -> ["error", "error"]
end
if Float.parse(lat) == :error || Float.parse(lon) == :error do
address
else
formatted_address(%{"latitude" => lat, "longitude" => lon}, opts)
end
end
@spec geocode(String.t()) :: t
def geocode(address) when is_binary(address) do
cache(address, fn address ->
address
|> geocode_url
|> GoogleMaps.signed_url()
|> HTTPoison.get([], hackney: [pool: @http_pool])
|> parse_google_response(%Input{address: address})
end)
end
@spec geocode_by_place_id(String.t()) :: t
def geocode_by_place_id(place_id) do
cache(place_id, fn place_id ->
place_id
|> geocode_by_place_id_url()
|> GoogleMaps.signed_url()
|> HTTPoison.get([], hackney: [pool: @http_pool])
|> parse_google_response(%Input{address: place_id})
end)
end
@spec reverse_geocode(float, float) :: t
def reverse_geocode(latitude, longitude) when is_float(latitude) and is_float(longitude) do
cache({latitude, longitude}, fn {latitude, longitude} ->
{latitude, longitude}
|> reverse_geocode_url()
|> GoogleMaps.signed_url()
|> HTTPoison.get([], hackney: [pool: @http_pool])
|> parse_google_response(%Input{latitude: latitude, longitude: longitude})
end)
end
defp geocode_url(address) do
URI.to_string(%URI{
path: @path,
query:
URI.encode_query(
address: address,
# Google expects "south,west|north,east" (lat,lng pairs)
bounds: "#{@bounds.south},#{@bounds.west}|#{@bounds.north},#{@bounds.east}"
)
})
end
defp geocode_by_place_id_url(place_id) do
URI.to_string(%URI{
path: @path,
query: URI.encode_query(place_id: place_id)
})
end
defp reverse_geocode_url({latitude, longitude}) do
URI.to_string(%URI{
path: @path,
query: URI.encode_query(latlng: "#{latitude},#{longitude}")
})
end
defp parse_google_response({:error, error}, input) do
internal_error(input, "HTTP error", fn -> "error=#{inspect(error)}" end)
end
defp parse_google_response({:ok, %{status_code: 200, body: body}}, input) do
case Poison.Parser.parse(body) do
{:error, :invalid} ->
internal_error(input, "Error parsing to JSON", fn -> "body=#{inspect(body)}" end)
{:error, {:invalid, parse_error_message}} ->
internal_error(input, "Error parsing to JSON", fn ->
"body=#{inspect(body)} error_message=#{inspect(parse_error_message)}"
end)
{:ok, json} ->
parse_json(json, input)
end
end
defp parse_google_response({:ok, %{status_code: code, body: body}}, input) do
internal_error(input, "Unexpected HTTP code", fn ->
"code=#{inspect(code)} body=#{inspect(body)}"
end)
end
@spec parse_json(map, Input.t()) :: t
defp parse_json(%{"status" => "OK", "results" => results}, input) do
results(input, Enum.map(results, &parse_result/1))
end
defp parse_json(%{"status" => "ZERO_RESULTS"}, input) do
zero_results(input)
end
defp parse_json(%{"status" => status} = parsed, input) do
internal_error(input, "API error", fn ->
"status=#{inspect(status)} error_message=#{inspect(Map.get(parsed, "error_message", ""))}"
end)
end
@spec parse_result(map) :: Address.t()
defp parse_result(%{
"geometry" => %{"location" => %{"lat" => lat, "lng" => lng}},
"formatted_address" => address
}) do
%Address{
formatted: address,
latitude: lat,
longitude: lng
}
end
@spec results(Input.t(), [Address.t()]) :: t
defp results(input, []) do
zero_results(input)
end
defp results(input, results) do
_ = Logger.info(fn -> "#{__MODULE__} input=#{inspect(input)} result=#{inspect(results)}" end)
{:ok, results}
end
@spec zero_results(Input.t()) :: t
defp zero_results(input) do
_ = Logger.info(fn -> "#{__MODULE__} input=#{inspect(input)} result=ZERO_RESULTS" end)
{:error, :zero_results}
end
@spec internal_error(Input.t(), String.t(), (() -> String.t())) :: t
defp internal_error(input, message, error_fn) do
_ =
Logger.warn(fn ->
"#{__MODULE__} input=#{inspect(input)} message=#{inspect(message)} #{error_fn.()}"
end)
{:error, :internal_error}
end
end
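# Sketch: calculate_position/2 short-circuits when parseable coordinates are
# already present, never invoking the passed geocoder function.
{address, formatted} =
  GoogleMaps.Geocode.calculate_position(
    %{"latitude" => "42.3601", "longitude" => "-71.0589"},
    &GoogleMaps.Geocode.geocode/1
  )

formatted
#=> "42.3601,-71.0589"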
|
apps/google_maps/lib/google_maps/geocode.ex
| 0.80765
| 0.574693
|
geocode.ex
|
starcoder
|
defprotocol Getypex do
@moduledoc since: "0.1.0"
@fallback_to_any true
@spec check(term) :: String.t()
def check(term)
end
defimpl Getypex, for: Tuple do
def check(_tuple), do: "Tuple"
end
defimpl Getypex, for: Integer do
def check(_integer), do: "Integer"
end
defimpl Getypex, for: Float do
def check(_float), do: "Float"
end
defimpl Getypex, for: Reference do
def check(_reference), do: "Reference"
end
defimpl Getypex, for: PID do
def check(_pid), do: "PID"
end
defimpl Getypex, for: Port do
def check(_port), do: "Port"
end
defimpl Getypex, for: Map do
def check(_map), do: "Map"
end
defimpl Getypex, for: [Date, Time, DateTime, NaiveDateTime] do
def check(_), do: to_string(@for) |> String.replace("Elixir.", "")
end
defimpl Getypex, for: Atom do
def check(atom) when is_boolean(atom), do: "Atom (Boolean)"
def check(atom) do
atom
|> :code.get_object_code()
|> check_atom(atom)
end
defp check_atom({_, _, _}, _atom), do: "Atom (Module)"
defp check_atom(:error, atom) do
case match?("Elixir." <> _, Atom.to_string(atom)) do
true ->
"Atom (Unknown Module)"
false ->
"Atom"
end
end
end
defimpl Getypex, for: BitString do
def check(bitstring) do
cond do
is_binary(bitstring) and String.printable?(bitstring) -> "String (UTF-8)"
is_binary(bitstring) and String.valid?(bitstring) -> "String (UTF-8 non-printable)"
is_binary(bitstring) -> "Binary"
is_bitstring(bitstring) -> "Bitstring"
end
end
end
defimpl Getypex, for: List do
def check(list) do
cond do
list == [] -> "List (Empty)"
List.ascii_printable?(list) -> "List (Charlist)"
Keyword.keyword?(list) -> "List (Keyword list)"
List.improper?(list) -> "List (Improper list)"
true -> "List"
end
end
end
defimpl Getypex, for: Function do
def check(function) do
info = Function.info(function)
case info[:type] === :external and info[:env] === [] do
true ->
"Function (Named)"
false ->
"Function (Anonymous)"
end
end
end
defimpl Getypex, for: Any do
def check(_struct), do: "Structs"
end
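# Quick dispatch sketch across a few of the implementations above:
Getypex.check("héllo")   #=> "String (UTF-8)"
Getypex.check('hello')   #=> "List (Charlist)"
Getypex.check({:ok, 1})  #=> "Tuple"
Getypex.check(1..3)      #=> "Structs" (Range has no impl, so Any is used)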
|
lib/typex.ex
| 0.695441
| 0.406361
|
typex.ex
|
starcoder
|
defmodule Statisaur do
@moduledoc """
Statisaur - Statistics functions
This module currently contains the functions for
summary statistics.
"""
@doc """
Calculate the smallest value from a list of numbers.
### Examples
iex> Statisaur.min([1,2,3])
1
iex> Statisaur.min([5,0.5,2,3])
0.5
iex> Statisaur.min([])
** (ArgumentError) argument must be non-zero length list
"""
def min(list) when is_list(list) and length(list) > 0 do
Enum.min(list)
end
def min(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate the largest value from a list of numbers.
### Examples
iex> Statisaur.max([1,2,3])
3
iex> Statisaur.max([5.1,0.5,2,3])
5.1
iex> Statisaur.max([])
** (ArgumentError) argument must be non-zero length list
"""
def max(list) when is_list(list) and length(list) > 0 do
Enum.max(list)
end
def max(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate the sum from a list of numbers
### Examples
iex> Statisaur.sum([1,3,5,7,9])
25
iex> Statisaur.sum([1,1])
2
iex> Statisaur.sum([0.5,0.5])
1.0
iex> Statisaur.sum([])
** (ArgumentError) argument must be non-zero length list
"""
def sum(list) when is_list(list) and length(list) > 0, do: Enum.sum(list)
def sum(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate the mean from a list of numbers
### Examples
iex> Statisaur.mean([1,3,5,7,9])
5.0
iex> Statisaur.mean([0.1,0.2,0.6])
0.3
iex> Statisaur.mean([])
** (ArgumentError) argument must be non-zero length list
"""
def mean(list) when is_list(list) and length(list) > 0, do: sum(list) / length(list)
def mean(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate the median from a list of numbers
### Examples
iex> Statisaur.median([1,3,5,7,9])
5
iex> Statisaur.median([1,1])
1.0
iex> Statisaur.median([0.1,0.4,0.6,0.9])
0.5
iex> Statisaur.median([])
** (ArgumentError) argument must be non-zero length list
"""
def median([]), do: raise(ArgumentError, "argument must be non-zero length list")
def median(list) when is_list(list) do
n = length(list)
sorted = list |> Enum.sort()
pivot = round(n / 2)
case rem(n, 2) do
0 ->
a = sorted |> Enum.at(pivot)
b = sorted |> Enum.at(pivot - 1)
# median for an even-sized set is the mean of the middle numbers
(a + b) / 2
# this seems weird, but Float floor yields float not int :()
_ ->
sorted |> Enum.at(round(Float.floor(n / 2)))
end
end
@doc """
Calculate the frequency counts of distinct elements.
### Examples
iex> Statisaur.frequencies([1])
[{1,1}]
iex> Statisaur.frequencies([1,2,2,3])
[{1,1},{2,2},{3,1}]
iex> Statisaur.frequencies([])
** (ArgumentError) argument must be non-zero length list
"""
def frequencies(list) when is_list(list) and length(list) > 0 do
sorted = list |> Enum.sort()
vals = sorted |> Enum.uniq()
freqs = vals |> Enum.map(fn v -> Enum.count(sorted, fn x -> x == v end) end)
Enum.zip(vals, freqs)
end
def frequencies(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate most commonly occurring number from a list of numbers
### Examples
iex> Statisaur.mode([1,1,2,3])
[1]
iex> Statisaur.mode([1.0,2.0,2.0,3.0,3.0])
[2.0,3.0]
iex> Statisaur.mode([])
** (ArgumentError) argument must be non-zero length list
"""
def mode([]), do: raise(ArgumentError, "argument must be non-zero length list")
def mode(list) when is_list(list) do
freqs = frequencies(list)
sorted_freqs = freqs |> Enum.sort_by(fn {_, f} -> f end, &>=/2)
{_, mode_guess} = sorted_freqs |> Enum.at(0)
sorted_freqs |> Enum.filter(fn {_, f} -> f >= mode_guess end) |> Enum.map(fn {v, _} -> v end)
end
@doc """
Create an element-wise kth-power of a list compared to a reference value.
### Examples
iex> Statisaur.powered_error( [1,2,3], 2, 1)
[-1.0,0.0,1.0]
iex> Statisaur.powered_error([],2,1)
** (ArgumentError) must be supplied a non-zero length list
"""
def powered_error([], _, _), do: raise(ArgumentError, "must be supplied a non-zero length list")
def powered_error(list, reference, k) when is_list(list) and length(list) > 1 do
list |> Enum.map(fn x -> :math.pow(x - reference, k) end)
end
@doc """
Calculate the variance from a list of numbers
### Examples
iex> Statisaur.variance([1,3,5,7,9])
10.0
iex> Statisaur.variance([0.1,0.2,0.6])
0.06999999999999999
iex> Statisaur.variance([])
** (ArgumentError) argument must be non-zero length list
"""
def variance([]), do: raise(ArgumentError, "argument must be non-zero length list")
def variance(list) when is_list(list) and length(list) > 1 do
mu = mean(list)
diffmeans = list |> powered_error(mu, 2) |> Enum.sum()
df = length(list) - 1
diffmeans / df
end
@doc """
Calculates the standard deviation from a list of numbers.
### Examples
iex> Statisaur.stddev([1,3,5,7,9]) |> Float.round(6)
3.162278
iex> Statisaur.stddev([0.1,0.2,0.6]) |> Float.round(6)
0.264575
iex> Statisaur.stddev([])
** (ArgumentError) argument must be non-zero length list
"""
def stddev(list) when is_list(list) and length(list) > 1 do
list |> variance |> :math.sqrt()
end
def stddev(_list), do: raise(ArgumentError, "argument must be non-zero length list")
@doc """
Calculate the kth moment with respect to the origin of a list of numbers.
###Examples
iex> Statisaur.raw_moment([1,2,3,4],1)
2.5
iex> Statisaur.raw_moment([1,2,3,4],2)
7.5
iex> Statisaur.raw_moment([1,2,3,4],3)
25.0
iex> Statisaur.raw_moment([1,2,3,4],4)
88.5
iex> Statisaur.raw_moment([],4)
** (ArgumentError) must be supplied a non-zero length list
"""
def raw_moment([], _), do: raise(ArgumentError, "must be supplied a non-zero length list")
def raw_moment(list, k) when is_list(list) and length(list) > 1 do
count = length(list)
(list |> Enum.map(fn x -> :math.pow(x, k) end) |> Enum.sum()) / count
end
@doc """
Calculate the kth central moment of a list of numbers.
###Examples
iex> Statisaur.central_moment([1,2,3,4],1)
0.0
iex> Statisaur.central_moment([1,2,3,4],2)
1.25
iex> Statisaur.central_moment([1,2,3,4],3)
0.0
iex> Statisaur.central_moment([1,2,3,4],4) |> Float.round(4)
2.5625
iex> Statisaur.central_moment([], 5)
** (ArgumentError) must be supplied a non-zero length list
"""
def central_moment([], _), do: raise(ArgumentError, "must be supplied a non-zero length list")
def central_moment(list, k) when is_list(list) and length(list) > 1 do
count = length(list)
mu = mean(list)
(list |> powered_error(mu, k) |> Enum.sum()) / count
end
@doc """
Calculate the kth standardized moment of a list of numbers. See [here](https://en.wikipedia.org/wiki/Standardized_moment) for definition.
###Examples
iex> Statisaur.standardized_moment([1,2,3,4],1)
0.0
iex> Statisaur.standardized_moment([1,2,3,4],2)
1.0
iex> Statisaur.standardized_moment([1,2,3,4],3)
0.0
iex> Statisaur.standardized_moment([1,2,3,4],4)
1.64
iex> Statisaur.standardized_moment([], 4)
** (ArgumentError) must be supplied a non-zero length list
"""
def standardized_moment([], _),
do: raise(ArgumentError, "must be supplied a non-zero length list")
def standardized_moment(list, k) when is_list(list) and length(list) > 1 do
m1 = raw_moment(list, 1)
num = (powered_error(list, m1, k) |> Enum.sum()) / length(list)
denom = :math.pow((powered_error(list, m1, 2) |> Enum.sum()) / length(list), k / 2)
num / denom
end
@doc """
Calculates the skewness (3rd standardized moment) of a list of numbers.
###Examples
iex> Statisaur.skewness([1,2,3,4])
0.0
iex> Statisaur.skewness([])
** (ArgumentError) argument must be non-zero length list
"""
def skewness([]), do: raise(ArgumentError, "argument must be non-zero length list")
def skewness(list) when is_list(list) and length(list) > 1 do
standardized_moment(list, 3)
end
@doc """
Calculates the kurtosis (4th standardized moment) of a list of numbers.
###Examples
iex> Statisaur.kurtosis([1,2,3,4])
1.64
iex> Statisaur.kurtosis([])
** (ArgumentError) argument must be non-zero length list
"""
def kurtosis([]), do: raise(ArgumentError, "argument must be non-zero length list")
def kurtosis(list) when is_list(list) and length(list) > 1 do
standardized_moment(list, 4)
end
@doc """
Calculates the coefficient of variation for a list of numbers.
###Examples
iex> Statisaur.coefficient_of_variation([1,2,3,4]) |> Float.round(4)
0.5164
iex> Statisaur.coefficient_of_variation([])
** (ArgumentError) argument must be non-zero length list
"""
def coefficient_of_variation([]),
do: raise(ArgumentError, "argument must be non-zero length list")
def coefficient_of_variation(list) when is_list(list) and length(list) > 1 do
stddev(list) / mean(list)
end
end
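# Sanity-check sketch tying the estimators together: variance/1 divides the
# summed squared error by n - 1, while central_moment/2 divides by n.
xs = [1, 3, 5, 7, 9]
Statisaur.variance(xs)                   #=> 10.0 (40 / 4)
Statisaur.central_moment(xs, 2)          #=> 8.0  (40 / 5)
Statisaur.stddev(xs) |> Float.round(6)   #=> 3.162278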
|
lib/statisaur.ex
| 0.861334
| 0.53783
|
statisaur.ex
|
starcoder
|
defmodule Timex.DateFormat.Formatters.DefaultFormatter do
@moduledoc """
Date formatting language used by default by the `DateFormat` module.
This is a novel formatting language introduced with `DateFormat`. Its main
advantage is simplicity and usage of mnemonics that are easy to memorize.
## Directive format
A directive is an optional _padding specifier_ followed by a _mnemonic_, both
enclosed in braces (`{` and `}`):
{<padding><mnemonic>}
Supported padding specifiers:
* `0` -- pads the number with zeros. Applicable to mnemonics that produce numerical result.
* `_` -- pads the number with spaces. Applicable to mnemonics that produce numerical result.
When padding specifier is omitted, numbers will not be padded.
## List of all directives
### Years and centuries
* `{YYYY}` - full year number (0..9999)
* `{YY}` - the last two digits of the year number (0..99)
* `{C}` - century number (0..99)
* `{WYYYY}` - year number corresponding to the date's ISO week (0..9999)
* `{WYY}` - year number (2 digits) corresponding to the date's ISO week (0..99)
### Months
* `{M}` - month number (1..12)
* `{Mshort}` - abbreviated month name (Jan..Dec, no padding)
* `{Mfull}` - full month name (January..December, no padding)
### Days and weekdays
* `{D}` - day number (1..31)
* `{Dord}` - ordinal day of the year (1..366)
* `{WDmon}` - weekday, Monday first (1..7, no padding)
* `{WDsun}` - weekday, Sunday first (0..6, no padding)
* `{WDshort}` - abbreviated weekday name (Mon..Sun, no padding)
* `{WDfull}` - full weekday name (Monday..Sunday, no padding)
### Weeks
* `{Wiso}` - ISO week number (1..53)
* `{Wmon}` - week number of the year, Monday first (0..53)
* `{Wsun}` - week number of the year, Sunday first (0..53)
### Time
* `{h24}` - hour of the day (0..23)
* `{h12}` - hour of the day (1..12)
* `{m}` - minutes of the hour (0..59)
* `{s}` - seconds of the minute (0..60)
* `{s-epoch}` - number of seconds since UNIX epoch
* `{am}` - lowercase am or pm (no padding)
* `{AM}` - uppercase AM or PM (no padding)
### Time zones
* `{Zname}` - time zone name, e.g. `UTC` (no padding)
* `{Z}` - time zone offset in the form `+0230` (no padding)
* `{Z:}` - time zone offset in the form `-07:30` (no padding)
* `{Z::}` - time zone offset in the form `-07:30:00` (no padding)
### Compound directives
These are shortcut directives corresponding to parts of the ISO 8601
specification. The benefit of using these over manually constructed ISO
formats is that these directives convert the date to UTC for you.
* `{ISO}` - `<date>T<time><offset>`. Full date and time
specification (e.g. `2007-08-13T16:48:01 +0300`)
* `{ISOz}` - `<date>T<time>Z`. Full date and time in UTC (e.g.
`2007-08-13T13:48:01Z`)
* `{ISOdate}` - `YYYY-MM-DD`. That is, 4-digit year number, followed by
2-digit month and day numbers (e.g. `2007-08-13`)
* `{ISOtime}` - `hh:mm:ss`. That is, 2-digit hour, minute, and second,
separated by colons (e.g. `13:04:05`). Midnight is 00 hours.
* `{ISOweek}` - `YYYY-Www`. That is, ISO week-based year, followed by ISO
week number (e.g. `2007-W09`)
* `{ISOweek-day}` - `YYYY-Www-D`. That is, an `{ISOweek}`, additionally
followed by weekday (e.g. `2007-W09-1`)
* `{ISOord}` - `YYYY-DDD`. That is, year number, followed by the ordinal
day number (e.g. `2007-113`)
These directives provide support for miscellaneous common formats:
* `{RFC822}` - e.g. `Mon, 05 Jun 14 23:20:59 UT`
* `{RFC822z}` - e.g. `Mon, 05 Jun 14 23:20:59 Z`
* `{RFC1123}` - e.g. `Tue, 05 Mar 2013 23:25:19 GMT`
* `{RFC1123z}` - e.g. `Tue, 05 Mar 2013 23:25:19 +0200`
* `{RFC3339}` - e.g. `2013-03-05T23:25:19+02:00`
* `{RFC3339z}` - e.g. `2013-03-05T23:25:19Z`
* `{ANSIC}` - e.g. `Tue Mar 5 23:25:19 2013`
* `{UNIX}` - e.g. `Tue Mar 5 23:25:19 PST 2013`
* `{kitchen}` - e.g. `3:25PM`
"""
use Timex.DateFormat.Formatters.Formatter
alias Timex.DateTime
alias Timex.Timezone
alias Timex.DateFormat.FormatError
alias Timex.Parsers.DateFormat.Directive
alias Timex.Parsers.DateFormat.Tokenizers.Default, as: Tokenizer
@spec tokenize(String.t) :: {:ok, [%Directive{}]} | {:error, term}
defdelegate tokenize(format_string), to: Tokenizer
@spec format!(%DateTime{}, String.t) :: String.t | no_return
def format!(%DateTime{} = date, format_string) do
case format(date, format_string) do
{:ok, result} -> result
{:error, reason} -> raise FormatError, message: reason
end
end
@spec format(%DateTime{}, String.t) :: {:ok, String.t} | {:error, term}
def format(%DateTime{} = date, format_string) do
case Tokenizer.tokenize(format_string) do
{:error, _} = error ->
error
directives when is_list(directives) ->
if Enum.any?(directives, fn dir -> dir.type != :char end) do
do_format(date, directives, <<>>)
else
{:error, "There were no formatting directives in the provided string."}
end
end
end
@doc """
If one wants to use the default formatting semantics with a different
tokenizer, this is the way.
"""
@spec format(%DateTime{}, String.t, atom) :: {:ok, String.t} | {:error, term}
def format(%DateTime{} = date, format_string, tokenizer) do
case tokenizer.tokenize(format_string) do
{:error, _} = error ->
error
directives when is_list(directives) ->
if Enum.any?(directives, fn dir -> dir.type != :char end) do
do_format(date, directives, <<>>)
else
{:error, "There were no formatting directives in the provided string."}
end
end
end
defp do_format(_date, [], result), do: {:ok, result}
defp do_format(_date, _, {:error, _} = error), do: error
defp do_format(date, [%Directive{type: :char, token: char} | dirs], result) when is_binary(char) do
do_format(date, dirs, <<result::binary, char::binary>>)
end
defp do_format(date, [%Directive{type: :char, token: char} | dirs], result) do
do_format(date, dirs, <<result::binary, char::utf8>>)
end
defp do_format(date, [%Directive{token: token, type: :numeric, pad: false} | dirs], result) do
formatted = format_token(token, date)
do_format(date, dirs, <<result::binary, formatted::binary>>)
end
defp do_format(date, [%Directive{token: token, type: :numeric, pad: pad, pad_type: pad_type, len: len_spec} | dirs], result) do
formatted = format_token(token, date)
len = String.length(formatted)
padding = case len_spec do
^len -> <<>>
_..hi -> build_padding(len, hi, pad_char(pad_type), pad)
:word -> pad_char(pad_type) |> String.duplicate(pad)
hi when is_number(hi) -> build_padding(len, hi, pad_char(pad_type), pad)
_ -> {:error, "Invalid numeric length specification: #{len_spec}"}
end
do_format(date, dirs, <<result::binary, padding::binary, formatted::binary>>)
end
defp do_format(date, [%Directive{token: token, type: :format, format: [tokenizer: tokenizer, format: fmt]} | dirs], result) do
# Shift the date if this format is in Zulu time
date = case token do
token when token in [:iso_8601z, :rfc_822z, :rfc3339z, :rfc_1123z] ->
Timezone.convert(date, Timezone.get(:utc))
_ ->
date
end
case format(date, fmt, tokenizer) do
{:error, _} = error -> error
{:ok, formatted} ->
do_format(date, dirs, <<result::binary, formatted::binary>>)
end
end
defp do_format(date, [%Directive{token: token} | dirs], result) do
formatted = format_token(token, date)
do_format(date, dirs, <<result::binary, formatted::binary>>)
end
defp do_format(_, dirs, _), do: {:error, "Unexpected directive type: #{dirs |> Macro.to_string}"}
defp pad_char(:zero), do: <<?0>>
defp pad_char(:space), do: <<32>>
defp build_padding(len, hi, pad, padding) do
cond do
len >= hi -> <<>>
hi - len == padding -> pad |> String.duplicate(hi - len)
hi - len > padding -> pad |> String.duplicate(padding)
hi - len < padding -> pad |> String.duplicate(padding - (hi - len))
true -> <<>>
end
end
end
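# Hedged usage sketch against this pre-3.0 Timex API; Timex.Date.now/0
# returning a %Timex.DateTime{} is an assumption from that era's docs.
date = Timex.Date.now()
{:ok, formatted} =
  Timex.DateFormat.Formatters.DefaultFormatter.format(date, "{ISOdate} {h24}:{0m}")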
|
lib/dateformat/formatters/default.ex
| 0.918384
| 0.832577
|
default.ex
|
starcoder
|
defmodule Grakn.Graql do
@moduledoc false
alias Grakn.Query
defmodule Datatypes do
@moduledoc false
defstruct string: :string,
long: :long,
double: :double,
boolean: :boolean,
date: :date
end
defmacro __using__([]) do
quote do
import Grakn.Graql
end
end
def datatypes, do: %Datatypes{}
defmacro defpattern("$" <> var, isa: entity_type) do
quote do
Query.graql("$#{unquote(var)} isa #{unquote(entity_type)};")
end
end
defmacro defpattern("$" <> var, isa: entity_type, has: attributes) do
quote do
Query.graql(
"$#{unquote(var)} isa #{unquote(entity_type)} #{unquote(expand_key_values(attributes))};"
)
end
end
defmacro defschema(label, [sub: :entity] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :entity, has: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :entity, plays: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :entity, has: _, plays: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :entity, plays: _, has: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :attribute, datatype: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :attribute, datatype: _, plays: _] = opts),
do: define_body(label, opts)
defmacro defschema(label, [sub: :attribute, plays: _, datatype: _] = opts),
do: define_body(label, opts)
defmacro defschema(label, [sub: :relation, relates: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: :relation, relates: _, has: _] = opts),
do: define_body(label, opts)
defmacro defschema(label, [sub: :relation, relates: _, plays: _] = opts),
do: define_body(label, opts)
defmacro defschema(label, [sub: :relation, relates: _, has: _, plays: _] = opts),
do: define_body(label, opts)
defmacro defschema(label, [sub: :relation, relates: _, plays: _, has: _] = opts),
do: define_body(label, opts)
# Rules
defmacro defschema(label, [sub: :rule, when: body, then: head] = opts) do
body_patterns =
body
|> List.wrap()
|> Enum.map(fn
%Grakn.Query{graql: pattern} -> pattern
string_pattern when is_bitstring(string_pattern) -> string_pattern
_ -> error(label, opts)
end)
head_patterns =
head
|> case do
%Grakn.Query{graql: pattern} -> pattern
string_pattern when is_bitstring(string_pattern) -> string_pattern
_ -> error(label, opts)
end
modified_opts = [sub: :rule, when: body_patterns, then: head_patterns]
quote do
define(unquote(label), unquote(modified_opts))
end
end
# Allow any arbitrary sub types
defmacro defschema(label, [sub: _type] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: _type, has: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: _type, plays: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: _type, has: _, plays: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: _type, plays: _, has: _] = opts), do: define_body(label, opts)
defmacro defschema(label, [sub: _type, relates: _] = opts), do: define_body(label, opts)
defmacro defschema(label, opts), do: error(label, opts)
defp error(label, opts), do: raise("Graql compile error: #{inspect({label, opts})}")
defp define_body(label, opts) do
quote do
define(unquote(label), unquote(opts))
end
end
@spec define(String.t(), keyword()) :: Grakn.Query.t()
def define(label, opts) do
modifiers =
opts
|> Enum.map(&expand_key_values/1)
|> Enum.join(", ")
Query.graql("define #{label} #{modifiers};")
end
def expand_key_values({key, [_ | _] = values}) do
case key do
rule_key when rule_key in [:when, :then] ->
statements =
values
|> List.wrap()
|> Enum.join("; ")
"#{rule_key} { #{statements}; }"
_ ->
values
|> Enum.map(fn value -> "#{key} #{value}" end)
|> Enum.join(", ")
end
end
def expand_key_values({:then, value}), do: "then { #{value}; }"
def expand_key_values({key, value}), do: "#{key} #{value}"
end
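# Sketch of the schema DSL compiling down to a single Graql define statement;
# the %Grakn.Query{graql: ...} result shape is inferred from the pattern
# matches in the rule clause above.
use Grakn.Graql

defschema("person", sub: :entity, has: ["name", "age"])
#=> %Grakn.Query{graql: "define person sub entity, has name, has age;"}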
|
lib/grakn/graql.ex
| 0.659295
| 0.430207
|
graql.ex
|
starcoder
|
defmodule Pinterex do
@moduledoc """
This is the main module of the libray. Use it to access all of the available Pinterest API calls.
## Installation and Configuration
1. Add `pinterex` to your list of dependencies in `mix.exs`.
2. Add an [application setting](https://github.com/MitjaBezensek/pinterex/blob/master/lib/api/base.ex#L14) or environment variable `PINTEREST_KEY` for authorization. You can get the key [here](https://developers.pinterest.com/tools/access_token/).
## Usage
Once you have set the access token you can start making calls to the API.
Pinterex always returns one of the following:
1. `{:ok, result}` where the result is a struct that corresponds to the data returned by Pinterest (ie `Board`, `Pin`, `User`, `Interest` or a paged version of the results).
2. `{:error, message}` where the message is the error message returned by Pinterest.
Here are some example calls:
```
Pinterex.me
```
Like mentioned before, Pinterex returns structs that correspond to the data returned by Pinterest. By default the API returns only a limited subset of the fields, but you can supply a list of fields you would like to get (for a full list of the fields consult the Pinterest API documentation or look at the structs in this project). Here is how you can specify which fields would you like to get:
```
Pinterex.me([fields: ["bio", "counts", "username"]])
```
For request that return multiple items you can limit the number of returned results. The default limit is 25, and the max allowed limit is 100.
```
Pinterex.my_boards([fields: ["name", "counts", "description"], limit: 10])
```
This returns a [`PagedBoards`](https://github.com/MitjaBezensek/pinterex/blob/master/lib/structs/paged_boards.ex) struct which contains the returned `Boards` as well as a link to the next page of the results. You can easily access the next page by using the `next` field in the result. For example, here is how you can get 20 of your boards:
```
{:ok, first_10} = Pinterex.my_boards([fields: ["name", "counts", "description"], limit: 10])
{:ok, second_10} = Pinterex.my_boards([next: first_10.next])
boards = first_10.boards ++ second_10.boards
```
Note that you do not need to specify the fields and limit in the second call. The `next` URL already contains the information from the previous call. When the `next` field equals `nil`, there are no items left to fetch.
## Influence
The design of the API was influenced by [ExTwitter](https://github.com/parroty/extwitter).
"""
use Application
@doc false
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = []
opts = [strategy: :one_for_one, name: Pinterex.Supervisor]
Supervisor.start_link(children, opts)
end
@doc false
defdelegate execute_request(method, path), to: Pinterex.Api.Base
@doc false
defdelegate execute_request(method, createStruct, path), to: Pinterex.Api.Base
@doc false
defdelegate execute_request(method, createStruct, path, options), to: Pinterex.Api.Base
@doc """
Used for fetching the data of the authenticated User's profile.
## Reference
By default the API returns first and last name, ID and URL of the authenticated User. Use `Pinterex.me/1` if you wish to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate me, to: Pinterex.Api.User
@doc """
Used for fetching the data of the authenticated User's profile.
## Reference
By default the API returns first and last name, ID and URL of the authenticated User.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the User we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.User`.
## Example
`Pinterex.me([fields: ["bio", "counts"]])`
"""
defdelegate me(options), to: Pinterex.Api.User
@doc """
Returns a list of the public Boards of the authenticated User.
## Reference
By default the API returns their URLs, IDs and names. Use `Pinterex.my_boards/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_boards, to: Pinterex.Api.User
@doc """
Returns a list of the public Boards of the authenticated User.
## Reference
By default the API returns their URLs, IDs and names.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Boards we pass a list of
fields to fetch. To see which fields are available look at `Pinterex.Structs.Board`.
## Example
`Pinterex.my_boards([fields: ["image", "counts"]])`
`Pinterex.my_boards([fields: ["image", "counts"], limit: 50])`
"""
defdelegate my_boards(options), to: Pinterex.Api.User
@doc """
Returns a list of Boards that Pinterest would suggest to the authenticated User if
they were to save the specified Pin.
## Reference
By default the API returns the IDs, URLs and names of the Boards. Use `Pinterex.my_suggested_boards/2` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- id: the id of the Pin for which you wish to get the suggestions (required)
"""
defdelegate my_suggested_boards(id), to: Pinterex.Api.User
@doc """
Returns a list of Boards that Pinterest would suggest to the authenticated User if
they were to save the specified Pin.
## Reference
By default the API returns the IDs, URLs and names of the Boards.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- id: the id of the Pin for which you wish to get the suggestions (required)
- options: if we wish to get other fields of the suggested Boards we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Board`.
## Example
`Pinterex.my_suggested_boards("1253434223", ["image", "counts"])`
`Pinterex.my_suggested_boards("1253434223", [fields: ["image", "counts"], limit: 5])`
"""
defdelegate my_suggested_boards(id, options), to: Pinterex.Api.User
@doc """
Returns a list of Pins that the authenticated User liked.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the Pins. Use `Pinterex.my_likes/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_likes, to: Pinterex.Api.User
@doc """
Returns a list of Pins that the authenticated User liked.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the Pins.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Pins we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Pin`
## Example
`Pinterex.my_likes([fields: ["note", "counts"]])`
`Pinterex.my_likes([fields: ["note", "counts"], limit: 50])`
"""
defdelegate my_likes(options), to: Pinterex.Api.User
@doc """
Returns a list of Pins that the authenticated User pinned.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the User's Pins. Use `Pinterex.my_pins/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_pins, to: Pinterex.Api.User
@doc """
Returns a list of Pins that the authenticated User pinned.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the User's Pins.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Pins we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Pin`
## Example
`Pinterex.my_pins([fields: ["note", "counts"]])`
`Pinterex.my_pins([fields: ["note", "counts"], limit: 50])`
"""
defdelegate my_pins(options), to: Pinterex.Api.User
@doc """
Returns a list of the authenticated User's Boards that match the search query.
## Reference
By default the API returns IDs, URLs and names of the matched Boards. Use `Pinterex.search_my_boards/2` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- query: the query string you wish to search for (required)
"""
defdelegate search_my_boards(query), to: Pinterex.Api.User
@doc """
Returns a list of the authenticated User's Boards that match the search query.
## Reference
By default the API returns IDs, URLs and names of the matched Boards.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- query: the query string you wish to search for (required)
- options: if we wish to get other fields of the Boards we pass a list of
fields to fetch. To see which fields are available look at `Pinterex.Structs.Board`.
## Example
`Pinterex.search_my_boards("garden", [fields: ["image', "counts"]])`
`Pinterex.search_my_boards("garden", [fields: ["image', "counts"], limit: 50])`
"""
defdelegate search_my_boards(query, options), to: Pinterex.Api.User
@doc """
Returns a list of the authenticated User's Pins that match the search query.
## Reference
By default the API returns IDs, URLs, links and descriptions of the matched Pins. Use `Pinterex.search_my_pins/2` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- query: the query string you wish to search for (required)
"""
defdelegate search_my_pins(query), to: Pinterex.Api.User
@doc """
Returns a list of the authenticated User's Pins that match the search query.
## Reference
By default the API returns IDs, URLs, links and descriptions of the matched Pins.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- query: the query string you wish to search for (required)
- options: if we wish to get other fields of the Pins we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Pin`
## Example
`Pinterex.search_my_pins("garden", [fields: ["note", "counts"]])`
`Pinterex.search_my_pins("garden", [fields: ["note", "counts"], limit: 50])`
"""
defdelegate search_my_pins(query, options), to: Pinterex.Api.User
@doc """
Returns the Users that follow the authenticated User.
## Reference
By default the API returns the first names, last names, IDs and URLs of the Users. Use `Pinterex.my_followers/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_followers, to: Pinterex.Api.User
@doc """
Returns the Users that follow the authenticated User.
## Reference
By default the API returns the first names, last names, IDs and URLs of the Users.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Users we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.User`.
## Example
`Pinterex.my_followers([fields: ["bio", "counts"]])`
`Pinterex.my_followers([fields: ["bio", "counts"], limit: 50])`
"""
defdelegate my_followers(options), to: Pinterex.Api.User
@doc """
Returns a list of Boards that the authenticated User follows.
## Reference
By default the API returns the IDs, URLs and names of the Boards. Use `Pinterex.my_following_boards/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_following_boards, to: Pinterex.Api.User
@doc """
Returns a list of Boards that the authenticated User follows.
## Reference
By default the API returns the IDs, URLs and names of the Boards.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Boards we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Board`.
## Example
`Pinterex.my_following_boards([fields: ["image", "counts"]])`
`Pinterex.my_following_boards([fields: ["image", "counts"], limit: 50])`
"""
defdelegate my_following_boards(options), to: Pinterex.Api.User
@doc """
Follow the specified Board as the authenticated User.
## Reference
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- board: the Board to follow. The format of the parameters should be
"username/board_name"
"""
defdelegate follow_board(board), to: Pinterex.Api.User
@doc """
Unfollow the specified Board as the authenticated User.
## Reference
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- board: the Board to unfollow. The format of the parameters should be
"username/board_name"
"""
defdelegate unfollow_board(board), to: Pinterex.Api.User
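# Hedged usage sketch; the "username/board_name" identifier below is
# illustrative:
#
#     Pinterex.follow_board("username/board_name")
#     Pinterex.unfollow_board("username/board_name")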
@doc """
Returns a list of Interests that the authenticated User follows.
## Reference
By default the API returns the IDs and names of the Interests.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_following_interests, to: Pinterex.Api.User
@doc """
Returns the Users that the authenticated User follows.
## Reference
By default the API returns the first names, last names, IDs and URLs of the Users. Use `Pinterex.my_following_users/1` to get other fields.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
"""
defdelegate my_following_users, to: Pinterex.Api.User
@doc """
Returns the Users that the authenticated User follows.
## Reference
By default the API returns the first names, last names, IDs and URLs of the Users.
[https://developers.pinterest.com/docs/api/users/](https://developers.pinterest.com/docs/api/users/)
## Parameters
- options: if we wish to get other fields of the Users we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.User`.
## Example
`Pinterex.my_following_users([fields: ["bio", "counts"]])`
`Pinterex.my_following_users([fields: ["bio", "counts"], limit: 50])`
"""
defdelegate my_following_users(options), to: Pinterex.Api.User
@doc """
Returns the information of the requested Pin.
## Reference
By default the API returns the ID, URL, link and the description of the Pin. Use `Pinterex.get_pin/2` to get other fields.
[https://developers.pinterest.com/docs/api/pins/](https://developers.pinterest.com/docs/api/pins/)
## Parameters
- id: the id of the Pin
"""
defdelegate get_pin(id), to: Pinterex.Api.Pin
@doc """
Returns the information of the requested Pin.
## Reference
By default the API returns the ID, URL, link and the description of the Pin.
[https://developers.pinterest.com/docs/api/pins/](https://developers.pinterest.com/docs/api/pins/)
## Parameters
- id: the id of the Pin
- options: if we wish to get other fields of the Pin we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Pin`
## Example
`Pinterex.get_pin("123456", [fields: ["note", "counts"]])`
"""
defdelegate get_pin(id, options), to: Pinterex.Api.Pin
@doc """
Returns the Board information.
## Reference
By default the API returns the ID, URL and the name of the specified Board. Use `Pinterex.get_board/2` to get other fields.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: the id of the Board whose info you wish to get. The format of the parameter should be
"username/board_name" (required)
"""
defdelegate get_board(board), to: Pinterex.Api.Board
@doc """
Returns the Board information.
## Reference
By default the API returns the ID, URL and the name of the specified Board.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: the id of the Board whose info you wish to get. The format of the parameter should be
"username/board_name" (required)
- options: if we wish to get other fields of the Board we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Board`.
## Example
`Pinterex.get_board("username/board_name", [fields: ["image", "counts"]])`
"""
defdelegate get_board(board, options), to: Pinterex.Api.Board
@doc """
Delete the specified Pin for the authenticated User.
## Reference
[https://developers.pinterest.com/docs/api/pins/](https://developers.pinterest.com/docs/api/pins/)
## Parameters
- pin: the id of the Pin you wish to delete (required)
"""
defdelegate delete_pin(pin), to: Pinterex.Api.Pin
@doc """
Returns a list of Pins from the specified Board.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the Pins. Use `Pinterex.get_board_pins/2` to get other fields.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: the id of the Board whose pins you wish to get. The format of the parameters should be
"username/board_name" (required)
"""
defdelegate get_board_pins(board), to: Pinterex.Api.Board
@doc """
Returns a list of Pins from the specified Board.
## Reference
By default the API returns the IDs, URLs, links and descriptions of the Pins.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: the id of the Board whose pins you wish to get. The format of the parameters should be
"username/board_name" (required)
- options: if we wish to get other fields of the Pins we pass a list of fields to fetch. To see which fields are available look at `Pinterex.Structs.Pin`.
## Example
`Pinterex.get_board_pins("username/board_name", [fields: ["note", "counts']])`
`Pinterex.get_board_pins("username/board_name", [fields: ["note", "counts'], limit: 50])`
"""
defdelegate get_board_pins(board, options), to: Pinterex.Api.Board
@doc """
Creates a Board with the specified name for the authenticated User.
## Reference
By default the API returns the ID, URL and name of the created Board.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- name: the name of the Board you wish to create (required)
"""
defdelegate create_board(name), to: Pinterex.Api.Board
@doc """
Creates a Board with the specified name for the authenticated User.
## Reference
By default the API returns the ID, URL and name of the created Board.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- name: the name of the Board you wish to create (required)
- description: the description of the Board you wish to create (optional)
"""
defdelegate create_board(name, description), to: Pinterex.Api.Board
@doc """
Deletes the specified Board for the authenticated User.
## Reference
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: The board you want to delete. The format of the parameters should be "username/board_name"
(required)
"""
defdelegate delete_board(board), to: Pinterex.Api.Board
@doc """
Edit the specified Board for the authenticated User.
## Reference
By default the API returns the ID, URL, and name of the edited Board.
[https://developers.pinterest.com/docs/api/boards/](https://developers.pinterest.com/docs/api/boards/)
## Parameters
- board: the id of the Board you wish to edit. The format of the parameters should be
"username/board_name" (required)
- options: you can specify a new name and / or a new descriptions.
## Example
`Pinterex.edit_board("username/board_name", [name: "New name"])`
`Pinterex.edit_board("username/board_name", [name: "New name", description: "New description"])`
"""
defdelegate edit_board(board, options), to: Pinterex.Api.Board
end
|
lib/pinterex.ex
| 0.893838
| 0.938745
|
pinterex.ex
|
starcoder
|
defmodule StarkInfra.Event do
alias __MODULE__, as: Event
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.Check
alias StarkInfra.Utils.API
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
alias StarkInfra.Error
alias StarkInfra.Utils.Parse
alias StarkInfra.CreditNote.Log, as: CreditNote
alias StarkInfra.IssuingCard.Log, as: IssuingCard
alias StarkInfra.IssuingInvoice.Log, as: IssuingInvoice
alias StarkInfra.IssuingPurchase.Log, as: IssuingPurchase
alias StarkInfra.PixKey.Log, as: PixKey
alias StarkInfra.PixClaim.Log, as: PixClaim
alias StarkInfra.PixRequest.Log, as: PixRequest
alias StarkInfra.PixReversal.Log, as: PixReversal
alias StarkInfra.PixChargeback.Log, as: PixChargeback
alias StarkInfra.PixInfraction.Log, as: PixInfraction
@moduledoc """
Groups Webhook-Event related functions
"""
@doc """
An Event is the notification received from the subscription to the Webhook.
Events cannot be created, but may be retrieved from the Stark Infra API to
list all generated updates on entities.
## Attributes:
- `:id` [string]: unique id returned when the event is created. ex: "5656565656565656"
- `:log` [Log]: a Log struct from one of the subscription services (PixRequest.Log, PixReversal.Log, PixKey.Log)
- `:created` [DateTime]: creation datetime for the notification event. ex: ~U[2020-03-26 19:32:35.418698Z]
- `:is_delivered` [bool]: true if the event has been successfully delivered to the user url. ex: false
- `:subscription` [string]: service that triggered this event. ex: "transfer", "utility-payment"
- `:workspace_id` [string]: ID of the Workspace that generated this event. Mostly used when multiple Workspaces have Webhooks registered to the same endpoint. ex: "4545454545454545"
"""
defstruct [:id, :log, :created, :is_delivered, :subscription, :workspace_id]
@type t() :: %__MODULE__{}
@doc """
Receive a single notification Event struct previously created in the Stark Infra API by passing its id
## Parameters (required):
- `id` [string]: struct unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- Event struct with updated attributes
"""
@spec get(binary, user: Project.t() | Organization.t() | nil) :: {:ok, Event.t()} | {:error, [%Error{}]}
def get(id, options \\ []) do
Rest.get_id(resource(), id, options)
end
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(binary, user: Project.t() | Organization.t() | nil) :: Event.t()
def get!(id, options \\ []) do
Rest.get_id!(resource(), id, options)
end
@doc """
Receive a stream of notification Event structs previously created in the Stark Infra API
## Options:
- `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:is_delivered` [bool, default nil]: filter successfully delivered events. ex: true or false
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- stream of Event structs with updated attributes
"""
@spec query(
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
is_delivered: boolean,
user: Project.t() | Organization.t()
) ::
({:cont, {:ok, [Event.t()]}} |
{:error, [Error.t()]} |
{:halt, any} |
{:suspend, any},
any -> any)
def query(options \\ []) do
Rest.get_list(resource(), options)
end
@doc """
Same as query(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec query!(
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
is_delivered: boolean,
user: Project.t() | Organization.t()
) ::
({:cont, [Event.t()]} |
{:halt, any} |
{:suspend, any},
any -> any)
def query!(options \\ []) do
Rest.get_list!(resource(), options)
end
@doc """
Receive a list of up to 100 Event structs previously created in the Stark Infra API and the cursor to the next page.
Use this function instead of query if you want to manually page your requests.
## Options:
- `:cursor` [string, default nil]: cursor returned on the previous page function call
- `:limit` [integer, default 100]: maximum number of structs to be retrieved. Max = 100. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:is_delivered` [bool, default nil]: filter successfully delivered events. ex: true or false
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- list of Event structs with updated attributes and cursor to retrieve the next page of Event structs
"""
@spec page(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
is_delivered: boolean,
user: Project.t() | Organization.t()
) ::
{:ok, {binary, [Event.t()]}} |
{:error, [%Error{}]}
def page(options \\ []) do
Rest.get_page(resource(), options)
end
@doc """
Same as page(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec page!(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
is_delivered: boolean,
user: Project.t() | Organization.t()
) :: [Event.t()]
def page!(options \\ []) do
Rest.get_page!(resource(), options)
end
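# Hedged sketch of manual cursor pagination with page/1; the variable
# names are illustrative:
#
#     {:ok, {cursor, events}} = StarkInfra.Event.page(limit: 50)
#     {:ok, {_next_cursor, more}} = StarkInfra.Event.page(cursor: cursor, limit: 50)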
@doc """
Delete a list of notification Event entities previously created in the Stark Infra API
## Parameters (required):
- `id` [string]: Event unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- deleted Event struct
"""
@spec delete(binary, user: Project.t() | Organization.t() | nil) :: {:ok, Event.t()} | {:error, [%Error{}]}
def delete(id, options \\ []) do
Rest.delete_id(resource(), id, options)
end
@doc """
Same as delete(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec delete!(binary, user: Project.t() | Organization.t() | nil) :: Event.t()
def delete!(id, options \\ []) do
Rest.delete_id!(resource(), id, options)
end
@doc """
Update notification Event by passing id.
If is_delivered is true, the event will no longer be returned on queries with is_delivered=false.
## Parameters (required):
- `id` [string]: Event unique id. ex: "5656565656565656"
- `:is_delivered` [bool]: If true and event hasn't been delivered already, event will be set as delivered. ex: true
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- target Event with updated attributes
"""
@spec update(
binary,
is_delivered: boolean,
user: Project.t() | Organization.t() | nil
) ::
{:ok, Event.t()} |
{:error, [%Error{}]}
def update(id, parameters \\ []) do
Rest.patch_id(resource(), id, parameters |> Check.enforced_keys([:is_delivered]) |> Enum.into(%{}))
end
@doc """
Same as update(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec update!(
binary,
is_delivered: boolean,
user: Project.t() | Organization.t() | nil
) :: Event.t()
def update!(id, parameters \\ []) do
Rest.patch_id!(resource(), id, parameters |> Check.enforced_keys([:is_delivered]) |> Enum.into(%{}))
end
@doc """
Create a single Event struct from a content string received from a handler listening at the request url.
If the provided digital signature does not check out with the StarkInfra public key,
an `InvalidSignatureError` will be raised.
## Parameters (required):
- `:content` [string]: response content from request received at user endpoint (not parsed)
- `:signature` [string]: base-64 digital signature received at response header "Digital-Signature"
## Options:
- `cache_pid` [PID, default nil]: PID of the process that holds the public key cache, returned on previous parses. If not provided, a new cache process will be generated.
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- Parsed Resource object
"""
@spec parse(
content: binary,
signature: binary,
cache_pid: pid,
user: Project.t() | Organization.t()
)::
{:ok, Event.t()} |
{:error, [error: Error.t()]}
def parse(options \\ []) do
%{content: content, signature: signature, cache_pid: cache_pid, user: user} =
Enum.into(
options |> Check.enforced_keys([:content, :signature]),
%{cache_pid: nil, user: nil}
)
Parse.parse_and_verify(
content: content,
signature: signature,
cache_pid: cache_pid,
key: "event",
resource_maker: &resource_maker/1,
user: user
)
end
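# Hedged sketch of parsing a webhook delivery; `raw_body` and `signature`
# stand in for values read from the incoming HTTP request:
#
#     {:ok, %StarkInfra.Event{log: log}} =
#       StarkInfra.Event.parse(content: raw_body, signature: signature)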
@doc """
Same as parse(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec parse!(
content: binary,
signature: binary,
cache_pid: pid,
user: Project.t() | Organization.t()
):: any
def parse!(options \\ []) do
%{content: content, signature: signature, cache_pid: cache_pid, user: user} =
Enum.into(
options |> Check.enforced_keys([:content, :signature]),
%{cache_pid: nil, user: nil}
)
Parse.parse_and_verify(
content: content,
signature: signature,
cache_pid: cache_pid,
key: "event",
resource_maker: &resource_maker/1,
user: user
)
end
defp resource() do
{
"Event",
&resource_maker/1
}
end
defp resource_maker(json) do
%Event{
id: json[:id],
log: parse_log_json(json[:log], json[:subscription]),
created: json[:created] |> Check.datetime(),
is_delivered: json[:is_delivered],
subscription: json[:subscription],
workspace_id: json[:workspace_id]
}
end
defp parse_log_json(log, subscription) do
log |> API.from_api_json(log_maker_by_subscription(subscription))
rescue
CaseClauseError -> log
end
defp log_maker_by_subscription(subscription) do
case subscription do
"credit-note" -> &CreditNote.resource_maker/1
"pix-request.in" -> &PixRequest.resource_maker/1
"pix-request.out" -> &PixRequest.resource_maker/1
"pix-reversal.in" -> &PixReversal.resource_maker/1
"pix-reversal.out" -> &PixReversal.resource_maker/1
"pix-key" -> &PixKey.resource_maker/1
"pix-claim" -> &PixClaim.resource_maker/1
"pix-infraction" -> &PixInfraction.resource_maker/1
"pix-chargeback" -> &PixChargeback.resource_maker/1
"issuing-card" -> &IssuingCard.resource_maker/1
"issuing-invoice" -> &IssuingInvoice.resource_maker/1
"issuing-purchase" -> &IssuingPurchase.resource_maker/1
end
end
end
|
lib/event/event.ex
| 0.912397
| 0.557484
|
event.ex
|
starcoder
|
defmodule Resourceful.JSONAPI.Params do
@moduledoc """
Functions for converting URL parameters into `Resourceful` queries.
Additionally validates parameters when JSON:API-specific parameters are
provided such as `fields`.
While JSON:API specifically designates the format for sparse fieldsets and
sorting, filtering and pagination are left up to the implementation. Filtering
is built around the generic queries in Resourceful and is therefore
opinionated in format. Pagination supports a page number and limit/offset
strategies.
There is currently no support for `include`.
## Comma-Separated Lists and Arrays
The JSON:API spec shows examples of
[sparse fieldsets](https://jsonapi.org/format/#fetching-sparse-fieldsets) and
[sorting](https://jsonapi.org/format/#fetching-sorting) using comma-separated
strings to represent arrays of fields. In addition to the standard form, this
library will also accept an actual array of strings. The errors that are
returned will differ depending on the form--specifically, the `:input` and
`:source` values. This is intentional behavior.
For example, `sort=-field1,field2` and `sort[]=-field1&sort[]=field2` will
return identical results if the fields are correct for the type; however,
if `field1` is invalid the errors will look slightly different:
String list:
`{:invalid_jsonapi_field, %{input: "field1,field2", key: "field1", source: ["sort"]}}`
Array:
`{:invalid_jsonapi_field, %{input: "field1", key: "field1", source: ["sort", 0]}}`
"""
alias Resourceful.{Error, Type}
alias Resourceful.JSONAPI.{Fields, Include, Pagination}
def split_string_list(input), do: String.split(input, ~r/, */)
def validate(%Type{} = type, %{} = params, opts \\ []) do
%{
fields: validate_fields(type, params),
filter: validate_filter(type, params),
include: validate_include(type, params),
page: validate_page(params, opts),
sort: validate_sort(type, params)
}
|> Error.or_ok()
|> case do
{:ok, opts_map} ->
{:ok,
opts_map
|> Stream.reject(fn {_, v} -> is_nil(v) end)
|> Keyword.new()}
errors ->
errors
end
end
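# Hedged usage sketch with a hypothetical `MyApp.ArticleType` and raw
# Phoenix-style params; returns {:ok, opts} on success or the collected
# errors otherwise:
#
#     Resourceful.JSONAPI.Params.validate(
#       MyApp.ArticleType,
#       %{"sort" => "-published_at,title", "page" => %{"number" => "2"}}
#     )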
def validate_fields(type, %{"fields" => %{} = fields_by_type}) do
Fields.validate(type, fields_by_type)
end
def validate_fields(_, %{"fields" => invalid}), do: invalid_input("fields", invalid)
def validate_fields(_, _), do: nil
def validate_filter(type, %{"filter" => %{} = filters}) do
filters
|> Enum.map(fn {source, input} = filter ->
with {:error, _} = error <- Type.validate_filter(type, filter),
do: Error.with_source(error, ["filter", source], %{input: input})
end)
|> Type.validate_max_filters(type, %{source: ["filter"]})
end
def validate_filter(_, %{"filter" => invalid}), do: invalid_input("filter", invalid)
def validate_filter(_, _), do: nil
def validate_include(type, %{"include" => includes})
when is_binary(includes) or is_list(includes),
do: Include.validate(type, includes)
def validate_include(_, %{"include" => invalid}), do: invalid_input("include", invalid)
def validate_include(_, _), do: nil
def validate_page(params, opts \\ [])
def validate_page(%{"page" => %{} = params}, opts) do
case Pagination.validate(params, opts) do
%{valid?: false} = changeset ->
changeset
|> Error.from_changeset()
|> Error.prepend_source(:page)
opts ->
opts
end
end
def validate_page(%{"page" => invalid}, _), do: invalid_input("page", invalid)
def validate_page(_, _), do: nil
def validate_sort(type, params, context \\ %{})
def validate_sort(type, %{"sort" => sorters} = params, context)
when is_binary(sorters) do
validate_sort(
type,
Map.put(params, "sort", split_string_list(sorters)),
Map.merge(context, %{input: sorters, source: ["sort"]})
)
end
def validate_sort(type, %{"sort" => sorters}, context)
when is_list(sorters) do
sorters
|> Stream.with_index()
|> Enum.map(fn {sorter, index} ->
with {:error, _} = error <- Type.validate_sorter(type, sorter) do
error
|> Error.with_source(Map.get(context, :source) || ["sort", index])
|> Error.with_input(Map.get(context, :input, sorter))
end
end)
|> Type.validate_max_sorters(type, %{source: ["sort"]})
end
def validate_sort(_, %{"sort" => invalid}, _), do: invalid_input("sort", invalid)
def validate_sort(_, _, _), do: nil
defp invalid_input(param, input) do
Error.with_input({:error, {:invalid_jsonapi_parameter, %{source: [param]}}}, input)
end
end
|
lib/resourceful/jsonapi/params.ex
| 0.914539
| 0.716219
|
params.ex
|
starcoder
|
defmodule AirElixirSensor.Dht11 do
@moduledoc """
DHT11 sensor reader module for Raspberry.
## Examples
defmodule AirElixirSensor.Dht11.Dht11Publisher do
use AirElixirSensor.Dht11, :publisher_by_python
end
"""
def publisher_by_python do
quote do
alias AirElixirSensor.PythonErlport
def read_value(prefix, pin) do
case PythonErlport.call(prefix, {:dht11, :read, []}) do
{:ok, result} -> result
{:error, _result} -> read_value(prefix, pin)
end
end
end
end
# @todo WIP
def publisher_by_clang do
quote do
require Logger
@on_load :load_nif
@nif_path "./_build/c/libdht11"
def read_value(prefix, pin) do
case read(pin) do
{:ok, result} -> result
{:error, _result} -> read_value(prefix, pin)
end
end
@doc "Reads data from the sensor"
def read(_pin) do
raise Code.LoadError, file: @nif_path
end
@doc "Loads and initializes the `libdht11.so` NIF library"
def load_nif do
case :erlang.load_nif(@nif_path, 0) do
:ok -> setup()
{:error, {:load_failed, error}} -> Logger.warn(error)
end
end
def setup do
raise Code.LoadError, file: @nif_path
end
end
end
# @todo WIP
def publisher_by_circuits do
quote do
use Bitwise
alias Circuits.GPIO
def read_value(prefix, pin) do
case read(pin) do
{:ok, result} -> result
{:error, _result} -> read_value(prefix, pin)
end
end
def read(pin) do
{:ok, gpo} = GPIO.open(pin, :output)
send_and_sleep(gpo, 1, 50)
send_and_sleep(gpo, 0, 20)
{:ok, gpi} = GPIO.open(pin, :input)
GPIO.set_pull_mode(gpi, :pullup)
pullup_lengths = gpi |> collect_input |> parse_data_pullup_lengths
bytes = pullup_lengths |> calculate_bits |> bits_to_bytes
cond do
length(pullup_lengths) != 40 -> {:error, {0, 0}}
Enum.at(bytes, 4) != calculate_checksum(bytes) -> {:error, {0, 0}}
true -> {:ok, {Enum.at(bytes, 2), Enum.at(bytes, 0)}}
end
end
defp send_and_sleep(gpo, output, sleep) do
GPIO.write(gpo, output)
:timer.sleep(sleep)
end
defp collect_input(gpi), do: rec_collect_input(gpi, 0, -1, [])
defp rec_collect_input(gpi, unchanged_count, last, data) do
current = GPIO.read(gpi)
data = data ++ [current]
cond do
last != current ->
rec_collect_input(gpi, 0, current, data)
last == current && unchanged_count <= 100 ->
rec_collect_input(gpi, unchanged_count + 1, last, data)
true ->
data
end
end
defp parse_data_pullup_lengths(data) do
state = %{
init_pulldown: 1,
init_pullup: 2,
firstpulldown: 3,
pullup: 4,
pulldown: 5
}
{_, _, rc_lengths} =
data
|> Enum.reduce(
{state[:init_pulldown], 0, []},
fn datum, {current_state, current_length, lengths} ->
current_length = current_length + 1
cond do
current_state == state[:init_pulldown] && datum == 0 ->
{state[:init_pullup], current_length, lengths}
current_state == state[:init_pullup] && datum == 1 ->
{state[:firstpulldown], current_length, lengths}
current_state == state[:firstpulldown] && datum == 0 ->
{state[:pullup], current_length, lengths}
current_state == state[:pullup] && datum == 1 ->
{state[:pulldown], 0, lengths}
current_state == state[:pulldown] && datum == 0 ->
{state[:pullup], current_length, lengths ++ [current_length]}
true ->
{current_state, current_length, lengths}
end
end
)
rc_lengths
end
defp calculate_bits(pullup_lengths) do
{shortest_pullup, longest_pullup} =
pullup_lengths
|> Enum.reduce({1000, 0}, fn length, {shortest_pullup, longest_pullup} ->
cond do
length < shortest_pullup -> {length, longest_pullup}
length > longest_pullup -> {shortest_pullup, length}
true -> {shortest_pullup, longest_pullup}
end
end)
halfway = shortest_pullup + (longest_pullup - shortest_pullup) / 2
pullup_lengths
|> Enum.reduce([], fn length, bits ->
if length > halfway, do: bits ++ [true], else: bits ++ [false]
end)
end
defp bits_to_bytes(bits) do
{_, rc_bytes} =
bits
|> Enum.with_index()
|> Enum.reduce({0, []}, fn {bit, i}, {byte, bytes} ->
byte = byte <<< 1
byte = if bit == true, do: byte ||| 1, else: byte ||| 0
if rem(i + 1, 8) == 0 do
{0, bytes ++ [byte]}
else
{byte, bytes}
end
end)
rc_bytes
end
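# For example, the bits [false, false, true, false, true, false, true, false]
# fold left-to-right into 0b00101010 = 42, producing one byte per eight bits.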
defp calculate_checksum(bytes) do
bytes
|> Enum.with_index()
|> Enum.reduce(0, fn {byte, index}, acc -> if index < 4, do: acc + byte, else: acc end)
|> Bitwise.&&&(255)
end
end
end
defmacro __using__(which) when is_atom(which) do
apply(__MODULE__, which, [])
end
end
|
lib/air_elixir_sensor/dht11/dht11.ex
| 0.725065
| 0.441071
|
dht11.ex
|
starcoder
|
defmodule FileSize.Units do
@moduledoc """
A module to retrieve information about known units.
"""
alias FileSize.Bit
alias FileSize.Byte
alias FileSize.Convertible
alias FileSize.InvalidUnitError
alias FileSize.InvalidUnitSystemError
alias FileSize.Units.Info
@unit_systems [:si, :iec]
@units [
# Bit
Info.new(name: :bit, mod: Bit, exp: 0, system: nil, symbol: "bit"),
Info.new(name: :kbit, mod: Bit, exp: 1, system: :si, symbol: "kbit"),
Info.new(name: :kibit, mod: Bit, exp: 1, system: :iec, symbol: "Kibit"),
Info.new(name: :mbit, mod: Bit, exp: 2, system: :si, symbol: "Mbit"),
Info.new(name: :mibit, mod: Bit, exp: 2, system: :iec, symbol: "Mibit"),
Info.new(name: :gbit, mod: Bit, exp: 3, system: :si, symbol: "Gbit"),
Info.new(name: :gibit, mod: Bit, exp: 3, system: :iec, symbol: "Gibit"),
Info.new(name: :tbit, mod: Bit, exp: 4, system: :si, symbol: "Tbit"),
Info.new(name: :tibit, mod: Bit, exp: 4, system: :iec, symbol: "Tibit"),
Info.new(name: :pbit, mod: Bit, exp: 5, system: :si, symbol: "Pbit"),
Info.new(name: :pibit, mod: Bit, exp: 5, system: :iec, symbol: "Pibit"),
Info.new(name: :ebit, mod: Bit, exp: 6, system: :si, symbol: "Ebit"),
Info.new(name: :eibit, mod: Bit, exp: 6, system: :iec, symbol: "Eibit"),
Info.new(name: :zbit, mod: Bit, exp: 7, system: :si, symbol: "Zbit"),
Info.new(name: :zibit, mod: Bit, exp: 7, system: :iec, symbol: "Zibit"),
Info.new(name: :ybit, mod: Bit, exp: 8, system: :si, symbol: "Ybit"),
Info.new(name: :yibit, mod: Bit, exp: 8, system: :iec, symbol: "Yibit"),
# Byte
Info.new(name: :b, mod: Byte, exp: 0, system: nil, symbol: "B"),
Info.new(name: :kb, mod: Byte, exp: 1, system: :si, symbol: "kB"),
Info.new(name: :kib, mod: Byte, exp: 1, system: :iec, symbol: "KiB"),
Info.new(name: :mb, mod: Byte, exp: 2, system: :si, symbol: "MB"),
Info.new(name: :mib, mod: Byte, exp: 2, system: :iec, symbol: "MiB"),
Info.new(name: :gb, mod: Byte, exp: 3, system: :si, symbol: "GB"),
Info.new(name: :gib, mod: Byte, exp: 3, system: :iec, symbol: "GiB"),
Info.new(name: :tb, mod: Byte, exp: 4, system: :si, symbol: "TB"),
Info.new(name: :tib, mod: Byte, exp: 4, system: :iec, symbol: "TiB"),
Info.new(name: :pb, mod: Byte, exp: 5, system: :si, symbol: "PB"),
Info.new(name: :pib, mod: Byte, exp: 5, system: :iec, symbol: "PiB"),
Info.new(name: :eb, mod: Byte, exp: 6, system: :si, symbol: "EB"),
Info.new(name: :eib, mod: Byte, exp: 6, system: :iec, symbol: "EiB"),
Info.new(name: :zb, mod: Byte, exp: 7, system: :si, symbol: "ZB"),
Info.new(name: :zib, mod: Byte, exp: 7, system: :iec, symbol: "ZiB"),
Info.new(name: :yb, mod: Byte, exp: 8, system: :si, symbol: "YB"),
Info.new(name: :yib, mod: Byte, exp: 8, system: :iec, symbol: "YiB")
]
@units_by_names Map.new(@units, fn unit -> {unit.name, unit} end)
@units_by_symbols Map.new(@units, fn unit -> {unit.symbol, unit} end)
@units_by_mods_and_systems_and_exps Map.new(@units, fn info ->
{{info.mod, info.system, info.exp},
info}
end)
@doc """
Gets a list of all defined units.
"""
@doc since: "2.0.0"
@spec list() :: [Info.t()]
def list, do: @units
@doc """
Gets unit info for the unit specified by the given name, symbol or `Info` struct.
"""
@doc since: "2.0.0"
@spec fetch(FileSize.unit()) :: {:ok, Info.t()} | :error
def fetch(symbol_unit_or_unit_info)
def fetch(%Info{} = unit), do: {:ok, unit}
def fetch(unit) when is_atom(unit) do
Map.fetch(@units_by_names, unit)
end
def fetch(symbol) when is_binary(symbol), do: from_symbol(symbol)
def fetch(_), do: :error
@doc """
Gets unit info for the unit specified by the given name, symbol or `Info` struct.
Raises when the unit is unknown.
"""
@doc since: "2.0.0"
@spec fetch!(FileSize.unit()) :: Info.t() | no_return
def fetch!(unit) do
case fetch(unit) do
{:ok, info} -> info
:error -> raise InvalidUnitError, unit: unit
end
end
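# Hedged examples, with values taken from the unit tables above:
#
#     FileSize.Units.fetch(:kb)    #=> {:ok, %Info{name: :kb, symbol: "kB", ...}}
#     FileSize.Units.fetch("KiB")  #=> {:ok, %Info{name: :kib, symbol: "KiB", ...}}
#     FileSize.Units.fetch!(:nope) #=> ** (FileSize.InvalidUnitError)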
@doc false
@spec from_symbol(FileSize.unit_symbol()) :: {:ok, Info.t()} | :error
def from_symbol(symbol) do
Map.fetch(@units_by_symbols, symbol)
end
@doc false
@spec equivalent_unit_for_system!(FileSize.unit(), FileSize.unit_system()) ::
Info.t() | no_return
def equivalent_unit_for_system!(symbol_or_unit_or_unit_info, unit_system)
def equivalent_unit_for_system!(%Info{} = info, unit_system) do
validate_unit_system!(unit_system)
find_equivalent_unit_for_system(info, unit_system)
end
def equivalent_unit_for_system!(symbol_or_unit, unit_system) do
symbol_or_unit
|> fetch!()
|> equivalent_unit_for_system!(unit_system)
end
defp find_equivalent_unit_for_system(%{system: nil} = info, _), do: info
defp find_equivalent_unit_for_system(
%{system: unit_system} = info,
unit_system
) do
info
end
defp find_equivalent_unit_for_system(info, unit_system) do
Map.fetch!(
@units_by_mods_and_systems_and_exps,
{info.mod, unit_system, info.exp}
)
end
@doc false
@spec appropriate_unit_for_size!(FileSize.t(), nil | FileSize.unit_system()) ::
Info.t() | no_return
def appropriate_unit_for_size!(size, unit_system \\ nil) do
value = Convertible.normalized_value(size)
%{mod: mod} = orig_info = fetch!(size.unit)
unit_system = unit_system || orig_info.system || :si
validate_unit_system!(unit_system)
Enum.find_value(@units, orig_info, fn
%{mod: ^mod, system: ^unit_system} = info ->
if value >= info.min_value && value <= info.max_value, do: info
_ ->
nil
end)
end
defp validate_unit_system!(unit_system) when unit_system in @unit_systems do
:ok
end
defp validate_unit_system!(unit_system) do
raise InvalidUnitSystemError, unit_system: unit_system
end
end
|
lib/file_size/units.ex
| 0.814053
| 0.527134
|
units.ex
|
starcoder
|
defmodule ExUnit.ClusteredCase.Node do
@moduledoc """
This module handles starting new nodes
You can specify various options when starting a node:
- `:name`, takes either a string or an atom, in either long or short form,
this sets the node name and id.
- `:boot_timeout`, specifies the amount of time to wait for a node to perform its
initial boot sequence before timing out.
- `:init_timeout`, specifies the amount of time to wait for the node agent to complete
initializing the node (loading and starting required applications, applying configuration,
loading test modules, and executing post-start functions).
- `:erl_flags`, a list of arguments to pass to `erl` when starting the node, e.g. `["-init_debug"]`.
- `:env`, a list of tuples containing environment variables to export in the node's environment,
e.g. `[{"PORT", "8080"}]`.
- `:config`, a `Keyword` list containing configuration overrides to apply to the node,
should be in the form of `[app: [key: value]]`.
- `:post_start_functions`, a list of functions, either captured or in `{module, function, args}` format,
which will be invoked on the node after it is booted and initialized. Functions must be zero-arity.
- `:stdout`, redirect output to a device or process with stdout: `:standard_error` | `:standard_io` | `pid`.
- `:capture_log`, capture the entire log from a node with `capture_log: true`,
can get the captured logs for a specific node with `Cluster.log(node)`.
"""
alias ExUnit.ClusteredCaseError
@type fun :: (() -> term) | {module, atom, [term]}
@type node_opts :: [node_opt]
@type node_opt ::
{:name, String.t() | atom}
| {:boot_timeout, pos_integer}
| {:init_timeout, pos_integer}
| {:erl_flags, [String.t()]}
| {:env, [{String.t(), String.t()}]}
| {:config, Keyword.t()}
| {:post_start_functions, [fun]}
| {:stdout, atom | pid}
| {:capture_log, boolean}
@type node_error ::
:already_started
| :started_not_connected
| :boot_timeout
| :init_timeout
| :not_alive
@doc false
@spec start(node_opts) :: {:ok, pid} | {:error, term}
def start(opts) when is_list(opts) do
do_start(opts, [])
end
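# Hedged usage sketch; all option values below are illustrative:
#
#     {:ok, node} =
#       ExUnit.ClusteredCase.Node.start(
#         name: :"node1@127.0.0.1",
#         boot_timeout: 10_000,
#         config: [my_app: [port: 8080]]
#       )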
@doc false
@spec start_nolink(node_opts) :: {:ok, pid} | {:error, term}
def start_nolink(opts) when is_list(opts) do
do_start(opts, [:nolink])
end
defp do_start(opts, start_opts) do
# We expect that the current node is already distributed
unless Node.alive?() do
raise ClusteredCaseError,
"cannot run clustered test cases if distribution is not active! " <>
"You can start distribution via Node.start/1, or with the --name flag."
end
if :nolink in start_opts do
__MODULE__.Manager.start_nolink(opts)
else
__MODULE__.Manager.start_link(opts)
end
end
@doc false
@spec name(pid | String.t() | atom) :: node
defdelegate name(pid), to: __MODULE__.Manager
@doc false
@spec connect(pid | String.t() | atom, [pid | String.t() | atom]) :: :ok
defdelegate connect(name, nodes), to: __MODULE__.Manager
@doc false
@spec disconnect(pid | String.t() | atom, [pid | String.t() | atom]) :: :ok
defdelegate disconnect(name, nodes), to: __MODULE__.Manager
@doc false
@spec stop(pid | String.t() | atom) :: :ok
defdelegate stop(name), to: __MODULE__.Manager
@doc false
@spec kill(pid | String.t() | atom) :: :ok
defdelegate kill(name), to: __MODULE__.Manager
@doc false
@spec alive?(pid | String.t() | atom) :: boolean
defdelegate alive?(name), to: __MODULE__.Manager
@doc false
@spec log(pid | String.t() | atom) :: {:ok, binary}
defdelegate log(name), to: __MODULE__.Manager
@doc false
@spec call(pid | String.t() | atom, fun) :: {:ok, term} | {:error, term}
defdelegate call(name, fun), to: __MODULE__.Manager
@doc false
@spec call(pid | String.t() | atom, fun, Keyword.t()) :: {:ok, term} | {:error, term}
defdelegate call(name, fun, opts), to: __MODULE__.Manager
@doc false
@spec call(pid | String.t() | atom, module, atom, [term]) :: {:ok, term} | {:error, term}
defdelegate call(name, m, f, a), to: __MODULE__.Manager
@doc false
@spec call(pid | String.t() | atom, module, atom, [term], Keyword.t()) ::
{:ok, term} | {:error, term}
defdelegate call(name, m, f, a, opts), to: __MODULE__.Manager
end
|
lib/node.ex
| 0.861684
| 0.731898
|
node.ex
|
starcoder
|
defmodule PhoenixMarkdown do
@moduledoc """
A Markdown template engine for Phoenix. It also lets you (optionally) embed EEx tags to be evaluated on the server.
> Powered by [Earmark](https://github.com/pragdave/earmark)
## Usage
1. Add `{:phoenix_markdown, "~> 1.0"}` to your deps in `mix.exs`.
2. Add the following to your Phoenix `config/config.exs`
```elixir
config :phoenix, :template_engines,
md: PhoenixMarkdown.Engine
```
If you are also using the [phoenix_haml](https://github.com/chrismccord/phoenix_haml) engine,
then it should look like this:
```elixir
config :phoenix, :template_engines,
haml: PhoenixHaml.Engine,
md: PhoenixMarkdown.Engine
```
3. Use the `.html.md` extensions for your templates.
## Optional
Add md extension to Phoenix live reload in `config/dev.exs`
```elixir
config :hello_phoenix, HelloPhoenix.Endpoint,
live_reload: [
patterns: [
~r{priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$},
~r{web/views/.*(ex)$},
~r{web/templates/.*(eex|md)$}
]
]
```
If you are also using the [phoenix_haml](https://github.com/chrismccord/phoenix_haml) engine,
then the pattern should look like this:
```elixir
config :hello_phoenix, HelloPhoenix.Endpoint,
live_reload: [
patterns: [
~r{priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$},
~r{web/views/.*(ex)$},
~r{web/templates/.*(eex|haml|md)$}
]
]
```
## Optional Earmark Configuration
You can configure phoenix_markdown via two separate configuration blocks.
The first one is,
literally, the options that will be passed to Earmark as it renders the markdown into html.
```elixir
config :phoenix_markdown, :earmark, %{
gfm: true,
breaks: true
}
```
Please read the [Earmark Documentation](https://hexdocs.pm/earmark/Earmark.html#as_html!/2) to understand
the options that can go here.
The Earmark options set here apply to all .md template files. If anybody has a good idea on how to pass
per-file options to a template compiler, I'm open to suggestions.
## Optional Server Tags Configuration
The second configuration block is where you indicate if you want to evaluate EEx tags on the server
or escape them in Earmark. The default is to escape them in Earmark.
Example of markdown content with a server-side tag:
```markdown
## Before server-side content
<%= 11 + 2 %>
After the server-side content
```
To turn on server-side eex tags, set the `:server_tags` configuration option.
```elixir
config :phoenix_markdown, :server_tags, :all
```
The options to turn on server tags are `:all`, `:only` and `:except`. Anything else (or not setting it at all)
leaves the tags escaped in Markdown.
* `:all` evaluates all server tags in all markdown files.
* `:only` Only files that match the pattern or patterns will be evaluated.
This pattern can be any of:
* The name of the final html file: `"sample.html"`
* The full path of the template file: `"lib/sample_web/templates/page/sample.html.md"`
* A path with wildcards: `"**/page/**"`. This is nice as it would evaluate all files in a single directory.
* A regex against the path: `~r/.+%%.+/`. This allows you to use a character sequence in the name as a per-file (or path) flag saying if it should be evaluated.
* `:except` Only files that do NOT match the pattern or patterns will be evaluated.
This pattern can be any of:
* The name of the final html file: `"sample.html"`
* The full path of the template file: `"lib/sample_web/templates/page/sample.html.md"`
* A path with wildcards: `"**/page/**"`. This is nice as it would prevent evaluation of all files in a single directory.
* A regex against the path: `~r/.+%%.+/`. This allows you to use a character sequence in the name as a per-file (or path) flag saying if it not should be evaluated.
Both the `:only` and `:except` options accept either a single pattern, or a list of patterns.
```elixir
config :phoenix_markdown, :server_tags, only: ~r/.+%%.+/
```
or...
```elixir
config :phoenix_markdown, :server_tags, only: [~r/.+%%.+/, "some_file.html"]
```
## Generators
There are no generators for phoenix_markdown since they wouldn't make sense. You can embed server-side
tags if you turn them on in the configuration, but otherwise just keep it static and refer to it from
a *.eex template.
Like this:
```elixir
<% render("some_markdown.html") %>
```
[Markdown](https://daringfireball.net/projects/markdown/) is intended to be written by a human
in any simple text editor ( or a fancy one like [iA Writer](https://ia.net/writer) ). Just create
a file with the `.html.md` extension and drop it into the appropriate templates folder in your
phoenix application. Then you can use it just like any other template.
"""
end
|
lib/phoenix_markdown.ex
| 0.819026
| 0.864882
|
phoenix_markdown.ex
|
starcoder
|
defmodule PrometheusTelemetry.Metrics.Ecto do
@moduledoc """
These metrics give you visibility into Ecto query timings
- `ecto.query.total_time`
- `ecto.query.decode_time`
- `ecto.query.query_time`
- `ecto.query.idle_time`
"""
import Telemetry.Metrics, only: [distribution: 2]
alias PrometheusTelemetry.Config
@microsecond_buckets Config.default_microsecond_buckets()
@microsecond_unit {:native, :microsecond}
@millisecond_unit {:native, :millisecond}
@millisecond_buckets Config.default_millisecond_buckets()
def metrics(
repo_list,
default_opts \\ [
millisecond_buckets: @millisecond_buckets,
microsecond_buckets: @microsecond_buckets
])
def metrics(repo_list, default_opts) when is_list(repo_list) do
Enum.flat_map(repo_list, fn repo ->
repo
|> change_pg_module_to_string()
|> metrics(default_opts)
end)
end
def metrics(repo_str, default_opts) do
event_name = repo_str |> change_pg_module_to_string |> create_event_name
[
distribution(
"ecto.query.total_time",
event_name: event_name,
measurement: :total_time,
description: "Gets total time spent on query",
tags: [:repo, :query, :source, :result],
tag_values: &format_proper_tag_values/1,
unit: @microsecond_unit,
reporter_options: [buckets: default_opts[:microsecond_buckets]]
),
distribution(
"ecto.query.decode_time",
event_name: event_name,
measurement: :decode_time,
description: "Total time spent decoding query",
tags: [:repo, :query, :source, :result],
tag_values: &format_proper_tag_values/1,
unit: @millisecond_unit,
reporter_options: [buckets: default_opts[:millisecond_buckets]]
),
distribution(
"ecto.query.query_time",
event_name: event_name,
measurement: :query_time,
description: "Total time spent querying",
tags: [:repo, :query, :source, :result],
tag_values: &format_proper_tag_values/1,
unit: @millisecond_unit,
reporter_options: [buckets: default_opts[:millisecond_buckets]]
),
distribution(
"ecto.query.idle_time",
event_name: event_name,
measurement: :idle_time,
description: "Total time spent idling",
tags: [:repo, :query, :source],
unit: @millisecond_unit,
reporter_options: [buckets: default_opts[:millisecond_buckets]]
)
]
end
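# Hedged usage sketch with a hypothetical MyApp.Repo; this yields four
# distributions listening on the [:my_app, :repo, :query] event:
#
#     PrometheusTelemetry.Metrics.Ecto.metrics([MyApp.Repo])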
defp create_event_name(repo_string) do
repo_string
|> String.split(".")
|> Enum.map(fn prefix -> String.to_atom(prefix) end)
|> Kernel.++([:query])
end
defp change_pg_module_to_string(repo) when is_binary(repo) do
repo
end
defp change_pg_module_to_string(repo) when is_atom(repo) do
names = repo
|> inspect()
|> String.split(".")
names
|> Stream.map(fn name ->
Macro.underscore(name)
end)
|> Enum.join(".")
end
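# e.g. MyApp.Repo |> change_pg_module_to_string() #=> "my_app.repo",
# which create_event_name/1 turns into [:my_app, :repo, :query]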
defp format_proper_tag_values(%{result: result} = metadata) do
{result_status, _} = result
Map.put(metadata, :result, to_string(result_status))
end
end
|
lib/prometheus_telemetry/metrics/ecto.ex
| 0.768081
| 0.458591
|
ecto.ex
|
starcoder
|
defmodule Spherical.R1.Interval do
@moduledoc ~S"""
Represents a closed interval on ℝ¹.
"""
defstruct lo: 1.0, hi: 0.0 # Return an empty interval by default
alias __MODULE__
import Kernel, except: [length: 1]
@type t :: %Interval{lo: number, hi: number}
# API
@doc "Returns an empty interval."
def new do
%__MODULE__{lo: 1.0, hi: 0.0}
end
@doc "Returns an interval representing a single `point`."
def new(point) when is_number(point) do
%__MODULE__{lo: point, hi: point}
end
@doc "Returns an interval between `a` and `b`."
def new(a, b) when is_number(a) and is_number(b) do
%__MODULE__{lo: a, hi: b}
end
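# For example, Interval.new(1.0, 3.0) has center 2.0 and length 2.0,
# while Interval.new() is empty (lo > hi) and has a negative length.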
# epsilon represents a reasonable level of noise between two values
# that can be considered to be equal.
@epsilon 1.0e-14
# API
@doc "Checks whether the `interval` is empty."
def is_empty?(%Interval{lo: lo, hi: hi}), do: lo > hi
def is_empty?(_interval), do: false
@doc "Checks if the intervals contains the same points."
def is_equal?(%Interval{lo: lo, hi: hi}, %Interval{lo: lo, hi: hi}), do: true
def is_equal?(%Interval{}=first, %Interval{}=second) do
is_empty?(first) && is_empty?(second)
end
def is_equal?(_first, _second), do: false
@doc "Returns the midpoint of the `interval`."
def center(%Interval{lo: lo, hi: hi}), do: 0.5 * (lo + hi)
@doc """
Returns the length of the `interval`.
The length of an empty interval is negative.
"""
def length(%Interval{lo: lo, hi: hi}), do: hi - lo
@doc "Checks if the interval contains `point`."
def contains?(%Interval{}=first, %Interval{}=second) do
case is_empty? second do
true -> true
false -> first.lo <= second.lo && second.hi <= first.hi
end
end
def contains?(%Interval{lo: lo, hi: hi}, point) when is_number(point) do
lo <= point && point <= hi
end
@doc "Checks if the `interval` strictly contains `point`."
def interior_contains?(%Interval{lo: lo, hi: hi}, point) when is_number(point) do
lo < point && point < hi
end
def interior_contains?(%Interval{}=first, %Interval{}=second) do
case is_empty? second do
true -> true
false -> first.lo < second.lo && second.hi < first.hi
end
end
@doc "Check if `first` contains any points in common with `second`."
def intersects?(%Interval{}=first, %Interval{}=second) do
if first.lo <= second.lo do
second.lo <= first.hi && second.lo <= second.hi
else
first.lo <= second.hi && first.lo <= first.hi
end
end
@doc """
Checks if the interior of `first` contains any points in common
with `second`, including the latter's boundary.
"""
def interior_intersects?(%Interval{}=first, %Interval{}=second) do
second.lo < first.hi
&& first.lo < second.hi
&& first.lo < first.hi
&& second.lo <= first.hi
end
@doc """
Returns the interval containing **all** points common to `first` and
`second`.
"""
def intersection(%Interval{}=first, %Interval{}=second) do
Interval.new(max(first.lo, second.lo),
min(first.hi, second.hi))
end
@doc "Returns a copy of `interval` containing the given `point`."
def add_point(%Interval{}=interval, point) when is_number(point) do
cond do
is_empty? interval -> Interval.new(point, point)
point < interval.lo -> Interval.new(point, interval.hi)
point > interval.hi -> Interval.new(interval.lo, point)
true -> interval
end
end
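# For example: Interval.new(1.0, 3.0) |> Interval.add_point(5.0)
# #=> %Interval{lo: 1.0, hi: 5.0}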
@doc """
Returns the closest point in the `interval` to the given `point`.
The interval must be non-empty.
"""
def clamp_point(%Interval{}=interval, point) when is_number(point) do
max(interval.lo, min(interval.hi, point))
end
@doc """
Returns an `interval` that has been expanded on each side by
`margin`.
If `margin` is negative, then the function shrinks the `interval` on
each side by margin instead. The resulting interval may be empty.
Any expansion of an empty interval remains empty.
"""
def expanded(%Interval{lo: lo, hi: hi}=interval, margin) when is_number(margin) do
case is_empty? interval do
true -> interval
false -> Interval.new(lo - margin, hi + margin)
end
end
@doc """
Returns the smallest interval that contains the `first` and `second`
intervals.
"""
def union(%Interval{}=first, %Interval{}=second) do
cond do
is_empty? first -> second
is_empty? second -> first
true ->
Interval.new(min(first.lo, second.lo),
max(first.hi, second.hi))
end
end
@doc """
Reports whether the `first` interval can be transformed into the
`second` interval by moving each endpoint a small distance.
The empty interval is considered to be positioned arbitrarily on the
real line, so any interval with a small enough length will match the
empty interval.
"""
def approx_equal(%Interval{}=first, %Interval{}=second) do
cond do
is_empty? first -> length(second) <= 2 * @epsilon
is_empty? second -> length(first) <= 2 * @epsilon
true ->
abs(second.lo - first.lo) <= @epsilon &&
abs(second.hi - first.hi) <= @epsilon
end
end
end
|
lib/spherical/r1/interval.ex
| 0.92792
| 0.803675
|
interval.ex
|
starcoder
|
defmodule Ecto.Repo do
@moduledoc """
Defines a repository.
A repository maps to an underlying data store, controlled by the
adapter. For example, Ecto ships with a Postgres adapter that
stores data into a PostgreSQL database.
When used, the repository expects the `:otp_app` and `:adapter` as
options. The `:otp_app` should point to an OTP application that has
the repository configuration. For example, the repository:
defmodule Repo do
use Ecto.Repo,
otp_app: :my_app,
adapter: Ecto.Adapters.Postgres
end
Could be configured with:
config :my_app, Repo,
database: "ecto_simple",
username: "postgres",
password: "<PASSWORD>",
hostname: "localhost"
Most of the configuration that goes into the `config` is specific
to the adapter. For this particular example, you can check
[`Ecto.Adapters.Postgres`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.Postgres.html)
for more information. In spite of this, the following configuration values
are shared across all adapters:
* `:name` - the name of the Repo supervisor process
* `:priv` - the directory where to keep repository data, like
migrations, schema and more. Defaults to "priv/YOUR_REPO".
It must always point to a subdirectory inside the priv directory
* `:url` - a URL that specifies storage information. Read below
for more information
* `:log` - the log level used when logging the query with Elixir's
Logger. If false, disables logging for that repository.
Defaults to `:debug`
* `:pool_size` - the size of the pool used by the connection module.
Defaults to `10`
* `:telemetry_prefix` - we recommend adapters to publish events
using the `Telemetry` library. By default, the telemetry prefix
is based on the module name, so if your module is called
`MyApp.Repo`, the prefix will be `[:my_app, :repo]`. See the
"Telemetry Events" section to see which events we recommend
adapters to publish. Note that if you have multiple databases, you
should keep the `:telemetry_prefix` consistent for each repo and
use the `:repo` property in the event metadata for distinguishing
between repos.
## URLs
Repositories by default support URLs. For example, the configuration
above could be rewritten to:
config :my_app, Repo,
url: "ecto://postgres:postgres@localhost/ecto_simple"
The scheme can be of any value. The path represents the database name
while options are simply merged in.
URL can include query parameters to override shared and adapter-specific
options, like `ssl`, `timeout` and `pool_size`. The following example
shows how to pass these configuration values:
config :my_app, Repo,
url: "ecto://postgres:postgres@localhost/ecto_simple?ssl=true&pool_size=10"
In case the URL needs to be dynamically configured, for example by
reading a system environment variable, such can be done via the
`c:init/2` repository callback:
def init(_type, config) do
{:ok, Keyword.put(config, :url, System.get_env("DATABASE_URL"))}
end
## Shared options
Almost all of the repository functions outlined in this module accept the following
options:
* `:timeout` - The time in milliseconds (as an integer) to wait for the query call to
finish. `:infinity` will wait indefinitely (default: `15_000`)
* `:log` - When false, does not log the query
* `:telemetry_event` - The telemetry event name to dispatch the event under.
See the next section for more information
* `:telemetry_options` - Extra options to attach to telemetry event name.
See the next section for more information
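For instance (a sketch assuming a `Post` schema), these options are passed
as the trailing keyword list of a repository call:

    # Wait indefinitely and skip logging for this particular call
    MyRepo.all(Post, timeout: :infinity, log: false)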
## Telemetry events
There are two types of telemetry events. The ones emitted by Ecto and the
ones that are adapter specific.
### Ecto telemetry events
The following events are emitted by all Ecto repositories:
* `[:ecto, :repo, :init]` - it is invoked whenever a repository starts.
The measurement is a single `system_time` entry in the `:native` time unit. The
metadata is the `:repo` and all initialization options under `:opts`.
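As a sketch, one could log repository boot by attaching to this event
(the handler id below is illustrative):

    :telemetry.attach(
      "log-repo-init",
      [:ecto, :repo, :init],
      fn _event, _measurements, metadata, _config ->
        IO.inspect(metadata.repo, label: "repo started")
      end,
      nil
    )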
### Adapter-specific events
We recommend adapters to publish certain `Telemetry` events listed below.
Those events will use the `:telemetry_prefix` outlined above which defaults
to `[:my_app, :repo]`.
For instance, to receive all query events published by a repository called
`MyApp.Repo`, one would define a module:
defmodule MyApp.Telemetry do
def handle_event([:my_app, :repo, :query], measurements, metadata, config) do
IO.inspect binding()
end
end
Then, in the `Application.start/2` callback, attach the handler to this event using
a unique handler id:
:ok = :telemetry.attach("my-app-handler-id", [:my_app, :repo, :query], &MyApp.Telemetry.handle_event/4, %{})
For details, see [the telemetry documentation](https://hexdocs.pm/telemetry/).
Below we list all events developers should expect from Ecto. All examples
below consider a repository named `MyApp.Repo`:
#### `[:my_app, :repo, :query]`
This event should be invoked on every query sent to the adapter, including
queries that are related to the transaction management.
The `:measurements` map will include the following, all given in the
`:native` time unit:
* `:idle_time` - the time the connection spent waiting before being checked out for the query
* `:queue_time` - the time spent waiting to check out a database connection
* `:query_time` - the time spent executing the query
* `:decode_time` - the time spent decoding the data received from the database
* `:total_time` - the sum of the other measurements
All measurements are given in the `:native` time unit. You can read more
about it in the docs for `System.convert_time_unit/3`.
The telemetry `:metadata` map includes the following fields. Each database
adapter may emit different information here. For Ecto.SQL databases, it
will look like this:
* `:type` - the type of the Ecto query. For example, for Ecto.SQL
databases, it would be `:ecto_sql_query`
* `:repo` - the Ecto repository
* `:result` - the query result
* `:params` - the query parameters
* `:query` - the query sent to the database as a string
* `:source` - the source the query was made on (may be nil)
* `:options` - extra options given to the repo operation under
`:telemetry_options`
## Read-only repositories
You can mark a repository as read-only by passing the `:read_only`
flag on `use`:
use Ecto.Repo, otp_app: ..., adapter: ..., read_only: true
By passing the `:read_only` option, none of the functions that perform
write operations, such as `c:insert/2`, `c:insert_all/3`, `c:update_all/3`,
and friends will be defined.
"""
@type t :: module
@doc """
Returns all running Ecto repositories.
The list is returned in no particular order. The list
contains either atoms, for named Ecto repositories, or
PIDs.
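## Examples

A sketch of a possible result (the entries depend on which repos are running):

    Ecto.Repo.all_running()
    #=> [MyApp.Repo, #PID<0.123.0>]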
"""
@spec all_running() :: [atom() | pid()]
defdelegate all_running(), to: Ecto.Repo.Registry
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Ecto.Repo
{otp_app, adapter, behaviours} =
Ecto.Repo.Supervisor.compile_config(__MODULE__, opts)
@otp_app otp_app
@adapter adapter
@default_dynamic_repo opts[:default_dynamic_repo] || __MODULE__
@read_only opts[:read_only] || false
@before_compile adapter
@aggregates [:count, :avg, :max, :min, :sum]
def config do
{:ok, config} = Ecto.Repo.Supervisor.runtime_config(:runtime, __MODULE__, @otp_app, [])
config
end
def __adapter__ do
@adapter
end
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor
}
end
def start_link(opts \\ []) do
Ecto.Repo.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts)
end
def stop(timeout \\ 5000) do
Supervisor.stop(get_dynamic_repo(), :normal, timeout)
end
def load(schema_or_types, data) do
Ecto.Repo.Schema.load(@adapter, schema_or_types, data)
end
def checkout(fun, opts \\ []) when is_function(fun) do
{adapter, meta} = Ecto.Repo.Registry.lookup(get_dynamic_repo())
adapter.checkout(meta, opts, fun)
end
def checked_out? do
{adapter, meta} = Ecto.Repo.Registry.lookup(get_dynamic_repo())
adapter.checked_out?(meta)
end
@compile {:inline, get_dynamic_repo: 0, with_default_options: 2}
def get_dynamic_repo() do
Process.get({__MODULE__, :dynamic_repo}, @default_dynamic_repo)
end
def put_dynamic_repo(dynamic) when is_atom(dynamic) or is_pid(dynamic) do
Process.put({__MODULE__, :dynamic_repo}, dynamic) || @default_dynamic_repo
end
def default_options(_operation), do: []
defoverridable default_options: 1
defp with_default_options(operation_name, opts) do
Keyword.merge(default_options(operation_name), opts)
end
## Transactions
if Ecto.Adapter.Transaction in behaviours do
def transaction(fun_or_multi, opts \\ []) do
Ecto.Repo.Transaction.transaction(__MODULE__, get_dynamic_repo(), fun_or_multi, with_default_options(:transaction, opts))
end
def in_transaction? do
Ecto.Repo.Transaction.in_transaction?(get_dynamic_repo())
end
@spec rollback(term) :: no_return
def rollback(value) do
Ecto.Repo.Transaction.rollback(get_dynamic_repo(), value)
end
end
## Schemas
if Ecto.Adapter.Schema in behaviours and not @read_only do
def insert(struct, opts \\ []) do
Ecto.Repo.Schema.insert(__MODULE__, get_dynamic_repo(), struct, with_default_options(:insert, opts))
end
def update(struct, opts \\ []) do
Ecto.Repo.Schema.update(__MODULE__, get_dynamic_repo(), struct, with_default_options(:update, opts))
end
def insert_or_update(changeset, opts \\ []) do
Ecto.Repo.Schema.insert_or_update(__MODULE__, get_dynamic_repo(), changeset, with_default_options(:insert_or_update, opts))
end
def delete(struct, opts \\ []) do
Ecto.Repo.Schema.delete(__MODULE__, get_dynamic_repo(), struct, with_default_options(:delete, opts))
end
def insert!(struct, opts \\ []) do
Ecto.Repo.Schema.insert!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:insert, opts))
end
def update!(struct, opts \\ []) do
Ecto.Repo.Schema.update!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:update, opts))
end
def insert_or_update!(changeset, opts \\ []) do
Ecto.Repo.Schema.insert_or_update!(__MODULE__, get_dynamic_repo(), changeset, with_default_options(:insert_or_update, opts))
end
def delete!(struct, opts \\ []) do
Ecto.Repo.Schema.delete!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:delete, opts))
end
def insert_all(schema_or_source, entries, opts \\ []) do
Ecto.Repo.Schema.insert_all(__MODULE__, get_dynamic_repo(), schema_or_source, entries, with_default_options(:insert_all, opts))
end
end
## Queryable
if Ecto.Adapter.Queryable in behaviours do
if not @read_only do
def update_all(queryable, updates, opts \\ []) do
Ecto.Repo.Queryable.update_all(get_dynamic_repo(), queryable, updates, with_default_options(:update_all, opts))
end
def delete_all(queryable, opts \\ []) do
Ecto.Repo.Queryable.delete_all(get_dynamic_repo(), queryable, with_default_options(:delete_all, opts))
end
end
def all(queryable, opts \\ []) do
Ecto.Repo.Queryable.all(get_dynamic_repo(), queryable, with_default_options(:all, opts))
end
def stream(queryable, opts \\ []) do
Ecto.Repo.Queryable.stream(get_dynamic_repo(), queryable, with_default_options(:stream, opts))
end
def get(queryable, id, opts \\ []) do
Ecto.Repo.Queryable.get(get_dynamic_repo(), queryable, id, with_default_options(:all, opts))
end
def get!(queryable, id, opts \\ []) do
Ecto.Repo.Queryable.get!(get_dynamic_repo(), queryable, id, with_default_options(:all, opts))
end
def get_by(queryable, clauses, opts \\ []) do
Ecto.Repo.Queryable.get_by(get_dynamic_repo(), queryable, clauses, with_default_options(:all, opts))
end
def get_by!(queryable, clauses, opts \\ []) do
Ecto.Repo.Queryable.get_by!(get_dynamic_repo(), queryable, clauses, with_default_options(:all, opts))
end
def reload(queryable, opts \\ []) do
Ecto.Repo.Queryable.reload(get_dynamic_repo(), queryable, opts)
end
def reload!(queryable, opts \\ []) do
Ecto.Repo.Queryable.reload!(get_dynamic_repo(), queryable, opts)
end
def one(queryable, opts \\ []) do
Ecto.Repo.Queryable.one(get_dynamic_repo(), queryable, with_default_options(:all, opts))
end
def one!(queryable, opts \\ []) do
Ecto.Repo.Queryable.one!(get_dynamic_repo(), queryable, with_default_options(:all, opts))
end
def aggregate(queryable, aggregate, opts \\ [])
def aggregate(queryable, aggregate, opts)
when aggregate in [:count] and is_list(opts) do
Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, with_default_options(:all, opts))
end
def aggregate(queryable, aggregate, field)
when aggregate in @aggregates and is_atom(field) do
Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, with_default_options(:all, []))
end
def aggregate(queryable, aggregate, field, opts)
when aggregate in @aggregates and is_atom(field) and is_list(opts) do
Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, with_default_options(:all, opts))
end
def exists?(queryable, opts \\ []) do
Ecto.Repo.Queryable.exists?(get_dynamic_repo(), queryable, with_default_options(:all, opts))
end
def preload(struct_or_structs_or_nil, preloads, opts \\ []) do
Ecto.Repo.Preloader.preload(struct_or_structs_or_nil, get_dynamic_repo(), preloads, with_default_options(:preload, opts))
end
def prepare_query(operation, query, opts), do: {query, opts}
defoverridable prepare_query: 3
end
end
end
## User callbacks
@optional_callbacks init: 2
@doc """
A callback executed when the repo starts or when configuration is read.
The first argument is the context in which the callback is being invoked. If it
is called because the Repo supervisor is starting, it will be `:supervisor`.
It will be `:runtime` if it is called for reading configuration without
actually starting a process.
The second argument is the repository configuration as stored in the
application environment. It must return `{:ok, keyword}` with the updated
list of configuration or `:ignore` (only in the `:supervisor` case).
"""
@doc group: "User callbacks"
@callback init(context :: :supervisor | :runtime, config :: Keyword.t()) ::
{:ok, Keyword.t()} | :ignore
## Ecto.Adapter
@doc """
Returns the adapter tied to the repository.
"""
@doc group: "Runtime API"
@callback __adapter__ :: Ecto.Adapter.t()
@doc """
Returns the adapter configuration stored in the `:otp_app` environment.
If the `c:init/2` callback is implemented in the repository,
it will be invoked with the first argument set to `:runtime`.
"""
@doc group: "Runtime API"
@callback config() :: Keyword.t()
@doc """
Starts any connection pooling or supervision and returns `{:ok, pid}`
or just `:ok` if nothing needs to be done.
Returns `{:error, {:already_started, pid}}` if the repo is already
started or `{:error, term}` in case anything else goes wrong.
## Options
See the configuration in the moduledoc for options shared between adapters,
for adapter-specific configuration see the adapter's documentation.
"""
@doc group: "Runtime API"
@callback start_link(opts :: Keyword.t()) ::
{:ok, pid}
| {:error, {:already_started, pid}}
| {:error, term}
@doc """
Shuts down the repository.
"""
@doc group: "Runtime API"
@callback stop(timeout) :: :ok
@doc """
Checks out a connection for the duration of the function.
It returns the result of the function. This is useful when
you need to perform multiple operations against the repository
in a row and you want to avoid checking out the connection
multiple times.
`checkout/2` and `transaction/2` can be combined and nested
multiple times. If `checkout/2` is called inside the function
of another `checkout/2` call, the function is simply executed,
without checking out a new connection.
## Options
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
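## Examples

A minimal sketch (assuming a `Post` schema) that runs two operations on a
single checked out connection:

    MyRepo.checkout(fn ->
      MyRepo.insert!(%Post{title: "first"})
      MyRepo.insert!(%Post{title: "second"})
    end)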
"""
@doc group: "Transaction API"
@callback checkout((() -> result), opts :: Keyword.t()) :: result when result: var
@doc """
Returns true if a connection has been checked out.
This is true if inside a `c:Ecto.Repo.checkout/2` or
`c:Ecto.Repo.transaction/2`.
## Examples
MyRepo.checked_out?
#=> false
MyRepo.transaction(fn ->
MyRepo.checked_out? #=> true
end)
MyRepo.checkout(fn ->
MyRepo.checked_out? #=> true
end)
"""
@doc group: "Transaction API"
@callback checked_out?() :: boolean
@doc """
Loads `data` into a schema or a map.
The first argument can be a schema module or a map (of types).
The first argument determines the return value: a struct or a map,
respectively.
The second argument `data` specifies fields and values that are to be loaded.
It can be a map, a keyword list, or a `{fields, values}` tuple.
Fields can be atoms or strings.
Fields that are not present in the schema (or `types` map) are ignored.
If any of the values has an invalid type, an error is raised.
To load data from non-database sources, use `Ecto.embedded_load/3`.
## Examples
iex> MyRepo.load(User, %{name: "Alice", age: 25})
%User{name: "Alice", age: 25}
iex> MyRepo.load(User, [name: "Alice", age: 25])
%User{name: "Alice", age: 25}
`data` can also take the form of `{fields, values}`:
iex> MyRepo.load(User, {[:name, :age], ["Alice", 25]})
%User{name: "Alice", age: 25, ...}
The first argument can also be a `types` map:
iex> types = %{name: :string, age: :integer}
iex> MyRepo.load(types, %{name: "Alice", age: 25})
%{name: "Alice", age: 25}
This function is especially useful when parsing raw query results:
iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT * FROM users", [])
iex> Enum.map(result.rows, &MyRepo.load(User, {result.columns, &1}))
[%User{...}, ...]
"""
@doc group: "Schema API"
@callback load(
schema_or_map :: module | map(),
data :: map() | Keyword.t() | {list, list}
) :: Ecto.Schema.t() | map()
@doc """
Returns the atom name or pid of the current repository.
See `c:put_dynamic_repo/1` for more information.
"""
@doc group: "Runtime API"
@callback get_dynamic_repo() :: atom() | pid()
@doc """
Sets the dynamic repository to be used in further interactions.
Sometimes you may want a single Ecto repository to talk to
many different database instances. By default, when you call
`MyApp.Repo.start_link/1`, it will start a repository with
name `MyApp.Repo`. But if you want to start multiple repositories,
you can give each of them a different name:
MyApp.Repo.start_link(name: :tenant_foo, hostname: "foo.example.com")
MyApp.Repo.start_link(name: :tenant_bar, hostname: "bar.example.com")
You can also start repositories without names by explicitly
setting the name to nil:
MyApp.Repo.start_link(name: nil, hostname: "temp.example.com")
However, once the repository is started, you can't directly interact with
it, since all operations in `MyApp.Repo` are sent by default to the repository
named `MyApp.Repo`. You can change the default repo at compile time with:
use Ecto.Repo, default_dynamic_repo: :name_of_repo
Or you can change it anytime at runtime by calling `put_dynamic_repo/1`:
MyApp.Repo.put_dynamic_repo(:tenant_foo)
From this moment on, all future queries done by the current process will
run on `:tenant_foo`.
"""
@doc group: "Runtime API"
@callback put_dynamic_repo(name_or_pid :: atom() | pid()) :: atom() | pid()
## Ecto.Adapter.Queryable
@optional_callbacks get: 3, get!: 3, get_by: 3, get_by!: 3, reload: 2, reload!: 2, aggregate: 3,
aggregate: 4, exists?: 2, one: 2, one!: 2, preload: 3, all: 2, stream: 2,
update_all: 3, delete_all: 2
@doc """
Fetches a single struct from the data store where the primary key matches the
given id.
Returns `nil` if no result was found. If the struct in the queryable
has no primary key, or more than one, an argument error is raised.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get(Post, 42)
MyRepo.get(Post, 42, prefix: "public")
"""
@doc group: "Query API"
@callback get(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) ::
Ecto.Schema.t() | nil
@doc """
Similar to `c:get/3` but raises `Ecto.NoResultsError` if no record was found.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get!(Post, 42)
MyRepo.get!(Post, 42, prefix: "public")
"""
@doc group: "Query API"
@callback get!(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Fetches a single result from the query.
Returns `nil` if no result was found. Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get_by(Post, title: "My post")
MyRepo.get_by(Post, [title: "My post"], prefix: "public")
"""
@doc group: "Query API"
@callback get_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
@doc """
Similar to `c:get_by/3` but raises `Ecto.NoResultsError` if no record was found.
Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get_by!(Post, title: "My post")
MyRepo.get_by!(Post, [title: "My post"], prefix: "public")
"""
@doc group: "Query API"
@callback get_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t()
@doc """
Reloads a given schema or schema list from the database.
When using with lists, it is expected that all of the structs in the list belong
to the same schema. Ordering is guaranteed to be kept. Results not found in
the database will be returned as `nil`.
## Example
MyRepo.reload(post)
%Post{}
MyRepo.reload([post1, post2])
[%Post{}, %Post{}]
MyRepo.reload([deleted_post, post1])
[nil, %Post{}]
"""
@doc group: "Schema API"
@callback reload(
struct_or_structs :: Ecto.Schema.t() | [Ecto.Schema.t()],
opts :: Keyword.t()
) :: Ecto.Schema.t() | [Ecto.Schema.t() | nil] | nil
@doc """
Similar to `c:reload/2`, but raises when something is not found.
When using with lists, ordering is guaranteed to be kept.
## Example
MyRepo.reload!(post)
%Post{}
MyRepo.reload!([post1, post2])
[%Post{}, %Post{}]
"""
@doc group: "Schema API"
@callback reload!(struct_or_structs, opts :: Keyword.t()) :: struct_or_structs
when struct_or_structs: Ecto.Schema.t() | [Ecto.Schema.t()]
@doc """
Calculate the given `aggregate`.
If the query has a limit, offset, distinct or combination set, it will be
automatically wrapped in a subquery in order to return the
proper result.
Any preload or select in the query will be ignored in favor of
the column being aggregated.
The aggregation will fail if any `group_by` field is set.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
# Returns the number of blog posts
Repo.aggregate(Post, :count)
# Returns the number of blog posts in the "private" schema path
# (in Postgres) or database (in MySQL)
Repo.aggregate(Post, :count, prefix: "private")
"""
@doc group: "Query API"
@callback aggregate(
queryable :: Ecto.Queryable.t(),
aggregate :: :count,
opts :: Keyword.t()
) :: term | nil
@doc """
Calculate the given `aggregate` over the given `field`.
See `c:aggregate/3` for general considerations and options.
## Examples
# Returns the number of visits per blog post
Repo.aggregate(Post, :count, :visits)
# Returns the number of visits per blog post in the "private" schema path
# (in Postgres) or database (in MySQL)
Repo.aggregate(Post, :count, :visits, prefix: "private")
# Returns the average number of visits for the top 10
query = from Post, limit: 10
Repo.aggregate(query, :avg, :visits)
"""
@doc group: "Query API"
@callback aggregate(
queryable :: Ecto.Queryable.t(),
aggregate :: :avg | :count | :max | :min | :sum,
field :: atom,
opts :: Keyword.t()
) :: term | nil
@doc """
Checks if there exists an entry that matches the given query.
Returns a boolean.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
# checks if any posts exist
Repo.exists?(Post)
# checks if any posts exist in the "private" schema path (in Postgres) or
# database (in MySQL)
Repo.exists?(Post, prefix: "private")
# checks if any post with a like count greater than 10 exists
query = from p in Post, where: p.like_count > 10
Repo.exists?(query)
"""
@doc group: "Query API"
@callback exists?(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: boolean()
@doc """
Fetches a single result from the query.
Returns `nil` if no result was found. Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
Repo.one(from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id)
query = from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id
Repo.one(query, prefix: "private")
"""
@doc group: "Query API"
@callback one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
Ecto.Schema.t() | nil
@doc """
Similar to `c:one/2` but raises `Ecto.NoResultsError` if no record was found.
Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
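## Examples

An illustrative query, assuming a `Post` schema:

    MyRepo.one!(from p in Post, where: p.id == ^post_id)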
"""
@doc group: "Query API"
@callback one!(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Preloads all associations on the given struct or structs.
This is similar to `Ecto.Query.preload/3` except it allows
you to preload structs after they have been fetched from the
database.
In case the association was already loaded, preload won't attempt
to reload it.
## Options
* `:force` - By default, Ecto won't preload associations that
are already loaded. By setting this option to true, any existing
association will be discarded and reloaded.
* `:in_parallel` - If the preloads must be done in parallel. It can
only be performed when we have more than one preload and the
repository is not in a transaction. Defaults to `true`.
* `:prefix` - the prefix to fetch preloads from. By default, queries
will use the same prefix as the one in the given collection. This
option allows the prefix to be changed.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
# Use a single atom to preload an association
posts = Repo.preload posts, :comments
# Use a list of atoms to preload multiple associations
posts = Repo.preload posts, [:comments, :authors]
# Use a keyword list to preload nested associations as well
posts = Repo.preload posts, [comments: [:replies, :likes], authors: []]
# Use a keyword list to customize how associations are queried
posts = Repo.preload posts, [comments: from(c in Comment, order_by: c.published_at)]
# Use a two-element tuple for a custom query and nested association definition
query = from c in Comment, order_by: c.published_at
posts = Repo.preload posts, [comments: {query, [:replies, :likes]}]
The query given to preload may also preload its own associations.
"""
@doc group: "Schema API"
@callback preload(structs_or_struct_or_nil, preloads :: term, opts :: Keyword.t()) ::
structs_or_struct_or_nil
when structs_or_struct_or_nil: [Ecto.Schema.t()] | Ecto.Schema.t() | nil
@doc """
A user customizable callback invoked for query-based operations.
This callback can be used to further modify the query and options
before it is transformed and sent to the database.
This callback is invoked for all query APIs, including the `stream`
functions. It is also invoked for `insert_all` if a source query is
given. It is not invoked for any of the other schema functions.
## Examples
Let's say you want to filter out records that were "soft-deleted"
(have `deleted_at` column set) from all operations unless an admin
is running the query; you can define the callback like this:
@impl true
def prepare_query(_operation, query, opts) do
if opts[:admin] do
{query, opts}
else
query = from(x in query, where: is_nil(x.deleted_at))
{query, opts}
end
end
And then execute the query:
Repo.all(query) # only non-deleted records are returned
Repo.all(query, admin: true) # all records are returned
The callback will be invoked for all queries, including queries
made from associations and preloads. It is not invoked for each
individual join inside a query.
"""
@doc group: "User callbacks"
@callback prepare_query(operation, query :: Ecto.Query.t(), opts :: Keyword.t()) ::
{Ecto.Query.t(), Keyword.t()}
when operation: :all | :update_all | :delete_all | :stream | :insert_all
@doc """
A user customizable callback invoked to retrieve default options
for operations.
This can be used to provide default values per operation that
have higher precedence than the values given on configuration
or when starting the repository. It can also be used to set
query specific options, such as `:prefix`.
This callback is invoked as the entry point for all repository
operations. For example, if you are executing a query with preloads,
this callback will be invoked once at the beginning, but the
options returned here will be passed to all following operations.
"""
@doc group: "User callbacks"
@callback default_options(operation) :: Keyword.t()
when operation: :all | :insert_all | :update_all | :delete_all | :stream |
:transaction | :insert | :update | :delete | :insert_or_update
@doc """
Fetches all entries from the data store matching the given query.
May raise `Ecto.QueryError` if query validation fails.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
# Fetch all post titles
query = from p in Post,
select: p.title
MyRepo.all(query)
"""
@doc group: "Query API"
@callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t()]
@doc """
Returns a lazy enumerable that emits all entries from the data store
matching the given query.
SQL adapters, such as Postgres and MySQL, can only enumerate a stream
inside a transaction.
May raise `Ecto.QueryError` if query validation fails.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
* `:max_rows` - The number of rows to load from the database as we stream.
It is supported at least by Postgres and MySQL and defaults to 500.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
# Fetch all post titles
query = from p in Post,
select: p.title
stream = MyRepo.stream(query)
MyRepo.transaction(fn ->
Enum.to_list(stream)
end)
"""
@doc group: "Query API"
@callback stream(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: Enum.t()
@doc """
Updates all entries matching the given query with the given values.
It returns a tuple containing the number of entries and any returned
result as second element. The second element is `nil` by default
unless a `select` is supplied in the update query. Note, however,
that not all databases support returning data from UPDATEs.
Keep in mind this `update_all` will not update autogenerated
fields like the `updated_at` columns.
See `Ecto.Query.update/3` for update operations that can be
performed on fields.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Examples
MyRepo.update_all(Post, set: [title: "New title"])
MyRepo.update_all(Post, inc: [visits: 1])
from(p in Post, where: p.id < 10, select: p.visits)
|> MyRepo.update_all(set: [title: "New title"])
from(p in Post, where: p.id < 10, update: [set: [title: "New title"]])
|> MyRepo.update_all([])
from(p in Post, where: p.id < 10, update: [set: [title: ^new_title]])
|> MyRepo.update_all([])
from(p in Post, where: p.id < 10, update: [set: [title: fragment("upper(?)", ^new_title)]])
|> MyRepo.update_all([])
"""
@doc group: "Query API"
@callback update_all(
queryable :: Ecto.Queryable.t(),
updates :: Keyword.t(),
opts :: Keyword.t()
) :: {non_neg_integer, nil | [term]}
@doc """
Deletes all entries matching the given query.
It returns a tuple containing the number of entries and any returned
result as second element. The second element is `nil` by default
unless a `select` is supplied in the delete query. Note, however,
that not all databases support returning data from DELETEs.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Examples
MyRepo.delete_all(Post)
from(p in Post, where: p.id < 10) |> MyRepo.delete_all
"""
@doc group: "Query API"
@callback delete_all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
{non_neg_integer, nil | [term]}
## Ecto.Adapter.Schema
@optional_callbacks insert_all: 3, insert: 2, insert!: 2, update: 2, update!: 2,
delete: 2, delete!: 2, insert_or_update: 2, insert_or_update!: 2,
prepare_query: 3
@doc """
Inserts all entries into the repository.
It expects a schema module (`MyApp.User`) or a source (`"users"`) or
both (`{"users", MyApp.User}`) as the first argument. The second
argument is a list of entries to be inserted, either as keyword
lists or as maps. The keys of the entries are the field names as
atoms and the value should be the respective value for the field
type or, optionally, an `Ecto.Query` that returns a single entry
with a single value.
It returns a tuple containing the number of entries
and any returned result as second element. If the database
does not support RETURNING in INSERT statements or no
return result was selected, the second element will be `nil`.
When a schema module is given, the entries given will be properly dumped
before being sent to the database. If the schema primary key has type
`:id` or `:binary_id`, it will be handled either at the adapter
or the storage layer. However, any other primary key type or autogenerated
value, like `Ecto.UUID` and timestamps, won't be autogenerated when
using `c:insert_all/3`. You must set those fields explicitly. This is by
design as this function aims to be a more direct way to insert data into
the database without the conveniences of `c:insert/2`. This is also
consistent with `c:update_all/3` that does not handle auto generated
values as well.
It is also not possible to use `insert_all` to insert across multiple
tables; therefore, associations are not supported.
If a source is given, without a schema module, the given fields are passed
as is to the adapter.
## Options
* `:returning` - selects which fields to return. When `true`,
returns all fields in the given schema. May be a list of
fields, where a struct is still returned but only with the
given fields. Or `false`, where nothing is returned (the default).
This option is not supported by all databases.
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
* `:on_conflict` - It may be one of `:raise` (the default), `:nothing`,
`:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`,
a keyword list of update instructions or an `Ecto.Query`
query for updates. See the "Upserts" section for more information.
* `:conflict_target` - A list of column names to verify for conflicts.
Those columns are expected to have unique indexes that may conflict.
If none is specified, the conflict target is left up to the database.
It may also be `{:unsafe_fragment, binary_fragment}` to pass any
expression to the database without any sanitization; this is useful
for partial indexes or indexes with expressions, such as
`{:unsafe_fragment, "(coalesce(firstname, \"\"), coalesce(lastname, \"\")) WHERE middlename IS NULL"}` for
the `ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL` SQL query.
* `:placeholders` - A map with placeholders. This feature is not supported
by all databases. See the "Placeholders" section for more information.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Source query
A query can be given instead of a list with entries. This query needs to select
into a map containing only keys that are available as writeable columns in the
schema.
## Examples
MyRepo.insert_all(Post, [[title: "My first post"], [title: "My second post"]])
MyRepo.insert_all(Post, [%{title: "My first post"}, %{title: "My second post"}])
query = from p in Post,
join: c in assoc(p, :comments),
select: %{
author_id: p.author_id,
posts: count(p.id, :distinct),
interactions: sum(p.likes) + count(c.id)
},
group_by: p.author_id
MyRepo.insert_all(AuthorStats, query)
## Upserts
`c:insert_all/3` provides upserts (update or inserts) via the `:on_conflict`
option. The `:on_conflict` option supports the following values:
* `:raise` - raises if there is a conflicting primary key or unique index
* `:nothing` - ignores the error in case of conflicts
* `:replace_all` - replace **all** values on the existing row with the values
in the schema/changeset, including fields not explicitly set in the changeset,
such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`).
Do not use this option if you have auto-incrementing primary keys, as they
will also be replaced. You most likely want to use `{:replace_all_except, [:id]}`
or `{:replace, fields}` explicitly instead. This option requires a schema
* `{:replace_all_except, fields}` - same as above except the given fields
are not replaced. This option requires a schema
* `{:replace, fields}` - replace only specific columns. This option requires
`:conflict_target`
* a keyword list of update instructions - such as the one given to
`c:update_all/3`, for example: `[set: [title: "new title"]]`
* an `Ecto.Query` that will act as an `UPDATE` statement, such as the
one given to `c:update_all/3`
Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY"
on databases such as MySQL.
## Return values
By default, both Postgres and MySQL will return the number of entries
inserted on `c:insert_all/3`. However, when the `:on_conflict` option
is specified, Postgres and MySQL will return different results.
Postgres will only count a row if it was affected and will
return 0 if no new entry was added.
MySQL will return, at a minimum, the number of entries attempted. For example,
if `:on_conflict` is set to `:nothing`, MySQL will return
the number of entries attempted to be inserted, even when no entry
was added.
Also note that if `:on_conflict` is a query, MySQL will return
the number of attempted entries plus the number of entries modified
by the UPDATE query.
## Placeholders
Passing in a map for the `:placeholders` allows you to send less
data over the wire when you have many entries with the same value
for a field. To use a placeholder, replace its value in each of your
entries with `{:placeholder, key}`, where `key` is the key you
are using in the `:placeholders` option map. For example:
placeholders = %{blob: large_blob_of_text(...)}
entries = [
%{title: "v1", body: {:placeholder, :blob}},
%{title: "v2", body: {:placeholder, :blob}}
]
Repo.insert_all(Post, entries, placeholders: placeholders)
Keep in mind that:
* placeholders cannot be nested in other values. For example, you
cannot put a placeholder inside an array. Instead, the whole
array has to be the placeholder
* a placeholder key can only be used with columns of the same type
* placeholders require a database that supports index parameters,
so they are not currently compatible with MySQL
"""
@doc group: "Schema API"
@callback insert_all(
schema_or_source :: binary | {binary, module} | module,
entries_or_query :: [%{atom => value} | Keyword.t(value)] | Ecto.Query.t,
opts :: Keyword.t()
) :: {non_neg_integer, nil | [term]} when value: term | Ecto.Query.t()
@doc """
Inserts a struct defined via `Ecto.Schema` or a changeset.
In case a struct is given, the struct is converted into a changeset
with all non-nil fields as part of the changeset.
In case a changeset is given, the changes in the changeset are
merged with the struct fields, and all of them are sent to the
database. If more than one database operation is required, they're
automatically wrapped in a transaction.
It returns `{:ok, struct}` if the struct has been successfully
inserted or `{:error, changeset}` if there was a validation
or a known constraint error.
## Options
* `:returning` - selects which fields to return. It accepts a list
of fields to be returned from the database. When `true`, returns
all fields. When `false`, no extra fields are returned. It will
always include all fields in `read_after_writes` as well as any
autogenerated id. Not all databases support this option and it
may not be available during upserts. See the "Upserts" section
for more information.
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in any schema. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:on_conflict` - It may be one of `:raise` (the default), `:nothing`,
`:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`,
a keyword list of update instructions or an `Ecto.Query` query for updates.
See the "Upserts" section for more information.
* `:conflict_target` - A list of column names to verify for conflicts.
Those columns are expected to have unique indexes that may conflict.
If none is specified, the conflict target is left up to the database.
It may also be `{:unsafe_fragment, binary_fragment}` to pass any
expression to the database without any sanitization; this is useful
for partial indexes or indexes with expressions, such as
`{:unsafe_fragment, "(coalesce(firstname, \"\"), coalesce(lastname, \"\")) WHERE middlename IS NULL"}` for
the `ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL` SQL query.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
A typical example is calling `MyRepo.insert/1` with a struct
and acting on the return value:
case MyRepo.insert %Post{title: "Ecto is great"} do
{:ok, struct} -> # Inserted with success
{:error, changeset} -> # Something went wrong
end
## Upserts
`c:insert/2` provides upserts (update or inserts) via the `:on_conflict`
option. The `:on_conflict` option supports the following values:
* `:raise` - raises if there is a conflicting primary key or unique index
* `:nothing` - ignores the error in case of conflicts
* `:replace_all` - replace **all** values on the existing row with the values
in the schema/changeset, including fields not explicitly set in the changeset,
such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`).
Do not use this option if you have auto-incrementing primary keys, as they
will also be replaced. You most likely want to use `{:replace_all_except, [:id]}`
or `{:replace, fields}` explicitly instead. This option requires a schema
* `{:replace_all_except, fields}` - same as above except the given fields are
not replaced. This option requires a schema
* `{:replace, fields}` - replace only specific columns. This option requires
`:conflict_target`
* a keyword list of update instructions - such as the one given to
`c:update_all/3`, for example: `[set: [title: "new title"]]`
* an `Ecto.Query` that will act as an `UPDATE` statement, such as the
one given to `c:update_all/3`. Similarly to `c:update_all/3`, auto
generated values, such as timestamps are not automatically updated.
If the struct cannot be found, `Ecto.StaleEntryError` will be raised.
Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY"
on databases such as MySQL.
As an example, imagine `:title` is marked as a unique column in
the database:
{:ok, inserted} = MyRepo.insert(%Post{title: "this is unique"})
Now we can insert with the same title but do nothing on conflicts:
{:ok, ignored} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: :nothing)
assert ignored.id == nil
Because we used `on_conflict: :nothing`, instead of getting an error,
we got `{:ok, struct}`. However the returned struct does not reflect
the data in the database. One possible mechanism to detect if an
insert or nothing happened in case of `on_conflict: :nothing` is by
checking the `id` field. `id` will be nil if the field is autogenerated
by the database and no insert happened.
For actual upserts, where an insert or update may happen, the situation
is slightly more complex, as the database does not actually inform us
if an insert or update happened. Let's insert a post with the same title
but use a query to update the body column in case of conflicts:
# In Postgres (it requires the conflict target for updates):
on_conflict = [set: [body: "updated"]]
{:ok, updated} = MyRepo.insert(%Post{title: "this is unique"},
on_conflict: on_conflict, conflict_target: :title)
# In MySQL (conflict target is not supported):
on_conflict = [set: [title: "updated"]]
{:ok, updated} = MyRepo.insert(%Post{id: inserted.id, title: "updated"},
on_conflict: on_conflict)
In the examples above, even though it returned `:ok`, we do not know
if we inserted new data or if we updated only the `:on_conflict` fields.
In case an update happened, the data in the struct most likely does
not match the data in the database. For example, autogenerated fields
such as `inserted_at` will point to now rather than the time the
struct was actually inserted.
If you need to guarantee the data in the returned struct mirrors the
database, you have three options:
* Use `on_conflict: :replace_all`, although that will replace all
fields in the database with the ones in the struct/changeset,
including autogenerated fields such as `inserted_at` and `updated_at`:
MyRepo.insert(%Post{title: "this is unique"},
on_conflict: :replace_all, conflict_target: :title)
* Specify `read_after_writes: true` in your schema for choosing
fields that are read from the database after every operation.
Or pass `returning: true` to `insert` to read all fields back:
MyRepo.insert(%Post{title: "this is unique"}, returning: true,
on_conflict: on_conflict, conflict_target: :title)
* Alternatively, read the data again from the database in a separate
query. This option requires the primary key to be generated by the
database:
{:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: on_conflict)
Repo.get(Post, updated.id)
Because of the inability to know if the struct is up to date or not,
inserting a struct with associations and using the `:on_conflict` option
at the same time is not recommended, as Ecto will be unable to actually
track the proper status of the association.
"""
@doc group: "Schema API"
@callback insert(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Updates a changeset using its primary key.
A changeset is required as it is the only mechanism for
tracking dirty changes. Only the fields present in the `changes` part
of the changeset are sent to the database. Any other, in-memory
changes done to the schema are ignored. If more than one database
operation is required, they're automatically wrapped in a transaction.
If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError`
will be raised.
If the struct cannot be found, `Ecto.StaleEntryError` will be raised.
It returns `{:ok, struct}` if the struct has been successfully
updated or `{:error, changeset}` if there was a validation
or a known constraint error.
## Options
* `:returning` - selects which fields to return. It accepts a list
of fields to be returned from the database. When `true`, returns
all fields. When `false`, no extra fields are returned. It will
always include all fields in `read_after_writes`. Not all
databases support this option.
* `:force` - By default, if there are no changes in the changeset,
`c:update/2` is a no-op. By setting this option to true, update
callbacks will always be executed, even if there are no changes
(including timestamps).
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in any schema. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
post = MyRepo.get!(Post, 42)
post = Ecto.Changeset.change post, title: "New title"
case MyRepo.update post do
{:ok, struct} -> # Updated with success
{:error, changeset} -> # Something went wrong
end
"""
@doc group: "Schema API"
@callback update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Inserts or updates a changeset depending on whether the struct is persisted
or not.
The distinction whether to insert or update will be made on the
`Ecto.Schema.Metadata` field `:state`. The `:state` is automatically set by
Ecto when loading or building a schema.
Please note that for this to work, you will have to load existing structs from
the database. So even if the struct exists, this won't work:
struct = %Post{id: "existing_id", ...}
changeset = Post.changeset(struct, %{})
MyRepo.insert_or_update changeset
# => {:error, changeset} # id already exists
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in any schema. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`. Only applies to updates.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
Only applies to updates.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
result =
case MyRepo.get(Post, id) do
nil -> %Post{id: id} # Post not found, we build one
post -> post # Post exists, let's use it
end
|> Post.changeset(changes)
|> MyRepo.insert_or_update
case result do
{:ok, struct} -> # Inserted or updated with success
{:error, changeset} -> # Something went wrong
end
"""
@doc group: "Schema API"
@callback insert_or_update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Deletes a struct using its primary key.
If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError`
will be raised. If the struct has been removed prior to the call,
`Ecto.StaleEntryError` will be raised. If more than one database
operation is required, they're automatically wrapped in a transaction.
It returns `{:ok, struct}` if the struct has been successfully
deleted or `{:error, changeset}` if there was a validation
or a known constraint error. By default, constraint errors will
raise the `Ecto.ConstraintError` exception, unless a changeset is
given as the first argument with the relevant constraints declared
in it (see `Ecto.Changeset`).
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
post = MyRepo.get!(Post, 42)
case MyRepo.delete post do
{:ok, struct} -> # Deleted with success
{:error, changeset} -> # Something went wrong
end
"""
@doc group: "Schema API"
@callback delete(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Same as `c:insert/2` but returns the struct or raises if the changeset is invalid.
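## Example

A sketch, assuming a `Post` schema:

    post = MyRepo.insert!(%Post{title: "Ecto is great"})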
"""
@doc group: "Schema API"
@callback insert!(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
@doc """
Same as `c:update/2` but returns the struct or raises if the changeset is invalid.
"""
@doc group: "Schema API"
@callback update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Same as `c:insert_or_update/2` but returns the struct or raises if the changeset
is invalid.
"""
@doc group: "Schema API"
@callback insert_or_update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Same as `c:delete/2` but returns the struct or raises if the changeset is invalid.
"""
@doc group: "Schema API"
@callback delete!(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
## Ecto.Adapter.Transaction
@optional_callbacks transaction: 2, in_transaction?: 0, rollback: 1
@doc """
Runs the given function or `Ecto.Multi` inside a transaction.
## Use with function
`c:transaction/2` can be called with both a function of arity
zero or one. The arity zero function will just be executed as is,
while the arity one function will receive the repo of the transaction
as its first argument, similar to `Ecto.Multi.run/3`.
If an unhandled error occurs the transaction will be rolled back
and the error will bubble up from the transaction function.
If no error occurred the transaction will be committed when the
function returns. A transaction can be explicitly rolled back
by calling `c:rollback/1`, this will immediately leave the function
and return the value given to `rollback` as `{:error, value}`.
A successful transaction returns the value returned by the function
wrapped in a tuple as `{:ok, value}`.
If `c:transaction/2` is called inside another transaction, the function
is simply executed, without wrapping the new transaction call in any
way. If there is an error in the inner transaction and the error is
rescued, or the inner transaction is rolled back, the whole outer
transaction is marked as tainted, guaranteeing nothing will be committed.
## Use with Ecto.Multi
Besides functions, transactions can be used with an `Ecto.Multi` struct.
A transaction will be started, all operations applied and in case of
success committed returning `{:ok, changes}`. In case of any errors
the transaction will be rolled back and
`{:error, failed_operation, failed_value, changes_so_far}` will be
returned.
You can read more about using transactions with `Ecto.Multi` as well as
see some examples in the `Ecto.Multi` documentation.
## Options
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
import Ecto.Changeset, only: [change: 2]
MyRepo.transaction(fn ->
MyRepo.update!(change(alice, balance: alice.balance - 10))
MyRepo.update!(change(bob, balance: bob.balance + 10))
end)
# When passing a function of arity 1, it receives the repository itself
MyRepo.transaction(fn repo ->
repo.insert!(%Post{})
end)
# Roll back a transaction explicitly
MyRepo.transaction(fn ->
p = MyRepo.insert!(%Post{})
if not Editor.post_allowed?(p) do
MyRepo.rollback(:posting_not_allowed)
end
end)
# With Ecto.Multi
Ecto.Multi.new()
|> Ecto.Multi.insert(:post, %Post{})
|> MyRepo.transaction()
"""
@doc group: "Transaction API"
@callback transaction(fun_or_multi :: fun | Ecto.Multi.t(), opts :: Keyword.t()) ::
{:ok, any}
| {:error, any}
| {:error, Ecto.Multi.name(), any, %{Ecto.Multi.name() => any}}
@doc """
Returns true if the current process is inside a transaction.
If you are using the `Ecto.Adapters.SQL.Sandbox` in tests, note that even
though each test is inside a transaction, `in_transaction?/0` will only
return true inside transactions explicitly created with `transaction/2`. This
is done so the test environment mimics dev and prod.
If you are trying to debug transaction-related code while using
`Ecto.Adapters.SQL.Sandbox`, it may be more helpful to configure the database
to log all statements and consult those logs.
## Examples
MyRepo.in_transaction?
#=> false
MyRepo.transaction(fn ->
MyRepo.in_transaction? #=> true
end)
"""
@doc group: "Transaction API"
@callback in_transaction?() :: boolean
@doc """
Rolls back the current transaction.
The transaction will return the value given as `{:error, value}`.
Note that calling `rollback` causes the code in the transaction to stop executing.
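  For example, assuming `changeset` is an `Ecto.Changeset` built elsewhere:
      MyRepo.transaction(fn ->
        case MyRepo.insert(changeset) do
          {:ok, post} -> post
          {:error, changeset} -> MyRepo.rollback(changeset)
        end
      end)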
"""
@doc group: "Transaction API"
@callback rollback(value :: any) :: no_return
end
|
lib/ecto/repo.ex
| 0.897332
| 0.646167
|
repo.ex
|
starcoder
|
defmodule EpicenterWeb.Test.Pages.ContactInvestigationClinicalDetails do
import ExUnit.Assertions
import Euclid.Test.Extra.Assertions, only: [assert_eq: 2]
import Phoenix.LiveViewTest
alias Epicenter.ContactInvestigations.ContactInvestigation
alias Epicenter.Test
alias EpicenterWeb.Test.Pages
alias Phoenix.LiveViewTest.View
@form_id "contact-investigation-clinical-details-form"
def visit(%Plug.Conn{} = conn, %ContactInvestigation{id: id}) do
conn |> Pages.visit("/contact-investigations/#{id}/clinical-details")
end
def assert_here(view_or_conn_or_html) do
view_or_conn_or_html |> Pages.assert_on_page("contact-investigation-clinical-details")
view_or_conn_or_html
end
def assert_clinical_status_selection(%View{} = view, selections) do
actual_selections =
view
|> Pages.actual_selections("clinical-details-form-clinical-status", "radio")
assert selections == actual_selections
view
end
def assert_exposed_on_explanation_text(%View{} = view, date) do
html =
view
|> render()
|> Test.Html.parse()
assert html |> Test.Html.text("#contact-investigation-clinical-details") =~
"Last together with an initiating index case on #{date}"
view
end
def assert_exposed_on_value(%View{} = view, value) do
view
|> render()
|> Test.Html.parse()
|> Test.Html.attr("##{@form_id}_exposed_on", "value")
|> assert_eq([value])
view
end
def assert_symptoms_selection(%View{} = view, selections) do
actual_selections =
view
|> Pages.actual_selections("clinical-details-form-symptoms", "checkbox")
assert selections == actual_selections
view
end
def assert_save_button_visible(%View{} = view) do
view
|> render()
|> Test.Html.parse()
|> Test.Html.text("button[type=submit]")
|> assert_eq("Save")
view
end
def change_form(view, attrs) do
view |> element("#contact-investigation-clinical-details-form") |> render_change(attrs)
view
end
end
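# Usage sketch in a LiveView test (hypothetical setup: `conn` comes from
# `Phoenix.ConnTest` and `contact_investigation` is a persisted fixture):
#
#     conn
#     |> Pages.ContactInvestigationClinicalDetails.visit(contact_investigation)
#     |> Pages.ContactInvestigationClinicalDetails.assert_here()
#     |> Pages.ContactInvestigationClinicalDetails.assert_save_button_visible()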
|
test/support/pages/contact_investigation_clinical_details.ex
| 0.623377
| 0.452717
|
contact_investigation_clinical_details.ex
|
starcoder
|
defmodule CodeCorps.GitHub.Adapters.Issue do
@moduledoc """
Used to adapt a GitHub Issue payload into attributes for creating or updating
a `CodeCorps.GithubIssue` and vice-versa.
"""
alias CodeCorps.{
Adapter.MapTransformer,
GitHub.Adapters.Utils.BodyDecorator,
GithubIssue,
Task
}
@github_payload_to_github_issue_mapping [
{:body, ["body"]},
{:closed_at, ["closed_at"]},
{:comments_url, ["comments_url"]},
{:events_url, ["events_url"]},
{:github_created_at, ["created_at"]},
{:github_id, ["id"]},
{:github_updated_at, ["updated_at"]},
{:html_url, ["html_url"]},
{:labels_url, ["labels_url"]},
{:locked, ["locked"]},
{:number, ["number"]},
{:state, ["state"]},
{:title, ["title"]},
{:url, ["url"]}
]
@doc ~S"""
Converts a GitHub Issue payload into a set of attributes used to create or
update a `GithubIssue` record.
"""
@spec to_issue(map) :: map
def to_issue(%{} = payload) do
payload |> MapTransformer.transform(@github_payload_to_github_issue_mapping)
end
@github_payload_to_task_mapping [
{:created_at, ["created_at"]},
{:markdown, ["body"]},
{:modified_at, ["updated_at"]},
{:status, ["state"]},
{:title, ["title"]}
]
@github_issue_to_task_mapping [
{:created_at, [:github_created_at]},
{:markdown, [:body]},
{:modified_at, [:github_updated_at]},
{:status, [:state]},
{:title, [:title]}
]
@doc ~S"""
Converts a GitHub Issue payload into a set of attributes used to create or
update a `Task` record.
"""
@spec to_task(GithubIssue.t) :: map
def to_task(%GithubIssue{} = github_issue) do
github_issue
|> Map.from_struct
|> MapTransformer.transform(@github_issue_to_task_mapping)
end
@autogenerated_github_keys ~w(closed_at comments_url created_at events_url html_url id labels_url number updated_at url)
@doc ~S"""
Converts a `GithubIssue` or `Task` into a set of attributes used to create or
update an associated GitHub Issue on the GitHub API.
"""
@spec to_api(GithubIssue.t | Task.t) :: map
def to_api(%GithubIssue{} = github_issue) do
github_issue
|> Map.from_struct
|> MapTransformer.transform_inverse(@github_payload_to_github_issue_mapping)
|> Map.drop(@autogenerated_github_keys)
|> BodyDecorator.add_code_corps_header(github_issue)
end
def to_api(%Task{} = task) do
task
|> Map.from_struct
|> MapTransformer.transform_inverse(@github_payload_to_task_mapping)
|> Map.drop(@autogenerated_github_keys)
|> BodyDecorator.add_code_corps_header(task)
end
end
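# Round-trip sketch (hypothetical variables: `payload` is a decoded GitHub
# Issue webhook payload and `github_issue` is a persisted %GithubIssue{}):
#
#     issue_attrs = CodeCorps.GitHub.Adapters.Issue.to_issue(payload)
#     task_attrs = CodeCorps.GitHub.Adapters.Issue.to_task(github_issue)
#     api_payload = CodeCorps.GitHub.Adapters.Issue.to_api(github_issue)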
|
lib/code_corps/github/adapters/issue.ex
| 0.508788
| 0.461988
|
issue.ex
|
starcoder
|
defmodule AWS.Budgets do
@moduledoc """
The AWS Budgets API enables you to use AWS Budgets to plan your service usage,
service costs, and instance reservations.
The API reference provides descriptions, syntax, and usage examples for each of
the actions and data types for AWS Budgets.
Budgets provide you with a way to see the following information:
* How close your plan is to your budgeted amount or to the free tier
limits
* Your usage-to-date, including how much you've used of your
Reserved Instances (RIs)
* Your current estimated charges from AWS, and how much your
predicted usage will accrue in charges by the end of the month
* How much of your budget has been used
AWS updates your budget status several times a day. Budgets track your unblended
costs, subscriptions, refunds, and RIs. You can create the following types of
budgets:
* **Cost budgets** - Plan how much you want to spend on a service.
* **Usage budgets** - Plan how much you want to use one or more
services.
* **RI utilization budgets** - Define a utilization threshold, and
receive alerts when your RI usage falls below that threshold. This lets you see
if your RIs are unused or under-utilized.
* **RI coverage budgets** - Define a coverage threshold, and receive
alerts when the number of your instance hours that are covered by RIs falls below
that threshold. This lets you see how much of your instance usage is covered by
a reservation.
Service Endpoint
The AWS Budgets API provides the following endpoint:
* https://budgets.amazonaws.com
For information about costs that are associated with the AWS Budgets API, see
[AWS Cost Management Pricing](https://aws.amazon.com/aws-cost-management/pricing/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "AWSBudgets",
api_version: "2016-10-20",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "budgets",
global?: true,
protocol: "json",
service_id: "Budgets",
signature_version: "v4",
signing_name: "budgets",
target_prefix: "AWSBudgetServiceGateway"
}
end
@doc """
Creates a budget and, if included, notifications and subscribers.
Only one of `BudgetLimit` or `PlannedBudgetLimits` can be present in the syntax
at one time. Use the syntax that matches your case. The Request Syntax section
shows the `BudgetLimit` syntax. For `PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_CreateBudget.html#API_CreateBudget_Examples)
section.
"""
def create_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBudget", input, options)
end
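  # Usage sketch (assumes valid AWS credentials; the input map follows the
  # CreateBudget request syntax linked above and is trimmed for brevity):
  #
  #     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
  #     input = %{
  #       "AccountId" => "123456789012",
  #       "Budget" => %{
  #         "BudgetName" => "monthly-cost-budget",
  #         "BudgetType" => "COST",
  #         "TimeUnit" => "MONTHLY",
  #         "BudgetLimit" => %{"Amount" => "100", "Unit" => "USD"}
  #       }
  #     }
  #     AWS.Budgets.create_budget(client, input)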
@doc """
Creates a budget action.
"""
def create_budget_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBudgetAction", input, options)
end
@doc """
Creates a notification.
You must create the budget before you create the associated notification.
"""
def create_notification(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateNotification", input, options)
end
@doc """
Creates a subscriber.
You must create the associated budget and notification before you create the
subscriber.
"""
def create_subscriber(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSubscriber", input, options)
end
@doc """
Deletes a budget.
You can delete your budget at any time.
Deleting a budget also deletes the notifications and subscribers that are
associated with that budget.
"""
def delete_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBudget", input, options)
end
@doc """
Deletes a budget action.
"""
def delete_budget_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBudgetAction", input, options)
end
@doc """
Deletes a notification.
Deleting a notification also deletes the subscribers that are associated with
the notification.
"""
def delete_notification(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteNotification", input, options)
end
@doc """
Deletes a subscriber.
Deleting the last subscriber to a notification also deletes the notification.
"""
def delete_subscriber(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSubscriber", input, options)
end
@doc """
Describes a budget.
The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudget.html#API_DescribeBudget_Examples)
section.
"""
def describe_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudget", input, options)
end
@doc """
Describes a budget action detail.
"""
def describe_budget_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgetAction", input, options)
end
@doc """
Describes a budget action history detail.
"""
def describe_budget_action_histories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgetActionHistories", input, options)
end
@doc """
Describes all of the budget actions for an account.
"""
def describe_budget_actions_for_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgetActionsForAccount", input, options)
end
@doc """
Describes all of the budget actions for a budget.
"""
def describe_budget_actions_for_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgetActionsForBudget", input, options)
end
@doc """
Describes the history for `DAILY`, `MONTHLY`, and `QUARTERLY` budgets.
Budget history isn't available for `ANNUAL` budgets.
"""
def describe_budget_performance_history(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgetPerformanceHistory", input, options)
end
@doc """
Lists the budgets that are associated with an account.
The Request Syntax section shows the `BudgetLimit` syntax. For
`PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudgets.html#API_DescribeBudgets_Examples)
section.
"""
def describe_budgets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBudgets", input, options)
end
@doc """
Lists the notifications that are associated with a budget.
"""
def describe_notifications_for_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeNotificationsForBudget", input, options)
end
@doc """
Lists the subscribers that are associated with a notification.
"""
def describe_subscribers_for_notification(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSubscribersForNotification", input, options)
end
@doc """
Executes a budget action.
"""
def execute_budget_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExecuteBudgetAction", input, options)
end
@doc """
Updates a budget.
You can change every part of a budget except for the `budgetName` and the
`calculatedSpend`. When you modify a budget, the `calculatedSpend` drops to zero
until AWS has new usage data to use for forecasting.
Only one of `BudgetLimit` or `PlannedBudgetLimits` can be present in the syntax
at one time. Use the syntax that matches your case. The Request Syntax section
shows the `BudgetLimit` syntax. For `PlannedBudgetLimits`, see the
[Examples](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_UpdateBudget.html#API_UpdateBudget_Examples)
section.
"""
def update_budget(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateBudget", input, options)
end
@doc """
Updates a budget action.
"""
def update_budget_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateBudgetAction", input, options)
end
@doc """
Updates a notification.
"""
def update_notification(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateNotification", input, options)
end
@doc """
Updates a subscriber.
"""
def update_subscriber(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSubscriber", input, options)
end
end
|
lib/aws/generated/budgets.ex
| 0.792464
| 0.539711
|
budgets.ex
|
starcoder
|
defmodule Annex do
@moduledoc """
Annex is a library for composing and running deep artificial neural networks.
"""
alias Annex.{
Data,
Layer.Activation,
Layer.Dense,
Layer.Dropout,
Layer.Sequence,
LayerConfig,
Learner
}
@doc """
Given a list of `layers` returns a `LayerConfig` for a `Sequence`.
"""
@spec sequence(list(LayerConfig.t(module()))) :: LayerConfig.t(Sequence)
def sequence(layers) when is_list(layers) do
LayerConfig.build(Sequence, layers: layers)
end
@doc """
Given a frequency (between `0.0` and `1.0`) returns a LayerConfig for a `Dropout`.
The `Dropout` layer randomly, at a given frequency, returns `0.0` for an input
regardless of that input's value.
"""
@spec dropout(float()) :: LayerConfig.t(Dropout)
def dropout(frequency) do
LayerConfig.build(Dropout, frequency: frequency)
end
@doc """
Given a number of `rows` and `columns`, some `weights`,
and some `biases`, returns a `LayerConfig` for a `Dense` layer.
"""
@spec dense(pos_integer(), pos_integer(), Data.data(), Data.data()) :: LayerConfig.t(Dense)
def dense(rows, columns, weights, biases) do
LayerConfig.build(Dense, rows: rows, columns: columns, weights: weights, biases: biases)
end
@doc """
Given a number of `rows` and `columns`, returns a `LayerConfig` for a `Dense` layer.
Without the `weights` and `biases` of `dense/4`, this Dense layer will
have no neurons. Upon `Layer.init_layer/2` the Dense layer will be
initialized with random neurons; neurons with random weights and biases.
"""
@spec dense(pos_integer(), pos_integer()) :: LayerConfig.t(Dense)
def dense(rows, columns) do
LayerConfig.build(Dense, rows: rows, columns: columns)
end
@doc """
Given an Activation's name returns appropriate `Activation` layer.
"""
@spec activation(Activation.func_name()) :: LayerConfig.t(Activation)
def activation(name) do
LayerConfig.build(Activation, %{name: name})
end
@doc """
Trains an `Annex.Learner` given a `learner`, a `dataset`, and `options`.
The `learner` should be initialized with `Learner.init_learner/2` before
being trained.
Returns the trained `learner` along with some measure of loss or performance.
"""
def train(%_{} = learner, dataset, options \\ []) do
Learner.train(learner, dataset, options)
end
@doc """
Given an initialized Learner `learner` and some `data` returns a prediction.
The `learner` should be initialized with `Learner.init_learner` before being
used with the `predict/2` function.
Also, it's a good idea to train the `learner` (using `train/2` or `train/3`)
before using it to make predictions. Chances are slim that an untrained
Learner is capable of making accurate predictions.
"""
@spec predict(Learner.t(), Learner.data()) :: Learner.data()
def predict(learner, data) do
learner
|> Learner.predict(data)
|> Data.to_flat_list()
end
end
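# A composition sketch using the helpers above (layer sizes and activation
# names are illustrative; see `Annex.Layer.Activation` for supported names):
#
#     Annex.sequence([
#       Annex.dense(4, 2),
#       Annex.activation(:tanh),
#       Annex.dropout(0.05),
#       Annex.dense(1, 4),
#       Annex.activation(:sigmoid)
#     ])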
|
lib/annex.ex
| 0.93276
| 0.729014
|
annex.ex
|
starcoder
|
defmodule EctoTranslate do
@moduledoc """
EctoTranslate is a library that helps with translating Ecto data. It can return translated values of your Ecto data attributes. For this it uses a single table called "translations" which contains polymorphic entries for all of your Ecto data structs.
## examples
Given an ecto module like :
defmodule MyApp.Post do
...
use EctoTranslate, [:title, :body]
...
schema "posts" do
field :title, :string
field :body, :string
end
...
end
You can set translations using :
iex> EctoTranslate.set(post, [locale: :nl, title: "Een nederlandse titel", description: "Een nederlandse beschrijving"])
Then you can ask for translated fields explicitly using :
iex> MyApp.Post.translated_title(post, :nl)
"Een nederlandse titel"
Or you can update the model by replacing the fields with their translations using :
iex> translated_post = MyApp.Post.translate!(post, :nl)
iex> translated_post.title
"Een nederlandse titel"
iex> translated_post.description
"Een nederlandse beschrijving"
You can also pass in a collection to translate in batch, preventing N+1 queries
iex> posts = MyApp.Post |> MyApp.Repo.all
iex> translated_posts = MyApp.Post.translate!(posts, :nl)
If a translation is not found, it will fall back to the original database value.
If you omit the locale in the function calls, the current Gettext locale will be used.
iex> Gettext.put_locale(MyApp.Gettext, "nl")
iex> translated_post = MyApp.Post.translate!(post)
iex> translated_post.title
"Een nederlandse titel"
"""
use Ecto.Schema
import Ecto.Changeset
import Ecto.Query
@doc """
EctoTranslate is meant to be `use`d by a module.
`use` needs a list of attributes that you would like to make available for translation.
defmodule MyApp.Post do
use EctoTranslate, [:title, :body]
end
When `use` is called, it will add the following functions to your module
- translatable_fields/0
- translate!/1
- translate!/2
- translated_attr/1 i.e. translated_title/1
- translated_attr/2 i.e. translated_title/2
For each of these functions the second parameter is an optional locale. If omitted, the current Gettext locale will be used.
"""
defmacro __using__(fields) do
repo = Application.get_env(:ecto_translate, :repo)
translatable_fields_ast =
quote do
@docs """
A simple helper funtion to return a list of translatable fields on this model
"""
@spec translatable_fields :: list[atom]
def translatable_fields, do: unquote(fields)
end
translated_field_ast =
Enum.map(fields, fn field ->
quote do
@docs """
Returns a translated value for the requested field in the optionally given locale.
If locale was ommitted, it will use the current Gettext locale.
This will cause a query to be run to get the translation.
"""
def unquote(:"translated_#{field}")(
%{__meta__: %{source: translatable_type}, id: translatable_id} = model,
locale \\ nil
) do
locale = Atom.to_string(locale || String.to_atom(EctoTranslate.current_locale()))
record =
EctoTranslate
|> where(
translatable_type: ^translatable_type,
locale: ^locale,
translatable_id: ^translatable_id,
field: unquote(Atom.to_string(field))
)
|> unquote(repo).one
case record do
nil -> Map.get(model, unquote(field))
record -> Map.get(record, :content)
end
end
end
end)
translate_ast =
quote do
@docs """
Updates the model(s) that has/have been passed by replacing the content of the translatable fields with the optional locale given.
If locale was ommited, it will use the current Gettext locale.
This will cause a query to be run to get the translations.
"""
@spec translate!(model :: Ecto.Schema.t() | list[Ecto.Schema.t()], locale :: atom) ::
Ecto.Schema.t() | list[Ecto.Schema.t()]
def translate!(model, locale \\ nil)
def translate!(
%{__meta__: %{source: translatable_type}, id: translatable_id} = model,
locale
) do
locale = Atom.to_string(locale || String.to_atom(EctoTranslate.current_locale()))
translations =
EctoTranslate
|> where(
translatable_type: ^translatable_type,
translatable_id: ^translatable_id,
locale: ^locale
)
|> unquote(repo).all
|> Enum.map(fn record ->
{String.to_atom(Map.get(record, :field)), Map.get(record, :content)}
end)
|> Enum.into(%{})
Map.merge(model, translations)
end
def translate!([], _), do: []
def translate!(models, locale) when is_list(models) do
locale = Atom.to_string(locale || String.to_atom(EctoTranslate.current_locale()))
ids = Enum.map(models, fn model -> model.id end)
%{__meta__: %{source: translatable_type}} = Enum.at(models, 0)
translations =
EctoTranslate
|> where(translatable_type: ^translatable_type, locale: ^locale)
|> where([t], t.translatable_id in ^ids)
|> unquote(repo).all
# FIXME this might not be the most optimized way to do this, but for now, it works :)
Enum.map(models, fn model ->
attributes =
Enum.filter(translations, fn translation ->
translation.translatable_id == model.id
end)
|> Enum.map(fn record ->
{String.to_atom(Map.get(record, :field)), Map.get(record, :content)}
end)
|> Enum.into(%{})
Map.merge(model, attributes)
end)
end
end
    [translatable_fields_ast, translated_field_ast, translate_ast]
end
@translatable_id_type Application.get_env(:ecto_translate, :translatable_id_type, :integer)
@doc """
Returns translatable id type configured for application
The id type can be configured by setting `:translatable_id_type` config for
`:ecto_translate` otp application.
## Example
```elixir
config ecto_translate,
translatable_id_type: :string
```
By default the is type is presumed as `:integer`
"""
@spec translatable_id_type :: atom
def translatable_id_type, do: @translatable_id_type
schema "translations" do
field(:translatable_id, @translatable_id_type)
field(:translatable_type, :string)
field(:locale, :string)
field(:field, :string)
field(:content, :string)
timestamps()
end
@repo Application.get_env(:ecto_translate, :repo)
@required_fields ~w(translatable_id translatable_type locale field content)a
@doc """
Builds a changeset based on the `struct` and `params` and validates the required fields and given locale
"""
@spec changeset(struct :: Ecto.Schema.t(), params :: map) :: Ecto.Changeset.t()
def changeset(struct, params \\ %{}) do
struct
|> cast(params, @required_fields)
|> validate_required(@required_fields)
|> validate_locale
|> unique_constraint(:translatable_id,
name: :translations_translatable_id_translatable_type_locale_field_ind
)
end
@doc """
Creates the translations for the given fields in the database, or updates them when they already exist.
## Example
iex> %EctoTranslate.ExampleModel{title: "A title in english", description: "A description in english"}
...> |> EctoTranslate.Repo.insert!
...> |> EctoTranslate.set(locale: :de, title: "Eine deutche titel", description: "Ein deutsche umschreibung")
[
%EctoTranslate{__meta__: #Ecto.Schema.Metadata<:loaded, "translations">, content: "Eine deutche titel", field: "title", id: 241, inserted_at: #Ecto.DateTime<2016-07-01 21:09:11>, locale: "de", translatable_id: 221, translatable_type: "test_model", updated_at: #Ecto.DateTime<2016-07-01 21:09:11>},
%EctoTranslate{__meta__: #Ecto.Schema.Metadata<:loaded, "translations">, content: "Ein deutsche umschreibung", field: "description", id: 242, inserted_at: #Ecto.DateTime<2016-07-01 21:09:11>, locale: "de", translatable_id: 221, translatable_type: "test_model", updated_at: #Ecto.DateTime<2016-07-01 21:09:11>}
]
"""
  @spec set(model :: Ecto.Schema.t(), options :: [{atom, any()}]) ::
:ok | {:error, list}
def set(%{__meta__: %{source: translatable_type}, id: translatable_id} = model, [
{:locale, locale} | options
]) do
params = %{
translatable_type: translatable_type,
translatable_id: translatable_id,
locale: Atom.to_string(locale)
}
changesets = create_changesets(model, params, options)
case validate_changesets(changesets) do
{:ok, changesets} -> changesets |> upsert_translations
error -> error
end
end
@doc """
A helper function to get the current Gettext locale
"""
@spec current_locale :: String.t()
def current_locale, do: Gettext.get_locale(Application.get_env(:ecto_translate, :gettext))
@doc """
A helper function to get the known Gettext locales
"""
  @spec known_locales :: [String.t()]
def known_locales, do: Gettext.known_locales(Application.get_env(:ecto_translate, :gettext))
defp validate_changesets(changesets) do
case Enum.filter(changesets, fn changeset -> !changeset.valid? end) do
invalid when invalid == [] ->
{:ok, changesets}
_ ->
{:error,
Enum.map(changesets, fn changeset -> {changeset.changes.field, changeset.errors} end)}
end
end
defp create_changesets(model, params, options) do
options
|> Enum.filter(fn {k, _v} -> Enum.member?(model.__struct__.translatable_fields, k) end)
|> Enum.map(fn {field, content} ->
params = Map.merge(params, %{field: Atom.to_string(field), content: content})
EctoTranslate.changeset(%EctoTranslate{}, params)
end)
end
defp validate_locale(%{changes: %{locale: locale}} = changeset) do
case Enum.member?(EctoTranslate.known_locales(), locale) do
true ->
changeset
false ->
add_error(
changeset,
:locale,
"The locale '#{locale}' is not supported, supported are: #{
Enum.join(EctoTranslate.known_locales(), ", ")
}, if you think this is incorrect, make sure your Gettext.known_locales/1 knows about the locale you want to add..."
)
end
end
defp validate_locale(changeset), do: changeset
defp upsert_translations([]), do: :ok
defp upsert_translations([changeset | changesets]) do
case @repo.insert(changeset) do
{:ok, cs} -> cs
{:error, cs} -> cs |> update_translation
end
upsert_translations(changesets)
end
defp update_translation(%{changes: changes}) do
record =
EctoTranslate
|> where(
translatable_type: ^changes.translatable_type,
translatable_id: ^changes.translatable_id,
locale: ^changes.locale,
field: ^changes.field
)
|> @repo.one!
record = Ecto.Changeset.change(record, changes)
@repo.update!(record)
end
end
|
lib/ecto_translate.ex
| 0.82755
| 0.483953
|
ecto_translate.ex
|
starcoder
|
defmodule Grizzly.Inclusions.InclusionRunner do
@moduledoc false
use GenServer
alias Grizzly.{Connection, Inclusions, Options, SeqNumber, Report}
alias Grizzly.Inclusions.InclusionRunner.Inclusion
alias Grizzly.Connections.AsyncConnection
alias Grizzly.ZWave.{Security, Command, DSK}
@typedoc """
At any given moment there can only be one `InclusionRunner` process running,
so the process is registered under this module's name.
However, all the functions in this module can take either the pid of the
process or the registered name, to keep the calling context flexible.
"""
@type t :: pid() | __MODULE__
def child_spec(args) do
# Don't restart the inclusion if there is a failure
%{id: __MODULE__, start: {__MODULE__, :start_link, [args]}, restart: :temporary}
end
@spec start_link(Options.t(), [Inclusions.opt()]) :: GenServer.on_start()
def start_link(grizzly_options, opts \\ []) do
controller_id = Keyword.get(opts, :controller_id, 1)
handler = Keyword.get(opts, :handler, self())
GenServer.start_link(
__MODULE__,
[grizzly_options, [controller_id: controller_id, handler: handler]],
name: __MODULE__
)
end
@spec add_node(t()) :: :ok
def add_node(runner \\ __MODULE__) do
GenServer.call(runner, :add_node)
end
@spec add_node_stop(t()) :: :ok
def add_node_stop(runner \\ __MODULE__) do
GenServer.call(runner, :add_node_stop)
end
@spec remove_node(t()) :: :ok
def remove_node(runner \\ __MODULE__) do
GenServer.call(runner, :remove_node)
end
@spec remove_node_stop(t()) :: :ok
def remove_node_stop(runner \\ __MODULE__) do
GenServer.call(runner, :remove_node_stop)
end
@spec grant_keys(t(), [Security.key()]) :: :ok
def grant_keys(runner \\ __MODULE__, security_keys) do
GenServer.call(runner, {:grant_keys, security_keys})
end
@spec set_dsk(t(), DSK.t()) :: :ok
def set_dsk(runner \\ __MODULE__, dsk \\ DSK.new("")) do
GenServer.call(runner, {:set_dsk, dsk})
end
def learn_mode(runner \\ __MODULE__) do
GenServer.call(runner, :learn_mode)
end
def learn_mode_stop(runner \\ __MODULE__) do
GenServer.call(runner, :learn_mode_stop)
end
@spec stop(t()) :: :ok
def stop(runner \\ __MODULE__) do
GenServer.stop(runner, :normal)
end
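  # Client-API sketch (the module name doubles as the registered process, per
  # the typedoc above; `grizzly_options` is a `Grizzly.Options` struct and the
  # granted keys are illustrative):
  #
  #     {:ok, _pid} = InclusionRunner.start_link(grizzly_options)
  #     :ok = InclusionRunner.add_node()
  #     :ok = InclusionRunner.grant_keys([:s2_unauthenticated])
  #     :ok = InclusionRunner.set_dsk()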
@impl true
def init([_grizzly_options, opts]) do
handler = Keyword.fetch!(opts, :handler)
controller_id = Keyword.fetch!(opts, :controller_id)
    {:ok, _} = Connection.open(controller_id, mode: :async)
{:ok, %Inclusion{handler: handler, controller_id: controller_id}}
end
@impl true
def handle_call(:add_node, _from, inclusion) do
seq_number = SeqNumber.get_and_inc()
{command, new_inclusion} = Inclusion.next_command(inclusion, :node_adding, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 120_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call(:add_node_stop, _from, inclusion) do
:ok = AsyncConnection.stop_command(inclusion.controller_id, inclusion.current_command_ref)
seq_number = SeqNumber.get_and_inc()
{next_command, new_inclusion} =
Inclusion.next_command(inclusion, :node_adding_stop, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, next_command, timeout: 60_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call(:remove_node, _from, inclusion) do
seq_number = SeqNumber.get_and_inc()
{command, new_inclusion} = Inclusion.next_command(inclusion, :node_removing, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 120_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call(:remove_node_stop, _from, inclusion) do
:ok = AsyncConnection.stop_command(inclusion.controller_id, inclusion.current_command_ref)
seq_number = SeqNumber.get_and_inc()
{command, new_inclusion} = Inclusion.next_command(inclusion, :node_removing_stop, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 60_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call(:learn_mode, _from, inclusion) do
seq_number = SeqNumber.get_and_inc()
{command, new_inclusion} = Inclusion.next_command(inclusion, :learn_mode, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 120_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call(:learn_mode_stop, _from, inclusion) do
:ok = AsyncConnection.stop_command(inclusion.controller_id, inclusion.current_command_ref)
seq_number = SeqNumber.get_and_inc()
{command, new_inclusion} = Inclusion.next_command(inclusion, :learn_mode_stop, seq_number)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 60_000)
{:reply, :ok, Inclusion.update_command_ref(new_inclusion, command_ref)}
end
def handle_call({:grant_keys, keys}, _from, inclusion) do
# TODO check keys granted are valid?
seq_number = SeqNumber.get_and_inc()
{command, inclusion} =
Inclusion.next_command(inclusion, :keys_granted, seq_number,
csa: false,
accept: true,
granted_keys: keys
)
{:ok, command_ref} =
AsyncConnection.send_command(inclusion.controller_id, command, timeout: 120_000)
{:reply, :ok, Inclusion.update_command_ref(inclusion, command_ref)}
end
  def handle_call({:set_dsk, dsk}, _from, inclusion) do
    seq_number = SeqNumber.get_and_inc()
    {command, inclusion} = Inclusion.next_command(inclusion, :dsk_set, seq_number, dsk: dsk)
    {:ok, command_ref} =
      AsyncConnection.send_command(inclusion.controller_id, command, timeout: 120_000)
    {:reply, :ok, Inclusion.update_command_ref(inclusion, command_ref)}
  end
@impl true
def handle_info({:grizzly, :report, %Report{type: :ack_response}}, inclusion) do
{:noreply, inclusion}
end
def handle_info({:grizzly, :report, {:error, :timeout, _command_ref}}, inclusion) do
respond_to_handler(
format_handler_spec(inclusion.handler),
{:error, :timeout, inclusion.state}
)
{:noreply, inclusion}
end
def handle_info(
{:grizzly, :report, report},
inclusion
) do
handle_report(report, inclusion)
end
@impl true
def terminate(:normal, inclusion) do
:ok = AsyncConnection.stop(inclusion.controller_id)
:ok
end
def terminate(_reason, _inclusion) do
:ok
end
defp build_inclusion_opts_for_command(command) do
case command.name do
:node_add_dsk_report ->
[dsk_input_length: Command.param!(command, :input_dsk_length)]
_ ->
[]
end
end
defp handle_report(report, inclusion) do
opts = build_inclusion_opts_for_command(report.command)
inclusion = Inclusion.handle_command(inclusion, report.command, opts)
respond_to_handler(format_handler_spec(inclusion.handler), report)
if inclusion.state == :complete do
{:stop, :normal, inclusion}
else
{:noreply, inclusion}
end
end
defp format_handler_spec({_handler_module, _handler_opts} = handler), do: handler
defp format_handler_spec(handler) when is_pid(handler), do: handler
defp format_handler_spec(handler), do: {handler, []}
defp respond_to_handler(handler, report) when is_pid(handler) do
send(handler, {:grizzly, :report, report})
end
defp respond_to_handler(
{handler_module, handler_opts},
{:error, _reason, inclusion_state}
) do
spawn_link(fn -> handler_module.handle_timeout(inclusion_state, handler_opts) end)
end
defp respond_to_handler({handler_module, handler_opts}, report) do
# TODO - Consider using a handler runner genserver for calling the plugin inclusion handler
spawn_link(fn -> handler_module.handle_report(report, handler_opts) end)
end
end
|
lib/grizzly/inclusions/inclusion_runner.ex
| 0.752649
| 0.403332
|
inclusion_runner.ex
|
starcoder
|
defmodule Kitt.Types do
@moduledoc """
Collection of custom typespec definitions for
abstracting the complexity of messages away from
the structs that define them.
"""
@type transmission_state ::
:neutral
| :park
| :forwardGears
| :reverseGears
| :reserved1
| :reserved2
| :reserved3
| :unavailable
@type acceleration_set_4_way :: %{
long: integer(),
lat: integer(),
vert: integer(),
yaw: integer()
}
@type brake_system_status :: %{
wheelBrakes: wheel_brake(),
traction:
:unavailable
| :off
| :on
| :engaged,
abs:
:unavailable
| :off
| :on
| :engaged,
scs:
:unavailable
| :off
| :on
| :engaged,
brakeBoost:
:unavailable
| :off
| :on,
auxBrakes:
:unavailable
| :off
| :on
| :reserved
}
@type wheel_brake ::
:unavailable
| quadrant()
@type vehicle_size :: %{
width: non_neg_integer(),
length: non_neg_integer()
}
@type positional_accuracy :: %{
semiMajor: non_neg_integer(),
semiMinor: non_neg_integer(),
orientation: non_neg_integer()
}
@type d_date_time :: %{
year: non_neg_integer(),
month: non_neg_integer(),
day: non_neg_integer(),
hour: non_neg_integer(),
minute: non_neg_integer(),
second: non_neg_integer(),
offset: integer()
}
@type transmission_and_speed :: %{
transmission:
:neutral
| :park
| :forwardGears
| :reverseGears
| :reserved1
| :reserved2
| :reserved3
| :unavailable,
speed: non_neg_integer()
}
@type path_prediction :: %{
radiusOfCurve: integer(),
confidence: non_neg_integer()
}
@type position_3d :: %{
lat: integer(),
long: integer(),
elevation: integer(),
regional: [map()]
}
@type privileged_events :: %{
sspRights: non_neg_integer(),
event: privileged_event_flag()
}
@type privileged_event_flag ::
:peUnavailable
| :peEmergencyResponse
| :peEmergencyLightsActive
| :peEmergencySoundActive
| :peNonEmergencyLightsActive
| :peNonEmergencySoundActive
@type emergency_details :: %{
sspRights: non_neg_integer(),
sirenUse:
:unavailable
| :notInUse
| :inUse
| :reserved,
lightsUse:
:unavailable
| :notInUse
| :inUse
| :yellowCautionLights
| :schoolBusLights
| :arrowSignsActive
| :slowMovingVehicle
| :freqStops,
multi:
:unavailable
| :singleVehicle
| :multiVehicle
| :reserved,
events: privileged_events(),
responseType: response_type()
}
@type response_type ::
:notInUseOrNotEquipped
| :emergency
| :nonEmergency
| :pursuit
| :stationary
| :slowMoving
| :stopAndGoMovement
@type event_description :: %{
typeEvent: non_neg_integer(),
description: [non_neg_integer()],
priority: non_neg_integer(),
heading: Kitt.Types.angle(),
extent: extent(),
regional: [map()]
}
@type event_flag ::
:eventHazardLights
| :eventStopLineViolation
| :eventABSactivated
| :eventTractionControlLoss
| :eventStabilityControlactivated
| :eventHazardousMaterials
| :eventReserved1
| :eventHardBraking
| :eventLightsChanged
| :eventWipersChanged
| :eventFlatTire
| :eventDisabledVehicle
| :eventAirBagDeployment
@type angle ::
:"from000-0to022-5degrees"
| :"from022-5to045-0degrees"
| :"from045-0to067-5degrees"
| :"from067-5to090-0degrees"
| :"from090-0to112-5degrees"
| :"from112-5to135-0degrees"
| :"from135-0to157-5degrees"
| :"from157-5to180-0degrees"
| :"from180-0to202-5degrees"
| :"from202-5to225-0degrees"
| :"from225-0to247-5degrees"
| :"from247-5to270-0degrees"
| :"from270-0to292-5degrees"
| :"from292-5to315-0degrees"
| :"from315-0to337-5degrees"
| :"from337-5to360-0degrees"
@type regional_extension :: map()
@type minute_of_year :: non_neg_integer()
@type extent ::
:useInstantlyOnly
| :useFor3meters
| :useFor10meters
| :useFor50meters
| :useFor100meters
| :useFor500meters
| :useFor1000meters
| :useFor5000meters
| :useFor10000meters
| :useFor50000meters
| :useFor100000meters
| :useFor500000meters
| :useFor1000000meters
| :useFor5000000meters
| :useFor10000000meters
| :forever
@type pivot_point_description :: %{
pivotOffset: integer(),
pivotAngle: non_neg_integer(),
pivots: boolean()
}
@type node_xy_24b :: %{
x: integer(),
y: integer()
}
@type bumper_heights :: %{
front: non_neg_integer(),
rear: non_neg_integer()
}
@type trailer_unit_description :: %{
isDolly: boolean(),
width: non_neg_integer(),
length: non_neg_integer(),
height: non_neg_integer(),
mass: non_neg_integer(),
bumperHeights: bumper_heights(),
centerOfGravity: non_neg_integer(),
frontPivot: pivot_point_description(),
rearPivot: pivot_point_description(),
rearWheelOffset: integer(),
positionOffset: node_xy_24b(),
elevationOffset: integer(),
crumbData: [trailer_history_point()]
}
@type trailer_data :: %{
sspRights: non_neg_integer(),
connection: pivot_point_description(),
units: [trailer_unit_description()]
}
@type trailer_history_point :: %{
pivotAngle: non_neg_integer(),
timeOffset: non_neg_integer(),
positionOffset: node_xy_24b(),
elevationOffset: integer(),
heading: non_neg_integer()
}
@type position_confidence_set :: %{
pos:
:unavailable
| :a500m
| :a200m
| :a100m
| :a50m
| :a20m
| :a10m
| :a5m
| :a2m
| :a1m
| :a50cm
| :a20cm
| :a10cm
| :a5cm
| :a2cm
| :a1cm,
elevation:
:unavailable
| :"elev-500-00"
| :"elev-200-00"
| :"elev-100-00"
| :"elev-050-00"
| :"elev-020-00"
| :"elev-010-00"
| :"elev-005-00"
| :"elev-002-00"
| :"elev-001-00"
| :"elev-000-50"
| :"elev-000-20"
| :"elev-000-10"
| :"elev-000-05"
| :"elev-000-02"
| :"elev-000-01"
}
@type speed_and_heading_and_throttle_confidence :: %{
heading:
:unavailable
| :prec10deg
| :prec05deg
| :prec01deg
| :"prec0-1deg"
| :"prec0-05deg"
| :"prec0-01deg"
| :"prec0-0125deg",
speed:
:unavailable
| :prec100ms
| :prec10ms
| :prec5ms
| :prec1ms
| :"prec0-1ms"
| :"prec0-05ms"
| :"prec0-01ms",
throttle: :unavailable | :prec10percent | :prec1percent | :"prec0-5percent"
}
@type full_position_vector :: %{
utcTime: d_date_time(),
long: integer(),
lat: integer(),
elevation: integer(),
heading: non_neg_integer(),
speed: transmission_and_speed(),
posAccuracy: positional_accuracy(),
timeConfidence:
:unavailable
| :"time-100-000"
| :"time-050-000"
| :"time-020-000"
| :"time-010-000"
| :"time-002-000"
| :"time-001-000"
| :"time-000-500"
| :"time-000-200"
| :"time-000-100"
| :"time-000-050"
| :"time-000-020"
| :"time-000-010"
| :"time-000-005"
| :"time-000-002"
| :"time-000-001"
| :"time-000-000-5"
| :"time-000-000-2"
| :"time-000-000-1"
| :"time-000-000-05"
| :"time-000-000-02"
| :"time-000-000-01"
| :"time-000-000-005"
| :"time-000-000-002"
| :"time-000-000-001"
| :"time-000-000-000-5"
| :"time-000-000-000-2"
| :"time-000-000-000-1"
| :"time-000-000-000-05"
| :"time-000-000-000-02"
| :"time-000-000-000-01"
| :"time-000-000-000-005"
| :"time-000-000-000-002"
| :"time-000-000-000-001"
| :"time-000-000-000-000-5"
| :"time-000-000-000-000-2"
| :"time-000-000-000-000-1"
| :"time-000-000-000-000-05"
| :"time-000-000-000-000-02"
| :"time-000-000-000-000-01"
| {:asn1_enum, non_neg_integer()},
posConfidence: position_confidence_set(),
speedConfidence: speed_and_heading_and_throttle_confidence()
}
@type path_history_point :: %{
latOffset: integer(),
lonOffset: integer(),
elevationOffset: integer(),
timeOffset: non_neg_integer(),
speed: non_neg_integer(),
osAccuracy: positional_accuracy(),
heading: non_neg_integer()
}
@type path_history :: %{
initialPosition: full_position_vector(),
currGNSSstatus: status(),
crumbData: [path_history_point()]
}
@type status ::
:unavailable
| :isHealthy
| :isMonitored
| :baseStationType
| :aPDOPofUnder5
| :inViewOfUnder5
| :localCorrectionsPresent
| :networkCorrectionsPresent
@type antenna_offset_set :: %{
antOffsetX: integer(),
antOffsetY: integer(),
antOffsetZ: integer()
}
@type rtcm_header :: %{
status: status(),
offsetSet: antenna_offset_set()
}
@type rtcm_package :: %{
rtcmHeader: rtcm_header(),
msgs: [non_neg_integer()]
}
@type speed_profile :: %{
speedReports: [non_neg_integer()]
}
@type disabled_vehicle :: %{
statusDetails: non_neg_integer(),
locationDetails: generic_locations()
}
@type obstacle_detection :: %{
obDist: non_neg_integer(),
obDirect: non_neg_integer(),
description: non_neg_integer(),
locationDetails: generic_locations(),
dateTime: d_date_time(),
vertEvent: vert_event()
}
@type vert_event ::
:notEquipped
| quadrant()
@type quadrant ::
:leftFront
| :leftRear
| :rightFront
| :rightRear
@type wiper_status ::
:unavailable
| :off
| :intermittent
| :low
| :high
| :washerInUse
| :automaticPresent
@type wiper_set :: %{
statusFront: wiper_status(),
rateFront: non_neg_integer(),
statusRear: wiper_status(),
rateRear: non_neg_integer()
}
@type weather_probe :: %{
airTemp: non_neg_integer(),
airPressure: non_neg_integer(),
rainRates: wiper_set()
}
@type weather_report :: %{
isRaining:
:precip
| :noPrecip
| :error,
rainRate: non_neg_integer(),
precipSituation:
            :other
| :unknown
| :noPrecipitation
| :unidentifiedSlight
| :unidentifiedModerate
| :unidentifiedHeavy
| :snowSlight
| :snowModerate
| :snowHeavy
| :rainSlight
| :rainModerate
| :rainHeavy
| :frozenPrecipitationSlight
| :frozenPrecipitationModerate
| :frozenPrecipitationHeavy,
solarRadiation: non_neg_integer(),
friction: non_neg_integer(),
roadFriction: non_neg_integer()
}
@type vehicle_data :: %{
height: non_neg_integer(),
bumpers: bumper_heights(),
mass: non_neg_integer(),
trailerWeight: non_neg_integer()
}
@type vehicle_type ::
:none
| :unknown
| :special
| :moto
| :car
| :carOther
| :bus
| :axleCnt2
| :axleCnt3
| :axleCnt4
| :axleCnt4Trailer
| :axleCnt5Trailer
| :axleCnt6Trailer
| :axleCnt5MultiTrailer
| :axleCnt6MultiTrailer
| :axleCnt7MultiTrailer
@type vehicle_group_affected ::
:"all-vehicles"
| :bicycles
| :motorcycles
| :cars
| :"light-vehicles"
| :"cars-and-light-vehicles"
| :"cars-with-trailers"
| :"cars-with-recreational-trailers"
| :"vehicles-with-trailers"
| :"heavy-vehicles"
| :trucks
| :buses
| :"articulated-buses"
| :"school-buses"
| :"vehicles-with-semi-trailers"
| :"vehicles-with-double-trailers"
| :"high-profile-vehicles"
| :"wide-vehicles"
| :"long-vehicles"
| :"hazardous-loads"
| :"exceptional-loads"
| :"abnormal-loads"
| :convoys
| :"maintenance-vehicles"
| :"delivery-vehicles"
| :"vehicles-with-even-numbered-license-plates"
| :"vehicles-with-odd-numbered-license-plates"
| :"vehicles-with-parking-permits"
| :"vehicles-with-catalytic-converters"
| :"vehicles-without-catalytic-converters"
| :"gas-powered-vehicles"
| :"diesel-powered-vehicles"
| :"lPG-vehicles"
| :"military-convoys"
| :"military-vehicles"
@type incident_response_equipment ::
:"ground-fire-suppression"
| :"heavy-ground-equipment"
| :aircraft
| :"marine-equipment"
| :"support-equipment"
| :"medical-rescue-unit"
| :other
| :"ground-fire-suppression-other"
| :engine
| :"truck-or-aerial"
| :quint
| :"tanker-pumper-combination"
| :"brush-truck"
| :"aircraft-rescue-firefighting"
| :"heavy-ground-equipment-other"
| :"dozer-or-plow"
| :tractor
| :"tanker-or-tender"
| :"aircraft-other"
| :"aircraft-fixed-wing-tanker"
| :helitanker
| :helicopter
| :"marine-equipment-other"
| :"fire-boat-with-pump"
| :"boat-no-pump"
| :"support-apparatus-other"
| :"breathing-apparatus-support"
| :"light-and-air-unit"
| :"medical-rescue-unit-other"
| :"rescue-unit"
| :"urban-search-rescue-unit"
| :"high-angle-rescue"
| :"crash-fire-rescue"
| :"bLS-unit"
| :"aLS-unit"
| :"mobile-command-post"
| :"chief-officer-car"
| :"hAZMAT-unit"
| :"type-i-hand-crew"
| :"type-ii-hand-crew"
| :"privately-owned-vehicle"
| :"other-apparatus-resource"
| :ambulance
| :"bomb-squad-van"
| :"combine-harvester"
| :"construction-vehicle"
| :"farm-tractor"
| :"grass-cutting-machines"
| :"hAZMAT-containment-tow"
| :"heavy-tow"
| :"light-tow"
| :"flatbed-tow"
| :"hedge-cutting-machines"
| :"mobile-crane"
| :"refuse-collection-vehicle"
| :"resurfacing-vehicle"
| :"road-sweeper"
| :"roadside-litter-collection-crews"
| :"salvage-vehicle"
| :"sand-truck"
| :snowplow
| :"steam-roller"
| :"swat-team-van"
| :"track-laying-vehicle"
| :"unknown-vehicle"
| :"white-lining-vehicle"
| :"dump-truck"
| :"supervisor-vehicle"
| :"snow-blower"
| :"rotary-snow-blower"
| :"road-grader"
| :"steam-truck"
@type responder_group_affected ::
:"emergency-vehicle-units"
| :"federal-law-enforcement-units"
| :"state-police-units"
| :"county-police-units"
| :"local-police-units"
| :"ambulance-units"
| :"rescue-units"
| :"fire-units"
| :"hAZMAT-units"
| :"light-tow-unit"
| :"heavy-tow-unit"
| :"freeway-service-patrols"
| :"transportation-response-units"
| :"private-contractor-response-units"
@type basic_vehicle_role ::
:basicVehicle
| :publicTransport
| :specialTransport
| :dangerousGoods
| :roadWork
| :roadRescue
| :emergency
| :safetyCar
| :"none-unknown"
| :truck
| :motorcycle
| :roadSideSource
| :police
| :fire
| :ambulance
| :dot
| :transit
| :slowMoving
| :stopNgo
| :cyclist
| :pedestrian
| :nonMotorized
| :military
@type vehicle_classification :: %{
keyType: non_neg_integer(),
role: basic_vehicle_role(),
iso3883: non_neg_integer(),
hpmsType: vehicle_type(),
vehicleType: vehicle_group_affected(),
responseEquip: incident_response_equipment(),
fuelType: non_neg_integer(),
regional: [map()]
}
@type generic_locations() ::
:"on-bridges"
| :"in-tunnels"
| :"entering-or-leaving-tunnels"
| :"on-ramps"
| :"in-road-construction-area"
| :"around-a-curve"
| :"on-curve"
| :"on-tracks"
| :"in-street"
| :shoulder
| :"on-minor-roads"
| :"in-the-opposing-lanes"
| :"adjacent-to-roadway"
| :"across-tracks"
| :"on-bend"
| :intersection
| :"entire-intersection"
| :"in-the-median"
| :"moved-to-side-of-road"
| :"moved-to-shoulder"
| :"on-the-roadway"
| :dip
| :"traffic-circle"
| :crossover
| :"cross-road"
| :"side-road"
| :to
| :by
| :through
| :"area-of"
| :under
| :over
| :from
| :approaching
| :"entering-at"
| :"exiting-at"
| :"in-shaded-areas"
| :"in-low-lying-areas"
| :"in-the-downtown-area"
| :"in-the-inner-city-area"
| :"in-parts"
| :"in-some-places"
| :"in-the-ditch"
| :"in-the-valley"
| :"on-hill-top"
| :"near-the-foothills"
| :"at-high-altitudes"
| :"near-the-lake"
| :"near-the-shore"
| :"nearby-basin"
| :"over-the-crest-of-a-hill"
| :"other-than-on-the-roadway"
| :"near-the-beach"
| :"near-beach-access-point"
| :"mountain-pass"
| :"lower-level"
| :"upper-level"
| :airport
| :concourse
| :gate
| :"baggage-claim"
| :"customs-point"
| :"reservation-center"
| :station
| :platform
| :dock
| :depot
| :"ev-charging-point"
| :"information-welcome-point"
| :"at-rest-area"
| :"at-service-area"
| :"at-weigh-station"
| :"roadside-park"
| :"picnic-areas"
| :"rest-area"
| :"service-stations"
| :toilets
| :"bus-stop"
| :"park-and-ride-lot"
| :"on-the-right"
| :"on-the-left"
| :"in-the-center"
| :"in-the-opposite-direction"
| :"cross-traffic"
| :"northbound-traffic"
| :"eastbound-traffic"
| :"southbound-traffic"
| :"westbound-traffic"
| :north
| :south
| :east
| :west
| :northwest
| :northeast
| :southwest
| :southeast
@type intersection_reference_id :: %{
region: non_neg_integer(),
id: non_neg_integer()
}
@type road_segment_reference_id :: %{
region: non_neg_integer(),
id: non_neg_integer()
}
@type vehicle_id ::
{:entityID, non_neg_integer()}
| {:stationID, non_neg_integer()}
@type requestor_type :: %{
role: basic_vehicle_role(),
subrole:
:requestSubRoleUnknown
| :requestSubRole1
| :requestSubRole2
| :requestSubRole3
| :requestSubRole4
| :requestSubRole5
| :requestSubRole6
| :requestSubRole7
| :requestSubRole8
| :requestSubRole9
| :requestSubRole10
| :requestSubRole11
| :requestSubRole12
| :requestSubRole13
| :requestSubRole14
| :requestSubRoleReserved,
request:
:requestImportanceLevelUnknown
| :requestImportanceLevel1
| :requestImportanceLevel2
| :requestImportanceLevel3
| :requestImportanceLevel4
| :requestImportanceLevel5
| :requestImportanceLevel6
| :requestImportanceLevel7
| :requestImportanceLevel8
| :requestImportanceLevel9
| :requestImportanceLevel10
| :requestImportanceLevel11
| :requestImportanceLevel12
| :requestImportanceLevel13
| :requestImportanceLevel14
| :requestImportanceReserved,
iso3883: non_neg_integer(),
hpmsType: vehicle_type(),
regional: [map()]
}
@type intersection_access_point ::
{:lane, non_neg_integer()}
| {:approach, non_neg_integer()}
| {:connection, non_neg_integer()}
@type speed_limit_type ::
:unknown
| :maxSpeedInSchoolZone
| :maxSpeedInSchoolZoneWhenChildrenArePresent
| :maxSpeedInConstructionZone
| :vehicleMinSpeed
| :vehicleMaxSpeed
| :vehicleNightMaxSpeed
| :truckMinSpeed
| :truckMaxSpeed
| :truckNightMaxSpeed
| :vehicleWithTrailersMinSpeed
| :vehicleWithTrailersMaxSpeed
| :vehicleWithTrailersNightMaxSpeed
@type regulatory_speed_limit :: %{
type: speed_limit_type(),
speed: non_neg_integer()
}
@type offset_system :: %{
scale: non_neg_integer(),
offset: offset()
}
@type offset ::
{:xy, node_list_xy()}
| {:ll, node_list_ll()}
@type node_list_ll :: {:nodes, [node_ll()]}
@type node_ll :: %{
delta: node_offset_point_ll(),
attributes: node_attribute_set()
}
@type node_list_xy ::
{:nodes, [node_xy()]}
| {:computed, computed_lane()}
@type computed_lane :: %{
referenceLaneId: non_neg_integer(),
offsetXaxis: driven_lane_offset(),
offsetYaxis: driven_lane_offset(),
rotateXY: non_neg_integer(),
scaleXaxis: integer(),
scaleYaxis: integer(),
regional: [map()]
}
@type driven_lane_offset ::
{:small, integer()}
| {:large, integer()}
@type node_xy :: %{
delta: node_offset_point_xy(),
attributes: node_attribute_set()
}
@type node_offset_point_ll ::
{:"node-LL1"}
| {:"node-LL2", node_ll_b()}
| {:"node-LL3", node_ll_b()}
| {:"node-LL4", node_ll_b()}
| {:"node-LL5", node_ll_b()}
| {:"node-LL6", node_ll_b()}
| {:nodeLatLon, node_llmd_64b()}
| {:regional, [map()]}
@type node_offset_point_xy ::
{:"node-XY1", node_xy_b()}
| {:"node-XY2", node_xy_b()}
| {:"node-XY3", node_xy_b()}
| {:"node-XY4", node_xy_b()}
| {:"node-XY5", node_xy_b()}
| {:"node-XY6", node_xy_b()}
| {:"node-LatLon", node_llmd_64b()}
| {:regional, [map()]}
@type node_attribute_set :: %{
localNode: [node_attribute()],
disabled: [segment_attribute()],
enabled: [segment_attribute()],
data: [lane_data_attribute()],
dWidth: integer(),
dElevation: integer(),
regional: [map()]
}
@type node_attribute ::
:reserved
| :stopLine
| :roundedCapStyleA
| :roundedCapStyleB
| :mergePoint
| :divergePoint
| :downstreamStopLine
| :downstreamStartNode
| :closedToTraffic
| :safeIsland
| :curbPresentAtStepOff
| :hydrantPresent
@type segment_attribute ::
:reserved
| :doNotBlock
| :whiteLane
| :mergingLaneLeft
| :mergingLaneRight
| :curbOnLeft
| :curbOnRight
| :loadingzoneOnLeft
| :loadingzoneOnRight
| :turnOutPointOnLeft
| :turnOutPointOnRight
| :adjacentParkingOnLeft
| :adjacentParkingOnRight
| :adjacentBikeLaneOnLeft
| :adjacentBikeLaneOnRight
| :sharedBikeLane
| :bikeBoxInFront
| :transitStopOnLeft
| :transitStopOnRight
| :transitStopInLane
| :sharedWithTrackedVehicle
| :safeIsland
| :lowCurbsPresent
| :rumbleStripPresent
| :audibleSignalingPresent
| :adaptiveTimingPresent
| :rfSignalRequestPresent
| :partialCurbIntrusion
| :taperToLeft
| :taperToRight
| :taperToCenterLine
| :parallelParking
| :headInParking
| :freeParking
| :timeRestrictionsOnParking
| :costToPark
| :midBlockCurbPresent
| :unEvenPavementPresent
@type lane_data_attribute ::
{:pathEndPointAngle, integer()}
| {:laneCrownPointCenter, integer()}
| {:laneCrownPointLeft, integer()}
| {:laneCrownPointRight, integer()}
| {:laneAngle, integer()}
| {:speedLimits, [regulatory_speed_limit()]}
| {:regional, [map()]}
@type node_ll_b :: %{
lon: integer(),
lat: integer()
}
@type node_xy_b :: %{
x: integer(),
y: integer()
}
@type node_llmd_64b :: %{
lon: integer(),
lat: integer()
}
end
|
lib/kitt/types.ex
| 0.798462
| 0.646097
|
types.ex
|
starcoder
|
defmodule Copeiro do
@moduledoc """
The Copeiro package provides assertion functions that will enhance your testing experience in Elixir
"""
@doc false
def __assert_lists__(:==, left, right, any_order: true) do
left
|> Copeiro.Comparator.match_lists_in_any_order(right)
|> case do
{:error, _, _} ->
ExUnit.Assertions.flunk("""
        assertion failed, lists do not match
left: #{inspect(left)}
right: #{inspect(right)}
""")
:ok ->
true
end
end
def __assert_lists__(:==, left, right, _opts) do
case left == right do
false ->
ExUnit.Assertions.flunk("""
Comparison (using ==) failed in:
left: #{inspect(left)}
right: #{inspect(right)}
""")
_ ->
true
end
end
def __assert_lists__(:in, left, right, _opts) do
left
|> Enum.reduce_while(true, fn l, _ ->
case l in right do
true -> {:cont, true}
false -> {:halt, {:error, l}}
end
end)
|> case do
{:error, value} ->
ExUnit.Assertions.flunk("""
assertion failed, value not found
value: #{inspect(value)}
left: #{inspect(left)}
right: #{inspect(right)}
""")
_ ->
true
end
end
def __assert_lists__(:not_in, left, right, _opts) do
left
|> Enum.reduce_while(true, fn l, _ ->
case l not in right do
true -> {:cont, true}
false -> {:halt, {:error, l}}
end
end)
|> case do
{:error, value} ->
ExUnit.Assertions.flunk("""
match succeeded, but should have failed
value: #{inspect(value)}
left: #{inspect(left)}
right: #{inspect(right)}
""")
_ ->
true
end
end
@doc """
Asserts that two lists matches
## Examples
For the following examples, `LEFT` and `RIGHT` will be used to describe the expression `assert_lists LEFT OPERATOR RIGHT`
### All elements of `LEFT` are also elements of `RIGHT`
```
iex> assert_lists [1, 2] in [0, 2, 1, 3]
true
iex> assert_lists [{:a, 1}, {:c, 3}] in [{:a, 1}, {:b, 2}, {:c, 3}]
true
```
### `LEFT` and `RIGHT` has no element in common
```
iex> assert_lists [1, 2] not in [3, 4]
true
iex> assert_lists [%{c: 3}, %{d: 4}] not in [%{a: 1}, %{b: 2}]
true
```
### Asserts that two lists match in any order
```
iex> assert_lists [1, 2, 3] == [2, 1, 3], any_order: true
true
iex> assert_lists [{:a, 0}, {:b, 1}, {:c, 3}] == [{:a, 0}, {:c, 3}, {:b, 1}], any_order: true
true
```
### Asserting lists of maps/structs
```
iex> assert_lists [%{a: 1}, %{a: 2}] in [%{a: 1, b: 1}, %{a: 2, b: 2}, %{a: 3, b: 3}], keys: [:a]
true
iex> assert_lists [%{a: 1}, %{a: 2}] == [%{a: 2, b: 2}, %{a: 1, b: 1}], keys: [:a], any_order: true
true
```
"""
defmacro assert_lists(expr, opts \\ []) do
{op, left, right} =
case expr do
{:not, _, [{:in, _, [left, right]}]} -> {:not_in, left, right}
{op, _, [left, right]} -> {op, left, right}
end
quote bind_quoted: [op: op, left: left, right: right, opts: opts] do
[left, right, opts] =
(fn left, right, opts ->
keys = Keyword.get(opts, :keys, [])
if keys == [] do
[left, right, opts]
else
t = fn lst -> Enum.map(lst, &Map.take(&1, keys)) end
[t.(left), t.(right), Keyword.delete(opts, :keys)]
end
end).(left, right, opts)
Copeiro.__assert_lists__(op, left, right, opts)
end
end
end
|
lib/copeiro.ex
| 0.891746
| 0.895202
|
copeiro.ex
|
starcoder
|
defmodule CSV.Decoding.Parser do
alias CSV.EscapeSequenceError
alias CSV.StrayQuoteError
@moduledoc ~S"""
  The CSV Parser module - receives tokens coming from the lexer and assembles
  them into a row of fields.
"""
@doc """
  Parses a list of tokens (as produced by the lexer) into a row of fields,
  returning `{:ok, row, index}` or an `{:error, type, message, index}` tuple
  for the decoder.
## Options
Options get transferred from the decoder. They are:
* `:strip_fields` – When set to true, will strip whitespace from fields.
Defaults to false.
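  ## Example
  A sketch, assuming the lexer emits token 2-tuples for the line `a,"b"`:
      tokens = [{:content, "a"}, {:separator, ","},
                {:double_quote, "\""}, {:content, "b"}, {:double_quote, "\""}]
      CSV.Decoding.Parser.parse({tokens, 0})
      #=> {:ok, ["a", "b"], 0}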
"""
def parse(message, options \\ [])
def parse({tokens, index}, options) do
case parse([], "", tokens, :unescaped, options) do
{:ok, row} -> {:ok, row, index}
{:error, type, message} -> {:error, type, message, index}
end
end
def parse({:error, mod, message, index}, _) do
{:error, mod, message, index}
end
defp parse(row, field, [token | tokens], :inline_quote, options) do
case token do
{:double_quote, content} ->
parse(row, field <> content, tokens, :unescaped, options)
_ ->
{:error, StrayQuoteError, field}
end
end
defp parse(row, field, [token | tokens], :inline_quote_in_escaped, options) do
case token do
{:double_quote, content} ->
parse(row, field <> content, tokens, :escaped, options)
{:separator, _} ->
parse(row ++ [field |> strip(options)], "", tokens, :unescaped, options)
{:delimiter, _} ->
parse(row, field, tokens, :unescaped, options)
_ ->
{:error, StrayQuoteError, field}
end
end
defp parse(row, field, [token | tokens], :escaped, options) do
case token do
{:double_quote, _} ->
parse(row, field, tokens, :inline_quote_in_escaped, options)
{_, content} ->
parse(row, field <> content, tokens, :escaped, options)
end
end
defp parse(_, field, [], :escaped, _) do
{:error, EscapeSequenceError, field}
end
defp parse(_, field, [], :inline_quote, _) do
{:error, StrayQuoteError, field}
end
defp parse(row, "", [token | tokens], :unescaped, options) do
case token do
{:content, content} ->
parse(row, content, tokens, :unescaped, options)
{:separator, _} ->
parse(row ++ [""], "", tokens, :unescaped, options)
{:delimiter, _} ->
parse(row, "", tokens, :unescaped, options)
{:double_quote, _} ->
parse(row, "", tokens, :escaped, options)
end
end
defp parse(row, field, [token | tokens], :unescaped, options) do
case token do
{:content, content} ->
parse(row, field <> content, tokens, :unescaped, options)
{:separator, _} ->
parse(row ++ [field |> strip(options)], "", tokens, :unescaped, options)
{:delimiter, _} ->
parse(row, field, tokens, :unescaped, options)
{:double_quote, _} ->
parse(row, field, tokens, :inline_quote, options)
end
end
defp parse(row, field, [], :inline_quote_in_escaped, options) do
{:ok, row ++ [field |> strip(options)]}
end
defp parse(row, field, [], :unescaped, options) do
{:ok, row ++ [field |> strip(options)]}
end
defp strip(field, options) do
strip_fields = options |> Keyword.get(:strip_fields, false)
case strip_fields do
true -> field |> String.trim()
_ -> field
end
end
end
|
lib/csv/decoding/parser.ex
| 0.784567
| 0.564098
|
parser.ex
|
starcoder
|
defmodule Peerage.Via.Udp do
@moduledoc """
Use UDP multicast to find other nodes.
    config :peerage, via: Peerage.Via.Udp, serves: true,
ip: {0,0,0,0},
port: 45_900,
multicast_addr: {230,1,1,1}
Will both broadcast and receive node names via UDP on port 45900,
and keep track of ones it's seen in process state. It's a GenServer,
so we let Peerage know it needs to be run and
supervised with `serves: true`.
`Peerage.Server` periodically calls `poll()`, which is a client
function for the GenServer's `handle_call(:poll, _, state)`, which
returns the seen node names from state.
Only one node can bind the socket, but you can test multiple
nodes using docker, like
[this](https://github.com/docker/docker/issues/3043#issuecomment-51825140).
For more info on UDP in Elixir, see
[this scaleSmall post on multicast UDP in Elixir from
2015](http://dbeck.github.io/Scalesmall-W5-UDP-Multicast-Mixed-With-TCP/),
  especially the explanation of gen_udp's `active: N` mode.
"""
use GenServer
require Logger
@behaviour Peerage.Provider
def start_link, do: GenServer.start_link __MODULE__, :ok, name: __MODULE__
def init(:ok) do
{:ok, socket} = :gen_udp.open port = get_port(), [
:binary, reuseaddr: true, broadcast: true, multicast_loop: true,
active: 10, multicast_ttl: get_ttl(),
ip: get_ip(), add_membership: {maddr = get_maddr(), {0,0,0,0}}
]
{:ok, %{seen: MapSet.new(), conn: {maddr, port, socket}}, 0}
end
@doc "Client function: `Peerage.Provider` callback. Calls this GenServer."
def poll do
__MODULE__ |> GenServer.whereis |> do_poll
end
defp do_poll(pid) when is_pid(pid), do: __MODULE__ |> GenServer.call(:poll)
  defp do_poll(_) do
    Logger.debug "(no server)"
    []
  end
@doc "Server function: returns list of node names we've seen."
def handle_call(:poll, _, state = %{seen: ms}), do: {:reply, MapSet.to_list(ms), state}
def handle_call(:poll, _, state), do: {:reply, [], state}
@doc "Broadcast our node name via UDP every 3-7 seconds"
def handle_info(:broadcast, state = %{conn: {addr, port, sock}}) do
:ok = :gen_udp.send(sock, addr, port, ["Peer:#{ node() }"])
Process.send_after(self(), :broadcast, :rand.uniform(4_000) + 3_000)
{:noreply, state}
end
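  # Wire format sketch: a node named :"a@127.0.0.1" broadcasts the payload
  # "Peer:a@127.0.0.1"; the receiving clause below strips the "Peer:" prefix
  # and stores the remainder as an atom in `state.seen`.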
def handle_info(:timeout, state), do: handle_info(:broadcast, state)
@doc "Handle UDP packet. If it's a node name broadcast, adds to `state.seen`."
def handle_info({:udp,sock,_,_, "Peer:" <> name}, state = %{seen: ms}) do
Logger.debug " -> Peerage.Via.Udp sees: #{ name }"
:inet.setopts(sock, active: 1)
{:noreply, %{state | seen: ms |> MapSet.put(name |> String.to_atom)}}
end
def handle_info({:udp,sock,_,_,_}, state) do # bad packet,
:inet.setopts(sock, active: 1) # but don't die.
{:noreply, state} # ^ TODO configurable.
end
def terminate(_,_, %{conn: {_,_,sock}}), do: :gen_udp.close(sock)
# helpers
defp get_port, do: Application.get_env :peerage, :port, 45_900
defp get_ip, do: Application.get_env :peerage, :ip, {0,0,0,0}
defp get_maddr, do: Application.get_env :peerage, :multicast_addr, {230,1,1,1}
defp get_ttl, do: Application.get_env :peerage, :ttl, 1
end
|
lib/peerage/udp.ex
| 0.665954
| 0.438905
|
udp.ex
|
starcoder
|
defmodule UltraDark.Validator do
alias UltraDark.Blockchain.Block
alias UltraDark.Utilities
alias UltraDark.KeyPair
alias Decimal, as: D
@moduledoc """
  Responsible for applying the consensus rules to blocks and transactions
"""
@doc """
A block is considered valid if the index is greater than the index of the previous block,
the previous_hash is equal to the hash of the previous block, and the hash of the block,
when recalculated, is the same as what the listed block hash is
"""
@spec is_block_valid?(Block, list, number) :: :ok | {:error, any}
def is_block_valid?(block, chain, difficulty) do
last_block = List.first(chain)
with :ok <- valid_index(block.index, last_block.index),
:ok <- valid_prev_hash?(block.previous_hash, last_block.hash),
:ok <- valid_hash?(block),
:ok <- valid_coinbase?(block),
:ok <- valid_transactions?(block),
:ok <- valid_difficulty?(block, difficulty) do
:ok
else
err -> err
end
end
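  # Usage sketch (caller-side; `candidate`, `chain` and `difficulty` are
  # assumed to come from the caller's context):
  #
  #     case UltraDark.Validator.is_block_valid?(candidate, chain, difficulty) do
  #       :ok -> :accept
  #       {:error, reason} -> {:reject, reason}
  #     end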
@spec valid_index(number, number) :: :ok | {:error, {:invalid_index, number, number}}
defp valid_index(index, prev_index) when index > prev_index, do: :ok
defp valid_index(index, prev_index) when index <= prev_index,
do: {:error, {:invalid_index, prev_index, index}}
@spec valid_prev_hash?(String.t(), String.t()) ::
:ok | {:error, {:wrong_hash, {:doesnt_match_last, String.t(), String.t()}}}
defp valid_prev_hash?(prev_hash, last_block_hash) when prev_hash == last_block_hash, do: :ok
defp valid_prev_hash?(prev_hash, last_block_hash) when prev_hash != last_block_hash,
do: {:error, {:wrong_hash, {:doesnt_match_last, prev_hash, last_block_hash}}}
@spec valid_hash?(Block) :: :ok | {:error, {:wrong_hash, {:too_high, String.t(), number}}}
defp valid_hash?(%{
index: index,
previous_hash: previous_hash,
timestamp: timestamp,
nonce: nonce,
hash: hash,
merkle_root: merkle_root,
difficulty: difficulty
}) do
    with :ok <- compare_hash({index, previous_hash, timestamp, nonce, merkle_root}, hash),
         :ok <-
           (if Block.hash_beat_target?(%{hash: hash, difficulty: difficulty}),
              do: :ok,
              else: {:error, {:wrong_hash, {:too_high, hash, difficulty}}}) do
      :ok
    else
      err -> err
    end
end
@spec compare_hash({number, String.t(), String.t(), number, String.t()}, String.t()) ::
:ok | {:error, {:wrong_hash, {:doesnt_match_provided, String.t(), String.t()}}}
defp compare_hash({index, previous_hash, timestamp, nonce, merkle_root}, hash) do
computed =
[Integer.to_string(index), previous_hash, timestamp, Integer.to_string(nonce), merkle_root]
|> Utilities.sha3_base16()
if computed == hash do
:ok
else
{:error, {:wrong_hash, {:doesnt_match_provided, computed, hash}}}
end
end
@spec valid_coinbase?(Block) :: :ok | {:error, :no_coinbase}
def valid_coinbase?(%{transactions: transactions, index: block_index}) do
coinbase = List.first(transactions)
    with :ok <- (if coinbase != nil, do: :ok, else: {:error, :no_coinbase}),
:ok <- is_coinbase?(coinbase),
:ok <- appropriate_coinbase_output?(transactions, block_index) do
:ok
else
err -> err
end
end
@spec valid_transaction?(Transaction) :: boolean
def valid_transaction?(%{inputs: inputs}) do
inputs
|> Enum.map(fn input ->
case {Base.decode16(input.addr), Base.decode16(input.signature)} do
{{:ok, pub}, {:ok, sig}} -> KeyPair.verify_signature(pub, sig, input.txoid)
_ -> false
end
end)
|> Enum.all?(&(&1 == true))
end
@spec valid_transactions?(Block) :: :ok | {:error, :invalid_inputs}
def valid_transactions?(%{transactions: transactions}) do
if Enum.all?(transactions, &valid_transaction?(&1)), do: :ok, else: {:error, :invalid_inputs}
end
@spec is_coinbase?(Transaction) :: :ok | {:error, {:not_coinbase, String.t()}}
defp is_coinbase?(tx) do
if tx.txtype == "COINBASE", do: :ok, else: {:error, {:not_coinbase, tx.txtype}}
end
  @spec appropriate_coinbase_output?(list, number) :: :ok | {:error, {:invalid_coinbase, Decimal.t(), Decimal.t(), Decimal.t()}}
defp appropriate_coinbase_output?([coinbase | transactions], block_index) do
total_fees = Block.total_block_fees(transactions)
reward = Block.calculate_block_reward(block_index)
amount = List.first(coinbase.outputs).amount
if D.equal?(D.add(total_fees, reward), amount) do
:ok
else
{:error, {:invalid_coinbase, total_fees, reward, amount}}
end
end
@spec valid_difficulty?(Block, number) :: :ok | {:error, {:invalid_difficulty, number, number}}
def valid_difficulty?(%{difficulty: difficulty}, diff) do
if difficulty == diff, do: :ok, else: {:error, {:invalid_difficulty, difficulty, diff}}
end
end
|
lib/validator.ex
| 0.84653
| 0.537284
|
validator.ex
|
starcoder
|
defmodule Timex.Date do
@moduledoc """
Module for working with dates.
  Functions that produce time intervals use UNIX epoch (or simply Epoch) as the
default reference date. Epoch is defined as UTC midnight of January 1, 1970.
Time intervals in this module don't account for leap seconds.
Supported tasks:
* get current date in the desired time zone
* convert dates between time zones and time units
* introspect dates to find out weekday, week number, number of days in a given month, etc.
* parse dates from string
* compare dates
* date arithmetic
"""
require Record
alias Timex.DateTime, as: DateTime
alias Timex.Time, as: Time
alias Timex.Timezone, as: Timezone
alias Timex.TimezoneInfo, as: TimezoneInfo
# Date types
@type dtz :: { datetime, TimezoneInfo.t }
@type datetime :: { date, time }
@type date :: { year, month, day }
@type iso_triplet :: { year, weeknum, weekday }
@type year :: non_neg_integer
@type month :: 1..12
@type day :: 1..31
@type daynum :: 1..366
@type weekday :: 1..7
@type weeknum :: 1..53
@type num_of_days :: 28..31
# Time types
@type time :: { hour, minute, second }
@type hour :: 0..23
@type minute :: 0..59
@type second :: 0..59
@type timestamp :: {megaseconds, seconds, microseconds }
@type megaseconds :: non_neg_integer
@type seconds :: non_neg_integer
@type microseconds :: non_neg_integer
# Constants
@million 1_000_000
@weekdays [
{"Monday", 1}, {"Tuesday", 2}, {"Wednesday", 3}, {"Thursday", 4},
{"Friday", 5}, {"Saturday", 6}, {"Sunday", 7}
]
@months [
{"January", 1}, {"February", 2}, {"March", 3},
{"April", 4}, {"May", 5}, {"June", 6},
{"July", 7}, {"August", 8}, {"September", 9},
{"October", 10}, {"November", 11}, {"December", 12}
]
@doc """
Get a TimezoneInfo object for the specified offset or name.
When offset or name is invalid, exception is raised.
## Examples
timezone() #=> <local time zone>
timezone(:utc) #=> { 0.0, "UTC" }
timezone(2) #=> { 2.0, "EET" }
timezone("+2") #=> { 2.0, "EET" }
timezone("EET") #=> { 2.0, "EET" }
"""
@spec timezone() :: TimezoneInfo.t
@spec timezone(:local, DateTime.t | nil) :: TimezoneInfo.t
@spec timezone(:utc | number | binary) :: TimezoneInfo.t
def timezone(), do: Timezone.local()
def timezone(:local), do: Timezone.local()
def timezone(name), do: Timezone.get(name)
def timezone(:local, date), do: Timezone.local(date)
@doc """
Get current date.
## Examples
Date.now #=> %DateTime{year: 2013, month: 3, day: 16, hour: 11, minute: 1, second: 12, timezone: %TimezoneInfo{...}}
"""
@spec now() :: DateTime.t
def now do
construct(:calendar.universal_time(), timezone(:utc))
end
@doc """
Get the current date, in a specific timezone.
## Examples
> Date.now("America/Chicago")
%DateTime{
year: 2013, month: 3, day: 16, ..,
timezone: %TimezoneInfo{standard_abbreviation: "CST", ...}
}
"""
@spec now(binary) :: DateTime.t
def now(tz) when is_binary(tz) do
case timezone(tz) do
%TimezoneInfo{} = tzinfo ->
construct(:calendar.universal_time(), timezone(:utc))
|> set(timezone: tzinfo)
{:error, _} = error ->
error
end
end
@doc """
Get representation of the current date in seconds or days since Epoch.
See convert/2 for converting arbitrary dates to various time units.
## Examples
now(:secs) #=> 1363439013
now(:days) #=> 15780
"""
@spec now(:secs | :days) :: integer
def now(:secs), do: to_secs(now())
def now(:days), do: to_days(now())
@doc """
Get current local date.
See also `universal/0`.
## Examples
Date.local #=> %DateTime{year: 2013, month: 3, day: 16, hour: 11, minute: 1, second: 12, timezone: %TimezoneInfo{...}}
"""
@spec local() :: DateTime.t
def local, do: construct(:calendar.local_time(), timezone(:local))
@doc """
Convert a date to your local timezone.
See also `universal/1`.
## Examples
Date.now |> Date.local
"""
@spec local(date :: DateTime.t) :: DateTime.t
def local(%DateTime{} = date), do: local(date, timezone(:local))
@doc """
Convert a date to a local date, using the provided timezone
## Examples
Date.now |> Date.local(timezone(:utc))
"""
@spec local(date :: DateTime.t, tz :: TimezoneInfo.t) :: DateTime.t
def local(%DateTime{:timezone => tz} = date, localtz) do
if tz !== localtz do
Timezone.convert(date, localtz)
else
date
end
end
@doc """
Get current the current datetime in UTC.
See also `local/0`.
"""
@spec universal() :: DateTime.t
def universal, do: construct(:calendar.universal_time(), timezone(:utc))
@doc """
Convert a date to UTC
See also `local/1`.
## Examples
Date.now |> Date.universal
"""
@spec universal(DateTime.t) :: DateTime.t
def universal(date), do: Timezone.convert(date, timezone(:utc))
@doc """
The first day of year zero (calendar module's default reference date).
See also `epoch/0`.
## Examples
Date.zero |> Date.to_secs #=> 0
"""
@spec zero() :: DateTime.t
def zero, do: construct({0, 1, 1}, {0, 0, 0}, timezone(:utc))
@doc """
The date of Epoch, used as default reference date by this module
and also by the Time module.
See also `zero/0`.
## Examples
Date.epoch |> Date.to_secs #=> 0
"""
@spec epoch() :: DateTime.t
def epoch, do: construct({1970, 1, 1}, {0, 0, 0}, timezone(:utc))
@doc """
  Time interval between year 0 and Epoch, expressed in the specified units.
## Examples
epoch() #=> %DateTime{year: 1970, month: 1 ...}
epoch(:secs) #=> 62167219200
epoch(:days) #=> 719528
"""
@spec epoch(:timestamp) :: timestamp
@spec epoch(:secs | :days) :: integer
def epoch(:timestamp), do: to_timestamp(epoch())
def epoch(:secs), do: to_secs(epoch(), :zero)
def epoch(:days), do: to_days(epoch(), :zero)
@doc """
Construct a date from Erlang's date or datetime value.
You may specify the date's time zone as the second argument. If the argument
is omitted, UTC time zone is assumed.
When passing {year, month, day} as the first argument, the resulting date
will indicate midnight of that day in the specified timezone (UTC by
default).
## Examples
Date.from(:erlang.universaltime) #=> %DateTime{...}
Date.from(:erlang.localtime) #=> %Datetime{...}
Date.from(:erlang.localtime, :local) #=> %DateTime{...}
Date.from({2014,3,16}, Date.timezone("PST")) #=> %DateTime{...}
Date.from({2014,3,16}, "PST") #=> %DateTime{...}
"""
  @spec from(date | datetime) :: DateTime.t
  @spec from(date | datetime, :utc | :local | TimezoneInfo.t | binary) :: DateTime.t
def from({_,_,_} = date), do: from(date, :utc)
def from({{_,_,_},{_,_,_}} = datetime), do: from(datetime, :utc)
def from({_,_,_} = date, :utc), do: construct({date, {0,0,0}}, timezone(:utc))
def from({{_,_,_},{_,_,_}} = datetime, :utc), do: construct(datetime, timezone(:utc))
def from({_,_,_} = date, :local), do: from({date, {0,0,0}}, timezone(:local))
def from({{_,_,_},{_,_,_}} = datetime, :local), do: from(datetime, timezone(:local))
def from({_,_,_} = date, %TimezoneInfo{} = tz), do: from({date, {0,0,0}}, tz)
def from({{_,_,_},{_,_,_}} = datetime, %TimezoneInfo{} = tz), do: construct(datetime, tz)
def from({_,_,_} = date, tz) when is_binary(tz), do: from({date, {0, 0, 0}}, tz)
def from({{_,_,_},{_,_,_}} = datetime, tz) when is_binary(tz) do
case timezone(tz) do
%TimezoneInfo{} = tzinfo ->
construct(datetime, tzinfo)
{:error, _} = error ->
error
end
end
@doc """
Construct a date from a time interval since Epoch or year 0.
UTC time zone is assumed. This assumption can be modified by setting desired
time zone using set/3 after the date is constructed.
## Examples
Date.from(13, :secs) #=> %DateTime{...}
Date.from(13, :days, :zero) #=> %DateTime{...}
date = Date.from(Time.now, :timestamp)
|> Date.set(:timezone, timezone(:local)) #=> yields the same value as Date.now would
"""
@spec from(timestamp, :timestamp) :: DateTime.t
@spec from(number, :secs | :days) :: DateTime.t
@spec from(timestamp, :timestamp, :epoch | :zero) :: DateTime.t
@spec from(number, :secs | :days, :epoch | :zero) :: DateTime.t
def from(value, type, reference \\ :epoch)
def from({mega, sec, _}, :timestamp, :epoch), do: from(mega * @million + sec, :secs)
def from({mega, sec, _}, :timestamp, :zero), do: from(mega * @million + sec, :secs, :zero)
def from(sec, :secs, :epoch) do
construct(:calendar.gregorian_seconds_to_datetime(trunc(sec) + epoch(:secs)), timezone(:utc))
end
def from(sec, :secs, :zero) do
construct(:calendar.gregorian_seconds_to_datetime(trunc(sec)), timezone(:utc))
end
def from(days, :days, :epoch) do
construct(:calendar.gregorian_days_to_date(trunc(days) + epoch(:days)), {0,0,0}, timezone(:utc))
end
def from(days, :days, :zero) do
construct(:calendar.gregorian_days_to_date(trunc(days)), {0,0,0}, timezone(:utc))
end
@doc """
Multi-purpose conversion function. Converts a date to the specified time
interval since Epoch. If you'd like to specify year 0 as a reference date,
use one of the to_* functions.
## Examples
date = Date.now
Date.convert(date, :secs) + Date.epoch(:secs) == Date.to_secs(date, :zero) #=> true
"""
@spec convert(DateTime.t) :: timestamp
@spec convert(DateTime.t, :timestamp) :: timestamp
@spec convert(DateTime.t, :secs | :days) :: integer
def convert(date, type \\ :timestamp)
def convert(date, :timestamp), do: to_timestamp(date)
def convert(date, :secs), do: to_secs(date)
def convert(date, :days), do: to_days(date)
@doc """
Convert a date to a timestamp value consumable by the Time module.
See also `diff/2` if you want to specify an arbitrary reference date.
## Examples
Date.epoch |> Date.to_timestamp #=> {0,0,0}
"""
@spec to_timestamp(DateTime.t) :: timestamp
@spec to_timestamp(DateTime.t, :epoch | :zero) :: timestamp
def to_timestamp(date, reference \\ :epoch)
def to_timestamp(date, :epoch) do
sec = to_secs(date)
{ div(sec, @million), rem(sec, @million), 0 }
end
def to_timestamp(date, :zero) do
sec = to_secs(date, :zero)
{ div(sec, @million), rem(sec, @million), 0 }
end
@doc """
Convert a date to an integer number of seconds since Epoch or year 0.
See also `diff/2` if you want to specify an arbitrary reference date.
## Examples
Date.from({{1999, 1, 2}, {12,13,14}}) |> Date.to_secs #=> 915279194
"""
@spec to_secs(DateTime.t) :: integer
@spec to_secs(DateTime.t, :epoch | :zero) :: integer
def to_secs(date, reference \\ :epoch)
def to_secs(date, :epoch), do: to_secs(date, :zero) - epoch(:secs)
def to_secs(%DateTime{:year => y, :month => m, :day => d, :hour => h, :minute => min, :second => s}, :zero) do
:calendar.datetime_to_gregorian_seconds({{y, m, d}, {h, min, s}})
end
@doc """
Convert the date to an integer number of days since Epoch or year 0.
  See also `diff/2` if you want to specify an arbitrary reference date.
## Examples
to_days(now()) #=> 15780
"""
@spec to_days(DateTime.t) :: integer
@spec to_days(DateTime.t, :epoch | :zero) :: integer
def to_days(date, reference \\ :epoch)
def to_days(date, :epoch), do: to_days(date, :zero) - epoch(:days)
def to_days(%DateTime{:year => y, :month => m, :day => d}, :zero) do
:calendar.date_to_gregorian_days({y, m, d})
end
@doc """
Given a date, get the century this date is in.
## Example
iex> Date.century(Date.now)
21
"""
  @spec century(DateTime.t) :: non_neg_integer
  def century(datetime \\ now())
def century(%DateTime{:year => y}) do
base_century = div(y, 100)
years_past = rem(y, 100)
    cond do
      years_past == 0 -> base_century
      true -> base_century + 1
    end
end
@doc """
Return weekday number (as defined by ISO 8601) of the specified date.
## Examples
Date.epoch |> Date.weekday #=> 4 (i.e. Thursday)
"""
@spec weekday(DateTime.t) :: weekday
def weekday(%DateTime{:year => y, :month => m, :day => d}), do: :calendar.day_of_the_week({y, m, d})
@doc """
Returns the ordinal day number of the date.
"""
@spec day(DateTime.t) :: daynum
def day(date) do
start_of_year = date |> set([month: 1, day: 1])
1 + diff(start_of_year, date, :days)
end
@doc """
  Convert an ISO ordinal day number to the day it represents in the
  current year. If no date is provided, a new one will be created, with
  the time set to 0:00:00, in UTC. Otherwise, the date provided will
  have its month and day reset to the date represented by the ordinal day.
  ## Examples
      180 |> Date.from_iso_day #=> %DateTime{year: 2014, month: 6, day: 29}
      180 |> Date.from_iso_day(date) #=> <modified date struct where the month and day have been set appropriately>
"""
@spec from_iso_day(non_neg_integer, date | nil) :: DateTime.t
def from_iso_day(day, date \\ nil)
  def from_iso_day(day, nil) do
    today = now() |> set([month: 1, day: 1, hour: 0, minute: 0, second: 0, ms: 0])
    # ordinal day 1 is January 1st, so shift by day - 1
    shift(today, days: day - 1)
  end
  def from_iso_day(day, date) do
    reset = date |> set([month: 1, day: 1])
    shift(reset, days: day - 1)
  end
@doc """
Return a pair {year, week number} (as defined by ISO 8601) that date falls
on.
## Examples
Date.epoch |> Date.iso_week #=> {1970,1}
"""
@spec iso_week(DateTime.t) :: {year, weeknum}
def iso_week(%DateTime{:year => y, :month => m, :day => d}) do
:calendar.iso_week_number({y, m, d})
end
def iso_week(date), do: iso_week(from(date, :utc))
@doc """
Get the day of the week corresponding to the given name.
## Examples
day_to_num("Monday") => 1
day_to_num("Mon") => 1
day_to_num("monday") => 1
day_to_num("mon") => 1
day_to_num(:mon) => 1
"""
@spec day_to_num(binary | atom()) :: integer
@weekdays |> Enum.each fn {day_name, day_num} ->
lower = day_name |> String.downcase
abbr_cased = day_name |> String.slice(0..2)
abbr_lower = lower |> String.slice(0..2)
symbol = abbr_lower |> String.to_atom
day_quoted = quote do
def day_to_num(unquote(day_name)), do: unquote(day_num)
def day_to_num(unquote(lower)), do: unquote(day_num)
def day_to_num(unquote(abbr_cased)), do: unquote(day_num)
def day_to_num(unquote(abbr_lower)), do: unquote(day_num)
def day_to_num(unquote(symbol)), do: unquote(day_num)
end
Module.eval_quoted __MODULE__, day_quoted, [], __ENV__
end
  # Fallback for day names we don't recognize
def day_to_num(x), do: {:error, "Invalid day name: #{x}"}
@doc """
Get the name of the day corresponding to the provided number
"""
@spec day_name(weekday) :: binary
@weekdays |> Enum.each fn {name, day_num} ->
def day_name(unquote(day_num)), do: unquote(name)
end
def day_name(x), do: {:error, "Invalid day num: #{x}"}
@doc """
Get the short name of the day corresponding to the provided number
"""
@spec day_shortname(weekday) :: binary
@weekdays |> Enum.each fn {name, day_num} ->
def day_shortname(unquote(day_num)), do: String.slice(unquote(name), 0..2)
end
def day_shortname(x), do: {:error, "Invalid day num: #{x}"}
@doc """
Get the number of the month corresponding to the given name.
## Examples
month_to_num("January") => 1
month_to_num("Jan") => 1
month_to_num("january") => 1
month_to_num("jan") => 1
month_to_num(:january) => 1
"""
@spec month_to_num(binary) :: integer
@months |> Enum.each fn {month_name, month_num} ->
lower = month_name |> String.downcase
abbr_cased = month_name |> String.slice(0..2)
abbr_lower = lower |> String.slice(0..2)
symbol = abbr_lower |> String.to_atom
full_chars = month_name |> String.to_char_list
abbr_chars = abbr_cased |> String.to_char_list
month_quoted = quote do
def month_to_num(unquote(month_name)), do: unquote(month_num)
def month_to_num(unquote(lower)), do: unquote(month_num)
def month_to_num(unquote(abbr_cased)), do: unquote(month_num)
def month_to_num(unquote(abbr_lower)), do: unquote(month_num)
def month_to_num(unquote(symbol)), do: unquote(month_num)
def month_to_num(unquote(full_chars)), do: unquote(month_num)
def month_to_num(unquote(abbr_chars)), do: unquote(month_num)
end
Module.eval_quoted __MODULE__, month_quoted, [], __ENV__
end
  # Fallback for month names we don't recognize
def month_to_num(x), do: {:error, "Invalid month name: #{x}"}
@doc """
Get the name of the month corresponding to the provided number
"""
@spec month_name(month) :: binary
@months |> Enum.each fn {name, month_num} ->
def month_name(unquote(month_num)), do: unquote(name)
end
def month_name(x), do: {:error, "Invalid month num: #{x}"}
@doc """
Get the short name of the month corresponding to the provided number
"""
@spec month_shortname(month) :: binary
@months |> Enum.each fn {name, month_num} ->
def month_shortname(unquote(month_num)), do: String.slice(unquote(name), 0..2)
end
def month_shortname(x), do: {:error, "Invalid month num: #{x}"}
@doc """
Return a 3-tuple {year, week number, weekday} for the given date.
## Examples
Date.epoch |> Date.iso_triplet #=> {1970, 1, 4}
"""
@spec iso_triplet(DateTime.t) :: {year, weeknum, weekday}
def iso_triplet(%DateTime{} = datetime) do
{ iso_year, iso_week } = iso_week(datetime)
{ iso_year, iso_week, weekday(datetime) }
end
@doc """
Given an ISO triplet `{year, week number, weekday}`, convert it to a
DateTime struct.
## Examples
      {2014, 5, 2} |> Date.from_iso_triplet #=> %DateTime{year: 2014, month: 1, day: 28}
"""
@spec from_iso_triplet(iso_triplet) :: DateTime.t
def from_iso_triplet({year, _, _} = triplet) do
DateTime.new
|> set([year: year, month: 1, day: 1])
|> do_from_iso_triplet(triplet)
end
defp do_from_iso_triplet(date, {_, week, weekday}) do
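    # Jan 4 is always inside ISO week 1, so the weekday of Jan 4 anchors the
    # offset between {week, weekday} pairs and ordinal days of the year.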
{year, _, first_weekday} = date |> set([month: 1, day: 4]) |> iso_triplet
weekday_offset = first_weekday + 3
ordinal = ((week * 7) + weekday) - weekday_offset
cond do
ordinal <= 0 -> do_from_iso_triplet(%{date | :year => year - 1}, {year, 53, weekday})
      true -> date |> shift(days: ordinal - 1) # ordinal is 1-based (Jan 1 == 1)
end
end
@doc """
Return the number of days in the month which the date falls on.
## Examples
Date.epoch |> Date.days_in_month #=> 31
"""
  @spec days_in_month(DateTime.t) :: num_of_days
  def days_in_month(%DateTime{:year => year, :month => month}) do
    :calendar.last_day_of_the_month(year, month)
  end
  @spec days_in_month(year, month) :: num_of_days
def days_in_month(year, month) do
:calendar.last_day_of_the_month(year, month)
end
@doc """
Return a boolean indicating whether the given year is a leap year. You may
  pass a date or a year number.
## Examples
Date.epoch |> Date.is_leap? #=> false
Date.is_leap?(2012) #=> true
"""
@spec is_leap?(DateTime.t | year) :: boolean
def is_leap?(year) when is_integer(year), do: :calendar.is_leap_year(year)
def is_leap?(%DateTime{:year => year}), do: is_leap?(year)
@doc """
Return a boolean indicating whether the given date is valid.
## Examples
      Date.from({{1,1,1}, {1,1,1}}) |> Date.is_valid? #=> true
      Date.from({12,13,14}) |> Date.is_valid? #=> false
      Date.from({{12,12,12}, {-1,59,59}}) |> Date.is_valid? #=> false
      {{12,12,12}, {1,1,1}, Date.timezone()} |> Date.is_valid? #=> true
"""
@spec is_valid?(dtz | DateTime.t) :: boolean
def is_valid?({date, time, tz}) do
:calendar.valid_date(date) and is_valid_time?(time) and is_valid_tz?(tz)
end
def is_valid?(%DateTime{:year => y, :month => m, :day => d, :hour => h, :minute => min, :second => sec, :timezone => tz}) do
:calendar.valid_date({y,m,d}) and is_valid_time?({h,min,sec}) and is_valid_tz?(tz)
end
defp is_valid_time?({hour,min,sec}) do
hour >= 0 and hour < 24 and min >= 0 and min < 60 and sec >= 0 and sec < 60
end
defp is_valid_tz?(%TimezoneInfo{} = tz) when tz == %TimezoneInfo{}, do: false
defp is_valid_tz?(%TimezoneInfo{}), do: true
defp is_valid_tz?(_), do: false
@doc """
Produce a valid date from a possibly invalid one.
All date's components will be clamped to the minimum or maximum valid value.
## Examples
{{1,13,44}, {-8,60,61}}
|> Date.normalize
|> Date.local #=> DateTime[month: 12, day: 31, hour: 0, minute: 59, second: 59, ...]
"""
@spec normalize(dtz) :: DateTime.t
@spec normalize(atom(), term) :: DateTime.t
def normalize({date, time}), do: normalize({date, time, timezone(:utc)})
def normalize({date, time, tz}) do
construct(normalize(:date, date), normalize(:time, time), tz)
end
defp normalize(:date, {year, month, day}) do
year = normalize(:year, year)
month = normalize(:month, month)
day = normalize(:day, {year, month, day})
{year, month, day}
end
defp normalize(:year, year) when year < 0, do: 0
defp normalize(:year, year), do: year
defp normalize(:month, month) do
cond do
month < 1 -> 1
month > 12 -> 12
true -> month
end
end
defp normalize(:time, {hour,min,sec}) do
hour = normalize(:hour, hour)
min = normalize(:minute, min)
sec = normalize(:second, sec)
{hour, min, sec}
end
defp normalize(:hour, hour) do
cond do
hour < 0 -> 0
hour > 23 -> 23
true -> hour
end
end
defp normalize(:minute, min) do
cond do
min < 0 -> 0
min > 59 -> 59
true -> min
end
end
defp normalize(:second, sec) do
cond do
sec < 0 -> 0
sec > 59 -> 59
true -> sec
end
end
defp normalize(:ms, ms) do
cond do
ms < 0 -> 0
ms > 999 -> 999
true -> ms
end
end
defp normalize(:timezone, tz), do: tz
defp normalize(:day, {year, month, day}) do
year = normalize(:year, year)
month = normalize(:month, month)
ndays = days_in_month(year, month)
cond do
day < 1 -> 1
day > ndays -> ndays
true -> day
end
end
@doc """
Return a new date with the specified fields replaced by new values.
Values are automatically validated and clamped to good values by default. If
you wish to skip validation, perhaps for performance reasons, pass `validate: false`.
Values are applied in order, so if you pass `[datetime: dt, date: d]`, the date value
from `date` will override `datetime`'s date value.
## Examples
Date.now |> Date.set(date: {1,1,1}) #=> DateTime[year: 1, month: 1, day: 1, ...]
Date.now |> Date.set(hour: 0) #=> DateTime[hour: 0, ...]
Date.now |> Date.set([date: {1,1,1}, hour: 30]) #=> DateTime[year: 1, month: 1, day: 1, hour: 23, ...]
Date.now |> Date.set([
datetime: {{1,1,1}, {0,0,0}}, date: {2,2,2}
]) #=> DateTime[year: 2, month: 2, day: 2, ...]
Date.now |> Date.set([minute: 74, validate: false]) #=> DateTime[minute: 74, ...]
"""
@spec set(DateTime.t, list({atom(), term})) :: DateTime.t
def set(date, options) do
    validate? = options |> Keyword.get(:validate, true)
Enum.reduce options, date, fn option, result ->
case option do
{:validate, _} -> result
{:datetime, {{y, m, d}, {h, min, sec}}} ->
          if validate? do
            %{result |
              :year => normalize(:year, y),
              :month => normalize(:month, m),
              :day => normalize(:day, {y, m, d}),
              :hour => normalize(:hour, h),
              :minute => normalize(:minute, min),
              :second => normalize(:second, sec)
            }
else
%{result | :year => y, :month => m, :day => d, :hour => h, :minute => min, :second => sec}
end
{:date, {y, m, d}} ->
if validate? do
%{result | :year => normalize(:year, y), :month => normalize(:month, m), :day => normalize(:day, {y, m, d})}
else
%{result | :year => y, :month => m, :day => d}
end
{:time, {h, m, s}} ->
if validate? do
%{result | :hour => normalize(:hour, h), :minute => normalize(:minute, m), :second => normalize(:second, s)}
else
%{result | :hour => h, :minute => m, :second => s}
end
{:day, d} ->
if validate? do
%{result | :day => normalize(:day, {result.year, result.month, d})}
else
%{result | :day => d}
end
{:timezone, tz} ->
case tz do
%TimezoneInfo{} ->
%{result | :timezone => tz}
_ ->
%{result | :timezone => Timezone.get(tz)}
end
{name, val} when name in [:year, :month, :hour, :minute, :second, :ms] ->
if validate? do
Map.put(result, name, normalize(name, val))
else
Map.put(result, name, val)
end
{option_name, _} -> raise "Invalid option passed to Date.set: #{option_name}"
end
end
end
@doc """
Compare two dates returning one of the following values:
  * `-1` -- `this` comes before `other`
  * `0` -- Both arguments represent the same date when coalesced to the same timezone.
  * `1` -- `this` comes after `other`
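  ## Examples
      # both results follow from the seconds-granularity comparison below
      Date.compare(Date.epoch, Date.now) #=> -1
      Date.compare(Date.now, Date.epoch) #=> 1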
"""
@spec compare(DateTime.t, DateTime.t | :epoch | :zero | :distant_past | :distant_future) :: -1 | 0 | 1
@spec compare(DateTime.t, DateTime.t, :years | :months | :weeks | :days | :hours | :mins | :secs | :timestamp) :: -1 | 0 | 1
def compare(date, :epoch), do: compare(date, epoch())
def compare(date, :zero), do: compare(date, zero())
def compare(_, :distant_past), do: +1
def compare(_, :distant_future), do: -1
def compare(date, date), do: 0
def compare(a, b), do: compare(a, b, :secs)
def compare(%DateTime{:timezone => thistz} = this, %DateTime{:timezone => othertz} = other, granularity)
when granularity in [:years, :months, :weeks, :days, :hours, :mins, :secs, :timestamp] do
localized = if thistz !== othertz do
# Convert `other` to `this`'s timezone
Timezone.convert(other, thistz)
else
other
end
difference = diff(this, localized, granularity)
cond do
difference < 0 -> +1
difference == 0 -> 0
difference > 0 -> -1
end
end
def compare(_, _, _), do: {:error, "Invalid comparison granularity."}
@doc """
Determine if two dates represent the same point in time
"""
@spec equal?(DateTime.t, DateTime.t) :: boolean
def equal?(this, other), do: compare(this, other) == 0
@doc """
Calculate time interval between two dates. If the second date comes after the
first one in time, return value will be positive; and negative otherwise.
"""
@spec diff(DateTime.t, DateTime.t, :timestamp) :: timestamp
@spec diff(DateTime.t, DateTime.t, :secs | :days | :weeks | :months | :years) :: integer
def diff(this, other, :timestamp) do
diff(this, other, :secs) |> Time.from(:secs)
end
def diff(this, other, :secs) do
to_secs(other, :zero) - to_secs(this, :zero)
end
def diff(this, other, :mins) do
(to_secs(other, :zero) - to_secs(this, :zero)) |> div(60)
end
def diff(this, other, :hours) do
(to_secs(other, :zero) - to_secs(this, :zero)) |> div(60) |> div(60)
end
def diff(this, other, :days) do
to_days(other, :zero) - to_days(this, :zero)
end
def diff(this, other, :weeks) do
# TODO: think of a more accurate method
diff(this, other, :days) |> div(7)
end
def diff(this, other, :months) do
%DateTime{:year => y1, :month => m1} = universal(this)
%DateTime{:year => y2, :month => m2} = universal(other)
((y2 - y1) * 12) + (m2 - m1)
end
def diff(this, other, :years) do
%DateTime{:year => y1} = universal(this)
%DateTime{:year => y2} = universal(other)
y2 - y1
end
@doc """
Add time to a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :mins), :timestamp).
"""
@spec add(DateTime.t, timestamp) :: DateTime.t
def add(date, {mega, sec, _}) do
shift(date, [secs: (mega * @million) + sec])
end
@doc """
Subtract time from a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :mins) |> Time.invert, :timestamp).
"""
@spec subtract(DateTime.t, timestamp) :: DateTime.t
def subtract(date, {mega, sec, _}) do
shift(date, [secs: (-mega * @million) - sec])
end
@doc """
A single function for adjusting the date using various units: timestamp,
seconds, minutes, hours, days, weeks, months, years.
When shifting by timestamps, microseconds are ignored.
If the list contains `:month` and at least one other unit, an ArgumentError
is raised (due to ambiguity of such shifts). You can still shift by months
separately.
If `:year` is present, it is applied in the last turn.
The returned date is always valid. If after adding months or years the day
exceeds maximum number of days in the resulting month, that month's last day
is used.
  To prevent day skew, fix up the date after shifting. For example, if you want
  to land on the last day of the next month, do the following:
      shift(date, months: 1) |> set(day: 31)
  Since `set/2` caps values that are out of range, you will get the
  correct last day for each month.
## Examples
date = from({{2013,3,5}, {23,23,23}})
local(shift(date, secs: 24*3600*365))
#=> {{2014,3,5}, {23,23,23}}
local(shift(date, secs: -24*3600*(365*2 + 1))) # +1 day for leap year 2012
#=> {{2011,3,5}, {23,23,23}}
local(shift(date, [secs: 13, day: -1, week: 2]))
#=> {{2013,3,18}, {23,23,36}}
"""
@spec shift(DateTime.t, list({atom(), term})) :: DateTime.t
def shift(date, [{_, 0}]), do: date
def shift(date, [timestamp: {0,0,0}]), do: date
def shift(date, [timestamp: timestamp]), do: add(date, timestamp)
def shift(%DateTime{:timezone => tz} = date, [{type, value}]) when type in [:secs, :mins, :hours] do
secs = to_secs(date)
secs = secs + case type do
:secs -> value
:mins -> value * 60
:hours -> value * 3600
end
shifted = from(secs, :secs)
%{shifted | :timezone => tz}
end
def shift(%DateTime{:hour => h, :minute => m, :second => s, :timezone => tz} = date, [days: value]) do
days = to_days(date)
days = days + value
shifted = from(days, :days) |> set([time: {h, m, s}])
%{shifted | :timezone => tz}
end
def shift(date, [weeks: value]) do
date |> shift([days: value * 7])
end
def shift(date, [months: value]) do
%DateTime{
:year => year, :month => month, :day => day,
:hour => h, :minute => m, :second => s,
:timezone => tz
} = date
month = month + value
# Calculate a valid year value
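    # (the month value may now fall outside 1..12; the year is adjusted so
    # that round_month/1 can wrap the month back into range)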
year = cond do
month == 0 -> year - 1
month < 0 -> year + div(month, 12) - 1
month > 12 -> year + div(month - 1, 12)
true -> year
end
validate({year, round_month(month), day}) |> construct({h, m, s}, tz)
end
def shift(date, [years: value]) do
%DateTime{
:year => year, :month => month, :day => day,
:hour => h, :minute => m, :second => s,
:timezone => tz
} = date
validate({year + value, month, day}) |> construct({h, m, s}, tz)
end
Record.defrecordp :shift_rec, secs: 0, days: 0, years: 0
# This clause will match lists with at least 2 values
def shift(date, spec) when is_list(spec) do
shift_rec(secs: sec, days: day, years: year)
= Enum.reduce spec, shift_rec(), fn
({:timestamp, {mega, tsec, _}}, shift_rec(secs: sec) = rec) ->
shift_rec(rec, [secs: sec + mega * @million + tsec])
({:secs, tsec}, shift_rec(secs: sec) = rec) ->
shift_rec(rec, [secs: sec + tsec])
({:mins, min}, shift_rec(secs: sec) = rec) ->
shift_rec(rec, [secs: sec + min * 60])
({:hours, hrs}, shift_rec(secs: sec) = rec) ->
shift_rec(rec, [secs: sec + hrs * 3600])
({:days, days}, shift_rec(days: day) = rec) ->
shift_rec(rec, [days: day + days])
({:weeks, weeks}, shift_rec(days: day) = rec) ->
shift_rec(rec, [days: day + weeks * 7])
({:years, years}, shift_rec(years: year) = rec) ->
shift_rec(rec, [years: year + years])
({:months, _}, _) ->
raise ArgumentError, message: ":months not supported in bulk shifts"
end
# The order in which we apply secs and days is not important.
# The year shift must always go last though.
date |> shift([secs: sec]) |> shift([days: day]) |> shift([years: year])
end
# Primary constructor for DateTime objects
defp construct({_,_,_} = date, {_,_,_} = time, nil), do: construct(date, time, timezone(:utc))
defp construct({y, m, d}, {h, min, sec}, %TimezoneInfo{} = tz) do
%DateTime{
year: y, month: m, day: d,
hour: h, minute: min, second: sec,
timezone: tz
}
end
defp construct({y, m, d}, {h, min, sec}, {_, name}) do
%DateTime{
year: y, month: m, day: d,
hour: h, minute: min, second: sec,
timezone: Timezone.get(name)
}
end
  defp construct({date, time}, tz), do: construct(date, time, tz)
  defp validate({year, month, day}) do
    # Clamp the day if we went past the last day of the month
    max_day = days_in_month(year, month)
    day = if day > max_day, do: max_day, else: day
    {year, month, day}
  end
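  # Floored modulo: the result is always in 0..b-1 even for negative a,
  # so month arithmetic wraps correctly when shifting backwards.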
defp mod(a, b) do
rem(rem(a, b) + b, b)
end
defp round_month(m) do
case mod(m, 12) do
0 -> 12
other -> other
end
end
end
|
lib/date/date.ex
| 0.91467
| 0.683102
|
date.ex
|
starcoder
|
defmodule SnapFramework.Parser.Outlet do
require Logger
@moduledoc false
def run(ast, assigns) do
ast
|> parse(assigns)
end
# -----------------------------------------------
# render the list of slot component passed to the
# outlet component if it matches the slot_name
# -----------------------------------------------
def parse({:outlet, meta, [slot_name, opts]}, assigns) do
# graph_val = Macro.var(:graph_val, SnapFramework.Engine)
    slot = assigns[:state][:data][:slots][slot_name]
case slot do
{nil, _, _} ->
quote do
nil
end
{cmp, data, nil} ->
quote line: meta[:line] || 0 do
# unquote(graph_val) =
[
type: :component,
module: unquote(cmp),
data: unquote(data),
opts: unquote(opts)
]
end
{cmp, data, cmp_opts} ->
quote line: meta[:line] || 0 do
# unquote(graph_val) =
[
type: :component,
module: unquote(cmp),
data: unquote(data),
opts: unquote(cmp_opts)
]
end
_ ->
quote do
nil
end
end
end
# -----------------------------------------------
# render the slot component for unnamed outlet
# used typically to render a list of components
# -----------------------------------------------
def parse({:outlet, _meta, [opts]}, assigns) do
# graph_val = Macro.var(:graph_val, SnapFramework.Engine)
    # default to [] so an outlet without slots renders nothing instead of crashing
    slots = assigns[:state][:data][:slots] || []
Enum.reduce(slots, [], fn {:slot, slot}, acc ->
case slot do
{nil, _, _} ->
quote do
unquote(acc)
end
{cmp, data, nil} ->
quote do
# var!(cmp) = cmp
# unquote(cmp)(unquote(acc), unquote(data), unquote(opts))
List.insert_at(
unquote(acc),
length(unquote(acc)),
type: :component,
module: unquote(cmp),
data: unquote(data),
opts: unquote(opts)
)
end
{cmp, data, cmp_opts} ->
quote do
# var!(cmp) = cmp
# unquote(cmp)(unquote(acc), unquote(data), Vector2.add(unquote(opts), unquote(cmp_opts)))
List.insert_at(
unquote(acc),
length(unquote(acc)),
type: :component,
module: unquote(cmp),
data: unquote(data),
opts: unquote(cmp_opts)
)
end
_ ->
quote do
unquote(acc)
end
end
end)
end
def parse(ast, _assigns), do: ast
end
|
lib/engine/parser/outlet.ex
| 0.584983
| 0.453201
|
outlet.ex
|
starcoder
|
defmodule Circuits.SPI do
@moduledoc """
This module enables Elixir programs to interact with hardware that's connected
via a SPI bus.
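
  A typical session (a sketch; the bus name and payload bytes are
  device-specific assumptions):

  ```
  {:ok, bus} = Circuits.SPI.open("spidev0.0")
  {:ok, response} = Circuits.SPI.transfer(bus, <<0x9F, 0x00, 0x00>>)
  :ok = Circuits.SPI.close(bus)
  ```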
"""
alias Circuits.SPI.Nif
@typedoc """
SPI bus options. See `open/2`.
"""
@type spi_option ::
{:mode, 0..3}
| {:bits_per_word, 0..16}
| {:speed_hz, pos_integer()}
| {:delay_us, non_neg_integer()}
@typedoc """
SPI bus
Call `open/2` to obtain an SPI bus reference.
"""
@type spi_bus() :: reference()
@doc """
Open SPI channel
On success, returns a reference.
  Use the reference in subsequent calls to transfer SPI bus data.
Parameters:
* `bus_name` is the name of the bus (e.g., "spidev0.0")
* `opts` is a keyword list to configure the bus
SPI bus options include:
* `mode`: This specifies the clock polarity and phase to use. (0)
* `bits_per_word`: bits per word on the bus (8)
* `speed_hz`: bus speed (1000000)
  * `delay_us`: delay between transactions (10)
"""
@spec open(binary() | charlist(), [spi_option()]) :: {:ok, spi_bus()}
def open(bus_name, opts \\ []) do
mode = Keyword.get(opts, :mode, 0)
bits_per_word = Keyword.get(opts, :bits_per_word, 8)
speed_hz = Keyword.get(opts, :speed_hz, 1_000_000)
delay_us = Keyword.get(opts, :delay_us, 10)
Nif.open(to_charlist(bus_name), mode, bits_per_word, speed_hz, delay_us)
end
@doc """
Perform a SPI transfer. The `data` should be a binary containing the bytes to
send. Since SPI transfers simultaneously send and receive, the return value
will be a binary of the same length or an error.
"""
@spec transfer(spi_bus(), binary()) :: {:ok, binary()} | {:error, term()}
def transfer(spi_bus, data) do
Nif.transfer(spi_bus, data)
end
@doc """
Release any resources associated with the given file descriptor
"""
@spec close(spi_bus()) :: :ok
def close(spi_bus) do
Nif.close(spi_bus)
end
@doc """
Return a list of available SPI bus names. If nothing is returned,
it's possible that the kernel driver for that SPI bus is not enabled or the
kernel's device tree is not configured. On Raspbian, run `raspi-config` and
look in the advanced options.
```
iex> Circuits.SPI.bus_names
["spidev0.0", "spidev0.1"]
```
"""
@spec bus_names() :: [binary()]
def bus_names() do
Path.wildcard("/dev/spidev*")
|> Enum.map(fn p -> String.replace_prefix(p, "/dev/", "") end)
end
@doc """
Return info about the low level SPI interface
This may be helpful when debugging SPI issues.
"""
@spec info() :: map()
defdelegate info(), to: Nif
@doc """
Return the maximum transfer size in bytes
The number of bytes that can be sent and received at a time
may be capped by the low level SPI interface. For example,
the Linux `spidev` driver allocates its transfer buffer at
initialization based on the `bufsiz` parameter and rejects
requests that won't fit.
If you're sending large amounts of data over SPI, use this
function to determine how to split up large messages.
"""
@spec max_transfer_size() :: non_neg_integer()
defdelegate max_transfer_size(), to: Nif
defmodule :circuits_spi do
@moduledoc """
Provide an Erlang friendly interface to Circuits
Example Erlang code: circuits_spi:open("spidev0.1")
"""
defdelegate open(bus_name), to: Circuits.SPI
defdelegate open(bus_name, spi_opts), to: Circuits.SPI
defdelegate transfer(ref, data), to: Circuits.SPI
defdelegate close(ref), to: Circuits.SPI
end
end
|
lib/spi.ex
| 0.889496
| 0.696733
|
spi.ex
|
starcoder
|
defmodule Absinthe.Type.Enum.Value do
@moduledoc """
A possible value for an enum.
See `Absinthe.Type.Enum` and `Absinthe.Schema.Notation.value/1`.
"""
alias Absinthe.Type
@typedoc """
A defined enum value entry.
Generally defined using `Absinthe.Schema.Notation.value/2` as
part of a schema.
* `:name` - The name of the value. This is also the incoming, external
value that will be provided by query documents.
* `:description` - A nice description for introspection.
  * `:value` - The raw, internal value that `:name` maps to. This will be
    provided as the argument value to `resolve` functions.
* `:deprecation` - Deprecation information for a value, usually
set-up using the `Absinthe.Schema.Notation.deprecate/2` convenience
function.
"""
@type t :: %{name: binary, description: binary, value: any, deprecation: Type.Deprecation.t | nil, __reference__: Type.Reference.t}
defstruct name: nil, description: nil, value: nil, deprecation: nil, __reference__: nil
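  # Note: build/1 and build/2 run at schema compile time; they return the
  # quoted AST of a map literal (%{identifier => %Absinthe.Type.Enum.Value{}})
  # that the schema module splices into its own code, rather than building
  # the map at runtime.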
@spec build(Keyword.t) :: %{atom => Absinthe.Type.Enum.Value.t}
def build(raw_values) when is_list(raw_values) do
ast = for {identifier, value_attrs} <- normalize(raw_values) do
value_data = value_data(identifier, value_attrs)
value_ast = quote do: %Absinthe.Type.Enum.Value{unquote_splicing(value_data)}
{identifier, value_ast}
end
quote do: %{unquote_splicing(ast)}
end
def build(raw_values, key) when is_list(raw_values) do
ast = for {identifier, value_attrs} <- normalize(raw_values) do
value_data = value_data(identifier, value_attrs)
value_ast = quote do: %Absinthe.Type.Enum.Value{unquote_splicing(value_data)}
{value_data[key], value_ast}
end
quote do: %{unquote_splicing(ast)}
end
defp value_data(identifier, value_attrs) do
default_name = identifier
|> Atom.to_string
|> String.upcase
value_attrs
|> Keyword.put_new(:value, identifier)
    |> Keyword.put_new(:name, default_name)
|> Type.Deprecation.from_attribute
end
# Normalize shorthand lists of atoms to the keyword list that `values` expects
@spec normalize([atom] | [{atom, Keyword.t}]) :: [{atom, Keyword.t}]
defp normalize(raw) do
if Keyword.keyword?(raw) do
raw
else
raw |> Enum.map(&({&1, []}))
end
end
end
|
lib/absinthe/type/enum/value.ex
| 0.860984
| 0.561245
|
value.ex
|
starcoder
|
defmodule Excommerce.Accounts.Message do
@moduledoc """
A module for sending messages, by email or phone, to the user.
This module provides functions to be used with the Phauxth authentication
library when confirming users or handling password resets. It uses
Bamboo, with the LocalAdapter, which is a good development tool.
For tests, it uses a test adapter, which is configured in the
config/test.exs file.
For production, you will need to setup a different email adapter.
## Bamboo with a different adapter
Bamboo has adapters for Mailgun, Mailjet, Mandrill, Sendgrid, SMTP,
SparkPost, PostageApp, Postmark and Sendcloud.
There is also a LocalAdapter, which is great for local development.
See [Bamboo](https://github.com/thoughtbot/bamboo) for more information.
## Other email / phone library
If you do not want to use Bamboo, follow the instructions below:
1. Edit this file, using the email / phone library of your choice
  2. Remove the lib/excommerce/mailer.ex file
3. Remove the Bamboo entries in the config/config.exs and config/test.exs files
4. Remove bamboo from the deps section in the mix.exs file
"""
import Bamboo.Email
alias Excommerce.Mailer
@doc """
An email with a confirmation link in it.
"""
def confirm_request(address, key) do
prep_mail(address)
|> subject("Confirm your account")
|> text_body("Confirm your email here http://www.example.com/confirm?key=#{key}")
|> Mailer.deliver_now()
end
@doc """
An email with a link to reset the password.
"""
def reset_request(address, nil) do
prep_mail(address)
|> subject("Reset your password")
|> text_body(
"You requested a password reset, but no user is associated with the email you provided."
)
|> Mailer.deliver_now()
end
def reset_request(address, key) do
prep_mail(address)
|> subject("Reset your password")
|> text_body(
"Reset your password at http://www.example.com/password_resets/edit?key=#{key}"
)
|> Mailer.deliver_now()
end
@doc """
An email acknowledging that the account has been successfully confirmed.
"""
def confirm_success(address) do
prep_mail(address)
|> subject("Confirmed account")
|> text_body("Your account has been confirmed.")
|> Mailer.deliver_now()
end
@doc """
An email acknowledging that the password has been successfully reset.
"""
def reset_success(address) do
prep_mail(address)
|> subject("Password reset")
|> text_body("Your password has been reset.")
|> Mailer.deliver_now()
end
defp prep_mail(address) do
new_email()
|> to(address)
|> from("<EMAIL>")
end
end
|
lib/excommerce/accounts/message.ex
| 0.656218
| 0.475605
|
message.ex
|
starcoder
|
defmodule SenML do
@moduledoc """
Lightweight implementation of RFC 8428 Sensor Measurement Lists (SenML)
SenML Labels
+---------------+-------+------------+------------+------------+
| Name | Label | CBOR Label | JSON Type | XML Type |
+---------------+-------+------------+------------+------------+
| Base Name | bn | -2 | String | string |
| Base Time | bt | -3 | Number | double |
| Base Unit | bu | -4 | String | string |
| Base Value | bv | -5 | Number | double |
| Base Sum | bs | -6 | Number | double |
| Base Version | bver | -1 | Number | int |
| Name | n | 0 | String | string |
| Unit | u | 1 | String | string |
| Value | v | 2 | Number | double |
| String Value | vs | 3 | String | string |
| Boolean Value | vb | 4 | Boolean | boolean |
| Data Value | vd | 8 | String (*) | string (*) |
| Sum | s | 5 | Number | double |
| Time | t | 6 | Number | double |
| Update Time | ut | 7 | Number | double |
+---------------+-------+------------+------------+------------+
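
  ## Example (a sketch; the record values are illustrative)

      json = ~s([{"bn":"urn:dev:ow:10e2073a01080063:","n":"temp","u":"Cel","v":23.1}])
      [record] = SenML.decode(json)
      record.name #=> "temp"
      [record] |> SenML.encode() |> SenML.decode() #=> [record]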
"""
defstruct [
:base_name,
:base_time,
:base_unit,
:base_value,
:base_sum,
:base_version,
:name,
:unit,
:value,
:string_value,
:boolean_value,
:data_value,
:sum,
:time,
:update_time
]
@spec decode(binary()) :: list(%SenML{})
def decode(data) do
data
|> Jason.decode!()
|> Enum.map(
&%SenML{
base_name: &1["bn"],
base_time: &1["bt"],
base_unit: &1["bu"],
base_value: &1["bv"],
base_sum: &1["bs"],
base_version: &1["bver"],
name: &1["n"],
unit: &1["u"],
value: &1["v"],
string_value: &1["vs"],
boolean_value: &1["vb"],
data_value: &1["vd"],
sum: &1["s"],
time: &1["t"],
update_time: &1["ut"]
}
)
end
@spec encode(list(%SenML{})) :: binary()
def encode(list) do
list
|> Enum.map(&encode_senml(&1))
|> Jason.encode!()
end
@spec normalize(list(%SenML{})) :: list(%SenML{})
def normalize(list) do
SenML.Normalizer.normalize(list)
end
defp encode_senml(%SenML{} = senml) do
%{}
|> map_maybe_put("bn", senml.base_name)
|> map_maybe_put("bt", senml.base_time)
|> map_maybe_put("bu", senml.base_unit)
|> map_maybe_put("bv", senml.base_value)
|> map_maybe_put("bs", senml.base_sum)
|> map_maybe_put("bver", senml.base_version)
|> map_maybe_put("n", senml.name)
|> map_maybe_put("u", senml.unit)
|> map_maybe_put("v", senml.value)
|> map_maybe_put("vs", senml.string_value)
|> map_maybe_put("vb", senml.boolean_value)
|> map_maybe_put("vd", senml.data_value)
|> map_maybe_put("s", senml.sum)
|> map_maybe_put("t", senml.time)
|> map_maybe_put("ut", senml.update_time)
end
defp map_maybe_put(map, _key, nil), do: map
defp map_maybe_put(map, key, value), do: Map.put(map, key, value)
end
|
lib/senml.ex
| 0.733547
| 0.558026
|
senml.ex
|
starcoder
|
defmodule Dynamo.Template do
@moduledoc """
The template struct is responsible for keeping information about
templates to be rendered. It contains:
* `:key` - The key used to find the template;
* `:identifier` - An unique identifier for the template, like its
filesystem path. This information may be used later by the finder
to retrieve the template source
* `:format` - The template format
* `:finder` - The finder that found the template
* `:handler` - The handler responsible for compiling the template
* `:updated_at` - The last time the template was updated
* `:extra` - Used by the finder to put extra information about the template
Besides, the following fields are private to Dynamo:
* `:ref` - A reference for already compiled templates
"""
defstruct [key: nil, identifier: nil, format: nil,
handler: nil, updated_at: nil, extra: nil, ref: nil, finder: nil]
end
defexception Dynamo.TemplateNotFound, query: nil, paths: nil do
def message(exception) do
"Could not find template #{inspect exception.query} in any of the paths: #{inspect exception.paths}"
end
end
defmodule Dynamo.Templates do
@moduledoc false
@doc """
Finds the given template in any of the templates paths.
"""
  def find(%Dynamo.Template{} = query, _tmpl_paths) do
    query
  end
def find(query, tmpl_paths) do
query = normalize_query(query)
Enum.find_value(tmpl_paths, &Dynamo.Templates.Finder.find(&1, query))
end
defp normalize_query("/" <> query), do: query
defp normalize_query(query), do: query
@doc """
Finds the given template in any of the templates paths,
  raises `Dynamo.TemplateNotFound` if a template cannot be found.
"""
def find!(query, tmpl_paths) do
find(query, tmpl_paths) ||
raise Dynamo.TemplateNotFound, query: query, paths: tmpl_paths
end
@doc """
Finds a layout in the layouts view path for the given
query and template.
"""
def find_layout(layout, template, tmpl_paths) do
(format = template.format) ||
raise ArgumentError, message: "cannot find layout #{layout} for template #{template.identifier} since it has no format"
find Path.join("layouts", layout) <> ".#{format}", tmpl_paths
end
@doc """
Renders the given template with the given assigns.
Expects the template renderer server as first argument.
"""
def render(renderer, template, locals, assigns, prelude) do
Dynamo.Templates.Renderer.render(renderer, template, locals, assigns, prelude)
end
@doc """
Compiles the given set of `templates` into a module
  given by `name`. It returns the module binary.
"""
def compile_module(name, templates, locals, prelude) do
{ finders, _ } =
Enum.map_reduce templates, 0, fn(%Dynamo.Template{} = template, i) ->
template = %Dynamo.Template{template | ref: { name, :"dynamo_template_#{i}" }}
finder = quote do
def find(unquote(template.key)) do
unquote(Macro.escape(template))
end
end
{ finder, i + 1 }
end
{ templates, _ } =
Enum.map_reduce templates, 0, fn(%Dynamo.Template{} = template, i) ->
source = Dynamo.Templates.Finder.source(template.finder, template)
{ args, source } = template.handler.compile(template, source, locals)
template =
quote do
@file unquote(template.identifier)
def unquote(:"dynamo_template_#{i}")(unquote_splicing(args)) do
unquote(source)
end
end
{ template, i + 1 }
end
contents =
quote do
unquote(prelude.())
unquote(templates)
unquote(finders)
def find(_) do
nil
end
def all do
nil
end
def requires_precompilation? do
true
end
end
{ :module, _, binary, _ } = Module.create(name, contents, file: "(#{inspect name})")
binary
end
end
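# Usage sketch (the template key and finder list below are assumptions, not from this file):
#
#     template = Dynamo.Templates.find!("users/index.html", [MyApp.TemplateFinder])
#     layout   = Dynamo.Templates.find_layout("main", template, [MyApp.TemplateFinder])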
|
lib/dynamo/templates.ex
| 0.806243
| 0.580828
|
templates.ex
|
starcoder
|
defmodule SMPPEX.Pdu.ValidityPeriod do
@moduledoc ~S"""
Module for converting a validity period to a Unix timestamp.
It works with both the absolute and the relative validity period formats.
For relative validity periods, this module implements a naive representation of month and year date shifting
for the sake of simplicity.
"""
@type validity_period :: String.t()
@type timestamp :: non_neg_integer
@type timestamp_origin :: non_neg_integer
@spec to_timestamp(validity_period, timestamp_origin) ::
{:ok, timestamp} | {:error, :invalid_validity_period}
@doc ~S"""
Converts `t:validity_period/0` to Unix timestamp according to the SMPP specification.
In case of the relative format, this function uses a naive implementation of the month and date shifting.
To be clear, it takes one month as 30 days and one year as 12 months.
Callers of this function should decide how such time shifts are limited,
in line with the SMPP specification:
* A MC operator may choose to impose a limit on
relative time offsets, thus either rejecting a message that exceeds such a limit or reducing the
offset to the maximum relative time allowed.
Returns `{:ok, timestamp}` if conversion was successful.
Returns `{:error, :invalid_validity_period}` if `validity_period` is not consistent with the SMPP specification.
In case of internal errors, however, this function raises an exception.
## Example (relative format)
iex> timestamp_origin = ~N[2017-01-01 00:00:00] |>
...> DateTime.from_naive!("Etc/UTC") |>
...> DateTime.to_unix()
iex> timestamp = SMPPEX.Pdu.ValidityPeriod.to_timestamp!("000000000005000R", timestamp_origin)
iex> DateTime.from_unix!(timestamp) |> to_string()
"2017-01-01 00:00:05Z"
## Example (absolute format)
iex> {:ok, timestamp} = SMPPEX.Pdu.ValidityPeriod.to_timestamp("170610233429004+")
iex> DateTime.from_unix!(timestamp) |> to_string()
"2017-06-10 22:34:29Z"
"""
def to_timestamp(validity_period, timestamp_origin \\ System.system_time(:second))
def to_timestamp(
<<y::binary-size(2), m::binary-size(2), d::binary-size(2), h::binary-size(2),
mn::binary-size(2), s::binary-size(2), _t::binary-size(1), _nn::binary-size(2), "R">>,
timestamp_origin
) do
timestamp =
timestamp_origin +
String.to_integer(s) +
String.to_integer(mn) * 60 +
String.to_integer(h) * 3600 +
String.to_integer(d) * 24 * 3600 +
String.to_integer(m) * 30 * 24 * 3600 +
String.to_integer(y) * 12 * 30 * 24 * 3600
{:ok, timestamp}
end
def to_timestamp(
<<y::binary-size(2), m::binary-size(2), d::binary-size(2), h::binary-size(2),
mn::binary-size(2), s::binary-size(2), _t::binary-size(1), nn::binary-size(2),
p::binary-size(1)>>,
_
) do
{:ok, datetime} =
NaiveDateTime.new(
2000 + String.to_integer(y),
String.to_integer(m),
String.to_integer(d),
String.to_integer(h),
String.to_integer(mn),
String.to_integer(s)
)
seconds =
datetime
|> DateTime.from_naive!("Etc/UTC")
|> DateTime.to_unix()
# `nn` encodes the UTC offset in quarter hours per the SMPP specification
seconds_diff = String.to_integer(nn) * 15 * 60
timestamp =
case p do
"+" ->
seconds - seconds_diff
"-" ->
seconds + seconds_diff
end
{:ok, timestamp}
end
def to_timestamp(_, _) do
{:error, :invalid_validity_period}
end
@spec to_timestamp!(validity_period, timestamp_origin) :: timestamp
@doc """
Converts `t:validity_period/0` to Unix timestamp according to the SMPP specification.
The same as `to_timestamp/2` but raises an exception in case of error.
"""
def to_timestamp!(validity_period, timestamp_origin \\ System.system_time(:second)) do
case to_timestamp(validity_period, timestamp_origin) do
{:ok, value} ->
value
{:error, :invalid_validity_period} ->
raise ArgumentError, message: "Invalid validity period"
end
end
end
|
lib/smppex/pdu/validity_period.ex
| 0.90736
| 0.7586
|
validity_period.ex
|
starcoder
|
defmodule ClosedIntervals.Tree do
@moduledoc """
Functions to manipulate a tree of closed intervals.
Library users will often use the `ClosedIntervals` struct,
which contains a tree together with matching order and equality comparison functions.
This module contains utilities for direct manipulations on the tree structure,
many of which are reexported in `ClosedIntervals`.
"""
require Record
@doc """
This is the internal tree representation. It is not intended to be used publicly.
"""
Record.defrecord(:tree, [
:left,
:right,
:left_bound,
:right_bound,
:cut
])
@type t(data) ::
record(:tree,
left: nil | t(data),
right: nil | t(data),
left_bound: data,
right_bound: data,
cut: nil | data
)
@type comparison :: :lt | :eq | :gt
@doc """
Construct a tree from a sorted list of data.
See `ClosedIntervals.from/2`.
"""
def construct([x, y]) do
tree(
left_bound: x,
right_bound: y
)
end
def construct(sorted_list = [_, _ | _]) do
len = length(sorted_list)
middle = floor(len / 2)
cut = Enum.at(sorted_list, middle)
{left, right} = Enum.split(sorted_list, middle)
left = left ++ [cut]
left = construct(left)
right = construct(right)
tree(
left: left,
right: right,
left_bound: tree(left, :left_bound),
right_bound: tree(right, :right_bound),
cut: cut
)
end
@doc """
Create a tree with two leaves from the left and right bounds.
"""
@spec from_bounds({data, data}) :: t(data) when data: var
def from_bounds({left, right}) do
tree(
left_bound: left,
right_bound: right
)
end
@doc """
See `ClosedIntervals.from_leaf_intervals/1`.
"""
def from_leaf_intervals([leaf]) do
leaf
end
def from_leaf_intervals([
left = tree(left_bound: left_bound, right_bound: cut),
right = tree(left_bound: cut, right_bound: right_bound)
]) do
tree(
left: left,
right: right,
left_bound: left_bound,
right_bound: right_bound,
cut: cut
)
end
def from_leaf_intervals(leafs) do
len = length(leafs)
middle = round(len / 2)
{left, right} = Enum.split(leafs, middle)
left_right_bound = left |> List.last() |> right_bound()
right_left_bound = right |> List.first() |> left_bound()
if left_right_bound != right_left_bound do
raise ArgumentError, "Expected cut element between the middle two elements"
end
cut = left_right_bound
left = from_leaf_intervals(left)
right = from_leaf_intervals(right)
tree(
left: left,
right: right,
left_bound: tree(left, :left_bound),
right_bound: tree(right, :right_bound),
cut: cut
)
end
@doc """
See `ClosedIntervals.leaf_intervals/1`.
"""
def leaf_intervals(tree(cut: nil, left_bound: left_bound, right_bound: right_bound)) do
[{left_bound, right_bound}]
end
def leaf_intervals(tree(left: left, right: right)) do
[leaf_intervals(left), leaf_intervals(right)]
|> List.flatten()
end
@doc """
See `ClosedIntervals.get_all_intervals/2`.
"""
def get_all_intervals(tree, value, eq, order) do
get_all_intervals_by(tree, &mk_compare(value, &1, eq, order))
end
defp mk_compare(data1, data2, eq, order) do
cond do
eq.(data1, data2) -> :eq
order.(data1, data2) -> :lt
true -> :gt
end
end
@doc """
Get all intervals for which the given navigation function returns `:eq`.
The function `navigation/1` says whether a given position is less than, greater than,
or equal to the desired position.
"""
@spec get_all_intervals_by(t(data), (data -> comparison)) ::
[ClosedIntervals.interval(data)]
when data: var
def get_all_intervals_by(tree = tree(cut: nil), _navigation) do
[
{tree(tree, :left_bound), tree(tree, :right_bound)}
]
end
def get_all_intervals_by(tree = tree(), navigation) do
cut = tree(tree, :cut)
case navigation.(cut) do
:eq ->
[
get_all_intervals_by(tree(tree, :left), navigation),
get_all_intervals_by(tree(tree, :right), navigation)
]
:lt ->
get_all_intervals_by(tree(tree, :left), navigation)
:gt ->
get_all_intervals_by(tree(tree, :right), navigation)
end
end
@doc """
See `ClosedIntervals.to_list/1`.
"""
def to_list(tree = tree()) do
tree
|> leaf_intervals()
|> to_list1([])
|> Enum.reverse()
end
defp to_list1([{left, right}], acc) do
[right, left | acc]
end
defp to_list1([{left, _right} | rest], acc) do
to_list1(rest, [left | acc])
end
@doc """
See `ClosedIntervals.map/2`.
"""
@spec map(t(data1), (data1 -> data2)) :: t(data2) when data1: var, data2: var
def map(tree = tree(cut: nil), mapper) do
tree(left_bound: left_bound, right_bound: right_bound) = tree
tree(tree,
left_bound: mapper.(left_bound),
right_bound: mapper.(right_bound)
)
end
def map(tree = tree(), mapper) do
tree(
left: left,
right: right,
left_bound: left_bound,
right_bound: right_bound,
cut: cut
) = tree
tree(tree,
left: map(left, mapper),
right: map(right, mapper),
left_bound: mapper.(left_bound),
right_bound: mapper.(right_bound),
cut: mapper.(cut)
)
end
@doc """
See `ClosedIntervals.left_bound/1`.
"""
def left_bound(tree = tree()) do
tree(tree, :left_bound)
end
@doc """
See `ClosedIntervals.right_bound/1`.
"""
def right_bound(tree = tree()) do
tree(tree, :right_bound)
end
end
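# Usage sketch, hand-run against the functions above (`ClosedIntervals.from/2` is
# the usual public entry point):
tree = ClosedIntervals.Tree.construct([1, 3, 7, 10])
ClosedIntervals.Tree.leaf_intervals(tree) #=> [{1, 3}, {3, 7}, {7, 10}]
ClosedIntervals.Tree.to_list(tree) #=> [1, 3, 7, 10]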
|
lib/closed_intervals/tree.ex
| 0.901821
| 0.838018
|
tree.ex
|
starcoder
|
defmodule RulEx.Guards do
@moduledoc "Provide helper guards for use with RulEx."
@value_operands RulEx.Operands.value()
@variable_operands RulEx.Operands.variable()
@comparison_operands RulEx.Operands.comparison()
@reserved_operands RulEx.Operands.reserved()
@doc "Yield true if given expression is a valid `val` expression, false otherwise."
defguard is_val(expr)
when is_list(expr) and
length(expr) == 3 and
hd(expr) in @value_operands and
expr |> tl |> hd |> is_binary()
@doc "Yield true if given expression is a valid `var` expression, false otherwise."
defguard is_var(expr)
when is_list(expr) and
length(expr) in [3, 4] and
hd(expr) in @variable_operands and
expr |> tl |> hd |> is_binary()
@doc "Yield true if given expression is a valid `val` or `var` expression, false otherwise."
defguard is_val_or_var(expr) when is_val(expr) or is_var(expr)
@doc "Yield true if given operand is a valid comparison operand, false otherwise."
defguard is_comparison_operand(op) when op in @comparison_operands
@doc "Yield true if given expression is a valid comparison expression, false otherwise."
defguard is_comparison(expr)
when is_list(expr) and
length(expr) == 3 and
expr |> hd |> is_comparison_operand() and
expr |> tl |> hd |> is_val_or_var() and
expr |> tl |> tl |> hd |> is_val_or_var()
@doc "Yield true if given operand is reserved by RulEx, false otherwise."
defguard is_reserved_operand(op)
when op in @reserved_operands
@doc "Yield true if give operand is a reserved operand or a string standing for any custom operands, false otherwise."
defguard is_valid_operand(op) when is_reserved_operand(op) or is_binary(op)
@doc "Yield true if given Elixir values is truthy, i.e. not `nil` or `false`, false otherwise."
defguard is_truthy(value) when value not in [false, nil]
@doc "Yield true if given Elixir values is falsy, i.e. `nil` or `false`, false otherwise."
defguard is_falsy(value) when not is_truthy(value)
end
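# Usage sketch: import the guards and pattern match in client code. Treating "val"
# as a value operand is an assumption here; the real set comes from RulEx.Operands.value().
defmodule MyApp.Rules do
import RulEx.Guards
# Accepts only well-formed value expressions such as ["val", "number", 42]
def literal?(expr) when is_val(expr), do: true
def literal?(_expr), do: false
end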
|
lib/guards.ex
| 0.796094
| 0.531209
|
guards.ex
|
starcoder
|
defmodule Oban.Cron.Expression do
@moduledoc false
@type t :: %__MODULE__{
minutes: MapSet.t(),
hours: MapSet.t(),
days: MapSet.t(),
months: MapSet.t(),
weekdays: MapSet.t()
}
defstruct [:minutes, :hours, :days, :months, :weekdays]
@dow_map %{
"SUN" => "0",
"MON" => "1",
"TUE" => "2",
"WED" => "3",
"THU" => "4",
"FRI" => "5",
"SAT" => "6"
}
@mon_map %{
"JAN" => "1",
"FEB" => "2",
"MAR" => "3",
"APR" => "4",
"MAY" => "5",
"JUN" => "6",
"JUL" => "7",
"AUG" => "8",
"SEP" => "9",
"OCT" => "10",
"NOV" => "11",
"DEC" => "12"
}
@doc """
Evaluate whether a cron struct overlaps with the current date time.
"""
@spec now?(cron :: t(), datetime :: DateTime.t()) :: boolean()
def now?(%__MODULE__{} = cron, datetime \\ DateTime.utc_now()) do
cron
|> Map.from_struct()
|> Enum.all?(&included?(&1, datetime))
end
defp included?({_, :*}, _datetime), do: true
defp included?({:minutes, set}, datetime), do: MapSet.member?(set, datetime.minute)
defp included?({:hours, set}, datetime), do: MapSet.member?(set, datetime.hour)
defp included?({:days, set}, datetime), do: MapSet.member?(set, datetime.day)
defp included?({:months, set}, datetime), do: MapSet.member?(set, datetime.month)
defp included?({:weekdays, set}, datetime), do: MapSet.member?(set, day_of_week(datetime))
defp day_of_week(datetime) do
datetime
|> Date.day_of_week()
|> Integer.mod(7)
end
@doc """
Parses a crontab expression into a %Cron{} struct.
The parser can handle common expressions that use minutes, hours, days, months and weekdays,
along with ranges and steps. It also supports common extensions, also called nicknames.
Raises an `ArgumentError` if the expression cannot be parsed.
## Nicknames
- @yearly: Run once a year, "0 0 1 1 *".
- @annually: same as @yearly
- @monthly: Run once a month, "0 0 1 * *".
- @weekly: Run once a week, "0 0 * * 0".
- @daily: Run once a day, "0 0 * * *".
- @midnight: same as @daily
- @hourly: Run once an hour, "0 * * * *".
- @reboot: Run once at boot
## Examples
iex> parse!("@hourly")
%Cron{}
iex> parse!("0 * * * *")
%Cron{}
iex> parse!("60 * * * *")
** (ArgumentError)
"""
@spec parse!(input :: binary()) :: t()
def parse!("@annually"), do: parse!("0 0 1 1 *")
def parse!("@yearly"), do: parse!("0 0 1 1 *")
def parse!("@monthly"), do: parse!("0 0 1 * *")
def parse!("@weekly"), do: parse!("0 0 * * 0")
def parse!("@midnight"), do: parse!("0 0 * * *")
def parse!("@daily"), do: parse!("0 0 * * *")
def parse!("@hourly"), do: parse!("0 * * * *")
def parse!("@reboot") do
now = DateTime.utc_now()
[now.minute, now.hour, now.day, now.month, day_of_week(now)]
|> Enum.join(" ")
|> parse!()
end
def parse!(input) when is_binary(input) do
[mip, hrp, dap, mop, wdp] =
input
|> String.trim()
|> String.split(~r/\s+/, parts: 5)
%__MODULE__{
minutes: parse_field(mip, 0..59),
hours: parse_field(hrp, 0..23),
days: parse_field(dap, 1..31),
months: mop |> trans_field(@mon_map) |> parse_field(1..12),
weekdays: wdp |> trans_field(@dow_map) |> parse_field(0..6)
}
end
defp parse_field(field, range) do
range_set = MapSet.new(range)
parsed =
field
|> String.split(~r/\s*,\s*/)
|> Enum.flat_map(&parse_part(&1, range))
|> MapSet.new()
unless MapSet.subset?(parsed, range_set) do
raise ArgumentError, "expression field #{field} is out of range #{inspect(range)}"
end
parsed
end
defp trans_field(field, map) do
Enum.reduce(map, field, fn {val, rep}, acc -> String.replace(acc, val, rep) end)
end
defp parse_part(part, range) do
cond do
part == "*" -> range
part =~ ~r/^\d+$/ -> parse_literal(part)
part =~ ~r/^\*\/[1-9]\d?$/ -> parse_step(part, range)
part =~ ~r/^\d+(\-\d+)?\/[1-9]\d?$/ -> parse_range_step(part, range)
part =~ ~r/^\d+\-\d+$/ -> parse_range(part, range)
true -> raise ArgumentError, "unrecognized cron expression: #{part}"
end
end
defp parse_literal(part) do
part
|> String.to_integer()
|> List.wrap()
end
defp parse_step(part, range) do
step =
part
|> String.replace_leading("*/", "")
|> String.to_integer()
Enum.take_every(range, step)
end
defp parse_range_step(part, max_range) do
[range, step] = String.split(part, "/")
parse_step(step, parse_range(range, max_range))
end
defp parse_range(part, max_range) do
case String.split(part, "-") do
[rall] ->
String.to_integer(rall)..Enum.max(max_range)
[rmin, rmax] ->
String.to_integer(rmin)..String.to_integer(rmax)
end
end
end
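# Usage sketch, hand-run against the parser above:
cron = Oban.Cron.Expression.parse!("*/15 8-17 * * MON-FRI")
# True only at minutes 0/15/30/45, between 08:00 and 17:59 UTC, Monday to Friday:
Oban.Cron.Expression.now?(cron, DateTime.utc_now())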
|
lib/oban/cron/expression.ex
| 0.834542
| 0.421611
|
expression.ex
|
starcoder
|
defmodule Saxy.SimpleForm do
@moduledoc ~S"""
Provides functions to parse a XML document to
[simple-form](http://erlang.org/doc/man/xmerl.html#export_simple-3) data structure.
## Data structure
Simple form is a basic representation of the parsed XML document. It contains a root
element, and all elements are in the following format:
```
element = {tag_name, attributes, content}
content = (element | binary)*
```
See "Types" section for more information.
"""
@doc """
Parse given string into simple form.
## Options
* `:expand_entity` - specifies how external entity references should be handled. The three supported strategies are:
* `:keep` - keep the original binary, for example `Orange ®` will be expanded to `"Orange ®"`, this is the default strategy.
* `:skip` - skip the original binary, for example `Orange ®` will be expanded to `"Orange "`.
* `{mod, fun, args}` - take the applied result of the specified MFA.
## Examples
Given this XML document.
iex> xml = \"\"\"
...> <?xml version="1.0" encoding="utf-8" ?>
...> <menu>
...> <movie url="https://www.imdb.com/title/tt0120338/" id="tt0120338">
...> <name>Titanic</name>
...> <characters>Jack & Rose</characters>
...> </movie>
...> <movie url="https://www.imdb.com/title/tt0109830/" id="tt0109830">
...> <name>Forest Gump</name>
...> <characters>Forest & Jenny</characters>
...> </movie>
...> </menu>
...> \"\"\"
iex> Saxy.SimpleForm.parse_string(xml)
{:ok,
{"menu", [],
[
"\\n ",
{
"movie",
[
{"url", "https://www.imdb.com/title/tt0120338/"},
{"id", "tt0120338"}
],
[
"\\n ",
{"name", [], ["Titanic"]},
"\\n ",
{"characters", [], ["Jack & Rose"]},
"\\n "]
},
"\\n ",
{
"movie",
[
{"url", "https://www.imdb.com/title/tt0109830/"},
{"id", "tt0109830"}
],
[
"\\n ",
{"name", [], ["Forest Gump"]},
"\\n ",
{"characters", [], ["Forest & Jenny"]},
"\\n "
]
},
"\\n"
]}}
"""
@type tag_name() :: String.t()
@type attributes() :: [{name :: String.t(), value :: String.t()}]
@type content() :: [String.t() | Saxy.SimpleForm.t()]
@type t() :: {tag_name(), attributes(), content()}
@spec parse_string(data :: binary, options :: Keyword.t()) ::
{:ok, Saxy.SimpleForm.t()} | {:error, exception :: Saxy.ParseError.t()}
def parse_string(data, options \\ []) when is_binary(data) do
case Saxy.parse_string(data, __MODULE__.Handler, {[], options}, options) do
{:ok, {[document], _options}} ->
{:ok, document}
{:error, _reason} = error ->
error
end
end
end
|
lib/saxy/simple_form.ex
| 0.893988
| 0.847842
|
simple_form.ex
|
starcoder
|
defmodule Domo.TypeEnsurerFactory.Generator.MatchFunRegistry.Tuples do
@moduledoc false
alias Domo.TypeEnsurerFactory.Precondition
alias Domo.TypeEnsurerFactory.Generator.TypeSpec
def tuple_spec?(type_spec_precond) do
{type_spec, _precond} = TypeSpec.split_spec_precond(type_spec_precond)
case type_spec do
{:{}, _, [_element_spec]} -> true
tuple when tuple_size(tuple) == 2 -> true
{:{}, _, [_ | _]} -> true
_ -> false
end
end
def map_value_type(type_spec_precond, fun) do
{type_spec, precond} = TypeSpec.split_spec_precond(type_spec_precond)
{case type_spec do
{:{}, context, element_specs} -> {:{}, context, Enum.map(element_specs, &fun.(&1))}
{elem1, elem2} -> {fun.(elem1), fun.(elem2)}
end, precond}
end
def match_spec_function_quoted(type_spec_precond) do
{type_spec, precond} = TypeSpec.split_spec_precond(type_spec_precond)
element_spec_preconds =
case type_spec do
{:{}, _, element_spec_preconds} -> element_spec_preconds
{elem1, elem2} -> [elem1, elem2]
end
elem_vars_quoted = Enum.map(1..length(element_spec_preconds), &Macro.var(String.to_atom("el#{&1}"), __MODULE__))
with_expectations_quoted =
element_spec_preconds
|> Enum.reduce({[], [], []}, &append_match_spec_attributes_to_lists(&1, &2))
|> reverse_in_tuple()
|> Tuple.append(elem_vars_quoted)
|> Tuple.to_list()
|> Enum.zip()
|> Enum.with_index()
|> Enum.map(fn {{el_spec_atom, el_precond_atom, el_spec_string, var}, idx} ->
quote do
{unquote(idx), :ok} <-
{unquote(idx), do_match_spec({unquote(el_spec_atom), unquote(el_precond_atom)}, unquote(var), unquote(el_spec_string))}
end
end)
else_block_quoted =
quote do
{idx, {:error, element_value, messages}} ->
message = {
"The element at index %{idx} has value %{element_value} that is invalid.",
[idx: idx, element_value: inspect(element_value)]
}
{:error, value, [message | messages]}
end
type_spec_atom = TypeSpec.to_atom(type_spec)
precond_atom = if precond, do: Precondition.to_atom(precond)
spec_string_var = if precond, do: quote(do: spec_string), else: quote(do: _spec_string)
match_spec_quoted =
quote do
def do_match_spec({unquote(type_spec_atom), unquote(precond_atom)}, {unquote_splicing(elem_vars_quoted)} = value, unquote(spec_string_var)) do
# credo:disable-for-next-line
with unquote_splicing(with_expectations_quoted) do
unquote(Precondition.ok_or_precond_call_quoted(precond, quote(do: spec_string), quote(do: value)))
else
unquote(else_block_quoted)
end
end
end
{match_spec_quoted, element_spec_preconds}
end
defp append_match_spec_attributes_to_lists(spec_precond, {spec_atoms, precond_atoms, spec_strings}) do
{spec_atom, precond_atom, spec_string} = TypeSpec.match_spec_attributes(spec_precond)
{
[spec_atom | spec_atoms],
[precond_atom | precond_atoms],
[spec_string | spec_strings]
}
end
defp reverse_in_tuple({list1, list2, list3}) do
{Enum.reverse(list1), Enum.reverse(list2), Enum.reverse(list3)}
end
end
|
lib/domo/type_ensurer_factory/generator/match_fun_registry/tuples.ex
| 0.565539
| 0.427785
|
tuples.ex
|
starcoder
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule Adept.Svg do
require Logger
@moduledoc """
A tiny and fast library to compile and render inline SVGs for Phoenix templates and live views.
SVG files are images formatted as very simple, and usually small, text
files. It is faster, and recommended, to include the svg data inline
in your web pages instead of asking the browser to make additional
calls to servers before it can render your pages.
`adept_svg` renders your svg files as quickly as possible. To do this, it reads
the svg files at compile-time and provides runtime access through a term
stored in your beamfile.
If you use `nimble_publisher`, this should be a familiar concept.
To use `adept_svg`, you create a module in your project that wraps it, providing
a compile-time place to build the library and runtime access to it. It also happens
to make your template svg rendering code very simple.
You do __not__ need to store your svg files in the "assets/static" directory. Those files
are copied into your application via a file based mechanism, whereas `adept_svg` compiles
them in directly. I recommend simply using "assets/svg".
Each `*.svg` file must contain a single valid `<svg></svg>` tag set with data as appropriate.
Anything before the `<svg>` tag or after the `</svg>` is treated as comment and stripped
from the text during compilation.
## Example wrapper module
defmodule MyAppWeb.Svg do
# Build the library at compile time
@library Adept.Svg.compile( "assets/svg" )
# Accesses the library at run time
defp library(), do: @library
# Render an svg from the library
def render( key, opts \\ [] ) do
Adept.Svg.render( library(), key, opts )
end
end
To use the library, you would `alias MyAppWeb.Svg` in a controller, live_view or
your main app module. This allows your template code to call Svg.render directly.
## Example use in a template
<%= Svg.render( "heroicons/user", class: "h-5 w-5 inline" ) %>
### Live reloading
If you are using Phoenix, you can enable live reloading by simply telling Phoenix to watch the svgs directory.
Open up "config/dev.exs", search for `live_reload:` and add this to the list of patterns:
```elixir
live_reload: [
patterns: [
...,
~r"assets/svg/*/.*(svg)$"
]
]
```
"""
defmodule Error do
@moduledoc false
defexception message: nil, svg: nil
end
# --------------------------------------------------------
@doc """
Compile a folder of `*.svg` files into a library you can render from.
The folder and its subfolders will be traversed and all valid `*.svg` files will
be added to the library. Each svg will be added to the library with a key that is
relative path of the svg file, minus the .svg part. For example, if you compile
the folder "assets/svg" and it finds a file with the path "assets/svg/heroicons/calendar.svg",
then the key for that svg is `"heroicons/calendar"` in the library.
## Usage
The best way to use Adept.Svg is to create a new module in your project that wraps
it, providing storage for the generated library term. This also allows you to customize
naming, rendering or compiling as required.
## Example
defmodule MyAppWeb.Svg do
# Build the library at compile time
@library Adept.Svg.compile( "assets/svg" )
# Accesses the library at run time
defp library(), do: @library
# Render an svg from the library
def render( key, opts \\ [] ) do
Adept.Svg.render( library(), key, opts )
end
end
Note that @library is accessed through a function. The library could become large,
so you want to wrap it with a function to ensure that it is only stored as a term
in your beam file once.
"""
@spec compile(map(), String.t()) :: map()
def compile(%{} = library \\ %{}, svg_root) when is_bitstring(svg_root) do
svg_root
|> Kernel.<>("/**/*.svg")
|> Path.wildcard()
|> Enum.reduce(library, fn path, acc ->
with {:ok, key, svg} <- read_svg(path, svg_root),
:ok <- unique_key(acc, key, path) do
Map.put(acc, key, svg <> "</svg>")
else
{:file_error, err, path} ->
raise %Error{message: "SVG file #{inspect(path)} is invalid, err: #{err}", svg: path}
{:duplicate, key, path} ->
# keep the first svg; a later duplicate only logs a warning
Logger.warn("SVG file: #{path} duplicates existing svg: #{key}, keeping the first")
acc
end
end)
end
defp read_svg(path, root) do
with {:ok, svg} <- File.read(path),
true <- String.valid?(svg),
[_, svg] <- String.split(svg, "<svg"),
[svg, _] <- String.split(svg, "</svg>") do
{
:ok,
# make the key
path
|> String.trim(root)
|> String.trim("/")
|> String.trim_trailing(".svg"),
svg
}
else
err -> {:file_error, err, path}
end
end
defp unique_key(library, key, path) do
case Map.fetch(library, key) do
{:ok, _} -> {:duplicate, key, path}
_ -> :ok
end
end
# --------------------------------------------------------
@doc """
Renders an svg into a safe string that can be inserted directly into a Phoenix template.
The named svg must be in the provided library, which should be build using the compile function.
_Optional_: pass in a keyword list of attributes to insert into the svg tag. This can be
used to add `class="something"` tag attributes, phoenix directives such as `phx-click`, or
even alpine directives such as `@click="some action"`. Note that key names containing
the underscore character `"_"` will be converted to the hyphen `"-"` character.
You don't normally call `Adept.Svg.render()` directly, except in your wrapper module. Instead,
you would `alias MyAppWeb.Svg` in a controller, live view or
your main app module. This allows your template code to call Svg.render directly, which
is simple and looks nice.
The following examples all use an aliased `MyAppWeb.Svg`, which wraps `Adept.Svg`.
## Example use in a template
<%= Svg.render( "heroicons/menu" ) %>
<%= Svg.render( "heroicons/user", class: "h-5 w-5 inline" ) %>
## Other examples
Without attributes:
Svg.render( "heroicons/menu" )
{:safe, "<svg xmlns= ... </svg>"}
With options:
Svg.render( "heroicons/menu", class: "h-5 w-5" )
{:safe, "<svg class=\"h-5 w-5\" xmlns= ... </svg>"}
Svg.render( "heroicons/menu", phx_click: "action" )
{:safe, "<svg phx-click=\"action\" xmlns= ... </svg>"}
Svg.render( "heroicons/menu", "@click": "alpine_action" )
{:safe, "<svg @click=\"alpine_action\" xmlns= ... </svg>"}
"""
@spec render(map(), String.t(), list()) :: String.t()
def render(%{} = library, key, attrs \\ []) do
case Map.fetch(library, key) do
{:ok, svg} -> {:safe, "<svg" <> render_attrs(attrs) <> svg}
_ -> raise %Error{message: "SVG #{inspect(key)} not found", svg: key}
end
end
# --------------------------------------------------------
# transform an opts list into a string of tag options
defp render_attrs(attrs), do: do_render_attrs(attrs, "")
defp do_render_attrs([], acc), do: acc
defp do_render_attrs([{key, value} | tail], acc) do
key = to_string(key) |> String.replace("_", "-")
do_render_attrs(tail, "#{acc} #{key}=#{inspect(value)}")
end
end
|
lib/adept_svg.ex
| 0.914353
| 0.66884
|
adept_svg.ex
|
starcoder
|
defmodule RecurringEvents.Date do
@moduledoc """
Helper module responsible for common date manipulations.
"""
@time {0, 0, 0}
@week_days [
:monday,
:tuesday,
:wednesday,
:thursday,
:friday,
:saturday,
:sunday
]
@doc """
Shifts date by `:days`, `:weeks`, `:months` and `:years`
Optional param `opts` accepts the following options:
- `:return_invalid` - set to `true` to return the invalid dates as is,
`false` to return the last day of the month instead, default value is `false`
# Example
iex> RecurringEvents.Date.shift_date(~D[2011-02-04], 4, :days)
~D[2011-02-08]
iex> RecurringEvents.Date.shift_date(~D[2011-02-04], 2, :years)
~D[2013-02-04]
iex> RecurringEvents.Date.shift_date(~D[2011-10-31], 1, :months, return_invalid: true)
%Date{year: 2011, month: 11, day: 31}
"""
def shift_date(date, count, period, opts \\ [])
def shift_date(date, count, period, opts)
when period == :hours or period == :minutes or period == :seconds do
{
{new_year, new_month, new_day},
{new_hour, new_minute, new_second}
} = shift_time(date, count, period, opts)
%{
date
| year: new_year,
month: new_month,
day: new_day,
hour: new_hour,
minute: new_minute,
second: new_second
}
end
def shift_date(%{year: year, month: month, day: day} = date, count, period, opts) do
{new_year, new_month, new_day} = shift_date({year, month, day}, count, period, opts)
%{date | year: new_year, month: new_month, day: new_day}
end
def shift_date(date, 0, _, _), do: date
def shift_date({_year, _month, _day} = date, count, :days, _opts) do
date
|> :calendar.date_to_gregorian_days()
|> Kernel.+(count)
|> :calendar.gregorian_days_to_date()
end
def shift_date({_year, _month, _day} = date, count, :weeks, opts) do
shift_date(date, count * 7, :days, opts)
end
def shift_date({year, month, day}, count, :months, opts) do
months = year * 12 + (month - 1) + count
new_year = div(months, 12)
new_month = rem(months, 12) + 1
if opts[:return_invalid] do
{new_year, new_month, day}
else
last_day = :calendar.last_day_of_the_month(new_year, new_month)
new_day = min(day, last_day)
{new_year, new_month, new_day}
end
end
def shift_date({year, month, day}, count, :years, _opts) do
new_year = year + count
last_day = :calendar.last_day_of_the_month(new_year, month)
new_day = min(day, last_day)
{new_year, month, new_day}
end
# defp shift_time(%{hour: hour, minute: minute, second: second} = date, count, period, opts) do
# {new_hour, new_minute, new_second} = shift_date({hour, minute, second}, count, period)
# %{date | hour: new_hour, minute: new_minute, second: new_second}
# end
defp shift_time(
%{year: _, month: _, day: _, hour: _, minute: _, second: _} = date,
count,
period,
opts
) do
shift_time(to_erl_datetime(date), count, period, opts)
end
defp shift_time(datetime, 0, _, _), do: datetime
defp shift_time({date, {hour, minute, second}}, count, :hours, opts) do
days = div(hour + count, 24)
new_hour = rem(hour + count, 24)
{shift_date(date, days, :days, opts), {new_hour, minute, second}}
end
defp shift_time({date, {_, minute, second} = time}, count, :minutes, opts) do
hours = div(minute + count, 60)
new_minute = rem(minute + count, 60)
{new_date, {new_hour, _, _}} = shift_time({date, time}, hours, :hours, opts)
{new_date, {new_hour, new_minute, second}}
end
defp shift_time({date, {_, _, second} = time}, count, :seconds, opts) do
minutes = div(second + count, 60)
new_second = rem(second + count, 60)
{new_date, {new_hour, new_minute, _}} = shift_time({date, time}, minutes, :minutes, opts)
{new_date, {new_hour, new_minute, new_second}}
end
@doc """
Returns the last day of the month for the provided date.
# Example
iex> RecurringEvents.Date.last_day_of_the_month(~D[2017-02-04])
28
"""
def last_day_of_the_month(%{year: year, month: month}) do
:calendar.last_day_of_the_month(year, month)
end
def last_day_of_the_month({year, month, _day}) do
:calendar.last_day_of_the_month(year, month)
end
@doc """
Returns the week day of the provided date
# Example
iex> RecurringEvents.Date.week_day(~D[2017-02-04])
:saturday
"""
def week_day(%{year: year, month: month, day: day}) do
week_day({year, month, day})
end
def week_day({_year, _month, _day} = date) do
@week_days |> Enum.at(:calendar.day_of_the_week(date) - 1)
end
@doc """
Returns the numbered week day of the provided date
# Example
iex> RecurringEvents.Date.numbered_week_day(~D[2017-02-04], :month)
{1, :saturday}
iex> RecurringEvents.Date.numbered_week_day(~D[2017-02-04], :year, :backward)
{-48, :saturday}
"""
def numbered_week_day(date, period \\ :month, order \\ :foreward)
def numbered_week_day(%{year: year, month: month, day: day}, period, order) do
numbered_week_day({year, month, day}, period, order)
end
def numbered_week_day({_year, _month, day} = date, :month, :foreward) do
day_of_the_week = week_day(date)
count = div(day - 1, 7) + 1
{count, day_of_the_week}
end
def numbered_week_day({_year, _month, day} = date, :month, :backward) do
day_of_the_week = week_day(date)
last_day = last_day_of_the_month(date)
count = div(last_day - day, 7) + 1
{-count, day_of_the_week}
end
def numbered_week_day({_year, _month, _day} = date, :year, :foreward) do
day_of_the_week = week_day(date)
count = div(day_of_the_year(date) - 1, 7) + 1
{count, day_of_the_week}
end
def numbered_week_day({year, _month, _day} = date, :year, :backward) do
day_of_the_week = week_day(date)
last_day = if(:calendar.is_leap_year(year), do: 366, else: 365)
count = div(last_day - day_of_the_year(date), 7) + 1
{-count, day_of_the_week}
end
@doc """
Returns the week number of the provided date.
A minimum of 4 days of the week is required in the first week; a `:week_start`
option can be provided.
# Example
iex> RecurringEvents.Date.week_number(~D[2017-01-05])
1
iex> RecurringEvents.Date.week_number(~D[2017-01-05], reversed: true)
-52
"""
def week_number(date, options \\ [])
def week_number(%{year: year, month: month, day: day}, options) do
week_number({year, month, day}, options)
end
def week_number({year, _month, _day} = date, reversed: false, week_start: week_start) do
year_start_day = week_day({year, 1, 1})
diff = week_day_diff(year_start_day, week_start)
shift_week = if(diff < 4, do: -1, else: 0)
max_weeks = weeks_count(year, week_start)
week_number = div(day_of_the_year(date) - 1 - diff + 7, 7) + 1 + shift_week
cond do
week_number == 0 -> weeks_count(year - 1, week_start)
max_weeks < week_number -> week_number - max_weeks
true -> week_number
end
end
def week_number({year, month, _day} = date, reversed: true, week_start: week_start) do
number = week_number(date, reversed: false, week_start: week_start)
cond do
month == 1 and number > 25 -> number - 1 - weeks_count(year - 1, week_start)
month == 12 and number < 25 -> number - 1 - weeks_count(year + 1, week_start)
true -> number - 1 - weeks_count(year, week_start)
end
end
def week_number(date, options) do
reversed = Keyword.get(options, :reversed, false)
week_start = Keyword.get(options, :week_start, :monday)
week_number(date, reversed: reversed, week_start: week_start)
end
defp weeks_count(year, week_start) do
year_start_day = week_day({year, 1, 1})
diff = week_day_diff(year_start_day, week_start)
has_53 = diff == 4 or (diff == 5 and :calendar.is_leap_year(year))
if(has_53, do: 53, else: 52)
end
defp week_day_diff(day1, day2) when is_atom(day1) and is_atom(day2) do
week_day_diff(
Enum.find_index(@week_days, &(day1 == &1)),
Enum.find_index(@week_days, &(day2 == &1))
)
end
defp week_day_diff(day1_no, day2_no) when day1_no < day2_no do
day2_no - day1_no
end
defp week_day_diff(day1_no, day2_no) when day1_no > day2_no do
day2_no - day1_no + 7
end
defp week_day_diff(day1_no, day2_no) when day1_no == day2_no, do: 0
@doc """
Returns the day of the year for the provided date
# Example
iex> RecurringEvents.Date.day_of_the_year(~D[2017-02-04])
35
"""
def day_of_the_year(%{year: year, month: month, day: day}) do
day_of_the_year({year, month, day})
end
def day_of_the_year({year, _month, _day} = date) do
{days, _} = :calendar.time_difference({{year, 1, 1}, @time}, {date, @time})
days + 1
end
@doc """
Shifts week days
# Example
iex> RecurringEvents.Date.shift_week_day(:monday, -3)
:friday
"""
def shift_week_day(day, shift) do
day_no =
@week_days
|> Enum.find_index(fn d -> d == day end)
|> Kernel.+(shift)
|> rem(7)
Enum.at(@week_days, day_no)
end
@doc """
Returns next day of the week
# Example
iex> RecurringEvents.Date.next_week_day(:friday)
:saturday
"""
def next_week_day(day) do
shift_week_day(day, 1)
end
@doc """
Returns previous day of the week
# Example
iex> RecurringEvents.Date.prev_week_day(:wednesday)
:tuesday
"""
def prev_week_day(day) do
shift_week_day(day, -1)
end
@doc """
Compares two dates or datetimes (it ignores tz)
# Example
iex> RecurringEvents.Date.compare(~D[2017-02-05], ~D[2017-02-01])
:gt
iex> RecurringEvents.Date.compare(~D[2017-02-01], ~D[2017-02-05])
:lt
iex> RecurringEvents.Date.compare(~D[2017-02-05], ~D[2017-02-05])
:eq
iex> RecurringEvents.Date.compare(~N[2017-02-05 12:00:00],
...> ~N[2017-02-05 18:21:11])
:lt
"""
def compare(
%{
year: y1,
month: m1,
day: d1,
hour: h1,
minute: i1,
second: s1
},
%{
year: y2,
month: m2,
day: d2,
hour: h2,
minute: i2,
second: s2
}
) do
case compare({y1, m1, d1}, {y2, m2, d2}) do
:eq -> compare({h1, i1, s1}, {h2, i2, s2})
ltgt -> ltgt
end
end
def compare(%{year: y1, month: m1, day: d1}, %{year: y2, month: m2, day: d2}) do
compare({y1, m1, d1}, {y2, m2, d2})
end
def compare({y1, m1, d1}, {y2, m2, d2}) do
cond do
y1 == y2 and m1 == m2 and d1 == d2 ->
:eq
y1 > y2 or (y1 == y2 and m1 > m2) or (y1 == y2 and m1 == m2 and d1 > d2) ->
:gt
true ->
:lt
end
end
defp to_erl_datetime(date) do
{to_erl_date(date), to_erl_time(date)}
end
defp to_erl_date(%{year: year, month: month, day: day}) do
{year, month, day}
end
defp to_erl_time(%{hour: hour, minute: minute, second: second}) do
{hour, minute, second}
end
end
|
lib/recurring_events/date.ex
| 0.884853
| 0.598723
|
date.ex
|
starcoder
|
defmodule GitGud.Maintainer do
@moduledoc """
Repository maintainer schema and helper functions.
A `GitGud.Maintainer` is used to grant `GitGud.Repo` permissions to a given `GitGud.User`.
Each repository maintainer also has a permission defining which actions he is able to perform
on the repository. Following permissions are available:
* `:read` -- can read and clone the repository.
* `:write` -- can read, clone and push to the repository.
* `:admin` -- can read, clone, push and administrate the repository.
By default, a newly created repository maintainer has `:read` permission.
"""
use Ecto.Schema
alias GitGud.DB
alias GitGud.User
alias GitGud.Repo
import Ecto.Query, only: [from: 2]
import Ecto.Changeset
schema "maintainers" do
belongs_to :user, User
belongs_to :repo, Repo
field :permission, :string, default: "read"
timestamps()
end
@type t :: %__MODULE__{
user_id: pos_integer,
user: User.t,
repo_id: pos_integer,
repo: Repo.t,
permission: binary,
inserted_at: NaiveDateTime.t,
updated_at: NaiveDateTime.t
}
@doc """
Updates the `permission` of the given `maintainer`.
```elixir
{:ok, maintainer} = GitGud.Maintainer.update_permission(maintainer, :write)
```
This function validates the given `permission` using `changeset/2`.
"""
@spec update_permission(t, binary) :: {:ok, t} | {:error, Ecto.Changeset.t}
def update_permission(%__MODULE__{} = maintainer, permission) do
DB.update(changeset(maintainer, %{permission: permission}))
end
@doc """
Updates the `permission` of the given `user` for the given `repo`.
```elixir
{:ok, maintainer} = GitGud.Maintainer.update_permission(repo, user, :write)
```
This function validates the given `permission` using `changeset/2`.
"""
@spec update_permission(Repo.t, User.t, binary) :: {:ok, t} | :error
def update_permission(%Repo{id: repo_id} = _repo, %User{id: user_id} = _user, permission) do
query = from(m in __MODULE__, where: m.repo_id == ^repo_id and m.user_id == ^user_id)
case DB.update_all(query, [set: [permission: permission]], returning: true) do
{1, [maintainer]} -> {:ok, maintainer}
{0, []} -> :error
end
end
@doc """
Similar to `update_permission/2`, but raises an `Ecto.InvalidChangesetError` if an error occurs.
"""
@spec update_permission!(t, binary) :: t
def update_permission!(%__MODULE__{} = maintainer, permission) do
DB.update!(changeset(maintainer, %{permission: permission}))
end
@doc """
Similar to `update_permission/3`, but raises an `Ecto.NoResultsError` if an error occurs.
"""
@spec update_permission!(Repo.t, User.t, binary) :: t
def update_permission!(%Repo{} = repo, %User{} = user, permission) do
case update_permission(repo, user, permission) do
{:ok, maintainer} -> maintainer
:error -> raise Ecto.NoResultsError
end
end
@doc """
Deletes the given `maintainer`.
```elixir
{:ok, maintainer} = GitGud.Maintainer.delete(maintainer)
```
"""
@spec delete(t) :: {:ok, t} | {:error, Ecto.Changeset.t}
def delete(%__MODULE__{} = maintainer) do
DB.delete(maintainer)
end
@doc """
Similar to `delete/1`, but raises an `Ecto.InvalidChangesetError` if an error occurs.
"""
@spec delete!(t) :: t
def delete!(%__MODULE__{} = maintainer) do
DB.delete!(maintainer)
end
@doc """
Returns a changeset for the given `params`.
"""
@spec changeset(t, map) :: Ecto.Changeset.t
def changeset(%__MODULE__{} = maintainer, params \\ %{}) do
maintainer
|> cast(params, [:user_id, :permission])
|> validate_required([:user_id])
|> unique_constraint(:user_id, name: "maintainers_user_id_repo_id_index")
|> validate_inclusion(:permission, ["read", "write", "admin"])
end
end
|
apps/gitgud/lib/gitgud/schemas/maintainer.ex
| 0.836688
| 0.792785
|
maintainer.ex
|
starcoder
|
defmodule Ox.LeftistHeap do
defstruct heap: nil, leq: nil
def new(leq) when is_function(leq, 2) do
%__MODULE__{leq: leq}
end
def new(list, leq) when is_list(list) do
list
|> Enum.reduce(new(leq), fn val, acc ->
merge(new(val, leq), acc)
end)
end
def new(el, leq) do
new(leq) |> insert(el)
end
def rank(%__MODULE__{heap: nil}), do: 0
def rank(%__MODULE__{heap: {r, _, _, _}}), do: r
def empty?(%__MODULE__{heap: nil}), do: true
def empty?(_), do: false
def insert(%__MODULE__{leq: leq} = heap, element) do
merge(heap, %__MODULE__{heap: {1, element, new(leq), new(leq)}, leq: leq})
end
def make(x, a, b) do
if rank(a) >= rank(b) do
%__MODULE__{heap: {rank(b) + 1, x, a, b}, leq: b.leq}
else
%__MODULE__{heap: {rank(a) + 1, x, b, a}, leq: a.leq}
end
end
def merge(heap1, %__MODULE__{heap: nil}), do: heap1
def merge(%__MODULE__{heap: nil}, heap2), do: heap2
def merge(
%__MODULE__{heap: {_, x, a1, b1}, leq: leq} = heap1,
%__MODULE__{heap: {_, y, a2, b2}} = heap2
) do
if leq.(x, y) do
make(x, a1, merge(b1, heap2))
else
make(y, a2, merge(heap1, b2))
end
end
def min(%__MODULE__{heap: nil}), do: nil
def min(%__MODULE__{heap: {_, x, _, _}}) do
x
end
def delete_min(%__MODULE__{heap: nil} = h), do: h
def delete_min(%__MODULE__{heap: {_, _, a, b}}) do
merge(a, b)
end
def count(%__MODULE__{heap: nil}), do: 0
def count(%__MODULE__{heap: _heap} = heap) do
do_count(heap, 0)
end
defp do_count(%__MODULE__{heap: nil}, count), do: count
defp do_count(heap, count) do
do_count(delete_min(heap), count + 1)
end
defimpl Enumerable do
def reduce(_heap, {:halt, acc}, _fun), do: {:halted, acc}
def reduce(heap, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(heap, &1, fun)}
def reduce(heap, {:cont, acc}, fun) do
if Ox.LeftistHeap.empty?(heap) do
{:done, acc}
else
reduce(Ox.LeftistHeap.delete_min(heap), fun.(Ox.LeftistHeap.min(heap), acc), fun)
end
end
def count(_heap), do: {:error, __MODULE__}
def member?(_heap, _element), do: {:error, __MODULE__}
def slice(_heap), do: {:error, __MODULE__}
end
end
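# Usage sketch: a min-heap over integers; `leq` is any two-argument
# "less than or equal" comparator.
heap = Ox.LeftistHeap.new([5, 1, 9, 3], fn a, b -> a <= b end)
Ox.LeftistHeap.min(heap) #=> 1
heap |> Ox.LeftistHeap.delete_min() |> Ox.LeftistHeap.min() #=> 3
# The Enumerable implementation drains the heap in comparator order:
Enum.take(heap, 2) #=> [1, 3]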
|
lib/ox/leftist_heap.ex
| 0.722331
| 0.497742
|
leftist_heap.ex
|
starcoder
|
defmodule ExcellentMigrations.Runner do
@moduledoc """
This module finds migration files in a project and detects potentially dangerous database
operations in them.
"""
alias ExcellentMigrations.{
DangersDetector,
FilesFinder
}
@type danger_type ::
:column_added_with_default
| :column_removed
| :column_renamed
| :column_type_changed
| :index_not_concurrently
| :many_columns_index
| :not_null_added
| :operation_delete
| :operation_insert
| :operation_update
| :raw_sql_executed
| :table_dropped
| :table_renamed
@type danger :: %{
type: danger_type,
path: String.t(),
line: integer
}
@doc """
Detects potentially dangerous database operations in database migration files.
## Options
* `:migrations_paths` - optional list of file paths to be checked.
## Scope of analysis
* If `migrations_paths` are specified, the analysis will be narrowed down to these files only.
* If not, and the application env `:excellent_migrations, :start_after` is set, only migrations with
a timestamp newer than the provided one will be chosen.
* If none of the above, all migration files in a project will be analyzed.
"""
@spec check_migrations(migrations_paths: [String.t()]) :: :safe | {:dangerous, [danger]}
def check_migrations(opts \\ []) do
opts
|> get_migrations_paths()
|> Task.async_stream(fn path ->
source_code = File.read!(path)
ast = Code.string_to_quoted!(source_code)
dangers = DangersDetector.detect_dangers(ast, source_code)
build_result(dangers, path)
end)
|> Stream.flat_map(fn {:ok, items} -> items end)
|> Enum.to_list()
|> close()
end
defp get_migrations_paths(opts) do
opts
|> Keyword.get_lazy(:migrations_paths, &FilesFinder.get_migrations_paths/0)
|> Enum.sort()
end
defp build_result(dangers, path) do
Enum.map(dangers, fn {type, line} ->
%{
type: type,
path: path,
line: line
}
end)
end
defp close([]), do: :safe
defp close(dangers), do: {:dangerous, dangers}
end
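# Usage sketch (the migration path below is hypothetical):
case ExcellentMigrations.Runner.check_migrations(migrations_paths: ["priv/repo/migrations/20210101000000_add_index.exs"]) do
:safe -> IO.puts("no dangerous operations found")
{:dangerous, dangers} -> IO.inspect(dangers, label: "dangers")
end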
|
lib/runner.ex
| 0.833663
| 0.41052
|
runner.ex
|
starcoder
|
defmodule Turbo.Ecto.Hooks.Search do
@moduledoc """
This module provides a operations that can add searching functionality to
a pipeline of `Ecto` queries. This module works by taking fields.
Inspire from: [ex_sieve](https://github.com/valyukov/ex_sieve/blob/master/lib/ex_sieve/node/grouping.ex)
"""
import Turbo.Ecto.Utils, only: [done: 1]
alias Turbo.Ecto.Hooks.Search
alias Search.Condition
defstruct conditions: nil, combinator: nil, groupings: []
@type t :: %__MODULE__{}
@doc """
Returns the search object.
## Examples
iex> params = %{"q" => %{"name_or_category_name_like" => "elixir", "price_eq" => 1}, "s" => "updated_at+asc", "per_page" => 5, "page" => 1}
iex> Turbo.Ecto.Hooks.Search.run(Turbo.Ecto.Product, params)
{:ok,
%Turbo.Ecto.Hooks.Search{
combinator: :and,
conditions: [
%Turbo.Ecto.Hooks.Search.Condition{
attributes: [
%Turbo.Ecto.Hooks.Search.Attribute{name: :name, parent: :query},
%Turbo.Ecto.Hooks.Search.Attribute{name: :name, parent: :category}
],
combinator: :or,
search_type: :like,
values: ["elixir"]
},
%Turbo.Ecto.Hooks.Search.Condition{
attributes: [
%Turbo.Ecto.Hooks.Search.Attribute{name: :price, parent: :query}
],
combinator: :and,
search_type: :eq,
values: [1]
}
],
groupings: []
}
}
iex> params = %{"filter" => %{"name_like" => "elixir", "price_eq" => 1}}
iex> Turbo.Ecto.Hooks.Search.run(Turbo.Ecto.Product, params)
{:ok,
%Turbo.Ecto.Hooks.Search{
combinator: :and,
groupings: [],
conditions: [%Turbo.Ecto.Hooks.Search.Condition{search_type: :like, values: ["elixir"], attributes: [%Turbo.Ecto.Hooks.Search.Attribute{name: :name, parent: :query}], combinator: :and}, %Turbo.Ecto.Hooks.Search.Condition{attributes: [%Turbo.Ecto.Hooks.Search.Attribute{name: :price, parent: :query}], combinator: :and, search_type: :eq, values: [1]}]}
}
"""
@spec run(Ecto.Query.t(), map()) :: tuple()
def run(schema, params)
def run(schema, %{"q" => q}) do
q
|> extract(schema)
|> done()
end
def run(schema, %{"filter" => filter}) do
filter
|> extract(schema)
|> done()
end
def run(schema, _) do
%{}
|> extract(schema)
|> done()
end
defp extract(params, schema), do: do_extract(params, schema)
defp do_extract(params, schema, combinator \\ :and) do
case extract_conditions(params, schema) do
{:error, reason} -> {:error, reason}
conditions -> %Search{combinator: combinator, conditions: conditions}
end
end
defp extract_condition({key, value}, schema), do: Condition.extract(key, value, schema)
defp extract_conditions(params, schema) do
params
|> Enum.map(&extract_condition(&1, schema))
|> validate_conditions()
end
# validate_conditions
defp validate_conditions(conditions, acc \\ [])
defp validate_conditions([{:error, reason} | _tail], _acc), do: {:error, reason}
defp validate_conditions([attribute | tail], acc),
do: validate_conditions(tail, acc ++ [attribute])
defp validate_conditions([], acc), do: acc
end
|
lib/turbo_ecto/hooks/search.ex
| 0.870611
| 0.521776
|
search.ex
|
starcoder
|
defmodule Bacen.CCS.ACCS003 do
@moduledoc """
The ACCS003 message.
This message is a response from Bacen's system
about the validation of given ACCS001 message.
Also, this message reports all success and failures
from a ACCS001 message.
It has the following XML example:
```xml
<CCSArqValidcAtlzDiaria>
<Repet_ACCS003_Pessoa>
<Grupo_ACCS003_Pessoa>
<TpOpCCS>I</TpOpCCS>
<QualifdrOpCCS>N</QualifdrOpCCS>
<TpPessoa>F</TpPessoa>
<CNPJ_CPFPessoa>12345678901</CNPJ_CPFPessoa>
<DtIni>2002-01-01</DtIni>
<DtFim>2002-01-03</DtFim>
<ErroCCS>ECCS0023</ErroCCS>
</Grupo_ACCS003_Pessoa>
</Repet_ACCS003_Pessoa>
<QtdErro>1</QtdErro>
<QtdOpCCSActo>0</QtdOpCCSActo>
<DtHrBC>2004-06-16T05:04:00</DtHrBC>
<DtMovto>2004-10-10</DtMovto>
</CCSArqValidcAtlzDiaria>
```
"""
use Ecto.Schema
import Brcpfcnpj.Changeset
import Ecto.Changeset
@typedoc """
The ACCS003 message type
"""
@type t :: %__MODULE__{}
@daily_update_validation_fields ~w(error_quantity accepted_quantity reference_date movement_date)a
@daily_update_validation_opts [source: :CCSArqValidcAtlzDiaria, primary_key: false]
@daily_update_validation_fields_source_sequence ~w(Repet_ACCS003_Pessoa QtdErro QtdOpCCSActo DtHrBC DtMovto)a
@quantity_fields ~w(error_quantity accepted_quantity)a
@persons_fields ~w(cnpj)a
@persons_fields_source_sequence ~w(CNPJBasePart Grupo_ACCS003_Pessoa)a
@person_fields ~w(
operation_type operation_qualifier type
cpf_cnpj start_date end_date error
)a
@person_required_fields ~w(
operation_type operation_qualifier type
cpf_cnpj start_date
)a
@person_fields_source_sequence ~w(TpOpCCS QualifdrOpCCS TpPessoa CNPJ_CPFPessoa DtIni DtFim ErroCCS)a
@allowed_operation_types ~w(E A I)
@allowed_operation_qualifiers ~w(N P C L H E)
@allowed_person_types ~w(F J)
@primary_key false
embedded_schema do
embeds_one :daily_update_validation, DailyUpdateValidation, @daily_update_validation_opts do
embeds_one :persons, Persons, source: :Repet_ACCS003_Pessoa, primary_key: false do
embeds_many :person, Person, source: :Grupo_ACCS003_Pessoa, primary_key: false do
field :operation_type, :string, source: :TpOpCCS
field :operation_qualifier, :string, source: :QualifdrOpCCS
field :type, :string, source: :TpPessoa
field :cpf_cnpj, :string, source: :CNPJ_CPFPessoa
field :start_date, :date, source: :DtIni
field :end_date, :date, source: :DtFim
field :error, :string, source: :ErroCCS
end
field :cnpj, :string, source: :CNPJBasePart
end
field :error_quantity, :integer, source: :QtdErro
field :accepted_quantity, :integer, source: :QtdOpCCSActo
field :reference_date, :utc_datetime, source: :DtHrBC
field :movement_date, :date, source: :DtMovto
end
end
@doc """
Creates a new ACCS003 message from given attributes.
"""
@spec new(map()) :: {:ok, t()} | {:error, Ecto.Changeset.t()}
def new(attrs) when is_map(attrs) do
attrs
|> changeset()
|> apply_action(:insert)
end
@doc false
def changeset(accs003 \\ %__MODULE__{}, attrs) when is_map(attrs) do
accs003
|> cast(attrs, [])
|> cast_embed(:daily_update_validation,
with: &daily_update_validation_changeset/2,
required: true
)
end
@doc false
def daily_update_validation_changeset(daily_update_validation, attrs) when is_map(attrs) do
daily_update_validation
|> cast(attrs, @daily_update_validation_fields)
|> validate_required(@daily_update_validation_fields)
|> validate_number(:error_quantity, greater_than_or_equal_to: 0)
|> validate_number(:accepted_quantity, greater_than_or_equal_to: 0)
|> cast_embed(:persons, with: &persons_changeset/2)
|> validate_by_quantity()
|> validate_quantity_digit()
end
defp validate_by_quantity(changeset) do
Enum.reduce(@quantity_fields, changeset, fn field, acc ->
quantity = get_field(changeset, field, 0)
validate_cast_embed_by_quantity(acc, quantity)
end)
end
defp validate_cast_embed_by_quantity(changeset, quantity) do
if quantity > 0 do
cast_embed(changeset, :persons, with: &persons_changeset/2, required: true)
else
changeset
end
end
defp validate_quantity_digit(changeset) do
Enum.reduce(@quantity_fields, changeset, fn field, acc ->
quantity =
changeset
|> get_field(field, 0)
|> to_string()
check_quantity_digit(acc, field, quantity)
end)
end
defp check_quantity_digit(changeset, field, quantity) do
if String.length(quantity) > 9 do
add_error(changeset, field, "number must have at most 9 digits")
else
changeset
end
end
@doc false
def persons_changeset(persons, attrs) when is_map(attrs) do
persons
|> cast(attrs, @persons_fields)
|> validate_required(@persons_fields)
|> validate_length(:cnpj, is: 8)
|> validate_format(:cnpj, ~r/[0-9]{8}/)
|> cast_embed(:person, with: &person_changeset/2, required: true)
end
@doc false
def person_changeset(person, attrs) when is_map(attrs) do
person
|> cast(attrs, @person_fields)
|> validate_required(@person_required_fields)
|> validate_inclusion(:operation_type, @allowed_operation_types)
|> validate_inclusion(:operation_qualifier, @allowed_operation_qualifiers)
|> validate_inclusion(:type, @allowed_person_types)
|> validate_length(:operation_type, is: 1)
|> validate_length(:operation_qualifier, is: 1)
|> validate_length(:type, is: 1)
|> validate_by_operation_type()
|> validate_by_type()
|> validate_error()
end
defp validate_by_operation_type(changeset) do
case get_field(changeset, :operation_type) do
"A" -> validate_required(changeset, [:end_date])
_ -> changeset
end
end
defp validate_by_type(changeset) do
case get_field(changeset, :type) do
"F" -> validate_cpf(changeset, :cpf_cnpj, message: "invalid CPF format")
"J" -> validate_cnpj(changeset, :cpf_cnpj, message: "invalid CNPJ format")
_ -> changeset
end
end
defp validate_error(changeset) do
case get_field(changeset, :error) do
nil ->
changeset
_ ->
changeset
|> validate_required([:error])
|> validate_length(:error, is: 8)
|> validate_format(:error, ~r/E[A-Z]{3}[0-9]{4}/)
end
end
@doc """
Returns the field sequence for given root xml element
## Examples
iex> Bacen.CCS.ACCS003.sequence(:CCSArqValidcAtlzDiaria)
[:Repet_ACCS003_Pessoa, :QtdErro, :QtdOpCCSActo, :DtHrBC, :DtMovto]
iex> Bacen.CCS.ACCS003.sequence(:Repet_ACCS003_Pessoa)
[:CNPJBasePart, :Grupo_ACCS003_Pessoa]
iex> Bacen.CCS.ACCS003.sequence(:Grupo_ACCS003_Pessoa)
[:TpOpCCS, :QualifdrOpCCS, :TpPessoa, :CNPJ_CPFPessoa, :DtIni, :DtFim, :ErroCCS]
"""
@spec sequence(:CCSArqValidcAtlzDiaria | :Repet_ACCS003_Pessoa | :Grupo_ACCS003_Pessoa) ::
list(atom())
def sequence(element)
def sequence(:CCSArqValidcAtlzDiaria), do: @daily_update_validation_fields_source_sequence
def sequence(:Repet_ACCS003_Pessoa), do: @persons_fields_source_sequence
def sequence(:Grupo_ACCS003_Pessoa), do: @person_fields_source_sequence
end
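# Usage sketch: new/1 applies the changeset pipeline above, so an empty payload
# fails on the required embed:
{:error, changeset} = Bacen.CCS.ACCS003.new(%{})
changeset.errors
#=> [daily_update_validation: {"can't be blank", [validation: :required]}]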
|
lib/bacen/ccs/accs003.ex
| 0.760028
| 0.6992
|
accs003.ex
|
starcoder
|
defmodule Exercises.Chapter4.Third do
@moduledoc """
We’ve created a function that sorts the items of a list in ascending order.
Now create a Sort.descending/1 function that sorts the elements in descending order.
"""
@doc """
Sorts a list in desired order. Defaults to ascending.
Example:
iex> alias Exercises.Chapter4.Third
Exercises.Chapter4.Third
iex> list = [6988, 1887, 7311, 2680, 1045, 1324, 6633, 9248, 2112, 4718]
[6988, 1887, 7311, 2680, 1045, 1324, 6633, 9248, 2112, 4718]
iex> Third.sort(list, :ascending)
[1045, 1324, 1887, 2112, 2680, 4718, 6633, 6988, 7311, 9248]
iex> Third.sort(list, :descending)
[9248, 7311, 6988, 6633, 4718, 2680, 2112, 1887, 1324, 1045]
"""
def sort(list, :ascending), do: sort(list, fn a, b -> a < b end)
def sort(list, :descending), do: sort(list, fn a, b -> a > b end)
def sort(list, comparator) do
len = size(list)
divide(list, len, comparator)
end
defp divide([], _, _), do: []
defp divide([_] = list, _, _), do: list
defp divide(list, len, comparator) do
half = div(len, 2)
{la, lb} = slice_at(list, half)
conquer(divide(la, half, comparator), divide(lb, len - half, comparator), comparator, [])
end
defp conquer([ha | ta] = la, [hb | tb] = lb, comparator, acc) do
if comparator.(ha, hb) do
conquer(ta, lb, comparator, [ha | acc])
else
conquer(la, tb, comparator, [hb | acc])
end
end
defp conquer([head | tail], [], cmp, acc), do: conquer(tail, [], cmp, [head | acc])
defp conquer([], [head | tail], cmp, acc), do: conquer(tail, [], cmp, [head | acc])
# that's "good" for memory, but terrible for cpu
# without tail call, it's nice for cpu, but bad for memory
defp conquer([], [], _, acc), do: acc |> reverse()
defp size(list), do: do_size(list, 0)
defp do_size([_ | tail], acc), do: do_size(tail, acc + 1)
defp do_size([], acc), do: acc
defp reverse(list), do: do_reverse(list, [])
defp do_reverse([head | tail], acc), do: do_reverse(tail, [head | acc])
defp do_reverse([], acc), do: acc
defp slice_at(list, index), do: do_slice_at(list, index, [])
defp do_slice_at([head | tail], index, acc) when index > 0 do
do_slice_at(tail, index - 1, [head | acc])
end
defp do_slice_at(remaining_list, _, acc), do: {acc |> reverse(), remaining_list}
end
|
lang/elixir/learning-fp-with-elixir/exercises/lib/chapter4/third.ex
| 0.739986
| 0.611933
|
third.ex
|
starcoder
|
defmodule LayoutOMatic.Layouts.Components.Layout do
@moduledoc """
Handles Auto-Layouts for Scenic Components.
  Each Scenic component is a little different in how it is sized and positioned. While most components are positioned from their top-left-most point, passing along the next starting point differs per component. Sizing is based on font metrics: by determining the dimensions of the font, a width and height are calculated and applied to the component. The Layout-O-Matic takes care of all of this for you. Width and height can also be passed as style arguments on a component, in which case those dimensions are used instead.
  "Auto-layout", while a made-up term, means that components are automatically laid out in equal rows and columns. Other types of layouts may be added in the future.
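
  ## Example

  A sketch, assuming the graph already contains a group named `:buttons_group`
  (translated to the grid origin) and a rect named `:buttons` whose data holds
  the maximum x/y bounds; the component ids are hypothetical:

  ```
  {:ok, graph} = auto_layout(graph, :buttons_group, [:ok_button, :cancel_button])
  ```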
"""
alias Scenic.Graph
# alias LayoutOMatic.Layouts.Components.RadioGroup
alias LayoutOMatic.Layouts.Components.Button
alias LayoutOMatic.Layouts.Components.Checkbox
alias LayoutOMatic.Layouts.Components.Dropdown
alias LayoutOMatic.Layouts.Components.Slider
alias LayoutOMatic.Layouts.Components.TextField
alias LayoutOMatic.Layouts.Components.Toggle
import Scenic.Primitives
defmodule Layout do
defstruct component: %Scenic.Primitive{},
starting_xy: {},
max_xy: {},
grid_xy: {},
graph: %{},
padding: [{:total, 1}],
margin: [{:total, 1}],
position: :static,
float: :none,
align: :none
end
  @spec auto_layout(graph :: Graph.t(), group_id :: atom, list_of_comp_ids :: [atom]) ::
          {:ok, Graph.t()}
def auto_layout(graph, group_id, list_of_comp_ids) do
rect_id =
group_id
|> Atom.to_string()
|> String.split("_group")
|> hd()
|> String.to_atom()
[%{transforms: %{translate: grid_xy}}] = Graph.get(graph, group_id)
[%{data: max_xy}] = Graph.get(graph, rect_id)
graph =
Enum.reduce(list_of_comp_ids, [], fn c_id, acc ->
[%{data: {comp_type, _}} = component] = Graph.get(graph, c_id)
layout =
case acc do
[] ->
%Layout{
component: component,
starting_xy: grid_xy,
max_xy: max_xy,
grid_xy: grid_xy,
graph: graph
}
_ ->
acc
end
do_layout(comp_type, layout, c_id)
end)
|> Map.get(:graph)
{:ok, graph}
end
defp do_layout(Scenic.Component.Button, layout, c_id) do
case Button.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Checkbox, layout, c_id) do
case Checkbox.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Dropdown, layout, c_id) do
case Dropdown.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.RadioGroup, _layout, _c_id) do
nil
# case RadioGroup.translate(layout) do
# {:ok, {x, y}, new_layout} ->
# new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
# Map.put(new_layout, :graph, new_graph)
# {:error, error} ->
# {:error, error}
# end
end
defp do_layout(Scenic.Component.Input.Slider, layout, c_id) do
case Slider.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.TextField, layout, c_id) do
case TextField.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Toggle, layout, c_id) do
case Toggle.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
end
|
lib/layouts/components/autolayout.ex
| 0.819965
| 0.587825
|
autolayout.ex
|
starcoder
|
defmodule Membrane.RTMP.SourceBin do
@moduledoc """
  Bin responsible for spawning a new RTMP server.
  It receives an RTMP stream from the client, parses and demuxes it, and outputs single audio and video streams ready for further processing with Membrane elements.
  At the moment only the AAC and H264 codecs are supported.
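
  ## Example

  A minimal sketch of placing this bin in a pipeline's children spec (the
  child name and port are hypothetical):

  ```
  children = %{rtmp_source: %Membrane.RTMP.SourceBin{port: 1935}}
  ```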
"""
use Membrane.Bin
alias Membrane.{AAC, H264, RTMP}
def_output_pad :video,
caps: H264,
availability: :always,
mode: :pull,
demand_unit: :buffers
def_output_pad :audio,
caps: AAC,
availability: :always,
mode: :pull,
demand_unit: :buffers
def_options port: [
spec: 1..65_535,
description: "Port on which the server will listen"
],
local_ip: [
spec: binary(),
default: "127.0.0.1",
description:
"IP address on which the server will listen. This is useful if you have more than one network interface"
],
timeout: [
                spec: Membrane.Time.t() | :infinity,
default: :infinity,
description: """
Time during which the connection with the client must be established before handle_prepared_to_playing fails.
                  Duration given must be a multiple of one second or the atom `:infinity`.
"""
]
@impl true
def handle_init(%__MODULE__{} = options) do
url = "rtmp://#{options.local_ip}:#{options.port}"
source = %RTMP.Source{url: url, timeout: options.timeout}
spec = %ParentSpec{
children: %{
src: source,
video_parser: %Membrane.H264.FFmpeg.Parser{
alignment: :au,
attach_nalus?: true,
skip_until_keyframe?: true
},
audio_parser: %Membrane.AAC.Parser{
in_encapsulation: :none,
out_encapsulation: :none
}
},
links: [
link(:src) |> via_out(:audio) |> to(:audio_parser) |> to_bin_output(:audio),
link(:src) |> via_out(:video) |> to(:video_parser) |> to_bin_output(:video)
]
}
{{:ok, spec: spec}, %{}}
end
end
|
lib/membrane_rtmp_plugin/rtmp/source/bin.ex
| 0.798854
| 0.426979
|
bin.ex
|
starcoder
|
defmodule PhoenixFormAwesomplete do
alias PhoenixFormAwesomplete.GenJS
alias Phoenix.HTML
alias Phoenix.HTML.Form
@moduledoc ~S"""
PhoenixFormAwesomplete is a [Phoenix form helper](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html)
that utilizes Lea Verou's autocomplete / autosuggest / typeahead /
inputsearch [Awesomplete widget](https://leaverou.github.io/awesomplete/index.html).
It comes with an AwesompleteUtil [javascript library](https://nico-amsterdam.github.io/awesomplete-util/index.html)
which adds the following features:
- Dynamic remote data loading; based on what is typed-in it performs an ajax lookup.
- Allow HTML markup in the shown items. Show value with description. Optionally search in the description text.
- Show when there is an exact match.
- Show when there isn't a match.
- When there is an exact match show related data (supplied in the remote data) in other parts of the page.
- Select the highlighted item with the tab-key.
## Example
iex> {:safe, [input, script]} = PhoenixFormAwesomplete.awesomplete(:user, :drinks,
...> ["data-list": "beer, gin, soda, sprite, water, vodga, whine, whisky"],
...> %{ minChars: 1 } )
iex> to_string input
"<input data-list=\"beer, gin, soda, sprite, water, vodga, whine, whisky\"" <>
" id=\"user_drinks\" name=\"user[drinks]\" type=\"text\">"
iex> script
"<script>AwesompleteUtil.start('#user_drinks', {}, {minChars: 1});</script>"
The first three parameters are passed on unchanged to the Phoenix form [text_input](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#text_input/3) which generates the input tag.
`minChars` is an option for the Awesomplete object which is started with inline javascript.
  Just adding the `multiple` option changes the generated javascript code completely; the PhoenixFormAwesomplete module
  takes care of that.
  Instead of a server-side generated data-list, it is possible to specify the url of a JSON web service and
  let the client code look up the data list on demand while typing.
Look at the [live examples](https://nico-amsterdam.github.io/awesomplete-util/phoenix.html) with code.
It is possible to use aliases for the javascript library references in the generated code
via the environment variables `util` and `awesomplete`.
The default names, `AwesompleteUtil` and `Awesomplete` respectively, are a bit long.
This can shorten the average page size.
For example use this javascript:
var AU = AwesompleteUtil, AW = Awesomplete;
and change the variables via the application config:
:phoenix_form_awesomplete, util: "AU"
:phoenix_form_awesomplete, awesomplete: "AW"
After changing the config/config.exs run:
mix deps.compile --force phoenix_form_awesomplete
"""
@doc ~S"""
Create script tag with the supplied script. No defer or async because this is used for inline script.
## Example
iex> PhoenixFormAwesomplete.script("alert(1);")
{:safe, "<script>alert(1);</script>"}
"""
def script(script) do
HTML.raw("<script>#{script}</script>")
end
@doc ~S"""
Create javascript that listens to `awesomplete-prepop` and `awesomplete-match` events,
and copies the `data_field` to the DOM element with the given target id.
The `target_id` can also be a javascript function.
## Example
iex> PhoenixFormAwesomplete.copy_to_id_js(:user, :color, "label", "#awe-color-result")
"AwesompleteUtil.startCopy('#user_color', 'label', '#awe-color-result');"
"""
def copy_to_id_js(source_form, source_field, data_field \\ nil, target_id)
when (is_nil(data_field) or is_binary(data_field)) and is_binary(target_id) do
source_id = "#" <> Form.input_id(source_form, source_field)
GenJS.copy_to_id_js(source_id, data_field, target_id)
end
@doc ~S"""
Create script tag with javascript that listens to `awesomplete-prepop` and `awesomplete-match` events,
and copies the `data_field` to the DOM element with the given target id.
The `target_id` can also be a javascript function. This function receives two parameters: event and dataField. The event detail property contains an array with the matching list item. The array is empty when there is no match.
## Example
iex> PhoenixFormAwesomplete.copy_to_id(:user, :color, "label", "#awe-color-result")
{:safe,
"<script>AwesompleteUtil.startCopy('#user_color', 'label', '#awe-color-result');</script>"}
"""
def copy_to_id(source_form, source_field, data_field \\ nil, target_id)
when (is_nil(data_field) or is_binary(data_field)) and is_binary(target_id) do
script(copy_to_id_js(source_form, source_field, data_field, target_id))
end
@doc ~S"""
Create script tag with javascript that listens to `awesomplete-prepop` and `awesomplete-match` events,
and copies the `data_field` to the target field.
## Example
iex> PhoenixFormAwesomplete.copy_to_field(:user, :color, "label", :door, :paint)
{:safe,
"<script>AwesompleteUtil.startCopy('#user_color', 'label', '#door_paint');</script>"}
"""
def copy_to_field(source_form, source_field, data_field \\ nil, target_form, target_field)
when is_nil(data_field) or is_binary(data_field) do
target_id = "#" <> Form.input_id(target_form, target_field)
script(copy_to_id_js(source_form, source_field, data_field, target_id))
end
@doc ~S"""
This method generates javascript code for using Awesomplete(Util).
## Example
iex> PhoenixFormAwesomplete.awesomplete_js(:user, :hobby, %{ minChars: 1 } )
"AwesompleteUtil.start('#user_hobby', {}, {minChars: 1});"
"""
def awesomplete_js(form, field, awesomplete_opts) do
element_id = Form.input_id(form, field)
GenJS.awesomplete_js(element_id, awesomplete_opts)
end
@doc ~S"""
This method generates an input tag and inline javascript code that starts Awesomplete.
Awesomplete options:
* `ajax` - Replace ajax function. Supplied function receives these parameters: (url, urlEnd, val, fn, xhr). fn is the callback function. Default: AwesompleteUtil.ajax.
  * `assign` - Assign the Awesomplete object to a variable. true/false/name. If true, the variable name will be 'awe\_' + the id of the input tag. Default: false
* `autoFirst` - Automatically select the first element. Default: false.
* `combobox` - Id of the combobox button. true/false/id. If true the assumed button id is 'awe\_btn\_' + id of the input tag. Default: false
* `convertInput` - Convert input function. Internally convert input for comparison with the data list items. By default it trims the input and converts it to lowercase for a case-insensitive comparison.
* `convertResponse` - Convert JSON response from ajax calls. This function is called with the parsed JSON, and allows conversion of the data before further processing. Default: nil - no conversion.
* `data` - Data function as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility)
* `descr` - Name of the field in the data list (the JSON response) that contains the description text to show below the value in the suggestion list. Default: no description
* `descrSearch` - Filter must also search the input value in the description field. Default: false
* `value` - Name of the field in the data list (the JSON response) that contains the value.
  * `filter` - Filter function as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility). Typically Awesomplete.FILTER\_STARTSWITH or Awesomplete.FILTER\_CONTAINS. If the label differs from the value, filter on the value with AwesompleteUtil.filterStartsWith or AwesompleteUtil.filterContains.
* `item` - Item function as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility). Default is to highlight all occurrences of the input text. Use AwesompleteUtil.itemStartsWith if that matches with the used filter.
* `label` - Name of the field in the data list (the JSON response) that contains the text that should be shown instead of the value.
* `list` - Data list as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility).
* `loadall` - Data list contains all items. The input value will not be used in ajax calls. Default: false
* `limit` - number. If a limit is specified, and the number of items returned by the server is equal or more as this limit, the AwesompleteUtil code assumes that there are more results, so it will re-query if more characters are typed to get more refined results. The limit:1 tells that not more than 1 result is expected, so the json service doesn’t have to return an array. With limit:0 it will always re-query if more characters are typed. Default: no limit
* `maxItems` - Maximum number of suggestions to display. Default: 10
* `minChars` - Minimum characters the user has to type before the autocomplete popup shows up. Default: 2
* `multiple` - true/false/characters. Separators to allow multiple values. If true, the separator will be the space character. Default: false
* `prepop` - true/false. If true do lookup initial/autofilled value and send awesomplete-prepop event. Default: false
* `replace` - Replace function as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility)
* `sort` - Sort function as defined in [Awesomplete](http://leaverou.github.io/awesomplete/index.html#extensibility)
* `url` - url for ajax calls.
* `urlEnd` - Addition at the end of the url of the ajax call, after the input value.
## Example
iex> {:safe, [inp, scr]} = PhoenixFormAwesomplete.awesomplete(:user, :eyes,
...> ["data-list": "blue, brown, green"],
...> %{ minChars: 1, multiple: ",;" } )
iex> to_string inp
"<input data-list=\"blue, brown, green\" id=\"user_eyes\" name=\"user[eyes]\" type=\"text\">"
iex> scr
"<script>AwesompleteUtil.start('#user_eyes', " <>
"{convertInput: function(input) {" <>
" return input.replace(/[,;]\\s*$/, '').match(/[^,;]*$/)[0].trim().toLowerCase(); }}, " <>
"{minChars: 1, " <>
"replace: function(data) {" <>
" var text=data.value;" <>
" this.input.value = this.input.value.match(/^.+[,;]\\s*|/)[0] + text + ', '; }, " <>
"filter: function(data, input) {" <>
" return Awesomplete.FILTER_CONTAINS(data, input.match(/[^,;]*([,;]\\s*)?$/)[0]); }, " <>
"item: function(text, input) {" <>
" return AwesompleteUtil.itemContains(text, input.match(/[^,;]*([,;]\\s*)?$/)[0]); }});" <>
"</script>"
"""
def awesomplete(form, field, opts \\ [], awesomplete_opts) do
script = awesomplete_js(form, field, awesomplete_opts)
HTML.html_escape([Form.text_input(form, field, opts), script(script)])
end
@doc ~S"""
This method generates a script tag with javascript code for using Awesomplete(Util).
## Example
iex> PhoenixFormAwesomplete.awesomplete_script(:user, :hobby, %{ minChars: 1 } )
{:safe,
"<script>AwesompleteUtil.start('#user_hobby', {}, {minChars: 1});</script>"}
"""
def awesomplete_script(form, field, awesomplete_opts) do
script = awesomplete_js(form, field, awesomplete_opts)
script(script)
end
end
|
lib/phoenix_form_awesomplete.ex
| 0.824885
| 0.684119
|
phoenix_form_awesomplete.ex
|
starcoder
|
defmodule Sippet.Core do
@moduledoc """
A behaviour module for implementing the `Sippet.Core`.
The `Sippet.Core` designates a particular type of SIP entity, i.e., specific
to either a stateful or stateless proxy, a user agent or registrar.
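
  ## Example

  To route incoming SIP messages, register a module implementing this
  behaviour in your application configuration (a sketch; `MyApp.Core` is a
  hypothetical module that calls `use Sippet.Core`):

  ```
  config :sippet, Sippet.Core, MyApp.Core
  ```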
"""
alias Sippet.Message, as: Message
alias Sippet.Transactions, as: Transactions
@doc """
Receives a new incoming request from a remote host, or ACK.
The `server_key` indicates the name of the transaction created when
the request was received. If it is an ACK, then the `server_key` is
`nil`.
The function `receive_request/2` is called from the server transaction
process when the parameter `server_key` is not `nil`, and from the
transport process (possibly a `poolboy` worker process), when the
`server_key` is `nil`.
"""
@callback receive_request(incoming_request :: Message.request,
server_key :: Transactions.Server.t | nil)
:: any
@doc """
Receives a response for a sent request.
The `client_key` indicates the name of the transaction created when
the request was sent using `Sippet.Transactions.send_request/1`.
The function `receive_response/2` is called from the client transaction
process when the parameter `client_key` is not `nil`, and from the
transport process (possibly a `poolboy` worker process), when the
`client_key` is `nil`.
"""
@callback receive_response(incoming_response :: Message.response,
client_key :: Transactions.Client.t | nil)
:: any
@doc """
Receives an error from the server or client transaction.
The function `receive_error/2` is called from the client or server
transaction process created when sending or receiving requests.
"""
@callback receive_error(reason :: term,
client_or_server_key ::
Transactions.Client.t |
Transactions.Server.t)
:: any
@doc """
Dispatches the received request to the registered `Sippet.Core`
implementation.
"""
@spec receive_request(Message.request, Transactions.Server.t | nil) :: any
def receive_request(incoming_request, server_key) do
args = [incoming_request, server_key]
apply(get_module!(), :receive_request, args)
end
defp get_module!() do
module = Application.get_env(:sippet, __MODULE__)
if module == nil do
raise RuntimeError, message: "Sippet.Core is not registered"
else
module
end
end
@doc """
Dispatches the received response to the registered `Sippet.Core`
implementation.
"""
@spec receive_response(Message.response, Transactions.Client.t | nil) :: any
def receive_response(incoming_response, client_key) do
args = [incoming_response, client_key]
apply(get_module!(), :receive_response, args)
end
@doc """
Dispatches the network transport error to the registered `Sippet.Core`
implementation.
"""
@spec receive_error(reason :: term,
Transactions.Client.t | Transactions.Server.t) :: any
def receive_error(reason, client_or_server_key) do
args = [reason, client_or_server_key]
apply(get_module!(), :receive_error, args)
end
defmacro __using__(_opts) do
quote location: :keep do
@behaviour Sippet.Core
end
end
end
|
lib/sippet/core.ex
| 0.861232
| 0.491151
|
core.ex
|
starcoder
|
defmodule Carrier do
use Application
alias Carrier.Server
@doc """
Set up the GenServer API. Should be called before usage either implicitly by
adding to `application` in `mix.exs` or explicitly by calling it directly.
"""
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [worker(Server, [])]
opts = [strategy: :one_for_one, name: Carrier.Supervisor]
Supervisor.start_link(children, opts)
end
@doc """
This is the public method for validating one address. The only parameter is a
four-tuple containing the following fields:
1. Street address (including suite, apt., etc.)
2. City
3. State
4. ZIP Code
Results are in the form of a two-tuple. Valid addresses are returned like
`{:valid, validated_address}` and invalid ones are returned like
`{:invalid, original_address}`.
## Examples
A pretty well-formed and complete address to query:
iex> Carrier.verify_one {"1 Infinite Loop", "Cupertino", "CA", "95014"}
{:valid, {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"}}
Note that the addresses are also standardized (to follow USPS guidelines):
iex> Carrier.verify_one {"1 infinite loop", "cupertino", "ca", "95014"}
{:valid, {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"}}
iex> Carrier.verify_one {"1096 Rainer Dr, Suite 1001", "", "", "32714"}
{:valid, {"1096 Rainer Dr Ste 1001", "Altamonte Springs", "FL", "32714-3855"}}
You can supply empty strings for fields you don't know:
iex> Carrier.verify_one {"1 Infinite Loop", "", "", "95014"}
{:valid, {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"}}
iex> Carrier.verify_one {"1 Infinite Loop", "Cupertino", "", "95014"}
{:valid, {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"}}
iex> Carrier.verify_one {"1 Infinite Loop", "Cupertino", "CA", ""}
{:valid, {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"}}
If an address is invalid, we'll let you know and provide the original back:
iex> Carrier.verify_one {"123 Fake St", "Anytown", "FL", "12345"}
{:invalid, {"123 Fake St", "Anytown", "FL", "12345"}}
Easy!
"""
def verify_one(address),
do: Server.verify_one(address)
@doc """
This is the public method for validating many addresses. This accepts a list
of four-tuples representing addresses. Behavior works identically to the
`verify_one/1` method, except results are in a list.
Please see `verify_one/1` for more information.
## Examples
Validating two addresses:
iex> Carrier.verify_many [{"1 Infinite Loop", "", "", "95014"},
...> {"1096 Rainer Dr, Suite 1001", "", "", "32714"}]
[valid: {"1 Infinite Loop", "Cupertino", "CA", "95014-2083"},
valid: {"1096 Rainer Dr Ste 1001", "Altamonte Springs", "FL", "32714-3855"}]
"""
def verify_many(addresses), do: Server.verify_many(addresses)
end
|
lib/carrier.ex
| 0.864925
| 0.480113
|
carrier.ex
|
starcoder
|
defmodule Redix.PubSub.Connection do
@moduledoc false
@behaviour :gen_statem
alias Redix.{ConnectionError, Protocol, Utils}
require Logger
defstruct [
:opts,
:transport,
:socket,
:continuation,
:backoff_current,
:last_disconnect_reason,
subscriptions: %{},
monitors: %{}
]
@backoff_exponent 1.5
@impl true
def callback_mode(), do: :state_functions
@impl true
def init(opts) do
transport = if(opts[:ssl], do: :ssl, else: :gen_tcp)
data = %__MODULE__{opts: opts, transport: transport}
if opts[:sync_connect] do
with {:ok, socket} <- Utils.connect(data.opts),
:ok <- setopts(data, socket, active: :once) do
data = %__MODULE__{
data
| socket: socket,
last_disconnect_reason: nil,
backoff_current: nil
}
{:ok, :connected, data}
else
{:error, reason} -> {:stop, reason}
{:stop, reason} -> {:stop, reason}
end
else
send(self(), :handle_possible_erlang_bug)
{:ok, :state_needed_because_of_possible_erlang_bug, data}
end
end
## States
# If I use the action {:next_event, :internal, :connect} when returning
# {:ok, :disconnected, data} from init/1, then Erlang 20 (not 21) blows up saying:
# {:bad_return_from_init, {:next_events, :internal, :connect}}. The weird thing is
  # that if I use `{:next_even, :internal, :connect}` it complains with `:next_even`,
  # while with `:next_event` it seems to add the final "s" (`:next_events`). No idea
# what's going on and no time to fix it.
def state_needed_because_of_possible_erlang_bug(:info, :handle_possible_erlang_bug, data) do
{:next_state, :disconnected, data, {:next_event, :internal, :connect}}
end
def state_needed_because_of_possible_erlang_bug(_event, _info, _data) do
{:keep_state_and_data, :postpone}
end
def disconnected(:internal, :handle_disconnection, data) do
log(data, :disconnection, fn ->
"Disconnected from Redis (#{Utils.format_host(data)}): " <>
Exception.message(data.last_disconnect_reason)
end)
if data.opts[:exit_on_disconnection] do
{:stop, data.last_disconnect_reason}
else
Enum.each(data.monitors, fn {pid, ref} ->
send(pid, ref, :disconnected, %{error: data.last_disconnect_reason})
end)
:keep_state_and_data
end
end
def disconnected({:timeout, :reconnect}, nil, _data) do
{:keep_state_and_data, {:next_event, :internal, :connect}}
end
def disconnected(:internal, :connect, data) do
with {:ok, socket} <- Utils.connect(data.opts),
:ok <- setopts(data, socket, active: :once) do
if data.last_disconnect_reason do
log(data, :reconnection, fn -> "Reconnected to Redis (#{Utils.format_host(data)})" end)
end
data = %__MODULE__{data | socket: socket, last_disconnect_reason: nil, backoff_current: nil}
{:next_state, :connected, data, {:next_event, :internal, :handle_connection}}
else
{:error, reason} ->
log(data, :failed_connection, fn ->
"Failed to connect to Redis (#{Utils.format_host(data)}): " <>
Exception.message(%ConnectionError{reason: reason})
end)
disconnect(data, reason)
{:stop, reason} ->
{:stop, reason, data}
end
end
def disconnected({:call, from}, {operation, targets, pid}, data)
when operation in [:subscribe, :psubscribe] do
{data, ref} = monitor_new(data, pid)
:ok = :gen_statem.reply(from, {:ok, ref})
# We can just add subscribers to channels here since when we'll reconnect, the connection
# will have to reconnect to all the channels/patterns anyways.
{_targets_to_subscribe_to, data} = subscribe_pid_to_targets(data, operation, targets, pid)
    send(pid, ref, :disconnected, %{error: data.last_disconnect_reason})
{:keep_state, data}
end
def disconnected({:call, from}, {operation, targets, pid}, data)
when operation in [:unsubscribe, :punsubscribe] do
:ok = :gen_statem.reply(from, :ok)
case data.monitors[pid] do
ref when is_reference(ref) ->
{_targets_to_unsubscribe_from, data} =
unsubscribe_pid_from_targets(data, operation, targets, pid)
{kind, target_type} =
case operation do
:unsubscribe -> {:unsubscribed, :channel}
:punsubscribe -> {:punsubscribed, :pattern}
end
Enum.each(targets, fn target ->
send(pid, ref, kind, %{target_type => target})
end)
data = demonitor_if_not_subscribed_to_anything(data, pid)
{:keep_state, data}
nil ->
:keep_state_and_data
end
end
def connected(:internal, :handle_connection, data) do
# We clean up channels/patterns that don't have any subscribers. We do this because some
# subscribers could have unsubscribed from a channel/pattern while disconnected.
data =
update_in(data.subscriptions, fn subscriptions ->
:maps.filter(fn _target, subscribers -> MapSet.size(subscribers) > 0 end, subscriptions)
end)
channels_to_subscribe_to =
for {{:channel, channel}, subscribers} <- data.subscriptions do
Enum.each(subscribers, fn pid ->
ref = Map.fetch!(data.monitors, pid)
send(pid, ref, :subscribed, %{channel: channel})
end)
channel
end
patterns_to_subscribe_to =
for {{:pattern, pattern}, subscribers} <- data.subscriptions do
Enum.each(subscribers, fn pid ->
ref = Map.fetch!(data.monitors, pid)
          send(pid, ref, :psubscribed, %{pattern: pattern})
end)
pattern
end
case subscribe(data, channels_to_subscribe_to, patterns_to_subscribe_to) do
:ok -> {:keep_state, data}
{:error, reason} -> disconnect(data, reason)
end
end
def connected({:call, from}, {operation, targets, pid}, data)
when operation in [:subscribe, :psubscribe] do
{data, ref} = monitor_new(data, pid)
:ok = :gen_statem.reply(from, {:ok, ref})
{targets_to_subscribe_to, data} = subscribe_pid_to_targets(data, operation, targets, pid)
{kind, target_type} =
case operation do
:subscribe -> {:subscribed, :channel}
:psubscribe -> {:psubscribed, :pattern}
end
Enum.each(targets, fn target ->
send(pid, ref, kind, %{target_type => target})
end)
{channels_to_subscribe_to, patterns_to_subscribe_to} =
case operation do
:subscribe -> {targets_to_subscribe_to, []}
:psubscribe -> {[], targets_to_subscribe_to}
end
case subscribe(data, channels_to_subscribe_to, patterns_to_subscribe_to) do
:ok -> {:keep_state, data}
{:error, reason} -> disconnect(data, reason)
end
end
def connected({:call, from}, {operation, targets, pid}, data)
when operation in [:unsubscribe, :punsubscribe] do
:ok = :gen_statem.reply(from, :ok)
case data.monitors[pid] do
ref when is_reference(ref) ->
{targets_to_unsubscribe_from, data} =
unsubscribe_pid_from_targets(data, operation, targets, pid)
{kind, target_type} =
case operation do
:unsubscribe -> {:unsubscribed, :channel}
:punsubscribe -> {:punsubscribed, :pattern}
end
Enum.each(targets, fn target ->
send(pid, ref, kind, %{target_type => target})
end)
data = demonitor_if_not_subscribed_to_anything(data, pid)
{channels_to_unsubscribe_from, patterns_to_unsubscribe_from} =
case operation do
:unsubscribe -> {targets_to_unsubscribe_from, []}
:punsubscribe -> {[], targets_to_unsubscribe_from}
end
case unsubscribe(data, channels_to_unsubscribe_from, patterns_to_unsubscribe_from) do
:ok -> {:keep_state, data}
{:error, reason} -> disconnect(data, reason)
end
nil ->
:keep_state_and_data
end
end
def connected(:info, {transport_closed, socket}, %__MODULE__{socket: socket} = data)
when transport_closed in [:tcp_closed, :ssl_closed] do
disconnect(data, transport_closed)
end
def connected(:info, {transport_error, socket, reason}, %__MODULE__{socket: socket} = data)
when transport_error in [:tcp_error, :ssl_error] do
disconnect(data, reason)
end
def connected(:info, {transport, socket, bytes}, %__MODULE__{socket: socket} = data)
when transport in [:tcp, :ssl] do
:ok = setopts(data, socket, active: :once)
data = new_bytes(data, bytes)
{:keep_state, data}
end
def connected(:info, {:DOWN, ref, :process, pid, _reason}, data) do
{^ref, data} = pop_in(data.monitors[pid])
{targets_to_unsubscribe_from, data} =
get_and_update_in(data.subscriptions, fn subscriptions ->
Enum.flat_map_reduce(subscriptions, subscriptions, fn {target, subscribers}, acc ->
new_subscribers = MapSet.delete(subscribers, pid)
if MapSet.size(new_subscribers) == 0 do
{[target], Map.put(acc, target, new_subscribers)}
else
{[], Map.put(acc, target, new_subscribers)}
end
end)
end)
channels_to_unsubscribe_from =
for {:channel, channel} <- targets_to_unsubscribe_from, do: channel
patterns_to_unsubscribe_from =
for {:pattern, pattern} <- targets_to_unsubscribe_from, do: pattern
case unsubscribe(data, channels_to_unsubscribe_from, patterns_to_unsubscribe_from) do
:ok -> {:keep_state, data}
{:error, reason} -> disconnect(data, reason)
end
end
## Helpers
defp new_bytes(data, "") do
data
end
defp new_bytes(data, bytes) do
case (data.continuation || (&Protocol.parse/1)).(bytes) do
{:ok, resp, rest} ->
data = handle_pubsub_msg(data, resp)
new_bytes(%{data | continuation: nil}, rest)
{:continuation, continuation} ->
%{data | continuation: continuation}
end
end
defp handle_pubsub_msg(data, [operation, _target, _count])
when operation in ["subscribe", "psubscribe", "unsubscribe", "punsubscribe"] do
data
end
defp handle_pubsub_msg(data, ["message", channel, payload]) do
subscribers = Map.get(data.subscriptions, {:channel, channel}, [])
properties = %{channel: channel, payload: payload}
Enum.each(subscribers, fn pid ->
ref = Map.fetch!(data.monitors, pid)
send(pid, ref, :message, properties)
end)
data
end
defp handle_pubsub_msg(data, ["pmessage", pattern, channel, payload]) do
subscribers = Map.get(data.subscriptions, {:pattern, pattern}, [])
properties = %{channel: channel, pattern: pattern, payload: payload}
Enum.each(subscribers, fn pid ->
ref = Map.fetch!(data.monitors, pid)
send(pid, ref, :pmessage, properties)
end)
data
end
# Returns {targets_to_subscribe_to, data}.
defp subscribe_pid_to_targets(data, operation, targets, pid) do
get_and_update_in(data.subscriptions, fn subscriptions ->
Enum.flat_map_reduce(targets, subscriptions, fn target, acc ->
target_key = key_for_target(operation, target)
case acc do
%{^target_key => subscribers} ->
acc = %{acc | target_key => MapSet.put(subscribers, pid)}
if MapSet.size(subscribers) == 0 do
{[target], acc}
else
{[], acc}
end
%{} ->
{[target], Map.put(acc, target_key, MapSet.new([pid]))}
end
end)
end)
end
# Returns {targets_to_unsubscribe_from, data}.
defp unsubscribe_pid_from_targets(data, operation, targets, pid) do
get_and_update_in(data.subscriptions, fn subscriptions ->
Enum.flat_map_reduce(targets, subscriptions, fn target, acc ->
key = key_for_target(operation, target)
case acc do
%{^key => subscribers} ->
cond do
MapSet.size(subscribers) == 0 ->
{[], Map.delete(acc, key)}
subscribers == MapSet.new([pid]) ->
{[target], Map.delete(acc, key)}
true ->
{[], %{acc | key => MapSet.delete(subscribers, pid)}}
end
%{} ->
{[], acc}
end
end)
end)
end
defp subscribe(_data, [], []) do
:ok
end
defp subscribe(data, channels, patterns) do
pipeline =
case {channels, patterns} do
{channels, []} -> [["SUBSCRIBE" | channels]]
{[], patterns} -> [["PSUBSCRIBE" | patterns]]
{channels, patterns} -> [["SUBSCRIBE" | channels], ["PSUBSCRIBE" | patterns]]
end
transport_send(data, Enum.map(pipeline, &Protocol.pack/1))
end
defp unsubscribe(_data, [], []) do
:ok
end
defp unsubscribe(data, channels, patterns) do
pipeline =
case {channels, patterns} do
{channels, []} -> [["UNSUBSCRIBE" | channels]]
{[], patterns} -> [["PUNSUBSCRIBE" | patterns]]
{channels, patterns} -> [["UNSUBSCRIBE" | channels], ["PUNSUBSCRIBE" | patterns]]
end
transport_send(data, Enum.map(pipeline, &Protocol.pack/1))
end
defp transport_send(data, bytes) do
case data.transport.send(data.socket, bytes) do
:ok ->
:ok
{:error, reason} ->
        # Close through the configured transport so SSL sockets are closed correctly.
        :ok = data.transport.close(data.socket)
{:error, reason}
end
end
defp monitor_new(data, pid) do
case data.monitors do
%{^pid => ref} ->
{data, ref}
_ ->
ref = Process.monitor(pid)
data = put_in(data.monitors[pid], ref)
{data, ref}
end
end
defp demonitor_if_not_subscribed_to_anything(data, pid) do
still_subscribed_to_something? =
Enum.any?(data.subscriptions, fn {_target, subscribers} -> pid in subscribers end)
if still_subscribed_to_something? do
data
else
{monitor_ref, data} = pop_in(data.monitors[pid])
Process.demonitor(monitor_ref, [:flush])
data
end
end
defp key_for_target(:subscribe, channel), do: {:channel, channel}
defp key_for_target(:unsubscribe, channel), do: {:channel, channel}
defp key_for_target(:psubscribe, pattern), do: {:pattern, pattern}
defp key_for_target(:punsubscribe, pattern), do: {:pattern, pattern}
defp setopts(data, socket, opts) do
inets_mod(data.transport).setopts(socket, opts)
end
defp inets_mod(:gen_tcp), do: :inet
defp inets_mod(:ssl), do: :ssl
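  # Successive reconnection delays grow by @backoff_exponent (1.5x) on each
  # attempt, capped at the :backoff_max option unless that option is :infinity.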
defp next_backoff(data) do
backoff_current = data.backoff_current || data.opts[:backoff_initial]
backoff_max = data.opts[:backoff_max]
next_backoff = round(backoff_current * @backoff_exponent)
backoff_current =
if backoff_max == :infinity do
next_backoff
else
min(next_backoff, backoff_max)
end
{backoff_current, put_in(data.backoff_current, backoff_current)}
end
def disconnect(data, reason) do
{next_backoff, data} = next_backoff(data)
data = put_in(data.last_disconnect_reason, %ConnectionError{reason: reason})
timeout_action = {{:timeout, :reconnect}, next_backoff, nil}
actions = [{:next_event, :internal, :handle_disconnection}, timeout_action]
{:next_state, :disconnected, data, actions}
end
defp send(pid, ref, kind, properties)
when is_reference(ref) and is_atom(kind) and is_map(properties) do
send(pid, {:redix_pubsub, self(), ref, kind, properties})
end
defp log(data, action, message) do
level =
data.opts
|> Keyword.fetch!(:log)
|> Keyword.fetch!(action)
Logger.log(level, message)
end
end
|
lib/redix/pubsub/connection.ex
| 0.702836
| 0.410106
|
connection.ex
|
starcoder
|
defmodule Port do
@moduledoc """
Functions related to Erlang ports.
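
  ## Example

  A minimal sketch: spawn an external command and receive its output as a
  message (the `:binary` option makes the data arrive as binaries rather than
  charlists):

  ```
  port = Port.open({:spawn, "echo hello"}, [:binary])
  # the owning process then receives: {^port, {:data, "hello\n"}}
  ```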
"""
@type name :: {:spawn, charlist | binary} |
{:spawn_driver, charlist | binary} |
{:spawn_executable, charlist | atom} |
{:fd, non_neg_integer, non_neg_integer}
@doc """
Opens an Erlang port given a tuple `name` and a list of `settings`.
## Name
The supported values for `name` are:
* `{:spawn, command}` - to run an external program. The first space separated
word of `command` will be considered as the name of the program to run, so
use `{:spawn_executable, command}` to run a program having spaces in its name.
* `{:spawn_driver, command}` - similar to `{:spawn, command}`, but to run a
loaded driver.
* `{:spawn_executable, filename}` - similar to `{:spawn, filename}`, but to run
an external executable. With this option, `filename` in its whole is considered
the name of the program to execute.
* `{:fd, fd_in, fd_out}` - to access file descriptors used by Erlang, `fd_in`
being used for standard input, `fd_out` for standard output.
For more information, see [`:erlang.open_port/2`](http://www.erlang.org/doc/man/erlang.html#open_port-2).
Inlined by the compiler.
"""
@spec open(name, list) :: port
def open(name, settings) do
:erlang.open_port(name, settings)
end
@doc """
Closes the `port`.
For more information, see [`:erlang.port_close/1`](http://www.erlang.org/doc/man/erlang.html#port_close-1).
Inlined by the compiler.
"""
@spec close(port) :: true
def close(port) do
:erlang.port_close(port)
end
@doc """
Sends `data` to the port driver `port`.
For more information, see [`:erlang.port_command/2`](http://www.erlang.org/doc/man/erlang.html#port_command-2).
Inlined by the compiler.
"""
@spec command(port, iodata, [:force | :nosuspend]) :: boolean
def command(port, data, options \\ []) do
:erlang.port_command(port, data, options)
end
@doc """
Associates the `port` identifier with a `pid`.
For more information, see [`:erlang.port_connect/2`](http://www.erlang.org/doc/man/erlang.html#port_connect-2).
Inlined by the compiler.
"""
@spec connect(port, pid) :: true
def connect(port, pid) do
:erlang.port_connect(port, pid)
end
@doc """
Sends a synchronous control command to the `port` and returns its reply as a binary.
Not all port drivers support this feature.
For more information, see [`:erlang.port_control/3`](http://www.erlang.org/doc/man/erlang.html#port_control-3).
Inlined by the compiler.
"""
@spec control(port, integer, iodata) :: iodata | binary
def control(port, operation, data) do
:erlang.port_control(port, operation, data)
end
@doc """
Makes a synchronous call to the `port` and returns its reply as a term.
Not all port drivers support this control feature.
For more information, see [`:erlang.port_call/3`](http://www.erlang.org/doc/man/erlang.html#port_call-3).
Inlined by the compiler.
"""
@spec call(port, integer, term) :: term
def call(port, operation, data) do
:erlang.port_call(port, operation, data)
end
@doc """
Returns information about the `port`
or `nil` if the port is closed.
For more information, see [`:erlang.port_info/1`](http://www.erlang.org/doc/man/erlang.html#port_info-1).
"""
def info(port) do
nillify :erlang.port_info(port)
end
@doc """
Returns information about the `port`
or `nil` if the port is closed.
For more information, see [`:erlang.port_info/2`](http://www.erlang.org/doc/man/erlang.html#port_info-2).
"""
@spec info(port, atom) :: {atom, term} | nil
def info(port, spec)
def info(port, :registered_name) do
case :erlang.port_info(port, :registered_name) do
[] -> {:registered_name, []}
other -> nillify(other)
end
end
def info(port, item) do
nillify :erlang.port_info(port, item)
end
@doc """
Returns a list of the ports for the current node.
For more information, see [`:erlang.ports/0`](http://www.erlang.org/doc/man/erlang.html#ports-0).
Inlined by the compiler.
"""
@spec list :: [port]
def list do
:erlang.ports
end
@compile {:inline, nillify: 1}
defp nillify(:undefined), do: nil
defp nillify(other), do: other
end
|
lib/elixir/lib/port.ex
| 0.879555
| 0.512205
|
port.ex
|
starcoder
|
defmodule Stripe.Charge do
@moduledoc """
Work with [Stripe `charge` objects](https://stripe.com/docs/api#charges).
You can:
- [Create a charge](https://stripe.com/docs/api#create_charge)
- [Retrieve a charge](https://stripe.com/docs/api#retrieve_charge)
- [Update a charge](https://stripe.com/docs/api#update_charge)
- [Capture a charge](https://stripe.com/docs/api#capture_charge)
- [List all charges](https://stripe.com/docs/api#list_charges)
"""
use Stripe.Entity
import Stripe.Request
require Stripe.Util
@type user_fraud_report :: %{
user_report: String.t()
}
@type stripe_fraud_report :: %{
stripe_report: String.t()
}
@type charge_outcome :: %{
network_status: String.t() | nil,
reason: String.t() | nil,
risk_level: String.t(),
rule: Stripe.id() | charge_outcome_rule,
seller_message: String.t() | nil,
type: String.t()
}
@type charge_outcome_rule :: %{
action: String.t(),
id: String.t(),
predicate: String.t()
}
@type card_info :: %{
exp_month: number,
exp_year: number,
number: String.t(),
object: String.t(),
cvc: String.t(),
address_city: String.t() | nil,
address_country: String.t() | nil,
address_line1: String.t() | nil,
address_line2: String.t() | nil,
name: String.t() | nil,
address_state: String.t() | nil,
address_zip: String.t() | nil
}
@type t :: %__MODULE__{
id: Stripe.id(),
object: String.t(),
amount: non_neg_integer,
amount_refunded: non_neg_integer,
application: Stripe.id() | nil,
application_fee: Stripe.id() | Stripe.ApplicationFee.t() | nil,
balance_transaction: Stripe.id() | Stripe.BalanceTransaction.t() | nil,
captured: boolean,
created: Stripe.timestamp(),
currency: String.t(),
customer: Stripe.id() | Stripe.Customer.t() | nil,
description: String.t() | nil,
destination: Stripe.id() | Stripe.Account.t() | nil,
dispute: Stripe.id() | Stripe.Dispute.t() | nil,
failure_code: Stripe.Error.card_error_code() | nil,
failure_message: String.t() | nil,
fraud_details: user_fraud_report | stripe_fraud_report | %{},
invoice: Stripe.id() | Stripe.Invoice.t() | nil,
livemode: boolean,
metadata: Stripe.Types.metadata(),
on_behalf_of: Stripe.id() | Stripe.Account.t() | nil,
order: Stripe.id() | Stripe.Order.t() | nil,
outcome: charge_outcome | nil,
paid: boolean,
receipt_email: String.t() | nil,
receipt_number: String.t() | nil,
refunded: boolean,
refunds: Stripe.List.t(Stripe.Refund.t()),
review: Stripe.id() | Stripe.Review.t() | nil,
shipping: Stripe.Types.shipping() | nil,
source: Stripe.Card.t() | map,
source_transfer: Stripe.id() | Stripe.Transfer.t() | nil,
statement_descriptor: String.t() | nil,
status: String.t(),
transfer: Stripe.id() | Stripe.Transfer.t() | nil,
transfer_group: String.t() | nil
}
defstruct [
:id,
:object,
:amount,
:amount_refunded,
:application,
:application_fee,
:balance_transaction,
:captured,
:created,
:currency,
:customer,
:description,
:destination,
:dispute,
:failure_code,
:failure_message,
:fraud_details,
:invoice,
:livemode,
:metadata,
:on_behalf_of,
:order,
:outcome,
:paid,
:receipt_email,
:receipt_number,
:refunded,
:refunds,
:review,
:shipping,
:source,
:source_transfer,
:statement_descriptor,
:status,
:transfer,
:transfer_group
]
@plural_endpoint "charges"
@doc """
Create a charge.
If your API key is in test mode, the supplied payment source (e.g., card) won't actually be
charged, though everything else will occur as if in live mode.
(Stripe assumes that the charge would have completed successfully).
See the [Stripe docs](https://stripe.com/docs/api#create_charge).
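
  ## Example

  A sketch using Stripe's test card token (it assumes a test-mode API key is
  configured):

      {:ok, charge} = Stripe.Charge.create(%{amount: 1_000, currency: "usd", source: "tok_visa"})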
"""
@spec create(params, Stripe.options()) :: {:ok, t} | {:error, Stripe.Error.t()}
when params:
%{
:amount => pos_integer,
:currency => String.t(),
optional(:application_fee) => non_neg_integer,
optional(:capture) => boolean,
optional(:description) => String.t(),
optional(:destination) => %{
:account => Stripe.id() | Stripe.Account.t(),
optional(:amount) => non_neg_integer
},
optional(:transfer_group) => String.t(),
optional(:on_behalf_of) => Stripe.id() | Stripe.Account.t(),
optional(:metadata) => map,
optional(:receipt_email) => String.t(),
optional(:shipping) => Stripe.Types.shipping(),
optional(:customer) => Stripe.id() | Stripe.Customer.t(),
optional(:source) => Stripe.id() | Stripe.Card.t() | card_info,
optional(:statement_descriptor) => String.t()
}
| %{}
def create(params, opts \\ []) do
new_request(opts)
|> put_endpoint(@plural_endpoint)
|> put_params(params)
|> put_method(:post)
|> cast_path_to_id([:destination, :account])
|> cast_to_id([:on_behalf_of, :customer, :source])
|> make_request()
end
@doc """
Retrieve a charge.
Retrieves the details of a charge that has previously been created.
Supply the unique charge ID that was returned from your previous request, and Stripe will return
the corresponding charge information. The same information is returned when creating or refunding
the charge.
See the [Stripe docs](https://stripe.com/docs/api#retrieve_charge).
"""
@spec retrieve(Stripe.id() | t, Stripe.options()) :: {:ok, t} | {:error, Stripe.Error.t()}
def retrieve(id, opts \\ []) do
new_request(opts)
|> put_endpoint(@plural_endpoint <> "/#{get_id!(id)}")
|> put_method(:get)
|> make_request()
end
@doc """
Update a charge.
Updates the specified charge by setting the values of the parameters passed. Any parameters
not provided will be left unchanged.
This request accepts only the `:description`, `:metadata`, `:receipt_email`, `:fraud_details`,
and `:shipping` as arguments, as well as `:transfer_group` in some cases.
The charge to be updated may either be passed in as a struct or an ID.
See the [Stripe docs](https://stripe.com/docs/api#update_charge).
"""
@spec update(Stripe.id() | t, params, Stripe.options()) :: {:ok, t} | {:error, Stripe.Error.t()}
when params:
%{
optional(:customer) => Stripe.id() | Stripe.Customer.t(),
optional(:description) => String.t(),
optional(:fraud_details) => user_fraud_report,
optional(:metadata) => Stripe.Types.metadata(),
optional(:receipt_email) => String.t(),
optional(:shipping) => Stripe.Types.shipping(),
optional(:transfer_group) => String.t()
}
| %{}
def update(id, params, opts \\ []) do
new_request(opts)
|> put_endpoint(@plural_endpoint <> "/#{get_id!(id)}")
|> put_method(:post)
|> put_params(params)
|> make_request()
end
@doc """
Capture a charge.
Capture the payment of an existing, uncaptured, charge. This is the second
half of the two-step payment flow, where first you created a charge with the
capture option set to false.
Uncaptured payments expire exactly seven days after they are created. If they
are not captured by that point in time, they will be marked as refunded and
will no longer be capturable.
See the [Stripe docs](https://stripe.com/docs/api#capture_charge).
"""
@spec capture(Stripe.id() | t, params, Stripe.options()) ::
{:ok, t} | {:error, Stripe.Error.t()}
when params: %{
optional(:amount) => non_neg_integer,
optional(:application_fee) => non_neg_integer,
optional(:destination) => %{
optional(:amount) => non_neg_integer
},
optional(:receipt_email) => String.t(),
optional(:statement_descriptor) => String.t()
}
def capture(id, params, opts) do
new_request(opts)
|> put_endpoint(@plural_endpoint <> "/#{get_id!(id)}/capture")
|> put_params(params)
|> put_method(:post)
|> make_request()
end
@doc """
[DEPRECATED] Capture a charge.
This version of the function is deprecated. Please use `capture/3` instead.
"""
@spec capture(Stripe.id() | t, Stripe.options()) :: {:ok, t} | {:error, Stripe.Error.t()}
def capture(id, opts) when is_list(opts) do
Stripe.Util.log_deprecation("Please use `capture/3` instead.")
capture(id, %{}, opts)
end
@spec capture(Stripe.id() | t, map) :: {:ok, t} | {:error, Stripe.Error.t()}
def capture(id, params) when is_map(params) do
capture(id, params, [])
end
@spec capture(Stripe.id() | t) :: {:ok, t} | {:error, Stripe.Error.t()}
def capture(id) do
Stripe.Util.log_deprecation("Please use `capture/3` instead.")
capture(id, %{}, [])
end
@doc """
List all charges.
Returns a list of charges you’ve previously created. The charges are returned in sorted order,
with the most recent charges appearing first.
See the [Stripe docs](https://stripe.com/docs/api#list_charges).
"""
@spec list(params, Stripe.options()) :: {:ok, Stripe.List.t(t)} | {:error, Stripe.Error.t()}
when params: %{
optional(:created) => Stripe.date_query(),
optional(:customer) => Stripe.Customer.t() | Stripe.id(),
optional(:ending_before) => t | Stripe.id(),
optional(:limit) => 1..100,
optional(:source) => %{
optional(:object) => String.t()
},
optional(:starting_after) => t | Stripe.id(),
optional(:transfer_group) => String.t()
}
def list(params \\ %{}, opts \\ []) do
new_request(opts)
|> prefix_expansions()
|> put_endpoint(@plural_endpoint)
|> put_method(:get)
|> put_params(params)
|> cast_to_id([:customer, :ending_before, :starting_after])
|> make_request()
end
end
|
lib/stripe/core_resources/charge.ex
| 0.799951
| 0.496826
|
charge.ex
|
starcoder
|
require Utils
require Program
defmodule D14 do
@moduledoc """
--- Day 14: Space Stoichiometry ---
As you approach the rings of Saturn, your ship's low fuel indicator turns on. There isn't any fuel here, but the rings have plenty of raw material. Perhaps your ship's Inter-Stellar Refinery Union brand nanofactory can turn these raw materials into fuel.
You ask the nanofactory to produce a list of the reactions it can perform that are relevant to this process (your puzzle input). Every reaction turns some quantities of specific input chemicals into some quantity of an output chemical. Almost every chemical is produced by exactly one reaction; the only exception, ORE, is the raw material input to the entire process and is not produced by a reaction.
You just need to know how much ORE you'll need to collect before you can produce one unit of FUEL.
Each reaction gives specific quantities for its inputs and output; reactions cannot be partially run, so only whole integer multiples of these quantities can be used. (It's okay to have leftover chemicals when you're done, though.) For example, the reaction 1 A, 2 B, 3 C => 2 D means that exactly 2 units of chemical D can be produced by consuming exactly 1 A, 2 B and 3 C. You can run the full reaction as many times as necessary; for example, you could produce 10 D by consuming 5 A, 10 B, and 15 C.
Given the list of reactions in your puzzle input, what is the minimum amount of ORE required to produce exactly 1 FUEL?
--- Part Two ---
After collecting ORE for a while, you check your cargo hold: 1 trillion (1000000000000) units of ORE.
Given 1 trillion ORE, what is the maximum amount of FUEL you can produce?
"""
@behaviour Day
def produce(_map, store, 0, _type), do: store
def produce(_map, store, quantity, "ORE") do
store
|> Map.update("TOTAL ORE", quantity, &(&1 + quantity))
|> Map.update("ORE", quantity, &(&1 + quantity))
end
def produce(map, store, quantity, type) do
{count, requirements} = Map.get(map, type)
producing = ceil(quantity / count)
# produce and consume
store =
Enum.reduce(requirements, store, fn {r_count, r_type}, store ->
available = Map.get(store, r_type, 0)
needed = max(0, r_count * producing - available)
store = produce(map, store, needed, r_type)
Map.update!(store, r_type, &(&1 - r_count * producing))
end)
Map.update(store, type, count * producing, &(&1 + count * producing))
end
def calculate_ore(map, quantity, type),
do: produce(map, %{}, quantity, type) |> Map.get("TOTAL ORE")
def parse(input) do
input
|> Enum.reduce(%{}, fn line, acc ->
[requirements_string, element_string] = String.split(line, " => ")
[element_quantity_string, element_name] = String.split(element_string, " ")
{element_quantity, ""} = Integer.parse(element_quantity_string)
requirements =
requirements_string
|> String.split(", ")
|> Enum.map(fn element_string ->
[element_quantity_string, element_name] = String.split(element_string, " ")
{element_quantity, ""} = Integer.parse(element_quantity_string)
{element_quantity, element_name}
end)
Map.put(acc, element_name, {element_quantity, requirements})
end)
end
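  # Binary search on the fuel amount: the total ORE cost grows monotonically
  # with the fuel produced, so we bisect between an affordable lower bound and
  # an unaffordable upper bound until they are adjacent.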
def part_2(_map, min, max) when min == max - 1, do: min
def part_2(map, min, max) do
avg = div(min + max, 2)
store = produce(map, %{}, avg, "FUEL")
ore_used = store["TOTAL ORE"]
if ore_used < 1_000_000_000_000, do: part_2(map, avg, max), else: part_2(map, min, avg)
end
def solve(input) do
map = parse(input)
part_1 = calculate_ore(map, 1, "FUEL")
part_2 = part_2(map, div(1_000_000_000_000, part_1), div(2_000_000_000_000, part_1))
{
part_1,
part_2
}
end
end
|
lib/days/14.ex
| 0.761627
| 0.644854
|
14.ex
|
starcoder
|
defmodule Commands.IntCommands do
use Memoize
alias Interp.Functions
alias Commands.GeneralCommands
require Interp.Functions
# All characters available from the 05AB1E code page, where the
# alphanumeric digits come first and the remaining characters
# ranging from 0x00 to 0xff that do not occur yet in the list are appended.
def digits, do: String.to_charlist(
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno" <>
"pqrstuvwxyzǝʒαβγδεζηθвимнт\nΓΔΘιΣΩ≠∊∍∞₁₂₃₄₅₆ !\"#$%" <>
"&'()*+,-./:;<=>?@[\\]^_`{|}~Ƶ€Λ‚ƒ„…†‡ˆ‰Š‹ŒĆŽƶĀ‘’“”–" <>
"—˜™š›œćžŸā¡¢£¤¥¦§¨©ª«¬λ®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉ" <>
"ÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ")
@doc """
TODO: negative numbers and decimal.
Computes the factorial of the given number.
## Parameters
- value: The value from which the factorial will be calculated
## Returns
The factorial of the given number.
"""
def factorial(0), do: 1
def factorial(value) when is_float(value), do: gamma_function(value + 1)
def factorial(value), do: factorial(value, 1)
defp factorial(1, acc), do: acc
defp factorial(value, acc), do: factorial(value - 1, acc * value)
@pi 3.141592653589793238462643383279502884197
@e 2.718281828459045235360287471352662497757
@gamma_coefficients [676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7]
defp gamma_function(value) do
cond do
# For computing Γ(0.5 + n) or Γ(0.5 - n), a separate rule needs to be applied
mod(Functions.to_number(value - 0.5), 1) == 0 ->
n = Functions.to_integer(value - 0.5)
cond do
n >= 0 -> factorial(2 * n) / (pow(4, n) * factorial(n)) * pow(@pi, 0.5)
true ->
n = -n
pow(-4, n) * factorial(n) / factorial(2 * n) * pow(@pi, 0.5)
end
# When n < 0.5
value < 0.5 -> @pi / (:math.sin(@pi * value) * gamma_function(1 - value))
# Remaining cases
true ->
series = (@gamma_coefficients |> Enum.with_index |> Enum.map(fn {x, index} -> x / (value + index) end) |> Enum.sum) + 0.99999999999980993
pow(2 * @pi, 0.5) * pow(value + 6.5, (value - 0.5)) * pow(@e, -(value + 6.5)) * series
end
end
@doc """
Power function that also works for negative numbers and decimal numbers.
## Parameters
- n: The number that will be raised to the power k
- k: The exponent of the power function.
## Returns
The result of n ** k.
"""
def pow(n, k) do
cond do
k < 0 -> 1 / pow(n, -k, 1)
true -> pow(n, k, 1)
end
end
defp pow(_, 0, acc), do: acc
defp pow(n, k, acc) when k > 0 and k < 1, do: acc * :math.pow(n, k)
defp pow(n, k, acc), do: pow(n, k - 1, n * acc)
# Modulo operator:
@doc """
Modulo operator, which also works for negative and decimal numbers.
Using the following set of rules, we are able to include these numbers:
-x.(f|i) % -y.(f|i) --> -(x.(f|i) % y.(f|i))
-x.f % y.f --> (y.f - (x.f % y.f)) % y.f
x.f % -y.f --> -(-x.f % y.f)
x.(f|i) % y.(f|i) --> ((x / y) % 1) * y.(f|i)
-x.i % -y.i --> -(x.i % y.i)
-x.i % y.i --> (y.i - (x.i % y.i)) % y.i
x.i % -y.i --> -(-x.i % y.i)
x.i % y.i --> rem(x.i, y.i)
## Parameters
- dividend: The dividend of the modulo function.
- divisor: The divisor of the modulo function.
## Returns
Returns the result of dividend % divisor.
"""
def mod(dividend, divisor) when dividend < 0 and divisor < 0, do: -mod(-dividend, -divisor)
def mod(dividend, divisor) when is_float(divisor) do
cond do
dividend < 0 and divisor > 0 ->
case mod(-dividend, divisor) do
0 -> 0
x -> divisor - x
end
dividend > 0 and divisor < 0 -> -mod(-dividend, -divisor)
true -> mod(dividend / divisor, 1) * divisor
end
end
def mod(dividend, divisor) when is_float(dividend) and is_integer(divisor) do
int_part = trunc(dividend)
float_part = dividend - int_part
mod(int_part, divisor) + float_part
end
def mod(dividend, divisor) when is_integer(dividend) and is_integer(divisor) do
cond do
dividend < 0 and divisor > 0 ->
case mod(-dividend, divisor) do
0 -> 0
x -> divisor - x
end
dividend > 0 and divisor < 0 -> -mod(-dividend, -divisor)
true -> rem(dividend, divisor)
end
end
@doc """
Integer division that uses `div/2` when both operands are integers and
otherwise truncates the quotient toward zero.
"""
def divide(dividend, divisor) when is_float(dividend) or is_float(divisor), do: trunc(dividend / divisor)
def divide(dividend, divisor), do: div(dividend, divisor)
@doc """
Converts the given number to the given base and returns it as a string using the characters
from the 05AB1E code page, except for '•', which is used to decompress base-255 strings.
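For example:
```elixir
to_base(255, 16) #=> "FF"
```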
"""
def to_base(value, base) do
Integer.digits(value, base) |> Enum.map(fn x -> Enum.at(digits(), x) end) |> List.to_string
end
@doc """
Parses the given value as a string of 05AB1E code-page digits in the given base and converts it to decimal.
"""
def string_from_base(value, base) do
list = to_charlist(value) |> Enum.map(fn x -> Enum.find_index(digits(), fn y -> x == y end) end)
list_from_base(list, base)
end
def list_from_base(value, base) do
value = Enum.to_list(value)
{result, _} = Enum.reduce(value, {0, length(value) - 1}, fn (x, {acc, index}) -> {acc + pow(base, index) * x, index - 1} end)
result
end
def to_base_arbitrary(value, base) when base > 0, do: Integer.digits(value, base)
def to_base_arbitrary(value, base) when base < 0, do: to_negative_base_arbitrary(value, base, [])
defp to_negative_base_arbitrary(0, _, acc), do: acc
defp to_negative_base_arbitrary(value, base, acc) do
remainder = rem(value, base)
cond do
remainder >= 0 -> to_negative_base_arbitrary(div(value, base), base, [remainder | acc])
# If the remainder is negative, we subtract the base from the remainder, resulting in a positive remainder.
# Since we are subtracting the base from the remainder, we must also add 1 to the divided result.
remainder < 0 -> to_negative_base_arbitrary(div(value, base) + 1, base, [(remainder - base) | acc])
end
end
def from_custom_base(value, base) when not Functions.is_iterable(base), do: from_custom_base(value, String.graphemes(to_string(base)))
def from_custom_base(value, base) when not Functions.is_iterable(value), do: from_custom_base(String.graphemes(to_string(value)), base)
def from_custom_base(value, base) do
base = Enum.to_list(base)
value |> Enum.map(fn x -> Enum.find_index(base, fn y -> GeneralCommands.equals(x, y) end) end) |> list_from_base(length(base))
end
def to_custom_base(value, base) when not Functions.is_iterable(base), do: to_custom_base(value, String.graphemes(to_string(base)))
def to_custom_base(value, base) do
base = Enum.to_list(base)
value |> to_base_arbitrary(length(base)) |> Enum.map(fn x -> Enum.at(base, x) end)
end
@doc """
Checks whether the given number is a prime number.
"""
def is_prime?(value) when value in [2, 3, 5, 7], do: true
def is_prime?(value) when value < 2 or rem(value, 2) == 0 or rem(value, 3) == 0 or rem(value, 5) == 0 or rem(value, 7) == 0, do: false
def is_prime?(value), do: is_prime?(value, 5, :math.sqrt(value) |> Float.floor |> round)
def is_prime?(_, current_prime, upper_bound) when current_prime > upper_bound, do: true
def is_prime?(value, current_prime, upper_bound) do
cond do
rem(value, current_prime) == 0 -> false
rem(value, current_prime + 2) == 0 -> false
true -> is_prime?(value, current_prime + 6, upper_bound)
end
end
@doc """
Computes the next prime from the given value.
"""
def next_prime(2), do: 3
def next_prime(value) when value < 2, do: 2
def next_prime(value) do
next = value + 2
cond do
is_prime?(next) -> next
true -> next_prime(value + 2)
end
end
def next_prime_from_arbitrary(number) when is_float(number), do: next_prime_from_arbitrary(Float.floor(number) |> round)
def next_prime_from_arbitrary(number) when number < 2, do: 2
def next_prime_from_arbitrary(number) do
next = number + 1
cond do
is_prime?(next) -> next
true -> next_prime_from_arbitrary(next)
end
end
def prev_prime_from_arbitrary(number) when is_float(number), do: prev_prime_from_arbitrary(Float.ceil(number) |> round)
def prev_prime_from_arbitrary(number) do
prev = number - 1
cond do
is_prime?(prev) -> prev
true -> prev_prime_from_arbitrary(prev)
end
end
def nearest_prime_from_arbitrary(number), do: nearest_prime_from_arbitrary(round(number), 0, number)
def nearest_prime_from_arbitrary(number, offset, initial) do
cond do
is_prime?(number + offset) ->
cond do
offset == 0 -> number
is_prime?(number - offset) and abs(initial - (number - offset)) < abs(initial - (number + offset)) -> number - offset
true -> number + offset
end
is_prime?(number - offset) ->
number - offset
true -> nearest_prime_from_arbitrary(number, offset + 1, initial)
end
end
@doc """
Retrieves the 0-based index of the given value in the sequence of primes. If
the value itself is not prime, the index of the largest prime smaller than
the value is returned (or -1 when no such prime exists).
"""
def get_prime_index(value) when value < 2, do: -1
def get_prime_index(value), do: get_prime_index(value, 2, 0)
def get_prime_index(value, current_prime, index) when value < current_prime, do: index - 1
def get_prime_index(value, current_prime, index) when value == current_prime, do: index
def get_prime_index(value, current_prime, index), do: get_prime_index(value, next_prime(current_prime), index + 1)
@doc """
Computes the prime factorization of the given value as a list of
prime factors with duplicates. Example, 60 → [2, 2, 3, 5]
"""
def prime_factors(value), do: prime_factors(value, [], 2)
def prime_factors(value, acc, _) when value < 2, do: Enum.reverse acc
def prime_factors(value, acc, index) when rem(value, index) == 0, do: prime_factors(div(value, index), [index | acc], index)
def prime_factors(value, acc, index), do: prime_factors(value, acc, next_prime(index))
@doc """
Computes the prime exponents of the given value as a list of exponents.
For example, given the factorization of n, which equals [2 ** a, 3 ** b, 5 ** c, 7 ** d, ...],
this method returns the list [a, b, c, d, ...] with trailing zeroes removed.
"""
def prime_exponents(value), do: prime_exponents(value, [], 2, 0)
def prime_exponents(value, acc, _, 0) when value < 2, do: Enum.reverse acc
def prime_exponents(value, acc, _, count) when value < 2, do: Enum.reverse [count | acc]
def prime_exponents(value, acc, index, count) when rem(value, index) == 0, do: prime_exponents(div(value, index), acc, index, count + 1)
def prime_exponents(value, acc, index, count), do: prime_exponents(value, [count | acc], next_prime(index), 0)
def number_from_prime_exponents(value) do
{result, _} = Enum.reduce(value, {1, 2}, fn (element, {product, prime}) -> {product * pow(prime, element), next_prime(prime)} end)
result
end
@doc """
Computes and retrieves the nth prime, where n is the given parameter.
Uses `defmemo` to memoize the sequence.
"""
defmemo nth_prime(0), do: 2
defmemo nth_prime(n) when n < 0, do: 0
defmemo nth_prime(n) when n > 0, do: nth_prime(n, 2)
defmemo nth_prime(0, last_prime), do: last_prime
defmemo nth_prime(n, last_prime), do: nth_prime(n - 1, next_prime(last_prime))
def divisors(value), do: divisors(abs(value), [], trunc(:math.sqrt(abs(value))))
defp divisors(_, acc, 0), do: acc
defp divisors(value, acc, index) when rem(value, index) == 0 do
if div(value, index) == index do
divisors(value, [index], index - 1)
else
divisors(value, [index] ++ acc ++ [div(value, index)], index - 1)
end
end
defp divisors(value, acc, index), do: divisors(value, acc, index - 1)
def n_choose_k(n, k) when k > n, do: 0
def n_choose_k(n, k), do: div(factorial(n), factorial(k) * factorial(n - k))
def n_permute_k(n, k) when k > n, do: 0
def n_permute_k(n, k), do: div(factorial(n), factorial(n - k))
@doc """
Checks whether the given number is a perfect square, using Newton's method
so that arbitrarily large integers are handled.
"""
def is_square?(0), do: true
def is_square?(1), do: true
def is_square?(value) when not is_integer(value), do: false
# Negative numbers are never perfect squares (and would break the iteration)
def is_square?(value) when value < 0, do: false
def is_square?(value) do
x = div(value, 2)
is_square?(value, MapSet.new([x]), x)
end
defp is_square?(value, _, x) when x * x == value, do: true
defp is_square?(value, history, x) do
x = div(x + div(value, x), 2)
cond do
MapSet.member?(history, x) -> false
true -> is_square?(value, MapSet.put(history, x), x)
end
end
def max_of(list) do
cond do
Functions.is_iterable(list) -> max_of(Enum.to_list(list), nil)
true -> max_of(String.graphemes(to_string(list)), nil)
end
end
def max_of([], value), do: value
def max_of(list, value) do
head = List.first(list)
cond do
Functions.is_iterable(head) and value == nil -> max_of(Enum.drop(list, 1), max_of(head))
Functions.is_iterable(head) -> max_of(Enum.drop(list, 1), max(max_of(head), value))
value == nil -> max_of(Enum.drop(list, 1), Functions.to_number(head))
Functions.to_number(head) > value and is_number(Functions.to_number(head)) -> max_of(Enum.drop(list, 1), Functions.to_number(head))
true -> max_of(Enum.drop(list, 1), value)
end
end
def min_of(list) do
cond do
Functions.is_iterable(list) -> min_of(Enum.to_list(list), nil)
true -> min_of(String.graphemes(to_string(list)), nil)
end
end
def min_of([], value), do: value
def min_of(list, value) do
head = List.first(list)
cond do
Functions.is_iterable(head) and value == nil -> min_of(Enum.drop(list, 1), min_of(head))
Functions.is_iterable(head) -> min_of(Enum.drop(list, 1), min(min_of(head), value))
value == nil -> min_of(Enum.drop(list, 1), Functions.to_number(head))
Functions.to_number(head) < value and is_number(Functions.to_number(head)) -> min_of(Enum.drop(list, 1), Functions.to_number(head))
true -> min_of(Enum.drop(list, 1), value)
end
end
# GCD that also supports decimal numbers.
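# Floats fall through to repeated subtraction, e.g. gcd_of(2.5, 1.0) #=> 0.5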
def gcd_of(a, a), do: a
def gcd_of(a, 0), do: a
def gcd_of(0, b), do: b
def gcd_of(a, b) when is_integer(a) and is_integer(b), do: Integer.gcd(a, b)
def gcd_of(a, b) when a < 0 and b < 0, do: -gcd_of(-a, -b)
def gcd_of(a, b) when a < 0, do: gcd_of(-a, b)
def gcd_of(a, b) when b < 0, do: gcd_of(a, -b)
def gcd_of(a, b) when a > b, do: gcd_of(a - b, b)
def gcd_of(a, b) when a < b, do: gcd_of(a, b - a)
# LCM
def lcm_of(a, b), do: div(abs(a * b), gcd_of(a, b))
def euler_totient(value), do: euler_totient(value, value, 0)
def euler_totient(_, 0, acc), do: acc
def euler_totient(value, index, acc) do
if gcd_of(value, index) == 1 do
euler_totient(value, index - 1, acc + 1)
else
euler_totient(value, index - 1, acc)
end
end
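# Lazily emits the decimal digits of the value whose (generalized) continued
# fraction coefficients are produced by the functions `a` and `b`. For example,
# the golden ratio has continued fraction [1; 1, 1, ...]:
#
#   continued_fraction(fn _ -> 1 end, fn _ -> 1 end) |> Enum.take(4)
#   #=> [1, 6, 1, 8]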
def continued_fraction(a, b) do
Stream.resource(
fn -> {1, a.(0), 1, a.(1) * a.(0) + b.(1), a.(1)} end,
fn {k, p0, q0, p1, q1} ->
{current_digit, k_new, p0_new, q0_new, p1_new, q1_new} = next_fraction_digit(a, b, k, p0, q0, p1, q1)
{[current_digit], {k_new, p0_new, q0_new, p1_new, q1_new}} end,
fn _ -> nil end)
end
defp next_fraction_digit(a, b, k, p0, q0, p1, q1) do
case {{div(p0, q0), mod(p0, q0)}, {div(p1, q1), mod(p1, q1)}} do
{{x, r0}, {x, r1}} -> {x, k, 10 * r0, q0, 10 * r1, q1}
_ ->
k = k + 1
x = a.(k)
y = b.(k)
next_fraction_digit(a, b, k, p1, q1, x * p1 + y * p0, x * q1 + y * q0)
end
end
def arithmetic_mean(list), do: arithmetic_mean(Enum.to_list(list), 0, 0)
def arithmetic_mean([], sum, index), do: sum / index
def arithmetic_mean([head | remaining], _, _) when Functions.is_iterable(head), do: [head | remaining] |> Stream.map(&arithmetic_mean/1)
def arithmetic_mean([head | remaining], sum, index), do: arithmetic_mean(remaining, sum + Functions.to_number(head), index + 1)
@doc """
Tail call optimized version of the Fibonacci sequence
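The sequence is 0-indexed:
```elixir
fibonacci(10) #=> 55
```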
"""
def fibonacci(0), do: 0
def fibonacci(1), do: 1
def fibonacci(index) when index > 1, do: fibonacci(index, 0, 1)
defp fibonacci(0, a, _), do: a
defp fibonacci(index, a, b), do: fibonacci(index - 1, b, a + b)
@doc """
Tail call optimized version of the Lucas sequence.
"""
def lucas(0), do: 2
def lucas(1), do: 1
def lucas(index) when index > 1, do: lucas(index, 2, 1)
defp lucas(0, a, _), do: a
defp lucas(index, a, b), do: lucas(index - 1, b, a + b)
@roman_number_list [
[1000, "M"],
[900, "CM"],
[500, "D"],
[400, "CD"],
[100, "C"],
[90, "XC"],
[50, "L"],
[40, "XL"],
[10, "X"],
[9, "IX"],
[5, "V"],
[4, "IV"],
[1, "I"]
]
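# Greedy conversion examples:
#   to_roman_numeral(1994)        #=> "MCMXCIV"
#   from_roman_numeral("MCMXCIV") #=> 1994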
def to_roman_numeral(number), do: to_roman_numeral(number, "")
defp to_roman_numeral(0, parsed), do: parsed
defp to_roman_numeral(number, parsed) do
[curr_number, roman] = Enum.find(@roman_number_list, fn [x, _] -> number >= x end)
to_roman_numeral(number - curr_number, parsed <> roman)
end
def from_roman_numeral(roman), do: from_roman_numeral(roman, 0)
defp from_roman_numeral("", number), do: number
defp from_roman_numeral(roman, number) do
[curr_number, curr_roman] = Enum.find(@roman_number_list, fn [_, y] -> String.starts_with?(roman, y) end)
from_roman_numeral(roman |> String.slice(String.length(curr_roman)..-1), number + curr_number)
end
def median([]), do: []
def median(list) do
len = length(list)
mid = div(len, 2)
sorted = Enum.sort(Functions.to_number(list))
cond do
rem(len, 2) == 0 -> (Enum.at(sorted, mid - 1) + Enum.at(sorted, mid)) / 2
true -> Enum.at(sorted, mid)
end
end
end
|
lib/commands/int_commands.ex
| 0.684686
| 0.582699
|
int_commands.ex
|
starcoder
|
defmodule Pigpiox.Waveform do
use Bitwise
@moduledoc """
Build and send waveforms with pigpiod.
"""
@doc """
Clears all waveforms and any data added.
"""
@spec clear_all() :: :ok | {:error, atom}
def clear_all() do
case Pigpiox.Socket.command(:waveform_clear_all) do
{:ok, _} -> :ok
error -> error
end
end
defmodule Pulse do
@moduledoc false
defstruct gpio_on: 0, gpio_off: 0, delay: 0
end
defmodule ChainElement do
@moduledoc false
defstruct content: [], repeat: 1
end
@typedoc """
A pulse used in constructing a waveform. Specifies the GPIO that should be turned on, the GPIO that should be turned off,
and the delay before the next pulse.
At least one field is required to be set.
"""
@type pulse :: %Pulse{}
@type chain_element :: %ChainElement{}
@doc """
Adds a list of pulses to the current waveform
Returns the new total number of pulses in the waveform or an error.
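For example, to pulse a GPIO high and then low (pin and delay values are illustrative):
```elixir
pulses = [
%Pigpiox.Waveform.Pulse{gpio_on: 17, delay: 1000},
%Pigpiox.Waveform.Pulse{gpio_off: 17, delay: 1000}
]
Pigpiox.Waveform.add_generic(pulses)
```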
"""
@spec add_generic(pulses :: list(pulse)) :: {:ok, non_neg_integer} | {:error, atom}
def add_generic(pulses) do
extents = Enum.flat_map pulses, fn pulse ->
[mask(pulse.gpio_on), mask(pulse.gpio_off), pulse.delay]
end
Pigpiox.Socket.command(:waveform_add_generic, 0, 0, extents)
end
@doc """
Transmits a chain of waveforms. The chain is given as a `ChainElement` whose
content may mix wave ids and nested chain elements.
Returns the command result or an error.
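A sketch repeating a previously created waveform three times:
```elixir
{:ok, wave_id} = Pigpiox.Waveform.create()
Pigpiox.Waveform.chain(%Pigpiox.Waveform.ChainElement{content: [wave_id], repeat: 3})
```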
"""
@spec chain(chain_element :: chain_element) :: {:ok, 0} | {:error, atom}
def chain(chain_element) do
extents = chain_elements_to_list(chain_element)
Pigpiox.Socket.command(:waveform_chain, 0, 0, extents, 8)
end
defp chain_elements_to_list(wave_id) when is_integer(wave_id), do: [wave_id]
defp chain_elements_to_list(%ChainElement{content: content, repeat: repeat}) do
content = Enum.flat_map(content, &chain_elements_to_list/1)
[255, 0] ++ content ++ chain_element_repeat_to_list(repeat)
end
defp chain_element_repeat_to_list(:forever), do: [255, 3]
defp chain_element_repeat_to_list(repeat) do
[255, 1] ++ :erlang.binary_to_list(<<repeat::little-unsigned-integer-size(16)>>)
end
@doc """
Creates a waveform based on previous calls to `add_...`
Returns the id of the newly created waveform or an error
"""
@spec create() :: {:ok, non_neg_integer} | {:error, atom}
def create() do
Pigpiox.Socket.command(:waveform_create)
end
@doc """
Deletes a previously added waveform.
"""
@spec delete(non_neg_integer) :: :ok | {:error, atom}
def delete(wave_id) do
case Pigpiox.Socket.command(:waveform_delete, wave_id) do
{:ok, _} -> :ok
error -> error
end
end
@doc """
Returns the id of the currently transmitted waveform.
"""
@spec current() :: {:ok, non_neg_integer} | {:error, atom}
def current() do
Pigpiox.Socket.command(:waveform_current)
end
@doc """
Returns whether or not a waveform is currently being transmitted.
"""
@spec busy?() :: {:ok, boolean} | {:error, atom}
def busy?() do
case Pigpiox.Socket.command(:waveform_busy) do
{:ok, 1} -> {:ok, true}
{:ok, _} -> {:ok, false}
error -> error
end
end
@doc """
Stops a waveform that is currently being transmitted.
"""
@spec stop() :: :ok | {:error, atom}
def stop() do
case Pigpiox.Socket.command(:waveform_stop) do
{:ok, _} -> :ok
error -> error
end
end
@doc """
Sends a waveform once, by its id.
Returns the number of DMA control blocks used in the waveform.
"""
@spec send(non_neg_integer) :: {:ok, non_neg_integer} | {:error, atom}
def send(wave_id) do
Pigpiox.Socket.command(:waveform_transmit_once, wave_id)
end
@doc """
Starts a repeating waveform, by its id.
Returns the number of DMA control blocks used in the waveform.
"""
@spec repeat(non_neg_integer) :: {:ok, non_neg_integer} | {:error, atom}
def repeat(wave_id) do
Pigpiox.Socket.command(:waveform_transmit_repeat, wave_id)
end
@doc """
Returns the length in microseconds of the current waveform.
"""
@spec get_micros() :: {:ok, non_neg_integer} | {:error, atom}
def get_micros() do
Pigpiox.Socket.command(:waveform_get_micros)
end
@doc """
Returns the maximum possible size of a waveform in microseconds.
"""
@spec get_max_micros() :: {:ok, non_neg_integer} | {:error, atom}
def get_max_micros() do
Pigpiox.Socket.command(:waveform_get_micros, 2)
end
@doc """
Returns the length in pulses of the current waveform.
"""
@spec get_pulses() :: {:ok, non_neg_integer} | {:error, atom}
def get_pulses() do
Pigpiox.Socket.command(:waveform_get_pulses)
end
@doc """
Returns the maximum possible size of a waveform in pulses.
"""
@spec get_max_pulses() :: {:ok, non_neg_integer} | {:error, atom}
def get_max_pulses() do
Pigpiox.Socket.command(:waveform_get_pulses, 2)
end
@doc """
Returns the length in DMA control blocks of the current waveform.
"""
@spec get_cbs() :: {:ok, non_neg_integer} | {:error, atom}
def get_cbs() do
Pigpiox.Socket.command(:waveform_get_cbs)
end
@doc """
Returns the maximum possible size of a waveform in DMA control blocks.
"""
@spec get_max_cbs() :: {:ok, non_neg_integer} | {:error, atom}
def get_max_cbs() do
Pigpiox.Socket.command(:waveform_get_cbs, 2)
end
@spec mask(non_neg_integer) :: non_neg_integer
defp mask(0), do: 0
defp mask(gpio) do
1 <<< gpio
end
end
|
lib/pigpiox/waveform.ex
| 0.870982
| 0.475118
|
waveform.ex
|
starcoder
|
defmodule MssqlexV3.Type do
@moduledoc """
Type conversions.
"""
@typedoc "Input param."
@type param ::
bitstring()
| number()
| date()
| time()
| datetime()
| Decimal.t()
@typedoc "Output value."
@type return_value ::
bitstring()
| integer()
| date()
| datetime()
| Decimal.t()
@typedoc "Date as `{year, month, day}`"
@type date :: {1..9_999, 1..12, 1..31}
@typedoc "Time as `{hour, minute, sec, usec}`"
@type time :: {0..24, 0..60, 0..60, 0..999_999}
@typedoc "Datetime"
@type datetime :: {date(), time()}
@doc """
Transforms input params into `:odbc` params.
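## Examples
```elixir
encode(true, []) #=> {:sql_bit, [true]}
encode(42, [])   #=> {:sql_integer, [42]}
```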
"""
@spec encode(value :: param(), opts :: Keyword.t()) ::
{:odbc.odbc_data_type(), [:odbc.value()]}
def encode(value, _) when is_boolean(value) do
{:sql_bit, [value]}
end
def encode({_year, _month, _day} = date, _) do
encoded =
Date.from_erl!(date)
|> to_string
|> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode({hour, minute, sec, usec}, _) do
precision = if usec == 0, do: 0, else: 6
encoded =
Time.from_erl!({hour, minute, sec}, {usec, precision})
|> to_string
|> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode({{year, month, day}, {hour, minute, sec, usec}}, _) do
precision = if usec == 0, do: 0, else: 2
encoded =
NaiveDateTime.from_erl!(
{{year, month, day}, {hour, minute, sec}},
{usec, precision}
)
|> to_string
|> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode(%NaiveDateTime{} = datetime, _) do
encoded =
datetime
|> to_string
|> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode(value, _)
when is_integer(value) and value > -1_000_000_000 and
value < 1_000_000_000 do
{:sql_integer, [value]}
end
def encode(value, _) when is_integer(value) do
encoded =
value |> to_string |> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode(value, _) when is_float(value) do
encoded =
value |> to_string |> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode(%Decimal{} = value, _) do
encoded =
value |> to_string |> :unicode.characters_to_binary(:unicode, :latin1)
{{:sql_varchar, String.length(encoded)}, [encoded]}
end
def encode(value, _) when is_binary(value) do
with utf16 when is_bitstring(utf16) <-
:unicode.characters_to_binary(value, :unicode, {:utf16, :little}) do
{{:sql_wvarchar, byte_size(value)}, [utf16]}
else
_ ->
raise MssqlexV3.Error.exception(message: "failed to convert string to UTF16LE")
end
end
def encode(nil, _) do
{:sql_integer, [:null]}
end
def encode(value, _) do
raise MssqlexV3.Error.exception(message: "could not parse param #{inspect(value)} of unrecognised type.")
end
@doc """
Transforms `:odbc` return values to Elixir representations.
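## Examples
```elixir
decode(:null, [])   #=> nil
decode('hello', []) #=> "hello"
```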
"""
@spec decode(:odbc.value(), opts :: Keyword.t()) :: return_value()
def decode(value, _) when is_float(value) do
Decimal.from_float(value)
end
def decode(value, opts) when is_binary(value) do
if opts[:preserve_encoding] || String.printable?(value) do
value
else
:unicode.characters_to_binary(value, {:utf16, :little}, :unicode)
end
end
def decode(value, _) when is_list(value) do
to_string(value)
end
def decode(:null, _) do
nil
end
def decode({date, {h, m, s}}, _) do
{date, {h, m, s, 0}}
end
def decode(value, _) do
value
end
end
|
lib/mssqlex_v3/type.ex
| 0.857291
| 0.481515
|
type.ex
|
starcoder
|
defmodule Scenic.Primitive.Transform do
@moduledoc """
Change the position, rotation, scale and more of a primitive.
Unlike HTML, which uses auto-layout to position items on the screen, Scenic moves primitives around using matrix transforms. This is common in video games and provides powerful control of your primitives.
A [matrix](https://en.wikipedia.org/wiki/Matrix_(mathematics)) is an array of numbers that can be used to change the positions, rotations, scale and more of locations.
**Don't worry!** You will not need to look at any matrices unless you want to get fancy. In Scenic, you will rarely (if ever) create matrices on your own (you can if you know what you are doing!), and will instead use the transform helpers.
Multiple transforms can be applied to any primitive. Transforms combine down the graph to create a very flexible way to manage your scene.
There are a fixed set of transform helpers that create matrices for you.
* [`Matrix`](Scenic.Primitive.Transform.Matrix.html) hand specify a matrix.
* [`Pin`](Scenic.Primitive.Transform.Pin.html) set a pin to rotate or scale around. Most primitives define a sensible default pin.
* [`Rotate`](Scenic.Primitive.Transform.Rotate.html) rotate around the pin.
* [`Scale`](Scenic.Primitive.Transform.Scale.html) scale larger or smaller. Centered around the pin.
* [`Translate`](Scenic.Primitive.Transform.Translate.html) move/translate horizontally and vertically.
### Specifying Transforms
You apply transforms to a primitive the same way you specify styles.
graph =
Graph.build
|> circle( 100, fill: {:color, :green}, translate: {200, 200} )
|> ellipse( {40, 60}, fill: {:color, :red}, rotate: 0.4, translate: {100, 100} )
Don't worry about the order you apply transforms to a single object. Scenic will multiply them together in the correct way when it comes time to render them.
"""
alias Scenic.Math.Matrix
alias Scenic.Math.Vector2
alias Scenic.Primitive.Transform
@callback info(data :: any) :: bitstring
@callback verify(any) :: boolean
# ===========================================================================
defmodule FormatError do
@moduledoc false
defexception message: nil, module: nil, data: nil
end
@style_name_map %{
:pin => Transform.Pin,
:scale => Transform.Scale,
:rotate => Transform.Rotate,
:translate => Transform.Translate,
:matrix => Transform.Matrix
}
# ===========================================================================
# defmacro __using__([type_code: type_code]) when is_integer(type_code) do
defmacro __using__(_opts) do
quote do
@behaviour Scenic.Primitive.Transform
@doc false
def verify!(data) do
case verify(data) do
true ->
data
false ->
raise FormatError, message: info(data), module: __MODULE__, data: data
end
end
end
# quote
end
# ===========================================================================
@doc false
def verify!(tx_key, tx_data) do
case Map.get(@style_name_map, tx_key) do
nil -> raise FormatError, message: "Unknown transform", module: tx_key, data: tx_data
module -> module.verify!(tx_data)
end
end
# ============================================================================
# transform helper functions
# --------------------------------------------------------
@doc """
Given a Map describing the transforms on a primitive, calculate the combined matrix
that should be applied.
This is trickier than just multiplying them together. Rotations, translations and scale,
need to be done in the right order, which is why this function is provided.
You will not normally need to use this function. It is used internally by the input system.
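For example, a pinned rotation combined with a translation (the result is a
`Scenic.Math.Matrix` combining the transforms):
```elixir
calculate_local(%{translate: {10, 20}, rotate: 1.0, pin: {5, 5}})
```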
"""
def calculate_local(txs)
def calculate_local(nil), do: nil
def calculate_local(txs) when txs == %{}, do: nil
def calculate_local(%{pin: _} = txs) do
# look for case where only the pin is set
case Enum.count(txs) do
1 -> nil
_ -> do_calculate_local(txs)
end
end
def calculate_local(txs), do: do_calculate_local(txs)
defp do_calculate_local(txs) do
# start with identity - which is like multiplying by 1
Matrix.identity()
|> multiply_partial(:matrix, txs[:matrix])
|> multiply_partial(:translate, txs[:translate])
|> rotate_and_scale(txs)
end
# --------------------------------------------------------
defp multiply_partial(mx, type, value)
defp multiply_partial(mx, _, nil), do: mx
defp multiply_partial(mx, :pin, point), do: Matrix.translate(mx, point)
defp multiply_partial(mx, :scale, pct), do: Matrix.scale(mx, pct)
defp multiply_partial(mx, :rotate, rot), do: Matrix.rotate(mx, rot)
defp multiply_partial(mx, :translate, trns), do: Matrix.translate(mx, trns)
defp multiply_partial(mx, :matrix, dev_mx), do: Matrix.mul(mx, dev_mx)
defp multiply_partial(mx, :inv_pin, point) do
Matrix.translate(mx, Vector2.invert(point))
end
# --------------------------------------------------------
defp rotate_and_scale(mx, txs) do
# don't do any work if neither rotate nor scale is set
# don't need to translate twice for no reason
case txs[:rotate] || txs[:scale] do
nil ->
mx
_ ->
mx
|> multiply_partial(:pin, txs[:pin])
|> multiply_partial(:rotate, txs[:rotate])
|> multiply_partial(:scale, txs[:scale])
|> multiply_partial(:inv_pin, txs[:pin])
end
end
end
|
lib/scenic/primitive/transform/transform.ex
| 0.910466
| 0.854945
|
transform.ex
|
starcoder
|
defmodule ExSDP do
@moduledoc """
This module represents the SDP Session.
Its fields directly correspond to those defined in
[RFC4566](https://tools.ietf.org/html/rfc4566#section-5)
"""
use Bunch.Access
@enforce_keys [:origin]
@optional_keys [
:email,
:encryption,
:uri,
:phone_number,
:session_information,
:timing,
:time_zones_adjustments,
:connection_data,
attributes: [],
bandwidth: [],
media: [],
time_repeats: []
]
defstruct [
version: 0,
session_name: "-"
] ++ @enforce_keys ++ @optional_keys
alias ExSDP.{
Attribute,
Address,
Bandwidth,
ConnectionData,
Encryption,
Media,
Origin,
Parser,
RepeatTimes,
Serializer,
Timezone,
Timing
}
@type t :: %__MODULE__{
version: non_neg_integer(),
origin: Origin.t(),
session_name: binary(),
session_information: binary() | nil,
uri: binary() | nil,
email: binary() | nil,
phone_number: binary() | nil,
connection_data: ConnectionData.t() | nil,
bandwidth: [Bandwidth.t()],
time_zones_adjustments: Timezone.t() | nil,
encryption: Encryption.t() | nil,
attributes: [Attribute.t()],
timing: Timing.t() | nil,
time_repeats: [RepeatTimes.t()],
media: [Media.t()]
}
defdelegate parse(text), to: Parser
defdelegate parse!(text), to: Parser
@doc """
Returns a new SDP struct.
By default:
* `version` is `0`
* `username`, `session_id`, `session_version` and `address` - refer to `Origin.new/1`
* `session_name` is `-`
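## Example
```elixir
sdp = ExSDP.new(session_name: "stream")
sdp.session_name #=> "stream"
```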
"""
@spec new(
version: non_neg_integer(),
username: binary(),
session_id: integer(),
session_version: integer(),
address: Address.t(),
session_name: binary()
) :: t()
def new(opts \\ []) do
{version, opts} = Keyword.pop(opts, :version, 0)
{session_name, opts} = Keyword.pop(opts, :session_name, "-")
%__MODULE__{
version: version,
origin: Origin.new(opts),
session_name: session_name
}
end
@spec add_media(sdp :: t(), media :: Media.t() | [Media.t()]) :: t()
def add_media(sdp, media), do: Map.update!(sdp, :media, &(&1 ++ Bunch.listify(media)))
@spec add_attribute(sdp :: t(), attribute :: Attribute.t()) :: t()
def add_attribute(sdp, attribute), do: add_attributes(sdp, [attribute])
@spec add_attributes(sdp :: t(), attributes :: [Attribute.t()]) :: t()
def add_attributes(sdp, attributes) when is_list(attributes),
do: Map.update!(sdp, :attributes, &(&1 ++ attributes))
end
defimpl String.Chars, for: ExSDP do
def to_string(session) do
import ExSDP.Sigil
alias ExSDP.Serializer
~n"""
v=#{session.version}
o=#{session.origin}
s=#{session.session_name}
#{Serializer.maybe_serialize("i", session.session_information)}
#{Serializer.maybe_serialize("u", session.uri)}
#{Serializer.maybe_serialize("e", session.email)}
#{Serializer.maybe_serialize("p", session.phone_number)}
#{Serializer.maybe_serialize("c", session.connection_data)}
#{Serializer.maybe_serialize("b", session.bandwidth)}
#{Serializer.maybe_serialize("t", session.timing)}
#{Serializer.maybe_serialize("r", session.time_repeats)}
#{Serializer.maybe_serialize("z", session.time_zones_adjustments)}
#{Serializer.maybe_serialize("k", session.encryption)}
#{Serializer.maybe_serialize("a", session.attributes)}
#{Serializer.maybe_serialize("m", session.media)}
"""
end
end
|
lib/ex_sdp.ex
| 0.754373
| 0.50708
|
ex_sdp.ex
|
starcoder
|
defmodule Static.NestedSet do
@moduledoc """
Represents a tree of folders and sites as a nested set, assigning left/right
(`lnum`/`rnum`) bounds to each node.
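A minimal sketch (assuming `Static.Folder` and `Static.Site` require no other
fields to be set):
```elixir
folder = %Static.Folder{sites: [%Static.Site{}, %Static.Site{}]}
Static.NestedSet.populate_lnum_rnum(folder)
#=> %Static.Folder{lnum: 1, rnum: 6,
#     sites: [%Static.Site{lnum: 2, rnum: 3}, %Static.Site{lnum: 4, rnum: 5}]}
```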
"""
alias Static.Folder
alias Static.Site
def populate_lnum_rnum(folder_or_site, lnum \\ 1)
def populate_lnum_rnum(%Site{} = site, lnum) do
%Site{site | lnum: lnum, rnum: lnum + 1}
end
def populate_lnum_rnum(%Folder{sites: sites} = folder, lnum) do
%{last: %{rnum: last_rnum}, values: updated_sites} =
sites
|> Enum.reduce(%{last: nil, values: []}, fn e, %{last: last} = acc ->
next =
case last do
nil ->
populate_lnum_rnum(e, lnum + 1)
%{rnum: last_rnum} ->
populate_lnum_rnum(e, last_rnum + 1)
end
%{last: next, values: acc.values ++ [next]}
end)
%Folder{folder | lnum: lnum, rnum: last_rnum + 1, sites: updated_sites}
end
def flatten(%Folder{lnum: lnum, rnum: rnum, sites: sites}) do
found_sites =
sites
|> Enum.filter(fn x -> Kernel.is_struct(x, Site) end)
# Add the first site of the folder to the list.
# Its lnum and rnum values are replaced with the folder's so that
# breadcrumbs keep working.
|> Kernel.++([
%Site{(sites |> List.first()) | lnum: lnum, rnum: rnum, should_generate_teasers: true}
])
# Because we re-added the first site with new lnum/rnum values,
# we have to remove the "old" duplicate one.
|> List.delete_at(0)
sites
|> Enum.filter(fn x -> Kernel.is_struct(x, Folder) end)
|> Enum.map(&flatten/1)
|> List.flatten()
|> Kernel.++(found_sites)
|> List.flatten()
|> Enum.sort_by(fn %Site{lnum: lnum} -> lnum end)
end
def flattened_tree(folder, lnum \\ 1) do
populate_lnum_rnum(folder, lnum)
|> flatten()
end
def breadcrumb(sites, %Site{content_filename: content_filename}) do
case sites
|> Enum.find(fn %Site{content_filename: possible_content_filename} ->
possible_content_filename == content_filename
end) do
nil ->
[]
%Site{lnum: lnum, rnum: rnum} ->
sites
|> Enum.filter(fn %Site{lnum: lnum_to_compare, rnum: rnum_to_compare} ->
lnum_to_compare <= lnum and rnum_to_compare >= rnum
end)
end
end
end
|
lib/nested_set.ex
| 0.675444
| 0.595904
|
nested_set.ex
|
starcoder
|
defmodule Plug.CGI do
@moduledoc """
Primary public module for `plug_cgi`.
See [`run/1`](https://hexdocs.pm/plug_cgi/Plug.CGI.html#run/2)
below for the main entrypoint of `plug_cgi`.
This module can also be used in a supervisor since it provides a `child_spec/1`
```
children = [
{Plug.CGI, plug: MyPlug, log_device: :stdio}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
```
"""
def child_spec(args) do
%{
id: Plug.CGI,
start: {Plug.CGI, :start_link, [args]}
}
end
def start_link(args) do
plug = Keyword.get(args, :plug, Plug.Debugger)
args = Keyword.delete(args, :plug)
run(plug, args)
end
@doc """
The entrypoint of `Plug.CGI`, called to start the Plug chain starting with `plug`.
The `log_device` option sets the default device for
[`Logger.Backends.Console`](https://hexdocs.pm/logger/Logger.Backends.Console.html),
defaults to `:standard_error`.
The `output_device` option sets the default output for the CGI response,
defaults to `:stdio`.
The `opts` argument is also passed along to
[`Plug.init/1`](https://hexdocs.pm/plug/Plug.html#c:init/1) for the given `plug`
and `call_opts` option will be passed into
[`Plug.call/2`](https://hexdocs.pm/plug/Plug.html#c:call/2) for the given `plug`.
## Example
```
defmodule MyPlug do
import Plug.Conn
def init(options), do: options
def call(conn, _opts) do
conn
|> put_resp_content_type("text/plain")
|> send_resp(200, "Hello world")
end
end
Plug.CGI.run MyPlug
```
"""
@spec run(atom(), Keyword.t()) :: Plug.Conn.t()
def run(plug, opts \\ []) when is_atom(plug) do
Application.ensure_started(Logger)
log_device = Keyword.get(opts, :log_device, :standard_error)
console = Application.fetch_env!(:logger, :console)
Application.put_env(:logger, :console, Keyword.merge([device: log_device], console))
conn =
System.get_env()
|> Plug.CGI.Conn.conn(opts)
plug.init(opts)
plug.call(conn, Keyword.get(opts, :call_opts))
end
end
|
lib/plug/cgi.ex
| 0.716119
| 0.738457
|
cgi.ex
|
starcoder
|
defmodule ExDebugger.Manual do
@moduledoc """
`use` `ExDebugger.Manual` provides convenience macros that users can
employ at strategic places in their code base when they feel that the
default behaviour of `use ExDebugger` is not sufficient.
Turning functionality on/off is managed by: `#{Documentation.debug_options_path()}`,
similarly to `use ExDebugger`
The following macros are provided:
* `dd/2`: First argument denotes the piped_value and second argument a
specific label for the very same reason you would:
```elixir
value
|> IO.inspect(label: :some_label)
```
* `dd/3`: Same as `dd/2`, with an extra argument to force output. This allows
users to leave the settings under `#{Documentation.debug_options_path()}` unmolested while
quickly checking something.
The benefit of these being macros is that all the strategic places in which users employ debugging statements can remain
in place without this taking an extra toll on when they need to deploy to production. By switching off all the relevant
settings under: `#{Documentation.debug_options_path()}`, occurrences of these macros are compile time
replaced by their value without any side effects.
"""
def __dd__(piped_value, label, module, force_output?) do
ex_debugger_opts = ExDebugger.Options.extract(:manual_debug, module)
if ex_debugger_opts == :no_debug_options_file_set do
quote do
unquote(piped_value)
end
else
if ex_debugger_opts.global_output || force_output? || ex_debugger_opts.default_output do
quote location: :keep do
unquote(piped_value)
|> ExDebugger.Event.new(unquote(label), binding(), __ENV__)
|> ExDebugger.Event.cast(unquote(Macro.escape(ex_debugger_opts)).capture_medium,
warn: unquote(Macro.escape(ex_debugger_opts)).warn
)
end
else
if ex_debugger_opts.warn do
quote do
Logger.warn("Manual Debugger output silenced for: #{__MODULE__}")
unquote(piped_value)
end
else
quote do
unquote(piped_value)
end
end
end
end
end
defmacro __using__(_) do
quote location: :keep do
if Application.get_env(:ex_debugger, :debug_options_file) do
@external_resource Application.get_env(:ex_debugger, :debug_options_file)
require Logger
end
@spec dd(any(), atom(), boolean()) :: any()
@doc false
defmacro dd(piped_value, label, force_output?) do
ExDebugger.Manual.__dd__(piped_value, label, __MODULE__, force_output?)
end
@spec dd(any(), atom()) :: any()
@doc false
defmacro dd(piped_value, label) do
ExDebugger.Manual.__dd__(piped_value, label, __MODULE__, false)
end
@spec dd(atom()) :: any()
@doc false
defmacro dd(label) do
ExDebugger.Manual.__dd__(nil, label, __MODULE__, false)
end
@spec dd :: any()
@doc false
defmacro dd do
ExDebugger.Manual.__dd__(nil, nil, __MODULE__, false)
end
end
end
end
|
lib/ex_debugger/manual.ex
| 0.768603
| 0.759382
|
manual.ex
|
starcoder
|
defmodule AshGraphql.Resource do
alias Ash.Changeset.ManagedRelationshipHelpers
alias Ash.Dsl.Extension
alias Ash.Query.Aggregate
alias AshGraphql.Resource
alias AshGraphql.Resource.{ManagedRelationship, Mutation, Query}
@get %Ash.Dsl.Entity{
name: :get,
args: [:name, :action],
describe: "A query to fetch a record by primary key",
examples: [
"get :get_post, :read"
],
schema: Query.get_schema(),
target: Query,
auto_set_fields: [
type: :get
]
}
@read_one %Ash.Dsl.Entity{
name: :read_one,
args: [:name, :action],
describe: "A query to fetch a record",
examples: [
"read_one :current_user, :current_user"
],
schema: Query.read_one_schema(),
target: Query,
auto_set_fields: [
type: :read_one
]
}
@list %Ash.Dsl.Entity{
name: :list,
schema: Query.list_schema(),
args: [:name, :action],
describe: "A query to fetch a list of records",
examples: [
"list :list_posts, :read"
],
target: Query,
auto_set_fields: [
type: :list
]
}
@create %Ash.Dsl.Entity{
name: :create,
schema: Mutation.create_schema(),
args: [:name, :action],
describe: "A mutation to create a record",
examples: [
"create :create_post, :create"
],
target: Mutation,
auto_set_fields: [
type: :create
]
}
@update %Ash.Dsl.Entity{
name: :update,
schema: Mutation.update_schema(),
args: [:name, :action],
describe: "A mutation to update a record",
examples: [
"update :update_post, :update"
],
target: Mutation,
auto_set_fields: [
type: :update
]
}
@destroy %Ash.Dsl.Entity{
name: :destroy,
schema: Mutation.destroy_schema(),
args: [:name, :action],
describe: "A mutation to destroy a record",
examples: [
"destroy :destroy_post, :destroy"
],
target: Mutation,
auto_set_fields: [
type: :destroy
]
}
@queries %Ash.Dsl.Section{
name: :queries,
describe: """
Queries (read actions) to expose for the resource.
""",
examples: [
"""
queries do
get :get_post, :read
read_one :current_user, :current_user
list :list_posts, :read
end
"""
],
entities: [
@get,
@read_one,
@list
]
}
@managed_relationship %Ash.Dsl.Entity{
name: :managed_relationship,
schema: ManagedRelationship.schema(),
args: [:action, :argument],
target: ManagedRelationship,
describe: """
Instructs ash_graphql that a given argument with a `manage_relationship` change should have its input objects derived automatically from the potential actions to be called.
For example, given an action like:
```elixir
actions do
create :create do
argument :comments, {:array, :map}
change manage_relationship(:comments, type: :direct_control) # <- we look for this change with a matching argument name
end
end
```
You could add the following managed_relationship
```elixir
graphql do
...
managed_relationships do
managed_relationship :create_post, :comments
end
end
```
By default, the `{:array, :map}` would simply be a `json[]` type. If the argument name
is placed in this list, all of the potential actions that could be called will be combined
into a single input object. If there are type conflicts (for example, if the input could create
or update a record, and the create and update actions have an argument of the same name but with a different type),
a warning is emitted at compile time and the first one is used. If that is insufficient, you will need to do one of the following:
1.) provide the `:types` option to the `managed_relationship` constructor (see that option for more)
2.) define a custom type, with a custom input object (see the custom types guide), and use that custom type instead of `:map`
3.) change your actions to not have overlapping inputs with different types
"""
}
@managed_relationships %Ash.Dsl.Section{
name: :managed_relationships,
describe: """
Generates input objects for `manage_relationship` arguments on resource actions.
""",
examples: [
"""
managed_relationships do
managed_relationship :create_post, :comments
end
"""
],
entities: [
@managed_relationship
]
}
@mutations %Ash.Dsl.Section{
name: :mutations,
describe: """
Mutations (create/update/destroy actions) to expose for the resource.
""",
examples: [
"""
mutations do
create :create_post, :create
update :update_post, :update
destroy :destroy_post, :destroy
end
"""
],
entities: [
@create,
@update,
@destroy
]
}
@graphql %Ash.Dsl.Section{
name: :graphql,
describe: """
Configuration for a given resource in graphql
""",
examples: [
"""
graphql do
type :post
queries do
get :get_post, :read
list :list_posts, :read
end
mutations do
create :create_post, :create
update :update_post, :update
destroy :destroy_post, :destroy
end
end
"""
],
schema: [
type: [
type: :atom,
required: true,
doc: "The type to use for this entity in the graphql schema"
],
primary_key_delimiter: [
type: :string,
doc:
"If a composite primary key exists, this must be set to determine the `id` field value"
],
depth_limit: [
type: :integer,
doc: """
A simple way to prevent massive queries.
"""
],
relay?: [
type: :boolean,
default: false,
doc: """
NOT YET SUPPORTED
If true, the graphql queries/resolvers for this resource will be built to honor the [relay specification](https://relay.dev/graphql/connections.htm).
The two changes that are made currently are:
* the type for the resource will implement the `Node` interface
* pagination over that resource will behave as a Connection.
"""
]
],
sections: [
@queries,
@mutations,
@managed_relationships
]
}
@transformers [
AshGraphql.Resource.Transformers.RequireIdPkey,
AshGraphql.Resource.Transformers.ValidateActions
]
@sections [@graphql]
@moduledoc """
This Ash resource extension adds configuration for exposing a resource in a graphql.
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Extension, sections: @sections, transformers: @transformers
def queries(resource) do
Extension.get_entities(resource, [:graphql, :queries])
end
def mutations(resource) do
Extension.get_entities(resource, [:graphql, :mutations]) || []
end
def managed_relationships(resource) do
Extension.get_entities(resource, [:graphql, :managed_relationships]) || []
end
def type(resource) do
Extension.get_opt(resource, [:graphql], :type, nil)
end
def relay?(resource) do
Extension.get_opt(resource, [:graphql], :relay?, nil)
end
def primary_key_delimiter(resource) do
Extension.get_opt(resource, [:graphql], :primary_key_delimiter, nil)
end
def ref(env) do
%{module: __MODULE__, location: %{file: env.file, line: env.line}}
end
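# Encodes a record's primary key for use as the graphql `id`. A single-field
# key is returned as-is; composite keys are joined with the configured
# `primary_key_delimiter`. For a hypothetical resource with keys [:id, :tenant]
# and delimiter "-":
#
#   encode_primary_key(%Post{id: 1, tenant: "a"}) #=> "1-a"
#   decode_primary_key(Post, "1-a")               #=> {:ok, [id: "1", tenant: "a"]}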
def encode_primary_key(%resource{} = record) do
case Ash.Resource.Info.primary_key(resource) do
[field] ->
Map.get(record, field)
keys ->
delimiter = primary_key_delimiter(resource)
[_ | concatenated_keys] =
keys
|> Enum.reverse()
|> Enum.reduce([], fn key, acc -> [delimiter, to_string(Map.get(record, key)), acc] end)
IO.iodata_to_binary(concatenated_keys)
end
end
def decode_primary_key(resource, value) do
case Ash.Resource.Info.primary_key(resource) do
[field] ->
{:ok, [{field, value}]}
fields ->
delimiter = primary_key_delimiter(resource)
parts = String.split(value, delimiter)
if Enum.count(parts) == Enum.count(fields) do
{:ok, Enum.zip(fields, parts)}
else
{:error, "Invalid primary key"}
end
end
end
@doc false
def queries(api, resource, schema, as_mutations? \\ false) do
type = Resource.type(resource)
if type do
resource
|> queries()
|> Enum.filter(&(&1.as_mutation? == as_mutations?))
|> Enum.map(fn query ->
query_action = Ash.Resource.Info.action(resource, query.action, :read)
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: args(query.type, resource, query_action, schema, query.identity),
identifier: query.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve}, {api, resource, query}}
],
complexity: {AshGraphql.Graphql.Resolver, :query_complexity},
module: schema,
name: to_string(query.name),
type: query_type(query, resource, query_action, type),
__reference__: ref(__ENV__)
}
end)
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
@doc false
def mutations(api, resource, schema) do
resource
|> mutations()
|> Enum.map(fn
%{type: :destroy} = mutation ->
action = Ash.Resource.Info.action(resource, mutation.action)
if action.soft? do
update_mutation(resource, schema, mutation, schema, api)
else
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: mutation_args(mutation, resource, schema),
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result"),
__reference__: ref(__ENV__)
}
end
%{type: :create} = mutation ->
action = Ash.Resource.Info.action(resource, mutation.action)
args =
case mutation_fields(
resource,
schema,
action,
mutation.type
) do
[] ->
[]
_ ->
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :input,
module: schema,
name: "input",
placement: :argument_definition,
type: String.to_atom("#{mutation.name}_input")
}
]
end
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: args,
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result"),
__reference__: ref(__ENV__)
}
mutation ->
update_mutation(resource, schema, mutation, schema, api)
end)
|> Enum.concat(queries(api, resource, schema, true))
end
# sobelow_skip ["DOS.StringToAtom"]
defp update_mutation(resource, schema, mutation, schema, api) do
action = Ash.Resource.Info.action(resource, mutation.action)
args =
case mutation_fields(
resource,
schema,
action,
mutation.type
) do
[] ->
mutation_args(mutation, resource, schema)
_ ->
mutation_args(mutation, resource, schema) ++
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :input,
module: schema,
name: "input",
placement: :argument_definition,
type: String.to_atom("#{mutation.name}_input"),
__reference__: ref(__ENV__)
}
]
end
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: args,
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result"),
__reference__: ref(__ENV__)
}
end
defp mutation_args(%{identity: false}, _resource, _schema) do
[]
end
defp mutation_args(%{identity: identity}, resource, _schema) when not is_nil(identity) do
resource
|> Ash.Resource.Info.identities()
|> Enum.find(&(&1.name == identity))
|> Map.get(:keys)
|> Enum.map(fn key ->
attribute = Ash.Resource.Info.attribute(resource, key)
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: to_string(key),
identifier: key,
type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(attribute.type, attribute, resource)
},
description: attribute.description || "",
__reference__: ref(__ENV__)
}
end)
end
defp mutation_args(_, _, schema) do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :id,
module: schema,
name: "id",
placement: :argument_definition,
type: :id,
__reference__: ref(__ENV__)
}
]
end
@doc false
# sobelow_skip ["DOS.StringToAtom"]
def mutation_types(resource, schema) do
resource
|> mutations()
|> Enum.flat_map(fn mutation ->
mutation = %{
mutation
| action: Ash.Resource.Info.action(resource, mutation.action, mutation.type)
}
description =
if mutation.type == :destroy do
"The record that was successfully deleted"
else
"The successful result of the mutation"
end
fields = [
%Absinthe.Blueprint.Schema.FieldDefinition{
description: description,
identifier: :result,
module: schema,
name: "result",
type: Resource.type(resource),
__reference__: ref(__ENV__)
},
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "Any errors generated, if the mutation failed",
identifier: :errors,
module: schema,
name: "errors",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: :mutation_error
},
__reference__: ref(__ENV__)
}
]
metadata_object_type = metadata_field(resource, mutation, schema)
fields =
if metadata_object_type do
fields ++
[
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "Metadata produced by the mutation",
identifier: :metadata,
module: schema,
name: "metadata",
type: metadata_object_type.identifier,
__reference__: ref(__ENV__)
}
]
else
fields
end
result = %Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: "The result of the #{inspect(mutation.name)} mutation",
fields: fields,
identifier: String.to_atom("#{mutation.name}_result"),
module: schema,
name: Macro.camelize("#{mutation.name}_result"),
__reference__: ref(__ENV__)
}
case mutation_fields(
resource,
schema,
mutation.action,
mutation.type
) do
[] ->
[result] ++ List.wrap(metadata_object_type)
fields ->
input = %Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: fields,
identifier: String.to_atom("#{mutation.name}_input"),
module: schema,
name: Macro.camelize("#{mutation.name}_input"),
__reference__: ref(__ENV__)
}
[input, result] ++ List.wrap(metadata_object_type)
end
end)
end
# sobelow_skip ["DOS.StringToAtom"]
defp metadata_field(resource, mutation, schema) do
metadata_fields =
Map.get(mutation.action, :metadata, [])
|> Enum.map(fn metadata ->
field_type =
metadata.type
|> field_type(metadata, resource)
|> maybe_wrap_non_null(not metadata.allow_nil?)
%Absinthe.Blueprint.Schema.FieldDefinition{
description: metadata.description,
identifier: metadata.name,
module: schema,
name: to_string(metadata.name),
type: field_type,
__reference__: ref(__ENV__)
}
end)
if !Enum.empty?(metadata_fields) do
name = "#{mutation.name}_metadata"
%Absinthe.Blueprint.Schema.ObjectTypeDefinition{
fields: metadata_fields,
identifier: String.to_atom(name),
module: schema,
name: Macro.camelize(name),
__reference__: ref(__ENV__)
}
end
end
@doc false
# sobelow_skip ["DOS.StringToAtom"]
def embedded_type_input(source_resource, attribute, resource, schema) do
create_action =
case attribute.constraints[:create_action] do
nil ->
Ash.Resource.Info.primary_action!(resource, :create)
name ->
Ash.Resource.Info.action(resource, name, :create)
end
update_action =
case attribute.constraints[:update_action] do
nil ->
Ash.Resource.Info.primary_action!(resource, :update)
name ->
Ash.Resource.Info.action(resource, name, :update)
end
fields =
mutation_fields(resource, schema, create_action, :create) ++
mutation_fields(resource, schema, update_action, :update)
fields =
fields
|> Enum.group_by(& &1.identifier)
# We only want one field per id. Right now we just take the first one
# If there are overlaps, and the field isn't `NonNull` in *all* cases, then
# we pick one and mark it explicitly as nullable (we unwrap the `NonNull`)
|> Enum.map(fn {_id, fields} ->
if Enum.all?(
fields,
&match?(%Absinthe.Blueprint.TypeReference.NonNull{}, &1.type)
) do
Enum.at(fields, 0)
else
fields
|> Enum.at(0)
|> case do
%{type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: type}} = field ->
%{field | type: type}
field ->
field
end
end
end)
name = "#{AshGraphql.Resource.type(source_resource)}_#{attribute.name}_input"
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: fields,
identifier: String.to_atom(name),
module: schema,
name: Macro.camelize(name),
__reference__: ref(__ENV__)
}
end
defp mutation_fields(resource, schema, action, type) do
managed_relationships =
Enum.filter(
AshGraphql.Resource.managed_relationships(resource),
&(&1.action == action.name)
)
attribute_fields =
if action.type == :destroy && !action.soft? do
[]
else
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.filter(fn attribute ->
is_nil(action.accept) || attribute.name in action.accept
end)
|> Enum.filter(& &1.writable?)
|> Enum.map(fn attribute ->
allow_nil? =
attribute.allow_nil? || attribute.default != nil || type == :update ||
attribute.generated? ||
(type == :create && attribute.name in action.allow_nil_input)
explicitly_required = attribute.name in action.require_attributes
field_type =
attribute.type
|> field_type(attribute, resource, true)
|> maybe_wrap_non_null(explicitly_required || not allow_nil?)
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type,
__reference__: ref(__ENV__)
}
end)
end
argument_fields =
action.arguments
|> Enum.reject(& &1.private?)
|> Enum.map(fn argument ->
case find_manage_change(argument, action, managed_relationships) do
nil ->
type =
argument.type
|> field_type(argument, resource, true)
|> maybe_wrap_non_null(argument_required?(argument))
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: argument.name,
module: schema,
name: to_string(argument.name),
type: type,
__reference__: ref(__ENV__)
}
_manage_opts ->
managed = Enum.find(managed_relationships, &(&1.argument == argument.name))
type =
if managed.type_name do
managed.type_name
else
default_managed_type_name(resource, action, argument)
end
type = wrap_arrays(argument.type, type, argument.constraints)
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: argument.name,
module: schema,
name: to_string(argument.name),
type: maybe_wrap_non_null(type, argument_required?(argument)),
__reference__: ref(__ENV__)
}
end
end)
attribute_fields ++ argument_fields
end
defp wrap_arrays({:array, arg_type}, type, constraints) do
%Absinthe.Blueprint.TypeReference.List{
of_type:
maybe_wrap_non_null(
wrap_arrays(arg_type, type, constraints[:items] || []),
!constraints[:nil_items?] || Ash.Type.embedded_type?(type)
)
}
end
defp wrap_arrays(_, type, _), do: type
# sobelow_skip ["DOS.StringToAtom"]
defp default_managed_type_name(resource, action, argument) do
String.to_atom(
to_string(action.type) <>
"_" <>
to_string(AshGraphql.Resource.type(resource)) <>
"_" <> to_string(argument.name) <> "_input"
)
end
defp find_manage_change(argument, action, managed_relationships) do
if argument.name in Enum.map(managed_relationships, & &1.argument) do
Enum.find_value(action.changes, fn
%{change: {Ash.Resource.Change.ManageRelationship, opts}} ->
opts[:argument] == argument.name && opts
_ ->
nil
end)
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp query_type(%{type: :list}, resource, action, type) do
if action.pagination do
if relay?(resource) do
String.to_atom("#{type}_connection")
else
String.to_atom("page_of_#{type}")
end
else
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
}
end
end
defp query_type(query, _resource, _action, type) do
maybe_wrap_non_null(type, not query.allow_nil?)
end
defp maybe_wrap_non_null(type, true) do
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
end
defp maybe_wrap_non_null(type, _), do: type
defp args(action_type, resource, action, schema, identity \\ nil)
defp args(:get, resource, action, schema, nil) do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "id",
identifier: :id,
type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id},
description: "The id of the record",
__reference__: ref(__ENV__)
}
] ++ read_args(resource, action, schema)
end
defp args(:get, resource, action, schema, identity) do
if identity do
resource
|> Ash.Resource.Info.identities()
|> Enum.find(&(&1.name == identity))
|> Map.get(:keys)
|> Enum.map(fn key ->
attribute = Ash.Resource.Info.attribute(resource, key)
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: to_string(key),
identifier: key,
type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(attribute.type, attribute, resource, true)
},
description: attribute.description || "",
__reference__: ref(__ENV__)
}
end)
else
[]
end
|> Enum.concat(read_args(resource, action, schema))
end
defp args(:read_one, resource, action, schema, _) do
args =
case resource_filter_fields(resource, schema) do
[] ->
[]
_ ->
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "filter",
identifier: :filter,
type: resource_filter_type(resource),
description: "A filter to limit the results",
__reference__: ref(__ENV__)
}
]
end
args ++ read_args(resource, action, schema)
end
defp args(:list, resource, action, schema, _) do
args =
case resource_filter_fields(resource, schema) do
[] ->
[]
_ ->
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "filter",
identifier: :filter,
type: resource_filter_type(resource),
description: "A filter to limit the results",
__reference__: ref(__ENV__)
}
]
end
args =
case sort_values(resource) do
[] ->
args
_ ->
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "sort",
identifier: :sort,
type: %Absinthe.Blueprint.TypeReference.List{
of_type: resource_sort_type(resource)
},
description: "How to sort the records in the response",
__reference__: ref(__ENV__)
}
| args
]
end
args ++ pagination_args(action) ++ read_args(resource, action, schema)
end
defp args(:list_related, resource, action, schema, identity) do
args(:list, resource, action, schema, identity) ++
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "limit",
identifier: :limit,
type: :integer,
description: "The number of records to return.",
__reference__: ref(__ENV__)
},
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "offset",
identifier: :offset,
type: :integer,
description: "The number of records to skip.",
__reference__: ref(__ENV__)
}
]
end
defp read_args(resource, action, schema) do
action.arguments
|> Enum.reject(& &1.private?)
|> Enum.map(fn argument ->
type =
argument.type
|> field_type(argument, resource, true)
|> maybe_wrap_non_null(argument_required?(argument))
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: argument.name,
module: schema,
name: to_string(argument.name),
type: type,
__reference__: ref(__ENV__)
}
end)
end
defp pagination_args(action) do
if action.pagination do
max_message =
if action.pagination.max_page_size do
" Maximum #{action.pagination.max_page_size}"
else
""
end
limit_type =
maybe_wrap_non_null(
:integer,
action.pagination.required? && is_nil(action.pagination.default_limit)
)
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "limit",
identifier: :limit,
type: limit_type,
default_value: action.pagination.default_limit,
description: "The number of records to return." <> max_message,
__reference__: ref(__ENV__)
}
] ++ keyset_pagination_args(action) ++ offset_pagination_args(action)
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_sort_type(resource) do
String.to_atom(to_string(AshGraphql.Resource.type(resource)) <> "_sort_input")
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_filter_type(resource) do
String.to_atom(to_string(AshGraphql.Resource.type(resource)) <> "_filter_input")
end
# sobelow_skip ["DOS.StringToAtom"]
defp attribute_filter_field_type(resource, attribute) do
String.to_atom(
to_string(AshGraphql.Resource.type(resource)) <> "_filter_" <> to_string(attribute.name)
)
end
# sobelow_skip ["DOS.StringToAtom"]
defp calculation_filter_field_type(resource, calculation) do
String.to_atom(
to_string(AshGraphql.Resource.type(resource)) <>
"_filter_" <> to_string(calculation.name)
)
end
defp keyset_pagination_args(action) do
if action.pagination.keyset? do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "before",
identifier: :before,
type: :string,
description: "Show records before the specified keyset.",
__reference__: ref(__ENV__)
},
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "after",
identifier: :after,
type: :string,
description: "Show records after the specified keyset.",
__reference__: ref(__ENV__)
}
]
else
[]
end
end
defp offset_pagination_args(action) do
if action.pagination.offset? do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "offset",
identifier: :offset,
type: :integer,
description: "The number of records to skip.",
__reference__: ref(__ENV__)
}
]
else
[]
end
end
@doc false
def type_definitions(resource, api, schema) do
List.wrap(calculation_input(resource, schema)) ++
List.wrap(type_definition(resource, api, schema)) ++
List.wrap(sort_input(resource, schema)) ++
List.wrap(filter_input(resource, schema)) ++
filter_field_types(resource, schema) ++
List.wrap(page_of(resource, schema)) ++
enum_definitions(resource, schema) ++
managed_relationship_definitions(resource, schema)
end
def no_graphql_types(resource, schema) do
enum_definitions(resource, schema, true) ++
managed_relationship_definitions(resource, schema)
end
defp managed_relationship_definitions(resource, schema) do
resource
|> Ash.Resource.Info.actions()
|> Enum.flat_map(fn action ->
resource
|> AshGraphql.Resource.managed_relationships()
|> Enum.filter(&(&1.action == action.name))
|> Enum.map(fn managed_relationship ->
argument =
Enum.find(action.arguments, &(&1.name == managed_relationship.argument)) ||
raise """
No such argument #{managed_relationship.argument}, in `managed_relationship`
"""
opts =
find_manage_change(argument, action, [managed_relationship]) ||
raise """
There is no corresponding `change manage_relationship(...)` for the given argument and action
combination.
"""
managed_relationship_input(
resource,
action,
opts,
argument,
managed_relationship,
schema
)
end)
end)
end
defp managed_relationship_input(resource, action, opts, argument, managed_relationship, schema) do
relationship =
Ash.Resource.Info.relationship(resource, opts[:relationship]) ||
raise """
No relationship found when building managed relationship input: #{opts[:relationship]}
"""
manage_opts_schema =
if opts[:opts][:type] do
defaults = Ash.Changeset.manage_relationship_opts(opts[:opts][:type])
Enum.reduce(defaults, Ash.Changeset.manage_relationship_schema(), fn {key, value},
manage_opts ->
Ash.OptionsHelpers.set_default!(manage_opts, key, value)
end)
else
Ash.Changeset.manage_relationship_schema()
end
manage_opts = Ash.OptionsHelpers.validate!(opts[:opts], manage_opts_schema)
fields =
on_match_fields(manage_opts, relationship, schema) ++
on_no_match_fields(manage_opts, relationship, schema) ++
on_lookup_fields(manage_opts, relationship, schema) ++
manage_pkey_fields(manage_opts, managed_relationship, relationship, schema)
type = managed_relationship.type_name || default_managed_type_name(resource, action, argument)
fields = check_for_conflicts!(fields, managed_relationship, resource)
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
identifier: type,
fields: fields,
module: schema,
name: type |> to_string() |> Macro.camelize(),
__reference__: ref(__ENV__)
}
end
defp check_for_conflicts!(fields, managed_relationship, resource) do
{ok, errors} =
fields
|> Enum.map(fn {resource, action, field} ->
%{field: field, source: %{resource: resource, action: action}}
end)
|> Enum.group_by(& &1.field.identifier)
|> Enum.map(fn {identifier, data} ->
case Keyword.fetch(managed_relationship.types || [], identifier) do
{:ok, nil} ->
nil
{:ok, type} ->
type = unwrap_managed_relationship_type(type)
{:ok, %{Enum.at(data, 0).field | type: type}}
:error ->
get_conflicts(data)
end
end)
|> Enum.reject(&is_nil/1)
|> Enum.split_with(&match?({:ok, _}, &1))
unless Enum.empty?(errors) do
raise_conflicts!(Enum.map(errors, &elem(&1, 1)), managed_relationship, resource)
end
Enum.map(ok, &elem(&1, 1))
end
defp raise_conflicts!(conflicts, managed_relationship, resource) do
raise """
#{inspect(resource)}: #{managed_relationship.action}.#{managed_relationship.argument}
Error while deriving managed relationship input object type: type conflict.
Because multiple actions could be called, and those actions may have different
derived types, you will need to override the graphql schema to specify the type
for the following fields. This can be done by specifying the `types` option on your
`managed_relationship` inside of the `managed_relationships` in your resource's
`graphql` configuration.
#{Enum.map_join(conflicts, "\n\n", &conflict_message(&1, managed_relationship))}
"""
end
defp conflict_message(
{_reducing_type, _type, [%{field: %{name: name}} | _] = fields},
managed_relationship
) do
formatted_types =
fields
|> Enum.map(fn
%{source: %{action: :__primary_key}} = field ->
"#{inspect(format_type(field.field.type))} - from #{inspect(field.source.resource)}'s lookup by primary key"
%{source: %{action: {:identity, identity}}} = field ->
"#{inspect(format_type(field.field.type))} - from #{inspect(field.source.resource)}'s identity: #{identity}"
field ->
"#{inspect(format_type(field.field.type))} - from #{inspect(field.source.resource)}.#{field.source.action}"
end)
|> Enum.uniq()
"""
Possible types for #{managed_relationship.action}.#{managed_relationship.argument}.#{name}:
#{Enum.map(formatted_types, &" * #{&1}\n")}
"""
end
defp unwrap_managed_relationship_type({:non_null, type}) do
%Absinthe.Blueprint.TypeReference.NonNull{of_type: unwrap_managed_relationship_type(type)}
end
defp unwrap_managed_relationship_type({:array, type}) do
%Absinthe.Blueprint.TypeReference.List{of_type: unwrap_managed_relationship_type(type)}
end
defp unwrap_managed_relationship_type(type) do
type
end
defp format_type(%Absinthe.Blueprint.TypeReference.NonNull{of_type: type}) do
{:non_null, format_type(type)}
end
defp format_type(%Absinthe.Blueprint.TypeReference.List{of_type: type}) do
{:array, format_type(type)}
end
defp format_type(type) do
type
end
defp get_conflicts([field]) do
{:ok, field.field}
end
defp get_conflicts([field | _] = fields) do
case reduce_types(fields) do
{:ok, res} ->
{:ok, %{field.field | type: res}}
{:error, {reducing_type, type}} ->
{:error, {reducing_type, type, fields}}
end
end
defp reduce_types(fields) do
Enum.reduce_while(fields, {:ok, nil}, fn field, {:ok, type} ->
if type do
case match_types(type, field.field.type) do
{:ok, value} ->
{:cont, {:ok, value}}
:error ->
{:halt, {:error, {type, field.field.type}}}
end
else
{:cont, {:ok, field.field.type}}
end
end)
end
defp match_types(
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
},
type
) do
{:ok, type}
end
defp match_types(
type,
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
) do
{:ok, type}
end
defp match_types(
type,
type
) do
{:ok, type}
end
defp match_types(_, _) do
:error
end
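  # Unification sketch for match_types/2: a NonNull(T) and a plain T unify to
  # T (the looser type wins), two identical types unify to themselves, and any
  # other combination is a conflict reported as :error.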
defp on_lookup_fields(opts, relationship, schema) do
case ManagedRelationshipHelpers.on_lookup_update_action(opts, relationship) do
{:destination, nil} ->
[]
{:destination, action} ->
action = Ash.Resource.Info.action(relationship.destination, action)
relationship.destination
|> mutation_fields(schema, action, action.type)
|> Enum.map(fn field ->
{relationship.destination, action.name, field}
end)
{:source, nil} ->
[]
{:source, action} ->
action = Ash.Resource.Info.action(relationship.source, action)
relationship.source
|> mutation_fields(schema, action, action.type)
|> Enum.map(fn field ->
{relationship.source, action.name, field}
end)
{:join, nil, _} ->
[]
{:join, action, fields} ->
action = Ash.Resource.Info.action(relationship.through, action)
if fields == :all do
mutation_fields(relationship.through, schema, action, action.type)
else
relationship.through
|> mutation_fields(schema, action, action.type)
|> Enum.filter(&(&1.identifier in fields))
end
|> Enum.map(fn field ->
{relationship.through, action.name, field}
end)
nil ->
[]
end
end
defp on_match_fields(opts, relationship, schema) do
opts
|> ManagedRelationshipHelpers.on_match_destination_actions(relationship)
|> List.wrap()
|> Enum.flat_map(fn
{:destination, nil} ->
[]
{:destination, action_name} ->
action = Ash.Resource.Info.action(relationship.destination, action_name)
relationship.destination
|> mutation_fields(schema, action, action.type)
|> Enum.map(fn field ->
{relationship.destination, action.name, field}
end)
{:join, nil, _} ->
[]
{:join, action_name, fields} ->
action = Ash.Resource.Info.action(relationship.through, action_name)
if fields == :all do
mutation_fields(relationship.through, schema, action, action.type)
else
relationship.through
|> mutation_fields(schema, action, action.type)
|> Enum.filter(&(&1.identifier in fields))
end
|> Enum.map(fn field ->
{relationship.through, action.name, field}
end)
end)
end
defp on_no_match_fields(opts, relationship, schema) do
opts
|> ManagedRelationshipHelpers.on_no_match_destination_actions(relationship)
|> List.wrap()
|> Enum.flat_map(fn
{:destination, nil} ->
[]
{:destination, action_name} ->
action = Ash.Resource.Info.action(relationship.destination, action_name)
relationship.destination
|> mutation_fields(schema, action, action.type)
|> Enum.map(fn field ->
{relationship.destination, action.name, field}
end)
{:join, nil, _} ->
[]
{:join, action_name, fields} ->
action = Ash.Resource.Info.action(relationship.through, action_name)
if fields == :all do
mutation_fields(relationship.through, schema, action, action.type)
else
relationship.through
|> mutation_fields(schema, action, action.type)
|> Enum.filter(&(&1.identifier in fields))
end
|> Enum.map(fn field ->
{relationship.through, action.name, field}
end)
end)
end
defp manage_pkey_fields(opts, managed_relationship, relationship, schema) do
resource = relationship.destination
could_lookup? = ManagedRelationshipHelpers.could_lookup?(opts)
could_match? = ManagedRelationshipHelpers.could_update?(opts)
needs_pkey? = opts[:on_no_match] == :match
if could_lookup? || (could_match? && needs_pkey?) do
pkey_fields =
if managed_relationship.lookup_with_primary_key? do
resource
|> pkey_fields(schema, false)
|> Enum.map(fn field ->
{resource, :__primary_key, field}
end)
else
[]
end
resource
|> Ash.Resource.Info.identities()
|> Enum.filter(fn identity ->
is_nil(managed_relationship.lookup_identities) ||
identity.name in managed_relationship.lookup_identities
end)
|> Enum.flat_map(fn identity ->
identity
|> Map.get(:keys)
|> Enum.map(fn key ->
{identity.name, key}
end)
end)
|> Enum.uniq_by(&elem(&1, 1))
|> Enum.map(fn {identity_name, key} ->
attribute = Ash.Resource.Info.attribute(resource, key)
field = %Absinthe.Blueprint.Schema.InputValueDefinition{
name: to_string(key),
identifier: key,
type: field_type(attribute.type, attribute, resource),
description: attribute.description || "",
__reference__: ref(__ENV__)
}
{resource, {:identity, identity_name}, field}
end)
|> Enum.concat(pkey_fields)
else
[]
end
end
defp filter_field_types(resource, schema) do
filter_attribute_types(resource, schema) ++ filter_aggregate_types(resource, schema)
end
defp filter_attribute_types(resource, schema) do
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.flat_map(&filter_type(&1, resource, schema))
end
defp filter_aggregate_types(resource, schema) do
resource
|> Ash.Resource.Info.public_aggregates()
|> Enum.flat_map(&filter_type(&1, resource, schema))
end
defp attribute_or_aggregate_type(%Ash.Resource.Attribute{type: type}, _resource),
do: type
defp attribute_or_aggregate_type(
%Ash.Resource.Aggregate{kind: kind, field: field, relationship_path: relationship_path},
resource
) do
field_type =
with field when not is_nil(field) <- field,
related when not is_nil(related) <-
Ash.Resource.Info.related(resource, relationship_path),
attr when not is_nil(attr) <- Ash.Resource.Info.attribute(related, field) do
attr.type
end
{:ok, aggregate_type} = Ash.Query.Aggregate.kind_to_type(kind, field_type)
aggregate_type
end
defp filter_type(attribute_or_aggregate, resource, schema) do
type = attribute_or_aggregate_type(attribute_or_aggregate, resource)
array_type? = match?({:array, _}, type)
fields =
Ash.Filter.builtin_operators()
|> Enum.filter(& &1.predicate?)
|> restrict_for_lists(type)
|> Enum.flat_map(fn operator ->
filter_fields(operator, type, array_type?, schema, attribute_or_aggregate, resource)
end)
if fields == [] do
[]
else
identifier = attribute_filter_field_type(resource, attribute_or_aggregate)
[
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
identifier: identifier,
fields: fields,
module: schema,
name: identifier |> to_string() |> Macro.camelize(),
__reference__: ref(__ENV__)
}
]
end
end
defp filter_fields(operator, type, array_type?, schema, attribute_or_aggregate, resource) do
expressable_types =
Enum.filter(operator.types(), fn
[:any, {:array, type}] when is_atom(type) ->
true
[{:array, inner_type}, :same] when is_atom(inner_type) and array_type? ->
true
:same ->
true
:any ->
true
[:any, type] when is_atom(type) ->
true
_ ->
false
end)
if Enum.any?(expressable_types, &(&1 == :same)) do
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: operator.name(),
module: schema,
name: to_string(operator.name()),
type: field_type(type, attribute_or_aggregate, resource, true),
__reference__: ref(__ENV__)
}
]
else
type =
case Enum.at(expressable_types, 0) do
[{:array, :any}, :same] ->
{:unwrap, type}
[_, {:array, :same}] ->
{:array, type}
[_, :same] ->
type
[_, :any] ->
Ash.Type.String
[_, type] when is_atom(type) ->
Ash.Type.get_type(type)
_ ->
nil
end
if type do
{type, attribute_or_aggregate} =
case type do
{:unwrap, type} ->
{:array, type} = type
constraints = Map.get(attribute_or_aggregate, :constraints) || []
{type,
%{attribute_or_aggregate | type: type, constraints: constraints[:items] || []}}
type ->
{type, attribute_or_aggregate}
end
if Ash.Type.embedded_type?(type) do
[]
else
attribute_or_aggregate = constraints_to_item_constraints(type, attribute_or_aggregate)
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: operator.name(),
module: schema,
name: to_string(operator.name()),
type: field_type(type, attribute_or_aggregate, resource, true),
__reference__: ref(__ENV__)
}
]
end
else
[]
end
end
rescue
_ ->
[]
end
defp restrict_for_lists(operators, {:array, _}) do
list_predicates = [Ash.Query.Operator.IsNil, Ash.Query.Operator.Has]
Enum.filter(operators, &(&1 in list_predicates))
end
defp restrict_for_lists(operators, _), do: operators
defp constraints_to_item_constraints(
{:array, _},
%Ash.Resource.Attribute{
constraints: constraints,
allow_nil?: allow_nil?
} = attribute
) do
%{
attribute
| constraints: [
items: constraints,
nil_items?: allow_nil? || Ash.Type.embedded_type?(attribute.type)
]
}
end
defp constraints_to_item_constraints(_, attribute_or_aggregate), do: attribute_or_aggregate
defp sort_input(resource, schema) do
case sort_values(resource) do
[] ->
nil
_ ->
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: [
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :order,
module: schema,
name: "order",
default_value: :asc,
type: :sort_order,
__reference__: ref(__ENV__)
},
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :field,
module: schema,
name: "field",
type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: resource_sort_field_type(resource)
},
__reference__: ref(__ENV__)
}
],
identifier: resource_sort_type(resource),
module: schema,
name: resource |> resource_sort_type() |> to_string() |> Macro.camelize(),
__reference__: ref(__ENV__)
}
end
end
defp filter_input(resource, schema) do
case resource_filter_fields(resource, schema) do
[] ->
nil
fields ->
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
identifier: resource_filter_type(resource),
module: schema,
name: resource |> resource_filter_type() |> to_string() |> Macro.camelize(),
fields: fields,
__reference__: ref(__ENV__)
}
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp calculation_input(resource, schema) do
resource
|> Ash.Resource.Info.public_calculations()
|> Enum.filter(fn %{calculation: {module, _}} ->
Code.ensure_compiled(module)
:erlang.function_exported(module, :expression, 2)
end)
|> Enum.flat_map(fn calculation ->
field_type = calculation_type(calculation, resource)
arguments = calculation_args(calculation, resource, schema)
array_type? = match?({:array, _}, field_type)
filter_fields =
Ash.Filter.builtin_operators()
|> Enum.filter(& &1.predicate?)
|> restrict_for_lists(field_type)
|> Enum.flat_map(
&filter_fields(
&1,
calculation.type,
array_type?,
schema,
calculation,
resource
)
)
filter_input = %Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: arguments,
identifier:
String.to_atom(
to_string(calculation_filter_field_type(resource, calculation)) <> "_input"
),
module: schema,
name:
Macro.camelize(
to_string(calculation_filter_field_type(resource, calculation)) <> "_input"
),
__reference__: ref(__ENV__)
}
filter_input_field = %Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :input,
module: schema,
name: "input",
type:
String.to_atom(
to_string(calculation_filter_field_type(resource, calculation)) <> "_input"
),
__reference__: ref(__ENV__)
}
if Enum.empty?(arguments) do
type_def = %Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: filter_fields,
identifier: calculation_filter_field_type(resource, calculation),
module: schema,
name: Macro.camelize(to_string(calculation_filter_field_type(resource, calculation))),
__reference__: ref(__ENV__)
}
[
type_def
]
else
type_def = %Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: [filter_input_field | filter_fields],
identifier: calculation_filter_field_type(resource, calculation),
module: schema,
name: Macro.camelize(to_string(calculation_filter_field_type(resource, calculation))),
__reference__: ref(__ENV__)
}
[
filter_input,
type_def
]
end
end)
end
defp resource_filter_fields(resource, schema) do
boolean_filter_fields(resource, schema) ++
attribute_filter_fields(resource, schema) ++
relationship_filter_fields(resource, schema) ++
aggregate_filter_fields(resource, schema) ++ calculation_filter_fields(resource, schema)
end
defp attribute_filter_fields(resource, schema) do
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.reject(fn
{:array, _} ->
true
_ ->
false
end)
|> Enum.reject(&Ash.Type.embedded_type?/1)
|> Enum.flat_map(fn attribute ->
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: attribute_filter_field_type(resource, attribute),
__reference__: ref(__ENV__)
}
]
end)
end
defp aggregate_filter_fields(resource, schema) do
if Ash.DataLayer.data_layer_can?(resource, :aggregate_filter) do
resource
|> Ash.Resource.Info.public_aggregates()
|> Enum.flat_map(fn aggregate ->
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: aggregate.name,
module: schema,
name: to_string(aggregate.name),
type: attribute_filter_field_type(resource, aggregate),
__reference__: ref(__ENV__)
}
]
end)
else
[]
end
end
defp calculation_filter_fields(resource, schema) do
if Ash.DataLayer.data_layer_can?(resource, :expression_calculation) do
resource
|> Ash.Resource.Info.public_calculations()
|> Enum.filter(fn %{calculation: {module, _}} ->
:erlang.function_exported(module, :expression, 2)
end)
|> Enum.map(fn calculation ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: calculation.name,
module: schema,
name: to_string(calculation.name),
type: calculation_filter_field_type(resource, calculation),
__reference__: ref(__ENV__)
}
end)
else
[]
end
end
defp relationship_filter_fields(resource, schema) do
resource
|> Ash.Resource.Info.public_relationships()
|> Enum.filter(fn relationship ->
AshGraphql.Resource.type(relationship.destination)
end)
|> Enum.map(fn relationship ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
type: resource_filter_type(relationship.destination),
__reference__: ref(__ENV__)
}
end)
end
defp boolean_filter_fields(resource, schema) do
if Ash.DataLayer.can?(:boolean_filter, resource) do
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :and,
module: schema,
name: "and",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: resource_filter_type(resource)
}
},
__reference__: ref(__ENV__)
},
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :or,
module: schema,
name: "or",
__reference__: ref(__ENV__),
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: resource_filter_type(resource)
}
}
}
]
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_sort_field_type(resource) do
type = AshGraphql.Resource.type(resource)
String.to_atom(to_string(type) <> "_sort_field")
end
def enum_definitions(resource, schema, only_auto? \\ false) do
atom_enums =
resource
|> get_auto_enums()
|> Enum.filter(&is_list(&1.constraints[:one_of]))
|> Enum.map(fn attribute ->
type_name = atom_enum_type(resource, attribute.name)
%Absinthe.Blueprint.Schema.EnumTypeDefinition{
module: schema,
name: type_name |> to_string() |> Macro.camelize(),
values:
Enum.map(attribute.constraints[:one_of], fn value ->
%Absinthe.Blueprint.Schema.EnumValueDefinition{
module: schema,
identifier: value,
name: String.upcase(to_string(value)),
value: value
}
end),
identifier: type_name,
__reference__: ref(__ENV__)
}
end)
if only_auto? do
atom_enums
else
sort_values = sort_values(resource)
sort_order = %Absinthe.Blueprint.Schema.EnumTypeDefinition{
module: schema,
name: resource |> resource_sort_field_type() |> to_string() |> Macro.camelize(),
identifier: resource_sort_field_type(resource),
__reference__: ref(__ENV__),
values:
Enum.map(sort_values, fn sort_value ->
%Absinthe.Blueprint.Schema.EnumValueDefinition{
module: schema,
identifier: sort_value,
name: String.upcase(to_string(sort_value)),
value: sort_value
}
end)
}
[sort_order | atom_enums]
end
end
defp get_auto_enums(resource) do
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.map(fn attribute ->
unnest(attribute)
end)
|> Enum.filter(&(&1.type == Ash.Type.Atom))
end
defp unnest(%{type: {:array, type}, constraints: constraints} = attribute) do
%{attribute | type: type, constraints: constraints[:items] || []}
end
defp unnest(other), do: other
defp sort_values(resource) do
attribute_sort_values =
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.reject(fn
%{type: {:array, _}} ->
true
_ ->
false
end)
|> Enum.reject(&Ash.Type.embedded_type?(&1.type))
|> Enum.map(& &1.name)
aggregate_sort_values =
resource
|> Ash.Resource.Info.public_aggregates()
|> Enum.reject(fn aggregate ->
field_type =
with field when not is_nil(field) <- aggregate.field,
related when not is_nil(related) <-
Ash.Resource.Info.related(resource, aggregate.relationship_path),
attr when not is_nil(attr) <- Ash.Resource.Info.attribute(related, aggregate.field) do
attr.type
end
case Ash.Query.Aggregate.kind_to_type(aggregate.kind, field_type) do
{:ok, {:array, _}} ->
true
{:ok, type} ->
Ash.Type.embedded_type?(type)
_ ->
true
end
end)
|> Enum.map(& &1.name)
attribute_sort_values ++ aggregate_sort_values
end
# sobelow_skip ["DOS.StringToAtom"]
defp page_of(resource, schema) do
type = Resource.type(resource)
paginatable? =
resource
|> Ash.Resource.Info.actions()
|> Enum.any?(fn action ->
action.type == :read && action.pagination
end)
if paginatable? do
if relay?(resource) do
# "#{type}_connection"
raise "Relay pagination is not yet supported."
else
%Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: "A page of #{inspect(type)}",
fields: [
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "The records contained in the page",
identifier: :results,
module: schema,
name: "results",
__reference__: ref(__ENV__),
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
},
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "The count of records",
identifier: :count,
module: schema,
name: "count",
type: :integer,
__reference__: ref(__ENV__)
}
],
identifier: String.to_atom("page_of_#{type}"),
module: schema,
name: Macro.camelize("page_of_#{type}"),
__reference__: ref(__ENV__)
}
end
else
nil
end
end
def type_definition(resource, api, schema) do
type = Resource.type(resource)
interfaces =
if relay?(resource) do
[:node]
else
[]
end
%Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: Ash.Resource.Info.description(resource),
interfaces: interfaces,
fields: fields(resource, api, schema),
identifier: type,
module: schema,
name: Macro.camelize(to_string(type)),
__reference__: ref(__ENV__)
}
end
defp fields(resource, api, schema) do
attributes(resource, schema) ++
relationships(resource, api, schema) ++
aggregates(resource, schema) ++
calculations(resource, schema)
end
defp attributes(resource, schema) do
non_id_attributes =
resource
|> Ash.Resource.Info.public_attributes()
|> Enum.reject(&(&1.name == :id))
|> Enum.map(fn attribute ->
field_type =
attribute.type
|> field_type(attribute, resource)
|> maybe_wrap_non_null(not attribute.allow_nil?)
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type,
__reference__: ref(__ENV__)
}
end)
case id_field(resource, schema) do
nil ->
non_id_attributes
id_field ->
[id_field | non_id_attributes]
end
end
defp id_field(resource, schema) do
case Ash.Resource.Info.primary_key(resource) do
[field] ->
attribute = Ash.Resource.Info.attribute(resource, field)
unless attribute.private? do
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id},
module: schema,
name: "id",
type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id},
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_id}, {resource, field}}
],
__reference__: ref(__ENV__)
}
end
fields ->
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "A unique identifier",
identifier: :id,
module: schema,
name: "id",
type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id},
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_composite_id}, {resource, fields}}
],
__reference__: ref(__ENV__)
}
end
end
defp pkey_fields(resource, schema, require?) do
case Ash.Resource.Info.primary_key(resource) do
[field] ->
attribute = Ash.Resource.Info.attribute(resource, field)
if attribute.private? do
[]
else
field_type =
attribute.type
|> field_type(attribute, resource)
|> maybe_wrap_non_null(require?)
[
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type,
__reference__: ref(__ENV__)
}
]
end
fields ->
added_pkey_fields =
if :id in fields do
[]
else
for field <- fields do
attribute = Ash.Resource.Info.attribute(resource, field)
field_type =
attribute.type
|> field_type(attribute, resource)
|> maybe_wrap_non_null(require?)
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type,
__reference__: ref(__ENV__)
}
end
end
[
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "The primary key of the resource",
identifier: :id,
module: schema,
name: "id",
type: :id,
__reference__: ref(__ENV__)
}
] ++ added_pkey_fields
end
end
defp argument_required?(%{allow_nil?: true}), do: false
defp argument_required?(%{default: default}) when not is_nil(default), do: false
defp argument_required?(_), do: true
# sobelow_skip ["DOS.StringToAtom"]
defp relationships(resource, api, schema) do
resource
|> Ash.Resource.Info.public_relationships()
|> Enum.filter(fn relationship ->
Resource in Ash.Resource.Info.extensions(relationship.destination)
end)
|> Enum.map(fn
%{cardinality: :one} = relationship ->
type =
relationship.destination
|> Resource.type()
|> maybe_wrap_non_null(relationship.required?)
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_assoc}, {api, relationship}}
],
arguments: [],
type: type,
__reference__: ref(__ENV__)
}
%{cardinality: :many} = relationship ->
read_action = Ash.Resource.Info.primary_action!(relationship.destination, :read)
type = Resource.type(relationship.destination)
query_type = %Absinthe.Blueprint.TypeReference.NonNull{
of_type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
}
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
complexity: {AshGraphql.Graphql.Resolver, :query_complexity},
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_assoc}, {api, relationship}}
],
arguments: args(:list_related, relationship.destination, read_action, schema),
type: query_type,
__reference__: ref(__ENV__)
}
end)
end
defp aggregates(resource, schema) do
resource
|> Ash.Resource.Info.public_aggregates()
|> Enum.map(fn aggregate ->
field_type =
with field when not is_nil(field) <- aggregate.field,
related when not is_nil(related) <-
Ash.Resource.Info.related(resource, aggregate.relationship_path),
attr when not is_nil(attr) <- Ash.Resource.Info.attribute(related, aggregate.field) do
attr.type
end
{:ok, type} = Aggregate.kind_to_type(aggregate.kind, field_type)
type =
if is_nil(Ash.Query.Aggregate.default_value(aggregate.kind)) do
field_type(type, aggregate, resource)
else
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(type, aggregate, resource)
}
end
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: aggregate.name,
module: schema,
name: to_string(aggregate.name),
type: type,
__reference__: ref(__ENV__)
}
end)
end
defp calculations(resource, schema) do
resource
|> Ash.Resource.Info.public_calculations()
|> Enum.map(fn calculation ->
field_type = calculation_type(calculation, resource)
arguments = calculation_args(calculation, resource, schema)
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: calculation.name,
module: schema,
arguments: arguments,
complexity: 2,
name: to_string(calculation.name),
type: field_type,
__reference__: ref(__ENV__)
}
end)
end
defp calculation_type(calculation, resource) do
calculation.type
|> Ash.Type.get_type()
|> field_type(nil, resource)
|> maybe_wrap_non_null(not calculation.allow_nil?)
end
defp calculation_args(calculation, resource, schema) do
Enum.map(calculation.arguments, fn argument ->
type =
argument.type
|> field_type(argument, resource, true)
|> maybe_wrap_non_null(argument_required?(argument))
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: argument.name,
module: schema,
name: to_string(argument.name),
type: type,
__reference__: ref(__ENV__)
}
end)
end
defp field_type(type, field, resource, input? \\ false)
defp field_type(
{:array, type},
%Ash.Resource.Aggregate{kind: :list} = aggregate,
resource,
input?
) do
with related when not is_nil(related) <-
Ash.Resource.Info.related(resource, aggregate.relationship_path),
attr when not is_nil(attr) <- Ash.Resource.Info.attribute(related, aggregate.field) do
if attr.allow_nil? do
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type(type, aggregate, resource, input?)
}
else
%Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(type, aggregate, resource, input?)
}
}
end
end
end
defp field_type({:array, type}, %Ash.Resource.Aggregate{} = aggregate, resource, input?) do
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type(type, aggregate, resource, input?)
}
end
defp field_type({:array, type}, nil, resource, input?) do
field_type = field_type(type, nil, resource, input?)
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type
}
end
defp field_type({:array, type}, attribute, resource, input?) do
new_constraints = attribute.constraints[:items] || []
new_attribute = %{attribute | constraints: new_constraints, type: type}
field_type =
type
|> field_type(new_attribute, resource, input?)
|> maybe_wrap_non_null(
!attribute.constraints[:nil_items?] || Ash.Type.embedded_type?(attribute.type)
)
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type
}
end
# sobelow_skip ["DOS.BinToAtom"]
defp field_type(type, attribute, resource, input?) do
if Ash.Type.builtin?(type) do
do_field_type(type, attribute, resource)
else
if Ash.Type.embedded_type?(type) do
if input? do
:"#{AshGraphql.Resource.type(resource)}_#{attribute.name}_input"
else
case type(type) do
nil ->
Application.get_env(:ash_graphql, :json_type) || :json_string
type ->
type
end
end
else
if :erlang.function_exported(type, :values, 0) do
if :erlang.function_exported(type, :graphql_type, 0) do
type.graphql_type()
else
:string
end
else
function =
if input? do
:graphql_input_type
else
:graphql_type
end
if :erlang.function_exported(type, function, 1) do
apply(type, function, [attribute.constraints])
else
raise """
Could not determine graphql type for #{inspect(type)}, please define: #{function}/1!
"""
end
end
end
end
end
defp do_field_type(
Ash.Type.Atom,
%Ash.Resource.Attribute{constraints: constraints, name: name},
resource
) do
if is_list(constraints[:one_of]) do
atom_enum_type(resource, name)
else
:string
end
end
defp do_field_type(Ash.Type.Boolean, _, _), do: :boolean
defp do_field_type(Ash.Type.Atom, _, _), do: :string
defp do_field_type(Ash.Type.CiString, _, _), do: :string
defp do_field_type(Ash.Type.Date, _, _), do: :date
defp do_field_type(Ash.Type.Decimal, _, _), do: :decimal
defp do_field_type(Ash.Type.Integer, _, _), do: :integer
defp do_field_type(Ash.Type.Map, _, _),
do: Application.get_env(:ash_graphql, :json_type) || :json_string
defp do_field_type(Ash.Type.String, _, _), do: :string
defp do_field_type(Ash.Type.Term, _, _), do: :string
defp do_field_type(Ash.Type.UtcDatetime, _, _), do: :naive_datetime
defp do_field_type(Ash.Type.UtcDatetimeUsec, _, _), do: :naive_datetime
defp do_field_type(Ash.Type.UUID, _, _), do: :string
defp do_field_type(Ash.Type.Float, _, _), do: :float
# sobelow_skip ["DOS.StringToAtom"]
defp atom_enum_type(resource, attribute_name) do
resource
|> AshGraphql.Resource.type()
|> to_string()
|> Kernel.<>("_")
|> Kernel.<>(to_string(attribute_name))
|> String.to_atom()
end
end
|
lib/resource/resource.ex
| 0.818918
| 0.697055
|
resource.ex
|
starcoder
|
defmodule Iuliia.Engine do
@moduledoc """
Engine provides the main transliteration logic.
"""
@ending_length 2
@doc """
Transliterates string using chosen schema.
## Example
iex> Iuliia.translate("Юлия, съешь ещё этих мягких французских булок из Йошкар-Олы, да выпей алтайского чаю", "mvd_782")
"Yuliya, syesh' eshche etikh myagkikh frantsuzskikh bulok iz Yoshkar-Oly, da vypey altayskogo chayu"
"""
@spec translate(String.t(), String.t()) :: String.t()
def translate(string, schema_name) do
schema = Iuliia.Schema.lookup(schema_name)
translated_chunks =
for word <- String.split(string, ~r/\b/u, trim: true), do: translit_chunk(schema, word)
Enum.join(translated_chunks)
end
defp translit_chunk(schema, chunk) do
with true <- String.match?(chunk, ~r/\p{L}+/u),
{stem, ending} when ending not in [""] <- split_word(chunk),
te when not is_nil(te) <- schema["ending_mapping"][ending] do
[translit_stem(schema, stem), te] |> Enum.join()
else
false ->
chunk
_ ->
translit_stem(schema, chunk)
end
end
defp split_word(word), do: split_word(word, String.length(word))
defp split_word(word, len) when len <= @ending_length, do: {word, ""}
defp split_word(word, len) do
stem =
case String.slice(word, 0..(len - @ending_length - 1)) do
"" -> word
string -> string
end
{stem, String.slice(word, -@ending_length..-1)}
end
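  # Sketch: with @ending_length of 2, split_word("booking") returns
  # {"booki", "ng"}, while words of two characters or fewer come back whole,
  # e.g. split_word("to") returns {"to", ""}.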
defp translit_stem(schema, stem) do
translited_stem =
for {char, index} <- stem |> String.codepoints() |> Enum.with_index() do
translit_char(schema, stem |> String.codepoints(), char, index)
end
translited_stem |> Enum.join() |> camelcase(stem)
end
def translit_char(schema, chars, char, index) do
with nil <- translit_prev(schema, chars, index),
nil <- translit_next(schema, chars, index) do
schema["mapping"][char |> String.downcase()] |> camelcase(char)
else
translited_char -> translited_char
end
end
defp translit_prev(schema, chars, 0),
do: chars |> Enum.at(0) |> String.downcase() |> translit_prev(schema)
defp translit_prev(schema, chars, index),
do:
chars
|> Enum.slice((index - 1)..index)
|> Enum.join()
|> String.downcase()
|> translit_prev(schema)
defp translit_prev(char, schema),
do: schema["prev_mapping"][char]
defp translit_next(schema, chars, index) do
next_char = chars |> Enum.slice(index..(index + 1)) |> Enum.join() |> String.downcase()
schema["next_mapping"][next_char]
end
defp camelcase(string, source) do
if String.match?(source, ~r/[[:upper:]]/u) do
downcased_string = String.downcase(string)
first_sym = downcased_string |> String.at(0) |> String.upcase()
ending = downcased_string |> String.slice(1..String.length(downcased_string))
first_sym <> ending
else
string
end
end
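  # Sketch: camelcase("yu", "Ю") returns "Yu" because the source character is
  # uppercase, while camelcase("yu", "ю") returns "yu" unchanged.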
end
|
lib/iuliia/engine.ex
| 0.656108
| 0.411406
|
engine.ex
|
starcoder
|
defmodule OMG.Utxo do
@moduledoc """
Manipulates a single unspent transaction output (UTXO) held by the child chain state.
"""
alias OMG.Crypto
alias OMG.State.Transaction
defstruct [:owner, :currency, :amount, :creating_txhash]
@type t() :: %__MODULE__{
creating_txhash: Transaction.tx_hash(),
owner: Crypto.address_t(),
currency: Crypto.address_t(),
amount: non_neg_integer()
}
@doc """
Inserts a representation of a UTXO position, usable in guards. See `Utxo.Position` for handling of these entities.
"""
defmacro position(blknum, txindex, oindex) do
quote do
{:utxo_position, unquote(blknum), unquote(txindex), unquote(oindex)}
end
end
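  # Usage sketch: position(1000, 0, 0) expands to {:utxo_position, 1000, 0, 0},
  # a tuple shape that the guards below (is_position/3, is_deposit/1) can
  # inspect in guard contexts.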
defguard is_position(blknum, txindex, oindex)
when is_integer(blknum) and blknum >= 0 and
is_integer(txindex) and txindex >= 0 and
is_integer(oindex) and oindex >= 0
@interval elem(OMG.Eth.RootChain.get_child_block_interval(), 1)
@doc """
Based on the contract parameters determines whether UTXO position provided was created by a deposit
"""
defguard is_deposit(position)
when is_tuple(position) and tuple_size(position) == 4 and
is_position(elem(position, 1), elem(position, 2), elem(position, 3)) and
rem(elem(position, 1), @interval) != 0
defmacrop is_nil_or_binary(binary) do
quote do
is_binary(unquote(binary)) or is_nil(unquote(binary))
end
end
# NOTE: we have no migrations, so we handle data compatibility here (make_db_update/1 and from_db_kv/1), OMG-421
def to_db_value(%__MODULE__{owner: owner, currency: currency, amount: amount, creating_txhash: creating_txhash})
when is_binary(owner) and is_binary(currency) and is_integer(amount) and is_nil_or_binary(creating_txhash) do
%{owner: owner, currency: currency, amount: amount, creating_txhash: creating_txhash}
end
def from_db_value(%{owner: owner, currency: currency, amount: amount, creating_txhash: creating_txhash})
when is_binary(owner) and is_binary(currency) and is_integer(amount) and is_nil_or_binary(creating_txhash) do
value = %{owner: owner, currency: currency, amount: amount, creating_txhash: creating_txhash}
struct!(__MODULE__, value)
end
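  # Round-trip sketch: to_db_value/1 flattens the struct to a plain map for
  # storage and from_db_value/1 rebuilds the %OMG.Utxo{} with struct!/2, so
  # from_db_value(to_db_value(utxo)) == utxo for well-formed data.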
end
|
apps/omg/lib/omg/utxo.ex
| 0.830972
| 0.528168
|
utxo.ex
|
starcoder
|
defmodule JSONPatch do
@moduledoc ~S"""
JSONPatch is an Elixir implementation of the JSON Patch format,
described in [RFC 6902](http://tools.ietf.org/html/rfc6902).
## Examples
iex> JSONPatch.patch(%{"a" => 1}, [
...> %{"op" => "add", "path" => "/b", "value" => %{"c" => true}},
...> %{"op" => "test", "path" => "/a", "value" => 1},
...> %{"op" => "move", "from" => "/b/c", "path" => "/c"}
...> ])
{:ok, %{"a" => 1, "b" => %{}, "c" => true}}
iex> JSONPatch.patch(%{"a" => 22}, [
...> %{"op" => "add", "path" => "/b", "value" => %{"c" => true}},
...> %{"op" => "test", "path" => "/a", "value" => 1},
...> %{"op" => "move", "from" => "/b/c", "path" => "/c"}
...> ])
{:error, :test_failed, ~s|test failed (patches[1], %{"op" => "test", "path" => "/a", "value" => 1})|}
## Installation
# mix.exs
def deps do
[
{:json_patch, "~> 0.8.0"}
]
end
"""
alias JSONPatch.Path
@type json_document :: json_object | json_array
@type json_object :: %{String.t() => json_encodable}
@type json_array :: [json_encodable]
@type json_encodable ::
json_object
| json_array
| String.t()
| number
| true
| false
| nil
@type patches :: [patch]
@type patch :: map
@type return_value :: {:ok, json_encodable} | {:error, error_type, String.t()}
@type error_type :: :test_failed | :syntax_error | :path_error
@type status_code :: non_neg_integer
@doc ~S"""
Applies JSON Patch (RFC 6902) patches to the given JSON document.
Returns `{:ok, patched_map}` or `{:error, error_type, description}`.
Examples:
iex> %{"foo" => "bar"} |> JSONPatch.patch([%{"op" => "replace", "path" => "/foo", "value" => 2}])
{:ok, %{"foo" => 2}}
iex> %{"foo" => "bar"} |> JSONPatch.patch([%{"op" => "test", "path" => "/foo", "value" => 2}])
{:error, :test_failed, ~s|test failed (patches[0], %{"op" => "test", "path" => "/foo", "value" => 2})|}
iex> %{"foo" => "bar"} |> JSONPatch.patch([%{"op" => "remove", "path" => "/foo"}])
{:ok, %{}}
"""
@spec patch(json_document, patches) :: return_value
def patch(doc, patches) do
patch(doc, patches, 0)
end
defp patch(doc, [], _), do: {:ok, doc}
defp patch(doc, [p | rest], i) do
case apply_single_patch(doc, p) do
{:ok, newdoc} ->
patch(newdoc, rest, i + 1)
{:error, type, desc} ->
{:error, type, "#{desc} (patches[#{i}], #{inspect(p)})"}
end
end
@doc ~S"""
Converts a `t:return_value/0` or `t:error_type/0` to an HTTP status code.
The HTTP status codes emitted are:
* 200 OK (success)
* 400 Bad Request (the syntax of the patch was invalid)
* 409 Conflict (a `test` operation inside the patch did not succeed)
* 422 Unprocessable Entity (the patch refers to an invalid or nonexistent path)
Example:
iex> JSONPatch.patch(%{"a" => 1}, [%{"op" => "test", "path" => "/a", "value" => 1}]) |> JSONPatch.status_code
200
iex> JSONPatch.patch(%{"a" => 1}, [%{"op" => "test", "path" => "/a", "value" => 22}]) |> JSONPatch.status_code
409
iex> JSONPatch.status_code(:path_error)
422
"""
@spec status_code(return_value | error_type) :: status_code
def status_code(value) do
case value do
{:error, type, _} -> status_code(type)
{:ok, _} -> 200
:test_failed -> 409
:path_error -> 422
:syntax_error -> 400
_ -> 400
end
end
@spec apply_single_patch(json_document, patch) :: return_value
defp apply_single_patch(doc, patch) do
cond do
!Map.has_key?(patch, "op") -> {:error, :syntax_error, "missing `op`"}
!Map.has_key?(patch, "path") -> {:error, :syntax_error, "missing `path`"}
:else -> apply_op(patch["op"], doc, patch)
end
end
@spec apply_op(String.t(), json_document, patch) :: return_value
defp apply_op("test", doc, patch) do
cond do
!Map.has_key?(patch, "value") ->
{:error, :syntax_error, "missing `value`"}
:else ->
case Path.get_value_at_path(doc, patch["path"]) do
{:ok, path_value} ->
if path_value == patch["value"] do
{:ok, doc}
else
{:error, :test_failed, "test failed"}
end
err ->
err
end
end
end
defp apply_op("remove", doc, patch) do
Path.remove_value_at_path(doc, patch["path"])
end
defp apply_op("add", doc, patch) do
cond do
!Map.has_key?(patch, "value") ->
{:error, :syntax_error, "missing `value`"}
:else ->
Path.add_value_at_path(doc, patch["path"], patch["value"])
end
end
defp apply_op("replace", doc, patch) do
cond do
!Map.has_key?(patch, "value") ->
{:error, :syntax_error, "missing `value`"}
:else ->
with {:ok, data} <- Path.remove_value_at_path(doc, patch["path"]) do
Path.add_value_at_path(data, patch["path"], patch["value"])
else
err -> err
end
end
end
defp apply_op("move", doc, patch) do
cond do
!Map.has_key?(patch, "from") ->
{:error, :syntax_error, "missing `from`"}
:else ->
with {:ok, value} <- Path.get_value_at_path(doc, patch["from"]),
{:ok, data} <- Path.remove_value_at_path(doc, patch["from"]) do
Path.add_value_at_path(data, patch["path"], value)
else
err -> err
end
end
end
defp apply_op("copy", doc, patch) do
cond do
!Map.has_key?(patch, "from") ->
{:error, :syntax_error, "missing `from`"}
:else ->
with {:ok, value} <- Path.get_value_at_path(doc, patch["from"]) do
Path.add_value_at_path(doc, patch["path"], value)
else
err -> err
end
end
end
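  # Semantics sketch: "move" is a get from `from` plus a remove at `from` plus
  # an add at `path`, so the source location disappears; "copy" performs the
  # same get and add without the remove, leaving the source intact.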
defp apply_op(op, _doc, _patch) do
{:error, :syntax_error, "not implemented: #{op}"}
end
end
|
lib/json_patch.ex
| 0.822439
| 0.463384
|
json_patch.ex
|
starcoder
|
defmodule ElixirLS.LanguageServer.Providers.FoldingRange.SpecialToken do
@moduledoc """
Code folding based on "special" tokens.
Several tokens, like `"..."`s, define ranges all on their own.
This module converts these tokens to ranges.
These ranges can be either `kind?: :comment` or `kind?: :region`.
"""
alias ElixirLS.LanguageServer.Providers.FoldingRange
alias ElixirLS.LanguageServer.Providers.FoldingRange.Token
@kinds [
:bin_heredoc,
:bin_string,
:list_heredoc,
:list_string,
:sigil
]
@docs [:moduledoc, :typedoc, :doc]
@doc """
Provides ranges based on "special" tokens
## Example
iex> alias ElixirLS.LanguageServer.Providers.FoldingRange
iex> text = \"""
...> defmodule A do # 0
...> def hello() do # 1
...> "
...> regular string # 3
...> "
...> '
...> charlist string # 6
...> '
...> end # 8
...> end # 9
...> \"""
iex> FoldingRange.convert_text_to_input(text)
...> |> FoldingRange.SpecialToken.provide_ranges()
{:ok, [
%{startLine: 5, endLine: 6, kind?: :region},
%{startLine: 2, endLine: 3, kind?: :region},
]}
"""
@spec provide_ranges(FoldingRange.input()) :: {:ok, [FoldingRange.t()]}
def provide_ranges(%{tokens: tokens}) do
ranges =
tokens
|> group_tokens()
|> convert_groups_to_ranges()
{:ok, ranges}
end
@spec group_tokens([Token.t()]) :: [[Token.t()]]
defp group_tokens(tokens) do
do_group_tokens(tokens, [])
end
defp do_group_tokens([], acc), do: acc
# Don't create folding ranges for docs
defp do_group_tokens([{:identifier, _, doc_identifier}, {false, _, _} | rest], acc)
when doc_identifier in @docs do
do_group_tokens(rest, acc)
end
# Start a folding range for `@doc` and `@moduledoc`
defp do_group_tokens([{:identifier, _, doc_identifier} = token | rest], acc)
when doc_identifier in @docs do
acc = [[token] | acc]
do_group_tokens(rest, acc)
end
# Amend the folding range
defp do_group_tokens([{k, _, _} = token | rest], [[{:identifier, _, _}] = head | tail])
when k in @kinds do
acc = [[token | head] | tail]
do_group_tokens(rest, acc)
end
# Start a new folding range
defp do_group_tokens([{k, _, _} = token | rest], acc) when k in @kinds do
acc = [[token] | acc]
do_group_tokens(rest, acc)
end
# Finish the open folding range
defp do_group_tokens([{:eol, _, _} = token | rest], [[{k, _, _} | _] = head | tail])
when k in @kinds do
acc = [[token | head] | tail]
do_group_tokens(rest, acc)
end
defp do_group_tokens([_unmatched_token | rest], acc) do
do_group_tokens(rest, acc)
end
@spec convert_groups_to_ranges([[Token.t()]]) :: [FoldingRange.t()]
defp convert_groups_to_ranges(groups) do
groups
|> Enum.map(fn group ->
# Each group comes out of group_tokens/1 reversed
{last, first} = FoldingRange.Helpers.first_and_last_of_list(group)
classify_group(first, last)
end)
|> Enum.map(fn {start_line, end_line, kind} ->
%{
startLine: start_line,
endLine: end_line - 1,
kind?: kind
}
end)
|> Enum.filter(fn range -> range.endLine > range.startLine end)
end
defp classify_group({kind, {start_line, _, _}, _}, {_, {end_line, _, _}, _}) do
kind = if kind == :identifier, do: :comment, else: :region
{start_line, end_line, kind}
end
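  # Sketch: a group opened by a doc identifier folds with kind :comment, while
  # heredoc/string/sigil groups fold as :region; convert_groups_to_ranges/1
  # subtracts one from the end line so the closing delimiter stays visible.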
end
|
apps/language_server/lib/language_server/providers/folding_range/special_token.ex
| 0.879101
| 0.425128
|
special_token.ex
|
starcoder
|
defmodule Aoc2021 do
@moduledoc """
Each function answers the puzzle for one day of [Advent of Code][1]. Each
function has doctests that solve the example input, so there are no spoilers
in this module, other than the actual code that solves the puzzle, of course.
The code can definitely be better as I am a novice Elixir coder.
[1]: https://adventofcode.com
"""
@doc """
Day one. Figure out how quickly the depth increases.
As the submarine drops below the surface of the ocean, it automatically
performs a sonar sweep of the nearby sea floor. On a small screen, the sonar
sweep report (your puzzle input) appears: each line is a measurement of the
sea floor depth as the sweep looks further and further away from the
submarine.
For example, suppose you had the following report:
```
199
200
208
210
200
207
240
269
260
263
```
This report indicates that, scanning outward from the submarine, the sonar
sweep found depths of 199, 200, 208, 210, and so on.
The first order of business is to figure out how quickly the depth increases,
just so you know what you're dealing with - you never know if the keys will
get carried into deeper water by an ocean current or a fish or something.
To do this, count the number of times a depth measurement increases from the
previous measurement. (There is no measurement before the first measurement.)
In the example above, the changes are as follows:
```
199 (N/A - no previous measurement)
200 (increased)
208 (increased)
210 (increased)
200 (decreased)
207 (increased)
240 (increased)
269 (increased)
260 (decreased)
263 (increased)
```
In this example, there are 7 measurements that are larger than the previous
measurement.
How many measurements are larger than the previous measurement?
## Parameters
- depths: List that contains sonar depth measurements
## Examples
iex> Aoc2021.day_one([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
7
"""
@spec day_one(list) :: integer
def day_one(depths) do
depths
|> Enum.chunk_every(2, 1, :discard)
|> Enum.filter(fn [l, r] -> l < r end)
|> Enum.count
end
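  # Pipeline sketch: Enum.chunk_every(2, 1, :discard) turns [199, 200, 208]
  # into [[199, 200], [200, 208]]; each strictly increasing pair survives the
  # filter and is counted.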
@doc """
Day one part two. Figure out how quickly the depth increases but this time
using a three-measurement sliding window to reduce noise in the data.
Considering every single measurement isn't as useful as you expected: there's
just too much noise in the data.
Instead, consider sums of a three-measurement sliding window. Again
considering the above example:
```
199 A
200 A B
208 A B C
210 B C D
200 E C D
207 E F D
240 E F G
269 F G H
260 G H
263 H
```
Start by comparing the first and second three-measurement windows. The
measurements in the first window are marked A (199, 200, 208); their sum is
199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum
is 618. The sum of measurements in the second window is larger than the sum of
the first, so this first comparison increased.
Your goal now is to count the number of times the sum of measurements in this
sliding window increases from the previous sum. So, compare A with B, then
compare B with C, then C with D, and so on. Stop when there aren't enough
measurements left to create a new three-measurement sum.
In the above example, the sum of each three-measurement window is as follows:
```
A: 607 (N/A - no previous sum)
B: 618 (increased)
C: 618 (no change)
D: 617 (decreased)
E: 647 (increased)
F: 716 (increased)
G: 769 (increased)
H: 792 (increased)
```
In this example, there are 5 sums that are larger than the previous sum.
Consider sums of a three-measurement sliding window. How many sums are larger
than the previous sum?
## Parameters
- depths: List that contains sonar depth measurements
## Examples
iex> Aoc2021.day_one_p2([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
5
"""
@spec day_one_p2(list) :: integer
def day_one_p2(depths) do
depths
|> Enum.chunk_every(3, 1, :discard)
|> Enum.map(fn(tmw) -> Enum.sum(tmw) end)
|> Aoc2021.day_one
end
end
|
lib/aoc2021.ex
| 0.91061
| 0.96157
|
aoc2021.ex
|
starcoder
|
defmodule Code do
defexception LoadError, file: nil do
def message(exception) do
"could not load #{exception.file}"
end
end
@moduledoc """
The Code module is responsible for managing code compilation,
code evaluation and code loading.
It complements [Erlang's code module][1] to add behavior
which is specific to Elixir.
[1]: http://www.erlang.org/doc/man/code.html
"""
@doc """
Returns all the loaded files.
"""
def loaded_files do
:elixir_code_server.call :loaded
end
@doc """
Removes the given files from the loaded files list.
The modules defined in the file are not removed,
calling this function only removes it from the list,
allowing it to be required again.
"""
def unload_files(files) do
:elixir_code_server.cast { :unload_files, files }
end
@doc """
Appends a path to Erlang VM code path.
The path is expanded with `Path.expand` before being added.
"""
def append_path(path) do
:code.add_pathz(Path.expand to_char_list(path))
end
@doc """
Prepends a path to Erlang VM code path.
The path is expanded with `Path.expand` before being added.
"""
def prepend_path(path) do
:code.add_patha(Path.expand to_char_list(path))
end
@doc """
Deletes a path from Erlang VM code path.
The path is expanded with `Path.expand` before being deleted.
"""
def delete_path(path) do
:code.del_path(Path.expand to_char_list(path))
end
@doc false
def eval(string, binding // [], opts // []) do
IO.write "[WARNING] Code.eval is deprecated, please use Code.eval_string instead\n#{Exception.format_stacktrace}"
eval_string(string, binding, opts)
end
@doc """
Evaluates the contents given by string. The second argument is the
binding (which should be a keyword list) followed by a keyword list of
environment options. Those options can be:
* `:file` - the file to be considered in the evaluation
* `:line` - the line the script starts
* `:delegate_locals_to` - delegate local calls to the given module,
the default is to not delegate
Besides, the following scope values can be configured:
* `:aliases` - a list of tuples with the alias and its target
* `:requires` - a list of modules required
* `:functions` - a list of tuples where the first element is a module
and the second a list of imported function names and arity
* `:macros` - a list of tuples where the first element is a module
and the second a list of imported macro names and arity
Notice that setting any of the values above overrides Elixir's default
values. For example, setting `:requires` to `[]` will no longer
automatically require the `Kernel` module; in the same way, setting
`:macros` will no longer auto-import `Kernel` macros such as `if`, `case`,
etc.
## Examples
iex> Code.eval_string("a + b", [a: 1, b: 2], file: __ENV__.file, line: __ENV__.line)
{ 3, [ {:a, 1}, {:b, 2} ] }
For convenience, you may pass `__ENV__` as an argument and
all imports, requires and aliases will be automatically carried
over:
iex> Code.eval_string("a + b", [a: 1, b: 2], __ENV__)
{ 3, [ {:a, 1}, {:b, 2} ] }
"""
def eval_string(string, binding // [], opts // [])
def eval_string(string, binding, Macro.Env[] = env) do
eval_string(string, binding, env.to_keywords)
end
def eval_string(string, binding, opts) do
{ value, binding, _scope } =
:elixir.eval :unicode.characters_to_list(string), binding, opts
{ value, binding }
end
@doc """
Evaluates the quoted contents.
This function accepts a list of environment options.
Check `Code.eval_string` for more information.
## Examples
iex> contents = quote(hygiene: [vars: false], do: a + b)
...> Code.eval_quoted(contents, [a: 1, b: 2], file: __ENV__.file, line: __ENV__.line)
{ 3, [ {:a, 1}, {:b, 2} ] }
For convenience, you may pass `__ENV__` as an argument and
all options will be automatically extracted from the environment:
iex> contents = quote(hygiene: [vars: false], do: a + b)
...> Code.eval_quoted(contents, [a: 1, b: 2], __ENV__)
{ 3, [ {:a, 1}, {:b, 2} ] }
"""
def eval_quoted(quoted, binding // [], opts // [])
def eval_quoted(quoted, binding, Macro.Env[] = env) do
eval_quoted(quoted, binding, env.to_keywords)
end
def eval_quoted(quoted, binding, opts) do
{ value, binding, _scope } =
:elixir.eval_quoted [quoted], binding, opts
{ value, binding }
end
@doc """
Converts the given string to AST. It returns `{ :ok, ast }`
if it succeeds, `{ :error, { line, error, token } }` otherwise.
## Options
* `:file` - The filename to be used in stacktraces
and the file reported in the __ENV__ variable.
* `:line` - The line reported in the __ENV__ variable.
* `:existing_atoms_only` - When true, raises an error
when non-existing atoms are found by the tokenizer.
## Macro.to_binary/1
The opposite of converting a string to its AST is
`Macro.to_binary`, which converts an AST to a binary
representation.
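
## Examples

A minimal sketch (the exact metadata may differ between Elixir versions):

    iex> Code.string_to_ast("1 + 2")
    { :ok, { :+, [line: 1], [1, 2] } }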
"""
def string_to_ast(string, opts // []) do
file = Keyword.get opts, :file, "nofile"
line = Keyword.get opts, :line, 1
res = :elixir_translator.forms(:unicode.characters_to_list(string), line, file, opts)
case res do
{ :ok, ast } -> { :ok, unpack_ast(line, ast) }
_ -> res
end
end
@doc """
Converts the given string to AST. It returns the AST if it succeeds,
and raises an exception otherwise. The exception is a TokenMissingError
in case a token is missing (usually because the expression is incomplete),
and a SyntaxError otherwise.
Check `Code.string_to_ast/2` for options information.
"""
def string_to_ast!(string, opts // []) do
file = Keyword.get opts, :file, "nofile"
line = Keyword.get opts, :line, 1
res = :elixir_translator.forms!(:unicode.characters_to_list(string), line, file, opts)
unpack_ast(line, res)
end
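# Collapses the forms returned by the translator into a single AST node:
# nil for no forms, the single form itself, or a block wrapping many forms.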
defp unpack_ast(_line, []), do: nil
defp unpack_ast(_line, [forms]) when not is_list(forms), do: forms
defp unpack_ast(line, forms), do: { :__block__, [line: line], forms }
@doc """
Loads the given `file`. Accepts `relative_to` as an argument
to tell where the file is located. If the file was already
required/loaded, loads it again. It returns all the modules
defined in the file.
Notice that if `load_file` is invoked by different processes
concurrently, the target file will be loaded concurrently many
times. I.e. if `load_file` is called N times with a given file,
that file will be loaded N times. Check
`require_file` if you don't want a file to be loaded concurrently.
"""
def load_file(file, relative_to // nil) when is_binary(file) do
file = find_file(file, relative_to)
:elixir_code_server.call { :acquire, file }
loaded = :elixir_compiler.file file
:elixir_code_server.cast { :loaded, file }
loaded
end
@doc """
Requires the given `file`. Accepts `relative_to` as an argument to tell where
the file is located. It returns all the modules defined in the file. If the
file was already required/loaded, doesn't do anything and returns nil.
Notice that if `require_file` is invoked by different processes concurrently,
the first process to invoke `require_file` acquires a lock and the remaining
ones will block until the file is available. I.e. if `require_file` is called
N times with a given file, it will be loaded only once. The first process to
call `require_file` will get the list of loaded modules, others will get nil.
Check `load_file` if you want a file to be loaded concurrently.
"""
def require_file(file, relative_to // nil) when is_binary(file) do
file = find_file(file, relative_to)
case :elixir_code_server.call({ :acquire, file }) do
:loaded ->
nil
{ :queued, ref } ->
receive do { :elixir_code_server, ^ref, :loaded } -> nil end
:proceed ->
loaded = :elixir_compiler.file file
:elixir_code_server.cast { :loaded, file }
loaded
end
end
@doc """
Loads the compilation options from the code server.
Check compiler_options/1 for more information.
"""
def compiler_options do
:elixir_code_server.call :compiler_options
end
@doc """
Sets compilation options. Those options are global
since they are stored by Elixir's Code Server.
Available options are:
* docs - when true, retain documentation in the compiled module.
True by default;
* debug_info - when true, retain debug information in the compiled module.
This allows a developer to reconstruct the original source
code; for that reason, it is false by default;
* ignore_module_conflict - when true, override modules that were already defined
without raising errors, false by default;
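
## Example

Enabling one option while leaving the others at their defaults:

    Code.compiler_options(ignore_module_conflict: true)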
"""
def compiler_options(opts) do
:elixir_code_server.cast { :compiler_options, opts }
end
@doc """
Compiles the given string and returns a list of tuples where
the first element is the module name and the second one is its
binary.
For compiling many files at once, check `Kernel.ParallelCompiler`.
"""
def compile_string(string, file // "nofile") when is_binary(file) do
:elixir_compiler.string :unicode.characters_to_list(string), file
end
@doc """
Compiles the quoted expression and returns a list of tuples where
the first element is the module name and the second one is its
binary.
"""
def compile_quoted(quoted, file // "nofile") when is_binary(file) do
:elixir_compiler.quoted [quoted], file
end
@doc """
Ensures the given module is loaded. If the module is already
loaded, it works as no-op. If the module was not loaded yet,
it tries to load it.
If it succeeds in loading the module, it returns
`{ :module, module }`. If not, returns `{ :error, reason }` with
the error reason.
## Code loading on the Erlang VM
Erlang has two modes to load code: interactive and embedded.
By default, the Erlang VM runs on interactive mode, where modules
are loaded as needed. In embedded mode the opposite happens, as all
modules need to be loaded upfront or explicitly.
Therefore, this function is useful to check if a module is loaded
before using it and react accordingly. For example, the `URI` module
uses this function to check if a specific parser exists for a given
URI scheme.
## Code.ensure_compiled
Elixir also contains an `ensure_compiled/1` function that is a
superset of `ensure_loaded/1`.
Since Elixir's compilation happens in parallel, in some situations
you may need to use a module but it was not compiled yet, therefore
it can't even be loaded.
`ensure_compiled/1` halts the current process until the
module we are depending on is available.
In most cases, `ensure_loaded` is enough. `ensure_compiled`
must be used only in some rare situations, usually involving macros
that need to invoke a module for callback information.
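
## Examples

A sketch using a module that ships with OTP, so it can always be loaded:

    iex> Code.ensure_loaded(:lists)
    { :module, :lists }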
"""
def ensure_loaded(module) when is_atom(module) do
:code.ensure_loaded(module)
end
@doc """
Similar to `ensure_loaded/1`, but returns a boolean in case
it could be ensured or not.
"""
def ensure_loaded?(module) do
match?({ :module, ^module }, ensure_loaded(module))
end
@doc """
Ensures the given module is compiled and loaded. If the module
is already loaded, it works as no-op. If the module was not
loaded yet, it checks if it needs to be compiled first and just
then tries to load it.
If it succeeds in loading the module, it returns
`{ :module, module }`. If not, returns `{ :error, reason }` with
the error reason.
Check `ensure_loaded/1` for more information on module loading
and when to use `ensure_loaded/1` or `ensure_compiled/1`.
"""
def ensure_compiled(module) when is_atom(module) do
case :code.ensure_loaded(module) do
{ :error, :nofile } = error ->
case :erlang.get(:elixir_ensure_compiled) do
:undefined -> error
_ ->
try do
module.__info__(:module)
{ :module, module }
rescue
UndefinedFunctionError -> error
end
end
other -> other
end
end
@doc """
Similar to `ensure_compiled/1`, but returns a boolean in case
it could be ensured or not.
"""
def ensure_compiled?(module) do
match?({ :module, ^module }, ensure_compiled(module))
end
## Helpers
# Finds the file given the relative_to path.
# If the file is found, returns its path in binary, fails otherwise.
defp find_file(file, relative_to) do
file = if relative_to do
Path.expand(file, relative_to)
else
Path.expand(file)
end
if File.regular?(file) do
file
else
raise LoadError, file: file
end
end
end
|
lib/elixir/lib/code.ex
| 0.891114
| 0.535341
|
code.ex
|
starcoder
|
defmodule FnDef do
@moduledoc """
"""
defstruct [
fn_call_ast: nil,
fn_options_ast: nil
]
@doc """
Parse a function call into its parts: name and arguments
"""
def parse_fn_name_and_args({:when, _, [short_head | _]}),
do: parse_fn_name_and_args(short_head)
def parse_fn_name_and_args(short_head),
do: Macro.decompose_call(short_head)
@doc """
Add mediator arguments to a function arguments list AST.
Mainly to make it possible to print unbound arguments
in a function call:
say(word) -> say(word = arg0)
Returns {[arg names], [mediator args AST], [decorated args AST]}
## Examples
```elixir
iex> FnDef.decorate_args(quote context: __MODULE__ do [a, b, _] end)
{[:a, :b, :arg2],
[{:arg0, [], FnDef}, {:arg1, [], FnDef}, {:arg2, [], FnDef}],
[{:=, [], [{:a, [], __MODULE__}, {:arg0, [], FnDef}]},
{:=, [], [{:b, [], __MODULE__}, {:arg1, [], FnDef}]},
{:=, [], [{:_, [], __MODULE__}, {:arg2, [], FnDef}]}]}
```
"""
@spec decorate_args(list) :: {list, list, list}
def decorate_args([]), do: {[], [], []}
def decorate_args(args_ast) do
Enum.with_index(args_ast)
|> Enum.map(&decorate_arg/1)
|> convert_to_cols
end
@doc """
## Examples
```elixir
iex> FnDef.convert_to_cols([{:first_name, {:arg0, [], FnDef}, {:=, [], [{:first_name, [], __MODULE__}, {:arg0, [], FnDef}]}}, {:last_name, {:arg1, [], FnDef}, {:=, [], [{:last_name, [], __MODULE__}, {:arg1, [], FnDef}]}}])
{[:first_name, :last_name], [{:arg0, [], FnDef}, {:arg1, [], FnDef}], [{:=, [], [{:first_name, [], __MODULE__}, {:arg0, [], FnDef}]}, {:=, [], [{:last_name, [], __MODULE__}, {:arg1, [], FnDef}]}]}
```
"""
def convert_to_cols(list) do
args_calc_names = Enum.map(list, fn {arg_calc_name, _arg, _full_arg} -> arg_calc_name end)
args = Enum.map(list, fn {_arg_calc_name, arg, _full_arg} -> arg end)
full_args = Enum.map(list, fn {_arg_calc_name, _arg, full_arg} -> full_arg end)
{args_calc_names, args, full_args}
end
@doc """
Add a mediator argument to a function argument AST.
Returns {argument name, mediator argument AST, decorated argument AST}
## Examples
```elixir
iex> FnDef.decorate_arg({quote context: __MODULE__ do first_name end, 0})
{:first_name, {:arg0, [], FnDef}, {:=, [], [{:first_name, [], __MODULE__}, {:arg0, [], FnDef}]}}
```
"""
@spec decorate_arg({Macro.t, non_neg_integer}) :: {atom, Macro.t, Macro.t}
def decorate_arg({arg_ast, index}) do
mediator_arg_ast = Macro.var(:"arg#{index}", __MODULE__)
full_arg_ast = calc_full_arg(arg_ast, mediator_arg_ast)
arg_calc_name = calc_arg_name(full_arg_ast)
{arg_calc_name, mediator_arg_ast, full_arg_ast}
end
@doc """
Generate AST for argument AST and its mediator.
## Examples
```elixir
iex> FnDef.calc_full_arg(quote context: __MODULE__ do first_name end, quote context: __MODULE__ do arg0 end)
{:=, [], [{:first_name, [], __MODULE__}, {:arg0, [], __MODULE__}]}
```
"""
@spec calc_full_arg(Macro.t, Macro.t) :: Macro.t
def calc_full_arg(arg_ast, mediator_arg_ast) when elem(arg_ast, 0) == :\\ do
{:\\, _, [{_, _, _} = full_optional_name, default_arg_value]} = arg_ast
quote do
unquote(full_optional_name) = unquote(mediator_arg_ast) \\ unquote(default_arg_value)
end
end
def calc_full_arg(arg_ast, mediator_arg_ast) do
quote do
unquote(arg_ast) = unquote(mediator_arg_ast)
end
end
@doc """
Calculate the argument name from an argument expression.
Returns the first usable argument name, avoiding `_`-prefixed names when possible.
## Examples
```elixir
iex> FnDef.calc_arg_name(quote do aa end)
:aa
```
```elixir
iex> FnDef.calc_arg_name(quote do aa = bb end)
:aa
```
```elixir
iex> FnDef.calc_arg_name(quote do _aa = bb end)
:bb
```
```elixir
iex> FnDef.calc_arg_name(quote do _ = bb end)
:bb
```
```elixir
iex> FnDef.calc_arg_name(quote do _ = bb \\\\ 6 end)
:bb
```
```elixir
iex> FnDef.calc_arg_name(quote do _ = _bb \\\\ 6 end)
:_bb
```
```elixir
iex> FnDef.calc_arg_name(quote do {aa} = bb end)
:bb
```
```elixir
iex> FnDef.calc_arg_name(quote do [aa] = bb end)
:bb
```
"""
def calc_arg_name({:=, _, [{first_name, _, _}, second]} = arg_ast) do
first_name_str = Atom.to_string(first_name)
cond do
String.match?(first_name_str, ~r/^_.*/) ->
calc_arg_name(second)
is_operator_token(first_name) ->
calc_arg_name(second)
true ->
String.to_atom first_name_str
end
end
def calc_arg_name({:=, _, [first, second]} = _arg_ast) when is_list(first) do
calc_arg_name(second)
end
def calc_arg_name({:\\, _, [first, _second]} = _arg_ast) do
calc_arg_name(first)
end
def calc_arg_name({name, _, _} = _arg_ast) do
name
end
def is_operator_token(:{}) do
true
end
def is_operator_token(:%{}) do
true
end
def is_operator_token(:%) do
true
end
def is_operator_token(a) when is_list(a) do
true
end
def is_operator_token(_a) do
false
end
end
|
lib/fn_def.ex
| 0.787032
| 0.734703
|
fn_def.ex
|
starcoder
|
defmodule Digraphviz.Converter do
@moduledoc false
defmodule Document do
@moduledoc false
defstruct ~w(
digraph
subgraphs
node_converter
edge_converter
)a
end
alias Digraphviz.Types
def from(digraph) do
%Document{digraph: digraph}
end
def convert(graph, type \\ :digraph, attributes \\ [], subgraphs \\ %{}) do
stype =
case type do
:digraph -> "digraph"
:graph -> "graph"
end
nodes_and_subgraphs = process(graph)
[
stype,
" {",
Types.Attributes.convert(attributes),
subgraphs(nodes_and_subgraphs.subgraphs, subgraphs),
nodes_and_subgraphs.nodes,
edges(graph, type),
"}"
]
end
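# A minimal usage sketch (hypothetical two-vertex graph; `convert/4` returns
# iodata, so the result is binarised before printing):
#
#     g = :digraph.new()
#     :digraph.add_vertex(g, :a, [])
#     :digraph.add_vertex(g, :b, [])
#     :digraph.add_edge(g, :a, :b, [])
#
#     g
#     |> Digraphviz.Converter.from()
#     |> Digraphviz.Converter.convert()
#     |> IO.iodata_to_binary()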
defp process(graph) do
converter =
case graph.node_converter do
nil -> &node/2
conv -> conv
end
:digraph.vertices(graph.digraph)
|> Stream.map(fn n ->
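# The pattern match asserts the vertex still exists; its value, the
# `{vertex, label}` tuple, becomes the element consumed by the reduce below.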
{^n, _l} = :digraph.vertex(graph.digraph, n)
end)
|> Enum.reduce(
Types.Subgraph.create(),
fn {v, l}, g ->
case converter.(v, l) do
{n, nil} -> Types.Subgraph.add_node(g, n)
{n, subgraph} -> Types.Subgraph.add_node_to_subgraph(g, n, subgraph)
end
end
)
end
defp subgraphs(subgraphs, subgraphs_info) do
Types.Subgraph.fold(subgraphs, subgraphs_info)
end
defp edges(graph, type) do
edge_list = :digraph.edges(graph.digraph)
converter =
case graph.edge_converter do
nil -> &edge/4
conv -> conv
end
edge_list |> Enum.map(process_edge(graph.digraph, converter, type))
end
defp process_edge(digraph, fun, type) do
fn edge_name ->
case :digraph.edge(digraph, edge_name) do
false -> []
{^edge_name, v1, v2, label} -> fun.(v1, v2, label, type)
end
end
end
defp node(name, label) do
{subgraph, label} = Keyword.pop(label, :subgraph)
{[Types.ID.convert(name), Types.AttrsList.convert(label)], subgraph}
end
defp edge(v1, v2, label, type) do
connect =
case type do
:digraph -> "->"
:graph -> "--"
end
[Types.ID.convert(v1), connect, Types.ID.convert(v2), Types.AttrsList.convert(label)]
end
end
|
lib/digraphviz/convert.ex
| 0.712732
| 0.455804
|
convert.ex
|
starcoder
|
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.AnyValue do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any}
}
defstruct [:value]
oneof(:value, 0)
field(:string_value, 1, type: :string, oneof: 0)
field(:bool_value, 2, type: :bool, oneof: 0)
field(:int_value, 3, type: :int64, oneof: 0)
field(:double_value, 4, type: :double, oneof: 0)
field(:array_value, 5, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.ArrayValue, oneof: 0)
field(:kvlist_value, 6, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValueList, oneof: 0)
field(:bytes_value, 7, type: :bytes, oneof: 0)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.ArrayValue do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
values: [SpandexOTLP.Opentelemetry.Proto.Common.V1.AnyValue.t()]
}
defstruct [:values]
field(:values, 1, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.AnyValue)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValueList do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
values: [SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue.t()]
}
defstruct [:values]
field(:values, 1, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: SpandexOTLP.Opentelemetry.Proto.Common.V1.AnyValue.t() | nil
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.AnyValue)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: :string)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Common.V1.InstrumentationLibrary do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
version: String.t()
}
defstruct [:name, :version]
field(:name, 1, type: :string)
field(:version, 2, type: :string)
end
|
lib/spandex_otlp/opentelemetry/proto/common/v1/common.pb.ex
| 0.706899
| 0.527864
|
common.pb.ex
|
starcoder
|
defmodule Ash.Flow.Chart.Mermaid do
@moduledoc "Tools to render an Ash.Flow as a mermaid chart."
@opts [
expand: [
type: :boolean,
default: true,
doc: """
If the flow should be fully expanded (all `run_flow` steps will be inlined)
"""
]
]
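# A usage sketch (`MyApp.SignUpFlow` is a hypothetical flow module):
#
#     MyApp.SignUpFlow
#     |> Ash.Flow.Chart.Mermaid.chart(expand: false)
#     |> IO.puts()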
def chart(flow, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @opts)
# This is a hack that may not work forever
# Eventually, we may need a separate mode/option for `build`
# that doesn't attempt to substitute arguments into templates
args =
flow
|> Ash.Flow.Info.arguments()
|> Map.new(fn argument ->
{argument.name, {:_arg, argument.name}}
end)
steps =
if opts[:expand] do
{:ok, %{steps: steps}} = Ash.Flow.Executor.AshEngine.build(flow, args, [])
unwrap(steps)
else
Ash.Flow.Info.steps(flow)
end
arguments = flow |> Ash.Flow.Info.arguments()
init = """
flowchart TB
classDef hidden visibility:hidden
"""
init
|> add_arguments(arguments)
|> add_steps(steps, steps, opts)
|> add_links(steps, steps, opts)
|> IO.iodata_to_binary()
end
defp add_arguments(message, arguments) do
message =
message
|> add_line("subgraph Arguments")
|> add_line("direction TB")
Enum.reduce(arguments, message, fn argument, message ->
question_mark =
if argument.allow_nil? do
"?"
else
""
end
add_line(
message,
"_arguments.#{argument.name}(\"#{argument.name}#{question_mark}: #{inspect(argument.type)}\")"
)
end)
|> add_line("end")
end
defp unwrap(steps) do
Enum.map(steps, fn %{step: step} ->
case step do
%{steps: steps} ->
%{step | steps: unwrap(steps)}
step ->
step
end
end)
end
defp add_steps(message, steps, all_steps, opts) do
Enum.reduce(steps, message, fn step, message ->
case step do
%Ash.Flow.Step.Map{steps: steps, over: over, output: output} = step ->
id = "#{format_name(step)}.element"
highlight =
if output do
do_format_name(List.wrap(output))
else
format_name(List.last(steps))
end
name = format_name(step)
message =
message
|> add_line("subgraph #{name} [Map]")
|> add_line("direction TB")
|> add_line("#{id}(\"Element: #{format_template(over, all_steps)}\")")
|> add_steps(steps, all_steps, opts)
|> highlight(highlight)
|> add_line("end")
message
%Ash.Flow.Step.Transaction{steps: steps} = step ->
name = format_name(step)
message
|> add_line("subgraph #{name}.subgraph [Transaction]")
|> add_line("direction TB")
|> add_steps(steps, all_steps, opts)
|> add_line("end")
%Ash.Flow.Step.RunFlow{flow: flow} = step ->
returns = Ash.Flow.Info.returns(flow)
if returns && opts[:expand] do
escaped_returns = escape(inspect(Ash.Flow.Info.returns(flow)))
name = format_name(step)
header =
if is_atom(returns) do
"Gather Value"
else
"Gather Values"
end
message
|> add_line("#{name}(\"#{header}: #{escaped_returns}\")")
else
message
end
step ->
add_line(message, "#{format_name(step)}(\"#{short_name(step)}\")")
end
end)
end
defp short_name(%Ash.Flow.Step.Custom{custom: {mod, opts}}) do
if function_exported?(mod, :short_name, 1) do
mod.short_name(opts)
else
escape(inspect({mod, opts}))
end
end
defp short_name(%Ash.Flow.Step.Map{steps: steps, output: output}) do
child_step =
if output do
find_step(steps, output)
else
List.last(steps)
end
"Element of #{short_name(child_step)}"
end
defp short_name(%Ash.Flow.Step.RunFlow{flow: flow}) do
"Run Flow: #{inspect(flow)}"
end
defp short_name(%Ash.Flow.Step.Create{action: action, resource: resource}) do
"Create: #{inspect(resource)}.#{action}"
end
defp short_name(%Ash.Flow.Step.Update{action: action, resource: resource}) do
"Update: #{inspect(resource)}.#{action}"
end
defp short_name(%Ash.Flow.Step.Destroy{action: action, resource: resource}) do
"Destroy: #{inspect(resource)}.#{action}"
end
defp short_name(%Ash.Flow.Step.Read{action: action, resource: resource}) do
"Read: #{inspect(resource)}.#{action}"
end
defp highlight(message, id) do
add_line(message, "style #{id} fill:#4287f5,stroke:#333,stroke-width:4px")
end
defp add_links(message, steps, all_steps, opts) do
Enum.reduce(steps, message, fn step, message ->
case step do
%Ash.Flow.Step.Map{steps: steps, over: over} = step ->
id = "#{format_name(step)}.element"
message
|> add_dependencies(step, all_steps)
|> add_deps(over, id, all_steps)
|> add_links(steps, all_steps, opts)
%Ash.Flow.Step.Transaction{steps: steps} = step ->
message
|> add_dependencies(step, all_steps)
|> add_links(steps, all_steps, opts)
%Ash.Flow.Step.RunFlow{flow: flow} ->
returns = Ash.Flow.Info.returns(flow)
name = format_name(step)
message =
Enum.reduce(List.wrap(returns), message, fn
{key, _}, message ->
{source, note} = link_source(all_steps, List.wrap(step.name) ++ List.wrap(key))
message
|> add_link(source, note, name)
value, message ->
{source, note} = link_source(all_steps, List.wrap(step.name) ++ List.wrap(value))
message
|> add_link(source, note, name)
end)
if opts[:expand] do
message
else
message
|> add_dependencies(step, all_steps)
|> add_links(steps, all_steps, opts)
end
step ->
add_dependencies(message, step, all_steps)
end
end)
end
defp add_link(message, source, nil, name) do
add_line(message, "#{source}-->#{name}")
end
defp add_link(message, source, note, name) do
add_line(message, "#{source}-->|#{note}|#{name}")
end
defp format_template(template, all_steps) do
do_format_template(template, all_steps)
end
defp do_format_template(template, all_steps) when is_map(template) do
"%{#{Enum.map_join(template, ", ", fn {key, value} -> "#{do_format_template(key, all_steps)}: #{do_format_template(value, all_steps)}" end)}}"
end
defp do_format_template(template, all_steps) when is_list(template) do
"[#{Enum.map_join(template, ", ", &do_format_template(&1, all_steps))}]"
end
defp do_format_template({:_path, value, path}, all_steps) do
"get_in(#{do_format_template(value, all_steps)}, #{Enum.map_join(path, ", ", &do_format_template(&1, all_steps))})"
end
defp do_format_template({:_result, step_name}, all_steps) do
"result(#{short_name(find_step(all_steps, step_name))})"
end
defp do_format_template({:_element, step_name}, all_steps) do
"element(#{short_name(find_step(all_steps, step_name)).name})"
end
defp do_format_template(value, all_steps) when is_tuple(value) do
"#{Enum.map_join(value, ", ", &do_format_template(&1, all_steps))}"
end
defp do_format_template(value, _), do: inspect(value)
def find_step(steps, name) when is_list(steps), do: Enum.find_value(steps, &find_step(&1, name))
def find_step(%{name: name} = step, name), do: step
def find_step(%{steps: steps}, name), do: find_step(steps, name)
def find_step(_, _), do: nil
defp escape(string) do
String.replace(string, "\"", "'")
end
defp add_dependencies(message, step, all_steps) do
Enum.reduce(Ash.Flow.Executor.AshEngine.deps_keys(), message, fn key, message ->
case Map.fetch(step, key) do
{:ok, value} ->
add_deps(message, value, format_name(step), all_steps)
:error ->
message
end
end)
end
defp add_deps(message, template, destination, all_steps) do
result_refs = Ash.Flow.result_refs(template)
arg_refs = Ash.Flow.arg_refs(template)
element_refs = Ash.Flow.element_refs(template)
message =
Enum.reduce(element_refs, message, fn element, message ->
add_line(message, "#{do_format_name(element)}.element --> #{destination}")
end)
message =
Enum.reduce(arg_refs, message, fn arg, message ->
add_line(message, "_arguments.#{arg} -.-> #{destination}")
end)
Enum.reduce(result_refs, message, fn dep, message ->
{source, note} = link_source(all_steps, dep)
add_link(message, source, note, destination)
end)
end
defp link_source(all_steps, dep, note \\ nil) do
case find_step(all_steps, dep) do
%Ash.Flow.Step.Map{steps: steps, output: output} = step ->
output_step =
if output do
find_step(steps, List.wrap(output))
else
List.last(steps)
end
case output_step do
nil ->
{format_name(step), note}
output_step ->
link_source(all_steps, output_step.name, "list")
end
step ->
{format_name(step), note}
end
end
defp add_line(message, line) do
[message, "\n", line]
end
defp format_name(step) do
do_format_name(step.name)
end
defp do_format_name(name) do
name
|> List.wrap()
|> List.flatten()
|> Enum.join(".")
end
end
|
lib/ash/flow/chart/mermaid.ex
| 0.658088
| 0.557604
|
mermaid.ex
|
starcoder
|
defmodule Artemis.Ecto.DateMacros do
@doc """
Returns a timestamp, truncated to the given precision.
## Usage Example
Incident
|> group_by([i], [date_trunc("month", i.triggered_at)])
|> select([i], [date_trunc("month", i.triggered_at), count(i.id)])
|> Repo.all()
Returns:
[
[~N[1995-09-01 00:00:00.000000], 3]
[~N[2019-09-01 00:00:00.000000], 3],
[~N[2019-10-01 00:00:00.000000], 127],
[~N[2019-11-01 00:00:00.000000], 7043],
[~N[2019-12-01 00:00:00.000000], 83]
]
## Comparing `date_trunc` and `date_part`
Where `date_trunc` returns:
[
[~N[1995-09-01 00:00:00.000000], 3]
[~N[2019-09-01 00:00:00.000000], 3],
[~N[2019-10-01 00:00:00.000000], 127],
[~N[2019-11-01 00:00:00.000000], 7043],
[~N[2019-12-01 00:00:00.000000], 83]
]
And `date_part` returns:
[
[9.0, 6],
[10.0, 127],
[11.0, 7043],
[12.0, 82]
]
Notice how `date_part` collapses results across years since it only returns
the numeric `month` value as a float.
Whereas, the `date_trunc` function preserves the difference between each year
since it returns a truncated timestamp value.
## Precision Values
Supported `precision` values include:
microseconds
milliseconds
second
minute
hour
day
week
month
quarter
year
decade
century
millennium
See: https://www.postgresql.org/docs/12/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
"""
defmacro date_trunc(precision, field) do
quote do
fragment("date_trunc(?, ?)", unquote(precision), unquote(field))
end
end
@doc """
Returns the requested part (subfield) of a timestamp as a float.
## Usage Example
Incident
|> group_by([i], [date_part("month", i.triggered_at)])
|> select([i], [date_part("month", i.triggered_at), count(i.id)])
|> Repo.all()
Returns:
[
[9.0, 6],
[10.0, 127],
[11.0, 7043],
[12.0, 82]
]
## Comparing `date_trunc` and `date_part`
See the documentation for `date_trunc/2`.
## Precision Values
Supported `precision` values include:
microseconds
milliseconds
second
minute
hour
day
week
month
quarter
year
decade
century
millennium
dow
doy
epoch
isodow
isoyear
timezone
timezone_hour
timezone_minute
See: https://www.postgresql.org/docs/12/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
"""
defmacro date_part(precision, field) do
quote do
fragment("date_part(?, ?)", unquote(precision), unquote(field))
end
end
end
|
apps/artemis/lib/artemis/ecto/date_macros.ex
| 0.851737
| 0.478894
|
date_macros.ex
|
starcoder
|
defmodule MelodyMatch.Matchmaker.MatcherTopTrack do
@moduledoc """
Production-ready matching algorithm that matches based on
similarity of users' top tracks.
"""
@behaviour MelodyMatch.Matchmaker.MatcherBase
@maximum_diff 50
@max_location_meters 500_000
@impl true
def best_match(_, _, pool) when map_size(pool) == 0, do: nil
@impl true
def best_match(recent_partners, user, pool) do
{other_id, _} = pool
|> Enum.filter(fn {id, _} -> !Enum.member?(recent_partners, id) end)
|> Enum.filter(fn {_, traits} -> close_enough(user, traits) end)
|> Enum.map(fn {id, traits} -> {id, traits_difference(user, traits)} end)
|> Enum.filter(fn {_, diff} -> diff <= @maximum_diff end)
|> Enum.min(fn {_, diff1}, {_, diff2} -> diff1 <= diff2 end, fn -> {nil, nil} end)
other_id
end
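# Averages the *signed* per-trait differences and takes the absolute value
# of that mean, so positive and negative trait gaps can offset each other.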
defp traits_difference(a, b) do
danceability_diff = a.danceability - b.danceability
acousticness_diff = a.acousticness - b.acousticness
energy_diff = a.energy - b.energy
instrumentalness_diff = a.instrumentalness - b.instrumentalness
liveness_diff = a.liveness - b.liveness
loudness_diff = a.loudness - b.loudness
speechiness_diff = a.speechiness - b.speechiness
valence_diff = a.valence - b.valence
tempo_diff = a.tempo - b.tempo
diff_raw = (danceability_diff + acousticness_diff + energy_diff \
+ instrumentalness_diff + liveness_diff + loudness_diff + speechiness_diff \
+ valence_diff + tempo_diff) / 9
abs(diff_raw)
end
defp close_enough(%{latitude: nil, longitude: nil}, %{latitude: nil, longitude: nil}) do
true
end
defp close_enough(%{latitude: lat1, longitude: long1}, %{latitude: lat2, longitude: long2}) do
cond do
(lat1 == nil) || (long1 == nil) -> false
(lat2 == nil) || (long2 == nil) -> false
true -> close_enough(lat1, long1, lat2, long2)
end
end
defp close_enough(_, _), do: false
defp close_enough(lat1, long1, lat2, long2) do
with {lat1, _} <- Float.parse(lat1),
{long1, _} <- Float.parse(long1),
{lat2, _} <- Float.parse(lat2),
{long2, _} <- Float.parse(long2)
do
point1 = %{lat: lat1, lon: long1}
point2 = %{lat: lat2, lon: long2}
Geocalc.distance_between(point1, point2) <= @max_location_meters
else
_ -> false
end
end
end
|
server/lib/melody_match/matchmaker/matcher_top_track.ex
| 0.864139
| 0.503296
|
matcher_top_track.ex
|
starcoder
|
defmodule SSHTunnel do
@moduledoc ~S"""
Module for creating SSH tunnels using `:ssh`.
It provides functions to create forwarded ssh channels, similar
to how other channels can be created using `:ssh_connection`.
There are two type of channels supported
* `direct-tcpip` - Forwards a port from the client machine to the remote machine. This is the same as `ssh -nNT -L 8080:forward.example.com:9000 <EMAIL>`
* `direct-streamlocal` - Forwards to a unix domain socket. This is the same as `ssh -nNT -L 8080:/var/lib/mysql/mysql.sock <EMAIL>`
When using `direct_tcpip/3` or `stream_local_forward/2` directly there will not be any local port or socket bound,
this can either be done using `SSHTunnel.Tunnel` or by manually sending data with `:ssh_connection.send/3`
Although `connect/1` can be used to connect to the remote host, other methods are supported.
One can use [SSHex](https://github.com/rubencaro/sshex), `:ssh.connect/3` for instance.
## Tunnels
Tunnels are on-demand TCP servers bound to either a local port or a unix socket path. The tunnel will handle
relaying TCP messages to the ssh connection and back.
## Examples
{:ok, ssh_ref} = SSHTunnel.connect(host: "sshserver.example.com", user: "user", password: "password")
{:ok, pid} = SSHTunnel.start_tunnel(pid, {:tcpip, {8080, {"192.168.90.15", 80}}})
# Send a TCP message for instance HTTP
%HTTPoison.Response{body: body} = HTTPoison.get!("127.0.0.1:8080")
IO.puts("Received body: #{body})
"""
@direct_tcpip String.to_charlist("direct-tcpip")
@stream_local String.to_charlist("<EMAIL>")
@ini_window_size 1024 * 1024
@max_packet_size 32 * 1024
@type location :: {String.t(), integer()}
@doc """
Create a connection to a remote host with the provided options. This function is mostly used as
a convenience wrapper around `:ssh.connect/3` and does not support all options.
returns: `{:ok, connection}` or `{:error, reason}`.
"""
@spec connect(Keyword.t()) :: {:ok, pid()} | {:error, term()}
def connect(opts \\ []) do
host = Keyword.get(opts, :host, "127.0.0.1")
port = Keyword.get(opts, :port, 22)
ssh_config = defaults(opts)
:ssh.connect(String.to_charlist(host), port, ssh_config)
end
@doc ~S"""
Starts a SSHTunnel.Tunnel process, the tunnel will listen to either a local port or local path and handle
passing messages between the TCP client and ssh connection.
## Examples
{:ok, ssh_ref} = SSHTunnel.connect(host: "sshserver.example.com", user: "user", password: "password")
{:ok, pid} = SSHTunnel.start_tunnel(pid, {:tcpip, {8080, {"192.168.90.15", 80}}})
# Send a TCP message
%HTTPoison.Response{body: body} = HTTPoison.get!("127.0.0.1:8080")
IO.puts("Received body: #{body})
"""
@spec start_tunnel(pid(), SSHTunnel.Tunnel.to(), Keyword.t()) :: {:ok, pid()} | {:error, term()}
defdelegate start_tunnel(pid, to, opts \\ []), to: SSHTunnel.Tunnel, as: :start
@doc ~S"""
Creates an ssh direct-tcpip forwarded channel to a remote port.
The returned channel together with a ssh connection reference (returned from `:ssh.connect/4`) can be used
to send messages with `:ssh_connection.send/3`
returns: `{:ok, channel}` or `{:error, reason}`.
## Examples:
msg = "GET / HTTP/1.1\r\nHost: localhost:8080\r\nUser-Agent: curl/7.47.0\r\nAccept: */*\r\n\r\n"
{:ok, pid} = SSHTunnel.connect(host: "192.168.1.10", user: "user", password: "password")
{:ok, ch} = SSHTunnel.direct_tcpip(pid, {"127.0.0.1", 8080}, {"192.168.1.10", 80})
:ok = :ssh_connection.send(pid, ch, msg)
receive do
{:ssh_cm, _, {:data, channel, _, data}} -> IO.puts("Data: #{data}")
end
"""
@spec direct_tcpip(pid(), location, location) :: {:ok, integer()} | {:error, term()}
def direct_tcpip(pid, from, to) do
{orig_host, orig_port} = from
{remote_host, remote_port} = to
remote_len = byte_size(remote_host)
orig_len = byte_size(orig_host)
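# Channel-open payload for "direct-tcpip" (RFC 4254, section 7.2):
# length-prefixed target host and port, then originator host and port.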
msg = <<
remote_len::size(32),
remote_host::binary,
remote_port::size(32),
orig_len::size(32),
orig_host::binary,
orig_port::size(32)
>>
open_channel(pid, @direct_tcpip, msg, @ini_window_size, @max_packet_size, :infinity)
end
@doc ~S"""
Creates an ssh stream-local forward channel to a remote unix domain socket.
The returned channel together with a ssh connection reference (returned from `:ssh.connect/4`) can be used
to send messages with `:ssh_connection.send/3`.
returns: `{:ok, channel}` or `{:error, reason}`.
Ex:
```
msg = "GET /images/json HTTP/1.1\r\nHost: /var/run/docker.sock\r\nAccept: */*\r\n\r\n"
{:ok, pid} = SSHTunnel.connect(host: "192.168.90.15", user: "user", password: "password")
{:ok, ch} = SSHTunnel.stream_local_forward(pid, "/var/run/docker.sock")
:ok = :ssh_connection.send(pid, ch, msg)
```
"""
@spec stream_local_forward(pid(), String.t()) :: {:ok, integer()} | {:error, term()}
def stream_local_forward(pid, socket_path) do
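# Payload for OpenSSH's stream-local channel extension: the length-prefixed
# socket path followed by two reserved 32-bit fields.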
msg = <<byte_size(socket_path)::size(32), socket_path::binary, 0::size(32), 0::size(32)>>
open_channel(pid, @stream_local, msg, @ini_window_size, @max_packet_size, :infinity)
end
defp open_channel(pid, type, msg, window_size, max_packet_size, timeout) do
case :ssh_connection_handler.open_channel(
pid,
type,
msg,
window_size,
max_packet_size,
timeout
) do
{:open, ch} -> {:ok, ch}
{:open_error, _, reason, _} -> {:error, to_string(reason)}
end
end
defp defaults(opts) do
user = Keyword.get(opts, :user, "")
password = Keyword.get(opts, :password, "")
[
user_interaction: false,
silently_accept_hosts: true,
user: String.to_charlist(user),
password: String.to_charlist(password)
]
end
end
|
lib/ssh_tunnel.ex
| 0.850624
| 0.759916
|
ssh_tunnel.ex
|
starcoder
|