if Code.ensure_loaded?(Plug) do
defmodule Unleash.Plug do
@moduledoc """
An extra fancy `Plug` and utility functions to help when developing `Plug`
or `Phoenix`-based applications. It automatically puts together a
`t:Unleash.context/0` under the `Plug.Conn`'s `Plug.assigns/0`.
To use, call `plug Unleash.Plug` in your plug pipeline. It depends on the
session, and requires being after `:fetch_session` to work. It accepts the
following options:
* `:user_id`: The key under which the user's ID is found in the session.
* `:session_id`: The key under which the session ID is found in the
session.
After which, `enabled?/3` is usable.
If you are _also_ using `PhoenixGon` in your application, you can call
`put_feature/3` to put a specific feature flag into the gon object for
using on the client side.
"""
import Plug.Conn
@behaviour Plug
@default_opts [
user_id: :user_id,
session_id: :session_id
]
@doc false
def init(opts) when is_list(opts), do: Keyword.merge(@default_opts, opts)
def init(_), do: @default_opts
@doc false
def call(conn, opts) do
context = construct_context(conn, opts)
assign(conn, :unleash_context, context)
end
@doc """
Given a `t:Plug.Conn/0`, a feature, and (optionally) a default boolean,
return whether or not the feature is enabled. This requires the plug to be
part of the plug pipeline, as it uses the `t:Unleash.context/0` constructed
out of the session.
## Examples
iex> Unleash.Plug.enabled?(conn, :test)
false
iex> Unleash.Plug.enabled?(conn, :test, true)
true
"""
@spec enabled?(Plug.Conn.t(), String.t() | atom(), boolean()) :: boolean()
def enabled?(%Plug.Conn{assigns: assigns}, feature, default \\ false) do
context = Map.get(assigns, :unleash_context, %{})
Unleash.enabled?(feature, context, default)
end
if Code.ensure_loaded?(PhoenixGon) do
alias PhoenixGon.Controller
@doc """
If you are using `PhoenixGon`, you can call this to put a feature in the
gon object to be used on the client side. It will be available under
`window.Gon.getAssets('features')`. It listens to the options that
are configured by `PhoenixGon.Pipeline`.
## Examples
iex> Unleash.Plug.put_feature(conn, :test)
%Plug.Conn{}
iex> Unleash.Plug.put_feature(conn, :test, true)
%Plug.Conn{}
"""
@spec put_feature(Plug.Conn.t(), String.t() | atom(), boolean()) :: Plug.Conn.t()
def put_feature(conn, feature, default \\ false) do
conn
|> Controller.get_gon(:features)
|> case do
features when is_map(features) ->
Controller.update_gon(
conn,
:features,
Map.put(features, feature, enabled?(conn, feature, default))
)
_ ->
Controller.put_gon(
conn,
:features,
Map.new([{feature, enabled?(conn, feature, default)}])
)
end
end
end
defp construct_context(conn, opts) do
opts
|> Enum.map(fn {k, v} ->
{k, get_session(conn, v)}
end)
|> Enum.concat(remote_address: to_string(:inet.ntoa(conn.remote_ip)))
|> Enum.into(%{})
end
end
end
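# Usage sketch (not from the source): a minimal controller-style action that
# reads a flag set by `plug Unleash.Plug` earlier in the pipeline (after
# `plug :fetch_session`). The `:new_dashboard` flag name is hypothetical.
defmodule MyAppWeb.DashboardSketch do
  import Plug.Conn

  def index(conn, _params) do
    if Unleash.Plug.enabled?(conn, :new_dashboard) do
      send_resp(conn, 200, "dashboard v2")
    else
      send_resp(conn, 200, "dashboard v1")
    end
  end
end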
# file: lib/unleash/plug.ex (source: starcoder)
defmodule DAG do
@moduledoc File.read!("#{__DIR__}/../README.md")
defstruct vs: MapSet.new(), es: MapSet.new()
alias __MODULE__, as: M
@doc """
Creates an empty DAG.
"""
def new do
{:ok, %M{}}
end
@doc """
Add a vertex to the DAG.
"""
def add_vertex(%M{} = m, v) do
{:ok, %M{m | vs: MapSet.put(m.vs, v)}}
end
@doc """
Return the list of vertices in no particular order.
"""
def vertices(%M{} = m) do
Enum.to_list(m.vs)
end
@doc """
Return the list of edges in no particular order.
"""
def edges(%M{} = m) do
Enum.to_list(m.es)
end
@doc """
Add an edge between two vertices.
The vertices must already exist in the DAG, otherwise an error is
returned. An error is also returned when the edge would form a
cycle.
"""
def add_edge(%M{} = m, a, b) do
with true <- Enum.member?(m.vs, a),
true <- Enum.member?(m.vs, b),
{:exists, false} <- {:exists, Enum.member?(m.es, {a, b})},
{:path, false} <- {:path, path?(m, b, a)} do
{:ok, %M{m | es: MapSet.put(m.es, {a, b})}}
else
false ->
{:error, :invalid}
{:exists, true} ->
{:ok, m}
{:path, true} ->
{:error, :cycle}
end
end
@doc """
Returns true when there is a path between the given vertices
"""
def path?(%M{} = m, a, b) do
case Enum.member?(m.es, {a, b}) do
true ->
true
false ->
outgoing(m, a)
|> Enum.reduce_while(
false,
fn v, _ ->
case path?(m, v, b) do
true -> {:halt, true}
false -> {:cont, false}
end
end
)
end
end
@doc """
Return the outgoing edges
"""
def outgoing(%M{} = m, v) do
m.es
|> Enum.filter(&(elem(&1, 0) == v))
|> Enum.map(&elem(&1, 1))
end
@doc """
Return the list of vertices, sorted topologically.
The order between non-connected vertices is arbitrarily decided;
however it is stable between sorts.
"""
def topsort(%M{} = m) do
m.vs
|> Enum.sort(fn a, b ->
cond do
path?(m, a, b) ->
false
path?(m, b, a) ->
true
true ->
a < b
end
end)
end
@doc """
Split the DAG into its components, returning a list of independent DAGs.
"""
def components(%M{} = m) do
components(Enum.to_list(m.vs), m.es, [])
end
defp components([], _edges, acc) do
acc
end
defp components([v | rest], edges, acc) do
component_edges = edges |> Enum.filter(fn {a, b} -> a == v or b == v end)
m = from_edges(v, component_edges)
rest = rest -- Enum.to_list(m.vs)
components(rest, edges, [m | acc])
end
defp from_edges(v, edges) do
vs = edges |> Enum.map(&Tuple.to_list/1) |> List.flatten()
%M{vs: MapSet.new([v | vs]), es: MapSet.new(edges)}
end
end
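# Usage sketch (not from the source): build a small DAG, reject a cycle, and
# sort it. Note that the comparator in topsort/1 places a vertex after
# anything it has a path to, so edges point from dependent to dependency.
{:ok, dag} = DAG.new()
{:ok, dag} = DAG.add_vertex(dag, :a)
{:ok, dag} = DAG.add_vertex(dag, :b)
{:ok, dag} = DAG.add_vertex(dag, :c)
{:ok, dag} = DAG.add_edge(dag, :a, :b)
{:ok, dag} = DAG.add_edge(dag, :b, :c)
{:error, :cycle} = DAG.add_edge(dag, :c, :a)
[:c, :b, :a] = DAG.topsort(dag)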
# file: lib/dag.ex (source: starcoder)
defmodule Plymio.Ast.Vorm.Vormen.Transform do
@moduledoc false
alias Plymio.Option.Utility, as: POU
alias Plymio.Ast.Utility, as: PAU
alias Plymio.Ast.Vorm.Error, as: PAVE
use Plymio.Ast.Vorm.Attribute
defdelegate new_error(opts), to: PAVE, as: :new
defdelegate new_error_result(opts), to: PAVE, as: :new_result
defdelegate list_wrap_flat_just(value), to: POU
defdelegate list_wrap_flat_just_uniq(value), to: POU
def vormen_transform_reduce_all_forms_fun(forms) do
forms
end
def vormen_transform_reduce_empty_forms_fun(_forms) do
[]
end
def vormen_transform_reduce_accumulator_forms_fun(forms) do
{forms, []}
end
def vormen_transform_then_fetch({form,_index}, _new_forms, forms)
when is_list(forms) do
forms ++ [form]
end
def vormen_transform_else_fetch({_form,_index}, _new_forms, forms)
when is_list(forms) do
forms
end
def vormen_transform_then_filter({form,_index}, _new_forms, forms)
when is_list(forms) do
forms ++ [form]
end
def vormen_transform_else_filter(_form_index, _new_forms, forms)
when is_list(forms) do
forms
end
def vormen_transform_then_reject(_form_index, _new_forms, forms)
when is_list(forms) do
forms
end
def vormen_transform_else_reject({form,_index}, _new_forms, forms)
when is_list(forms) do
forms ++ [form]
end
def vormen_transform_then_insert({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ new_forms ++ [form]
end
def vormen_transform_else_insert({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ [form]
end
def vormen_transform_then_replace(_form_index, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ new_forms
end
def vormen_transform_else_replace({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ [form]
end
def vormen_transform_then_pipe_before({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
with {:ok, new_form} <- PAU.forms_pipe([form | new_forms]) do
{:ok, forms ++ List.wrap(new_form)}
else
{:error, _} = result -> result
end
end
def vormen_transform_else_pipe_before({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ [form]
end
def vormen_transform_then_pipe_after({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
with {:ok, new_form} <- PAU.forms_pipe(new_forms ++ List.wrap(form)) do
{:ok, forms ++ List.wrap(new_form)}
else
{:error, _} = result -> result
end
end
def vormen_transform_else_pipe_after({form,_index}, new_forms, forms)
when is_list(new_forms) and is_list(forms) do
forms ++ [form]
end
def normalise_vormen_transform(transform)
def normalise_vormen_transform(transform)
when is_function(transform,1) do
{:ok, transform}
end
def normalise_vormen_transform(transforms) when is_list(transforms) do
transforms
|> list_wrap_flat_just
|> Enum.reduce_while([],
fn fun, funs ->
case fun |> is_function(1) do
true -> {:cont, [fun | funs]}
_ -> {:halt, new_error_result(m: "forms transform invalid", v: fun)}
end
end)
|> case do
{:error, _} = result -> result
transforms ->
case transforms do
[fun] -> {:ok, fun}
funs ->
funs = funs |> Enum.reverse
fun = fn forms ->
funs
|> Enum.reduce_while(forms,
fn f, forms ->
forms
|> f.()
|> case do
{:error, _} = result -> {:halt, result}
{:ok, forms} -> {:cont, forms}
forms -> {:cont, forms}
end
end)
|> case do
{:error, _} = result -> result
forms -> {:ok, forms}
end
end
{:ok, fun}
end
end
end
def normalise_vormen_transform(transform) do
new_error_result(m: "forms transform invalid", v: transform)
end
def normalise_form_index_transform(transform)
def normalise_form_index_transform(transform)
when is_function(transform,1) do
{:ok, transform}
end
def normalise_form_index_transform(transforms) when is_list(transforms) do
transforms
|> list_wrap_flat_just
|> Enum.reduce_while([],
fn fun, funs ->
case fun |> is_function(1) do
true -> {:cont, [fun | funs]}
_ -> {:halt, new_error_result(m: "{form,index} transform invalid", v: fun)}
end
end)
|> case do
{:error, _} = result -> result
transforms ->
case transforms do
[fun] -> {:ok, fun}
funs ->
funs = funs |> Enum.reverse
fun = fn {form,index} ->
funs
|> Enum.reduce_while({form,index},
fn f, {form,index} ->
{form,index}
|> f.()
|> case do
{:error, _} = result -> {:halt, result}
{:ok, form} -> {:cont, {form,index}}
form -> {:cont, {form,index}}
end
end)
|> case do
{:error, _} = result -> result
{form,_index} -> {:ok, form}
end
end
{:ok, fun}
end
end
end
def normalise_form_index_transform(transform) do
new_error_result(m: "{form,index} transform invalid", v: transform)
end
def normalise_form_walk_transform(transform)
def normalise_form_walk_transform(transform)
when is_function(transform,1) do
{:ok, transform}
end
def normalise_form_walk_transform(transforms) when is_list(transforms) do
transforms
|> list_wrap_flat_just
|> Enum.reduce_while([],
fn fun, funs ->
case fun |> is_function(1) do
true -> {:cont, [fun | funs]}
_ -> {:halt, new_error_result(m: "invalid form walk transform", v: fun)}
end
end)
|> case do
{:error, _} = result -> result
transforms ->
case transforms do
[fun] -> {:ok, fun}
funs ->
funs = funs |> Enum.reverse
fun = fn form ->
funs |> Enum.reduce(form, fn f, form -> f.(form) end)
end
{:ok, fun}
end
end
end
def normalise_form_walk_transform(transform) do
new_error_result(m: "invalid form walk transform", v: transform)
end
def normalise_form_postwalk_transform(transform)
def normalise_form_postwalk_transform(transform) do
with {:ok, fun_walk} <- transform |> normalise_form_walk_transform do
fun_postwalk = fn {form,_index} ->
{:ok, form |> Macro.postwalk(fun_walk)}
end
{:ok, fun_postwalk}
else
{:error, _} = result -> result
end
end
def normalise_form_prewalk_transform(transform)
def normalise_form_prewalk_transform(transform) do
with {:ok, fun_walk} <- transform |> normalise_form_walk_transform do
fun_prewalk = fn {form,_index} ->
{:ok, form |> Macro.prewalk(fun_walk)}
end
{:ok, fun_prewalk}
else
{:error, _} = result -> result
end
end
def normalise_form_traverse_transform(acc, pre, post)
def normalise_form_traverse_transform(acc, pre, post) do
[pre, post]
|> Enum.reduce_while([],
fn fun, funs ->
case fun |> is_function(2) do
true -> {:cont, [fun | funs]}
_ -> {:halt, new_error_result(m: "form traverse function invalid", v: fun)}
end
end)
|> case do
{:error, _} = result -> result
[fun_pre, fun_post] ->
fun = fn {form,_index} ->
with {form, _acc} <- form |> Macro.traverse(acc, fun_pre, fun_post) do
{:ok, form}
else
x -> new_error_result(m: "form traverse failed", v: x)
end
end
{:ok, fun}
end
end
def normalise_form_traverse_transform(transform) do
new_error_result(m: "transform not (any -> any)", v: transform)
end
def normalise_traverse_function(transform)
def normalise_traverse_function(transform)
when is_function(transform,2) do
{:ok, transform}
end
def normalise_traverse_function(transform) do
new_error_result(m: "transform not (any -> any)", v: transform)
end
end
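# Usage sketch (not from the source): composing two forms transforms with
# normalise_vormen_transform/1. With more than one function, the composed
# transform returns {:ok, forms} or the first {:error, _}.
alias Plymio.Ast.Vorm.Vormen.Transform, as: T

{:ok, fun} =
  T.normalise_vormen_transform([
    fn forms -> Enum.reverse(forms) end,
    fn forms -> {:ok, forms} end
  ])

{:ok, [:b, :a]} = fun.([:a, :b])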
# file: lib/ast/vorm/vormen/transform.ex (source: starcoder)
defmodule Godfist.Static do
@moduledoc """
Module to interact with the Static data provided by Riot.
I'm not going to provide examples in this module unless it's necessary.
Just pass each region as an atom, and an id where it's required.
On some of these functions I could have provided the ids as options and used
the same function name, but I thought that explicitly stating that you want
a single rune or mastery was a better idea, so you can do it like this instead.
"""
alias Godfist.LeagueRates
@endpoint "/lol/static-data/v3"
@doc """
Get a list of all champs.
If no option is passed, everything is returned.
Consult: https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getChampionList
for a list of options and their names.
"""
@spec all_champs(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def all_champs(region, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/champions" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a single champion by id.
Refer to `all_champs/2` for a list of options.
"""
@spec champion(atom, integer, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def champion(region, id, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/champions/#{id}" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a list of all items.
If no option is passed, "all" is returned.
https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getItemList
"""
@spec all_items(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def all_items(region, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/items" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a single item by id.
Refer to `all_items/2` for a list of options.
"""
@spec item(atom, integer, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def item(region, id, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/items/#{id}" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieve language strings data.
"""
@spec lang_strings(atom) :: {:ok, map} | {:error, String.t()}
def lang_strings(region) do
rest = @endpoint <> "/language-strings"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get support languages data.
"""
@spec languages(atom) :: {:ok, map} | {:error, String.t()}
def languages(region) do
rest = @endpoint <> "/languages"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get information about all maps.
"""
@spec maps(atom) :: {:ok, map} | {:error, String.t()}
def maps(region) do
rest = @endpoint <> "/maps"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a list of all masteries.
See https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getMasteryList
for a list of options
"""
@spec all_masteries(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def all_masteries(region, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/masteries" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a single mastery by id.
Refer to `all_masteries/2` for a list of options.
"""
@spec mastery(atom, integer, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def mastery(region, id, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/masteries/#{id}" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a list of all profile icons.
"""
@spec profile_icons(atom) :: {:ok, map} | {:error, String.t()}
def profile_icons(region) do
rest = @endpoint <> "/profile-icons"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieve realm data.
"""
@spec realms(atom) :: {:ok, map} | {:error, String.t()}
def realms(region) do
rest = @endpoint <> "/realms"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieves reforged rune path list.
"""
@spec reforged_runes_path(atom) :: {:ok, list} | {:error, String.t()}
def reforged_runes_path(region) do
rest = @endpoint <> "/reforged-rune-paths"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieves reforged rune path by ID.
"""
@spec reforged_rune_path_by_id(atom, integer) :: {:ok, list} | {:error, String.t()}
def reforged_rune_path_by_id(region, id) do
rest = @endpoint <> "/reforged-rune-paths/#{id}"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieves reforged runes list.
"""
@spec reforged_runes(atom) :: {:ok, list} | {:error, String.t()}
def reforged_runes(region) do
rest = @endpoint <> "/reforged-runes"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieves reforged rune by ID.
"""
@spec reforged_rune_by_id(atom, integer) :: {:ok, list} | {:error, String.t()}
def reforged_rune_by_id(region, id) do
rest = @endpoint <> "/reforged-runes/#{id}"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a list of all runes.
See https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getRuneList
for a list of options
"""
@spec all_runes(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def all_runes(region, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/runes" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get information about a single rune by id.
Refer to `all_runes/2` for a list of options.
"""
@spec rune(atom, integer, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def rune(region, id, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/runes/#{id}" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a list of all summoner spells.
See https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getSummonerSpellList
for a list of options
"""
@spec sum_spells(atom, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def sum_spells(region, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/summoner-spells" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get a single spell by id.
Refer to `sum_spells/2` for a list of options.
"""
@spec spell(atom, integer, Keyword.t()) :: {:ok, map} | {:error, String.t()}
def spell(region, id, opts \\ []) do
opts = Pastry.to_query_string(opts)
rest = @endpoint <> "/summoner-spells/#{id}" <> opts
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Retrieves full tarball link
"""
@spec tarball_links(atom) :: {:ok, String.t()} | {:error, String.t()}
def tarball_links(region) do
rest = @endpoint <> "/tarball-links"
LeagueRates.handle_rate(region, rest, :static)
end
@doc """
Get version data.
"""
@spec versions(atom) :: {:ok, map} | {:error, String.t()}
def versions(region) do
rest = @endpoint <> "/versions"
LeagueRates.handle_rate(region, rest, :static)
end
end
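# Usage sketch (not from the source): fetching static data. Assumes a
# configured Riot API key; `:na` is a region atom and `champListData` is one
# of the query options documented by Riot for this endpoint.
{:ok, champs} = Godfist.Static.all_champs(:na, champListData: "all")
{:ok, annie} = Godfist.Static.champion(:na, 1)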
# file: lib/godfist/requests/static.ex (source: starcoder)
defmodule KafkaEx.GenConsumer.Supervisor do
@moduledoc """
A supervisor for managing `GenConsumer` processes that are part of a consumer
group.
The supervisor will launch individual `GenConsumer` processes for each
partition given by the `assignments` argument to `start_link/4`. When
terminated, each of the supervisor's child processes will commit its latest
offset before terminating.
This module manages a static list of consumer processes. For dynamically
distributing consumers in a consumer group across a cluster of nodes, see
`KafkaEx.ConsumerGroup`.
"""
use DynamicSupervisor
@doc """
Starts a `GenConsumer.Supervisor` process linked to the current process.
`gen_consumer_module` is a module that implements the `GenServer` behaviour
which consumes events from Kafka.
`consumer_module` is a module that implements the `GenConsumer` behaviour.
`group_name` is the name of a consumer group, and `assignments` is a list of
partitions for the `GenConsumer`s to consume. `opts` accepts the same
options as `KafkaEx.GenConsumer.start_link/5`.
### Return Values
This function has the same return values as `Supervisor.start_link/3`.
If the supervisor and its consumers are successfully created, this function
returns `{:ok, pid}`, where `pid` is the PID of the supervisor.
"""
@spec start_link(
{gen_consumer_module :: module, consumer_module :: module},
consumer_group_name :: binary,
assigned_partitions :: [
{topic_name :: binary, partition_id :: non_neg_integer}
],
KafkaEx.GenConsumer.options()
) :: Elixir.Supervisor.on_start()
def start_link(
{gen_consumer_module, consumer_module},
group_name,
assignments,
opts \\ []
) do
start_link_result =
DynamicSupervisor.start_link(
__MODULE__,
{{gen_consumer_module, consumer_module}, group_name, assignments, opts}
)
child_spec_builder = fn topic, partition ->
%{
id: gen_consumer_module,
start:
{gen_consumer_module, :start_link,
[consumer_module, group_name, topic, partition, opts]}
}
end
case start_link_result do
{:ok, pid} ->
:ok = start_workers(pid, child_spec_builder, assignments)
{:ok, pid}
error ->
error
end
end
@doc """
Returns a list of child pids
Intended to be used for operational and testing purposes
"""
@spec child_pids(pid | atom) :: [pid]
def child_pids(supervisor_pid) do
supervisor_pid
|> DynamicSupervisor.which_children()
|> Enum.map(fn {_, pid, _, _} -> pid end)
end
@doc """
Returns true if any child pids are alive
"""
@spec active?(Supervisor.supervisor()) :: boolean
def active?(supervisor_pid) do
supervisor_pid
|> child_pids
|> Enum.any?(&Process.alive?/1)
end
@impl true
def init(_init_args) do
DynamicSupervisor.init(strategy: :one_for_one)
end
defp start_workers(pid, child_spec_builder, assignments) do
Enum.each(assignments, fn {topic, partition} ->
child_spec = child_spec_builder.(topic, partition)
case start_child(pid, child_spec) do
{:ok, _child} -> nil
{:ok, _child, _info} -> nil
end
end)
:ok
end
defp start_child(pid, child_spec) do
DynamicSupervisor.start_child(pid, child_spec)
end
end
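# Usage sketch (not from the source): supervise consumers for two partitions
# of one topic. `MyConsumer` is a hypothetical module implementing the
# `KafkaEx.GenConsumer` behaviour.
{:ok, sup} =
  KafkaEx.GenConsumer.Supervisor.start_link(
    {KafkaEx.GenConsumer, MyConsumer},
    "my_consumer_group",
    [{"my_topic", 0}, {"my_topic", 1}]
  )

true = KafkaEx.GenConsumer.Supervisor.active?(sup)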
# file: lib/kafka_ex/gen_consumer/supervisor.ex (source: starcoder)
defmodule Ecto.Filters do
@moduledoc """
Adds a macro `filter` and private function `apply_filter/2` to transform
request params into ecto query expressions.
## Example
use Ecto.Filters
filter(:comment_body, fn query, value ->
query
|> join(:left, [p], c in assoc(p, :comments), as: :comments)
|> where([comments: comments], ilike(comments.body, ^value))
end)
Post
|> apply_filters(%{"filters" => %{"comment_body" => "some text"}})
|> MyRepo.all()
[%Post{title: "Ecto Filters"}, ...]
"""
alias Ecto.Filters.Exception
defmacro filter(key, fun) do
quote line: __CALLER__.line do
def filter_by(query, unquote(key), value) do
if !is_atom(unquote(key)) do
args = [key: unquote(key), value: value, query: query]
raise Exception, type: :atom_key, args: args
end
unquote(fun).(query, value)
end
end
end
defmacro __using__(_) do
quote location: :keep do
import Ecto.Filters
alias Ecto.Filters
defp apply_filters(query, params, opts \\ []) do
key = Keyword.get(opts, :key, :filters)
params
|> Filters.get_filter_params(key)
|> Filters.build_query(__MODULE__, Ecto.Queryable.to_query(query), opts)
end
end
end
def build_query(filters, module, query, opts) do
ignore_bad_filters = Keyword.get(opts, :ignore_bad_filters, false)
Enum.reduce(filters, query, fn {key, value}, query ->
try do
apply(module, :filter_by, [query, to_atom(key), value])
rescue
error in FunctionClauseError ->
args = [key: key, value: value, query: query, error: error]
ignore_undefined_function(ignore_bad_filters, args)
end
end)
end
defp ignore_undefined_function(true, args), do: Keyword.get(args, :query)
defp ignore_undefined_function(false, args) do
raise Exception,
type: :not_found,
args: args
end
defp to_atom(key) when is_binary(key), do: String.to_existing_atom(key)
defp to_atom(key) when is_atom(key), do: key
defp to_atom(key) do
raise Exception,
type: :atom_or_binary_key,
args: [key: key]
end
def get_filter_params(params, key) when is_list(params) do
Keyword.get(params, key, [])
end
def get_filter_params(params, key) when is_map(params) do
Map.get(params, key) || Map.get(params, Atom.to_string(key), [])
end
def get_filter_params(_, _), do: []
end
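# Usage sketch (not from the source): a context module declaring a filter and
# applying request params. `Post` and `MyRepo` are hypothetical.
defmodule PostsSketch do
  use Ecto.Filters
  import Ecto.Query

  filter(:title, fn query, value ->
    where(query, [p], ilike(p.title, ^"%#{value}%"))
  end)

  def list(params) do
    Post
    |> apply_filters(params, ignore_bad_filters: true)
    |> MyRepo.all()
  end
end

# PostsSketch.list(%{"filters" => %{"title" => "ecto"}})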
# file: lib/ecto/filters/filters.ex (source: starcoder)
defmodule SanbaseWeb.Graphql.ClickhouseDataloader do
alias Sanbase.Clickhouse
alias Sanbase.Model.Project
alias Sanbase.Metric
def data(), do: Dataloader.KV.new(&query/2)
def query(:aggregated_metric, args) do
args_list = args |> Enum.to_list()
args_list
|> Enum.group_by(fn %{metric: metric, from: from, to: to, aggregation: aggregation} ->
{metric, from, to, aggregation}
end)
|> Sanbase.Parallel.map(fn {selector, group} ->
{metric, from, to, aggregation} = selector
slugs = Enum.map(group, & &1.slug)
data =
case Metric.aggregated_timeseries_data(metric, %{slug: slugs}, from, to, aggregation) do
{:ok, result} -> result
{:error, error} -> {:error, error}
end
{selector, data}
end)
|> Map.new()
end
def query(:average_daily_active_addresses, args) do
args
|> Enum.to_list()
|> Enum.group_by(fn %{from: from, to: to} -> {from, to} end)
|> Sanbase.Parallel.map(fn {{from, to}, group} ->
{{from, to}, average_daily_active_addresses(group, from, to)}
end)
|> Map.new()
end
@doc ~s"""
Returns a map with the average dev activity for every project passed in `args`.
The map key is the `days` argument passed. This is done so aliases are
supported in the format:
```
...
dev_7d: averageDevActivity(days: 7)
dev_30d: averageDevActivity(days: 30)
...
```
The `days` key points to a map of results or to an {:error, error} tuple.
The map of results has github organizations as key and their average activity
as value.
"""
def query(:average_dev_activity, args) do
args = Enum.to_list(args)
Enum.group_by(args, fn %{days: days} -> days end)
|> Sanbase.Parallel.map(
fn {days, group} ->
{days, average_dev_activity(group, days)}
end,
ordered: false
)
|> Map.new()
end
@doc ~s"""
Returns a map with the ethereum spent by each project passed in `args`.
The map key is the project's id.
The map value is either `{:ok, value}` or `{:nocache, {:ok, value}}`.
The :nocache value is returned if some problems were encountered while calculating the
ethereum spent and the value won't be put in the cache.
"""
def query(:eth_spent, args) do
args = Enum.to_list(args)
Enum.group_by(args, fn %{days: days} -> days end)
|> Sanbase.Parallel.map(
fn {days, group} ->
{days, eth_spent_for_days_group(group |> Enum.map(& &1.project), days)}
end,
ordered: false
)
|> Map.new()
end
defp average_daily_active_addresses(args, from, to) do
slugs =
args
|> Enum.map(fn %{project: project} -> project.slug end)
|> Enum.reject(&is_nil/1)
Sanbase.Metric.aggregated_timeseries_data(
"daily_active_addresses",
%{slug: slugs},
from,
to,
:avg
)
|> case do
{:ok, result} ->
result
{:error, error} ->
{:error, error}
end
end
defp average_dev_activity(group, days) do
to = Timex.now()
from = Timex.shift(to, days: -days)
organizations =
group
|> Enum.flat_map(fn %{project: project} ->
{:ok, organizations} = Project.github_organizations(project)
organizations
end)
Sanbase.Metric.aggregated_timeseries_data(
"dev_activity",
%{organizations: organizations},
from,
to
)
|> case do
{:ok, result} ->
result
|> Enum.into(%{}, fn {org, dev_activity} ->
{org, dev_activity / days}
end)
{:error, error} ->
{:error, error}
end
end
defp eth_spent_for_days_group(projects, days) do
from = Timex.shift(Timex.now(), days: -days)
to = Timex.now()
eth_spent_per_address =
eth_addresses(projects)
|> Enum.chunk_every(10)
|> Sanbase.Parallel.map(&eth_spent(&1, from, to),
map_type: :flat_map,
max_concurrency: 8,
ordered: false
)
|> Map.new()
projects
|> Enum.map(fn project ->
{project.id, eth_spent_per_project(project, eth_spent_per_address)}
end)
|> Map.new()
end
# Calculate the ethereum spent for a single project by summing the ethereum
# spent for each of its ethereum addresses. If an error is encountered while
# calculating, the value will be wrapped in a :nocache tuple that the cache
# knows how to handle
defp eth_spent_per_project(project, eth_spent_per_address) do
{:ok, addresses} = Project.eth_addresses(project)
project_addresses_eth_spent =
addresses
|> Enum.map(fn address ->
Map.get(eth_spent_per_address, address, {:ok, 0})
end)
project_eth_spent =
for({:ok, value} <- project_addresses_eth_spent, do: value)
|> Enum.sum()
project_addresses_eth_spent
|> Enum.any?(&match?({:error, _}, &1))
|> project_eth_spent_result(project_eth_spent)
end
defp project_eth_spent_result(has_errors?, value)
defp project_eth_spent_result(true, value) when value < 0, do: {:nocache, {:ok, abs(value)}}
defp project_eth_spent_result(true, _), do: {:nocache, {:ok, 0}}
defp project_eth_spent_result(false, value) when value < 0, do: {:ok, abs(value)}
defp project_eth_spent_result(false, _), do: {:ok, 0}
defp eth_spent(eth_addresses, from, to) do
case Clickhouse.HistoricalBalance.EthSpent.eth_balance_change(eth_addresses, from, to) do
{:ok, balance_changes} ->
balance_changes
|> Enum.map(fn {addr, {_, _, change}} -> {addr, {:ok, change}} end)
_ ->
eth_addresses
|> Enum.map(fn addr -> {addr, {:error, :novalue}} end)
end
end
defp eth_addresses(projects) do
projects
|> Enum.map(fn project ->
case Project.eth_addresses(project) do
{:ok, addresses} when addresses != [] ->
addresses
_ ->
nil
end
end)
|> Enum.reject(fn addresses -> addresses == nil or addresses == [] end)
end
end
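# Usage sketch (not from the source): registering this KV source with
# Dataloader via the standard Dataloader API. Per the grouping above, the
# batch result for :average_dev_activity is keyed by the `days` value.
# (`project` is assumed to be a %Sanbase.Model.Project{} struct.)
loader =
  Dataloader.new()
  |> Dataloader.add_source(:clickhouse, SanbaseWeb.Graphql.ClickhouseDataloader.data())
  |> Dataloader.load(:clickhouse, :average_dev_activity, %{project: project, days: 7})
  |> Dataloader.run()

dev_activity_by_org = Dataloader.get(loader, :clickhouse, :average_dev_activity, 7)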
# file: lib/sanbase_web/graphql/dataloader/clickhouse_dataloader.ex (source: starcoder)
defmodule HLClock.Timestamp do
@moduledoc """
HLC Timestamp
Implements the necessary components of the HLC tuple (i.e. logical time and
logical counter) with extension to support node ids to provide unique
timestamps even in cases where time and counter are the same
Binary representations assume big endianness for interop simplicity with other
languages/representations.
"""
defstruct [:time, :counter, :node_id]
alias __MODULE__, as: T
@type t :: %__MODULE__{
time: integer(),
counter: integer(),
node_id: integer()
}
@doc """
Construct a timestamp from its principal components: logical time (initially
node's physical time), logical counter (initally zero), and the node id
"""
def new(time, counter, node_id \\ 0) do
assert_byte_size(node_id, 8)
assert_byte_size(counter, 2)
assert_byte_size(time, 6)
%T{time: time, counter: counter, node_id: node_id}
end
defp assert_byte_size(value, size) do
byte_size(:binary.encode_unsigned(value)) <= size ||
raise ArgumentError, "#{value} exceeds max byte size of #{size}"
end
@doc """
Generate a single HLC Timestamp for sending to other nodes or
local causality tracking
"""
def send(%{time: old_time, counter: counter, node_id: node_id}, pt, max_drift) do
new_time = max(old_time, pt)
new_counter = advance_counter(old_time, counter, new_time)
with :ok <- handle_drift(old_time, new_time, max_drift) do
{:ok, new(new_time, new_counter, node_id)}
end
end
@doc """
Given the current timestamp for this node and a provided remote timestamp,
perform the merge of both logical time and logical counters. Returns the new
current timestamp for the local node
"""
def recv(local, remote, physical_time, max_drift) do
new_time = Enum.max([physical_time, local.time, remote.time])
with {:ok, node_id} <- compare_node_ids(local.node_id, remote.node_id),
:ok <-
handle_drift(
remote.time,
physical_time,
max_drift,
:remote_drift_violation
),
:ok <- handle_drift(new_time, physical_time, max_drift),
new_counter <- merge_logical(new_time, local, remote) do
{:ok, new(new_time, new_counter, node_id)}
end
end
@doc """
Exhaustive comparison of two timestamps: precedence is in order of time
component, logical counter, and finally node identifier
"""
def compare(%{time: t1}, %{time: t2}) when t1 > t2, do: :gt
def compare(%{time: t1}, %{time: t2}) when t1 < t2, do: :lt
def compare(%{counter: c1}, %{counter: c2}) when c1 > c2, do: :gt
def compare(%{counter: c1}, %{counter: c2}) when c1 < c2, do: :lt
def compare(%{node_id: n1}, %{node_id: n2}) when n1 > n2, do: :gt
def compare(%{node_id: n1}, %{node_id: n2}) when n1 < n2, do: :lt
def compare(_ = %{}, _ = %{}), do: :eq
@doc """
Determines if the clock's timestamp "happened before" a different timestamp
"""
def before?(t1, t2) do
compare(t1, t2) == :lt
end
@doc """
Create a millisecond granularity DateTime struct representing the logical time
portion of the Timestamp.
Given that this representation loses the logical counter and node information,
it should be used as a reference only. Including the counter in the DateTime
struct would create absurd but still ordered timestamps.
## Example
iex> t0 = HLClock.Timestamp.new(1410652800000, 0, 0)
%HLClock.Timestamp{counter: 0, node_id: 0, time: 1410652800000}
...> encoded = HLClock.Timestamp.encode(t0)
<<1, 72, 113, 117, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
...> <<time_and_counter::size(64), _::size(64)>> = encoded
...> DateTime.from_unix(time_and_counter, :microsecond)
{:ok, #DateTime<4899-07-30 06:31:40.800000Z>}
"""
def to_datetime(%T{time: t}) do
with {:ok, dt} <- DateTime.from_unix(t, :millisecond) do
dt
end
end
@doc """
Return the logical, monotonic time portion. Unlike `System.monotonic_time`, if
timestamps are regularly exchanged with other nodes and/or clients, this
monotonic timestamp will represent a cluster wide monotonic value.
"""
def to_os_time(%T{time: t}), do: t
@doc """
Pack the rich Timestamp struct as a 128 bit byte array
48 bits - Physical time
16 bits - Logical time
64 bits - Node ID
"""
def encode(%{time: t, counter: c, node_id: n}) do
<<t::size(48)>> <> <<c::size(16)>> <> <<n::size(64)>>
end
@doc """
Construct a Timestamp from the binary representation
"""
def decode(<<t::size(48)>> <> <<c::size(16)>> <> <<n::size(64)>>) do
%T{time: t, counter: c, node_id: n}
end
@doc """
Recover a Timestamp from the string representation.
"""
def from_string(tstr) do
with {:ok, dt, _} <- tstr |> String.slice(0, 24) |> DateTime.from_iso8601(),
time when is_integer(time) <- DateTime.to_unix(dt, :millisecond),
{counter, _} <- tstr |> String.slice(25, 4) |> Integer.parse(16),
{node, _} <- tstr |> String.slice(30, 16) |> Integer.parse(16) do
new(time, counter, node)
end
end
defp compare_node_ids(local_id, remote_id) when local_id == remote_id,
do: {:error, :duplicate_node_id}
defp compare_node_ids(local_id, _), do: {:ok, local_id}
defp merge_logical(max_pt, local, remote) do
cond do
max_pt == local.time && max_pt == remote.time ->
max(local.counter, remote.counter) + 1
max_pt == local.time ->
local.counter + 1
max_pt == remote.time ->
remote.counter + 1
true ->
0
end
end
defp handle_drift(l, pt, max_drift, err \\ :clock_drift_violation) do
cond do
drift?(l, pt, max_drift) ->
{:error, err}
true ->
:ok
end
end
defp drift?(l, pt, max_drift) do
abs(l - pt) > max_drift
end
defp advance_counter(old_time, counter, new_time) do
cond do
old_time == new_time ->
counter + 1
true ->
0
end
end
defimpl String.Chars do
def to_string(ts) do
logical_time =
ts
|> T.to_datetime()
|> DateTime.to_iso8601()
counter =
<<ts.counter::size(16)>>
|> Base.encode16()
node_id =
<<ts.node_id::size(64)>>
|> Base.encode16()
"#{logical_time}-#{counter}-#{node_id}"
end
end
end
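# Usage sketch (not from the source): encode/decode round trip and the string
# representation produced by the String.Chars implementation.
ts = HLClock.Timestamp.new(1410652800000, 1, 7)
binary = HLClock.Timestamp.encode(ts)
^ts = HLClock.Timestamp.decode(binary)
16 = byte_size(binary)
"2014-09-14T00:00:00.000Z-0001-0000000000000007" = to_string(ts)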
# file: lib/hlclock/timestamp.ex (source: starcoder)
defmodule Emulator do
@moduledoc """
Documentation for `Emulator`.
"""
@doc """
Hello world.
## Examples
iex> Emulator.hello()
:world
"""
def hello do
:world
end
def run(code, mem, out) do
reg = Register.new()
run(0, code, mem, reg, out)
end
def run(pc, code, mem, reg, out) do
next = Program.read(code, pc)
case next do
{:halt} ->
Out.close(out)
{:out, rs} ->
a = Register.read(reg, rs)
run(pc + 1, code, mem, reg, Out.put(out, a))
{:add, rd, rs, rt} ->
a = Register.read(reg, rs)
b = Register.read(reg, rt)
reg = Register.write(reg, rd, a + b)
run(pc + 1, code, mem, reg, out)
{:sub, rd, rs, rt} ->
a = Register.read(reg, rs)
b = Register.read(reg, rt)
reg = Register.write(reg, rd, a - b)
run(pc + 1, code, mem, reg, out)
{:addi, rd, rs, imm} ->
a = Register.read(reg, rs)
reg = Register.write(reg, rd, a + imm)
run(pc + 1, code, mem, reg, out)
{:beq, rs, rt, imm} ->
  a = Register.read(reg, rs)
  b = Register.read(reg, rt)

  if a == b do
    run(pc + imm, code, mem, reg, out)
  else
    run(pc + 1, code, mem, reg, out)
  end

{:bne, rs, rt, imm} ->
  a = Register.read(reg, rs)
  b = Register.read(reg, rt)

  if a != b do
    run(pc + imm, code, mem, reg, out)
  else
    run(pc + 1, code, mem, reg, out)
  end
{:lw, rd, rs, imm} ->
a = Register.read(reg, rs)
addr = a + imm
val = Memory.read(mem, addr)
reg = Register.write(reg, rd, val)
run(pc + 1, code, mem, reg, out)
{:sw, rs, rt, imm} ->
vs = Register.read(reg, rs)
vt = Register.read(reg, rt)
addr = vt + imm
mem = Memory.write(mem, addr, vs)
run(pc + 1, code, mem, reg, out)
end
end
end
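# Usage sketch (not from the source): a tiny program using the opcodes
# handled above. `Program.load/1`, `Memory.new/1`, and `Out.new/0` are
# hypothetical constructors for the collaborating modules, which are not
# shown here; only the instruction tuples match the opcodes handled by run/5.
code =
  Program.load([
    # register 0 is assumed to read as zero
    {:addi, 1, 0, 40},
    {:addi, 2, 0, 2},
    {:add, 3, 1, 2},
    {:out, 3},
    {:halt}
  ])

Emulator.run(code, Memory.new([]), Out.new())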
# file: emulator/lib/emulator.ex (source: starcoder)
defmodule Openflow.Hello do
@moduledoc "OpenFlow Hello codec module"
import Bitwise
@ofp_hello_size 4
defstruct(version: 4, xid: 0, elements: [])
alias __MODULE__
def ofp_type, do: 0
def new(version) when is_integer(version) do
%Hello{elements: [versionbitmap: [version]]}
end
def new(versions) when is_list(versions) do
%Hello{elements: [versionbitmap: versions]}
end
def supported_version?(%Hello{version: 4, elements: []}), do: true
def supported_version?(%Hello{elements: []}), do: false
def supported_version?(%Hello{elements: elements}) do
elements
|> Enum.reduce([], fn {:versionbitmap, versions}, acc -> acc ++ versions end)
|> Enum.any?(fn version -> version == 4 end)
end
def read(binary), do: %Hello{elements: decode([], binary)}
def to_binary(%Hello{elements: elements}), do: encode([], elements)
# private functions
defp decode(acc, <<>>), do: acc
defp decode(acc, <<typeint::16, length::16, rest::bytes>>) do
data_len = length - @ofp_hello_size
<<data::bytes-size(data_len), rest2::bytes>> = rest
try do
typeint
|> Openflow.Enums.to_atom(:hello_elem)
|> decode_hello_elem(acc, data)
|> decode(rest2)
catch
:bad_enum ->
decode(acc, rest2)
end
end
defp encode(acc, []), do: to_string(acc)
defp encode(acc, [h | rest]), do: encode([encode_hello_elem(h) | acc], rest)
defp decode_hello_elem(:versionbitmap, acc, binary),
do: [{:versionbitmap, decode_bitmap([], binary, 0)} | acc]
defp decode_hello_elem(_, acc, _binary), do: acc
defp encode_hello_elem({:versionbitmap, versions}) do
bitmap_bin = encode_bitmap(versions)
type_int = Openflow.Enums.to_int(:versionbitmap, :hello_elem)
size_int = @ofp_hello_size + byte_size(bitmap_bin)
<<type_int::16, size_int::16, bitmap_bin::bytes>>
end
defp encode_hello_elem(_) do
<<>>
end
defp decode_bitmap(acc, "", _), do: acc
defp decode_bitmap(acc, <<int::32, rest::bytes>>, base) do
acc
|> decode_bitmap(int, 0, base)
|> decode_bitmap(rest, base + 32)
end
defp encode_bitmap(list) do
size =
list
|> Enum.max()
|> div(32)
encode_bitmap(0, list, size)
end
defp decode_bitmap(acc, _, index, _) when index >= 32, do: acc
defp decode_bitmap(acc, int, index, base) when (int &&& 1 <<< index) == 0,
do: decode_bitmap(acc, int, index + 1, base)
defp decode_bitmap(acc, int, index, base),
do: decode_bitmap([base + index | acc], int, index + 1, base)
defp encode_bitmap(acc, [], size) do
bytes = (size + 1) * 32
<<acc::size(bytes)>>
end
defp encode_bitmap(acc, [h | rest], size) do
index = (size - div(h, 32)) * 32 + rem(h, 32)
encode_bitmap(acc ||| 1 <<< index, rest, size)
end
end
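# Usage sketch (not from the source): build a Hello advertising OpenFlow wire
# versions 4 and 5 (OpenFlow 1.3 and 1.4), round-trip it through the codec,
# and check version support.
hello = Openflow.Hello.new([4, 5])
binary = Openflow.Hello.to_binary(hello)
decoded = Openflow.Hello.read(binary)
true = Openflow.Hello.supported_version?(decoded)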
# file: lib/openflow/hello.ex (source: starcoder)
defmodule Extatus.Metric.Gauge do
@moduledoc """
This module defines a wrapper over `Prometheus.Metric.Gauge` functions to
be compatible with `Extatus` way of handling metrics.
"""
alias Extatus.Settings
@metric Settings.extatus_gauge_mod()
@doc """
Creates a gauge using the `name` of a metric.
"""
defmacro new(name) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).get_spec(name) do
{unquote(module), spec} ->
unquote(metric).new(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Creates a gauge using the `name` of a metric. If the gauge already exists,
returns false.
"""
defmacro declare(name) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).get_spec(name) do
{unquote(module), spec} ->
unquote(metric).declare(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Increments the gauge identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`.
"""
defmacro inc(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).inc(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Increments the gauge identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`. If `value` happened
to be a float even one time(!) you shouldn't use `inc/3` after `dinc/3`.
"""
defmacro dinc(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).dinc(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Decrements the gauge identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`.
"""
defmacro dec(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).dec(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Decrements the gauge identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`. If `value` happened
to be a float even one time(!) you shouldn't use `dec/3` after `ddec/3`.
"""
defmacro ddec(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).ddec(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Sets the gauge identified by `name` and `values` (keyword list with the
correspondence between labels and values) by `value`.
"""
defmacro set(name, values, value) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).set(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Sets the gauge identified by `name` and `values` (keyword list with the
correspondence between labels and values) to the current unix time.
"""
defmacro set_to_current_time(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).set_to_current_time(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Sets the gauge identified by `name` and `values` (keyword list with the
correspondence between labels and values) to the number of currently
executing calls of `function`.
"""
defmacro track_inprogress(name, values, function) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).track_inprogress(spec, unquote(function))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Sets the gauge identified by `name` and `values` (keyword list with the
correspondence between labels and values) to the amount of time spent
executing `function`.
"""
defmacro set_duration(name, values, function) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).set_duration(spec, unquote(function))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Removes gauge series identified by `name` and `values` (keyword list with
the correspondence between labels and values).
"""
defmacro remove(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).remove(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Resets the value of the gauge identified by `name` and `values`
(keyword list with the correspondence between labels and values).
"""
defmacro reset(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).reset(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Returns the value of the gauge identified by `name` and `values`
(keyword list with the correspondence between labels and values).
"""
defmacro value(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Gauge
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).value(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
end
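# Usage sketch (not from the source): the calling module is expected to
# define get_spec/1 and gen_spec/2 returning {Extatus.Metric.Gauge, spec}
# tuples, which the macros above dispatch on. The spec keywords follow
# Prometheus.Metric.Gauge; all names here are hypothetical.
defmodule MyMetricsSketch do
  require Extatus.Metric.Gauge
  alias Extatus.Metric.Gauge

  def get_spec(:queue_size),
    do: {Gauge, [name: :queue_size, labels: [:queue], help: "Queue size"]}

  def gen_spec(:queue_size, values),
    do: {Gauge, [name: :queue_size, labels: [values[:queue]]]}

  def setup, do: Gauge.declare(:queue_size)

  def record(queue, size), do: Gauge.set(:queue_size, [queue: queue], size)
end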
# file: lib/extatus/metric/gauge.ex (source: starcoder)
defmodule AWS.DeviceFarm do
@moduledoc """
Welcome to the AWS Device Farm API documentation, which contains APIs for:
* Testing on desktop browsers
Device Farm makes it possible for you to test your web applications on desktop
browsers using Selenium.
The APIs for desktop browser testing contain `TestGrid` in their names. For more
information, see [Testing Web Applications on Selenium with Device Farm](https://docs.aws.amazon.com/devicefarm/latest/testgrid/).
* Testing on real mobile devices
Device Farm makes it possible for you to test apps on physical phones, tablets,
and other devices in the cloud. For more information, see the [Device Farm Developer Guide](https://docs.aws.amazon.com/devicefarm/latest/developerguide/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2015-06-23",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "devicefarm",
global?: false,
protocol: "json",
service_id: "Device Farm",
signature_version: "v4",
signing_name: "devicefarm",
target_prefix: "DeviceFarm_20150623"
}
end
@doc """
Creates a device pool.
"""
def create_device_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDevicePool", input, options)
end
@doc """
Creates a profile that can be applied to one or more private fleet device
instances.
"""
def create_instance_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateInstanceProfile", input, options)
end
@doc """
Creates a network profile.
"""
def create_network_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateNetworkProfile", input, options)
end
@doc """
Creates a project.
"""
def create_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateProject", input, options)
end
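# Usage sketch (not from the source): calling one of these operations with an
# aws-elixir client struct. Credentials and the input map are illustrative;
# Device Farm itself is only available in us-west-2.
#
#     client = %AWS.Client{
#       access_key_id: System.fetch_env!("AWS_ACCESS_KEY_ID"),
#       secret_access_key: System.fetch_env!("AWS_SECRET_ACCESS_KEY"),
#       region: "us-west-2"
#     }
#
#     {:ok, result, _http_response} =
#       AWS.DeviceFarm.create_project(client, %{"name" => "my-device-farm-project"})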
@doc """
Specifies and starts a remote access session.
"""
def create_remote_access_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRemoteAccessSession", input, options)
end
@doc """
Creates a Selenium testing project.
Projects are used to track `TestGridSession` instances.
"""
def create_test_grid_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTestGridProject", input, options)
end
@doc """
Creates a signed, short-term URL that can be passed to a Selenium
`RemoteWebDriver` constructor.
"""
def create_test_grid_url(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTestGridUrl", input, options)
end
@doc """
Uploads an app or test scripts.
"""
def create_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUpload", input, options)
end
@doc """
Creates a configuration record in Device Farm for your Amazon Virtual Private
Cloud (VPC) endpoint.
"""
def create_vpce_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateVPCEConfiguration", input, options)
end
@doc """
Deletes a device pool given the pool ARN.
Does not allow deletion of curated pools owned by the system.
"""
def delete_device_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDevicePool", input, options)
end
@doc """
Deletes a profile that can be applied to one or more private device instances.
"""
def delete_instance_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteInstanceProfile", input, options)
end
@doc """
Deletes a network profile.
"""
def delete_network_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteNetworkProfile", input, options)
end
@doc """
Deletes an AWS Device Farm project, given the project ARN.
Deleting this resource does not stop an in-progress run.
"""
def delete_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProject", input, options)
end
@doc """
Deletes a completed remote access session and its results.
"""
def delete_remote_access_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRemoteAccessSession", input, options)
end
@doc """
Deletes the run, given the run ARN.
Deleting this resource does not stop an in-progress run.
"""
def delete_run(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRun", input, options)
end
@doc """
Deletes a Selenium testing project and all content generated under it.
You cannot undo this operation.
You cannot delete a project if it has active sessions.
"""
def delete_test_grid_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTestGridProject", input, options)
end
@doc """
Deletes an upload given the upload ARN.
"""
def delete_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUpload", input, options)
end
@doc """
Deletes a configuration for your Amazon Virtual Private Cloud (VPC) endpoint.
"""
def delete_vpce_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteVPCEConfiguration", input, options)
end
@doc """
Returns the number of unmetered iOS or unmetered Android devices that have been
purchased by the account.
"""
def get_account_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAccountSettings", input, options)
end
@doc """
Gets information about a unique device type.
"""
def get_device(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDevice", input, options)
end
@doc """
Returns information about a device instance that belongs to a private device
fleet.
"""
def get_device_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDeviceInstance", input, options)
end
@doc """
Gets information about a device pool.
"""
def get_device_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDevicePool", input, options)
end
@doc """
Gets information about compatibility with a device pool.
"""
def get_device_pool_compatibility(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDevicePoolCompatibility", input, options)
end
@doc """
Returns information about the specified instance profile.
"""
def get_instance_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetInstanceProfile", input, options)
end
@doc """
Gets information about a job.
"""
def get_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetJob", input, options)
end
@doc """
Returns information about a network profile.
"""
def get_network_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetNetworkProfile", input, options)
end
@doc """
Gets the current status and future status of all offerings purchased by an AWS
account.
The response indicates how many offerings are currently available and the
offerings that will be available in the next period. The API returns a
`NotEligible` error if the user is not permitted to invoke the operation. If you
must be able to invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def get_offering_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetOfferingStatus", input, options)
end
@doc """
Gets information about a project.
"""
def get_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetProject", input, options)
end
@doc """
Returns a link to a currently running remote access session.
"""
def get_remote_access_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRemoteAccessSession", input, options)
end
@doc """
Gets information about a run.
"""
def get_run(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRun", input, options)
end
@doc """
Gets information about a suite.
"""
def get_suite(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSuite", input, options)
end
@doc """
Gets information about a test.
"""
def get_test(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTest", input, options)
end
@doc """
Retrieves information about a Selenium testing project.
"""
def get_test_grid_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTestGridProject", input, options)
end
@doc """
A session is an instance of a browser created through a `RemoteWebDriver` with
the URL from `CreateTestGridUrlResult$url`.
You can use the following to look up sessions:
* The session ARN (`GetTestGridSessionRequest$sessionArn`).
* The project ARN and a session ID
(`GetTestGridSessionRequest$projectArn` and
`GetTestGridSessionRequest$sessionId`).
"""
def get_test_grid_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetTestGridSession", input, options)
end
@doc """
Gets information about an upload.
"""
def get_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetUpload", input, options)
end
@doc """
Returns information about the configuration settings for your Amazon Virtual
Private Cloud (VPC) endpoint.
"""
def get_vpce_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetVPCEConfiguration", input, options)
end
@doc """
Installs an application to the device in a remote access session.
For Android applications, the file must be in .apk format. For iOS applications,
the file must be in .ipa format.
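
  ## Example

  A minimal, illustrative call; the ARNs below are placeholders, not real
  resources:

      AWS.DeviceFarm.install_to_remote_access_session(client, %{
        "appArn" => "arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE",
        "remoteAccessSessionArn" => "arn:aws:devicefarm:us-west-2:123456789012:session:EXAMPLE"
      })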
"""
def install_to_remote_access_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "InstallToRemoteAccessSession", input, options)
end
@doc """
Gets information about artifacts.
"""
def list_artifacts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListArtifacts", input, options)
end
@doc """
Returns information about the private device instances associated with one or
more AWS accounts.
"""
def list_device_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDeviceInstances", input, options)
end
@doc """
Gets information about device pools.
"""
def list_device_pools(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDevicePools", input, options)
end
@doc """
Gets information about unique device types.
"""
def list_devices(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDevices", input, options)
end
@doc """
Returns information about all the instance profiles in an AWS account.
"""
def list_instance_profiles(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListInstanceProfiles", input, options)
end
@doc """
Gets information about jobs for a given test run.
"""
def list_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListJobs", input, options)
end
@doc """
Returns the list of available network profiles.
"""
def list_network_profiles(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListNetworkProfiles", input, options)
end
@doc """
Returns a list of offering promotions.
Each offering promotion record contains the ID and description of the promotion.
The API returns a `NotEligible` error if the caller is not permitted to invoke
the operation. Contact
[<EMAIL>](mailto:<EMAIL>) if
you must be able to invoke this operation.
"""
def list_offering_promotions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOfferingPromotions", input, options)
end
@doc """
Returns a list of all historical purchases, renewals, and system renewal
transactions for an AWS account.
The list is paginated and ordered by a descending timestamp (most recent
transactions are first). The API returns a `NotEligible` error if the user is
not permitted to invoke the operation. If you must be able to invoke this
operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def list_offering_transactions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOfferingTransactions", input, options)
end
@doc """
Returns a list of products or offerings that the user can manage through the
API.
Each offering record indicates the recurring price per unit and the frequency
for that offering. The API returns a `NotEligible` error if the user is not
permitted to invoke the operation. If you must be able to invoke this operation,
contact
[<EMAIL>](mailto:<EMAIL>).
"""
def list_offerings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOfferings", input, options)
end
@doc """
Gets information about projects.
"""
def list_projects(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProjects", input, options)
end
@doc """
Returns a list of all currently running remote access sessions.
"""
def list_remote_access_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRemoteAccessSessions", input, options)
end
@doc """
Gets information about runs, given an AWS Device Farm project ARN.
"""
def list_runs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRuns", input, options)
end
@doc """
Gets information about samples, given an AWS Device Farm job ARN.
"""
def list_samples(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSamples", input, options)
end
@doc """
Gets information about test suites for a given job.
"""
def list_suites(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSuites", input, options)
end
@doc """
List the tags for an AWS Device Farm resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Gets a list of all Selenium testing projects in your account.
"""
def list_test_grid_projects(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTestGridProjects", input, options)
end
@doc """
Returns a list of the actions taken in a `TestGridSession`.
"""
def list_test_grid_session_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTestGridSessionActions", input, options)
end
@doc """
Retrieves a list of artifacts created during the session.
"""
def list_test_grid_session_artifacts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTestGridSessionArtifacts", input, options)
end
@doc """
Retrieves a list of sessions for a `TestGridProject`.
"""
def list_test_grid_sessions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTestGridSessions", input, options)
end
@doc """
Gets information about tests in a given test suite.
"""
def list_tests(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTests", input, options)
end
@doc """
Gets information about unique problems, such as exceptions or crashes.
Unique problems are defined as a single instance of an error across a run, job,
or suite. For example, if a call in your application consistently raises an
exception (`OutOfBoundsException in MyActivity.java:386`), `ListUniqueProblems`
returns a single entry instead of many individual entries for that exception.
"""
def list_unique_problems(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListUniqueProblems", input, options)
end
@doc """
Gets information about uploads, given an AWS Device Farm project ARN.
"""
def list_uploads(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListUploads", input, options)
end
@doc """
Returns information about all Amazon Virtual Private Cloud (VPC) endpoint
configurations in the AWS account.
"""
def list_vpce_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListVPCEConfigurations", input, options)
end
@doc """
Immediately purchases offerings for an AWS account.
Offerings renew with the latest total purchased quantity for an offering, unless
the renewal was overridden. The API returns a `NotEligible` error if the user is
not permitted to invoke the operation. If you must be able to invoke this
operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def purchase_offering(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PurchaseOffering", input, options)
end
@doc """
Explicitly sets the quantity of devices to renew for an offering, starting from
the `effectiveDate` of the next period.
The API returns a `NotEligible` error if the user is not permitted to invoke the
operation. If you must be able to invoke this operation, contact
[<EMAIL>](mailto:<EMAIL>).
"""
def renew_offering(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RenewOffering", input, options)
end
@doc """
Schedules a run.
"""
def schedule_run(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ScheduleRun", input, options)
end
@doc """
Initiates a stop request for the current job.
AWS Device Farm immediately stops the job on the device where tests have not
started. You are not billed for this device. On the device where tests have
started, setup suite and teardown suite tests run to completion on the device.
You are billed for setup, teardown, and any tests that were in progress or
already completed.
"""
def stop_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopJob", input, options)
end
@doc """
Ends a specified remote access session.
"""
def stop_remote_access_session(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopRemoteAccessSession", input, options)
end
@doc """
Initiates a stop request for the current test run.
AWS Device Farm immediately stops the run on devices where tests have not
started. You are not billed for these devices. On devices where tests have
started executing, setup suite and teardown suite tests run to completion on
those devices. You are billed for setup, teardown, and any tests that were in
progress or already completed.
"""
def stop_run(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopRun", input, options)
end
@doc """
Associates the specified tags to a resource with the specified `resourceArn`.
If existing tags on a resource are not specified in the request parameters, they
are not changed. When a resource is deleted, the tags associated with that
resource are also deleted.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Deletes the specified tags from a resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates information about a private device instance.
"""
def update_device_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDeviceInstance", input, options)
end
@doc """
Modifies the name, description, and rules in a device pool given the attributes
and the pool ARN.
Rule updates are all-or-nothing, meaning they can only be updated as a whole (or
not at all).
"""
def update_device_pool(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDevicePool", input, options)
end
@doc """
Updates information about an existing private device instance profile.
"""
def update_instance_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateInstanceProfile", input, options)
end
@doc """
Updates the network profile.
"""
def update_network_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateNetworkProfile", input, options)
end
@doc """
Modifies the specified project name, given the project ARN and a new name.
"""
def update_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateProject", input, options)
end
@doc """
Change details of a project.
"""
def update_test_grid_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTestGridProject", input, options)
end
@doc """
Updates an uploaded test spec.
"""
def update_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateUpload", input, options)
end
@doc """
Updates information about an Amazon Virtual Private Cloud (VPC) endpoint
configuration.
"""
def update_vpce_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateVPCEConfiguration", input, options)
end
end
|
lib/aws/generated/device_farm.ex
| 0.805096
| 0.46478
|
device_farm.ex
|
starcoder
|
defmodule Astarte.Flow.Blocks.ModbusTCPSource do
@moduledoc """
An Astarte Flow source that produces data by polling a Modbus device. This block
is built to poll a single Modbus slave; if you want to poll multiple slaves, you
must instantiate multiple flows, changing the slave id in each.
The message contains these fields:
* `key` contains the name defined in the configuration
* `data` contains the data read from the Modbus device, converted with the format
indicated in the configuration.
* `type` depends on the format indicated in the configuration.
* `metadata` contains the static metadata indicated in the configuration.
* `timestamp` contains the timestamp (in microseconds) the message was polled on.
Since polling happens at regular intervals while Flow works in a demand-driven
way, this block implements a queue to buffer incoming messages while waiting
for consumer demand.
"""
use GenStage
require Logger
alias Astarte.Flow.Message
alias Modbux.Tcp.Client, as: ModbusClient
defmodule State do
@moduledoc false
defstruct [
:modbus_connection,
:slave_id,
:targets_map,
:polling_interval_ms,
:pending_demand,
:queue
]
end
@doc """
Starts the `ModbusTCPSource`.
## Options
* `host` (required): the IP address of the Modbus master this block will connect to.
* `slave_id` (required): the slave id that will be polled.
* `targets` (required): an array of maps representing the polling targets. See the section
below for the structure of the map.
* `port`: the TCP port. Defaults to 502, which is the standard Modbus TCP port.
## Target map
Each map in the `targets` option must have the following keys (all as strings):
* `name` (required): the name of the measured quantity. This will be used as `key` in the
resulting Flow Message
* `base_address` (required): the address where the data starts. Depending on `format`, one
or more registers will be read starting from this address.
* `format` (required): one of `:int16`, `:uint16`, `:float32be`, `:float32le`, `:boolean`.
The `be` and `le` suffixes in the `float32` formats indicate the endianness, i.e. the order
of the two 16-bit halves.
* `modbus_type` (required): one of `:coil`, `:discrete_input`, `:input_register`,
`:holding_register`.
* `polling_interval_ms` (required): the interval in milliseconds between two polls of this
target. Must be greater than 1000. *Caveat*: currently this block only supports setting the
same `polling_interval_ms` for all targets; this limitation will be removed in a future release.
* `static_metadata`: a map containing some static metadata that will be added to the message
in the `metadata` field. It can be used to add information (e.g. units of measurement).
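
## Example

A minimal, illustrative configuration that polls a single holding register; the
host, address, and metadata below are assumptions, not defaults:

    {:ok, _pid} =
      Astarte.Flow.Blocks.ModbusTCPSource.start_link(
        host: "192.168.1.10",
        slave_id: 1,
        targets: [
          %{
            "name" => "temperature",
            "base_address" => 0,
            "format" => "float32be",
            "modbus_type" => "holding_register",
            "polling_interval_ms" => 30_000,
            "static_metadata" => %{"units" => "°C"}
          }
        ]
      )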
"""
@spec start_link(opts) :: GenServer.on_start()
when opts: [opt],
opt:
{:host, String.t()}
| {:port, integer()}
| {:slave_id, integer()}
| {:targets, nonempty_list(map())}
def start_link(opts) do
GenStage.start_link(__MODULE__, opts)
end
# GenStage callbacks
@impl true
def init(opts) do
with {:ok, conn_opts} <- build_connection_opts(opts),
{:ok, slave_id} <- fetch_slave_id(opts),
{:ok, targets_map} <- build_targets_map(opts),
{:ok, pid} <- ModbusClient.start_link(conn_opts),
:ok <- ModbusClient.connect(pid) do
# TODO: for now we use a single polling interval
polling_interval_ms = get_polling_interval_ms(targets_map)
state = %State{
modbus_connection: pid,
slave_id: slave_id,
targets_map: targets_map,
polling_interval_ms: polling_interval_ms,
pending_demand: 0,
queue: :queue.new()
}
# Kickoff the polling
send(self(), :poll)
{:producer, state, dispatcher: GenStage.BroadcastDispatcher}
else
{:error, reason} ->
{:stop, reason}
end
end
@impl true
def handle_demand(incoming_demand, %State{pending_demand: demand} = state) do
dispatch_messages(%{state | pending_demand: demand + incoming_demand}, [])
end
@impl true
def handle_cast({:new_message, %Message{} = message}, state) do
%State{
queue: queue
} = state
updated_queue = :queue.in(message, queue)
dispatch_messages(%{state | queue: updated_queue}, [])
end
@impl true
def handle_info(:poll, state) do
%State{
modbus_connection: pid,
slave_id: slave_id,
polling_interval_ms: polling_interval_ms,
targets_map: targets_map
} = state
Enum.each(targets_map, fn {address, %{format: format, read_command: cmd_id}} ->
count = format_to_count(format)
cmd = {cmd_id, slave_id, address, count}
# TODO: for now we expect a valid request, so reconnection is handled
# by crash + supervision. We should probably handle this more gracefully in
# the future
:ok = ModbusClient.request(pid, cmd)
end)
# Fire polling again in a bit
:timer.send_after(polling_interval_ms, :poll)
{:noreply, [], state}
end
def handle_info({:modbus_tcp, {_cmd, _slave_id, address, _count}, values}, state) do
%State{
targets_map: targets_map,
queue: queue
} = state
with {:ok, target} <- Map.fetch(targets_map, address),
{:ok, message} <- build_message(values, target) do
updated_queue = :queue.in(message, queue)
dispatch_messages(%{state | queue: updated_queue}, [])
else
:error ->
_ = Logger.warn("Received data for unknown target address: #{address}")
{:noreply, [], state}
{:error, reason} ->
_ = Logger.warn("Error generating message from Modbus data: #{reason}")
{:noreply, [], state}
end
end
defp build_message(values, %{format: format, name: name} = target) do
with {:ok, data} <- convert_values(values, format) do
timestamp = DateTime.utc_now() |> DateTime.to_unix(:microsecond)
type = format_to_type(format)
metadata = Map.get(target, :static_metadata, %{})
message = %Message{
key: name,
data: data,
type: type,
metadata: metadata,
timestamp: timestamp
}
{:ok, message}
end
end
  defp convert_values([value], :int16) do
    # Reinterpret the 16 bit register value as a signed integer. A fixed-width
    # binary is used here because :binary.encode_unsigned/1 emits a single byte
    # for values < 256, which would not match a 16 bit pattern.
    <<signed_value::integer-signed-size(16)>> = <<value::integer-unsigned-size(16)>>
    {:ok, signed_value}
  end
defp convert_values([value], :uint16) do
{:ok, value}
end
defp convert_values([value], :boolean) do
if value == 0 do
{:ok, false}
else
{:ok, true}
end
end
defp convert_values([_v1, _v2] = values, :float32be) do
float_value = Modbux.IEEE754.from_2_regs(values, :be)
{:ok, float_value}
end
defp convert_values([_v1, _v2] = values, :float32le) do
float_value = Modbux.IEEE754.from_2_regs(values, :le)
{:ok, float_value}
end
defp convert_values(values, format) do
Logger.warn("Invalid conversion, values: #{inspect(values)}, format: #{inspect(format)}")
{:error, :invalid_conversion}
end
defp format_to_count(format) do
case format do
:int16 -> 1
:uint16 -> 1
:float32be -> 2
:float32le -> 2
:boolean -> 1
end
end
defp format_to_type(format) do
case format do
:int16 -> :integer
:uint16 -> :integer
:float32be -> :real
:float32le -> :real
:boolean -> :boolean
end
end
defp build_connection_opts(opts) do
with {:ok, ip} <- fetch_ip(opts),
{:ok, port} <- fetch_port(opts) do
opts = [
active: true,
ip: ip,
tcp_port: port
]
{:ok, opts}
end
end
defp fetch_ip(opts) do
with {:ok, host_string} <- Keyword.fetch(opts, :host),
host_charlist = to_charlist(host_string),
{:ok, parsed_ip} <- :inet.parse_address(host_charlist) do
{:ok, parsed_ip}
else
:error ->
{:error, :missing_host}
{:error, :einval} ->
{:error, :invalid_host_ip}
end
end
defp fetch_port(opts) do
case Keyword.get(opts, :port, 502) do
port when is_integer(port) and port >= 1 and port <= 65535 ->
{:ok, port}
_ ->
{:error, :invalid_port}
end
end
defp fetch_slave_id(opts) do
case Keyword.fetch(opts, :slave_id) do
{:ok, slave_id} when is_integer(slave_id) and slave_id >= 1 and slave_id <= 247 ->
{:ok, slave_id}
:error ->
{:error, :missing_slave_id}
_ ->
{:error, :invalid_slave_id}
end
end
defp build_targets_map(opts) do
with {:ok, targets_config} when targets_config != [] <- Keyword.fetch(opts, :targets),
{:ok, targets} <- convert_targets(targets_config),
targets_map = Enum.into(targets, %{}),
# TODO: temporary check to ensure all targets have the same polling interval
:ok <- check_polling_interval(targets_map) do
{:ok, targets_map}
else
:error ->
{:error, :missing_targets}
# Empty targets
{:ok, []} ->
{:error, :empty_targets}
{:error, reason} ->
{:error, reason}
end
end
defp check_polling_interval(targets_map) do
[%{polling_interval_ms: first_interval} | tail] = Map.values(targets_map)
all_intervals_equal =
Enum.all?(tail, fn %{polling_interval_ms: interval} -> interval == first_interval end)
if all_intervals_equal do
:ok
else
{:error, :polling_intervals_must_be_equal}
end
end
defp convert_targets(targets_config) do
Enum.reduce_while(targets_config, {:ok, []}, fn target, {:ok, acc} ->
with {:ok, address} <- Map.fetch(target, "base_address"),
{:ok, target_value} <- build_target(target) do
entry = {address, target_value}
{:cont, {:ok, [entry | acc]}}
else
:error ->
{:halt, {:error, :missing_base_address}}
{:error, reason} ->
{:halt, {:error, reason}}
end
end)
end
defp build_target(target_config) do
with {:name, {:ok, name}} <- {:name, Map.fetch(target_config, "name")},
{:format, {:ok, format}} <- {:format, Map.fetch(target_config, "format")},
{:ok, format_atom} <- cast_format(format),
{:modbus_type, {:ok, modbus_type}} <-
{:modbus_type, Map.fetch(target_config, "modbus_type")},
{:ok, read_command} <- get_read_command(modbus_type),
{:polling_interval, {:ok, polling_interval}} <-
{:polling_interval, Map.fetch(target_config, "polling_interval_ms")} do
static_metadata = Map.get(target_config, "static_metadata")
target = %{
name: name,
format: format_atom,
static_metadata: static_metadata,
read_command: read_command,
polling_interval_ms: polling_interval
}
{:ok, target}
else
{:name, :error} ->
{:error, :missing_name_in_target}
{:format, :error} ->
{:error, :missing_format_in_target}
{:modbus_type, :error} ->
{:error, :missing_modbus_type_in_target}
{:polling_interval, :error} ->
{:error, :missing_polling_interval_in_target}
{:error, reason} ->
{:error, reason}
end
end
  defp cast_format(format) do
    case format do
      "int16" -> {:ok, :int16}
      "uint16" -> {:ok, :uint16}
      "float32be" -> {:ok, :float32be}
      "float32le" -> {:ok, :float32le}
      # :boolean is documented above and handled by convert_values/2, so it is
      # accepted here as well
      "boolean" -> {:ok, :boolean}
      _ -> {:error, :invalid_format}
    end
  end
defp get_read_command(modbus_type) do
case modbus_type do
"coil" -> {:ok, :rc}
"discrete_input" -> {:ok, :ri}
"input_register" -> {:ok, :rir}
"holding_register" -> {:ok, :rhr}
_ -> {:error, :invalid_modbus_type}
end
end
defp get_polling_interval_ms(targets_map) do
# TODO: right now the targets are guaranteed to have all the same polling interval
# so we just take the first one
Map.values(targets_map)
|> hd()
|> Map.fetch!(:polling_interval_ms)
end
defp dispatch_messages(%State{pending_demand: 0} = state, messages) do
{:noreply, Enum.reverse(messages), state}
end
defp dispatch_messages(%State{pending_demand: demand, queue: queue} = state, messages) do
case :queue.out(queue) do
{{:value, message}, updated_queue} ->
updated_state = %{state | pending_demand: demand - 1, queue: updated_queue}
updated_messages = [message | messages]
dispatch_messages(updated_state, updated_messages)
{:empty, _queue} ->
{:noreply, Enum.reverse(messages), state}
end
end
end
|
lib/astarte_flow/blocks/modbus_tcp_source.ex
| 0.810291
| 0.531757
|
modbus_tcp_source.ex
|
starcoder
|
defmodule Benchee.Conversion do
@moduledoc """
Integration of the conversion of multiple units with benchee.
Can be used by plugins to use benchee unit scaling logic.
"""
alias Benchee.Conversion.{Count, Duration, Memory}
@doc """
Takes scenarios and a given scaling_strategy, returns the best units for the
given scaling strategy. The return value changes based on whether you want
units for run time or memory usage.
The units can then be passed on to the appropriate `format` calls to format
the output of arbitrary values with the right unit.
## Examples
iex> statistics = %Benchee.Statistics{average: 1_000_000.0, ips: 1000.0}
iex> scenario = %Benchee.Scenario{
...> run_time_data: %Benchee.CollectionData{statistics: statistics},
...> memory_usage_data: %Benchee.CollectionData{statistics: statistics},
...> reductions_data: %Benchee.CollectionData{statistics: statistics}
...> }
iex> Benchee.Conversion.units([scenario], :best)
%{
ips: %Benchee.Conversion.Unit{
label: "K",
long: "Thousand",
magnitude: 1000,
name: :thousand
},
run_time: %Benchee.Conversion.Unit{
label: "ms",
long: "Milliseconds",
magnitude: 1_000_000,
name: :millisecond
},
memory: %Benchee.Conversion.Unit{
label: "KB",
long: "Kilobytes",
magnitude: 1024,
name: :kilobyte
},
reduction_count: %Benchee.Conversion.Unit{
label: "M",
long: "Million",
magnitude: 1000000,
name: :million
}
}
"""
def units(scenarios, scaling_strategy) do
    run_time_measurements = measurements_for(scenarios, :run_time_data)
    reductions_measurements = measurements_for(scenarios, :reductions_data)
    memory_measurements = measurements_for(scenarios, :memory_usage_data)
%{
run_time: Duration.best(run_time_measurements.average, strategy: scaling_strategy),
ips: Count.best(run_time_measurements.ips, strategy: scaling_strategy),
memory: Memory.best(memory_measurements.average, strategy: scaling_strategy),
      reduction_count: Count.best(reductions_measurements.average, strategy: scaling_strategy)
}
end
  defp measurements_for(scenarios, path) do
paths = [Access.key(path), Access.key(:statistics)]
scenarios
|> Enum.flat_map(fn scenario -> scenario |> get_in(paths) |> Map.to_list() end)
|> Enum.group_by(fn {stat_name, _} -> stat_name end, fn {_, value} -> value end)
end
end
|
lib/benchee/conversion.ex
| 0.91934
| 0.639201
|
conversion.ex
|
starcoder
|
defmodule OMG.Eth do
@moduledoc """
Library for common code of the adapter/port to contracts deployed on Ethereum.
NOTE: The library code is not intended to be used outside of `OMG.Eth`: use `OMG.Eth.RootChain` and `OMG.Eth.Token` as main
entrypoints to the contract-interaction functionality.
NOTE: This wrapper is intended to be as thin as possible, only offering a consistent API to the Ethereum JSONRPC client and contracts.
Handles other non-contract queries to the Ethereum client.
  Notes on encoding: all APIs of `OMG.Eth` and the submodules with contract APIs always take and
  return raw, decoded binaries - never hex-encoded ones. Such binaries may be passed as-is onto
  `ABI`-related functions; however, they must be encoded/decoded when entering/leaving the
  `Ethereumex` realm.
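
  As an illustrative sketch of the calling convention (the addresses, function
  signature, and return types below are assumptions, not a real deployment):

      contract = OMG.Eth.Encoding.from_hex("0x1234567890abcdef1234567890abcdef12345678")
      owner = OMG.Eth.Encoding.from_hex("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd")
      {:ok, balance} = OMG.Eth.call_contract(contract, "balanceOf(address)", [owner], [{:uint, 256}])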
"""
import OMG.Eth.Encoding
@type address :: <<_::160>>
@type hash :: <<_::256>>
def get_ethereum_height do
case Ethereumex.HttpClient.eth_block_number() do
{:ok, height_hex} ->
{:ok, int_from_hex(height_hex)}
other ->
other
end
end
@doc """
Returns placeholder for non-existent Ethereum address
"""
@spec zero_address :: address()
def zero_address, do: <<0::160>>
def call_contract(contract, signature, args, return_types) do
data = signature |> ABI.encode(args)
with {:ok, return} <- Ethereumex.HttpClient.eth_call(%{to: to_hex(contract), data: to_hex(data)}),
do: decode_answer(return, return_types)
end
defp decode_answer(enc_return, return_types) do
enc_return
|> from_hex()
|> ABI.TypeDecoder.decode(return_types)
|> case do
[single_return] -> {:ok, single_return}
other when is_list(other) -> {:ok, List.to_tuple(other)}
end
end
@spec contract_transact(address, address, binary, [any], keyword) :: {:ok, binary} | {:error, any}
def contract_transact(from, to, signature, args, opts \\ []) do
data = encode_tx_data(signature, args)
txmap =
%{from: to_hex(from), to: to_hex(to), data: data}
|> Map.merge(Map.new(opts))
|> encode_all_integer_opts()
with {:ok, txhash} <- Ethereumex.HttpClient.eth_send_transaction(txmap),
do: {:ok, from_hex(txhash)}
end
defp encode_all_integer_opts(opts) do
opts
|> Enum.filter(fn {_k, v} -> is_integer(v) end)
|> Enum.into(opts, fn {k, v} -> {k, to_hex(v)} end)
end
def get_bytecode!(path_project_root, contract_name) do
"0x" <> read_contracts_bin!(path_project_root, contract_name)
end
defp encode_tx_data(signature, args) do
signature
|> ABI.encode(args)
|> to_hex()
end
defp encode_constructor_params(args, types) do
args
|> ABI.TypeEncoder.encode_raw(types)
# NOTE: we're not using `to_hex` because the `0x` will be appended to the bytecode already
|> Base.encode16(case: :lower)
end
def deploy_contract(addr, bytecode, types, args, opts) do
    enc_args = encode_constructor_params(args, types)
txmap =
%{from: to_hex(addr), data: bytecode <> enc_args}
|> Map.merge(Map.new(opts))
|> encode_all_integer_opts()
with {:ok, txhash} <- Ethereumex.HttpClient.eth_send_transaction(txmap),
do: {:ok, from_hex(txhash)}
end
defp read_contracts_bin!(path_project_root, contract_name) do
path = "_build/contracts/#{contract_name}.bin"
case File.read(Path.join(path_project_root, path)) do
{:ok, contract_json} ->
contract_json
{:error, reason} ->
raise(
RuntimeError,
"Can't read #{path} because #{inspect(reason)}, try running mix deps.compile plasma_contracts"
)
end
end
defp event_topic_for_signature(signature) do
signature |> ExthCrypto.Hash.hash(ExthCrypto.Hash.kec()) |> to_hex()
end
defp filter_not_removed(logs) do
logs |> Enum.filter(&(not Map.get(&1, "removed", true)))
end
def get_ethereum_events(block_from, block_to, signature, contract) do
topic = event_topic_for_signature(signature)
try do
{:ok, logs} =
Ethereumex.HttpClient.eth_get_logs(%{
fromBlock: to_hex(block_from),
toBlock: to_hex(block_to),
address: to_hex(contract),
topics: ["#{topic}"]
})
{:ok, filter_not_removed(logs)}
catch
_ -> {:error, :failed_to_get_ethereum_events}
end
end
def parse_event(%{"data" => data} = log, {signature, keys}) do
decoded_values =
data
|> from_hex()
|> ABI.TypeDecoder.decode(ABI.FunctionSelector.decode(signature))
Enum.zip(keys, decoded_values)
|> Map.new()
|> common_parse_event(log)
end
def parse_events_with_indexed_fields(
%{"data" => data, "topics" => [_event_sig | indexed_data]} = log,
{non_indexed_keys, non_indexed_key_types},
{indexed_keys, indexed_keys_types}
) do
decoded_non_indexed_fields =
data
|> from_hex()
|> ABI.TypeDecoder.decode(non_indexed_key_types)
non_indexed_fields =
Enum.zip(non_indexed_keys, decoded_non_indexed_fields)
|> Map.new()
decoded_indexed_fields =
for {encoded, type_sig} <- Enum.zip(indexed_data, indexed_keys_types) do
[decoded] =
encoded
|> from_hex()
|> ABI.TypeDecoder.decode([type_sig])
decoded
end
indexed_fields =
Enum.zip(indexed_keys, decoded_indexed_fields)
|> Map.new()
Map.merge(non_indexed_fields, indexed_fields)
|> common_parse_event(log)
end
@doc """
Gets the decoded call data of a contract call, based on a particular Ethereum-tx hash and some info on the contract
function.
`eth_tx_hash` is expected encoded in raw binary format, as usual
NOTE: function name and rich information about argument names and types is used, rather than its compact signature
(like elsewhere) because `ABI.decode` has some issues with parsing signatures in this context.
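
  For example (the function name, argument names, and types below are
  illustrative, not a real contract function):

      get_call_data(eth_tx_hash, "deposit", [:owner, :amount], [:address, :uint256])
      # => %{owner: <<...>>, amount: 1_000_000}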
"""
@spec get_call_data(binary(), binary(), list(atom), list(atom)) :: map
def get_call_data(eth_tx_hash, name, arg_names, arg_types) do
{:ok, %{"input" => eth_tx_input}} = Ethereumex.HttpClient.eth_get_transaction_by_hash(to_hex(eth_tx_hash))
encoded_input = from_hex(eth_tx_input)
function_inputs =
ABI.decode(
ABI.FunctionSelector.parse_specification_item(%{
"type" => "function",
"name" => name,
"inputs" => Enum.map(arg_types, &%{"type" => to_string(&1)}),
"outputs" => []
}),
encoded_input
)
Enum.zip(arg_names, function_inputs)
|> Map.new()
end
defp common_parse_event(result, %{"blockNumber" => eth_height}) do
result
|> Map.put(:eth_height, int_from_hex(eth_height))
end
end
|
apps/omg_eth/lib/eth.ex
| 0.894138
| 0.576542
|
eth.ex
|
starcoder
|
defmodule TelemetryInfluxDB.Test.FluxParser do
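  @moduledoc """
  Test helper that parses annotated Flux CSV (as returned by the InfluxDB 2.x
  query API) into a flat list of row maps, casting each column according to
  the `#datatype` annotation.

  For example (an illustrative, minimal response):

      csv =
        "#datatype,string,long,dateTime:RFC3339,double\\n" <>
        ",result,table,_time,_value\\n" <>
        ",,0,2021-01-01T00:00:00Z,1.5"

      parse_tables(csv)
      # => [%{"result" => "", "table" => 0,
      #       "_time" => ~U[2021-01-01 00:00:00.000000Z], "_value" => 1.5}]
  """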
alias NimbleCSV.RFC4180, as: CSV
@column_types %{
"boolean" => :boolean,
"double" => :double,
"string" => :string,
"long" => :long,
"unsignedLong" => :unsigned_long,
"dateTime:RFC3339" => :datetime
}
def parse_tables(csv) do
csv
|> extract_chunks()
|> Enum.flat_map(&parse_chunk/1)
end
def extract_chunks(csv) do
csv
|> String.trim()
|> String.split(~r/\n\s*\n/)
end
def parse_chunk(chunk) do
{annotations, data} =
chunk
|> String.split("\n")
|> Enum.split_with(&is_annotation?/1)
table_data = data |> parse_lines() |> separate_tables()
column_types = annotations |> parse_lines() |> get_column_types()
Enum.flat_map(table_data, &parse_table(&1, column_types))
end
defp is_annotation?(line) do
String.starts_with?(line, "#")
end
defp parse_lines(lines) do
lines
|> Enum.join("\n")
|> String.trim()
|> parse_csv()
end
defp separate_tables(parsed) when parsed == [], do: [[]]
defp separate_tables([headers | rows]) do
table_index = Enum.find_index(headers, fn header -> header == "table" end)
rows
|> Enum.chunk_by(fn row -> Enum.at(row, table_index) end)
|> Enum.map(fn table_rows -> List.insert_at(table_rows, 0, headers) end)
end
def get_column_types([]), do: []
def get_column_types(annotation_data) do
col_types_index =
annotation_data
|> Enum.find_index(fn a -> List.first(a) == "#datatype" end)
annotation_data
|> Enum.at(col_types_index)
end
defp parse_table(table, _column_types) when length(table) == 0, do: %{}
defp parse_table(table, column_types) do
[column_names | table_rows] = table
parse_rows(table_rows, column_names, column_types)
end
defp parse_rows(
table,
[_datatype | column_names],
[_ | column_types]
) do
Enum.map(table, fn [_empty | row] -> parse_row(row, column_types, column_names) end)
end
defp parse_row(row, types, columns) do
[types, columns, row]
|> Enum.zip()
|> Enum.map(fn column_info -> type_value(column_info) end)
|> Enum.into(%{})
end
defp type_value({raw_type, column, value}) do
type = Map.get(@column_types, raw_type)
typed_value = parse_value(value, type)
{column, typed_value}
end
def parse_value("null", _type), do: nil
def parse_value("true", :boolean), do: true
def parse_value("false", :boolean), do: false
def parse_value(string, :string), do: string
def parse_value("NaN", :double), do: NaN
def parse_value(string, :double) do
case Float.parse(string) do
{value, _} -> value
:error -> raise ArgumentError, "invalid double argument: '#{string}'"
end
end
def parse_value(datetime, :datetime) do
case DateTime.from_iso8601(datetime) do
{:ok, datetime, _offset} -> %{datetime | microsecond: {0, 6}}
{:error, _} -> raise ArgumentError, "invalid datetime argument: '#{datetime}'"
end
end
def parse_value(raw, :unsigned_long) do
value = parse_integer(raw)
if value < 0 do
raise ArgumentError, message: "invalid unsigned_long argument: '#{value}'"
end
value
end
def parse_value(raw, :long), do: parse_integer(raw)
defp parse_integer("NaN"), do: NaN
defp parse_integer(raw) do
{value, _} = Integer.parse(raw, 10)
value
end
def parse_csv(csv) do
CSV.parse_string(csv, skip_headers: false)
end
end
|
test/support/flux_parser.ex
| 0.59972
| 0.450118
|
flux_parser.ex
|
starcoder
|
alias InterpreterTerms.SymbolMatch, as: Sym
alias InterpreterTerms.WordMatch, as: Word
defmodule Interpreter.Diff do
def similarity( a, b ) do
{ matching, total } = similarity_calc( a, b )
matching / total
end
@doc """
Returns a similarity number. Comparing how similar the two objects
are.
We compare this by looking at the amount of terms in the query, and
seeing how much they overlap.
The matching is returned as a tuple containing the total amount of
positive similarities as the first value, and the total amount of
compared similarities as the second value.
@return { positive_similaties, total_similarities }
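
  For example, two identical word matches compare as a full match
  (illustrative):

      similarity_calc( %Word{ word: "SELECT" }, %Word{ word: "SELECT" } )
      # => { 1, 1 }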
"""
def similarity_calc( %Sym{ submatches: asub } = a, %Sym{ submatches: bsub } = b ) do
if shallow_same?( a, b ) do
{ self_positive_similarities, self_total_similarities } = { 1, 1 }
asub = if asub == :none do [] else asub end
bsub = if bsub == :none do [] else bsub end
longest_length = max( Enum.count( asub ), Enum.count( bsub ) )
shortest_length = min( Enum.count( asub ), Enum.count( bsub ) )
{ sub_positive_similarities, sub_total_similarities } =
[ asub, bsub ]
|> Enum.zip
|> Enum.reduce( {0,0}, fn (sub_elts, acc_similarities) ->
{asub_elt, bsub_elt} = sub_elts
{ positive_similarities, total_similarities } = acc_similarities
{add_pos, add_total} = similarity_calc( asub_elt, bsub_elt )
{positive_similarities + add_pos, total_similarities + add_total}
end )
{ missing_matches_positive_similarities, missing_matches_total_similarities } =
{ 0, longest_length - shortest_length }
{ self_positive_similarities + sub_positive_similarities + missing_matches_positive_similarities,
self_total_similarities + sub_total_similarities + missing_matches_total_similarities }
else
{ 0, 1 }
end
end
def similarity_calc( a, b ) do
if shallow_same?( a, b ) do
{ 1, 1 }
else
{ 0, 1 }
end
end
def shallow_same?( %Sym{ symbol: a, submatches: :none, string: str_a }, %Sym{ symbol: a, submatches: :none, string: str_b } ) do
str_a == str_b
end
def shallow_same?( %Sym{ symbol: a , whitespace: whitespace }, %Sym{ symbol: a , whitespace: whitespace } ) do
true
end
def shallow_same?( %Sym{ symbol: a, whitespace: _whitespace_one }, %Sym{ symbol: a, whitespace: _whitespace_two } ) do
# Symbols with different whitespace are different.
# TODO: merge with the last clause? this will basically fall
# through to there.
false
end
def shallow_same?( %Word{ word: word, whitespace: whitespace }, %Word{ word: word, whitespace: whitespace } ) do
true
end
def shallow_same?( _, _ ) do
false
end
end
|
lib/interpreter/diff/diff.ex
| 0.77343
| 0.493958
|
diff.ex
|
starcoder
|
defmodule Job do
@moduledoc """
Managed execution of potentially failing actions.
A job is a logical unit of work which is split into multiple _actions_, where each action is
running in its own separate process, which is a child of the job process.
## Basic sketch
Job.start_link(fn ->
# this is the root action
{:ok, action1} = Job.start_action(fn ->
# ...
end)
{:ok, action2} = Job.start_action(fn ->
      result_of_action_3 =
        Job.run(fn ->
          # ...
        end)
# ...
end)
{Job.await(action1), Job.await(action2)}
end)
## Comparison to tasks
Job is somewhat similar to `Task`, with the main difference that it manages the execution of its
actions. The job process stops only after all of its actions have terminated, which is a
guarantee not provided by `Task`.
In addition, `Job` makes some different decisions. Most notably, a crash of an action is
automatically converted into an error result, which simplifies the implementation of custom crash
logic (e.g. error reporting).
If slight asynchronism during termination can be tolerated, and there's no need to implement
custom crash logic, `Task` is likely a better option.
## Actions
Each job runs one or more actions, which are technically child processes of the job process.
A job is started with the _root action_. When the root action stops, the job process will also
stop with the same exit reason. This is the only case where `Job` propagates a process exit.
In its simplest form, an action can be a zero-arity function or an MFA tuple. In both cases,
the action process will be powered by `Task`. You can also power the action process by your own
module. See "Custom action processes" for details.
Each action can start additional actions. Logically, such action is treated as the child of
the action which started it. If the parent action terminates, its children will also be taken
down.
  Technically every action process will be running as a direct child of the job process. This
  decision is made to simplify the process tree and reduce the number of running processes.
You can start additional actions with `start_action/2` or `run_action/2`. If `start_action/2` is
used, the action result can be awaited on with `await/1`. These functions may only be invoked
inside an action process.
In case of `Task`-powered jobs, the action result is the return value of the invoked function. If
the action process crashes, the result will be `{:exit, exit_reason}`. To avoid ambiguity, it is
recommended to return ok/error tuples from the actions.
## Custom action processes
To power the action by a custom logic (e.g. `GenServer`), you need to provide the action
spec factory function to `start_action/2` or `run_action/2`:
Job.start_action(
fn responder ->
child_spec(responder)
end
)
The factory function takes the responder, which is an arity one anonymous function. The function
should return a child specification (`t:Parent.start_spec/0`) which describes how to start the
action process.
The action process needs to invoke `responder.(action_response)` to send the response back to
its caller. This function may only be invoked inside the action process. As soon as this function
is invoked, the action process must stop with the reason `:normal`.
  If the action process is stopping with an abnormal exit reason, it shouldn't invoke the responder
  function. Doing so would lead to a duplicate response message in the mailbox of the parent action.
For example of custom actions, see `OsCmd` and `Job.Pipeline`.
## Internals
The job process is powered by `Parent`, with all actions running as its children. Logical
hierarchy (parent-child relationship between actions) is modeled via bound siblings feature
of `Parent`.
"""
use Parent.GenServer
@type action ::
action_fun_or_mfa
| {action_fun_or_mfa, [action_opt]}
| (responder -> {Parent.start_spec(), [action_opt]})
@type action_fun_or_mfa :: (() -> response) | {module :: atom, function :: atom, args :: [any]}
@type response :: any
@type responder :: (response -> :ok)
@type start_opt :: {:respond_to, pid} | {:name, GenServer.name()} | action_opt
@type action_opt :: {:timeout, timeout} | {:telemetry_id, [any]} | {:telemetry_meta, map}
@doc """
Starts the job process and the root action.
## Options
- `:respond_to` - a pid of the process that will receive the result of the root action.
The target process can await on the result with `await/1`. If this option is not provided,
the response will not be sent.
- `name` - Registered name of the process. If not provided, the process won't be registered.
- action option - see `start_action/2` for details.
"""
@spec start_link(action, [start_opt]) :: GenServer.on_start()
def start_link(action, opts \\ []) do
{gen_server_opts, opts} = Keyword.split(opts, ~w/name/a)
Parent.GenServer.start_link(__MODULE__, {action, opts}, gen_server_opts)
end
@doc "Returns a child specification for inserting a job into the supervision tree."
@spec child_spec({action, [start_opt]} | action) :: Parent.child_spec()
def child_spec({action, opts}),
do: Parent.parent_spec(id: __MODULE__, start: {__MODULE__, :start_link, [action, opts]})
def child_spec(action), do: child_spec({action, []})
@doc "Starts the job with `start_link/2`, and awaits for it with `await/1`."
@spec run(action, [start_opt]) :: response | {:exit, reason :: any}
def run(action, opts \\ []) do
with {:ok, pid} <- start_link(action, Keyword.merge(opts, respond_to: self())),
do: await(pid)
end
@doc """
Awaits for the job or the action response.
This function must be invoked in the process which receives the response message. In the case of
the job, this is the process specified with the `:respond_to` start option. When awaiting on the
action, this is the logical parent action (i.e. the process that started the action).
This function will await indefinitely. There's no support for client-side timeout. Instead, you
can use the `:timeout` action option to limit the duration of an action.
If the awaited job or action crashes, the response will be `{:exit, exit_reason}`. To avoid
ambiguity, it is recommended to return ok/error tuples from each action.
"""
@spec await(GenServer.server()) :: response | {:exit, reason :: any}
def await(server) do
pid = whereis!(server)
mref = Process.monitor(pid)
# We assume that response will always be sent. This is ensured by the parent logic which
# sends the response in the `handle_stopped_children` callback.
response =
receive do
{__MODULE__, :response, ^pid, response} -> response
end
# Awaiting for the process to stop before returning the response to improve synchronism.
receive do
{:DOWN, ^mref, :process, ^pid, _} -> response
end
end
@doc """
Starts a child action.
See module documentation for details on action specification.
The started action process will logically be treated as the child of the caller process. If the
caller process terminates, the started process will be taken down as well.
Unlike the root action, a child action always sends response to its caller. You can await this
response with `await/1`.
This function can only be invoked inside an action process.
## Options
- `:timeout` - Maximum duration of the action. If the action exceeds the given duration, it
  will be forcefully taken down. Defaults to five seconds.
- `:telemetry_id` - If provided, telemetry start and stop events will be emitted. This option
must be a list. Job will append `:start` and `:stop` atoms to this list to emit the events.
- `:telemetry_meta` - Additional metadata to send with telemetry events. If not provided, an
empty map is used.
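
  For example (`do_work/0` and the telemetry id are illustrative):

      {:ok, action} =
        Job.start_action(
          fn -> do_work() end,
          timeout: :timer.seconds(10),
          telemetry_id: [:my_app, :work]
        )

      Job.await(action)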
"""
@spec start_action(action, [action_opt]) :: Parent.on_start_child()
def start_action(action, opts \\ []) do
{action_spec, action_opts} = action_spec(action, responder(self()))
opts = Keyword.merge(action_opts, opts)
child_overrides = child_overrides(opts, self(), id: nil, binds_to: [self()])
Parent.Client.start_child(parent(), action_spec, child_overrides)
end
@doc "Starts a child action and awaits for its response."
@spec run_action(action, [action_opt]) :: response | {:exit, reason :: any}
def run_action(action, opts \\ []) do
with {:ok, pid} <- start_action(action, opts),
do: await(pid)
end
@impl GenServer
def init({action, opts}) do
{respond_to, opts} = Keyword.pop(opts, :respond_to)
{action_spec, action_opts} = action_spec(action, responder(respond_to, from: self()))
opts = Keyword.merge(action_opts, opts)
child_overrides = child_overrides(opts, respond_to, id: :main)
case Parent.start_child(action_spec, child_overrides) do
{:ok, _pid} -> {:ok, nil}
{:error, reason} -> {:stop, reason}
end
end
@impl Parent.GenServer
def handle_stopped_children(%{main: %{exit_reason: reason, meta: meta}}, state) do
send_exit_response(self(), reason, meta)
exit_reason = if is_nil(meta.respond_to), do: reason, else: :normal
{:stop, exit_reason, state}
end
@impl Parent.GenServer
def handle_stopped_children(other, state) do
Enum.each(
other,
fn {pid, %{exit_reason: reason, meta: meta}} -> send_exit_response(pid, reason, meta) end
)
{:noreply, state}
end
defp action_spec(fun, responder) when is_function(fun, 1) do
{action_spec, action_opts} = fun.(responder)
{Supervisor.child_spec(action_spec, []), action_opts}
end
defp action_spec({fun_or_mfa, action_opts}, responder),
do: {task_spec(fun_or_mfa, responder), action_opts}
defp action_spec(fun_or_mfa, responder), do: action_spec({fun_or_mfa, []}, responder)
defp task_spec(invocable, responder), do: {Task, fn -> responder.(invoke(invocable)) end}
defp invoke(fun) when is_function(fun, 0), do: fun.()
defp invoke({module, function, args}), do: apply(module, function, args)
defp send_exit_response(from, reason, meta) do
unless is_nil(meta.telemetry_id) do
duration = System.monotonic_time() - meta.start
:telemetry.execute(
meta.telemetry_id ++ [:stop],
%{duration: duration},
meta.telemetry_meta
)
end
if(reason != :normal, do: respond(meta.respond_to, {:exit, reason}, from: from))
end
defp parent, do: hd(Process.get(:"$ancestors"))
defp responder(respond_to, opts \\ []),
do: &respond(respond_to, &1, opts)
defp respond(server, response, opts) do
with server when not is_nil(server) <- server,
pid when not is_nil(pid) <- GenServer.whereis(server),
do: send(pid, {__MODULE__, :response, Keyword.get(opts, :from, self()), response})
:ok
end
defp child_overrides(overrides, respond_to, extra_overrides) do
start = System.monotonic_time()
telemetry_id = Keyword.get(overrides, :telemetry_id)
telemetry_meta = Keyword.get(overrides, :telemetry_meta, %{})
unless is_nil(telemetry_id),
do: :telemetry.execute(telemetry_id ++ [:start], %{time: start}, telemetry_meta)
meta = %{
respond_to: respond_to,
start: start,
telemetry_id: telemetry_id,
telemetry_meta: telemetry_meta
}
[timeout: :timer.seconds(5)]
|> Keyword.merge(overrides)
|> Keyword.merge(extra_overrides)
|> Keyword.merge(meta: meta, restart: :temporary, ephemeral?: true)
end
defp whereis!(server) do
case GenServer.whereis(server) do
pid when is_pid(pid) -> pid
nil -> raise "process #{inspect(server)} not found"
end
end
end
|
lib/job.ex
| 0.860193
| 0.710603
|
job.ex
|
starcoder
|
defmodule FilterQueryParser do
@moduledoc """
Small library to handle parsing of github style filter queries.
## Examples
iex> FilterQueryParser.parse("campaign:Lagerverkauf trainer:Josè")
{:ok, [{"campaign", "Lagerverkauf"}, {"trainer", "Josè"}]}
"""
import NimbleParsec
@doc "See module docs for `FilterQueryParser`"
def parse(query) do
with {:ok, matches, "", %{}, _, _} <- query |> String.trim() |> parse_query() do
{:ok, matches |> Enum.map(&List.to_tuple/1)}
end
end
# Prepend equal sign for only integer filters
defp prepend_equal(_, args, context, _, _), do: {args ++ [:=], context}
# Build a date struct from the parsed date string values
defp build_date([year, month, day]) do
case Date.new(year, month, day) do
{:ok, date} -> date
_ -> "invalid-date"
end
end
# field is a lowercase ascii name with more than 2 characters (e.g. is:…)
field = ascii_string([?a..?z], min: 2)
# Quoted string value
# Started with double quotes and terminated by non-escaped ones
quoted_string =
ignore(ascii_char([?"]))
|> repeat(
lookahead_not(ascii_char([?"]))
|> choice([
~S(\") |> string() |> replace(?"),
utf8_char([])
])
)
|> ignore(ascii_char([?"]))
|> reduce({List, :to_string, []})
# String value without quotes
# Terminated by a space character
  string = utf8_string([{:not, ?\s}], min: 1)
# Match a string date in YYYY-MM-DD format
date =
integer(4)
|> ignore(string("-"))
|> integer(2)
|> ignore(string("-"))
|> integer(2)
|> reduce(:build_date)
# Operators for integer values
operator =
choice([
string("=") |> replace(:==),
string("==") |> replace(:==),
string(">=") |> replace(:>=),
string("<=") |> replace(:<=),
string(">") |> replace(:>),
string("<") |> replace(:<)
])
  # Match a value that starts with digits but continues with other characters
  # (e.g. "0012abc"), so it is kept as a string rather than parsed as an
  # integer; leading zeros are captured explicitly because integer/1 drops them
  string_starting_with_number =
optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> optional(string("0"))
|> integer(min: 1)
|> utf8_string([], min: 1)
|> reduce({Enum, :join, [""]})
# Match integer value and optional operator
# Default to adding := as operator
integer =
choice([
operator |> integer(min: 1),
integer(min: 1) |> post_traverse(:prepend_equal)
])
# Parse a query
defparsec :parse_query,
field
|> ignore(ascii_char([?:]))
|> choice([date, string_starting_with_number, integer, quoted_string, string])
|> ignore(optional(string(" ")))
|> wrap()
|> repeat()
end
|
lib/filter_query_parser.ex
| 0.76145
| 0.432782
|
filter_query_parser.ex
|
starcoder
|
defmodule Grizzly.Inclusion do
@moduledoc """
Z-Wave Inclusion Server
## Overview
When using this process inclusion and exclusion are
done asynchronously and information will be communicated
via message passing.
By default this process will send information about the
inclusion to the process that started the inclusion. However,
if you pass the `:client` option into the call that points to
a `pid`, messages will be sent to that process.
Moverover, the caller can pass in the `:timeout` option inorder to
set the timeout of the inclusion. By default this is set to one minute.
## Adding Nodes
### Add
To add a node to the network this is what will be called:
```elixir
Grizzly.Inclusion.add_node()
```
This will return `:ok` and set the Z-Wave module into add node mode.
You can use `RingLogger.next` to see the logs from `grizzly` and
`Grizzly` to verify. In this example, whatever messages that are sent
will go to the process who called this function. If you have a process
that you what to filter all inclusion messages through you can run this:
```elixir
Grizzly.Inclusion.add_node(client: some_pid)
```
This will then filter all messages to that client.
A notification will be broadcasted, and a message sent to the client, like this:
`{:node_added, %Grizzly.Node{}}`
### Remove
To remove a node from the network this function should be called:
```elixir
Grizzly.Inclusion.remove_node()
```
The `:client` option works like adding a node.
When removing a node is successful and complete, a notification will be broadcasted, and a message sent to the client, like this:
`{:node_removed, node_id}`
Where `node_id` is an integer of the node's id that was removed
If the `node_id` is `0`, then the node was removed from another
network and now can be included into this controller's network.
### Stopping
This is useful for when an inclusion has started and the user wants to
  stop the inclusion process from taking place. The function to do this is:
```elixir
Grizzly.Inclusion.add_node_stop()
```
When this takes places the client will be sent a message like this:
`:node_add_stopped`
This is the same for removing a node but instead run this function:
```elixir
Grizzly.Inclusion.remove_node_stop()
```
And this message will be sent to the client
`:node_remove_stopped`
### Learn mode
It is necessary to put the controller into Learn mode for it to be included by another controller.
This is required for certification testing.
```elixir
Grizzly.Inclusion.start_learn_mode()
```
The `:client` option works like adding a node.
  When being in Learn mode completes, a message is sent to the client, like this:
`{:learn_mode_set, %{status: :done, new_node_id: 4}}`
When `status` is :done, `new_node_id` is the new node id taken by the controller (an integer other than 0).
When `status` is :failed or :security_failed, Learn mode completed without the controller being included.
## Timeouts
By default the timeout is set to one minute, but the `:timeout`
option can passed into to either `add_node/1` or `remove_node/1`
in milliseconds to adjust the timeout.
  When the timeout triggers, the client will
be sent two messages. The first is to let the client know that
it timed out and the second is to confirm that the inclusion
process was stopped on the Z-Wave module.
For when `add_node` was called, the messages look like:
```elixir
{:timeout, :add_node}
:node_add_stopped
```
And for when `remove_node` was called, the messages look like:
```elixir
  {:timeout, :remove_node}
  :node_remove_stopped
```
The controller will only stay in Learn mode for a limited amount of time. If the process times out before it completes
(successfully or not), the Learn mode is aborted.
## Errors
Errors are reported to the client as follows
```elixir
{:error, :node_add_failed}
{:error, :node_add_stopped}
{:error, :node_remove_failed}
{:error, :node_remove_stopped}
{:error, :learn_mode_failed}
{:error, :learn_mode_stopped}
```
"""
use GenServer
require Logger
alias Grizzly.{SeqNumber, Notifications, Controller, Node, Security, Conn}
alias Grizzly.CommandClass.NetworkManagementInclusion.{
NodeAdd,
NodeRemove,
NodeAddKeysSet,
NodeAddDSKSet
}
alias Grizzly.CommandClass.NetworkManagementBasic
alias Grizzly.CommandClass.NetworkManagementBasic.LearnModeSet
@typedoc """
Options for inclusion and exclusion process
- `:client` - the process the messages from the inclusion will sent to (default `self`)
- `:timeout` - the timeout interval for when to stop the adding/removing a node (default 60_000)
  - `:pin` - used for S2 authenticated inclusion; this should be the 5 digit number printed on the joining device.
  - `:s2_keys` - which keys to grant when the joining node requests keys; the highest granted security group will be used.
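
  For example (the key names below are illustrative `Grizzly.Security.key()`
  values; check that module for the supported set):

  ```elixir
  Grizzly.Inclusion.add_node(pin: 12345, s2_keys: [:s2_authenticated])
  ```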
"""
@type opt ::
{:client, pid()}
| {:timeout, non_neg_integer()}
| {:pin, non_neg_integer | nil}
| {:s2_keys, [Security.key()]}
@type learn_mode_report :: %{
status: NetworkManagementBasic.learn_mode_status(),
new_node_id: non_neg_integer
}
@type invalid_opts_reason :: :pin_required_for_s2_authentication | :pin_size_invalid
defmodule State do
@moduledoc false
alias Grizzly.Conn
@type t :: %__MODULE__{
conn: Conn.t() | nil,
inclusion_opts: [Grizzly.Inclusion.opt()]
}
defstruct conn: nil,
inclusion_opts: []
end
def start_link(_) do
GenServer.start_link(__MODULE__, nil, name: __MODULE__)
end
@doc """
Start the process to add a node the network
"""
@spec add_node([opt]) :: :ok | {:error, {:invalid_option, invalid_opts_reason}}
def add_node(opts \\ []) do
opts = setup_inclusion_opts(opts)
case validate_options(opts) do
:valid ->
GenServer.cast(__MODULE__, {:add_node, opts})
{:invalid_option, _reason} = error ->
{:error, error}
end
end
@doc """
Stop the process to add a node to the network
"""
@spec add_node_stop([opt]) :: :ok
def add_node_stop(opts \\ [])
def add_node_stop(opts) do
opts = if opts == [], do: setup_inclusion_opts(opts), else: opts
GenServer.cast(__MODULE__, {:add_node_stop, opts})
end
@doc """
Remove a node from the network
"""
@spec remove_node([opt]) :: :ok
def remove_node(opts \\ []) do
opts = setup_inclusion_opts(opts)
GenServer.cast(__MODULE__, {:remove_node, opts})
end
@doc """
Stop the remove node process from running
"""
@spec remove_node_stop([opt]) :: :ok
def remove_node_stop(opts \\ [])
def remove_node_stop(opts) do
opts = if opts == [], do: setup_inclusion_opts(opts), else: opts
GenServer.cast(__MODULE__, {:remove_node_stop, opts})
end
@doc """
Put the controller in learn mode for a few seconds
"""
@spec start_learn_mode([opt]) :: :ok | {:error, {:invalid_option, invalid_opts_reason()}}
def start_learn_mode(opts \\ []) do
opts = setup_inclusion_opts(opts)
case validate_options(opts) do
:valid ->
GenServer.cast(__MODULE__, {:learn_mode_start, opts})
{:invalid_option, _reason} = error ->
{:error, error}
end
end
@doc """
Put the controller out of learn mode
"""
def stop_learn_mode(opts) do
GenServer.cast(__MODULE__, {:learn_mode_stop, opts})
end
@impl true
def init(_) do
:ok = Notifications.subscribe(:controller_connected)
{:ok, %State{}}
end
@impl true
def handle_cast({:add_node, opts}, %State{conn: conn} = state) do
seq_number = SeqNumber.get_and_inc()
case Grizzly.send_command(
conn,
NodeAdd,
seq_number: seq_number,
exec_state: :including,
timeout: Keyword.get(opts, :timeout)
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Add node failed: #{inspect(reason)}")
send_to_client(opts, {:error, :node_add_failed})
end
{:noreply, %{state | inclusion_opts: opts}}
end
def handle_cast(
{:add_node_stop, opts},
%State{conn: conn} = state
) do
seq_number = SeqNumber.get_and_inc()
# Cancel any busy network state before stopping inclusion
case Grizzly.send_command(
conn,
NodeAdd,
seq_number: seq_number,
mode: :stop,
pre_states: [:including],
exec_state: :inclusion_stopping
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Add node stop failed: #{inspect(reason)}")
send_to_client(opts, {:error, :node_add_stopped})
end
{:noreply, %{state | inclusion_opts: opts}}
end
def handle_cast({:remove_node, opts}, %State{conn: conn} = state) do
case Grizzly.send_command(
conn,
NodeRemove,
seq_number: SeqNumber.get_and_inc(),
exec_state: :excluding,
timeout: Keyword.get(opts, :timeout)
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Remove node failed: #{inspect(reason)}")
send_to_client(opts, {:error, :node_remove_failed})
end
{:noreply, %{state | inclusion_opts: opts}}
end
def handle_cast(
{:remove_node_stop, opts},
%State{conn: conn} = state
) do
seq_number = SeqNumber.get_and_inc()
case Grizzly.send_command(
conn,
NodeRemove,
seq_number: seq_number,
mode: :stop,
pre_states: [:excluding],
exec_state: :exclusion_stopping
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Remove node stop failed: #{inspect(reason)}")
send_to_client(opts, {:error, :node_remove_stopped})
end
{:noreply, %{state | inclusion_opts: opts}}
end
def handle_cast({:learn_mode_start, opts}, %State{conn: conn} = state) do
seq_number = SeqNumber.get_and_inc()
case Grizzly.send_command(
conn,
LearnModeSet,
seq_number: seq_number,
mode: :enable,
exec_state: :learning,
timeout: Keyword.get(opts, :timeout)
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Learn mode set failed: #{inspect(reason)}")
send_to_client(opts, {:error, :learn_mode_failed})
end
{:noreply, %{state | inclusion_opts: opts}}
end
def handle_cast({:learn_mode_stop, opts}, %State{conn: conn} = state) do
seq_number = SeqNumber.get_and_inc()
_ = Logger.info("Disabling learn mode")
case Grizzly.send_command(
conn,
LearnModeSet,
seq_number: seq_number,
mode: :disable,
pre_states: [:learning]
) do
:ok ->
:ok
{:error, reason} ->
_ = Logger.warn("Learn mode disable failed: #{inspect(reason)}")
send_to_client(opts, {:error, :learn_mode_stopped})
end
{:noreply, %{state | inclusion_opts: opts}}
end
@impl true
def handle_info(:controller_connected, %State{} = state) do
# Checkout an async version of the controllers connection
{:noreply, %{state | conn: Controller.conn(:async)}}
end
def handle_info({:timeout, command_module}, %State{inclusion_opts: opts} = state) do
case command_module do
NodeRemove ->
send_to_client(opts, {:timeout, :remove_node})
remove_node_stop(opts)
NodeAdd ->
send_to_client(opts, {:timeout, :add_node})
add_node_stop(opts)
LearnModeSet ->
_ = Logger.warn("Setting learn mode timed out")
stop_learn_mode(opts)
end
{:noreply, state}
end
def handle_info(
{:async_command, {:ok, %Node{} = zw_node}},
%State{inclusion_opts: inclusion_opts} = state
) do
with {:ok, zw_node} <- Node.connect(zw_node),
{:ok, zw_node} <-
Node.add_lifeline_group(zw_node, network_state: :configurating_new_node) do
Notifications.broadcast(:node_added, zw_node)
send_to_client(inclusion_opts, {:node_added, zw_node})
end
{:noreply, reset_state(state)}
end
def handle_info(
{:async_command, {:ok, :node_add_stopped}},
%State{inclusion_opts: inclusion_opts} = state
) do
send_to_client(inclusion_opts, :node_add_stopped)
{:noreply, state}
end
def handle_info(
{:async_command, {:ok, :node_remove_stopped}},
%State{inclusion_opts: inclusion_opts} = state
) do
send_to_client(inclusion_opts, :node_remove_stopped)
{:noreply, state}
end
def handle_info(
{:async_command, {:error, reason} = error},
%State{inclusion_opts: inclusion_opts} = state
) do
_ = Logger.warn("Error on #{reason}")
send_to_client(inclusion_opts, error)
{:noreply, reset_state(state)}
end
def handle_info(
{
:async_command,
{:node_add_keys_report, %{csa?: false, requested_keys: _requested_keys}}
},
%State{inclusion_opts: inclusion_opts, conn: conn} = state
) do
seq_number = SeqNumber.get_and_inc()
keys_to_grant = Keyword.get(inclusion_opts, :s2_keys)
:ok =
Grizzly.send_command(
conn,
NodeAddKeysSet,
seq_number: seq_number,
granted_keys: keys_to_grant
)
send_to_client(inclusion_opts, :setting_s2_keys)
{:noreply, state}
end
# This handle_info is for S2_unauthenticated devices
def handle_info(
{:async_command, {:dsk_report_info, %{dsk: _dsk, required_input_length: 0}}},
%State{inclusion_opts: inclusion_opts, conn: conn} = state
) do
seq_number = SeqNumber.get_and_inc()
:ok = Grizzly.send_command(conn, NodeAddDSKSet, seq_number: seq_number, input_dsk_length: 0)
send_to_client(inclusion_opts, :sending_dsk_input)
{:noreply, state}
end
def handle_info(
{:async_command, {:dsk_report_info, %{dsk: _dsk, required_input_length: 2}}},
%State{inclusion_opts: inclusion_opts, conn: conn} = state
) do
case Keyword.get(inclusion_opts, :pin) do
nil ->
send_to_client(inclusion_opts, :provide_S2_pin)
pin ->
_ = send_node_add_dsk_set(conn, pin)
send_to_client(inclusion_opts, :sending_dsk_input)
end
{:noreply, state}
end
def handle_info(
# weak signal
{:async_command, {:ok, node_id}},
%State{inclusion_opts: inclusion_opts} = state
)
when is_integer(node_id) do
Notifications.broadcast(:node_removed, node_id)
send_to_client(inclusion_opts, {:node_removed, node_id})
{:noreply, reset_state(state)}
end
def handle_info(
{:async_command, {:ok, %{status: _status} = status_report}},
%State{inclusion_opts: inclusion_opts} = state
) do
_ = Logger.info("Learning mode status report: #{inspect(status_report)}")
send_to_client(inclusion_opts, {:learn_mode_set, status_report})
{:noreply, state}
end
def handle_info(message, state) do
_ = Logger.info("Unhandled inclusion process message: #{inspect(message)}")
{:noreply, state}
end
defp setup_inclusion_opts(opts) do
opts
|> Keyword.put_new(:timeout, 60_000)
|> Keyword.put_new(:client, self())
|> Keyword.put_new(:s2_keys, [])
end
defp validate_options(options) do
case Security.get_highest_level(options[:s2_keys]) do
:s2_authenticated ->
validate_pin_option(options[:pin])
_ ->
:valid
end
end
defp validate_pin_option(nil), do: {:invalid_option, :pin_required_for_s2_authentication}
defp validate_pin_option(pin) do
case Security.validate_user_input_pin_length(pin) do
:invalid -> {:invalid_option, :pin_size_invalid}
:valid -> :valid
end
end
defp send_to_client(opts, message) do
client = Keyword.get(opts, :client)
send(client, message)
end
defp reset_state(%State{} = state) do
%{state | inclusion_opts: []}
end
defp send_node_add_dsk_set(conn, pin) do
seq_number = SeqNumber.get_and_inc()
Grizzly.send_command(
conn,
NodeAddDSKSet,
seq_number: seq_number,
input_dsk: pin,
input_dsk_length: 2
)
end
end
|
lib/grizzly/inclusion.ex
| 0.887281
| 0.903294
|
inclusion.ex
|
starcoder
|
defmodule Itsy.Float do
use Bitwise
require Itsy.Bit
alias Itsy.Bit
@type infinity :: :"-inf" | :"+inf"
@type rounding :: :down | :up | :even
@type sign_size :: non_neg_integer
@type exponent_size :: non_neg_integer
@type mantissa_size :: non_neg_integer
@type encoding :: { sign_size, exponent_size, mantissa_size }
@type precision :: encoding | 16 | 32 | 64 | 128 | 256
@type options :: [rounding: rounding, precision: precision, raw: boolean]
@doc """
Get the sign of a float.
Precision is used if the value is a bitstring. By default the precision is
set to a binary64, but this can be changed by setting the `:precision` option.
This can either be passed in a standard IEEE 754 encoding format, or the
precision can be set for each part of the float (sign, encoding,
mantissa/significand).
iex> Itsy.Float.sign(1)
0
iex> Itsy.Float.sign(-1)
1
iex> Itsy.Float.sign(:"+inf")
0
iex> Itsy.Float.sign(:"-inf")
1
iex> Itsy.Float.sign(<<6::size(5)>>, precision: { 1, 2, 2 })
0
"""
@spec sign(float | infinity, []) :: 0 | 1
@spec sign(bitstring, [precision: precision]) :: 0 | 1
def sign(v, opts \\ [])
def sign(v, _) when is_number(v), do: sign(<<v :: float>>)
def sign(:"+inf", _), do: 0
def sign(:"-inf", _), do: 1
def sign(v, opts) do
{ sp, _, _ } = format_options(opts)[:precision]
<<s :: size(sp), _ :: bitstring>> = v
s
end
@doc """
Get the exponent of a float.
Precision is used if the value is a bitstring or infinity atom. By default
the precision is set to a binary64, but this can be changed by setting the
`:precision` option. This can either be passed in a standard IEEE 754
encoding format, or the precision can be set for each part of the float
(sign, encoding, mantissa/significand).
iex> Itsy.Float.exponent(1)
0
iex> Itsy.Float.exponent(-1)
0
iex> Itsy.Float.exponent(:"+inf")
-1023
iex> Itsy.Float.exponent(:"-inf")
-1023
iex> Itsy.Float.exponent(<<6::size(5)>>, precision: { 1, 2, 2 })
0
"""
@spec exponent(float | infinity, []) :: integer
@spec exponent(bitstring, [precision: precision]) :: integer
def exponent(v, opts \\ [])
def exponent(v, _) when is_number(v), do: exponent(<<v :: float>>)
def exponent(v, opts) when v in [:"+inf", :"-inf"] do
{ _, ep, _ } = format_options(opts)[:precision]
-Bit.set(ep - 1)
end
def exponent(v, opts) do
{ sp, ep, _ } = format_options(opts)[:precision]
<<_s :: size(sp), e :: size(ep), _ :: bitstring>> = v
e - Bit.set(ep - 1)
end
@doc """
Get the mantissa of a float.
Precision is used if the value is a bitstring or infinity atom. By default
the precision is set to a binary64, but this can be changed by setting the
`:precision` option. This can either be passed in a standard IEEE 754
encoding format, or the precision can be set for each part of the float
(sign, encoding, mantissa/significand).
iex> Itsy.Float.mantissa(1)
0
iex> Itsy.Float.mantissa(-1)
0
iex> Itsy.Float.mantissa(:"+inf")
0
iex> Itsy.Float.mantissa(:"-inf")
0
iex> Itsy.Float.mantissa(<<6::size(5)>>, precision: { 1, 2, 2 })
2
"""
@spec mantissa(float | infinity, []) :: non_neg_integer
@spec mantissa(bitstring, [precision: precision]) :: non_neg_integer
def mantissa(v, opts \\ [])
def mantissa(v, _) when is_number(v), do: mantissa(<<v :: float>>)
def mantissa(v, _) when v in [:"+inf", :"-inf"], do: 0
def mantissa(v, opts) do
{ sp, ep, mp } = format_options(opts)[:precision]
<<_s :: size(sp), _e :: size(ep), m :: size(mp)>> = v
m
end
@doc """
Get the machine epsilon, i.e. the unit of least precision of 1.0.
iex> Itsy.Float.epsilon()
2.220446049250313e-16
"""
@spec epsilon() :: float
def epsilon, do: ulp(1)
@doc """
Get the unit of least precision of a number.
iex> Itsy.Float.ulp(3.14159265358979323846)
4.440892098500626e-16
iex> Itsy.Float.ulp(1.0e15)
0.125
iex> Itsy.Float.ulp(1.0e16)
2.0
"""
@spec ulp(number) :: float
def ulp(v) do
<<f :: 64>> = <<v :: float>>
<<v1 :: float>> = <<f + 1 :: 64>>
v1 - v
end
defp ulps(<<a :: 64>>, <<b :: 64>>) when a > b, do: a - b
defp ulps(<<a :: 64>>, <<b :: 64>>), do: b - a
defp ulps(a, b), do: ulps(<<a :: float>>, <<b :: float>>)
@doc """
Check if two numbers are equal based on ulps difference.
iex> Itsy.Float.ulps_equality(1.0, 1.0, 0)
true
iex> Itsy.Float.ulps_equality(1.5, 1.0, 1125899906842624)
false
iex> Itsy.Float.ulps_equality(1.5, 1.0, 2251799813685248)
true
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.ulps_equality(1.0, 0)
false
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.ulps_equality(1.0, 1)
true
"""
@spec ulps_equality(number, number, integer) :: boolean
def ulps_equality(a, b, max) do
if sign(a) == sign(b) do
ulps(a, b) <= max
else
a == b
end
end
@doc """
Check if two numbers are equal based on relative difference.
iex> Itsy.Float.relative_equality(1.0, 1.0, 0)
true
iex> Itsy.Float.relative_equality(1.5, 1.0, 0.25)
false
iex> Itsy.Float.relative_equality(1.5, 1.0, 0.5)
true
iex> Itsy.Float.relative_equality(1.5, 1.0, 0.4)
true
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.relative_equality(1.0, 0)
false
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.relative_equality(1.0, Itsy.Float.epsilon)
true
"""
@spec relative_equality(number, number, number) :: boolean
def relative_equality(a, b, relative_diff) do
diff = abs(a - b)
a = abs(a)
b = abs(b)
diff <= (if(a > b, do: a, else: b) * relative_diff)
end
@doc """
Check if two numbers are equal based on absolute difference.
iex> Itsy.Float.absolute_equality(1.0, 1.0, 0)
true
iex> Itsy.Float.absolute_equality(1.5, 1.0, 0.25)
false
iex> Itsy.Float.absolute_equality(1.5, 1.0, 0.5)
true
iex> Itsy.Float.absolute_equality(1.5, 1.0, 0.4)
false
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.absolute_equality(1.0, 0)
false
iex> Stream.repeatedly(fn -> 0.1 end) |> Enum.take(10) |> Enum.sum |> Itsy.Float.absolute_equality(1.0, Itsy.Float.epsilon)
true
"""
@spec absolute_equality(number, number, number) :: boolean
def absolute_equality(a, b, diff), do: abs(a - b) <= diff
@spec format_options(options) :: [raw: boolean, rounding: rounding, precision: encoding]
defp format_options(opts) do
opts = Keyword.merge([raw: false, precision: 64, rounding: :even], opts)
case opts[:precision] do
256 -> Keyword.replace!(opts, :precision, { 1, 19, 236 })
128 -> Keyword.replace!(opts, :precision, { 1, 15, 112 })
64 -> Keyword.replace!(opts, :precision, { 1, 11, 52 })
32 -> Keyword.replace!(opts, :precision, { 1, 8, 23 })
16 -> Keyword.replace!(opts, :precision, { 1, 5, 10 })
precision when is_tuple(precision) -> opts
end
end
@doc """
Create a float from an integer value and exponent.
The default precision is set to a binary64, but this can be changed by
setting the `:precision` option. This can either be passed in a standard
IEEE 754 encoding format, or the precision can be set for each part of
the float (sign, encoding, mantissa/significand).
By default the return value will be a float converting from the underlying
precision, or one of the infinity atoms. However if `:raw` is set to `true`
the return type will be the unconverted binary for the given precision.
The rounding defaults to the standard IEEE 754 rounding mode of round half
to even (`:even`). If this is not desired, an alternative rounding mode
can be specified using `:rounding`.
iex> Itsy.Float.new(0)
0.0
iex> Itsy.Float.new(1, 20)
1.0e20
iex> Itsy.Float.new(1, -20)
1.0e-20
iex> Itsy.Float.new(1, -1, rounding: :even)
0.1
iex> Itsy.Float.new(1, -1, rounding: :down)
0.09999999999999999
iex> Itsy.Float.new(3, -1, rounding: :even)
0.3
iex> Itsy.Float.new(3, -1, rounding: :up)
0.30000000000000004
iex> Itsy.Float.new(2225073858507201, -323)
2.225073858507201e-308
iex> Itsy.Float.new(17976931348623157, 292)
1.7976931348623157e308
iex> Itsy.Float.new(13, -1, precision: { 1, 2, 2 })
1.25
iex> Itsy.Float.new(14, -1, precision: { 1, 2, 2 })
1.5
iex> Itsy.Float.new(-14, -1, precision: { 1, 2, 2 })
-1.5
iex> Itsy.Float.new(14, -1, precision: { 1, 2, 2 }, raw: true)
<<6::size(5)>>
iex> Itsy.Float.new(9999, 0, precision: { 1, 2, 2 })
:"+inf"
iex> Itsy.Float.new(-9999, 0, precision: { 1, 2, 2 })
:"-inf"
iex> Itsy.Float.new(1, -100000, precision: { 1, 2, 2 })
0.5
"""
@spec new(integer, integer, options) :: float | infinity | bitstring
def new(value, exponent \\ 0, opts \\ []) do
opts = format_options(opts)
encoding = { sp, ep, mp } = opts[:precision]
e_max = Bit.set(ep - 1)
{ e, m } = if(value != 0, do: integer_to_float(value, exponent * -1, opts[:rounding], encoding), else: { -e_max, 0 })
s = case sp do
0 -> 0
_ -> boolean_to_integer(value < 0)
end
if e > e_max do
if opts[:raw] do
<<s :: size(sp), -1 :: size(ep), 0 :: size(mp)>>
else
if(s == 0, do: :"+inf", else: :"-inf")
end
else
if opts[:raw] do
<<s :: size(sp), e + e_max :: size(ep), m :: size(mp)>>
else
e = e + 1023
m = case mp do
0 -> 0
size when size <= 52 -> min(m, Bit.set(mp)) <<< (52 - size)
size -> min(m, Bit.set(mp)) >>> abs(52 - size)
end
<<f :: float>> = <<s :: 1, e :: 11, m :: 52>>
f
end
end
end
@spec whole_to_float(integer, mantissa_size) :: { integer, integer }
defp whole_to_float(v, mp) do
e = Bit.highest_set(v) |> Bit.mask_lower_power_of_2
m = e &&& v
e = Bit.count(e)
if e >= mp do
m = (m >>> (e - mp)) + ((m >>> (e - mp - 1)) &&& 1)
if Bit.count(Bit.mask(m)) > mp do
{ e + 1, 0 }
else
{ e, m }
end
else
{ e, m <<< (mp - e) }
end
end
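# Chooses whether to add 1 to the mantissa: :down truncates, :up always
# rounds up, and :even implements round half to even (on an exact tie the
# mantissa's lowest bit decides).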
@spec rounding(integer, integer, integer, rounding) :: 0 | 1
defp rounding(_, _, _, :down), do: 0
defp rounding(_, _, _, :up), do: 1
defp rounding(v, precision, m, :even) do
case (rem(v, precision) * 2) do
v when v == precision -> m &&& 1
v when v > precision -> 1
_ -> 0
end
end
@spec fraction_to_mantissa(integer, integer, integer, rounding, mantissa_size, integer, integer) :: integer
defp fraction_to_mantissa(v, e, precision, rounding, size, m \\ 0, index \\ 0)
defp fraction_to_mantissa(0, _, _, _, _, m, _), do: m
defp fraction_to_mantissa(v, _, precision, rounding, size, m, size), do: m + rounding(v, precision, m, rounding)
defp fraction_to_mantissa(v, e, precision, rounding, size, m, index) do
v = rem(v, precision) * 2
m = m ||| ((boolean_to_integer(v >= precision) <<< ((size - 1) - index)) >>> e)
fraction_to_mantissa(v, e, precision, rounding, size, m, index + 1)
end
@spec fraction_to_exponent(integer, integer, integer, integer, integer) :: { integer, integer }
defp fraction_to_exponent(v, precision, max, e \\ nil, index \\ 0)
defp fraction_to_exponent(v, _, _, e, _) when e != nil, do: { e, v }
defp fraction_to_exponent(0, _, _, _, _), do: { nil, 0 }
defp fraction_to_exponent(v, _, max, _, max), do: { max, v }
defp fraction_to_exponent(v, precision, opts, _, index) do
v = rem(v, precision) * 2
fraction_to_exponent(v, precision, opts, if(v >= precision, do: index), index + 1)
end
@spec integer_to_float(integer, integer, rounding, encoding) :: { integer, integer }
defp integer_to_float(v, precision, rounding, encoding) when precision < 0 do
precision = pow10(abs(precision))
integer_to_float(0, abs(v) * precision, precision, rounding, encoding)
end
defp integer_to_float(v, precision, rounding, encoding) do
precision = pow10(precision)
integer_to_float(abs(v), div(abs(v), precision), precision, rounding, encoding)
end
@spec integer_to_float(integer, integer, integer, rounding, encoding) :: { integer, integer }
defp integer_to_float(v, 0, precision, rounding, { _, ep, mp }) do
{ e, v } = fraction_to_exponent(v, precision, Bit.set(ep - 1) - 1)
m = fraction_to_mantissa(v, 0, precision, rounding, mp)
{ ~~~e, m }
end
defp integer_to_float(v, i, precision, rounding, { _, _, mp }) do
{ e, m } = whole_to_float(i, mp)
m = fraction_to_mantissa(v, e, precision, rounding, mp, m)
{ e, m }
end
@spec pow10(non_neg_integer) :: non_neg_integer
defp pow10(n), do: pow10(1, n)
defp pow10(x, 0), do: x
defp pow10(x, n), do: pow10(x * 10, n - 1)
defp boolean_to_integer(false), do: 0
defp boolean_to_integer(true), do: 1
end
|
lib/itsy/float.ex
| 0.868186
| 0.416559
|
float.ex
|
starcoder
|
defmodule Crawly.Engine do
@moduledoc """
Crawly Engine - process responsible for starting and stopping spiders.
Stores all currently running spiders.
"""
require Logger
use GenServer
@type t :: %__MODULE__{
started_spiders: started_spiders(),
known_spiders: [Crawly.spider()]
}
@type started_spiders() :: %{optional(Crawly.spider()) => identifier()}
@type spider_info() :: %{
name: Crawly.spider(),
status: :stopped | :started,
pid: identifier() | nil
}
defstruct(started_spiders: %{}, known_spiders: [])
@doc """
Starts a spider. All options passed in the second argument will be passed along to the spider's `init/1` callback.
### Reserved Options
- `:crawl_id` (binary). Optional, automatically generated if not set.
- `:closespider_itemcount` (integer | disabled). Optional, overrides the close
spider item count on startup.
- `:closespider_timeout` (integer | disabled). Optional, overrides the close
spider timeout on startup.
- `:concurrent_requests_per_domain` (integer). Optional, overrides the number of
workers for a given spider
### Backward compatibility
If the 2nd positional argument is a binary, it will be set as the `:crawl_id`. Deprecated, will be removed in the future.
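### Example
Starting a hypothetical `MySpider` module with an explicit crawl id (both
the module name and the id below are illustrative):
```
Crawly.Engine.start_spider(MySpider, crawl_id: "2021-01-01-demo-crawl")
```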
"""
@type crawl_id_opt :: {:crawl_id, binary()}
@spec start_spider(Crawly.spider(), opts) :: result
when opts: [crawl_id_opt],
result:
:ok
| {:error, :spider_already_started}
| {:error, atom()}
def start_spider(spider_name, opts \\ [])
def start_spider(spider_name, crawl_id) when is_binary(crawl_id) do
Logger.warn(
"Deprecation Warning: Setting the crawl_id as second positional argument is deprecated. Please use the :crawl_id option instead. Refer to docs for more info (https://hexdocs.pm/crawly/Crawly.Engine.html#start_spider/2) "
)
start_spider(spider_name, crawl_id: crawl_id)
end
def start_spider(spider_name, opts) when is_list(opts) do
opts =
Enum.into(opts, %{})
|> Map.put_new_lazy(:crawl_id, &UUID.uuid1/0)
# Filter all logs related to a given spider
case {Crawly.Utils.get_settings(:log_to_file, spider_name),
Crawly.Utils.ensure_loaded?(LoggerFileBackend)} do
{true, true} ->
configure_spider_logs(spider_name, opts[:crawl_id])
{true, false} ->
Logger.warn(
":logger_file_backend https://github.com/onkel-dirtus/logger_file_backend#loggerfilebackend must be installed as a peer dependency if log_to_file config is set to true"
)
_ ->
false
end
GenServer.call(
__MODULE__,
{:start_spider, spider_name, opts[:crawl_id], Map.to_list(opts)}
)
end
@spec get_manager(Crawly.spider()) :: pid() | {:error, :spider_not_found}
def get_manager(spider_name) do
case Map.fetch(running_spiders(), spider_name) do
:error ->
{:error, :spider_not_found}
{:ok, {pid_sup, _job_tag}} ->
Supervisor.which_children(pid_sup)
|> Enum.find(&match?({Crawly.Manager, _, :worker, [Crawly.Manager]}, &1))
|> case do
nil ->
{:error, :spider_not_found}
{_, pid, :worker, _} ->
pid
end
end
end
@spec stop_spider(Crawly.spider(), reason) :: result
when reason: :itemcount_limit | :itemcount_timeout | atom(),
result:
:ok | {:error, :spider_not_running} | {:error, :spider_not_found}
def stop_spider(spider_name, reason \\ :ignore) do
GenServer.call(__MODULE__, {:stop_spider, spider_name, reason})
end
@spec list_known_spiders() :: [spider_info()]
def list_known_spiders() do
GenServer.call(__MODULE__, :list_known_spiders)
end
@spec running_spiders() :: started_spiders()
def running_spiders() do
GenServer.call(__MODULE__, :running_spiders)
end
@spec get_spider_info(Crawly.spider()) :: spider_info() | nil
def get_spider_info(spider_name) do
GenServer.call(__MODULE__, {:get_spider, spider_name})
end
def refresh_spider_list() do
GenServer.cast(__MODULE__, :refresh_spider_list)
end
def start_link(_) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@spec get_crawl_id(Crawly.spider()) ::
{:error, :spider_not_running} | {:ok, binary()}
def get_crawl_id(spider_name) do
GenServer.call(__MODULE__, {:get_crawl_id, spider_name})
end
@spec init(any) :: {:ok, t()}
def init(_args) do
spiders = get_updated_known_spider_list()
{:ok, %Crawly.Engine{known_spiders: spiders}}
end
def handle_call({:get_manager, spider_name}, _, state) do
pid =
case Map.get(state.started_spiders, spider_name) do
nil ->
{:error, :spider_not_found}
pid ->
pid
end
{:reply, pid, state}
end
def handle_call({:get_crawl_id, spider_name}, _from, state) do
msg =
case Map.get(state.started_spiders, spider_name) do
nil ->
{:error, :spider_not_running}
{_pid, crawl_id} ->
{:ok, crawl_id}
end
{:reply, msg, state}
end
def handle_call(:running_spiders, _from, state) do
{:reply, state.started_spiders, state}
end
def handle_call(:list_known_spiders, _from, state) do
return = Enum.map(state.known_spiders, &format_spider_info(&1, state))
{:reply, return, state}
end
def handle_call(
{:start_spider, spider_name, crawl_id, options},
_form,
state
) do
result =
case Map.get(state.started_spiders, spider_name) do
nil ->
Crawly.EngineSup.start_spider(spider_name, options)
_ ->
{:error, :spider_already_started}
end
{msg, new_started_spiders} =
case result do
{:ok, pid} ->
{:ok, Map.put(state.started_spiders, spider_name, {pid, crawl_id})}
{:error, _} = err ->
{err, state.started_spiders}
end
{:reply, msg, %Crawly.Engine{state | started_spiders: new_started_spiders}}
end
def handle_call({:stop_spider, spider_name, reason}, _form, state) do
{msg, new_started_spiders} =
case Map.pop(state.started_spiders, spider_name) do
{nil, _} ->
{{:error, :spider_not_running}, state.started_spiders}
{{pid, crawl_id}, new_started_spiders} ->
case Crawly.Utils.get_settings(
:on_spider_closed_callback,
spider_name
) do
nil -> :ignore
fun -> apply(fun, [spider_name, crawl_id, reason])
end
Crawly.EngineSup.stop_spider(pid)
{:ok, new_started_spiders}
end
{:reply, msg, %Crawly.Engine{state | started_spiders: new_started_spiders}}
end
def handle_call({:get_spider, spider_name}, _from, state) do
return =
if Enum.member?(state.known_spiders, spider_name) do
format_spider_info(spider_name, state)
end
{:reply, return, state}
end
def handle_cast(:refresh_spider_list, state) do
updated = get_updated_known_spider_list(state.known_spiders)
{:noreply, %Crawly.Engine{state | known_spiders: updated}}
end
# Builds a spider_info map for a single known spider
defp format_spider_info(spider_name, state) do
pid = Map.get(state.started_spiders, spider_name)
%{
name: spider_name,
status: if(is_nil(pid), do: :stopped, else: :started),
pid: pid
}
end
defp get_updated_known_spider_list(known \\ []) do
new = Crawly.Utils.list_spiders()
(known ++ new)
|> Enum.uniq()
end
defp configure_spider_logs(spider_name, crawl_id) do
log_dir =
Crawly.Utils.get_settings(
:log_dir,
spider_name,
System.tmp_dir()
)
current_unix_timestamp = :os.system_time(:second)
Logger.add_backend({LoggerFileBackend, :debug})
log_file_path =
Path.join([
log_dir,
inspect(spider_name),
# underscore separates the timestamp and the crawl_id
inspect(current_unix_timestamp) <> "_" <> crawl_id
]) <> ".log"
Logger.configure_backend({LoggerFileBackend, :debug},
path: log_file_path,
level: :debug,
metadata_filter: [crawl_id: crawl_id]
)
Logger.debug("Writing logs to #{log_file_path}")
end
end
|
lib/crawly/engine.ex
| 0.801819
| 0.410106
|
engine.ex
|
starcoder
|
defmodule AWS.Billingconductor do
@moduledoc """
Amazon Web Services Billing Conductor is a fully managed service that you can
use to customize a [pro forma](https://docs.aws.amazon.com/enterprisebilling/6b7c01c5-b592-467e-9769-90052eaf359c/userguide/understanding-eb.html#eb-other-definitions)
version of your billing data each month, to accurately show or chargeback your
end customers.
Amazon Web Services Billing Conductor doesn't change the way you're billed by
Amazon Web Services each month by design. Instead, it provides you with a
mechanism to configure, generate, and display rates to certain customers over a
given billing period. You can also analyze the difference between the rates you
apply to your accounting groupings relative to your actual rates from Amazon Web
Services. As a result of your Amazon Web Services Billing Conductor
configuration, the payer account can also see the custom rate applied on the
billing details page of the [Amazon Web Services Billing console](https://console.aws.amazon.com/billing), or configure a cost and usage
report per billing group.
This documentation shows how you can configure Amazon Web Services Billing
Conductor using its API. For more information about using the [Amazon Web Services Billing Conductor](https://console.aws.amazon.com/enterprisebilling/)
user interface, see the [ Amazon Web Services Enterprise Billing Console User Guide](https://docs.aws.amazon.com/enterprisebilling/6b7c01c5-b592-467e-9769-90052eaf359c/userguide/what-is-enterprisebilling.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2021-07-30",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "billingconductor",
global?: true,
protocol: "rest-json",
service_id: "billingconductor",
signature_version: "v4",
signing_name: "billingconductor",
target_prefix: nil
}
end
@doc """
Connects an array of account IDs in a consolidated billing family to a
predefined billing group.
The account IDs must be a part of the consolidated billing family during the
current month, and not already associated with another billing group. The
maximum number of accounts that can be associated in one call is 30.
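For example (a sketch only; the credentials, ARN, and account IDs are
placeholders, and the input map mirrors the `AssociateAccounts` request
shape):
```
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
AWS.Billingconductor.associate_accounts(client, %{
  "Arn" => "arn:aws:billingconductor::123456789012:billinggroup/example",
  "AccountIds" => ["111122223333", "444455556666"]
})
```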
"""
def associate_accounts(%Client{} = client, input, options \\ []) do
url_path = "/associate-accounts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Connects an array of `PricingRuleArns` to a defined `PricingPlan`.
The maximum number `PricingRuleArn` that can be associated in one call is 30.
"""
def associate_pricing_rules(%Client{} = client, input, options \\ []) do
url_path = "/associate-pricing-rules"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Associates a batch of resources to a percentage custom line item.
"""
def batch_associate_resources_to_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/batch-associate-resources-to-custom-line-item"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Disassociates a batch of resources from a percentage custom line item.
"""
def batch_disassociate_resources_from_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/batch-disassociate-resources-from-custom-line-item"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a billing group that resembles a consolidated billing family that Amazon
Web Services charges, based on the predefined pricing plan computation.
"""
def create_billing_group(%Client{} = client, input, options \\ []) do
url_path = "/create-billing-group"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a custom line item that can be used to create a one-time fixed charge
that can be applied to a single billing group for the current or previous
billing period.
The one-time fixed charge is either a fee or discount.
"""
def create_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/create-custom-line-item"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a pricing plan that is used for computing Amazon Web Services charges
for billing groups.
"""
def create_pricing_plan(%Client{} = client, input, options \\ []) do
url_path = "/create-pricing-plan"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a pricing rule that can be associated with a pricing plan, or a set of
pricing plans.
"""
def create_pricing_rule(%Client{} = client, input, options \\ []) do
url_path = "/create-pricing-rule"
{headers, input} =
[
{"ClientToken", "X-Amzn-Client-Token"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes a billing group.
"""
def delete_billing_group(%Client{} = client, input, options \\ []) do
url_path = "/delete-billing-group"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes the custom line item identified by the given ARN in the current, or
previous billing period.
"""
def delete_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/delete-custom-line-item"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes a pricing plan.
The pricing plan must not be associated with any billing groups to delete
successfully.
"""
def delete_pricing_plan(%Client{} = client, input, options \\ []) do
url_path = "/delete-pricing-plan"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes the pricing rule identified by the input Amazon Resource Name (ARN).
"""
def delete_pricing_rule(%Client{} = client, input, options \\ []) do
url_path = "/delete-pricing-rule"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Removes the specified list of account IDs from the given billing group.
"""
def disassociate_accounts(%Client{} = client, input, options \\ []) do
url_path = "/disassociate-accounts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Disassociates a list of pricing rules from a pricing plan.
"""
def disassociate_pricing_rules(%Client{} = client, input, options \\ []) do
url_path = "/disassociate-pricing-rules"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
**Amazon Web Services Billing Conductor is in beta release and is subject to change.
Your use of Amazon Web Services Billing Conductor is subject to the Beta Service
Participation terms of the [Amazon Web Services Service Terms](https://aws.amazon.com/service-terms/) (Section 1.10).**
This is a paginated call to list linked accounts that are linked to the payer
account for the specified time period. If no information is provided, the
current billing period is used. The response will optionally include the billing
group associated with the linked account.
"""
def list_account_associations(%Client{} = client, input, options \\ []) do
url_path = "/list-account-associations"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
A paginated call to retrieve a summary report of actual Amazon Web Services
charges and the calculated Amazon Web Services charges based on the associated
pricing plan of a billing group.
"""
def list_billing_group_cost_reports(%Client{} = client, input, options \\ []) do
url_path = "/list-billing-group-cost-reports"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
A paginated call to retrieve a list of billing groups for the given billing
period.
If you don't provide a billing period, the current billing period is used.
"""
def list_billing_groups(%Client{} = client, input, options \\ []) do
url_path = "/list-billing-groups"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
A paginated call to get a list of all custom line items (FFLIs) for the given
billing period.
If you don't provide a billing period, the current billing period is used.
"""
def list_custom_line_items(%Client{} = client, input, options \\ []) do
url_path = "/list-custom-line-items"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
A paginated call to get pricing plans for the given billing period.
If you don't provide a billing period, the current billing period is used.
"""
def list_pricing_plans(%Client{} = client, input, options \\ []) do
url_path = "/list-pricing-plans"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
A list of the pricing plans associated with a pricing rule.
"""
def list_pricing_plans_associated_with_pricing_rule(%Client{} = client, input, options \\ []) do
url_path = "/list-pricing-plans-associated-with-pricing-rule"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Describes a pricing rule that can be associated to a pricing plan, or set of
pricing plans.
"""
def list_pricing_rules(%Client{} = client, input, options \\ []) do
url_path = "/list-pricing-rules"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Lists the pricing rules associated with a pricing plan.
"""
def list_pricing_rules_associated_to_pricing_plan(%Client{} = client, input, options \\ []) do
url_path = "/list-pricing-rules-associated-to-pricing-plan"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
List the resources associated to a custom line item.
"""
def list_resources_associated_to_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/list-resources-associated-to-custom-line-item"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Lists the tags for a resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
204
)
end
@doc """
Associates the specified tags to a resource with the specified `resourceArn`.
If existing tags on a resource are not specified in the request parameters, they
are not changed.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes specified tags from a resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
{query_params, input} =
[
{"TagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
This updates an existing billing group.
"""
def update_billing_group(%Client{} = client, input, options \\ []) do
url_path = "/update-billing-group"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Update an existing custom line item in the current or previous billing period.
"""
def update_custom_line_item(%Client{} = client, input, options \\ []) do
url_path = "/update-custom-line-item"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
This updates an existing pricing plan.
"""
def update_pricing_plan(%Client{} = client, input, options \\ []) do
url_path = "/update-pricing-plan"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates an existing pricing rule.
"""
def update_pricing_rule(%Client{} = client, input, options \\ []) do
url_path = "/update-pricing-rule"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
end
|
lib/aws/generated/billingconductor.ex
| 0.825941
| 0.529446
|
billingconductor.ex
|
starcoder
|
defmodule OMG.API.State.Transaction do
@moduledoc """
Internal representation of transaction spent on Plasma chain
"""
alias OMG.API.Crypto
alias OMG.API.State.Transaction.Signed
@zero_address Crypto.zero_address()
@max_inputs 2
defstruct [
:blknum1,
:txindex1,
:oindex1,
:blknum2,
:txindex2,
:oindex2,
:cur12,
:newowner1,
:amount1,
:newowner2,
:amount2
]
@type t() :: %__MODULE__{
blknum1: non_neg_integer(),
txindex1: non_neg_integer(),
oindex1: 0 | 1,
blknum2: non_neg_integer(),
txindex2: non_neg_integer(),
oindex2: 0 | 1,
cur12: currency(),
newowner1: Crypto.address_t(),
amount1: pos_integer(),
newowner2: Crypto.address_t(),
amount2: non_neg_integer()
}
@type currency :: Crypto.address_t()
@doc """
Creates transaction from utxo positions and outputs. Provides simple, stateless validation on arguments.
#### Assumptions:
* length of inputs between 1 and `@max_inputs`
* length of outputs between 0 and `@max_inputs`
* the same currency for each output
* all amounts are non-negative integers
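For example, spending a single 10-unit input into a 7-unit output with a fee
of 3 (a sketch assuming `alias OMG.API.State.Transaction` and that `eth` and
`alice` are bound to 20-byte binaries):
```
Transaction.create_from_utxos(
  [%{blknum: 1000, txindex: 0, oindex: 0, currency: eth, amount: 10}],
  [%{owner: alice, amount: 7}],
  3
)
```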
"""
@spec create_from_utxos(
[
%{
blknum: pos_integer(),
txindex: non_neg_integer(),
oindex: 0 | 1,
currency: Crypto.address_t(),
amount: pos_integer()
}
],
[%{owner: Crypto.address_t(), amount: non_neg_integer()}],
non_neg_integer()
) :: {:ok, t()} | {:error, atom()}
def create_from_utxos(inputs, outputs, fee)
def create_from_utxos(inputs, _, _) when not is_list(inputs), do: {:error, :inputs_should_be_list}
def create_from_utxos(_, outputs, _) when not is_list(outputs), do: {:error, :outputs_should_be_list}
def create_from_utxos(inputs, _, _) when length(inputs) > @max_inputs, do: {:error, :too_many_inputs}
def create_from_utxos([], _, _), do: {:error, :at_least_one_input_required}
def create_from_utxos(_, outputs, _) when length(outputs) > @max_inputs, do: {:error, :too_many_outputs}
def create_from_utxos(_, _, fee) when fee < 0, do: {:error, :invalid_fee}
def create_from_utxos(inputs, outputs, fee) do
with {:ok, currency} <- validate_currency(inputs),
:ok <- validate_amount(inputs),
:ok <- validate_amount(outputs),
:ok <- amounts_add_up?(inputs, outputs, fee) do
{
:ok,
new(
inputs |> Enum.map(&{&1.blknum, &1.txindex, &1.oindex}),
currency,
outputs |> Enum.map(&{&1.owner, &1.amount})
)
}
end
end
defp validate_currency(inputs) do
currencies =
inputs
|> Enum.map(& &1.currency)
|> Enum.uniq()
if match?([_], currencies),
do: {:ok, currencies |> hd()},
else: {:error, :currency_mixing_not_possible}
end
# Validates amount in both inputs and outputs
defp validate_amount(items) do
all_valid? =
items
|> Enum.map(& &1.amount)
|> Enum.all?(fn amount -> is_integer(amount) and amount >= 0 end)
if all_valid?,
do: :ok,
else: {:error, :amount_noninteger_or_negative}
end
defp amounts_add_up?(inputs, outputs, fee) do
spent =
inputs
|> Enum.map(& &1.amount)
|> Enum.sum()
received =
outputs
|> Enum.map(& &1.amount)
|> Enum.sum()
cond do
spent < received ->
{:error, :not_enough_funds_to_cover_spend}
spent < received + fee ->
{:error, :not_enough_funds_to_cover_fee}
true ->
:ok
end
end
@doc """
Adds empty (zeroes) inputs and/or outputs to reach the expected size
of 2 inputs and 2 outputs.
assumptions:
```
length(inputs) <= 2
length(outputs) <= 2
```
"""
@spec new(
list({pos_integer, pos_integer, 0 | 1}),
Crypto.address_t(),
list({Crypto.address_t(), pos_integer})
) :: t()
def new(inputs, currency, outputs) do
inputs = inputs ++ List.duplicate({0, 0, 0}, @max_inputs - Kernel.length(inputs))
outputs = outputs ++ List.duplicate({@zero_address, 0}, @max_inputs - Kernel.length(outputs))
inputs =
inputs
|> Enum.with_index(1)
|> Enum.map(fn {{blknum, txindex, oindex}, index} ->
%{
String.to_existing_atom("blknum#{index}") => blknum,
String.to_existing_atom("txindex#{index}") => txindex,
String.to_existing_atom("oindex#{index}") => oindex
}
end)
|> Enum.reduce(%{}, &Map.merge/2)
outputs =
outputs
|> Enum.with_index(1)
|> Enum.map(fn {{newowner, amount}, index} ->
%{
String.to_existing_atom("newowner#{index}") => newowner,
String.to_existing_atom("amount#{index}") => amount
}
end)
|> Enum.reduce(%{cur12: currency}, &Map.merge/2)
struct(__MODULE__, Map.merge(inputs, outputs))
end
def account_address?(@zero_address), do: false
def account_address?(address) when is_binary(address) and byte_size(address) == 20, do: true
def account_address?(_), do: false
def encode(tx) do
[
tx.blknum1,
tx.txindex1,
tx.oindex1,
tx.blknum2,
tx.txindex2,
tx.oindex2,
tx.cur12,
tx.newowner1,
tx.amount1,
tx.newowner2,
tx.amount2
]
|> ExRLP.encode()
end
def hash(%__MODULE__{} = tx) do
tx
|> encode
|> Crypto.hash()
end
@doc """
Signs transaction using private keys
private keys are in the binary form, e.g.:
```<<54, 43, 207, 67, 140, 160, 190, 135, 18, 162, 70, 120, 36, 245, 106, 165, 5, 101, 183,
55, 11, 117, 126, 135, 49, 50, 12, 228, 173, 219, 183, 175>>```
"""
@spec sign(t(), Crypto.priv_key_t(), Crypto.priv_key_t()) :: Signed.t()
def sign(%__MODULE__{} = tx, priv1, priv2) do
encoded_tx = encode(tx)
signature1 = signature(encoded_tx, priv1)
signature2 = signature(encoded_tx, priv2)
transaction = %Signed{raw_tx: tx, sig1: signature1, sig2: signature2}
%{transaction | signed_tx_bytes: Signed.encode(transaction)}
end
defp signature(_encoded_tx, <<>>), do: <<0::size(520)>>
defp signature(encoded_tx, priv), do: Crypto.signature(encoded_tx, priv)
end
|
apps/omg_api/lib/state/transaction.ex
| 0.86592
| 0.634317
|
transaction.ex
|
starcoder
|
defmodule Statifier.Schema.State do
@moduledoc """
A State node in a Schema tree
State nodes model the different states a state machine can transition to.
They optionally form a hierarchy where a state is a parent to other state
node(s). This hierarchy forms a lineage where when in a child state you are
also in the parent state. The rules and behavior surrounding how child
states are entered is dependent on the `type` of the state node - see
"Classifications" section below.
## The Statifier.Schema.State struct:
The public fields are:
* `id` - name by which state can be identified and transitioned to
* `type` - whether this is a :final, :parallel, or :state node
* `transitions` - all of the `Statifier.Schema.Transition`s of the state
* `on_entry` - function to run when entering state
* `on_exit` - function to run when leaving state
## Classifications
State nodes fall into one of three classifications. They are: `:parallel`,
`:final`, or `:state` nodes. A `:parallel` node has two or more child states
and they all become activated when entering the parent state. A `:final`
state node must be an atomic state and signifies that the state has reached
its final state. Once a state enters final it can no longer transition. A
`:state` node can have 0 or more children. If it has more than one child
then only one can be active at any one time.
Non `:parallel` state nodes can be `atomic` or `compound`. An `atomic` state
is one that does not have any child states. A compound state has one or more
child states. A `parallel` state by definition will always be a `compound`
state.
### Examples
Classic Microwave (non parallel) example:
- microwave machine
- off state
- on state
- idle state
- cooking state
The above example does not contain any `:parallel` state nodes. It does
contain two `compound` states. The top level machine which can be off or on,
and the on state which is either idle or cooking. Lastly, it contains three
`atomic` states: off, idle, and cooking.
Parallel Microwave example:
- microwave machine
- parallel (oven state)
- engine state
- off state
- on state
- cooking
- idle
- door state
- closed state
- open state
This example does contain a `:parallel` which maintains the state of the door
and the state of the engine of the microwave. When in the oven state you are
also in the door state and the engine state. Here there are four `compound`
states: oven, engine, on, and door. For `atomic` states there are five: off,
cooking, idle, closed, and open.
"""
alias Statifier.Schema.Transition
@typedoc """
State identifiers are used to name a state and give an identifier that
`Statifier.Schema.Transition` use as their target parameter in order to move
to a state.
"""
@type state_identifier :: String.t()
# TODO: figure out how we want to represent on_enter/exit
@type t :: %__MODULE__{
id: state_identifier(),
type: :state | :parallel | :final,
transitions: [Transition.t()],
on_exit: String.t() | nil,
on_entry: String.t() | nil
}
defstruct id: nil, type: nil, transitions: [], on_entry: nil, on_exit: nil
@spec new(map()) :: t()
@doc """
Creates a new `Statifier.Schema.State`
If no `id` is supplied in params then one will be generated.
## Options
* `id` - state identifier
* `initial` - initial state (only for compound states)
* `type` - is this a :parallel, :state, or :final node
# TODO: These are not implemented yet
* `on_entry` - function to execute when entering state
* `on_exit` - function to execute when exiting state
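A minimal compound state might be built like this (a sketch assuming
`alias Statifier.Schema.State`; the ids are illustrative):
```
State.new(%{id: "on", type: :state, initial: "idle"})
```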
"""
def new(params) do
dynamic_defaults = %{
id: "TODO: make this random?"
}
params = Map.merge(dynamic_defaults, params)
state = struct(__MODULE__, params)
# If supplied an initial convert it to a transition with no cond or event
if Map.has_key?(params, :initial) do
# TODO: would the initial transition be an internal?
add_transition(state, Transition.new(target: Map.get(params, :initial), type: :internal))
else
state
end
end
@doc """
Adds a transition to a state.
This should only be used as a helper to build a state node, and not doing a
running machine since state machines need to be deterministic.
"""
def add_transition(%__MODULE__{} = state, %Transition{} = transition) do
%__MODULE__{
state
| transitions: Enum.concat(state.transitions, [transition])
}
end
end
|
impl/ex/lib/schema/state.ex
| 0.801548
| 0.950227
|
state.ex
|
starcoder
|
defmodule LocalHex.Registry do
@moduledoc """
Module meant for maintaining a registry of available packages of a repository.
Current `Registry` is kept in a simple Map structure and looks like the following:
```
%{
"package_1" => [
%{
version: "0.0.1"
},
%{
version: "0.0.2"
}
],
"package_2" => [
%{
version: "0.0.1"
retired: %{
reason: :RETIRED_OTHER | :RETIRED_INVALID | :RETIRED_SECURITY | :RETIRED_DEPRECATED | :RETIRED_RENAMED,
message: "Please update to newer version"
}
},
%{
version: "0.0.2"
},
...
],
...
}
```
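For example, a release can be added and later retired (a sketch; the package
map shape is assumed from `add_package/2` below):
```
registry
|> LocalHex.Registry.add_package(%{name: "package_1", release: %{version: "0.0.3"}})
|> LocalHex.Registry.retire_package_release("package_1", "0.0.3", "deprecated", "Use 0.0.4")
```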
"""
def add_package(registry, package) do
Map.update(registry, package.name, [package.release], fn releases ->
[package.release | releases]
|> Enum.uniq_by(fn %{version: version} -> version end)
|> Enum.sort(&(Version.compare(&1.version, &2.version) == :lt))
end)
end
def all_versions_of_packages(registry) do
registry
|> Map.keys()
|> Enum.map(&all_versions_of_package(registry, &1))
end
def all_versions_of_package(registry, package_name) do
versions =
registry[package_name]
|> Enum.map(fn entry -> entry[:version] end)
# sort semantically so that, e.g., "0.0.10" sorts after "0.0.9"
|> Enum.sort(&(Version.compare(&1, &2) == :lt))
%{
name: package_name,
internal: true,
versions: versions
}
end
def has_version?(registry, package_name, version) do
registry[package_name]
|> Enum.any?(fn release ->
release.version == version
end)
end
def revert_release(registry, package_name, version) do
Map.update!(registry, package_name, fn releases ->
Enum.reject(releases, &(&1.version == version))
end)
end
def retire_package_release(registry, package_name, version, reason, message) do
Map.update!(registry, package_name, fn releases ->
for release <- releases do
if release.version == version do
retired = %{
reason: retirement_reason(reason),
message: message
}
Map.put(release, :retired, retired)
else
release
end
end
end)
end
def unretire_package_release(registry, package_name, version) do
Map.update!(registry, package_name, fn releases ->
for release <- releases do
if release.version == version do
Map.delete(release, :retired)
else
release
end
end
end)
end
defp retirement_reason("invalid"), do: :RETIRED_INVALID
defp retirement_reason("security"), do: :RETIRED_SECURITY
defp retirement_reason("deprecated"), do: :RETIRED_DEPRECATED
defp retirement_reason("renamed"), do: :RETIRED_RENAMED
defp retirement_reason(_), do: :RETIRED_OTHER
end
|
lib/local_hex/registry.ex
| 0.739799
| 0.697119
|
registry.ex
|
starcoder
|
defmodule Number.Delimit do
@moduledoc """
Provides functions to delimit numbers into strings.
"""
@doc """
Formats a number into a string with grouped thousands using `delimiter`.
## Parameters
* `number` - A float or integer to convert.
* `options` - A keyword list of options. See the documentation of all
available options below for more information.
## Options
* `:precision` - The number of decimal places to include. Default: 2
* `:delimiter` - The character to use to delimit the number by thousands.
Default: ","
* `:separator` - The character to use to separate the number from the decimal
places. Default: "."
Default configuration for these options can be specified in the `Number`
application configuration.
config :number, delimit: [
precision: 3,
delimiter: ",",
separator: "."
]
## Examples
iex> Number.Delimit.number_to_delimited(nil)
nil
iex> Number.Delimit.number_to_delimited(998.999)
"999.00"
iex> Number.Delimit.number_to_delimited(-234234.234)
"-234,234.23"
iex> Number.Delimit.number_to_delimited("998.999")
"999.00"
iex> Number.Delimit.number_to_delimited("-234234.234")
"-234,234.23"
iex> Number.Delimit.number_to_delimited(12345678)
"12,345,678.00"
iex> Number.Delimit.number_to_delimited(12345678.05)
"12,345,678.05"
iex> Number.Delimit.number_to_delimited(12345678, delimiter: ".")
"12.345.678.00"
iex> Number.Delimit.number_to_delimited(12345678, delimiter: ",")
"12,345,678.00"
iex> Number.Delimit.number_to_delimited(12345678.05, separator: " ")
"12,345,678 05"
iex> Number.Delimit.number_to_delimited(98765432.98, delimiter: " ", separator: ",")
"98 765 432,98"
iex> Number.Delimit.number_to_delimited(Decimal.new(9998.2))
"9,998.20"
iex> Number.Delimit.number_to_delimited "123456789555555555555555555555555"
"123,456,789,555,555,555,555,555,555,555,555.00"
iex> Number.Delimit.number_to_delimited Decimal.new("123456789555555555555555555555555")
"123,456,789,555,555,555,555,555,555,555,555.00"
"""
@spec number_to_delimited(Number.t(), list) :: String.t()
def number_to_delimited(number, options \\ [])
def number_to_delimited(nil, _options), do: nil
def number_to_delimited(number, options) do
float = number |> Number.Conversion.to_float()
options = Keyword.merge(config(), options)
prefix = if float < 0, do: "-", else: ""
delimited =
case to_integer(number) do
{:ok, number} ->
number = delimit_integer(number, options[:delimiter])
if options[:precision] > 0 do
decimals = String.pad_trailing("", options[:precision], "0")
Enum.join([to_string(number), options[:separator], decimals])
else
number
end
{:error, other} ->
other
|> to_string
|> Number.Conversion.to_decimal()
|> delimit_decimal(options[:delimiter], options[:separator], options[:precision])
end
delimited = String.Chars.to_string(delimited)
prefix <> delimited
end
defp to_integer(integer) when is_integer(integer) do
{:ok, integer}
end
defp to_integer(%{__struct__: Decimal} = decimal) do
try do
{:ok, Decimal.to_integer(decimal)}
rescue
_ ->
{:error, decimal}
end
end
defp to_integer(string) when is_binary(string) do
try do
{:ok, String.to_integer(string)}
rescue
_ ->
{:error, string}
end
end
defp to_integer(other) do
{:error, other}
end
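# Walks the digits of the absolute value from least to most significant,
# inserting `delimiter` after every complete group of three digits.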
defp delimit_integer(number, delimiter) do
abs(number)
|> Integer.to_charlist()
|> :lists.reverse()
|> delimit_integer(delimiter, [])
end
defp delimit_integer([a, b, c, d | tail], delimiter, acc) do
delimit_integer([d | tail], delimiter, [delimiter, c, b, a | acc])
end
defp delimit_integer(list, _, acc) do
:lists.reverse(list) ++ acc
end
@doc false
def delimit_decimal(decimal, delimiter, separator, precision) do
string =
decimal
|> Decimal.round(precision)
|> Decimal.to_string(:normal)
[number, decimals] =
case String.split(string, ".") do
[number, decimals] -> [number, decimals]
[number] -> [number, ""]
end
decimals = String.pad_trailing(decimals, precision, "0")
integer =
number
|> String.to_integer()
|> delimit_integer(delimiter)
separator = if precision == 0, do: "", else: separator
Enum.join([integer, separator, decimals])
end
defp config do
defaults = [
delimiter: ",",
separator: ".",
precision: 2
]
Keyword.merge(defaults, Application.get_env(:number, :delimit, []))
end
end
|
lib/number/delimit.ex
| 0.88113
| 0.554832
|
delimit.ex
|
starcoder
|
defmodule KaufmannEx.Config do
@moduledoc """
Convenience Getters for pulling config.exs values
A config.exs may look like
```
# test env
config :kaufmann_ex,
consumer_group: System.get_env("CONSUMER_GROUP"),
default_topic: System.get_env("KAFKA_TOPIC"),
max_demand: 1, # batch size; leave at 1 for slow topics.
stages: 16,
event_handler_mod: nil, # Be sure to specify your event handler
gen_consumer_mod: KaufmannEx.Consumer.GenConsumer,
producer_mod: KaufmannEx.Publisher,
schema_path: "priv/schemas",
schema_registry_uri: System.get_env("SCHEMA_REGISTRY_PATH"),
service_id: System.get_env("HOSTNAME"),
service_name: "SampleService"
```
"""
@doc """
`Application.get_env(:kaufmann_ex, :consumer_group)`
"""
@spec consumer_group() :: String.t() | nil
def consumer_group, do: Application.get_env(:kaufmann_ex, :consumer_group)
@doc """
`Application.get_env(:kaufmann_ex, :default_topic)`
"""
@spec default_topic() :: String.t() | nil
def default_topic, do: Application.get_env(:kaufmann_ex, :default_topic)
@doc """
`default_topic/0` in a list
`[KaufmannEx.Config.default_topic()]`
"""
@spec default_topics() :: [String.t()]
def default_topics, do: Application.get_env(:kaufmann_ex, :default_topics, [default_topic()])
@doc """
`Application.get_env(:kaufmann_ex, :subscription_topics, default_topics())`
Determines which topics the Kaufmann Supervisor will subscribe to
"""
@spec subscription_topics() :: [String.t()]
def subscription_topics,
do: Application.get_env(:kaufmann_ex, :subscription_topics, default_topics())
@spec default_publish_topic() :: String.t() | nil
def default_publish_topic,
do: Application.get_env(:kaufmann_ex, :default_publish_topic, default_topic())
@doc """
`Application.get_env(:kaufmann_ex, :event_handler_mod)`
"""
@spec event_handler() :: atom | nil
def event_handler, do: Application.get_env(:kaufmann_ex, :event_handler_mod)
@doc """
`Application.get_env(:kaufmann_ex, :producer_mod)`
"""
@spec producer_mod() :: atom | nil
def producer_mod, do: Application.get_env(:kaufmann_ex, :producer_mod, KaufmannEx.Publisher)
@doc """
`Application.get_env(:kaufmann_ex, :schema_path)`
"""
@spec schema_path() :: [String.t() | nil]
def schema_path,
do: List.flatten([Application.get_env(:kaufmann_ex, :schema_path, "priv/schemas")])
@doc """
`Application.get_env(:kaufmann_ex, :schema_registry_uri)`
"""
@spec schema_registry_uri() :: String.t() | nil
def schema_registry_uri, do: Application.get_env(:kaufmann_ex, :schema_registry_uri)
@doc """
`Application.get_env(:kaufmann_ex, :service_name)`
"""
@spec service_name() :: String.t() | nil
def service_name, do: Application.get_env(:kaufmann_ex, :service_name)
@doc """
`Application.get_env(:kaufmann_ex, :service_id)`
"""
@spec service_id() :: String.t() | nil
def service_id, do: Application.get_env(:kaufmann_ex, :service_id)
@doc """
Application.get_env(:kaufmann_ex, :max_demand, 1)
"""
@spec max_demand() :: integer()
def max_demand, do: Application.get_env(:kaufmann_ex, :max_demand, 1)
@doc """
Application.get_env(:kaufmann_ex, :stages, 16)
"""
@spec stages() :: integer()
def stages, do: Application.get_env(:kaufmann_ex, :stages, 16)
@doc """
Application.get_env(:kaufmann_ex, :gen_consumer_mod)
"""
@spec gen_consumer_mod() :: atom
def gen_consumer_mod,
do: Application.get_env(:kaufmann_ex, :gen_consumer_mod, KaufmannEx.Consumer.GenConsumer)
@doc """
Partition selection strategy; the default is `:random`, options are `[:random, :md5]`.
Application.get_env(:kaufmann_ex, :partition_strategy, :random)
"""
@spec partition_strategy() :: :random | :md5
def partition_strategy, do: Application.get_env(:kaufmann_ex, :partition_strategy, :random)
@doc """
partitioning strategy, only option is default
"""
@spec topic_strategy() :: :default
def topic_strategy, do: Application.get_env(:kaufmann_ex, :topic_strategy, :default)
@spec schema_cache_expires_in_ms() :: integer
def schema_cache_expires_in_ms,
do: Application.get_env(:kaufmann_ex, :schema_cache_expires_in_ms, 10 * 60 * 1000)
def commit_strategy, do: Application.get_env(:kaufmann_ex, :commit_strategy, :async_commit)
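@doc """
Looks up the transcoder module registered for `format` under the
`:transcoder` keyword list. For example, given a hypothetical configuration
such as `config :kaufmann_ex, transcoder: [json: MyApp.JsonTranscoder]`,
`transcoder(:json)` would return `MyApp.JsonTranscoder`.
"""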
@spec transcoder(atom) :: atom
def transcoder(format),
do:
:kaufmann_ex
|> Application.get_env(:transcoder)
|> Keyword.get(format)
def transcoders,
do:
:kaufmann_ex
|> Application.get_env(:transcoder)
|> Keyword.values()
end
|
lib/kaufmann_ex/config.ex
| 0.803598
| 0.478102
|
config.ex
|
starcoder
|
defimpl Vivid.Rasterize, for: Vivid.Line do
alias Vivid.{Point, Line, Bounds}
@moduledoc """
Generates points between the origin and termination point of the line
for rendering using the Digital Differential Analyzer (DDA) algorithm.
"""
@doc ~S"""
Rasterize all points of `line` within `bounds` into a `MapSet`.
## Examples
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(3,3))
...> |> Vivid.Rasterize.rasterize(Vivid.Bounds.init(0, 0, 3, 3))
#MapSet<[#Vivid.Point<{1, 1}>, #Vivid.Point<{2, 2}>, #Vivid.Point<{3, 3}>]>
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(4,2))
...> |> Vivid.Rasterize.rasterize(Vivid.Bounds.init(0, 0, 4, 4))
MapSet.new([
%Vivid.Point{x: 1, y: 1},
%Vivid.Point{x: 2, y: 1},
%Vivid.Point{x: 3, y: 2},
%Vivid.Point{x: 4, y: 2}
])
iex> Vivid.Line.init(Vivid.Point.init(4,4), Vivid.Point.init(4,1))
...> |> Vivid.Rasterize.rasterize(Vivid.Bounds.init(0, 0, 4, 4))
MapSet.new([
%Vivid.Point{x: 4, y: 4},
%Vivid.Point{x: 4, y: 3},
%Vivid.Point{x: 4, y: 2},
%Vivid.Point{x: 4, y: 1}
])
"""
@spec rasterize(Line.t(), Bounds.t()) :: MapSet.t()
def rasterize(%Line{} = line, bounds) do
# Snap the line's endpoints to integer (pixel) coordinates.
origin = line |> Line.origin() |> Point.round()
term = line |> Line.termination() |> Point.round()
line = Line.init(origin, term)
dx = line |> Line.x_distance()
dy = line |> Line.y_distance()
steps = choose_largest_of(abs(dx), abs(dy))
points =
if steps == 0 do
MapSet.new([origin])
else
x_increment = dx / steps
y_increment = dy / steps
points = MapSet.new([origin])
current_x = origin |> Point.x()
current_y = origin |> Point.y()
reduce_points({points, steps, current_x, current_y, x_increment, y_increment})
end
points
|> Stream.map(&Point.round(&1))
|> Stream.filter(&Bounds.contains?(bounds, &1))
|> Enum.into(MapSet.new())
end
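# Walks the line one DDA step at a time: each iteration advances the
# current coordinate by the per-step x/y increments and accumulates the
# resulting point.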
defp reduce_points({points, 0, _, _, _, _}), do: points
defp reduce_points({points, steps, current_x, current_y, x_increment, y_increment}) do
next_x = current_x + x_increment
next_y = current_y + y_increment
steps = steps - 1
points = MapSet.put(points, Point.init(next_x, next_y))
reduce_points({points, steps, next_x, next_y, x_increment, y_increment})
end
defp choose_largest_of(a, b) when a > b, do: a
defp choose_largest_of(_, b), do: b
end
|
lib/vivid/rasterize/line.ex
| 0.936431
| 0.762203
|
line.ex
|
starcoder
|
defprotocol PersistentTree.Day09.Tree do
def add(tree, val)
def preorder(tree)
def postorder(tree)
def in_order(tree)
end
defmodule PersistentTree.Day09.Leaf,
do: defstruct []
defmodule PersistentTree.Day09.Node,
do: defstruct [:item, :left, :right]
defimpl PersistentTree.Day09.Tree, for: PersistentTree.Day09.Leaf do
alias PersistentTree.Day09.Node
def add(empty, val), do: %Node{item: val, left: empty, right: empty}
def preorder(_empty), do: []
def postorder(_empty), do: []
def in_order(_empty), do: []
end
defimpl PersistentTree.Day09.Tree, for: PersistentTree.Day09.Node do
alias PersistentTree.Day09.{Node, Leaf, Tree}
def add(node = %Node{item: item}, val) when item == val, do: node
def add(%Node{item: item, left: left, right: right}, val) when item > val,
do: %Node{
item: item,
left: Tree.add(left, val),
right: right
}
def add(%Node{item: item, left: left, right: right}, val) when item < val,
do: %Node{
item: item,
left: left,
right: Tree.add(right, val)
}
def preorder(%Node{item: item, left: left, right: right}),
do: [item]
|> Stream.concat(Tree.preorder(left))
|> Stream.concat(Tree.preorder(right))
def postorder(%Node{item: item, left: left, right: right}),
do: Tree.postorder(left)
|> Stream.concat(Tree.postorder(right))
|> Stream.concat([item])
def in_order(%Node{item: item, left: left, right: right}),
do: Tree.in_order(left)
|> Stream.concat([item])
|> Stream.concat(Tree.in_order(right))
end
defmodule PersistentTree.Day09 do
alias PersistentTree.Day09.{Leaf, Tree}
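# Illustrative usage: building a tree by repeated insertion and walking it
# in order yields the values sorted.
#
#     PersistentTree.Day09.new()
#     |> PersistentTree.Day09.add(2)
#     |> PersistentTree.Day09.add(1)
#     |> PersistentTree.Day09.add(3)
#     |> PersistentTree.Day09.in_order()
#     |> Enum.to_list()
#     #=> [1, 2, 3]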
def new(), do: %Leaf{}
def add(tree, val),
do: tree
|> Tree.add(val)
def preorder(tree),
do: tree
|> Tree.preorder()
def postorder(tree),
do: tree
|> Tree.postorder()
def in_order(tree),
do: tree
|> Tree.in_order()
end
|
persistent_tree/lib/persistent_tree/day09.ex
| 0.734215
| 0.592342
|
day09.ex
|
starcoder
|
defmodule LogjamAgent.Action do
alias LogjamAgent.Instrumentation
@moduledoc """
Use this module if you want to activate Logjam reporting for your
Phoenix controllers. It automatically instruments all exported functions
in your module to submit data to the logjam service.
## Example:
```elixir
defmodule UsersController do
use LogjamAgent.Action, except: [update: 2]
def index(conn, params) do
# information will be reported to logjam
end
def update(conn, params) do
# this function will not report information to logjam
end
end
```
Note that you can exclude actions from being instrumented by specifying the `:except` option.
All actions that match the name and arity as defined in the `:except` keyword list will
be excluded from instrumentation.
Beside this local list of actions to be excluded you can also configure a global
list of actions to be excluded in all modules. This is done via the `:instrumentation`
configuration.
```elixir
config :logjam_agent, :instrumentation,
except: [show: 1]
```
"""
defmacro __using__(opts \\ []) do
quote do
import LogjamAgent.Action
Module.register_attribute(__MODULE__, :logjam_enabled_functions, accumulate: true)
excluded_actions = Keyword.get(unquote(opts), :except, [])
Module.register_attribute(__MODULE__, :logjam_excluded_functions, accumulate: false)
Module.put_attribute(__MODULE__, :logjam_excluded_functions, excluded_actions)
@before_compile LogjamAgent.Action
@on_definition LogjamAgent.Action
end
end
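# Records every public function definition (unless excluded) so that
# `__before_compile__/1` can re-emit it wrapped with Logjam instrumentation.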
def __on_definition__(env, kind, name, args, guards, body)
def __on_definition__(%{module: mod}, :def, name, args, guards, [do: body]) do
unless Instrumentation.exclude_action?(mod, name, Enum.count(args)) do
Module.put_attribute(mod, :logjam_enabled_functions, %Instrumentation.Definition{name: name, args: args, guards: guards, body: body})
end
end
def __on_definition__(_env, _kind, _name, _args, _guards, _body), do: nil
defmacro __before_compile__(%{module: mod}) do
logjam_enabled_functions = Module.get_attribute(mod, :logjam_enabled_functions)
instrumented_actions = Instrumentation.instrument_all(mod, logjam_enabled_functions, Instrumentation.Action)
quote do
unquote_splicing(instrumented_actions)
end
end
end
|
lib/logjam_agent/action.ex
| 0.827967
| 0.717408
|
action.ex
|
starcoder
|
defmodule Lens do
alias Focus.Types
@moduledoc """
Lenses combine getters and setters for keys in data structures.
Lenses should match/operate over a single value in a data structure,
e.g. a key in a map/struct.
"""
@enforce_keys [:get, :put]
defstruct [:get, :put]
@type t :: %Lens{
get: (any -> any),
put: ((any -> any) -> any)
}
@doc """
Define a lens to Focus on a part of a data structure.
## Examples
iex> person = %{name: "Homer"}
iex> name_lens = Lens.make_lens(:name)
iex> name_lens |> Focus.view(person)
"Homer"
iex> name_lens |> Focus.set(person, "Bart")
%{name: "Bart"}
"""
@spec make_lens(any) :: Lens.t()
def make_lens(path) do
%Lens{
get: fn s -> Lensable.getter(s, path) end,
put: fn s ->
fn f ->
Lensable.setter(s, path, f)
end
end
}
end
@doc """
Automatically generate the valid lenses for the supplied map-like data structure.
## Examples
iex> lisa = %{name: "Lisa", pets: %{cat: "Snowball"}}
iex> lisa_lenses = Lens.make_lenses(lisa)
iex> lisa_lenses.name
...> |> Focus.view(lisa)
"Lisa"
iex> pet_lenses = Lens.make_lenses(lisa.pets)
iex> lisa_lenses.pets
...> ~> pet_lenses.cat
...> |> Focus.set(lisa, "Snowball II")
%{name: "Lisa", pets: %{cat: "Snowball II"}}
"""
@spec make_lenses(Types.traversable()) :: %{
optional(atom) => Lens.t(),
optional(String.t()) => Lens.t()
}
def make_lenses(%{} = structure) when is_map(structure) do
for key <- Map.keys(structure), into: %{} do
{key, Lens.make_lens(key)}
end
end
@doc """
Define a struct and derive lenses for the struct's keys as functions
in the module.
Examples assume the following module:
```elixir
defmodule PersonExample do
import Lens
deflenses name: nil, age: nil
end
```
## Example
iex> function_exported?(PersonExample, :age_lens, 0)
true
iex> function_exported?(PersonExample, :name_lens, 0)
true
iex> bart = %PersonExample{name: "Bart", age: 10}
iex> PersonExample.name_lens |> Focus.view(bart)
"Bart"
"""
defmacro deflenses(fields) do
quote do
Module.register_attribute(__MODULE__, :struct_fields, accumulate: true)
for field <- unquote(fields) do
Module.put_attribute(__MODULE__, :struct_fields, field)
end
Module.eval_quoted(
__ENV__,
[
Lens.__defstruct__(@struct_fields),
Lens.__deflenses__(@struct_fields)
]
)
end
end
@doc false
def __deflenses__(fields) do
if Keyword.keyword?(fields) do
for {field, _val} <- fields do
quote do
def unquote(:"#{field}_lens")() do
Lens.make_lens(unquote(field))
end
end
end
else
for field <- fields do
quote do
def unquote(:"#{field}_lens")() do
Lens.make_lens(unquote(field))
end
end
end
end
end
@doc false
def __defstruct__(fields) do
quote do
defstruct unquote(Macro.escape(fields))
defimpl Lensable, for: __MODULE__ do
def getter(s, x), do: Map.get(s, x)
def setter(s, x, f), do: Map.put(s, x, f)
end
end
end
@doc """
A lens that focuses on an index in a list.
## Examples
iex> first_elem = Lens.idx(0)
iex> first_elem |> Focus.view([1,2,3,4,5])
1
iex> bad_index = Lens.idx(10)
iex> bad_index |> Focus.view([1,2,3])
nil
"""
@spec idx(number) :: Lens.t()
def idx(num) when is_number(num), do: make_lens(num)
defimpl Focusable do
@doc """
View the data that an optic Focuses on.
## Examples
iex> marge = %{
...> name: "Marge",
...> address: %{
...> street: "123 Fake St.",
...> city: "Springfield"
...> }
...> }
iex> name_lens = Lens.make_lens(:name)
iex> Focus.view(name_lens, marge)
"Marge"
"""
@spec view(Lens.t(), Types.traversable()) :: any | nil
def view(%Lens{get: get}, structure), do: get.(structure)
@doc """
Modify the part of a data structure that a lens Focuses on.
## Examples
iex> marge = %{name: "Marge", address: %{street: "123 Fake St.", city: "Springfield"}}
iex> name_lens = Lens.make_lens(:name)
iex> Focus.over(name_lens, marge, &String.upcase/1)
%{name: "MARGE", address: %{street: "123 Fake St.", city: "Springfield"}}
"""
@spec over(Lens.t(), Types.traversable(), (any -> any)) :: Types.traversable()
def over(%Lens{put: setter} = lens, structure, f) do
with {:ok, d} <- Lens.safe_view(lens, structure) do
setter.(structure).(f.(d))
end
end
@doc """
Update the part of a data structure the lens Focuses on.
## Examples
iex> marge = %{name: "Marge", address: %{street: "123 Fake St.", city: "Springfield"}}
iex> name_lens = Lens.make_lens(:name)
iex> Focus.set(name_lens, marge, "Homer")
%{name: "Homer", address: %{street: "123 Fake St.", city: "Springfield"}}
iex> marge = %{name: "Marge", address: %{street: "123 Fake St.", city: "Springfield"}}
iex> address_lens = Lens.make_lens(:address)
iex> street_lens = Lens.make_lens(:street)
iex> composed = Focus.compose(address_lens, street_lens)
iex> Focus.set(composed, marge, "42 Wallaby Way")
%{name: "Marge", address: %{street: "42 Wallaby Way", city: "Springfield"}}
"""
@spec set(Lens.t(), Types.traversable(), any) :: Types.traversable()
def set(lens, structure, val) do
over(lens, structure, fn _ -> val end)
end
end
@doc """
Get a piece of a data structure that a lens Focuses on;
returns {:ok, data} | {:error, :bad_lens_path} | {:error, :bad_data_structure}
## Examples
iex> marge = %{name: "Marge", address: %{street: "123 Fake St.", city: "Springfield"}}
iex> name_lens = Lens.make_lens(:name)
iex> Lens.safe_view(name_lens, marge)
{:ok, "Marge"}
"""
@spec safe_view(Lens.t(), Types.traversable()) ::
{:error, {:lens, :bad_path}} | {:error, {:lens, :bad_data_structure}} | {:ok, any}
def safe_view(%Lens{} = lens, structure) do
res = Focus.view(lens, structure)
case res do
{:error, err} -> {:error, err}
_ -> {:ok, res}
end
end
end
|
lib/lens/lens.ex
| 0.881098
| 0.70805
|
lens.ex
|
starcoder
|
defmodule Range do
@moduledoc """
Ranges represent a sequence of zero, one or many, ascending
or descending integers with a common difference called step.
The most common form of creating and matching on ranges is
via the [`first..last`](`../2`) and [`first..last//step`](`..///3`)
notations, auto-imported from `Kernel`:
iex> 1 in 1..10
true
iex> 5 in 1..10
true
iex> 10 in 1..10
true
Ranges are always inclusive in Elixir. When a step is defined,
integers will only belong to the range if they match the step:
iex> 5 in 1..10//2
true
iex> 4 in 1..10//2
false
When defining a range without a step, the step will be
defined based on the first and last position of the
range. If `last >= first`, it will be an increasing range
with a step of 1. Otherwise, it is a decreasing range.
Note however implicit decreasing ranges are deprecated.
Therefore, if you need a decreasing range from `3` to `1`,
prefer to write `3..1//-1` instead.
`../0` can also be used as a shortcut to create the range `0..-1//1`,
also known as the full-slice range:
iex> ..
0..-1//1
## Use cases
Ranges typically have two uses in Elixir: as a collection or
to represent a slice of another data structure.
### Ranges as collections
Ranges in Elixir are enumerables and therefore can be used
with the `Enum` module:
iex> Enum.to_list(1..3)
[1, 2, 3]
iex> Enum.to_list(3..1//-1)
[3, 2, 1]
iex> Enum.to_list(1..5//2)
[1, 3, 5]
Ranges may also have a single element:
iex> Enum.to_list(1..1)
[1]
iex> Enum.to_list(1..1//2)
[1]
Or even no elements at all:
iex> Enum.to_list(10..0//1)
[]
iex> Enum.to_list(0..10//-1)
[]
The full-slice range, returned by `../0`, is an empty collection:
iex> Enum.to_list(..)
[]
### Ranges as slices
Ranges are also frequently used to slice collections.
You can slice strings or any enumerable:
iex> String.slice("elixir", 1..4)
"lixi"
iex> Enum.slice([0, 1, 2, 3, 4, 5], 1..4)
[1, 2, 3, 4]
In those cases, the first and last values of the range
are mapped to positions in the collections.
If a negative number is given, it maps to a position
from the back:
iex> String.slice("elixir", 1..-2//1)
"lixi"
iex> Enum.slice([0, 1, 2, 3, 4, 5], 1..-2//1)
[1, 2, 3, 4]
The range `0..-1//1`, returned by `../0`, returns the
collection as is, which is why it is called the full-slice
range:
iex> String.slice("elixir", ..)
"elixir"
iex> Enum.slice([0, 1, 2, 3, 4, 5], ..)
[0, 1, 2, 3, 4, 5]
## Definition
An increasing range `first..last//step` is a range from `first`
to `last` increasing by `step` where `step` must be a positive
integer and all values `v` must be `first <= v and v <= last`.
Therefore, a range `10..0//1` is an empty range because there
is no value `v` that is `10 <= v and v <= 0`.
Similarly, a decreasing range `first..last//step` is a range
from `first` to `last` decreasing by `step` where `step` must
be a negative integer and values `v` must be `first >= v and v >= last`.
Therefore, a range `0..10//-1` is an empty range because there
is no value `v` that is `0 >= v and v >= 10`.
## Representation
Internally, ranges are represented as structs:
iex> range = 1..9//2
1..9//2
iex> first..last//step = range
iex> first
1
iex> last
9
iex> step
2
iex> range.step
2
You can access the range fields (`first`, `last`, and `step`)
directly but you should not modify nor create ranges by hand.
Instead use the proper operators or `new/2` and `new/3`.
Ranges implement the `Enumerable` protocol with memory
efficient versions of all `Enumerable` callbacks:
iex> range = 1..10
1..10
iex> Enum.reduce(range, 0, fn i, acc -> i * i + acc end)
385
iex> Enum.count(range)
10
iex> Enum.member?(range, 11)
false
iex> Enum.member?(range, 8)
true
Such function calls are efficient memory-wise no matter the
size of the range. The implementation of the `Enumerable`
protocol uses logic based solely on the endpoints and does
not materialize the whole list of integers.
"""
@enforce_keys [:first, :last, :step]
defstruct first: nil, last: nil, step: nil
@type limit :: integer
@type step :: pos_integer | neg_integer
@type t :: %__MODULE__{first: limit, last: limit, step: step}
@type t(first, last) :: %__MODULE__{first: first, last: last, step: step}
@doc """
Creates a new range.
If `first` is less than `last`, the range will be increasing from
`first` to `last`. If `first` is equal to `last`, the range will contain
one element, which is the number itself.
If `first` is greater than `last`, the range will be decreasing from `first`
to `last`, albeit this behaviour is deprecated. Therefore, it is advised to
explicitly list the step with `new/3`.
## Examples
iex> Range.new(-100, 100)
-100..100
"""
@spec new(limit, limit) :: t
def new(first, last) when is_integer(first) and is_integer(last) do
# TODO: Deprecate inferring a range with a step of -1 on Elixir v1.17
step = if first <= last, do: 1, else: -1
%Range{first: first, last: last, step: step}
end
def new(first, last) do
raise ArgumentError,
"ranges (first..last) expect both sides to be integers, " <>
"got: #{inspect(first)}..#{inspect(last)}"
end
@doc """
Creates a new range with `step`.
## Examples
iex> Range.new(-100, 100, 2)
-100..100//2
"""
@doc since: "1.12.0"
@spec new(limit, limit, step) :: t
def new(first, last, step)
when is_integer(first) and is_integer(last) and is_integer(step) and step != 0 do
%Range{first: first, last: last, step: step}
end
def new(first, last, step) do
raise ArgumentError,
"ranges (first..last//step) expect both sides to be integers and the step to be a " <>
"non-zero integer, got: #{inspect(first)}..#{inspect(last)}//#{inspect(step)}"
end
@doc """
Returns the size of `range`.
## Examples
iex> Range.size(1..10)
10
iex> Range.size(1..10//2)
5
iex> Range.size(1..10//3)
4
iex> Range.size(1..10//-1)
0
iex> Range.size(10..1)
10
iex> Range.size(10..1//-1)
10
iex> Range.size(10..1//-2)
5
iex> Range.size(10..1//-3)
4
iex> Range.size(10..1//1)
0
"""
@doc since: "1.12.0"
def size(range)
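# A non-empty range holds one element for every whole `step` between
# `first` and `last`, plus the starting element itself.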
def size(first..last//step) when step > 0 and first > last, do: 0
def size(first..last//step) when step < 0 and first < last, do: 0
def size(first..last//step), do: abs(div(last - first, step)) + 1
# TODO: Remove me on v2.0
def size(%{__struct__: Range, first: first, last: last} = range) do
step = if first <= last, do: 1, else: -1
size(Map.put(range, :step, step))
end
@doc """
Checks if two ranges are disjoint.
## Examples
iex> Range.disjoint?(1..5, 6..9)
true
iex> Range.disjoint?(5..1, 6..9)
true
iex> Range.disjoint?(1..5, 5..9)
false
iex> Range.disjoint?(1..5, 2..7)
false
Steps are also considered when computing the ranges to be disjoint:
iex> Range.disjoint?(1..10//2, 2..10//2)
true
# First element in common in all below is 29
iex> Range.disjoint?(2..100//3, 9..100//5)
false
iex> Range.disjoint?(101..2//-3, 99..9//-5)
false
iex> Range.disjoint?(1..100//14, 8..100//21)
false
iex> Range.disjoint?(57..-1//-14, 8..100//21)
false
iex> Range.disjoint?(1..100//14, 51..8//-21)
false
# If 29 is out of range
iex> Range.disjoint?(1..28//14, 8..28//21)
true
iex> Range.disjoint?(2..28//3, 9..28//5)
true
"""
@doc since: "1.8.0"
@spec disjoint?(t, t) :: boolean
def disjoint?(first1..last1//step1 = range1, first2..last2//step2 = range2) do
if size(range1) == 0 or size(range2) == 0 do
true
else
{first1, last1, step1} = normalize(first1, last1, step1)
{first2, last2, step2} = normalize(first2, last2, step2)
cond do
last2 < first1 or last1 < first2 ->
true
abs(step1) == 1 and abs(step2) == 1 ->
false
true ->
# We need to find the first intersection of two arithmetic
# progressions and see if they belong within the ranges
# https://math.stackexchange.com/questions/1656120/formula-to-find-the-first-intersection-of-two-arithmetic-progressions
{gcd, u, v} = Integer.extended_gcd(-step1, step2)
c = first1 - first2 + step2 - step1
t1 = -c / step1 * u
t2 = -c / step2 * v
t = max(floor(t1) + 1, floor(t2) + 1)
x = div(c * u + t * step2, gcd) - 1
y = div(c * v + t * step1, gcd) - 1
x < 0 or first1 + x * step1 > last1 or
y < 0 or first2 + y * step2 > last2
end
end
end
@compile inline: [normalize: 3]
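# Rewrites a decreasing range as its increasing mirror image so the
# disjointness check above only has to reason about ascending progressions.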
defp normalize(first, last, step) when first > last, do: {last, first, -step}
defp normalize(first, last, step), do: {first, last, step}
@doc false
@deprecated "Pattern match on first..last//step instead"
def range?(term)
def range?(first..last) when is_integer(first) and is_integer(last), do: true
def range?(_), do: false
end
defimpl Enumerable, for: Range do
def reduce(first..last//step, acc, fun) do
reduce(first, last, acc, fun, step)
end
# TODO: Remove me on v2.0
def reduce(%{__struct__: Range, first: first, last: last} = range, acc, fun) do
step = if first <= last, do: 1, else: -1
reduce(Map.put(range, :step, step), acc, fun)
end
defp reduce(_first, _last, {:halt, acc}, _fun, _step) do
{:halted, acc}
end
defp reduce(first, last, {:suspend, acc}, fun, step) do
{:suspended, acc, &reduce(first, last, &1, fun, step)}
end
defp reduce(first, last, {:cont, acc}, fun, step)
when step > 0 and first <= last
when step < 0 and first >= last do
reduce(first + step, last, fun.(first, acc), fun, step)
end
defp reduce(_, _, {:cont, acc}, _fun, _up) do
{:done, acc}
end
def member?(first..last//step = range, value) when is_integer(value) do
cond do
Range.size(range) == 0 ->
{:ok, false}
first <= last ->
{:ok, first <= value and value <= last and rem(value - first, step) == 0}
true ->
{:ok, last <= value and value <= first and rem(value - first, step) == 0}
end
end
# TODO: Remove me on v2.0
def member?(%{__struct__: Range, first: first, last: last} = range, value)
when is_integer(value) do
step = if first <= last, do: 1, else: -1
member?(Map.put(range, :step, step), value)
end
def member?(_, _value) do
{:ok, false}
end
def count(range) do
{:ok, Range.size(range)}
end
def slice(first.._//step = range) do
{:ok, Range.size(range), &slice(first + &1 * step, step + &3 - 1, &2)}
end
# TODO: Remove me on v2.0
def slice(%{__struct__: Range, first: first, last: last} = range) do
step = if first <= last, do: 1, else: -1
slice(Map.put(range, :step, step))
end
defp slice(current, _step, 1), do: [current]
defp slice(current, step, remaining), do: [current | slice(current + step, step, remaining - 1)]
end
defimpl Inspect, for: Range do
import Inspect.Algebra
import Kernel, except: [inspect: 2]
def inspect(first..last//1, opts) when last >= first do
concat([to_doc(first, opts), "..", to_doc(last, opts)])
end
def inspect(first..last//step, opts) do
concat([to_doc(first, opts), "..", to_doc(last, opts), "//", to_doc(step, opts)])
end
# TODO: Remove me on v2.0
def inspect(%{__struct__: Range, first: first, last: last} = range, opts) do
step = if first <= last, do: 1, else: -1
inspect(Map.put(range, :step, step), opts)
end
end
|
lib/elixir/lib/range.ex
| 0.872775
| 0.76238
|
range.ex
|
starcoder
|
defmodule Tensor.Matrix do
alias Tensor.{Vector, Matrix, Tensor}
defmodule Inspect do
@doc false
def inspect(matrix, _opts) do
"""
#Matrix<(#{Tensor.Inspect.dimension_string(matrix)})
#{inspect_contents(matrix)}
>
"""
end
defp inspect_contents(matrix) do
contents_inspect =
matrix
|> Matrix.to_list()
|> Enum.map(fn row ->
row
|> Enum.map(fn elem ->
elem
|> inspect
|> String.pad_leading(8)
end)
|> Enum.join(",")
end)
# |> Enum.join("│\n│")
top_row_length = String.length(List.first(contents_inspect) || "")
bottom_row_length = String.length(List.last(contents_inspect) || "")
top = "┌#{String.pad_trailing("", top_row_length)}┐\n│"
bottom = "│\n└#{String.pad_trailing("", bottom_row_length)}┘"
contents_str = contents_inspect |> Enum.join("│\n│")
"#{top}#{contents_str}#{bottom}"
end
end
@doc """
Creates a new matrix of dimensions `height` x `width`.
Optionally pass in a fourth argument, which will be the default values the matrix will be filled with. (default: `0`)
"""
def new(list_of_lists \\ [], height, width, identity \\ 0)
when width >= 0 and height >= 0 and (width > 0 or height > 0) do
Tensor.new(list_of_lists, [height, width], identity)
end
@doc """
Creates an 'identity' matrix.
This is a square matrix of size `size` that has the `diag_identity` value (default: `1`) at the diagonal, and the rest is `0`.
Optionally pass in a third argument, which is the value the rest of the elements in the matrix will be set to.
"""
def identity_matrix(diag_identity \\ 1, size, rest_identity \\ 0) when size > 0 do
elems = Stream.cycle([diag_identity]) |> Enum.take(size)
diag(elems, rest_identity)
end
@doc """
Creates a square matrix where the diagonal elements are filled with the elements of the given List or Vector.
The second argument is an optional `identity` to be used for all elements not part of the diagonal.
"""
def diag(list_or_vector, identity \\ 0)
def diag(vector = %Tensor{dimensions: [_length]}, identity) do
diag(Tensor.to_list(vector), identity)
end
def diag(list = [_ | _], identity) when is_list(list) do
size = length(list)
matrix = new([], size, size, identity)
list
|> Enum.with_index()
|> Enum.reduce(matrix, fn {e, i}, mat ->
put_in(mat, [i, i], e)
end)
end
@doc """
True if the matrix is square and the same as its transpose.
"""
def symmetric?(matrix = %Tensor{dimensions: [s, s]}) do
matrix == matrix |> transpose
end
def symmetric?(%Tensor{dimensions: [_, _]}), do: false
def square?(%Tensor{dimensions: [s, s]}), do: true
def square?(%Tensor{dimensions: [_, _]}), do: false
@doc """
Returns the `width` of the matrix.
"""
def width(%Tensor{dimensions: [_height, width]}), do: width
@doc """
Returns the `height` of the matrix.
"""
def height(%Tensor{dimensions: [height, _width]}), do: height
def transpose(matrix = %Tensor{dimensions: [_, _]}) do
Tensor.transpose(matrix, 1)
# new_contents = Enum.reduce(matrix.contents, %{}, fn {row_key, row_map}, new_row_map ->
# Enum.reduce(row_map, new_row_map, fn {col_key, value}, new_row_map ->
# map = Map.put_new(new_row_map, col_key, %{})
# put_in(map, [col_key, row_key], value)
# end)
# end)
# %Tensor{identity: matrix.identity, contents: new_contents, dimensions: [h, w]}
end
@doc """
Takes a vector, and returns a 1×`n` matrix.
"""
def row_matrix(vector = %Tensor{dimensions: [_]}) do
Tensor.lift(vector)
end
@doc """
"""
def column_matrix(vector = %Tensor{dimensions: [_]}) do
vector
|> Tensor.lift()
|> Matrix.transpose()
end
@doc """
Returns the rows of this matrix as a list of Vectors.
"""
def rows(matrix = %Tensor{dimensions: [_h, _w]}) do
Tensor.slices(matrix)
end
@doc """
Builds a Matrix up from a list of vectors.
Will only work as long as the vectors have the same length.
"""
def from_rows(list_of_vectors) do
Tensor.from_slices(list_of_vectors)
end
@doc """
Returns the columns of this matrix as a list of Vectors.
"""
def columns(matrix = %Tensor{dimensions: [_, _]}) do
matrix
|> transpose
|> rows
end
@doc """
Returns the `n`-th row of the matrix as a Vector.
This is the same as doing matrix[n]
"""
def row(matrix, n) do
matrix[n]
end
@doc """
Returns the `n`-th column of the matrix as a Vector.
If you're doing a lot of calls to `column`, consider transposing the matrix
and calling `rows` on that transposed matrix, as it will be faster.
"""
def column(matrix, n) do
transpose(matrix)[n]
end
@doc """
Returns the values in the main diagonal (top left to bottom right) as list
"""
def main_diagonal(matrix = %Tensor{dimensions: [h, w]}) do
for i <- 0..(min(w, h) - 1) do
matrix[i][i]
end
end
def flip_vertical(matrix = %Tensor{dimensions: [h, _w]}) do
new_contents =
for {r, v} <- matrix.contents, into: %{} do
{h - 1 - r, v}
end
%Tensor{matrix | contents: new_contents}
end
def flip_horizontal(matrix) do
matrix
|> transpose
|> flip_vertical
|> transpose
end
def rotate_counterclockwise(matrix) do
matrix
|> transpose
|> flip_vertical
end
def rotate_clockwise(matrix) do
matrix
|> flip_vertical
|> transpose
end
def rotate_180(matrix) do
matrix
|> flip_vertical
|> flip_horizontal
end
@doc """
Returns the sum of the main diagonal of a square matrix.
Note that this method will fail when called with a non-square matrix
"""
def trace(matrix = %Tensor{dimensions: [n, n]}) do
Enum.sum(main_diagonal(matrix))
end
def trace(%Tensor{dimensions: [height, width]}) do
raise Tensor.ArithmeticError, """
Matrix.trace/1 is not defined for non-square matrices!
height: #{inspect(height)}
width: #{inspect(width)}
"""
end
@doc """
Returns the current identity of matrix `matrix`.
"""
defdelegate identity(matrix), to: Tensor
@doc """
`true` if `a` is a Matrix.
"""
defdelegate matrix?(a), to: Tensor
@doc """
Returns the element at `index` from `matrix`.
"""
defdelegate fetch(matrix, index), to: Tensor
@doc """
Returns the element at `index` from `matrix`. If `index` is out of bounds, returns `default`.
"""
defdelegate get(matrix, index, default), to: Tensor
defdelegate pop(matrix, index, default), to: Tensor
defdelegate get_and_update(matrix, index, function), to: Tensor
defdelegate merge_with_index(matrix_a, matrix_b, function), to: Tensor
defdelegate merge(matrix_a, matrix_b, function), to: Tensor
defdelegate to_list(matrix), to: Tensor
defdelegate lift(matrix), to: Tensor
defdelegate map(matrix, function), to: Tensor
defdelegate with_coordinates(matrix), to: Tensor
defdelegate sparse_map_with_coordinates(matrix, function), to: Tensor
defdelegate dense_map_with_coordinates(matrix, function), to: Tensor
defdelegate to_sparse_map(matrix), to: Tensor
@doc """
Converts a sparse map where each key is a [height, width] coordinate list,
and each value is anything to a Matrix with the given height, width and contents.
See `to_sparse_map/1` for the inverse operation.
"""
def from_sparse_map(matrix, height, width, identity \\ 0) do
Tensor.from_sparse_map(matrix, [height, width], identity)
end
defdelegate add(a, b), to: Tensor
defdelegate sub(a, b), to: Tensor
defdelegate mult(a, b), to: Tensor
defdelegate div(a, b), to: Tensor
defdelegate add_number(a, b), to: Tensor
defdelegate sub_number(a, b), to: Tensor
defdelegate mult_number(a, b), to: Tensor
defdelegate div_number(a, b), to: Tensor
@doc """
Elementwise addition of matrices `matrix_a` and `matrix_b`.
"""
defdelegate add_matrix(matrix_a, matrix_b), to: Tensor, as: :add_tensor
@doc """
Elementwise subtraction of `matrix_b` from `matrix_a`.
"""
defdelegate sub_matrix(matrix_a, matrix_b), to: Tensor, as: :sub_tensor
@doc """
Elementwise multiplication of `matrix_a` with `matrix_b`.
"""
defdelegate mult_matrix(matrix_a, matrix_b), to: Tensor, as: :mult_tensor
@doc """
Elementwise division of `matrix_a` and `matrix_b`.
Make sure that the identity of `matrix_b` isn't 0 before doing this.
"""
defdelegate div_matrix(matrix_a, matrix_b), to: Tensor, as: :div_tensor
@doc """
Calculates the Matrix Product. This is a new matrix, obtained by taking
the `m` rows of the `m_by_n_matrix` and the `p` columns of the `n_by_p_matrix`,
and calculating the dot-product (see `Vector.dot_product/2`) of each such pair of `n`-length vectors.
The resulting values are stored at position [m][p] in the final matrix.
There is no way to perform this operation in a sparse way, so it is performed dense.
The identities of the two matrices cannot be kept; `nil` is used as identity of the output Matrix.
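A quick illustrative sketch (plain code, not a doctest):
    a = Matrix.new([[1, 2, 3], [4, 5, 6]], 2, 3)
    b = Matrix.new([[7, 8], [9, 10], [11, 12]], 3, 2)
    Matrix.product(a, b) |> Matrix.to_list()
    #=> [[58, 64], [139, 154]]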
"""
def product(m_by_n_matrix, n_by_p_matrix)
def product(a = %Tensor{dimensions: [m, n]}, b = %Tensor{dimensions: [n, p]}) do
b_t = transpose(b)
list_of_lists =
for r <- 0..(m - 1) do
for c <- 0..(p - 1) do
Vector.dot_product(a[r], b_t[c])
end
end
Tensor.new(list_of_lists, [m, p])
end
def product(%Tensor{dimensions: [height_a, width_a]}, %Tensor{dimensions: [height_b, width_b]}) do
raise Tensor.ArithmeticError, """
Cannot compute Matrix.product if the width of matrix `a` does not match the height of matrix `b`!
height_a: #{inspect(height_a)}
width_a: #{inspect(width_a)}
height_b: #{inspect(height_b)}
width_b: #{inspect(width_b)}
"""
end
@doc """
Calculates the product of `matrix` with `matrix`, `exponent` times.
If `exponent` == 0, then the result will be the identity matrix with the same dimensions as the given matrix.
This is calculated using the fast [exponentiation by squaring](https://en.wikipedia.org/wiki/Exponentiation_by_squaring) algorithm.
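For example (illustrative), `power(m, 5)` is evaluated as `m * ((m*m) * (m*m))`,
needing only three matrix products instead of the four a naive loop would use.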
"""
def power(matrix, exponent)
def power(matrix = %Tensor{dimensions: [a, a]}, negative_number) when negative_number < 0 do
product(Matrix.identity_matrix(-1, a), power(matrix, -negative_number))
end
def power(%Tensor{dimensions: [a, a]}, 0), do: Matrix.identity_matrix(a)
def power(matrix = %Tensor{dimensions: [a, a]}, 1), do: matrix
def power(matrix = %Tensor{dimensions: [a, a]}, exponent) when rem(exponent, 2) == 0 do
power(product(matrix, matrix), Kernel.div(exponent, 2))
end
def power(matrix = %Tensor{dimensions: [a, a]}, exponent) when rem(exponent, 2) == 1 do
product(matrix, power(product(matrix, matrix), Kernel.div(exponent, 2)))
end
def power(%Tensor{dimensions: [height, width]}, exponent) do
raise Tensor.ArithmeticError, """
Cannot compute Matrix.power with non-square matrices!
height: #{inspect(height)}
width: #{inspect(width)}
exponent: #{inspect(exponent)}
"""
end
end
|
lib/tensor/matrix.ex
| 0.919679
| 0.77081
|
matrix.ex
|
starcoder
|
defmodule Bolt.Cogs.Tempmute do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.Converters
alias Bolt.ErrorFormatters
alias Bolt.Events.Handler
alias Bolt.Schema.{Infraction, MuteRole}
alias Bolt.{Helpers, ModLog, Parsers, Repo}
alias Nosedrum.Predicates
alias Nostrum.Api
alias Nostrum.Struct.User
@impl true
def usage, do: ["tempmute <user:member> <duration:duration> [reason:str...]"]
@impl true
def description,
do: """
Temporarily mutes the given `user` by applying the configured mute role.
Requires the `MANAGE_MESSAGES` permission.
```rs
// Mute @Dude#0007 for 2 days and 12 hours.
.tempmute @Dude#0007 2d12h
// Mute @Dude#0007 for 5 hours with a reason provided for the infraction.
.tempmute @Dude#0007 5h spamming in #general
```
"""
@impl true
def predicates, do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_messages)]
@impl true
def command(msg, [user_str, duration | reason_list]) do
reason = Enum.join(reason_list, " ")
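# Happy path: resolve the member, ensure no tempmute is already active,
# look up the configured mute role, parse the duration, apply the role and
# record the infraction; any failure falls through to the `else` clauses.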
response =
with {:ok, member} <- Converters.to_member(msg.guild_id, user_str),
nil <-
Repo.get_by(Infraction,
guild_id: msg.guild_id,
user_id: member.user.id,
type: "tempmute",
active: true
),
%MuteRole{role_id: mute_role_id} <- Repo.get(MuteRole, msg.guild_id),
{:ok, expiry} <- Parsers.human_future_date(duration),
{:ok} <- Api.add_guild_member_role(msg.guild_id, member.user.id, mute_role_id),
infraction_map <- %{
type: "tempmute",
guild_id: msg.guild_id,
actor_id: msg.author.id,
user_id: member.user.id,
expires_at: expiry,
reason: if(reason != "", do: reason, else: nil),
data: %{
"role_id" => mute_role_id
}
},
{:ok, _struct} <- Handler.create(infraction_map) do
ModLog.emit(
msg.guild_id,
"INFRACTION_CREATE",
"#{User.full_name(msg.author)} has temporarily muted #{User.full_name(member.user)} " <>
"(`#{member.user.id}`) until #{Helpers.datetime_to_human(expiry)}" <>
if(reason != "", do: " (``#{reason}``)", else: "")
)
base_response =
"👌 #{User.full_name(member.user)} is now muted until #{Helpers.datetime_to_human(expiry)}"
if reason != "" do
base_response <> " (`#{reason}`)"
else
base_response
end
else
nil ->
"🚫 no mute role is set up on this server"
%Infraction{id: active_id} ->
"🚫 that user is already muted (##{active_id})"
error ->
ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, _args) do
response = "ℹ️ usage: `#{List.first(usage())}`"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
|
lib/bolt/cogs/tempmute.ex
| 0.84994
| 0.562026
|
tempmute.ex
|
starcoder
|
defmodule Lab42.StateMachine do
use Lab42.StateMachine.Types
@moduledoc """
# Lab42.StateMachine
## Synopsis
A simple State Machine operating on a list of input values, a map of transition definitions
and an accumulator.
## What is the _Transition Definitions Map_?
It maps each `state` to a list of `transitions`. Each `transition` is of the following format, as
defined by type `transition_t`
```elixir
{trigger, transformer, updater, new_state}
```
## State, did you say state?
Yes I did, and what I mean by it is that the State Machine keeps track of its state (very surprising)
and some data, passed into by the user.
## How do these _Transitions_ transform the input?
The State Machine processes its input in a loop in which, depending on the current input element, designated
as `input`, and the current state, designated as `current_state`, a transition is triggered.
Data is passed in like an accumulator in `Enum.reduce`; it is designated as `data`.
The triggered transition will define the `current_state` of the next loop, if any, and perform actions defining what
goes to the output and how `data` changes.
As one might guess these actions are performed by the `transformer` and the `updater` of the triggered transition.
## So what is the contract?
### Starting the whole thing.
Create the _Transition Defintions Map_, knowing that the State Machine will start with `current_state` equal to `:start`,
yes I know, naming is difficult ;).
Then run the whole thing like that:
```elixir
Lab42.StateMachine.run(input, my_data, %{})
```
The empty map passed in will cause havoc though
iex(0)> input = ~w(alpha)
...(0)> my_data = %{n: 42}
...(0)> Lab42.StateMachine.run(input, my_data, %{})
{:error, "No transitions found for current state", :start}
### Specifying Transitions
A minimal example might be a line counter
iex(1)> input = ~w{alpha beta}
...(1)> count = 0
...(1)> states = %{
...(1)> start: [ {true, fn {_, line} -> line end, fn count, _ -> count + 1 end, :start} ]
...(1)> }
...(1)> run(input, count, states)
{:start, ~w{alpha beta}, 2}
N.B. the `true` trigger always matches; other triggers are either regular expressions, which are used
as follows: `Regex.match?(trigger, input)`, or functions, which are applied as one might guess: `trigger.(input)`
One could argue that a default behavior of copying the current input to the output might be convenient, but that might
lead to difficulties in debugging state machines. (Maybe in later versions with an option for `run`?)
By the same token, some StateMachines, like the counter above, collect the output without needing it; although we will
learn below how to avoid this, a global option to not collect would make the _Transitions Map_ more concise.
Therefore the following will happen
iex(2)> input = ~w{alpha beta}
...(2)> states = %{
...(2)> start: []
...(2)> }
...(2)> run(input, nil, states)
{:error, "No trigger matched the current input \\"alpha\\"", :start}
Let us return to the correctly working example, let us simplify that rather expressive transition
iex(3)> input = ~w{alpha beta}
...(3)> states = %{
...(3)> start: [ {true, :id, fn count, _ -> count + 1 end, :start} ]
...(3)> }
...(3)> run(input, 0, states)
{:start, ~w(alpha beta), 2}
So we can use a shortcut for copying the input to the output, that is better already, but still, why
create the output that is not needed, let us use the atom form of the transformer function
iex(4)> input = ~w{alpha beta}
...(4)> states = %{
...(4)> start: [ {true, fn _ -> :ignore end, fn count, _ -> count + 1 end, :start} ]
...(4)> }
...(4)> run(input, 0, states)
{:start, ~w(), 2}
As there is a shortcut for `:id` so there is one for `:ignore`
iex(5)> input = ~w{alpha beta}
...(5)> states = %{
...(5)> start: [ {true, :ignore, fn count, _ -> count + 1 end, :start} ]
...(5)> }
...(5)> run(input, 0, states)
{:start, ~w(), 2}
But what if we want to have `:ignore` in the output? Let us assume that we want to replace all `"alphas"` with
`:ignore`. We can use the tuple form of the transformer in this case.
This example also demonstrates that we do not need to specify the updater and the new state if these do
not change and that even pushing the input to the output can be omitted, therefore the transitions `{true, :id}` and
`{true}` have the same semantics.
And a third simplification is that we can omit passing nil as data, but note that it will still be present in the result.
iex(6)> input = ~w{alpha beta alpha}
...(6)> states = %{
...(6)> start: [
...(6)> {~r{alpha}, fn _ -> {:push, :ignore} end},
...(6)> {true}] }
...(6)> run(input, states)
{:start, [:ignore, "beta", :ignore], nil}
Now let us look at a complex example, that will use the following features, not yet explored:
- Stopping with the `:halt` atom form of the transformer, by its shortcut
- Stopping with the `{:halt, value}` tuple form of the transformer
- Creating a function to rerun with the same states
- Using constant functions
- Using function triggers
iex(7)> summer = fn data, {_, input} -> %{data|sum: data.sum + input} end
...(7)> states = %{
...(7)> start: [
...(7)> {&(&1>8), :halt},
...(7)> {&(&1<1), fn {_, input} -> {:halt, input} end, constant(:negative)},
...(7)> {&(rem(&1,2)==0), push_constant(:even), summer, :even},
...(7)> {true} ],
...(7)> even: [
...(7)> {&(rem(&1,2)==0), :halt, constant(:more_evens)},
...(7)> {true, :id, summer }
...(7)> ] }
...(7)> state_machine = make_machine(%{sum: 0}, states)
...(7)> [
...(7)> state_machine.(1..10|>Enum.into([])),
...(7)> state_machine.([1, 2, 3]),
...(7)> state_machine.([1, 9]),
...(7)> state_machine.([1, -1]),
...(7)> state_machine.([1, 3, 5]) ]
[ {:even, [1, :even, 3], :more_evens},
{:even, [1, :even, 3], %{sum: 5}},
{:start, [1], %{sum: 0}},
{:start, [1, -1], :negative},
{:start, [1, 3, 5], %{sum: 0}} ]
### Summary of Shortcuts and Transformer Semantics
As we have seen in the examples above, the StateMachine is driven by the _Transitions Map_ and the result of the
`transformer` function.
The returned `data` value is influenced by the result of the `updater` function.
In order to write shorter _Transition Maps_ transitions can be shortened and symbolic function names can be used.
Let us see how transitions are _normalized_ by the StateMachine before they are executed.
| Specified | Normalize | Remarks |
|------------------------------------|------------------------------------------------------|------------------------|
|`{trigger, function, function, state}` | same value | already normalized|
|`{trigger, function, function}` | `{trigger, function, function, current_state}` | normalization is done at runtime, so `current_state` is known |
|`{trigger, function }` | `{trigger, function, identity2, current_state}` | given the interface of `updater` `identity2` is defined as `fn d, _ -> d end` |
|`{trigger}` | `{trigger, identity1, identity2, current_state}` | `identity1` is for the `transformer` and thus `fn {_, v} -> v end` |
As can be seen sometimes the defaults, are not available, because we need to provide the `new_state` in the transition.
In that case the following symbols can be used for the `transformer`
| Symbolic Transformer | Expanded to |
|-------------------------|--------------------------------------|
| `:id` | `fn {_, v} -> v end` |
| `:ignore` | `fn _ -> :ignore end` |
| `:halt` | `fn _ -> :halt end` |
For the `updater` there is only `:id` which expands to `fn d, _ -> d end` as mentioned above.
In action that would give
iex(8)> states = %{
...(8)> start: [{true, :id, :id, :stop}],
...(8)> stop: [{true, fn _ -> {:halt, "..."} end, :id}]
...(8)> }
...(8)> run(~w[Headline Rest MoreRest], states)
{:stop, ~w[Headline ...], nil}
"""
@doc """
A helper creating an updater function that sets data to a constant value
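For example (illustrative), `constant(:negative)` builds `fn _, _ -> :negative end`,
so the returned data is `:negative` regardless of the previous data or input.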
"""
@spec constant(const_type) :: (any(), any() -> const_type) when const_type: any()
def constant(constant_value), do: fn _, _ -> constant_value end
@doc """
A convenience helper to run the same state machine on different inputs
"""
@spec make_machine( any(), maybe(map()) ) :: (list() -> result_t())
def make_machine(data_or_states, states \\ nil)
def make_machine(states, nil), do: make_machine(nil, states)
def make_machine(data, states), do: &run(&1, data, states)
@doc """
A helper creating a transformer function that pushes a constant value to the output
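For example (illustrative), `push_constant(:even)` builds `fn _ -> {:push, :even} end`,
which pushes `:even` to the output regardless of the current input.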
"""
@spec push_constant( const_type ) :: ((any()) -> {:push, const_type}) when const_type: any()
def push_constant(constant_value), do: fn _ -> {:push, constant_value} end
@spec run( list(), any(), maybe(map()) ) :: result_t()
def run(input, data_or_states, states_or_nil \\ nil)
def run(input, states, nil) do
Lab42.StateMachine.Runner.run(:start, input, [], nil, states)
end
def run(input, data, states) do
Lab42.StateMachine.Runner.run(:start, input, [], data, states)
end
end
|
lib/lab42/state_machine.ex
| 0.8727
| 0.945801
|
state_machine.ex
|
starcoder
|
defmodule Pavlov.Case do
@moduledoc """
Use this module to prepare other modules for testing.
## Example
defmodule MySpec do
use Pavlov.Case
it "always passes" do
assert true
end
end
"""
@doc false
defmacro __using__(opts \\ []) do
async = Keyword.get(opts, :async, false)
quote do
use ExUnit.Case, async: unquote(async)
use Pavlov.Callbacks
use Pavlov.Mocks
@stack []
@pending false
Agent.start(fn -> %{} end, name: :pavlov_let_defs)
Agent.start(fn -> %{} end, name: :pavlov_subject_defs)
import Pavlov.Case
import Pavlov.Syntax.Sugar
end
end
@doc """
The cornerstone BDD macro, "it" allows your test to be defined
via a string.
## Example
it "is the truth" do
assert true == true
end
"""
defmacro it(contents) do
quote do
it "is expected", do: unquote(contents)
end
end
defmacro it(desc, var \\ quote(do: _), contents) do
quote do
message = Enum.join(@stack, "") <> unquote(desc)
defit message, unquote(var), @pending do
unquote(contents)
end
end
end
@doc """
Allows you specify a pending test, meaning that it is never run.
## Example
xit "is the truth" do
# This will never run
assert true == true
end
"""
defmacro xit(description, var \\ quote(do: _), contents) do
quote do
defit Enum.join(@stack, "") <> unquote(description), unquote(var), true do
unquote(contents)
end
end
end
@doc """
You can nest your tests under a descriptive name.
Tests can be infinitely nested.
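## Example
    describe "outer context" do
      describe "inner context" do
        it "runs nested" do
          assert true
        end
      end
    end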
"""
defmacro describe(desc, _ \\ quote(do: _), pending \\ false, contents) do
quote do
@stack Enum.concat(@stack, [unquote(desc) <> ", "])
# Closure the old stack so we can use it in defmodule
old_stack = Enum.concat @stack, []
pending = @pending || unquote(pending)
# Defines a new module per describe, thus scoping .let
defmodule Module.concat(__MODULE__, unquote(desc)) do
use ExUnit.Case
@stack old_stack
@pending pending
def subject, do: nil
defoverridable [subject: 0]
# Redefine enclosing let definitions in this module
Agent.get(:pavlov_callback_defs, fn dict ->
Stream.filter dict, fn {module, _name} ->
sub_module? module, __MODULE__
end
end)
|> Stream.map(fn {_module, {periodicity, context, fun}} ->
quote do
use Pavlov.Mocks
before(unquote(periodicity), unquote(context), do: unquote(fun))
end
end)
|> Enum.each(&Module.eval_quoted(__MODULE__, &1))
unquote(contents)
# Redefine enclosing let definitions in this module
Agent.get(:pavlov_let_defs, fn dict ->
Stream.filter dict, fn {module, _lets} ->
sub_module? module, __MODULE__
end
end)
|> Stream.flat_map(fn {_module, lets} ->
Stream.map lets, fn ({name, fun}) ->
quote do: let(unquote(name), do: unquote(fun))
end
end)
|> Enum.each(&Module.eval_quoted(__MODULE__, &1))
# Redefine enclosing subject definitions in this module
Agent.get(:pavlov_subject_defs, fn dict ->
Stream.filter dict, fn {module, _subjects} ->
sub_module? module, __MODULE__
end
end)
|> Stream.flat_map(fn {_module, subjects} ->
Stream.map subjects, fn (fun) ->
quote do: subject(unquote(fun))
end
end)
|> Enum.each(&Module.eval_quoted(__MODULE__, &1))
end
# Cleans context stack
if Enum.count(@stack) > 0 do
@stack Enum.take(@stack, Enum.count(@stack) - 1)
end
end
end
@doc """
Defines a group of tests as pending.
Any other contexts nested within an xdescribe will not run
as well.
"""
defmacro xdescribe(desc, _ \\ quote(do: _), contents) do
quote do
describe unquote(desc), _, true, unquote(contents)
end
end
@doc """
Allows lazy initialization of subjects for your tests.
Subjects created via "let" will never leak into other
contexts (defined via "describe" or "context"), not even
those who are children of the context where the lazy subject
is defined.
Example:
let :lazy do
"oh so lazy"
end
it "lazy initializes" do
assert lazy == "oh so lazy"
end
"""
defmacro let(name, contents) do
quote do
require Pavlov.Utils.Memoize, as: Memoize
Memoize.defmem unquote(name)(), do: unquote(contents[:do])
defoverridable [{unquote(name), 0}]
Agent.update(:pavlov_let_defs, fn(map) ->
new_let = {unquote(Macro.escape name), unquote(Macro.escape contents[:do])}
Dict.put map, __MODULE__, (map[__MODULE__] || []) ++ [new_let]
end)
end
end
@doc """
You can use `subject` to explicitly define the value
that is returned by the subject method in the example
scope. A subject declared in a context will be available
in child contexts as well.
Example:
describe "Array" do
subject do
[1, 2, 3]
end
it "has the prescribed elements" do
assert subject == [1, 2, 3]
end
context "Inner context" do
it "can use an outer-scope subject" do
assert subject == [1, 2, 3]
end
end
end
"""
defmacro subject(contents) do
contents = Macro.escape(contents)
quote bind_quoted: binding do
def subject do
Macro.expand(unquote(contents), __MODULE__)[:do]
end
defoverridable [subject: 0]
Agent.update(:pavlov_subject_defs, fn(map) ->
Dict.put map, __MODULE__, (map[__MODULE__] || []) ++ [contents]
end)
end
end
@doc false
defmacro defit(message, var \\ quote(do: _), pending \\ false, contents) do
contents =
case contents do
[do: _] ->
quote do
unquote(contents)
:ok
end
_ ->
quote do
try(unquote(contents))
:ok
end
end
var = Macro.escape(var)
contents = Macro.escape(contents, unquote: true)
quote bind_quoted: binding do
message = :"#{message}"
Pavlov.Case.__on_definition__(__ENV__, message, pending)
def unquote(message)(unquote(var)) do
Pavlov.Utils.Memoize.flush
unquote(contents)
end
end
end
@doc false
def __on_definition__(env, name, pending \\ false) do
mod = env.module
tags = Module.get_attribute(mod, :tag) ++ Module.get_attribute(mod, :moduletag)
tags = if pending, do: tags ++ [:pending], else: tags
tags = tags |> normalize_tags |> Map.merge(%{line: env.line, file: env.file})
Module.put_attribute(mod, :ex_unit_tests,
%ExUnit.Test{name: name, case: mod, tags: tags})
Module.delete_attribute(mod, :tag)
end
@doc false
def sub_module?(child, parent) do
String.starts_with? "#{parent}", "#{child}"
end
defp normalize_tags(tags) do
Enum.reduce Enum.reverse(tags), %{}, fn
tag, acc when is_atom(tag) -> Map.put(acc, tag, true)
tag, acc when is_list(tag) -> Dict.merge(acc, tag)
end
end
end
|
lib/case.ex
| 0.828002
| 0.409398
|
case.ex
|
starcoder
|
defmodule Legion.Telephony.PhoneNumber do
@moduledoc """
Declares types and utility functions for working with phone numbers.
"""
@typedoc """
Describes the type of the phone number.
"""
@type host_type() ::
:fixed_line
| :mobile
| :fixed_line_or_mobile
| :toll_free
| :premium_rate
| :shared_cost
| :voip
| :personal_number
| :pager
| :uan
| :voicemail
| :unknown
@typedoc """
Type for the phone number.
"""
@type t() :: String.t()
@doc ~S"""
Returns the type of the phone number.
## Examples
iex> Legion.Telephony.PhoneNumber.get_number_type("+90 532 111 1111")
{:ok, :mobile}
iex> Legion.Telephony.PhoneNumber.get_number_type("+90 216 111 1111")
{:ok, :fixed_line}
iex> Legion.Telephony.PhoneNumber.get_number_type("test")
{:error, :invalid}
"""
@spec get_number_type(t()) ::
{:ok, host_type()}
| {:error, :invalid}
def get_number_type(number) when is_binary(number) do
case ExPhoneNumber.parse(number, nil) do
{:ok, phone_number} ->
number_type = ExPhoneNumber.get_number_type(phone_number)
{:ok, number_type}
{:error, _} ->
{:error, :invalid}
end
end
@doc """
Determines validity of the given phone number.
## Examples
iex> Legion.Telephony.PhoneNumber.is_valid_number?("+905321111111")
true
iex> Legion.Telephony.PhoneNumber.is_valid_number?("test")
false
"""
@spec is_valid_number?(t()) :: boolean()
def is_valid_number?(number) when is_binary(number) do
case ExPhoneNumber.parse(number, nil) do
{:ok, phone_number} ->
ExPhoneNumber.is_valid_number?(phone_number)
{:error, _} ->
false
end
end
@doc ~S"""
Returns a boolean value indicating the possibility of validity of the given phone number.
Unlike `is_valid_number?/1`, this function validates the phone number by a bare
lookup of its length.
## Examples
iex> Legion.Telephony.PhoneNumber.is_possible_number?("+905321111111")
true
iex> Legion.Telephony.PhoneNumber.is_possible_number?("test")
false
"""
@spec is_possible_number?(t()) :: boolean()
def is_possible_number?(number) when is_binary(number),
do: ExPhoneNumber.is_possible_number?(number, "")
@doc ~S"""
Converts given number to RFC 3966-formatted string.
## Examples
iex> Legion.Telephony.PhoneNumber.to_rfc3966("+90 532 111 1111")
{:ok, "tel:+90-532-111-11-11"}
iex> Legion.Telephony.PhoneNumber.to_rfc3966("test")
{:error, :invalid}
"""
@spec to_rfc3966(t()) ::
{:ok, String.t()}
| {:error, :invalid}
def to_rfc3966(number) do
case ExPhoneNumber.parse(number, nil) do
{:ok, phone_number} ->
rfc3966 = ExPhoneNumber.format(phone_number, :rfc3966)
{:ok, rfc3966}
{:error, _} ->
{:error, :invalid}
end
end
@doc ~S"""
Converts given number to E164-formatted string.
## Examples
iex> Legion.Telephony.PhoneNumber.to_e164("+90 532 111 1111")
{:ok, "+905321111111"}
iex> Legion.Telephony.PhoneNumber.to_e164("test")
{:error, :invalid}
"""
def to_e164(number) when is_binary(number) do
case ExPhoneNumber.parse(number, nil) do
{:ok, phone_number} ->
e164 = ExPhoneNumber.format(phone_number, :e164)
{:ok, e164}
{:error, _} ->
{:error, :invalid}
end
end
@doc """
Humanizes the phone number in given format. `format` parameter can be either
`:international` or `:national`, determining the existence of country code in
resulting phone number.
## Examples
iex> Legion.Telephony.PhoneNumber.humanize("+905321111111", :international)
{:ok, "+90 532 111 11 11"}
iex> Legion.Telephony.PhoneNumber.humanize("+905321111111", :national)
{:ok, "0532 111 11 11"}
iex> Legion.Telephony.PhoneNumber.humanize("test", :national)
{:error, :invalid}
"""
def humanize(number, format) when is_binary(number) and format in [:international, :national] do
case ExPhoneNumber.parse(number, nil) do
{:ok, phone_number} ->
humanized = ExPhoneNumber.format(phone_number, format)
{:ok, humanized}
{:error, _} ->
{:error, :invalid}
end
end
@doc """
Same as `humanize/2`, but uses `:international` formatting as default.
## Examples
iex> Legion.Telephony.PhoneNumber.humanize("+905321111111")
{:ok, "+90 532 111 11 11"}
iex> Legion.Telephony.PhoneNumber.humanize("test")
{:error, :invalid}
"""
def humanize(number) when is_binary(number),
do: humanize(number, :international)
end
|
apps/legion/lib/telephony/phone_number.ex
| 0.833562
| 0.429071
|
phone_number.ex
|
starcoder
|
defmodule Loom.GCounter do
@moduledoc """
Grow only counters
GCounters can only ever increment. They are useful for view and hit counters,
which will never shrink.
They are not delta-CRDTs, as they are rather lightweight in general. A delta-
CRDT implementation would just return the latest value for an actor.
They do, however, implement the CRDT protocol, and can be composed into larger
CRDT data structures.
"""
alias Loom.GCounter, as: Counter
@type actor :: term
@type dot :: {actor, pos_integer}
@type t :: %Counter{
counter: %{
actor => pos_integer
},
ctx: %{
actor => pos_integer
}
}
defstruct counter: %{}, ctx: %{}
@doc """
Instantiate a new GCounter. Starts at 0.
iex> Loom.GCounter.new |> Loom.GCounter.value
0
"""
@spec new() :: t
def new, do: %Counter{}
@doc """
Instantiate a new GCounter with previous values.
iex> Loom.GCounter.new(values: [a: 10, b: 5, c: 27]) |> Loom.GCounter.value
42
"""
@spec new([values: [{actor, pos_integer}]]) :: t
def new(opts) do
new_values = opts |> Keyword.get(:values, []) |> Enum.into(%{})
%Counter{counter: new_values}
end
@doc """
Increment a counter on behalf of the actor.
If you need to decrement, see `Loom.PNCounter`
iex> Loom.GCounter.new |> Loom.GCounter.inc(:a, 1) |> Loom.GCounter.inc(:a, 29) |> Loom.GCounter.value()
30
"""
@spec inc(t, actor, pos_integer) :: t
def inc(%Counter{counter: c}, actor, int \\ 1) when int > 0 do
%Counter{counter: Map.update(c, actor, int, &(&1+int))}
end
@doc """
Get the value of a counter.
Will always be >=0.
"""
@spec value(t) :: non_neg_integer
def value(%Counter{counter: c}) do
c |> Map.values() |> Enum.sum()
end
@doc """
Joins 2 counters.
Because counters monotonically increase, we can just merge them.
iex> alias Loom.GCounter
iex> ctr1 = GCounter.new |> GCounter.inc(:a) |> GCounter.inc(:a, 10)
iex> ctr2 = GCounter.new |> GCounter.inc(:b) |> GCounter.inc(:b, 5)
iex> GCounter.join(ctr1,ctr2) |> GCounter.value
17
"""
@spec join(t, t) :: t
def join(%Counter{counter: c1}, %Counter{counter: c2}) do
%Counter{counter: Map.merge(c1, c2, fn _, v1, v2 -> max(v1, v2) end)}
end
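# A small sketch of the merge properties this relies on: join is commutative
# and idempotent, so replicas converge regardless of merge order.
#
#   alias Loom.GCounter
#   a = GCounter.new() |> GCounter.inc(:a, 3)
#   b = GCounter.new() |> GCounter.inc(:b, 4)
#   GCounter.join(a, b) == GCounter.join(b, a)                     #=> true
#   GCounter.join(a, b) == GCounter.join(GCounter.join(a, b), b)   #=> true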
end
defimpl Loom.CRDT, for: Loom.GCounter do
alias Loom.GCounter, as: Ctr
@doc """
Returns a description of the operations that this CRDT takes.
Updates return a new CRDT, reads can return any natural datatype. This counter
returns an integer.
"""
def ops(_crdt) do
[ update: [
inc: [:actor],
inc: [:actor, :int]
],
read: [
value: []
]
]
end
@doc """
Applies a CRDT to a counter in an abstract way.
This is for ops-based support.
iex> alias Loom.CRDT
iex> alias Loom.GCounter
iex> ctr = GCounter.new |> CRDT.apply({:inc, :a}) |> CRDT.apply({:inc, :a, 3})
iex> CRDT.value(ctr)
4
iex> CRDT.apply(ctr, :value)
4
"""
def apply(crdt, {:inc, actor}), do: Ctr.inc(crdt, actor)
def apply(crdt, {:inc, actor, int}), do: Ctr.inc(crdt, actor, int)
def apply(crdt, :value), do: Ctr.value(crdt)
@doc """
Joins 2 CRDT's of the same type.
2 different types cannot mix (yet). In the future, we may be able to join
different counters and merge their semantics, as long as the datatype grows
monotonically.
"""
def join(%Ctr{}=a, %Ctr{}=b), do: Ctr.join(a, b)
@doc """
Returns the most natural value for a counter, an integer.
"""
def value(crdt), do: Ctr.value(crdt)
end
|
lib/loom/gcounter.ex
| 0.774029
| 0.463505
|
gcounter.ex
|
starcoder
|
defmodule Daguex.Processor.ConvertImage do
@moduledoc """
Processor that convert given image to targeting format
This processor in the charge of converting image to specified format
and save the results to the `variants` field of `Daguex.Image`
Convert `Context.image_file` to the format specified in the opts for `#{__MODULE__}.process/2`
or all the format that defined through opts for this module, if not format is given.
Save the result of converting to
"""
use Daguex.Processor
alias Daguex.{Image, Variant}
import Daguex.Processor.StorageHelper
def init(opts) do
variants = required_option(opts, :variants)
variants = for variant <- variants, into: %{} do
{variant.format, variant}
end
%{variants: variants}
end
def process(context, %{variants: variants}) do
formats = get_formats(context.opts, variants)
with {:ok, formats} <- validate_formats(formats, variants),
formats <- filter_formats(context.image, formats),
do: convert_images(context, formats, variants)
end
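# Hedged usage sketch (names are illustrative, not from this repo): with the
# processor configured as {Daguex.Processor.ConvertImage, variants: [thumb_variant]},
# a caller can narrow a run to a single format via the per-call context opts,
# e.g. `format: :thumb`, which get_formats/2 below reads before conversion.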
defp get_formats(opts, variants) do
formats = Keyword.get(opts, :format) || Keyword.get(opts, :formats) || Map.keys(variants)
List.wrap(formats)
end
defp validate_formats(formats, variants) do
Enum.reduce_while(formats, nil, fn
format, nil -> if Map.has_key?(variants, format), do: {:cont, nil}, else: {:halt, {:error, {:illegal_format, format}}}
end) || {:ok, formats}
end
defp filter_formats(image, formats) do
Enum.reduce(formats, [], fn format, acc ->
if Image.has_variant?(image, format), do: acc, else: [format|acc]
end)
end
defp convert_images(context, formats, variants) do
Enum.reduce_while(formats, {:ok, context}, fn format, {:ok, context} ->
with {:ok, context, image_file} <- load_local_image(context, "orig"),
{:ok, new_image} <- convert_image(image_file, Map.get(variants, format)),
{:ok, context} <- put_local_image(context, new_image, format) do
{:cont, {:ok, context}}
else
e -> {:halt, e}
end
end)
end
defp convert_image(image_file, variant) do
Variant.call(image_file, variant)
end
end
|
lib/daguex/processor/convert_image.ex
| 0.835047
| 0.654087
|
convert_image.ex
|
starcoder
|
defmodule Mix.Tasks.PhoenixConfig.Gen.Resource do
use Mix.Task
alias Mix.PhoenixConfigHelpers
alias PhoenixConfig.EctoContextGenerator
@shortdoc "Creates a resource file that will be used to configure absinthe routes and can create schemas"
@moduledoc """
You can use this to create all resources needed for a GraphQL API
### Existing Schema
If you have an existing schema, you can use the `--from-ecto-schema` flag with the `--context` flag
to generate a config file for that specific file
#### Example
```bash
> mix phoenix_config.gen.resource --context MyApp.SomeContext --from-ecto-schema MyApp.SomeContext.Schema
```
### New Schema
If you're creating a new schema, you can pass in the same arguments you would to `mix phx.gen.schema`
#### Example
```bash
> mix phoenix_config.gen.resource Accounts.User email:string name:string birthday:date
```
### Options
- `dirname` - The directory to generate the config files in
- `file_name` - The file name for the config
- `only` - Parts to generate (create, all, find, update, delete)
- `except` - Parts of the CRUD resource to exclude
- `context` - Context module if supplying `--from-ecto-schema`
- `from-ecto-schema` - Specify a specific module instead of generating a new schema
"""
def run(args) do
PhoenixConfigHelpers.ensure_not_in_umbrella!("phoenix_config.gen.resource")
{opts, extra_args, _} = OptionParser.parse(args,
switches: [
dirname: :string,
file_name: :string,
only: :keep,
except: :keep,
context: :string,
from_ecto_schema: :string
]
)
cond do
!opts[:from_ecto_schema] and Enum.empty?(extra_args) ->
Mix.raise("Must provide a from_ecto_schema or create a schema for mix phoenix_config.gen.resource using the --from-ecto-schema flag")
opts[:from_ecto_schema] ->
create_and_write_resource_from_schema(opts)
extra_args ->
ecto_schema = create_schema_from_args(extra_args)
opts
|> Keyword.merge(from_ecto_schema: ecto_schema)
|> create_and_write_resource_from_schema
end
end
defp create_and_write_resource_from_schema(opts) do
  from_ecto_schema = safe_concat_with_error([opts[:from_ecto_schema]])
  config_file_path = PhoenixConfigHelpers.config_file_full_path(opts[:dirname], opts[:file_name])
  contents = create_config_contents(from_ecto_schema, opts[:only], opts[:except])
  if File.exists?(config_file_path) do
    # TODO: Inject this instead of forcing user to do this
    Mix.shell.info("Make sure to merge the following with your phoenix_config.exs\n\n#{contents}")
  else
    PhoenixConfigHelpers.write_phoenix_config_file(opts[:dirname], opts[:file_name], contents)
  end
end
defp create_config_contents(schema_name, nil, nil) do
"""
import PhoenixConfig, only: [crud_from_schema: 1]
[
crud_from_schema(#{inspect(schema_name)})
]
"""
end
defp create_config_contents(schema_name, only, except) do
"""
import PhoenixConfig, only: [crud_from_schema: 2]
[
crud_from_schema(#{inspect(schema_name)}#{build_only(only) <> build_except(except)})
]
"""
end
defp build_only(nil), do: ""
defp build_only(only), do: ", only: #{inspect(only)}"
defp build_except(nil), do: ""
defp build_except(except), do: ", except: #{inspect(except)}"
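# For reference, these helpers render the optional arguments inline, e.g.
#
#   build_only([:create, :find])  #=> ", only: [:create, :find]"
#   build_except(nil)             #=> ""
#
# so the generated call reads `crud_from_schema(MyApp.Schema, only: [...])`.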
defp create_schema_from_args(extra_args) do
with :ok <- Mix.Tasks.Phx.Gen.Schema.run(extra_args) do
context_app = Mix.Phoenix.context_app() |> to_string |> Macro.camelize
schema_module = hd(extra_args)
context_module = context_module_from_schema_module(schema_module)
ecto_schema = safe_concat_with_error(context_app, schema_module)
ensure_context_module_created(Mix.Phoenix.context_app(), context_module, ecto_schema)
inspect(ecto_schema)
end
end
defp ensure_context_module_created(context_app, context_module, ecto_schema) do
context_app_module = context_app |> to_string |> Macro.camelize
Module.safe_concat(context_app_module, context_module)
rescue
ArgumentError ->
context_app_module = context_app |> to_string |> Macro.camelize
Mix.shell().info("No context found for schema at #{context_app_module}.#{context_module}, creating...")
context_module_path = Mix.Phoenix.context_lib_path(context_app, "#{Macro.underscore(context_module)}.ex")
if Mix.Generator.create_file(context_module_path, EctoContextGenerator.create_context_module_for_schemas(context_app_module, context_module, [ecto_schema])) do
Code.compile_file(context_module_path)
safe_concat_with_error(context_app_module, context_module)
end
end
defp safe_concat_with_error(module_a, module_b) do
safe_concat_with_error([module_a, module_b])
end
defp safe_concat_with_error(modules) do
Module.safe_concat(modules)
rescue
ArgumentError ->
Mix.raise("Module #{Enum.join(modules, ".")} cannot be found in your application, please ensure you have the right modules passed in")
end
defp context_module_from_schema_module(schema_module) do
case schema_module |> to_string |> String.split(".") do
[item] -> item
schema_parts -> schema_parts |> Enum.drop(-1) |> Enum.join(".")
end
end
end
|
lib/mix/tasks/gen.resource.ex
| 0.644337
| 0.582372
|
gen.resource.ex
|
starcoder
|
defmodule Adventofcode.Day19TractorBeam do
use Adventofcode
alias __MODULE__.{Grid, Position, Printer, Program}
def part_1(input) do
input
|> part_1_grid()
|> Grid.locations_affected_by_beam()
end
def part_1_grid(input) do
input
|> Program.parse()
|> Grid.new(view: {0..49, 0..49})
|> Grid.deploy_drones()
end
defmodule Position do
@enforce_keys [:x, :y]
defstruct x: 0, y: 0
def new(x, y), do: %Position{x: x, y: y}
def range({x_range, y_range}) do
for y <- y_range, x <- x_range, do: new(x, y)
end
end
defmodule Program do
alias Adventofcode.IntcodeComputer
def parse(input), do: IntcodeComputer.parse(input)
def deploy_drone(program, %Position{x: x, y: y}) do
program
|> IntcodeComputer.inputs([x, y])
|> IntcodeComputer.run()
|> IntcodeComputer.output()
end
end
defmodule Grid do
@default_view {0..1, 0..1}
@enforce_keys [:program]
defstruct view: @default_view, program: nil, locations: %{}
def new(program, options) do
view = Keyword.get(options, :view, @default_view)
%Grid{program: program, view: view}
end
def get_location(grid, position), do: Map.get(grid.locations, position)
def deploy_drones(grid) do
grid.view
|> Position.range()
|> Enum.reduce(grid, &do_deploy_drone(&2, &1))
end
defp do_deploy_drone(grid, position) do
value = Program.deploy_drone(grid.program, position)
%{grid | locations: Map.put(grid.locations, position, value)}
end
def locations_affected_by_beam(grid) do
grid.locations
|> Map.values()
|> Enum.filter(&(&1 == 1))
|> Enum.count()
end
end
defmodule Printer do
def print(grid) do
IO.puts("\n" <> s_print(grid))
grid
end
def s_print(%{view: {_, y1..y2}} = grid) do
y1..y2
|> Enum.to_list()
|> Enum.map_join("\n", &print_row(grid, &1))
end
defp print_row(%{view: {x1..x2, _}} = grid, y) do
x1..x2
|> Enum.to_list()
|> Enum.map(&Position.new(&1, y))
|> Enum.map_join(&do_print_row(grid, &1))
end
defp do_print_row(grid, position) do
grid
|> Grid.get_location(position)
|> print_location()
end
defp print_location(0), do: "."
defp print_location(1), do: "#"
end
end
defimpl Inspect, for: Adventofcode.Day19TractorBeam.Position do
import Inspect.Algebra
def inspect(%{x: x, y: y}, _opts) do
concat(["#Position{", to_string(x), " ", to_string(y), "}"])
end
end
|
lib/day_19_tractor_beam.ex
| 0.639286
| 0.560403
|
day_19_tractor_beam.ex
|
starcoder
|
defmodule Coherence.Schema do
@moduledoc """
Add Coherence support to a User schema module.
Add `use Coherence.Schema, opts \\ []` to your User module to add a number of
module functions and helpers.
The optional `opts` parameter can be used to disable options enabled in the
global configuration by passing `option: false`.
For example,
defmodule MyProject.User do
use MyProject.Web, :model
use Coherence.Schema, invitable: false
The following functions are added regardless of the options configured:
* `authenticatable?/0` - Returns true if the option is configured.
* `registerable?/0` - Returns true if the option is configured.
* `confirmable?/0` - Returns true if the option is configured.
* `trackable?/0` - Returns true if the option is configured.
* `trackable_table?/0` - Returns true if the option is configured.
* `recoverable?/0` - Returns true if the option is configured.
* `lockable?/0` - Returns true if the option is configured.
* `invitable?/0` - Returns true if the option is configured.
* `unlockable_with_token?/0` - Returns true if the option is configured.
The following functions are available when `authenticatable?/0` returns true:
* `checkpw/2` - Validate password.
* `encrypt_password/1` - encrypts a password using `Comeonin.Bcrypt.hashpwsalt/1`
* `validate_coherence/2` - runs the coherence password validations.
* `validate_password/2` - used by `validate_coherence/2` for password validation
The following functions are available when `confirmable?/0` returns true.
* `confirmed?/1` - Has the given user been confirmed?
* `confirm/1` - Return a changeset to confirm the given user
The following functions are available when `lockable?/0` returns true.
* `locked?/1` - Is the given user locked?
* `lock/1` - Return a changeset to lock the given user
* `unlock/1` - Return a changeset to unlock the given user
The `coherence_schema/0` macro is used to add the configured schema fields to the User model's schema.
The `coherence_fields/0` function is used to return the validation fields appropriate for the selected options.
## Examples:
The following is an example User module when the :authenticatable is used:
defmodule MyProject.User do
use MyProject.Web, :model
use Coherence.Schema
schema "users" do
field :name, :string
field :email, :string
coherence_schema
timestamps
end
@required_fields ~w(name email)
@optional_fields ~w() ++ coherence_fields
def changeset(model, params \\ %{}) do
model
|> cast(params, @required_fields, @optional_fields)
|> unique_constraint(:email)
|> validate_coherence(params)
end
end
"""
use Coherence.Config
defmacro __using__(opts \\ []) do
quote do
import unquote(__MODULE__)
import Ecto.Changeset
use Coherence.Config
require Logger
alias Coherence.{ConfirmableService}
use ConfirmableService, unquote(opts)
def authenticatable? do
Coherence.Config.has_option(:authenticatable) and
Keyword.get(unquote(opts), :authenticatable, true)
end
def registerable? do
Coherence.Config.has_option(:registerable) and
Keyword.get(unquote(opts), :registerable, true)
end
def trackable? do
Coherence.Config.has_option(:trackable) and
Keyword.get(unquote(opts), :trackable, true)
end
def trackable_table? do
Coherence.Config.has_option(:trackable_table) and
Keyword.get(unquote(opts), :trackable_table, true)
end
def recoverable? do
Coherence.Config.has_option(:recoverable) and
Keyword.get(unquote(opts), :recoverable, true)
end
def lockable? do
Coherence.Config.has_option(:lockable) and
Keyword.get(unquote(opts), :lockable, true)
end
def invitable? do
Coherence.Config.has_option(:invitable) and
Keyword.get(unquote(opts), :invitable, true)
end
def unlockable_with_token? do
Coherence.Config.has_option(:unlockable_with_token) and
Keyword.get(unquote(opts), :unlockable_with_token, true)
end
def rememberable? do
Coherence.Config.has_option(:rememberable) and
Keyword.get(unquote(opts), :rememberable, true)
end
if Coherence.Config.has_option(:lockable) and
Keyword.get(unquote(opts), :lockable, true) do
@doc """
Checks if the user is locked.
Returns true if locked, false otherwise
"""
def locked?(user) do
!!user.locked_at and
!Coherence.ControllerHelpers.expired?(user.locked_at,
minutes: Config.unlock_timeout_minutes)
end
@doc """
Unlock a user account.
Clears the `:locked_at` field on the user model.
Returns a changeset ready for Repo.update
"""
def unlock(user) do
Config.user_schema.changeset(user, %{locked_at: nil, unlock_token: nil, failed_attempts: 0})
end
@doc """
Unlock a user account.
Clears the `:locked_at` field on the user model.
deprecated! Please use Coherence.ControllerHelpers.unlock!/1.
"""
def unlock!(user) do
IO.warn "#{inspect Config.user_schema}.unlock!/1 has been deprecated. Please use Coherence.ControllerHelpers.unlock!/1 instead."
changeset = unlock user
if locked?(user) do
changeset
|> Config.repo.update
else
changeset = Ecto.Changeset.add_error changeset, :locked_at, "not locked"
{:error, changeset}
end
end
@doc """
Lock a user account.
Sets the `:locked_at` field on the user model to the current date and time unless
provided a value for the optional parameter.
You can provide a date in the future to override the configured lock expiry time. You
can set this data far in the future to do a pseudo permanent lock.
Returns a changeset ready for Repo.update
"""
def lock(user, locked_at \\ Ecto.DateTime.utc) do
Config.user_schema.changeset(user, %{locked_at: locked_at})
end
@doc """
Lock a user account.
Sets the `:locked_at` field on the user model to the current date and time unless
provided a value for the optional parameter.
You can provide a date in the future to override the configured lock expiry time. You
can set this data far in the future to do a pseudo permanent lock.
deprecated! Please use Coherence.ControllerHelpers.lock!/1.
"""
def lock!(user, locked_at \\ Ecto.DateTime.utc) do
IO.warn "#{inspect Config.user_schema}.lock!/1 has been deprecated. Please use Coherence.ControllerHelpers.lock!/1 instead."
changeset = Config.user_schema.changeset(user, %{locked_at: locked_at})
if locked?(user) do
  {:error, Ecto.Changeset.add_error(changeset, :locked_at, "already locked")}
else
  Config.repo.update(changeset)
end
end
end
if Coherence.Config.has_option(:authenticatable) and
Keyword.get(unquote(opts), :authenticatable, true) do
def checkpw(password, encrypted) do
try do
Comeonin.Bcrypt.checkpw(password, encrypted)
rescue
_ -> false
end
end
def encrypt_password(password) do
Comeonin.Bcrypt.hashpwsalt(password)
end
def validate_coherence(changeset, params) do
changeset
|> validate_length(:password, min: 4)
|> validate_current_password(params)
|> validate_password(params)
end
def validate_current_password(changeset, params) do
  current_password = params[:current_password] || params["current_password"]
  if Config.require_current_password and not is_nil(changeset.data.id) and
       Map.has_key?(changeset.changes, :password) do
    cond do
      is_nil(current_password) ->
        add_error(changeset, :current_password, "can't be blank")
      not checkpw(current_password, Map.get(changeset.data, Config.password_hash)) ->
        add_error(changeset, :current_password, "invalid current password")
      true ->
        changeset
    end
  else
    changeset
  end
end
def validate_password(changeset, params) do
if is_nil(Map.get(changeset.data, Config.password_hash)) and is_nil(changeset.changes[:password]) do
changeset
|> add_error(:password, "can't be blank")
else
changeset
|> validate_confirmation(:password)
|> set_password(params)
end
end
defp set_password(changeset, _params) do
if changeset.valid? and not is_nil(changeset.changes[:password]) do
put_change changeset, Config.password_hash,
encrypt_password(changeset.changes[:password])
else
changeset
end
end
else
def validate_coherence(changeset, _params), do: changeset
end
end
end
@doc """
Get list of migration schema fields for each option.
Helper function to return a keyword list of the migration fields for each
of the supported options.
TODO: Does this really belong here? Should it not be in a migration support
module?
"""
def schema_fields, do: [
authenticatable: [
"# authenticatable",
"add :#{Config.password_hash}, :string",
],
recoverable: [
"# recoverable",
"add :reset_password_token, :string",
"add :reset_password_sent_at, :utc_datetime"
],
rememberable: [
"# rememberable",
"add :remember_created_at, :utc_datetime"
],
trackable: [
"# trackable",
"add :sign_in_count, :integer, default: 0",
"add :current_sign_in_at, :utc_datetime",
"add :last_sign_in_at, :utc_datetime",
"add :current_sign_in_ip, :string",
"add :last_sign_in_ip, :string"
],
lockable: [
"# lockable",
"add :failed_attempts, :integer, default: 0",
"add :locked_at, :utc_datetime",
],
unlockable_with_token: [
"# unlockable_with_token",
"add :unlock_token, :string",
],
confirmable: [
"# confirmable",
"add :confirmation_token, :string",
"add :confirmed_at, :utc_datetime",
"add :confirmation_sent_at, :utc_datetime"
]
]
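# A minimal migration sketch (assumed usage, not part of Coherence itself)
# showing where the generated lines above end up:
#
#   defmodule MyApp.Repo.Migrations.AddCoherenceToUsers do
#     use Ecto.Migration
#
#     def change do
#       alter table(:users) do
#         # lines from schema_fields()[:authenticatable] etc. are pasted here:
#         add :password_hash, :string
#         add :reset_password_token, :string
#         add :reset_password_sent_at, :utc_datetime
#       end
#     end
#   end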
@doc """
Add configure schema fields.
Adds the schema fields to the schema block for the options selected.
Only the fields for configured options are added.
For example, for `Coherence.Config.opts == [:authenticatable, :recoverable]`
`coherence_schema` used in the following context:
defmodule MyProject.User do
use MyProject.Web, :model
use Coherence.Schema
schema "users" do
field :name, :string
field :email, :string
coherence_schema
timestamps
end
Will compile a schema to the following:
defmodule MyProject.User do
use MyProject.Web, :model
use Coherence.Schema
schema "users" do
field :name, :string
field :email, :string
# authenticatable
field :password_hash, :string
field :password, :string, virtual: true
field :password_confirmation, :string, virtual: true
# recoverable
field :reset_password_token, :string
field :reset_password_sent_at, Ecto.DateTime
timestamps
end
"""
defmacro coherence_schema do
quote do
if Coherence.Config.has_option(:authenticatable) do
field Config.password_hash, :string
field :current_password, :string, virtual: true
field :password, :string, virtual: true
field :password_confirmation, :string, virtual: true
end
if Coherence.Config.has_option(:recoverable) do
field :reset_password_token, :string
field :reset_password_sent_at, Ecto.DateTime
end
if Coherence.Config.has_option(:rememberable) do
field :remember_created_at, Ecto.DateTime
end
if Coherence.Config.has_option(:trackable) do
field :sign_in_count, :integer, default: 0
field :current_sign_in_at, Ecto.DateTime
field :last_sign_in_at, Ecto.DateTime
field :current_sign_in_ip, :string
field :last_sign_in_ip, :string
end
if Coherence.Config.has_option(:trackable_table) do
has_many :trackables, Coherence.Trackable
end
if Coherence.Config.has_option(:lockable) do
field :failed_attempts, :integer, default: 0
field :locked_at, Ecto.DateTime
end
if Coherence.Config.has_option(:unlockable_with_token) do
field :unlock_token, :string
end
if Coherence.Config.has_option(:confirmable) do
field :confirmation_token, :string
field :confirmed_at, Ecto.DateTime
field :confirmation_sent_at, Ecto.DateTime
# field :unconfirmed_email, :string
end
end
end
@optional_fields %{
authenticatable: ~w(#{Config.password_hash} password password_confirmation),
recoverable: ~w(reset_password_token reset_password_sent_at),
rememberable: ~w(remember_created_at),
trackable: ~w(sign_in_count current_sign_in_at last_sign_in_at current_sign_in_ip last_sign_in_ip),
lockable: ~w(locked_at failed_attempts),
unlockable_with_token: ~w(unlock_token),
confirmable: ~w(confirmation_token confirmed_at confirmation_sent_at)
}
@doc """
Get a list of the configured database fields.
Returns a list of fields that can be appended to your `@optional_fields` used
in your model's changeset cast.
For example, for `Coherence.Config.opts == [:authenticatable, :recoverable]`
`coherence_fields/0` will return:
~w(password_hash password password_confirmation reset_password_token reset_password_sent_at)
"""
def coherence_fields do
[]
|> options_fields(:authenticatable)
|> options_fields(:recoverable)
|> options_fields(:rememberable)
|> options_fields(:trackable)
|> options_fields(:lockable)
|> options_fields(:unlockable_with_token)
|> options_fields(:confirmable)
end
defp options_fields(fields, key) do
if Coherence.Config.has_option(key) do
fields ++ @optional_fields[key]
else
fields
end
end
end
|
lib/coherence/schema.ex
| 0.91182
| 0.515437
|
schema.ex
|
starcoder
|
defmodule Circuits.GPIO do
alias Circuits.GPIO.Nif
@type pin_number :: non_neg_integer()
@type pin_direction :: :input | :output
@type value :: 0 | 1
@type edge :: :rising | :falling | :both | :none
@type pull_mode :: :not_set | :none | :pullup | :pulldown
# Public API
@doc """
Open a GPIO for use. `pin` should be a valid GPIO pin number on the system
and `pin_direction` should be `:input` or `:output`.
"""
@spec open(pin_number(), pin_direction()) :: {:ok, reference()} | {:error, atom()}
def open(pin_number, pin_direction) do
Nif.open(pin_number, pin_direction)
end
@doc """
Read the current value on a pin.
"""
@spec read(reference()) :: value()
def read(gpio) do
Nif.read(gpio)
end
@doc """
Set the value of a pin. The pin should be configured to an output
for this to work.
"""
@spec write(reference(), value()) :: :ok
def write(gpio, value) do
Nif.write(gpio, value)
end
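# A minimal blink sketch under the assumption that pin 18 is wired to an LED;
# it only uses open/2 and write/2 as defined above.
#
#   {:ok, gpio} = Circuits.GPIO.open(18, :output)
#   for _ <- 1..5 do
#     Circuits.GPIO.write(gpio, 1)
#     Process.sleep(500)
#     Circuits.GPIO.write(gpio, 0)
#     Process.sleep(500)
#   end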
@doc """
Enable or disable pin value change notifications. The notifications
are sent based on the edge mode parameter:
* :none - No notifications are sent
* :rising - Send a notification when the pin changes from 0 to 1
* :falling - Send a notification when the pin changes from 1 to 0
* :both - Send a notification on all changes
Available Options:
* `suppress_glitches` - It is possible that the pin transitions to a value
and back by the time that Circuits GPIO gets to process it. This controls
whether a notification is sent. Set this to `false` to receive notifications.
* `receiver` - Process which should receive the notifications.
Defaults to the calling process (`self()`)
Notifications look like:
```
{:gpio, pin_number, timestamp, value}
```
Where `pin_number` is the pin that changed values, `timestamp` is roughly when
the transition occurred in nanoseconds, and `value` is the new value.
"""
@spec set_edge_mode(reference(), edge(), list()) :: :ok | {:error, atom()}
def set_edge_mode(gpio, edge \\ :both, opts \\ []) do
suppress_glitches = Keyword.get(opts, :suppress_glitches, true)
receiver =
case Keyword.get(opts, :receiver) do
pid when is_pid(pid) -> pid
name when is_atom(name) -> Process.whereis(name) || self()
_ -> self()
end
Nif.set_edge_mode(gpio, edge, suppress_glitches, receiver)
end
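# A sketch of consuming the notifications described above; the message shape
# `{:gpio, pin_number, timestamp, value}` is taken from the doc string.
#
#   {:ok, gpio} = Circuits.GPIO.open(17, :input)
#   :ok = Circuits.GPIO.set_edge_mode(gpio, :rising)
#   receive do
#     {:gpio, pin, _timestamp, 1} -> IO.puts("pin #{pin} went high")
#   end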
@doc """
Change the direction of the pin.
"""
@spec set_direction(reference(), pin_direction()) :: :ok | {:error, atom()}
def set_direction(gpio, pin_direction) do
Nif.set_direction(gpio, pin_direction)
end
@doc """
Enable or disable internal pull-up or pull-down resistor to GPIO pin
"""
@spec set_pull_mode(reference(), pull_mode()) :: :ok | {:error, atom()}
def set_pull_mode(gpio, pull_mode) do
Nif.set_pull_mode(gpio, pull_mode)
end
@doc """
Get the GPIO pin number
"""
@spec pin(reference) :: pin_number
def pin(gpio) do
Nif.pin(gpio)
end
defmodule :circuits_gpio do
@moduledoc """
Provide an Erlang friendly interface to Circuits
Example Erlang code: circuits_gpio:open(5, output)
"""
defdelegate open(pin_number, pin_direction), to: Circuits.GPIO
defdelegate read(gpio), to: Circuits.GPIO
defdelegate write(gpio, value), to: Circuits.GPIO
defdelegate set_edge_mode(gpio), to: Circuits.GPIO
defdelegate set_edge_mode(gpio, edge), to: Circuits.GPIO
defdelegate set_edge_mode(gpio, edge, suppress_glitches), to: Circuits.GPIO
defdelegate set_direction(gpio, pin_direction), to: Circuits.GPIO
defdelegate set_pull_mode(gpio, pull_mode), to: Circuits.GPIO
defdelegate pin(gpio), to: Circuits.GPIO
end
end
|
lib/gpio.ex
| 0.870115
| 0.841174
|
gpio.ex
|
starcoder
|
defmodule Automata.OperatorStats do
@moduledoc false
use GenServer
alias Automata.FailuresManifest
@typep counter :: non_neg_integer
@spec stats(pid) :: %{
failures: counter,
total: counter
}
def stats(pid) when is_pid(pid) do
GenServer.call(pid, :stats, :infinity)
end
@spec get_failure_counter(pid) :: counter
def get_failure_counter(sup) when is_pid(sup) do
GenServer.call(sup, :get_failure_counter)
end
@spec increment_failure_counter(pid) :: pos_integer
def increment_failure_counter(sup, increment \\ 1)
when is_pid(sup) and is_integer(increment) and increment >= 1 do
GenServer.call(sup, {:increment_failure_counter, increment})
end
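# Assumed call flow (sketch): the server is started elsewhere in the
# supervision tree; callers then read and bump counters synchronously. Note
# that `:failures` only moves via :automaton_finished casts, so it stays 0 here.
#
#   {:ok, pid} = GenServer.start_link(Automata.OperatorStats, [])
#   Automata.OperatorStats.increment_failure_counter(pid)
#   Automata.OperatorStats.stats(pid) #=> %{total: 0, failures: 0}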
# Callbacks
def init(opts) do
state = %{
total: 0,
failures: 0,
failures_manifest_file: opts[:failures_manifest_file],
failures_manifest: FailuresManifest.new(),
failure_counter: 0,
pids: []
}
{:ok, state}
end
def handle_call(:stats, _from, state) do
stats = Map.take(state, [:total, :failures])
{:reply, stats, state}
end
def handle_call(:get_failure_counter, _from, state) do
{:reply, state.failure_counter, state}
end
def handle_call({:increment_failure_counter, increment}, _from, state) do
%{failure_counter: failure_counter} = state
{:reply, failure_counter, %{state | failure_counter: failure_counter + increment}}
end
def handle_cast({:automaton_finished, automaton}, state) do
state =
state
|> Map.update!(:failures_manifest, &FailuresManifest.put_automaton(&1, automaton))
|> Map.update!(:total, &(&1 + 1))
|> increment_status_counter(automaton.state)
{:noreply, state}
end
def handle_cast({:world_started, _opts}, %{failures_manifest_file: file} = state)
when is_binary(file) do
state = %{state | failures_manifest: FailuresManifest.read(file)}
{:noreply, state}
end
def handle_cast({:world_finished, _, _}, %{failures_manifest_file: file} = state)
when is_binary(file) do
FailuresManifest.write!(state.failures_manifest, file)
{:noreply, state}
end
def handle_cast(_, state) do
{:noreply, state}
end
defp increment_status_counter(state, {tag, _}) when tag in [:aborted] do
Map.update!(state, :failures, &(&1 + 1))
end
defp increment_status_counter(state, _), do: state
end
|
lib/automata/core/control/operator/operator_stats.ex
| 0.740831
| 0.409575
|
operator_stats.ex
|
starcoder
|
defmodule Ecto.Adapter do
@moduledoc """
Specifies the minimal API required from adapters.
"""
@type t :: module
@typedoc """
The metadata returned by the adapter `c:init/1`.
It must be a map and Ecto itself will always inject
two keys into the meta:
* the `:cache` key, which is an ETS table that can be used as a cache (if available)
* the `:pid` key, which is the PID returned by the child spec returned in `c:init/1`
"""
@type adapter_meta :: %{optional(:stacktrace) => boolean(), optional(any()) => any()}
@doc """
The callback invoked in case the adapter needs to inject code.
"""
@macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t()
@doc """
Ensure all applications necessary to run the adapter are started.
"""
@callback ensure_all_started(config :: Keyword.t(), type :: :permanent | :transient | :temporary) ::
{:ok, [atom]} | {:error, atom}
@doc """
Initializes the adapter supervision tree by returning the children and adapter metadata.
"""
@callback init(config :: Keyword.t()) :: {:ok, :supervisor.child_spec(), adapter_meta}
@doc """
Checks out a connection for the duration of the given function.
In case the adapter provides a pool, this guarantees all of the code
inside the given `fun` runs against the same connection, which
might improve performance by for instance allowing multiple related
calls to the datastore to share cache information:
Repo.checkout(fn ->
for _ <- 1..100 do
Repo.insert!(%Post{})
end
end)
If the adapter does not provide a pool, just calling the passed function
and returning its result are enough.
If the adapter provides a pool, it is supposed to "check out" one of the
pool connections for the duration of the function call. Which connection
is checked out is not passed to the calling function, so it should be done
using a stateful method like using the current process' dictionary, process
tracking, or some kind of other lookup method. Make sure that this stored
connection is then used in the other callbacks implementations, such as
`Ecto.Adapter.Queryable` and `Ecto.Adapter.Schema`.
"""
@callback checkout(adapter_meta, config :: Keyword.t(), (() -> result)) :: result when result: var
@doc """
Returns true if a connection has been checked out.
"""
@callback checked_out?(adapter_meta) :: boolean
@doc """
Returns the loaders for a given type.
It receives the primitive type and the Ecto type (which may be
primitive as well). It returns a list of loaders with the given
type usually at the end.
This allows developers to properly translate values coming from
the adapters into Ecto ones. For example, if the database does not
support booleans but instead returns 0 and 1 for them, you could
add:
def loaders(:boolean, type), do: [&bool_decode/1, type]
def loaders(_primitive, type), do: [type]
defp bool_decode(0), do: {:ok, false}
defp bool_decode(1), do: {:ok, true}
All adapters are required to implement a clause for `:binary_id` types,
since they are adapter specific. If your adapter does not provide binary
ids, you may simply use `Ecto.UUID`:
def loaders(:binary_id, type), do: [Ecto.UUID, type]
def loaders(_primitive, type), do: [type]
"""
@callback loaders(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) ::
[(term -> {:ok, term} | :error) | Ecto.Type.t()]
@doc """
Returns the dumpers for a given type.
It receives the primitive type and the Ecto type (which may be
primitive as well). It returns a list of dumpers with the given
type usually at the beginning.
This allows developers to properly translate values coming from
the Ecto into adapter ones. For example, if the database does not
support booleans but instead returns 0 and 1 for them, you could
add:
def dumpers(:boolean, type), do: [type, &bool_encode/1]
def dumpers(_primitive, type), do: [type]
defp bool_encode(false), do: {:ok, 0}
defp bool_encode(true), do: {:ok, 1}
All adapters are required to implement a clause for :binary_id types,
since they are adapter specific. If your adapter does not provide
binary ids, you may simply use `Ecto.UUID`:
def dumpers(:binary_id, type), do: [type, Ecto.UUID]
def dumpers(_primitive, type), do: [type]
"""
@callback dumpers(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) ::
[(term -> {:ok, term} | :error) | Ecto.Type.t()]
@doc """
Returns the adapter metadata from its `c:init/1` callback.
It expects a process name of a repository. The name is either
an atom or a PID. For a given repository, you often want to
call this function based on the repository dynamic repo:
Ecto.Adapter.lookup_meta(repo.get_dynamic_repo())
"""
def lookup_meta(repo_name_or_pid) do
Ecto.Repo.Registry.lookup(repo_name_or_pid)
end
end
|
lib/ecto/adapter.ex
| 0.901144
| 0.543348
|
adapter.ex
|
starcoder
|
defmodule Cldr.Territory.Backend do
def define_territory_module(config) do
module = inspect(__MODULE__)
backend = config.backend
config = Macro.escape(config)
quote location: :keep, bind_quoted: [module: module, backend: backend, config: config] do
defmodule Territory do
alias Cldr.{LanguageTag, Locale}
@doc """
Returns a list of available styles.
## Example
iex> #{inspect __MODULE__}.available_styles()
[:short, :standard, :variant]
"""
@spec available_styles() :: [:short | :standard | :variant]
def available_styles(), do: Cldr.Territory.available_styles()
@doc """
Returns the available territories for a given locale.
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
=> #{inspect __MODULE__}.available_territories()
[:"001", :"002", :"003", :"005", :"009", :"011", :"013", :"014", :"015", :"017",
:"018", :"019", :"021", :"029", :"030", :"034", :"035", :"039", :"053", :"054",
:"057", :"061", :"142", :"143", :"145", :"150", :"151", :"154", :"155", :"202",
:"419", :AC, :AD, :AE, :AF, :AG, :AI, :AL, :AM, :AO, :AQ, :AR, :AS, :AT, :AU,
:AW, :AX, :AZ, :BA, :BB, ...]
=> #{inspect __MODULE__}.available_territories("zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \"zzz\" is not known."}}
"""
@spec available_territories(Cldr.Territory.binary_tag()) :: [atom()] | {:error, Cldr.Territory.error()}
def available_territories(locale \\ unquote(backend).get_locale())
def available_territories(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
available_territories(cldr_locale_name)
end
@doc """
Returns the available territory subdivisions for a given locale.
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
=> #{inspect __MODULE__}.available_subdivisions("en")
[:ad02, :ad03, :ad04, :ad05, :ad06, :ad07, :ad08, ...]
iex> #{inspect __MODULE__}.available_subdivisions()
[]
iex> #{inspect __MODULE__}.available_subdivisions("zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \\"zzz\\" is not known."}}
"""
@spec available_subdivisions(Cldr.Territory.binary_tag()) :: [atom()] | {:error, Cldr.Territory.error()}
def available_subdivisions(locale \\ unquote(backend).get_locale())
def available_subdivisions(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
available_subdivisions(cldr_locale_name)
end
@doc """
Returns a map of all known territories in a given locale.
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
=> #{inspect __MODULE__}.known_territories()
%{SN: %{standard: "Senegal"}, "061": %{standard: "Polynesia"},
BH: %{standard: "Bahrain"}, TM: %{standard: "Turkmenistan"},
"009": %{standard: "Oceania"}, CW: %{standard: "Curaçao"},
FR: %{standard: "France"}, TN: %{standard: "Tunisia"},
FI: %{standard: "Finland"}, BF: %{standard: "Burkina Faso"},
"155": %{standard: "Western Europe"}, GL: %{standard: "Greenland"},
VI: %{standard: "U.S. Virgin Islands"}, ZW: %{standard: "Zimbabwe"},
AR: %{standard: "Argentina"}, SG: %{standard: "Singapore"},
SZ: %{standard: "Swaziland"}, ID: %{standard: "Indonesia"},
NR: %{standard: "Nauru"}, RW: %{standard: "Rwanda"},
TR: %{standard: "Turkey"}, IS: %{standard: "Iceland"},
ME: %{standard: "Montenegro"}, AW: %{standard: "Aruba"},
PY: %{standard: "Paraguay"}, "145": %{standard: "Western Asia"},
CG: %{standard: "Congo - Brazzaville", variant: "Congo (Republic)"},
LT: %{standard: "Lithuania"}, SA: %{standard: "Saudi Arabia"},
MZ: %{standard: "Mozambique"}, NU: %{standard: "Niue"},
NG: %{standard: "Nigeria"}, CK: %{standard: "Cook Islands"},
ZM: %{standard: "Zambia"}, LK: %{standard: "Sri Lanka"},
UY: %{standard: "Uruguay"}, YE: %{standard: "Yemen"},
"011": %{standard: "Western Africa"},
CC: %{standard: "Cocos (Keeling) Islands"}, BY: %{standard: "Belarus"},
IL: %{standard: "Israel"}, KY: %{standard: "Cayman Islands"},
GN: %{standard: "Guinea"}, VN: %{standard: "Vietnam"},
PE: %{standard: "Peru"}, HU: %{standard: "Hungary"},
HN: %{standard: "Honduras"}, GI: %{standard: "Gibraltar"},
"142": %{standard: "Asia"}, "029": %{...}, ...}
=> #{inspect __MODULE__}.known_territories("zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \"zzz\" is not known."}}
"""
@spec known_territories(Cldr.Territory.binary_tag()) :: map() | {:error, Cldr.Territory.error()}
def known_territories(locale \\ unquote(backend).get_locale())
def known_territories(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
known_territories(cldr_locale_name)
end
@doc """
Returns a map of all known territory subdivisions in a given locale.
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
=> #{inspect __MODULE__}.known_subdivisions("en")
%{
"ad02" => "Canillo",
"ad03" => "Encamp",
"ad04" => "La Massana",
"ad05" => "Ordino",
"ad06" => "Sant Julià de Lòria",
"ad07" => "Andorra la Vella",
...
iex> #{inspect __MODULE__}.known_subdivisions()
%{}
iex> #{inspect __MODULE__}.known_subdivisions("zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \\"zzz\\" is not known."}}
"""
@spec known_subdivisions(Cldr.Territory.binary_tag()) :: map() | {:error, Cldr.Territory.error()}
def known_subdivisions(locale \\ unquote(backend).get_locale())
def known_subdivisions(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
known_subdivisions(cldr_locale_name)
end
@doc """
Returns a list of subdivisions of a given territory.
## Example
=> #{inspect __MODULE__}.known_territory_subdivisions(:GB)
{:ok, ["gbabc", "gbabd", "gbabe", "gbagb", "gbagy", "gband", "gbann",
"gbans", "gbbas", "gbbbd", "gbbdf", "gbbdg", "gbben", "gbbex", "gbbfs",
"gbbge", "gbbgw", "gbbir", "gbbkm", "gbbmh", "gbbne", "gbbnh", "gbbns",
"gbbol", "gbbpl", "gbbrc", "gbbrd", "gbbry", "gbbst", "gbbur", "gbcam",
"gbcay", "gbcbf", "gbccg", "gbcgn", "gbche", "gbchw", "gbcld", "gbclk",
"gbcma", "gbcmd", "gbcmn", "gbcon", "gbcov", "gbcrf", "gbcry", "gbcwy",
"gbdal", "gbdby", "gbden", ...]}
iex> #{inspect __MODULE__}.known_territory_subdivisions(:ZZZ)
{:error, {Cldr.UnknownTerritoryError, "The territory :ZZZ is unknown"}}
"""
@spec known_territory_subdivisions(Cldr.Territory.atom_binary_tag()) :: {:ok, [binary()] | nil} | {:error, Cldr.Territory.error()}
def known_territory_subdivisions(territory_code) do
territory_code
|> Cldr.validate_territory()
|> case do
{:error, reason} -> {:error, reason}
{:ok, territory_code} -> {:ok, Cldr.known_territory_subdivisions[territory_code]}
end
end
@doc """
Localized string for the given territory code.
Returns `{:ok, String.t}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
* `style` is one of those returned by `#{inspect __MODULE__}.available_styles/0`.
The current styles are `:short`, `:standard` and `:variant`.
The default is `:standard`
## Example
iex> #{inspect __MODULE__}.from_territory_code(:GB)
{:ok, "United Kingdom"}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [style: :short])
{:ok, "UK"}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [style: :ZZZ])
{:error, {Cldr.UnknownStyleError, "The style :ZZZ is unknown"}}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [style: "ZZZ"])
{:error, {Cldr.UnknownStyleError, "The style \\"ZZZ\\" is unknown"}}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [locale: "pt"])
{:ok, "Reino Unido"}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [locale: :zzz])
{:error, {Cldr.UnknownLocaleError, "The locale :zzz is not known."}}
iex> #{inspect __MODULE__}.from_territory_code(:GB, [locale: "zzz"])
{:error, {Cldr.InvalidLanguageError, "The language \\"zzz\\" is invalid"}}
"""
@spec from_territory_code(Cldr.Territory.atom_binary_tag(), Cldr.Territory.options()) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def from_territory_code(territory_code, options \\ [locale: unquote(backend).get_locale(), style: :standard])
def from_territory_code(territory_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}]) do
from_territory_code(territory_code, [locale: cldr_locale_name, style: :standard])
end
def from_territory_code(territory_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}, style: style]) do
from_territory_code(territory_code, [locale: cldr_locale_name, style: style])
end
def from_territory_code(territory_code, [locale: locale]) do
from_territory_code(territory_code, [locale: locale, style: :standard])
end
def from_territory_code(territory_code, [style: style]) do
from_territory_code(territory_code, [locale: unquote(backend).get_locale(), style: style])
end
def from_territory_code(territory_code, [locale: locale, style: style]) do
territory_code
|> Cldr.validate_territory()
|> validate_locale(locale)
|> case do
{:error, reason} -> {:error, reason}
{:ok, code, locale_name} -> from_territory_code(code, locale_name, style)
end
end
@doc """
Localized string for the given subdivision code.
Returns `{:ok, String.t}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
iex> #{inspect __MODULE__}.from_subdivision_code("gbcma", locale: "en")
{:ok, "Cumbria"}
iex> #{inspect __MODULE__}.from_subdivision_code("gbcma", locale: "pl")
{:ok, "Kumbria"}
iex> #{inspect __MODULE__}.from_subdivision_code("gbcma", locale: "bs")
{:error, {Cldr.UnknownSubdivisionError, "The locale \\"bs\\" has no translation for :gbcma."}}
iex> #{inspect __MODULE__}.from_subdivision_code("invalid", locale: "en")
{:error, {Cldr.UnknownTerritoryError, "The territory \\"invalid\\" is unknown"}}
iex> #{inspect __MODULE__}.from_subdivision_code("gbcma", [locale: :zzz])
{:error, {Cldr.UnknownLocaleError, "The locale :zzz is not known."}}
iex> #{inspect __MODULE__}.from_subdivision_code("gbcma", [locale: "zzz"])
{:error, {Cldr.InvalidLanguageError, "The language \\"zzz\\" is invalid"}}
"""
@spec from_subdivision_code(binary(), [locale: Cldr.Territory.binary_tag()]) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def from_subdivision_code(subdivision_code, options \\ [locale: unquote(backend).get_locale()])
def from_subdivision_code(subdivision_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}]) do
from_subdivision_code(subdivision_code, [locale: cldr_locale_name])
end
def from_subdivision_code(subdivision_code, [locale: locale]) do
subdivision_code
|> Cldr.validate_territory_subdivision()
|> validate_locale(locale)
|> case do
{:error, reason} -> {:error, reason}
{:ok, code, locale_name} -> __from_subdivision_code__(code, locale_name)
end
end
@doc """
The same as `from_territory_code/2`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.from_territory_code!(:GB)
"United Kingdom"
iex> #{inspect __MODULE__}.from_territory_code!(:GB, [style: :short])
"UK"
iex> #{inspect __MODULE__}.from_territory_code!(:GB, [locale: "pt"])
"Reino Unido"
"""
@spec from_territory_code!(Cldr.Territory.atom_binary_tag(), Cldr.Territory.options()) :: binary() | no_return()
def from_territory_code!(territory_code, options \\ [locale: unquote(backend).get_locale(), style: :standard])
def from_territory_code!(territory_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}]) do
from_territory_code!(territory_code, [locale: cldr_locale_name, style: :standard])
end
def from_territory_code!(territory_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}, style: style]) do
from_territory_code!(territory_code, [locale: cldr_locale_name, style: style])
end
def from_territory_code!(territory_code, [locale: locale]) do
from_territory_code!(territory_code, [locale: locale, style: :standard])
end
def from_territory_code!(territory_code, [style: style]) do
from_territory_code!(territory_code, [locale: unquote(backend).get_locale(), style: style])
end
def from_territory_code!(territory_code, options) do
case from_territory_code(territory_code, options) do
{:error, {exception, msg}} -> raise exception, msg
{:ok, result} -> result
end
end
@doc """
The same as `from_subdivision_code/2`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.from_subdivision_code!("gbcma", locale: "en")
"Cumbria"
iex> #{inspect __MODULE__}.from_subdivision_code!("gbcma", locale: "pl")
"Kumbria"
"""
@spec from_subdivision_code!(binary(), [locale: Cldr.Territory.binary_tag()]) :: binary() | no_return()
def from_subdivision_code!(subdivision_code, options \\ [locale: unquote(backend).get_locale()])
def from_subdivision_code!(subdivision_code, [locale: %LanguageTag{cldr_locale_name: cldr_locale_name}]) do
from_subdivision_code!(subdivision_code, [locale: cldr_locale_name])
end
def from_subdivision_code!(subdivision_code, [locale: _locale] = options) do
case from_subdivision_code(subdivision_code, options) do
{:error, {exception, msg}} -> raise exception, msg
{:ok, result} -> result
end
end
@doc """
Localized string for the given `LanguageTag.t`.
Returns `{:ok, String.t}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `style` is one of those returned by `#{inspect __MODULE__}.available_styles/0`.
The current styles are `:short`, `:standard` and `:variant`.
The default is `:standard`
## Example
iex> #{inspect __MODULE__}.from_language_tag(Cldr.get_locale())
{:ok, "world"}
iex> #{inspect __MODULE__}.from_language_tag(Cldr.get_locale(), [style: :short])
{:error, {Cldr.UnknownStyleError, "The style :short is unknown"}}
iex> #{inspect __MODULE__}.from_language_tag(Cldr.get_locale(), [style: :ZZZ])
{:error, {Cldr.UnknownStyleError, "The style :ZZZ is unknown"}}
iex> #{inspect __MODULE__}.from_language_tag(Cldr.get_locale(), [style: "ZZZ"])
{:error, {Cldr.UnknownStyleError, "The style \\"ZZZ\\" is unknown"}}
"""
@spec from_language_tag(Cldr.Territory.tag(), Cldr.Territory.options()) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def from_language_tag(tag, options \\ [style: :standard])
def from_language_tag(%LanguageTag{cldr_locale_name: cldr_locale_name, territory: territory}, [style: style]) do
from_territory_code(territory, [locale: cldr_locale_name, style: style])
end
def from_language_tag(tag, _options), do: {:error, {Cldr.UnknownLanguageTagError, "The tag #{inspect tag} is not a valid `LanguageTag.t`"}}
@doc """
The same as `from_language_tag/2`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.from_language_tag!(Cldr.get_locale())
"world"
"""
@spec from_language_tag!(Cldr.Territory.tag(), Cldr.Territory.options()) :: binary() | no_return()
def from_language_tag!(tag, options \\ [style: :standard])
def from_language_tag!(%LanguageTag{cldr_locale_name: cldr_locale_name, territory: territory}, [style: style]) do
from_territory_code!(territory, [locale: cldr_locale_name, style: style])
end
def from_language_tag!(tag, _options), do: raise Cldr.UnknownLanguageTagError, "The tag #{inspect tag} is not a valid `LanguageTag.t`"
@doc """
Translate a localized string from one locale to another.
Returns `{:ok, result}` if successful, otherwise `{:error, reason}`.
* `to_locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
iex> #{inspect __MODULE__}.translate_territory("Reino Unido", "pt")
{:ok, "United Kingdom"}
iex> #{inspect __MODULE__}.translate_territory("United Kingdom", "en", "pt")
{:ok, "Reino Unido"}
iex> #{inspect __MODULE__}.translate_territory("Reino Unido", :zzz)
{:error, {Cldr.UnknownLocaleError, "The locale :zzz is not known."}}
iex> #{inspect __MODULE__}.translate_territory("United Kingdom", "en", "zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \\"zzz\\" is not known."}}
iex> #{inspect __MODULE__}.translate_territory("Westworld", "en", "pt")
{:error, {Cldr.UnknownTerritoryError, "No territory translation for \\"Westworld\\" could be found in locale \\"en\\""}}
"""
@spec translate_territory(binary(), Cldr.Territory.binary_tag(), Cldr.Territory.binary_tag(), atom()) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def translate_territory(localized_string, %LanguageTag{cldr_locale_name: from_locale}, to_locale, style) do
translate_territory(localized_string, from_locale, to_locale, style)
end
def translate_territory(localized_string, from_locale, %LanguageTag{cldr_locale_name: to_locale}, style) do
translate_territory(localized_string, from_locale, to_locale, style)
end
def translate_territory(localized_string, from_locale, to_locale) do
translate_territory(localized_string, from_locale, to_locale, :standard)
end
def translate_territory(localized_string, from_locale) do
translate_territory(localized_string, from_locale, unquote(backend).get_locale(), :standard)
end
@doc """
Translate a localized string from one locale to another.
Returns `{:ok, result}` if successful, otherwise `{:error, reason}`.
* `to_locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
## Example
iex> #{inspect __MODULE__}.translate_subdivision("Cumbria", "en", "pl")
{:ok, "Kumbria"}
iex> #{inspect __MODULE__}.translate_subdivision("Cumbria", "en", "bs")
{:error, {Cldr.UnknownSubdivisionError, "The locale \\"bs\\" has no translation for :gbcma."}}
iex> #{inspect __MODULE__}.translate_subdivision("Cumbria", :zzz)
{:error, {Cldr.UnknownLocaleError, "The locale :zzz is not known."}}
iex> #{inspect __MODULE__}.translate_subdivision("Cumbria", "en", "zzz")
{:error, {Cldr.UnknownLocaleError, "The locale \\"zzz\\" is not known."}}
"""
@spec translate_subdivision(binary(), Cldr.Territory.binary_tag(), Cldr.Territory.binary_tag()) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def translate_subdivision(localized_string, from_locale, to_locale \\ unquote(backend).get_locale())
def translate_subdivision(localized_string, %LanguageTag{cldr_locale_name: from_locale}, to_locale) do
translate_subdivision(localized_string, from_locale, to_locale)
end
def translate_subdivision(localized_string, from_locale, %LanguageTag{cldr_locale_name: to_locale}) do
translate_subdivision(localized_string, from_locale, to_locale)
end
@doc """
The same as `translate_territory/3`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.translate_territory!("Reino Unido", "pt")
"United Kingdom"
iex> #{inspect __MODULE__}.translate_territory!("United Kingdom", "en", "pt")
"Reino Unido"
#=> #{inspect __MODULE__}.translate_territory!("Westworld", "en", "pt")
** (Cldr.UnknownTerritoryError) No territory translation for "Westworld" could be found in locale "en"
"""
@spec translate_territory!(binary(), Cldr.Territory.binary_tag(), Cldr.Territory.binary_tag(), atom()) :: binary() | no_return()
def translate_territory!(localized_string, %LanguageTag{cldr_locale_name: from_locale}, to_locale, style) do
translate_territory!(localized_string, from_locale, to_locale, style)
end
def translate_territory!(localized_string, from_locale, %LanguageTag{cldr_locale_name: to_locale}, style) do
translate_territory!(localized_string, from_locale, to_locale, style)
end
def translate_territory!(localized_string, locale_from, locale_name, style) do
case translate_territory(localized_string, locale_from, locale_name, style) do
{:error, {exception, msg}} -> raise exception, msg
{:ok, result} -> result
end
end
def translate_territory!(localized_string, from_locale, to_locale) do
translate_territory!(localized_string, from_locale, to_locale, :standard)
end
def translate_territory!(localized_string, from_locale) do
translate_territory!(localized_string, from_locale, unquote(backend).get_locale(), :standard)
end
@doc """
The same as `translate_subdivision/3`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.translate_subdivision!("Cumbria", "en", "pl")
"Kumbria"
iex> #{inspect __MODULE__}.translate_subdivision!("Kumbria", "pl", "en")
"Cumbria"
"""
@spec translate_subdivision!(binary(), Cldr.Territory.binary_tag(), Cldr.Territory.binary_tag()) :: binary() | no_return()
def translate_subdivision!(localized_string, from_locale, to_locale \\ unquote(backend).get_locale())
def translate_subdivision!(localized_string, %LanguageTag{cldr_locale_name: from_locale}, to_locale) do
translate_subdivision!(localized_string, from_locale, to_locale)
end
def translate_subdivision!(localized_string, from_locale, %LanguageTag{cldr_locale_name: to_locale}) do
translate_subdivision!(localized_string, from_locale, to_locale)
end
def translate_subdivision!(localized_string, locale_from, locale_name) do
case translate_subdivision(localized_string, locale_from, locale_name) do
{:error, {exception, msg}} -> raise exception, msg
{:ok, result} -> result
end
end
@doc """
Translate a `LanguageTag.t` into a localized territory name in another locale.
Returns `{:ok, result}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `locale` is any configured locale. See `#{inspect backend}.known_locale_names/0`.
The default is `Cldr.get_locale/0`
* `style` is one of those returned by `#{inspect __MODULE__}.available_styles/0`.
The current styles are `:short`, `:standard` and `:variant`.
The default is `:standard`
## Example
iex> #{inspect __MODULE__}.translate_language_tag(Cldr.get_locale())
{:ok, "world"}
iex> #{inspect __MODULE__}.translate_language_tag(Cldr.get_locale(), [locale: Cldr.Locale.new!("pt", TestBackend.Cldr)])
{:ok, "Mundo"}
"""
@spec translate_language_tag(Cldr.Territory.tag(), Cldr.Territory.options()) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def translate_language_tag(from_locale, options \\ [locale: unquote(backend).get_locale(), style: :standard])
def translate_language_tag(%LanguageTag{} = from_locale, [locale: %LanguageTag{} = to_locale]) do
translate_language_tag(from_locale, [locale: to_locale, style: :standard])
end
def translate_language_tag(%LanguageTag{} = from_locale, [style: style]) do
translate_language_tag(from_locale, [locale: unquote(backend).get_locale(), style: style])
end
def translate_language_tag(%LanguageTag{} = from_locale, [locale: %LanguageTag{} = to_locale, style: style]) do
case from_language_tag(from_locale, [style: style]) do
{:error, reason} -> {:error, reason}
{:ok, result} -> translate_territory(result, from_locale, to_locale, style)
end
end
def translate_language_tag(%LanguageTag{}, [locale: tag, style: _style]) do
{:error, {Cldr.UnknownLanguageTagError, "The tag #{inspect tag} is not a valid `LanguageTag.t`"}}
end
def translate_language_tag(%LanguageTag{}, [locale: tag]) do
{:error, {Cldr.UnknownLanguageTagError, "The tag #{inspect tag} is not a valid `LanguageTag.t`"}}
end
def translate_language_tag(tag, _options) do
{:error, {Cldr.UnknownLanguageTagError, "The tag #{inspect tag} is not a valid `LanguageTag.t`"}}
end
@doc """
The same as `translate_language_tag/2`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.translate_language_tag!(Cldr.get_locale())
"world"
iex> #{inspect __MODULE__}.translate_language_tag!(Cldr.get_locale(), [locale: Cldr.Locale.new!("pt", TestBackend.Cldr)])
"Mundo"
"""
@spec translate_language_tag!(Cldr.Territory.tag(), Cldr.Territory.options()) :: binary() | no_return()
def translate_language_tag!(locale_from, options \\ [locale: unquote(backend).get_locale(), style: :standard])
def translate_language_tag!(locale_from, options) do
case translate_language_tag(locale_from, options) do
{:error, {exception, msg}} -> raise exception, msg
{:ok, result} -> result
end
end
@doc """
Lists parent(s) for the given territory code.
Returns `{:ok, list}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.parent(:GB)
{:ok, [:"154", :UN]}
iex> #{inspect __MODULE__}.parent(:ZZZ)
{:error, {Cldr.UnknownTerritoryError, "The territory :ZZZ is unknown"}}
iex> #{inspect __MODULE__}.parent(Cldr.get_locale())
{:error, {Cldr.UnknownChildrenError, "The territory :\\"001\\" has no parent(s)"}}
"""
@spec parent(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: {:ok, Cldr.Territory.atom_binary_charlist()} | {:error, Cldr.Territory.error()}
def parent(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.parent(territory_code, opts)
@doc """
The same as `parent/2`, but raises an exception if it fails.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.parent!(:GB)
[:"154", :UN]
"""
@spec parent!(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: [Cldr.Territory.atom_binary_charlist()] | no_return()
def parent!(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.parent!(territory_code, opts)
@doc """
Lists children(s) for the given territory code.
Returns `{:ok, list}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.children(:EU)
{:ok,
[:AT, :BE, :CY, :CZ, :DE, :DK, :EE, :ES, :FI, :FR, :GR, :HR, :HU, :IE,
:IT, :LT, :LU, :LV, :MT, :NL, :PL, :PT, :SE, :SI, :SK, :BG, :RO]}
iex> #{inspect __MODULE__}.children(:ZZZ)
{:error, {Cldr.UnknownTerritoryError, "The territory :ZZZ is unknown"}}
iex> #{inspect __MODULE__}.children(:GB)
{:error, {Cldr.UnknownParentError, "The territory :GB has no children"}}
"""
@spec children(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: {:ok, Cldr.Territory.atom_binary_charlist()} | {:error, Cldr.Territory.error()}
def children(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.children(territory_code, opts)
@doc """
The same as `children/2`, but raises an exception if it fails.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.children!(:EU)
[:AT, :BE, :CY, :CZ, :DE, :DK, :EE, :ES, :FI, :FR, :GR, :HR, :HU, :IE, :IT,
:LT, :LU, :LV, :MT, :NL, :PL, :PT, :SE, :SI, :SK, :BG, :RO]
"""
@spec children!(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: [Cldr.Territory.atom_binary_charlist()] | no_return()
def children!(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.children!(territory_code, opts)
@doc """
Checks relationship between two territories, where the first argument is the `parent` and second the `child`.
Returns `true` if successful, otherwise `false`.
## Example
iex> #{inspect __MODULE__}.contains?(:EU, :DK)
true
iex> #{inspect __MODULE__}.contains?(:DK, :EU)
false
"""
@spec contains?(Cldr.Territory.atom_tag(), Cldr.Territory.atom_tag()) :: boolean()
def contains?(parent, child), do: Cldr.Territory.contains?(parent, child)
@doc """
Returns a map of territory info for the given territory code.
Returns `{:ok, map}` if successful, otherwise `{:error, reason}`.
## Example
iex> #{inspect __MODULE__}.info(:GB)
{:ok,
%{
currency: [GBP: %{from: ~D[1694-07-27]}],
gdp: 2925000000000,
language_population: %{
"bn" => %{population_percent: 0.67},
"cy" => %{official_status: "official_regional", population_percent: 0.77},
"de" => %{population_percent: 6},
"el" => %{population_percent: 0.33},
"en" => %{official_status: "official", population_percent: 99},
"fr" => %{population_percent: 19},
"ga" => %{official_status: "official_regional", population_percent: 0.026},
"gd" => %{
official_status: "official_regional",
population_percent: 0.099,
writing_percent: 5
},
"it" => %{population_percent: 0.33},
"ks" => %{population_percent: 0.19},
"kw" => %{population_percent: 0.003},
"ml" => %{population_percent: 0.035},
"pa" => %{population_percent: 0.79},
"sco" => %{population_percent: 2.7, writing_percent: 5},
"syl" => %{population_percent: 0.51},
"yi" => %{population_percent: 0.049},
"zh-Hant" => %{population_percent: 0.54}
},
literacy_percent: 99,
measurement_system: %{
default: :uksystem,
paper_size: :a4,
temperature: :uksystem
},
population: 65761100
}}
"""
@spec info(Cldr.Territory.atom_tag()) :: {:ok, map()} | {:error, Cldr.Territory.error()}
def info(territory_code), do: Cldr.Territory.info(territory_code)
@doc """
The same as `info/1`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.info!(:GB)
%{
currency: [GBP: %{from: ~D[1694-07-27]}],
gdp: 2925000000000,
language_population: %{
"bn" => %{population_percent: 0.67},
"cy" => %{official_status: "official_regional", population_percent: 0.77},
"de" => %{population_percent: 6},
"el" => %{population_percent: 0.33},
"en" => %{official_status: "official", population_percent: 99},
"fr" => %{population_percent: 19},
"ga" => %{official_status: "official_regional", population_percent: 0.026},
"gd" => %{
official_status: "official_regional",
population_percent: 0.099,
writing_percent: 5
},
"it" => %{population_percent: 0.33},
"ks" => %{population_percent: 0.19},
"kw" => %{population_percent: 0.003},
"ml" => %{population_percent: 0.035},
"pa" => %{population_percent: 0.79},
"sco" => %{population_percent: 2.7, writing_percent: 5},
"syl" => %{population_percent: 0.51},
"yi" => %{population_percent: 0.049},
"zh-Hant" => %{population_percent: 0.54}
},
literacy_percent: 99,
measurement_system: %{
default: :uksystem,
paper_size: :a4,
temperature: :uksystem
},
population: 65761100
}
"""
@spec info!(Cldr.Territory.atom_tag()) :: map() | no_return()
def info!(territory_code), do: Cldr.Territory.info!(territory_code)
# Generate the functions that encapsulate the territory data from CLDR
@known_locales Cldr.Locale.Loader.known_locale_names(config)
for locale_name <- @known_locales do
locale = Cldr.Locale.Loader.get_locale(locale_name, config)
territories = Map.fetch!(locale, :territories)
subdivisions = Map.fetch!(locale, :subdivisions)
territory_codes = Map.keys(territories)
# Maps a territory name to a territory code
# Flattens all variations of the territory name
# to the same territory code. Note the normalization
# of the territory name both here and in `translate_territory/4`
inverted_territories =
territories
|> Enum.map(fn {territory_code, names} ->
{Map.values(names), territory_code}
end)
|> Enum.flat_map(fn {names, territory_code} ->
  for name <- names, do: {Cldr.Territory.normalize_name(name), territory_code}
end)
|> Map.new
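# For illustration (hypothetical data, and assuming normalize_name/1
# downcases the string), inverting
#   %{GB: %{standard: "United Kingdom", short: "UK"}}
# yields
#   %{"united kingdom" => :GB, "uk" => :GB}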
# Maps a subdivision name to a subdivision code
# Note the normalization of the subdivision name
# both here and in `translate_subdivision/3`
inverted_subdivisions =
subdivisions
|> Enum.map(fn {subdivision_code, name} ->
{Cldr.Territory.normalize_name(name), subdivision_code}
end)
|> Map.new
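# Each accessor below is generated once per known locale at compile
# time, pattern matching on the locale name as a literal argument.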
def available_territories(unquote(locale_name)) do
unquote(Map.keys(territories) |> Enum.sort())
end
def available_subdivisions(unquote(locale_name)) do
unquote(Map.keys(subdivisions) |> Enum.sort())
end
def known_territories(unquote(locale_name)) do
unquote(Macro.escape(territories))
end
def known_subdivisions(unquote(locale_name)) do
unquote(Macro.escape(subdivisions))
end
def inverted_territories(unquote(locale_name)) do
unquote(Macro.escape(inverted_territories))
end
def inverted_subdivisions(unquote(locale_name)) do
unquote(Macro.escape(inverted_subdivisions))
end
@doc false
def from_territory_code(territory_code, unquote(locale_name) = locale_name, style)
when territory_code in unquote(territory_codes) do
locale_name
|> known_territories()
|> Map.fetch!(territory_code)
|> Map.get(style)
|> case do
nil -> {:error, {Cldr.UnknownStyleError, "The style #{inspect style} is unknown"}}
string -> {:ok, string}
end
end
@doc false
def __from_subdivision_code__(subdivision_code, unquote(locale_name) = locale_name) do
case known_subdivisions(locale_name) do
%{^subdivision_code => subdivision_translation} -> {:ok, subdivision_translation}
subdivisions when map_size(subdivisions) == 0 -> {:error, {Cldr.UnknownSubdivisionError, "The locale #{inspect unquote(locale_name)} has no subdivisions."}}
_subdivisions -> {:error, {Cldr.UnknownSubdivisionError, "The locale #{inspect unquote(locale_name)} has no translation for #{inspect subdivision_code}."}}
end
end
end
def translate_territory(localized_string, locale_from, locale_to, style)
when locale_from in @known_locales and locale_to in @known_locales do
normalized_name = Cldr.Territory.normalize_name(localized_string)
locale_from
|> inverted_territories()
|> Map.fetch(normalized_name)
|> case do
{:ok, territory_code} ->
from_territory_code(territory_code, locale_to, style)
:error ->
{:error, unknown_territory_error(localized_string, locale_from, locale_to)}
end
end
def translate_subdivision(localized_string, locale_from, locale_to)
when locale_from in @known_locales and locale_to in @known_locales do
normalized_name = Cldr.Territory.normalize_name(localized_string)
case inverted_subdivisions(locale_from) do
%{^normalized_name => subdivision_code} ->
__from_subdivision_code__(subdivision_code, locale_to)
subdivisions when map_size(subdivisions) == 0 ->
{:error, {Cldr.UnknownSubdivisionError, "The locale #{inspect locale_from} has no subdivisions."}}
_subdivisions ->
{:error, {Cldr.UnknownSubdivisionError, "The locale #{inspect locale_from} has no translation for #{inspect localized_string}."}}
end
end
def available_territories(locale), do: {:error, Locale.locale_error(locale)}
def available_subdivisions(locale), do: {:error, Locale.locale_error(locale)}
def known_territories(locale), do: {:error, Locale.locale_error(locale)}
def known_subdivisions(locale), do: {:error, Locale.locale_error(locale)}
def translate_territory(_localized_string, from, _locale, _style) when from not in @known_locales,
do: {:error, Locale.locale_error(from)}
def translate_territory(_localized_string, _from, locale, _style),
do: {:error, Locale.locale_error(locale)}
def translate_subdivision(_localized_string, from, _locale) when from not in @known_locales,
do: {:error, Locale.locale_error(from)}
def translate_subdivision(_localized_string, _from, locale),
do: {:error, Locale.locale_error(locale)}
defp validate_locale({:error, reason}, _locale), do: {:error, reason}
defp validate_locale({:ok, code}, locale) do
locale
|> Cldr.validate_locale(unquote(backend))
|> case do
{:error, error} -> {:error, error}
{:ok, %LanguageTag{cldr_locale_name: locale_name}} -> {:ok, code, locale_name}
end
end
@doc """
Unicode flag for the given territory code.
Returns `{:ok, flag}` if successful, otherwise `{:error, reason}`.
## Example
iex> #{inspect __MODULE__}.to_unicode_flag(:US)
{:ok, "🇺🇸"}
iex> #{inspect __MODULE__}.to_unicode_flag(:EZ)
{:error, {Cldr.UnknownFlagError, "The territory :EZ has no flag"}}
"""
@spec to_unicode_flag(Cldr.Territory.atom_binary_tag() | {:ok, atom()} | {:error, Cldr.Territory.error()}) :: {:ok, binary()} | {:error, Cldr.Territory.error()}
def to_unicode_flag(territory_code), do: Cldr.Territory.to_unicode_flag(territory_code)
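# Illustrative of the underlying encoding (not this library's code):
# a Unicode flag is two regional-indicator code points, one per letter
# of the territory code, each offset 127_397 from its ASCII letter:
#
#   "US" |> String.to_charlist() |> Enum.map(&(&1 + 127_397)) |> List.to_string()
#   #=> "🇺🇸"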
@doc """
The same as `to_unicode_flag/1`, but raises an exception if it fails.
## Example
iex> #{inspect __MODULE__}.to_unicode_flag!(:US)
"🇺🇸"
"""
@spec to_unicode_flag!(Cldr.Territory.atom_binary_tag()) :: binary() | no_return()
def to_unicode_flag!(territory_code), do: Cldr.Territory.to_unicode_flag!(territory_code)
@doc """
A helper method to get a territory's currency code.
If a territory has multiple currencies, the oldest active currency is returned.
Returns `{:ok, code}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.to_currency_code(:US)
{:ok, :USD}
iex> #{inspect __MODULE__}.to_currency_code("cu")
{:ok, :CUP}
"""
@spec to_currency_code(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: {:ok, Cldr.Territory.atom_binary_charlist()} | {:error, Cldr.Territory.error()}
def to_currency_code(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.to_currency_code(territory_code, opts)
@doc """
The same as `to_currency_code/2`, but raises an exception if it fails.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.to_currency_code!(:US)
:USD
iex> #{inspect __MODULE__}.to_currency_code!(:US, as: :charlist)
'USD'
iex> #{inspect __MODULE__}.to_currency_code!("PS")
:ILS
iex> #{inspect __MODULE__}.to_currency_code!("PS", as: :binary)
"ILS"
"""
@spec to_currency_code!(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: Cldr.Territory.atom_binary_charlist() | no_return()
def to_currency_code!(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.to_currency_code!(territory_code, opts)
@doc """
A helper method to get a territory's currency codes.
Returns `{:ok, list}` if successful, otherwise `{:error, reason}`.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.to_currency_codes(:US)
{:ok, [:USD]}
iex> #{inspect __MODULE__}.to_currency_codes("cu")
{:ok, [:CUP, :CUC]}
"""
@spec to_currency_codes(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: {:ok, [Cldr.Territory.atom_binary_charlist()]} | {:error, Cldr.Territory.error()}
def to_currency_codes(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.to_currency_codes(territory_code, opts)
@doc """
The same as `to_currency_codes/2`, but raises an exception if it fails.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
iex> #{inspect __MODULE__}.to_currency_codes!(:US)
[:USD]
iex> #{inspect __MODULE__}.to_currency_codes!(:US, as: :charlist)
['USD']
iex> #{inspect __MODULE__}.to_currency_codes!("PS")
[:ILS, :JOD]
iex> #{inspect __MODULE__}.to_currency_codes!("PS", as: :binary)
["ILS", "JOD"]
"""
@spec to_currency_codes!(Cldr.Territory.atom_binary_tag(), Cldr.Territory.as_options()) :: [Cldr.Territory.atom_binary_charlist()] | no_return()
def to_currency_codes!(territory_code, opts \\ [as: :atom]), do: Cldr.Territory.to_currency_codes!(territory_code, opts)
@doc """
Returns a list of country codes.
* `options` are:
* `as: :atom`
* `as: :binary`
* `as: :charlist`
## Example
=> #{inspect __MODULE__}.country_codes()
[:AD, :AE, :AF, :AG, :AI, :AL, :AM, :AO, :AR, :AS, :AT, :AU, :AW,
:AX, :AZ, :BA, :BB, :BD, :BE, :BF, :BG, :BH, :BI, :BJ, :BL, :BM,
:BN, :BO, :BQ, :BR, :BS, :BT, :BV, :BW, :BY, :BZ, :CA, :CC, :CD,
:CF, :CG, :CH, :CI, :CK, :CL, :CM, :CN, :CO, :CR, :CU, ...]
"""
@spec country_codes(Cldr.Territory.as_options()) :: [Cldr.Territory.atom_binary_charlist()]
def country_codes(opts \\ [as: :atom]), do: Cldr.Territory.country_codes(opts)
defp unknown_territory_error(string, from, _to) do
{Cldr.UnknownTerritoryError,
"No territory translation for #{inspect string} could be found in locale #{inspect from}"}
end
end
end
end
end
|
lib/cldr/backend.ex
| 0.838448
| 0.481698
|
backend.ex
|
starcoder
|
defmodule Game.Format.Rooms do
@moduledoc """
Format functions for rooms
"""
import Game.Format.Context
alias Data.Exit
alias Data.Room
alias Game.Door
alias Game.Format
alias Game.Format.NPCs, as: FormatNPCs
alias Game.Format.Proficiencies, as: FormatProficiencies
@doc """
Display a room's name
"""
def room_name(room) do
context()
|> assign(:name, room.name)
|> Format.template("{room}[name]{/room}")
end
@doc """
Display a zone's name
"""
def zone_name(zone) do
context()
|> assign(:name, zone.name)
|> Format.template("{zone}[name]{/zone}")
end
@doc """
Format full text for a room
"""
@spec room(Room.t(), [Item.t()], Map.t()) :: String.t()
def room(room, items, map) do
context()
|> assign(:name, room_name(room))
|> assign(:underline, Format.underline(room.name))
|> assign(:description, room_description(room))
|> assign(:map, map)
|> assign(:who, who_is_here(room))
|> assign(:exits, maybe_exits(room))
|> assign(:items, maybe_items(room, items))
|> assign(:shops, shops(room))
|> Format.template(template("room"))
end
@doc """
Template a room's description
"""
def room_description(room) do
description = room_description_with_features(room)
context =
context()
|> assign(:room, room_name(room))
|> assign(:zone, zone_name(room.zone))
|> assign(:features, Enum.join(features(room.features), " "))
context =
Enum.reduce(room.features, context, fn room_feature, context ->
assign(context, room_feature.key, feature(room_feature))
end)
Format.template(context, Format.resources(description))
end
defp room_description_with_features(room) do
contains_features? = String.contains?(room.description, "[features]")
contains_sub_features? =
Enum.any?(room.features, fn feature ->
String.contains?(room.description, "[#{feature.key}]")
end)
case contains_features? || contains_sub_features? do
true ->
room.description
false ->
"#{room.description} [features]"
end
end
@doc """
Display a room's feature
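Example:
    iex> Rooms.feature(%{key: "fountain", short_description: "A marble fountain bubbles"})
    "A marble {white}fountain{/white} bubbles"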
"""
def feature(feature) do
String.replace(feature.short_description, feature.key, "{white}#{feature.key}{/white}")
end
@doc """
Display room features
"""
def features(features) do
Enum.map(features, &feature/1)
end
@doc """
Peek at a room from the room you're in
Example:
iex> Rooms.peak_room(%{direction: "north", requirements: []}, %{name: "Hallway"})
"{room}Hallway{/room} is north."
"""
def peak_room(room_exit, room) do
context()
|> assign(:name, room_name(room))
|> assign(:direction, room_exit.direction)
|> assign(:requirements, exit_requirements(room_exit))
|> Format.template("[name] is [direction].[requirements]")
end
defp exit_requirements(%{requirements: []}), do: nil
defp exit_requirements(room_exit) do
context()
|> assign_many(:requirements, room_exit.requirements, &render_requirement/1)
|> Format.template("\n\nRequirements:\n[requirements]")
end
def render_requirement(requirement) do
context()
|> assign(:name, FormatProficiencies.name(requirement))
|> assign(:rank, requirement.ranks)
|> Format.template(" - [name] [rank]")
end
@doc """
Output for an overworld look
"""
@spec overworld_room(Overworld.t(), String.t()) :: String.t()
def overworld_room(room, map) do
context()
|> assign(:map, map)
|> assign(:who, who_is_here(room))
|> assign(:exits, maybe_exits(room))
|> Format.template(template("overworld"))
end
defp maybe_exits(room) do
case room |> Room.exits() do
[] ->
nil
directions ->
directions = Enum.sort(directions)
context()
|> assign_many(:exits, directions, &render_exit(room, &1), joiner: ", ")
|> Format.template("Exits: [exits]")
end
end
defp render_exit(room, direction) do
room_exit = Exit.exit_to(room, direction)
context()
|> assign(:direction, direction)
|> assign(:door_state, door_state(room_exit))
|> assign(:requirements, exit_requirements_hint(room_exit))
|> Format.template("{exit}[direction]{/exit}[requirements][ door_state]")
end
defp door_state(room_exit) do
case room_exit.has_door do
true ->
context()
|> assign(:door_state, Door.get(room_exit.door_id))
|> Format.template("([door_state])")
false ->
nil
end
end
defp exit_requirements_hint(room_exit) do
case Enum.empty?(room_exit.requirements) do
true ->
nil
false ->
context()
|> Format.template("{white}*{/white}")
end
end
@doc """
Format full text for who is in the room
Example:
iex> Rooms.who_is_here(%{players: [%{name: "Mordred"}], npcs: [%{name: "Arthur", extra: %{status_line: "[name] is here."}}]})
"{player}Mordred{/player} is here.\\n{npc}Arthur{/npc} is here."
"""
def who_is_here(room) do
context()
|> assign_many(:players, room.players, &player_line/1)
|> assign_many(:npcs, room.npcs, &FormatNPCs.npc_status/1)
|> Format.template("[players\n][npcs]")
end
@doc """
Format a player's status line
"""
def player_line(player) do
context()
|> assign(:name, Format.player_name(player))
|> Format.template("[name] is here.")
end
@doc """
Maybe display items
"""
def maybe_items(room, items) do
case Enum.empty?(items) and room.currency == 0 do
true ->
nil
false ->
items = Enum.map(items, &Format.item_name/1)
items = items ++ [Format.currency(room)]
items = Enum.reject(items, &(&1 == ""))
context()
|> assign_many(:items, items, &(&1), joiner: ", ")
|> Format.template("Items: [items]")
end
end
@doc """
Format Shop text for shops in the room
"""
def shops(room) do
case Enum.empty?(room.shops) do
true ->
nil
false ->
context()
|> assign_many(:shops, room.shops, &Format.shop_name/1, joiner: ", ")
|> Format.template("Shops: [shops]")
end
end
def list_shops(room) do
context()
|> assign_many(:shops, room.shops, &shop_line/1)
|> Format.template("Shops around you:\n[shops]")
end
def shop_line(shop) do
context()
|> assign(:name, Format.shop_name(shop))
|> Format.template(" - [name]")
end
def template("room") do
"""
[name]
[underline]
[description]
[map]
[who]
[exits]
[items]
[shops]
"""
end
def template("overworld") do
"""
{bold}[map]{/bold}
[who]
[exits]
"""
end
end
|
lib/game/format/rooms.ex
| 0.680666
| 0.425247
|
rooms.ex
|
starcoder
|
defmodule Lastfm.Archive do
@moduledoc """
A behaviour module for implementing a Lastfm archive.
The module also provides a struct that keeps metadata about the archive.
An archive contains scrobbles data retrieved from the Lastfm API. It can be based
upon various storage implementations such as file systems and databases.
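A minimal sketch of an implementation (the module below is illustrative,
not part of this library):

    defmodule MyApp.MemoryArchive do
      @behaviour Lastfm.Archive

      @impl true
      def update_metadata(archive, _options), do: {:ok, archive}

      @impl true
      def describe(user, _options), do: {:ok, Lastfm.Archive.new(user)}

      @impl true
      def write(_archive, scrobbles, _options) when is_map(scrobbles), do: :ok
    end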
"""
@archive Application.get_env(:lastfm_archive, :type, LastFm.FileArchive)
@derive Jason.Encoder
@enforce_keys [:creator]
defstruct [
:created,
:creator,
:date,
:description,
:extent,
:format,
:identifier,
:modified,
:source,
:temporal,
:title,
:type
]
@type options :: keyword()
@type user :: binary()
@type scrobbles :: map()
@typedoc """
Metadata describing a Lastfm archive based on
[Dublin Core Metadata Initiative](https://www.dublincore.org/specifications/dublin-core/dcmi-terms/).
"""
@type t :: %__MODULE__{
created: DateTime.t(),
creator: binary(),
date: Date.t(),
description: binary(),
extent: integer(),
format: binary(),
identifier: binary(),
modified: DateTime.t(),
source: binary(),
temporal: {integer, integer},
title: binary(),
type: atom()
}
@doc """
Creates a new and empty archive and records metadata.
"""
@callback update_metadata(t(), options) :: {:ok, t()} | {:error, term()}
@doc """
Describes the status of an existing archive.
"""
@callback describe(user, options) :: {:ok, t()} | {:error, term()}
@doc """
Write scrobbles data to an existing archive.
"""
@callback write(t(), scrobbles, options) :: :ok | {:error, term()}
@doc """
Returns a struct containing new and default metadata for an archive.
Other metadata fields such as `temporal` and `modified` can be populated
based on the outcome of archiving, i.e. by the implementations of this
behaviour's callbacks.
"""
@spec new(user) :: t()
def new(user) do
%__MODULE__{
created: DateTime.utc_now(),
creator: user,
description: "Lastfm archive of #{user}, extracted from Lastfm API",
format: "application/json",
identifier: user,
source: "http://ws.audioscrobbler.com/2.0",
title: "Lastfm archive of #{user}",
type: @archive
}
end
end
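# Jason has no built-in tuple encoding; this impl encodes tuples (e.g.
# the archive's `temporal` {from, to} range) as JSON arrays, so
# Jason.encode!({2005, 2021}) yields "[2005,2021]".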
defimpl Jason.Encoder, for: Tuple do
def encode(data, options) when is_tuple(data) do
data
|> Tuple.to_list()
|> Jason.Encoder.List.encode(options)
end
end
|
lib/lastfm/archive.ex
| 0.803444
| 0.496033
|
archive.ex
|
starcoder
|
defmodule Garuda.GameRoom do
@moduledoc """
Behaviours and functions for implementing core game-logic rooms.
Game-rooms are under-the-hood GenServers, with certain extra gamey properties.
We can write our gameplay code in the game-room, while the game-channel acts as an event handler.
Events from the game-channel can then be routed to the corresponding game-room functions.
## Using GameRoom
defmodule TictactoePhx.TictactoeRoom do
use Garuda.GameRoom, expiry: 120_000
def create(_opts) do
# Return the initial game state.
%{}
end
def leave(player_id, game_state) do
# handle player leaving.
{:ok, game_state}
end
end
## Options
* expiry - the game-room will shut itself down after the given time (ms). Default: 3 hours
* reconnection_timeout - time (ms) the game-room will wait for a player who left non-explicitly to reconnect. Default: 20 seconds
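For example, with both options set (the values are arbitrary: one hour
and five seconds):

    use Garuda.GameRoom, expiry: 3_600_000, reconnection_timeout: 5_000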
"""
alias Garuda.RoomManager.RoomDb
@doc """
Creates the game-room.
We can set up the initial game state by returning `game_state`, where `game_state` is any Erlang term.
`opts` is a keyword list containing details about the room and player.
ex: `[room_id: "bingo:e6cf6669", player_id: "bingo_anon_759", max_players: 2]`
Note: `create` is called only once.
"""
@callback create(opts :: term()) :: game_state :: term()
@doc """
Handle player leaving.
We can handle the gamestate, when a player leaves.
"""
@callback leave(player_id :: String.t(), game_state :: term()) :: {:ok, game_state :: term()}
defmacro __using__(opts \\ []) do
quote do
@behaviour unquote(__MODULE__)
import unquote(__MODULE__)
@g_room_expiry Keyword.get(unquote(opts), :expiry) || 10_800_000
@g_reconnection_time Keyword.get(unquote(opts), :reconnection_timeout) || 20_000
use GenServer, restart: :transient
def start_link(name: name, opts: opts) do
GenServer.start_link(__MODULE__, opts, name: name)
end
@impl true
def init(init_opts) do
Process.send_after(self(), "expire_room", @g_room_expiry)
{:ok, nil, {:continue, {"create", init_opts}}}
end
@impl true
def handle_continue({"create", init_opts}, state) do
game_state = apply(__MODULE__, :create, [init_opts])
{:noreply, game_state}
end
@impl true
def handle_call("dispose_room", _from, game_state) do
send(self(), "expire_room")
{:reply, "disposing", game_state}
end
@impl true
def handle_call({"on_channel_leave", player_id, reason}, _from, game_state) do
game_state =
  case reason do
    # Explicit leave: remove the player and invoke the `leave` callback.
    {:shutdown, :left} ->
      RoomDb.on_player_leave(self(), player_id)
      {:ok, game_state} = apply(__MODULE__, :leave, [player_id, game_state])
      game_state

    # Non-explicit leave (e.g. a disconnect): schedule a reconnection
    # timeout instead of removing the player immediately.
    _ ->
      timer_ref =
        Process.send_after(
          self(),
          {"reconnection_timeout", player_id},
          @g_reconnection_time
        )

      RoomDb.update_timer_ref(self(), player_id, timer_ref)
      game_state
  end
{:reply, "ok", game_state}
end
@impl true
def handle_call({"on_rejoin", player_id}, _from, game_state) do
timer_ref = RoomDb.get_timer_ref(self(), player_id)
if is_reference(timer_ref) do
_resp = Process.cancel_timer(timer_ref)
RoomDb.update_timer_ref(self(), player_id, true)
end
{:reply, "ok", game_state}
end
@impl true
def handle_info("expire_room", game_state) do
{:stop, {:shutdown, "room_expired"}, game_state}
end
@impl true
def handle_info({"reconnection_timeout", player_id}, game_state) do
case RoomDb.has_rejoined(self(), player_id) do
true ->
RoomDb.update_timer_ref(self(), player_id, true)
{:noreply, game_state}
_ ->
RoomDb.on_player_leave(self(), player_id)
{:ok, game_state} = apply(__MODULE__, :leave, [player_id, game_state])
{:noreply, game_state}
end
end
@impl true
def terminate(_reason, _game_state) do
RoomDb.delete_room(self())
end
end
end
@doc """
Returns the corresponding game-channel of the game-room.
We can broadcast to the game-channel from the game-room itself, like:
`DingoWeb.Endpoint.broadcast!(get_channel(), "line_counts", %{"msg" => "heelp"})`
"""
def get_channel do
RoomDb.get_channel_name(self())
end
@doc """
Shuts down the game-room gracefully.
"""
def shutdown do
send(self(), "expire_room")
end
end
|
lib/framework/game_room.ex
| 0.753285
| 0.478346
|
game_room.ex
|
starcoder
|
defmodule BsvRpc.TransactionInput do
# TODO Move to a separate library?
@moduledoc """
Functions for Bitcoin transaction inputs manipulation.
"""
use Bitwise
@enforce_keys [:script_sig, :sequence]
@typedoc """
A Bitcoin transaction input.
"""
defstruct [:previous_transaction, :previous_output, :script_sig, :sequence]
@type t :: %__MODULE__{
previous_transaction: binary(),
previous_output: non_neg_integer(),
script_sig: binary(),
sequence: non_neg_integer()
}
@doc """
Creates transaction inputs from a binary blob.
The blob can include multiple inputs.
Returns a tuple with the list of inputs and the remainder of the binary blob.
## Arguments
- `tx_in_blob` - Binary blob to parse transaction inputs from.
- `input_count` - Number of transactions inputs to parse.
## Example
iex> tx_in = "8149C4A82A52AD851562780A99FF1ABB8A051BF0D1520E3CE78349EEC539423E020000006A47304402200FB61D66AEB74B471DA8B8B648609C0C1E7F02DB01E6FA573699B2A0AD377D940220065BC14DBB05D5F7981F9294BD5EA90F4AC6B4A6F0771C870B10622B8E8EA57741210244936527CED7DC6FBB30491E5BFBC31E208EEAA87EB3FCA2C748D098EF8614D3FFFFFFFF" <> "812667A59695ECCC55724AB10C6469535F0639FEF73D0C802EFB5E609A6316B4000000006A47304402207268E3F27C94E59426A1698AD00CD186D2095C8A38CB273DA6FD4448AD345ECF02203021A7A4A54EBBB63D606E83CF0DC471FE0FD210B326395C9E6D498DF358C6EA41210244936527CED7DC6FBB30491E5BFBC31E208EEAA87EB3FCA2C748D098EF8614D3FFFFFFFF" <> "AABB"
iex> {[tx1, tx2], <<0xAA, 0xBB>>} = tx_in |> Base.decode16!() |> BsvRpc.TransactionInput.create(2)
iex> tx1.previous_transaction
<<62, 66, 57, 197, 238, 73, 131, 231, 60, 14, 82, 209, 240, 27, 5,
138, 187, 26, 255, 153, 10, 120, 98, 21, 133, 173, 82, 42, 168,
196, 73, 129>>
iex> tx1.previous_output
2
iex> tx2.previous_transaction
<<180, 22, 99, 154, 96, 94, 251, 46, 128, 12, 61, 247, 254, 57, 6,
95, 83, 105, 100, 12, 177, 74, 114, 85, 204, 236, 149, 150, 165,
103, 38, 129>>
iex> tx2.previous_output
0
"""
@spec create(binary, non_neg_integer) :: {[__MODULE__.t()], binary}
def create(tx_in_blob, input_count), do: do_create(tx_in_blob, [], input_count)
@doc """
Creates a single transaction input from a binary blob.
Raises `MatchError` if the binary includes any more data after the first input.
## Example
iex> tx_in = "CF3E4414A1A65B96A5485F7A497FED32C0C90E95E4FF334A79559AD9B14920E90100000006AABBCCDDEEFFFFFFFFFF"
iex> tx_in |> Base.decode16!() |> BsvRpc.TransactionInput.create()
%BsvRpc.TransactionInput{
previous_output: 1,
previous_transaction: <<233, 32, 73, 177, 217, 154, 85, 121, 74, 51, 255, 228, 149, 14, 201, 192, 50, 237, 127,
73, 122, 95, 72, 165, 150, 91, 166, 161, 20, 68, 62, 207>>,
script_sig: <<170, 187, 204, 221, 238, 255>>,
sequence: 4294967295
}
iex> tx_in = tx_in <> "FF"
iex> tx_in |> Base.decode16!() |> BsvRpc.TransactionInput.create()
** (MatchError) no match of right hand side value: {[%BsvRpc.TransactionInput{previous_output: 1, previous_transaction: <<233, 32, 73, 177, 217, 154, 85, 121, 74, 51, 255, 228, 149, 14, 201, 192, 50, 237, 127, 73, 122, 95, 72, 165, 150, 91, 166, 161, 20, 68, 62, 207>>, script_sig: <<170, 187, 204, 221, 238, 255>>, sequence: 4294967295}], <<255>>}
"""
@spec create(binary) :: __MODULE__.t()
def create(tx_in_blob) do
{[tx_in | []], <<>>} = do_create(tx_in_blob, [], 1)
tx_in
end
@spec do_create(binary, [t()], non_neg_integer) :: {[t()], binary}
defp do_create(rest, inputs, 0), do: {Enum.reverse(inputs), rest}
defp do_create(
<<prev_tx::binary-size(32), prev_txout::little-size(32), rest::binary>>,
inputs,
input_count
) do
{script_sig, <<sequence::little-size(32), rest::binary>>} =
BsvRpc.Helpers.get_varlen_data(rest)
input = %__MODULE__{
# We have to reverse the hash bytes in order to store it in little endian.
previous_transaction: BsvRpc.Helpers.reverse_endianess(prev_tx),
previous_output: prev_txout,
script_sig: script_sig,
sequence: sequence
}
do_create(rest, [input | inputs], input_count - 1)
end
@doc """
Gets the binary representation of the transaction input.
## Examples
iex> tx_in = "CF3E4414A1A65B96A5485F7A497FED32C0C90E95E4FF334A79559AD9B14920E90100000006AABBCCDDEEFFFFFFFFFF"
iex> t = tx_in |> Base.decode16!() |> BsvRpc.TransactionInput.create()
iex> t |> BsvRpc.TransactionInput.to_binary() |> Base.encode16()
"CF3E4414A1A65B96A5485F7A497FED32C0C90E95E4FF334A79559AD9B14920E90100000006AABBCCDDEEFFFFFFFFFF"
"""
@spec to_binary(__MODULE__.t()) :: binary
def to_binary(tx_in) do
# We have to reverse the hash bytes in order to store it in little endian.
<<prev_tx_reversed::size(256)>> = tx_in.previous_transaction
<<prev_tx_reversed::integer-little-size(256)>> <>
<<tx_in.previous_output::little-size(32)>> <>
BsvRpc.Helpers.to_varint(byte_size(tx_in.script_sig)) <>
tx_in.script_sig <>
<<tx_in.sequence::little-size(32)>>
end
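@doc """
Signs the transaction input with the given private key.

Computes the signature hash for the input, signs it with ECDSA and
replaces the input's `script_sig` with a P2PKH scriptSig (signature
plus public key). Returns `{:ok, input}` with the signed input.
"""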
@spec sign(
BsvRpc.TransactionInput.t(),
BsvRpc.Transaction.t(),
BsvRpc.PrivateKey.t(),
BsvRpc.UTXO.t() | nil,
BsvRpc.Sighash.t()
) :: {:error, String.t()} | {:ok, BsvRpc.TransactionInput.t()}
def sign(
%__MODULE__{} = tx_in,
%BsvRpc.Transaction{} = tx,
%BsvRpc.PrivateKey{} = key,
utxo,
sigtype \\ [:sighash_all, :sighash_forkid]
) do
# TODO Check if it is the correct key.
sighash = BsvRpc.Sighash.sighash(tx_in, tx, key, utxo, sigtype)
{:ok, signature} = :libsecp256k1.ecdsa_sign(sighash, key.key, :default, "")
{:ok, public_key} = BsvRpc.PublicKey.create(key)
{:ok, Map.put(tx_in, :script_sig, p2pkh_script_sig(signature, public_key, sigtype))}
end
@spec p2pkh_script_sig(binary, BsvRpc.PublicKey.t(), BsvRpc.Sighash.t()) :: binary
def p2pkh_script_sig(signature, %BsvRpc.PublicKey{} = public_key, sigtype) do
signature = signature <> <<BsvRpc.Sighash.get_sighash_suffix(sigtype)>>
BsvRpc.Helpers.to_varlen_data(signature) <> BsvRpc.Helpers.to_varlen_data(public_key.key)
end
end
|
lib/bsv_rpc/transaction_input.ex
| 0.598312
| 0.403273
|
transaction_input.ex
|
starcoder
|
defmodule AWS.Redshift do
@moduledoc """
Amazon Redshift
**Overview**
This is an interface reference for Amazon Redshift. It contains
documentation for one of the programming or command line interfaces you can
use to manage Amazon Redshift clusters. Note that Amazon Redshift is
asynchronous, which means that some interfaces may require techniques, such
as polling or asynchronous callback handlers, to determine when a command
has been applied. In this reference, the parameter descriptions indicate
whether a change is applied immediately, on the next instance reboot, or
during the next maintenance window. For a summary of the Amazon Redshift
cluster management interfaces, go to [Using the Amazon Redshift Management
Interfaces](https://docs.aws.amazon.com/redshift/latest/mgmt/using-aws-sdk.html).
Amazon Redshift manages all the work of setting up, operating, and scaling
a data warehouse: provisioning capacity, monitoring and backing up the
cluster, and applying patches and upgrades to the Amazon Redshift engine.
You can focus on using your data to acquire new insights for your business
and customers.
If you are a first-time user of Amazon Redshift, we recommend that you
begin by reading the [Amazon Redshift Getting Started
Guide](https://docs.aws.amazon.com/redshift/latest/gsg/getting-started.html).
If you are a database developer, the [Amazon Redshift Database Developer
Guide](https://docs.aws.amazon.com/redshift/latest/dg/welcome.html)
explains how to design, build, query, and maintain the databases that make
up your data warehouse.
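For example, a minimal call sketch in Elixir. The client construction
and the return shape below are assumptions about the surrounding `AWS`
library, not verified here:

    client = %AWS.Client{
      access_key_id: "AKIAIOSFODNN7EXAMPLE",
      secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
      region: "us-east-1"
    }

    {:ok, _result, _http_response} =
      AWS.Redshift.describe_cluster_versions(client, %{})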
"""
@doc """
Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to
the configuration (term, payment type, or number of nodes) and no
additional costs.
"""
def accept_reserved_node_exchange(client, input, options \\ []) do
request(client, "AcceptReservedNodeExchange", input, options)
end
@doc """
Adds an inbound (ingress) rule to an Amazon Redshift security group.
Depending on whether the application accessing your cluster is running on
the Internet or an Amazon EC2 instance, you can authorize inbound access to
either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range
or to an Amazon EC2 security group. You can add as many as 20 ingress rules
to an Amazon Redshift security group.
If you authorize access to an Amazon EC2 security group, specify
*EC2SecurityGroupName* and *EC2SecurityGroupOwnerId*. The Amazon EC2
security group and Amazon Redshift cluster must be in the same AWS Region.
If you authorize access to a CIDR/IP address range, specify *CIDRIP*. For
an overview of CIDR blocks, see the Wikipedia article on [Classless
Inter-Domain
Routing](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
You must also associate the security group with a cluster so that clients
running on these IP addresses or the EC2 instance are authorized to connect
to the cluster. For information about managing security groups, go to
[Working with Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def authorize_cluster_security_group_ingress(client, input, options \\ []) do
request(client, "AuthorizeClusterSecurityGroupIngress", input, options)
end
@doc """
Authorizes the specified AWS customer account to restore the specified
snapshot.
For more information about working with snapshots, go to [Amazon Redshift
Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def authorize_snapshot_access(client, input, options \\ []) do
request(client, "AuthorizeSnapshotAccess", input, options)
end
@doc """
Deletes a set of cluster snapshots.
"""
def batch_delete_cluster_snapshots(client, input, options \\ []) do
request(client, "BatchDeleteClusterSnapshots", input, options)
end
@doc """
Modifies the settings for a set of cluster snapshots.
"""
def batch_modify_cluster_snapshots(client, input, options \\ []) do
request(client, "BatchModifyClusterSnapshots", input, options)
end
@doc """
Cancels a resize operation for a cluster.
"""
def cancel_resize(client, input, options \\ []) do
request(client, "CancelResize", input, options)
end
@doc """
Copies the specified automated cluster snapshot to a new manual cluster
snapshot. The source must be an automated snapshot and it must be in the
available state.
When you delete a cluster, Amazon Redshift deletes any automated snapshots
of the cluster. Also, when the retention period of the snapshot expires,
Amazon Redshift automatically deletes it. If you want to keep an automated
snapshot for a longer period, you can make a manual copy of the snapshot.
Manual snapshots are retained until you delete them.
For more information about working with snapshots, go to [Amazon Redshift
Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def copy_cluster_snapshot(client, input, options \\ []) do
request(client, "CopyClusterSnapshot", input, options)
end
@doc """
Creates a new cluster with the specified parameters.
To create a cluster in Virtual Private Cloud (VPC), you must provide a
cluster subnet group name. The cluster subnet group identifies the subnets
of your VPC that Amazon Redshift uses when creating the cluster. For more
information about managing clusters, go to [Amazon Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster(client, input, options \\ []) do
request(client, "CreateCluster", input, options)
end
@doc """
Creates an Amazon Redshift parameter group.
Creating parameter groups is independent of creating clusters. You can
associate a cluster with a parameter group when you create the cluster. You
can also associate an existing cluster with a parameter group after the
cluster is created by using `ModifyCluster`.
Parameters in the parameter group define specific behavior that applies to
the databases you create on the cluster. For more information about
parameters and parameter groups, go to [Amazon Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_parameter_group(client, input, options \\ []) do
request(client, "CreateClusterParameterGroup", input, options)
end
@doc """
Creates a new Amazon Redshift security group. You use security groups to
control access to non-VPC clusters.
For information about managing security groups, go to [Amazon Redshift
Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_security_group(client, input, options \\ []) do
request(client, "CreateClusterSecurityGroup", input, options)
end
@doc """
Creates a manual snapshot of the specified cluster. The cluster must be in
the `available` state.
For more information about working with snapshots, go to [Amazon Redshift
Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_snapshot(client, input, options \\ []) do
request(client, "CreateClusterSnapshot", input, options)
end
@doc """
Creates a new Amazon Redshift subnet group. You must provide a list of one
or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC)
when creating an Amazon Redshift subnet group.
For information about subnet groups, go to [Amazon Redshift Cluster Subnet
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_cluster_subnet_group(client, input, options \\ []) do
request(client, "CreateClusterSubnetGroup", input, options)
end
@doc """
Creates an Amazon Redshift event notification subscription. This action
requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by
either the Amazon Redshift console, the Amazon SNS console, or the Amazon
SNS API. To obtain an ARN with Amazon SNS, you must create a topic in
Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the source type, and lists of Amazon Redshift source IDs,
event categories, and event severities. Notifications will be sent for all
events you want that match those criteria. For example, you can specify
source type = cluster, source ID = my-cluster-1 and mycluster2, event
categories = Availability, Backup, and severity = ERROR. The subscription
will only send notifications for those ERROR events in the Availability and
Backup categories for the specified clusters.
If you specify both the source type and source IDs, such as source type =
cluster and source identifier = my-cluster-1, notifications will be sent
for all the cluster events for my-cluster-1. If you specify a source type
but do not specify a source identifier, you will receive notice of the
events for the objects of that type in your AWS account. If you specify
neither the SourceType nor the SourceIdentifier, you will be
notified of events generated from all Amazon Redshift sources belonging to
your AWS account. You must specify a source type if you specify a source
ID.
"""
def create_event_subscription(client, input, options \\ []) do
request(client, "CreateEventSubscription", input, options)
end
@doc """
Creates an HSM client certificate that an Amazon Redshift cluster will use
to connect to the client's HSM in order to store and retrieve the keys used
to encrypt the cluster databases.
The command returns a public key, which you must store in the HSM. In
addition to creating the HSM certificate, you must create an Amazon
Redshift HSM configuration that provides a cluster the information needed
to store and use encryption keys in the HSM. For more information, go to
[Hardware Security
Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html)
in the Amazon Redshift Cluster Management Guide.
"""
def create_hsm_client_certificate(client, input, options \\ []) do
request(client, "CreateHsmClientCertificate", input, options)
end
@doc """
Creates an HSM configuration that contains the information required by an
Amazon Redshift cluster to store and use database encryption keys in a
Hardware Security Module (HSM). After creating the HSM configuration, you
can specify it as a parameter when creating a cluster. The cluster will
then store its encryption keys in the HSM.
In addition to creating an HSM configuration, you must also create an HSM
client certificate. For more information, go to [Hardware Security
Modules](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html)
in the Amazon Redshift Cluster Management Guide.
"""
def create_hsm_configuration(client, input, options \\ []) do
request(client, "CreateHsmConfiguration", input, options)
end
@doc """
Creates a scheduled action. A scheduled action contains a schedule and an
Amazon Redshift API action. For example, you can create a schedule of when
to run the `ResizeCluster` API operation.
"""
def create_scheduled_action(client, input, options \\ []) do
request(client, "CreateScheduledAction", input, options)
end
@doc """
Creates a snapshot copy grant that permits Amazon Redshift to use a
customer master key (CMK) from AWS Key Management Service (AWS KMS) to
encrypt copied snapshots in a destination region.
For more information about managing snapshot copy grants, go to [Amazon
Redshift Database
Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def create_snapshot_copy_grant(client, input, options \\ []) do
request(client, "CreateSnapshotCopyGrant", input, options)
end
@doc """
Create a snapshot schedule that can be associated to a cluster and which
overrides the default system backup schedule.
"""
def create_snapshot_schedule(client, input, options \\ []) do
request(client, "CreateSnapshotSchedule", input, options)
end
@doc """
Adds tags to a cluster.
A resource can have up to 50 tags. If you try to create more than 50 tags
for a resource, you will receive an error and the attempt will fail.
If you specify a key that already exists for the resource, the value for
that key will be updated with the new value.
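A sketch of an input map (member names follow the AWS `CreateTags` API;
the exact shape this wrapper expects is an assumption):

    input = %{
      "ResourceName" => "arn:aws:redshift:us-east-1:123456789012:cluster:my-cluster",
      "Tags" => [%{"Key" => "environment", "Value" => "test"}]
    }

    AWS.Redshift.create_tags(client, input)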
"""
def create_tags(client, input, options \\ []) do
request(client, "CreateTags", input, options)
end
@doc """
Creates a usage limit for a specified Amazon Redshift feature on a cluster.
The usage limit is identified by the returned usage limit identifier.
"""
def create_usage_limit(client, input, options \\ []) do
request(client, "CreateUsageLimit", input, options)
end
@doc """
Deletes a previously provisioned cluster without its final snapshot being
created. A successful response from the web service indicates that the
request was received correctly. Use `DescribeClusters` to monitor the
status of the deletion. The delete operation cannot be canceled or reverted
once submitted. For more information about managing clusters, go to [Amazon
Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
If you want to shut down the cluster and retain it for future use, set
*SkipFinalClusterSnapshot* to `false` and specify a name for
*FinalClusterSnapshotIdentifier*. You can later restore this snapshot to
resume using the cluster. If a final cluster snapshot is requested, the
status of the cluster will be "final-snapshot" while the snapshot is being
taken, then it's "deleting" once Amazon Redshift begins deleting the
cluster.
"""
def delete_cluster(client, input, options \\ []) do
request(client, "DeleteCluster", input, options)
end
@doc """
Deletes a specified Amazon Redshift parameter group.
<note> You cannot delete a parameter group if it is associated with a
cluster.
</note>
"""
def delete_cluster_parameter_group(client, input, options \\ []) do
request(client, "DeleteClusterParameterGroup", input, options)
end
@doc """
Deletes an Amazon Redshift security group.
<note> You cannot delete a security group that is associated with any
clusters. You cannot delete the default security group.
</note> For information about managing security groups, go to [Amazon
Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def delete_cluster_security_group(client, input, options \\ []) do
request(client, "DeleteClusterSecurityGroup", input, options)
end
@doc """
Deletes the specified manual snapshot. The snapshot must be in the
`available` state, with no other users authorized to access the snapshot.
Unlike automated snapshots, manual snapshots are retained even after you
delete your cluster. Amazon Redshift does not delete your manual snapshots.
You must delete manual snapshots explicitly to avoid getting charged. If
other accounts are authorized to access the snapshot, you must revoke all
of the authorizations before you can delete the snapshot.
"""
def delete_cluster_snapshot(client, input, options \\ []) do
request(client, "DeleteClusterSnapshot", input, options)
end
@doc """
Deletes the specified cluster subnet group.
"""
def delete_cluster_subnet_group(client, input, options \\ []) do
request(client, "DeleteClusterSubnetGroup", input, options)
end
@doc """
Deletes an Amazon Redshift event notification subscription.
"""
def delete_event_subscription(client, input, options \\ []) do
request(client, "DeleteEventSubscription", input, options)
end
@doc """
Deletes the specified HSM client certificate.
"""
def delete_hsm_client_certificate(client, input, options \\ []) do
request(client, "DeleteHsmClientCertificate", input, options)
end
@doc """
Deletes the specified Amazon Redshift HSM configuration.
"""
def delete_hsm_configuration(client, input, options \\ []) do
request(client, "DeleteHsmConfiguration", input, options)
end
@doc """
Deletes a scheduled action.
"""
def delete_scheduled_action(client, input, options \\ []) do
request(client, "DeleteScheduledAction", input, options)
end
@doc """
Deletes the specified snapshot copy grant.
"""
def delete_snapshot_copy_grant(client, input, options \\ []) do
request(client, "DeleteSnapshotCopyGrant", input, options)
end
@doc """
Deletes a snapshot schedule.
"""
def delete_snapshot_schedule(client, input, options \\ []) do
request(client, "DeleteSnapshotSchedule", input, options)
end
@doc """
Deletes tags from a resource. You must provide the ARN of the resource from
which you want to delete the tag or tags.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Deletes a usage limit from a cluster.
"""
def delete_usage_limit(client, input, options \\ []) do
request(client, "DeleteUsageLimit", input, options)
end
@doc """
Returns a list of attributes attached to an account
"""
def describe_account_attributes(client, input, options \\ []) do
request(client, "DescribeAccountAttributes", input, options)
end
@doc """
Returns an array of `ClusterDbRevision` objects.
"""
def describe_cluster_db_revisions(client, input, options \\ []) do
request(client, "DescribeClusterDbRevisions", input, options)
end
@doc """
Returns a list of Amazon Redshift parameter groups, including parameter
groups you created and the default parameter group. For each parameter
group, the response includes the parameter group name, description, and
parameter group family name. You can optionally specify a name to retrieve
the description of a specific parameter group.
For more information about parameters and parameter groups, go to [Amazon
Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all parameter groups that match any combination of the
specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all
parameter groups that have any combination of those values are returned.
If both tag keys and values are omitted from the request, parameter groups
are returned regardless of whether they have tag keys or values associated
with them.
"""
def describe_cluster_parameter_groups(client, input, options \\ []) do
request(client, "DescribeClusterParameterGroups", input, options)
end
@doc """
Returns a detailed list of parameters contained within the specified Amazon
Redshift parameter group. For each parameter the response includes
information such as parameter name, description, data type, value, whether
the parameter value is modifiable, and so on.
You can specify a *source* filter to retrieve parameters of only a specific
type. For example, to retrieve parameters that were modified by a user
action such as from `ModifyClusterParameterGroup`, you can specify *source*
equal to *user*.
For more information about parameters and parameter groups, go to [Amazon
Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_cluster_parameters(client, input, options \\ []) do
request(client, "DescribeClusterParameters", input, options)
end
@doc """
Returns information about Amazon Redshift security groups. If the name of a
security group is specified, the response will contain information
about only that security group.
For information about managing security groups, go to [Amazon Redshift
Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all security groups that match any combination of the
specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all
security groups that have any combination of those values are returned.
If both tag keys and values are omitted from the request, security groups
are returned regardless of whether they have tag keys or values associated
with them.
"""
def describe_cluster_security_groups(client, input, options \\ []) do
request(client, "DescribeClusterSecurityGroups", input, options)
end
@doc """
Returns one or more snapshot objects, which contain metadata about your
cluster snapshots. By default, this operation returns information about all
snapshots of all clusters that are owned by your AWS customer account. No
information is returned for snapshots owned by inactive AWS customer
accounts.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all snapshots that match any combination of the specified
keys and values. For example, if you have `owner` and `environment` for tag
keys, and `admin` and `test` for tag values, all snapshots that have any
combination of those values are returned. Only snapshots that you own are
returned in the response; shared snapshots are not returned with the tag
key and tag value request parameters.
If both tag keys and values are omitted from the request, snapshots are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_cluster_snapshots(client, input, options \\ []) do
request(client, "DescribeClusterSnapshots", input, options)
end
@doc """
Returns one or more cluster subnet group objects, which contain metadata
about your cluster subnet groups. By default, this operation returns
information about all cluster subnet groups that are defined in your AWS
account.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all subnet groups that match any combination of the
specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all
subnet groups that have any combination of those values are returned.
If both tag keys and values are omitted from the request, subnet groups are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_cluster_subnet_groups(client, input, options \\ []) do
request(client, "DescribeClusterSubnetGroups", input, options)
end
@doc """
Returns a list of all the available maintenance tracks.
"""
def describe_cluster_tracks(client, input, options \\ []) do
request(client, "DescribeClusterTracks", input, options)
end
@doc """
Returns descriptions of the available Amazon Redshift cluster versions. You
can call this operation even before creating any clusters to learn more
about the Amazon Redshift versions. For more information about managing
clusters, go to [Amazon Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_cluster_versions(client, input, options \\ []) do
request(client, "DescribeClusterVersions", input, options)
end
@doc """
Returns properties of provisioned clusters including general cluster
properties, cluster database properties, maintenance and backup properties,
and security and access properties. This operation supports pagination. For
more information about managing clusters, go to [Amazon Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all clusters that match any combination of the specified
keys and values. For example, if you have `owner` and `environment` for tag
keys, and `admin` and `test` for tag values, all clusters that have any
combination of those values are returned.
If both tag keys and values are omitted from the request, clusters are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_clusters(client, input, options \\ []) do
request(client, "DescribeClusters", input, options)
end
@doc """
Returns a list of parameter settings for the specified parameter group
family.
For more information about parameters and parameter groups, go to [Amazon
Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_default_cluster_parameters(client, input, options \\ []) do
request(client, "DescribeDefaultClusterParameters", input, options)
end
@doc """
Displays a list of event categories for all event source types, or for a
specified source type. For a list of the event categories and source types,
go to [Amazon Redshift Event
Notifications](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html).
"""
def describe_event_categories(client, input, options \\ []) do
request(client, "DescribeEventCategories", input, options)
end
@doc """
Lists descriptions of all the Amazon Redshift event notification
subscriptions for a customer account. If you specify a subscription name,
lists the description for that subscription.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all event notification subscriptions that match any
combination of the specified keys and values. For example, if you have
`owner` and `environment` for tag keys, and `admin` and `test` for tag
values, all subscriptions that have any combination of those values are
returned.
If both tag keys and values are omitted from the request, subscriptions are
returned regardless of whether they have tag keys or values associated with
them.
"""
def describe_event_subscriptions(client, input, options \\ []) do
request(client, "DescribeEventSubscriptions", input, options)
end
@doc """
Returns events related to clusters, security groups, snapshots, and
parameter groups for the past 14 days. Events specific to a particular
cluster, security group, snapshot or parameter group can be obtained by
providing the name as a parameter. By default, events from the past hour
are returned.
"""
def describe_events(client, input, options \\ []) do
request(client, "DescribeEvents", input, options)
end
@doc """
Returns information about the specified HSM client certificate. If no
certificate ID is specified, returns information about all the HSM
certificates owned by your AWS customer account.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all HSM client certificates that match any combination of
the specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all HSM
client certificates that have any combination of those values are returned.
If both tag keys and values are omitted from the request, HSM client
certificates are returned regardless of whether they have tag keys or
values associated with them.
"""
def describe_hsm_client_certificates(client, input, options \\ []) do
request(client, "DescribeHsmClientCertificates", input, options)
end
@doc """
Returns information about the specified Amazon Redshift HSM configuration.
If no configuration ID is specified, returns information about all the HSM
configurations owned by your AWS customer account.
If you specify both tag keys and tag values in the same request, Amazon
Redshift returns all HSM connections that match any combination of the
specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all HSM
connections that have any combination of those values are returned.
If both tag keys and values are omitted from the request, HSM connections
are returned regardless of whether they have tag keys or values associated
with them.
"""
def describe_hsm_configurations(client, input, options \\ []) do
request(client, "DescribeHsmConfigurations", input, options)
end
@doc """
Describes whether information, such as queries and connection attempts, is
being logged for the specified Amazon Redshift cluster.
"""
def describe_logging_status(client, input, options \\ []) do
request(client, "DescribeLoggingStatus", input, options)
end
@doc """
Returns properties of possible node configurations such as node type,
number of nodes, and disk usage for the specified action type.
"""
def describe_node_configuration_options(client, input, options \\ []) do
request(client, "DescribeNodeConfigurationOptions", input, options)
end
@doc """
Returns a list of orderable cluster options. Before you create a new
cluster you can use this operation to find what options are available, such
as the EC2 Availability Zones (AZ) in the specific AWS Region that you can
specify, and the node types you can request. The node types differ by
available storage, memory, CPU, and price. Because costs vary, you might
want to obtain a list of cluster options in the specific region and specify
values when creating a cluster. For more information about managing
clusters, go to [Amazon Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_orderable_cluster_options(client, input, options \\ []) do
request(client, "DescribeOrderableClusterOptions", input, options)
end
@doc """
Returns a list of the available reserved node offerings by Amazon Redshift
with their descriptions including the node type, the fixed and recurring
costs of reserving the node and duration the node will be reserved for you.
These descriptions help you determine which reserve node offering you want
to purchase. You then use the unique offering ID in your call to
`PurchaseReservedNodeOffering` to reserve one or more nodes for your Amazon
Redshift cluster.
For more information about reserved node offerings, go to [Purchasing
Reserved
Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_reserved_node_offerings(client, input, options \\ []) do
request(client, "DescribeReservedNodeOfferings", input, options)
end
@doc """
Returns the descriptions of the reserved nodes.
"""
def describe_reserved_nodes(client, input, options \\ []) do
request(client, "DescribeReservedNodes", input, options)
end
@doc """
Returns information about the last resize operation for the specified
cluster. If no resize operation has ever been initiated for the specified
cluster, an `HTTP 404` error is returned. If a resize operation was
initiated and completed, the status of the resize remains as `SUCCEEDED`
until the next resize.
A resize operation can be requested using `ModifyCluster` and specifying a
different number or type of nodes for the cluster.
"""
def describe_resize(client, input, options \\ []) do
request(client, "DescribeResize", input, options)
end
@doc """
Describes properties of scheduled actions.
"""
def describe_scheduled_actions(client, input, options \\ []) do
request(client, "DescribeScheduledActions", input, options)
end
@doc """
Returns a list of snapshot copy grants owned by the AWS account in the
destination region.
For more information about managing snapshot copy grants, go to [Amazon
Redshift Database
Encryption](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def describe_snapshot_copy_grants(client, input, options \\ []) do
request(client, "DescribeSnapshotCopyGrants", input, options)
end
@doc """
Returns a list of snapshot schedules.
"""
def describe_snapshot_schedules(client, input, options \\ []) do
request(client, "DescribeSnapshotSchedules", input, options)
end
@doc """
Returns account level backups storage size and provisional storage.
"""
def describe_storage(client, input, options \\ []) do
request(client, "DescribeStorage", input, options)
end
@doc """
Lists the status of one or more table restore requests made using the
`RestoreTableFromClusterSnapshot` API action. If you don't specify a value
for the `TableRestoreRequestId` parameter, then
`DescribeTableRestoreStatus` returns the status of all table restore
requests ordered by the date and time of the request in ascending order.
Otherwise `DescribeTableRestoreStatus` returns the status of the table
specified by `TableRestoreRequestId`.
"""
def describe_table_restore_status(client, input, options \\ []) do
request(client, "DescribeTableRestoreStatus", input, options)
end
@doc """
Returns a list of tags. You can return tags from a specific resource by
specifying an ARN, or you can return all tags for a given type of resource,
such as clusters, snapshots, and so on.
The following are limitations for `DescribeTags`:
  * You cannot specify an ARN and a resource-type value together in the
    same request.
  * You cannot use the `MaxRecords` and `Marker` parameters together with
    the ARN parameter.
  * The `MaxRecords` parameter can be a range from 10 to 50 results to
    return in a request.

If you specify both tag keys and tag values in the same
request, Amazon Redshift returns all resources that match any combination
of the specified keys and values. For example, if you have `owner` and
`environment` for tag keys, and `admin` and `test` for tag values, all
resources that have any combination of those values are returned.
If both tag keys and values are omitted from the request, resources are
returned regardless of whether they have tag keys or values associated with
them.
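
For example, to list the tags on a single resource (the ARN shown is
illustrative):

    describe_tags(client, %{
      "ResourceName" => "arn:aws:redshift:us-east-1:123456789012:cluster:my-cluster"
    })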
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Shows usage limits on a cluster. Results are filtered based on the
combination of input usage limit identifier, cluster identifier, and
feature type parameters:
  * If usage limit identifier, cluster identifier, and feature type are
    not provided, then all usage limit objects for the current account in
    the current region are returned.
  * If usage limit identifier is provided, then the corresponding usage
    limit object is returned.
  * If cluster identifier is provided, then all usage limit objects for
    the specified cluster are returned.
  * If cluster identifier and feature type are provided, then all usage
    limit objects for the combination of cluster and feature are returned.
"""
def describe_usage_limits(client, input, options \\ []) do
request(client, "DescribeUsageLimits", input, options)
end
@doc """
Stops logging information, such as queries and connection attempts, for the
specified Amazon Redshift cluster.
"""
def disable_logging(client, input, options \\ []) do
request(client, "DisableLogging", input, options)
end
@doc """
Disables the automatic copying of snapshots from one region to another
region for a specified cluster.
If your cluster and its snapshots are encrypted using a customer master key
(CMK) from AWS KMS, use `DeleteSnapshotCopyGrant` to delete the grant that
grants Amazon Redshift permission to the CMK in the destination region.
"""
def disable_snapshot_copy(client, input, options \\ []) do
request(client, "DisableSnapshotCopy", input, options)
end
@doc """
Starts logging information, such as queries and connection attempts, for
the specified Amazon Redshift cluster.
"""
def enable_logging(client, input, options \\ []) do
request(client, "EnableLogging", input, options)
end
@doc """
Enables the automatic copy of snapshots from one region to another region
for a specified cluster.
"""
def enable_snapshot_copy(client, input, options \\ []) do
request(client, "EnableSnapshotCopy", input, options)
end
@doc """
Returns a database user name and temporary password with temporary
authorization to log on to an Amazon Redshift database. The action returns
the database user name prefixed with `IAM:` if `AutoCreate` is `False` or
`IAMA:` if `AutoCreate` is `True`. You can optionally specify one or more
database user groups that the user will join at log on. By default, the
temporary credentials expire in 900 seconds. You can optionally specify a
duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes).
For more information, see [Using IAM Authentication to Generate Database
User
Credentials](https://docs.aws.amazon.com/redshift/latest/mgmt/generating-user-credentials.html)
in the Amazon Redshift Cluster Management Guide.
The AWS Identity and Access Management (IAM) user or role that executes
`GetClusterCredentials` must have an IAM policy attached that allows access
to all necessary actions and resources. For more information about
permissions, see [Resource Policies for
GetClusterCredentials](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources)
in the Amazon Redshift Cluster Management Guide.
If the `DbGroups` parameter is specified, the IAM policy must allow the
`redshift:JoinGroup` action with access to the listed `dbgroups`.
In addition, if the `AutoCreate` parameter is set to `True`, then the
policy must include the `redshift:CreateClusterUser` privilege.
If the `DbName` parameter is specified, the IAM policy must allow access to
the resource `dbname` for the specified database name.
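
A minimal sketch of a call (illustrative identifiers; parameter names
follow the Redshift query API):

    get_cluster_credentials(client, %{
      "ClusterIdentifier" => "my-cluster",
      "DbUser" => "temp_user",
      "DurationSeconds" => 900,
      "AutoCreate" => true
    })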
"""
def get_cluster_credentials(client, input, options \\ []) do
request(client, "GetClusterCredentials", input, options)
end
@doc """
Returns an array of DC2 ReservedNodeOfferings that match the payment
type, term, and usage price of the given DC1 reserved node.
"""
def get_reserved_node_exchange_offerings(client, input, options \\ []) do
request(client, "GetReservedNodeExchangeOfferings", input, options)
end
@doc """
Modifies the settings for a cluster.
You can also change node type and the number of nodes to scale up or down
the cluster. When resizing a cluster, you must specify both the number of
nodes and the node type even if one of the parameters does not change.
You can add another security or parameter group, or change the master user
password. Resetting a cluster password or modifying the security groups
associated with a cluster do not need a reboot. However, modifying a
parameter group requires a reboot for parameters to take effect. For more
information about managing clusters, go to [Amazon Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def modify_cluster(client, input, options \\ []) do
request(client, "ModifyCluster", input, options)
end
@doc """
Modifies the database revision of a cluster. The database revision is a
unique revision of the database running in a cluster.
"""
def modify_cluster_db_revision(client, input, options \\ []) do
request(client, "ModifyClusterDbRevision", input, options)
end
@doc """
Modifies the list of AWS Identity and Access Management (IAM) roles that
can be used by the cluster to access other AWS services.
A cluster can have up to 10 IAM roles associated at any time.
"""
def modify_cluster_iam_roles(client, input, options \\ []) do
request(client, "ModifyClusterIamRoles", input, options)
end
@doc """
Modifies the maintenance settings of a cluster.
"""
def modify_cluster_maintenance(client, input, options \\ []) do
request(client, "ModifyClusterMaintenance", input, options)
end
@doc """
Modifies the parameters of a parameter group.
For more information about parameters and parameter groups, go to [Amazon
Redshift Parameter
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def modify_cluster_parameter_group(client, input, options \\ []) do
request(client, "ModifyClusterParameterGroup", input, options)
end
@doc """
Modifies the settings for a snapshot.
For example, you can use it to modify the manual retention period setting
for a cluster snapshot.
"""
def modify_cluster_snapshot(client, input, options \\ []) do
request(client, "ModifyClusterSnapshot", input, options)
end
@doc """
Modifies a snapshot schedule for a cluster.
"""
def modify_cluster_snapshot_schedule(client, input, options \\ []) do
request(client, "ModifyClusterSnapshotSchedule", input, options)
end
@doc """
Modifies a cluster subnet group to include the specified list of VPC
subnets. The operation replaces the existing list of subnets with the new
list of subnets.
"""
def modify_cluster_subnet_group(client, input, options \\ []) do
request(client, "ModifyClusterSubnetGroup", input, options)
end
@doc """
Modifies an existing Amazon Redshift event notification subscription.
"""
def modify_event_subscription(client, input, options \\ []) do
request(client, "ModifyEventSubscription", input, options)
end
@doc """
Modifies a scheduled action.
"""
def modify_scheduled_action(client, input, options \\ []) do
request(client, "ModifyScheduledAction", input, options)
end
@doc """
Modifies the number of days to retain snapshots in the destination AWS
Region after they are copied from the source AWS Region. By default, this
operation only changes the retention period of copied automated snapshots.
The retention periods for both new and existing copied automated snapshots
are updated with the new retention period. You can set the manual option to
change only the retention periods of copied manual snapshots. If you set
this option, only newly copied manual snapshots have the new retention
period.
"""
def modify_snapshot_copy_retention_period(client, input, options \\ []) do
request(client, "ModifySnapshotCopyRetentionPeriod", input, options)
end
@doc """
Modifies a snapshot schedule. Any schedule associated with a cluster is
modified asynchronously.
"""
def modify_snapshot_schedule(client, input, options \\ []) do
request(client, "ModifySnapshotSchedule", input, options)
end
@doc """
Modifies a usage limit in a cluster. You can't modify the feature type or
period of a usage limit.
"""
def modify_usage_limit(client, input, options \\ []) do
request(client, "ModifyUsageLimit", input, options)
end
@doc """
Pauses a cluster.
"""
def pause_cluster(client, input, options \\ []) do
request(client, "PauseCluster", input, options)
end
@doc """
Allows you to purchase reserved nodes. Amazon Redshift offers a predefined
set of reserved node offerings. You can purchase one or more of the
offerings. You can call the `DescribeReservedNodeOfferings` API to obtain
the available reserved node offerings. You can call this API by providing a
specific reserved node offering and the number of nodes you want to
reserve.
For more information about reserved node offerings, go to [Purchasing
Reserved
Nodes](https://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
in the *Amazon Redshift Cluster Management Guide*.
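
For example (the offering ID is illustrative and would come from a prior
`DescribeReservedNodeOfferings` call):

    purchase_reserved_node_offering(client, %{
      "ReservedNodeOfferingId" => "438012d3-4052-4cc7-b2e3-8d3372e0e706",
      "NodeCount" => 2
    })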
"""
def purchase_reserved_node_offering(client, input, options \\ []) do
request(client, "PurchaseReservedNodeOffering", input, options)
end
@doc """
Reboots a cluster. This action is taken as soon as possible. It results in
a momentary outage to the cluster, during which the cluster status is set
to `rebooting`. A cluster event is created when the reboot is completed.
Any pending cluster modifications (see `ModifyCluster`) are applied at this
reboot. For more information about managing clusters, go to [Amazon
Redshift
Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def reboot_cluster(client, input, options \\ []) do
request(client, "RebootCluster", input, options)
end
@doc """
Sets one or more parameters of the specified parameter group to their
default values and sets the source values of the parameters to
"engine-default". To reset the entire parameter group specify the
*ResetAllParameters* parameter. For parameter changes to take effect you
must reboot any associated clusters.
"""
def reset_cluster_parameter_group(client, input, options \\ []) do
request(client, "ResetClusterParameterGroup", input, options)
end
@doc """
Changes the size of the cluster. You can change the cluster's type, or
change the number or type of nodes. The default behavior is to use the
elastic resize method. With an elastic resize, your cluster is available
for read and write operations more quickly than with the classic resize
method.
Elastic resize operations have the following restrictions:
  * You can only resize clusters of the following types:
    * dc2.large
    * dc2.8xlarge
    * ds2.xlarge
    * ds2.8xlarge
    * ra3.4xlarge
    * ra3.16xlarge
  * The type of nodes that you add must match the node type for the
    cluster.
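
A sketch of an elastic resize request (illustrative values):

    resize_cluster(client, %{
      "ClusterIdentifier" => "my-cluster",
      "NumberOfNodes" => 4
    })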
"""
def resize_cluster(client, input, options \\ []) do
request(client, "ResizeCluster", input, options)
end
@doc """
Creates a new cluster from a snapshot. By default, Amazon Redshift creates
the resulting cluster with the same configuration as the original cluster
from which the snapshot was created, except that the new cluster is created
with the default cluster security and parameter groups. After Amazon
Redshift creates the cluster, you can use the `ModifyCluster` API to
associate a different security group and different parameter group with the
restored cluster. If you are using a DS node type, you can also choose to
change to another DS node type of the same size during restore.
If you restore a cluster into a VPC, you must provide a cluster subnet
group where you want the cluster restored.
For more information about working with snapshots, go to [Amazon Redshift
Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def restore_from_cluster_snapshot(client, input, options \\ []) do
request(client, "RestoreFromClusterSnapshot", input, options)
end
@doc """
Creates a new table from a table in an Amazon Redshift cluster snapshot.
You must create the new table within the Amazon Redshift cluster that the
snapshot was taken from.
You cannot use `RestoreTableFromClusterSnapshot` to restore a table with
the same name as an existing table in an Amazon Redshift cluster. That is,
you cannot overwrite an existing table in a cluster with a restored table.
If you want to replace your original table with a new, restored table, then
rename or drop your original table before you call
`RestoreTableFromClusterSnapshot`. When you have renamed your original
table, then you can pass the original name of the table as the
`NewTableName` parameter value in the call to
`RestoreTableFromClusterSnapshot`. This way, you can replace the original
table with the table created from the snapshot.
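
A sketch of such a restore (illustrative values; parameter names follow the
Redshift query API):

    restore_table_from_cluster_snapshot(client, %{
      "ClusterIdentifier" => "my-cluster",
      "SnapshotIdentifier" => "my-snapshot",
      "SourceDatabaseName" => "dev",
      "SourceTableName" => "orders",
      "NewTableName" => "orders_restored"
    })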
"""
def restore_table_from_cluster_snapshot(client, input, options \\ []) do
request(client, "RestoreTableFromClusterSnapshot", input, options)
end
@doc """
Resumes a paused cluster.
"""
def resume_cluster(client, input, options \\ []) do
request(client, "ResumeCluster", input, options)
end
@doc """
Revokes an ingress rule in an Amazon Redshift security group for a
previously authorized IP range or Amazon EC2 security group. To add an
ingress rule, see `AuthorizeClusterSecurityGroupIngress`. For information
about managing security groups, go to [Amazon Redshift Cluster Security
Groups](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def revoke_cluster_security_group_ingress(client, input, options \\ []) do
request(client, "RevokeClusterSecurityGroupIngress", input, options)
end
@doc """
Removes the ability of the specified AWS customer account to restore the
specified snapshot. If the account is currently restoring the snapshot, the
restore will run to completion.
For more information about working with snapshots, go to [Amazon Redshift
Snapshots](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
in the *Amazon Redshift Cluster Management Guide*.
"""
def revoke_snapshot_access(client, input, options \\ []) do
request(client, "RevokeSnapshotAccess", input, options)
end
@doc """
Rotates the encryption keys for a cluster.
"""
def rotate_encryption_key(client, input, options \\ []) do
request(client, "RotateEncryptionKey", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "redshift"}
host = build_host("redshift", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-www-form-urlencoded"}
]
input = Map.merge(input, %{"Action" => action, "Version" => "2012-12-01"})
payload = AWS.Util.encode_query(input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/redshift.ex
| 0.902177
| 0.593992
|
redshift.ex
|
starcoder
|
if Code.ensure_loaded?(Plug) do
defmodule DataSpecs.Plug.Loader do
@moduledoc """
DataSpecs Plug
NOTE: this module is available if you include the optional dependency ":plug".
This module can be used to plug a "Jason.decode! -> DataSpecs.load" pipeline in your routes.
For example:
defmodule Api.Router.Something do
use Plug.Router
import #{__MODULE__}, only: [typeref: 1, typeref: 2, value: 1]
plug :match
plug #{__MODULE__}
plug :dispatch
post "/foo", typeref(Api.Model.Foo, :t) do
%Api.Model.Foo{...} = value(conn)
...
end
end
defmodule Api.Model.Foo do
defmodule Bar do
@enforce_keys [:b1, :b2]
defstruct [:b1, :b2]
@type t :: %__MODULE__{
b1: nil | number(),
b2: String.t()
}
end
@enforce_keys [:a]
defstruct [:a, :bars]
@type t :: %__MODULE__{
a: non_neg_integer(),
bars: [Bar.t()]
}
end
"""
use Plug.Builder
alias DataSpecs.Types
plug(Plug.Parsers, parsers: [:json], json_decoder: Jason)
plug(:load)
@spec typeref(module(), Types.type_id()) :: [assigns: %{dataspec: %{type: Types.type_ref(), value: term()}}]
@doc """
Declare the type the body of a route should conform to.
For example:
post "/foo", typeref(Model.Foo) do
...
end
"""
def typeref(module, type \\ :t), do: [assigns: %{dataspec: %{type: {module, type}, value: nil}}]
@spec value(Plug.Conn.t()) :: term()
@doc """
Get the loaded value.
For example:
mystruct = value(conn)
"""
def value(conn), do: conn.assigns.dataspec.value
@spec load(Plug.Conn.t(), Plug.opts()) :: Plug.Conn.t()
defp load(conn, _opts) do
with {:conn_dataspec, {:ok, type_ref}} <- {:conn_dataspec, conn_dataspec(conn)},
{:load, {:ok, value}} <- {:load, DataSpecs.load(conn.body_params, type_ref)} do
put_in(conn.assigns.dataspec.value, value)
else
{:conn_dataspec, :error} ->
raise """
You probably missed a typeref on this route.
post "/foo", #{__MODULE__}.typeref(Foo, :t) do
...
end
"""
{:load, {:error, reason}} ->
conn
|> resp(:bad_request, inspect(reason))
|> halt()
end
end
@spec conn_dataspec(map()) :: {:ok, Types.type_ref()} | :error
defp conn_dataspec(%{assigns: %{dataspec: %{type: type_ref}}}), do: {:ok, type_ref}
defp conn_dataspec(_), do: :error
end
end
# coveralls-ignore-end
|
lib/dataspecs/plug/loader.ex
| 0.760651
| 0.492127
|
loader.ex
|
starcoder
|
defmodule GitHelper do
@moduledoc """
Collection of lower-level functions for analyzing outputs from git command.
"""
@type contrib_count :: %{String.t() => integer}
@doc """
parse_diff/1: returns the relevant information contained in the last array position of a diff array
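## Examples
A sketch of the expected output, assuming a typical `git diff --shortstat` summary line:
    iex> GitHelper.parse_diff([" 3 files changed, 10 insertions(+), 2 deletions(-)"])
    {:ok, 3, 10, 2}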
"""
@spec parse_diff([String.t]) :: {:ok, non_neg_integer, non_neg_integer, non_neg_integer}
def parse_diff(list) do
last = List.last(list)
last_trimmed = String.trim(last)
commit_info = String.split(last_trimmed, ", ")
file_string = Enum.at(commit_info, 0)
if file_string == nil do
{:ok, 0, 0, 0}
else
insertion_string = Enum.at(commit_info, 1)
if insertion_string == nil do
[file_num | _tail] = String.split(file_string, " ")
{:ok, String.to_integer(file_num), 0, 0}
else
deletion_string = Enum.at(commit_info, 2)
if deletion_string == nil do
[file_num | _tail] = String.split(file_string, " ")
[insertion_num | _tail] = String.split(insertion_string, " ")
{:ok, String.to_integer(file_num), String.to_integer(insertion_num), 0}
else
[file_num | _tail] = String.split(file_string, " ")
[insertion_num | _tail] = String.split(insertion_string, " ")
[deletion_num | _tail] = String.split(deletion_string, " ")
{:ok, String.to_integer(file_num), String.to_integer(insertion_num),
String.to_integer(deletion_num)}
end
end
end
end
@doc """
get_avg_tag_commit_time_diff/1: returns the average time between commits within each subarray representing a tag
"""
def get_avg_tag_commit_time_diff(list) do
get_avg_tag_commit_time_diff(list, [])
end
@doc """
get_total_tag_commit_time_diff/1: returns the total time between commits within each subarray representing a tag
"""
def get_total_tag_commit_time_diff(list) do
get_total_tag_commit_time_diff(list, [])
end
@doc """
split_commits_by_tag/1: returns a list with sublists arranged by tag
"""
def split_commits_by_tag(list) do
split_commits_by_tag(list, [])
end
@doc """
get_contributor_counts/1: Gets the number of contributions belonging to each author and returns a map of %{name => number}
"""
def get_contributor_counts(list) do
get_contributor_counts(list, %{})
end
@doc """
get_filtered_contributor_count/2: Gets the filtered list of contributors, returning both the count and the list
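## Examples
An illustrative sketch (names and totals are made up):
    iex> GitHelper.get_filtered_contributor_count(%{"alice" => 5, "bob" => 1}, 6)
    {:ok, 1, [{"alice", 5}]}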
"""
@spec get_filtered_contributor_count(contrib_count, non_neg_integer) ::
{:ok, non_neg_integer, [{String.t(), integer}]}
def get_filtered_contributor_count(map, total) do
filtered_list =
Enum.filter(
map,
fn {_key, value} ->
value / total >= 1 / Kernel.map_size(map)
end
)
length = Kernel.length(filtered_list)
{:ok, length, filtered_list}
end
@spec parse_shortlog(String.t) :: [Contributor.t()]
def parse_shortlog(log) do
split_shortlog(log)
|> Enum.map(fn contributor ->
{name, email, count} = parse_header(contributor)
{merges, commits} = parse_commits(contributor)
{count, _} = Integer.parse(count)
%Contributor{
name: String.trim(name),
email: String.trim(email),
count: count,
merges: merges,
commits: commits
}
end)
|> filter_contributors()
end
defp split_shortlog(log) do
log
|> String.trim()
|> String.split(~r{\n\n})
end
defp parse_header(contributor) do
header =
contributor
|> String.split("\n")
|> Enum.at(0)
|> (&Regex.scan(~r{([^<]+)<([^;]*)>.\(([^:]+)\)}, &1)).()
|> Enum.at(0)
{Enum.at(header, 1), Enum.at(header, 2), Enum.at(header, 3)}
end
defp parse_commits(contributor) do
[_ | commits] = String.split(contributor, "\n")
commits = Enum.map(commits, fn commit -> String.trim(commit) end)
merges = Enum.count(commits, &(&1 =~ ~r/^(merge)+/i))
{merges, commits}
end
defp split_commits_by_tag([], current) do
{:ok, current}
end
defp split_commits_by_tag([first | rest], []) do
split_commits_by_tag(rest, [[first]])
end
defp split_commits_by_tag([first | rest], current) do
[head | _tail] = first
if String.contains?(head, "tag") do
new_current = [[first] | current]
split_commits_by_tag(rest, new_current)
else
[current_head | current_tail] = current
new_current = [[first | current_head] | current_tail]
split_commits_by_tag(rest, new_current)
end
end
defp get_total_tag_commit_time_diff([first | tail], accumulator) do
{:ok, time} = TimeHelper.sum_ts_diff(first)
ret = [time | accumulator]
get_total_tag_commit_time_diff(tail, ret)
end
defp get_total_tag_commit_time_diff([], accumulator) do
{:ok, accumulator}
end
defp get_avg_tag_commit_time_diff([first | tail], accumulator) do
{:ok, time} = TimeHelper.sum_ts_diff(first)
ret = [time / Kernel.length(first) | accumulator]
get_avg_tag_commit_time_diff(tail, ret)
end
defp get_avg_tag_commit_time_diff([], accumulator) do
{:ok, accumulator}
end
@spec get_contributor_counts([String.t()], contrib_count) :: {:ok, contrib_count}
defp get_contributor_counts([head | tail], accumulator) do
if head == "" do
get_contributor_counts(tail, accumulator)
else
# Count occurrences under the trimmed name so that whitespace variants
# of the same author collapse into a single key.
key = String.trim(head)
get_contributor_counts(tail, Map.update(accumulator, key, 1, &(&1 + 1)))
end
end
defp get_contributor_counts([], accumulator) do
{:ok, accumulator}
end
defp name_sorter(x) do
# Create a name metric to compare with
10 * length(String.split(x, " ")) + String.length(x)
end
@spec filter_contributors([any]) :: [any]
defp filter_contributors([]) do
[]
end
@spec filter_contributors([Contributor.t()]) :: [Contributor.t()]
defp filter_contributors(list) do
is_author = fn x, y -> String.downcase(x.email) == String.downcase(y.email) end
# Divide the list
cur_contrib = for item <- list, is_author.(item, hd(list)) == true, do: item
other = for item <- list, is_author.(item, hd(list)) == false, do: item
# Determine the best name
# for now, just the first one
name_list = for a <- cur_contrib, do: a.name
best_name =
Enum.sort_by(name_list, &name_sorter/1, &>=/2)
|> Enum.at(0)
# Create the new contributor object
contrib_ret = %Contributor{
name: best_name,
email: hd(list).email,
commits: List.flatten(for a <- cur_contrib, do: a.commits),
merges: Enum.sum(for a <- cur_contrib, do: a.merges),
count: Enum.sum(for a <- cur_contrib, do: a.count)
}
[contrib_ret | filter_contributors(other)]
end
end
|
lib/git_helper.ex
| 0.731251
| 0.483831
|
git_helper.ex
|
starcoder
|
defmodule BatchPlease.DynamicResolvers do
@moduledoc false
@definitely_not_a_module_doc """
This module provides dynamic function resolution for BatchPlease's `batch_*`
callbacks.
For example, a function `do_XXXX(state, batch)` in this module would do
the following:
1. If `state.config.XXXX` is defined, run that.
2. If a function `XXXX` of the correct arity exists in module `state.module`,
run that.
3. Raise an exception or pass the input through.
"""
@doc false
@spec do_batch_init(BatchPlease.state, BatchPlease.opts) :: BatchPlease.batch_return | no_return
def do_batch_init(state, opts) do
cond do
state.overrides.batch_init ->
state.overrides.batch_init.(opts)
function_exported?(state.module, :batch_init, 1) ->
state.module.batch_init(opts)
:else ->
raise UndefinedFunctionError, message: "batch_init/1 is not defined locally or in module #{state.module}"
end
end
@doc false
@spec do_batch_add_item(BatchPlease.state, BatchPlease.batch, BatchPlease.item) :: BatchPlease.batch_return | no_return
def do_batch_add_item(state, batch, item) do
cond do
state.overrides.batch_add_item ->
state.overrides.batch_add_item.(batch, item)
function_exported?(state.module, :batch_add_item, 2) ->
state.module.batch_add_item(batch, item)
:else ->
raise UndefinedFunctionError, message: "batch_add_item/2 is not defined locally or in module #{state.module}"
end
end
@doc false
@spec do_batch_pre_flush(BatchPlease.state, BatchPlease.batch) :: BatchPlease.batch_return
def do_batch_pre_flush(state, batch) do
cond do
state.overrides.batch_pre_flush ->
state.overrides.batch_pre_flush.(batch)
function_exported?(state.module, :batch_pre_flush, 1) ->
state.module.batch_pre_flush(batch)
:else ->
{:ok, batch}
end
end
@doc false
@spec do_batch_flush(BatchPlease.state, BatchPlease.batch) :: BatchPlease.ok_or_error | no_return
def do_batch_flush(state, batch) do
cond do
state.overrides.batch_flush ->
state.overrides.batch_flush.(batch)
function_exported?(state.module, :batch_flush, 1) ->
state.module.batch_flush(batch)
:else ->
raise UndefinedFunctionError, message: "batch_flush/1 is not defined locally or in module #{state.module}"
end
end
@doc false
@spec do_batch_post_flush(BatchPlease.state, BatchPlease.batch) :: BatchPlease.ok_or_error
def do_batch_post_flush(state, batch) do
cond do
state.overrides.batch_post_flush ->
state.overrides.batch_post_flush.(batch)
function_exported?(state.module, :batch_post_flush, 1) ->
state.module.batch_post_flush(batch)
:else ->
:ok
end
end
@doc false
@spec do_batch_terminate(BatchPlease.state, BatchPlease.batch) :: BatchPlease.ok_or_error
def do_batch_terminate(state, batch) do
cond do
state.overrides.batch_terminate ->
state.overrides.batch_terminate.(batch)
function_exported?(state.module, :batch_terminate, 1) ->
state.module.batch_terminate(batch)
:else ->
:ok
end
end
@doc false
@spec do_should_flush(BatchPlease.state) :: boolean
def do_should_flush(state) do
cond do
state.overrides.should_flush ->
state.overrides.should_flush.(state)
function_exported?(state.module, :should_flush, 1) ->
state.module.should_flush(state)
:else ->
false
end
end
end
|
lib/batch_please/dynamic_resolvers.ex
| 0.750004
| 0.45042
|
dynamic_resolvers.ex
|
starcoder
|
defmodule DigitalOcean.VolumeAction do
alias DigitalOcean.{ Operation }
@doc """
Create an action for a volume.
## Example for attaching a volume to a droplet
iex> DigitalOcean.VolumeAction.create(
...> "7724db7c-e098-11e5-b522-000f53304e51",
...> type: "attach",
...> droplet_id: 11612190,
...> region: "nyc1",
...> tags: ["aninterestingtag"]
...> ) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
## Example for removing a volume from a droplet
iex> DigitalOcean.VolumeAction.create(
...> "7724db7c-e098-11e5-b522-000f53304e51",
...> type: "detach",
...> droplet_id: 11612190,
...> region: "nyc1"
...> ) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
## Example for resizing a volume
iex> DigitalOcean.VolumeAction.create(
...> "7724db7c-e098-11e5-b522-000f53304e51",
...> type: "resize",
...> size_gigabytes: 100,
...> region: "nyc1"
...> ) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
"""
@spec create(String.t(), Keyword.t()) :: Operation.t()
def create(volume_id, opts) do
%Operation{}
|> Map.put(:method, :post)
|> Map.put(:params, opts)
|> Map.put(:path, "/volumes/#{volume_id}/actions")
end
@doc """
Create an action for a volume by name.
## Example for attaching a volume to a droplet
iex> DigitalOcean.VolumeAction.create(
...> type: "attach",
...> volume_name: "example",
...> region: "nyc1",
...> droplet_id: 11612190,
...> tags: ["aninterestingname"]
...> ) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
## Example for detaching a volume from a droplet
iex> DigitalOcean.VolumeAction.create(
...> type: "detach",
...> droplet_id: 11612190,
...> volume_name: "example",
...> region: "nyc1"
...> ) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
"""
@spec create_by_name(Keyword.t()) :: Operation.t()
def create_by_name(opts) do
%Operation{}
|> Map.put(:method, :post)
|> Map.put(:params, opts)
|> Map.put(:path, "/volumes/actions")
end
@doc """
Retrieve a volume action.
## Examples
iex> DigitalOcean.VolumeAction.get("7724db7c-e098-11e5-b522-000f53304e51", 72531856) |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
"""
@spec get(String.t(), DigitalOcean.id_t()) :: Operation.t()
def get(volume_id, action_id) do
%Operation{}
|> Map.put(:method, :get)
|> Map.put(:path, "/volumes/#{volume_id}/actions/#{action_id}")
end
@doc """
Retrieve a list of actions that have been executed on a volume.
## Examples
iex> DigitalOcean.VolumeAction.list("7724db7c-e098-11e5-b522-000f53304e51") |> DigitalOcean.request()
{ :ok, %DigitalOcean.Response{} }
"""
@spec list(String.t(), Keyword.t()) :: Operation.t()
def list(volume_id, opts \\ []) do
%Operation{}
|> Map.put(:method, :get)
|> Map.put(:params, opts)
|> Map.put(:path, "/volumes/#{volume_id}/actions")
end
end
|
lib/digital_ocean/volume_action.ex
| 0.840079
| 0.433262
|
volume_action.ex
|
starcoder
|
defmodule Algae.Reader do
@moduledoc ~S"""
`Algae.Reader` allows you to pass some readable context around through actions.
This is useful in a number of situations, but the most common case is to weave
access to environment variables monadically.
For an illustrated guide to `Reader`s,
see [Three Useful Monads](http://adit.io/posts/2013-06-10-three-useful-monads.html#the-state-monad).
## Examples
iex> use Witchcraft
...>
...> correct =
...> monad %Algae.Reader{} do
...> count <- ask &Map.get(&1, :count)
...> bindings <- ask()
...> return (count == Kernel.map_size(bindings))
...> end
...>
...> sample_bindings = %{count: 3, a: 1, b: 2}
...> correct_count = run(correct, sample_bindings)
...> "Correct count for %{a: 1, b: 2, count: 3}? true" == "Correct count for #{inspect sample_bindings}? #{correct_count}"
true
iex> bad_bindings = %{count: 100, a: 1, b: 2}
...> bad_count = run(correct, bad_bindings)
...> _ = "Correct count for #{inspect bad_bindings}? #{bad_count}"
"Correct count for %{a: 1, b: 2, count: 100}? false"
Example adapted from
[source](https://hackage.haskell.org/package/mtl-2.2.1/docs/Control-Monad-Reader.html)
"""
alias __MODULE__
import Algae
use Witchcraft
defdata(fun())
@doc """
`Reader` constructor.
## Examples
iex> newbie = new(fn x -> x * 10 end)
...> newbie.reader.(10)
100
"""
@spec new(fun()) :: t()
def new(fun), do: %Reader{reader: fun}
@doc """
Run the reader function with some argument.
iex> reader = new(fn x -> x + 5 end)
...> run(reader, 42)
47
This is the opposite of `new/1`.
iex> fun = fn x -> x + 5 end
...> fun.(42) == fun |> new() |> run(42)
true
"""
@spec run(t(), any()) :: any()
def run(%Reader{reader: fun}, arg), do: fun.(arg)
@doc """
Get the wrapped environment. Especially useful in monadic do-notation.
## Examples
iex> run(ask(), 42)
42
iex> use Witchcraft
...>
...> example_fun =
...> fn x ->
...> monad %Algae.Reader{} do
...> e <- ask()
...> return {x, e}
...> end
...> end
...>
...> 42
...> |> example_fun.()
...> |> run(7)
{42, 7}
"""
@spec ask() :: t()
def ask, do: Reader.new(fn x -> x end)
@doc ~S"""
Similar to `new/1` and `ask/0`. Construct an `Algae.Reader`,
but apply a function to the constructed envoronment.
The pun here is that you're "asking" a function for something.
## Examples
iex> fn x -> x * 10 end
...> |> ask()
...> |> run(5)
50
iex> use Witchcraft
...>
...> foo =
...> fn words ->
...> monad %Algae.Reader{} do
...> loud <- ask &(&1 == String.upcase(&1))
...> return(words <> (if loud, do: "!", else: "."))
...> end
...> end
...>
...> "Hello" |> foo.() |> run("WORLD") # "WORLD" is the context being asked for
"Hello!"
"""
@spec ask((any() -> any())) :: t()
def ask(fun) do
monad %Reader{} do
e <- ask
return(fun.(e))
end
end
@doc """
Locally composes a function into a `Reader`.
Often the idea is to temporarily adapt the `Reader` without continuing this
change in later `run`s.
## Examples
iex> ask()
...> |> local(fn word -> word <> "!" end)
...> |> local(&String.upcase/1)
...> |> run("o hai thar")
"O HAI THAR!"
"""
@spec local(t(), (any() -> any())) :: any()
def local(reader, fun) do
monad %Reader{} do
e <- ask
return(run(reader, fun.(e)))
end
end
end
|
lib/algae/reader.ex
| 0.807878
| 0.485783
|
reader.ex
|
starcoder
|
defmodule ServerSentEventStage do
@moduledoc """
A GenStage producer which parses the ServerSentEvent (SSE) protocol.
SSEs are used in browsers via the EventSource API, but they can be used for
any kind of one-directional streaming.
For more information, see the [W3C](https://html.spec.whatwg.org/multipage/server-sent-events.html).
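
## Example

A minimal sketch of starting the stage (the URL is illustrative):

    {:ok, producer} = ServerSentEventStage.start_link(url: "https://example.com/stream")
    # Any GenStage consumer can then subscribe to `producer` to receive
    # `ServerSentEventStage.Event` structs as they arrive.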
"""
use GenStage
require Logger
alias ServerSentEventStage.Event
# Client functions
@doc """
Starts a producer stage which parses the ServerSentEvent protocol and sends those messages as events.
The only required argument is `url`: it can be either a binary of the URL
to connect to or a {module, fun, arguments} tuple.
Other arguments are passed as options to `GenStage.start_link/3`.
"""
def start_link(args) do
_url = Keyword.fetch!(args, :url)
opts = Keyword.take(args, ~w(debug name timeout spawn_opt)a)
GenStage.start_link(__MODULE__, args, opts)
end
@doc """
Refresh the connection by disconnecting and reconnecting.
Some endpoints send a final message but do not terminate the
connection. This function allows a client of this stage to reconnect.
"""
def refresh(server) do
GenStage.cast(server, :refresh)
end
# Server functions
defstruct [:url, :headers, :id, buffer: "", state: :not_connected]
@doc false
def init(args) do
state = %__MODULE__{
url: Keyword.fetch!(args, :url),
headers: Keyword.get(args, :headers, [])
}
{:producer, state}
end
@doc false
def handle_info(:connect, state) do
url = compute_url(state)
{:ok, id} = connect_to_url(url, state.headers)
{:noreply, [], %{state | id: id}}
end
def handle_info(%HTTPoison.AsyncStatus{id: id, code: code}, %{id: id} = state) do
state = %{state | state: :connected}
state =
if code == 200 do
Logger.debug(fn -> "#{__MODULE__} connected" end)
state
else
Logger.warn(fn -> "#{__MODULE__} unexpected status: #{code}" end)
do_refresh(state)
end
{:noreply, [], state}
end
def handle_info(%HTTPoison.AsyncHeaders{id: id}, %{id: id} = state) do
{:noreply, [], state}
end
def handle_info(
%HTTPoison.AsyncChunk{id: id, chunk: chunk},
%{id: id, state: :connected} = state
) do
buffer = state.buffer <> chunk
event_binaries = String.split(buffer, "\n\n")
{event_binaries, [buffer]} = Enum.split(event_binaries, -1)
events = Enum.map(event_binaries, &Event.from_string/1)
unless events == [] do
Logger.info(fn -> "#{__MODULE__} sending #{length(events)} events" end)
for event <- events do
Logger.debug(fn ->
inspect(event, limit: :infinity, printable_limit: :infinity)
end)
end
end
state = %{state | buffer: buffer}
{:noreply, events, state}
end
def handle_info(%HTTPoison.Error{id: id, reason: reason}, %{id: id} = state) do
Logger.error(fn -> "#{__MODULE__} HTTP error: #{inspect(reason)}" end)
state = reset_state(state)
send(self(), :connect)
{:noreply, [], state}
end
def handle_info(%HTTPoison.AsyncEnd{id: id}, %{id: id} = state) do
Logger.info(fn -> "#{__MODULE__} disconnected, reconnecting..." end)
state = reset_state(state)
send(self(), :connect)
{:noreply, [], state}
end
def handle_info(%HTTPoison.AsyncRedirect{id: id, to: location}, %{id: id} = state) do
{:ok, id} = connect_to_url(location, state.headers)
state = reset_state(state)
{:noreply, [], %{state | id: id}}
end
def handle_info(msg, state) do
# ignore data received unexpectedly
Logger.warn(fn ->
"#{__MODULE__} unexpected message: #{inspect(msg)}\nState: #{inspect(state)}"
end)
{:noreply, [], state}
end
@doc false
def handle_demand(_demand, state) do
:ok = maybe_connect(state)
{:noreply, [], state}
end
@doc false
def handle_cast(:refresh, state) do
state = do_refresh(state)
{:noreply, [], state}
end
defp connect_to_url(url, headers) do
Logger.debug(fn -> "#{__MODULE__} requesting #{url}" end)
headers = [
{"Accept", "text/event-stream"} | headers
]
{:ok, %{id: id}} =
HTTPoison.get(
url,
headers,
recv_timeout: 60_000,
follow_redirect: true,
stream_to: self()
)
{:ok, id}
end
defp maybe_connect(%{state: :not_connected}) do
send(self(), :connect)
:ok
end
defp maybe_connect(_state) do
:ok
end
defp compute_url(%{url: {m, f, a}}) do
apply(m, f, a)
end
defp compute_url(%{url: url}) when is_binary(url) do
url
end
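# For example (hypothetical module), an MFA url is resolved lazily on each
# (re)connect, which is useful when the URL embeds rotating credentials:
#
#   ServerSentEventStage.start_link(url: {MyApp.Endpoints, :sse_url, []})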
defp reset_state(state) do
%{state | id: nil, buffer: ""}
end
defp do_refresh(%{id: id} = state) do
unless is_nil(id) do
:hackney.close(id)
end
send(self(), :connect)
reset_state(state)
end
end
|
lib/server_sent_event_stage.ex
| 0.750918
| 0.440229
|
server_sent_event_stage.ex
|
starcoder
|
defmodule Xlsxir.ParseStyle do
@moduledoc """
Holds the SAX event instructions for parsing style data via `Xlsxir.SaxParser.parse/2`
"""
# the following module attributes hold `numFmtId`s for standard number styles, grouping them between numbers and dates
@num [0,1,2,3,4,9,10,11,12,13,37,38,39,40,44,48,49,56,59,60,61,62,67,68,69,70]
@date [14,15,16,17,18,19,20,21,22,27,30,36,45,46,47,50,57]
defstruct custom_style: %{}, cellxfs: false, index: 0, tid: nil, num_fmt_ids: []
@doc """
Sax event utilized by `Xlsxir.SaxParser.parse/2`. Takes a pattern and the current state of a struct and recursively parses the
styles XML file, ultimately saving each parsed style type to the ETS process. The style types generated are `nil` for numbers and `'d'` for dates.
## Parameters
- pattern - the XML pattern of the event to match upon
- state - the state of the `%Xlsxir.ParseStyle{}` struct which temporarily holds each `numFmtId` and its associated `formatCode` for custom format types
## Example
Recursively sends style types generated from parsing the `xl/styles.xml` file to the ETS process. The data can ultimately
be retrieved from the ETS table (i.e. `:ets.lookup(tid, 0)` would return `nil` or `'d'` depending on each style type generated).
"""
def sax_event_handler(:startDocument, _state) do
%__MODULE__{tid: GenServer.call(Xlsxir.StateManager, :new_table)}
end
def sax_event_handler({:startElement,_,'cellXfs',_,_}, state) do
%{state | cellxfs: true}
end
def sax_event_handler({:endElement,_,'cellXfs',_}, state) do
%{state | cellxfs: false}
end
def sax_event_handler({:startElement,_,'xf',_,xml_attr}, %__MODULE__{num_fmt_ids: num_fmt_ids} = state) do
if state.cellxfs do
[{_,_,_,_,id}] = Enum.filter(xml_attr, fn attr ->
case attr do
{:attribute,'numFmtId',_,_,_} -> true
_ -> false
end
end)
%{state | num_fmt_ids: num_fmt_ids ++ [id]}
else
state
end
end
def sax_event_handler({:startElement,_,'numFmt',_,xml_attr},
%__MODULE__{custom_style: custom_style} = state) do
temp = Enum.reduce(xml_attr, %{}, fn attr, acc ->
case attr do
{:attribute,'numFmtId',_,_,id} -> Map.put(acc, :id, id)
{:attribute,'formatCode',_,_,cd} -> Map.put(acc, :cd, cd)
_ -> nil
end
end)
%{state | custom_style: Map.put(custom_style, temp[:id], temp[:cd])}
end
def sax_event_handler(:endDocument, %__MODULE__{} = state) do
%__MODULE__{custom_style: custom_style, num_fmt_ids: num_fmt_ids, index: index, tid: tid} = state
custom_type = custom_style_handler(custom_style)
inc = Enum.reduce(num_fmt_ids, 0, fn style_type, acc ->
case List.to_integer(style_type) do
i when i in @num -> :ets.insert(tid, {index + acc, nil})
i when i in @date -> :ets.insert(tid, {index + acc, 'd'})
_ -> add_custom_style(tid, style_type, custom_type, index + acc)
end
acc + 1
end)
%{state | index: index + inc}
end
def sax_event_handler(_, state), do: state
defp custom_style_handler(custom_style) do
custom_style
|> Enum.reduce(%{}, fn {k, v}, acc ->
cond do
String.match?(to_string(v), ~r/\bred\b/i) -> Map.put_new(acc, k, nil)
String.match?(to_string(v), ~r/[dhmsy]/i) -> Map.put_new(acc, k, 'd')
true -> Map.put_new(acc, k, nil)
end
end)
end
defp add_custom_style(tid, style_type, custom_type, index) do
if Map.has_key?(custom_type, style_type) do
:ets.insert(tid, {index, custom_type[style_type]})
else
raise "Unsupported style type: #{style_type}. See doc page \"Number Styles\" for more info."
end
end
end
|
lib/xlsxir/parse_style.ex
| 0.726911
| 0.505981
|
parse_style.ex
|
starcoder
|
defmodule ExWire.DEVp2p do
@moduledoc """
Functions that deal directly with the DEVp2p Wire Protocol.
For more information, please see:
https://github.com/ethereum/wiki/wiki/%C3%90%CE%9EVp2p-Wire-Protocol
"""
alias ExWire.Config
alias ExWire.DEVp2p.Session
alias ExWire.Packet.Capability.Mana
alias ExWire.Packet.Protocol.Hello
@doc """
Convenience function to create an `ExWire.DEVp2p.Session` struct
"""
@spec init_session :: Session.t()
def init_session do
%Session{}
end
@doc """
Function to create a DEVp2p struct needed for a protocol handshake. This
should be an `ExWire.Packet.Protocol.Hello` struct with the appropriate values filled in.
## Examples
iex> ExWire.DEVp2p.build_hello().client_id
"mana/0.0.1"
"""
@spec build_hello() :: Hello.t()
def build_hello() do
%Hello{
p2p_version: Config.p2p_version(),
client_id: Config.client_id(),
caps: Mana.get_our_capabilities(),
listen_port: Config.listen_port(),
node_id: Config.node_id()
}
end
@doc """
Function to update `ExWire.DEVp2p.Session` when a handshake is sent. The
handshake should be an `ExWire.Packet.Protocol.Hello` that we have sent to a peer.
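## Examples
An illustrative sketch of the session bookkeeping:
    iex> session = ExWire.DEVp2p.init_session()
    ...> hello = ExWire.DEVp2p.build_hello()
    ...> ExWire.DEVp2p.hello_sent(session, hello).hello_sent == hello
    true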
"""
@spec hello_sent(Session.t(), Hello.t()) :: Session.t()
def hello_sent(session, hello = %Hello{}) do
%{session | hello_sent: hello}
end
@doc """
Function to check whether or not a `ExWire.DEVp2p.Session` is active. See
`ExWire.DEVp2p.Session.active?/1` for more information.
"""
@spec session_active?(Session.t()) :: boolean()
def session_active?(session), do: Session.active?(session)
@doc """
Function to check whether or not a `ExWire.DEVp2p.Session` is compatible.
See `ExWire.DEVp2p.Session.compatible_capabilities?/1` for more information.
"""
@spec session_compatible?(Session.t()) :: boolean()
def session_compatible?(session), do: Session.compatible_capabilities?(session)
@doc """
Function that handles other messages related to the DEVp2p protocol that a peer
sends. The messages could be `ExWire.Packet.Protocol.Disconnect`, `ExWire.Packet.Protocol.Ping`,
or `ExWire.Packet.Protocol.Pong`.
An `ExWire.DEVp2p.Session` is required as the first argument in order to
properly update the session based on the message received.
"""
@spec handle_message(Session.t(), struct()) ::
{:error, :handshake_incomplete} | {:ok, Session.t()}
def handle_message(session, packet = %Hello{}) do
{:ok, Session.hello_received(session, packet)}
end
def handle_message(_session, _message) do
{:error, :handshake_incomplete}
end
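  # A minimal handshake sketch under assumed peer plumbing (`peer_hello`
  # stands in for a `Hello` packet received from the peer):
  #
  #     session = ExWire.DEVp2p.init_session()
  #     session = ExWire.DEVp2p.hello_sent(session, ExWire.DEVp2p.build_hello())
  #     {:ok, session} = ExWire.DEVp2p.handle_message(session, peer_hello)
  #     ExWire.DEVp2p.session_active?(session)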
end
|
apps/ex_wire/lib/ex_wire/dev_p2p.ex
| 0.680135
| 0.410372
|
dev_p2p.ex
|
starcoder
|
defmodule Mix.Config do
@moduledoc ~S"""
Module for defining, reading and merging app configurations.
Most commonly, this module is used to define your own configuration:
use Mix.Config
config :plug,
key1: "value1",
key2: "value2"
import_config "#{Mix.env}.exs"
All `config/*` macros, including `import_config/1`, are used
to help define such configuration files.
Furthermore, this module provides functions like `read!/1`,
`merge/2` and friends which help manipulate configurations
in general.
Configuration set using `Mix.Config` will set the application environment, so
that `Application.get_env/3` and other `Application` functions can be used
at run or compile time to retrieve or change the configuration.
For example, the `:key1` value from the application `:plug` (see example above) can be
retrieved with:
"value1" = Application.fetch_env!(:plug, :key1)
"""
@doc false
defmacro __using__(_) do
quote do
# TODO: If we split User API from Mix API, we no longer need to use Mix.Config.
import Mix.Config, only: [config: 2, config: 3, import_config: 1]
end
end
@config_key {__MODULE__, :config}
@files_key {__MODULE__, :files}
defp get_config!() do
Process.get(@config_key) || raise_improper_use!()
end
defp put_config(value) do
Process.put(@config_key, value)
end
defp delete_config() do
Process.delete(@config_key)
end
defp get_files!() do
Process.get(@files_key) || raise_improper_use!()
end
defp put_files(value) do
Process.put(@files_key, value)
end
defp delete_files() do
Process.delete(@files_key)
end
defp raise_improper_use!() do
raise "could not set configuration via Mix.Config. " <>
"This usually means you are trying to execute a configuration file " <>
"directly instead of using the proper command, such as mix loadconfig"
end
## User API
@doc """
Configures the given application.
Keyword lists are always deep merged.
## Examples
The given `opts` are merged into the existing configuration
for the given `app`. Conflicting keys are overridden by the
ones specified in `opts`. For example, the declaration below:
config :lager,
log_level: :warn,
mode: :truncate
config :lager,
log_level: :info,
threshold: 1024
Will have a final configuration of:
[log_level: :info, mode: :truncate, threshold: 1024]
This final configuration can be retrieved at run or compile time:
Application.get_all_env(:lager)
"""
def config(app, opts) when is_atom(app) and is_list(opts) do
get_config!()
|> merge([{app, opts}])
|> put_config()
end
@doc """
Configures the given key for the given application.
Keyword lists are always deep merged.
## Examples
The given `opts` are merged into the existing values for `key`
in the given `app`. Conflicting keys are overridden by the
ones specified in `opts`. For example, given the two configurations
below:
config :ecto, Repo,
log_level: :warn,
adapter: Ecto.Adapters.Postgres
config :ecto, Repo,
log_level: :info,
pool_size: 10
the final value of the configuration for the `Repo` key in the `:ecto`
application will be:
[log_level: :info, pool_size: 10, adapter: Ecto.Adapters.Postgres]
This final value can be retrieved at runtime or compile time with:
Application.get_env(:ecto, Repo)
"""
def config(app, key, opts) when is_atom(app) do
get_config!()
|> merge([{app, [{key, opts}]}])
|> put_config()
end
@doc ~S"""
Imports configuration from the given file or files.
If `path_or_wildcard` is a wildcard, then all the files
matching that wildcard will be imported; if no file matches
the wildcard, no errors are raised. If `path_or_wildcard` is
not a wildcard but a path to a single file, then that file is
imported; in case the file doesn't exist, an error is raised.
If the path or wildcard is relative, it will be expanded relative to
the directory the current configuration file is in.
## Examples
This is often used to emulate configuration across environments:
import_config "#{Mix.env}.exs"
Or to import files from children in umbrella projects:
import_config "../apps/*/config/config.exs"
"""
defmacro import_config(path_or_wildcard) do
quote do
Mix.Config.__import__!(unquote(path_or_wildcard), __DIR__)
end
end
@doc false
def __import__!(path_or_wildcard, dir) do
path_or_wildcard = Path.expand(path_or_wildcard, dir)
paths =
if String.contains?(path_or_wildcard, ~w(* ? [ {)) do
Path.wildcard(path_or_wildcard)
else
[path_or_wildcard]
end
for path <- paths do
eval_config!(path)
end
:ok
end
defp eval_config!(file) do
current_files = get_files!()
if file in current_files do
raise ArgumentError,
"attempting to load configuration #{Path.relative_to_cwd(file)} recursively"
end
put_files([file | current_files])
Code.eval_file(file)
end
## Mix API
@doc """
Evaluates the given configuration file.
It accepts a list of `imported_paths` that should raise if attempted
to be imported again (to avoid recursive imports).
It returns a tuple with the configuration and the imported paths.
"""
def eval!(file, imported_paths \\ []) do
previous_config = put_config([])
previous_files = put_files(imported_paths)
try do
{eval_config, _} = eval_config!(Path.expand(file))
case get_config!() do
[] when is_list(eval_config) ->
{validate!(eval_config), get_files!()}
pdict_config ->
{pdict_config, get_files!()}
end
after
if previous_config, do: put_config(previous_config), else: delete_config()
if previous_files, do: put_files(previous_files), else: delete_files()
end
end
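  # For example (path is illustrative):
  #
  #     {config, files} = Mix.Config.eval!("config/config.exs")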
@doc false
@deprecated "Use eval!/2 instead"
def read!(file, loaded_paths \\ []) do
eval!(file, loaded_paths) |> elem(0)
end
@doc false
@deprecated "Use eval!/2 instead"
def read_wildcard!(path, loaded_paths \\ []) do
paths =
if String.contains?(path, ~w(* ? [ {)) do
Path.wildcard(path)
else
[path]
end
Enum.reduce(paths, [], &merge(&2, read!(&1, loaded_paths)))
end
@doc false
@deprecated "Manually validate the data instead"
def validate!(config) do
validate!(config, "runtime")
end
defp validate!(config, file) do
if is_list(config) do
Enum.all?(config, fn
{app, value} when is_atom(app) ->
if Keyword.keyword?(value) do
true
else
raise ArgumentError,
"expected #{Path.relative_to_cwd(file)} config for app #{inspect(app)} " <>
"to return keyword list, got: #{inspect(value)}"
end
_ ->
false
end)
else
raise ArgumentError,
"expected #{Path.relative_to_cwd(file)} config to return " <>
"keyword list, got: #{inspect(config)}"
end
config
end
@doc """
Persists the given configuration by modifying
the configured applications environment.
`config` should be a list of `{app, app_config}` tuples or a
`%{app => app_config}` map where `app` are the applications to
be configured and `app_config` are the configuration (as key-value
pairs) for each of those applications.
Returns the configured applications.
## Examples
Mix.Config.persist(logger: [level: :error], my_app: [my_config: 1])
#=> [:logger, :my_app]
"""
def persist(config) do
for {app, kw} <- config do
for {k, v} <- kw do
Application.put_env(app, k, v, persistent: true)
end
app
end
end
@doc """
Merges two configurations.
The configuration of each application is merged together
with the values in the second one having higher preference
than the first in case of conflicts.
## Examples
iex> Mix.Config.merge([app: [k: :v1]], [app: [k: :v2]])
[app: [k: :v2]]
iex> Mix.Config.merge([app1: []], [app2: []])
[app1: [], app2: []]
"""
def merge(config1, config2) do
Keyword.merge(config1, config2, fn _, app1, app2 ->
Keyword.merge(app1, app2, &deep_merge/3)
end)
end
defp deep_merge(_key, value1, value2) do
if Keyword.keyword?(value1) and Keyword.keyword?(value2) do
Keyword.merge(value1, value2, &deep_merge/3)
else
value2
end
end
end
|
lib/mix/lib/mix/config.ex
| 0.670069
| 0.438364
|
config.ex
|
starcoder
|
defmodule Azalea.Tree do
@moduledoc """
`Azalea.Tree` models a rose, or multi-way tree. A rose tree is an `n`-ary (with unbounded `n`) tree
where each branch of a node is itself a rose tree. For example:
iex> Azalea.Tree.new(:a, [:b, :c, Azalea.Tree.new(:d, [:e, :f])])
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{
value: :d,
children: [
%Azalea.Tree{value: :e, children: []},
%Azalea.Tree{value: :f, children: []}
]
}
]
}
An `Azalea.Tree` is unbalanced, and child ordering follows insertion order. See `add_child/2` and `insert_child/3` below.
"""
@type t :: %Azalea.Tree{value: any, children: [Azalea.Tree.t]}
defstruct [:value, :children]
@doc """
Returns a tree defined by the arguments passed.
`new/0` returns an empty tree
iex> Azalea.Tree.new()
%Azalea.Tree{value: nil, children: []}
`new/1` returns a tree with the argument assigned as the value, and no children
iex> Azalea.Tree.new({1, :one, "un"})
%Azalea.Tree{value: {1, :one, "un"}, children: []}
`new/2` returns a tree with the first argument assigned to the value, and the second argument (a list)
assigned to the tree's children, with each element wrapped in an `Azalea.Tree`
iex> Azalea.Tree.new(:a, [:b, :c])
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []}
]
}
"""
@spec new(any, [any]) :: Azalea.Tree.t
def new(value \\ nil, children \\ []) do
%__MODULE__{value: value, children: wrap_children(children)}
end
@doc """
Returns true if `child` is one of `tree`'s children.
iex> child = Azalea.Tree.new(:b)
iex> tree = Azalea.Tree.new(:a, [child])
iex> Azalea.Tree.is_child?(child, tree)
true
iex> Azalea.Tree.is_child?(Azalea.Tree.new(:c), tree)
false
"""
@spec is_child?(Azalea.Tree.t, Azalea.Tree.t) :: boolean
def is_child?(child, tree) do
Enum.member?(tree, child)
end
@doc """
Prepends `child` to the front of `tree`'s children.
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.add_child(tree, Azalea.Tree.new("e"))
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: "e", children: []},
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []}
]
}
`add_child/2` will wrap a non-`Azalea.Tree` child in a tree before prepending it:
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.add_child(tree, "f")
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: "f", children: []},
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []}
]
}
This is a shortcut for `Azalea.Tree.insert_child(tree, child, 0)`
"""
@spec add_child(Azalea.Tree.t, any) :: Azalea.Tree.t
def add_child(tree = %Azalea.Tree{}, child) do
insert_child(tree, child, 0)
end
@doc """
Inserts `child` into `tree`'s children at the given `index`.
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.insert_child(tree, Azalea.Tree.new("e"), 1)
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: "e", children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []}
]
}
`insert_child/3` will wrap a non-`Azalea.Tree` child in a tree before inserting it:
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.insert_child(tree, "f", -1)
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []},
%Azalea.Tree{value: "f", children: []}
]
}
"""
@spec insert_child(Azalea.Tree.t, any, integer) :: Azalea.Tree.t
def insert_child(tree, child, index) do
with child <- wrap_child(child) do
%{tree | children: List.insert_at(tree.children, index, child)}
end
end
@doc """
Removes the tree's first child, and returns a tuple of `{child, tree_without_child}`
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.pop_child(tree)
{
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []}
]
}
}
"""
@spec pop_child(Azalea.Tree.t) :: {Azalea.Tree.t, Azalea.Tree.t}
def pop_child(tree = %Azalea.Tree{}) do
{child, children} = List.pop_at(tree.children, 0)
{child, %{tree | children: children}}
end
@doc """
Removes the tree's child at the given index and returns a tuple of `{child, tree_without_child}`
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.remove_child(tree, 2)
{
%Azalea.Tree{value: :d, children: []},
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []}
]
}
}
"""
@spec remove_child(Azalea.Tree.t, integer) :: {Azalea.Tree.t, Azalea.Tree.t}
def remove_child(tree = %Azalea.Tree{}, index) do
{child, children} = List.pop_at(tree.children, index)
{child, %{tree | children: children}}
end
@doc """
Applies a given function to each sub-tree of the tree, maintaining the tree's nested structure
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.map(tree, fn t -> %{t | value: "node-" <> to_string(t.value)} end)
%Azalea.Tree{
value: "node-a",
children: [
%Azalea.Tree{value: "node-b", children: []},
%Azalea.Tree{value: "node-c", children: []},
%Azalea.Tree{value: "node-d", children: []}
]
}
Note that this behaves differently than `Enum.map` applied to a tree, which will flatten the tree in depth-first order.
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Enum.map(tree, fn t -> %{t | value: "node-" <> to_string(t.value)} end)
[
%Azalea.Tree{
value: "node-a",
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{value: :c, children: []},
%Azalea.Tree{value: :d, children: []}
]
},
%Azalea.Tree{value: "node-b", children: []},
%Azalea.Tree{value: "node-c", children: []},
%Azalea.Tree{value: "node-d", children: []}
]
"""
@spec map(Azalea.Tree.t, fun) :: Azalea.Tree.t
def map(tree, fun) do
%{fun.(tree) | children: Enum.map(tree.children, &map(&1, fun))}
end
@doc """
Returns the total count of `Azalea.Trees` contained in the tree, including the root
iex> tree = Azalea.Tree.new(:a, [:b, :c, :d])
iex> Azalea.Tree.length(tree)
4
"""
@spec length(Azalea.Tree.t) :: integer
def length(tree), do: Enum.count(tree)
@doc """
Reduces the tree to a single value, using a depth-first walk
iex> tree = Azalea.Tree.new(:a, [:b, Azalea.Tree.new(:c, [:e, :f]), :d])
iex> Azalea.Tree.reduce(tree, "", fn t, acc -> acc <> to_string(t.value) end)
"abcefd"
"""
@spec reduce(Azalea.Tree.t, term, (term, term -> term)) :: term
def reduce(tree, acc, fun), do: Enum.reduce(tree, acc, fun)
@doc """
Finds a path through `tree` to the `child`.
iex> tree = Azalea.Tree.new(:a, [:b, Azalea.Tree.new(:c, [:e, :f]), :d])
iex> Azalea.Tree.path_to(Azalea.Tree.new(:e), tree)
[
%Azalea.Tree{
value: :a,
children: [
%Azalea.Tree{value: :b, children: []},
%Azalea.Tree{
value: :c,
children: [
%Azalea.Tree{value: :e, children: []},
%Azalea.Tree{value: :f, children: []}
]
},
%Azalea.Tree{value: :d, children: []}
]
},
%Azalea.Tree{
value: :c,
children: [
%Azalea.Tree{value: :e, children: []},
%Azalea.Tree{value: :f, children: []}
]
},
%Azalea.Tree{value: :e, children: []}
]
"""
@spec path_to(Azalea.Tree.t, Azalea.Tree.t) :: [Azalea.Tree.t]
def path_to(child, tree) do
find_path(tree, child, [])
end
## Private
defp find_path(tree, target, acc) when tree == target do
[tree|acc]
end
defp find_path(%Azalea.Tree{children: []}, _, _), do: nil
defp find_path(tree = %Azalea.Tree{children: children}, target, acc) do
  case find_path(children, target, acc) do
    nil -> nil
    path -> [tree | path]
  end
end
defp find_path(trees, target, acc) when is_list(trees) do
Enum.find(
Enum.map(trees, &find_path(&1, target, acc)),
&(!is_nil(&1))
)
end
defp wrap_children(children) when is_list(children) do
Enum.map(children, &wrap_child/1)
end
defp wrap_child(tree = %__MODULE__{}), do: tree
defp wrap_child(child), do: new(child)
@behaviour Access
@doc """
Implementation of the `Access` behaviour. See `Access.fetch/2` for details.
"""
def fetch(%Azalea.Tree{children: children}, index) when is_integer(index) do
case Enum.at(children, index) do
nil -> :error
child -> {:ok, child}
end
end
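  # Because `Azalea.Tree` implements `Access`, children can be addressed by
  # index, e.g.:
  #
  #     tree = Azalea.Tree.new(:a, [:b, :c])
  #     tree[0] #=> %Azalea.Tree{value: :b, children: []}
  #     tree[9] #=> nil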
@doc """
Implementation of the `Access` behaviour. See `Access.get/3` for details.
"""
def get(tree, index, default \\ nil) when is_integer(index) do
case fetch(tree, index) do
{:ok, child} -> child
:error -> default
end
end
@doc """
Implementation of the `Access` behaviour. See `Access.get_and_update/3` for details.
"""
def get_and_update(tree, index, fun) do
value = get(tree, index)
case fun.(value) do
{get, update} ->
{get, %{tree | children: List.replace_at(tree.children, index, update)}}
:pop ->
{value, %{tree | children: List.delete_at(tree.children, index)}}
      x -> raise "the given function must return a two-element tuple or :pop, got: #{inspect(x)}"
end
end
@doc """
Implementation of the `Access` behaviour. See `Access.pop/2` for details.
"""
def pop(tree, index) do
case get(tree, index) do
nil -> {nil, tree}
child -> {child, %{tree | children: List.delete_at(tree.children, index)}}
end
end
end
|
lib/azalea/tree.ex
| 0.930474
| 0.737371
|
tree.ex
|
starcoder
|
defmodule ExYarn.Token do
@moduledoc """
A token is the building block of a lockfile (intended for internal use only)
This module takes a lockfile's contents as input and breaks it up into a list
of tokens, each of them representing a single discrete element of the lockfile.
Tokens represent every piece that makes up a lockfile, from comments, strings
and integers to line returns, colons and indentation.
"""
@typedoc """
The list of types a token can have
"""
@type tokenType ::
:boolean
| :string
| :identifier
| :eof
| :colon
| :new_line
| :comment
| :indent
| :invalid
| :number
| :comma
@valid_prop_value_token [:boolean, :string, :number]
@enforce_keys [:type, :value, :line, :col]
defstruct [:line, :col, :type, :value]
@typedoc """
The representation of a token
"""
@type t() :: %__MODULE__{
line: integer(),
col: integer(),
type: tokenType(),
value: any()
}
@doc """
Takes a `ExYarn.Parser.Token` as an input and returns a boolean indicating
whether or not it can be used as a value for a key
"""
@spec valid_prop_value?(t()) :: boolean
def valid_prop_value?(%__MODULE__{type: type}) do
type in @valid_prop_value_token
end
@doc """
Main entrypoint for the module. Takes as input a `String` representing the
contents of a yarn lockfile and returns the corresponding list of tokens.
"""
@spec tokenize(String.t()) :: [t()]
def tokenize(input) do
tokenize(input, false, 1, 0, [])
|> Enum.reverse()
end
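  # For example (struct fields abbreviated; shape is illustrative):
  #
  #     ExYarn.Token.tokenize("foo true")
  #     #=> [%ExYarn.Token{type: :string, value: "foo", ...},
  #     #    %ExYarn.Token{type: :boolean, value: true, ...},
  #     #    %ExYarn.Token{type: :eof, value: nil, ...}]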
@spec tokenize(String.t(), boolean(), integer(), integer(), [t()]) :: [t()]
defp tokenize("", _last_new_line, line, col, tokens) do
[build_token(line, col, :eof) | tokens]
end
defp tokenize(input, last_new_line, line, col, tokens) do
{chop, token, line, col} = generate_next_token(input, last_new_line, line, col)
tokens =
case token do
nil -> tokens
token -> [token | tokens]
end
tokens =
case chop do
0 -> [build_token(line, col, :invalid) | tokens]
_ -> tokens
end
col = col + chop
last_new_line = String.at(input, 0) == "\n" or String.at(input, 0) == "\r\n"
String.slice(input, chop..-1)
|> tokenize(last_new_line, line, col, tokens)
end
@spec build_token(integer(), integer(), tokenType, any()) :: t()
defp build_token(line, col, type, value \\ nil) do
%__MODULE__{line: line, col: col, type: type, value: value}
end
@spec generate_next_token(String.t(), boolean(), integer(), integer()) ::
{integer(), t() | nil, integer(), integer()}
defp generate_next_token("\n" <> _rest, _last_new_line, line, _col) do
line = line + 1
col = 0
{1, build_token(line, col, :new_line), line, col}
end
defp generate_next_token("\r\n" <> _rest, _last_new_line, line, _col) do
line = line + 1
col = 1
{1, build_token(line, col, :new_line), line, col}
end
defp generate_next_token("#" <> rest, _last_new_line, line, col) do
{val, val_length} =
case Regex.run(~r/^.*?\n/, rest) do
[capture | _] ->
val_length = String.length(capture) - 1
val = String.slice(capture, 0, val_length)
{val, val_length}
nil ->
{rest, String.length(rest)}
end
{val_length + 1, build_token(line, col, :comment, val), line, col}
end
defp generate_next_token(" " <> _rest, false, line, col) do
{1, nil, line, col}
end
defp generate_next_token(" " <> _rest = input, true, line, col) do
indent_size =
Regex.run(~r/^ */, input)
|> List.first()
|> String.length()
if rem(indent_size, 2) == 0 do
{indent_size, build_token(line, col, :indent, indent_size / 2), line, col}
else
throw({:message, "Invalid number of spaces", :token, build_token(line, col, :invalid)})
end
end
defp generate_next_token("\"" <> _rest = input, _last_new_line, line, col) do
string_length =
Regex.run(~r/^".*?(\\\\|[^\\])"/, input)
|> List.first()
|> String.length()
val = String.slice(input, 1, string_length - 2)
{string_length, build_token(line, col, :string, val), line, col}
end
defp generate_next_token("true" <> _rest, _last_new_line, line, col) do
{4, build_token(line, col, :boolean, true), line, col}
end
defp generate_next_token("false" <> _rest, _last_new_line, line, col) do
{5, build_token(line, col, :boolean, false), line, col}
end
defp generate_next_token(":" <> _rest, _last_new_line, line, col) do
{1, build_token(line, col, :colon), line, col}
end
defp generate_next_token("," <> _rest, _last_new_line, line, col) do
{1, build_token(line, col, :comma), line, col}
end
defp generate_next_token(input, _last_new_line, line, col) do
cond do
Regex.match?(~r/^[0-9]/, input) -> generate_number_token(input, line, col)
Regex.match?(~r/^[a-zA-Z\/.-]/, input) -> generate_string_token(input, line, col)
true -> {0, build_token(line, col, :invalid), line, col}
end
end
defp generate_number_token(input, line, col) do
val =
Regex.run(~r/^[0-9]*/, input)
|> List.first()
{String.length(val), build_token(line, col, :number, String.to_integer(val)), line, col}
end
defp generate_string_token(input, line, col) do
{name, name_length} =
case Regex.run(~r/.*?[: \r\n,]/, input) do
nil -> {input, String.length(input)}
[name | _] -> {name, String.length(name) - 1}
end
name = String.slice(name, 0, name_length)
{name_length, build_token(line, col, :string, name), line, col}
end
end
|
lib/ex_yarn/token.ex
| 0.827201
| 0.606586
|
token.ex
|
starcoder
|
defmodule Json5 do
@moduledoc """
Convert Json5 to Elixir term and back
"""
@doc """
Parses JSON5 input into Elixir terms.
To keep the precision of the given numbers, integers and floats are cast to `Decimal`
options:
- object_key_function: (binary) -> any
- use given function to format the object key
- object_key_existing_atom: boolean
- format the object key with `String.to_existing_atom/1`
- object_key_atom: boolean
- format the object key with `String.to_atom/1`
- if none of the above options are set return the key as a binary (String.t())
- object_new_function: ({any, any}) -> any
- function to create a map from the list of parsed tuples, by default uses `Map.new/1`
- backend: [`Json5.Decode.Backend.Combine`, `Json5.Decode.Backend.Yecc`]
- select the backend to be used (Defaults to Combine).
- The Combine backend is coded with the json5 spec (with unicode) in mind, but a lot slower (about 2000x slower than `Jason`)
- The Yecc backend is a lot faster (about 6x slower than `Jason`) but not that rigorous based on the json5 spec. It is just written to make the existing tests work.
```elixir
iex> Json5.decode("{array: [1, 2, 3], map: {'null': null, test: 1, }, }")
{:ok, %{
"map" => %{
"test" => Decimal.new(1),
"null" => nil
},
"array" => [
Decimal.new(1),
Decimal.new(2),
Decimal.new(3)
]
}}
```
"""
def decode(text, opts \\ []) do
Json5.Decode.parse(text, Map.new(opts))
end
@doc """
Same as `decode/2` but raises on error
"""
def decode!(text, opts \\ []) do
{:ok, result} = Json5.Decode.parse(text, Map.new(opts))
result
end
@doc """
Encode elixir input as json5. Contains some simple formatting options
options:
- pretty: boolean
- compact: boolean
```elixir
iex> Json5.encode(%{map: %{test: 1, null: nil}, array: [1,2,3]})
{:ok, "{array: [1, 2, 3], map: {'null': null, test: 1, }, }"}
iex> Json5.encode(%{map: %{test: 1, null: nil}, array: [1,2,3]}, pretty: true)
{:ok, \"\"\"
{
array: [
1,
2,
3,
],
map: {
'null': null,
test: 1,
},
}
\"\"\"}
iex> Json5.encode(%{map: %{test: 1, null: nil}, array: [1,2,3]}, compact: true)
{:ok, "{array:[1,2,3],map:{'null':null,test:1}}"}
```
"""
def encode(input, opts \\ []) do
Json5.Encode.dump(input, Map.new(opts))
end
@doc """
Same as `encode/2` but raises on error
"""
def encode!(input, opts \\ []) do
{:ok, result} = encode(input, Map.new(opts))
result
end
end
|
lib/json5.ex
| 0.872007
| 0.862583
|
json5.ex
|
starcoder
|
defmodule Sanbase.Alert.Trigger.RawSignalTriggerSettings do
@moduledoc ~s"""
An alert based on the ClickHouse signals.
The signal we're following is configured via the 'signal' parameter
Example parameters:
```
%{
type: "raw_signal_data",
signal: "mvrv_usd_30d_lower_zone",
channel: "telegram",
target: %{slug: ["ethereum", "bitcoin"]},
time_window: "10d"
}
```
The above parameters mean: Fire an alert if there is a record in CH signals table for:
1. signal: `mvrv_usd_30d_lower_zone`
2. assets: "ethereum" or "bitcoin"
3. in the interval: [now()-10d, now()]
and send it to this telegram channel.
"""
use Vex.Struct
import Sanbase.Validation
import Sanbase.DateTimeUtils, only: [round_datetime: 1, str_to_sec: 1]
alias __MODULE__
alias Sanbase.Model.Project
alias Sanbase.Alert.Type
alias Sanbase.Cache
alias Sanbase.Signal
@derive {Jason.Encoder, except: [:filtered_target, :triggered?, :payload, :template_kv]}
@trigger_type "raw_signal_data"
@enforce_keys [:type, :channel, :signal, :target]
defstruct type: @trigger_type,
signal: nil,
channel: nil,
target: nil,
time_window: "1d",
# Private fields, not stored in DB.
filtered_target: %{list: []},
triggered?: false,
payload: %{},
template_kv: %{}
@type t :: %__MODULE__{
signal: Type.signal(),
type: Type.trigger_type(),
channel: Type.channel(),
target: Type.complex_target(),
time_window: Type.time_window(),
# Private fields, not stored in DB.
filtered_target: Type.filtered_target(),
triggered?: boolean(),
payload: Type.payload(),
template_kv: Type.template_kv()
}
validates(:signal, &valid_signal?/1)
validates(:time_window, &valid_time_window?/1)
@spec type() :: String.t()
def type(), do: @trigger_type
def post_create_process(_trigger), do: :nochange
def post_update_process(_trigger), do: :nochange
def new(settings) do
struct(RawSignalTriggerSettings, settings)
end
def get_data(%{} = settings) do
%{filtered_target: %{list: target_list, type: _type}} = settings
{:ok, data} = fetch_signal(target_list, settings)
fired_slugs = Enum.map(data, & &1.slug)
target_list
|> Enum.filter(fn slug -> slug in fired_slugs end)
end
defp fetch_signal(slug_or_slugs, settings) do
%{signal: signal, time_window: time_window} = settings
cache_key =
{__MODULE__, :fetch_raw_signal_data, signal, slug_or_slugs, time_window,
round_datetime(Timex.now())}
|> Sanbase.Cache.hash()
%{from: from, to: to} = timerange_params(settings)
Cache.get_or_store(cache_key, fn ->
Signal.raw_data([signal], %{slug: slug_or_slugs}, from, to)
end)
end
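  # The cache key includes round_datetime(Timex.now()), so repeated alert
  # evaluations within the same rounding window reuse one ClickHouse query.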
defp timerange_params(%RawSignalTriggerSettings{} = settings) do
interval_seconds = str_to_sec(settings.time_window)
now = Timex.now()
%{
from: Timex.shift(now, seconds: -interval_seconds),
to: now
}
end
defimpl Sanbase.Alert.Settings, for: RawSignalTriggerSettings do
import Sanbase.Alert.Utils
alias Sanbase.Alert.OperationText
def triggered?(%RawSignalTriggerSettings{triggered?: triggered}), do: triggered
def evaluate(%RawSignalTriggerSettings{} = settings, _trigger) do
case RawSignalTriggerSettings.get_data(settings) do
data when is_list(data) and data != [] ->
build_result(data, settings)
_ ->
%RawSignalTriggerSettings{settings | triggered?: false}
end
end
defp build_result(
fired_slugs,
%RawSignalTriggerSettings{filtered_target: %{list: slugs}} = settings
) do
template_kv =
Enum.reduce(fired_slugs, %{}, fn slug, acc ->
if slug in slugs do
Map.put(acc, slug, template_kv(settings, slug))
else
acc
end
end)
case template_kv != %{} do
true ->
%RawSignalTriggerSettings{
settings
| triggered?: true,
template_kv: template_kv
}
false ->
%RawSignalTriggerSettings{settings | triggered?: false}
end
end
def cache_key(%RawSignalTriggerSettings{} = settings) do
construct_cache_key([
settings.type,
settings.target,
settings.time_window
])
end
defp template_kv(settings, slug) do
project = Project.by_slug(slug)
{:ok, human_readable_name} = Sanbase.Signal.human_readable_name(settings.signal)
{details_template, details_kv} = OperationText.details(:signal, settings)
kv =
%{
type: settings.type,
signal: settings.signal,
project_name: project.name,
project_slug: project.slug,
project_ticker: project.ticker,
sanbase_project_link: "https://app.santiment.net/charts?slug=#{project.slug}",
signal_human_readable_name: human_readable_name
}
|> Map.merge(details_kv)
template = """
🔔 [\#{{project_ticker}}]({{sanbase_project_link}}) | {{signal_human_readable_name}} signal fired for *{{project_name}}*.
#{details_template}
"""
{template, kv}
end
end
end
|
lib/sanbase/alerts/trigger/settings/raw_signal_trigger_settings.ex
| 0.814607
| 0.796649
|
raw_signal_trigger_settings.ex
|
starcoder
|
defmodule Imager.Store.S3 do
@behaviour Imager.Store
alias ExAws.S3
@moduledoc """
S3 compatible storage. It will try to stream files as much as possible
in both ways.
"""
require Logger
def retrieve(path, opts) do
{bucket, config} = Keyword.pop(opts, :bucket)
{chunk_size, config} = Keyword.pop(config, :chunk_size, 2 * 1024)
with {:ok, size, mime} <- get_file_size(bucket, path, config) do
stream =
size
|> stream(chunk_size)
        |> Stream.map(&get_chunk(bucket, path, &1, config))
{:ok, {size, mime, stream}}
end
end
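  # Example (bucket and path are illustrative; remaining options are passed
  # through to ExAws as config):
  #
  #     {:ok, {size, mime, stream}} =
  #       Imager.Store.S3.retrieve("path/to/file.png", bucket: "my-bucket")
  #     data = stream |> Enum.to_list() |> IO.iodata_to_binary()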
def store(path, mime, stream, opts) do
{bucket, config} = Keyword.pop(opts, :bucket)
stream
|> Stream.transform(
fn ->
%{body: body} =
bucket
|> S3.initiate_multipart_upload(path, content_type: mime)
|> ExAws.request!(config)
{body.upload_id, 1, [], <<>>}
end,
fn data, {id, idx, etags, chunk} ->
chunk = chunk <> data
        # This grouping is needed because S3 disallows multipart chunks
        # smaller than 5 MiB (only the last one may be smaller), so we
        # accumulate data until a chunk reaches at least 5 MiB
if byte_size(chunk) > 5 * 1024 * 1024 do
%{headers: headers} =
bucket
|> S3.upload_part(path, id, idx, chunk)
|> ExAws.request!(config)
etag = header_find(headers, "Etag")
{[data], {id, idx + 1, [{idx, etag} | etags], <<>>}}
else
{[data], {id, idx, etags, chunk}}
end
end,
fn {id, idx, etags, data} ->
        # Some data may be left over if the file size wasn't a multiple of
        # 5 MiB, so upload it as the final chunk
%{headers: headers} =
bucket
|> S3.upload_part(path, id, idx, data)
|> ExAws.request!(config)
etag = header_find(headers, "Etag")
etags = [{idx, etag} | etags]
bucket
|> ExAws.S3.complete_multipart_upload(path, id, Enum.reverse(etags))
|> ExAws.request!(config)
end
)
end
defp stream(size, chunk_size) do
Stream.unfold(0, fn
counter when counter * chunk_size < size ->
start_byte = counter * chunk_size
end_byte = (counter + 1) * chunk_size
{{start_byte, min(end_byte, size) - 1}, counter + 1}
_ ->
nil
end)
end
defp get_chunk(bucket, path, {start_byte, end_byte}, config) do
%{body: body} =
bucket
|> S3.get_object(path, range: "bytes=#{start_byte}-#{end_byte}")
|> ExAws.request!(config)
body
end
defp get_file_size(bucket, path, config) do
with {:ok, %{headers: headers}} <-
bucket
|> S3.head_object(path)
|> ExAws.request(config),
value when not is_nil(value) <-
header_find(headers, "Content-Length"),
{length, ""} <- Integer.parse(value),
mime =
header_find(headers, "Content-Type") || "application/octet-stream" do
{:ok, length, mime}
else
_ -> :error
end
end
defp header_find(headers, name) do
name = String.downcase(name)
Enum.find_value(headers, fn {key, value} ->
if String.downcase(key) == name, do: value
end)
end
end
|
lib/imager/store/s3.ex
| 0.682045
| 0.411347
|
s3.ex
|
starcoder
|
defmodule RelaxLib.ImageMagick do
alias RelaxLib.ImageMagick.Image
@doc """
Opens image source
"""
def open(path) do
path = Path.expand(path)
unless File.regular?(path), do: raise(File.Error)
%Image{path: path, ext: Path.extname(path)}
end
@doc """
Saves modified image
## Options
* `:path` - The output path of the image. Defaults to a temporary file.
* `:in_place` - Overwrite the original image, ignoring `:path` option. Default false.
"""
def save(image, opts \\ []) do
output_path = output_path_for(image, opts)
system_cmd "mogrify", arguments_for_saving(image, output_path), stderr_to_stdout: true
image_after_command(image, output_path)
end
@doc """
Creates or saves image
Uses the `convert` command, which accepts both existing images, or image
operators. If you have an existing image, prefer save/2.
## Options
* `:path` - The output path of the image. Defaults to a temporary file.
* `:in_place` - Overwrite the original image, ignoring `:path` option. Default false.
"""
def create(image, opts \\ []) do
output_path = output_path_for(image, opts)
system_cmd("convert", arguments_for_creating(image, output_path), stderr_to_stdout: true)
image_after_command(image, output_path)
end
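  # A typical open/operate/save pipeline (paths are illustrative):
  #
  #     "input.png"
  #     |> RelaxLib.ImageMagick.open()
  #     |> RelaxLib.ImageMagick.resize("100x100")
  #     |> RelaxLib.ImageMagick.save(in_place: true)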
defp image_after_command(image, output_path) do
%{image | path: output_path,
ext: Path.extname(output_path),
format: Map.get(image.dirty, :format, image.format),
operations: [],
dirty: %{}}
end
defp output_path_for(image, save_opts) do
if Keyword.get(save_opts, :in_place) do
image.path
else
Keyword.get(save_opts, :path, temporary_path_for(image))
end
end
defp arguments_for_saving(image, path) do
base_arguments = ["-write", path, image.path]
arguments(image) ++ base_arguments
end
defp arguments_for_creating(image, path) do
base_arguments = ["#{Path.dirname(path)}/#{Path.basename(image.path)}"]
arguments(image) ++ base_arguments
end
defp arguments(image) do
Enum.flat_map(image.operations, &normalize_arguments/1)
end
defp normalize_arguments({:image_operator, params}), do: ~w(#{params})
defp normalize_arguments({"annotate", params}), do: ~w(-annotate #{params})
defp normalize_arguments({"+" <> option, nil}), do: ["+#{option}"]
defp normalize_arguments({"-" <> option, nil}), do: ["-#{option}"]
defp normalize_arguments({option, nil}), do: ["-#{option}"]
defp normalize_arguments({"+" <> option, params}), do: ["+#{option}", to_string(params)]
defp normalize_arguments({"-" <> option, params}), do: ["-#{option}", to_string(params)]
defp normalize_arguments({option, params}), do: ["-#{option}", to_string(params)]
@doc """
Makes a copy of original image
"""
def copy(image) do
temp = temporary_path_for(image)
File.cp!(image.path, temp)
Map.put(image, :path, temp)
end
def temporary_path_for(%{dirty: %{path: dirty_path}} = _image) do
do_temporary_path_for(dirty_path)
end
def temporary_path_for(%{path: path} = _image) do
do_temporary_path_for(path)
end
defp do_temporary_path_for(path) do
name = Path.basename(path)
random = :crypto.rand_uniform(100_000, 999_999)
Path.join(System.tmp_dir, "#{random}-#{name}")
end
@doc """
Provides detailed information about the image
"""
def verbose(image) do
args = ~w(-verbose -write #{dev_null()}) ++ [image.path]
{output, 0} = system_cmd "mogrify", args, stderr_to_stdout: true
info =
~r/\b(?<animated>\[0])? (?<format>\S+) (?<width>\d+)x(?<height>\d+)/
|> Regex.named_captures(output)
|> Enum.map(&normalize_verbose_term/1)
|> Enum.into(%{})
|> put_frame_count(output)
Map.merge(image, info)
end
defp dev_null do
case :os.type do
{:win32, _} -> "NUL"
_ -> "/dev/null"
end
end
defp normalize_verbose_term({"animated", "[0]"}), do: {:animated, true}
defp normalize_verbose_term({"animated", ""}), do: {:animated, false}
defp normalize_verbose_term({key, value}) when key in ["width", "height"] do
{String.to_atom(key), String.to_integer(value)}
end
defp normalize_verbose_term({key, value}), do: {String.to_atom(key), String.downcase(value)}
defp put_frame_count(%{animated: false} = map, _), do: Map.put(map, :frame_count, 1)
defp put_frame_count(map, text) do
# skip the [0] lines which may be duplicated
matches = Regex.scan(~r/\b\[[1-9][0-9]*] \S+ \d+x\d+/, text)
# add 1 for the skipped [0] frame
frame_count = length(matches) + 1
Map.put(map, :frame_count, frame_count)
end
@doc """
Converts the image to the image format you specify
"""
def format(image, format) do
downcase_format = String.downcase(format)
ext = ".#{downcase_format}"
rootname = Path.rootname(image.path, image.ext)
%{image | operations: image.operations ++ [format: format],
dirty: image.dirty |> Map.put(:path, "#{rootname}#{ext}") |> Map.put(:format, downcase_format)}
end
@doc """
Resizes the image with provided geometry
"""
def resize(image, params) do
%{image | operations: image.operations ++ [resize: params]}
end
@doc """
Extends the image to the specified dimensions
"""
def extent(image, params) do
%{image | operations: image.operations ++ [extent: params]}
end
@doc """
Sets the gravity of the image
"""
def gravity(image, params) do
%{image | operations: image.operations ++ [gravity: params]}
end
@doc """
Resize the image to fit within the specified dimensions while retaining
the original aspect ratio. Will only resize the image if it is larger than the
specified dimensions. The resulting image may be shorter or narrower than specified
in the smaller dimension but will not be larger than the specified values.
"""
def resize_to_limit(image, params) do
resize(image, "#{params}>")
end
@doc """
Resize the image to fit within the specified dimensions while retaining
the aspect ratio of the original image. If necessary, crop the image in the
larger dimension.
"""
def resize_to_fill(image, params) do
[_, width, height] = Regex.run(~r/(\d+)x(\d+)/, params)
image = verbose(image)
{width, _} = Float.parse width
{height, _} = Float.parse height
cols = image.width
rows = image.height
if width != cols || height != rows do
      scale_x = width/cols
      scale_y = height/rows
larger_scale = max(scale_x, scale_y)
cols = (larger_scale * (cols + 0.5)) |> Float.round
rows = (larger_scale * (rows + 0.5)) |> Float.round
image = resize image, (if scale_x >= scale_y, do: "#{cols}", else: "x#{rows}")
if width != cols || height != rows do
extent(image, params)
else
image
end
else
image
end
end
def auto_orient(image) do
%{image | operations: image.operations ++ ["auto-orient": nil]}
end
def canvas(image, color) do
image_operator(image, "xc:#{color}")
end
def custom(image, action, options \\ nil) do
%{image | operations: image.operations ++ [{action, options}]}
end
def image_operator(image, operator) do
%{image | operations: image.operations ++ [{:image_operator, operator}]}
end
defp system_cmd(command, args, opts) do
start_at = Timex.now
RelaxLib.log_debug(normalize_logger_args(["start: ", command, args, opts]), pretty: false)
result = System.cmd(command, args, opts)
RelaxLib.log_debug(normalize_logger_args(["finish: ", command, args, opts, " in #{Timex.diff(Timex.now, start_at) / 1000}ms"]), pretty: false)
result
end
defp normalize_logger_args(args) do
args
end
end
|
lib/relax_lib/image_magick/image_magick.ex
| 0.74008
| 0.506225
|
image_magick.ex
|
starcoder
|
defmodule Elastic.Bulk do
alias Elastic.Document
alias Elastic.HTTP
alias Elastic.ResponseHandler
@moduledoc ~S"""
Used to make requests to ElasticSearch's bulk API.
All of `index`, `create` and `update` take a list of tuples.
The order of elements in each tuple is this:
* Index
* Type
* ID (not required for `index` bulk action)
* Data
Here's how to use `create`:
```elixir
Elastic.Bulk.create(
[
{Elastic.Index.name("answer"), "answer", "id-goes-here", %{text: "This is an answer"}}
]
)
```
It's worth noting here that you can choose to pass `nil` as the ID in these index
requests; ElasticSearch will automatically generate an ID for you.
For `create` requests, an ID _must_ be provided.
"""
@type document ::
{
index :: binary(),
type :: binary(),
id :: Document.id(),
document :: term()
}
@doc """
Makes bulk index requests to ElasticSearch.
For more information see documentation on `Elastic.Bulk`.
"""
@spec index(list(document())) :: ResponseHandler.result()
def index(documents) do
documents
|> Enum.map(&index_or_create_document(&1, :index))
|> call_bulk_api
end
@doc """
Makes bulk create requests to ElasticSearch.
For more information see documentation on `Elastic.Bulk`.
"""
@spec create(list(document())) :: ResponseHandler.result()
def create(documents) do
documents
|> Enum.map(&index_or_create_document(&1, :create))
|> call_bulk_api
end
@doc """
Makes bulk update requests to ElasticSearch.
For more information see documentation on `Elastic.Bulk`.
"""
@spec update(list(document())) :: ResponseHandler.result()
def update(documents) do
documents
|> Enum.map(&update_document/1)
|> call_bulk_api
end
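  # For example (index, type and id are illustrative):
  #
  #     Elastic.Bulk.update([
  #       {"answer", "answer", "id-goes-here", %{text: "An updated answer"}}
  #     ])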
@spec index_or_create_document(
document(),
action :: atom()
) :: binary()
defp index_or_create_document({index, type, id, document}, action) do
[
Jason.encode!(%{action => identifier(index, type, id)}),
Jason.encode!(document)
]
|> Enum.join("\n")
end
@spec update_document(document()) :: binary()
defp update_document({index, type, id, document}) do
[
Jason.encode!(%{update: identifier(index, type, id)}),
# Note that the API here is slightly different to index_or_create_document/2.
Jason.encode!(%{doc: document})
]
|> Enum.join("\n")
end
@spec identifier(
index :: binary(),
type :: binary(),
id :: Document.id() | nil
) :: %{
required(:_index) => binary(),
required(:_type) => binary(),
optional(:_id) => binary()
}
defp identifier(index, type, nil) do
%{_index: index, _type: type}
end
defp identifier(index, type, id) do
identifier(index, type, nil) |> Map.put(:_id, id)
end
  @spec call_bulk_api(queries :: list(binary())) :: ResponseHandler.result()
defp call_bulk_api(queries) do
queries = queries |> Enum.join("\n")
HTTP.bulk(body: queries)
end
end
|
lib/elastic/bulk.ex
| 0.836688
| 0.807764
|
bulk.ex
|
starcoder
|
defmodule XUtil.Bitstring do
@moduledoc """
Simple utilities for working with bitstrings.
Elixir has amazing support for working with binaries (bitstrings whose size is divisible by 8)
"""
@doc """
Joins the enumerable bitstrings into a single bitstring.
Examples:
iex(1)> XUtil.Bitstring.join([<<1, 2, 3>>, <<1::size(1)>>, <<2::size(2)>>])
<<1, 2, 3, 1::size(1), 2::size(2)>>
iex(1)> XUtil.Bitstring.join([<<1, 2, 3>>, <<>>, <<1::size(1)>>, <<2::size(2)>>])
<<1, 2, 3, 1::size(1), 2::size(2)>>
iex(1)> XUtil.Bitstring.join([<<>>, <<1::size(1)>>, <<2::size(2)>>])
<<6::size(3)>>
"""
def join(bitstrings) do
Enum.reduce(bitstrings, fn bits, acc ->
acc_size = bit_size(acc)
<<acc::bitstring-size(acc_size), bits::bitstring>>
end)
end
@doc """
True if the first bitstring contains the second.
Examples:
iex(1)> XUtil.Bitstring.contains(<<1, 2, 3, 4, 1::size(1)>>, <<2, 3, 4>>)
true
iex(1)> XUtil.Bitstring.contains(<<1, 2, 3, 4, 1::size(1)>>, <<3, 4, 1::size(1)>>)
true
iex(1)> XUtil.Bitstring.contains(<<1, 2, 3, 4, 1::size(1)>>, <<1::size(1)>>)
true
iex(1)> XUtil.Bitstring.contains(<<1, 2, 3, 4, 1::size(1)>>, <<>>)
true
iex(1)> XUtil.Bitstring.contains(<<1, 2, 3, 4, 1::size(1)>>, <<1, 2, 3, 4, 1::size(2)>>)
false
"""
def contains(haystack, needle)
# A catch for when you inadvertently pass in a multiple-of-8 sized bitstring.
# This should be quite a bit faster than our recursive implementation.
def contains(haystack, needle) when is_binary(haystack) and is_binary(needle) do
:binary.match(haystack, needle) != :nomatch
end
def contains(haystack, needle) when is_bitstring(haystack) and is_bitstring(needle) do
cond do
bit_size(needle) > bit_size(haystack) -> false
bit_size(needle) == bit_size(haystack) -> needle == haystack
true -> contains_subbitstring(haystack, needle)
end
end
@doc """
Splits the bitstring into chunks of equal size. (Input bitstring must be an even multiple of your size.)
iex> XUtil.Bitstring.chunk(<<255::size(241)>>, 241)
[<<255::size(241)>>]
iex> XUtil.Bitstring.chunk(<<255::size(241), 255::size(241), 255::size(241), 255::size(241)>>, 241)
[<<255::size(241)>>, <<255::size(241)>>, <<255::size(241)>>, <<255::size(241)>>]
"""
def chunk(bits, chunk_bit_size) when is_bitstring(bits) do
for <<chunk::bitstring-size(chunk_bit_size) <- bits>> do
<<chunk::bitstring-size(chunk_bit_size)>>
end
end
defp contains_subbitstring(haystack, needle) when is_bitstring(haystack) and is_bitstring(needle) do
needle_size = bit_size(needle)
case haystack do
<<>> -> false
<<^needle::bitstring-size(needle_size), _::bitstring>> -> true
<<_::bitstring-size(1), remainder::bitstring>> -> contains_subbitstring(remainder, needle)
end
end
end
|
lib/x_util/bitstring.ex
| 0.641198
| 0.503479
|
bitstring.ex
|
starcoder
|
defmodule Bolt.Cogs.Role.Mute do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.Converters
alias Bolt.Schema.MuteRole
alias Bolt.{ErrorFormatters, ModLog, Repo}
alias Nosedrum.Predicates
alias Nostrum.Api
alias Nostrum.Cache.GuildCache
alias Nostrum.Struct.User
@impl true
def usage, do: ["role mute [muterole:role...]"]
@impl true
def description,
do: """
Set the role to be applied when the `.mute` or `.tempmute` commands are used.
When invoked without a role, show the currently configured mute role.
Note that the `.mute` and `.tempmute` commands can be used by users with the guild-wide `MANAGE_MESSAGES` permission.
Requires the `MANAGE_GUILD` permission.
**Example**:
```rs
// See the currently configured mute role.
.role mute
// Set the mute role to a role called 'Muted'
.role mute Muted
```
"""
@impl true
def predicates, do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_guild)]
@impl true
def command(msg, []) do
response =
with %MuteRole{role_id: role_id} <- Repo.get(MuteRole, msg.guild_id),
{:ok, role} <-
GuildCache.select(
msg.guild_id,
&Map.get(&1.roles, role_id)
) do
if role == nil do
"ℹ️ mute role is currently set to an unknown role, does it exist?"
else
"ℹ️ mute role is currently set to `#{role.name}`"
end
else
nil ->
"ℹ️ no mute role configured, pass a role to set it up"
error ->
ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, ["delete"]) do
response =
with row when row != nil <- Repo.get(MuteRole, msg.guild_id),
{:ok, struct} <- Repo.delete(row) do
ModLog.emit(
msg.guild_id,
"CONFIG_UPDATE",
"#{User.full_name(msg.author)} deleted configured mute role, was `#{struct.role_id}`"
)
"👌 deleted configured mute role"
else
nil -> "🚫 no mute role is set up"
error -> ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, role_str_list) do
role_str = Enum.join(role_str_list, " ")
response =
with {:ok, role} <- Converters.to_role(msg.guild_id, role_str),
mute_role_map <- %{
guild_id: msg.guild_id,
role_id: role.id
},
changeset <- MuteRole.changeset(%MuteRole{}, mute_role_map),
{:ok, _struct} <-
Repo.insert(changeset,
on_conflict: [set: [role_id: role.id]],
conflict_target: :guild_id
) do
ModLog.emit(
msg.guild_id,
"CONFIG_UPDATE",
"#{User.full_name(msg.author)} set the mute role to `#{role.name}`"
)
"👌 will now use role `#{role.name}` for mutes"
else
error -> ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
|
lib/bolt/cogs/role/mute.ex
| 0.878262
| 0.600862
|
mute.ex
|
starcoder
|
defmodule Day09 do
@moduledoc """
Advent of Code 2021, Day 9: find the low points of a height map (part 1)
and the product of the three largest basin sizes (part 2).
"""
def part1(input) do
maxrows = Enum.count(input)
maxcols = Enum.count(Enum.at(input, 0))
input
|> to_map()
|> find_lows(maxrows, maxcols, :heights)
|> Enum.map(&(&1 + 1))
|> Enum.sum()
end
def part2(input) do
maxrows = Enum.count(input)
maxcols = Enum.count(Enum.at(input, 0))
seafloor = to_map(input)
find_lows(seafloor, maxrows, maxcols, :positions)
|> Enum.map(&find_basin([&1], MapSet.new(), seafloor, maxrows, maxcols))
|> Enum.sort(fn a, b -> a > b end)
|> Enum.take(3)
|> Enum.reduce(&*/2)
end
  # could be better: builds one map per row, then merges them all at the end
defp to_map(input) do
input
|> Enum.with_index()
|> Enum.map(fn {row, ridx} ->
row
|> Enum.with_index()
|> Enum.reduce(%{}, fn {height, cidx}, map -> Map.put(map, {ridx, cidx}, height) end)
end)
|> Enum.reduce(%{}, fn m, acc -> Map.merge(acc, m) end)
end
defp around({r, c}, maxrows, maxcols) do
[{r - 1, c}, {r + 1, c}, {r, c - 1}, {r, c + 1}]
|> Enum.filter(fn {r, c} -> r >= 0 and r < maxrows and c >= 0 and c < maxcols end)
end
defp find_lows(seafloor, maxrows, maxcols, what) do
Enum.reduce(seafloor, [], fn {{r, c}, height}, lows ->
maybe =
around({r, c}, maxrows, maxcols)
|> Enum.map(fn {r, c} -> Map.get(seafloor, {r, c}) end)
|> Enum.all?(fn h -> height < h end)
cond do
maybe and what == :heights -> [height | lows]
maybe and what == :positions -> [{r, c} | lows]
true -> lows
end
end)
end
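  # Flood-fill from a low point: repeatedly expand to unvisited neighbours
  # with height < 9, accumulating them in `seen`, and return the basin size.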
defp find_basin([], seen, _seafloor, _maxrows, _maxcols), do: Enum.count(seen)
defp find_basin([{r, c} | locations], seen, seafloor, maxrows, maxcols) do
more =
around({r, c}, maxrows, maxcols)
|> Enum.filter(fn {r, c} -> {r, c} not in seen end)
|> Enum.filter(fn {r, c} -> Map.get(seafloor, {r, c}) < 9 end)
find_basin(
more ++ locations,
MapSet.union(seen, MapSet.new(more)),
seafloor,
maxrows,
maxcols
)
end
end
|
2021/day09/lib/day09.ex
| 0.698535
| 0.506591
|
day09.ex
|
starcoder
|
defmodule ScrapyCloudEx.HttpAdapter.RequestConfig do
@moduledoc """
Struct containing the configuration for an API call.
"""
@typedoc """
Contains the configuration relevant for an API request:
* `:api_key` - the API key as can be obtained [here](https://app.scrapinghub.com/account/apikey).
This must be provided to the API either by using HTTP Basic authentication (which is the approach
used by `ScrapyCloudEx.HttpAdapters.Default`), or within the URL as a query parameter:
`https://storage.scrapinghub.com/foo?apikey=APIKEY`.
See [docs](https://doc.scrapinghub.com/scrapy-cloud.html#authentication) for more info.
* `:url` - the API URL to send the request to. May contain query parameters.
* `:method` - HTTP request method to use. Supported values are: `:get`, `:post`, `:put`, `:delete`.
* `:headers` - headers to add to the request. By default a `{:"Accept-Encoding", "gzip"}` is always present.
* `:body` - request body.
* `:opts` - any options provided to an endpoint method will be copied here. Always contains a `:decoder`
value which is either a module implementing the `ScrapyCloudEx.Decoder` behaviour, or a function
of type `t:ScrapyCloudEx.Decoder.decoder_function/0`. Adding values here can be particularly
useful to work around certain API quirks, such as the `ScrapyCloudEx.Endpoints.App.Jobs.list/4` endpoint
which will return a "text/plain" encoding value when requesting the `:jl` format. By adding (for example)
the requested format in the `:opts` parameter of the endpoint call, the implementation of
`c:ScrapyCloudEx.HttpAdapter.handle_response/2` can process the body as appropriate.
"""
@type t :: %__MODULE__{}
@http_methods [:get, :post, :put, :delete]
@default_decoder ScrapyCloudEx.Decoders.Default
defstruct [
:api_key,
:url,
method: :get,
headers: [],
body: [],
opts: [
decoder: @default_decoder
]
]
@doc false
@spec new() :: t
def new(), do: %__MODULE__{}
@doc false
@spec put(t, atom | any, any) :: t
def put(%__MODULE__{} = config, key, value) when key in [:api_key, :url] and is_binary(value) do
config |> Map.put(key, value)
end
def put(%__MODULE__{}, key, _) when key in [:api_key, :url] do
raise ArgumentError, message: "value for key '#{key}' must be a string"
end
def put(%__MODULE__{} = config, :method, method) when method in @http_methods do
config |> Map.put(:method, method)
end
def put(%__MODULE__{}, :method, _) do
raise ArgumentError, message: "method must be one of #{inspect(@http_methods)}"
end
def put(%__MODULE__{} = config, key, value) when key in [:headers, :body, :opts] do
if tuple_list?(value) do
config |> Map.put(key, value)
else
raise ArgumentError,
message: "value for key '#{key}' must be a list of tuples (such as a keyword list)"
end
end
def put(%__MODULE__{}, _, _) do
valid_keys = new() |> Map.keys()
raise ArgumentError, message: "key must be one of #{inspect(valid_keys)}"
end
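  # Building a config (key and URL are illustrative):
  #
  #     RequestConfig.new()
  #     |> RequestConfig.put(:api_key, "APIKEY")
  #     |> RequestConfig.put(:url, "https://storage.scrapinghub.com/foo")
  #     |> RequestConfig.put(:method, :get)
  #     |> RequestConfig.ensure_defaults()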
@doc false
@spec ensure_defaults(t()) :: t()
def ensure_defaults(%__MODULE__{} = config) do
config
|> ensure_decoder()
|> default_encoding_to_gzip()
end
@spec tuple_list?(any) :: boolean
defp tuple_list?([]), do: true
defp tuple_list?([{_, _} | t]), do: tuple_list?(t)
defp tuple_list?(_), do: false
@spec ensure_decoder(t()) :: t()
defp ensure_decoder(config) do
if Keyword.get(config.opts, :decoder) do
config
else
%{config | opts: Keyword.put(config.opts, :decoder, @default_decoder)}
end
end
@spec default_encoding_to_gzip(t()) :: t()
defp default_encoding_to_gzip(config) do
if has_encoding_header?(config.headers) do
config
else
%{config | headers: [{:"Accept-Encoding", "gzip"} | config.headers]}
end
end
  @spec has_encoding_header?([{atom(), String.t()}]) :: boolean()
defp has_encoding_header?([]), do: false
defp has_encoding_header?([{k, _} | t]) do
if k |> Atom.to_string() |> String.downcase() == "accept-encoding" do
true
else
has_encoding_header?(t)
end
end
end
|
lib/http_adapter/request_config.ex
| 0.883532
| 0.563138
|
request_config.ex
|
starcoder
|
defmodule Proplist do
@moduledoc """
A proplist is a list of tuples where the first element
of the tuple is a binary and the second element can be
any value.
A proplist may have duplicated props so it is not strictly
a dictionary. However most of the functions in this module
behave exactly as a dictionary and mimic the API defined
by the `Dict` behaviour.
For example, `Proplist.get/3` will get the first entry matching
the given prop, regardless if duplicated entries exist.
Similarly, `Proplist.put/3` and `Proplist.delete/3` ensure all
duplicated entries for a given prop are removed when invoked.
A handful of functions exist to handle duplicated props, in
particular, `Enum.into/2` allows creating new proplist without
removing duplicated props, `get_values/2` returns all values for
a given prop and `delete_first/2` deletes just one of the existing
entries.
Since a proplist list is simply a list, all the operations defined
in `Enum` and `List` can be applied.
"""
@behaviour Dict
@type prop :: binary
@type value :: any
@type t :: [{prop, value}]
@type t(value) :: [{prop, value}]
@doc """
Checks if the given argument is a proplist list or not.
"""
@spec proplist?(term) :: boolean
def proplist?([{prop, _value} | rest]) when is_binary(prop) do
proplist?(rest)
end
def proplist?([]), do: true
def proplist?(_), do: false
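  # For example:
  #
  #     iex> Proplist.proplist?([{"a", 1}])
  #     true
  #     iex> Proplist.proplist?([{:a, 1}])
  #     false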
@doc """
Returns an empty property list, i.e. an empty list.
"""
@spec new :: t
def new do
[]
end
@doc """
Creates a proplist from an enumerable.
Duplicated entries are removed, the latest one prevails.
Unlike `Enum.into(enumerable, [])`,
`Proplist.new(enumerable)` guarantees the props are unique.
## Examples
iex> Proplist.new([{"b", 1}, {"a", 2}])
[{"a", 2}, {"b", 1}]
"""
@spec new(Enum.t) :: t
def new(pairs) do
Enum.reduce pairs, [], fn {p, v}, proplist ->
put(proplist, p, v)
end
end
@doc """
Creates a proplist from an enumerable via the transformation function.
Duplicated entries are removed, the latest one prevails.
Unlike `Enum.into(enumerable, [], fun)`,
`Proplist.new(enumerable, fun)` guarantees the props are unique.
## Examples
iex> Proplist.new(["a", "b"], fn (x) -> {x, x} end) |> Enum.sort
[{"a", "a"}, {"b", "b"}]
"""
def new(pairs, transform) do
Enum.reduce pairs, [], fn i, proplist ->
{p, v} = transform.(i)
put(proplist, p, v)
end
end
@doc """
Gets the value for a specific `prop`.
If `prop` does not exist, return the default value (`nil` if no default value).
If duplicated entries exist, the first one is returned.
Use `get_values/2` to retrieve all entries.
## Examples
iex> Proplist.get([{"a", 1}], "a")
1
iex> Proplist.get([{"a", 1}], "b")
nil
iex> Proplist.get([{"a", 1}], "b", 3)
3
"""
@spec get(t, prop) :: value
@spec get(t, prop, value) :: value
def get(proplist, prop, default \\ nil) when is_list(proplist) and is_binary(prop) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, value} -> value
false -> default
end
end
@doc """
Gets the value for a specific `prop`.
If `prop` does not exist, lazily evaluates `fun` and returns its result.
If duplicated entries exist, the first one is returned.
Use `get_values/2` to retrieve all entries.
## Examples
iex> proplist = [{"a", 1}]
iex> fun = fn ->
...> :result
...> end
iex> Proplist.get_lazy(proplist, "a", fun)
1
iex> Proplist.get_lazy(proplist, "b", fun)
:result
"""
@spec get_lazy(t, prop, (() -> value)) :: value
def get_lazy(proplist, prop, fun) when is_list(proplist) and is_binary(prop) and is_function(fun, 0) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, value} -> value
false -> fun.()
end
end
@doc """
Gets the value from `prop` and updates it, all in one pass.
The `fun` argument receives the value of `prop` (or `nil` if `prop`
is not present) and must return a two-element tuple: the "get" value (the
retrieved value, which can be operated on before being returned) and the new
value to be stored under `prop`.
The returned value is a tuple with the "get" value returned by `fun` and a new
proplist with the updated value under `prop`.
## Examples
iex> Proplist.get_and_update [{"a", 1}], "a", fn(current_value) ->
...> {current_value, current_value + 1}
...> end
{1, [{"a", 2}]}
"""
@spec get_and_update(t, prop, (value -> {value, value})) :: {value, t}
def get_and_update(proplist, prop, fun) when is_list(proplist) and is_binary(prop) do
get_and_update(proplist, [], prop, fun)
end
defp get_and_update([{prop, value}|t], acc, prop, fun) do
{get, new_value} = fun.(value)
{get, :lists.reverse(acc, [{prop, new_value}|t])}
end
defp get_and_update([head|tail], acc, prop, fun) do
get_and_update(tail, [head|acc], prop, fun)
end
defp get_and_update([], acc, prop, fun) do
{get, update} = fun.(nil)
{get, [{prop, update}|:lists.reverse(acc)]}
end
@doc """
Fetches the value for a specific `prop` and returns it in a tuple.
If the `prop` does not exist, returns `:error`.
## Examples
iex> Proplist.fetch([{"a", 1}], "a")
{:ok, 1}
iex> Proplist.fetch([{"a", 1}], "b")
:error
"""
@spec fetch(t, prop) :: {:ok, value} | :error
def fetch(proplist, prop) when is_list(proplist) and is_binary(prop) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, value} -> {:ok, value}
false -> :error
end
end
@doc """
Fetches the value for specific `prop`.
If `prop` does not exist, a `KeyError` is raised.
## Examples
iex> Proplist.fetch!([{"a", 1}], "a")
1
iex> Proplist.fetch!([{"a", 1}], "b")
** (KeyError) key "b" not found in: [{"a", 1}]
"""
@spec fetch!(t, prop) :: value | no_return
def fetch!(proplist, prop) when is_list(proplist) and is_binary(prop) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, value} -> value
false -> raise KeyError, key: prop, term: proplist
end
end
@doc """
Gets all values for a specific `prop`.
## Examples
iex> Proplist.get_values([{"a", 1}, {"a", 2}], "a")
[1,2]
"""
@spec get_values(t, prop) :: [value]
def get_values(proplist, prop) when is_list(proplist) and is_binary(prop) do
fun = fn
{p, v} when p === prop -> {true, v}
{_, _} -> false
end
:lists.filtermap(fun, proplist)
end
@doc """
Returns all props from the proplist list.
Duplicated props appear duplicated in the final list of props.
## Examples
iex> Proplist.props([{"a", 1}, {"b", 2}])
["a", "b"]
iex> Proplist.props([{"a", 1}, {"b", 2}, {"a", 3}])
["a", "b", "a"]
"""
@spec props(t) :: [prop]
def props(proplist) when is_list(proplist) do
:lists.map(fn {p, _} -> p end, proplist)
end
@doc """
Returns all values from the proplist list.
## Examples
iex> Proplist.values([{"a", 1}, {"b", 2}])
[1,2]
"""
@spec values(t) :: [value]
def values(proplist) when is_list(proplist) do
:lists.map(fn {_, v} -> v end, proplist)
end
@doc """
Deletes the entries in the proplist list for a `prop` with `value`.
If no `prop` with `value` exists, returns the proplist list unchanged.
## Examples
iex> Proplist.delete([{"a", 1}, {"b", 2}], "a", 1)
[{"b", 2}]
iex> Proplist.delete([{"a", 1}, {"b", 2}, {"a", 3}], "a", 3)
[{"a", 1}, {"b", 2}]
iex> Proplist.delete([{"b", 2}], "a", 5)
[{"b", 2}]
"""
@spec delete(t, prop, value) :: t
def delete(proplist, prop, value) when is_list(proplist) and is_binary(prop) do
:lists.filter(fn {k, v} -> k != prop or v != value end, proplist)
end
@doc """
Deletes the entries in the proplist list for a specific `prop`.
If the `prop` does not exist, returns the proplist list unchanged.
Use `delete_first/2` to delete just the first entry in case of
duplicated props.
## Examples
iex> Proplist.delete([{"a", 1}, {"b", 2}], "a")
[{"b", 2}]
iex> Proplist.delete([{"a", 1}, {"b", 2}, {"a", 3}], "a")
[{"b", 2}]
iex> Proplist.delete([{"b", 2}], "a")
[{"b", 2}]
"""
@spec delete(t, prop) :: t
def delete(proplist, prop) when is_list(proplist) and is_binary(prop) do
:lists.filter(fn {k, _} -> k != prop end, proplist)
end
@doc """
Deletes the first entry in the proplist list for a specific `prop`.
If the `prop` does not exist, returns the proplist list unchanged.
## Examples
iex> Proplist.delete_first([{"a", 1}, {"b", 2}, {"a", 3}], "a")
[{"b", 2}, {"a", 3}]
iex> Proplist.delete_first([{"b", 2}], "a")
[{"b", 2}]
"""
@spec delete_first(t, prop) :: t
def delete_first(proplist, prop) when is_list(proplist) and is_binary(prop) do
:lists.keydelete(prop, 1, proplist)
end
@doc """
Puts the given `value` under `prop`.
If a previous value is already stored, all entries are
removed and the value is overridden.
## Examples
iex> Proplist.put([{"a", 1}, {"b", 2}], "a", 3)
[{"a", 3}, {"b", 2}]
iex> Proplist.put([{"a", 1}, {"b", 2}, {"a", 4}], "a", 3)
[{"a", 3}, {"b", 2}]
"""
@spec put(t, prop, value) :: t
def put(proplist, prop, value) when is_list(proplist) and is_binary(prop) do
[{prop, value}|delete(proplist, prop)]
end
@doc """
Evaluates `fun` and puts the result under `prop`
in proplist unless `prop` is already present.
## Examples
iex> proplist = [{"a", 1}]
iex> fun = fn ->
...> 3
...> end
iex> Proplist.put_new_lazy(proplist, "a", fun)
[{"a", 1}]
iex> Proplist.put_new_lazy(proplist, "b", fun)
[{"b", 3}, {"a", 1}]
"""
@spec put_new_lazy(t, prop, (() -> value)) :: t
def put_new_lazy(proplist, prop, fun) when is_list(proplist) and is_binary(prop) and is_function(fun, 0) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, _} -> proplist
false -> [{prop, fun.()}|proplist]
end
end
@doc """
Puts the given `value` under `prop` unless the entry `prop`
already exists.
## Examples
iex> Proplist.put_new([{"a", 1}], "b", 2)
[{"b", 2}, {"a", 1}]
iex> Proplist.put_new([{"a", 1}, {"b", 2}], "a", 3)
[{"a", 1}, {"b", 2}]
"""
@spec put_new(t, prop, value) :: t
def put_new(proplist, prop, value) when is_list(proplist) and is_binary(prop) do
case :lists.keyfind(prop, 1, proplist) do
{^prop, _} -> proplist
false -> [{prop, value}|proplist]
end
end
@doc """
Checks if two proplists are equal.
Two proplists are considered to be equal if they contain
the same props and those props contain the same values.
## Examples
iex> Proplist.equal?([{"a", 1}, {"b", 2}], [{"b", 2}, {"a", 1}])
true
"""
@spec equal?(t, t) :: boolean
def equal?(left, right) when is_list(left) and is_list(right) do
:lists.sort(left) == :lists.sort(right)
end
@doc """
Merges two proplist lists into one.
If they have duplicated props, the one given in the second argument wins.
## Examples
iex> Proplist.merge([{"a", 1}, {"b", 2}], [{"a", 3}, {"d", 4}]) |> Enum.sort
[{"a", 3}, {"b", 2}, {"d", 4}]
"""
@spec merge(t, t) :: t
def merge(d1, d2) when is_list(d1) and is_list(d2) do
fun = fn {k, _v} -> not has_prop?(d2, k) end
d2 ++ :lists.filter(fun, d1)
end
@doc """
Merges two proplist lists into one.
If they have duplicated props, the given function is invoked to solve conflicts.
## Examples
iex> Proplist.merge([{"a", 1}, {"b", 2}], [{"a", 3}, {"d", 4}], fn (_k, v1, v2) ->
...> v1 + v2
...> end)
[{"a", 4}, {"b", 2}, {"d", 4}]
"""
@spec merge(t, t, (prop, value, value -> value)) :: t
def merge(d1, d2, fun) when is_list(d1) and is_list(d2) do
do_merge(d2, d1, fun)
end
defp do_merge([{k, v2}|t], acc, fun) do
do_merge t, update(acc, k, v2, fn(v1) -> fun.(k, v1, v2) end), fun
end
defp do_merge([], acc, _fun) do
acc
end
@doc """
Returns whether a given `prop` exists in the given `proplist`.
## Examples
iex> Proplist.has_prop?([{"a", 1}], "a")
true
iex> Proplist.has_prop?([{"a", 1}], "b")
false
"""
@spec has_prop?(t, prop) :: boolean
def has_prop?(proplist, prop) when is_list(proplist) and is_binary(prop) do
:lists.keymember(prop, 1, proplist)
end
@doc """
Updates the `prop` with the given function.
If the `prop` does not exist, raises `KeyError`.
If there are duplicated props, they are all removed and only the first one
is updated.
## Examples
iex> Proplist.update!([{"a", 1}], "a", &(&1 * 2))
[{"a", 2}]
iex> Proplist.update!([{"a", 1}], "b", &(&1 * 2))
** (KeyError) key "b" not found in: [{"a", 1}]
"""
@spec update!(t, prop, (value -> value)) :: t | no_return
def update!(proplist, prop, fun) do
update!(proplist, prop, fun, proplist)
end
defp update!([{prop, value}|proplist], prop, fun, _dict) do
[{prop, fun.(value)}|delete(proplist, prop)]
end
defp update!([{_, _} = e|proplist], prop, fun, dict) do
[e|update!(proplist, prop, fun, dict)]
end
defp update!([], prop, _fun, dict) when is_binary(prop) do
raise(KeyError, key: prop, term: dict)
end
@doc """
Updates the `prop` with the given function.
If the `prop` does not exist, inserts the given `initial` value.
If there are duplicated props, they are all removed and only the first one
is updated.
## Examples
iex> Proplist.update([{"a", 1}], "a", 13, &(&1 * 2))
[{"a", 2}]
iex> Proplist.update([{"a", 1}], "b", 11, &(&1 * 2))
[{"a", 1}, {"b", 11}]
"""
@spec update(t, prop, value, (value -> value)) :: t
def update([{prop, value}|proplist], prop, _initial, fun) do
[{prop, fun.(value)}|delete(proplist, prop)]
end
def update([{_, _} = e|proplist], prop, initial, fun) do
[e|update(proplist, prop, initial, fun)]
end
def update([], prop, initial, _fun) when is_binary(prop) do
[{prop, initial}]
end
@doc """
Takes all entries corresponding to the given props and extracts them into a
separate proplist list.
Returns a tuple with the new list and the old list with removed props.
Props for which there are no entries in the proplist list are ignored.
Entries with duplicated props end up in the same proplist list.
## Examples
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}]
iex> Proplist.split(d, ["a", "c", "e"])
{[{"a", 1}, {"c", 3}], [{"b", 2}, {"d", 4}]}
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}]
iex> Proplist.split(d, ["a", "c", "e"])
{[{"a", 1}, {"c", 3}, {"e", 5}], [{"b", 2}, {"d", 4}]}
"""
def split(proplist, props) when is_list(proplist) do
fun = fn {k, v}, {take, drop} ->
case k in props do
true -> {[{k, v}|take], drop}
false -> {take, [{k, v}|drop]}
end
end
acc = {[], []}
{take, drop} = :lists.foldl(fun, acc, proplist)
{:lists.reverse(take), :lists.reverse(drop)}
end
@doc """
Takes all entries corresponding to the given props and returns them in a new
proplist list.
Duplicated props are preserved in the new proplist list.
## Examples
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}]
iex> Proplist.take(d, ["a", "c", "e"])
[{"a", 1}, {"c", 3}]
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}]
iex> Proplist.take(d, ["a", "c", "e"])
[{"a", 1}, {"c", 3}, {"e", 5}]
"""
def take(proplist, props) when is_list(proplist) do
:lists.filter(fn {k, _} -> k in props end, proplist)
end
@doc """
Drops the given props from the proplist list.
Duplicated props are preserved in the new proplist list.
## Examples
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}]
iex> Proplist.drop(d, ["b", "d"])
[{"a", 1}, {"c", 3}]
iex> d = [{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}]
iex> Proplist.drop(d, ["b", "d"])
[{"a", 1}, {"c", 3}, {"e", 5}]
"""
def drop(proplist, props) when is_list(proplist) do
:lists.filter(fn {k, _} -> k not in props end, proplist)
end
@doc """
Returns the first value associated with `prop` in the proplist
list as well as the proplist list without `prop`.
All duplicated props are removed. See `pop_first/3` for
removing only the first entry.
## Examples
iex> Proplist.pop [{"a", 1}], "a"
{1,[]}
iex> Proplist.pop [{"a", 1}], "b"
{nil,[{"a", 1}]}
iex> Proplist.pop [{"a", 1}], "b", 3
{3,[{"a", 1}]}
iex> Proplist.pop [{"a", 1}], "b", 3
{3,[{"a", 1}]}
iex> Proplist.pop [{"a", 1}, {"a", 2}], "a"
{1,[]}
"""
@spec pop(t, prop, value) :: {value, t}
def pop(proplist, prop, default \\ nil) when is_list(proplist) do
{get(proplist, prop, default), delete(proplist, prop)}
end
@doc """
Returns the first value associated with `prop` in the proplist
as well as the proplist without `prop`.
If `prop` is not present, lazily evaluates `fun` and returns its result instead.
All duplicated props are removed. See `pop_first/3` for
removing only the first entry.
## Examples
iex> proplist = [{"a", 1}]
iex> fun = fn ->
...> :result
...> end
iex> Proplist.pop_lazy(proplist, "a", fun)
{1, []}
iex> Proplist.pop_lazy(proplist, "b", fun)
{:result, [{"a", 1}]}
"""
@spec pop_lazy(t, prop, (() -> value)) :: {value, t}
def pop_lazy(proplist, prop, fun) when is_list(proplist) and is_binary(prop) and is_function(fun, 0) do
case fetch(proplist, prop) do
{:ok, value} -> {value, delete(proplist, prop)}
:error -> {fun.(), proplist}
end
end
@doc """
Returns the first value associated with `prop` in the proplist
list as well as the proplist list without that particular occurrence
of `prop`.
Duplicated props are not removed.
## Examples
iex> Proplist.pop_first [{"a", 1}], "a"
{1,[]}
iex> Proplist.pop_first [{"a", 1}], "b"
{nil,[{"a", 1}]}
iex> Proplist.pop_first [{"a", 1}], "b", 3
{3,[{"a", 1}]}
iex> Proplist.pop_first [{"a", 1}], "b", 3
{3,[{"a", 1}]}
iex> Proplist.pop_first [{"a", 1}, {"a", 2}], "a"
{1,[{"a", 2}]}
"""
def pop_first(proplist, prop, default \\ nil) when is_list(proplist) do
{get(proplist, prop, default), delete_first(proplist, prop)}
end
# Dict callbacks
@doc false
def keys(proplist) when is_list(proplist) do
props(proplist)
end
@doc false
def has_key?(proplist, prop) when is_list(proplist) do
has_prop?(proplist, prop)
end
@doc false
def size(proplist) do
length(proplist)
end
@doc false
def to_list(proplist) do
proplist
end
end
|
lib/proplist.ex
| 0.872483
| 0.697979
|
proplist.ex
|
starcoder
|
defmodule Membrane.Core.Element.PadController do
@moduledoc false
# Module handling linking and unlinking pads.
alias Membrane.{Core, Event}
alias Core.{CallbackHandler, Message, PullBuffer}
alias Core.Element.{ActionHandler, EventController, PadModel, State}
alias Membrane.Element.{CallbackContext, Pad}
alias Bunch.Type
require CallbackContext.{PadAdded, PadRemoved}
require Message
require Pad
require PadModel
use Core.Element.Log
use Bunch
@doc """
Verifies a linked pad and initializes its data.
"""
@spec handle_link(Pad.ref_t(), Pad.direction_t(), pid, Pad.ref_t(), Keyword.t(), State.t()) ::
State.stateful_try_t()
def handle_link(pad_ref, direction, pid, other_ref, props, state) do
with :ok <- validate_pad_being_linked(pad_ref, direction, state) do
pad_name = pad_ref |> Pad.name_by_ref()
info = state.pads.info[pad_name]
state = init_pad_data(pad_ref, pid, other_ref, props, info, state)
state =
case Pad.availability_mode_by_ref(pad_ref) do
:static ->
state |> Bunch.Access.update_in([:pads, :info], &(&1 |> Map.delete(pad_name)))
:dynamic ->
add_to_currently_linking(pad_ref, state)
end
{:ok, state}
else
{:error, reason} -> {{:error, reason}, state}
end
end
@doc """
Performs checks and executes the `handle_pad_added` callback for the pads that have just been linked.
This can be done only at the end of linking, because before that point there is no guarantee
that the pad has been linked in the other element.
"""
@spec handle_linking_finished(State.t()) :: State.stateful_try_t()
def handle_linking_finished(state) do
with {:ok, state} <-
state.pads.dynamic_currently_linking
|> Bunch.Enum.try_reduce(state, &handle_pad_added/2) do
static_unlinked =
state.pads.info
|> Enum.flat_map(fn {name, info} ->
case info.availability |> Pad.availability_mode() do
:static -> [name]
_ -> []
end
end)
if not Enum.empty?(static_unlinked) do
warn(
"""
Some static pads remained unlinked: #{inspect(static_unlinked)}
""",
state
)
end
{:ok, clear_currently_linking(state)}
end
end
@doc """
Executes `handle_pad_removed` callback for dynamic pads and removes pad data.
"""
@spec handle_unlink(Pad.ref_t(), State.t()) :: State.stateful_try_t()
def handle_unlink(pad_ref, state) do
PadModel.assert_data!(pad_ref, %{direction: :input}, state)
with {:ok, state} <- generate_eos_if_not_received(pad_ref, state),
{:ok, state} <- handle_pad_removed(pad_ref, state),
{:ok, state} <- PadModel.delete_data(pad_ref, state) do
{:ok, state}
end
end
@doc """
Returns a pad reference - a term uniquely identifying a pad instance.
In case of a static pad it is just the pad's name; for a dynamic pad it is
a `{:dynamic, name, id}` tuple.
"""
@spec get_pad_ref(Pad.name_t(), State.t()) :: State.stateful_try_t(Pad.ref_t())
def get_pad_ref(pad_name, state) do
{pad_ref, state} =
state
|> Bunch.Access.get_and_update_in([:pads, :info, pad_name], fn
nil ->
:pop
%{availability: av, current_id: id} = pad_info when Pad.is_availability_dynamic(av) ->
{{:dynamic, pad_name, id}, %{pad_info | current_id: id + 1}}
%{availability: av} = pad_info when Pad.is_availability_static(av) ->
{pad_name, pad_info}
end)
{pad_ref |> Bunch.error_if_nil(:unknown_pad), state}
end
@spec validate_pad_being_linked(Pad.ref_t(), Pad.direction_t(), State.t()) :: Type.try_t()
defp validate_pad_being_linked(pad_ref, direction, state) do
info = state.pads.info[pad_ref |> Pad.name_by_ref()]
cond do
:ok == PadModel.assert_instance(pad_ref, state) ->
{:error, :already_linked}
info == nil ->
{:error, :unknown_pad}
Pad.availability_mode_by_ref(pad_ref) != Pad.availability_mode(info.availability) ->
{:error,
{:invalid_pad_availability_mode,
expected: Pad.availability_mode_by_ref(pad_ref),
actual: Pad.availability_mode(info.availability)}}
info.direction != direction ->
{:error, {:invalid_pad_direction, expected: direction, actual: info.direction}}
true ->
:ok
end
end
@spec init_pad_data(
Pad.ref_t(),
pid,
Pad.ref_t(),
props :: Keyword.t(),
PadModel.pad_info_t(),
State.t()
) :: State.t()
defp init_pad_data(ref, pid, other_ref, props, info, state) do
data =
info
|> Map.merge(%{
pid: pid,
other_ref: other_ref,
caps: nil,
start_of_stream?: false,
end_of_stream?: false
})
data = data |> Map.merge(init_pad_direction_data(data, props, state))
data = data |> Map.merge(init_pad_mode_data(data, props, state))
data = %Pad.Data{} |> Map.merge(data)
state |> Bunch.Access.put_in([:pads, :data, ref], data)
end
defp init_pad_direction_data(%{direction: :input}, _props, _state), do: %{sticky_messages: []}
defp init_pad_direction_data(%{direction: :output}, _props, _state), do: %{}
defp init_pad_mode_data(%{mode: :pull, direction: :input} = data, props, state) do
%{pid: pid, other_ref: other_ref, demand_unit: demand_unit} = data
:ok =
pid
|> Message.call(:demand_unit, [demand_unit, other_ref])
pb =
PullBuffer.new(
state.name,
pid,
other_ref,
demand_unit,
props[:pull_buffer] || %{}
)
%{buffer: pb, demand: 0}
end
defp init_pad_mode_data(%{mode: :pull, direction: :output}, _props, _state), do: %{demand: 0}
defp init_pad_mode_data(%{mode: :push}, _props, _state), do: %{}
@spec add_to_currently_linking(Pad.ref_t(), State.t()) :: State.t()
defp add_to_currently_linking(ref, state),
do: state |> Bunch.Access.update_in([:pads, :dynamic_currently_linking], &[ref | &1])
@spec clear_currently_linking(State.t()) :: State.t()
defp clear_currently_linking(state),
do: state |> Bunch.Access.put_in([:pads, :dynamic_currently_linking], [])
@spec generate_eos_if_not_received(Pad.ref_t(), State.t()) :: State.stateful_try_t()
defp generate_eos_if_not_received(pad_ref, state) do
if PadModel.get_data!(pad_ref, :end_of_stream?, state) do
{:ok, state}
else
EventController.exec_handle_event(pad_ref, %Event.EndOfStream{}, state)
end
end
@spec handle_pad_added(Pad.ref_t(), State.t()) :: State.stateful_try_t()
defp handle_pad_added(ref, state) do
context =
CallbackContext.PadAdded.from_state(
state,
direction: PadModel.get_data!(ref, :direction, state)
)
CallbackHandler.exec_and_handle_callback(
:handle_pad_added,
ActionHandler,
[ref, context],
state
)
end
@spec handle_pad_removed(Pad.ref_t(), State.t()) :: State.stateful_try_t()
defp handle_pad_removed(ref, state) do
%{direction: direction, availability: availability} = PadModel.get_data!(ref, state)
if availability |> Pad.availability_mode() == :dynamic do
context = CallbackContext.PadRemoved.from_state(state, direction: direction)
CallbackHandler.exec_and_handle_callback(
:handle_pad_removed,
ActionHandler,
[ref, context],
state
)
else
{:ok, state}
end
end
end
|
lib/membrane/core/element/pad_controller.ex
| 0.804098
| 0.434461
|
pad_controller.ex
|
starcoder
|
defmodule Blocked.Config do
@moduledoc """
Configuration for `Blocked`.
The simplest ways to configure `Blocked` is by:
- adding `config :blocked, warn: true, project_owner: "YourName"` to your Elixir configuration file.
(for older Elixir versions, this is `Mix.Config`; for newer versions, `Config`.)
- alternatively, adding system environment variables like `BLOCKED_WARN=true`, `BLOCKED_PROJECT_OWNER="YourName"` etc. to the environment you want to run `Blocked` in.
If you want to check the configuration in the current environment,
print the output of `Blocked.Config.load_with_defaults`.
More advanced configuration set-ups are possible by using the advanced features of `Specify`.
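For instance, a minimal `config/config.exs` entry could look like this (illustrative
only; adjust the values to your project):
```
import Config
config :blocked,
warn: true,
project_owner: "YourName",
project_repo: "your_repo"
```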
"""
require Specify
Specify.defconfig(sources: [%Specify.Provider.MixEnv{application: :blocked, optional: true}, %Specify.Provider.SystemEnv{prefix: "BLOCKED", optional: true}]) do
@doc """
Whether to trigger warnings in this particular environment.
When this is off, `Blocked.by` will silently compile
to whatever block was passed.
It is automatically turned on (by default)
when we're in a Continuous Integration environment.
(this is checked by looking for the presence of the `CI` environment variable.)
"""
field :warn, :term, default: nil
@doc """
The repository name of this source-code project.
This can be overridden if you cannot or don't want to rely
on `Blocked`'s auto-detection using the git command-line tools.
"""
field :project_repo, :term, default: nil
@doc """
The name of the owner or organization of this source-code project.
This can be overridden if you cannot or don't want to rely
on `Blocked`'s auto-detection using the git command-line tools.
"""
field :project_owner, :term, default: nil
@doc """
This needs to be set if (and only if) you have a private GitHub-project,
because otherwise we cannot access its issues.
The personal API token requires "repo" access.
c.f. https://github.blog/2013-05-16-personal-api-tokens/
"""
field :github_api_token, :term, default: nil
end
def load_with_defaults do
config = load()
if config.warn == nil && System.get_env("CI") do
put_in(config.warn, true)
else
config
end
end
end
|
lib/blocked/config.ex
| 0.806434
| 0.490968
|
config.ex
|
starcoder
|
defmodule HXL do
@moduledoc File.read!(Path.join([__DIR__, "..", "README.md"]))
alias __MODULE__.{Parser, Eval}
@type opt ::
{:variables, map()}
| {:functions, map()}
| {:keys, :strings | :atoms | :atoms! | (binary() -> term())}
| {:evaluator, HXL.Evaluator.t()}
@type opts :: [opt()]
@doc """
Reads a `HCL` document from file.
Uses the same options as `decode/2`.
## Examples
iex> HXL.decode_file("/path/to/file.hcl")
{:ok, %{"a" => "b"}}
"""
@spec decode_file(Path.t(), opts()) :: {:ok, map()} | {:error, term()}
def decode_file(path, opts \\ []) do
with {:ok, bin} <- File.read(path),
{:ok, _body} = return <- decode(bin, opts) do
return
else
# File error
{:error, reason} when is_atom(reason) ->
msg =
reason
|> :file.format_error()
|> List.to_string()
{:error, msg}
# Lex/parse error
{:error, _reason} = err ->
err
end
end
@doc """
Reads a `HCL` document from file, returns the document directly or raises `HXL.Error`.
See `decode_file/1`
"""
@spec decode_file!(Path.t(), opts) :: map() | no_return()
def decode_file!(path, opts \\ []) do
case decode_file(path, opts) do
{:ok, body} -> body
{:error, reason} -> raise HXL.Error, reason
end
end
@doc """
Decode a binary to a `HCL` document.
`decode/2` parses and evaluates the AST before returning the `HCL` document.
If the document uses functions in its definition, these need to be passed in the `opts` argument of this function.
See below for an example.
## Options
The following options can be passed to configure evaluation of the document:
* `:evaluator` - A `HXL.Evaluator` module to interpret the AST during evaluation. See `HXL.Evaluator` for more information.
* `:functions` - A map of `(<function_name> -> <function>)` to make available in document evaluation.
* `:variables` - A map of Top level variables that should be injected into the context when evaluating the document.
* `:keys` - controls how keys in the parsed AST are evaluated. Possible values are:
* `:strings` (default) - evaluates keys as strings
* `:atoms` - converts keys to atoms with `String.to_atom/1`
* `:atoms!` - converts keys to atoms with `String.to_existing_atom/1`
* `(key -> term)` - converts keys using the provided function
## Examples
Using functions:
iex> hcl = "a = upper(trim(\" a \"))"
"a = upper(trim(\" a \"))"
iex> HXL.decode(hcl, functions: %{"upper" => &String.capitalize/1, "trim" => &String.trim/1})
{:ok, %{"a" => "A"}}
Using variables:
iex> hcl = "a = b"
"a = b"
iex> HXL.decode(hcl, variables: %{"b" => "B"})
{:ok, %{"a" => "B"}}
"""
@spec decode(binary(), opts()) :: {:ok, map()} | {:error, term()}
def decode(binary, opts \\ []) do
with {:ok, body} <- Parser.parse(binary),
%Eval{document: doc} <- Eval.eval(body, opts) do
{:ok, doc}
end
end
@doc """
Reads a `HCL` document from a binary. Returns the document or raises `HXL.Error`.
See `decode/2`
"""
@spec decode!(binary(), opts) :: map() | no_return()
def decode!(bin, opts \\ []) do
case decode(bin, opts) do
{:ok, doc} -> doc
{:error, reason} -> raise HXL.Error, reason
end
end
@doc """
Decode a binary to a `HCL` document AST.
## Examples
iex> HXL.decode_as_ast("a = 1")
{:ok, %HXL.Ast.Body{
statements: [
%HXL.Ast.Attr{
expr: %HXL.Ast.Literal{value: {:int, 1}}, name: "a"}
]
}}
"""
@spec decode_as_ast(binary()) :: {:ok, HXL.Ast.t()} | {:error, term()}
defdelegate decode_as_ast(binary), to: __MODULE__.Parser, as: :parse
end
|
lib/hxl.ex
| 0.870446
| 0.542439
|
hxl.ex
|
starcoder
|
defmodule ExWire.Packet.GetBlockHeaders do
@moduledoc """
Requests block headers starting from a given hash.
```
**GetBlockHeaders** [`+0x03`: `P`, `block`: { `P` , `B_32` }, `maxHeaders`: `P`, `skip`: `P`, `reverse`: `P` in { `0` , `1` } ]
Require peer to return a BlockHeaders message. Reply
must contain a number of block headers, of rising number when reverse is 0,
falling when 1, skip blocks apart, beginning at block block (denoted by either
number or hash) in the canonical chain, and with at most maxHeaders items.
```
"""
alias ExWire.Packet
@behaviour ExWire.Packet
@type t :: %__MODULE__{
block_identifier: Packet.block_identifier,
max_headers: integer(),
skip: integer(),
reverse: boolean()
}
defstruct [
:block_identifier,
:max_headers,
:skip,
:reverse
]
@doc """
Given a GetBlockHeaders packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.GetBlockHeaders{block_identifier: 5, max_headers: 10, skip: 2, reverse: true}
...> |> ExWire.Packet.GetBlockHeaders.serialize
[5, 10, 2, 1]
iex> %ExWire.Packet.GetBlockHeaders{block_identifier: <<5>>, max_headers: 10, skip: 2, reverse: false}
...> |> ExWire.Packet.GetBlockHeaders.serialize
[<<5>>, 10, 2, 0]
"""
@spec serialize(t) :: ExRLP.t
def serialize(packet=%__MODULE__{}) do
[
packet.block_identifier,
packet.max_headers,
packet.skip,
(if packet.reverse, do: 1, else: 0)
]
end
@doc """
Given an RLP-encoded GetBlockHeaders packet from Eth Wire Protocol,
decodes into a GetBlockHeaders struct.
## Examples
iex> ExWire.Packet.GetBlockHeaders.deserialize([5, 10, 2, 1])
%ExWire.Packet.GetBlockHeaders{block_identifier: 5, max_headers: 10, skip: 2, reverse: true}
iex> ExWire.Packet.GetBlockHeaders.deserialize([<<5>>, 10, 2, 0])
%ExWire.Packet.GetBlockHeaders{block_identifier: <<5>>, max_headers: 10, skip: 2, reverse: false}
"""
@spec deserialize(ExRLP.t) :: t
def deserialize(rlp) do
[
block_identifier,
max_headers,
skip,
reverse
] = rlp
%__MODULE__{
block_identifier: block_identifier,
max_headers: max_headers,
skip: skip,
reverse: reverse == 1
}
end
@doc """
Handles a GetBlockHeaders message. We should send the block headers
to the peer if we have them. For now, we'll do nothing.
## Examples
iex> %ExWire.Packet.GetBlockHeaders{block_identifier: 5, max_headers: 10, skip: 2, reverse: true}
...> |> ExWire.Packet.GetBlockHeaders.handle()
:ok
"""
@spec handle(ExWire.Packet.packet) :: ExWire.Packet.handle_response
def handle(_packet=%__MODULE__{}) do
:ok
end
end
|
apps/ex_wire/lib/ex_wire/packet/get_block_headers.ex
| 0.915034
| 0.887838
|
get_block_headers.ex
|
starcoder
|
defmodule Geometry.MultiPolygonZM do
@moduledoc """
A set of polygons of type `Geometry.PolygonZM`.
`MultiPolygonZM` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ]),
...> ]),
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(35, 10, 13, 14),
...> PointZM.new(45, 45, 23, 24),
...> PointZM.new(10, 20, 33, 34),
...> PointZM.new(35, 10, 13, 14)
...> ]),
...> LineStringZM.new([
...> PointZM.new(20, 30, 13, 14),
...> PointZM.new(35, 35, 23, 24),
...> PointZM.new(30, 20, 33, 34),
...> PointZM.new(20, 30, 13, 14)
...> ])
...> ])
...> ]),
...> fn polygon -> length(polygon) == 1 end
...> )
[true, false]
iex> Enum.into(
...> [
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> ],
...> MultiPolygonZM.new())
%MultiPolygonZM{
polygons:
MapSet.new([
[
[
[11, 12, 13, 14],
[11, 22, 23, 24],
[31, 22, 33, 34],
[11, 12, 13, 14]
]
]
])
}
"""
alias Geometry.{GeoJson, MultiPolygonZM, PolygonZM, WKB, WKT}
defstruct polygons: MapSet.new()
@type t :: %MultiPolygonZM{polygons: MapSet.t([Geometry.coordinates()])}
@doc """
Creates an empty `MultiPolygonZM`.
## Examples
iex> MultiPolygonZM.new()
%MultiPolygonZM{polygons: MapSet.new()}
"""
@spec new :: t()
def new, do: %MultiPolygonZM{}
@doc """
Creates a `MultiPolygonZM` from the given `Geometry.PolygonZM`s.
## Examples
iex> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(6, 2, 3, 4),
...> PointZM.new(8, 2, 4, 5),
...> PointZM.new(8, 4, 5, 6),
...> PointZM.new(6, 2, 3, 4)
...> ]),
...> ]),
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(1, 1, 3, 4),
...> PointZM.new(9, 1, 4, 5),
...> PointZM.new(9, 8, 5, 6),
...> PointZM.new(1, 1, 3, 4)
...> ]),
...> LineStringZM.new([
...> PointZM.new(6, 2, 3, 4),
...> PointZM.new(7, 2, 4, 5),
...> PointZM.new(7, 3, 5, 6),
...> PointZM.new(6, 2, 3, 4)
...> ])
...> ])
...> ])
%MultiPolygonZM{
polygons:
MapSet.new([
[
[[1, 1, 3, 4], [9, 1, 4, 5], [9, 8, 5, 6], [1, 1, 3, 4]],
[[6, 2, 3, 4], [7, 2, 4, 5], [7, 3, 5, 6], [6, 2, 3, 4]]
],
[[[6, 2, 3, 4], [8, 2, 4, 5], [8, 4, 5, 6], [6, 2, 3, 4]]]
])
}
iex> MultiPolygonZM.new([])
%MultiPolygonZM{}
"""
@spec new([PolygonZM.t()]) :: t()
def new([]), do: %MultiPolygonZM{}
def new(polygons),
do: %MultiPolygonZM{
polygons: Enum.into(polygons, MapSet.new(), fn polygon -> polygon.rings end)
}
@doc """
Returns `true` if the given `MultiPolygonZM` is empty.
## Examples
iex> MultiPolygonZM.empty?(MultiPolygonZM.new())
true
iex> MultiPolygonZM.empty?(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(1, 1, 3, 4),
...> PointZM.new(1, 5, 4, 8),
...> PointZM.new(5, 4, 2, 6),
...> PointZM.new(1, 1, 3, 4)
...> ])
...> ])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%MultiPolygonZM{} = multi_polygon),
do: Enum.empty?(multi_polygon.polygons)
@doc """
Creates a `MultiPolygonZM` from the given coordinates.
## Examples
iex> MultiPolygonZM.from_coordinates([
...> [
...> [[6, 2, 3, 4], [8, 2, 4, 5], [8, 4, 5, 6], [6, 2, 3, 4]]
...> ], [
...> [[1, 1, 3, 4], [9, 1, 4, 5], [9, 8, 5, 6], [1, 1, 3, 4]],
...> [[6, 2, 4, 3], [7, 2, 6, 7], [7, 3, 3, 4], [6, 2, 4, 3]]
...> ]
...> ])
%MultiPolygonZM{
polygons:
MapSet.new([
[
[[6, 2, 3, 4], [8, 2, 4, 5], [8, 4, 5, 6], [6, 2, 3, 4]],
], [
[[1, 1, 3, 4], [9, 1, 4, 5], [9, 8, 5, 6], [1, 1, 3, 4]],
[[6, 2, 4, 3], [7, 2, 6, 7], [7, 3, 3, 4], [6, 2, 4, 3]]
]
])
}
"""
@spec from_coordinates([[Geometry.coordinates()]]) :: t()
def from_coordinates(coordinates) do
%MultiPolygonZM{
polygons: MapSet.new(coordinates)
}
end
@doc """
Returns an `:ok` tuple with the `MultiPolygonZM` from the given GeoJSON
term. Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "MultiPolygon",
...> "coordinates": [
...> [
...> [[6, 2, 3, 4], [8, 2, 4, 5], [8, 4, 5, 6], [6, 2, 3, 4]]
...> ], [
...> [[1, 1, 3, 4], [9, 1, 4, 5], [9, 8, 5, 6], [1, 1, 3, 4]],
...> [[6, 2, 4, 3], [7, 2, 6, 7], [7, 3, 3, 4], [6, 2, 4, 3]]
...> ]
...> ]
...> }
...> )
...> |> Jason.decode!()
...> |> MultiPolygonZM.from_geo_json()
{:ok,
%MultiPolygonZM{
polygons:
MapSet.new([
[
[[1, 1, 3, 4], [9, 1, 4, 5], [9, 8, 5, 6], [1, 1, 3, 4]],
[[6, 2, 4, 3], [7, 2, 6, 7], [7, 3, 3, 4], [6, 2, 4, 3]]
], [
[[6, 2, 3, 4], [8, 2, 4, 5], [8, 4, 5, 6], [6, 2, 3, 4]]
]
])
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_multi_polygon(json, MultiPolygonZM)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_multi_polygon(json, MultiPolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `MultiPolygonZM`.
There are no guarantees about the order of polygons in the returned
`coordinates`.
## Examples
```elixir
MultiPolygonZM.to_geo_json(
MultiPolygonZM.new([
PolygonZM.new([
LineStringZM.new([
PointZM.new(111, 112, 113, 114),
PointZM.new(111, 122, 123, 124),
PointZM.new(131, 122, 133, 134),
PointZM.new(111, 112, 113, 114)
])
]),
PolygonZM.new([
LineStringZM.new([
PointZM.new(211, 212, 213, 214),
PointZM.new(211, 222, 223, 224),
PointZM.new(231, 222, 233, 234),
PointZM.new(211, 212, 213, 214)
])
])
])
)
# =>
# %{
#   "type" => "MultiPolygon",
#   "coordinates" => [
#     [
#       [
#         [111, 112, 113, 114],
#         [111, 122, 123, 124],
#         [131, 122, 133, 134],
#         [111, 112, 113, 114]
#       ]
#     ], [
#       [
#         [211, 212, 213, 214],
#         [211, 222, 223, 224],
#         [231, 222, 233, 234],
#         [211, 212, 213, 214]
#       ]
#     ]
#   ]
# }
```
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%MultiPolygonZM{polygons: polygons}) do
%{
"type" => "MultiPolygon",
"coordinates" => MapSet.to_list(polygons)
}
end
@doc """
Returns an `:ok` tuple with the `MultiPolygonZM` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> MultiPolygonZM.from_wkt("
...> SRID=1234;MULTIPOLYGON ZM (
...> (
...> (40 40 10 20, 20 45 20 10, 45 30 15 30, 40 40 10 20)
...> ), (
...> (20 35 20 10, 10 30 10 20, 10 10 30 15, 30 5 10 15, 45 20 10 16, 20 35 20 10),
...> (30 20 10 15, 20 15 20 10, 20 25 15 25, 30 20 10 15)
...> )
...> )
...> ")
{:ok, {
%MultiPolygonZM{
polygons:
MapSet.new([
[
[
[20, 35, 20, 10],
[10, 30, 10, 20],
[10, 10, 30, 15],
[30, 5, 10, 15],
[45, 20, 10, 16],
[20, 35, 20, 10]
],
[
[30, 20, 10, 15],
[20, 15, 20, 10],
[20, 25, 15, 25],
[30, 20, 10, 15]
]
],
[
[
[40, 40, 10, 20],
[20, 45, 20, 10],
[45, 30, 15, 30],
[40, 40, 10, 20]
]
]
])
},
1234
}}
iex> MultiPolygonZM.from_wkt("MultiPolygon ZM EMPTY")
{:ok, %MultiPolygonZM{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, MultiPolygonZM)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, MultiPolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `MultiPolygonZM`. With option `:srid` an
EWKT representation with the SRID is returned.
There are no guarantees about the order of polygons in the returned
WKT-string.
## Examples
```elixir
MultiPolygonZM.to_wkt(
MultiPolygonZM.new([
PolygonZM.new([
LineStringZM.new([
PointZM.new(20, 35, 20, 10),
PointZM.new(10, 30, 10, 20),
PointZM.new(10, 10, 30, 15),
PointZM.new(30, 5, 10, 15),
PointZM.new(45, 20, 10, 16),
PointZM.new(20, 35, 20, 10)
]),
LineStringZM.new([
PointZM.new(30, 20, 10, 15),
PointZM.new(20, 15, 20, 10),
PointZM.new(20, 25, 15, 25),
PointZM.new(30, 20, 10, 15)
])
]),
PolygonZM.new([
LineStringZM.new([
PointZM.new(40, 40, 10, 20),
PointZM.new(20, 45, 20, 10),
PointZM.new(45, 30, 15, 30),
PointZM.new(40, 40, 10, 20)
])
])
])
)
# Returns a string without any \\n or extra spaces (formatted just for readability):
# MultiPolygon ZM (
# (
# (20 35 20 10, 10 30 10 20, 10 10 30 15, 30 5 10 15, 45 20 10 16, 20 35 20 10),
# (30 20 10 15, 20 15 20 10, 20 25 15 25, 30 20 10 15)
# ), (
# (40 40 10 20, 20 45 20 10, 45 30 15 30, 40 40 10 20)
# )
# )
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%MultiPolygonZM{polygons: polygons}, opts \\ []) do
WKT.to_ewkt(
<<
"MultiPolygon ZM ",
polygons |> MapSet.to_list() |> to_wkt_polygons()::binary()
>>,
opts
)
end
@doc """
Returns the WKB representation for a `MultiPolygonZM`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.to_wkb/1` function.
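An illustrative call (`multi_polygon` is assumed to be a `MultiPolygonZM` built
with `new/1`; the resulting bytes depend on the chosen endianness and mode):
MultiPolygonZM.to_wkb(multi_polygon, endian: :ndr, mode: :hex, srid: 4326)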
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%MultiPolygonZM{} = multi_polygon, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(multi_polygon, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `MultiPolygonZM` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.from_wkb/2` function.
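An illustrative call (`wkb` is assumed to hold a hex-encoded WKB string):
MultiPolygonZM.from_wkb(wkb, :hex)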
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, MultiPolygonZM)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, MultiPolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `MultiPolygonZM`.
## Examples
iex> MultiPolygonZM.size(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> ])
...> )
1
"""
@spec size(t()) :: non_neg_integer()
def size(%MultiPolygonZM{polygons: polygons}), do: MapSet.size(polygons)
@doc """
Checks if `MultiPolygonZM` contains `point`.
## Examples
iex> MultiPolygonZM.member?(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> ]),
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> )
true
iex> MultiPolygonZM.member?(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> ]),
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(33, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> )
false
"""
@spec member?(t(), PolygonZM.t()) :: boolean()
def member?(%MultiPolygonZM{polygons: polygons}, %PolygonZM{rings: rings}),
do: MapSet.member?(polygons, rings)
@doc """
Converts `MultiPolygonZM` to a list.
## Examples
iex> MultiPolygonZM.to_list(
...> MultiPolygonZM.new([
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(11, 12, 13, 14),
...> PointZM.new(11, 22, 23, 24),
...> PointZM.new(31, 22, 33, 34),
...> PointZM.new(11, 12, 13, 14)
...> ])
...> ])
...> ])
...> )
[
[
[
[11, 12, 13, 14],
[11, 22, 23, 24],
[31, 22, 33, 34],
[11, 12, 13, 14]
]
]
]
"""
@spec to_list(t()) :: [PolygonZM.t()]
def to_list(%MultiPolygonZM{polygons: polygons}), do: MapSet.to_list(polygons)
@compile {:inline, to_wkt_polygons: 1}
defp to_wkt_polygons([]), do: "EMPTY"
defp to_wkt_polygons([polygon | polygons]) do
<<"(",
Enum.reduce(polygons, PolygonZM.to_wkt_rings(polygon), fn polygon, acc ->
<<acc::binary(), ", ", PolygonZM.to_wkt_rings(polygon)::binary()>>
end)::binary(), ")">>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(t(), srid, endian, mode) :: wkb
when srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(%MultiPolygonZM{polygons: polygons}, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_polygons(MapSet.to_list(polygons), endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_polygons: 3}
defp to_wkb_polygons(polygons, endian, mode) do
Enum.reduce(polygons, WKB.length(polygons, endian, mode), fn polygon, acc ->
<<acc::binary(), PolygonZM.to_wkb(polygon, nil, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "C0000006"
{:ndr, false} -> "060000C0"
{:xdr, true} -> "E0000006"
{:ndr, true} -> "060000E0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0xC0000006::big-integer-size(32)>>
{:ndr, false} -> <<0xC0000006::little-integer-size(32)>>
{:xdr, true} -> <<0xE0000006::big-integer-size(32)>>
{:ndr, true} -> <<0xE0000006::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(multi_polygon) do
{:ok, MultiPolygonZM.size(multi_polygon)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(multi_polygon, val) do
{:ok, MultiPolygonZM.member?(multi_polygon, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(multi_polygon) do
size = MultiPolygonZM.size(multi_polygon)
{:ok, size, &Enumerable.List.slice(MultiPolygonZM.to_list(multi_polygon), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(multi_polygon, acc, fun) do
Enumerable.List.reduce(MultiPolygonZM.to_list(multi_polygon), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%MultiPolygonZM{polygons: polygons}) do
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
map =
Map.merge(
polygons.map,
Enum.into(list, %{}, fn {polygon, []} -> {polygon.rings, []} end)
)
%MultiPolygonZM{polygons: %{polygons | map: map}}
_list, :halt ->
:ok
end
{[], fun}
end
end
end
|
lib/geometry/multi_polygon_zm.ex
| 0.925483
| 0.619615
|
multi_polygon_zm.ex
|
starcoder
|
defmodule Mongo.Pool do
@moduledoc """
Defines a pool of MongoDB connections.
A pool can be defined as:
defmodule MyPool do
use Mongo.Pool,
adapter: Mongo.Pool.Poolboy,
hostname: "localhost"
end
Options will be passed to the pool adapter and to `Mongo.Connection`.
## Logging
The pool may define a `log/5` function, that will be called by the
driver on every call to the database.
Please refer to the callback's documentation for more information.
"""
use Behaviour
alias Mongo.Pool.Monitor
@type t :: module
@doc false
defmacro __using__(opts) do
adapter = Keyword.fetch!(opts, :adapter)
quote do
# TODO: Customizable timeout
@timeout 5_000
@behaviour unquote(__MODULE__)
@adapter unquote(adapter)
@name __MODULE__
@sup __MODULE__.Sup
@monitor __MODULE__.Monitor
@ets __MODULE__.ETS
@doc false
def start_link(opts) do
import Supervisor.Spec, warn: false
children = [
worker(Monitor, [@monitor, @ets, opts]),
worker(@adapter, [@name, opts]),
]
opts = [strategy: :one_for_all, name: @sup]
Supervisor.start_link(children, opts)
end
@doc false
def stop do
Process.whereis(__MODULE__)
|> Process.exit(:shutdown)
end
@doc false
def run(fun) do
@adapter.run(@name, fun)
end
@doc false
def version do
Monitor.version(@monitor, @ets, @timeout)
end
@doc false
def log(return, queue_time, query_time, _fun, _args) do
return
end
defoverridable [log: 5]
end
end
@type time :: integer
@type operation ::
:run_command | :insert_one | :insert_many | :delete_one | :delete_many |
:replace_one | :update_one | :update_many | :find_cursor | :find_batch |
:kill_cursors
@doc """
Executes the given function, checking out a connection from the pool and ensuring it
will be properly checked back in once finished.
"""
defcallback run((pid -> return)) :: {queue_time :: time, return} when return: var
@doc """
Returns the version of the MongoDB wire protocol used for the pool's connections
"""
defcallback version() :: non_neg_integer
@doc """
Called every time when the driver has a logging information to be printed.
The first argument result can be of form: `:ok`, `{:ok, _}` or `{:error, _}`.
The second element of the tuples should be considered private, and not used.
## Operations
The fourth argument determines the operation, these can be (listed with the
arguments passed as the fifth argument to the log function):
Operation | Arguments
:-------------- | :-------------------------------------------
`:run_command` | `[query, options]`
`:insert_one` | `[collection, document, options]`
`:insert_many` | `[collection, documents, options]`
`:delete_one` | `[collection, filter, options]`
`:delete_many` | `[collection, filter, options]`
`:replace_one` | `[collection, filter, replacement, options]`
`:update_one` | `[collection, filter, update, options]`
`:update_many` | `[collection, filter, update, options]`
`:find_cursor`   | `[collection, query, projection, options]`
`:find_batch`    | `[collection, cursor, options]`
`:kill_cursors` | `[cursors, options]`
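For example, a pool could time its queries like this (an illustrative sketch;
it assumes the pool module calls `require Logger`):
def log(return, queue_time, query_time, operation, _args) do
Logger.debug(["mongo ", inspect(operation), " took ", inspect(query_time), "µs, queued ", inspect(queue_time), "µs"])
return
end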
"""
defcallback log(return, queue_time, query_time, operation, args :: list) ::
return when return: var, queue_time: time, query_time: time
@doc """
Invokes given pool's `run/1` gathering information necessary for the pools
`log/5` function.
The `opts` argument is appended to the `args` list passed to the pool's
log function.
## Options
* `:log` - if `false` the `log/5` function won't be invoked (default: `true`)
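## Examples
A sketch of direct usage (the pool module `MyPool`, the `doc` variable and the
`do_insert/2` helper are hypothetical):
Mongo.Pool.run_with_log(MyPool, :insert_one, ["coll", doc], [], fn pid ->
do_insert(pid, doc)
end)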
"""
def run_with_log(pool, log, args, opts, fun) do
{log?, opts} = Keyword.pop(opts, :log, true)
if log? do
{queue_time, {query_time, value}} =
pool.run(&:timer.tc(fun, [&1]))
pool.log(value, queue_time, query_time, log, args ++ [opts])
value
else
{_queue_time, value} = pool.run(fun)
value
end
end
end
|
lib/mongo/pool.ex
| 0.657098
| 0.408955
|
pool.ex
|
starcoder
|
defmodule Kronky.ChangesetParser do
@moduledoc """
Converts an ecto changeset into a list of validation errors structs.
Currently *does not* support nested errors
"""
import Ecto.Changeset, only: [traverse_errors: 2]
alias Kronky.ValidationMessage
@doc "Extract a nested map of raw errors from a changeset
For examples, please see the test cases in the github repo.
"
def messages_as_map(changeset) do
Ecto.Changeset.traverse_errors(changeset, & &1)
end
@doc "Generate a list of `Kronky.ValidationMessage` structs from changeset errors
For examples, please see the test cases in the github repo.
"
def extract_messages(changeset) do
changeset
|> traverse_errors(&construct_traversed_message/3)
|> Enum.to_list
|> Enum.flat_map(fn({_field, values}) -> values end)
end
defp construct_traversed_message(_changeset, field, {message, opts}) do
construct_message(field, {message, opts})
end
@doc "Generate a single `Kronky.ValidationMessage` struct from a changeset.
This method is designed to be used with `Ecto.Changeset.traverse_errors` to generate a map of structs.
## Examples
```
error_map = Changeset.traverse_errors(fn(changeset, field, error) ->
Kronky.ChangesetParser.construct_message(field, error)
end)
error_list = Enum.flat_map(error_map, fn({_, messages}) -> messages end)
```
"
def construct_message(field, error_tuple)
def construct_message(field, {message, opts}) do
%ValidationMessage{
code: to_code({message, opts}),
field: field,
key: field,
template: message,
message: interpolate_message({message, opts}),
options: tidy_opts(opts),
}
end
defp tidy_opts(opts) do
Keyword.drop(opts, [:validation, :max, :is, :min, :code])
end
@doc """
Inserts message variables into message.
## Examples
iex> interpolate_message({"length should be between %{one} and %{two}", [one: "1", two: "2", three: "3"]})
"length should be between 1 and 2"
"""
# Code taken from the Phoenix DataCase.on_errors/1 boilerplate
def interpolate_message({message, opts}) do
Enum.reduce(opts, message, fn {key, value}, acc ->
String.replace(acc, "%{#{key}}", to_string(value))
end)
end
@doc """
Generate unique code for each validation type.
Expects an array of validation options such as those supplied
by `Ecto.Changeset.traverse_errors/2`, with the addition of a message key containing the message string.
Messages are required for several validation types to be identified.
## Supported
- :cast - generated by `Ecto.Changeset.cast/3`
- :association - generated by `Ecto.Changeset.assoc_constraint/3`, `Ecto.Changeset.cast_assoc/3`, `Ecto.Changeset.put_assoc/3`, `Ecto.Changeset.cast_embed/3`, `Ecto.Changeset.put_embed/3`
- :acceptance - generated by `Ecto.Changeset.validate_acceptance/3`
- :confirmation - generated by `Ecto.Changeset.validate_confirmation/3`
- :length - generated by `Ecto.Changeset.validate_length/3` when the `:is` option fails validation
- :min - generated by `Ecto.Changeset.validate_length/3` when the `:min` option fails validation
- :max - generated by `Ecto.Changeset.validate_length/3` when the `:max` option fails validation
- :less_than_or_equal_to - generated by `Ecto.Changeset.validate_number/3` when the `:less_than_or_equal_to` option fails validation
- :less_than - generated by `Ecto.Changeset.validate_number/3` when the `:less_than` option fails validation
- :greater_than_or_equal_to - generated by `Ecto.Changeset.validate_number/3` when the `:greater_than_or_equal_to` option fails validation
- :greater_than - generated by `Ecto.Changeset.validate_number/3` when the `:greater_than` option fails validation
- :equal_to - generated by `Ecto.Changeset.validate_number/3` when the `:equal_to` option fails validation
- :exclusion - generated by `Ecto.Changeset.validate_exclusion/4`
- :inclusion - generated by `Ecto.Changeset.validate_inclusion/4`
- :required - generated by `Ecto.Changeset.validate_required/3`
- :subset - generated by `Ecto.Changeset.validate_subset/4`
- :unique - generated by `Ecto.Changeset.unique_constraint/3`
- :foreign - generated by `Ecto.Changeset.foreign_key_constraint/3`
- :no_assoc_constraint - generated by `Ecto.Changeset.no_assoc_constraint/3`
- :unknown - supplied when validation cannot be matched. This will also match any custom errors added through
`Ecto.Changeset.add_error/4`, `Ecto.Changeset.validate_change/3`, and `Ecto.Changeset.validate_change/4`
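## Examples
A couple of illustrative calls (derived from the clauses below):
iex> Kronky.ChangesetParser.to_code({"can't be blank", [validation: :required]})
:required
iex> Kronky.ChangesetParser.to_code({"should be at least %{count} character(s)", [validation: :length, min: 3]})
:min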
"""
def to_code({message, opts}) do
opts
|> Enum.into(%{message: message})
|> do_to_code
end
defp do_to_code(%{code: code}), do: code
defp do_to_code(%{validation: :cast}), do: :cast
defp do_to_code(%{validation: :required}), do: :required
defp do_to_code(%{validation: :format}), do: :format
defp do_to_code(%{validation: :inclusion}), do: :inclusion
defp do_to_code(%{validation: :exclusion}), do: :exclusion
defp do_to_code(%{validation: :subset}), do: :subset
defp do_to_code(%{validation: :acceptance}), do: :acceptance
defp do_to_code(%{validation: :confirmation}), do: :confirmation
defp do_to_code(%{validation: :length, is: _}), do: :length
defp do_to_code(%{validation: :length, min: _}), do: :min
defp do_to_code(%{validation: :length, max: _}), do: :max
defp do_to_code(%{validation: :number, message: message}) do
cond do
String.contains?(message, "less than or equal to") -> :less_than_or_equal_to
String.contains?(message, "greater than or equal to") -> :greater_than_or_equal_to
String.contains?(message, "less than") -> :less_than
String.contains?(message, "greater than") -> :greater_than
String.contains?(message, "equal to") -> :equal_to
true -> :unknown
end
end
defp do_to_code(%{message: "is invalid", type: _}), do: :association
defp do_to_code(%{message: "has already been taken"}), do: :unique
defp do_to_code(%{message: "does not exist"}), do: :foreign
defp do_to_code(%{message: "is still associated with this entry"}), do: :no_assoc
defp do_to_code(_unknown) do
:unknown
end
end
|
lib/kronky/changeset_parser.ex
| 0.90647
| 0.558447
|
changeset_parser.ex
|
starcoder
|
defmodule Contex.ContinuousLinearScale do
@moduledoc """
A linear scale to map continuous numberic data to a plotting coordinate system.
Implements the general aspects of scale setup and use defined in the `Contex.Scale` protocol
The `ContinuousLinearScale` is responsible for mapping to and from values in the data
to a visual scale. The two key concepts are "domain" and "range".
The "domain" represents values in the dataset to be plotted.
The "range" represents the plotting coordinate system use to plot values in the "domain".
*Important Note* - When you set domain and range, the scale code makes a few adjustments
based on the desired number of tick intervals so that the ticks look "nice" - i.e. on
round numbers. So if you have a data range of 0.0 → 8.7 and you want 10 intervals the scale
won't display ticks at 0.0, 0.87, 1.74 etc, it will round up the domain to 10 so you have nice
tick intervals of 0, 1, 2, 3 etc.
By default the scale creates 10 tick intervals.
When domain and range are both set, the scale makes transform functions available to map each way
between the domain and range that are then available to the various plots to map data
to plotting coordinate systems, and potentially vice-versa.
The typical setup of the scale looks like this:
```
y_scale
= ContinuousLinearScale.new()
|> ContinuousLinearScale.domain(min_value, max_value)
|> Scale.set_range(start_of_y_plotting_coord, end_of_y_plotting_coord)
```
Translating a value to plotting coordinates would then look like this:
```
plot_y = Scale.domain_to_range(y_scale, y_value)
```
`ContinuousLinearScale` implements the `Contex.Scale` protocol that provides a nicer way to access the
transform functions. Calculation of plotting coordinates is typically done in tight loops
so you are more likely to do something like this than to translate a single value as per the above example:
```
x_tx_fn = Scale.domain_to_range_fn(x_scale)
y_tx_fn = Scale.domain_to_range_fn(y_scale)
points_to_plot = Enum.map(big_load_of_data, fn %{x: x, y: y}=_row ->
{x_tx_fn.(x), y_tx_fn.(y)}
end)
```
"""
alias __MODULE__
alias Contex.Utils
defstruct [
:domain,
:nice_domain,
:range,
:interval_count,
:interval_size,
:display_decimals,
:custom_tick_formatter
]
@type t() :: %__MODULE__{}
@doc """
Creates a new scale with defaults
"""
@spec new :: Contex.ContinuousLinearScale.t()
def new() do
%ContinuousLinearScale{range: {0.0, 1.0}, interval_count: 10, display_decimals: nil}
end
@doc """
Defines the number of intervals between ticks.
Defaults to 10.
Tick-rendering is the responsibility of `Contex.Axis`, but calculating tick intervals is the responsibility
of the scale.
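For example (illustrative):
```
scale =
ContinuousLinearScale.new()
|> ContinuousLinearScale.interval_count(5)
```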
"""
@spec interval_count(Contex.ContinuousLinearScale.t(), integer()) ::
Contex.ContinuousLinearScale.t()
def interval_count(%ContinuousLinearScale{} = scale, interval_count)
when is_integer(interval_count) and interval_count > 1 do
scale
|> struct(interval_count: interval_count)
|> nice()
end
def interval_count(%ContinuousLinearScale{} = scale, _), do: scale
@doc """
Sets the extents of the value domain for the scale.
"""
@spec domain(Contex.ContinuousLinearScale.t(), number, number) ::
Contex.ContinuousLinearScale.t()
def domain(%ContinuousLinearScale{} = scale, min, max) when is_number(min) and is_number(max) do
# We can be flexible with the range start > end, but the domain needs to start from the min
{d_min, d_max} =
case min < max do
true -> {min, max}
_ -> {max, min}
end
scale
|> struct(domain: {d_min, d_max})
|> nice()
end
@doc """
Sets the extents of the value domain for the scale by specifying a list of values to be displayed.
The scale will determine the extents of the data.
"""
@spec domain(Contex.ContinuousLinearScale.t(), list(number())) ::
Contex.ContinuousLinearScale.t()
def domain(%ContinuousLinearScale{} = scale, data) when is_list(data) do
{min, max} = extents(data)
domain(scale, min, max)
end
# NOTE: interval count will likely get adjusted down here to keep things looking nice
defp nice(
%ContinuousLinearScale{domain: {min_d, max_d}, interval_count: interval_count} = scale
)
when is_number(min_d) and is_number(max_d) and is_number(interval_count) and
interval_count > 1 do
width = max_d - min_d
width = if width == 0.0, do: 1.0, else: width
unrounded_interval_size = width / (interval_count - 1)
order_of_magnitude = :math.ceil(:math.log10(unrounded_interval_size) - 1)
power_of_ten = :math.pow(10, order_of_magnitude)
rounded_interval_size =
lookup_axis_interval(unrounded_interval_size / power_of_ten) * power_of_ten
min_nice = rounded_interval_size * Float.floor(min_d / rounded_interval_size)
max_nice = rounded_interval_size * Float.ceil(max_d / rounded_interval_size)
adjusted_interval_count = round(1.0001 * (max_nice - min_nice) / rounded_interval_size)
display_decimals = guess_display_decimals(order_of_magnitude)
%{
scale
| nice_domain: {min_nice, max_nice},
interval_size: rounded_interval_size,
interval_count: adjusted_interval_count,
display_decimals: display_decimals
}
end
defp nice(%ContinuousLinearScale{} = scale), do: scale
@axis_interval_breaks [0.1, 0.2, 0.25, 0.4, 0.5, 1.0, 2.0, 2.5, 4.0, 5.0, 10.0]
defp lookup_axis_interval(raw_interval) when is_float(raw_interval) do
Enum.find(@axis_interval_breaks, fn x -> x >= raw_interval end)
end
defp guess_display_decimals(power_of_ten) when power_of_ten > 0 do
0
end
defp guess_display_decimals(power_of_ten) do
1 + -1 * round(power_of_ten)
end
@doc false
def get_domain_to_range_function(%ContinuousLinearScale{
nice_domain: {min_d, max_d},
range: {min_r, max_r}
})
when is_number(min_d) and is_number(max_d) and is_number(min_r) and is_number(max_r) do
domain_width = max_d - min_d
range_width = max_r - min_r
case domain_width do
0 ->
fn x -> x end
0.0 ->
fn x -> x end
_ ->
fn domain_val ->
ratio = (domain_val - min_d) / domain_width
min_r + ratio * range_width
end
end
end
def get_domain_to_range_function(_), do: fn x -> x end
@doc false
def get_range_to_domain_function(%ContinuousLinearScale{
nice_domain: {min_d, max_d},
range: {min_r, max_r}
})
when is_number(min_d) and is_number(max_d) and is_number(min_r) and is_number(max_r) do
domain_width = max_d - min_d
range_width = max_r - min_r
case range_width do
0 ->
fn x -> x end
0.0 ->
fn x -> x end
_ ->
fn range_val ->
ratio = (range_val - min_r) / range_width
min_d + ratio * domain_width
end
end
end
def get_range_to_domain_function(_), do: fn x -> x end
@doc false
def extents(data) do
Enum.reduce(data, {nil, nil}, fn x, {min, max} ->
{Utils.safe_min(x, min), Utils.safe_max(x, max)}
end)
end
defimpl Contex.Scale do
def domain_to_range_fn(%ContinuousLinearScale{} = scale),
do: ContinuousLinearScale.get_domain_to_range_function(scale)
def ticks_domain(%ContinuousLinearScale{
nice_domain: {min_d, _},
interval_count: interval_count,
interval_size: interval_size
})
when is_number(min_d) and is_number(interval_count) and is_number(interval_size) do
0..interval_count
|> Enum.map(fn i -> min_d + i * interval_size end)
end
def ticks_domain(_), do: []
def ticks_range(%ContinuousLinearScale{} = scale) do
transform_func = ContinuousLinearScale.get_domain_to_range_function(scale)
ticks_domain(scale)
|> Enum.map(transform_func)
end
def domain_to_range(%ContinuousLinearScale{} = scale, domain_val) do
transform_func = ContinuousLinearScale.get_domain_to_range_function(scale)
transform_func.(domain_val)
end
def get_range(%ContinuousLinearScale{range: {min_r, max_r}}), do: {min_r, max_r}
def set_range(%ContinuousLinearScale{} = scale, start, finish)
when is_number(start) and is_number(finish) do
%{scale | range: {start, finish}}
end
def set_range(%ContinuousLinearScale{} = scale, {start, finish})
when is_number(start) and is_number(finish),
do: set_range(scale, start, finish)
def get_formatted_tick(
%ContinuousLinearScale{
display_decimals: display_decimals,
custom_tick_formatter: custom_tick_formatter
},
tick_val
) do
format_tick_text(tick_val, display_decimals, custom_tick_formatter)
end
defp format_tick_text(tick, _, custom_tick_formatter) when is_function(custom_tick_formatter),
do: custom_tick_formatter.(tick)
defp format_tick_text(tick, _, _) when is_integer(tick), do: to_string(tick)
defp format_tick_text(tick, display_decimals, _) when display_decimals > 0 do
:erlang.float_to_binary(tick, decimals: display_decimals)
end
defp format_tick_text(tick, _, _), do: :erlang.float_to_binary(tick, [:compact, decimals: 0])
end
end
|
lib/chart/scale/continuous_linear_scale.ex
| 0.95104
| 0.980986
|
continuous_linear_scale.ex
|
starcoder
|
defmodule RDF.Star.Triple do
@moduledoc """
Helper functions for RDF-star triples.
An RDF-star triple is represented as a plain Elixir tuple consisting of three valid
RDF values for subject, predicate and object.
As opposed to an `RDF.Triple`, the subject or object can itself be a triple.
"""
alias RDF.Star.Statement
alias RDF.PropertyMap
@type t :: {Statement.subject(), Statement.predicate(), Statement.object()}
@type coercible ::
{
Statement.coercible_subject(),
Statement.coercible_predicate(),
Statement.coercible_object()
}
@doc """
Creates a `RDF.Star.Triple` with proper RDF-star values.
An error is raised when the given elements are not coercible to RDF-star values.
Note: The `RDF.triple` function is a shortcut to this function.
## Examples
iex> RDF.Star.Triple.new({"http://example.com/S", "http://example.com/p", 42}, "http://example.com/p2", 43)
{{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}, ~I<http://example.com/p2>, RDF.literal(43)}
iex> RDF.Star.Triple.new({EX.S, EX.p, 42}, EX.p2, 43)
{{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}, ~I<http://example.com/p2>, RDF.literal(43)}
iex> RDF.Star.Triple.new(EX.S, EX.p, 42)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Star.Triple.new({EX.S, :p, 42}, :p2, 43, RDF.PropertyMap.new(p: EX.p, p2: EX.p2))
{{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}, ~I<http://example.com/p2>, RDF.literal(43)}
"""
@spec new(
Statement.coercible_subject(),
Statement.coercible_predicate(),
Statement.coercible_object(),
PropertyMap.t() | nil
) :: t
def new(subject, predicate, object, property_map \\ nil)
def new(subject, predicate, object, nil) do
{
Statement.coerce_subject(subject),
Statement.coerce_predicate(predicate),
Statement.coerce_object(object)
}
end
def new(subject, predicate, object, %PropertyMap{} = property_map) do
{
Statement.coerce_subject(subject, property_map),
Statement.coerce_predicate(predicate, property_map),
Statement.coerce_object(object, property_map)
}
end
@doc """
Creates a `RDF.Star.Triple` with proper RDF-star values.
An error is raised when the given elements are not coercible to RDF-star values.
Note: The `RDF.triple` function is a shortcut to this function.
## Examples
iex> RDF.Star.Triple.new {"http://example.com/S", "http://example.com/p", 42}
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
iex> RDF.Star.Triple.new {EX.S, EX.p, 42}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Star.Triple.new {EX.S, EX.p, 42, EX.Graph}
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Star.Triple.new {EX.S, :p, 42}, RDF.PropertyMap.new(p: EX.p)
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Star.Triple.new({{EX.S, :p, 42}, :p2, 43}, RDF.PropertyMap.new(p: EX.p, p2: EX.p2))
{{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}, ~I<http://example.com/p2>, RDF.literal(43)}
"""
@spec new(Statement.coercible(), PropertyMap.t() | nil) :: t
def new(statement, property_map \\ nil)
def new({subject, predicate, object}, property_map),
do: new(subject, predicate, object, property_map)
def new({subject, predicate, object, _}, property_map),
do: new(subject, predicate, object, property_map)
@doc """
Checks if the given tuple is a valid RDF-star triple.
The elements of a valid RDF-star triple must be RDF terms. On the subject
position only IRIs, blank nodes and triples are allowed, while on the predicate
position only IRIs are allowed. The object position can be any RDF term or triple.
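## Examples
The examples below follow the rules stated above (the IRIs are illustrative):
iex> RDF.Star.Triple.valid?({~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)})
true
iex> RDF.Star.Triple.valid?({{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}, ~I<http://example.com/p2>, RDF.literal(43)})
true
iex> RDF.Star.Triple.valid?(:not_a_triple)
false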
"""
@spec valid?(t | any) :: boolean
def valid?(tuple)
def valid?({_, _, _} = triple), do: Statement.valid?(triple)
def valid?(_), do: false
end
|
lib/rdf/star/triple.ex
| 0.909242
| 0.653286
|
triple.ex
|
starcoder
|
defmodule Kaffe.Consumer do
@moduledoc """
Consume messages from Kafka and pass to a given local module.
See `start_link/4` for details on how to start a Consumer process.
As messages are consumed from Kafka they will be sent to your
`handle_message/1` (sync) or `handle_message/2` (async) functions for
processing in your system. Those functions _must_ return `:ok`.
Kaffe.Consumer commits offsets to Kafka which is very reliable but not
immediate. If your application restarts then it's highly likely you'll
reconsume some messages, especially for a quickly moving topic. Be ready!
"""
@behaviour :brod_group_subscriber
@kafka Application.get_env(:kaffe, :kafka_mod, :brod)
@group_subscriber Application.get_env(:kaffe, :group_subscriber_mod, :brod_group_subscriber)
require Record
import Record, only: [defrecord: 2, extract: 2]
defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl")
defmodule State do
@moduledoc """
Running state for the consumer.
- `message_handler` - The module to call with each Kafka message
- `async` - Kafka offset processing behavior
"""
defstruct message_handler: nil, async: false
end
## -------------------------------------------------------------------------
## public api
## -------------------------------------------------------------------------
@doc """
Start a Kafka consumer
The consumer pulls in values from the Kaffe consumer configuration:
- `heroku_kafka_env` - endpoints and SSL configuration will be pulled from ENV
- `endpoints` - plaintext Kafka endpoints
- `consumer_group` - the consumer group id (should be unique to your app)
- `topics` - a list of Kafka topics to consume
- `message_handler` - the module that will be called for each Kafka message
- `async_message_ack` - if false, the Kafka offset will be acknowledged
automatically after the message is successfully processed
- `start_with_earliest_message` - If true the worker will consume from the
beginning of the topic when it first starts. This only affects consumer
behavior before the consumer group starts recording its offsets in Kafka.
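For example, a configuration sketch with illustrative values (assuming the usual
`config :kaffe, consumer: [...]` layout; endpoints and names are placeholders):
```
config :kaffe,
  consumer: [
    endpoints: [localhost: 9092],
    consumer_group: "my-app-consumer-group",
    topics: ["my-topic"],
    message_handler: MyApp.MessageHandler,
    async_message_ack: false,
    start_with_earliest_message: true
  ]
```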
Note: If `async_message_ack` is true then you'll need to call `ack/2` to
acknowledge Kafka messages as processed.
Only use async processing if absolutely needed by your application's
processing flow. With automatic (sync) acknowledgement, the message flow
from Kaffe.Consumer has backpressure from your system. With manual (async)
acknowledgement you will be able to process messages faster but will need to
take on the burden of ensuring no messages are lost.
"""
def start_link(opts \\ %{}) do
config = Kaffe.Config.Consumer.configuration(opts)
@kafka.start_link_group_subscriber(
config.subscriber_name,
config.consumer_group,
config.topics,
config.group_config,
config.consumer_config,
__MODULE__,
[config]
)
end
@doc """
Acknowledge the Kafka message as processed.
- `pid` - the pid your `handle_message/2` function was given as the first argument
- `message` - the Kafka message your `handle_message/2` function was given as
the second argument
```
Kaffe.Consumer.ack(pid, message)
```
"""
def ack(pid, %{topic: topic, partition: partition, offset: offset}) do
@group_subscriber.ack(pid, topic, partition, offset)
end
## -------------------------------------------------------------------------
## callbacks
## -------------------------------------------------------------------------
@doc """
Initialize the consumer loop.
"""
def init(_consumer_group, [config]) do
start_consumer_client(config)
{:ok, %Kaffe.Consumer.State{message_handler: config.message_handler, async: config.async_message_ack}}
end
@doc """
Call the message handler with the restructured Kafka message.
Kafka messages come from brod as an Erlang record. To make processing simpler
for clients, we convert that to an Elixir map. Since the consumer can
subscribe to multiple topics with multiple partitions, we also add the topic
and partition as additional fields.
After compiling the Kafka message, your message handler module's
`handle_message` function will be called.
If `async` is false:
- Your message handler module's `handle_message/1` function will be called
with the message
- The Consumer will block and wait for your `handle_message` function to
complete and then automatically acknowledge the message as processed.
If `async` is true:
- Your message handler module's `handle_message/2` function will be called
with the pid of the running Consumer process and the message.
- Message intake on the Consumer will not wait for your `handle_message/2` to
complete and will not automatically acknowledge the message as processed.
- Once you've processed the message you will need to call
`Kaffe.Consumer.ack/2` with the pid and message.
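A minimal synchronous handler sketch (the module name is illustrative; besides
`topic` and `partition`, the map carries whatever fields the brod message record has):
```
defmodule MyApp.MessageHandler do
  def handle_message(%{topic: _topic, partition: _partition} = _message) do
    # process the message, then confirm success
    :ok
  end
end
```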
"""
def handle_message(topic, partition, msg, %{async: false, message_handler: handler} = state) do
:ok = apply(handler, :handle_message, [compile_message(msg, topic, partition)])
{:ok, :ack, state}
end
def handle_message(topic, partition, msg, %{async: true, message_handler: handler} = state) do
:ok = apply(handler, :handle_message, [self(), compile_message(msg, topic, partition)])
{:ok, state}
end
## -------------------------------------------------------------------------
## internal functions
## -------------------------------------------------------------------------
def start_consumer_client(config) do
@kafka.start_client(config.endpoints, config.subscriber_name, config.consumer_config)
end
defp compile_message(msg, topic, partition) do
Map.merge(%{topic: topic, partition: partition}, kafka_message_to_map(msg))
end
defp kafka_message_to_map(msg) do
Enum.into(kafka_message(msg), %{})
end
end
|
lib/kaffe/consumer.ex
| 0.852153
| 0.777215
|
consumer.ex
|
starcoder
|
defmodule Wobserver2.Util.Metrics.Prometheus do
@moduledoc ~S"""
Prometheus formatter.
Formats metrics in a for Prometheus readable way.
See: [https://prometheus.io/docs/instrumenting/writing_exporters/](https://prometheus.io/docs/instrumenting/writing_exporters/)
"""
@behaviour Wobserver2.Util.Metrics.Formatter
@doc ~S"""
Format a set of `data` with a `label` for a Prometheus.
The `data` must be given as a `list` of tuples with the following format: `{value, labels}`, where `labels` is a keyword list with labels and their values.
The following options can also be given:
- `type`, the type of the metric. The following values are currently supported: `:gauge`, `:counter`.
- `help`, a single line text description of the metric.
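## Example
The output below follows directly from the formatting helpers in this module
(metric name, label and value are illustrative):
iex> format_data("app_requests", [{5, [node: "127.0.0.1"]}], :gauge, "Number of requests")
"# HELP app_requests Number of requests\n# TYPE app_requests gauge\napp_requests{node=\"127.0.0.1\"} 5\n"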
"""
@spec format_data(
name :: String.t(),
data :: [{integer | float, keyword}],
type :: atom,
help :: String.t()
) :: String.t()
def format_data(name, data, type, help) do
"#{format_help(name, help)}#{format_type(name, type)}#{format_values(name, data)}"
end
@doc ~S"""
Combines formatted metrics together.
Arguments:
- `metrics`, a list of formatted metrics for one node.
Example:
iex> combine_metrics ["metric1{node="127.0.0.1"} 5\n", "metric2{node="127.0.0.1"} 5\n"]
"metric1{node="127.0.0.1"} 5\n", "metric2{node="127.0.0.1"} 5\n"
"""
@spec combine_metrics(metrics :: list(String.t())) :: String.t()
def combine_metrics(metrics), do: Enum.join(metrics)
@doc ~S"""
Merges formatted sets of metrics from different nodes together.
The merge will filter out double declarations of help and type.
Arguments:
- `metrics`, a list of formatted sets metrics for multiple node.
Example:
iex> combine_metrics ["metric{node="192.168.0.6"} 5\n", "metric{node="192.168.0.5"} 5\n"]
"metric{node="192.168.0.6"} 5\n", "metric{node="192.168.0.7"} 5\n"
"""
@spec merge_metrics(metrics :: list(String.t())) :: String.t()
def merge_metrics(metrics) do
{combined, _} = Enum.reduce(metrics, {"", []}, &filter/2)
combined
end
# Helpers
defp format_help(_name, nil), do: ""
defp format_help(name, help) do
"\# HELP #{name} #{help}\n"
end
defp format_type(_name, nil), do: ""
defp format_type(name, type) do
"\# TYPE #{name} #{type}\n"
end
defp format_labels(labels) do
labels
|> Enum.map(fn {label, value} -> "#{label}=\"#{value}\"" end)
|> Enum.join(",")
end
defp format_values(name, data) do
data
|> Enum.map(fn {value, labels} -> "#{name}{#{format_labels(labels)}} #{value}\n" end)
|> Enum.join()
end
defp analyze_metrics(metrics) do
help =
~r/^\# HELP ([a-zA-Z_]+\ )/m
|> Regex.scan(metrics)
|> Enum.map(fn [match | _] -> match end)
type =
~r/^\# TYPE ([a-zA-Z_]+\ )/m
|> Regex.scan(metrics)
|> Enum.map(fn [match | _] -> match end)
help ++ type
end
defp filter_line(line, filter) do
filter
|> Enum.find_value(false, &String.starts_with?(line, &1))
|> Kernel.!()
end
defp filter(metric, {metrics, filter}) do
filtered_metric =
metric
|> String.split("\n")
|> Enum.filter(&filter_line(&1, filter))
|> Enum.join("\n")
updated_filter =
filtered_metric
|> analyze_metrics()
|> Kernel.++(filter)
{metrics <> filtered_metric, updated_filter}
end
end
|
lib/wobserver2/util/metrics/prometheus.ex
| 0.908567
| 0.594051
|
prometheus.ex
|
starcoder
|
defmodule Google.Protobuf.FileDescriptorSet do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
file: [Google.Protobuf.FileDescriptorProto.t]
}
defstruct [:file]
field :file, 1, repeated: true, type: Google.Protobuf.FileDescriptorProto
end
defmodule Google.Protobuf.FileDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
package: String.t,
dependency: [String.t],
public_dependency: [integer],
weak_dependency: [integer],
message_type: [Google.Protobuf.DescriptorProto.t],
enum_type: [Google.Protobuf.EnumDescriptorProto.t],
service: [Google.Protobuf.ServiceDescriptorProto.t],
extension: [Google.Protobuf.FieldDescriptorProto.t],
options: Google.Protobuf.FileOptions.t,
source_code_info: Google.Protobuf.SourceCodeInfo.t,
syntax: String.t
}
defstruct [:name, :package, :dependency, :public_dependency, :weak_dependency, :message_type, :enum_type, :service, :extension, :options, :source_code_info, :syntax]
field :name, 1, optional: true, type: :string
field :package, 2, optional: true, type: :string
field :dependency, 3, repeated: true, type: :string
field :public_dependency, 10, repeated: true, type: :int32
field :weak_dependency, 11, repeated: true, type: :int32
field :message_type, 4, repeated: true, type: Google.Protobuf.DescriptorProto
field :enum_type, 5, repeated: true, type: Google.Protobuf.EnumDescriptorProto
field :service, 6, repeated: true, type: Google.Protobuf.ServiceDescriptorProto
field :extension, 7, repeated: true, type: Google.Protobuf.FieldDescriptorProto
field :options, 8, optional: true, type: Google.Protobuf.FileOptions
field :source_code_info, 9, optional: true, type: Google.Protobuf.SourceCodeInfo
field :syntax, 12, optional: true, type: :string
end
defmodule Google.Protobuf.DescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
field: [Google.Protobuf.FieldDescriptorProto.t],
extension: [Google.Protobuf.FieldDescriptorProto.t],
nested_type: [Google.Protobuf.DescriptorProto.t],
enum_type: [Google.Protobuf.EnumDescriptorProto.t],
extension_range: [Google.Protobuf.DescriptorProto.ExtensionRange.t],
oneof_decl: [Google.Protobuf.OneofDescriptorProto.t],
options: Google.Protobuf.MessageOptions.t,
reserved_range: [Google.Protobuf.DescriptorProto.ReservedRange.t],
reserved_name: [String.t]
}
defstruct [:name, :field, :extension, :nested_type, :enum_type, :extension_range, :oneof_decl, :options, :reserved_range, :reserved_name]
field :name, 1, optional: true, type: :string
field :field, 2, repeated: true, type: Google.Protobuf.FieldDescriptorProto
field :extension, 6, repeated: true, type: Google.Protobuf.FieldDescriptorProto
field :nested_type, 3, repeated: true, type: Google.Protobuf.DescriptorProto
field :enum_type, 4, repeated: true, type: Google.Protobuf.EnumDescriptorProto
field :extension_range, 5, repeated: true, type: Google.Protobuf.DescriptorProto.ExtensionRange
field :oneof_decl, 8, repeated: true, type: Google.Protobuf.OneofDescriptorProto
field :options, 7, optional: true, type: Google.Protobuf.MessageOptions
field :reserved_range, 9, repeated: true, type: Google.Protobuf.DescriptorProto.ReservedRange
field :reserved_name, 10, repeated: true, type: :string
end
defmodule Google.Protobuf.DescriptorProto.ExtensionRange do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
start: integer,
end: integer
}
defstruct [:start, :end]
field :start, 1, optional: true, type: :int32
field :end, 2, optional: true, type: :int32
end
defmodule Google.Protobuf.DescriptorProto.ReservedRange do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
start: integer,
end: integer
}
defstruct [:start, :end]
field :start, 1, optional: true, type: :int32
field :end, 2, optional: true, type: :int32
end
defmodule Google.Protobuf.FieldDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
number: integer,
label: integer,
type: integer,
type_name: String.t,
extendee: String.t,
default_value: String.t,
oneof_index: integer,
json_name: String.t,
options: Google.Protobuf.FieldOptions.t
}
defstruct [:name, :number, :label, :type, :type_name, :extendee, :default_value, :oneof_index, :json_name, :options]
field :name, 1, optional: true, type: :string
field :number, 3, optional: true, type: :int32
field :label, 4, optional: true, type: Google.Protobuf.FieldDescriptorProto.Label, enum: true
field :type, 5, optional: true, type: Google.Protobuf.FieldDescriptorProto.Type, enum: true
field :type_name, 6, optional: true, type: :string
field :extendee, 2, optional: true, type: :string
field :default_value, 7, optional: true, type: :string
field :oneof_index, 9, optional: true, type: :int32
field :json_name, 10, optional: true, type: :string
field :options, 8, optional: true, type: Google.Protobuf.FieldOptions
end
defmodule Google.Protobuf.FieldDescriptorProto.Type do
use Protobuf, enum: true, syntax: :proto2
field :TYPE_DOUBLE, 1
field :TYPE_FLOAT, 2
field :TYPE_INT64, 3
field :TYPE_UINT64, 4
field :TYPE_INT32, 5
field :TYPE_FIXED64, 6
field :TYPE_FIXED32, 7
field :TYPE_BOOL, 8
field :TYPE_STRING, 9
field :TYPE_GROUP, 10
field :TYPE_MESSAGE, 11
field :TYPE_BYTES, 12
field :TYPE_UINT32, 13
field :TYPE_ENUM, 14
field :TYPE_SFIXED32, 15
field :TYPE_SFIXED64, 16
field :TYPE_SINT32, 17
field :TYPE_SINT64, 18
end
defmodule Google.Protobuf.FieldDescriptorProto.Label do
use Protobuf, enum: true, syntax: :proto2
field :LABEL_OPTIONAL, 1
field :LABEL_REQUIRED, 2
field :LABEL_REPEATED, 3
end
defmodule Google.Protobuf.OneofDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
options: Google.Protobuf.OneofOptions.t
}
defstruct [:name, :options]
field :name, 1, optional: true, type: :string
field :options, 2, optional: true, type: Google.Protobuf.OneofOptions
end
defmodule Google.Protobuf.EnumDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
value: [Google.Protobuf.EnumValueDescriptorProto.t],
options: Google.Protobuf.EnumOptions.t
}
defstruct [:name, :value, :options]
field :name, 1, optional: true, type: :string
field :value, 2, repeated: true, type: Google.Protobuf.EnumValueDescriptorProto
field :options, 3, optional: true, type: Google.Protobuf.EnumOptions
end
defmodule Google.Protobuf.EnumValueDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
number: integer,
options: Google.Protobuf.EnumValueOptions.t
}
defstruct [:name, :number, :options]
field :name, 1, optional: true, type: :string
field :number, 2, optional: true, type: :int32
field :options, 3, optional: true, type: Google.Protobuf.EnumValueOptions
end
defmodule Google.Protobuf.ServiceDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
method: [Google.Protobuf.MethodDescriptorProto.t],
options: Google.Protobuf.ServiceOptions.t
}
defstruct [:name, :method, :options]
field :name, 1, optional: true, type: :string
field :method, 2, repeated: true, type: Google.Protobuf.MethodDescriptorProto
field :options, 3, optional: true, type: Google.Protobuf.ServiceOptions
end
defmodule Google.Protobuf.MethodDescriptorProto do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t,
input_type: String.t,
output_type: String.t,
options: Google.Protobuf.MethodOptions.t,
client_streaming: boolean,
server_streaming: boolean
}
defstruct [:name, :input_type, :output_type, :options, :client_streaming, :server_streaming]
field :name, 1, optional: true, type: :string
field :input_type, 2, optional: true, type: :string
field :output_type, 3, optional: true, type: :string
field :options, 4, optional: true, type: Google.Protobuf.MethodOptions
field :client_streaming, 5, optional: true, type: :bool, default: false
field :server_streaming, 6, optional: true, type: :bool, default: false
end
defmodule Google.Protobuf.FileOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
java_package: String.t,
java_outer_classname: String.t,
java_multiple_files: boolean,
java_generate_equals_and_hash: boolean,
java_string_check_utf8: boolean,
optimize_for: integer,
go_package: String.t,
cc_generic_services: boolean,
java_generic_services: boolean,
py_generic_services: boolean,
deprecated: boolean,
cc_enable_arenas: boolean,
objc_class_prefix: String.t,
csharp_namespace: String.t,
swift_prefix: String.t,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:java_package, :java_outer_classname, :java_multiple_files, :java_generate_equals_and_hash, :java_string_check_utf8, :optimize_for, :go_package, :cc_generic_services, :java_generic_services, :py_generic_services, :deprecated, :cc_enable_arenas, :objc_class_prefix, :csharp_namespace, :swift_prefix, :uninterpreted_option]
field :java_package, 1, optional: true, type: :string
field :java_outer_classname, 8, optional: true, type: :string
field :java_multiple_files, 10, optional: true, type: :bool, default: false
field :java_generate_equals_and_hash, 20, optional: true, type: :bool, deprecated: true
field :java_string_check_utf8, 27, optional: true, type: :bool, default: false
field :optimize_for, 9, optional: true, type: Google.Protobuf.FileOptions.OptimizeMode, default: :SPEED, enum: true
field :go_package, 11, optional: true, type: :string
field :cc_generic_services, 16, optional: true, type: :bool, default: false
field :java_generic_services, 17, optional: true, type: :bool, default: false
field :py_generic_services, 18, optional: true, type: :bool, default: false
field :deprecated, 23, optional: true, type: :bool, default: false
field :cc_enable_arenas, 31, optional: true, type: :bool, default: false
field :objc_class_prefix, 36, optional: true, type: :string
field :csharp_namespace, 37, optional: true, type: :string
field :swift_prefix, 39, optional: true, type: :string
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.FileOptions.OptimizeMode do
use Protobuf, enum: true, syntax: :proto2
field :SPEED, 1
field :CODE_SIZE, 2
field :LITE_RUNTIME, 3
end
defmodule Google.Protobuf.MessageOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
message_set_wire_format: boolean,
no_standard_descriptor_accessor: boolean,
deprecated: boolean,
map_entry: boolean,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:message_set_wire_format, :no_standard_descriptor_accessor, :deprecated, :map_entry, :uninterpreted_option]
field :message_set_wire_format, 1, optional: true, type: :bool, default: false
field :no_standard_descriptor_accessor, 2, optional: true, type: :bool, default: false
field :deprecated, 3, optional: true, type: :bool, default: false
field :map_entry, 7, optional: true, type: :bool
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.FieldOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
ctype: integer,
packed: boolean,
jstype: integer,
lazy: boolean,
deprecated: boolean,
weak: boolean,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:ctype, :packed, :jstype, :lazy, :deprecated, :weak, :uninterpreted_option]
field :ctype, 1, optional: true, type: Google.Protobuf.FieldOptions.CType, default: :STRING, enum: true
field :packed, 2, optional: true, type: :bool
field :jstype, 6, optional: true, type: Google.Protobuf.FieldOptions.JSType, default: :JS_NORMAL, enum: true
field :lazy, 5, optional: true, type: :bool, default: false
field :deprecated, 3, optional: true, type: :bool, default: false
field :weak, 10, optional: true, type: :bool, default: false
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.FieldOptions.CType do
use Protobuf, enum: true, syntax: :proto2
field :STRING, 0
field :CORD, 1
field :STRING_PIECE, 2
end
defmodule Google.Protobuf.FieldOptions.JSType do
use Protobuf, enum: true, syntax: :proto2
field :JS_NORMAL, 0
field :JS_STRING, 1
field :JS_NUMBER, 2
end
defmodule Google.Protobuf.OneofOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:uninterpreted_option]
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.EnumOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
allow_alias: boolean,
deprecated: boolean,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:allow_alias, :deprecated, :uninterpreted_option]
field :allow_alias, 2, optional: true, type: :bool
field :deprecated, 3, optional: true, type: :bool, default: false
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.EnumValueOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
deprecated: boolean,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:deprecated, :uninterpreted_option]
field :deprecated, 1, optional: true, type: :bool, default: false
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.ServiceOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
deprecated: boolean,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:deprecated, :uninterpreted_option]
field :deprecated, 33, optional: true, type: :bool, default: false
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.MethodOptions do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
deprecated: boolean,
idempotency_level: integer,
uninterpreted_option: [Google.Protobuf.UninterpretedOption.t]
}
defstruct [:deprecated, :idempotency_level, :uninterpreted_option]
field :deprecated, 33, optional: true, type: :bool, default: false
field :idempotency_level, 34, optional: true, type: Google.Protobuf.MethodOptions.IdempotencyLevel, default: :IDEMPOTENCY_UNKNOWN, enum: true
field :uninterpreted_option, 999, repeated: true, type: Google.Protobuf.UninterpretedOption
end
defmodule Google.Protobuf.MethodOptions.IdempotencyLevel do
use Protobuf, enum: true, syntax: :proto2
field :IDEMPOTENCY_UNKNOWN, 0
field :NO_SIDE_EFFECTS, 1
field :IDEMPOTENT, 2
end
defmodule Google.Protobuf.UninterpretedOption do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: [Google.Protobuf.UninterpretedOption.NamePart.t],
identifier_value: String.t,
positive_int_value: non_neg_integer,
negative_int_value: integer,
double_value: float,
string_value: String.t,
aggregate_value: String.t
}
defstruct [:name, :identifier_value, :positive_int_value, :negative_int_value, :double_value, :string_value, :aggregate_value]
field :name, 2, repeated: true, type: Google.Protobuf.UninterpretedOption.NamePart
field :identifier_value, 3, optional: true, type: :string
field :positive_int_value, 4, optional: true, type: :uint64
field :negative_int_value, 5, optional: true, type: :int64
field :double_value, 6, optional: true, type: :double
field :string_value, 7, optional: true, type: :bytes
field :aggregate_value, 8, optional: true, type: :string
end
defmodule Google.Protobuf.UninterpretedOption.NamePart do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name_part: String.t,
is_extension: boolean
}
defstruct [:name_part, :is_extension]
field :name_part, 1, required: true, type: :string
field :is_extension, 2, required: true, type: :bool
end
defmodule Google.Protobuf.SourceCodeInfo do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
location: [Google.Protobuf.SourceCodeInfo.Location.t]
}
defstruct [:location]
field :location, 1, repeated: true, type: Google.Protobuf.SourceCodeInfo.Location
end
defmodule Google.Protobuf.SourceCodeInfo.Location do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
path: [integer],
span: [integer],
leading_comments: String.t,
trailing_comments: String.t,
leading_detached_comments: [String.t]
}
defstruct [:path, :span, :leading_comments, :trailing_comments, :leading_detached_comments]
field :path, 1, repeated: true, type: :int32, packed: true
field :span, 2, repeated: true, type: :int32, packed: true
field :leading_comments, 3, optional: true, type: :string
field :trailing_comments, 4, optional: true, type: :string
field :leading_detached_comments, 6, repeated: true, type: :string
end
defmodule Google.Protobuf.GeneratedCodeInfo do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
annotation: [Google.Protobuf.GeneratedCodeInfo.Annotation.t]
}
defstruct [:annotation]
field :annotation, 1, repeated: true, type: Google.Protobuf.GeneratedCodeInfo.Annotation
end
defmodule Google.Protobuf.GeneratedCodeInfo.Annotation do
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
path: [integer],
source_file: String.t,
begin: integer,
end: integer
}
defstruct [:path, :source_file, :begin, :end]
field :path, 1, repeated: true, type: :int32, packed: true
field :source_file, 2, optional: true, type: :string
field :begin, 3, optional: true, type: :int32
field :end, 4, optional: true, type: :int32
end
|
lib/protobuf/protoc/descriptor.pb.ex
| 0.611382
| 0.444505
|
descriptor.pb.ex
|
starcoder
|
defmodule Zaryn.TransactionChain.Transaction do
@moduledoc """
Represents the main unit of the Zaryn network and its Transaction Chain.
Blocks are reduced to their unitary form to provide high scalability, avoiding double-spending attacks while preserving chain integrity
"""
alias Zaryn.Crypto
alias __MODULE__.CrossValidationStamp
alias __MODULE__.ValidationStamp
alias __MODULE__.ValidationStamp.LedgerOperations.TransactionMovement
alias Zaryn.TransactionChain.TransactionData
alias Zaryn.TransactionChain.TransactionData.Ledger
alias Zaryn.TransactionChain.TransactionData.NFTLedger
alias Zaryn.TransactionChain.TransactionData.ZARYNLedger
defstruct [
:address,
:type,
:data,
:previous_public_key,
:previous_signature,
:origin_signature,
:validation_stamp,
cross_validation_stamps: [],
version: 1
]
@typedoc """
Represent a transaction in pending validation
- Address: hash of the new generated public key for the given transaction
- Type: transaction type
- Data: transaction data zone (identity, keychain, smart contract, etc.)
- Previous signature: signature from the previous public key
- Previous public key: previous generated public key matching the previous signature
- Origin signature: signature from the device which originated the transaction (used in the Proof of work)
- Version: version of the transaction (used for backward compatibility)
When the transaction is validated the following fields are filled:
- Validation stamp: coordinator work result
- Cross validation stamps: endorsements of the coordinator's validation stamp
"""
@type t() :: %__MODULE__{
address: binary(),
type: transaction_type(),
data: TransactionData.t(),
previous_public_key: nil | Crypto.key(),
previous_signature: nil | binary(),
origin_signature: nil | binary(),
validation_stamp: nil | ValidationStamp.t(),
cross_validation_stamps: nil | list(CrossValidationStamp.t()),
version: pos_integer()
}
@typedoc """
Supported transaction types
"""
@type transaction_type ::
:node
| :node_shared_secrets
| :origin_shared_secrets
| :node_rewards
| :beacon
| :beacon_summary
| :oracle
| :oracle_summary
| :code_proposal
| :code_approval
| :keychain
| :keychain_access
| :transfer
| :nft
| :hosting
@transaction_types [
:node,
:node_shared_secrets,
:origin_shared_secrets,
:beacon,
:beacon_summary,
:oracle,
:oracle_summary,
:node_rewards,
:code_proposal,
:code_approval,
:keychain,
:keychain_access,
:transfer,
:hosting,
:nft
]
@doc """
List all the supported transaction types
"""
@spec types() :: list(transaction_type())
def types, do: @transaction_types
@doc """
Create a new pending transaction using the Crypto keystore to find out
the seed and the transaction index
The first node private key is used as origin private key
"""
@spec new(type :: transaction_type(), data :: TransactionData.t()) ::
t()
def new(type, data = %TransactionData{}) do
{previous_public_key, next_public_key} = get_transaction_public_keys(type)
%__MODULE__{
address: Crypto.hash(next_public_key),
type: type,
data: data,
previous_public_key: previous_public_key
}
|> previous_sign_transaction()
|> origin_sign_transaction()
end
@doc """
Create a new pending transaction
The first node private key is used as origin private key
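For example (the seed and index are illustrative):
```
tx = Transaction.new(:transfer, %TransactionData{}, "myseed", 0)
```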
"""
@spec new(
type :: transaction_type(),
data :: TransactionData.t(),
seed :: binary(),
index :: non_neg_integer()
) :: t()
def new(type, data = %TransactionData{}, seed, index)
when type in @transaction_types and is_binary(seed) and is_integer(index) and index >= 0 do
{previous_public_key, previous_private_key} = Crypto.derive_keypair(seed, index)
{next_public_key, _} = Crypto.derive_keypair(seed, index + 1)
%__MODULE__{
address: Crypto.hash(next_public_key),
type: type,
data: data,
previous_public_key: previous_public_key
}
|> previous_sign_transaction(previous_private_key)
|> origin_sign_transaction()
end
def new(
type,
data = %TransactionData{},
previous_private_key,
previous_public_key,
next_public_key
) do
%__MODULE__{
address: Crypto.hash(next_public_key),
type: type,
data: data,
previous_public_key: previous_public_key
}
|> previous_sign_transaction(previous_private_key)
|> origin_sign_transaction()
end
defp get_transaction_public_keys(:node_shared_secrets) do
key_index = Crypto.number_of_node_shared_secrets_keys()
previous_public_key = Crypto.node_shared_secrets_public_key(key_index)
next_public_key = Crypto.node_shared_secrets_public_key(key_index + 1)
{previous_public_key, next_public_key}
end
defp get_transaction_public_keys(:node_rewards) do
key_index = Crypto.number_of_network_pool_keys()
previous_public_key = Crypto.network_pool_public_key(key_index)
next_public_key = Crypto.network_pool_public_key(key_index + 1)
{previous_public_key, next_public_key}
end
defp get_transaction_public_keys(_) do
previous_public_key = Crypto.previous_node_public_key()
next_public_key = Crypto.next_node_public_key()
{previous_public_key, next_public_key}
end
defp previous_sign_transaction(tx = %__MODULE__{type: :node_shared_secrets}) do
key_index = Crypto.number_of_node_shared_secrets_keys()
previous_signature =
tx
|> extract_for_previous_signature()
|> serialize()
|> Crypto.sign_with_node_shared_secrets_key(key_index)
%{tx | previous_signature: previous_signature}
end
defp previous_sign_transaction(tx = %__MODULE__{type: :node_rewards}) do
key_index = Crypto.number_of_network_pool_keys()
previous_signature =
tx
|> extract_for_previous_signature()
|> serialize()
|> Crypto.sign_with_network_pool_key(key_index)
%{tx | previous_signature: previous_signature}
end
defp previous_sign_transaction(tx = %__MODULE__{}) do
previous_signature =
tx
|> extract_for_previous_signature()
|> serialize()
|> Crypto.sign_with_previous_node_key()
%{tx | previous_signature: previous_signature}
end
defp previous_sign_transaction(tx = %__MODULE__{}, private_key) do
previous_signature =
tx
|> extract_for_previous_signature()
|> serialize()
|> Crypto.sign(private_key)
%{tx | previous_signature: previous_signature}
end
defp origin_sign_transaction(tx) do
origin_sig =
tx
|> extract_for_origin_signature
|> serialize()
|> Crypto.sign_with_first_node_key()
%{tx | origin_signature: origin_sig}
end
@doc """
Extract the transaction payload for the previous signature including address, type and data
"""
@spec extract_for_previous_signature(t()) :: t()
def extract_for_previous_signature(tx = %__MODULE__{}) do
%__MODULE__{
address: tx.address,
type: tx.type,
data: tx.data
}
end
@doc """
Extract the transaction payload for the origin signature including address
type data, previous_public_key and previous_signature
"""
@spec extract_for_origin_signature(t()) :: t()
def extract_for_origin_signature(tx = %__MODULE__{}) do
%__MODULE__{
address: tx.address,
type: tx.type,
data: tx.data,
previous_public_key: tx.previous_public_key,
previous_signature: tx.previous_signature
}
end
@doc """
Serialize transaction type
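## Examples
iex> Transaction.serialize_type(:transfer)
253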
"""
@spec serialize_type(transaction_type()) :: non_neg_integer()
# Network transaction's type
def serialize_type(:node), do: 0
def serialize_type(:node_shared_secrets), do: 1
def serialize_type(:origin_shared_secrets), do: 2
def serialize_type(:beacon), do: 3
def serialize_type(:beacon_summary), do: 4
def serialize_type(:oracle), do: 5
def serialize_type(:oracle_summary), do: 6
def serialize_type(:code_proposal), do: 7
def serialize_type(:code_approval), do: 8
def serialize_type(:node_rewards), do: 9
# User transaction's type
def serialize_type(:keychain), do: 255
def serialize_type(:keychain_access), do: 254
def serialize_type(:transfer), do: 253
def serialize_type(:hosting), do: 252
def serialize_type(:nft), do: 251
@doc """
Parse a serialized transaction type
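## Examples
iex> Transaction.parse_type(253)
:transfer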
"""
@spec parse_type(non_neg_integer()) :: transaction_type()
# Network transaction's type
def parse_type(0), do: :node
def parse_type(1), do: :node_shared_secrets
def parse_type(2), do: :origin_shared_secrets
def parse_type(3), do: :beacon
def parse_type(4), do: :beacon_summary
def parse_type(5), do: :oracle
def parse_type(6), do: :oracle_summary
def parse_type(7), do: :code_proposal
def parse_type(8), do: :code_approval
def parse_type(9), do: :node_rewards
# User transaction's type
def parse_type(255), do: :keychain
def parse_type(254), do: :keychain_access
def parse_type(253), do: :transfer
def parse_type(252), do: :hosting
def parse_type(251), do: :nft
@doc """
Determines if a transaction type is a network one
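## Examples
iex> Transaction.network_type?(:node_rewards)
true
iex> Transaction.network_type?(:transfer)
false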
"""
@spec network_type?(transaction_type()) :: boolean()
def network_type?(:node), do: true
def network_type?(:node_shared_secrets), do: true
def network_type?(:origin_shared_secrets), do: true
def network_type?(:code_proposal), do: true
def network_type?(:code_approval), do: true
def network_type?(:oracle), do: true
def network_type?(:oracle_summary), do: true
def network_type?(:node_rewards), do: true
def network_type?(_), do: false
@doc """
Extract the pending transaction fields from a transaction
"""
@spec to_pending(t()) :: t()
def to_pending(tx = %__MODULE__{}) do
%{tx | validation_stamp: nil, cross_validation_stamps: nil}
end
@doc """
Get the transfers and transaction movements from a transaction
## Examples
iex> %Transaction{
...> data: %TransactionData{
...> ledger: %Ledger{
...> zaryn: %ZARYNLedger{
...> transfers: [
...> %ZARYNLedger.Transfer{to: "@Alice1", amount: 10}
...> ]
...> },
...> nft: %NFTLedger{
...> transfers: [
...> %NFTLedger.Transfer{to: "@Alice1", amount: 3, nft: "@BobNFT"}
...> ]
...> }
...> }
...> }
...> } |> Transaction.get_movements()
[
%TransactionMovement{
to: "@Alice1", amount: 10, type: :ZARYN,
},
%TransactionMovement{
to: "@Alice1", amount: 3, type: {:NFT, "@BobNFT"},
}
]
"""
@spec get_movements(t()) :: list(TransactionMovement.t())
def get_movements(%__MODULE__{
data: %TransactionData{
ledger: %Ledger{
zaryn: %ZARYNLedger{transfers: zaryn_transfers},
nft: %NFTLedger{transfers: nft_transfers}
}
}
}) do
Enum.map(zaryn_transfers, &%TransactionMovement{to: &1.to, amount: &1.amount, type: :ZARYN}) ++
Enum.map(
nft_transfers,
&%TransactionMovement{to: &1.to, amount: &1.amount, type: {:NFT, &1.nft}}
)
end
@doc """
Return the previous transaction address from the previous public key
"""
@spec previous_address(t()) :: binary()
def previous_address(%__MODULE__{previous_public_key: previous_public_key}),
do: Crypto.hash(previous_public_key)
@doc """
Determines if the atomic commitment has been reached
## Examples
iex> %Transaction{
...> cross_validation_stamps: [
...> %CrossValidationStamp{inconsistencies: []},
...> %CrossValidationStamp{inconsistencies: [:invalid_ledger_operations]},
...> %CrossValidationStamp{inconsistencies: [:invalid_proof_of_work]}
...> ]
...> } |> Transaction.atomic_commitment?
false
iex> %Transaction{
...> cross_validation_stamps: [
...> %CrossValidationStamp{inconsistencies: []},
...> %CrossValidationStamp{inconsistencies: []},
...> %CrossValidationStamp{inconsistencies: []}
...> ]
...> } |> Transaction.atomic_commitment?
true
"""
@spec atomic_commitment?(t()) :: boolean()
def atomic_commitment?(%__MODULE__{cross_validation_stamps: stamps}) when is_list(stamps) do
nb_distinct_inconsistencies =
stamps
|> Enum.dedup_by(& &1.inconsistencies)
|> length
nb_distinct_inconsistencies == 1
end
def atomic_commitment?(%__MODULE__{cross_validation_stamps: _}), do: false
@doc """
Verify if the public key match the origin signature of the transaction
"""
@spec verify_origin_signature?(t(), Crypto.key()) :: boolean()
def verify_origin_signature?(%__MODULE__{}, ""), do: false
def verify_origin_signature?(tx = %__MODULE__{origin_signature: origin_signature}, public_key)
when is_binary(public_key) do
raw_tx =
tx
|> extract_for_origin_signature()
|> serialize()
Crypto.verify?(origin_signature, raw_tx, public_key)
end
@doc """
Determines if the previous signature is valid
"""
@spec verify_previous_signature?(t()) :: boolean()
def verify_previous_signature?(
tx = %__MODULE__{previous_public_key: prev_key, previous_signature: prev_sig}
) do
raw_tx =
tx
|> extract_for_previous_signature()
|> serialize()
Crypto.verify?(prev_sig, raw_tx, prev_key)
end
@doc """
Serialize a transaction into binary format
## Examples
iex> %Transaction{
...> version: 1,
...> address: <<0, 62, 198, 74, 197, 246, 83, 6, 174, 95, 223, 107, 92, 12, 36, 93, 197, 197,
...> 196, 186, 34, 34, 134, 184, 95, 181, 113, 255, 93, 134, 197, 243, 85>>,
...> type: :transfer,
...> data: %TransactionData{},
...> previous_public_key: <<0, 61, 250, 128, 151, 100, 231, 128, 158, 139, 88, 128, 68, 236, 240, 238, 116,
...> 186, 164, 87, 3, 60, 198, 21, 248, 64, 207, 58, 221, 192, 131, 180, 213>>,
...> previous_signature: <<65, 66, 248, 246, 119, 69, 36, 103, 249, 201, 252, 154, 69, 24, 48, 18, 63,
...> 65, 5, 10, 248, 37, 245, 101, 19, 118, 235, 82, 161, 165, 62, 43, 249, 237,
...> 223, 226, 253, 241, 155, 33, 45, 164, 50, 14, 176, 241, 3, 107, 12, 177, 47,
...> 20, 235, 147, 252, 28, 136, 226, 176, 180, 170, 85, 3, 151>>,
...> origin_signature: <<10, 165, 166, 170, 175, 231, 164, 69, 83, 150, 36, 135, 144, 20, 104, 226,
...> 183, 149, 250, 90, 117, 107, 162, 17, 63, 118, 229, 125, 15, 189, 245, 64,
...> 214, 93, 126, 179, 251, 41, 101, 249, 226, 180, 88, 241, 184, 154, 181, 156,
...> 178, 213, 132, 220, 31, 63, 23, 165, 174, 82, 182, 120, 142, 87, 34, 132>>,
...> validation_stamp: %ValidationStamp{
...> timestamp: ~U[2020-06-26 06:37:04Z],
...> proof_of_work: <<0, 212, 52, 50, 200, 144, 139, 192, 177, 99, 145, 174, 178, 113, 229, 251, 170,
...> 186, 184, 109, 13, 200, 136, 34, 241, 99, 99, 210, 172, 143, 104, 160, 99>>,
...> proof_of_integrity: <<0, 199, 216, 73, 158, 82, 76, 158, 8, 215, 22, 186, 166, 45, 153, 17, 22, 251,
...> 133, 212, 35, 220, 155, 242, 198, 93, 133, 134, 244, 226, 122, 87, 17>>,
...> proof_of_election: <<84, 71, 95, 29, 105, 203, 16, 245, 173, 18, 126, 216, 43, 32, 143, 223, 71,
...> 184, 247, 123, 166, 185, 137, 6, 151, 77, 251, 163, 132, 132, 235, 136>>,
...> ledger_operations: %LedgerOperations{
...> fee: 0.1,
...> transaction_movements: [],
...> node_movements: [],
...> unspent_outputs: []
...> },
...> recipients: [],
...> errors: [],
...> signature: <<47, 48, 215, 147, 153, 120, 199, 102, 130, 0, 51, 138, 164, 146, 99, 2, 74,
...> 116, 89, 117, 185, 72, 109, 10, 198, 124, 44, 66, 126, 43, 85, 186, 105, 169,
...> 159, 56, 129, 179, 207, 176, 97, 190, 162, 240, 186, 164, 58, 41, 221, 27,
...> 234, 185, 105, 75, 81, 238, 158, 13, 150, 184, 31, 247, 79, 251>>
...> },
...> cross_validation_stamps: [
...> %CrossValidationStamp{
...> node_public_key: <<0, 253, 187, 69, 83, 77, 33, 173, 15, 226, 88, 230, 68, 235, 114, 146, 89, 221,
...> 115, 26, 63, 191, 152, 219, 245, 217, 29, 140, 42, 58, 104, 109, 108>>,
...> signature: <<171, 239, 218, 149, 194, 162, 14, 64, 73, 160, 142, 149, 100, 135, 215, 48,
...> 133, 74, 61, 203, 113, 16, 163, 135, 41, 8, 227, 82, 131, 248, 200, 50, 113,
...> 195, 241, 169, 208, 28, 2, 53, 49, 141, 89, 99, 12, 189, 115, 188, 75, 11, 42,
...> 149, 223, 53, 234, 144, 115, 42, 99, 54, 94, 120, 25, 193>>,
...> inconsistencies: []
...> }
...> ]
...> }
...> |> Transaction.serialize()
<<
# Version
0, 0, 0, 1,
# Address
0, 62, 198, 74, 197, 246, 83, 6, 174, 95, 223, 107, 92, 12, 36, 93, 197, 197,
196, 186, 34, 34, 134, 184, 95, 181, 113, 255, 93, 134, 197, 243, 85,
# Transaction type (transfer),
253,
# Code size
0, 0, 0, 0,
# Content size
0, 0, 0, 0,
# Secret size
0, 0, 0, 0,
# Nb authorized keys
0,
# Nb ZARYN transfers
0,
# Nb NFT transfers
0,
# Nb recipients
0,
# Previous public key
0, 61, 250, 128, 151, 100, 231, 128, 158, 139, 88, 128, 68, 236, 240, 238, 116,
186, 164, 87, 3, 60, 198, 21, 248, 64, 207, 58, 221, 192, 131, 180, 213,
# Previous signature size
64,
# Previous signature
65, 66, 248, 246, 119, 69, 36, 103, 249, 201, 252, 154, 69, 24, 48, 18, 63,
65, 5, 10, 248, 37, 245, 101, 19, 118, 235, 82, 161, 165, 62, 43, 249, 237,
223, 226, 253, 241, 155, 33, 45, 164, 50, 14, 176, 241, 3, 107, 12, 177, 47,
20, 235, 147, 252, 28, 136, 226, 176, 180, 170, 85, 3, 151,
# Origin signature size
64,
# Origin signature
10, 165, 166, 170, 175, 231, 164, 69, 83, 150, 36, 135, 144, 20, 104, 226,
183, 149, 250, 90, 117, 107, 162, 17, 63, 118, 229, 125, 15, 189, 245, 64,
214, 93, 126, 179, 251, 41, 101, 249, 226, 180, 88, 241, 184, 154, 181, 156,
178, 213, 132, 220, 31, 63, 23, 165, 174, 82, 182, 120, 142, 87, 34, 132,
# Validated transaction
1,
# Validation Stamp timestamp
94, 245, 151, 144,
# Proof of work
0, 212, 52, 50, 200, 144, 139, 192, 177, 99, 145, 174, 178, 113, 229, 251, 170,
186, 184, 109, 13, 200, 136, 34, 241, 99, 99, 210, 172, 143, 104, 160, 99,
# Proof of integrity
0, 199, 216, 73, 158, 82, 76, 158, 8, 215, 22, 186, 166, 45, 153, 17, 22, 251,
133, 212, 35, 220, 155, 242, 198, 93, 133, 134, 244, 226, 122, 87, 17,
# Proof of election
84, 71, 95, 29, 105, 203, 16, 245, 173, 18, 126, 216, 43, 32, 143, 223, 71,
184, 247, 123, 166, 185, 137, 6, 151, 77, 251, 163, 132, 132, 235, 136,
# Fee
63, 185, 153, 153, 153, 153, 153, 154,
# Nb transaction movements
0,
# Nb node movements
0,
# Nb unspent outputs,
0,
# Nb resolved recipients,
0,
# Nb errors
0,
# Signature size
64,
# Signature
47, 48, 215, 147, 153, 120, 199, 102, 130, 0, 51, 138, 164, 146, 99, 2, 74,
116, 89, 117, 185, 72, 109, 10, 198, 124, 44, 66, 126, 43, 85, 186, 105, 169,
159, 56, 129, 179, 207, 176, 97, 190, 162, 240, 186, 164, 58, 41, 221, 27,
234, 185, 105, 75, 81, 238, 158, 13, 150, 184, 31, 247, 79, 251,
# Nb cross validation stamps
1,
# Node public key
0, 253, 187, 69, 83, 77, 33, 173, 15, 226, 88, 230, 68, 235, 114, 146, 89, 221,
115, 26, 63, 191, 152, 219, 245, 217, 29, 140, 42, 58, 104, 109, 108,
# Signature size
64,
# Signature
171, 239, 218, 149, 194, 162, 14, 64, 73, 160, 142, 149, 100, 135, 215, 48,
133, 74, 61, 203, 113, 16, 163, 135, 41, 8, 227, 82, 131, 248, 200, 50, 113,
195, 241, 169, 208, 28, 2, 53, 49, 141, 89, 99, 12, 189, 115, 188, 75, 11, 42,
149, 223, 53, 234, 144, 115, 42, 99, 54, 94, 120, 25, 193,
# Nb inconsistencies
0
>>
"""
@spec serialize(t()) :: bitstring()
def serialize(%__MODULE__{
version: version,
address: address,
type: type,
data: data,
previous_public_key: nil,
previous_signature: nil,
origin_signature: nil,
validation_stamp: nil
}) do
<<version::32, address::binary, serialize_type(type)::8,
TransactionData.serialize(data)::binary>>
end
def serialize(%__MODULE__{
version: version,
address: address,
type: type,
data: data,
previous_public_key: previous_public_key,
previous_signature: previous_signature,
origin_signature: nil,
validation_stamp: nil
}) do
<<version::32, address::binary, serialize_type(type)::8,
TransactionData.serialize(data)::binary, previous_public_key::binary,
byte_size(previous_signature)::8, previous_signature::binary>>
end
def serialize(%__MODULE__{
version: version,
address: address,
type: type,
data: data,
previous_public_key: previous_public_key,
previous_signature: previous_signature,
origin_signature: origin_signature,
validation_stamp: nil
}) do
<<version::32, address::binary, serialize_type(type)::8,
TransactionData.serialize(data)::binary, previous_public_key::binary,
byte_size(previous_signature)::8, previous_signature::binary,
byte_size(origin_signature)::8, origin_signature::binary, 0::8>>
end
def serialize(%__MODULE__{
version: version,
address: address,
type: type,
data: data,
previous_public_key: previous_public_key,
previous_signature: previous_signature,
origin_signature: origin_signature,
validation_stamp: validation_stamp,
cross_validation_stamps: cross_validation_stamps
}) do
cross_validation_stamps_bin =
cross_validation_stamps
|> Enum.map(&CrossValidationStamp.serialize/1)
|> :erlang.list_to_binary()
<<version::32, address::binary, serialize_type(type)::8,
TransactionData.serialize(data)::binary, previous_public_key::binary,
byte_size(previous_signature)::8, previous_signature::binary,
byte_size(origin_signature)::8, origin_signature::binary, 1::8,
ValidationStamp.serialize(validation_stamp)::bitstring, length(cross_validation_stamps)::8,
cross_validation_stamps_bin::binary>>
end
@doc """
Deserialize an encoded transaction
## Examples
iex> <<0, 0, 0, 1, 0, 62, 198, 74, 197, 246, 83, 6, 174, 95, 223, 107, 92, 12, 36, 93, 197, 197,
...> 196, 186, 34, 34, 134, 184, 95, 181, 113, 255, 93, 134, 197, 243, 85, 253,
...> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 250, 128, 151, 100, 231, 128, 158, 139,
...> 88, 128, 68, 236, 240, 238, 116, 186, 164, 87, 3, 60, 198, 21, 248, 64, 207, 58, 221, 192,
...> 131, 180, 213, 64, 65, 66, 248, 246, 119, 69, 36, 103, 249, 201, 252, 154, 69, 24, 48, 18, 63,
...> 65, 5, 10, 248, 37, 245, 101, 19, 118, 235, 82, 161, 165, 62, 43, 249, 237,
...> 223, 226, 253, 241, 155, 33, 45, 164, 50, 14, 176, 241, 3, 107, 12, 177, 47,
...> 20, 235, 147, 252, 28, 136, 226, 176, 180, 170, 85, 3, 151, 64, 10, 165, 166, 170,
...> 175, 231, 164, 69, 83, 150, 36, 135, 144, 20, 104, 226,
...> 183, 149, 250, 90, 117, 107, 162, 17, 63, 118, 229, 125, 15, 189, 245, 64,
...> 214, 93, 126, 179, 251, 41, 101, 249, 226, 180, 88, 241, 184, 154, 181, 156,
...> 178, 213, 132, 220, 31, 63, 23, 165, 174, 82, 182, 120, 142, 87, 34, 132,
...> 1, 94, 245, 151, 144, 0, 0, 212, 52, 50, 200, 144, 139, 192, 177, 99, 145, 174, 178, 113, 229, 251, 170,
...> 186, 184, 109, 13, 200, 136, 34, 241, 99, 99, 210, 172, 143, 104, 160, 99,
...> 0, 199, 216, 73, 158, 82, 76, 158, 8, 215, 22, 186, 166, 45, 153, 17, 22, 251,
...> 133, 212, 35, 220, 155, 242, 198, 93, 133, 134, 244, 226, 122, 87, 17,
...> 74, 224, 26, 42, 253, 85, 104, 246, 72, 244, 189, 182, 165, 94, 92, 20, 166,
...> 149, 124, 246, 219, 170, 160, 168, 206, 214, 236, 215, 211, 121, 95, 149, 132,
...> 136, 114, 244, 132, 44, 255, 222, 98, 76, 247, 125, 45, 170, 95, 51, 46, 229,
...> 21, 32, 226, 99, 16, 5, 107, 207, 32, 240, 23, 85, 219, 247,
...> 63, 185, 153, 153, 153, 153, 153, 154, 0, 0, 0, 0, 0, 64, 47, 48, 215, 147, 153, 120, 199,
...> 102, 130, 0, 51, 138, 164, 146, 99, 2, 74, 116, 89, 117, 185, 72, 109, 10, 198, 124,
...> 44, 66, 126, 43, 85, 186, 105, 169, 159, 56, 129, 179, 207, 176, 97, 190, 162, 240,
...> 186, 164, 58, 41, 221, 27, 234, 185, 105, 75, 81, 238, 158, 13, 150, 184, 31, 247, 79, 251,
...> 1, 0, 0, 253, 187, 69, 83, 77, 33, 173, 15, 226, 88, 230, 68, 235, 114, 146, 89, 221,
...> 115, 26, 63, 191, 152, 219, 245, 217, 29, 140, 42, 58, 104, 109, 108,
...> 64, 171, 239, 218, 149, 194, 162, 14, 64, 73, 160, 142, 149, 100, 135, 215, 48,
...> 133, 74, 61, 203, 113, 16, 163, 135, 41, 8, 227, 82, 131, 248, 200, 50, 113,
...> 195, 241, 169, 208, 28, 2, 53, 49, 141, 89, 99, 12, 189, 115, 188, 75, 11, 42,
...> 149, 223, 53, 234, 144, 115, 42, 99, 54, 94, 120, 25, 193, 0>>
...> |> Transaction.deserialize()
{
%Transaction{
version: 1,
address: <<0, 62, 198, 74, 197, 246, 83, 6, 174, 95, 223, 107, 92, 12, 36, 93, 197, 197,
196, 186, 34, 34, 134, 184, 95, 181, 113, 255, 93, 134, 197, 243, 85>>,
type: :transfer,
data: %TransactionData{},
previous_public_key: <<0, 0, 61, 250, 128, 151, 100, 231, 128, 158, 139, 88, 128, 68, 236, 240, 238, 116,
186, 164, 87, 3, 60, 198, 21, 248, 64, 207, 58, 221, 192, 131, 180, 213>>,
previous_signature: <<65, 66, 248, 246, 119, 69, 36, 103, 249, 201, 252, 154, 69, 24, 48, 18, 63,
65, 5, 10, 248, 37, 245, 101, 19, 118, 235, 82, 161, 165, 62, 43, 249, 237,
223, 226, 253, 241, 155, 33, 45, 164, 50, 14, 176, 241, 3, 107, 12, 177, 47,
20, 235, 147, 252, 28, 136, 226, 176, 180, 170, 85, 3, 151>>,
origin_signature: <<10, 165, 166, 170, 175, 231, 164, 69, 83, 150, 36, 135, 144, 20, 104, 226,
183, 149, 250, 90, 117, 107, 162, 17, 63, 118, 229, 125, 15, 189, 245, 64,
214, 93, 126, 179, 251, 41, 101, 249, 226, 180, 88, 241, 184, 154, 181, 156,
178, 213, 132, 220, 31, 63, 23, 165, 174, 82, 182, 120, 142, 87, 34, 132>>,
validation_stamp: %ValidationStamp{
timestamp: ~U[2020-06-26 06:37:04Z],
proof_of_work: <<0, 0, 212, 52, 50, 200, 144, 139, 192, 177, 99, 145, 174, 178, 113, 229, 251, 170,
186, 184, 109, 13, 200, 136, 34, 241, 99, 99, 210, 172, 143, 104, 160, 99>>,
proof_of_integrity: <<0, 199, 216, 73, 158, 82, 76, 158, 8, 215, 22, 186, 166, 45, 153, 17, 22, 251,
133, 212, 35, 220, 155, 242, 198, 93, 133, 134, 244, 226, 122, 87, 17>>,
proof_of_election: <<74, 224, 26, 42, 253, 85, 104, 246, 72, 244, 189, 182, 165, 94, 92, 20, 166,
149, 124, 246, 219, 170, 160, 168, 206, 214, 236, 215, 211, 121, 95, 149, 132,
136, 114, 244, 132, 44, 255, 222, 98, 76, 247, 125, 45, 170, 95, 51, 46, 229,
21, 32, 226, 99, 16, 5, 107, 207, 32, 240, 23, 85, 219, 247>>,
ledger_operations: %LedgerOperations{
fee: 0.1,
transaction_movements: [],
node_movements: [],
unspent_outputs: []
},
errors: [],
recipients: [],
signature: <<47, 48, 215, 147, 153, 120, 199, 102, 130, 0, 51, 138, 164, 146, 99, 2, 74,
116, 89, 117, 185, 72, 109, 10, 198, 124, 44, 66, 126, 43, 85, 186, 105, 169,
159, 56, 129, 179, 207, 176, 97, 190, 162, 240, 186, 164, 58, 41, 221, 27,
234, 185, 105, 75, 81, 238, 158, 13, 150, 184, 31, 247, 79, 251>>
},
cross_validation_stamps: [
%CrossValidationStamp{
node_public_key: <<0, 0, 253, 187, 69, 83, 77, 33, 173, 15, 226, 88, 230, 68, 235, 114, 146, 89, 221,
115, 26, 63, 191, 152, 219, 245, 217, 29, 140, 42, 58, 104, 109, 108>>,
signature: <<171, 239, 218, 149, 194, 162, 14, 64, 73, 160, 142, 149, 100, 135, 215, 48,
133, 74, 61, 203, 113, 16, 163, 135, 41, 8, 227, 82, 131, 248, 200, 50, 113,
195, 241, 169, 208, 28, 2, 53, 49, 141, 89, 99, 12, 189, 115, 188, 75, 11, 42,
149, 223, 53, 234, 144, 115, 42, 99, 54, 94, 120, 25, 193>>,
inconsistencies: []
}
]
},
""
}
"""
@spec deserialize(bitstring()) :: {transaction :: t(), rest :: bitstring}
def deserialize(_serialized_term = <<version::32, hash_algo::8, rest::bitstring>>) do
address_size = Crypto.hash_size(hash_algo)
<<address::binary-size(address_size), type::8, rest::bitstring>> = rest
{data, rest} = TransactionData.deserialize(rest)
<<curve_id::8, origin_id::8, rest::bitstring>> = rest
key_size = Crypto.key_size(curve_id)
<<previous_public_key::binary-size(key_size), previous_signature_size::8,
previous_signature::binary-size(previous_signature_size), origin_signature_size::8,
origin_signature::binary-size(origin_signature_size), validated::8, rest::bitstring>> = rest
tx = %__MODULE__{
version: version,
address: <<hash_algo::8, address::binary>>,
type: parse_type(type),
data: data,
previous_public_key: <<curve_id::8, origin_id::8, previous_public_key::binary>>,
previous_signature: previous_signature,
origin_signature: origin_signature
}
case validated do
0 ->
{tx, rest}
1 ->
{validation_stamp, rest} = ValidationStamp.deserialize(rest)
<<nb_cross_validations_stamps::8, rest::bitstring>> = rest
{cross_validation_stamps, rest} =
reduce_cross_validation_stamps(rest, nb_cross_validations_stamps, [])
{
%{
tx
| validation_stamp: validation_stamp,
cross_validation_stamps: cross_validation_stamps
},
rest
}
end
end
defp reduce_cross_validation_stamps(rest, 0, _), do: {[], rest}
defp reduce_cross_validation_stamps(rest, nb_stamps, acc) when length(acc) == nb_stamps do
{Enum.reverse(acc), rest}
end
defp reduce_cross_validation_stamps(rest, nb_stamps, acc) do
{stamp, rest} = CrossValidationStamp.deserialize(rest)
reduce_cross_validation_stamps(rest, nb_stamps, [stamp | acc])
end
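@doc """
Convert a transaction into a plain map.
"""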
@spec to_map(t()) :: map()
def to_map(tx = %__MODULE__{}) do
%{
version: tx.version,
address: tx.address,
type: Atom.to_string(tx.type),
data: TransactionData.to_map(tx.data),
previous_public_key: tx.previous_public_key,
previous_signature: tx.previous_signature,
origin_signature: tx.origin_signature,
validation_stamp: ValidationStamp.to_map(tx.validation_stamp),
cross_validation_stamps:
Enum.map(tx.cross_validation_stamps || [], &CrossValidationStamp.to_map/1)
}
end
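@doc """
Build a transaction struct from a plain map (roughly the inverse of `to_map/1`).
"""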
@spec from_map(map()) :: t()
def from_map(tx = %{}) do
type =
case Map.get(tx, :type) do
nil ->
nil
type ->
String.to_atom(type)
end
%__MODULE__{
version: Map.get(tx, :version),
address: Map.get(tx, :address),
type: type,
data: Map.get(tx, :data, %TransactionData{}) |> TransactionData.from_map(),
previous_public_key: Map.get(tx, :previous_public_key),
previous_signature: Map.get(tx, :previous_signature),
origin_signature: Map.get(tx, :origin_signature),
validation_stamp: Map.get(tx, :validation_stamp) |> ValidationStamp.from_map(),
cross_validation_stamps:
(Map.get(tx, :cross_validation_stamps) || [])
|> Enum.map(&CrossValidationStamp.from_map/1)
}
end
end
|
lib/zaryn/transaction_chain/transaction.ex
| 0.909055
| 0.467696
|
transaction.ex
|
starcoder
|
defmodule TimeZoneInfo.Downloader do
@moduledoc """
The behaviour for downloaders.
"""
@typedoc """
The `mode` to download the data.
Possible modes:
- `:iana` downloads the data as a zipped tar archive in IANA format.
- `:etf` downloads the data as a compressed file in the `TimeZoneInfo`
[External Term Format](http://erlang.org/doc/apps/erts/erl_ext_dist.html).
- `:ws` downloads the data from a web service. In this mode, the
configuration is sent to the server. The returned data is transformed
according to the config on the server and comes in the same format as in the
mode `:etf`.
"""
@type mode :: :iana | :etf | :ws
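# A minimal sketch of the expected application environment, using the keys
# this module reads below (`:module`, `:uri`, `:mode`); `MyDownloader` is a
# hypothetical callback module implementing this behaviour:
#
#     config :time_zone_info,
#       downloader: [
#         module: MyDownloader,
#         uri: "https://data.iana.org/time-zones/tzdata-latest.tar.gz",
#         mode: :iana
#       ]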
@typedoc "HTTP headers."
@type headers() :: [{header_name :: String.t(), header_value :: String.t()}]
@type opts :: [
headers: headers(),
mode: mode()
]
@typedoc "HTTP status code"
@type status_code :: non_neg_integer()
@type download :: {:ok, {status_code(), binary()}}
@mode [:iana, :etf, :ws]
@callback download(uri :: URI.t(), opts :: opts) :: download() | {:error, term()}
@doc false
def download(config) do
with {:ok, mode} <- mode(),
{:ok, uri} <- uri(mode, config),
{:ok, opts} <- opts(config),
{:ok, data} <- impl().download(uri, opts),
do: {:ok, mode, data}
end
@spec impl :: module()
defp impl do
:time_zone_info
|> Application.get_env(:downloader)
|> Keyword.fetch!(:module)
end
@spec mode :: {:ok, mode()} | {:error, term()}
defp mode do
with {:ok, value} <- fetch_env(:mode) do
case value in @mode do
true -> {:ok, value}
false -> {:error, {:invalid_config, [downloader: [mode: value]]}}
end
end
end
@spec uri :: {:ok, URI.t()} | {:error, term()}
defp uri do
with {:ok, uri} <- fetch_env(:uri) do
{:ok, URI.parse(uri)}
end
end
@spec uri(mode(), keyword()) :: {:ok, URI.t()} | {:error, term()}
defp uri(:ws, config) do
with {:ok, uri} <- uri() do
query = config |> prepare_query() |> URI.encode_query()
{:ok, %URI{uri | query: query}}
end
end
defp uri(_, _), do: uri()
defp prepare_query(config) do
config
|> prepare_query(:time_zones, :"time_zones[]")
|> prepare_query(:files, :"files[]")
end
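# e.g. `time_zones: ["Europe/Berlin"]` becomes repeated
# `{:"time_zones[]", "Europe/Berlin"}` pairs ready for URI.encode_query/1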
defp prepare_query(config, key, query_key) do
case Keyword.pop(config, key) do
{[_ | _] = values, config} ->
Enum.into(values, config, fn value -> {query_key, value} end)
{_, config} ->
config
end
end
@spec opts(keyword()) :: {:ok, keyword()} | {:error, term()}
defp opts(config) do
with {:ok, opts} <- fetch_env() do
opts =
opts
|> Keyword.delete(:module)
|> Keyword.delete(:uri)
|> add_header("if-none-match", config[:checksum])
{:ok, opts}
end
end
defp add_header(opts, _key, nil), do: opts
defp add_header(opts, key, value) do
case opts[:headers] do
nil ->
Keyword.put(opts, :headers, [{key, value}])
headers ->
Keyword.put(opts, :headers, [{key, value} | headers])
end
end
@spec fetch_env :: {:ok, keyword()} | {:error, term()}
defp fetch_env do
with :error <- Application.fetch_env(:time_zone_info, :downloader) do
{:error, {:invalid_config, :downloader}}
end
end
@spec fetch_env(Keyword.key()) :: {:ok, term()} | {:error, term()}
defp fetch_env(key) do
with {:ok, env} <- fetch_env(),
:error <- Keyword.fetch(env, key) do
{:error, {:invalid_config, [:downloader, key]}}
end
end
end
|
lib/time_zone_info/downloader.ex
| 0.890735
| 0.602383
|
downloader.ex
|
starcoder
|
defmodule ShEx.NodeConstraint.NumericFacets do
@moduledoc false
defstruct ~w[mininclusive minexclusive maxinclusive maxexclusive totaldigits fractiondigits]a
alias RDF.{Literal, XSD}
def new(xs_facets) do
xs_facets_with_literals =
Map.new(xs_facets, fn
{key, value} when key in ~w[mininclusive minexclusive maxinclusive maxexclusive]a ->
{key, value |> XSD.Decimal.new() |> Literal.canonical()}
{key, value} ->
{key, value}
end)
numeric_facets = struct(__MODULE__, xs_facets_with_literals)
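# Return nil when no facet is set, so `satisfies/2` can short-circuit to :ok.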
if %__MODULE__{} != numeric_facets do
numeric_facets
end
end
# TODO: instead of checking on every application which constraints are set and must be applied, this could be compiled into a minimal constraint checker
def satisfies(nil, _), do: :ok
def satisfies(numeric_facets, %Literal{} = node) do
with true <- XSD.Numeric.datatype?(node) and Literal.valid?(node),
true <- satisfies_numeric_mininclusive(numeric_facets.mininclusive, node),
true <- satisfies_numeric_minexclusive(numeric_facets.minexclusive, node),
true <- satisfies_numeric_maxinclusive(numeric_facets.maxinclusive, node),
true <- satisfies_numeric_maxexclusive(numeric_facets.maxexclusive, node),
true <- satisfies_numeric_totaldigits(numeric_facets.totaldigits, node),
true <- satisfies_numeric_fractiondigits(numeric_facets.fractiondigits, node) do
:ok
else
false ->
%ShEx.Violation.NumericFacetConstraint{
facet_type: :invalid_numeric,
node: node
}
{:violates, type, value} ->
%ShEx.Violation.NumericFacetConstraint{
facet_type: type,
facet_value: value,
node: node
}
end
end
def satisfies(_, node) do
%ShEx.Violation.NumericFacetConstraint{
facet_type: :invalid_numeric,
node: node
}
end
defp satisfies_numeric_mininclusive(nil, _), do: true
defp satisfies_numeric_mininclusive(mininclusive, literal) do
RDF.Literal.compare(literal, mininclusive) in [:gt, :eq] ||
{:violates, :mininclusive, mininclusive}
end
defp satisfies_numeric_minexclusive(nil, _), do: true
defp satisfies_numeric_minexclusive(minexclusive, literal) do
RDF.Literal.compare(literal, minexclusive) == :gt ||
{:violates, :minexclusive, minexclusive}
end
defp satisfies_numeric_maxinclusive(nil, _), do: true
defp satisfies_numeric_maxinclusive(maxinclusive, literal) do
RDF.Literal.compare(literal, maxinclusive) in [:lt, :eq] ||
{:violates, :maxinclusive, maxinclusive}
end
defp satisfies_numeric_maxexclusive(nil, _), do: true
defp satisfies_numeric_maxexclusive(maxexclusive, literal) do
RDF.Literal.compare(literal, maxexclusive) == :lt ||
{:violates, :maxexclusive, maxexclusive}
end
defp satisfies_numeric_totaldigits(nil, _), do: true
defp satisfies_numeric_totaldigits(totaldigits, literal) do
(decimal?(literal) && XSD.Decimal.digit_count(literal) <= totaldigits) ||
{:violates, :totaldigits, totaldigits}
end
defp satisfies_numeric_fractiondigits(nil, _), do: true
defp satisfies_numeric_fractiondigits(fractiondigits, literal) do
(decimal?(literal) && XSD.Decimal.fraction_digit_count(literal) <= fractiondigits) ||
{:violates, :fractiondigits, fractiondigits}
end
defp decimal?(%Literal{} = literal) do
# We also have to check for XSD.Integer since RDF.ex implements it as a primitive
XSD.Integer.datatype?(literal) or XSD.Decimal.datatype?(literal)
end
end
|
lib/shex/shape_expressions/node_constraint/numeric_facets.ex
| 0.5769
| 0.620521
|
numeric_facets.ex
|
starcoder
|
defmodule LivePhone.Countries do
@moduledoc """
The `LivePhone.Countries` module can be used to list all known countries and return
them ordered alphabetically, and optionally you can ensure your preferred countries
are prepended to the list instead of mixed in with the rest.
"""
alias ISO
alias LivePhone.Country
@type phone() :: %ExPhoneNumber.Model.PhoneNumber{}
@doc """
This function returns all known countries as `LivePhone.Country` structs,
sorted alphabetically by country name.
Optionally you can specify a list of preferred country codes, these will
subsequently be prepended to the list.
```elixir
# This will return everything alphabetically
abc_countries = LivePhone.Countries.list_countries()
# This will return it alphabetically as well, but push
# the US and GB `LivePhone.Country` structs to the top
# of the list.
my_countries = LivePhone.Countries.list_countries(["US", "GB"])
```
"""
@spec list_countries(list(String.t())) :: [Country.t()]
def list_countries(preferred \\ []) when is_list(preferred) do
preferred = preferred |> Enum.uniq() |> Enum.with_index()
ISO.countries()
|> Enum.map(&Country.from_iso/1)
|> Enum.map(&set_preferred_flag(&1, preferred))
|> Enum.filter(&(&1.region_code && &1.region_code != ""))
|> Enum.sort(&sort_by_name/2)
|> Enum.sort_by(&sort_by_preferred(&1, preferred), :desc)
end
@doc """
This function will look up a `Country` by its country code.
```elixir
Examples:
iex> LivePhone.Countries.get_country("US")
{:ok, %LivePhone.Country{code: "US", flag_emoji: "🇺🇸", name: "United States of America (the)", preferred: false, region_code: "1"}}
iex> LivePhone.Countries.get_country("FAKE")
{:error, :not_found}
```
"""
@spec get_country(String.t()) :: {:ok, Country.t()} | {:error, :not_found}
def get_country(country_code) do
list_countries()
|> Enum.find(&(&1.code == country_code))
|> case do
nil -> {:error, :not_found}
country -> {:ok, country}
end
end
@doc """
This function can be used to try and find the `Country` for a specific
phone number in the `ExPhoneNumber` format.
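For example (illustrative; assumes `ExPhoneNumber.parse/2` succeeds for this number):
```elixir
iex> {:ok, phone} = ExPhoneNumber.parse("+16502530000", "US")
iex> LivePhone.Countries.lookup(phone)
{:ok, %LivePhone.Country{code: "US", flag_emoji: "🇺🇸", name: "United States of America (the)", preferred: false, region_code: "1"}}
```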
"""
@spec lookup(phone()) :: {:ok, Country.t()} | {:error, :not_found}
def lookup(%ExPhoneNumber.Model.PhoneNumber{} = phone) do
country_code = ExPhoneNumber.Metadata.get_region_code_for_number(phone)
list_countries()
|> Enum.find(&(&1.code == country_code))
|> case do
nil -> {:error, :not_found}
country -> {:ok, country}
end
end
@spec set_preferred_flag(Country.t(), list(String.t())) :: Country.t()
defp set_preferred_flag(%Country{} = country, preferred) do
preferred
|> Enum.find(fn {value, _index} -> value == country.code end)
|> case do
nil -> country
{_, _index} -> %{country | preferred: true}
end
end
@spec sort_by_name(%{name: String.t()}, %{name: String.t()}) :: boolean()
defp sort_by_name(%{name: name_1}, %{name: name_2}) do
name_1 < name_2
end
@spec sort_by_preferred(Country.t(), list(String.t())) :: integer()
defp sort_by_preferred(%Country{preferred: false}, _), do: 0
defp sort_by_preferred(%Country{code: country_code}, preferred) do
preferred
|> Enum.find(fn {value, _index} -> value == country_code end)
|> case do
nil -> 0
{_, index} -> length(preferred) - index
end
end
end
|
lib/live_phone/countries.ex
| 0.827306
| 0.823719
|
countries.ex
|
starcoder
|
defmodule AoC.Day12 do
@moduledoc false
def part_1 do
initial_states =
"data/day12-input.txt"
|> File.stream!()
|> Enum.map(&String.trim/1)
|> parse_input_data()
|> Enum.map(&initialize_moon/1)
1..1000
|> Enum.reduce(initial_states, fn _, states -> step(states) end)
|> total_energy()
end
def part_2 do
"data/day12-input.txt"
|> File.stream!()
|> Enum.map(&String.trim/1)
|> parse_input_data()
|> Enum.map(&initialize_moon/1)
|> cycle_length()
end
def apply_gravity_vector(state1, state2),
do: Enum.reduce([:x, :y, :z], state1, &apply_gravity_axis(&2, state2, &1))
def apply_gravity(vec, others), do: Enum.reduce(others, vec, &apply_gravity_vector(&2, &1))
def apply_velocity({{p_x, p_y, p_z}, {v_x, v_y, v_z} = v}),
do: {{p_x + v_x, p_y + v_y, p_z + v_z}, v}
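# Each axis evolves independently and the simulation is time-reversible, so an
# axis first returns to all-zero velocities at exactly half its period. Taking
# the LCM of those half-periods and doubling it gives the full cycle length.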
def cycle_length(initial_states) do
new_states = AoC.Day12.step(initial_states)
cycle_length_per_axis(new_states, :x, 1)
|> Math.lcm(cycle_length_per_axis(new_states, :y, 1))
|> Math.lcm(cycle_length_per_axis(new_states, :z, 1))
|> Kernel.*(2)
end
def energy({p, v}), do: energy_vector(p) * energy_vector(v)
def initialize_moon(position), do: {position, {0, 0, 0}}
def parse_input_data(lines), do: Enum.map(lines, &parse_line/1)
def step(states) do
states
|> Enum.map(fn state ->
others = List.delete(states, state)
state
|> apply_gravity(others)
|> apply_velocity
end)
end
def total_energy(states) do
states
|> Enum.map(&energy/1)
|> Enum.sum()
end
def until_zero_energy(_, 0, counter), do: counter * 2
def until_zero_energy(states, _, counter) do
new_states = AoC.Day12.step(states)
new_energy = AoC.Day12.total_energy(new_states)
until_zero_energy(new_states, new_energy, counter + 1)
end
defp apply_gravity_axis({{p1_x, _, _} = p1, {v1_x, v1_y, v1_z}}, {{p2_x, _, _}, _}, :x)
when p1_x < p2_x,
do: {p1, {v1_x + 1, v1_y, v1_z}}
defp apply_gravity_axis({{p1_x, _, _} = p1, {v1_x, v1_y, v1_z}}, {{p2_x, _, _}, _}, :x)
when p1_x > p2_x,
do: {p1, {v1_x - 1, v1_y, v1_z}}
defp apply_gravity_axis({{_, p1_y, _} = p1, {v1_x, v1_y, v1_z}}, {{_, p2_y, _}, _}, :y)
when p1_y < p2_y,
do: {p1, {v1_x, v1_y + 1, v1_z}}
defp apply_gravity_axis({{_, p1_y, _} = p1, {v1_x, v1_y, v1_z}}, {{_, p2_y, _}, _}, :y)
when p1_y > p2_y,
do: {p1, {v1_x, v1_y - 1, v1_z}}
defp apply_gravity_axis({{_, _, p1_z} = p1, {v1_x, v1_y, v1_z}}, {{_, _, p2_z}, _}, :z)
when p1_z < p2_z,
do: {p1, {v1_x, v1_y, v1_z + 1}}
defp apply_gravity_axis({{_, _, p1_z} = p1, {v1_x, v1_y, v1_z}}, {{_, _, p2_z}, _}, :z)
when p1_z > p2_z,
do: {p1, {v1_x, v1_y, v1_z - 1}}
defp apply_gravity_axis({p1, v1}, _, _), do: {p1, v1}
defp cycle_length_per_axis(states, :x, counter) do
all_zeros =
states
|> Enum.map(fn {_, {velocity, _, _}} -> velocity end)
|> Enum.all?(fn value -> value == 0 end)
if all_zeros do
counter
else
new_states = AoC.Day12.step(states)
cycle_length_per_axis(new_states, :x, counter + 1)
end
end
defp cycle_length_per_axis(states, :y, counter) do
all_zeros =
states
|> Enum.map(fn {_, {_, velocity, _}} -> velocity end)
|> Enum.all?(fn value -> value == 0 end)
if all_zeros do
counter
else
new_states = AoC.Day12.step(states)
cycle_length_per_axis(new_states, :y, counter + 1)
end
end
defp cycle_length_per_axis(states, :z, counter) do
all_zeros =
states
|> Enum.map(fn {_, {_, _, velocity}} -> velocity end)
|> Enum.all?(fn value -> value == 0 end)
if all_zeros do
counter
else
new_states = AoC.Day12.step(states)
cycle_length_per_axis(new_states, :z, counter + 1)
end
end
defp energy_vector(v), do: Tuple.to_list(v) |> Enum.map(&abs/1) |> Enum.sum()
defp parse_line(line) do
# <x=-1, y=0, z=2>
line
|> String.trim_leading("<")
|> String.trim_trailing(">")
|> String.split(~r(\s*,\s*))
# ["x=-1", "y=0", "z=2"]
|> Enum.map(&String.replace(&1, ~r/^[^=]+=/, ""))
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
end
end
|
lib/aoc/day_12.ex
| 0.559892
| 0.423518
|
day_12.ex
|
starcoder
|
defmodule PageObject.Collections.Collection do
@moduledoc """
Collections are used to scope a CSS query to multiple page elements that follow the same html structure.
This allows you to interact with each element in a consistent and expressive manner in your tests.
Collections currently support all the actions and queries available in PageObject, allowing you to query specific
attributes of collection items or interact with actionable parts of any collection item.
"""
@doc """
Defines a collection scope based on the `:item_scope` passed in `opts`. A camelized module is generated from `collection_name`. The `:item_scope`
is used as a CSS selector scope for the queries and actions defined within the block.
The generated module provides two functions for querying the items in the collection:
`all/0`, which returns all items in the collection, and `get/1`, which returns the item at the given index.
## Example
```
defmodule MyPage do
# make the collection macro available via PageObject
use PageObject
collection :menu_items, item_scope: ".menu .item" do
# all DOM related query and action macros called here will have their selectors scoped to ".menu .item"
# and will be available as methods on the generated collection module: `MyPage.MenuItems`
clickable :click, "a"
end
end
# test usage
test "I can logout by clicking the last menu item" do
# get the last menu item
MyPage.MenuItems.all
|> List.last
# most scoped actions and queries take a Hound element as the first argument
|> MyPage.MenuItems.click
end
```
"""
defmacro collection(collection_name, opts, do: block) do
generate_module(collection_name, opts, block)
end
@doc """
The same as `collection/3` but does not require a block passed in
"""
defmacro collection(collection_name, opts) do
generate_module(collection_name, opts)
end
defp generate_module(collection_name, opts, block \\ nil) do
quote do
module = Module.concat([__MODULE__, Inflex.camelize(unquote(to_string(collection_name)))])
defmodule module do
if ! Module.get_attribute(__MODULE__, :scope) do
Module.register_attribute(module, :scope, accumulate: false)
end
Module.put_attribute(module, :scope, unquote(opts[:item_scope]) <> " ")
use PageObject
unquote(block)
def get(index) do
Enum.fetch!(find_all_elements(:css, unquote(opts[:item_scope])), index)
end
def all() do
find_all_elements(:css, unquote(opts[:item_scope]))
end
Module.put_attribute(module, :scope, "")
end
end
end
end
|
lib/collections/collection.ex
| 0.835181
| 0.852752
|
collection.ex
|
starcoder
|
defmodule OMG.Utxo.Position do
@moduledoc """
Representation of a UTXO position in the child chain, providing encoding/decoding to/from formats digestible in `Eth`
and in the `OMG.DB`
"""
# these two offset constants are driven by the constants from the RootChain.sol contract
@block_offset 1_000_000_000
@transaction_offset 10_000
alias OMG.Utxo
require Utxo
import Utxo, only: [is_position: 3]
@type t() :: {
:utxo_position,
# blknum
non_neg_integer,
# txindex
non_neg_integer,
# oindex
non_neg_integer
}
@type db_t() :: {non_neg_integer, non_neg_integer, non_neg_integer}
@spec encode(t()) :: non_neg_integer()
def encode(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex),
do: blknum * @block_offset + txindex * @transaction_offset + oindex
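# e.g. Utxo.position(1_000, 1, 0) encodes to
# 1_000 * 1_000_000_000 + 1 * 10_000 + 0 = 1_000_000_010_000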
@spec decode!(number()) :: t()
def decode!(encoded) do
{:ok, decoded} = decode(encoded)
decoded
end
@spec decode(number()) :: {:ok, t()} | {:error, :encoded_utxo_position_too_low}
def decode(encoded) when is_integer(encoded) and encoded >= @block_offset do
{blknum, txindex, oindex} = get_position(encoded)
{:ok, Utxo.position(blknum, txindex, oindex)}
end
def decode(encoded) when is_number(encoded), do: {:error, :encoded_utxo_position_too_low}
@spec non_zero?(t()) :: boolean()
def non_zero?(Utxo.position(0, 0, 0)), do: false
def non_zero?(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex), do: true
@spec to_db_key(t()) :: db_t()
def to_db_key(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex),
do: {blknum, txindex, oindex}
@spec from_db_key(db_t()) :: t()
def from_db_key({blknum, txindex, oindex}) when is_position(blknum, txindex, oindex),
do: Utxo.position(blknum, txindex, oindex)
def blknum(Utxo.position(blknum, _, _)), do: blknum
def txindex(Utxo.position(_, txindex, _)), do: txindex
def oindex(Utxo.position(_, _, oindex)), do: oindex
@spec get_position(pos_integer()) :: {non_neg_integer, non_neg_integer, non_neg_integer}
defp get_position(encoded) when is_integer(encoded) and encoded > 0 do
blknum = div(encoded, @block_offset)
txindex = encoded |> rem(@block_offset) |> div(@transaction_offset)
oindex = rem(encoded, @transaction_offset)
{blknum, txindex, oindex}
end
@doc """
Based on the contract parameters determines whether UTXO position provided was created by a deposit
"""
@spec is_deposit?(__MODULE__.t()) :: boolean()
def is_deposit?(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex) do
{:ok, interval} = OMG.Eth.RootChain.get_child_block_interval()
rem(blknum, interval) != 0
end
end
|
apps/omg/lib/omg/utxo/position.ex
| 0.897002
| 0.439266
|
position.ex
|
starcoder
|
defmodule ElxValidation.Storage do
@moduledoc """
### File
- the field under validation must be a file and at leas 1 kilobyte size
- empty value return error unless field is under nullable validation
```
data = %{
logo: "LOGO FILE",
}
rules = [
%{
field: "logo",
validate: ["file","min_size:10","max_size:20","mimes:png,jpg","mime_types:image/png,image/jpeg"]
},
]
```
- file: data must be a file of at least 1 kB
- min_size: minimum size (kB) of the file
- max_size: maximum size (kB) of the file
- mimes: list of accepted file extensions
- mime_types: list of accepted MIME types
"""
def is_file(target) do
File.exists?(target.path)
rescue
_ ->
false
end
def max_size(target, value) do
if is_file(target) do
%{size: size} = File.stat!(target.path)
# file size in kB must not exceed the given maximum
div(size, 1024) <= String.to_integer(value)
else
false
end
rescue
_ ->
false
end
def min_size(target, value) do
if is_file(target) do
%{size: size} = File.stat!(target.path)
# file size in kB must be at least the given minimum
div(size, 1024) >= String.to_integer(value)
else
false
end
rescue
_ ->
false
end
def mimes(target, value) do
if is_file(target) do
extension = Path.extname(target.filename)
v =
value
|> String.split(",")
Enum.find(v, fn x -> "." <> x == extension end) != nil
else
false
end
rescue
_ ->
false
end
def mime_types(target, value) do
if is_file(target) do
v =
value
|> String.split(",")
Enum.find(v, fn x -> x == target.content_type end) != nil
else
false
end
rescue
_ ->
false
end
end
|
lib/rules/storage.ex
| 0.714429
| 0.766905
|
storage.ex
|
starcoder
|
defmodule Data.Parser do
@moduledoc """
Higher-order functions to create and modify parsers.
"""
alias FE.{Maybe, Result}
alias MapSet, as: Set
import Result, only: [ok: 1, error: 1]
import Maybe, only: [just: 1, nothing: 0]
defdelegate kv(fields), to: Data.Parser.KV, as: :new
@typedoc """
A parser is a function that takes any value as input and produces a `Result.t`.
More specifically, a `parser(a,b)` is a function that takes any input and
returns `{:ok, a}` on a successful parse or `{:error, b}` if parsing failed.
"""
@type t(a, b) :: (any -> Result.t(a, b))
@doc """
Takes a boolean function `p` (the predicate), and returns a parser
that parses successfully those values for which `p` is `true`.
If the predicate returns `false` the parser will return a domain `Error`
with the input value and the predicate functions listed in the error details.
## Examples
iex> {:error, e} = Data.Parser.predicate(&String.valid?/1).('charlists are not ok')
...> e.reason
:predicate_not_satisfied
...> e.details
%{input: 'charlists are not ok', predicate: &String.valid?/1}
iex> Data.Parser.predicate(&String.valid?/1).("this is fine")
{:ok, "this is fine"}
iex> Data.Parser.predicate(&(&1<10)).(5)
{:ok, 5}
iex> {:error, e} = Data.Parser.predicate(&(&1<10)).(55)
...> e.details.input
55
"""
@spec predicate((a -> boolean())) :: t(a, Error.t()) when a: var
def predicate(p) when is_function(p, 1) do
fn x ->
case p.(x) do
true ->
Result.ok(x)
_ ->
Error.domain(
:predicate_not_satisfied,
%{predicate: p, input: x}
)
|> Result.error()
end
end
end
@doc """
Takes a boolean function `p` (the predicate) and a default value, and returns
a parser that parses successfully those values for which `p` is `true`.
If the predicate function applied to the input returns `true`, the parser
wraps the input in an `{:ok, input}` tuple.
If the predicate function returns `false`, and `default` is a value, the
parser returns `{:error, default}`
If the predicate returns `false` and `default` is a unary function, the
parser returns `{:error, default.(the_failed_input)}`.
## Examples
iex> Data.Parser.predicate(&String.valid?/1, "invalid string").('charlists are not ok')
{:error, "invalid string"}
iex> Data.Parser.predicate(&String.valid?/1, "invalid string").(<<"neither are invalid utf sequences", 99999>>)
{:error, "invalid string"}
iex> Data.Parser.predicate(&String.valid?/1, "invalid string").("this is fine")
{:ok, "this is fine"}
iex> Data.Parser.predicate(&String.valid?/1, fn x -> "the bad value is: #\{inspect x}" end).(12345)
{:error, "the bad value is: 12345"}
"""
@spec predicate((a -> boolean()), b | (a -> b)) :: t(a, b) when a: var, b: var
def predicate(p, default) when is_function(default, 1) do
fn x ->
case p.(x) do
true -> ok(x)
false -> default.(x) |> error()
end
end
end
def predicate(p, default), do: predicate(p, fn _ -> default end)
@doc """
Takes a list of values, `elements`, and returns a parser that returns
successfully if its input is present in `elements`.
If the input is not a member of `elements` and `default` is a value, the
parser fails with `{:error, default}`. If `default` is a unary function, the
parser fails with `{:error, default.(input)}`.
## Examples
iex> Data.Parser.one_of([:he, :ne, :ar, :kr, :xe, :rn], "not a noble gas").(:he)
{:ok, :he}
iex> Data.Parser.one_of([:he, :ne, :ar, :kr, :xe, :rn], "not a noble gas").(:n)
{:error, "not a noble gas"}
iex> Data.Parser.one_of([:he, :ne, :ar, :kr, :xe, :rn],
...> fn x -> "not a noble gas: #\{inspect x}" end).(:o)
{:error, "not a noble gas: :o"}
"""
@spec one_of([a], b | (a -> b)) :: t(a, b) when a: var, b: var
def one_of(elements, default) when is_function(default, 1) do
fn x ->
case Enum.member?(elements, x) do
true -> ok(x)
false -> default.(x) |> error()
end
end
end
def one_of(elements, default), do: one_of(elements, fn _ -> default end)
@doc """
Takes a parser `p` and creates a parser that will successfully parse lists of values that all satisfy `p`.
Specifically, the input:
1) Must be a list
2) `p` must parse successfully all elements in the input
If this is the case, the output will be `{:ok, list_of_parsed_values}`.
If not all values can be parsed with `p`, the result will be the original
parse error, enriched with the field `:failed_element` in the error details.
If the input is not a list, the domain error `:not_a_list` will be returned.
## Examples
iex> Data.Parser.list(Data.Parser.BuiltIn.integer()).([])
{:ok, []}
iex> Data.Parser.list(Data.Parser.BuiltIn.integer()).([1,2,3])
{:ok, [1, 2, 3]}
iex> {:error, e} = Data.Parser.list(Data.Parser.BuiltIn.integer()).(%{a: :b})
...> Error.reason(e)
:not_a_list
iex> {:error, e} = Data.Parser.list(Data.Parser.BuiltIn.integer()).([1, :b, 3])
...> Error.reason(e)
:not_an_integer
...> Error.details(e)
%{failed_element: :b}
"""
@spec list(t(a, Error.t())) :: t([a], Error.t()) when a: var
def list(p) do
fn
xs when is_list(xs) ->
Result.fold(Result.ok([]), xs, fn el, acc ->
case p.(el) do
{:ok, parsed} ->
Result.ok([parsed | acc])
{:error, why} ->
why
|> Error.map_details(&Map.put(&1, :failed_element, el))
|> Result.error()
end
end)
|> Result.map(&Enum.reverse/1)
_other ->
Error.domain(:not_a_list) |> Result.error()
end
end
@doc """
Creates a parser that behaves exactly the same as the `list/1` parser, except
that it will return the domain error `:empty_list` if applied to an empty list.
## Examples
iex> Data.Parser.nonempty_list(Data.Parser.BuiltIn.integer()).([1, 2, 3])
{:ok, [1, 2, 3]}
iex> {:error, e} = Data.Parser.nonempty_list(Data.Parser.BuiltIn.integer()).([1, :b, 3])
...> Error.reason(e)
:not_an_integer
...> Error.details(e)
%{failed_element: :b}
iex> {:error, e} = Data.Parser.nonempty_list(Data.Parser.BuiltIn.integer()).([])
...> Error.reason(e)
:empty_list
"""
@spec nonempty_list(t(a, Error.t())) :: t(nonempty_list(a), Error.t()) when a: var
def nonempty_list(p) do
fn
[] -> Error.domain(:empty_list) |> Result.error()
xs -> list(p).(xs)
end
end
@doc """
Takes a parser `p` and creates a parser that will successfully parse sets of
values that all satisfy `p`.
Specifically, the input:
1) must be a `MapSet`
2) all elements of the input set must be parsed correctly by `p`
If this is the case, the output will be `{:ok, set_of_parsed_values}`.
If not all values can be parsed with `p`, the result will be the original parse
error, enriched with the field `:failed_element` in the error details.
If the input is not a `MapSet`, the domain error `:not_a_set` will be returned.
## Examples
iex> {:ok, s} = Data.Parser.set(Data.Parser.BuiltIn.integer()).(MapSet.new())
...> s
#MapSet<[]>
iex> {:ok, s} = Data.Parser.set(Data.Parser.BuiltIn.integer()).(MapSet.new([1,2,3]))
...> s
#MapSet<[1, 2, 3]>
iex> {:error, e} = Data.Parser.set(Data.Parser.BuiltIn.integer()).(%{a: :b})
...> Error.reason(e)
:not_a_set
iex> {:error, e} = Data.Parser.set(Data.Parser.BuiltIn.integer()).(MapSet.new([1, :b, 3]))
...> Error.reason(e)
:not_an_integer
...> Error.details(e)
%{failed_element: :b}
"""
@spec set(t(a, Error.t())) :: t(Set.t(a), Error.t()) when a: var
def set(p) do
fn
%Set{} = set ->
set
# to work around %Set{} opaqueness violation
|> (&apply(Set, :to_list, [&1])).()
|> list(p).()
|> Result.map(&Set.new/1)
_other ->
Error.domain(:not_a_set) |> Result.error()
end
end
@doc """
Takes a parser and transforms it so that it works 'inside' `Maybe.t` values.
If the original parser works on `String.t()`, the new one will work on
`Maybe.t(String.t())`.
Successful parses on `just()` values return `{:ok, {:just, result_of_parse}}`.
Unsuccessful parses on `just()` values return `{:error, parse_error}`.
The parser will successfully return `{:ok, :nothing}` when applied to `:nothing`.
## Examples
iex> Data.Parser.maybe(
...> Data.Parser.predicate( &String.valid?/1, :invalid)).({:just, "good"})
{:ok, {:just, "good"}}
iex> Data.Parser.maybe(
...> Data.Parser.predicate( &String.valid?/1, :invalid)).({:just, 'bad'})
{:error, :invalid}
iex> Data.Parser.maybe(
...> Data.Parser.predicate( &String.valid?/1, :invalid)).(:nothing)
{:ok, :nothing}
"""
@spec maybe(t(a, b)) :: t(Maybe.t(a), Maybe.t(b)) when a: var, b: var
def maybe(parser) do
fn
{:just, val} ->
case parser.(val) do
{:ok, res} -> ok(just(res))
{:error, e} -> error(e)
end
:nothing ->
ok(nothing())
end
end
@doc """
Takes a list of parsers and creates a parser that returns the first
successful parse result, or an error listing the parsers and the failed
input.
## Examples
iex> Data.Parser.union(
...> [Data.Parser.BuiltIn.integer(),
...> Data.Parser.BuiltIn.boolean()]).(true)
{:ok, true}
iex> Data.Parser.union(
...> [Data.Parser.BuiltIn.integer(),
...> Data.Parser.BuiltIn.boolean()]).(1)
{:ok, 1}
iex> {:error, e} = Data.Parser.union(
...> [Data.Parser.BuiltIn.integer(),
...> Data.Parser.BuiltIn.boolean()]).(:atom)
...> Error.reason(e)
:no_parser_applies
...> Error.details(e).input
:atom
"""
@spec union(list(t(any(), any()))) :: t(any(), any())
def union(parsers) when is_list(parsers) do
fn
input ->
Enum.find_value(
parsers,
error(Error.domain(:no_parser_applies, %{input: input, parsers: parsers})),
fn parser ->
case parser.(input) do
{:ok, _} = success -> success
_ -> false
end
end
)
end
end
end
|
lib/data/parser.ex
| 0.839257
| 0.763153
|
parser.ex
|
starcoder
|
defmodule TflInterp do
@moduledoc """
Tensorflow lite interpreter for Elixir.
Deep Learning inference framework for embedded devices.
## Design policy (Features)
TflInterp is designed based on the following policy.
1. Provide only Deep Learning inference. It targets resource-poor devices such as IoT and mobile.
2. Easy to understand. The inference part, excluding pre/post-processing, can be written in a few lines.
3. Use trained models from major Deep Learning frameworks that are easy to obtain.
4. Multiple inference models can be used from a single application.
5. There are few dependent modules. It does not have image processing or matrix calculation functions.
6. TflInterp does not block the erlang/elixir process scheduler. It runs as an OS process outside of elixir.
7. The back-end inference engine can be easily replaced. It's easy to keep up with the latest Deep Learning technology.
And I'm trying to make TflInterp easy to install.
### A short history
The development of TflInterp started in Nov 2020. The original idea was to use Nerves to create an AI remote-controlled car.
In the first version, I implemented Yolo3, but the design strongly depended on the model, which made it difficult to use in other applications.
Reflecting on that mistake, I redesigned TflInterp according to the above design guidelines.
## Installation
Since 0.1.3, the installation method of this module has changed.
You may need to remove previously installed TflInterp before installing new version.
There are two installation methods. You can choose either one according to your purpose.
1. Like any other elixir module, add TflInterp to the dependency list in the mix.exs.
```
def deps do
[
...
{:tfl_interp, github: "shoz-f/tfl_interp", branch: "nerves"}
]
end
```
2. Download TflInterp to a directory in advance, and add that path to the dependency list in mix.exs.
```
# download TflInterp in advance.
$ cd /home/{your home}/workdir
$ git clone -b nerves https://github.com/shoz-f/tfl_interp.git
```
```
def deps do
[
...
{:tfl_interp, path: "/home/{your home}/workdir/tfl_interp"}
]
end
```
Then you run the following commands in your application project.
For native application:
```
$ mix deps.get
$ mix compile
```
For Nerves application:
```
$ export MIX_TARGET=rpi3 # <- specify target device tag
$ mix deps.get
$ mix firmware
```
The build takes a long time to finish, because it downloads the required files - Tensorflow sources,
ARM toolchain [^1], etc - at the first build time.
Method 1 saves the downloaded files under "{your app}/deps/tfl_interp". On the other hand,
method 2 saves them under "/home/{your home}/workdir/tfl_interp".
If you want to reuse the downloaded files in other applications, we recommend Method 2.
In either method 1 or 2, the external modules required for Tensorflow lite are stored under
"{your app}/_build/{target}/.cmake_build" according to the cmakelists.txt that comes with Tensorflow.
[^1] Unfortunately, the ARM toolchain that comes with Nerves cannot build Tensorflow lite. We need to get the toolchain recommended by the Tensorflow project.
After installation, you will have the directory tree like these:
Method 1
```
work_dir
+- your-app
+- _build/
| +- dev/
| +- .cmake_build/ --- CMakeCache.txt and external modules that Tensorflow lite depends on.
| | The cmake build outputs are stored here also.
| +- lib/
| | +- tfl_interp
| | +- ebin/
| | +- priv
| | +- tfl_interp --- executable: tensorflow interpreter.
| :
|
+- deps/
| + tfl_interp
| | +- 3rd_party/ --- Tensorflow sources, etc.
| | +- lib/ --- TflInterp module.
| | +- src/ --- tfl_interp C++ sources.
| | +- test/
| | +- toolchain/ --- ARM toolchains for Nerves.
| | +- CMakeLists.txt --- CMake configuration for for building tfl_interp.
| | +- mix.exs
| :
|
+- lib/
+- test/
+- mix.exs
```
Method 2
```
work_dir
+- your-app
| +- _build/
| | +- dev/
| | +- .cmake_build/ --- CMakeCache.txt and external modules that Tensorflow lite depends on.
| | | The cmake build outputs are stored here also.
| | +- lib/
| | | +- tfl_interp
| | | +- ebin/
| | | +- priv
| | | +- tfl_interp --- executable: tensorflow interpreter.
| | :
| |
| +- deps/
| +- lib/
| +- test/
| +- mix.exs
|
+- tfl_interp
+- 3rd_party/ --- Tensorflow sources, etc.
+- lib/ --- TflInterp module.
+- src/ --- tfl_interp C++ sources.
+- test/
+- toolchain/ --- ARM toolchains for Nerves.
+- CMakeLists.txt --- CMake configuration for for building tfl_interp.
+- mix.exs
```
## Basic Usage
You get the trained tflite model and save it in a directory that your application can read.
"your-app/priv" may be good choice.
```
$ cp your-trained-model.tflite ./priv
```
Next, you will create a module that interfaces with the deep learning model.
The module will need pre-processing and post-processing in addition to inference
processing, as in the example following. TflInterp provides inference processing
only.
You put `use TflInterp` at the beginning of your module, specifying the model path as an option. In the inference
section, you will put data input to the model (`TflInterp.set_input_tensor/3`), inference execution (`TflInterp.invoke/1`),
and inference result retrieval (`TflInterp.get_output_tensor/2`).
```elixir:your_model.ex
defmodule YourApp.YourModel do
use TflInterp, model: "priv/your-trained-model.tflite"
def predict(data) do
# preprocess
# to convert the data to be inferred to the input format of the model.
input_bin = convert-float32-binaries(data)
# inference
# typical I/O data for Tensorflow lite models is a serialized 32-bit float tensor.
output_bin =
__MODULE__
|> TflInterp.set_input_tensor(0, input_bin)
|> TflInterp.invoke()
|> TflInterp.get_output_tensor(0)
# postprocess
# add your post-processing here.
# you may need to reshape output_bin to tensor at first.
tensor = output_bin
|> Nx.from_binary({:f, 32})
|> Nx.reshape({size-x, size-y, :auto})
* your-postprocessing *
...
end
end
```
"""
@timeout 300000
@padding 0
defmacro __using__(opts) do
quote generated: true, location: :keep do
use GenServer
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
def init(opts) do
executable = Application.app_dir(:tfl_interp, "priv/tfl_interp")
opts = Keyword.merge(unquote(opts), opts)
tfl_model = Keyword.get(opts, :model)
tfl_label = Keyword.get(opts, :label, "none")
tfl_opts = Keyword.get(opts, :opts, "")
port = Port.open({:spawn_executable, executable}, [
{:args, String.split(tfl_opts) ++ [tfl_model, tfl_label]},
{:packet, 4},
:binary
])
{:ok, %{port: port}}
end
def handle_call(cmd_line, _from, state) do
Port.command(state.port, cmd_line)
response = receive do
{_, {:data, <<result::binary>>}} -> {:ok, result}
after
Keyword.get(unquote(opts), :timeout, 300000) -> {:timeout}
end
{:reply, response, state}
end
def terminate(_reason, state) do
Port.close(state.port)
end
end
end
@doc """
Get the properties of the tflite model.
## Parameters
* mod - module name
"""
def info(mod) do
cmd = 0
case GenServer.call(mod, <<cmd::8>>, @timeout) do
{:ok, result} -> Poison.decode(result)
any -> any
end
end
@doc """
Stop the tflite interpreter.
## Parameters
* mod - module name
"""
def stop(mod) do
GenServer.stop(mod)
end
@doc """
Put a flat binary to the input tensor on the interpreter.
## Parameters
* mod - module name
* index - index of input tensor in the model
* bin - input data - flat binary, cf. serialized tensor
"""
def set_input_tensor(mod, index, bin) do
cmd = 1
case GenServer.call(mod, <<cmd::8, index::8, bin::binary>>, @timeout) do
{:ok, result} -> Poison.decode(result)
any -> any
end
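# return `mod` (rather than the decoded response) so calls can be piped,
# as in the usage example in the moduledoc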
mod
end
@doc """
Invoke prediction.
## Parameters
* mod - module name
"""
def invoke(mod) do
cmd = 2
case GenServer.call(mod, <<cmd::8>>, @timeout) do
{:ok, result} -> Poison.decode(result)
any -> any
end
mod
end
@doc """
Get the flat binary from the output tensor on the interpreter.
## Parameters
* mod - module name
* index - index of output tensor in the model
"""
def get_output_tensor(mod, index) do
cmd = 3
case GenServer.call(mod, <<cmd::8, index::8>>, @timeout) do
{:ok, result} -> result
any -> any
end
end
@doc """
Execute post-processing: non-maximum suppression (NMS).
## Parameters
* mod - module name
* num_boxes - number of candidate boxes
* num_class - number of category class
* boxes - binaries, serialized boxes tensor[`num_boxes`][4]; dtype: float32
* scores - binaries, serialized score tensor[`num_boxes`][`num_class`]; dtype: float32
* iou_threshold - IOU threshold
* score_threshold - score cutoff threshold
* sigma - soft IOU parameter
"""
def non_max_suppression_multi_class(mod, {num_boxes, num_class}, boxes, scores, iou_threshold \\ 0.5, score_threshold \\ 0.25, sigma \\ 0.0) do
cmd = 4
case GenServer.call(mod, <<cmd::8, @padding::8*3, num_boxes::little-integer-32, num_class::little-integer-32, iou_threshold::little-float-32, score_threshold::little-float-32, sigma::little-float-32>> <> boxes <> scores, @timeout) do
{:ok, result} -> Poison.decode(result)
any -> any
end
end
end
|
lib/tfl_interp.ex
| 0.838382
| 0.896251
|
tfl_interp.ex
|
starcoder
|
defmodule TextDelta.Composition do
@moduledoc """
The composition of two non-concurrent deltas into a single delta.
The deltas are composed in such a way that the resulting delta has the same
effect on text state as applying one delta and then the other:
S ○ compose(Oa, Ob) = S ○ Oa ○ Ob
In simpler terms, composition allows you to take many deltas and transform
them into one of equal effect. When used together with Operational
Transformation, this reduces system overhead when tracking unsynced
changes.
"""
alias TextDelta.{Operation, Attributes, Iterator}
@doc """
Composes two deltas into a single equivalent delta.
## Example
iex> foo = TextDelta.insert(TextDelta.new(), "Foo")
%TextDelta{ops: [%{insert: "Foo"}]}
iex> bar = TextDelta.insert(TextDelta.new(), "Bar")
%TextDelta{ops: [%{insert: "Bar"}]}
iex> TextDelta.compose(bar, foo)
%TextDelta{ops: [%{insert: "FooBar"}]}
"""
@spec compose(TextDelta.t(), TextDelta.t()) :: TextDelta.t()
def compose(first, second) do
{TextDelta.operations(first), TextDelta.operations(second)}
|> iterate()
|> do_compose(TextDelta.new())
|> TextDelta.trim()
end
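# `do_compose/2` consumes both operation streams in lock-step; each clause
# below handles one pairing of operation kinds (insert/retain/delete).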
defp do_compose({{nil, _}, {nil, _}}, result) do
result
end
defp do_compose({{nil, _}, {op_b, remainder_b}}, result) do
List.foldl([op_b | remainder_b], result, &TextDelta.append(&2, &1))
end
defp do_compose({{op_a, remainder_a}, {nil, _}}, result) do
List.foldl([op_a | remainder_a], result, &TextDelta.append(&2, &1))
end
defp do_compose(
{{%{insert: _} = ins_a, remainder_a},
{%{insert: _} = ins_b, remainder_b}},
result
) do
{[ins_a | remainder_a], remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, ins_b))
end
defp do_compose(
{{%{insert: el_a} = ins, remainder_a},
{%{retain: _} = ret, remainder_b}},
result
) do
insert = Operation.insert(el_a, compose_attributes(ins, ret))
{remainder_a, remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, insert))
end
defp do_compose(
{{%{insert: _}, remainder_a}, {%{delete: _}, remainder_b}},
result
) do
{remainder_a, remainder_b}
|> iterate()
|> do_compose(result)
end
defp do_compose(
{{%{delete: _} = del, remainder_a}, {%{insert: _} = ins, remainder_b}},
result
) do
{[del | remainder_a], remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, ins))
end
defp do_compose(
{{%{delete: _} = del, remainder_a}, {%{retain: _} = ret, remainder_b}},
result
) do
{remainder_a, [ret | remainder_b]}
|> iterate()
|> do_compose(TextDelta.append(result, del))
end
defp do_compose(
{{%{delete: _} = del_a, remainder_a},
{%{delete: _} = del_b, remainder_b}},
result
) do
{remainder_a, [del_b | remainder_b]}
|> iterate()
|> do_compose(TextDelta.append(result, del_a))
end
defp do_compose(
{{%{retain: _} = ret, remainder_a}, {%{insert: _} = ins, remainder_b}},
result
) do
{[ret | remainder_a], remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, ins))
end
defp do_compose(
{{%{retain: len} = ret_a, remainder_a},
{%{retain: _} = ret_b, remainder_b}},
result
) do
retain = Operation.retain(len, compose_attributes(ret_a, ret_b, true))
{remainder_a, remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, retain))
end
defp do_compose(
{{%{retain: _}, remainder_a}, {%{delete: _} = del, remainder_b}},
result
) do
{remainder_a, remainder_b}
|> iterate()
|> do_compose(TextDelta.append(result, del))
end
defp iterate(stream), do: Iterator.next(stream, :delete)
defp compose_attributes(op_a, op_b, keep_nil \\ false) do
attrs_a = Map.get(op_a, :attributes)
attrs_b = Map.get(op_b, :attributes)
Attributes.compose(attrs_a, attrs_b, keep_nil)
end
end
|
lib/text_delta/composition.ex
| 0.791378
| 0.823896
|
composition.ex
|
starcoder
|
defmodule Daguex.Pipeline.Context do
@moduledoc """
This module defines the context for `Daguex.Processor` to process.
It defines a `#{__MODULE__}` struct and the main functions for working with it.
* `image` - the image object that can be used for identifying the specified image,
and for keeping necessary metadata that should be persisted
* `image_file` - a reference to a locally accessible image file
* `opts` - the options passed to the pipeline
* `private` - data shared across different processors
* `done` - a list containing all executed processors and their
corresponding results
"""
@type done_t :: {module, any()}
@type t :: %__MODULE__{
image: Daguex.Image.t,
image_file: Daguex.ImageFile.t | nil,
local_storage: {Daguex.Storage.t, any},
opts: keyword,
private: map,
done: [done_t]
}
@enforce_keys [:image, :local_storage]
defstruct [
image: nil,
image_file: nil,
local_storage: nil,
opts: [],
private: %{},
done: []
]
alias __MODULE__
def put_image_file(context = %Context{}, image_file = %Daguex.ImageFile{}) do
%{context | image_file: image_file}
end
@doc """
Assigns a new **private** key and value in the context.
This storage is meant to be used by processors and frameworks to avoid writing
to the image data files.
"""
def put_private(context = %Context{private: private}, key, value) when is_atom(key) do
%{context | private: Map.put(private, key, value)}
end
@doc """
Gets the **private** value assigned to `key` in the context.
"""
def get_private(%Context{private: private}, key, default_value \\ nil) do
Map.get(private, key, default_value)
end
def put_opts(context = %Context{opts: opts}, key, value) when is_atom(key) do
%{context | opts: Keyword.put(opts, key, value)}
end
@doc """
Prepends an executed processor to the done fields of the context
"""
def done(context = %Context{done: done}, processor, result \\ :ok) when is_atom(processor) do
%{context | done: [{processor, result}|done]}
end
end
|
lib/daguex/pipeline/context.ex
| 0.806052
| 0.731778
|
context.ex
|
starcoder
|
defmodule ExUnit.Filters do
@moduledoc """
Conveniences for parsing and evaluating filters.
"""
@type t :: list({atom, Regex.t | String.Chars.t} | atom)
@doc """
Parses filters out of a path.
Determines whether a given file path (supplied to ExUnit/Mix as arguments
on the command line) includes a line number filter, and if so returns the
appropriate ExUnit configuration options.
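## Examples
iex> ExUnit.Filters.parse_path("test/some/test.exs:42")
{"test/some/test.exs", [exclude: [:test], include: [line: "42"]]}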
"""
@spec parse_path(String.t) :: {String.t, any}
def parse_path(file) do
{paths, [line]} = file |> String.split(":") |> Enum.split(-1)
case Integer.parse(line) do
{_, ""} ->
{Enum.join(paths, ":"), exclude: [:test], include: [line: line]}
_ ->
{file, []}
end
end
@doc """
Normalizes include and excludes to remove duplicates
and keep precedence.
## Examples
iex> ExUnit.Filters.normalize(nil, nil)
{[], []}
iex> ExUnit.Filters.normalize([:foo, :bar, :bar], [:foo, :baz])
{[:foo, :bar], [:baz]}
"""
@spec normalize(t | nil, t | nil) :: {t, t}
def normalize(include, exclude) do
include = include |> List.wrap |> Enum.uniq
exclude = exclude |> List.wrap |> Enum.uniq |> Kernel.--(include)
{include, exclude}
end
@doc """
Parses the given filters, as one would receive from the command line.
## Examples
iex> ExUnit.Filters.parse(["foo:bar", "baz", "line:9", "bool:true"])
[{:foo, "bar"}, :baz, {:line, "9"}, {:bool, "true"}]
"""
@spec parse([String.t]) :: t
def parse(filters) do
Enum.map filters, fn filter ->
case String.split(filter, ":", parts: 2) do
[key, value] -> {String.to_atom(key), value}
[key] -> String.to_atom(key)
end
end
end
@doc """
Evaluates the `include` and `exclude` filters against the given `tags`.
Some filters, like `:line`, may require the whole test collection to
find the closest line, that's why it must also be passed as argument.
Filters can either be a regular expression or any data structure
that implements `String.Chars`, which is invoked before comparing
the filter with the tag value.
## Examples
iex> ExUnit.Filters.eval([foo: "bar"], [:foo], %{foo: "bar"}, [])
:ok
iex> ExUnit.Filters.eval([foo: "bar"], [:foo], %{foo: "baz"}, [])
{:error, "due to foo filter"}
"""
@spec eval(t, t, map, [ExUnit.Test.t]) :: :ok | {:error, binary}
def eval(include, exclude, tags, collection) when is_map(tags) do
skip? = not Enum.any?(include, &has_tag(&1, %{skip: true}, collection))
case Map.fetch(tags, :skip) do
{:ok, msg} when is_binary(msg) and skip? ->
{:error, msg}
{:ok, true} when skip? ->
{:error, "due to skip tag"}
_ ->
excluded = Enum.find_value exclude, &has_tag(&1, tags, collection)
if !excluded or Enum.any?(include, &has_tag(&1, tags, collection)) do
:ok
else
{:error, "due to #{excluded} filter"}
end
end
end
defp has_tag({:line, line}, %{line: _} = tags, collection) do
line = to_integer(line)
tags.line <= line and
closest_test_before_line(line, collection).tags.line == tags.line
end
defp has_tag({key, %Regex{} = value}, tags, _collection) when is_atom(key) do
case Map.fetch(tags, key) do
{:ok, tag} -> to_string(tag) =~ value and key
_ -> false
end
end
defp has_tag({key, value}, tags, _collection) when is_atom(key) do
case Map.fetch(tags, key) do
{:ok, ^value} -> key
{:ok, tag} -> compare(to_string(tag), to_string(value)) and key
_ -> false
end
end
defp has_tag(key, tags, _collection) when is_atom(key),
do: Map.has_key?(tags, key) and key
defp to_integer(integer) when is_integer(integer), do: integer
defp to_integer(integer) when is_binary(integer), do: String.to_integer(integer)
defp compare("Elixir." <> tag1, tag2), do: compare(tag1, tag2)
defp compare(tag1, "Elixir." <> tag2), do: compare(tag1, tag2)
defp compare(tag, tag), do: true
defp compare(_, _), do: false
defp closest_test_before_line(line, collection) do
Enum.min_by(collection, fn %ExUnit.Test{tags: %{line: test_line}} ->
if line - test_line >= 0 do
line - test_line
else
:infinity
end
end)
end
end
|
lib/ex_unit/lib/ex_unit/filters.ex
| 0.915922
| 0.626453
|
filters.ex
|
starcoder
|
defmodule AWS.CodeCommit do
@moduledoc """
AWS CodeCommit
This is the *AWS CodeCommit API Reference*.
This reference provides descriptions of the operations and data types for AWS
CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
* `BatchGetRepositories`, which returns information about one or
more repositories associated with your AWS account.
* `CreateRepository`, which creates an AWS CodeCommit repository.
* `DeleteRepository`, which deletes an AWS CodeCommit repository.
* `GetRepository`, which returns information about a specified
repository.
* `ListRepositories`, which lists all AWS CodeCommit repositories
associated with your AWS account.
* `UpdateRepositoryDescription`, which sets or updates the
description of the repository.
* `UpdateRepositoryName`, which changes the name of the repository.
If you change the name of a repository, no other users of that repository can
access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
* `CreateBranch`, which creates a branch in a specified repository.
* `DeleteBranch`, which deletes the specified branch in a repository
unless it is the default branch.
* `GetBranch`, which returns information about a specified branch.
* `ListBranches`, which lists all branches for a specified
repository.
* `UpdateDefaultBranch`, which changes the default branch for a
repository.
Files, by calling the following:
* `DeleteFile`, which deletes the content of a specified file from a
specified branch.
* `GetBlob`, which returns the base-64 encoded content of an
individual Git blob object in a repository.
* `GetFile`, which returns the base-64 encoded content of a
specified file.
* `GetFolder`, which returns the contents of a specified folder or
directory.
* `PutFile`, which adds or modifies a single file in a specified
repository and branch.
Commits, by calling the following:
* `BatchGetCommits`, which returns information about one or more
commits in a repository.
* `CreateCommit`, which creates a commit for changes to a
repository.
* `GetCommit`, which returns information about a commit, including
commit messages and author and committer information.
* `GetDifferences`, which returns information about the differences
in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other
fully qualified reference).
Merges, by calling the following:
* `BatchDescribeMergeConflicts`, which returns information about
conflicts in a merge between commits in a repository.
* `CreateUnreferencedMergeCommit`, which creates an unreferenced
commit between two branches or commits for the purpose of comparing them and
identifying any potential conflicts.
* `DescribeMergeConflicts`, which returns information about merge
conflicts between the base, source, and destination versions of a file in a
potential merge.
* `GetMergeCommit`, which returns information about the merge
between a source and destination commit.
* `GetMergeConflicts`, which returns information about merge
conflicts between the source and destination branch in a pull request.
* `GetMergeOptions`, which returns information about the available
merge options between two branches or commit specifiers.
* `MergeBranchesByFastForward`, which merges two branches using the
fast-forward merge option.
* `MergeBranchesBySquash`, which merges two branches using the
squash merge option.
* `MergeBranchesByThreeWay`, which merges two branches using the
three-way merge option.
Pull requests, by calling the following:
* `CreatePullRequest`, which creates a pull request in a specified
repository.
* `CreatePullRequestApprovalRule`, which creates an approval rule
for a specified pull request.
* `DeletePullRequestApprovalRule`, which deletes an approval rule
for a specified pull request.
* `DescribePullRequestEvents`, which returns information about one
or more pull request events.
* `EvaluatePullRequestApprovalRules`, which evaluates whether a pull
request has met all the conditions specified in its associated approval rules.
* `GetCommentsForPullRequest`, which returns information about
comments on a specified pull request.
* `GetPullRequest`, which returns information about a specified pull
request.
* `GetPullRequestApprovalStates`, which returns information about
the approval states for a specified pull request.
* `GetPullRequestOverrideState`, which returns information about
  whether approval rules have been set aside (overridden) for a pull request, and
if so, the Amazon Resource Name (ARN) of the user or identity that overrode the
rules and their requirements for the pull request.
* `ListPullRequests`, which lists all pull requests for a
repository.
    * `MergePullRequestByFastForward`, which merges the source branch
  of a pull request into the specified destination branch for that pull request
  using the fast-forward merge option.
    * `MergePullRequestBySquash`, which merges the source branch of a
  pull request into the specified destination branch for that pull request using
  the squash merge option.
    * `MergePullRequestByThreeWay`, which merges the source branch of
  a pull request into the specified destination branch for that pull request
  using the three-way merge option.
* `OverridePullRequestApprovalRules`, which sets aside all approval
rule requirements for a pull request.
* `PostCommentForPullRequest`, which posts a comment to a pull
request at the specified line, file, or request.
* `UpdatePullRequestApprovalRuleContent`, which updates the
structure of an approval rule for a pull request.
* `UpdatePullRequestApprovalState`, which updates the state of an
approval on a pull request.
* `UpdatePullRequestDescription`, which updates the description of a
pull request.
* `UpdatePullRequestStatus`, which updates the status of a pull
request.
* `UpdatePullRequestTitle`, which updates the title of a pull
request.
Approval rule templates, by calling the following:
* `AssociateApprovalRuleTemplateWithRepository`, which associates a
template with a specified repository. After the template is associated with a
repository, AWS CodeCommit creates approval rules that match the template
conditions on every pull request created in the specified repository.
* `BatchAssociateApprovalRuleTemplateWithRepositories`, which
associates a template with one or more specified repositories. After the
template is associated with a repository, AWS CodeCommit creates approval rules
that match the template conditions on every pull request created in the
specified repositories.
* `BatchDisassociateApprovalRuleTemplateFromRepositories`, which
removes the association between a template and specified repositories so that
approval rules based on the template are not automatically created when pull
requests are created in those repositories.
* `CreateApprovalRuleTemplate`, which creates a template for
approval rules that can then be associated with one or more repositories in your
AWS account.
* `DeleteApprovalRuleTemplate`, which deletes the specified
template. It does not remove approval rules on pull requests already created
with the template.
* `DisassociateApprovalRuleTemplateFromRepository`, which removes
the association between a template and a repository so that approval rules based
on the template are not automatically created when pull requests are created in
the specified repository.
* `GetApprovalRuleTemplate`, which returns information about an
approval rule template.
* `ListApprovalRuleTemplates`, which lists all approval rule
templates in the AWS Region in your AWS account.
* `ListAssociatedApprovalRuleTemplatesForRepository`, which lists
all approval rule templates that are associated with a specified repository.
* `ListRepositoriesForApprovalRuleTemplate`, which lists all
repositories associated with the specified approval rule template.
* `UpdateApprovalRuleTemplateDescription`, which updates the
description of an approval rule template.
* `UpdateApprovalRuleTemplateName`, which updates the name of an
approval rule template.
* `UpdateApprovalRuleTemplateContent`, which updates the content of
an approval rule template.
Comments in a repository, by calling the following:
* `DeleteCommentContent`, which deletes the content of a comment on
a commit in a repository.
* `GetComment`, which returns information about a comment on a
commit.
* `GetCommentReactions`, which returns information about emoji
reactions to comments.
* `GetCommentsForComparedCommit`, which returns information about
comments on the comparison between two commit specifiers in a repository.
* `PostCommentForComparedCommit`, which creates a comment on the
comparison between two commit specifiers in a repository.
* `PostCommentReply`, which creates a reply to a comment.
* `PutCommentReaction`, which creates or updates an emoji reaction
to a comment.
* `UpdateComment`, which updates the content of a comment on a
commit in a repository.
Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the
following:
* `ListTagsForResource`, which gets information about AWS tags for a
specified Amazon Resource Name (ARN) in AWS CodeCommit.
* `TagResource`, which adds or updates tags for a resource in AWS
CodeCommit.
* `UntagResource`, which removes tags for a resource in AWS
CodeCommit.
Triggers, by calling the following:
* `GetRepositoryTriggers`, which returns information about triggers
configured for a repository.
* `PutRepositoryTriggers`, which replaces all triggers for a
repository and can be used to create or delete triggers.
* `TestRepositoryTriggers`, which tests the functionality of a
repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the [AWS CodeCommit User Guide](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "CodeCommit",
api_version: "2015-04-13",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "codecommit",
global?: false,
protocol: "json",
service_id: "CodeCommit",
signature_version: "v4",
signing_name: "codecommit",
target_prefix: "CodeCommit_20150413"
}
end
@doc """
Creates an association between an approval rule template and a specified
repository.
Then, the next time a pull request is created in the repository where the
destination reference (if specified) matches the destination reference (branch)
for the pull request, an approval rule that matches the template conditions is
automatically created for that pull request. If no destination references are
specified in the template, an approval rule that matches the template contents
is created for all pull requests in that repository.
"""
def associate_approval_rule_template_with_repository(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"AssociateApprovalRuleTemplateWithRepository",
input,
options
)
end
@doc """
Creates an association between an approval rule template and one or more
specified repositories.
"""
def batch_associate_approval_rule_template_with_repositories(
%Client{} = client,
input,
options \\ []
) do
Request.request_post(
client,
metadata(),
"BatchAssociateApprovalRuleTemplateWithRepositories",
input,
options
)
end
@doc """
Returns information about one or more merge conflicts in the attempted merge of
two commit specifiers using the squash or three-way merge strategy.
"""
def batch_describe_merge_conflicts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDescribeMergeConflicts", input, options)
end
@doc """
Removes the association between an approval rule template and one or more
specified repositories.
"""
def batch_disassociate_approval_rule_template_from_repositories(
%Client{} = client,
input,
options \\ []
) do
Request.request_post(
client,
metadata(),
"BatchDisassociateApprovalRuleTemplateFromRepositories",
input,
options
)
end
@doc """
Returns information about the contents of one or more commits in a repository.
"""
def batch_get_commits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchGetCommits", input, options)
end
@doc """
Returns information about one or more repositories.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def batch_get_repositories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchGetRepositories", input, options)
end
@doc """
Creates a template for approval rules that can then be associated with one or
more repositories in your AWS account.
When you associate a template with a repository, AWS CodeCommit creates an
approval rule that matches the conditions of the template for all pull requests
that meet the conditions of the template. For more information, see
`AssociateApprovalRuleTemplateWithRepository`.
"""
def create_approval_rule_template(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateApprovalRuleTemplate", input, options)
end
@doc """
Creates a branch in a repository and points the branch to a commit.
Calling the create branch operation does not set a repository's default branch.
To do this, call the update default branch operation.
"""
def create_branch(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBranch", input, options)
end
@doc """
Creates a commit for a repository on the tip of a specified branch.
"""
def create_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCommit", input, options)
end
@doc """
Creates a pull request in the specified repository.
"""
def create_pull_request(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePullRequest", input, options)
end
@doc """
Creates an approval rule for a pull request.
"""
def create_pull_request_approval_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePullRequestApprovalRule", input, options)
end
@doc """
Creates a new, empty repository.
"""
def create_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRepository", input, options)
end
@doc """
Creates an unreferenced commit that represents the result of merging two
branches using a specified merge strategy.
This can help you determine the outcome of a potential merge. This API cannot be
used with the fast-forward merge strategy because that strategy does not create
a merge commit.
This unreferenced merge commit can only be accessed using the GetCommit API or
through git commands such as git fetch. To retrieve this commit, you must
specify its commit ID or otherwise reference it.
"""
def create_unreferenced_merge_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUnreferencedMergeCommit", input, options)
end
@doc """
Deletes a specified approval rule template.
Deleting a template does not remove approval rules on pull requests already
created with the template.
"""
def delete_approval_rule_template(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteApprovalRuleTemplate", input, options)
end
@doc """
Deletes a branch from a repository, unless that branch is the default branch for
the repository.
"""
def delete_branch(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBranch", input, options)
end
@doc """
Deletes the content of a comment made on a change, file, or commit in a
repository.
"""
def delete_comment_content(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCommentContent", input, options)
end
@doc """
Deletes a specified file from a specified branch.
  A commit is created on the branch that contains the revision. The file still
  exists in the commits prior to the commit that contains the deletion.
"""
def delete_file(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteFile", input, options)
end
@doc """
Deletes an approval rule from a specified pull request.
Approval rules can be deleted from a pull request only if the pull request is
open, and if the approval rule was created specifically for a pull request and
not generated from an approval rule template associated with the repository
where the pull request was created. You cannot delete an approval rule from a
merged or closed pull request.
"""
def delete_pull_request_approval_rule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePullRequestApprovalRule", input, options)
end
@doc """
Deletes a repository.
If a specified repository was already deleted, a null repository ID is returned.
Deleting a repository also deletes all associated objects and metadata. After a
repository is deleted, all future push calls to the deleted repository fail.
"""
def delete_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRepository", input, options)
end
@doc """
Returns information about one or more merge conflicts in the attempted merge of
two commit specifiers using the squash or three-way merge strategy.
If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE,
an exception is thrown.
"""
def describe_merge_conflicts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMergeConflicts", input, options)
end
@doc """
Returns information about one or more pull request events.
"""
def describe_pull_request_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePullRequestEvents", input, options)
end
@doc """
Removes the association between a template and a repository so that approval
rules based on the template are not automatically created when pull requests are
created in the specified repository.
This does not delete any approval rules previously created for pull requests
through the template association.
"""
def disassociate_approval_rule_template_from_repository(
%Client{} = client,
input,
options \\ []
) do
Request.request_post(
client,
metadata(),
"DisassociateApprovalRuleTemplateFromRepository",
input,
options
)
end
@doc """
Evaluates whether a pull request has met all the conditions specified in its
associated approval rules.
"""
def evaluate_pull_request_approval_rules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EvaluatePullRequestApprovalRules", input, options)
end
@doc """
Returns information about a specified approval rule template.
"""
def get_approval_rule_template(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetApprovalRuleTemplate", input, options)
end
@doc """
Returns the base-64 encoded content of an individual blob in a repository.
"""
def get_blob(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetBlob", input, options)
end
@doc """
Returns information about a repository branch, including its name and the last
commit ID.
"""
def get_branch(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetBranch", input, options)
end
@doc """
Returns the content of a comment made on a change, file, or commit in a
repository.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetComment", input, options)
end
@doc """
Returns information about reactions to a specified comment ID.
Reactions from users who have been deleted will not be included in the count.
"""
def get_comment_reactions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCommentReactions", input, options)
end
@doc """
Returns information about comments made on the comparison between two commits.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comments_for_compared_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCommentsForComparedCommit", input, options)
end
@doc """
Returns comments made on a pull request.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comments_for_pull_request(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCommentsForPullRequest", input, options)
end
@doc """
Returns information about a commit, including commit message and committer
information.
"""
def get_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCommit", input, options)
end
@doc """
Returns information about the differences in a valid commit specifier (such as a
branch, tag, HEAD, commit ID, or other fully qualified reference).
Results can be limited to a specified path.
"""
def get_differences(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDifferences", input, options)
end
@doc """
Returns the base-64 encoded contents of a specified file and its metadata.
"""
def get_file(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetFile", input, options)
end
@doc """
Returns the contents of a specified folder in a repository.
"""
def get_folder(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetFolder", input, options)
end
@doc """
Returns information about a specified merge commit.
"""
def get_merge_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetMergeCommit", input, options)
end
@doc """
Returns information about merge conflicts between the before and after commit
IDs for a pull request in a repository.
"""
def get_merge_conflicts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetMergeConflicts", input, options)
end
@doc """
Returns information about the merge options available for merging two specified
branches.
For details about why a merge option is not available, use GetMergeConflicts or
DescribeMergeConflicts.
"""
def get_merge_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetMergeOptions", input, options)
end
@doc """
Gets information about a pull request in a specified repository.
"""
def get_pull_request(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetPullRequest", input, options)
end
@doc """
Gets information about the approval states for a specified pull request.
Approval states only apply to pull requests that have one or more approval rules
applied to them.
"""
def get_pull_request_approval_states(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetPullRequestApprovalStates", input, options)
end
@doc """
Returns information about whether approval rules have been set aside
(overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of
the user or identity that overrode the rules and their requirements for the pull
request.
"""
def get_pull_request_override_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetPullRequestOverrideState", input, options)
end
@doc """
Returns information about a repository.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def get_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRepository", input, options)
end
@doc """
Gets information about triggers configured for a repository.
"""
def get_repository_triggers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRepositoryTriggers", input, options)
end
@doc """
Lists all approval rule templates in the specified AWS Region in your AWS
account.
If an AWS Region is not specified, the AWS Region where you are signed in is
used.
"""
def list_approval_rule_templates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListApprovalRuleTemplates", input, options)
end
@doc """
Lists all approval rule templates that are associated with a specified
repository.
"""
def list_associated_approval_rule_templates_for_repository(
%Client{} = client,
input,
options \\ []
) do
Request.request_post(
client,
metadata(),
"ListAssociatedApprovalRuleTemplatesForRepository",
input,
options
)
end
@doc """
Gets information about one or more branches in a repository.
"""
def list_branches(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListBranches", input, options)
end
@doc """
Returns a list of pull requests for a specified repository.
  The returned list can be refined by pull request status or pull request author
ARN.
"""
def list_pull_requests(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPullRequests", input, options)
end
@doc """
Gets information about one or more repositories.
"""
def list_repositories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRepositories", input, options)
end
@doc """
Lists all repositories associated with the specified approval rule template.
"""
def list_repositories_for_approval_rule_template(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListRepositoriesForApprovalRuleTemplate",
input,
options
)
end
@doc """
Gets information about AWS tags for a specified Amazon Resource Name (ARN) in
AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
  in the *AWS CodeCommit User Guide*.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Merges two branches using the fast-forward merge strategy.
"""
def merge_branches_by_fast_forward(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergeBranchesByFastForward", input, options)
end
@doc """
Merges two branches using the squash merge strategy.
"""
def merge_branches_by_squash(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergeBranchesBySquash", input, options)
end
@doc """
Merges two specified branches using the three-way merge strategy.
"""
def merge_branches_by_three_way(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergeBranchesByThreeWay", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
fast-forward merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_fast_forward(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergePullRequestByFastForward", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
squash merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_squash(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergePullRequestBySquash", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
three-way merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_three_way(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MergePullRequestByThreeWay", input, options)
end
@doc """
Sets aside (overrides) all approval rule requirements for a specified pull
request.
"""
def override_pull_request_approval_rules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "OverridePullRequestApprovalRules", input, options)
end
@doc """
Posts a comment on the comparison between two commits.
"""
def post_comment_for_compared_commit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PostCommentForComparedCommit", input, options)
end
@doc """
Posts a comment on a pull request.
"""
def post_comment_for_pull_request(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PostCommentForPullRequest", input, options)
end
@doc """
Posts a comment in reply to an existing comment on a comparison between commits
or a pull request.
"""
def post_comment_reply(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PostCommentReply", input, options)
end
@doc """
Adds or updates a reaction to a specified comment for the user whose identity is
used to make the request.
You can only add or update a reaction for yourself. You cannot add, modify, or
delete a reaction for another user.
"""
def put_comment_reaction(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutCommentReaction", input, options)
end
@doc """
Adds or updates a file in a branch in an AWS CodeCommit repository, and
generates a commit for the addition in the specified branch.
"""
def put_file(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutFile", input, options)
end
@doc """
Replaces all triggers for a repository.
Used to create or delete triggers.
"""
def put_repository_triggers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutRepositoryTriggers", input, options)
end
@doc """
Adds or updates tags for a resource in AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
in the *AWS CodeCommit User Guide*.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Tests the functionality of repository triggers by sending information to the
trigger target.
If real data is available in the repository, the test sends data from the last
commit. If no data is available, sample data is generated.
"""
def test_repository_triggers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TestRepositoryTriggers", input, options)
end
@doc """
Removes tags for a resource in AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
in the *AWS CodeCommit User Guide*.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates the content of an approval rule template.
You can change the number of required approvals, the membership of the approval
rule, and whether an approval pool is defined.
"""
def update_approval_rule_template_content(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateApprovalRuleTemplateContent", input, options)
end
@doc """
Updates the description for a specified approval rule template.
"""
def update_approval_rule_template_description(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"UpdateApprovalRuleTemplateDescription",
input,
options
)
end
@doc """
Updates the name of a specified approval rule template.
"""
def update_approval_rule_template_name(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateApprovalRuleTemplateName", input, options)
end
@doc """
Replaces the contents of a comment.
"""
def update_comment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateComment", input, options)
end
@doc """
Sets or changes the default branch name for the specified repository.
If you use this operation to change the default branch name to the current
default branch name, a success message is returned even though the default
branch did not change.
"""
def update_default_branch(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDefaultBranch", input, options)
end
@doc """
Updates the structure of an approval rule created specifically for a pull
request.
For example, you can change the number of required approvers and the approval
pool for approvers.
"""
def update_pull_request_approval_rule_content(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"UpdatePullRequestApprovalRuleContent",
input,
options
)
end
@doc """
Updates the state of a user's approval on a pull request.
The user is derived from the signed-in account when the request is made.
"""
def update_pull_request_approval_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePullRequestApprovalState", input, options)
end
@doc """
Replaces the contents of the description of a pull request.
"""
def update_pull_request_description(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePullRequestDescription", input, options)
end
@doc """
Updates the status of a pull request.
"""
def update_pull_request_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePullRequestStatus", input, options)
end
@doc """
Replaces the title of a pull request.
"""
def update_pull_request_title(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePullRequestTitle", input, options)
end
@doc """
Sets or changes the comment or description for a repository.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def update_repository_description(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRepositoryDescription", input, options)
end
@doc """
Renames a repository.
The repository name must be unique across the calling AWS account. Repository
names are limited to 100 alphanumeric, dash, and underscore characters, and
cannot include certain characters. The suffix .git is prohibited. For more
information about the limits on repository names, see
[Limits](https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) in
the AWS CodeCommit User Guide.
"""
def update_repository_name(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRepositoryName", input, options)
end
end
|
lib/aws/generated/code_commit.ex
| 0.883676
| 0.601886
|
code_commit.ex
|
starcoder
|
defmodule Surface.Formatter do
@moduledoc "Functions for formatting Surface code snippets."
alias Surface.Formatter.Phases
@typedoc """
Options that can be passed to `Surface.Formatter.format_string!/2`.
- `:line_length` - Maximum line length before wrapping opening tags
- `:indent` - Starting indentation depth depending on the context of the ~F sigil
"""
@type option :: {:line_length, integer} | {:indent, integer}
@typedoc """
The name of an HTML/Surface tag, such as `div`, `ListItem`, or `#Markdown`.
"""
@type tag :: String.t()
@typedoc "The value of a parsed HTML/Component attribute."
@type attribute_value ::
integer
| boolean
| String.t()
| {:attribute_expr, interpolated_expression :: String.t(), term}
| [String.t()]
@typedoc "A parsed HTML/Component attribute name and value."
@type attribute :: {name :: String.t(), attribute_value, term}
@typedoc "A node output by `Surface.Compiler.Parser.parse`."
@type surface_node ::
String.t()
| {:interpolation, String.t(), map}
| {tag, list(attribute), list(surface_node), map}
@typedoc """
Whitespace nodes that can be rendered by `Surface.Formatter.Render.node/2`.
The Surface parser does not return these, but formatter phases introduce these nodes
in preparation for rendering.
- `:newline` adds a newline (`\\n`) character
- `:space` adds a space (` `) character
- `:indent` adds spaces at the appropriate indentation amount
- `:indent_one_less` adds spaces at 1 indentation level removed (used for closing tags)
"""
@type whitespace ::
:newline
| :space
| :indent
| :indent_one_less
@typedoc """
A node that will ultimately be sent to `Surface.Formatter.Render.node/2` for rendering.
  The output of `Surface.Compiler.Parser.parse` is run through the various formatting
phases, which ultimately output a tree of this type.
"""
@type formatter_node :: surface_node | whitespace
@doc """
Formats the given Surface code string. (Typically the contents of an `~F`
sigil or `.sface` file.)
In short:
- HTML/Surface elements are indented to the right of their parents.
- Attributes are split on multiple lines if the line is too long; otherwise on the same line.
  - Elixir code snippets (inside `{ }`) are run through the Elixir code formatter.
- Lack of whitespace is preserved, so that intended behaviors are not removed.
(For example, `<span>Foo bar baz</span>` will not have newlines or spaces added.)
Below the **Options** section is a non-exhaustive list of behaviors of the formatter.
# Options
* `:line_length` - the line length to aim for when formatting
the document. Defaults to 98. As with the Elixir formatter,
this value is used as reference but is not always enforced
depending on the context.
# Indentation
The formatter ensures that children are indented one tab (two spaces) in from
their parent.
# Whitespace
## Whitespace that exists
As in regular HTML, any string of continuous whitespace is considered
equivalent to any other string of continuous whitespace. There are four
exceptions:
1. Macro components (with names starting with `#`, such as `<#Markdown>`)
2. `<pre>` tags
3. `<code>` tags
4. `<script>` tags
The contents of those tags are considered whitespace-sensitive, and developers
should sanity check after running the formatter.
## Whitespace that doesn't exist (Lack of whitespace)
As is sometimes the case in HTML, _lack_ of whitespace is considered
significant. Instead of attempting to determine which contexts matter, the
formatter consistently retains lack of whitespace. This means that the
following
```html
<div><p>Hello</p></div>
```
will not be changed. However, the following
```html
<div> <p> Hello </p> </div>
```
will be formatted as
```html
<div>
<p>
Hello
</p>
</div>
```
because of the whitespace on either side of each tag.
To be clear, this example
```html
<div> <p>Hello</p> </div>
```
will be formatted as
```html
<div>
<p>Hello</p>
</div>
```
because of the lack of whitespace in between the opening and closing `<p>` tags
and their child content.
## Splitting children onto separate lines
In certain scenarios, the formatter will move nodes to their own line:
(Below, "element" means an HTML element or a Surface component.)
1. If an element contains other elements as children, it will be surrounded by newlines.
2. If there is a space after an opening tag or before a closing tag, it is converted to a newline.
3. If a closing tag is put on its own line, the formatter ensures there's a newline before the next sibling node.
Since SurfaceFormatter doesn't know if a component represents an inline or block element,
it does not currently make distinctions between elements that should or should not be
moved onto their own lines, other than the above rules.
This allows inline elements to be placed among text without splitting them onto their own lines:
```html
The <b>Dialog</b> is a stateless component. All event handlers
had to be defined in the parent <b>LiveView</b>.
```
## Newline characters
The formatter will not add extra newlines unprompted beyond moving nodes onto
their own line. However, if the input code has extra newlines, the formatter
will retain them but will collapse more than one extra newline into a single
one.
  This means that
  ```html
  <p>Hello</p>


  <p>Goodbye</p>
  ```
  will be formatted as
  ```html
  <p>Hello</p>

  <p>Goodbye</p>
  ```
# HTML attributes and component props
HTML attributes such as `class` in `<p class="container">` and component
props such as `name` in `<Person name="Samantha">` are formatted to make use
of Surface features.
## Inline literals
String literals are placed after the `=` without any interpolation brackets (`{ }`). This means that
```html
<Component foo={"hello"} />
```
will be formatted as
```html
<Component foo="hello" />
```
Also, `true` boolean literals are formatted using the Surface shorthand
whereby you can simply write the name of the attribute and it is passed in as
`true`. For example,
```html
<Component secure={true} />
```
and
```html
<Component secure=true />
```
will both be formatted as
```html
<Component secure />
```
## Interpolation (`{ }` brackets)
  Attributes that interpolate Elixir code with `{ }` brackets are run through
  the Elixir code formatter.
This means that:
- `<Foo num={123456} />` becomes `<Foo num={123_456} />`
- `list={[1,2,3]}` becomes `list={[1, 2, 3]}`
- `things={%{ one: "1", two: "2"}}` becomes `things={%{one: "1", two: "2"}}`
Sometimes the Elixir code formatter will add line breaks in the formatted
expression. In that case, SurfaceFormatter will ensure indentation lines up. If
there is a single attribute, it will keep the attribute on the same line as the
tag name, for example:
```html
<Component list={[
{"foo", foo},
{"bar", bar}
]} />
```
However, if there are multiple attributes it will put them on separate lines:
```html
<Child
list={[
{"foo", foo},
{"bar", bar}
]}
int={123}
/>
```
## Whitespace in string attributes
### Code semantics must be maintained
It's critical that a code formatter never change the semantics of the code
it modifies. In other words, the behavior of a program should never change
due to a code formatter.
The **Whitespace** section above outlines how `SurfaceFormatter` preserves
code semantics by refusing to modify contents of `<script>`, `<code>` and
`<pre>` tags as well as macro components. And for the same reason, the
formatter does not introduce whitespace between HTML tags when there is none.
### Code semantics in string attributes
This principle is also relevant to string attributes, such as:
```html
<MyComponent string_prop=" string with whitespace " />
```
`SurfaceFormatter` cannot reliably guess whether application behavior will be
changed by formatting the contents of a string. For example, consider a
component with the following interface:
```html
<List items="
apples (fuji)
oranges (navel)
bell peppers (green)
" />
```
The component internally splits on newline characters and outputs the following HTML:
```html
<ul>
<li>apples (fuji)</li>
<li>oranges (navel)</li>
<li>bell peppers (green)</li>
</ul>
```
If `SurfaceFormatter` assumes it is safe to modify whitespace in string
attributes, then the Surface code would likely change to this:
```html
<List items="apples (fuji) oranges (navel) bell peppers (green)" />
```
Which would output the following HTML:
```html
<ul>
<li>apples (fuji) oranges (navel) bell peppers (green)</li>
</ul>
```
Notice that the behavior of the application would have changed simply by
running the formatter. It is for this reason that `SurfaceFormatter`
always retains precisely the same whitespace in attribute strings,
including both space and newline characters.
## Wrapping attributes on separate lines
In the **Interpolation (`{ }` brackets)** section we noted that attributes
will each be put on their own line if there is more than one attribute and at
least one contains a newline after being formatted by the Elixir code
formatter.
There is another scenario where attributes will each be given their own line:
**any time the opening tag would exceed `line_length` if put on one line**.
This value is provided in `.formatter.exs` and defaults to 98.
The formatter indents attributes one tab in from the start of the opening tag
for readability:
```html
<div
class="very long class value that causes this to exceed the established line length"
aria-role="button"
>
```
If you desire to have a separate line length for `mix format` and `mix surface.format`,
provide `surface_line_length` in `.formatter.exs` and it will be given precedence
when running `mix surface.format`. For example:
```elixir
# .formatter.exs
[
surface_line_length: 120,
import_deps: [...],
# ...
]
```
# Developer Responsibility
As with all changes (for both `mix format` and `mix surface.format`) it's
recommended that developers don't blindly run the formatter on an entire
codebase and commit, but instead sanity check each file to ensure the results
are desired.
"""
@spec format_string!(String.t(), list(option)) :: String.t()
def format_string!(string, opts \\ []) do
trailing_newline =
case Regex.run(~r/\n+\s*$/, string) do
[match] -> match
nil -> nil
end
parsed =
string
|> String.trim()
|> Surface.Compiler.Parser.parse!(translator: Surface.Formatter.NodeTranslator)
# Ensure the :indent and :trailing_newline options are set
opts =
opts
|> Keyword.put_new(:indent, 0)
|> Keyword.put(:trailing_newline, !is_nil(trailing_newline))
[
Phases.TagWhitespace,
Phases.Newlines,
Phases.SpacesToNewlines,
Phases.Indent,
Phases.FinalNewline,
Phases.BlockExceptions,
Phases.Render
]
|> Enum.reduce(parsed, fn phase, nodes ->
phase.run(nodes, opts)
end)
end
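  # Illustrative sketch (exact output depends on options such as `:line_length`;
  # per the whitespace rules documented above, the spaces around the inner tag
  # become newlines while the whitespace-free `<p>Hello</p>` is left intact):
  #
  #     Surface.Formatter.format_string!("<div> <p>Hello</p> </div>")
  #     # => "<div>\n  <p>Hello</p>\n</div>"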
@doc """
Returns true if the argument is an element (HTML element or surface
component), false otherwise.
"""
@spec is_element?(surface_node) :: boolean
def is_element?({_, _, _, _}), do: true
def is_element?(_), do: false
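  # For example (illustrative): `is_element?({"div", [], [], %{}})` returns
  # true, while `is_element?("plain text")` and interpolation nodes return false.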
@doc """
  Given a tag, return whether to render the contents verbatim instead of formatting them.
  Specifically, don't modify the contents of macro components or `<pre>`, `<code>`, and `<script>` tags.
"""
@spec render_contents_verbatim?(tag) :: boolean
def render_contents_verbatim?("#template"), do: false
def render_contents_verbatim?("#slot"), do: false
def render_contents_verbatim?("#" <> _), do: true
def render_contents_verbatim?("pre"), do: true
def render_contents_verbatim?("code"), do: true
def render_contents_verbatim?("script"), do: true
def render_contents_verbatim?(tag) when is_binary(tag), do: false
end
|
lib/surface/formatter.ex
| 0.934657
| 0.886076
|
formatter.ex
|
starcoder
|