code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule Tw.V1_1.Place do
@moduledoc """
Place data structure and related functions.
https://developer.twitter.com/en/docs/twitter-api/v1/data-dictionary/object-model/geo
"""
alias Tw.V1_1.BoundingBox
@type id :: binary()
@enforce_keys [:id, :url, :place_type, :name, :full_name, :country_code, :country, :bounding_box, :attributes]
defstruct([:id, :url, :place_type, :name, :full_name, :country_code, :country, :bounding_box, :attributes])
@typedoc """
> | field | description |
> | - | - |
> | `id` | ID representing this place. Note that this is represented as a string, not an integer. Example: `\"01a9a39529b27f36\" `. |
> | `url` | URL representing the location of additional place metadata for this place. Example: `\"https://api.twitter.com/1.1/geo/id/01a9a39529b27f36.json\" `. |
> | `place_type` | The type of location represented by this place. Example: `\"city\" `. |
> | `name` | Short human-readable representation of the place’s name. Example: `\"Manhattan\" `. |
> | `full_name` | Full human-readable representation of the place’s name. Example: `\"Manhattan, NY\" `. |
> | `country_code` | Shortened country code representing the country containing this place. Example: `\"US\" `. |
> | `country` | Name of the country containing this place. Example: `\"United States\" `. |
> | `bounding_box` | A bounding box of coordinates which encloses this place. |
> | `attributes` | When using PowerTrack, 30-Day and Full-Archive Search APIs, and Volume Streams this hash is null. Example: `{} `. |
>
"""
@type t :: %__MODULE__{
id: id(),
url: binary,
place_type: binary,
name: binary,
full_name: binary,
country_code: binary,
country: binary,
bounding_box: BoundingBox.t(),
attributes: map
}
@spec decode!(map) :: t
@doc """
Decode JSON-decoded map into `t:t/0`
"""
def decode!(json) do
json =
json
|> Map.update!(:bounding_box, &BoundingBox.decode!/1)
struct(__MODULE__, json)
end
end
|
lib/tw/v1_1/place.ex
| 0.898603
| 0.604078
|
place.ex
|
starcoder
|
defmodule Bunch.Map do
@moduledoc """
A bunch of helper functions for manipulating maps.
"""
use Bunch
@doc """
Updates value at `key` in `map` and returns new value and updated map.
Uses `Map.get_and_update/3` under the hood.
## Example
iex> %{a: 1} |> #{inspect(__MODULE__)}.get_updated(:a, & &1+1)
{2, %{a: 2}}
"""
@spec get_updated(map, Map.key(), (Map.value() -> v)) :: {v, map} when v: Map.value()
def get_updated(map, key, fun) do
Map.get_and_update(map, key, fn a -> fun.(a) ~> {&1, &1} end)
end
@doc """
Works like `get_updated/3`, but requires `map` to contain `key`.
Uses `Map.get_and_update!/3` under the hood.
## Example
iex> %{a: 1} |> #{inspect(__MODULE__)}.get_updated!(:a, & &1+1)
{2, %{a: 2}}
"""
@spec get_updated!(map, Map.key(), (Map.value() -> v)) :: {v, map} when v: Map.value()
def get_updated!(map, key, fun) do
Map.get_and_update!(map, key, fn a -> fun.(a) ~> {&1, &1} end)
end
@doc """
Maps keys of `map` using function `f`.
## Example
iex> #{inspect(__MODULE__)}.map_keys(%{1 => :a, 2 => :b}, & &1+1)
%{2 => :a, 3 => :b}
"""
@spec map_keys(%{k1 => v}, (k1 -> k2)) :: %{k2 => v} when k1: any, k2: any, v: any
def map_keys(map, f) do
map |> Enum.into(Map.new(), fn {key, value} -> {f.(key), value} end)
end
@doc """
Maps values of `map` using function `f`.
## Example
iex> #{inspect(__MODULE__)}.map_values(%{a: 1, b: 2}, & &1+1)
%{a: 2, b: 3}
"""
@spec map_values(%{k => v1}, (v1 -> v2)) :: %{k => v2} when k: any, v1: any, v2: any
def map_values(map, f) do
map |> Enum.into(Map.new(), fn {key, value} -> {key, f.(value)} end)
end
@doc """
Moves value stored at `old_key` to `new_key`.
If `old_key` is not present in `map`, `default_value` is stored at `new_key`.
If `new_key` is present in `map`, it's value is overwritten.
## Examples
iex> #{inspect(__MODULE__)}.move(%{a: 1, b: 2}, :a, :c, 3)
%{b: 2, c: 1}
iex> #{inspect(__MODULE__)}.move(%{a: 1, b: 2}, :a, :b, 3)
%{b: 1}
iex> #{inspect(__MODULE__)}.move(%{a: 1, b: 2}, :c, :b, 3)
%{a: 1, b: 3}
"""
@spec move(%{k => v}, old_key :: k, new_key :: k, default_value :: v) :: %{k => v}
when k: any, v: any
def move(map, old_key, new_key, default_value) do
{value, map} = map |> Map.pop(old_key, default_value)
map |> Map.put(new_key, value)
end
@doc """
Works like `move/3`, but fails if either `old_key` is absent or `new_key` is present
in `map`.
## Example
iex> #{inspect(__MODULE__)}.move!(%{a: 1, b: 2}, :a, :c)
%{b: 2, c: 1}
"""
@spec move!(%{k => v}, old_key :: k, new_key :: k) :: %{k => v} | no_return
when k: any, v: any
def move!(map, old_key, new_key) do
true = Map.has_key?(map, old_key) and not Map.has_key?(map, new_key)
{value, map} = map |> Map.pop(old_key)
map |> Map.put(new_key, value)
end
end
|
lib/bunch/map.ex
| 0.911618
| 0.703206
|
map.ex
|
starcoder
|
defmodule EventStore.Subscriptions.Subscriber do
@moduledoc false
defstruct [
:pid,
:ref,
:partition_key,
last_sent: 0,
buffer_size: 1,
in_flight: []
]
alias EventStore.RecordedEvent
alias EventStore.Subscriptions.Subscriber
@doc """
Subscriber is available to receive events when the number of in-flight events
is less than its configured buffer size. By default this is set to one event.
"""
def available?(%Subscriber{in_flight: []}), do: true
def available?(%Subscriber{in_flight: in_flight, buffer_size: buffer_size}),
do: length(in_flight) < buffer_size
@doc """
Is the given event in the same partition as any in-flight events?
"""
def in_partition?(%Subscriber{partition_key: nil}, _partition_key), do: false
def in_partition?(%Subscriber{partition_key: partition_key}, partition_key), do: true
def in_partition?(%Subscriber{}, _partition_key), do: false
def track_in_flight(%Subscriber{} = subscriber, %RecordedEvent{} = event, partition_key) do
%Subscriber{in_flight: in_flight} = subscriber
%RecordedEvent{event_number: event_number} = event
%Subscriber{
subscriber
| in_flight: [event | in_flight],
last_sent: event_number,
partition_key: partition_key
}
end
def reset_in_flight(%Subscriber{} = subscriber) do
%Subscriber{subscriber | in_flight: [], partition_key: nil}
end
@doc """
Acknowledge the in-flight event by number and all events sent to the
subscriber before the ack'd event.
"""
def acknowledge(%Subscriber{} = subscriber, ack) do
%Subscriber{in_flight: in_flight} = subscriber
case ack_event_index(in_flight, ack) do
nil ->
{:error, :unexpected_ack}
index ->
# All in-flight events up to the ack'd event number are also ack'd
{in_flight, acknowledged_events} = Enum.split(in_flight, index)
subscriber =
case in_flight do
[] ->
%Subscriber{subscriber | in_flight: [], partition_key: nil}
in_flight ->
%Subscriber{subscriber | in_flight: in_flight}
end
{:ok, subscriber, acknowledged_events}
end
end
defp ack_event_index(in_flight, ack) do
Enum.find_index(in_flight, fn
%RecordedEvent{event_number: ^ack} -> true
%RecordedEvent{} -> false
end)
end
end
|
lib/event_store/subscriptions/subscriber.ex
| 0.752559
| 0.49762
|
subscriber.ex
|
starcoder
|
defmodule DarkMatter.Lists do
@moduledoc """
Utils for working with lists or improper lists
"""
@moduledoc since: "1.0.0"
@type atom_or_improper_tree_list() ::
atom
| maybe_improper_list
| {atom, atom | maybe_improper_list | {atom, atom | maybe_improper_list | {any, any}}}
@doc """
Flattens common tree shaped keyword lists into a single list
## Examples
iex> flatten_atom_or_improper_tree_list([])
[]
iex> flatten_atom_or_improper_tree_list(:atom)
[:atom]
iex> flatten_atom_or_improper_tree_list([:atom_list])
[:atom_list]
iex> flatten_atom_or_improper_tree_list({:tuple, :atom})
[:tuple, :atom]
iex> flatten_atom_or_improper_tree_list([tuple: :list])
[:tuple, :list]
iex> flatten_atom_or_improper_tree_list([tuple: [nested: :atom]])
[:tuple, :nested, :atom]
iex> flatten_atom_or_improper_tree_list([tuple: [nested: [:list, :atom]]])
[:tuple, :nested, :list, :atom]
iex> flatten_atom_or_improper_tree_list([:atom, tuple: [nested: [:nested2, nested3: [:nested4]]]])
[:atom, :tuple, :nested, :nested2, :nested3, :nested4]
"""
@spec flatten_atom_or_improper_tree_list(atom_or_improper_tree_list()) :: [atom()]
def flatten_atom_or_improper_tree_list(val) do
do_flatten_atom_or_improper_tree_list([], val)
end
defp do_flatten_atom_or_improper_tree_list(acc, atom) when is_list(acc) and is_atom(atom) do
[atom | acc]
end
defp do_flatten_atom_or_improper_tree_list(acc, {atom, val})
when is_list(acc) and is_atom(atom) do
[atom | do_flatten_atom_or_improper_tree_list([], val)] ++ acc
end
defp do_flatten_atom_or_improper_tree_list(acc, list) when is_list(acc) and is_list(list) do
acc ++ Enum.flat_map(list, &do_flatten_atom_or_improper_tree_list([], &1))
end
@doc """
Split list into uniques
"""
@spec split_uniq(Enumerable.t()) :: {Enumerable.t(), Enumerable.t()}
def split_uniq(enumerable) do
split_uniq_by(enumerable, fn x -> x end)
end
@doc """
Split list into uniques by `fun`
"""
@spec split_uniq_by(Enumerable.t(), (any() -> any())) :: {Enumerable.t(), Enumerable.t()}
def split_uniq_by(enumerable, fun) when is_list(enumerable) do
split_uniq_list(enumerable, %{}, fun)
end
defp split_uniq_list([head | tail], set, fun) do
value = fun.(head)
case set do
%{^value => true} ->
{uniq, dupl} = split_uniq_list(tail, set, fun)
{uniq, [head | dupl]}
%{} ->
{uniq, dupl} = split_uniq_list(tail, Map.put(set, value, true), fun)
{[head | uniq], dupl}
end
end
defp split_uniq_list([], _set, _fun) do
{[], []}
end
end
|
lib/dark_matter/lists.ex
| 0.821438
| 0.615911
|
lists.ex
|
starcoder
|
defmodule Ecto.Pools.Poolboy do
@moduledoc """
Start a pool of connections using `poolboy`.
### Options
* `:pool_name` - The name of the pool supervisor
* `:pool_size` - The number of connections to keep in the pool (default: 10)
* `:lazy` - When true, connections to the repo are lazily started (default: true)
* `:max_overflow` - The maximum overflow of connections (default: 0) (see poolboy docs)
* `:shutdown` - The shutdown method for the connections (default: 5000) (see Supervisor.Spec)
"""
alias Ecto.Pools.Poolboy.Worker
@behaviour Ecto.Pool
@doc """
Starts a pool of connections for the given connection module and options.
* `conn_mod` - The connection module, see `Ecto.Adapters.Connection`
* `opts` - The options for the pool and the connections
"""
def start_link(conn_mod, opts) do
{:ok, _} = Application.ensure_all_started(:poolboy)
{pool_opts, conn_opts} = split_opts(opts)
:poolboy.start_link(pool_opts, {conn_mod, conn_opts})
end
@doc false
def checkout(pool, timeout) do
checkout(pool, :run, timeout)
end
@doc false
def checkin(pool, worker, _) do
:poolboy.checkin(pool, worker)
end
@doc false
def open_transaction(pool, timeout) do
checkout(pool, :transaction, timeout)
end
@doc false
def close_transaction(pool, worker, _) do
try do
Worker.checkin(worker)
after
:poolboy.checkin(pool, worker)
end
end
@doc false
def break(pool, worker, timeout) do
try do
Worker.break(worker, timeout)
after
:poolboy.checkin(pool, worker)
end
end
## Helpers
defp split_opts(opts) do
{pool_opts, conn_opts} = Keyword.split(opts, [:pool_name, :pool_size, :max_overflow])
conn_opts =
conn_opts
|> Keyword.put(:timeout, Keyword.get(opts, :connect_timeout, 5_000))
pool_opts = [worker_module: Worker,
name: {:local, Keyword.fetch!(pool_opts, :pool_name)},
size: Keyword.get(pool_opts, :pool_size, 10),
max_overflow: Keyword.get(pool_opts, :max_overflow, 0)]
{pool_opts, conn_opts}
end
defp checkout(pool, fun, timeout) do
case :timer.tc(fn() -> do_checkout(pool, fun, timeout) end) do
{queue_time, {:ok, worker, mod_conn}} ->
{:ok, worker, mod_conn, queue_time}
{_queue_time, {:error, _} = error} ->
error
end
end
defp do_checkout(pool, fun, timeout) do
try do
:poolboy.checkout(pool, :true, timeout)
catch
:exit, {:noproc, _} ->
{:error, :noproc}
else
worker ->
do_checkout(pool, worker, fun, timeout)
end
end
defp do_checkout(pool, worker, fun, timeout) do
try do
Worker.checkout(worker, fun, timeout)
catch
class, reason ->
stack = System.stacktrace()
:poolboy.checkin(pool, worker)
:erlang.raise(class, reason, stack)
else
{:ok, mod_conn} ->
{:ok, worker, mod_conn}
{:error, err} ->
:poolboy.checkin(pool, worker)
raise err
end
end
end
|
lib/ecto/pools/poolboy.ex
| 0.815306
| 0.485478
|
poolboy.ex
|
starcoder
|
defmodule Loom.LWWRegister do
@moduledoc """
A Last-write-wins register
While alone, this kinda defeats the point of a CRDT, there are times, such as
in a map or other kind of composite CRDT, where an individual value should
be last-write-wins, but we'd like to preserve causality between all the
properties.
This is one of the most simple CRDT's possible.
"""
alias __MODULE__, as: Reg
@type t :: %Reg{
value: term,
clock: nil | pos_integer
}
defstruct value: nil, clock: nil
@doc """
Returns a new LWWRegister CRDT.
`nil` is a new CRDT's identity value, and by default the system time in
microseconds is used as the clock value.
iex> Loom.LWWRegister.new |> Loom.LWWRegister.value
nil
"""
@spec new :: t
def new, do: %Reg{}
@doc """
Returns a new LWWRegister CRDT. Initializes to `value`.
iex> Loom.LWWRegister.new("test") |> Loom.LWWRegister.value
"test"
"""
@spec new(term) :: t
def new(value), do: new |> set(value)
@doc """
Returns a new LWWRegister CRDT. Initializes to `value` with another clock
iex> Loom.LWWRegister.new("test", 5) |> Loom.LWWRegister.value
"test"
"""
@spec new(term, pos_integer) :: t
def new(value, clock), do: new |> set(value, clock)
@doc """
Sets a value using the built-in clock
iex> alias Loom.LWWRegister, as: Reg
iex> Reg.new("test")
...> |> Reg.set("test2")
...> |> Reg.value
"test2"
"""
@spec set(t, term) :: t
def set(reg, value), do: set(reg, value, make_microtime)
@doc """
Set a value according to your own clock.
iex> alias Loom.LWWRegister, as: Reg
iex> Reg.new("test", 5)
...> |> Reg.set("test2", 10)
...> |> Reg.set("won't set.", 2)
...> |> Reg.value
"test2"
"""
@spec set(t, term, pos_integer) :: t
def set(%Reg{value: nil}, value, clock), do: %Reg{value: value, clock: clock}
def set(reg, value, clock), do: join(reg, %Reg{value: value, clock: clock})
@doc """
Joins 2 LWWRegisters
iex> alias Loom.LWWRegister, as: Reg
iex> a = Reg.new("test") |> Reg.set("test2")
iex> :timer.sleep(1)
iex> Reg.new("take over") |> Reg.join(a) |> Reg.value
"take over"
In the event that 2 have the same clock, it simply takes the biggest according
to Elixir's rules. If you want something more portable, string comparisons are
likely to be the same across languages.
iex> alias Loom.LWWRegister, as: Reg
iex> a = Reg.new("test", 10) |> Reg.set("test2", 11)
iex> b = Reg.new("take over", 11)
...> Reg.join(a,b) |> Reg.value
"test2"
"""
@spec join(t, t) :: t
def join(a, a), do: a
def join(a, %Reg{clock: nil}), do: a
def join(%Reg{clock: nil}, b), do: b
def join(%Reg{clock: c}=a, %Reg{clock: c}=b) do
if a > b, do: a, else: b
end
def join(%Reg{clock: ac}=a, %Reg{clock: bc}) when ac > bc, do: a
def join(%Reg{clock: ac}, %Reg{clock: bc}=b) when ac < bc, do: b
@doc """
Returns the natural value of the register. Can be any type, really.
"""
@spec value(t) :: term
def value(%Reg{value: value}), do: value
defp make_microtime do
{mega, sec, micro} = :os.timestamp
(mega * 1000000 + sec) * 1000000 + micro
end
end
defimpl Loom.CRDT, for: Loom.LWWRegister do
alias Loom.LWWRegister, as: Reg
@doc """
Returns a description of the operations that this CRDT takes.
Updates return a new CRDT, reads can return any natural datatype. This register
returns a value.
"""
def ops(_crdt) do
[ update: [
set: [:value],
set: [:value, :clock]
],
read: [
value: []
]
]
end
@doc """
Applies a CRDT to a counter in an abstract way.
This is for ops-based support.
iex> alias Loom.CRDT
iex> alias Loom.LWWRegister, as: Reg
iex> ctr = Reg.new |> CRDT.apply({:set, "test"}) |> CRDT.apply({:set, "testing"})
iex> CRDT.value(ctr)
"testing"
iex> alias Loom.CRDT
iex> alias Loom.LWWRegister, as: Reg
iex> ctr = Reg.new |> CRDT.apply({:set, "test", 10}) |> CRDT.apply({:set, "testing", 11})
iex> CRDT.apply(ctr, :value)
"testing"
"""
def apply(crdt, {:set, value}), do: Reg.set(crdt, value)
def apply(crdt, {:set, value, clock}), do: Reg.set(crdt, value, clock)
def apply(crdt, :value), do: Reg.value(crdt)
@doc """
Joins 2 CRDT's of the same type.
2 different types cannot mix (yet). In the future, we may be able to join
different counters and merge their semantics, as long as the datatype grows
monotonically.
iex> alias Loom.CRDT
iex> a = Loom.LWWRegister.new |> CRDT.apply({:set, "test", 10})
iex> b = Loom.LWWRegister.new |> CRDT.apply({:set, "test2", 11})
iex> CRDT.join(a,b) |> CRDT.value
"test2"
iex> alias Loom.CRDT
iex> a = Loom.LWWRegister.new("test")
iex> CRDT.join(a,a) |> CRDT.value()
"test"
"""
def join(a, %Reg{}=b), do: Reg.join(a, b)
@doc """
Returns the most natural value for a counter, an integer.
"""
def value(crdt), do: Reg.value(crdt)
end
|
lib/loom/lwwregister.ex
| 0.838944
| 0.574962
|
lwwregister.ex
|
starcoder
|
defmodule Plymio.Ast.Vorm.Vormen do
@moduledoc false
alias Plymio.Option.Utility, as: POU
##alias Plymio.Ast.Utility, as: PAU
alias Plymio.Ast.Vorm.Utility, as: PAVU
alias Plymio.Ast.Vorm.Vormen.Transform, as: PAVMT
import Plymio.Ast.Vorm.Utility, only: [
new_error_result: 1,
]
use Plymio.Ast.Vorm.Attribute
@type error :: struct
@type form :: Macro.t
@type forms :: [form]
@type index :: integer
@doc false
@spec vormen_normalise(any) :: {:ok, forms} | {:error, error}
def vormen_normalise(forms \\ [])
def vormen_normalise(forms) when is_list(forms) do
{:ok, forms}
end
def vormen_normalise(forms) do
{:ok, [forms]}
end
@doc false
@spec vormen_validate(any) :: {:ok, forms} | {:error, error}
def vormen_validate(forms \\ [])
def vormen_validate(forms) do
with {:ok, forms} <- forms |> vormen_normalise,
{:ok, _forms} = result <- forms |> PAVU.forms_validate do
result
else
{:error, _} -> new_error_result(m: "expected valid forms", v: forms)
end
end
@doc false
def vormen_index_normalise(forms, index)
def vormen_index_normalise(forms, nil) when is_list(forms) do
{:ok, nil}
end
def vormen_index_normalise(forms, index)
when is_list(forms) and is_integer(index) and index >= 0 do
{:ok, index}
end
def vormen_index_normalise(forms, index)
when is_list(forms) and is_integer(index) and index < 0 do
{:ok, length(forms) + index}
end
def vormen_index_normalise(forms, _index) when not is_list(forms) do
new_error_result(m: "forms invalid", v: forms)
end
def vormen_index_normalise(_forms, index) when not is_integer(index) do
new_error_result(m: "index invalid", v: index)
end
@spec vormen_index_validate(any, any) :: {:ok, index} | {:error, error}
defp vormen_index_validate(forms, index)
defp vormen_index_validate(forms, index)
when is_list(forms) and is_integer(index) do
with {:ok, index} <- forms |> vormen_index_normalise(index) do
index_max = length(forms) - 1
case index do
x when x >= 0 -> x
x -> index_max + x + 1
end
|> fn
ndx when ndx < 0 -> new_error_result(m: "index too small", v: ndx)
ndx when ndx > index_max -> new_error_result(m: "index too large", v: ndx)
ndx -> {:ok, ndx}
end.()
else
{:error, _} = result -> result
end
end
defp vormen_index_validate(_forms, index) when not is_integer(index) do
new_error_result(m: "index invalid", v: index)
end
defp vormen_indices_validate(forms, indices) when is_list(forms) do
indices
|> List.wrap
|> Enum.reduce_while([],
fn index, indices ->
case forms |> vormen_index_validate(index) do
{:ok, index} -> {:cont, [index | indices]}
{:error, _} = result -> {:halt, result}
end
end)
|> case do
{:error, _} = result -> result
indices -> {:ok, indices |> Enum.reverse}
end
end
defp vormen_indices(forms) when is_list(forms) do
forms |> Enum.with_index |> Enum.map(&(elem(&1,1)))
end
defp vormen_indices_normalise(forms, indices)
defp vormen_indices_normalise(forms, nil) when is_list(forms) do
{:ok, forms |> vormen_indices}
end
defp vormen_indices_normalise(forms, indices) when is_list(forms) do
indices = cond do
is_integer(indices) -> indices
is_atom(indices) ->
case indices do
:first -> 0
:last -> -1
:all -> forms |> vormen_indices
x -> x
end
Keyword.keyword?(indices) -> indices |> Keyword.values
is_list(indices) -> indices
is_map(indices) -> indices |> Map.keys
true -> indices
end
|> POU.list_wrap_flat_just_uniq
forms |> vormen_indices_validate(indices)
end
defp vormen_transform(forms, opts)
defp vormen_transform(forms, []) when is_list(forms) do
{:ok, forms}
end
defp vormen_transform(forms, opts) when is_list(forms) and is_list(opts) do
with {:ok, reduce_while_transform} <- opts |> vormen_build_reduce_while_transform do
forms
|> reduce_while_transform.()
else
{:error, _} = result -> result
end
end
@vorm_transform_types_opts %{
filter: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_filter/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_filter/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
reject: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_reject/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_reject/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
insert: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_insert/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_insert/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
replace: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_replace/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_replace/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
transform: [
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
pipe_before: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_pipe_before/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_pipe_before/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
pipe_after: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_pipe_after/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_pipe_after/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
fetch: [
{@pav_key_fun_then, &PAVMT.vormen_transform_then_fetch/3},
{@pav_key_fun_else, &PAVMT.vormen_transform_else_fetch/3},
{@pav_key_fun_initial_value, &PAVMT.vormen_transform_reduce_empty_forms_fun/1},
],
}
def vormen_create_transform(forms, form_range, transform_type, transform_opts \\ [])
def vormen_create_transform(forms, form_range, transform_type, transform_opts) do
with {:ok, type_opts} <- @vorm_transform_types_opts |> Map.fetch(transform_type) do
with true <- transform_opts |> Keyword.keyword? do
with {:ok, form_predicate} <- forms |> form_range_create_predicate(form_range) do
# order is important
all_opts = type_opts ++ transform_opts ++ [{@pav_key_fun_pred, form_predicate}]
|> Keyword.new
forms |> vormen_transform(all_opts)
else
{:error, _} = result -> result
end
else
false -> new_error_result(m: "transform opts invalid", v: transform_opts)
end
else
:error -> new_error_result(m: "transform type unknown", v: transform_type)
end
end
defp vormen_build_reduce_while_transform(opts)
defp vormen_build_reduce_while_transform(opts) when is_list(opts) do
with {:ok, reduce_initial_value_fun} <- opts |> vormen_build_reduce_while_initial_value_fun,
{:ok, reduce_while_fun} <- opts |> vormen_build_reduce_while_fun do
reduce_while_transform = fn forms ->
forms
|> Stream.with_index
|> Enum.reduce_while(reduce_initial_value_fun.(forms), reduce_while_fun)
|> case do
{:error, _} = result -> result
forms -> {:ok, forms}
end
end
{:ok, reduce_while_transform}
else
{:error, _} = result -> result
end
end
defp vormen_build_reduce_while_initial_value_fun(opts)
defp vormen_build_reduce_while_initial_value_fun(opts) when is_list(opts) do
with {:ok, funs_opts} <- opts |> POU.opts_filter_keys([@pav_key_fun_initial_value]),
{:ok, funs_opts} <- funs_opts |> POU.opts_predicate(fn {_k,v} -> is_function(v,1) end) do
fun_reduce_initial_value = funs_opts
|> Keyword.fetch!(@pav_key_fun_initial_value)
{:ok,fun_reduce_initial_value}
else
{:error, _} = result -> result
end
end
defp vormen_build_reduce_while_fun(opts)
defp vormen_build_reduce_while_fun(opts) when is_list(opts) do
with {:ok, pred_the_else_fun} <- opts |> vormen_build_pred_then_else_fun do
fun = fn {_form, _index} = fi, forms ->
fi
|> pred_the_else_fun.(forms)
|> case do
{:error, _} = result -> {:halt, result}
{:ok, forms} -> {:cont, forms}
forms -> {:cont, forms}
end
end
{:ok, fun}
else
{:error, _} = result -> result
end
end
defp vormen_build_pred_then_else_fun(opts)
defp vormen_build_pred_then_else_fun(opts) when is_list(opts) do
with {:ok, opts} <- opts |> POU.opts_validate,
{:ok, _} <- opts |> POU.opts_avoir_keys(@pav_keys_pred_then_else),
{:ok, pred_opts} <- opts |> POU.opts_filter_keys(@pav_keys_pred),
{:ok, _opts} <- pred_opts |> POU.opts_predicate(fn {_k,v} -> is_function(v,1) end),
{:ok, then_else_opts} <- opts |> POU.opts_filter_keys(@pav_keys_then_else),
{:ok, _opts} <- then_else_opts |> POU.opts_predicate(fn {_k,v} -> is_function(v,3) end) do
# could be nil
new_forms = opts |> Keyword.get(@pav_key_new_forms)
fun_pred = opts |> Keyword.fetch!(@pav_key_fun_pred)
fun_then = opts |> Keyword.fetch!(@pav_key_fun_then)
fun_else = opts |> Keyword.fetch!(@pav_key_fun_else)
fun = fn {_form, _index} = fi, forms ->
fi
|> fun_pred.()
|> case do
true -> fun_then.(fi, new_forms, forms)
_ -> fun_else.(fi, new_forms, forms)
end
|> case do
{:ok, _} = result -> result
{:error, _} = result -> result
forms -> {:ok, forms}
end
end
{:ok, fun}
else
{:error, _} = result -> result
end
end
def form_range_create_predicate(forms, range)
# range == nil => all forms
def form_range_create_predicate(forms, nil) when is_list(forms) do
{:ok, fn _ -> true end}
end
# range = arity 1 fun
def form_range_create_predicate(forms, range)
when is_list(forms) and is_function(range, 1) do
{:ok, range}
end
def form_range_create_predicate(forms, range) when is_list(forms) do
with {:ok, indices} <- forms |> vormen_indices_normalise(range) do
range_map = indices |> Map.new(fn k -> {k, nil} end)
fun = fn
{_form,index} ->
range_map |> Map.has_key?(index)
x ->
raise ArgumentError, message: "predicated expected {form,index}; got #{inspect x}"
end
{:ok, fun}
else
{:error, _} = result -> result
end
end
def form_range_create_predicate(forms, index) do
new_error_result(m: "forms or index invalid", e: "forms #{inspect forms}; index #{inspect index}")
end
end
|
lib/ast/vorm/vormen/vormen.ex
| 0.715921
| 0.504089
|
vormen.ex
|
starcoder
|
defmodule ElixirDbf.Row do
@moduledoc """
ElixirDbf row module
"""
def decode(string, :utf8), do: string
def decode(string, encoding) when is_atom(encoding), do: Exconv.to_unicode!(string, encoding)
def decode(string, [from, to]) do
string
|> Exconv.to_unicode!(from)
|> Exconv.to_unicode!(to)
end
def read(stream, chars, encoding), do: stream |> IO.binread(chars) |> decode(encoding)
def parse_column(_column, :eof), do: nil
def parse_column(column, field) do
case column.type do
:string ->
value = field |> String.trim_trailing(" ")
{column.name, value}
:numeric ->
raw_string = field |> String.trim_leading(" ")
value =
case raw_string do
"" -> nil
_ ->
case Integer.parse(raw_string) do
{number, ""} -> number
_ -> String.to_float(raw_string)
end
end
{column.name, value}
:integer ->
value =
case String.trim_leading(field, " ") do
"" -> nil
str_int ->
integer_size = column.field_size * 8
<<integer::little-integer-size(integer_size)>> = str_int
integer
end
{column.name, value}
:float ->
value =
case String.trim_leading(field, " ") do
"" -> nil
str_flt -> String.to_float(str_flt)
end
{column.name, value}
:date ->
value =
case Timex.parse(field, "{YYYY}{0M}{D}") do
{:ok, datetime} -> Timex.to_date(datetime)
{:error, _} -> nil
end
{column.name, value}
_ -> {column.name, column.type, field}
end
end
def parse(stream, columns, encoding) do
case read(stream, 1, encoding) do
" " ->
for column <- columns do
field = read(stream, column.field_size, encoding)
parse_column(column, field)
end
_ -> :error
end
end
end
|
lib/elixir_dbf/row.ex
| 0.677154
| 0.558387
|
row.ex
|
starcoder
|
defmodule Mongo.Ecto.NormalizedQuery do
@moduledoc false
defmodule ReadQuery do
@moduledoc false
defstruct coll: nil,
pk: nil,
params: {},
query: %{},
projection: %{},
order: %{},
fields: [],
database: nil,
opts: []
end
defmodule WriteQuery do
@moduledoc false
defstruct coll: nil, query: %{}, command: %{}, database: nil, opts: []
end
defmodule CommandQuery do
@moduledoc false
defstruct command: nil, database: nil, opts: []
end
defmodule CountQuery do
@moduledoc false
defstruct coll: nil, pk: nil, fields: [], query: %{}, database: nil, opts: []
end
defmodule AggregateQuery do
@moduledoc false
defstruct coll: nil, pk: nil, fields: [], pipeline: [], database: nil, opts: []
end
alias Ecto.Query
alias Mongo.Ecto.Conversions
defmacrop is_op(op) do
quote do
is_atom(unquote(op)) and unquote(op) != :^
end
end
def all(original, params) do
check_query!(original, [:limit, :offset])
from = from(original)
params = List.to_tuple(params)
query = query(original, params, from)
case projection(original, params, from) do
{:count, fields} ->
count(original, query, fields, params, from)
{:find, projection, fields} ->
find_all(original, query, projection, fields, params, from)
{:aggregate, pipeline, fields} ->
aggregate(original, query, pipeline, fields, params, from)
end
end
defp find_all(original, query, projection, fields, params, {coll, _, pk} = from) do
%ReadQuery{
coll: coll,
pk: pk,
params: params,
query: query,
fields: fields,
projection: projection,
order: order(original, from),
database: original.prefix,
opts: limit_skip(original, params, from)
}
end
defp count(original, query, fields, params, {coll, _, pk} = from) do
%CountQuery{
coll: coll,
query: query,
opts: limit_skip(original, params, from),
pk: pk,
fields: fields,
database: original.prefix
}
end
defp aggregate(original, query, pipeline, fields, params, {coll, _, pk} = from) do
pipeline =
limit_skip(original, params, from)
|> Enum.map(fn
{:limit, value} -> ["$limit": value]
{:skip, value} -> ["$skip": value]
end)
|> Kernel.++(pipeline)
pipeline = if query != %{}, do: [["$match": query] | pipeline], else: pipeline
%AggregateQuery{
coll: coll,
pipeline: pipeline,
pk: pk,
fields: fields,
database: original.prefix
}
end
def update_all(%Query{} = original, params) do
check_query!(original)
params = List.to_tuple(params)
from = from(original)
coll = coll(from)
query = query(original, params, from)
command = command(:update, original, params, from)
%WriteQuery{coll: coll, query: query, command: command, database: original.prefix}
end
def update(%{source: coll, prefix: prefix, schema: schema}, fields, filter) do
command = command(:update, fields, primary_key(schema))
query = query(filter, primary_key(schema))
%WriteQuery{coll: coll, query: query, database: prefix, command: command}
end
def delete_all(%Query{} = original, params) do
check_query!(original)
params = List.to_tuple(params)
from = from(original)
coll = coll(from)
query = query(original, params, from)
%WriteQuery{coll: coll, query: query, database: original.prefix}
end
def delete(%{source: coll, schema: schema, prefix: prefix}, filter) do
query = query(filter, primary_key(schema))
%WriteQuery{coll: coll, query: query, database: prefix}
end
def insert(%{source: coll, schema: schema, prefix: prefix}, document) do
command = command(:insert, document, primary_key(schema))
%WriteQuery{coll: coll, command: command, database: prefix}
end
def command(command, opts) do
%CommandQuery{command: command, database: Keyword.get(opts, :database, nil)}
end
defp from(%Query{from: %{source: {coll, model}}}) do
{coll, model, primary_key(model)}
end
defp from(%Query{from: %{source: %Ecto.SubQuery{}}}) do
raise ArgumentError, "MongoDB does not support subqueries"
end
@aggregate_ops [:min, :max, :sum, :avg]
@special_ops [:count | @aggregate_ops]
defp projection(%Query{select: nil}, _params, _from), do: {:find, %{}, []}
defp projection(
%Query{select: %Query.SelectExpr{fields: fields} = _select} = query,
params,
from
) do
projection(fields, params, from, query, %{}, [])
end
defp projection([], _params, _from, _query, pacc, facc), do: {:find, pacc, Enum.reverse(facc)}
defp projection(
[{:&, _, [0, nil, _]} = field | rest],
params,
{_, nil, _} = from,
query,
_pacc,
facc
) do
# Model is nil, we want empty projection, but still extract fields
facc =
case projection(rest, params, from, query, %{}, [field | facc]) do
{:find, _, facc} ->
facc
_other ->
error(
query,
"select clause supports only one of the special functions: `count`, `min`, `max`"
)
end
{:find, %{}, facc}
end
defp projection(
[{:&, _, [0, nil, _]} = field | rest],
params,
{_, model, pk} = from,
query,
pacc,
facc
) do
pacc = Enum.into(model.__schema__(:fields), pacc, &{field(&1, pk), true})
facc = [field | facc]
projection(rest, params, from, query, pacc, facc)
end
defp projection(
[{:&, _, [0, fields, _]} = field | rest],
params,
{_, _model, pk} = from,
query,
pacc,
facc
) do
pacc = Enum.into(fields, pacc, &{field(&1, pk), true})
facc = [field | facc]
projection(rest, params, from, query, pacc, facc)
end
defp projection([%Ecto.Query.Tagged{value: value} | rest], params, from, query, pacc, facc) do
{_, model, pk} = from
pacc = Enum.into(model.__schema__(:fields), pacc, &{field(&1, pk), true})
facc = [{:field, pk, value} | facc]
projection(rest, params, from, query, pacc, facc)
end
defp projection([{{:., _, [_, name]}, _, _} = field | rest], params, from, query, pacc, facc) do
{_, _, pk} = from
# Projections use names as in database, fields as in models
pacc = Map.put(pacc, field(name, pk), true)
facc = [{:field, name, field} | facc]
projection(rest, params, from, query, pacc, facc)
end
# Keyword and interpolated fragments
defp projection([{:fragment, _, [args]} = field | rest], params, from, query, pacc, facc)
when is_list(args) or tuple_size(args) == 3 do
{_, _, pk} = from
pacc =
args
|> value(params, pk, query, "select clause")
|> Enum.into(pacc)
facc = [field | facc]
projection(rest, params, from, query, pacc, facc)
end
defp projection([{:count, _, [_]} = field], _params, _from, _query, pacc, _facc)
when pacc == %{} do
{:count, [{:field, :value, field}]}
end
defp projection([{:count, _, [name, :distinct]} = field], _params, from, query, _pacc, _facc) do
{_, _, pk} = from
name = field(name, pk, query, "select clause")
field = {:field, :value, field}
{:aggregate, [["$group": [_id: "$#{name}"]], ["$group": [_id: nil, value: ["$sum": 1]]]],
[field]}
end
defp projection([{op, _, [name]} = field], _params, from, query, pacc, _facc)
when pacc == %{} and op in @aggregate_ops do
{_, _, pk} = from
name = field(name, pk, query, "select clause")
field = {:field, :value, field}
{:aggregate, [["$group": [_id: nil, value: [{"$#{op}", "$#{name}"}]]]], [field]}
end
defp projection([{op, _, _} | _rest], _params, _from, query, _pacc, _facc)
when op in @special_ops do
error(
query,
"select clause supports only one of the special functions: `count`, `min`, `max`"
)
end
defp projection([{op, _, _} | _rest], _params, _from, query, _pacc, _facc) when is_op(op) do
error(query, "select clause")
end
defp limit_skip(%Query{limit: limit, offset: offset} = query, params, {_, _, pk}) do
[
limit: offset_limit(limit, params, pk, query, "limit clause"),
skip: offset_limit(offset, params, pk, query, "offset clause")
]
|> Enum.reject(&is_nil(elem(&1, 1)))
end
defp coll({coll, _model, _pk}), do: coll
defp query(%Query{wheres: wheres} = query, params, {_coll, _model, pk}) do
wheres
|> Enum.map(fn %Query.BooleanExpr{expr: expr} ->
pair(expr, params, pk, query, "where clause")
end)
|> :lists.flatten()
|> merge_keys(query, "where clause")
|> map_unless_empty
end
defp query(filter, pk) do
filter |> value(pk, "where clause") |> map_unless_empty
end
defp order(%Query{order_bys: order_bys} = query, {_coll, _model, pk}) do
order_bys
|> Enum.flat_map(fn %Query.QueryExpr{expr: expr} ->
Enum.map(expr, &order_by_expr(&1, pk, query))
end)
|> map_unless_empty
end
defp command(:update, %Query{updates: updates} = query, params, {_coll, _model, pk}) do
updates
|> Enum.flat_map(fn %Query.QueryExpr{expr: expr} ->
Enum.map(expr, fn {key, value} ->
value = value |> value(params, pk, query, "update clause")
{update_op(key, query), value}
end)
end)
|> merge_keys(query, "update clause")
end
defp command(:insert, document, pk) do
document
|> value(pk, "insert command")
|> map_unless_empty
end
defp command(:update, values, pk) do
["$set": values |> value(pk, "update command") |> map_unless_empty]
end
# Currently unused
# defp both_nil(nil, nil), do: true
# defp both_nil(_, _), do: false
defp offset_limit(nil, _params, _pk, _query, _where), do: nil
defp offset_limit(%Query.QueryExpr{expr: expr}, params, pk, query, where),
do: value(expr, params, pk, query, where)
defp primary_key(nil), do: nil
defp primary_key(schema) do
case schema.__schema__(:primary_key) do
[] ->
nil
[pk] ->
pk
keys ->
raise ArgumentError,
"MongoDB adapter does not support multiple primary keys " <>
"and #{inspect(keys)} were defined in #{inspect(schema)}."
end
end
defp order_by_expr({:asc, expr}, pk, query), do: {field(expr, pk, query, "order clause"), 1}
defp order_by_expr({:desc, expr}, pk, query), do: {field(expr, pk, query, "order clause"), -1}
@maybe_disallowed ~w(distinct lock joins group_bys havings limit offset)a
@query_empty_values %Ecto.Query{} |> Map.take(@maybe_disallowed)
defp check_query!(query, allow \\ []) do
@query_empty_values
|> Map.drop(allow)
|> Enum.each(fn {element, empty} ->
check(
Map.get(query, element),
empty,
query,
"MongoDB adapter does not support #{element} clause in this query"
)
end)
end
defp check(expr, expr, _, _), do: nil
defp check(_, _, query, message), do: raise(Ecto.QueryError, query: query, message: message)
defp value(expr, pk, place) do
case Conversions.from_ecto_pk(expr, pk) do
{:ok, value} -> value
:error -> error(place)
end
end
defp value(expr, params, pk, query, place) do
case Conversions.inject_params(expr, params, pk) do
{:ok, value} -> value
:error -> error(query, place)
end
end
defp field(pk, pk), do: :_id
defp field(key, _), do: key
defp field({{:., _, [{:&, _, [0]}, field]}, _, []}, pk, _query, _place), do: field(field, pk)
defp field(_expr, _pk, query, place), do: error(query, place)
defp map_unless_empty([]), do: %{}
defp map_unless_empty(list), do: list
defp merge_keys(keyword, query, place) do
Enum.reduce(keyword, %{}, fn {key, value}, acc ->
Map.update(acc, key, value, fn
old when is_list(old) -> old ++ value
_ -> error(query, place)
end)
end)
end
update = [set: :"$set", inc: :"$inc", push: :"$push", pull: :"$pull"]
Enum.map(update, fn {key, op} ->
def update_op(unquote(key), _query), do: unquote(op)
end)
def update_op(_, query), do: error(query, "update clause")
binary_ops = [>: :"$gt", >=: :"$gte", <: :"$lt", <=: :"$lte", !=: :"$ne", in: :"$in"]
bool_ops = [and: :"$and", or: :"$or"]
@binary_ops Keyword.keys(binary_ops)
@bool_ops Keyword.keys(bool_ops)
Enum.map(binary_ops, fn {op, mongo_op} ->
defp binary_op(unquote(op)), do: unquote(mongo_op)
end)
Enum.map(bool_ops, fn {op, mongo_op} ->
defp bool_op(unquote(op)), do: unquote(mongo_op)
end)
defp mapped_pair_or_value({op, _, _} = tuple, params, pk, query, place) when is_op(op) do
List.wrap(pair(tuple, params, pk, query, place))
end
defp mapped_pair_or_value(value, params, pk, query, place) do
value(value, params, pk, query, place)
end
defp pair({op, _, args}, params, pk, query, place) when op in @bool_ops do
args = Enum.map(args, &mapped_pair_or_value(&1, params, pk, query, place))
{bool_op(op), args}
end
defp pair({:is_nil, _, [expr]}, _, pk, query, place) do
{field(expr, pk, query, place), nil}
end
defp pair({:==, _, [left, right]}, params, pk, query, place) do
{field(left, pk, query, place), value(right, params, pk, query, place)}
end
defp pair({:in, _, [left, {:^, _, [0, 0]}]}, _params, pk, query, place) do
{field(left, pk, query, place), ["$in": []]}
end
defp pair({:in, _, [left, {:^, _, [ix, len]}]}, params, pk, query, place) do
args =
ix..(ix + len - 1)
|> Enum.map(&elem(params, &1))
|> Enum.map(&value(&1, params, pk, query, place))
{field(left, pk, query, place), ["$in": args]}
end
defp pair({:in, _, [lhs, {{:., _, _}, _, _} = rhs]}, params, pk, query, place) do
{field(rhs, pk, query, place), value(lhs, params, pk, query, place)}
end
defp pair({op, _, [left, right]}, params, pk, query, place) when op in @binary_ops do
{field(left, pk, query, place), [{binary_op(op), value(right, params, pk, query, place)}]}
end
defp pair({:not, _, [{:in, _, [left, {:^, _, [ix, len]}]}]}, params, pk, query, place) do
args =
ix..(ix + len - 1)
|> Enum.map(&elem(params, &1))
|> Enum.map(&value(&1, params, pk, query, place))
{field(left, pk, query, place), ["$nin": args]}
end
defp pair({:not, _, [{:in, _, [left, right]}]}, params, pk, query, place) do
{field(left, pk, query, place), ["$nin": value(right, params, pk, query, place)]}
end
defp pair({:not, _, [{:is_nil, _, [expr]}]}, _, pk, query, place) do
{field(expr, pk, query, place), ["$ne": nil]}
end
defp pair({:not, _, [{:==, _, [left, right]}]}, params, pk, query, place) do
{field(left, pk, query, place), ["$ne": value(right, params, pk, query, place)]}
end
defp pair({:not, _, [expr]}, params, pk, query, place) do
{:"$not", [pair(expr, params, pk, query, place)]}
end
defp pair({:^, _, _} = expr, params, pk, query, place) do
case value(expr, params, pk, query, place) do
bool when is_boolean(bool) ->
boolean_query_hack_pair(bool)
_value ->
error(query, place)
end
end
# Keyword or embedded fragment
defp pair({:fragment, _, [args]}, params, pk, query, place)
when is_list(args) or tuple_size(args) == 3 do
value(args, params, pk, query, place)
end
defp pair(bool, _params, _pk, _query, _place) when is_boolean(bool) do
boolean_query_hack_pair(bool)
end
defp pair(_expr, _params, _pk, query, place) do
error(query, place)
end
defp boolean_query_hack_pair(bool) do
{:_id, ["$exists": bool]}
end
defp error(query, place) do
raise Ecto.QueryError,
query: query,
message: "Invalid expression for MongoDB adapter in #{place}"
end
defp error(place) do
raise ArgumentError, "Invalid expression for MongoDB adapter in #{place}"
end
end
|
lib/mongo_ecto/normalized_query.ex
| 0.809615
| 0.518485
|
normalized_query.ex
|
starcoder
|
defmodule Contex.Mapping do
@moduledoc """
Mappings generalize the process of associating columns in the dataset to the
elements of a plot. As part of creating a mapping, these associations are
validated to confirm that a column has been assigned to each of the graphical
elements that are necessary to draw the plot, and that all of the assigned columns
exist in the dataset.
The Mapping struct stores accessor functions for the assigned columns, which
are used to retrieve values for those columns from the dataset to support
drawing the plot. The accessor functions have the same name as the associated
plot element; this allows plot-drawing functions to access data based that plot's
required elements without knowing anything about the dataset.
"""
alias Contex.{Dataset}
defstruct [:column_map, :accessors, :expected_mappings, :dataset]
@type t() :: %__MODULE__{}
@doc """
Given expected mappings for a plot and a map associating plot elements with dataset
columns, creates a Mapping struct for the plot that stores accessor functions for
each element and returns a mapping. Raises if the map does not include all
required elements of the specified plot type or if the dataset columns are not
present in the dataset.
Expected mappings are passed as a keyword list where each plot element is one
of the following:
* `:exactly_one` - indicates that the plot needs exactly one of these elements, for
example a column representing categories in a barchart.
* `:one_more_more` - indicates that the plot needs at least one of these elements,
for example y columns in a point plot
* `:zero_or_one` - indicates that the plot will use one of these elements if it
is available, for example a fill colour column in a point plot
* `:zero_or_more` - indicates that plot will use one or more of these elements if it
is available
For example, the expected mappings for a barchart are represented as follows:
`[category_col: :exactly_one, value_cols: :one_or_more]`
and for a point point:
`[ x_col: :exactly_one, y_cols: :one_or_more, fill_col: :zero_or_one]`
Provided mappings are passed as a map with the map key matching the expected mapping
and the map value representing the columns in the underlying dataset. So for a barchart
the column mappings may be:
`%{category_col: "Quarter", value_cols: ["Australian Sales", "Kiwi Sales", "South African Sales"]}`
If columns are not specified for optional plot elements, an accessor function
that returns `nil` is created for those elements.
"""
@spec new(keyword(), map(), Contex.Dataset.t()) :: Contex.Mapping.t()
def new(expected_mappings, provided_mappings, %Dataset{} = dataset) do
column_map = check_mappings(provided_mappings, expected_mappings, dataset)
mapped_accessors = accessors(dataset, column_map)
%__MODULE__{
column_map: column_map,
expected_mappings: expected_mappings,
dataset: dataset,
accessors: mapped_accessors
}
end
@doc """
Given a plot that already has a mapping and a new map of elements to columns,
updates the mapping accordingly and returns the plot.
"""
@spec update(Contex.Mapping.t(), map()) :: Contex.Mapping.t()
def update(
%__MODULE__{expected_mappings: expected_mappings, dataset: dataset} = mapping,
updated_mappings
) do
column_map =
Map.merge(mapping.column_map, updated_mappings)
|> check_mappings(expected_mappings, dataset)
mapped_accessors = accessors(dataset, column_map)
%{mapping | column_map: column_map, accessors: mapped_accessors}
end
defp check_mappings(nil, expected_mappings, %Dataset{} = dataset) do
check_mappings(default_mapping(expected_mappings, dataset), expected_mappings, dataset)
end
defp check_mappings(mappings, expected_mappings, %Dataset{} = dataset) do
add_nil_for_optional_mappings(mappings, expected_mappings)
|> validate_mappings(expected_mappings, dataset)
end
defp default_mapping(_expected_mappings, %Dataset{data: [first | _rest]} = _dataset)
when is_map(first) do
raise(ArgumentError, "Can not create default data mappings with Map data.")
end
defp default_mapping(expected_mappings, %Dataset{} = dataset) do
Enum.with_index(expected_mappings)
|> Enum.reduce(%{}, fn {{expected_mapping, expected_count}, index}, mapping ->
column_name = Dataset.column_name(dataset, index)
column_names =
case expected_count do
:exactly_one -> column_name
:one_or_more -> [column_name]
:zero_or_one -> nil
:zero_or_more -> [nil]
end
Map.put(mapping, expected_mapping, column_names)
end)
end
defp add_nil_for_optional_mappings(mappings, expected_mappings) do
Enum.reduce(expected_mappings, mappings, fn {expected_mapping, expected_count}, mapping ->
case expected_count do
:zero_or_one ->
if mapping[expected_mapping] == nil,
do: Map.put(mapping, expected_mapping, nil),
else: mapping
:zero_or_more ->
if mapping[expected_mapping] == nil,
do: Map.put(mapping, expected_mapping, [nil]),
else: mapping
_ ->
mapping
end
end)
end
defp validate_mappings(provided_mappings, expected_mappings, %Dataset{} = dataset) do
# TODO: Could get more precise by looking at how many mapped dataset columns are expected
check_required_columns!(expected_mappings, provided_mappings)
confirm_columns_in_dataset!(dataset, provided_mappings)
provided_mappings
end
defp check_required_columns!(expected_mappings, column_map) do
required_mappings = Enum.map(expected_mappings, fn {k, _v} -> k end)
provided_mappings = Map.keys(column_map)
missing_mappings = missing_columns(required_mappings, provided_mappings)
case missing_mappings do
[] ->
:ok
mappings ->
mapping_string = Enum.map_join(mappings, ", ", &"\"#{&1}\"")
raise "Required mapping(s) #{mapping_string} not included in column map."
end
end
defp confirm_columns_in_dataset!(dataset, column_map) do
available_columns = [nil | Dataset.column_names(dataset)]
missing_columns =
Map.values(column_map)
|> List.flatten()
|> missing_columns(available_columns)
case missing_columns do
[] ->
:ok
columns ->
column_string = Enum.map_join(columns, ", ", &"\"#{&1}\"")
raise "Column(s) #{column_string} in the column mapping not in the dataset."
end
end
defp missing_columns(required_columns, provided_columns) do
MapSet.new(required_columns)
|> MapSet.difference(MapSet.new(provided_columns))
|> MapSet.to_list()
end
defp accessors(dataset, column_map) do
Enum.map(column_map, fn {mapping, columns} ->
{mapping, accessor(dataset, columns)}
end)
|> Enum.into(%{})
end
defp accessor(dataset, columns) when is_list(columns) do
Enum.map(columns, &accessor(dataset, &1))
end
defp accessor(_dataset, nil) do
fn _row -> nil end
end
defp accessor(dataset, column) do
Dataset.value_fn(dataset, column)
end
end
|
lib/chart/mapping.ex
| 0.891864
| 0.977479
|
mapping.ex
|
starcoder
|
defmodule Firebirdex.Query do
alias Firebirdex.Result
@type t :: %__MODULE__{
ref: reference() | nil,
name: iodata(),
statement: iodata(),
stmt: tuple()
}
defstruct name: "",
ref: nil,
stmt: nil,
statement: nil,
stmt: nil
defimpl DBConnection.Query do
def parse(query, _opts) do
query
end
def describe(query, _opts) do
query
end
def encode(%{stmt: nil} = query, _params, _opts) do
raise ArgumentError, "query #{inspect(query)} has not been prepared"
end
def encode(_query, params, _opts) do
params
end
defp convert_value({_, :long, scale, _, _}, {_name, v}) when scale < 0 do
Decimal.new(to_string(v))
end
defp convert_value({_, :short, scale, _, _}, {_name, v}) when scale < 0 do
Decimal.new(to_string(v))
end
defp convert_value({_, :int64, scale, _, _}, {_name, v}) when scale < 0 do
Decimal.new(to_string(v))
end
defp convert_value({_, :quad, scale, _, _}, {_name, v}) when scale < 0 do
Decimal.new(to_string(v))
end
defp convert_value({_, :date, _, _, _}, {_name, {year, month, day}}) do
{:ok, v} = Date.new(year, month, day)
v
end
defp convert_value({_, :time, _, _, _}, {_name, {hour, minute, second, microsecond}}) do
{:ok, v} = Time.new(hour, minute, second, microsecond)
v
end
defp convert_value({_, :timestamp, _, _, _}, {_name, {{year, month, day}, {hour, minute, second, microsecond}}}) do
{:ok, v} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond)
v
end
defp convert_value({_, :time_tz, _, _, _}, {_name, {{hour, minute, second, microsecond}, tz, offset}}) do
d = Date.utc_today
{:ok, dt} = NaiveDateTime.new(d.year, d.month, d.day, hour, minute, second)
dttz1 = DateTime.from_naive!(dt, tz)
{:ok, dttz2} = DateTime.shift_zone(dttz1, offset)
{:ok, v} = Time.new(dttz2.hour, dttz2.minute, dttz2.second, microsecond)
{v, offset}
end
defp convert_value({_, :timestamp_tz, _, _, _}, {_name, {{year, month, day}, {hour, minute, second, microsecond}, tz, offset}}) do
{:ok, dt} = NaiveDateTime.new(year, month, day, hour, minute, second, microsecond)
dttz = DateTime.from_naive!(dt, tz)
{:ok, v} = DateTime.shift_zone(dttz, offset)
v
end
defp convert_value({_, _, _, _, _}, {_name, v}) do
v
end
defp convert_row(row, [], []) do
Enum.reverse(row)
end
defp convert_row(row, rest_columns, rest_row) do
[c | rest_columns] = rest_columns
[v | rest_row] = rest_row
convert_row([convert_value(c, v) | row], rest_columns, rest_row)
end
def decode(query, result, _opts) do
columns = :efirebirdsql_protocol.columns(query.stmt)
rows = if result.rows == nil do
result.rows
else
Enum.map(result.rows, &(convert_row([], columns, &1)))
end
%Result{result | rows: rows, num_rows: result.num_rows}
end
end
defimpl String.Chars do
def to_string(%{statement: statement}) do
IO.iodata_to_binary(statement)
end
end
end
|
lib/firebirdex/query.ex
| 0.654453
| 0.46563
|
query.ex
|
starcoder
|
defmodule HTS221.Server do
@moduledoc """
Server for setting up and using the HTS221
When controlling the HTS221 there are few setup steps and other checks you
may want to do. Also, to keep the transport layer working often times this
will call for a GenServer. This server is meant to provide common
functionality around setup and an expose a higher level API for application
use.
This process can be added to your supervision tree.
```
def MyApp do
use Application
def start(_type, _args) do
children = [
... children ...
{HTS221.Server, transport: {HTS221.Transport.I2C, [bus_name: "i2c-1"]}
... children ...
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start(children, opts)
end
end
```
If you a custom transport implementation then the `:transport` argument to
this server will look different.
If the HTS221 is not detected this server will log that it was not found and
return `:ignore` on the `GenServer.init/1` callback. This is useful if your FW
will run on devices with different hardware attached and don't want the device
availability to crash your application supervisor.
"""
use GenServer
require Logger
alias HTS221.{CTRLReg1, CTRLReg2, Transport}
defmodule State do
@moduledoc false
alias HTS221.{Calibration, Transport}
@type t() :: %__MODULE__{
calibration: Calibration.t(),
transport: Transport.t()
}
defstruct calibration: nil, transport: nil
end
@type name() :: any()
@typedoc """
Arguments to the `HTS221.Server`
- `:transport` - the transport implementation module and optional arguments
- `:name` - this is a named GenServer that either uses what you pass in or
`HTS221.Server`
"""
@type arg() ::
{:transport, Transport.impl() | {Transport.impl(), [Transport.arg()]}} | {:name, name()}
@type temperature_opt() :: {:scale, HTS221.scale()}
@spec start_link([arg()]) :: GenServer.on_start()
def start_link(args) do
name = Keyword.get(args, :name, __MODULE__)
GenServer.start_link(__MODULE__, args, name: name)
end
@doc """
Read the temperature value
By default this is read in celsius you can the `:scale` option to change the
unit the temperature is measured in. See `HTS221.scale()` for more
information.
"""
@spec temperature(name(), [temperature_opt()]) :: {:ok, float()} | {:error, any()}
def temperature(server, opts \\ []) do
GenServer.call(server, {:temperature, opts})
end
@doc """
Read the humidity level
This is measured in percent.
"""
@spec humidity(name()) :: {:ok, float()} | {:error, any()}
def humidity(server) do
GenServer.call(server, :humidity)
end
@doc """
Get the transport the server is using
This is useful if you need to debug the registers using the functions in
`HTS221` module.
"""
@spec transport(name()) :: Transport.t()
def transport(server) do
GenServer.call(server, :transport)
end
@impl GenServer
def init(args) do
{transport_impl, transport_args} = get_transport(args)
with {:ok, transport} <- Transport.init(transport_impl, transport_args),
:ok = reboot_registers(transport),
{:ok, calibration} <- HTS221.read_calibration(transport),
:ok = setup_ctrl_reg1(transport) do
{:ok, %State{calibration: calibration, transport: transport}}
else
{:error, :device_not_available} ->
Logger.info("HTS221 not detected on device")
:ignore
error ->
{:stop, error}
end
end
@impl GenServer
def handle_call({:temperature, opts}, _from, state) do
%State{calibration: calibration, transport: transport} = state
case HTS221.read_temperature(transport) do
{:ok, temp} ->
{:reply, {:ok, HTS221.calculate_temperature(temp, calibration, opts)}, state}
error ->
{:reply, error, state}
end
end
def handle_call(:humidity, _from, state) do
%State{calibration: calibration, transport: transport} = state
case HTS221.read_humidity(transport) do
{:ok, hum} ->
{:reply, {:ok, HTS221.calculate_humidity(hum, calibration)}, state}
error ->
{:reply, error, state}
end
end
def handle_call(:transport, _from, state) do
{:reply, state.transport, state}
end
defp get_transport(args) do
case Keyword.fetch!(args, :transport) do
{_, _} = transport -> transport
transport when is_atom(transport) -> {transport, []}
end
end
defp reboot_registers(transport) do
# Sometimes registers need to be rebooted to ensure
# calibration is correctly being read from the non-volatile
# memory.
ctrl_reg2 = %CTRLReg2{
boot: :reboot_memory
}
HTS221.write_register(transport, ctrl_reg2)
end
defp setup_ctrl_reg1(transport) do
# setup HTS221 to continuously read datasets
ctrl_reg1 = %CTRLReg1{
power_mode: :active,
block_data_update: :wait_for_reading,
output_data_rate: :one_Hz
}
HTS221.write_register(transport, ctrl_reg1)
end
end
|
lib/hts221/server.ex
| 0.88263
| 0.851706
|
server.ex
|
starcoder
|
defmodule Graphmath.Vec2 do
@moduledoc """
This is the 2D mathematics library for graphmath.
This submodule handles vectors stored as a tuple.
"""
@type vec2 :: {float, float}
@doc """
`create()` creates a zero vec2.
It will return a tuple of the form {0.0,0.0}.
`create()` creates a zeroed `vec2`.
It takes no arguments.
It returns a `vec2` of the form `{ 0.0, 0.0 }`.
"""
@spec create() :: vec2
def create() do
{0.0, 0.0}
end
@doc """
`create(x,y)` creates a `vec2` of value (x,y).
`x` is the first element of the `vec3` to be created.
`y` is the second element of the `vec3` to be created.
It returns a `vec2` of the form `{x,y}`.
"""
@spec create(float, float) :: vec2
def create(x, y) do
{x, y}
end
@doc """
`create(vec)` creates a `vec2` from a list of 2 or more floats.
`vec` is a list of 2 or more floats.
It returns a `vec2` of the form `{x,y}`, where `x` and `y` are the first three elements in `vec`.
"""
@spec create([float]) :: vec2
def create(vec) do
[x, y | _] = vec
{x, y}
end
@doc """
`add( a, b)` adds a vec2 (a) to a vec2 (b).
It returns a tuple of the form { ax + bx, ay + by }.
`add( a, b )` adds two `vec2`s.
`a` is the first `vec2`.
`b` is the second `vec2`.
It returns a `vec2` of the form { a<sub>x</sub> + b<sub>x</sub>, a<sub>y</sub> + b<sub>y</sub> }.
"""
@spec add(vec2, vec2) :: vec2
def add(a, b) do
{x, y} = a
{u, v} = b
{x + u, y + v}
end
@doc """
`subtract(a, b )` subtracts one `vec2` from another `vec2`.
`a` is the `vec2` minuend.
`b` is the `vec2` subtrahend.
It returns a `vec2` of the form { a<sub>x</sub> - b<sub>x</sub>, a<sub>y</sub> - b<sub>y</sub> }.
(the terminology was found [here](http://mathforum.org/library/drmath/view/58801.html)).
"""
@spec subtract(vec2, vec2) :: vec2
def subtract(a, b) do
{x, y} = a
{u, v} = b
{x - u, y - v}
end
@doc """
`multiply( a, b)` mulitplies element-wise a vec2 (a) by a vec2 (b).
It returns a tuple of the form { ax*bx, ay*by }.
`multiply( a, b )` multiplies element-wise a `vec2` by another `vec2`.
`a` is the `vec2` multiplicand.
`b` is the `vec2` multiplier.
It returns a `vec2` of the form { a<sub>x</sub>b<sub>x</sub>, a<sub>y</sub>b<sub>y</sub> }.
"""
@spec multiply(vec2, vec2) :: vec2
def multiply(a, b) do
{x, y} = a
{u, v} = b
{x * u, y * v}
end
@doc """
`scale( a, scale )` uniformly scales a `vec2`.
`a` is the `vec2` to be scaled.
`scale` is the float to scale each element of `a` by.
It returns a tuple of the form { a<sub>x</sub>scale, a<sub>y</sub>scale }.
"""
@spec scale(vec2, float) :: vec2
def scale(a, scale) do
{x, y} = a
{x * scale, y * scale}
end
@doc """
`dot( a, b )` finds the dot (inner) product of one `vec2` with another `vec2`.
`a` is the first `vec2`.
`b` is the second `vec2`.
It returns a float of the value (a<sub>x</sub>b<sub>x</sub> + a<sub>y</sub>b<sub>y</sub> ).
"""
@spec dot(vec2, vec2) :: float
def dot(a, b) do
{x, y} = a
{u, v} = b
x * u + y * v
end
@doc """
`perp_prod( a, b )` finds the perpindicular product of one `vec2` with another `vec2`.
`a` is the first `vec2`.
`b` is the second `vec2`.
The perpindicular product is the magnitude of the cross-product between the two vectors.
It returns a float of the value (a<sub>x</sub>b<sub>y</sub> - b<sub>x</sub>a<sub>y</sub>).
"""
@spec perp_prod(vec2, vec2) :: float
def perp_prod(a, b) do
{x, y} = a
{u, v} = b
x * v - u * y
end
@doc """
`length(a)` finds the length (Eucldiean or L2 norm) of a `vec2`.
`a` is the `vec2` to find the length of.
It returns a float of the value (sqrt( a<sub>x</sub><sup>2</sup> + a<sub>y</sub><sup>2</sup>)).
"""
@spec length(vec2) :: float
def length(a) do
{x, y} = a
:math.sqrt(x * x + y * y)
end
@doc """
`length_squared(a)` finds the square of the length of a vec2 (a).
In many cases, this is sufficient for comparisions and avaoids a sqrt.
It returns a float of the value (ax*ax + ay*ay).
`length_squared(a)` finds the square of the length of a `vec2`.
`a` is the `vec2` to find the length squared of.
It returns a float of the value a<sub>x</sub><sup>2</sup> + a<sub>y</sub><sup>2</sup>.
In many cases, this is sufficient for comparisons and avoids a square root.
"""
@spec length_squared(vec2) :: float
def length_squared(a) do
{x, y} = a
x * x + y * y
end
@doc """
`length_manhattan(a)` finds the Manhattan (L1 norm) length of a `vec2`.
`a` is the `vec2` to find the Manhattan length of.
It returns a float of the value (a<sub>x</sub> + a<sub>y</sub>).
The Manhattan length is the sum of the components.
"""
@spec length_manhattan(vec2) :: float
def length_manhattan(a) do
{x, y} = a
x + y
end
@doc """
`normalize(a)` finds the unit vector with the same direction as a `vec2`.
`a` is the `vec2` to be normalized.
It returns a `vec2` of the form `{normx, normy}`.
This is done by dividing each component by the vector's magnitude.
"""
@spec normalize(vec2) :: vec2
def normalize(a) do
{x, y} = a
invmag = 1 / :math.sqrt(x * x + y * y)
{x * invmag, y * invmag}
end
@doc """
`lerp(a,b,t)` is used to linearly interpolate between two given vectors a and b along an interpolant t.
The interpolant `t` is on the domain [0,1]. Behavior outside of that is undefined.
`lerp(a,b,t)` linearly interpolates between one `vec2` and another `vec2` along an interpolant.
`a` is the starting `vec2`.
`b` is the ending `vec2`.
`t` is the interpolant float, on the domain [0,1].
It returns a `vec2` of the form (1-t)**a** - (t)**b**.
The interpolant `t` is on the domain [0,1]. Behavior outside of that is undefined.
"""
@spec lerp(vec2, vec2, float) :: vec2
def lerp(a, b, t) do
{x, y} = a
{u, v} = b
{t * u + (1 - t) * x, t * v + (1 - t) * y}
end
@doc """
`rotate(a,theta)` rotates a `vec2` CCW about the +Z axis.
`a` is the `vec2` to rotate.
`theta` is the number of radians to rotate by as a float.
This returns a `vec2`.
"""
@spec rotate(vec2, float) :: vec2
def rotate(a, theta) do
{x, y} = a
ct = :math.cos(theta)
st = :math.sin(theta)
{x * ct - y * st, x * st + y * ct}
end
@doc """
`near(a,b, distance)` checks whether two `vec2`s are within a certain distance of each other.
`a` is the first `vec2`.
`b` is the second `vec2`.
`distance` is the distance between them as a float.
"""
@spec near(vec2, vec2, float) :: boolean
def near(a, b, distance) do
{x, y} = a
{u, v} = b
dx = x - u
dy = y - v
distance > :math.sqrt(dx * dx + dy * dy)
end
@doc """
`project(a,b)` projects one `vec2` onto another `vec2`.
`a` is the first `vec2`.
`b` is the second `vec2`.
This returns a `vec2` representing the image of `a` in the direction of `b`.
"""
@spec project(vec2, vec2) :: vec2
def project(a, b) do
{x, y} = a
{u, v} = b
coeff = (x * u + y * v) / (u * u + v * v)
{u * coeff, v * coeff}
end
@doc """
`perp(a)` creates a vector perpendicular to another vector `a`.
`a` is the `vec2` to be perpindicular to.
This returns a `vec2` perpindicular to `a`, to the right of the original `a`.
"""
@spec perp(vec2) :: vec2
def perp(a) do
{x, y} = a
{-y, x}
end
@doc """
`equal(a, b)` checks to see if two vec2s a and b are equivalent.
`a` is the `vec2`.
`b` is the `vec2`.
It returns true if the vectors have equal elements.
Note that due to precision issues, you may want to use `equal/3` instead.
"""
@spec equal(vec2, vec2) :: boolean
def equal({ax, ay}, {bx, by}) do
ax == bx and ay == by
end
@doc """
`equal(a, b, eps)` checks to see if two vec2s a and b are equivalent within some tolerance.
`a` is the `vec2`.
`b` is the `vec2`.
`eps` is the tolerance, a float.
It returns true if the vectors have equal elements within some tolerance.
"""
@spec equal(vec2, vec2, float) :: boolean
def equal({ax, ay}, {bx, by}, eps) do
abs(ax - bx) <= eps and
abs(ay - by) <= eps
end
@doc """
`random_circle()` generates a point on the unit circle.
It returns a vec2 with distance 1 from the origin.
"""
@spec random_circle() :: vec2
def random_circle() do
pi = :math.pi()
theta = :rand.uniform()
{:math.cos(2.0 * pi * theta), :math.sin(2.0 * pi * theta)}
end
@doc """
`random_disc()` generates a point on or inside the unit circle using the method [here](http://mathworld.wolfram.com/DiskPointPicking.html).
It returns a vec2 with distance 1 from the origin.
"""
@spec random_disc() :: vec2
def random_disc() do
pi = :math.pi()
theta = :rand.uniform()
rho = :math.sqrt(:rand.uniform())
{rho * :math.cos(2.0 * pi * theta), rho * :math.sin(2.0 * pi * theta)}
end
@doc """
`random_box()` generates a point on or inside the unit box [0,1]x[0,1].
"""
@spec random_box() :: vec2
def random_box(), do: {:rand.uniform(), :rand.uniform()}
@doc """
`negate(v)` creates a vector whose elements are opposite in sign to `v`.
"""
@spec negate(vec2) :: vec2
def negate({x, y}), do: {-1.0 * x, -1.0 * y}
@doc """
`weighted_sum(a, v1, b, v2)` returns the sum of vectors `v1` and `v2` having been scaled by `a` and `b`, respectively.
"""
@spec weighted_sum(number, vec2, number, vec2) :: vec2
def weighted_sum(a, {x, y}, b, {u, v}) do
{a * x + b * u, a * y + b * v}
end
@doc """
`minkowski_distance(a,b,order)` returns the [Minkowski distance](https://en.wikipedia.org/wiki/Minkowski_distance) between two points `a` and b` of order `order`.
Order 1 is equivalent to manhattan distance, 2 to Euclidean distance, otherwise all bets are off.
"""
@spec minkowski_distance( vec2, vec2, number) :: number
def minkowski_distance({x1,y1}, {x2,y2}, order) do
adx = abs(x2 - x1)
ady = abs(y2 - y1)
temp = :math.pow(adx, order) + :math.pow(ady, order)
:math.pow(temp, 1 / order)
end
@doc """
`chebyshev_distance(a,b)` returns the [Chebyshev distance](https://en.wikipedia.org/wiki/Chebyshev_distance) between two points `a` and b`.
"""
@spec chebyshev_distance( vec2, vec2) :: number
def chebyshev_distance({x1,y1}, {x2,y2}) do
adx = abs(x2 - x1)
ady = abs(y2 - y1)
max(adx, ady)
end
@doc """
`p_norm(v,order)` returns the [P-norm](https://en.wikipedia.org/wiki/Lp_space#The_p-norm_in_finite_dimensions) of vector `v` of order `order`.
`order` needs to be greater than or equal to 1 to define a [metric space](https://en.wikipedia.org/wiki/Metric_space).
`order` 1 is equivalent to manhattan distance, 2 to Euclidean distance, otherwise all bets are off.
"""
@spec p_norm( vec2, number) :: number
def p_norm({x, y}, order) do
ax = abs(x)
ay = abs(y)
temp = :math.pow(ax, order) + :math.pow(ay, order)
:math.pow(temp, 1 / order)
end
end
|
lib/graphmath/Vec2.ex
| 0.949189
| 0.986726
|
Vec2.ex
|
starcoder
|
defmodule Bitmap.Integer do
@moduledoc """
Bitmap behaviour implementation using arbitrarily sized integers.
"""
use Bitwise
import Kernel, except: [to_string: 1]
@behaviour Bitmap
@typedoc """
A typed map which holds the integer bitmap as defined by the module struct
"""
@type t :: %__MODULE__{}
@type argt :: non_neg_integer | [any] | Range.t
@type index :: non_neg_integer
@type bit :: 1 | 0
@set_bit 1
@unset_bit 0
defstruct size: 0, data: 0
@doc """
Creates and returns a bitmap of size corresponding to the `argument` passed.
If `argument` is
- integer, size of bitmap is equal to the `argument`
- range, size of bitmap is equal to the length of `argument`
- list, size of bitmap is equal to the length of `argument`
> Note: All bits are set to 0 by default
## Examples
iex> Bitmap.Integer.new(400)
%Bitmap.Integer{data: 0, size: 400}
iex> Bitmap.Integer.new([1,2,3,4,5])
%Bitmap.Integer{data: 0, size: 5}
iex> Bitmap.Integer.new(1..25)
%Bitmap.Integer{data: 0, size: 25}
"""
@spec new(argt) :: __MODULE__.t
def new(argument)
def new(size) when is_integer(size) and size >= 0, do: %__MODULE__{size: size}
def new(list) when is_list(list), do: new(length(list))
def new(a..b), do: new(abs(b - a) + 1)
@doc """
Returns the bit value at `index` in the bitmap
## Examples
iex> bm = Bitmap.Integer.new(5)
iex> Bitmap.Integer.at(bm, 2)
0
iex> bm = Bitmap.Integer.set(bm, 2)
iex> Bitmap.Integer.at(bm, 2)
1
"""
@spec at(__MODULE__.t, index) :: bit
def at(bitmap, index) do
(bitmap.data >>> index) &&& 1
end
@doc """
Returns a boolean representing whether the bit at position `index`
is set or not
## Examples
iex> bm = Bitmap.Integer.new(5) |> Bitmap.Integer.set(1) |> Bitmap.Integer.set(3)
iex> Bitmap.Integer.set?(bm, 1)
true
iex> Bitmap.Integer.set?(bm, 4)
false
"""
@spec set?(__MODULE__.t, index) :: boolean
def set?(bitmap, index) do
at(bitmap, index) == @set_bit
end
@doc """
Sets the bit at `index` in the bitmap and returns the new bitmap
Index can also have a value `:all` in which case all bits
will be set like in set_all
## Examples
iex> Bitmap.Integer.set(Bitmap.Integer.new(5), 3)
%Bitmap.Integer{data: 8, size: 5}
iex> Bitmap.Integer.set(Bitmap.Integer.new(1..10), 2)
%Bitmap.Integer{data: 4, size: 10}
"""
@spec set(__MODULE__.t, index) :: __MODULE__.t
def set(%__MODULE__{size: size} = bitmap, index) when index >= 0 and index < size do
%__MODULE__{bitmap | data: (bitmap.data ||| (@set_bit <<< index))}
end
def set(bitmap, :all), do: set_all(bitmap)
@doc """
Set all bits in the bitmap and returns a new bitmap
## Examples
iex> Bitmap.Integer.set_all(Bitmap.Integer.new(10))
%Bitmap.Integer{data: 1023, size: 10}
iex> Bitmap.Integer.set_all(Bitmap.Integer.new(100))
%Bitmap.Integer{data: 1267650600228229401496703205375, size: 100}
"""
@spec set_all(__MODULE__.t) :: __MODULE__.t
def set_all(bitmap) do
import Bitmap.Utils, only: [pow: 2]
%__MODULE__{bitmap | data: pow(2, bitmap.size) - 1}
end
@doc """
Returns a boolean representing whether the bit at position `index`
is unset or not
## Examples
iex> bm = Bitmap.Integer.new(5) |> Bitmap.Integer.set(1) |> Bitmap.Integer.set(3)
iex> Bitmap.Integer.unset?(bm, 1)
false
iex> Bitmap.Integer.unset?(bm, 4)
true
"""
@spec unset?(__MODULE__.t, index) :: boolean
def unset?(bitmap, index) do
at(bitmap, index) == @unset_bit
end
@doc """
Unsets the bit at `index` in the bitmap and returns the new bitmap
Index can also have a value `:all` in which case all bits
will be unset like in unset_all
## Examples
iex> bm = Bitmap.Integer.new(10) |> Bitmap.Integer.set(4) |> Bitmap.Integer.set(8)
iex> Bitmap.Integer.unset(bm, 4)
%Bitmap.Integer{data: 256, size: 10}
iex> Bitmap.Integer.unset(bm, 8)
%Bitmap.Integer{data: 16, size: 10}
"""
@spec unset(__MODULE__.t, index) :: __MODULE__.t
def unset(%__MODULE__{size: size} = bitmap, index) when index >=0 and index < size do
%__MODULE__{bitmap | data: (bitmap.data &&& ~~~(@set_bit <<< index))}
end
def unset(bitmap, :all), do: unset_all(bitmap)
@doc """
Unsets all bits in the bitmap and returns a new bitmap
## Examples
iex> bm = Bitmap.Integer.new(10) |> Bitmap.Integer.set(4) |> Bitmap.Integer.set(8)
iex> Bitmap.Integer.unset_all(bm)
%Bitmap.Integer{data: 0, size: 10}
"""
@spec unset_all(__MODULE__.t) :: __MODULE__.t
def unset_all(bitmap) do
%__MODULE__{bitmap | data: 0}
end
@doc """
Toggles the bit at `index` in the bitmap and returns the new bitmap
i.e. it sets the bit to 1 if it was 0 or sets the bit to 0 if it was 1
Index can also have a value `:all` in which case all bits will be toggled
like in toggle_all
## Examples
iex> bm = Bitmap.Integer.new(10) |> Bitmap.Integer.set(4) |> Bitmap.Integer.set(8)
iex> Bitmap.Integer.toggle(bm, 3)
%Bitmap.Integer{data: 280, size: 10}
iex> Bitmap.Integer.toggle(bm, 6)
%Bitmap.Integer{data: 336, size: 10}
"""
@spec toggle(__MODULE__.t, index) :: __MODULE__.t
def toggle(bitmap, index) do
%__MODULE__{bitmap | data: (bitmap.data ^^^ (@set_bit <<< index))}
end
@doc """
Toggles all bits in the bitmap and returns a new bitmap
## Examples
iex> bm = Bitmap.Integer.new(10) |> Bitmap.Integer.set(4) |> Bitmap.Integer.set(8)
iex> Bitmap.Integer.toggle_all(bm)
%Bitmap.Integer{data: -273, size: 10}
"""
@spec toggle_all(__MODULE__.t) :: __MODULE__.t
def toggle_all(bitmap) do
%__MODULE__{bitmap | data: ~~~bitmap.data}
end
@doc """
Returns the string representation of the bitmap
Note: This can be very long for huge bitmaps.
"""
@spec to_string(__MODULE__.t) :: String.t
def to_string(bitmap) do
to_string(bitmap.data, bitmap.size, <<>>)
end
@doc """
Inspects the bitmap and returns the string representation of the bitmap
Note: This can be very long for huge bitmaps.
"""
@spec inspect(__MODULE__.t) :: __MODULE__.t
def inspect(bitmap) do
bitmap |> to_string |> IO.inspect
end
defp to_string(_data, 0, acc), do: acc
defp to_string(data, size, acc) do
case data &&& 1 do
1 -> to_string(data >>> 1, size - 1, "1" <> acc)
0 -> to_string(data >>> 1, size - 1, "0" <> acc)
end
end
end
|
lib/bitmap/integer.ex
| 0.9491
| 0.676593
|
integer.ex
|
starcoder
|
defmodule AdventOfCode.Y2020.Day20.V1 do
alias AdventOfCode.Y2020.Day20.V1.Edge
alias AdventOfCode.Y2020.Day20.V1.Parser
alias AdventOfCode.Y2020.Day20.V1.Matrix
def run_both() do
relations =
"2020/day20.txt"
|> Parser.parse()
|> build_relations()
{solve1(relations), solve2(relations)}
end
def run1(file_name) do
Parser.parse(file_name)
|> build_relations()
|> solve1()
end
def run2(file_name) do
Parser.parse(file_name)
|> build_relations()
|> solve2()
end
def solve1(relations) do
relations
|> Enum.filter(fn {_, tile} -> is_corner?(tile) end)
|> Enum.map(fn {id, _} -> String.to_integer(id) end)
|> Enum.reduce(fn id, acc -> id * acc end)
end
def solve2(relations) do
relations
|> Map.new()
|> orient_tiles()
|> Map.values()
|> construct_map()
|> find_sea_monsters()
end
def build_relations(l), do: build_relations(l, [], [])
def build_relations([], _, result), do: result
def build_relations([current | rest], processed, result) do
%{id: id, edges: edges} = current
neighbours =
matching_edges(edges, rest ++ processed)
|> Enum.map(fn {edge, %{id: id}} -> {edge, id} end)
build_relations(rest, [current | processed], [
{id, %{current | neighbours: neighbours}} | result
])
end
def construct_map(tiles) do
width = tiles |> Enum.count() |> :math.sqrt() |> round()
map =
tiles
|> Enum.sort(fn %{pos: {ax, ay}}, %{pos: {bx, by}} ->
width * ay + ax <= width * by + bx
end)
|> Enum.group_by(fn %{pos: {_, y}} -> y end)
|> Map.values()
|> construct_map_rows([])
|> Enum.reverse()
|> Enum.concat()
Matrix.apply_ops([:hflip], map)
end
def construct_map_rows([], result), do: Enum.reverse(result)
def construct_map_rows([columns | rest], acc) do
row =
columns
|> Enum.map(fn %{matrix: matrix} -> Matrix.inner(matrix) end)
|> Enum.reduce(fn col, acc -> Matrix.concat(acc, col) end)
construct_map_rows(rest, [row | acc])
end
@all_orientations [
:hflip,
:vflip,
:hflip,
:rotate90,
:hflip,
:vflip,
:hflip
]
def find_sea_monsters(map) do
width = List.first(map) |> String.length()
hashes =
map
|> Enum.join()
|> String.graphemes()
|> Enum.filter(fn s -> s == "#" end)
|> Enum.count()
sea_monsters =
@all_orientations
|> find_sea_monsters(map, monster_regex(width))
hashes - sea_monsters * 15
end
def find_sea_monsters([], _map, _monster), do: 0
def find_sea_monsters([op | rest], map, monster) do
case find_in_map(map, monster) do
0 -> find_sea_monsters(rest, Matrix.apply_op(map, op), monster)
n -> n
end
end
def monster_regex(width) do
{:ok, pattern} =
"#.a#....##....##....###a.#..#..#..#..#..#"
|> String.replace("a", String.duplicate(".", width - 20))
|> Regex.compile()
pattern
end
def find_in_map(map, monster) do
map
|> Enum.chunk_every(3, 1, :discard)
|> Enum.map(&Enum.join/1)
|> Enum.map(®ex_hunt(&1, monster))
|> Enum.sum()
end
def regex_hunt(str, pattern) do
case Regex.scan(pattern, str, return: :index) do
[[{n, _}]] -> 1 + regex_hunt(replace_at(str, n), pattern)
[] -> 0
end
end
def replace_at(str, n) do
str
|> String.split_at(n)
|> (fn {a, b} -> [a, String.graphemes(b) |> Enum.drop(1) |> Enum.join()] end).()
|> Enum.join("0")
end
def orient_tiles(relations) do
{_, tile} =
relations
|> Enum.find(fn {_, tile} -> is_corner?(tile) end)
tile = %{tile | pos: {0, 0}}
orient_tiles([tile], Map.put(relations, tile.id, tile))
end
def orient_tiles([], relations), do: relations
def orient_tiles([tile | rest], relations) do
updated_tiles =
tile.neighbours
|> Enum.map(fn {tile_edge, neighbour_id} ->
{tile_edge, Map.get(relations, neighbour_id)}
end)
|> Enum.filter(fn {_, neighbour} -> neighbour.pos == nil end)
|> Enum.map(fn {tile_edge, neighbour} ->
connect_neighbour(tile, tile_edge, neighbour)
end)
updated_relations =
updated_tiles |> Enum.reduce(relations, fn t, acc -> Map.put(acc, t.id, t) end)
orient_tiles(updated_tiles ++ rest, updated_relations)
end
def connect_neighbour(tile, {dir, edge}, n_tile) do
n_pos = coordinate_from_tile_in_direction(tile, dir)
{{n_dir, n_edge}, _} =
n_tile.neighbours
|> Enum.find(fn {_, id} -> tile.id == id end)
tile_dir = Edge.opposite(dir)
operations = Edge.ops({n_dir, n_edge}, {tile_dir, edge})
new_edges = n_tile.edges |> Enum.map(fn e -> Edge.apply_ops(operations, e) end)
new_neighbours =
n_tile.neighbours |> Enum.map(fn {e, nid} -> {Edge.apply_ops(operations, e), nid} end)
new_matrix = Matrix.apply_ops(operations, n_tile.matrix)
%{
n_tile
| pos: n_pos,
neighbours: new_neighbours,
operations: operations,
matrix: new_matrix,
edges: new_edges
}
# Figure how to get n_tiles tile.id-edge to n_edge and align with edge
end
def is_corner?(tile), do: Enum.count(tile.neighbours) == 2
def coordinate_from_tile_in_direction(%{pos: {x, y}}, dir) do
case dir do
:bottom -> {x, y - 1}
:top -> {x, y + 1}
:left -> {x - 1, y}
:right -> {x + 1, y}
end
end
def matching_edges(edges, rest) do
for edge <- edges,
tile <- rest,
tile_matching_edge?(tile, edge),
do: {edge, tile}
end
def tile_matching_edge?(%{edges: edges}, {_, edge}) do
match_against = [edge, String.reverse(edge)]
not (edges
|> Enum.filter(fn {_, e} -> e in match_against end)
|> Enum.empty?())
end
end
defmodule AdventOfCode.Y2020.Day20.V1.Parser do
def parse(file) do
AdventOfCode.Helpers.Data.read_from_file_no_split(file)
|> String.split("\n\n", trim: true)
|> Enum.map(&parse_tile/1)
end
def parse_tile(data) do
[id_string | matrix_data] =
data
|> String.split("\n", trim: true)
id = parse_tile_id(id_string)
%{
id: id,
edges: parse_matrix_edges(matrix_data),
matrix: matrix_data,
neighbours: [],
pos: nil,
operations: []
}
end
def parse_tile_id(tile_id_string) do
Regex.run(~r/^Tile\s(\d+)\:/, tile_id_string, capture: :all_but_first) |> hd
end
def parse_matrix_edges(data) do
top = List.first(data)
bottom = List.last(data)
[left, right] =
Enum.map(data, fn line -> {String.slice(line, 0, 1), String.slice(line, -1, 1)} end)
|> Enum.reduce([[], []], fn {s, e}, [l, r] ->
[[s | l], [e | r]]
end)
|> Enum.map(fn l -> l |> Enum.reverse() |> Enum.join() end)
[
{:top, top},
{:right, right},
{:bottom, bottom},
{:left, left}
]
end
end
defmodule AdventOfCode.Y2020.Day20.V1.Edge do
def ops(rotating, target) do
ops(rotating, target, [])
|> Enum.reverse()
end
def ops({r_dir, r_str}, {t_dir, t_str}, ops) when r_dir == t_dir do
if r_str == t_str do
ops
else
case r_dir do
:top -> [:hflip | ops]
:bottom -> [:hflip | ops]
:left -> [:vflip | ops]
:right -> [:vflip | ops]
end
end
end
def ops(rotating, target, ops) do
new_rotating = rotate90(rotating)
ops(new_rotating, target, [:rotate90 | ops])
end
def apply_ops([], edge), do: edge
def apply_ops([h | r], edge) do
new_edge =
case h do
:vflip -> vflip(edge)
:hflip -> hflip(edge)
:rotate90 -> rotate90(edge)
end
apply_ops(r, new_edge)
end
def vflip({dir, str}) do
case dir do
:top -> {:bottom, str}
:left -> {:left, String.reverse(str)}
:bottom -> {:top, str}
:right -> {:right, String.reverse(str)}
end
end
def hflip({dir, str}) do
case dir do
:left -> {:right, str}
:top -> {:top, String.reverse(str)}
:right -> {:left, str}
:bottom -> {:bottom, String.reverse(str)}
end
end
def rotate90({dir, str}) do
case dir do
:left -> {:top, String.reverse(str)}
:top -> {:right, str}
:right -> {:bottom, String.reverse(str)}
:bottom -> {:left, str}
end
end
def opposite(dir) do
case dir do
:top -> :bottom
:bottom -> :top
:left -> :right
:right -> :left
end
end
end
defmodule AdventOfCode.Y2020.Day20.V1.Matrix do
def apply_ops([], matrix), do: matrix
def apply_ops([h | r], matrix) do
new_matrix = apply_op(matrix, h)
apply_ops(r, new_matrix)
end
def apply_op(rows, :rotate90) do
rotate(rows)
end
def apply_op(rows, :hflip) do
rows
|> Enum.map(&String.reverse/1)
end
def apply_op(rows, :vflip) do
rows
|> Enum.reverse()
end
def rotate(rows) do
rows
|> Enum.map(&String.graphemes/1)
|> rotate([])
end
def rotate([[] | _], new) do
new
|> Enum.reverse()
|> Enum.map(&Enum.reverse/1)
|> Enum.map(&Enum.join/1)
end
def rotate(rows, new) do
first = Enum.flat_map(rows, fn r -> Enum.take(r, 1) end)
rest = Enum.map(rows, fn r -> Enum.drop(r, 1) end)
rotate(rest, [first | new])
end
def inner(data) do
data
|> slice_middle()
|> Stream.map(fn row ->
row
|> String.graphemes()
|> slice_middle()
|> Enum.join()
end)
|> Enum.to_list()
end
defp slice_middle(stream) do
stream
|> Stream.drop(1)
|> Stream.drop(-1)
end
def concat(a, b), do: concat(a, b, [])
def concat([], [], result), do: Enum.reverse(result)
def concat([a_head | a_rest], [b_head | b_rest], acc) do
concat(a_rest, b_rest, [a_head <> b_head | acc])
end
def print_two([], []) do
IO.puts("-------------------------")
end
def print_two([a | a_rest], [b | b_rest]) do
IO.puts("#{a} #{b}")
print_two(a_rest, b_rest)
end
def print(matrix) do
IO.puts("--------")
Enum.each(matrix, fn row -> IO.puts(row) end)
IO.puts("--------")
matrix
end
end
|
lib/2020/day20-v1.ex
| 0.654343
| 0.55646
|
day20-v1.ex
|
starcoder
|
defmodule TheFuzz.Similarity.WeightedLevenshtein do
@moduledoc """
This module contains function to calculate the weighted levenshtein distance
between 2 given strings.
"""
require IEx
@behaviour TheFuzz.StringMetric
@default_delete_cost 1
@default_insert_cost 1
@default_replace_cost 1
@doc """
Calculates the weighted levenshtein distance between the given strings with
the costs of insert, delete, replace as 1.
## Examples
iex> TheFuzz.Similarity.WeightedLevenshtein.compare("kitten", "sitting")
3
iex> TheFuzz.Similarity.WeightedLevenshtein.compare("sunday", "saturday")
3
"""
def compare(a, b) when byte_size(a) == 0 or byte_size(b) == 0, do: nil
def compare(a, b) when a == b, do: 0
def compare(a, b) do
compare(a, b, %{
delete: @default_delete_cost,
insert: @default_insert_cost,
replace: @default_replace_cost
})
end
@doc """
Calculates the weighted levenshtein distance between the given strings with
costs for insert, delete and replace provided as a Map in the third argument.
## Examples
iex> weights = %{delete: 10, insert: 0.1, replace: 1}
iex> TheFuzz.Similarity.WeightedLevenshtein.compare("book", "back", weights)
2
iex> weights = %{delete: 10, insert: 1, replace: 1}
iex> TheFuzz.Similarity.WeightedLevenshtein.compare("clms blvd", "columbus boulevard", weights)
9
"""
def compare(a, b, _) when byte_size(a) == 0 or byte_size(b) == 0, do: nil
def compare(a, b, _) when a == b, do: 0
def compare(a, b, %{} = weights) do
distance(a |> String.to_charlist(), b |> String.to_charlist(), weights)
end
defp store_result(key, result, cache) do
{result, Map.put(cache, key, result)}
end
defp distance(a, b, weights) do
distance(a, b, weights, Map.new()) |> elem(0)
end
defp distance(a, [] = b, %{delete: delete_cost}, cache) do
store_result({a, b}, delete_cost * length(a), cache)
end
defp distance([] = a, b, %{insert: insert_cost}, cache) do
store_result({a, b}, insert_cost * length(b), cache)
end
defp distance([x | rest1], [x | rest2], weights, cache) do
distance(rest1, rest2, weights, cache)
end
defp distance(
[_ | rest1] = a,
[_ | rest2] = b,
%{
delete: delete_cost,
insert: insert_cost,
replace: replace_cost
} = weights,
cache
) do
case Map.has_key?(cache, {a, b}) do
true ->
{Map.get(cache, {a, b}), cache}
false ->
{l1, c1} = distance(a, rest2, weights, cache)
{l2, c2} = distance(rest1, b, weights, c1)
{l3, c3} = distance(rest1, rest2, weights, c2)
min = :lists.min([l1 + insert_cost, l2 + delete_cost, l3 + replace_cost])
store_result({a, b}, min, c3)
end
end
end
|
lib/the_fuzz/similarity/weighted_levenshtein.ex
| 0.874299
| 0.619011
|
weighted_levenshtein.ex
|
starcoder
|
defmodule Resx.Producers.Transform do
use Resx.Producer
alias Resx.Resource
alias Resx.Resource.Reference
defp to_ref(reference = %Reference{}), do: { :ok, reference }
defp to_ref(uri) when is_binary(uri), do: URI.decode(uri) |> URI.parse |> to_ref
defp to_ref(%URI{ scheme: "resx-transform", path: path }) do
String.split(path, ",", trim: true)
|> get_stages
|> case do
{ modules = [_|_], { :ok, uri } } -> build_ref(modules, uri)
{ _, _ } -> { :error, { :invalid_reference, "data is not base64" } }
reason -> { :error, { :invalid_reference, reason } }
end
|> to_ref
end
defp to_ref(error = { :error, _ }), do: error
defp to_ref(_), do: { :error, { :invalid_reference, "not a transformation reference" } }
defp build_ref([], base), do: base
defp build_ref([{ module, options }|modules], base) do
build_ref(modules, %Reference{
adapter: __MODULE__,
repository: { module, options, base },
integrity: nil
})
end
defp get_stages(path, modules \\ [])
defp get_stages([], _), do: "missing transformation"
defp get_stages([_], []), do: "missing transformation"
defp get_stages([data], modules), do: { modules, Base.decode64(data) }
defp get_stages([module|path], modules) do
{ module, options } = case String.split(module, ":") do
[module] -> { module, [] }
[module, options] ->
options = case Base.decode64(options) do
{ :ok, options } ->
try do
:erlang.binary_to_term(options)
rescue
_ -> "invalid transformation option"
end
_ -> "transformation option is not base64"
end
{ module, options }
end
try do
Module.safe_concat([module])
rescue
_ -> "transformation (#{module}) does not exist"
else
module -> get_stages(path, [{ module, options }|modules])
end
end
@impl Resx.Producer
def schemes(), do: ["resx-transform"]
@impl Resx.Producer
def open(reference, opts \\ []) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { transformer, options, reference } } } ->
case Resource.open(reference, opts) do
{ :ok, resource } -> Resx.Transformer.apply(resource, transformer, options)
error -> error
end
error -> error
end
end
@impl Resx.Producer
def stream(reference, opts \\ []) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { transformer, options, reference } } } ->
case Resource.stream(reference, opts) do
{ :ok, resource } -> Resx.Transformer.apply(resource, transformer, options)
error -> error
end
error -> error
end
end
@impl Resx.Producer
def exists?(reference) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { _, _, reference } } } -> Resource.exists?(reference)
error -> error
end
end
@impl Resx.Producer
def alike?(a, b) do
with { :a, { :ok, %Reference{ repository: { transformer, options, reference_a } } } } <- { :a, to_ref(a) },
{ :b, { :ok, %Reference{ repository: { ^transformer, ^options, reference_b } } } } <- { :b, to_ref(b) } do
Resource.alike?(reference_a, reference_b)
else
_ -> false
end
end
@impl Resx.Producer
def source(reference) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { _, _, source } } } -> { :ok, source }
error -> error
end
end
defp format_options([]), do: []
defp format_options(options), do: [":", :erlang.term_to_binary(options) |> Base.encode64]
defp to_uri(reference, transformations \\ [])
defp to_uri(%Reference{ repository: { transformer, options, reference = %Reference{ adapter: __MODULE__ } } }, transformations), do: to_uri(reference, [transformations, [[inspect(transformer)|format_options(options)], ","]])
defp to_uri(%Reference{ repository: { transformer, options, reference } }, transformations) do
case Resource.uri(reference) do
{ :ok, uri } ->
uri =
[
"resx-transform:",
transformations,
[inspect(transformer)|format_options(options)],
",",
Base.encode64(uri)
]
|> IO.iodata_to_binary
|> URI.encode
{ :ok, uri }
error -> error
end
end
@impl Resx.Producer
def resource_uri(reference) do
case to_ref(reference) do
{ :ok, reference } -> to_uri(reference)
error -> error
end
end
@impl Resx.Producer
def resource_attribute(reference, field) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { _, _, reference } } } -> Resource.attribute(reference, field)
error -> error
end
end
@impl Resx.Producer
def resource_attributes(reference) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { _, _, reference } } } -> Resource.attributes(reference)
error -> error
end
end
@impl Resx.Producer
def resource_attribute_keys(reference) do
case to_ref(reference) do
{ :ok, %Reference{ repository: { _, _, reference } } } -> Resource.attribute_keys(reference)
error -> error
end
end
end
|
lib/resx/producers/transform.ex
| 0.720467
| 0.480235
|
transform.ex
|
starcoder
|
defmodule Socket.SSL do
@moduledoc """
This module allows usage of SSL sockets and promotion of TCP sockets to SSL
sockets.
## Options
When creating a socket you can pass a series of options to use for it.
* `:cert` can either be an encoded certificate or `[path:
"path/to/certificate"]`
* `:key` can either be an encoded certificate, `[path: "path/to/key"]`, `[rsa:
"rsa encoded"]` or `[dsa: "dsa encoded"]` or `[ec: "ec encoded"]`
* `:authorities` can iehter be an encoded authorities or `[path:
"path/to/authorities"]`
* `:dh` can either be an encoded dh or `[path: "path/to/dh"]`
* `:verify` can either be `false` to disable peer certificate verification,
or a keyword list containing a `:function` and an optional `:data`
* `:password` the password to use to decrypt certificates
* `:renegotiation` if it's set to `:secure` renegotiation will be secured
* `:ciphers` is a list of ciphers to allow
* `:advertised_protocols` is a list of strings representing the advertised
protocols for NPN
* `:preferred_protocols` is a list of strings representing the preferred next
protocols for NPN
You can also pass TCP options.
## Smart garbage collection
Normally sockets in Erlang are closed when the controlling process exits,
with smart garbage collection the controlling process will be the
`Socket.Manager` and it will be closed when there are no more references to
it.
"""
use Socket.Helpers
require Record
@opaque t :: port
@doc """
Get the list of supported ciphers.
"""
@spec ciphers :: [:ssl.erl_cipher_suite]
def ciphers do
:ssl.cipher_suites
end
@doc """
Get the list of supported SSL/TLS versions.
"""
@spec versions :: [tuple]
def versions do
:ssl.versions
end
@doc """
Return a proper error string for the given code or nil if it can't be
converted.
"""
@spec error(term) :: String.t
def error(code) do
case :ssl.format_error(code) do
'Unexpected error:' ++ _ ->
nil
message ->
message |> to_string
end
end
@doc """
Connect to the given address and port tuple or SSL connect the given socket.
"""
@spec connect(Socket.t | { Socket.t, :inet.port_number }) :: { :ok, t } | { :error, term }
def connect({ address, port }) do
connect(address, port)
end
def connect(socket) do
connect(socket, [])
end
@doc """
Connect to the given address and port tuple or SSL connect the given socket,
raising if an error occurs.
"""
@spec connect!(Socket.t | { Socket.t, :inet.port_number }) :: t | no_return
defbang connect(socket_or_descriptor)
@doc """
Connect to the given address and port tuple with the given options or SSL
connect the given socket with the given options or connect to the given
address and port.
"""
@spec connect({ Socket.Address.t, :inet.port_number } | Socket.t | Socket.Address.t, Keyword.t | :inet.port_number) :: { :ok, t } | { :error, term }
def connect({ address, port }, options) when options |> is_list do
connect(address, port, options)
end
def connect(wrap, options) when options |> is_list do
wrap = unless wrap |> is_port do
wrap.to_port
else
wrap
end
timeout = options[:timeout] || :infinity
options = Keyword.delete(options, :timeout)
:ssl.connect(wrap, options, timeout)
end
def connect(address, port) when port |> is_integer do
connect(address, port, [])
end
@doc """
Connect to the given address and port tuple with the given options or SSL
connect the given socket with the given options or connect to the given
address and port, raising if an error occurs.
"""
@spec connect!({ Socket.Address.t, :inet.port_number } | Socket.t | Socket.Address.t, Keyword.t | :inet.port_number) :: t | no_return
defbang connect(descriptor, options)
@doc """
Connect to the given address and port with the given options.
"""
@spec connect(Socket.Address.t, :inet.port_number, Keyword.t) :: { :ok, t } | { :error, term }
def connect(address, port, options) do
address = if address |> is_binary do
String.to_char_list(address)
else
address
end
timeout = options[:timeout] || :infinity
options = Keyword.delete(options, :timeout)
:ssl.connect(address, port, arguments(options), timeout)
end
@doc """
Connect to the given address and port with the given options, raising if an
error occurs.
"""
@spec connect!(Socket.Address.t, :inet.port_number, Keyword.t) :: t | no_return
defbang connect(address, port, options)
@doc """
Create an SSL socket listening on an OS chosen port, use `local` to know the
port it was bound on.
"""
@spec listen :: { :ok, t } | { :error, term }
def listen do
listen(0, [])
end
@doc """
Create an SSL socket listening on an OS chosen port, use `local` to know the
port it was bound on, raising in case of error.
"""
@spec listen! :: t | no_return
defbang listen
@doc """
Create an SSL socket listening on an OS chosen port using the given options or
listening on the given port.
"""
@spec listen(:inet.port_number | Keyword.t) :: { :ok, t } | { :error, term }
def listen(port) when port |> is_integer do
listen(port, [])
end
def listen(options) do
listen(0, options)
end
@doc """
Create an SSL socket listening on an OS chosen port using the given options
or listening on the given port, raising in case of error.
"""
@spec listen!(:inet.port_number | Keyword.t) :: t | no_return
defbang listen(port_or_options)
@doc """
Create an SSL socket listening on the given port and using the given options.
"""
@spec listen(:inet.port_number, Keyword.t) :: { :ok, t } | { :error, term }
def listen(port, options) do
options = Keyword.put(options, :mode, :passive)
options = Keyword.put_new(options, :reuse, true)
:ssl.listen(port, arguments(options))
end
@doc """
Create an SSL socket listening on the given port and using the given options,
raising in case of error.
"""
@spec listen!(:inet.port_number, Keyword.t) :: t | no_return
defbang listen(port, options)
@doc """
Accept a connection from a listening SSL socket or start an SSL connection on
the given client socket.
"""
@spec accept(Socket.t | t) :: { :ok, t } | { :error, term }
def accept(self) do
accept(self, [])
end
@doc """
Accept a connection from a listening SSL socket or start an SSL connection on
the given client socket, raising if an error occurs.
"""
@spec accept!(Socket.t | t) :: t | no_return
defbang accept(socket)
@doc """
Accept a connection from a listening SSL socket with the given options or
start an SSL connection on the given client socket with the given options.
"""
@spec accept(Socket.t, Keyword.t) :: { :ok, t } | { :error, term }
def accept(sock, options) when sock |> Record.is_record(:sslsocket) do
timeout = options[:timeout] || :infinity
case :ssl.transport_accept(sock, timeout) do
{ :ok, sock } ->
result = if options[:mode] == :active do
:ssl.setopts(sock, [{ :active, true }])
else
:ok
end
if result == :ok do
result = sock |> handshake(timeout: timeout)
if result == :ok do
{ :ok, sock }
else
result
end
else
result
end
error ->
error
end
end
def accept(wrap, options) when wrap |> is_port do
timeout = options[:timeout] || :infinity
options = Keyword.delete(options, :timeout)
:ssl.ssl_accept(wrap, arguments(options), timeout)
end
@doc """
Accept a connection from a listening SSL socket with the given options or
start an SSL connection on the given client socket with the given options,
raising if an error occurs.
"""
@spec accept!(Socket.t, t | Keyword.t) :: t | no_return
defbang accept(socket, options)
@doc """
Execute the handshake; useful if you want to delay the handshake to make it
in another process.
"""
@spec handshake(t) :: :ok | { :error, term }
@spec handshake(t, Keyword.t) :: :ok | { :error, term }
def handshake(sock, options \\ []) when sock |> Record.is_record(:sslsocket) do
timeout = options[:timeout] || :infinity
:ssl.ssl_accept(sock, timeout)
end
@doc """
Execute the handshake, raising if an error occurs; useful if you want to
delay the handshake to make it in another process.
"""
@spec handshake!(t) :: :ok | no_return
@spec handshake!(t, Keyword.t) :: :ok | no_return
defbang handshake(sock)
defbang handshake(sock, options)
@doc """
Set the process which will receive the messages.
"""
@spec process(t | port, pid) :: :ok | { :error, :closed | :not_owner | Error.t }
def process(sock, pid) when sock |> Record.is_record(:sslsocket) do
:ssl.controlling_process(sock, pid)
end
@doc """
Set the process which will receive the messages, raising if an error occurs.
"""
@spec process!(t | port, pid) :: :ok | no_return
def process!(sock, pid) do
case process(sock, pid) do
:ok ->
:ok
:closed ->
raise RuntimeError, message: "the socket is closed"
:not_owner ->
raise RuntimeError, message: "the current process isn't the owner"
code ->
raise Socket.Error, reason: code
end
end
@doc """
Set options of the socket.
"""
@spec options(t | :ssl.sslsocket, Keyword.t) :: :ok | { :error, Socket.Error.t }
def options(socket, options) when socket |> Record.is_record(:sslsocket) do
:ssl.setopts(socket, arguments(options))
end
@doc """
Set options of the socket, raising if an error occurs.
"""
@spec options!(t | Socket.SSL.t | port, Keyword.t) :: :ok | no_return
defbang options(socket, options)
@doc """
Convert SSL options to `:ssl.setopts` compatible arguments.
"""
@spec arguments(Keyword.t) :: list
def arguments(options) do
options = Enum.group_by(options, fn
{ :server_name, _ } -> true
{ :cert, _ } -> true
{ :key, _ } -> true
{ :authorities, _ } -> true
{ :sni, _ } -> true
{ :dh, _ } -> true
{ :verify, _ } -> true
{ :password, _ } -> true
{ :renegotiation, _ } -> true
{ :ciphers, _ } -> true
{ :depth, _ } -> true
{ :identity, _ } -> true
{ :versions, _ } -> true
{ :alert, _ } -> true
{ :ibernate, _ } -> true
{ :session, _ } -> true
{ :advertised_protocols, _ } -> true
{ :preferred_protocols, _ } -> true
_ -> false
end)
{ local, global } = {
Map.get(options, true, []),
Map.get(options, false, [])
}
Socket.TCP.arguments(global) ++ Enum.flat_map(local, fn
{ :server_name, false } ->
[{ :server_name_indication, :disable }]
{ :server_name, name } ->
[{ :server_name_indication, String.to_charlist(name) }]
{ :cert, [path: path] } ->
[{ :certfile, path }]
{ :cert, cert } ->
[{ :cert, cert }]
{ :key, [path: path] } ->
[{ :keyfile, path }]
{ :key, [rsa: key] } ->
[{ :key, { :RSAPrivateKey, key } }]
{ :key, [dsa: key] } ->
[{ :key, { :DSAPrivateKey, key } }]
{ :key, [ec: key] } ->
[{ :key, { :ECPrivateKey, key } }]
{ :key, key } ->
[{ :key, { :PrivateKeyInfo, key } }]
{ :authorities, [path: path] } ->
[{ :cacertfile, path }]
{ :authorities, ca } ->
[{ :cacert, ca }]
{ :dh, [path: path] } ->
[{ :dhfile, path }]
{ :dh, dh } ->
[{ :dh, dh }]
{ :sni, sni } ->
Enum.flat_map(sni, fn
{ :hosts, hosts } ->
[{ :sni_hosts, Enum.map(hosts, fn { name, options } ->
{ String.to_charlist(name), arguments(options) }
end) }]
{ :function, fun } ->
[{ :sni_fun, fun }]
end)
{ :verify, false } ->
[{ :verify, :verify_none }]
{ :verify, [function: fun] } ->
[{ :verify_fun, { fun, nil } }]
{ :verify, [function: fun, data: data] } ->
[{ :verify_fun, { fun, data } }]
{ :identity, identity } ->
Enum.flat_map(identity, fn
{ :psk, value } ->
[{ :psk_identity, String.to_charlist(value) }]
{ :srp, { first, second } } ->
[{ :srp_identity, { String.to_charlist(first), String.to_charlist(second) } }]
end)
{ :password, password } ->
[{ :password, String.to_charlist(password) }]
{ :renegotiation, :secure } ->
[{ :secure_renegotiate, true }]
{ :ciphers, ciphers } ->
[{ :ciphers, ciphers }]
{ :depth, depth } ->
[{ :depth, depth }]
{ :versions, versions } ->
[{ :versions, versions }]
{ :alert, value } ->
[{ :log_alert, value }]
{ :hibernate, hibernate } ->
[{ :hibernate_after, hibernate }]
{ :session, session } ->
Enum.flat_map(session, fn
{ :reuse, true } ->
[{ :reuse_sessions, true }]
{ :reuse, false } ->
[{ :reuse_sessions, false }]
{ :reuse, fun } when fun |> is_function ->
[{ :reuse_session, fun }]
end)
{ :advertised_protocols, protocols } ->
[{ :next_protocols_advertised, protocols }]
{ :preferred_protocols, protocols } ->
[{ :client_preferred_next_protocols, protocols }]
end)
end
@doc """
Get information about the SSL connection.
"""
@spec info(t) :: { :ok, list } | { :error, term }
def info(sock) when sock |> Record.is_record(:sslsocket) do
:ssl.connection_information(sock)
end
@doc """
Get information about the SSL connection, raising if an error occurs.
"""
@spec info!(t) :: list | no_return
defbang info(sock)
@doc """
Get the certificate of the peer.
"""
@spec certificate(t) :: { :ok, String.t } | { :error, term }
def certificate(sock) when sock |> Record.is_record(:sslsocket) do
:ssl.peercert(sock)
end
@doc """
Get the certificate of the peer, raising if an error occurs.
"""
@spec certificate!(t) :: String.t | no_return
defbang certificate(sock)
@doc """
Get the negotiated protocol.
"""
@spec negotiated_protocol(t) :: String.t | nil
def negotiated_protocol(sock) when sock |> Record.is_record(:sslsocket) do
case :ssl.negotiated_protocol(sock) do
{ :ok, protocol } ->
protocol
{ :error, :protocol_not_negotiated } ->
nil
end
end
@doc """
Renegotiate the secure connection.
"""
@spec renegotiate(t) :: :ok | { :error, term }
def renegotiate(sock) when sock |> Record.is_record(:sslsocket) do
:ssl.renegotiate(sock)
end
@doc """
Renegotiate the secure connection, raising if an error occurs.
"""
@spec renegotiate!(t) :: :ok | no_return
defbang renegotiate(sock)
end
|
deps/socket/lib/socket/ssl.ex
| 0.898597
| 0.609931
|
ssl.ex
|
starcoder
|
defmodule ExHal do
@moduledoc """
Use HAL APIs with ease.
Given a resource `http://example.com/hal` whose HAL representation looks like
```json
{ "name": "Hello!",
"_links": {
"self" : { "href": "http://example.com" },
"profile": [{ "href": "http://example.com/special" },
{ "href": "http://example.com/normal" }]
}
}
```
```elixir
iex> {:ok, doc, response_header} = ExHal.client
...> |> ExHal.Client.add_headers("User-Agent": "MyClient/1.0")
...> |> ExHal.Client.get("http://example.com/hal")
%ExHal.Document{...}
```
Now we have an entry point to the API we can follow links to navigate around.
```exlixir
iex> ExHal.follow_link(doc, "profile")
{:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}
iex> ExHal.follow_link(doc, "self")
{:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}
iex> ExHal.follow_links(doc, "profile")
[{:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}, {:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}]
```
We can specify headers for each request in addition to the headers specified in the client.
```elixir
iex> ExHal.follow_links(doc, "profile",
headers: ["Accept": "application/vnd.custom.json+type"])
[{:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}, {:ok, %ExHal.Document{...}, %ExHal.ResponseHeader{...}}]
```
If we try to follow a non-existent or compound link with `ExHal.follow_link` it will return an error tuple.
```elixir
iex> ExHal.follow_link(doc, "nonexistent")
{:error, %ExHal.Error{reason: "no such link"}}
iex> ExHal.follow_link(doc, "profile", strict: true)
{:error, %ExHal.Error{reason: "multiple choices"}}
```
If we try to follow a non-existent with `ExHal.follow_links` it will return a list of error tuples.
```elixir
iex> ExHal.follow_links(doc, "nonexistent")
[{:error, %ExHal.Error{reason: "no such link"}}]
```
### Collections
Consider a resource `http://example.com/hal-collection` whose HAL representation looks like
```json
{ "_links": {
"self" : { "href": "http://example.com/hal-collection" },
"item": [{ "href": "http://example.com/beginning" },
{ "href": "http://example.com/middle" }]
"next": { "href": "http://example.com/hal-collection?p=2" }
}
}
```
and a resource `http://example.com/hal-collection?p=2` whose HAL representation looks like
```json
{ "_links": {
"self" : { "href": "http://example.com/hal-collection?p=2" },
"item": [{ "href": "http://example.com/end" }]
}
}
```
If we get the first HAL collection resource and turn it into a stream we can use all our favorite Stream functions on it.
```elixir
iex> collection = ExHal.client
...> |> ExHal.Client.add_headers("User-Agent": "MyClient/1.0")
...> |> ExHal.Client.get("http://example.com/hal-collection")
...> |> ExHal.to_stream
#Function<11.52512309/2 in Stream.resource/3>
iex> Stream.map(collection, fn follow_results ->
...> case follow_results do
...> {:ok, a_doc, %ResponseHeader{}} -> ExHal.url(a_doc)}
...> {:error, _} -> :error
...> end
...> end )
["http://example.com/beginning", "http://example.com/middle", "http://example.com/end"]
```
"""
alias ExHal.{Client, Navigation, Document}
@doc """
Returns a default client
"""
@spec client() :: Client.t()
def client do
Client.new()
end
defdelegate follow_link(a_doc, name), to: Navigation
defdelegate follow_link(a_doc, name, opts), to: Navigation
defdelegate follow_links(a_doc, name), to: Navigation
defdelegate follow_links(a_doc, name, opts), to: Navigation
defdelegate follow_links(a_doc, name, missing_link_handler, opts), to: Navigation
defdelegate post(a_doc, name, body), to: Navigation
defdelegate post(a_doc, name, body, opts), to: Navigation
defdelegate patch(a_doc, name, body), to: Navigation
defdelegate patch(a_doc, name, body, opts), to: Navigation
defdelegate link_target(a_doc, name), to: Navigation
defdelegate link_target(a_doc, name, opts), to: Navigation
defdelegate link_targets(a_doc, name), to: Navigation
defdelegate link_targets(a_doc, name, opts), to: Navigation
defdelegate link_target_lazy(a_doc, name, fun), to: Navigation
defdelegate link_target_lazy(a_doc, name, opts, fun), to: Navigation
defdelegate link_targets_lazy(a_doc, name, fun), to: Navigation
defdelegate link_targets_lazy(a_doc, name, opts, fun), to: Navigation
defdelegate fetch(a_document, name), to: Document
defdelegate get_lazy(a_doc, name, default_fun), to: Document
defdelegate get_property_lazy(a_doc, prop_name, default_fun), to: Document
defdelegate get_links_lazy(a_doc, link_name, default_fun), to: Document
defdelegate url(a_doc), to: Document
defdelegate url(a_doc, default_fn), to: Document
@doc """
Returns a stream that yields the items in the rfc 6573 collection
represented by `a_doc`.
"""
def to_stream(a_doc) do
ExHal.Collection.to_stream(a_doc)
end
end
|
lib/exhal.ex
| 0.868764
| 0.731994
|
exhal.ex
|
starcoder
|
defmodule Ravix.RQL.QueryParser do
@moduledoc false
require OK
alias Ravix.RQL.Query
@non_aliasable_fields ["id()", "count()", "sum()", :"id()", :"count()", :"sum()"]
@doc """
Receives a `Ravix.RQL.Query` object and parses it to a RQL query string
"""
@spec parse(Query.t()) :: {:error, any} | {:ok, Query.t()}
def parse(%Query{} = query) do
OK.for do
parsed_query =
query
|> parse_stmt(query.from_token)
|> parse_stmt(query.group_token)
|> parse_stmt(query.where_token)
|> parse_stmt(query.order_token)
|> parse_stmts(query.and_tokens)
|> parse_stmts(query.or_tokens)
|> parse_stmt(query.update_token)
|> parse_stmt(query.select_token)
|> parse_stmt(query.limit_token)
after
parsed_query
end
end
defp parse_stmts({:ok, query}, stmts), do: parse_stmts(query, stmts)
defp parse_stmts({:error, err}, _),
do: {:error, err}
defp parse_stmts(%Query{} = query, []), do: query
defp parse_stmts(%Query{} = query, stmts) do
stmts
|> Enum.reduce(query, fn stmt, acc ->
case parse_stmt(acc, stmt) do
{:ok, stmts} -> Map.merge(acc, stmts)
{:error, err} -> {:error, err}
end
end)
end
defp parse_stmt({:ok, query}, stmt), do: parse_stmt(query, stmt)
defp parse_stmt(%Query{} = query, nil), do: query
defp parse_stmt({:error, err}, _),
do: {:error, err}
defp parse_stmt(%Query{} = query, stmt) do
case stmt.token do
:from -> parse_from(query, stmt)
:from_index -> parse_from_index(query, stmt)
:select -> parse_select(query, stmt)
:select_function -> parse_select_function(query, stmt)
:group_by -> parse_group_by(query, stmt)
:update -> parse_update(query, stmt)
:where -> parse_where(query, stmt)
:and -> parse_and(query, stmt)
:or -> parse_or(query, stmt)
:not -> parse_not(query, stmt)
:order_by -> parse_ordering(query, stmt)
:limit -> parse_limit(query, stmt)
_ -> {:error, :invalid_statement}
end
end
defp parse_condition_stmt(%Query{} = query, nil), do: query
defp parse_condition_stmt(%Query{} = query, condition) do
query_part =
case condition.token do
:greater_than ->
"#{parse_field(query, condition.field)} > $p#{query.params_count}"
:eq ->
"#{parse_field(query, condition.field)} = $p#{query.params_count}"
:greater_than_or_eq ->
"#{parse_field(query, condition.field)} >= $p#{query.params_count}"
:lower_than ->
"#{parse_field(query, condition.field)} < $p#{query.params_count}"
:lower_than_or_eq ->
"#{parse_field(query, condition.field)} <= $p#{query.params_count}"
:between ->
"#{parse_field(query, condition.field)} between $p#{query.params_count} and $p#{query.params_count + 1}"
:in ->
"#{parse_field(query, condition.field)} in " <>
"(" <> parse_params_to_positional_string(query, condition.params) <> ")"
# This one is weird yeah, RavenDB only accepts the NOT in binary operations (OR | AND), so we need
# to be a little hackish
:nin ->
"#{parse_field(query, condition.field)} != null and not #{parse_field(query, condition.field)} in " <>
"(" <> parse_params_to_positional_string(query, condition.params) <> ")"
:ne ->
"#{parse_field(query, condition.field)} != $p#{query.params_count}"
_ ->
{:error, :invalid_condition_param}
end
case query_part do
{:error, :invalid_condition_param} -> {:error, :invalid_condition_param}
_ -> {:ok, query_part}
end
end
defp parse_from(%Query{} = query, from_token) do
query_fragment =
"from #{from_token.document_or_index}" <> parse_alias(query, from_token.document_or_index)
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_from_index(%Query{} = query, from_token) do
query_fragment =
"from #{from_token.document_or_index}" <> parse_alias(query, from_token.document_or_index)
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_select(%Query{} = query, select_token) do
query_fragment =
" select " <> Enum.map_join(select_token.fields, ", ", &parse_field(query, &1))
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_select_function(%Query{} = query, projected_select_token) do
query_fragment =
" select { " <>
Enum.map_join(projected_select_token.fields, "\n", fn {field, projected} ->
Atom.to_string(field) <> " : " <> projected
end) <> " }"
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_update(%Query{} = query, update_token) do
fields_to_update =
update_token.fields
|> Enum.reduce(%{updates: [], current_position: query.params_count}, fn field, acc ->
%{
acc
| updates:
acc.updates ++
[
"#{parse_field(query, field.name)} #{parse_assignment_operation(field.operation)} $p#{acc.current_position}"
],
current_position: acc.current_position + 1
}
end)
field_values = Enum.map(update_token.fields, fn field -> field.value end)
positional_params = parse_params_to_positional(query, field_values)
query_params =
Map.merge(
query.query_params,
positional_params
)
{:ok,
%Query{
query
| query_params: query_params,
params_count: fields_to_update.current_position
}
|> append_query_fragment(" update{ " <> Enum.join(fields_to_update.updates, ", ") <> " }")}
end
defp parse_assignment_operation(operation) do
case operation do
:set -> "="
:inc -> "+="
:dec -> "-="
end
end
defp parse_where(%Query{} = query, where_token) do
parse_locator_stmt(query, where_token, "where", false)
end
defp parse_and(%Query{} = query, and_token, negated \\ false) do
parse_locator_stmt(query, and_token, "and", negated)
end
defp parse_or(%Query{} = query, or_token, negated \\ false) do
parse_locator_stmt(query, or_token, "or", negated)
end
defp parse_not(%Query{} = query, not_token) do
case not_token.condition do
%Ravix.RQL.Tokens.And{} = and_token -> parse_and(query, and_token, true)
%Ravix.RQL.Tokens.Or{} = or_token -> parse_or(query, or_token, true)
end
end
defp parse_ordering(%Query{} = query, order_by_token) do
query_fragment =
" order by " <>
Enum.map_join(order_by_token.fields, ",", fn {field, order} ->
"#{parse_field(query, field)} #{Atom.to_string(order)}"
end)
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_group_by(%Query{} = query, group_by_token) do
query_fragment =
" group by " <> Enum.map_join(group_by_token.fields, ", ", &parse_field(query, &1))
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_limit(%Query{} = query, limit_token) do
query_fragment = " limit #{limit_token.skip}, #{limit_token.next}"
{:ok, append_query_fragment(query, query_fragment)}
end
defp parse_locator_stmt(%Query{} = query, stmt, locator, negated) do
OK.for do
condition <- parse_condition_stmt(query, stmt.condition)
positional_params = parse_params_to_positional(query, stmt.condition.params)
params_count = query.params_count + length(stmt.condition.params)
negated =
case negated do
true -> " not "
false -> " "
end
query_params =
Map.merge(
query.query_params,
positional_params
)
after
%Query{
query
| query_string: query.query_string <> " #{locator}#{negated}#{condition}",
query_params: query_params,
params_count: params_count
}
end
end
defp parse_params_to_positional(query, params) do
query.params_count..(query.params_count + length(params) - 1)
|> Enum.reduce(%{}, fn position, acc ->
Map.put(acc, "p#{position}", Enum.at(params, position - query.params_count))
end)
end
defp parse_params_to_positional_string(query, params) do
parse_params_to_positional(query, params)
|> Map.keys()
|> Enum.map_join(",", fn key -> "$#{key}" end)
end
defp parse_alias(%Query{aliases: aliases}, document) do
case Map.has_key?(aliases, document) do
true -> " as " <> Map.get(aliases, document)
false -> ""
end
end
defp parse_field(%Query{}, {field_name, field_alias})
when field_name in @non_aliasable_fields do
field_name <> " as #{field_alias}"
end
defp parse_field(%Query{aliases: aliases, from_token: from_token}, {field_name, field_alias}) do
case Map.has_key?(aliases, from_token.document_or_index) do
true -> Map.get(aliases, from_token.document_or_index) <> ".#{field_name} as #{field_alias}"
false -> field_name <> " as #{field_alias}"
end
end
defp parse_field(%Query{}, field) when field in @non_aliasable_fields, do: field
defp parse_field(%Query{aliases: aliases, from_token: from_token}, field) do
case Map.has_key?(aliases, from_token.document_or_index) do
true -> Map.get(aliases, from_token.document_or_index) <> ".#{field}"
false -> field
end
end
defp append_query_fragment(%Query{} = query, append) do
%Query{
query
| query_string: query.query_string <> "#{append}"
}
end
end
|
lib/rql/query_parser.ex
| 0.680454
| 0.424054
|
query_parser.ex
|
starcoder
|
defmodule Brain.Filter.Complementary do
require Logger
alias Brain.BlackBox
@pi 3.14159265359
@degrees_to_radians @pi/180
def init(_) do
{:ok,
%{
roll: 0,
pitch: 0,
roll_offset: 1.79,
pitch_offset: -0.5,
yaw: 0,
alpha: 0.985,
first_loop: true
}
}
end
def start_link() do
Logger.debug "Starting #{__MODULE__}..."
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
def handle_call({:update, gyroscope_data, accelerometer_data, sample_rate}, _from, %{first_loop: first_loop} = state) do
sample_rate_in_seconds = sample_rate / 1000
yawed_transfer = :math.sin(gyroscope_data[:z] * @degrees_to_radians * sample_rate_in_seconds)
accelerometer_total_vector = :math.sqrt(:math.pow(accelerometer_data[:x], 2) + :math.pow(accelerometer_data[:y], 2) + :math.pow(accelerometer_data[:z], 2))
pitch_accelerometer = :math.asin(accelerometer_data[:y] / accelerometer_total_vector) * (1 / @degrees_to_radians) - state[:pitch_offset]
roll_accelerometer = :math.asin(accelerometer_data[:x] / accelerometer_total_vector) * -(1 / @degrees_to_radians) - state[:roll_offset]
pitch_gyroscope = state[:pitch] + (gyroscope_data[:x] * sample_rate_in_seconds)
pitch_gyroscope = pitch_gyroscope + (pitch_gyroscope * yawed_transfer)
roll_gyroscope = state[:roll] + (gyroscope_data[:y] * sample_rate_in_seconds)
roll_gyroscope = roll_gyroscope + (roll_gyroscope * yawed_transfer)
new_state = case first_loop do
true ->
Logger.debug("#{__MODULE__} starts with roll: #{roll_accelerometer} and pitch #{pitch_accelerometer}.")
%{
roll: roll_accelerometer,
pitch: pitch_accelerometer,
yaw: 0,
first_loop: false
}
false ->
%{
roll: (roll_gyroscope * state[:alpha] + roll_accelerometer * (1- state[:alpha])),
pitch: (pitch_gyroscope * state[:alpha] + pitch_accelerometer * (1- state[:alpha])),
yaw: 0,
}
end
trace(state, new_state)
{:reply,
{
:ok, %{
roll: new_state[:roll],
pitch: new_state[:pitch],
yaw: new_state[:yaw]
}
}, Map.merge(state, new_state)
}
end
def handle_cast({:offset, :roll, value}, state) do
{:noreply, %{state | roll_offset: value}}
end
def update(gyroscope, accelerometer, sample_rate) do
GenServer.call(__MODULE__, {:update, gyroscope, accelerometer, sample_rate})
end
defp trace(_state, data) do
BlackBox.trace(__MODULE__, Process.info(self())[:registered_name], data)
end
def to_csv(data) do
{:ok, data |> Map.values |> Enum.join(",")}
end
def csv_headers(data) do
{:ok, data |> Map.keys |> Enum.join(",")}
end
end
|
apps/brain/lib/filters/complementary.ex
| 0.772273
| 0.502747
|
complementary.ex
|
starcoder
|
defmodule Ockam.Transport.Address do
defstruct [:family, :addr, :port]
defmodule InvalidAddressError do
defexception [:message]
def new({:unsupported_family, family}) do
%__MODULE__{message: "unsupported address family: #{inspect(family)}"}
end
def new(reason) when not is_binary(reason) do
%__MODULE__{message: "invalid address: #{inspect(reason)}"}
end
def new(reason) when is_binary(reason) do
%__MODULE__{message: reason}
end
def message(%__MODULE__{message: message}), do: message
end
def new!(family, addr, port \\ nil)
def new!(family, addr, port) do
case new(family, addr, port) do
{:ok, addr} ->
addr
{:error, reason} ->
raise InvalidAddressError.new(reason)
end
end
def new(family, addr, port \\ nil)
def new(:inet, addr, port) when addr in [:any, :loopback] do
case parse_port(port) do
{:ok, port} ->
{:ok, %__MODULE__{family: :inet, addr: addr, port: port}}
{:error, _reason} = err ->
err
end
end
def new(:inet, addr, port) do
with {:ok, addr} <- parse_address(addr),
{:ok, port} <- parse_port(port) do
{:ok, %__MODULE__{family: :inet, addr: addr, port: port}}
end
end
def new(family, _addr, _port) do
{:error, {:unsupported_family, family}}
end
def ip(%__MODULE__{addr: :any}), do: {0, 0, 0, 0}
def ip(%__MODULE__{addr: :loopback}), do: {127, 0, 0, 1}
def ip(%__MODULE__{addr: {_, _, _, _} = addr}), do: addr
def ip(%__MODULE__{addr: {_, _, _, _, _, _, _, _} = addr}), do: addr
def port(%__MODULE__{port: port}), do: port
@doc """
Converts this struct to the Erlang address representation
expected by the `:socket` API.
"""
def to_erl(%__MODULE__{} = addr) do
Map.from_struct(addr)
end
def parse_address(:any), do: :any
def parse_address(:loopback), do: :loopback
def parse_address(addr) when is_binary(addr) do
parse_address(String.to_charlist(addr))
end
def parse_address(addr) when is_list(addr) do
case :inet.parse_address(addr) do
{:ok, _addr} = result ->
result
{:error, _reason} = err ->
err
end
end
def parse_address(addr), do: {:error, {:invalid_address, addr}}
def parse_port(p) when is_integer(p) and p > 0 and p <= 65535 do
{:ok, p}
end
def parse_port(p) when is_binary(p), do: parse_port(String.to_integer(p))
def parse_port(p), do: {:error, {:invalid_port, p}}
end
|
implementations/elixir/lib/transport/address.ex
| 0.797478
| 0.411613
|
address.ex
|
starcoder
|
defmodule OMG.API.State.Transaction do
@moduledoc """
Internal representation of transaction spent on Plasma chain
"""
alias OMG.API.Crypto
alias OMG.API.Utxo
require Utxo
@zero_address OMG.Eth.zero_address()
@max_inputs 4
@max_outputs 4
@default_metadata nil
defstruct [:inputs, :outputs, metadata: @default_metadata]
@type t() :: %__MODULE__{
inputs: list(input()),
outputs: list(output()),
metadata: metadata()
}
@type currency() :: Crypto.address_t()
@type tx_bytes() :: binary()
@type tx_hash() :: Crypto.hash_t()
@type metadata() :: binary() | nil
@type input() :: %{
blknum: non_neg_integer(),
txindex: non_neg_integer(),
oindex: non_neg_integer()
}
@type output() :: %{
owner: Crypto.address_t(),
currency: currency(),
amount: non_neg_integer()
}
@type decode_error() ::
:malformed_transaction_rlp
| :malformed_inputs
| :malformed_outputs
| :malformed_address
| :malformed_metadata
| :malformed_transaction
defmacro is_metadata(metadata) do
quote do
unquote(metadata) == nil or (is_binary(unquote(metadata)) and byte_size(unquote(metadata)) == 32)
end
end
defmacro max_inputs do
quote do
unquote(@max_inputs)
end
end
defmacro max_outputs do
quote do
unquote(@max_outputs)
end
end
@type input_index_t() :: 0..3
@doc """
Creates a new transaction from a list of inputs and a list of outputs.
Adds empty (zeroes) inputs and/or outputs to reach the expected size
of `@max_inputs` inputs and `@max_outputs` outputs.
assumptions:
```
length(inputs) <= @max_inputs
length(outputs) <= @max_outputs
```
"""
@spec new(
list({pos_integer, pos_integer, 0 | 1}),
list({Crypto.address_t(), currency(), pos_integer}),
metadata()
) :: t()
def new(inputs, outputs, metadata \\ @default_metadata) when is_metadata(metadata) do
inputs =
inputs
|> Enum.map(fn {blknum, txindex, oindex} -> %{blknum: blknum, txindex: txindex, oindex: oindex} end)
inputs = inputs ++ List.duplicate(%{blknum: 0, txindex: 0, oindex: 0}, @max_inputs - Kernel.length(inputs))
outputs =
outputs
|> Enum.map(fn {owner, currency, amount} -> %{owner: owner, currency: currency, amount: amount} end)
outputs =
outputs ++
List.duplicate(
%{owner: @zero_address, currency: @zero_address, amount: 0},
@max_outputs - Kernel.length(outputs)
)
%__MODULE__{inputs: inputs, outputs: outputs, metadata: metadata}
end
def account_address?(@zero_address), do: false
def account_address?(address) when is_binary(address) and byte_size(address) == 20, do: true
def account_address?(_), do: false
def reconstruct([inputs_rlp, outputs_rlp | rest_rlp])
when rest_rlp == [] or length(rest_rlp) == 1 do
with {:ok, inputs} <- reconstruct_inputs(inputs_rlp),
{:ok, outputs} <- reconstruct_outputs(outputs_rlp),
{:ok, metadata} <- reconstruct_metadata(rest_rlp),
do: {:ok, %__MODULE__{inputs: inputs, outputs: outputs, metadata: metadata}}
end
def reconstruct(_), do: {:error, :malformed_transaction}
defp reconstruct_inputs(inputs_rlp) do
{:ok,
Enum.map(inputs_rlp, fn [blknum, txindex, oindex] ->
%{blknum: parse_int(blknum), txindex: parse_int(txindex), oindex: parse_int(oindex)}
end)}
rescue
_ -> {:error, :malformed_inputs}
end
defp reconstruct_outputs(outputs_rlp) do
outputs =
Enum.map(outputs_rlp, fn [owner, currency, amount] ->
with {:ok, cur12} <- parse_address(currency),
{:ok, owner} <- parse_address(owner) do
%{owner: owner, currency: cur12, amount: parse_int(amount)}
end
end)
if error = Enum.find(outputs, &match?({:error, _}, &1)),
do: error,
else: {:ok, outputs}
rescue
_ -> {:error, :malformed_outputs}
end
defp reconstruct_metadata([]), do: {:ok, nil}
defp reconstruct_metadata([metadata]) when is_metadata(metadata), do: {:ok, metadata}
defp reconstruct_metadata([_]), do: {:error, :malformed_metadata}
defp parse_int(binary), do: :binary.decode_unsigned(binary, :big)
# necessary, because RLP handles empty string equally to integer 0
@spec parse_address(<<>> | Crypto.address_t()) :: {:ok, Crypto.address_t()} | {:error, :malformed_address}
defp parse_address(binary)
defp parse_address(""), do: {:ok, <<0::160>>}
defp parse_address(<<_::160>> = address_bytes), do: {:ok, address_bytes}
defp parse_address(_), do: {:error, :malformed_address}
@spec decode(tx_bytes()) :: {:ok, t()} | {:error, decode_error()}
def decode(tx_bytes) do
with {:ok, raw_tx_rlp_decoded_chunks} <- try_exrlp_decode(tx_bytes),
do: reconstruct(raw_tx_rlp_decoded_chunks)
end
def decode!(tx_bytes) do
{:ok, tx} = decode(tx_bytes)
tx
end
defp try_exrlp_decode(tx_bytes) do
{:ok, ExRLP.decode(tx_bytes)}
rescue
_ -> {:error, :malformed_transaction_rlp}
end
@spec encode(t()) :: tx_bytes()
def encode(transaction) do
get_data_for_rlp(transaction)
|> ExRLP.encode()
end
def get_data_for_rlp(%__MODULE__{inputs: inputs, outputs: outputs, metadata: metadata}) when is_metadata(metadata),
do:
[
# contract expects 4 inputs and outputs
Enum.map(inputs, fn %{blknum: blknum, txindex: txindex, oindex: oindex} -> [blknum, txindex, oindex] end) ++
List.duplicate([0, 0, 0], 4 - length(inputs)),
Enum.map(outputs, fn %{owner: owner, currency: currency, amount: amount} -> [owner, currency, amount] end) ++
List.duplicate([@zero_address, @zero_address, 0], 4 - length(outputs))
] ++ if(metadata, do: [metadata], else: [])
@spec hash(t()) :: tx_hash()
def hash(%__MODULE__{} = tx) do
tx
|> encode
|> Crypto.hash()
end
@doc """
Returns all inputs
"""
def get_inputs(%__MODULE__{inputs: inputs}) do
inputs
|> Enum.map(fn %{blknum: blknum, txindex: txindex, oindex: oindex} -> Utxo.position(blknum, txindex, oindex) end)
end
@doc """
Returns all outputs
"""
@spec get_outputs(t()) :: list(output())
def get_outputs(%__MODULE__{outputs: outputs}), do: outputs
end
|
apps/omg_api/lib/state/transaction.ex
| 0.887972
| 0.576244
|
transaction.ex
|
starcoder
|
defmodule Phoenix.Component do
@moduledoc """
API for function components.
A function component is any function that receives
an assigns map as argument and returns a rendered
struct built with [the `~H` sigil](`Phoenix.LiveView.Helpers.sigil_H/2`).
Here is an example:
defmodule MyComponent do
use Phoenix.Component
# Optionally also bring the HTML helpers
# use Phoenix.HTML
def greet(assigns) do
~H"\""
<p>Hello, <%= assigns.name %></p>
"\""
end
end
The component can be invoked as a regular function:
MyComponent.greet(%{name: "Jane"})
But it is typically invoked using the function component
syntax from the `~H` sigil:
~H"\""
<MyComponent.greet name="Jane" />
"\""
If the `MyComponent` module is imported or if the function
is defined locally, you can skip the module name:
~H"\""
<.greet name="Jane" />
"\""
Learn more about the `~H` sigil [in its documentation](`Phoenix.LiveView.Helpers.sigil_H/2`).
## `use Phoenix.Component`
Modules that have to define function components should call `use Phoenix.Component`
at the top. Doing so will import the functions from both `Phoenix.LiveView`
and `Phoenix.LiveView.Helpers` modules.
Note it is not necessary to `use Phoenix.Component` inside `Phoenix.LiveView`
and `Phoenix.LiveComponent`.
## Assigns
While inside a function component, you must use the `assign/3` and
`assign_new/3` functions in `Phoenix.LiveView` to manipulate assigns,
so that LiveView can track changes to the assigns values.
For example, let's imagine a component that receives the first
name and last name and must compute the name assign. One option
would be:
def show_name(assigns) do
assigns = assign(assigns, :name, assigns.first_name <> assigns.last_name)
~H"\""
<p>Your name is: <%= @name %></p>
"\""
end
However, when possible, it may be cleaner to break the logic over function
calls instead of precomputed assigns:
def show_name(assigns) do
~H"\""
<p>Your name is: <%= full_name(@first_name, @last_name) %></p>
"\""
end
defp full_name(first_name, last_name), do: first_name <> last_name
Another example is making an assign optional by providing
a default value:
def field_label(assigns) do
assigns = assign_new(assigns, :help, fn -> nil end)
~H"\""
<label>
<%= @text %>
<%= if @help do %>
<span class="help"><%= @help %></span>
<% end %>
</label>
"\""
end
## Blocks
It is also possible to give HTML blocks to function components
as in regular HTML tags. For example, you could create a
button component that looks like this:
def button(assigns) do
~H"\""
<button class="btn">
<%= render_block(@inner_block) %>
</button>
"\""
end
and now you can invoke it as:
<.button>
This renders <strong>inside</strong> the button!
</.button>
In a nutshell, the block given to the component is
assigned to `@inner_block` and then we use
[`render_block`](`Phoenix.LiveView.Helpers.render_block/2`)
to render it.
You can even have the component give a value back to
the caller, by using `let`. Imagine this component:
def unordered_list(assigns) do
~H"\""
<ul>
<%= for entry <- @entries do %>
<li><%= render_block(@inner_block, entry) %></li>
<% end %>
</ul>
"\""
end
And now you can invoke it as:
<.unordered_list let={entry} entries={~w(apple banana cherry)}>
I like <%= entry %>
</.unordered_list>
"""
@doc false
defmacro __using__(_) do
quote do
import Phoenix.LiveView
import Phoenix.LiveView.Helpers
end
end
end
|
lib/phoenix_component.ex
| 0.847227
| 0.645036
|
phoenix_component.ex
|
starcoder
|
defmodule Crit.Params.Variants.SingletonToMany do
@moduledoc """
A builder for controller params of this form:
%{
"0" => %{..., "split_field" => ...}},
"1" => %{..., "split_field" => ...}},
...
}
A controller will typically receive N indexed parameters. For tests
using this module, only a single value is sent (with an index of "0").
A further wrinkle is that the *split field* is used to produce separate copies
of all the fields. For example, the split field might be a list of
integers representing ids from a set of checkboxes. See `OneToMany` for more.
Note: This builder will also insert an "index" field into each exemplars
params. For example, a multi-exemplar set of params will look like this:
%{
"0" => %{
"frequency_id" => "1",
"index" => "0", ## <<<---
"name" => ""
},
"1" => %{
"frequency_id" => "1",
"index" => "1", ## <<<---
"name" => "",
"species_ids" => ["1"]
},
"2" => %{
"frequency_id" => "2",
"index" => "2", ## <<<---
"name" => "valid",
"species_ids" => ["1"]
}
That should work fine even if your schema doesn't include such
an index. It should be thrown away by `Changeset.cast`. However,
if you don't want it, just change the call to `Get.doubly_numbered_params`
below into a call to `Get.numbered_params`.
"""
defmacro __using__(_) do
quote do
use Crit.Params.Variants.Common
alias Crit.Params.{Get,Validate}
use FlowAssertions
use FlowAssertions.Ecto
# -----CAN BE USED IN TEST--------------------------------------------------
def that_are(descriptors) when is_list(descriptors) do
Get.doubly_numbered_params(config(), descriptors, "index")
end
def that_are(descriptor), do: that_are([descriptor])
def that_are(descriptor, opts), do: that_are([[descriptor | opts]])
def discarded do
fn result, name ->
if ok_content(result) != [] do
IO.inspect result
flunk("Exemplar #{name} is not supposed to produce a changeset")
end
end
end
# ----------------------------------------------------------------------------
defp check_changeset({:error, :form, [changeset]}, name) do
config = config()
assert_invalid(changeset)
Validate.FormChecking.assert_error_expected(config, name)
Validate.FormChecking.check(config, changeset, name)
end
defp check_changeset({:ok, [changeset]}, name) do
config = config()
assert_valid(changeset)
Validate.FormChecking.refute_error_expected(config, name)
Validate.FormChecking.check(config, changeset, name)
end
defp make_params_for_name(config, name),
do: Get.doubly_numbered_params(config(), [name], "index")
end
end
end
|
test/support/http_params/variants/singleton_to_many.ex
| 0.783409
| 0.433562
|
singleton_to_many.ex
|
starcoder
|
defmodule StacktraceCleaner do
@moduledoc """
Stacktraces often include many lines that are not relevant for the context under review.
This makes it hard to find the signal amongst many noise, and adds debugging time.
StacktraceCleaner is a module to remove those noises and make them easier to see.
"""
@noise_paths ["process", "ex_unit", ".erl"]
@type type_stacktrace :: {atom(), atom(), integer, [file: charlist(), line: integer()]}
@doc """
Extracts first stacktrace from `StacktraceCleaner.current_stacktraces`
* match_path: If you want to extract more strictly, you can specify it with an argument.
"""
@spec current_stacktrace(String.t() | nil) :: type_stacktrace
def current_stacktrace(match_path \\ nil) do
[stacktrace | _] = current_stacktraces(match_path)
stacktrace
end
@doc """
Gets cleaned stacktraces.
* match_path: If you want to extract more strictly, you can specify it with an argument.
"""
@spec current_stacktraces(String.t() | nil) :: list(type_stacktrace)
def current_stacktraces(match_path \\ nil) do
Process.info(self(), :current_stacktrace)
|> elem(1)
|> clean(match_path)
end
@doc """
Cleans stacktraces.
* match_path: If you want to extract more strictly, you can specify it with an argument.
"""
@spec clean(list(type_stacktrace), String.t() | nil) :: list(type_stacktrace)
def clean(stacktraces, match_path \\ nil) do
deps_app_regexes = create_deps_app_regexes()
cleaned =
stacktraces
|> Enum.reject(fn stacktrace ->
stacktrace_path = stacktrace |> elem(3) |> Keyword.get(:file) |> to_string()
deps_app_regexes |> Enum.find(&(stacktrace_path =~ &1))
end)
if match_path do
cleaned
|> Enum.filter(fn stacktrace ->
stacktrace |> elem(3) |> Keyword.get(:file) |> to_string() =~ Regex.compile!(match_path)
end)
else
cleaned
end
end
defp create_deps_app_regexes do
(Mix.Project.deps_apps() ++ @noise_paths)
|> Enum.filter(&(!is_nil(&1)))
|> Enum.map(fn app ->
app
|> to_string()
|> Regex.compile!()
end)
end
end
|
lib/stacktrace_cleaner.ex
| 0.747432
| 0.521227
|
stacktrace_cleaner.ex
|
starcoder
|
defprotocol Xema.Castable do
@moduledoc """
Converts data using the specified schema.
"""
@doc """
Converts the given data using the specified schema.
"""
def cast(value, schema)
end
defimpl Xema.Castable, for: Atom do
alias Xema.Schema
def cast(atom, %Schema{type: :any}),
do: {:ok, atom}
def cast(atom, %Schema{type: boolean})
when is_boolean(boolean),
do: {:ok, atom}
def cast(nil, %Schema{type: nil}),
do: {:ok, nil}
def cast(atom, %Schema{type: :atom}),
do: {:ok, atom}
def cast(atom, %Schema{type: :string}),
do: {:ok, to_string(atom)}
def cast(atom, %Schema{type: :boolean})
when is_boolean(atom),
do: {:ok, atom}
def cast(str, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: str}}
def cast(atom, %Schema{type: type}),
do: {:error, %{to: type, value: atom}}
end
defimpl Xema.Castable, for: BitString do
import Xema.Utils, only: [to_existing_atom: 1]
alias Xema.Schema
def cast(str, %Schema{type: :struct, module: module})
when module in [Date, DateTime, NaiveDateTime, Time] do
case apply(module, :from_iso8601, [str]) do
{:ok, value, _offset} -> {:ok, value}
{:ok, value} -> {:ok, value}
{:error, _} -> {:error, %{to: module, value: str}}
end
end
def cast(str, %Schema{type: :struct, module: Decimal}) do
{:ok, Decimal.new(str)}
rescue
_ -> {:error, %{to: Decimal, value: str}}
end
def cast(str, %Schema{type: :any}),
do: {:ok, str}
def cast(str, %Schema{type: boolean}) when is_boolean(boolean),
do: {:ok, str}
def cast(str, %Schema{type: :atom}) do
case to_existing_atom(str) do
nil -> {:error, %{to: :atom, value: str}}
atom -> {:ok, atom}
end
end
def cast(str, %Schema{type: :float}),
do: to_float(str, :float)
def cast(str, %Schema{type: :integer}),
do: to_integer(str, :integer)
def cast(str, %Schema{type: :number}) do
case String.contains?(str, ".") do
true -> to_float(str, :number)
false -> to_integer(str, :number)
end
end
def cast(str, %Schema{type: :string}),
do: {:ok, str}
def cast(str, %Schema{type: :struct, module: module}) when not is_nil(module),
do: {:error, %{to: module, value: str}}
def cast(str, %Schema{type: type}),
do: {:error, %{to: type, value: str}}
defp to_integer(str, type) when type in [:integer, :number] do
case Integer.parse(str) do
{int, ""} -> {:ok, int}
_ -> {:error, %{to: type, value: str}}
end
end
defp to_float(str, type) when type in [:float, :number] do
case Float.parse(str) do
{int, ""} -> {:ok, int}
_ -> {:error, %{to: type, value: str}}
end
end
end
defimpl Xema.Castable, for: Date do
alias Xema.Schema
def cast(date, %Schema{type: :struct, module: Date}), do: {:ok, date}
def cast(date, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: date}}
def cast(date, %Schema{type: type}),
do: {:error, %{to: type, value: date}}
end
defimpl Xema.Castable, for: DateTime do
alias Xema.Schema
def cast(date_time, %Schema{type: :struct, module: DateTime}), do: {:ok, date_time}
def cast(date_time, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: date_time}}
def cast(date_time, %Schema{type: type}),
do: {:error, %{to: type, value: date_time}}
end
defimpl Xema.Castable, for: Decimal do
alias Xema.Schema
def cast(decimal, %Schema{type: :struct, module: Decimal}), do: {:ok, decimal}
def cast(decimal, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: decimal}}
def cast(decimal, %Schema{type: type}),
do: {:error, %{to: type, value: decimal}}
end
defimpl Xema.Castable, for: Float do
alias Xema.Schema
def cast(float, %Schema{type: :any}),
do: {:ok, float}
def cast(float, %Schema{type: boolean}) when is_boolean(boolean),
do: {:ok, float}
def cast(float, %Schema{type: :float}),
do: {:ok, float}
def cast(float, %Schema{type: :number}),
do: {:ok, float}
def cast(float, %Schema{type: :string}),
do: {:ok, to_string(float)}
def cast(float, %Schema{type: :struct, module: Decimal}),
do: {:ok, Decimal.from_float(float)}
def cast(str, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: str}}
def cast(float, %Schema{type: type}),
do: {:error, %{to: type, value: float}}
end
defimpl Xema.Castable, for: Integer do
alias Xema.Schema
def cast(int, %Schema{type: :any}),
do: {:ok, int}
def cast(int, %Schema{type: boolean})
when is_boolean(boolean),
do: {:ok, int}
def cast(int, %Schema{type: :integer}),
do: {:ok, int}
def cast(int, %Schema{type: :number}),
do: {:ok, int}
def cast(int, %Schema{type: :string}),
do: {:ok, to_string(int)}
def cast(int, %Schema{type: :struct, module: Decimal}),
do: {:ok, Decimal.new(int)}
def cast(str, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: str}}
def cast(int, %Schema{type: type}),
do: {:error, %{to: type, value: int}}
end
defimpl Xema.Castable, for: List do
alias Xema.Schema
def cast([], %Schema{type: :keyword}),
do: {:ok, []}
def cast(list, %Schema{type: :keyword}) do
with :ok <- check_keyword(list, :keyword) do
{:ok, list}
end
end
def cast([], %Schema{type: :map}),
do: {:ok, %{}}
def cast(list, %Schema{type: :any, properties: properties})
when not is_nil(properties) do
with :ok <- check_keyword(list, :map) do
{:ok, Enum.into(list, %{}, & &1)}
end
end
def cast(list, %Schema{type: :any}),
do: {:ok, list}
def cast(list, %Schema{type: boolean})
when is_boolean(boolean),
do: {:ok, list}
def cast(list, %Schema{type: :struct, module: nil}),
do: {:error, %{to: :struct, value: list}}
def cast(list, %Schema{type: :struct, module: module}) do
case Keyword.keyword?(list) do
true -> {:ok, struct!(module, list)}
false -> {:error, %{to: module, value: list}}
end
end
def cast([], %Schema{type: :tuple}),
do: {:ok, {}}
def cast(list, %Schema{type: :tuple}) do
case Keyword.keyword?(list) do
true -> {:error, %{to: :tuple, value: list}}
false -> {:ok, List.to_tuple(list)}
end
end
def cast([], %Schema{type: :list}),
do: {:ok, []}
def cast(list, %Schema{type: :list}) do
case Keyword.keyword?(list) do
true -> {:error, %{to: :list, value: list}}
false -> {:ok, list}
end
end
def cast(list, %Schema{type: :map, keys: keys}) when keys in [nil, :atoms] do
with :ok <- check_keyword(list, :map) do
{:ok, Enum.into(list, %{}, & &1)}
end
end
def cast(list, %Schema{type: :map, keys: :strings}) do
with :ok <- check_keyword(list, :map) do
{:ok, Enum.into(list, %{}, fn {key, value} -> {to_string(key), value} end)}
end
end
def cast(list, %Schema{type: type}),
do: {:error, %{to: type, value: list}}
defp check_keyword(list, to) do
case Keyword.keyword?(list) do
true -> :ok
false -> {:error, %{to: to, value: list}}
end
end
end
defimpl Xema.Castable, for: Map do
import Xema.Utils, only: [to_existing_atom: 1]
alias Xema.Schema
def cast(map, %Schema{type: :any}),
do: {:ok, map}
def cast(map, %Schema{type: boolean}) when is_boolean(boolean),
do: {:ok, map}
def cast(map, %Schema{type: :struct, module: nil}),
do: {:ok, map}
def cast(map, %Schema{type: :struct, module: module}) do
with {:ok, fields} <- fields(map) do
{:ok, struct!(module, fields)}
end
end
def cast(map, %Schema{type: :keyword}) do
Enum.reduce_while(map, {:ok, []}, fn {key, value}, {:ok, acc} ->
case cast_key(key, :atoms) do
{:ok, key} ->
{:cont, {:ok, [{key, value} | acc]}}
:error ->
{:halt, {:error, %{to: :keyword, key: key}}}
end
end)
end
def cast(map, %Schema{type: :map, keys: keys}) do
Enum.reduce_while(map, {:ok, %{}}, fn {key, value}, {:ok, acc} ->
case cast_key(key, keys) do
{:ok, key} ->
{:cont, {:ok, Map.put(acc, key, value)}}
:error ->
{:halt, {:error, %{to: :map, key: key}}}
end
end)
end
def cast(map, %Schema{type: type}),
do: {:error, %{to: type, value: map}}
defp cast_key(value, :atoms) when is_binary(value) do
case to_existing_atom(value) do
nil -> :error
cast -> {:ok, cast}
end
end
defp cast_key(value, :strings) when is_atom(value),
do: {:ok, Atom.to_string(value)}
defp cast_key(value, _),
do: {:ok, value}
defp fields(map) do
Enum.reduce_while(map, {:ok, %{}}, fn {key, value}, {:ok, acc} ->
case cast_key(key, :atoms) do
{:ok, key} ->
{:cont, {:ok, Map.put(acc, key, value)}}
:error ->
{:halt, {:error, %{to: :struct, key: key}}}
end
end)
end
end
defimpl Xema.Castable, for: NaiveDateTime do
alias Xema.Schema
def cast(date_time, %Schema{type: :struct, module: NaiveDateTime}), do: {:ok, date_time}
def cast(date_time, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: date_time}}
def cast(date_time, %Schema{type: type}),
do: {:error, %{to: type, value: date_time}}
end
defimpl Xema.Castable, for: Time do
alias Xema.Schema
def cast(time, %Schema{type: :struct, module: Time}), do: {:ok, time}
def cast(time, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: time}}
def cast(time, %Schema{type: type}),
do: {:error, %{to: type, value: time}}
end
defimpl Xema.Castable, for: Tuple do
alias Xema.Schema
def cast(tuple, %Schema{type: :any}),
do: {:ok, tuple}
def cast(tuple, %Schema{type: boolean})
when is_boolean(boolean),
do: {:ok, tuple}
def cast(tuple, %Schema{type: :tuple}),
do: {:ok, tuple}
def cast(tuple, %Schema{type: :list}),
do: {:ok, Tuple.to_list(tuple)}
def cast(str, %Schema{type: :struct, module: module})
when not is_nil(module),
do: {:error, %{to: module, value: str}}
def cast(tuple, %Schema{type: type}),
do: {:error, %{to: type, value: tuple}}
end
|
lib/xema/castable.ex
| 0.894868
| 0.437223
|
castable.ex
|
starcoder
|
defmodule Oli.Activities.Realizer.Selection do
@moduledoc """
Represents a selection embedded within a page.
"""
@derive Jason.Encoder
@enforce_keys [:id, :count, :logic, :purpose, :type]
defstruct [:id, :count, :logic, :purpose, :type]
alias Oli.Activities.Realizer.Logic
alias Oli.Activities.Realizer.Selection
alias Oli.Activities.Realizer.Query.Builder
alias Oli.Activities.Realizer.Query.Source
alias Oli.Activities.Realizer.Query.Executor
alias Oli.Activities.Realizer.Query.Paging
alias Oli.Activities.Realizer.Query.Result
@type t() :: %__MODULE__{
id: String.t(),
count: integer(),
logic: %Logic{},
purpose: String.t(),
type: String.t()
}
def parse(%{"count" => count, "id" => id, "logic" => logic, "purpose" => purpose}) do
case Logic.parse(logic) do
{:ok, logic} ->
{:ok, %Selection{id: id, count: count, logic: logic, purpose: purpose, type: "selection"}}
e ->
e
end
end
def parse(_) do
{:error, "invalid selection"}
end
@doc """
Fulfills a selection by querying the database for matching activities.
Returns {:ok, %Result{}} when the selection is filled.
Returns {:partial, %Result{}} when the query succeeds but less than the requested
count of activities is returned. This includes the case where zero activities are returned.
Returns {:error, e} on a failure to execute the query.
"""
def fulfill(%Selection{count: count} = selection, %Source{} = source) do
run(selection, source, %Paging{limit: count, offset: 0})
end
@doc """
Tests the fulfillment of a selection by querying the database for matching activities, but
only returning one result to save bandwidth.
Returns {:ok, %Result{}} when the selection is filled.
Returns {:partial, %Result{}} when the query succeeds but less than the requested
count of activities is returned. This includes the case where zero activities are returned.
Returns {:error, e} on a failure to execute the query.
"""
def test(%Selection{} = selection, %Source{} = source) do
run(selection, source, %Paging{limit: 1, offset: 0})
end
defp run(%Selection{count: count, logic: logic}, %Source{} = source, %Paging{} = paging) do
case Builder.build(logic, source, paging, :random)
|> Executor.execute() do
{:ok, %Result{totalCount: ^count} = result} ->
{:ok, result}
{:ok, result} ->
{:partial, result}
e ->
e
end
end
end
|
lib/oli/activities/realizer/selection.ex
| 0.873276
| 0.446012
|
selection.ex
|
starcoder
|
defmodule Jaxon.Stream do
alias Jaxon.{Path, Parser, ParseError, Decoder}
@doc """
Query all values of an array:
```
iex> ~s({ "numbers": [1,2] }) |> List.wrap() |> Jaxon.Stream.query([:root, "numbers", :all]) |> Enum.to_list()
[1, 2]
```
Query an object property:
```
iex> ~s({ "person": {"name": "Jose"} }) |> List.wrap() |> Jaxon.Stream.query([:root, "person", "name"]) |> Enum.to_list()
["Jose"]
```
"""
@spec query(Stream.t(), Path.t()) :: Stream.t()
def query(bin_stream, [:root | rest]) do
query(bin_stream, rest)
end
def query(bin_stream, query) do
initial_fun = fn events ->
query_value(query, [], events)
end
bin_stream
|> Stream.concat([:end_stream])
|> Stream.transform({"", initial_fun}, fn
:end_stream, {"", _} ->
{:halt, nil}
chunk, {tail, fun} ->
chunk = tail <> chunk
Parser.parse(chunk)
|> fun.()
|> case do
{:yield, tail, fun} ->
{[], {tail, fun}}
{:ok, records, _events} ->
{records, {"", initial_fun}}
{:error, error} ->
raise error
end
end)
end
def query_value([], acc, events) do
append_value(Decoder.events_to_value(events), acc)
end
def query_value(query, acc, []) do
{:yield, "", &query_value(query, acc, &1)}
end
def query_value(query, acc, [:start_array | events]) do
query_array(query, acc, 0, events)
end
def query_value(query, acc, [:start_object | events]) do
query_object(query, acc, events)
end
defp append_value({:ok, value, rest}, acc) do
{:ok, acc ++ [value], rest}
end
defp append_value({:yield, tail, inner}, acc) do
{:yield, tail, &append_value(inner.(&1), acc)}
end
defp append_value(other, _acc) do
other
end
defp add_array_value({:ok, acc, events}, query, _, key) do
query_array(query, acc, key + 1, events)
end
defp add_array_value({:yield, tail, inner}, query, acc, key) do
{:yield, tail, &add_array_value(inner.(&1), query, acc, key)}
end
defp skip_array_value({:ok, _, events}, query, acc, key) do
query_array(query, acc, key + 1, events)
end
defp skip_array_value({:yield, tail, inner}, query, acc, key) do
{:yield, tail, &skip_array_value(inner.(&1), query, acc, key)}
end
defp query_array(query, acc, 0, events) do
query_array_value(query, acc, 0, events)
end
defp query_array(query, acc, key, [:comma | events]) do
query_array_value(query, acc, key, events)
end
defp query_array(_query, acc, _key, [:end_array | events]) do
{:ok, acc, events}
end
defp query_array(query, acc, key, []) do
{:yield, "", &query_array(query, acc, key, &1)}
end
defp query_array(_query, _acc, _key, [event | _]) do
{:error, ParseError.unexpected_event(event, [:comma, :end_array])}
end
defp query_array_value(query = [key | rest_query], acc, key, events) do
add_array_value(query_value(rest_query, acc, events), query, acc, key)
end
defp query_array_value(query = [:all | rest_query], acc, key, events) do
add_array_value(query_value(rest_query, acc, events), query, acc, key)
end
defp query_array_value(query, acc, key, events) do
skip_array_value(Decoder.events_to_value(events), query, acc, key)
end
defp append_object_value({:ok, acc, events}, query, _) do
query_object(query, acc, events)
end
defp append_object_value({:yield, tail, inner}, query, acc) do
{:yield, tail, &append_object_value(inner.(&1), query, acc)}
end
defp append_object_value(other, _, _) do
other
end
defp skip_object_value({:ok, _, events}, query, acc) do
query_object(query, acc, events)
end
defp skip_object_value({:yield, tail, inner}, query, acc) do
{:yield, tail, &skip_object_value(inner.(&1), query, acc)}
end
defp skip_object_value(other, _, _) do
other
end
defp query_object(_query, acc, [:end_object | events]) do
{:ok, acc, events}
end
defp query_object(query, acc, [:comma | events]) do
query_object(query, acc, events)
end
defp query_object(query, acc, []) do
{:yield, "", &query_object(query, acc, &1)}
end
defp query_object(query, acc, [{:incomplete, tail}]) do
{:yield, tail, &query_object(query, acc, &1)}
end
defp query_object(query, acc, [{:string, key}]) do
{:yield, "", &query_object(query, acc, [{:string, key} | &1])}
end
defp query_object([key | query], acc, [{:string, key} | events]) do
with {:ok, events, acc} <- Decoder.events_expect(events, :colon, acc) do
append_object_value(query_value(query, acc, events), query, acc)
end
end
defp query_object(query, acc, [{:string, _key} | events]) do
with {:ok, events, acc} <- Decoder.events_expect(events, :colon, acc) do
skip_object_value(Decoder.events_to_value(events), query, acc)
end
end
end
|
lib/jaxon/stream.ex
| 0.683208
| 0.906694
|
stream.ex
|
starcoder
|
defmodule FusionAuth.Reports do
@moduledoc """
The `FusionAuth.Reports` module provides access functions to the [FusionAuth Reports API](https://fusionauth.io/docs/v1/tech/apis/reports).
All functions require a Tesla Client struct created with `FusionAuth.client(base_url, api_key, tenant_id)`.
All but one function (totals) takes in required parameters `start_date` and `end_date`.
- end_date :: integer() :: Required]\n
The end of the query range. This is an [instant](https://fusionauth.io/docs/v1/tech/reference/data-types#instants) but it is truncated to days in the report timezone (which is set in the system settings).
- start_date :: integer() :: Required]\n
The start of the query range. This is an [instant](https://fusionauth.io/docs/v1/tech/reference/data-types#instants) but it is truncated to days in the report timezone (which is set in the system settings).
"""
alias FusionAuth.Utils
@type client :: FusionAuth.client()
@type result :: FusionAuth.result()
@type start_date :: integer()
@type end_date :: integer()
@type application_id :: String.t()
@type login_id :: String.t()
@type user_id :: String.t()
@doc """
Generate the daily active users report
This report retrieves the number of daily active users for a given application or across all applications. You must specify a date range for the report.
## Parameters
- applicationid :: String.t() :: Optional\n
A specific application to query for. If not provided a "Global" (across all applications) daily active users report will be returned.
## Examples
iex> client = FusionAuth.client()
iex> end_date = 1591830136785
iex> start_date = 1588316400000
iex> params = [applicationId: "473f2618-c526-45ba-9c35-8739ba6cfc2e"]
iex> FusionAuth.Reports.get_daily_active_users_report(client, start_date, end_date, params)
{
:ok,
%{
"dailyActiveUsers" => [
%{"count" => 1, "interval" => 18418},
%{"count" => 1, "interval" => 18421},
%{"count" => 1, "interval" => 18422},
%{"count" => 1, "interval" => 18423}
],
"total" => 4
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Generate Daily Active Users Report](https://fusionauth.io/docs/v1/tech/apis/reports#generate-daily-active-users-report).
"""
@spec get_daily_active_users_report(client(), start_date(), end_date(), [key: application_id()] | []) :: result()
def get_daily_active_users_report(client, start_date, end_date, parameters \\ []) do
params = Keyword.merge([start: start_date, end: end_date], parameters)
Tesla.get(
client,
"/api/report/daily-active-user" <>
Utils.build_query_parameters(params)
) |> FusionAuth.result()
end
@doc """
Generate Login Report
This report retrieves the number of logins for a given application or across all applications.
You must specify a date range for the report. The report is always generated in hours.
If you want to calculate daily logins, you’ll need to roll up the results in the response.
## Parameters
- applicationid :: String.t() :: Optional\n
A specific application to query for. If not provided a "Global" (across all applications) logins report will be returned.
- loginId :: String.t() :: Optional\n
When this parameter is provided it will reduce the scope of the report to a single user with the requested email or username specified by this parameter.\n
This parameter is mutually exclusive with `userId`, if both are provided, the `loginId` will take precedence.\n
- userId :: String.t() :: Optional\n
When this parameter is provided it will reduce the scope of the report to a single user with the requested unique Id.\n
This parameter is mutually exclusive with `loginId`, if both are provided, the `loginId` will take precedence.\n
## Examples
iex> client = FusionAuth.client()
iex> end_date = 1591913469434
iex> start_date = 1577865600000
iex> FusionAuth.Reports.get_login_report(client, start_date, end_date)
{
:ok,
%{
"hourlyCounts" => [
%{"count" => 1, "interval" => 442050},
%{"count" => 1, "interval" => 442051},
%{"count" => 1, "interval" => 442054},
%{"count" => 3, "interval" => 442055},
%{"count" => 1, "interval" => 442120},
%{"count" => 2, "interval" => 442122},
%{"count" => 1, "interval" => 442146},
%{"count" => 1, "interval" => 442149},
%{"count" => 1, "interval" => 442151},
%{"count" => 1, "interval" => 442168},
%{"count" => 3, "interval" => 442170},
%{"count" => 3, "interval" => 442171},
%{"count" => 1, "interval" => 442174},
%{"count" => 1, "interval" => 442194},
%{"count" => 1, "interval" => 442197}
],
"total" => 22
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Generate Login Report](https://fusionauth.io/docs/v1/tech/apis/reports#generate-login-report).
"""
@spec get_login_report(client(), start_date(), end_date(), [key: String.t()] | []) :: result()
def get_login_report(client, start_date, end_date, parameters \\ []) do
params = Keyword.merge([start: start_date, end: end_date], parameters)
Tesla.get(
client,
"/api/report/login" <>
Utils.build_query_parameters(params)
) |> FusionAuth.result()
end
@doc """
Generate Monthly Active Users Report
This report retrieves the number of monthly active users for a given application or across all applications.
You must specify a date range for the report. The report is always generated using months as the interval.
## Parameters
- applicationid :: String.t() :: Optional\n
A specific application to query for. If not provided a "Global" (across all applications) monthly active users report will be returned.
## Examples
iex> client = FusionAuth.client()
iex> end_date = 1591830136785
iex> start_date = 1588316400000
iex> params = [applicationId: "473f2618-c526-45ba-9c35-8739ba6cfc2e"]
iex> FusionAuth.Reports.get_monthly_active_users_report(client, start_date, end_date, params)
{
:ok,
%{
"monthlyActiveUsers" => [
%{"count" => 10, "interval" => 543},
%{"count" => 10, "interval" => 544},
%{"count" => 10, "interval" => 545},
%{"count" => 9, "interval" => 546}
],
"total": 39,
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Generate Monthly Active Users Report](https://fusionauth.io/docs/v1/tech/apis/reports#generate-monthly-active-users-report).
"""
@spec get_monthly_active_users_report(client(), start_date(), end_date(), [key: application_id()] | []) :: result()
def get_monthly_active_users_report(client, start_date, end_date, parameters \\ []) do
params = Keyword.merge([start: start_date, end: end_date], parameters)
Tesla.get(
client,
"/api/report/monthly-active-user" <>
Utils.build_query_parameters(params)
) |> FusionAuth.result()
end
@doc """
Generate Registration Report
This report retrieves the number of registrations for a given application or across all applications.
You must specify a date range for the report. The report is always generated in hours.
If you want to calculate daily registrations, you’ll need to roll up the results in the response.
## Parameters
- applicationid :: String.t() :: Optional\n
A specific application to query for. If not provided a "Global" (across all applications) registrations report will be returned.
## Examples
iex> client = FusionAuth.client()
iex> end_date = 1591830136785
iex> start_date = 1588316400000
iex> params = [applicationId: "473f2618-c526-45ba-9c35-8739ba6cfc2e"]
iex> FusionAuth.Reports.get_registration_report(client, start_date, end_date, params)
{
:ok,
%{
"hourlyCounts" => [
%{"count" => 1, "interval" => 442030},
%{"count" => 1, "interval" => 442055},
%{"count" => 1, "interval" => 442056},
%{"count" => 0, "interval" => 442168}
],
"total" => 3
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Generate Registration Report](https://fusionauth.io/docs/v1/tech/apis/reports#generate-registration-report).
"""
@spec get_registration_report(client(), start_date(), end_date(), [key: application_id()] | []) :: result()
def get_registration_report(client, start_date, end_date, parameters \\ []) do
params = Keyword.merge([start: start_date, end: end_date], parameters)
Tesla.get(
client,
"/api/report/registration" <>
Utils.build_query_parameters(params)
) |> FusionAuth.result()
end
@doc """
Generate Totals Report
This report retrieves the number of registrations for a given application or across all applications.
You must specify a date range for the report. The report is always generated in hours.
If you want to calculate daily registrations, you’ll need to roll up the results in the response.
## Examples
iex> FusionAuth.Reports.get_totals_report(client)
{
:ok,
%{
"applicationTotals" => %{
"3c219e58-ed0e-4b18-ad48-f4f92793ae32" => %{
"logins" => 15,
"registrations" => 1,
"totalRegistrations" => 1
},
"c4e82607-dd9f-412a-a033-c53b79820446" => %{
"logins" => 0,
"registrations" => 0,
"totalRegistrations" => 0
},
"f7a72ad1-de6a-412f-a372-e689a3b7adcb" => %{
"logins" => 5,
"registrations" => 1,
"totalRegistrations" => 1
}
},
"globalRegistrations" => 1,
"totalGlobalRegistrations" => 3
},
%Tesla.Env{...}
}
For more information, visit the FusionAuth API Documentation for [Generate Totals Report](https://fusionauth.io/docs/v1/tech/apis/reports#generate-totals-report).
"""
@spec get_totals_report(client()) :: result()
def get_totals_report(client) do
Tesla.get(client, "/api/report/totals") |> FusionAuth.result()
end
end
|
lib/fusion_auth/reports.ex
| 0.857917
| 0.437703
|
reports.ex
|
starcoder
|
defmodule DuckDuck.Transform do
alias DuckDuck.UploadCommand, as: Command
@moduledoc """
Describes a series of transformations to iteratively build an UploadCommand.
"""
@effects Application.get_env(:duckduck, :effects_client, DuckDuck.Effects)
@switches [tag: :string, path: :string, yes: :boolean]
@aliases [t: :tag, f: :path, y: :yes]
@doc "Transform an argument list into a command"
@spec parse([String.t()]) :: Command.t()
def parse(argv) do
{parsed, _rest} =
OptionParser.parse!(argv, switches: @switches, aliases: @aliases)
params =
parsed
|> Enum.map(&translate/1)
|> Enum.into(%{})
Command.transform(%Command{}, params)
end
# translate to the keys of the UploadCommand schema
@spec translate({atom(), any()}) :: {atom(), any()}
defp translate({:yes, accepted?}), do: {:accept?, accepted?}
defp translate({:file, path}), do: {:path, path}
defp translate(pair), do: pair
@doc """
Put the owner in the command if not already there
"""
@spec owner(Command.t()) :: Command.t() | {:error, String.t()}
def owner(command) do
case @effects.fetch_env(:duckduck, :owner) do
{:ok, owner} ->
Command.transform(command, %{owner: owner})
:error ->
{:error, "Couldn't find repo owner in config"}
end
end
@doc """
Put the repo in the command if not already there
"""
@spec repo(Command.t()) :: Command.t() | {:error, String.t()}
def repo(command) do
case @effects.fetch_env(:duckduck, :repo) do
{:ok, repo} ->
Command.transform(command, %{repo: repo})
:error ->
{:error, "Couldn't find repo name in config"}
end
end
@doc """
Put the tag in a command if not already there
"""
@spec tag(Command.t()) :: Command.t()
def tag(command) do
Command.transform(command, %{tag: @effects.get_tag()})
end
@doc """
Put the path to the upload file in the command if not already present
"""
@spec path(Command.t()) :: Command.t() | {:error, String.t()}
def path(%Command{tag: tag} = command) do
case DuckDuck.find_release_file(tag) do
{:ok, file} ->
Command.transform(command, %{path: file})
{:error, _reason} = e ->
e
end
end
@doc """
Put the api token in the command if not already there.
"""
@spec api_token(Command.t()) :: Command.t() | {:error, String.t()}
def api_token(command) do
case @effects.read_api_token() do
{:ok, token} ->
Command.transform(command, %{api_token: token})
{:error, _reason} = e ->
e
end
end
@doc """
Ask the user if they're ok with the upload plan
"""
@spec accept?(Command.t()) :: Command.t()
def accept?(%Command{tag: tag, path: path} = command) do
Command.transform(command, %{accept?: DuckDuck.confirm(path, tag)})
end
@doc """
Find the upload url and put it in the command.
For uploading assets, you need to ask GitHub where to put them through
their API. Interestingly, you can't upload assets to a tag. Only a release
may have assets. So when you want to upload to a tag, you must also create
the release from the tag. You can do this with a single API call.
"""
@spec upload_url(Command.t()) :: Command.t() | {:error, String.t()}
def upload_url(
%Command{api_token: token, owner: owner, repo: repo, tag: tag} = command
) do
if Enum.any?([token, owner, repo, tag], &is_nil/1) do
{:error,
"""
Couldn't find the upload url because I didn't know at least one of
- api token
- repo owner
- repo name
- tag
"""}
else
Command.transform(command, %{
upload_url: DuckDuck.find_upload_url(token, owner, repo, tag)
})
end
end
@doc """
Try uploading the tarball given the information in the command.
"""
@spec upload(Command.t()) :: IO.chardata()
def upload(%Command{path: path, api_token: api_token, upload_url: url}) do
IO.puts("Please wait. Uploading #{path}...")
case DuckDuck.upload(path, api_token, url) do
:ok ->
[:green, "Release successfully uploaded", :reset, "."]
{:error, reason} ->
[:red, reason]
end
end
def upload(%Command{}) do
["Release upload ", :red, "failed", :reset, ".\n"]
end
end
|
lib/duck_duck/transform.ex
| 0.774967
| 0.416322
|
transform.ex
|
starcoder
|
defmodule Militerm.Parsers.SimpleResponse do
@moduledoc ~S"""
The response system allows NPCs to map text string patterns to
events. This is a fairly generic system, so the scripting needs to
supply the string being matched as well as the set of matches. The
returned event is then triggered by the script as well.
response:
set-name:
- pattern: pattern
events:
- event1
- event2
The pattern is a regex with named captures available.
This should be sufficient to build a bot based on the old Eliza game.
"""
@doc """
Takes a pattern and returns the Elixir regex that can match against
a string.
Patterns:
$name - a single word
$name* - zero, one, or more words
$name? - zero or one word
$name+ - one or more words
$$ - literal dollar sign
## Examples
iex> Regex.named_captures(SimpleResponse.parse("This is fun!"), "This is fun!")
%{}
iex> Regex.named_captures(SimpleResponse.parse("$x* hello $y*"), "Why hello there")
%{"x" => "Why", "y" => "there"}
iex> Regex.named_captures(SimpleResponse.parse("$_* hello $y*"), "Why hello there")
%{"y" => "there"}
"""
def parse(pattern) do
[literal | rest] = String.split(pattern, ~r{\s*\$})
raw =
[prepare_literal(literal) | compile_pattern_bits(rest)]
|> Enum.map(&Regex.source/1)
|> Enum.join("")
Regex.compile!("^#{raw}$")
end
def compile_pattern_bits(list, acc \\ [])
def compile_pattern_bits([], acc), do: Enum.reverse(acc)
def compile_pattern_bits([<<"$", _::binary>> = literal | rest], acc) do
compile_pattern_bits(rest, [Regex.escape(literal) | acc])
end
def compile_pattern_bits([match | rest], acc) do
case Regex.run(~r{^(_|[a-zA-Z]+)([?*+]?)\s*(.*)}, match, capture: :all_but_first) do
[name, quantifier, literal] ->
compile_pattern_bits(rest, [
prepare_literal(literal),
prepare_match(name, quantifier)
| acc
])
nil ->
compile_pattern_bits(rest, [prepare_literal("$#{match}") | acc])
end
end
def prepare_literal(literal) do
literal
|> Regex.escape()
|> String.replace(~r{(\\ )+}, ~S"\s+")
|> Regex.compile!()
end
def prepare_match(name, "") do
Regex.compile!("\\b\\s*(#{capture(name)}\\S+)\\b\\s*")
end
def prepare_match(name, "?") do
Regex.compile!("\\b\\s*(#{capture(name)}(\\S+)?)\\b\\s*")
end
def prepare_match(name, "*") do
Regex.compile!("\\b\\s*(#{capture(name)}((\\S+(\\s+\\S+)*))?)\\b\\s*")
end
def prepare_match(name, "+") do
Regex.compile!("\\b\\s*(#{capture(name)}(\\S+(\\s+\\S+)*))\\b\\s*")
end
def capture("_"), do: ""
def capture(name), do: "?<#{name}>"
end
|
lib/militerm/parsers/simple_response.ex
| 0.750187
| 0.523177
|
simple_response.ex
|
starcoder
|
defmodule EthBlockchain.Transaction do
@moduledoc false
import Utils.Helpers.Encoding
alias Keychain.Signature
alias ExthCrypto.Hash.Keccak
alias EthBlockchain.{Adapter, ABIEncoder}
defstruct nonce: 0,
gas_price: 0,
gas_limit: 0,
to: <<>>,
value: 0,
v: nil,
r: nil,
s: nil,
init: <<>>,
data: <<>>
@type t :: %__MODULE__{
nonce: integer(),
gas_price: integer(),
gas_limit: integer(),
to: String.t() | <<_::0>>,
value: integer(),
v: Signature.hash_v(),
r: Signature.hash_r(),
s: Signature.hash_s(),
init: binary(),
data: binary()
}
@doc """
Initiate a token transfer between from `from` to `to` of the given amount.
Ether is represented with `0x0000000000000000000000000000000000000000` as contract address and will be used as the default currency if no contract address specified.
The gas price can be optionally specied, will default to the configued `:default_gas_price` if ommited.
Possible map attrs:
/3 %{:from, :to, :amount}
-> will transfer `amount` ether from `from` to `to` with the default gas price
/4 %{:from, :to, :amount, :gas_price}
-> will transfer `amount` ether from `from` to `to` with the specified gas price
/4 %{:from, :to, :amount, :contract_addr}
-> will transfer `amount` token using the ERC20 contract residing at `contract_addr`
from `from_addr` to `to` with the default gas price
/5 %{:from, :to, :amount, :contract_address, :gas_price}
->
if contract_addr is `0x0000000000000000000000000000000000000000`:
will transfer `amount` ether from `from_addr` to `to_addr` with the specified gas price
otherwise:
will transfer `amount` token using the ERC20 contract residing at `contract_addr`
from `from_addr` to `to_addr` with the specified gas price
Returns:
`{:ok, tx_hash}` if successful
or
`{:error, error_code}` or `{:error, error_code, message}` if failed
"""
@spec send(map(), atom() | nil, pid() | nil) ::
{atom(), String.t()} | {atom(), atom()} | {atom(), atom(), String.t()}
def send(attrs, adapter \\ nil, pid \\ nil)
def send(
%{contract_address: "0x0000000000000000000000000000000000000000"} = attrs,
adapter,
pid
) do
send_eth(attrs, adapter, pid)
end
def send(
%{contract_address: _} = attrs,
adapter,
pid
) do
send_token(attrs, adapter, pid)
end
def send(
attrs,
adapter,
pid
) do
send_eth(attrs, adapter, pid)
end
defp send_eth(
%{
from: from,
to: to,
amount: amount
} = attrs,
adapter,
pid
) do
gas_limit = Application.get_env(:eth_blockchain, :default_eth_transaction_gas_limit)
gas_price = get_gas_price_or_default(attrs)
case get_transaction_count(%{address: from}, adapter, pid) do
{:ok, nonce} ->
%__MODULE__{
gas_limit: gas_limit,
gas_price: gas_price,
nonce: int_from_hex(nonce),
to: from_hex(to),
value: amount
}
|> sign_and_hash(from)
|> send_raw(adapter, pid)
error ->
error
end
end
defp send_token(
%{
from: from,
to: to,
amount: amount,
contract_address: contract_address
} = attrs,
adapter,
pid
) do
with {:ok, encoded_abi_data} <- ABIEncoder.transfer(to, amount),
{:ok, nonce} <- get_transaction_count(%{address: from}, adapter, pid) do
gas_limit = Application.get_env(:eth_blockchain, :default_contract_transaction_gas_limit)
gas_price = get_gas_price_or_default(attrs)
%__MODULE__{
gas_limit: gas_limit,
gas_price: gas_price,
nonce: int_from_hex(nonce),
to: from_hex(contract_address),
data: encoded_abi_data
}
|> sign_and_hash(from)
|> send_raw(adapter, pid)
else
error -> error
end
end
defp get_gas_price_or_default(%{gas_price: gas_price}), do: gas_price
defp get_gas_price_or_default(_attrs) do
Application.get_env(:eth_blockchain, :default_gas_price)
end
defp sign_and_hash(%__MODULE__{} = transaction_data, from) do
case sign_transaction(transaction_data, from) do
{:ok, signed_trx} ->
hashed =
signed_trx
|> serialize()
|> ExRLP.encode()
|> to_hex()
{:ok, hashed}
error ->
error
end
end
defp send_raw({:ok, transaction_data}, adapter, pid) do
Adapter.call({:send_raw, transaction_data}, adapter, pid)
end
defp send_raw(error, _adapter, _pid), do: error
defp get_transaction_count(%{address: address}, adapter, pid) do
Adapter.call({:get_transaction_count, address}, adapter, pid)
end
defp sign_transaction(transaction, wallet_address) do
chain_id = Application.get_env(:eth_blockchain, :chain_id)
result =
transaction
|> transaction_hash(chain_id)
|> Signature.sign_transaction_hash(wallet_address, chain_id)
case result do
{:ok, {v, r, s}} -> {:ok, %{transaction | v: v, r: r, s: s}}
error -> error
end
end
@doc """
Serialize, encode and returns a hash of a given transaction
"""
@spec transaction_hash(__MODULE__.t(), integer()) :: Keccak.keccak_hash()
def transaction_hash(trx, chain_id) do
trx
|> serialize(false)
|> Kernel.++([encode_unsigned(chain_id), <<>>, <<>>])
|> ExRLP.encode()
|> Keccak.kec()
end
@doc """
Encodes a transaction such that it can be RLP-encoded.
"""
@spec serialize(__MODULE__.t(), bool()) :: ExRLP.t()
def serialize(trx, include_vrs \\ true) do
init_or_data =
case trx.to do
<<>> -> trx.init
_ -> trx.data
end
base = [
trx.nonce |> encode_unsigned(),
trx.gas_price |> encode_unsigned(),
trx.gas_limit |> encode_unsigned(),
trx.to,
trx.value |> encode_unsigned(),
init_or_data
]
if include_vrs do
base ++
[
trx.v |> encode_unsigned(),
trx.r |> encode_unsigned(),
trx.s |> encode_unsigned()
]
else
base
end
end
@doc """
Decodes a transaction that was previously encoded using `Transaction.serialize/1`.
"""
@spec deserialize(ExRLP.t()) :: __MODULE__.t()
def deserialize(rlp) do
[
nonce,
gas_price,
gas_limit,
to,
value,
init_or_data,
v,
r,
s
] = rlp
{init, data} =
case to do
<<>> -> {init_or_data, <<>>}
_ -> {<<>>, init_or_data}
end
%__MODULE__{
nonce: :binary.decode_unsigned(nonce),
gas_price: :binary.decode_unsigned(gas_price),
gas_limit: :binary.decode_unsigned(gas_limit),
to: to,
value: :binary.decode_unsigned(value),
init: init,
data: data,
v: :binary.decode_unsigned(v),
r: :binary.decode_unsigned(r),
s: :binary.decode_unsigned(s)
}
end
end
|
apps/eth_blockchain/lib/eth_blockchain/transaction.ex
| 0.912927
| 0.608536
|
transaction.ex
|
starcoder
|
defmodule Tip do
@moduledoc """
Tip is a basic apparatus for performing type checks during
development and test. It does not replace the need to write type
checks in the code, it merely provides you with a convenient
assertive style and a framework for thinking with types.
Tip's basic types correspond roughly to the types that are
recognised by Elixir protocols, which are the names of struct
modules and the following special types:
* `Tuple`
* `Atom`
* `List`
* `BitString`
* `Integer`
* `Float`
* `Function`
* `Pid`
* `Map`
* `Port`
* `Reference`
* `Any`
You may, however, define any types arbitrarily by implementing the
`Tip.Check` ProtocolEx. Here are a few custom types we ship:
* `Nil` - Nil
* `Truthy` - Not nil or false
* `Falsy` - Nil or false
* `{Tuple, count :: non_neg_integer}` - tuple of N items
* `{Function, arity :: non_neg_integer}` - function of arity
* `{List, of :: type}` - List of a given type
### A note on taste
We don't advise overly strict testing of types. For example, if you
were given a list of ints and you wanted a list of binaries, it
might make more sense to delay the checking of the contents for
clarity of error messaging.
"""
@doc "The type of something as used by Protocol determination"
def prototype(%struct{}), do: struct
def prototype(t) when is_tuple(t), do: Tuple
def prototype(a) when is_atom(a), do: Atom
def prototype(l) when is_list(l), do: List
def prototype(b) when is_bitstring(b), do: BitString
def prototype(i) when is_integer(i), do: Integer
def prototype(f) when is_float(f), do: Float
def prototype(f) when is_function(f), do: Function
def prototype(p) when is_pid(p), do: Pid
def prototype(%{}), do: Map
def prototype(p) when is_port(p), do: Port
def prototype(r) when is_reference(r), do: Reference
def prototype(_), do: Any
if Code.ensure_loaded?(ProtocolEx) do
@spec check(type :: term, data :: term) :: {:ok, data :: term} | {:error, Tip.Error.t}
defmacro check(type, data) do
caller = caller_info(__CALLER__)
quote do
Tip.check(unquote(type), unquote(data), unquote(caller))
end
end
@spec check!(type :: term, data :: term) :: term
defmacro check!(type, data) do
caller = caller_info(__CALLER__)
quote do
Tip.check!(unquote(type), unquote(data), unquote(caller))
end
end
@doc false
def check(type, data, caller) do
if Tip.Check.check(type, data),
do: {:ok, data},
else: {:error, Tip.Error.exception(type: type, data: data, env: caller)}
end
@doc false
def check!(type, data, caller) do
if Tip.Check.check(type, data),
do: data,
else: raise Tip.Error, [type: type, data: data, env: caller]
end
@doc """
A config-disable-able type assertion that matches the type and
data, returning the data.
The name is taken from Idris, if you were wondering
"""
@spec the(type :: term, data :: term) :: (data :: term)
defmacro the(type, data) do
caller = caller_info(__CALLER__)
quote do
Tip.the(unquote(type), unquote(data), unquote(caller))
end
end
@doc false
def the(type, data, env) do
if Application.get_env(Tip, :enabled, false),
do: Tip.check!(type, data, env),
else: data
end
defp caller_info(caller) do
quote do
%{ module: unquote(caller.module),
function: unquote(caller.function),
file: unquote(caller.file),
line: unquote(caller.line) }
end
end
end
end
|
lib/tip.ex
| 0.809615
| 0.800887
|
tip.ex
|
starcoder
|
defmodule Benx.Decoder do
@moduledoc """
Provides decoding for iodata according to the
Bencoding specification.
"""
alias Benx.Decoder.SyntaxError
@doc """
Decodes a Bencoded iolist.
"""
@spec decode(iodata) ::
{:ok, Benx.Encoder.t} |
{:error, SyntaxError.t}
def decode(data) do
with flattened = :lists.flatten(data),
{:ok, term, [], _pos} <- do_decode(flattened)
do
{:ok, term}
else
{:ok, _term, _rest, pos} ->
{:error, SyntaxError.exception(message: "unable to determine type", position: pos)}
{:error, reason, pos} ->
{:error, SyntaxError.exception(message: reason, position: pos)}
end
end
@doc """
Decodes a Bencoded iolist. Raises on syntax errors.
"""
@spec decode!(iodata) :: String.t
def decode!(data) do
case decode(data) do
{:ok, term} -> term
{:error, err} -> raise err
end
end
defp do_decode(data, pos \\ 0)
defp do_decode([?i|rem], pos) do
decode_integer(rem, pos + 1)
end
defp do_decode([?l|rem], pos) do
decode_list(rem, pos + 1)
end
defp do_decode([?d|rem], pos) do
decode_map(rem, pos + 1)
end
defp do_decode(data, pos) do
decode_string_length(data, pos)
end
defp decode_integer(data, pos, acc \\ [])
defp decode_integer([?-|[?0|[?e|_rest]]], pos, _acc) do
{:error, "-0 is an invalid integer", pos}
end
defp decode_integer([?e|rest], pos, acc) do
integer =
acc
|> :lists.reverse()
|> :erlang.list_to_integer()
{:ok, integer, rest, pos + 1}
end
defp decode_integer([digit|rem], pos, acc) do
decode_integer(rem, pos + 1, [digit|acc])
end
defp decode_list(data, pos, acc \\ [])
defp decode_list([?e|rest], pos, acc) do
{:ok, :lists.reverse(acc), rest, pos + 1}
end
defp decode_list([?i|rem], pos, acc) do
with {:ok, integer, rest, new_pos} <- decode_integer(rem, pos + 1),
do: decode_list(rest, new_pos, [integer|acc])
end
defp decode_list([?l|rem], pos, acc) do
with {:ok, list, rest, new_pos} <- decode_list(rem, pos + 1),
do: decode_list(rest, new_pos, [list|acc])
end
defp decode_list([?d|rem], pos, acc) do
with {:ok, map, rest, new_pos} <- decode_map(rem, pos + 1),
do: decode_list(rest, new_pos, [map|acc])
end
defp decode_list([], pos, _acc) do
{:error, "expected 'e' for end of list or for list to continue", pos}
end
defp decode_list(data, pos, acc) do
with {:ok, string, rest, new_pos} <- decode_string_length(data, pos),
do: decode_list(rest, new_pos, [string|acc])
end
defp decode_map(data, pos, acc \\ %{})
defp decode_map([?e|rest], pos, acc) do
{:ok, acc, rest, pos + 1}
end
defp decode_map([], pos, _acc) do
{:error, "expected 'e' for end of dict or for dict to continue", pos}
end
defp decode_map(data, pos, acc) do
with {:ok, key, rest, new_pos} <- decode_string_length(data, pos),
{:ok, value, rest, new_pos} <- do_decode(rest, new_pos),
do: decode_map(rest, new_pos, Map.put(acc, key, value))
end
defp decode_string_length(data, pos, acc \\ [])
defp decode_string_length([?:|_rem], pos, []) do
{:error, "expected string length", pos}
end
defp decode_string_length([?:|rem], pos, acc) do
length =
acc
|> :lists.reverse()
|> :erlang.list_to_integer()
decode_string(rem, length, pos + 1)
end
defp decode_string_length([], pos, acc) do
start_pos = pos - length(acc)
{:error, "unable to determine type", start_pos}
end
defp decode_string_length([digit|rem], pos, acc) do
decode_string_length(rem, pos + 1, [digit|acc])
end
defp decode_string(data, length, pos, acc \\ [])
defp decode_string(rest, 0, pos, acc) do
string =
acc
|> :lists.reverse()
|> :erlang.iolist_to_binary()
{:ok, string, rest, pos}
end
defp decode_string([], length, pos, acc) do
start_pos = pos - length(acc)
{:error, "expected #{length} more character(s) for string", start_pos}
end
defp decode_string([char|rem], length, pos, acc) do
decode_string(rem, length - 1, pos + 1, [char|acc])
end
end
|
lib/benx/decoder.ex
| 0.780328
| 0.646063
|
decoder.ex
|
starcoder
|
defmodule ConfusionMatrix.Printer do
defimpl String.Chars, for: P7777776.ConfusionMatrix do
import P7777776.ConfusionMatrix
alias P7777776.ConfusionMatrix
def to_string(%ConfusionMatrix{classes: classes, counts: counts} = cm) do
cell_width =
counts
|> Enum.reduce(0, fn {_, c}, max -> max(c, max) end)
|> Integer.to_string()
|> String.length()
label_width =
classes
|> Enum.reduce(0, fn c, max -> max(String.length(Atom.to_string(c)), max) end)
sorted_classes =
classes
|> Enum.sort()
a_header = String.duplicate(" ", label_width + 3) <> "actual"
a_labels =
sorted_classes
|> Enum.map(fn c -> String.pad_leading(Atom.to_string(c), label_width) end)
|> Enum.map(fn s -> String.to_charlist(s) end)
|> Enum.zip()
|> Enum.map(fn t -> Tuple.to_list(t) end)
|> Enum.map(fn l ->
l
|> Enum.map(fn c -> List.to_string([c]) |> String.pad_leading(cell_width + 1) end)
|> Enum.join("")
end)
|> Enum.map(fn s -> String.duplicate(" ", label_width + 2) <> s end)
|> Enum.join("\n")
rows =
sorted_classes
|> Enum.sort()
|> Enum.map(fn predicted_class ->
row(cm, sorted_classes, predicted_class, label_width, cell_width)
end)
|> Enum.join("\n")
per_class_header = String.duplicate(" ", label_width) <> " precision recall"
per_class =
sorted_classes
|> Enum.map(fn c ->
{String.pad_leading(Atom.to_string(c), label_width), format(precision(cm, c)),
format(recall(cm, c))}
end)
|> Enum.map(fn {c, p, r} -> ~s"#{c} #{p} #{r}" end)
|> Enum.join("\n")
avgs =
[
" precision recall",
~s"macro-avg #{format(macro_avg_precision(cm))} #{
format(macro_avg_recall(cm))
}",
~s"weighted-avg #{format(weighted_avg_precision(cm))} #{
format(weighted_avg_recall(cm))
}"
]
|> Enum.join("\n")
acc = ~s" accuracy\n #{format(accuracy(cm))}"
[a_header, "\n", a_labels, "\n", rows, "\n\n", per_class_header, "\n", per_class, "\n\n", avgs, "\n\n", acc]
end
defp format(nil), do: " n/a "
defp format(f), do: List.to_string(:io_lib.format("~5.3.0f", [f]))
defp row(
%ConfusionMatrix{counts: counts},
actual_classes,
predicted_class,
label_width,
cell_width
) do
p_label =
("p_" <> Atom.to_string(predicted_class))
|> String.pad_leading(label_width + 2)
numbers =
Enum.map(actual_classes, fn actual_class ->
Map.get(counts, {actual_class, predicted_class}, 0)
|> Integer.to_string()
|> String.pad_leading(cell_width)
end)
|> Enum.join(" ")
Enum.join([p_label, numbers], " ")
end
end
end
|
lib/p7777776_confusion_matrix_printer.ex
| 0.644449
| 0.424889
|
p7777776_confusion_matrix_printer.ex
|
starcoder
|
defmodule Grizzly.ZWave.SmartStart.MetaExtension.NetworkStatus do
@moduledoc """
This extension is used to advertise if the node is in the network and its
assigned node id
"""
@behaviour Grizzly.ZWave.SmartStart.MetaExtension
alias Grizzly.ZWave
@typedoc """
The different network statuses are:
- `:not_in_network` - the node in the provisioning list is not included in
the network
- `:included` - the node in the provisioning list is included in the network
and is functional
- `:failing` - the node in the provisioning list is included in the network
but is now marked as failing
"""
@type network_status :: :not_in_network | :included | :failing
@type t :: %__MODULE__{
node_id: ZWave.node_id(),
network_status: network_status()
}
@enforce_keys [:node_id, :network_status]
defstruct node_id: nil, network_status: nil
@doc """
Create a `NetworkStatus.t()`
If the node is not included into the network the `node_id` has to be equal to
`0`. If the node has been included into the network and is either functional
or failing then it has to have a `node_id` greater than `0`.
"""
@spec new(ZWave.node_id(), network_status()) ::
{:ok, t()} | {:error, :invalid_network_status | :invalid_node_id}
def new(node_id, :not_in_network) when node_id > 0 do
{:error, :invalid_node_id}
end
def new(node_id, network_status)
when node_id == 0 and network_status in [:included, :failing] do
{:error, :invalid_node_id}
end
def new(node_id, network_status)
when network_status in [:not_in_network, :included, :failing] do
{:ok, %__MODULE__{node_id: node_id, network_status: network_status}}
end
def new(_, _) do
{:error, :invalid_network_status}
end
@doc """
Make a binary string from a `NetworkStatus.t()`
"""
@impl true
@spec to_binary(t()) :: {:ok, binary()}
def to_binary(%__MODULE__{node_id: node_id, network_status: network_status}) do
{:ok, <<0x37::size(7), 0::size(1), 0x02, node_id, network_status_to_byte(network_status)>>}
end
@doc """
Make a `NetworkStatus.t()` from a binary string
The binary string's critical bit MUST not be set. If it is this function will
return `{:error, :critical_bit_set}`
"""
@impl true
@spec from_binary(binary()) ::
{:ok, t()} | {:error, :invalid_network_status | :critical_bit_set | :invalid_binary}
def from_binary(<<0x37::size(7), 0::size(1), 0x02, node_id, network_status_byte>>) do
case network_status_from_byte(network_status_byte) do
{:ok, network_status} ->
new(node_id, network_status)
error ->
error
end
end
def from_binary(<<0x37::size(7), 1::size(1), _rest::binary>>) do
{:error, :critical_bit_set}
end
def from_binary(_), do: {:error, :invalid_binary}
defp network_status_to_byte(:not_in_network), do: 0x00
defp network_status_to_byte(:included), do: 0x01
defp network_status_to_byte(:failing), do: 0x02
defp network_status_from_byte(0x00), do: {:ok, :not_in_network}
defp network_status_from_byte(0x01), do: {:ok, :included}
defp network_status_from_byte(0x02), do: {:ok, :failing}
defp network_status_from_byte(_), do: {:error, :invalid_network_status}
end
|
lib/grizzly/zwave/smart_start/meta_extension/network_status.ex
| 0.847479
| 0.480722
|
network_status.ex
|
starcoder
|
if Code.ensure_loaded?(:ibrowse) do
defmodule Tesla.Adapter.Ibrowse do
@moduledoc """
Adapter for [ibrowse](https://github.com/cmullaparthi/ibrowse)
Remember to add `{:ibrowse, "~> 4.2"}` to dependencies (and `:ibrowse` to applications in `mix.exs`)
Also, you need to recompile tesla after adding `:ibrowse` dependency:
```
mix deps.clean tesla
mix deps.compile tesla
```
### Example usage
```
# set globally in config/config.exs
config :tesla, :adapter, Tesla.Adapter.Ibrowse
# set per module
defmodule MyClient do
use Tesla
adapter Tesla.Adapter.Ibrowse
end
```
"""
@behaviour Tesla.Adapter
import Tesla.Adapter.Shared, only: [stream_to_fun: 1, next_chunk: 1]
alias Tesla.Multipart
def call(env, opts) do
with {:ok, status, headers, body} <- request(env, opts) do
{:ok,
%{
env
| status: format_status(status),
headers: format_headers(headers),
body: format_body(body)
}}
end
end
defp format_status(status) when is_list(status) do
status |> to_string() |> String.to_integer()
end
defp format_headers(headers) do
for {key, value} <- headers do
{String.downcase(to_string(key)), to_string(value)}
end
end
defp format_body(data) when is_list(data), do: IO.iodata_to_binary(data)
defp format_body(data) when is_binary(data), do: data
defp request(env, opts) do
body = env.body || []
handle(
request(
Tesla.build_url(env.url, env.query) |> to_charlist,
env.headers,
env.method,
body,
Tesla.Adapter.opts(env, opts)
)
)
end
defp request(url, headers, method, %Multipart{} = mp, opts) do
headers = headers ++ Multipart.headers(mp)
body = stream_to_fun(Multipart.body(mp))
request(url, headers, method, body, opts)
end
defp request(url, headers, method, %Stream{} = body, opts) do
fun = stream_to_fun(body)
request(url, headers, method, fun, opts)
end
defp request(url, headers, method, body, opts) when is_function(body) do
body = {&next_chunk/1, body}
opts = Keyword.put(opts, :transfer_encoding, :chunked)
request(url, headers, method, body, opts)
end
defp request(url, headers, method, body, opts) do
{timeout, opts} = opts |> Keyword.pop(:timeout, 30_000)
:ibrowse.send_req(url, headers, method, body, opts, timeout)
end
defp handle({:error, {:conn_failed, error}}), do: error
defp handle(response), do: response
end
end
|
lib/tesla/adapter/ibrowse.ex
| 0.818845
| 0.697442
|
ibrowse.ex
|
starcoder
|
defmodule ExMachina.Sequence do
@moduledoc """
Module for generating sequential values.
Use `ExMachina.sequence/1` or `ExMachina.sequence/2` to generate
sequential values instead of calling this module directly.
"""
@doc false
def start_link do
Agent.start_link(fn -> Map.new() end, name: __MODULE__)
end
@doc """
Reset all sequences so that the next sequence starts from 0
## Example
ExMachina.Sequence.next("joe") # "joe0"
ExMachina.Sequence.next("joe") # "joe1"
ExMachina.Sequence.reset
ExMachina.Sequence.next("joe") # resets so the return value is "joe0"
You can use list as well
ExMachina.Sequence.next(["A", "B"]) # "A"
ExMachina.Sequence.next(["A", "B"]) # "B"
Sequence.reset
ExMachina.Sequence.next(["A", "B"]) # resets so the return value is "A"
If you want to reset sequences at the beginning of every test, put it in a
`setup` block in your test.
setup do
ExMachina.Sequence.reset
end
"""
@spec reset() :: :ok
def reset do
Agent.update(__MODULE__, fn _ -> Map.new() end)
end
@doc false
def next(sequence_name) when is_binary(sequence_name) do
next(sequence_name, &(sequence_name <> to_string(&1)))
end
@doc false
def next(sequence_name) do
raise(
ArgumentError,
"Sequence name must be a string, got #{inspect(sequence_name)} instead"
)
end
@doc false
def next(sequence_name, [_ | _] = list) do
length = length(list)
Agent.get_and_update(__MODULE__, fn sequences ->
current_value = Map.get(sequences, sequence_name, 0)
index = rem(current_value, length)
new_sequences = Map.put(sequences, sequence_name, index + 1)
{value, _} = List.pop_at(list, index)
{value, new_sequences}
end)
end
@doc false
def next(sequence_name, formatter) do
Agent.get_and_update(__MODULE__, fn sequences ->
current_value = Map.get(sequences, sequence_name, 0)
new_sequences = Map.put(sequences, sequence_name, current_value + 1)
{formatter.(current_value), new_sequences}
end)
end
end
|
lib/ex_machina/sequence.ex
| 0.86378
| 0.410549
|
sequence.ex
|
starcoder
|
defmodule Ockam.Telemetry do
@moduledoc """
Provides functions to emit `:telemetry` events.
"""
@typedoc "The event name."
@type event_name :: atom() | [atom(), ...]
@typedoc "The event measurements."
@type event_measurements :: map()
@typedoc "The event metadata."
@type event_metadata :: map()
@typedoc "The option values accepted by emit* functions."
@type option :: {:measurements, event_measurements()} | {:metadata, event_metadata()}
@typedoc "The options accepted by emit* functions."
@type options :: [option()]
@doc """
Emits a `:telemetry` event.
The first argument is `event_name` which may be an atom or a list of atoms
identifying the event. If `event_name` is an atom, the event that is emitted
has the name [:ockam, event_name]. If `event_name` is list of atoms, the
event that is emitted has the name `[:ockam] ++ event_name`.
The second argument is a keyword list of options:
* `:measurements` - a map of measurements
* `:metadata` - a map of metadata
When the event is emitted, the handler functions attached to the event are
invoked in the emitting process.
Always returns `:ok`
"""
@spec emit_event(event_name(), options()) :: :ok
def emit_event(event_name, options \\ [])
def emit_event(event_name, options) when is_atom(event_name),
do: emit_event([event_name], options)
def emit_event([first | _rest] = event_name, options)
when is_list(event_name) and is_atom(first) do
measurements = Keyword.get(options, :measurements, %{})
metadata = Keyword.get(options, :metadata, %{})
:ok = :telemetry.execute([:ockam] ++ event_name, measurements, metadata)
end
@doc """
Emits the `start` event.
The first argument is `event_name` which may be an atom or a list of atoms
identifying the event. If `event_name` is an atom, the event that is emitted
has the name [:ockam, event_name, :start]. If `event_name` is list of atoms,
the event that is emitted has the name `[:ockam] ++ event_name ++ [:start]`.
The second argument is a keyword list of options:
* `:measurements` - a map of measurements
* `:metadata` - a map of metadata
When the event is emitted, the handler functions attached to the event are
invoked in the emitting process.
Returns the `start_time` as returned by `System.monotonic_time/0`. This
value should be later passed back into the `Ockam.Telemetry.emit_stop_event/3`
or the `Ockam.Telemetry.emit_exception_event/4` functions.
"""
@spec emit_start_event(event_name(), options()) :: start_time :: integer()
def emit_start_event(event_name, options \\ [])
def emit_start_event(event_name, options) when is_atom(event_name),
do: emit_start_event([event_name], options)
def emit_start_event([first | _rest] = event_name, options)
when is_list(event_name) and is_atom(first) do
start_time = System.monotonic_time()
metadata = Keyword.get(options, :metadata, %{})
measurements = Keyword.get(options, :measurements, %{})
measurements = Map.merge(measurements, %{system_time: System.system_time()})
event_name = Enum.reverse([:start] ++ event_name)
:ok = :telemetry.execute([:ockam] ++ event_name, measurements, metadata)
start_time
end
@doc """
Emits the `stop` event.
The first argument is `event_name` which may be an atom or a list of atoms
identifying the event. If `event_name` is an atom, the event that is emitted
has the name [:ockam, event_name, :stop]. If `event_name` is list of atoms,
the event that is emitted has the name `[:ockam] ++ event_name ++ [:stop]`.
The second argument is `start_time` that was returned by calling
`Ockam.Telemetry.emit_start_event/2`. This function will add a `duration`
measurement to the event by calculating `start_time - end_time`.
The third argument is a keyword list of options:
* `:measurements` - a map of measurements
* `:metadata` - a map of metadata
When the event is emitted, the handler functions attached to the event are
invoked in the emitting process.
Always returns `:ok`.
"""
@spec emit_stop_event(event_name(), start_time :: integer(), options()) :: :ok
def emit_stop_event(event_name, start_time, options \\ [])
def emit_stop_event(event_name, start_time, options)
when is_atom(event_name) and is_integer(start_time),
do: emit_stop_event([event_name], start_time, options)
def emit_stop_event([first | _rest] = event_name, start_time, options)
when is_list(event_name) and is_atom(first) and is_integer(start_time) do
measurements = Keyword.get(options, :measurements, %{})
metadata = Keyword.get(options, :metadata, %{})
end_time = System.monotonic_time()
measurements = Map.merge(measurements, %{duration: end_time - start_time})
event_name = Enum.reverse([:stop] ++ event_name)
:ok = :telemetry.execute([:ockam] ++ event_name, measurements, metadata)
end
@doc """
Emits the `exception` event.
The first argument is `event_name` which may be an atom or a list of atoms
identifying the event. If `event_name` is an atom, the event that is emitted
has the name [:ockam, event_name, :exception]. If `event_name` is list of
atoms, the event that is emitted has the name
`[:ockam] ++ event_name ++ [:exception]`.
The second argument is `start_time` that was returned by calling
`Ockam.Telemetry.emit_start_event/2`. This function will add a `duration`
measurement to the event by calculating `start_time - end_time`.
The third argument is the kind of exception.
The fourth argument is the reason for the exception.
The fifth argument is the stacktrace of the exception.
The sixth argument is a keyword list of options:
* `:measurements` - a map of measurements
* `:metadata` - a map of metadata
When the event is emitted, the handler functions attached to the event are
invoked in the emitting process.
Always returns `:ok`.
"""
@spec emit_exception_event(
event_name :: event_name(),
start_time :: integer(),
exception ::
{kind :: Exception.kind(), reason :: any(), stacktrace :: Exception.stacktrace()},
options :: options()
) :: :ok
def emit_exception_event(event_name, start_time, exception, options \\ [])
def emit_exception_event(event_name, start_time, exception, options)
when is_atom(event_name) and is_integer(start_time),
do: emit_exception_event([event_name], start_time, exception, options)
def emit_exception_event(
[first | _rest] = event_name,
start_time,
{kind, reason, stacktrace},
options
)
when is_list(event_name) and is_atom(first) and is_integer(start_time) do
measurements = Keyword.get(options, :measurements, %{})
metadata = Keyword.get(options, :metadata, %{})
metadata = Map.merge(metadata, %{kind: kind, reason: reason, stacktrace: stacktrace})
end_time = System.monotonic_time()
measurements = Map.merge(measurements, %{duration: end_time - start_time})
event_name = Enum.reverse([:exception] ++ event_name)
:ok = :telemetry.execute([:ockam] ++ event_name, measurements, metadata)
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/telemetry.ex
| 0.928934
| 0.654149
|
telemetry.ex
|
starcoder
|
defmodule Surface.API do
@moduledoc false
alias Surface.IOHelper
@types [
:any,
:css_class,
:list,
:event,
:boolean,
:string,
:date,
:datetime,
:number,
:integer,
:decimal,
:map,
:fun,
:atom,
:module,
:changeset,
:form,
:keyword
]
@private_opts [:action, :to]
defmacro __using__(include: include) do
arities = %{
property: [2, 3],
slot: [1, 2],
data: [2, 3],
context: [1]
}
functions = for func <- include, arity <- arities[func], into: [], do: {func, arity}
quote do
import unquote(__MODULE__), only: unquote(functions)
@before_compile unquote(__MODULE__)
@after_compile unquote(__MODULE__)
Module.register_attribute(__MODULE__, :assigns, accumulate: false)
# Any caller component can hold other components with slots
Module.register_attribute(__MODULE__, :assigned_slots_by_parent, accumulate: false)
for func <- unquote(include) do
Module.register_attribute(__MODULE__, func, accumulate: true)
unquote(__MODULE__).init_func(func, __MODULE__)
end
end
end
defmacro __before_compile__(env) do
generate_docs(env)
[
quoted_property_funcs(env),
quoted_slot_funcs(env),
quoted_data_funcs(env),
quoted_context_funcs(env)
]
end
def __after_compile__(env, _) do
if !function_exported?(env.module, :init_context, 1) do
validate_has_init_context(env)
end
if function_exported?(env.module, :__slots__, 0) do
validate_slot_props_bindings!(env)
validate_required_slots!(env)
end
end
@doc false
def init_func(:slot, module) do
Module.register_attribute(module, :used_slot, accumulate: true)
:ok
end
def init_func(_func, _caller) do
:ok
end
@doc "Defines a property for the component"
defmacro property(name_ast, type_ast, opts_ast \\ []) do
build_assign_ast(:property, name_ast, type_ast, opts_ast, __CALLER__)
end
@doc "Defines a slot for the component"
defmacro slot(name_ast, opts_ast \\ []) do
build_assign_ast(:slot, name_ast, :any, opts_ast, __CALLER__)
end
@doc "Defines a data assign for the component"
defmacro data(name_ast, type_ast, opts_ast \\ []) do
build_assign_ast(:data, name_ast, type_ast, opts_ast, __CALLER__)
end
@doc """
Sets or retrieves a context assign.
### Usage
```
context set name, type, opts \\ []
context get name, opts
```
### Examples
```
context set form, :form
...
context get form, from: Form
```
"""
# context get
defmacro context({:get, _, [name_ast, opts_ast]}) when is_list(opts_ast) do
opts_ast = [{:action, :get} | opts_ast]
build_assign_ast(:context, name_ast, :any, opts_ast, __CALLER__)
end
defmacro context({:get, _, [_name_ast, type]}) when type in @types do
message = """
cannot redefine the type of the assign when using action :get. \
The type is already defined by a parent component using action :set\
"""
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
defmacro context({:get, _, [name_ast, invalid_opts_ast]}) do
build_assign_ast(:context, name_ast, :any, invalid_opts_ast, __CALLER__)
end
defmacro context({:get, _, [name_ast]}) do
opts_ast = [action: :get]
build_assign_ast(:context, name_ast, :any, opts_ast, __CALLER__)
end
defmacro context({:get, _, nil}) do
message = """
no name defined for context get
Usage: context get name, opts
"""
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
# context set
defmacro context({:set, _, [name_ast, type_ast, opts_ast]}) when is_list(opts_ast) do
opts_ast = Keyword.merge(opts_ast, action: :set, to: __CALLER__.module)
build_assign_ast(:context, name_ast, type_ast, opts_ast, __CALLER__)
end
defmacro context({:set, _, [_name_ast, opts]}) when is_list(opts) do
message = "no type defined for context set. Type is required after the name."
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
defmacro context({:set, _, [name_ast, type_ast]}) do
opts_ast = [action: :set, to: __CALLER__.module]
build_assign_ast(:context, name_ast, type_ast, opts_ast, __CALLER__)
end
defmacro context({:set, _, [_name_ast]}) do
message = "no type defined for context set. Type is required after the name."
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
# invalid usage
defmacro context({_action, _, args}) when length(args) > 2 do
message = """
invalid use of context. Usage: `context get name, opts` \
or `context set name, type, opts \\ []`\
"""
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
defmacro context({action, _, _}) do
message = "invalid context action. Expected :get or :set, got: #{Macro.to_string(action)}"
IOHelper.compile_error(message, __CALLER__.file, __CALLER__.line)
end
@doc false
def put_assign!(caller, func, name, type, opts, opts_ast, line) do
Surface.API.validate!(func, name, type, opts, caller)
assign = %{
func: func,
name: name,
type: type,
doc: pop_doc(caller.module),
opts: opts,
opts_ast: opts_ast,
line: line
}
assigns = Module.get_attribute(caller.module, :assigns) || %{}
name = Keyword.get(assign.opts, :as, assign.name)
existing_assign = assigns[name]
if Keyword.get(assign.opts, :scope) != :only_children do
if existing_assign do
message = """
cannot use name \"#{assign.name}\". There's already \
a #{existing_assign.func} assign with the same name \
at line #{existing_assign.line}.#{suggestion_for_duplicated_assign(assign)}\
"""
IOHelper.compile_error(message, caller.file, assign.line)
else
assigns = Map.put(assigns, name, assign)
Module.put_attribute(caller.module, :assigns, assigns)
end
end
Module.put_attribute(caller.module, assign.func, assign)
end
defp suggestion_for_duplicated_assign(%{func: :context, opts: opts}) do
"\n\nHint: " <>
case Keyword.get(opts, :action) do
:set ->
"""
if you only need this context assign in the child components, \
you can set option :scope as :only_children to solve the issue.\
"""
:get ->
"you can use the :as option to set another name for the context assign."
end
end
defp suggestion_for_duplicated_assign(_assign) do
""
end
defp quoted_data_funcs(env) do
data = Module.get_attribute(env.module, :data) || []
quote do
@doc false
def __data__() do
unquote(Macro.escape(data))
end
end
end
defp quoted_property_funcs(env) do
props =
(Module.get_attribute(env.module, :property) || [])
|> Enum.sort_by(&{&1.name != :id, !&1.opts[:required], &1.line})
props_names = Enum.map(props, fn prop -> prop.name end)
props_by_name = for p <- props, into: %{}, do: {p.name, p}
quote do
@doc false
def __props__() do
unquote(Macro.escape(props))
end
@doc false
def __validate_prop__(prop) do
prop in unquote(props_names)
end
@doc false
def __get_prop__(name) do
Map.get(unquote(Macro.escape(props_by_name)), name)
end
end
end
defp quoted_slot_funcs(env) do
used_slots =
for %{name: name, line: line} <- Module.get_attribute(env.module, :used_slot) || [] do
%{func: :slot, name: name, type: :any, doc: nil, opts: [], opts_ast: [], line: line}
end
slots = (Module.get_attribute(env.module, :slot) || []) ++ used_slots
slots = Enum.uniq_by(slots, & &1.name)
slots_names = Enum.map(slots, fn slot -> slot.name end)
slots_by_name = for p <- slots, into: %{}, do: {p.name, p}
required_slots_names =
for %{name: name, opts: opts} <- slots, opts[:required] do
name
end
assigned_slots_by_parent = Module.get_attribute(env.module, :assigned_slots_by_parent) || %{}
quote do
@doc false
def __slots__() do
unquote(Macro.escape(slots))
end
@doc false
def __validate_slot__(prop) do
prop in unquote(slots_names)
end
@doc false
def __get_slot__(name) do
Map.get(unquote(Macro.escape(slots_by_name)), name)
end
@doc false
def __assigned_slots_by_parent__() do
unquote(Macro.escape(assigned_slots_by_parent))
end
@doc false
def __required_slots_names__() do
unquote(Macro.escape(required_slots_names))
end
end
end
defp quoted_context_funcs(env) do
context = Module.get_attribute(env.module, :context) || []
{gets, sets} = Enum.split_with(context, fn c -> c.opts[:action] == :get end)
sets_in_scope = Enum.filter(sets, fn var -> var.opts[:scope] != :only_children end)
assigns = gets ++ sets_in_scope
quote do
@doc false
def __context_gets__() do
unquote(Macro.escape(gets))
end
@doc false
def __context_sets__() do
unquote(Macro.escape(sets))
end
@doc false
def __context_sets_in_scope__() do
unquote(Macro.escape(sets_in_scope))
end
@doc false
def __context_assigns__() do
unquote(Macro.escape(assigns))
end
end
end
def validate!(func, name, type, opts, caller) do
with :ok <- validate_type(func, name, type),
:ok <- validate_opts_keys(func, name, type, opts),
:ok <- validate_opts(func, type, opts),
:ok <- validate_required_opts(func, type, opts) do
:ok
else
{:error, message} ->
file = Path.relative_to_cwd(caller.file)
IOHelper.compile_error(message, file, caller.line)
end
end
defp validate_name_ast!(_func, {name, meta, context}, _caller)
when is_atom(name) and is_list(meta) and is_atom(context) do
name
end
defp validate_name_ast!(func, name_ast, caller) do
message = """
invalid #{func} name. Expected a variable name, got: #{Macro.to_string(name_ast)}\
"""
IOHelper.compile_error(message, caller.file, caller.line)
end
defp validate_type(_func, _name, type) when type in @types do
:ok
end
defp validate_type(func, name, type) do
message = """
invalid type #{Macro.to_string(type)} for #{func} #{name}.
Expected one of #{inspect(@types)}.
Hint: Use :any if the type is not listed.\
"""
{:error, message}
end
defp validate_opts_keys(func, name, type, opts) do
with true <- Keyword.keyword?(opts),
keys <- Keyword.keys(opts),
valid_opts <- get_valid_opts(func, type, opts),
[] <- keys -- (valid_opts ++ @private_opts) do
:ok
else
false ->
{:error,
"invalid options for #{func} #{name}. " <>
"Expected a keyword list of options, got: #{inspect(remove_private_opts(opts))}"}
unknown_options ->
valid_opts = get_valid_opts(func, type, opts)
{:error, unknown_options_message(valid_opts, unknown_options)}
end
end
defp validate_opts_ast!(func, opts, caller) when is_list(opts) do
if Keyword.keyword?(opts) do
for {key, value} <- opts do
{key, validate_opt_ast!(func, key, value, caller)}
end
else
opts
end
end
defp validate_opts_ast!(_func, opts, _caller) do
opts
end
defp validate_opts(func, type, opts) do
Enum.reduce_while(opts, :ok, fn {key, value}, _acc ->
case validate_opt(func, type, key, value) do
:ok ->
{:cont, :ok}
error ->
{:halt, error}
end
end)
end
defp validate_required_opts(func, type, opts) do
case get_required_opts(func, type, opts) -- Keyword.keys(opts) do
[] ->
:ok
missing_opts ->
{:error, "the following options are required: #{inspect(missing_opts)}"}
end
end
defp get_valid_opts(:property, _type, _opts) do
[:required, :default, :values]
end
defp get_valid_opts(:data, _type, _opts) do
[:default, :values]
end
defp get_valid_opts(:slot, _type, _opts) do
[:required, :props]
end
defp get_valid_opts(:context, _type, opts) do
case Keyword.fetch!(opts, :action) do
:get ->
[:from, :as]
:set ->
[:scope]
end
end
defp get_required_opts(:context, _type, opts) do
case Keyword.fetch!(opts, :action) do
:get ->
[:from]
_ ->
[]
end
end
defp get_required_opts(_func, _type, _opts) do
[]
end
defp validate_opt_ast!(:slot, :props, args_ast, caller) do
Enum.map(args_ast, fn
{name, {:^, _, [{generator, _, context}]}} when context in [Elixir, nil] ->
Macro.escape(%{name: name, generator: generator})
name when is_atom(name) ->
Macro.escape(%{name: name, generator: nil})
ast ->
message =
"invalid slot prop #{Macro.to_string(ast)}. " <>
"Expected an atom or a binding to a generator as `key: ^property_name`"
IOHelper.compile_error(message, caller.file, caller.line)
end)
end
defp validate_opt_ast!(_func, _key, value, _caller) do
value
end
defp validate_opt(_func, _type, :required, value) when not is_boolean(value) do
{:error, "invalid value for option :required. Expected a boolean, got: #{inspect(value)}"}
end
defp validate_opt(_func, _type, :values, value) when not is_list(value) do
{:error,
"invalid value for option :values. Expected a list of values, got: #{inspect(value)}"}
end
defp validate_opt(:context, _type, :scope, value)
when value not in [:only_children, :self_and_children] do
message = """
invalid value for option :scope. Expected :only_children or :self_and_children, \
got: #{inspect(value)}
"""
{:error, message}
end
defp validate_opt(:context, _type, :from, value) when not is_atom(value) do
{:error, "invalid value for option :from. Expected a module, got: #{inspect(value)}"}
end
defp validate_opt(:context, _type, :as, value) when not is_atom(value) do
{:error, "invalid value for option :as. Expected an atom, got: #{inspect(value)}"}
end
defp validate_opt(_func, _type, _key, _value) do
:ok
end
defp unknown_options_message(valid_opts, unknown_options) do
{plural, unknown_items} =
case unknown_options do
[option] ->
{"", option}
_ ->
{"s", unknown_options}
end
"""
unknown option#{plural} #{inspect(unknown_items)}. \
Available options: #{inspect(valid_opts)}\
"""
end
defp format_opts(opts_ast) do
opts_ast
|> Macro.to_string()
|> String.slice(1..-2)
end
defp generate_docs(env) do
case Module.get_attribute(env.module, :moduledoc) do
{_line, false} ->
:ok
nil ->
props_doc = generate_props_docs(env.module)
Module.put_attribute(env.module, :moduledoc, {env.line, props_doc})
{line, doc} ->
props_doc = generate_props_docs(env.module)
Module.put_attribute(env.module, :moduledoc, {line, doc <> "\n" <> props_doc})
end
end
defp generate_props_docs(module) do
docs =
for prop <- Module.get_attribute(module, :property) do
doc = if prop.doc, do: " - #{prop.doc}.", else: ""
opts = if prop.opts == [], do: "", else: ", #{format_opts(prop.opts_ast)}"
"* **#{prop.name}** *#{inspect(prop.type)}#{opts}*#{doc}"
end
|> Enum.reverse()
|> Enum.join("\n")
"""
### Properties
#{docs}
"""
end
defp validate_has_init_context(env) do
for var <- Module.get_attribute(env.module, :context) || [] do
if Keyword.get(var.opts, :action) == :set do
message = """
context assign \"#{var.name}\" not initialized. \
You should implement an init_context/1 callback and initialize its \
value by returning {:ok, #{var.name}: ...}\
"""
IOHelper.warn(message, env, fn _ -> var.line end)
end
end
:ok
end
defp validate_slot_props_bindings!(env) do
for slot <- env.module.__slots__(),
slot_props = Keyword.get(slot.opts, :props, []),
%{name: name, generator: generator} <- slot_props,
generator != nil do
case env.module.__get_prop__(generator) do
nil ->
existing_properties_names = env.module.__props__() |> Enum.map(& &1.name)
message = """
cannot bind slot prop `#{name}` to property `#{generator}`. \
Expected a existing property after `^`, \
got: an undefined property `#{generator}`.
Hint: Available properties are #{inspect(existing_properties_names)}\
"""
IOHelper.compile_error(message, env.file, slot.line)
%{type: type} when type != :list ->
message = """
cannot bind slot prop `#{name}` to property `#{generator}`. \
Expected a property of type :list after `^`, \
got: a property of type #{inspect(type)}\
"""
IOHelper.compile_error(message, env.file, slot.line)
_ ->
:ok
end
end
:ok
end
defp validate_required_slots!(env) do
for {{mod, _parent_node_id, parent_node_alias, line}, assigned_slots} <-
env.module.__assigned_slots_by_parent__(),
mod != nil,
name <- mod.__required_slots_names__(),
!MapSet.member?(assigned_slots, name) do
message = "missing required slot \"#{name}\" for component <#{parent_node_alias}>"
IOHelper.warn(message, env, fn _ -> line end)
end
end
defp pop_doc(module) do
doc =
case Module.get_attribute(module, :doc) do
{_, doc} -> doc
_ -> nil
end
Module.delete_attribute(module, :doc)
doc
end
defp build_assign_ast(func, name_ast, type_ast, opts_ast, caller) do
quote bind_quoted: [
func: func,
name: validate_name_ast!(func, name_ast, caller),
type: type_ast,
opts: validate_opts_ast!(func, opts_ast, caller),
opts_ast: Macro.escape(opts_ast),
line: caller.line
] do
Surface.API.put_assign!(__ENV__, func, name, type, opts, opts_ast, line)
end
end
defp remove_private_opts(opts) do
if is_list(opts) do
Enum.reject(opts, fn o -> Enum.any?(@private_opts, fn p -> match?({^p, _}, o) end) end)
else
opts
end
end
end
|
lib/surface/api.ex
| 0.742328
| 0.590396
|
api.ex
|
starcoder
|
defmodule Snitch.Tools.ElasticSearch.Product.Store do
@moduledoc """
Fetches data from product table to be used in product index
"""
@behaviour Elasticsearch.Store
import Ecto.Query
alias Snitch.Core.Tools.MultiTenancy.Repo
alias Snitch.Data.Model.Product, as: PM
alias Snitch.Tools.ElasticsearchCluster, as: EC
@preload [
:brand,
:images,
# Considering max nesting is 4 level
# This will benifit unless forced preload
# like => Repo.preload(taxon, :parent, force: true)
taxon: [parent: [parent: [parent: :parent]]],
parent_variation: [
parent_product: [:brand, :images, reviews: [rating_option_vote: :rating_option]]
],
reviews: [rating_option_vote: :rating_option],
options: [:option_type]
]
@index if Mix.env() == :test, do: "products_test", else: "products"
@impl true
@doc """
Will be able to stream products from all the tenants,
along with a tenant(virtual field) to track teanant in Elasticsearch
"""
def stream(_schema) do
["public" | Triplex.all()]
|> Stream.flat_map(fn tenant ->
IO.puts("\n\t Streaming data for #{tenant} database \n")
Repo.set_tenant(tenant)
query =
PM.sellable_products_query()
|> select([p, v], merge(p, %{tenant: ^tenant}))
|> preload([], ^@preload)
Repo.stream(query)
end)
end
@impl true
def transaction(fun) do
{:ok, result} = Repo.transaction(fun, timeout: :infinity)
result
end
def update_product_to_es(id, action \\ :create)
def update_product_to_es(id, action) when is_binary(id),
do: update_product_to_es(PM.get(id), action)
def update_product_to_es(product, action) when is_map(product) do
product = Repo.preload(%{product | tenant: Repo.get_prefix()}, [:variants | @preload])
case product.variants do
[] ->
update_sellable_product_to_es(product, action)
variants ->
Enum.map(
variants,
fn variant ->
%{variant | tenant: Repo.get_prefix()}
|> Repo.preload(@preload)
|> update_sellable_product_to_es(action)
end
)
end
end
defp update_sellable_product_to_es(%{state: :active, deleted_at: nil} = product, :create),
do:
Elasticsearch.put_document!(
EC,
product,
@index
)
# Incase product is not in active state, then delete the product
defp update_sellable_product_to_es(product, :create),
do: Elasticsearch.delete_document(EC, product, @index)
defp update_sellable_product_to_es(product, :delete),
do: Elasticsearch.delete_document(EC, product, @index)
def search_products(query),
do: Elasticsearch.post!(EC, "/#{@index}/_doc/_search", query)
end
|
apps/snitch_core/lib/core/tools/elasticsearch/product/store.ex
| 0.644337
| 0.502441
|
store.ex
|
starcoder
|
defmodule Tournament do
@doc """
Given `input` lines representing two teams and whether the first of them won,
lost, or reached a draw, separated by semicolons, calculate the statistics
for each team's number of games played, won, drawn, lost, and total points
for the season, and return a nicely-formatted string table.
A win earns a team 3 points, a draw earns 1 point, and a loss earns nothing.
Order the outcome by most total points for the season, and settle ties by
listing the teams in alphabetical order.
"""
@spec tally(input :: list(String.t())) :: String.t()
def tally(input) do
initial = %{mp: 0, w: 0, d: 0, l: 0, p: 0}
scores =
for line <- input,
split = String.split(line, ";", trim: true),
length(split) == 3,
[team_a, team_b, result] = split,
result in ["win", "loss", "draw"],
reduce: %{} do
acc ->
acc
|> Map.update(team_a, update_score(initial, result), &update_score(&1, result))
|> Map.update(
team_b,
update_score(initial, result, :opposite),
&update_score(&1, result, :opposite)
)
end
header = "Team | MP | W | D | L | P"
scores =
scores
|> Enum.sort(fn {team_a, %{p: a}}, {team_b, %{p: b}} ->
if a == b, do: team_a <= team_b, else: a > b
end)
|> Enum.map(fn {team, %{mp: n, w: w, d: d, l: l, p: p}} ->
"#{String.pad_trailing(team, 30)} | #{n} | #{w} | #{d} | #{l} | #{p}"
end)
Enum.join([header | scores], "\n")
end
def update_score(score, "win", :opposite), do: update_score(score, "loss")
def update_score(score, "loss", :opposite), do: update_score(score, "win")
def update_score(score, "draw", :opposite), do: update_score(score, "draw")
def update_score(%{mp: n, d: d, p: p} = score, "draw"),
do: %{score | mp: n + 1, d: d + 1, p: p + 1}
def update_score(%{mp: n, w: w, p: p} = score, "win"),
do: %{score | mp: n + 1, w: w + 1, p: p + 3}
def update_score(%{mp: n, l: l} = score, "loss"),
do: %{score | mp: n + 1, l: l + 1}
end
|
elixir/tournament/lib/tournament.ex
| 0.798815
| 0.562357
|
tournament.ex
|
starcoder
|
defmodule Mnemonix.Builder do
@moduledoc """
Creates functions that proxy to Mnemonix ones.
`use Mnemonix.Builder` to instrument a module with the `Mnemonix` client API.
It will define the `Mnemonix.Supervision` functions and all `Mnemonix.Feature` functions on the module:
- `Mnemonix.Features.Map`
- `Mnemonix.Features.Bump`
- `Mnemonix.Features.Expiry`
- `Mnemonix.Features.Enumerable`
This allows you to define a custom Mnemonix client API:
iex> defmodule My.Store.API do
...> use Mnemonix.Builder
...> end
iex> {:ok, store} = My.Store.API.start_link
iex> My.Store.API.get(store, :a)
nil
iex> My.Store.API.put(store, :a, 1)
iex> My.Store.API.get(store, :a)
1
If you want to create a Mnemonix client API with access to only a subset of Mnemonix features, simply
use those modules as you would the `Mnemonix.Builder` itself.
#### Documentation
By default, the builder will include the `@doc` for each function.
To disable this and leave the functions undocumented, provide `docs: false` when using.
#### Inlining
Additionally, all functions are defined as simple delegates to their source module.
If you would rather have their implementations inlined into your module for a small performance boost at the cost
of longer compile times, provide the `inline: true` option when using.
#### Singletons
You can pass in the `singleton: true` option to have your module use its own name
as a store reference, omitting the need for the first argument to all `Mnemonix.Feature` functions:
iex> defmodule My.Singleton do
...> use Mnemonix.Builder, singleton: true
...> end
iex> My.Singleton.start_link
iex> My.Singleton.get(:a)
nil
iex> My.Singleton.put(:a, 1)
iex> My.Singleton.get(:a)
1
Singletons still play nicely with the standard `Mnemonix` functions:
iex> defmodule My.Other.Singleton do
...> use Mnemonix.Builder, singleton: true
...> end
iex> My.Other.Singleton.start_link
iex> My.Other.Singleton.get(:a)
nil
iex> Mnemonix.get(My.Other.Singleton, :a)
nil
iex> Mnemonix.put(My.Other.Singleton, :a, 1)
iex> My.Other.Singleton.get(:a)
1
Singletons use their own module names as references names to work.
You can change the name used when defining the singleton:
iex> defmodule My.Singleton.Interface do
...> use Mnemonix.Builder, singleton: :store
...> end
iex> My.Singleton.Interface.singleton
:store
iex> My.Singleton.Interface.start_link
iex> My.Singleton.Interface.get(:a)
nil
iex> Mnemonix.get(:store, :a)
nil
iex> Mnemonix.put(:store, :a, 1)
iex> My.Singleton.Interface.get(:a)
1
"""
defmacro __using__(opts) do
{singleton, opts} = Mnemonix.Singleton.Behaviour.establish_singleton(__CALLER__.module, opts)
store = if singleton do
Mnemonix.Singleton.Behaviour.determine_singleton(
__CALLER__.module,
Keyword.get(opts, :singleton)
)
end
if singleton do
quote location: :keep do
@doc """
Retreives the name of the GenServer that this singleton makes calls to.
"""
def singleton, do: unquote(store)
use Mnemonix.Supervision, unquote(opts)
use Mnemonix.Features.Map.Singleton, unquote(opts)
use Mnemonix.Features.Bump.Singleton, unquote(opts)
use Mnemonix.Features.Expiry.Singleton, unquote(opts)
use Mnemonix.Features.Enumerable.Singleton, unquote(opts)
end
else
quote location: :keep do
use Mnemonix.Supervision, unquote(opts)
use Mnemonix.Features.Map, unquote(opts)
use Mnemonix.Features.Bump, unquote(opts)
use Mnemonix.Features.Expiry, unquote(opts)
use Mnemonix.Features.Enumerable, unquote(opts)
end
end
end
end
|
lib/mnemonix/builder.ex
| 0.853287
| 0.401512
|
builder.ex
|
starcoder
|
defmodule Mix.Tasks.Blueprint.Plot.Fun do
@shortdoc "Create a function graph"
@moduledoc """
Creates a function graph.
mix blueprint.plot.fun [APP] [--simple | --complex] [--colour] [[--lib LIB | --path PATH] ...] [-o PATH]
An `APP` name is provided if the function graph should be
limited to the given application. Otherwise it will be
for the entire blueprint (libraries tracked).
A `--simple` or `--complex` option can be used to indicate
the detail of the generated graph.
A `--colour` option can be used to generate a coloured
graph.
A `-o` option can be used to specify the file to be written.
As many `--lib` or `--path` options can be provided to
add additional libraries to the blueprint. If none are
provided, the blueprint will default to using the
libraries found in the project's build directory.
## Examples
Generate a graph for the current project:
mix blueprint.plot.fun
Generate a graph for the current project's `example` application:
mix blueprint.plot.fun example
Generate a graph for the provided libraries:
mix blueprint.plot.fun --lib example1 --lib example2 --path /example
Generate a simple graph of mnesia from the standard erlang runtime:
mix blueprint.plot.fun --path $(elixir -e 'IO.puts :code.lib_dir') --simple mnesia
"""
use Mix.Task
defp options(args, options)
defp options([], options), do: options
defp options(["--path"|args], options), do: options({ :path, args }, options)
defp options(["--lib"|args], options), do: options({ :lib, args }, options)
defp options({ :path, [app|args] }, options = %{ libs: libs }) when is_list(libs), do: options(args, %{ options | libs: [app|options[:libs]]})
defp options({ :lib, [app|args] }, options = %{ libs: libs }) when is_list(libs), do: options(args, %{ options | libs: [String.to_atom(app)|options[:libs]]})
defp options({ :path, [app|args] }, options), do: options(args, %{ options | libs: [app]})
defp options({ :lib, [app|args] }, options), do: options(args, %{ options | libs: [String.to_atom(app)]})
defp options(["--simple"|args], options), do: options(args, %{ options | opts: Map.put(options[:opts], :detail, :low) })
defp options(["--complex"|args], options), do: options(args, %{ options | opts: Map.put(options[:opts], :detail, :high) })
defp options(["--colour"|args], options) do
opts = Map.put(options[:opts], :styler, fn
{ :node, { mod, _, _ } } -> [color: Blueprint.Plot.Style.colourize(Blueprint.Plot.Label.strip_namespace(Blueprint.Plot.Label.to_label((mod))))]
{ :node, mod } -> [color: Blueprint.Plot.Style.colourize(Blueprint.Plot.Label.strip_namespace(Blueprint.Plot.Label.to_label(mod)))]
{ :connection, { { mod, _, _ }, _ } } -> [color: Blueprint.Plot.Style.colourize(Blueprint.Plot.Label.strip_namespace(Blueprint.Plot.Label.to_label(mod)))]
{ :connection, { mod, _ } } -> [color: Blueprint.Plot.Style.colourize(Blueprint.Plot.Label.strip_namespace(Blueprint.Plot.Label.to_label(mod)))]
_ -> [color: "black"]
end)
options(args, %{ options | opts: opts })
end
defp options(["-o"|args], options), do: options({ :output, args }, options)
defp options({ :output, [path|args] }, options), do: options(args, %{ options | opts: Map.put(options[:opts], :name, path) })
defp options([app|args], options), do: options(args, %{ options | app: String.to_atom(app) })
def run(args) do
{ :ok, _ } = :application.ensure_all_started(:graphvix)
libs = case Code.ensure_loaded(Mix.Project) do
{ :module, _ } -> Path.join(Mix.Project.build_path(), "lib")
_ -> []
end
options = options(args, %{ libs: libs, opts: %{}, app: nil })
blueprint = Blueprint.new(options[:libs])
case options do
%{ app: nil } -> Blueprint.Plot.function_graph(blueprint, Keyword.new(options[:opts]))
_ -> Blueprint.Plot.function_graph(blueprint, options[:app], Keyword.new(options[:opts]))
end
Blueprint.close(blueprint)
end
end
|
lib/mix/tasks/blueprint.plot/fun.ex
| 0.796213
| 0.526891
|
fun.ex
|
starcoder
|
defmodule Ratatouille.Runtime.Subscription do
@moduledoc """
Subscriptions provide a way for the app to be notified via
`c:Ratatouille.App.update/2` when something interesting happens.
Subscriptions should be constructed via the functions below and not via the
struct directly, as this is internal and subject to change.
Currently, it's possible to subscribe to time intervals (`interval/2`) and to
create batch subscriptions (i.e., multiple time intervals). More subscription
types may be introduced later.
### Accuracy of Time Intervals
Ratatouille's runtime loop, which handles subscriptions, runs on a interval
itself (by default, every 500 ms). This means that the runtime loop is the
minimum possible subscription interval. If a subscription's interval is more
frequent than the runtime loop interval, the runtime loop interval is the
subscription's effective interval.
There is also no guarantee that subscriptions will be processed on time, as
the runtime may be busy with other tasks (e.g., handling events or rendering).
With that said, if care is taken to keep expensive calls out of the runtime
loop, subscriptions should be processed very close to requested interval.
"""
alias __MODULE__
@enforce_keys [:type]
defstruct [:type, :message, :data]
@doc """
Returns a subscription based on a time interval. Takes the number of
milliseconds (`ms`) and a message as arguments. When returned in the
`c:Ratatouille.App.subscribe/1` callback, the runtime will call the
`c:Ratatouille.App.update/2` function with current model and the message, on
approximately the given interval. See above for details on what
"approximately" means here.
"""
#@spec interval(non_neg_integer(), term()) :: Subscription.t()
def interval(ms, message) do
# Like 0, but accounts for a negative monotonic time
last_at_ms = :erlang.monotonic_time(:millisecond) - ms
%Subscription{type: :interval, data: {ms, last_at_ms}, message: message}
end
@doc """
Creates a batch subscription from a list of subscriptions.
This provides a way to subscribe to multiple things, while still returning a
single subscription in `c:Ratatouille.App.subscribe/1`.
"""
#@spec batch([Subscription.t()]) :: Subscription.t()
def batch([%Subscription{} | _] = subs) do
%Subscription{type: :batch, data: subs}
end
@doc false
def to_list(%Subscription{type: :batch, data: [sub | rest]}) do
to_list(sub) ++ to_list(%Subscription{type: :batch, data: rest})
end
def to_list(%Subscription{type: :batch, data: []}), do: []
def to_list(%Subscription{type: :interval} = sub), do: [sub]
end
|
lib/ratatouille/runtime/subscription.ex
| 0.821331
| 0.628279
|
subscription.ex
|
starcoder
|
defmodule EnumExtras do
@moduledoc """
Provides additional utility functions for working with enumerables.
"""
@type t :: Enumerable.t()
@type element :: any
@doc """
Calculates the average of the elements in the `enumerable`.
It should return `nil` if the `enumerable` is empty.
"""
@spec average(t) :: nil | integer
def average([]), do: nil
def average(list) when is_list(list) do
# FIXME: Susceptible to floating-point errors.
Enum.sum(list) / Enum.count(list)
end
@doc """
Calculates the weighted average of the elements in the `enumerable`.
It should return `nil` if the `enumerable` is empty or the weights sum to zero.
"""
@spec weighted_average(t, t) :: nil | integer
def weighted_average([], _weights), do: nil
def weighted_average(list, weights) when is_list(list) and is_list(weights) do
# TODO: Handle case when number of weights differs from number of elements in list.
case Enum.sum(weights) do
0 ->
nil
sum ->
# FIXME: Susceptible to floating-point errors.
total =
Enum.zip(list, weights)
|> Enum.reduce(0, fn {element, weight}, acc -> acc + element * weight end)
total / sum
end
end
@doc """
Partitions the elements of the `enumerable` according to the pairwise comparator.
## Examples
iex> EnumExtras.chunk_by_pairwise([1, 2, 3, 4, 1, 2, 3, 1, 2, 1], fn a, b -> a <= b end)
[[1, 2, 3, 4], [1, 2, 3], [1, 2], [1]]
"""
@spec chunk_by_pairwise(t, (element, element -> boolean)) :: t
def chunk_by_pairwise([], _comparator), do: []
def chunk_by_pairwise([value], _comparator), do: [[value]]
def chunk_by_pairwise(values, comparator) do
values
|> Enum.reverse()
|> Enum.chunk_every(2, 1)
|> Enum.reduce([[]], fn
[value], [head | tail] ->
[[value | head] | tail]
[left_value, right_value], [head | tail] ->
acc = [[left_value | head] | tail]
# The arguments in the comparator are reversed because the given list is reversed above.
case comparator.(right_value, left_value) do
true -> acc
false -> [[]] ++ acc
end
end)
end
end
|
lib/enum_extras.ex
| 0.717507
| 0.677261
|
enum_extras.ex
|
starcoder
|
defmodule Kaffe.Producer do
@moduledoc """
The producer pulls in values from the Kaffe producer configuration:
- `heroku_kafka_env` - endpoints and SSL configuration will be pulled from ENV
- `endpoints` - plaintext Kafka endpoints
- `topics` - a list of Kafka topics to prep for producing
- `partition_strategy` - the strategy to use when selecting the next partition.
Default `:md5`.
- `:md5`: provides even and deterministic distrbution of the messages over the available partitions based on an MD5 hash of the key
- `:random` - Select a random partition
- function - Pass a function as an argument that accepts five arguments and
returns the partition number to use for the message
- `topic, current_partition, partitions_count, key, value`
Clients can also specify a partition directly when producing.
Currently only synchronous production is supported.
"""
@kafka Application.get_env(:kaffe, :kafka_mod, :brod)
require Logger
## -------------------------------------------------------------------------
## public api
## -------------------------------------------------------------------------
def start_producer_client(opts \\ %{}) do
conf = config(opts)
@kafka.start_client(conf.endpoints, client_name(), conf.producer_config)
end
@doc """
Synchronously produce the `messages_list` to `topic`
- `messages_list` must be a list of `{key, value}` tuples
- `opts` may include the partition strategy to use,
`partition_strategy: :md5`, or `:random` or a function.
Returns:
* `:ok` on successfully producing each message
* `{:error, reason}` for any error
"""
def produce(topic, message_list, opts \\ []) do
produce_list(topic, message_list, partition_strategy_from(opts))
end
@doc """
Synchronously produce the `message_list` to a `topic`.
The first argument must be the topic or the key.
`messages` may be a list of `{key, value}` tuples or simply a value
Not specifying the topic as the first argument is a simpler way to produce if
you've only given Producer a single topic for production and don't want to
specify the topic for each call.
Returns:
* `:ok` on successfully producing each message
* `{:error, reason}` for any error
"""
def produce_sync(topic, message_list) when is_list(message_list) do
produce_list(topic, message_list, global_partition_strategy())
end
def produce_sync(key, value) do
topic = config().topics |> List.first()
produce_value(topic, key, value)
end
@doc """
Synchronously produce the `message_list` to `topic`/`partition`
`message_list` must be a list of `{key, value}` tuples
Returns:
* `:ok` on successfully producing each message
* `{:error, reason}` for any error
"""
def produce_sync(topic, partition, message_list) when is_list(message_list) do
produce_list(topic, message_list, fn _, _, _, _ -> partition end)
end
def produce_sync(topic, key, value) do
produce_value(topic, key, value)
end
@doc """
Synchronously produce the given `key`/`value` to the `topic`/`partition`
See `produce_sync/2` for returns.
"""
def produce_sync(topic, partition, key, value) do
@kafka.produce_sync(client_name(), topic, partition, key, value)
end
## -------------------------------------------------------------------------
## internal
## -------------------------------------------------------------------------
defp produce_list(topic, message_list, partition_strategy) when is_list(message_list) do
Logger.debug("event#produce_list topic=#{topic}")
message_list
|> add_timestamp
|> group_by_partition(topic, partition_strategy)
|> produce_list_to_topic(topic)
end
defp produce_value(topic, key, value) do
{:ok, partitions_count} = @kafka.get_partitions_count(client_name(), topic)
partition = choose_partition(topic, partitions_count, key, value, global_partition_strategy())
Logger.debug(
"event#produce topic=#{topic} key=#{key} partitions_count=#{partitions_count} selected_partition=#{partition}"
)
@kafka.produce_sync(client_name(), topic, partition, key, value)
end
defp add_timestamp(messages) do
messages
|> Enum.map(fn {key, message} ->
{System.system_time(:millisecond), key, message}
end)
end
defp group_by_partition(messages, topic, partition_strategy) do
{:ok, partitions_count} = @kafka.get_partitions_count(client_name(), topic)
messages
|> Enum.group_by(fn {_timestamp, key, message} ->
choose_partition(topic, partitions_count, key, message, partition_strategy)
end)
end
defp produce_list_to_topic(message_list, topic) do
message_list
|> Enum.reduce_while(:ok, fn {partition, messages}, :ok ->
Logger.debug("event#produce_list_to_topic topic=#{topic} partition=#{partition}")
case @kafka.produce_sync(client_name(), topic, partition, "ignored", messages) do
:ok -> {:cont, :ok}
{:error, _reason} = error -> {:halt, error}
end
end)
end
defp partition_strategy_from(opts) do
case Keyword.fetch(opts, :partition_strategy) do
{:ok, partition_strategy} -> partition_strategy
:error -> global_partition_strategy()
end
end
defp choose_partition(_topic, partitions_count, _key, _value, :random) do
Kaffe.PartitionSelector.random(partitions_count)
end
defp choose_partition(_topic, partitions_count, key, _value, :md5) do
Kaffe.PartitionSelector.md5(key, partitions_count)
end
defp choose_partition(topic, partitions_count, key, value, fun) when is_function(fun) do
fun.(topic, partitions_count, key, value)
end
defp client_name do
config().client_name
end
defp global_partition_strategy do
config().partition_strategy
end
defp config(opts \\ %{}) do
Kaffe.Config.Producer.configuration(opts)
end
end
|
lib/kaffe/producer.ex
| 0.870542
| 0.621957
|
producer.ex
|
starcoder
|
defmodule Ravix.Ecto.Parser.Shared do
alias Ravix.Ecto.Conversions
@spec is_op(any) ::
{:and, [{:context, Ravix.Ecto.Parser.Shared} | {:import, Kernel}, ...],
[{:!=, [...], [...]} | {:is_atom, [...], [...]}, ...]}
defmacro is_op(op) do
quote do
is_atom(unquote(op)) and unquote(op) != :^
end
end
def value(expr, pk, place) do
case Conversions.from_ecto(expr, pk) do
{:ok, value} -> value
:error -> error(place)
end
end
def value(expr, params, pk, query, place) do
case Conversions.inject_params(expr, params, pk) do
{:ok, value} -> value
:error -> error(query, place)
end
end
def field(key, _), do: key
def field({{:., _, [{:&, _, [0]}, field]}, _, []}, pk, _query, _place), do: field(field, pk)
def field(_expr, _pk, query, place), do: error(query, place)
def map_unless_empty([]), do: %{}
def map_unless_empty(list), do: list
def primary_key(nil), do: nil
def primary_key(schema) do
case schema.__schema__(:primary_key) do
[] ->
nil
[pk] ->
pk
keys ->
raise ArgumentError,
"RavenDB adapter does not support multiple primary keys " <>
"and #{inspect(keys)} were defined in #{inspect(schema)}."
end
end
@not_supported ~w(lock joins havings offset)a
@query_empty_values %Ecto.Query{} |> Map.take(@not_supported)
def check_query!(query, allow \\ []) do
@query_empty_values
|> Map.drop(allow)
|> Enum.each(fn {element, empty} ->
check(
Map.get(query, element),
empty,
query,
"RavenDB adapter does not support '#{element}' clause in this query"
)
end)
end
defp check(expr, expr, _, _), do: nil
defp check(_, _, query, message), do: raise(Ecto.QueryError, query: query, message: message)
def error(query, place) do
raise Ecto.QueryError,
query: query,
message: "Invalid expression for RavenDB adapter in #{place}"
end
defp error(place) do
raise ArgumentError, "Invalid expression for RavenDB adapter in #{place}"
end
end
|
lib/ravix_ecto/parsers/shared.ex
| 0.629319
| 0.464234
|
shared.ex
|
starcoder
|
defmodule StepFlow.LiveWorkers do
@moduledoc """
The LiveWorkers context.
"""
import Ecto.Query, warn: false
alias StepFlow.Repo
alias StepFlow.LiveWorkers.LiveWorker
@doc """
Returns the list of Live Worker.
## Examples
iex> StepFlow.LiveWorkers.list_live_workers()
%{data: [], page: 0, size: 10, total: 0}
"""
def list_live_workers(params \\ %{}) do
page =
Map.get(params, "page", 0)
|> StepFlow.Integer.force()
size =
Map.get(params, "size", 10)
|> StepFlow.Integer.force()
offset = page * size
query =
from(live_worker in LiveWorker)
|> filter_initializing(params)
|> filter_started(params)
|> filter_terminated(params)
total_query = from(item in query, select: count(item.id))
total =
Repo.all(total_query)
|> List.first()
query =
from(
job in query,
order_by: [desc: :inserted_at],
offset: ^offset,
limit: ^size
)
jobs = Repo.all(query)
%{
data: jobs,
total: total,
page: page,
size: size
}
end
@doc """
Creates a Live Worker entry.
## Examples
iex> create_live_worker(%{field: value})
{:ok, %LiveWorker{}}
iex> create_live_worker(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_live_worker(attrs \\ %{}) do
%LiveWorker{}
|> LiveWorker.changeset(attrs)
|> Repo.insert()
end
@doc """
Gets a single live worker by job ID
## Examples
iex> get_by!(123)
%LiveWorker{}
iex> get_by!(456)
nil
"""
def get_by!(%{"job_id" => job_id}) do
Repo.get_by!(LiveWorker, job_id: job_id)
end
@doc """
Gets a single live worker by job ID
## Examples
iex> get_by(%{"job_id" => 123})
%LiveWorker{}
iex> get_by(%{"job_id" => 456})
nil
"""
def get_by(%{"job_id" => job_id}) do
Repo.get_by(LiveWorker, job_id: job_id)
end
@doc """
Updates a live worker.
## Examples
iex> update_live_worker(job, %{field: new_value})
{:ok, %LiveWorker{}}
iex> update_live_worker(job, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_live_worker(%LiveWorker{} = live_worker, attrs) do
live_worker
|> LiveWorker.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a LiveWorker.
## Examples
iex> delete_live_worker(live_worker)
{:ok, %LiveWorker{}}
iex> delete_live_worker(live_worker)
{:error, %Ecto.Changeset{}}
"""
def delete_live_worker(%LiveWorker{} = live_worker) do
Repo.delete(live_worker)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking live worker changes.
## Examples
iex> change_live_worker(job)
%Ecto.Changeset{source: %LiveWorker{}}
"""
def change_live_worker(%LiveWorker{} = live_worker) do
LiveWorker.changeset(live_worker, %{})
end
defp filter_initializing(query, params) do
case Map.get(params, "initializing") do
nil ->
from(worker in query)
_ ->
from(
worker in query,
where:
(fragment("? = array[]::character varying[]", worker.ips) or
is_nil(worker.creation_date)) and
is_nil(worker.termination_date)
)
end
end
defp filter_started(query, params) do
case Map.get(params, "started") do
nil ->
from(worker in query)
_ ->
from(
worker in query,
where:
fragment("array_length(?, 1)", worker.ips) > 0 and
not is_nil(worker.creation_date) and
is_nil(worker.termination_date)
)
end
end
defp filter_terminated(query, params) do
case Map.get(params, "terminated") do
nil ->
from(worker in query)
_ ->
from(
worker in query,
where: not is_nil(worker.termination_date)
)
end
end
end
|
lib/step_flow/live_workers/live_workers.ex
| 0.769514
| 0.452657
|
live_workers.ex
|
starcoder
|
defmodule Kernel.ParallelCompiler do
@moduledoc """
A module responsible for compiling files in parallel.
"""
@doc """
Compiles the given files.
Those files are compiled in parallel and can automatically
detect dependencies between them. Once a dependency is found,
the current file stops being compiled until the dependency is
resolved.
If there is an error during compilation or if `warnings_as_errors`
is set to `true` and there is a warning, this function will fail
with an exception.
This function accepts the following options:
* `:each_file` - for each file compiled, invokes the callback passing the
file
* `:each_module` - for each module compiled, invokes the callback passing
the file, module and the module bytecode
* `:dest` - the destination directory for the beam files. When using `files/2`,
this information is only used to properly annotate the beam files before
they are loaded into memory. If you want a file to actually be written to
`dest`, use `files_to_path/3` instead.
Returns the modules generated by each compiled file.
"""
def files(files, options \\ [])
def files(files, options) when is_list(options) do
spawn_compilers(files, nil, options)
end
@doc """
Compiles the given files to the given path.
Read `files/2` for more information.
"""
def files_to_path(files, path, options \\ [])
def files_to_path(files, path, options) when is_binary(path) and is_list(options) do
spawn_compilers(files, path, options)
end
defp spawn_compilers(files, path, options) do
true = Code.ensure_loaded?(Kernel.ErrorHandler)
compiler_pid = self()
:elixir_code_server.cast({:reset_warnings, compiler_pid})
schedulers = max(:erlang.system_info(:schedulers_online), 2)
result = spawn_compilers(files, files, path, options, [], [], schedulers, [])
# In case --warning-as-errors is enabled and there was a warning,
# compilation status will be set to error.
case :elixir_code_server.call({:compilation_status, compiler_pid}) do
:ok ->
result
:error ->
IO.puts :stderr, "Compilation failed due to warnings while using the --warnings-as-errors option"
exit({:shutdown, 1})
end
end
# We already have 4 currently running, don't spawn new ones
defp spawn_compilers(entries, original, output, options, waiting, queued, schedulers, result) when
length(queued) - length(waiting) >= schedulers do
wait_for_messages(entries, original, output, options, waiting, queued, schedulers, result)
end
# Release waiting processes
defp spawn_compilers([h|t], original, output, options, waiting, queued, schedulers, result) when is_pid(h) do
{_kind, ^h, ref, _module} = List.keyfind(waiting, h, 1)
send h, {ref, :ready}
waiting = List.keydelete(waiting, h, 1)
spawn_compilers(t, original, output, options, waiting, queued, schedulers, result)
end
# Spawn a compiler for each file in the list until we reach the limit
defp spawn_compilers([h|t], original, output, options, waiting, queued, schedulers, result) do
parent = self()
{pid, ref} =
:erlang.spawn_monitor fn ->
# Notify Code.ensure_compiled/2 that we should
# attempt to compile the module by doing a dispatch.
:erlang.put(:elixir_ensure_compiled, true)
# Set the elixir_compiler_pid used by our custom Kernel.ErrorHandler.
:erlang.put(:elixir_compiler_pid, parent)
:erlang.process_flag(:error_handler, Kernel.ErrorHandler)
exit(try do
_ = if output do
:elixir_compiler.file_to_path(h, output)
else
:elixir_compiler.file(h, Keyword.get(options, :dest))
end
{:shutdown, h}
catch
kind, reason ->
{:failure, kind, reason, System.stacktrace}
end)
end
spawn_compilers(t, original, output, options, waiting,
[{pid, ref, h}|queued], schedulers, result)
end
# No more files, nothing waiting, queue is empty, we are done
defp spawn_compilers([], _original, _output, _options, [], [], _schedulers, result) do
for {:module, mod} <- result, do: mod
end
# Queued x, waiting for x: POSSIBLE ERROR! Release processes so we get the failures
defp spawn_compilers([], original, output, options, waiting, queued, schedulers, result) when length(waiting) == length(queued) do
Enum.each queued, fn {child, _, _} ->
{_kind, ^child, ref, _module} = List.keyfind(waiting, child, 1)
send child, {ref, :release}
end
wait_for_messages([], original, output, options, waiting, queued, schedulers, result)
end
# No more files, but queue and waiting are not full or do not match
defp spawn_compilers([], original, output, options, waiting, queued, schedulers, result) do
wait_for_messages([], original, output, options, waiting, queued, schedulers, result)
end
# Wait for messages from child processes
defp wait_for_messages(entries, original, output, options, waiting, queued, schedulers, result) do
receive do
{:struct_available, module} ->
available = for {:struct, pid, _, waiting_module} <- waiting,
module == waiting_module,
not pid in entries,
do: pid
spawn_compilers(available ++ entries, original, output, options,
waiting, queued, schedulers, [{:struct, module}|result])
{:module_available, child, ref, file, module, binary} ->
if callback = Keyword.get(options, :each_module) do
callback.(file, module, binary)
end
# Release the module loader which is waiting for an ack
send child, {ref, :ack}
available = for {_kind, pid, _, waiting_module} <- waiting,
module == waiting_module,
not pid in entries,
do: pid
spawn_compilers(available ++ entries, original, output, options,
waiting, queued, schedulers, [{:module, module}|result])
{:waiting, kind, child, ref, on} ->
defined = fn {k, m} -> on == m and k in [kind, :module] end
# Oops, we already got it, do not put it on waiting.
if :lists.any(defined, result) do
send child, {ref, :ready}
else
waiting = [{kind, child, ref, on}|waiting]
end
spawn_compilers(entries, original, output, options, waiting, queued, schedulers, result)
{:DOWN, _down_ref, :process, down_pid, {:shutdown, file}} ->
if callback = Keyword.get(options, :each_file) do
callback.(file)
end
# Sometimes we may have spurious entries in the waiting
# list because someone invoked try/rescue UndefinedFunctionError
new_entries = List.delete(entries, down_pid)
new_queued = List.keydelete(queued, down_pid, 0)
new_waiting = List.keydelete(waiting, down_pid, 1)
spawn_compilers(new_entries, original, output, options, new_waiting, new_queued, schedulers, result)
{:DOWN, down_ref, :process, _down_pid, reason} ->
handle_failure(down_ref, reason, entries, waiting, queued)
wait_for_messages(entries, original, output, options, waiting, queued, schedulers, result)
end
end
defp handle_failure(ref, reason, entries, waiting, queued) do
if file = find_failure(ref, queued) do
print_failure(file, reason)
if all_missing?(entries, waiting, queued) do
collect_failures(queued, length(queued) - 1, [{file, reason}])
end
Enum.each queued, fn {child, _, _} ->
Process.exit(child, :kill)
end
exit({:shutdown, 1})
end
end
defp find_failure(ref, queued) do
case List.keyfind(queued, ref, 1) do
{_child, ^ref, file} -> file
_ -> nil
end
end
defp print_failure(_file, {:shutdown, _}) do
:ok
end
defp print_failure(file, {:failure, kind, reason, stacktrace}) do
IO.puts "\n== Compilation error on file #{Path.relative_to_cwd(file)} =="
IO.puts Exception.format(kind, reason, prune_stacktrace(stacktrace))
end
defp print_failure(file, reason) do
IO.puts "\n== Compilation error on file #{Path.relative_to_cwd(file)} =="
IO.puts Exception.format(:exit, reason, [])
end
@elixir_internals [:elixir, :elixir_exp, :elixir_compiler, :elixir_module, :elixir_clauses,
:elixir_translator, :elixir_expand, :elixir_lexical, :elixir_exp_clauses,
:elixir_def]
defp prune_stacktrace([{mod, _, _, _}|t]) when mod in @elixir_internals do
prune_stacktrace(t)
end
defp prune_stacktrace([h|t]) do
[h|prune_stacktrace(t)]
end
defp prune_stacktrace([]) do
[]
end
defp all_missing?(entries, waiting, queued) do
entries == [] and waiting != [] and
length(waiting) == length(queued)
end
defp collect_failures(_queued, 0, collected) do
filtered =
for {file, {:failure, :error, :undef, [{mod, _, _, _}|_]}} <- collected, do: {file, mod}
if filtered != [] do
IO.puts """
Compilation failed because one or more modules are missing. This may
happen when a module does not exist or there are circular dependencies.
The following files are missing the following modules:
"""
max =
filtered
|> Enum.map(& &1 |> elem(0) |> String.length)
|> Enum.max
for {file, mod} <- filtered do
IO.puts " " <> String.rjust(file, max) <> " => " <> inspect(mod)
end
IO.puts "\nThe full error reports can be seen above."
end
end
defp collect_failures(queued, remaining, collected) do
receive do
{:DOWN, down_ref, :process, _down_pid, reason} ->
if file = find_failure(down_ref, queued) do
print_failure(file, reason)
collect_failures(queued, remaining - 1, [{file, reason}|collected])
else
collect_failures(queued, remaining, collected)
end
after
# Give up if no failure appears in 5 seconds
5000 -> :ok
end
end
end
|
lib/elixir/lib/kernel/parallel_compiler.ex
| 0.791338
| 0.439807
|
parallel_compiler.ex
|
starcoder
|
defmodule ExHal.Form do
@moduledoc """
Represents a [Dwolla style HAL
form](https://github.com/Dwolla/hal-forms). Generally these are
acquired from `ExHal.Document.get_form/2`,
`ExHal.Document.fetch_form/2`, etc
"""
alias ExHal.{
FormField,
JsonFormEncoder
}
@typedoc """
A form that can be completed and submitted. This type is opaque and
should only be used as an argument to functions in this, `#{__MODULE__}` module.
"""
@opaque t :: %__MODULE__{}
defstruct [
:target,
:method,
:content_type,
:fields
]
@doc """
Creates a new form from raw form JSON.
Raises `ArgumentError` if `a_map` is not a valid form fragment.
"""
@spec from_forms_entry(%{}) :: __MODULE__.t()
def from_forms_entry(a_map) do
%__MODULE__{
target: extract_target(a_map),
method: extract_method(a_map),
content_type: extract_content_type(a_map),
fields: extract_fields(a_map)
}
end
@doc """
Returns list of the fields in this form.
"""
@spec get_fields(__MODULE__.t()) :: [FormField.t()]
def get_fields(a_form) do
a_form.fields
end
@doc """
Returns form with the specified fields value updated.
Raises `ArgumentError` if the specified field doesn't exist.
"""
@spec set_field_value(__MODULE__.t(), String.t(), FormField.field_value()) :: __MODULE__.t()
def set_field_value(form, field_name, new_value) do
updated_field =
get_field(form, field_name)
|> FormField.set_value(new_value)
replace_field(form, field_name, updated_field)
end
@doc """
Submits form and returns the response.
"""
@spec submit(__MODULE__.t(), Client.t()) :: Client.http_response()
def submit(form, client) do
apply(client_module(),
form.method,
[client,
form.target,
encode(form),
[headers: ["Content-Type": form.content_type]]
]
)
end
# --- private functions ---
def client_module do
Application.get_env(:exhal, :client, ExHal.Client)
end
defp encode(form) do
cond do
Regex.match?(~r/application\/json/i, form.content_type) ->
JsonFormEncoder.encode(form)
Regex.match?(~r/\+json$/i, form.content_type) ->
JsonFormEncoder.encode(form)
true ->
raise ArgumentError, "unrecognized content type: #{form.content_type.inspect}"
end
end
defp get_field(form, field_name) do
form
|> get_fields()
|> Enum.find(&(&1.name == field_name)) || raise(ArgumentError, "no such field: #{field_name}")
end
defp replace_field(form, field_name, updated_field) do
fields_sans_update =
form
|> get_fields()
|> Enum.reject(&(&1.name == field_name))
%__MODULE__{form | fields: [updated_field | fields_sans_update]}
end
defp extract_target(a_map) do
case get_in(a_map, ["_links", "target", "href"]) do
nil ->
raise ArgumentError, "form target link missing"
val ->
val
end
end
defp extract_method(a_map) do
Map.get_lazy(a_map, "method", fn -> raise ArgumentError, "form method missing" end)
|> String.downcase
|> String.to_atom
end
defp extract_content_type(a_map) do
Map.get_lazy(a_map, "contentType", fn -> raise ArgumentError, "form contentType missing" end)
end
defp extract_fields(a_map) do
a_map["fields"]
|> Enum.map(fn entry -> FormField.from_field_entry(entry) end)
end
end
|
lib/exhal/form.ex
| 0.83256
| 0.551936
|
form.ex
|
starcoder
|
defmodule StrawHat.Configurable do
@moduledoc """
It defines a configurable module. A configurable module expose `config`
function that returns the configuration of the module based a merge strategy.
The configuration will use three configurations:
* Using the macro parameters, most likely used for static values.
* Using `Config` module, most likely used for environment-based values.
* Runtime based on the parameters, most likely used when you want to control
the values at runtime.
### Static Configuration
When you need to define default values that do not require runtime execution
nor they are based on the environment. They are most likely static values.
defmodule MyConfigurableModule do
use StrawHat.Configurable,
config: [
site: "https://myproductionsite.com"
]
end
### Environment Configuration
When you pass the `otp_app`, we will lookup based on the OTP app name and the
module name.
defmodule MyConfigurableModule do
use StrawHat.Configurable, otp_app: :my_app
end
Then you can configure the values in your config files.
config :my_app, MyConfigurableModule,
some_value_here: ""
This will allow you to swap values based on the environment.
### Runtime Configuration
Since your module will contain the `config` function, you can always call it
with extra configs.
MyConfigurableModule.config([
another_value: ""
])
"""
defmacro __using__(opts) do
quote do
@module __MODULE__
@opts unquote(opts)
@doc """
Returns the configuration of the module.
"""
@spec config(keyword()) :: keyword()
def config(params \\ []) do
StrawHat.Configurable.config(@module, @opts, params)
end
end
end
@doc false
def config(module, opts, params) do
default_config = Keyword.get(opts, :config, [])
default_config
|> Keyword.merge(environment_config(module, opts))
|> Keyword.merge(params)
end
defp environment_config(module, opts) do
case Keyword.get(opts, :otp_app) do
nil -> []
otp_app -> Application.get_env(otp_app, module, [])
end
end
end
|
lib/straw_hat/configurable.ex
| 0.823257
| 0.497315
|
configurable.ex
|
starcoder
|
defmodule Exnoops.Interviewbot do
@moduledoc """
Module to interact with Github's Noop: Interviewbot
See the [official `noop` documentation](https://noopschallenge.com/challenges/interviewbot) for API information including the accepted parameters.
"""
require Logger
import Exnoops.API
@doc """
Query Interviewbot for question(s)
+ Parameters are sent with a keyword list into the function.
+ Parameters that accept multiple values should be put into the keyword list like `{:key, [value1, value2]}`. See example below.
## Examples
iex> Exnoops.Interviewbot.query()
{:ok, %{
"message" => "Welcome to your interview. Please POST your GitHub login to this URL to get started. See the exampleResponse for more information.",
"exampleResponse" => %{ "login" => "noops-challenger" }
}}
iex> Exnoops.Interviewbot.query("/interviewbot/questions/izZhyS2sCY7kZtZJf8yjizYuMo7zLE0m4Ucom4NeJYc")
{:ok, %{
"questionPath" => "/interviewbot/questions/izZhyS2sCY7kZtZJf8yjizYuMo7zLE0m4Ucom4NeJYc",
"question" => 12111900247,
"message" => "Find the prime factors of the number 12111900247. The prime factors of a number are the prime numbers that result in the number when multiplied together. The prime factors of 12 would be [2,2,3] because 2 * 2 * 3 = 12.",
"exampleResponse" => %{
"answer" => [ 2, 3, 5, 7 ]
}
}}
"""
@spec query(String.t()) :: {atom(), map()}
def query(endpoint \\ "/interviewbot/start") when is_binary(endpoint) do
Logger.debug("Calling Interviewbot.query(#{endpoint})")
case get(endpoint, []) do
{:ok, _} = res ->
res
error ->
error
end
end
@doc """
Submit answers to Interviewbot
## Exmaples
iex> Exnoops.Interviewbot.submit("/interviewbot/start", login: "noops-challenger")
{:ok, %{
"message" => "Hello noops-challenger, get ready for your interview. Your first question is at /interviewbot/questions/izZhyS2sCY7kZtZJf8yjizYuMo7zLE0m4Ucom4NeJYc",
"nextQuestion" => "/interviewbot/questions/izZhyS2sCY7kZtZJf8yjizYuMo7zLE0m4Ucom4NeJYc"
}}
"""
@spec submit(String.t(), keyword()) :: {atom(), map()}
def submit(endpoint, body) when is_binary(endpoint) and is_list(body) do
Logger.debug("Calling Interviewbot.submit()")
case post(endpoint, body) do
{:ok, _} = res ->
res
error ->
error
end
end
end
|
lib/exnoops/interviewbot.ex
| 0.694095
| 0.551695
|
interviewbot.ex
|
starcoder
|
defmodule Wallaby.Element do
@moduledoc """
Defines an Element Struct and interactions with Elements.
Typically these functions are used in conjunction with a `find`:
```
page
|> find(Query.css(".some-element"), fn(element) -> Element.click(element) end)
```
These functions can be used to create new actions specific to your application:
```
def create_todo(todo_field, todo_text) do
todo_field
|> Element.click()
|> Element.fill_in(with: todo_text)
|> Element.send_keys([:enter])
end
```
## Retrying
Unlike `Browser` the actions in `Element` do not retry if the element becomes stale. Instead an exception will be raised.
"""
alias Wallaby.InvalidSelectorError
alias Wallaby.StaleReferenceError
defstruct [:url, :session_url, :parent, :id, :driver, screenshots: []]
@type value :: String.t
| number()
| :selected
| :unselected
@type attr :: String.t
@type keys_to_send :: String.t | list(atom | String.t)
@type t :: %__MODULE__{
session_url: String.t,
url: String.t,
id: String.t,
screenshots: list,
driver: module,
}
@doc """
Clears any value set in the element.
"""
@spec clear(t) :: t
def clear(%__MODULE__{driver: driver} = element) do
case driver.clear(element) do
{:ok, _} ->
element
{:error, :stale_reference} ->
raise StaleReferenceError
{:error, :invalid_selector} ->
raise InvalidSelectorError
end
end
@doc """
Fills in the element with the specified value.
"""
@spec fill_in(t, with: String.t | number()) :: t
def fill_in(element, with: value) when is_number(value) do
fill_in(element, with: to_string(value))
end
def fill_in(element, with: value) when is_binary(value) do
element
|> clear
|> set_value(value)
end
@doc """
Clicks the element.
"""
@spec click(t) :: t
def click(%__MODULE__{driver: driver} = element, retry_count \\ 0) do
case driver.click(element) do
{:ok, _} ->
element
{:error, :stale_reference} ->
raise StaleReferenceError
{:error, :obscured} ->
if retry_count > 4 do
raise Wallaby.ExpectationNotMetError, """
The element you tried to click is obscured by another element.
"""
else
click(element, retry_count + 1)
end
end
end
@doc """
Hovers on the element.
"""
@spec hover(t) :: t
def hover(%__MODULE__{driver: driver} = element) do
case driver.hover(element) do
{:ok, _} ->
element
end
end
@doc """
Gets the element's text value.
If the element is not visible, the return value will be `""`.
"""
@spec text(t) :: String.t
def text(%__MODULE__{driver: driver} = element) do
case driver.text(element) do
{:ok, text} ->
text
{:error, :stale_reference} ->
raise StaleReferenceError
end
end
@doc """
Gets the value of the element's attribute.
"""
@spec attr(t, attr()) :: String.t | nil
def attr(%__MODULE__{driver: driver} = element, name) do
case driver.attribute(element, name) do
{:ok, attribute} ->
attribute
{:error, :stale_reference} ->
raise StaleReferenceError
end
end
@doc """
Returns a boolean based on whether or not the element is selected.
## Note
This only really makes sense for options, checkboxes, and radio buttons.
Everything else will simply return false because they have no notion of
"selected".
"""
@spec selected?(t) :: boolean()
def selected?(%__MODULE__{driver: driver} = element) do
case driver.selected(element) do
{:ok, value} ->
value
{:error, _} ->
false
end
end
@doc """
Returns a boolean based on whether or not the element is visible.
"""
@spec visible?(t) :: boolean()
def visible?(%__MODULE__{driver: driver} = element) do
case driver.displayed(element) do
{:ok, value} ->
value
{:error, _} ->
false
end
end
@doc """
Sets the value of the element.
"""
@spec set_value(t, value()) :: t
def set_value(%__MODULE__{driver: driver} = element, value) do
case driver.set_value(element, value) do
{:ok, _} ->
element
{:error, :stale_reference} ->
raise StaleReferenceError
error -> error
end
end
@doc """
Sends keys to the element.
"""
@spec send_keys(t, keys_to_send) :: t
def send_keys(element, text) when is_binary(text) do
send_keys(element, [text])
end
def send_keys(%__MODULE__{driver: driver} = element, keys) when is_list(keys) do
case driver.send_keys(element, keys) do
{:ok, _} ->
element
{:error, :stale_reference} ->
raise StaleReferenceError
error -> error
end
end
@doc """
Matches the Element's value with the provided value.
"""
@spec value(t) :: String.t
def value(element) do
attr(element, "value")
end
end
defimpl Inspect, for: Wallaby.Element do
import Inspect.Algebra
def inspect(element, opts) do
outer_html = Wallaby.Element.attr(element, "outerHTML")
concat([
Inspect.Any.inspect(element, opts),
"\n\n",
IO.ANSI.cyan <> "outerHTML:\n\n" <> IO.ANSI.reset,
IO.ANSI.yellow <> outer_html <> IO.ANSI.reset
])
end
end
|
lib/wallaby/element.ex
| 0.78842
| 0.891244
|
element.ex
|
starcoder
|
defmodule Processor do
use GenServer
@moduledoc """
A `Processor` handles info regarding one source of input.
It reads data from a queue, processes that same data and forwards it to processed queues.
"""
@doc """
Starts a connection to handle one input source named `name`.
"""
def start_link(name) do
GenServer.start_link(__MODULE__, %{source: name}, [{:name, String.to_atom(name)}])
end
@impl true
def init(state) do
new_state = start_connection(state)
{:ok, new_state}
end
# Creates the connection and the queues that will be used in the life cycle of this process.
# After that, goes into a state where the process waits for messages.
defp start_connection(state) do
url = Application.fetch_env!(:rabbit, :url)
{:ok, connection} = AMQP.Connection.open(url)
{:ok, channel} = AMQP.Channel.open(connection)
AMQP.Exchange.declare(channel, "dharma", :topic)
create_queue(channel, "raw_input", ["insert.raw." <> state.source])
create_queue(channel, "process_dashboard", ["insert.processed.*"])
create_queue(channel, "process_blockchain", [
"insert.processed.dharma",
"insert.processed.other"
])
new_state = Map.put(state, :channel, channel)
AMQP.Queue.subscribe(channel, "raw_input", fn payload, meta ->
process_and_send(payload, meta, new_state)
end)
new_state
end
# Creates a queue named `queue_name` that's binded to each topic in the list.
@spec create_queue(AMQP.Channel.t(), String.t(), [String.t()]) :: :ok
defp create_queue(channel, queue_name, topic) do
AMQP.Queue.declare(channel, queue_name, exclusive: false, durable: true)
Enum.each(topic, fn x ->
AMQP.Queue.bind(channel, queue_name, "dharma", routing_key: x)
end)
end
# Process the payload and send it to the correct topic.
defp process_and_send(payload, meta, state) do
IO.puts(" [x] Received [#{meta.routing_key}] #{payload}")
msg_processed = process(payload)
# TODO: Dynamically select topics?
send("insert.processed.dharma", msg_processed, state.channel)
end
# Processes the `message`, preparing it to be inserted in the processed queues.
# Identity for now, will change later on!
@spec process(any) :: any
defp process(message) do
message
end
# Sends a `message` in the exchange "dharma", in a certain channel, with a `topic`.
@spec send(String.t(), any, AMQP.Channel.t()) :: :ok
defp send(topic, message, channel) do
AMQP.Basic.publish(channel, "dharma", topic, message)
IO.puts(" [x] Sent '[#{topic}] #{message}'")
end
@impl true
def handle_info({:basic_deliver, payload, meta}, state) do
IO.puts(" [x] Received [#{meta.routing_key}] #{payload}")
msg_processed = process(payload)
send("insert.processed.*", msg_processed, state.channel)
AMQP.Basic.ack(state.channel, meta.delivery_tag)
{:noreply, state}
end
@impl true
def handle_info(_, state) do
{:noreply, state}
end
end
|
dharma_server/apps/processor/lib/processor.ex
| 0.749912
| 0.527864
|
processor.ex
|
starcoder
|
defmodule Health.Data.Periods do
@moduledoc """
The Data.Periods context.
"""
import Ecto.Query, warn: false
alias Health.Repo
alias Health.Data.Periods.Period
alias Health.Accounts.User
@doc """
Returns the list of periods.
## Examples
iex> list_periods()
[%Period{}, ...]
"""
def list_periods do
Repo.all(Period)
end
def list_user_periods(%User{} = user) do
Repo.all(from p in Period, where: p.user_id == ^user.id)
end
def list_user_periods(current_page, per_page, %User{} = user) do
Period
|> where([p], p.user_id == ^user.id)
|> offset([], ^((current_page - 1) * per_page))
|> limit([], ^per_page)
|> Repo.all()
end
@doc """
Gets a single period.
Raises `Ecto.NoResultsError` if the Period does not exist.
## Examples
iex> get_period!(123)
%Period{}
iex> get_period!(456)
** (Ecto.NoResultsError)
"""
def get_period!(id), do: Repo.get!(Period, id)
@doc """
Creates a period.
## Examples
iex> create_period(%{field: value})
{:ok, %Period{}}
iex> create_period(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_period(attrs \\ %{}) do
%Period{}
|> Period.changeset(attrs)
|> Repo.insert()
end
def create_period(%User{} = user, attrs) do
%Period{}
|> Period.changeset(attrs)
|> Ecto.Changeset.put_change(:user_id, user.id)
|> Repo.insert()
end
@doc """
Updates a period.
## Examples
iex> update_period(period, %{field: new_value})
{:ok, %Period{}}
iex> update_period(period, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_period(%Period{} = period, attrs) do
period
|> Period.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Period.
## Examples
iex> delete_period(period)
{:ok, %Period{}}
iex> delete_period(period)
{:error, %Ecto.Changeset{}}
"""
def delete_period(%Period{} = period) do
Repo.delete(period)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking period changes.
## Examples
iex> change_period(period)
%Ecto.Changeset{source: %Period{}}
"""
def change_period(%Period{} = period) do
Period.changeset(period, %{})
end
end
|
lib/health/data/periods.ex
| 0.913729
| 0.482856
|
periods.ex
|
starcoder
|
defmodule Obs do
use GenServer
defstruct [:value, observers: []]
def create(initial_value \\ 0) do
state = %Obs{value: initial_value}
GenServer.start_link(__MODULE__, state)
end
def init(state), do: {:ok, state}
def add_observer(observers, observer_pid) do
[observer_pid | observers]
end
def remove_observer(observers, observer_pid) do
observers -- [observer_pid]
end
defp notify(state) do
state.observers |> Enum.each(&send(&1, state.value))
end
def attach(subject), do: GenServer.cast(subject, {:attach, self()})
def detach(subject), do: GenServer.cast(subject, {:detach, self()})
def read(subject), do: GenServer.call(subject, :read)
def handle_call(:read, _reader_id, state) do
{:reply, state.value, state}
end
def handle_cast({:attach, observer_pid}, state) do
new_observers = state.observers |> add_observer(observer_pid)
{:noreply, Map.put(state, :observers, new_observers)}
end
def handle_cast({:detach, observer_pid}, state) do
new_observers = state.observers |> remove_observer(observer_pid)
{:noreply, Map.put(state, :observers, new_observers)}
end
## Extending
def handle_cast(:increment, state) do
state = Map.put(state, :value, state.value + 1)
notify(state)
{:noreply, state}
end
def handle_cast(:decrement, state) do
state = Map.put(state, :value, state.value - 1)
notify(state)
{:noreply, state}
end
def increment(subject), do: GenServer.cast(subject, :increment)
def decrement(subject), do: GenServer.cast(subject, :decrement)
def await(timeout) do
receive do
count -> count
after
2ß00 -> "There was not answer"
end
end
end
{:ok, subject} = Obs.create()
Obs.read(subject) |> IO.puts()
Obs.attach(subject)
Obs.read(subject) |> IO.puts()
Obs.increment(subject)
Obs.await() |> IO.puts()
Obs.increment(subject)
Obs.await() |> IO.puts()
Obs.decrement(subject)
Obs.await() |> IO.puts()
Obs.detach(subject)
Obs.increment(subject)
Obs.await() |> IO.puts()
IO.puts("The counter is #{Obs.read(subject)}")
|
11-genserver/obs.ex
| 0.661267
| 0.461927
|
obs.ex
|
starcoder
|
defmodule IBMCloud.KeyProtect do
@moduledoc """
IBM Key Protect API.
- [IBM Cloud Docs- IBM Key Protect](https://cloud.ibm.com/docs/services/key-protect)
- [IBM Cloud API - IBM Key Protect API](https://cloud.ibm.com/apidocs/key-protect)
"""
import IBMCloud.Utils
def build_client(endpoint, bearer_token, instance_id, adapter \\ nil) do
Tesla.client(
[
{Tesla.Middleware.BaseUrl, endpoint},
{Tesla.Middleware.JSON,
decode_content_types: [
"application/vnd.ibm.kms.key+json",
"application/vnd.ibm.kms.key_action+json"
]},
{Tesla.Middleware.Headers,
[
{"authorization", "Bearer " <> bearer_token},
{"bluemix-instance", instance_id}
]}
],
adapter
)
end
@doc """
Lists a list of keys.
See [Retrieve a list of keys](https://cloud.ibm.com/apidocs/key-protect#retrieve-a-list-of-keys) for details.
"""
def list_keys(client, opts \\ []) do
case Tesla.get(client, "api/v2/keys", opts) do
# resources is missing when there is no keys
{:ok, %{status: 200, body: %{"metadata" => _}} = resp} -> {:ok, resp}
{_, other} -> {:error, other}
end
end
@doc """
Creates a new key.
See [Create a new key](https://cloud.ibm.com/apidocs/key-protect#create-a-new-key) for details.
"""
def create_key(client, %{} = key_body, opts \\ []) do
body = %{
metadata: %{
collectionType: "application/vnd.ibm.kms.key+json",
collectionTotal: 1
},
resources: [Map.put(key_body, :type, "application/vnd.ibm.kms.key+json")]
}
case Tesla.post(client, "api/v2/keys", body, opts) do
{:ok, %{status: 201, body: %{"resources" => _}} = resp} -> {:ok, resp}
{_, other} -> {:error, other}
end
end
@doc """
Deletes a key by ID.
See [Delete a key by ID](https://cloud.ibm.com/apidocs/key-protect#delete-a-key-by-id) for details.
"""
def delete_key(client, id, opts \\ []) do
case Tesla.delete(client, "api/v2/keys/" <> uri_encode(id), opts) do
{:ok, %{status: 204} = resp} -> {:ok, resp}
{_, other} -> {:error, other}
end
end
@doc """
Invokes an action on key.
See [Invoke an action on a key](https://cloud.ibm.com/apidocs/key-protect#invoke-an-action-on-a-key) for details.
"""
def invoke_action(client, id, action, %{} = body, opts \\ []) do
opts =
opts
|> Keyword.put(:method, :post)
|> Keyword.put(:body, body)
|> Keyword.put(:url, "api/v2/keys/" <> uri_encode(id))
|> opts_put_query({"action", action})
case Tesla.request(client, opts) do
{:ok, %{status: status} = resp} when status in [200, 204] -> {:ok, resp}
{_, other} -> {:error, other}
end
end
def wrap_key(client, id, %{} = body, opts \\ []),
do: invoke_action(client, id, :wrap, body, opts)
def unwrap_key(client, id, %{} = body, opts \\ []),
do: invoke_action(client, id, :unwrap, body, opts)
end
|
lib/ibmcloud/key_protect.ex
| 0.672439
| 0.401072
|
key_protect.ex
|
starcoder
|
defmodule Ockam.SecureChannel.Channel do
@moduledoc false
use GenStateMachine
alias Ockam.Node
alias Ockam.SecureChannel.EncryptedTransportProtocol.AeadAesGcm, as: EncryptedTransport
alias Ockam.SecureChannel.KeyEstablishmentProtocol.XX, as: XXKeyEstablishmentProtocol
alias Ockam.Telemetry
@doc false
def send(channel, message), do: Node.send(channel, message)
@doc false
def peer(_channel), do: :ok
def established?(channel) do
channel |> Node.whereis() |> GenStateMachine.call(:established?)
end
@doc false
def create(options) when is_list(options) do
## TODO: why secure channel is not a worker?
address_prefix = Keyword.get(options, :address_prefix, "")
options =
Keyword.put_new_lazy(options, :address, fn ->
Node.get_random_unregistered_address(address_prefix)
end)
case Node.start_supervised(__MODULE__, options) do
{:ok, _pid, address} -> {:ok, address}
error -> error
end
end
@doc false
def start_link(options) when is_list(options) do
with {:ok, address} <- get_from_options(:address, options),
{:ok, pid} <- start(address, options) do
{:ok, pid, address}
end
end
defp start(address, options) do
name = {:via, Node.process_registry(), address}
GenStateMachine.start_link(__MODULE__, options, name: name)
end
@doc false
@impl true
def init(options) do
metadata = %{options: options}
start_time = Telemetry.emit_start_event([__MODULE__, :init], metadata: metadata)
with {:ok, data} <- setup_plaintext_address(options, %{}),
{:ok, data} <- setup_ciphertext_address(options, data),
{:ok, data} <- setup_vault(options, data),
{:ok, data} <- setup_peer(options, data),
{:ok, data} <- setup_initiating_message(options, data),
{:ok, initial, data} <- setup_key_establishment_protocol(options, data),
{:ok, initial, data} <- setup_encrypted_transport_protocol(options, initial, data) do
return_value = {:ok, initial, data}
metadata = Map.put(metadata, :return_value, return_value)
Telemetry.emit_stop_event([__MODULE__, :init], start_time, metadata: metadata)
return_value
end
end
@doc false
@impl true
def handle_event(event_type, event, state, data) do
metadata = %{event_type: event_type, event: event, state: state, data: data}
start_time = Telemetry.emit_start_event([__MODULE__, :handle_event], metadata: metadata)
return_value = handle_message(event_type, event, state, data)
metadata = Map.put(metadata, :return_value, return_value)
Telemetry.emit_stop_event([__MODULE__, :handle_event], start_time, metadata: metadata)
return_value
end
defp handle_message({:call, from}, :established?, state, data) do
established = {:encrypted_transport, :ready} === state
{:next_state, state, data, [{:reply, from, established}]}
end
defp handle_message(:info, event, {:key_establishment, _role, _role_state} = state, data) do
key_establishment_protocol = Map.get(data, :key_establishment_protocol)
key_establishment_protocol.handle_message(event, state, data)
end
defp handle_message(:info, event, {:encrypted_transport, :ready} = state, data) do
EncryptedTransport.handle_message(event, state, data)
end
# application facing address is plaintext address
defp setup_plaintext_address(options, data) do
case Keyword.get(options, :address) do
nil -> {:error, {:option_is_nil, :address}}
plaintext_address -> {:ok, Map.put(data, :plaintext_address, plaintext_address)}
end
end
# network facing address is ciphertext address
defp setup_ciphertext_address(options, data) do
## TODO: use a different prefix?
address_prefix = Keyword.get(options, :address_prefix, "")
ciphertext_address = Node.get_random_unregistered_address(address_prefix)
with :yes <- Node.register_address(ciphertext_address, self()) do
{:ok, Map.put(data, :ciphertext_address, ciphertext_address)}
end
end
# sets vault based on - vault option
defp setup_vault(options, data) do
with {:ok, vault} <- get_from_options(:vault, options) do
{:ok, Map.put(data, :vault, vault)}
end
end
# sets peer based on - route option
def setup_peer(options, data) do
route = Keyword.get(options, :route, [])
{:ok, Map.put(data, :peer, %{route: route})}
end
# sets initiating_message
defp setup_initiating_message(options, data) do
case Keyword.get(options, :initiating_message) do
nil -> {:ok, data}
initiating_message -> {:ok, Map.put(data, :initiating_message, initiating_message)}
end
end
# sets a key establishment protocol and calls its setup
defp setup_key_establishment_protocol(options, data) do
case Keyword.get(options, :key_establishment_protocol, XXKeyEstablishmentProtocol) do
XXKeyEstablishmentProtocol ->
data = Map.put(data, :key_establishment_protocol, XXKeyEstablishmentProtocol)
data.key_establishment_protocol.setup(options, data)
unexpected_protocol ->
{:error, {:unexpected_key_establishment_protocol, unexpected_protocol}}
end
end
# sets a encrypted transport protocol and calls its setup
defp setup_encrypted_transport_protocol(options, initial_state, data) do
EncryptedTransport.setup(options, initial_state, data)
end
@doc false
defp get_from_options(key, options) do
case Keyword.get(options, key) do
nil -> {:error, {:option_is_nil, key}}
value -> {:ok, value}
end
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/secure_channel/channel.ex
| 0.568416
| 0.411229
|
channel.ex
|
starcoder
|
defmodule NervesTime do
@moduledoc """
Keep time in sync on Nerves devices
`NervesTime` keeps the system clock on [Nerves](http://nerves-project.org)
devices in sync when connected to the network and close to in sync when
disconnected. It's especially useful for devices lacking a [Battery-backed
real-time clock](https://en.wikipedia.org/wiki/Real-time_clock) and will
advance the clock at startup to a reasonable guess.
Nearly all configuration is via the application config (`config.exs`). The
following keys are available:
* `:servers` - a list of NTP servers for time synchronization. Specifying an
empty list turns off NTP
* `:time_file` - a file path for tracking the time. It allows the system to
start with a reasonable time quickly on boot and before the Internet is
available for NTP to work.
* `:earliest_time` - times before this are considered invalid and adjusted
* `:latest_time` - times after this are considered invalid and adjusted
* `:ntpd` - the absolute path to the Busybox `ntpd`. This only needs to be
set if your system does not provide `ntpd` in the `$PATH`.
"""
@doc """
Check whether NTP is synchronized with the configured NTP servers
It's possible that the time is already set correctly when this returns false.
`NervesTime` decides that NTP is synchronized when `ntpd` sends a
notification that the device's clock stratum is 4 or less. Clock adjustments
occur before this, though.
"""
@spec synchronized?() :: boolean()
defdelegate synchronized?, to: NervesTime.Ntpd
@doc """
Set the list of NTP servers
Use this function to replace the list of NTP servers that are queried for
time. It is also possible to set this list in your `config.exs` by doing
something like the following:
```elixir
config :nerves_time, :servers, [
"0.pool.ntp.org",
"1.pool.ntp.org",
"2.pool.ntp.org",
"3.pool.ntp.org"
]
```
`NervesTime` uses [NTP Pool](https://www.ntppool.org/en/) by default. To
disable this and configure servers solely at runtime, specify an empty list
in `config.exs`:
```elixir
config :nerves_time, :servers, []
```
"""
@spec set_ntp_servers([String.t()]) :: :ok
defdelegate set_ntp_servers(servers), to: NervesTime.Ntpd
@doc """
Return the current NTP servers
"""
@spec ntp_servers() :: [String.t()] | {:error, term()}
defdelegate ntp_servers(), to: NervesTime.Ntpd
@doc """
Set tune sync handler
"""
@spec add_time_sync_handler(fun()) :: :ok | {:error, term()}
defdelegate add_time_sync_handler(handler_fun), to: NervesTime.Ntpd
@doc """
Manually restart the NTP daemon
This is normally not necessary since `NervesTime` handles restarting it
automatically. An example of a reason to call this function is if you know
when the Internet becomes available. For this case, calling `restart_ntp`
will cancel `ntpd`'s internal timeouts and cause it to immediately send time
requests. If using NTP Pool, be sure not to violate its terms of service by
calling this function too frequently.
"""
@spec restart_ntpd() :: :ok | {:error, term()}
defdelegate restart_ntpd(), to: NervesTime.Ntpd
end
|
lib/nerves_time.ex
| 0.856167
| 0.921711
|
nerves_time.ex
|
starcoder
|
defmodule AWS.Config do
@moduledoc """
AWS Config
AWS Config provides a way to keep track of the configurations of all the
AWS resources associated with your AWS account. You can use AWS Config to
get the current and historical configurations of each AWS resource and also
to get information about the relationship between the resources. An AWS
resource can be an Amazon Compute Cloud (Amazon EC2) instance, an Elastic
Block Store (EBS) volume, an elastic network Interface (ENI), or a security
group. For a complete list of resources currently supported by AWS Config,
see [Supported AWS
Resources](https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).
You can access and manage AWS Config through the AWS Management Console,
the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS
SDKs for AWS Config. This reference guide contains documentation for the
AWS Config API and the AWS CLI commands that you can use to manage AWS
Config. The AWS Config API uses the Signature Version 4 protocol for
signing requests. For more information about how to sign a request with
this protocol, see [Signature Version 4 Signing
Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
For detailed information about AWS Config features and their associated
actions or commands, as well as how to work with AWS Management Console,
see [What Is AWS
Config](https://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html)
in the *AWS Config Developer Guide*.
"""
@doc """
Returns the current configuration items for resources that are present in
your AWS Config aggregator. The operation also returns a list of resources
that are not processed in the current request. If there are no unprocessed
resources, the operation returns an empty `unprocessedResourceIdentifiers`
list.
<note> <ul> <li> The API does not return results for deleted resources.
</li> <li> The API does not return tags and relationships.
</li> </ul> </note>
"""
def batch_get_aggregate_resource_config(client, input, options \\ []) do
request(client, "BatchGetAggregateResourceConfig", input, options)
end
@doc """
Returns the current configuration for one or more requested resources. The
operation also returns a list of resources that are not processed in the
current request. If there are no unprocessed resources, the operation
returns an empty unprocessedResourceKeys list.
<note> <ul> <li> The API does not return results for deleted resources.
</li> <li> The API does not return any tags for the requested resources.
This information is filtered out of the supplementaryConfiguration section
of the API response.
</li> </ul> </note>
"""
def batch_get_resource_config(client, input, options \\ []) do
request(client, "BatchGetResourceConfig", input, options)
end
@doc """
Deletes the authorization granted to the specified configuration aggregator
account in a specified region.
"""
def delete_aggregation_authorization(client, input, options \\ []) do
request(client, "DeleteAggregationAuthorization", input, options)
end
@doc """
Deletes the specified AWS Config rule and all of its evaluation results.
AWS Config sets the state of a rule to `DELETING` until the deletion is
complete. You cannot update a rule while it is in this state. If you make a
`PutConfigRule` or `DeleteConfigRule` request for the rule, you will
receive a `ResourceInUseException`.
You can check the state of a rule by using the `DescribeConfigRules`
request.
"""
def delete_config_rule(client, input, options \\ []) do
request(client, "DeleteConfigRule", input, options)
end
@doc """
Deletes the specified configuration aggregator and the aggregated data
associated with the aggregator.
"""
def delete_configuration_aggregator(client, input, options \\ []) do
request(client, "DeleteConfigurationAggregator", input, options)
end
@doc """
Deletes the configuration recorder.
After the configuration recorder is deleted, AWS Config will not record
resource configuration changes until you create a new configuration
recorder.
This action does not delete the configuration information that was
previously recorded. You will be able to access the previously recorded
information by using the `GetResourceConfigHistory` action, but you will
not be able to access this information in the AWS Config console until you
create a new configuration recorder.
"""
def delete_configuration_recorder(client, input, options \\ []) do
request(client, "DeleteConfigurationRecorder", input, options)
end
@doc """
Deletes the specified conformance pack and all the AWS Config rules,
remediation actions, and all evaluation results within that conformance
pack.
AWS Config sets the conformance pack to `DELETE_IN_PROGRESS` until the
deletion is complete. You cannot update a conformance pack while it is in
this state.
"""
def delete_conformance_pack(client, input, options \\ []) do
request(client, "DeleteConformancePack", input, options)
end
@doc """
Deletes the delivery channel.
Before you can delete the delivery channel, you must stop the configuration
recorder by using the `StopConfigurationRecorder` action.
"""
def delete_delivery_channel(client, input, options \\ []) do
request(client, "DeleteDeliveryChannel", input, options)
end
@doc """
Deletes the evaluation results for the specified AWS Config rule. You can
specify one AWS Config rule per request. After you delete the evaluation
results, you can call the `StartConfigRulesEvaluation` API to start
evaluating your AWS resources against the rule.
"""
def delete_evaluation_results(client, input, options \\ []) do
request(client, "DeleteEvaluationResults", input, options)
end
@doc """
Deletes the specified organization config rule and all of its evaluation
results from all member accounts in that organization.
Only a master account and a delegated administrator account can delete an
organization config rule. When calling this API with a delegated
administrator, you must ensure AWS Organizations
`ListDelegatedAdministrator` permissions are added.
AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the
deletion is complete. You cannot update a rule while it is in this state.
"""
def delete_organization_config_rule(client, input, options \\ []) do
request(client, "DeleteOrganizationConfigRule", input, options)
end
@doc """
Deletes the specified organization conformance pack and all of the config
rules and remediation actions from all member accounts in that
organization.
Only a master account or a delegated administrator account can delete an
organization conformance pack. When calling this API with a delegated
administrator, you must ensure AWS Organizations
`ListDelegatedAdministrator` permissions are added.
AWS Config sets the state of a conformance pack to DELETE_IN_PROGRESS until
the deletion is complete. You cannot update a conformance pack while it is
in this state.
"""
def delete_organization_conformance_pack(client, input, options \\ []) do
request(client, "DeleteOrganizationConformancePack", input, options)
end
@doc """
Deletes pending authorization requests for a specified aggregator account
in a specified region.
"""
def delete_pending_aggregation_request(client, input, options \\ []) do
request(client, "DeletePendingAggregationRequest", input, options)
end
@doc """
Deletes the remediation configuration.
"""
def delete_remediation_configuration(client, input, options \\ []) do
request(client, "DeleteRemediationConfiguration", input, options)
end
@doc """
Deletes one or more remediation exceptions mentioned in the resource keys.
<note> AWS Config generates a remediation exception when a problem occurs
executing a remediation action to a specific resource. Remediation
exceptions blocks auto-remediation until the exception is cleared.
</note>
"""
def delete_remediation_exceptions(client, input, options \\ []) do
request(client, "DeleteRemediationExceptions", input, options)
end
@doc """
Records the configuration state for a custom resource that has been
deleted. This API records a new ConfigurationItem with a ResourceDeleted
status. You can retrieve the ConfigurationItems recorded for this resource
in your AWS Config History.
"""
def delete_resource_config(client, input, options \\ []) do
request(client, "DeleteResourceConfig", input, options)
end
@doc """
Deletes the retention configuration.
"""
def delete_retention_configuration(client, input, options \\ []) do
request(client, "DeleteRetentionConfiguration", input, options)
end
@doc """
Schedules delivery of a configuration snapshot to the Amazon S3 bucket in
the specified delivery channel. After the delivery has started, AWS Config
sends the following notifications using an Amazon SNS topic that you have
specified.
<ul> <li> Notification of the start of the delivery.
</li> <li> Notification of the completion of the delivery, if the delivery
was successfully completed.
</li> <li> Notification of delivery failure, if the delivery failed.
</li> </ul>
"""
def deliver_config_snapshot(client, input, options \\ []) do
request(client, "DeliverConfigSnapshot", input, options)
end
@doc """
Returns a list of compliant and noncompliant rules with the number of
resources for compliant and noncompliant rules.
<note> The results can return an empty result page, but if you have a
`nextToken`, the results are displayed on the next page.
</note>
"""
def describe_aggregate_compliance_by_config_rules(client, input, options \\ []) do
request(client, "DescribeAggregateComplianceByConfigRules", input, options)
end
@doc """
Returns a list of authorizations granted to various aggregator accounts and
regions.
"""
def describe_aggregation_authorizations(client, input, options \\ []) do
request(client, "DescribeAggregationAuthorizations", input, options)
end
@doc """
Indicates whether the specified AWS Config rules are compliant. If a rule
is noncompliant, this action returns the number of AWS resources that do
not comply with the rule.
A rule is compliant if all of the evaluated resources comply with it. It is
noncompliant if any of these resources do not comply.
If AWS Config has no current evaluation results for the rule, it returns
`INSUFFICIENT_DATA`. This result might indicate one of the following
conditions:
<ul> <li> AWS Config has never invoked an evaluation for the rule. To check
whether it has, use the `DescribeConfigRuleEvaluationStatus` action to get
the `LastSuccessfulInvocationTime` and `LastFailedInvocationTime`.
</li> <li> The rule's AWS Lambda function is failing to send evaluation
results to AWS Config. Verify that the role you assigned to your
configuration recorder includes the `config:PutEvaluations` permission. If
the rule is a custom rule, verify that the AWS Lambda execution role
includes the `config:PutEvaluations` permission.
</li> <li> The rule's AWS Lambda function has returned `NOT_APPLICABLE` for
all evaluation results. This can occur if the resources were deleted or
removed from the rule's scope.
</li> </ul>
"""
def describe_compliance_by_config_rule(client, input, options \\ []) do
request(client, "DescribeComplianceByConfigRule", input, options)
end
@doc """
Indicates whether the specified AWS resources are compliant. If a resource
is noncompliant, this action returns the number of AWS Config rules that
the resource does not comply with.
A resource is compliant if it complies with all the AWS Config rules that
evaluate it. It is noncompliant if it does not comply with one or more of
these rules.
If AWS Config has no current evaluation results for the resource, it
returns `INSUFFICIENT_DATA`. This result might indicate one of the
following conditions about the rules that evaluate the resource:
<ul> <li> AWS Config has never invoked an evaluation for the rule. To check
whether it has, use the `DescribeConfigRuleEvaluationStatus` action to get
the `LastSuccessfulInvocationTime` and `LastFailedInvocationTime`.
</li> <li> The rule's AWS Lambda function is failing to send evaluation
results to AWS Config. Verify that the role that you assigned to your
configuration recorder includes the `config:PutEvaluations` permission. If
the rule is a custom rule, verify that the AWS Lambda execution role
includes the `config:PutEvaluations` permission.
</li> <li> The rule's AWS Lambda function has returned `NOT_APPLICABLE` for
all evaluation results. This can occur if the resources were deleted or
removed from the rule's scope.
</li> </ul>
"""
def describe_compliance_by_resource(client, input, options \\ []) do
request(client, "DescribeComplianceByResource", input, options)
end
@doc """
Returns status information for each of your AWS managed Config rules. The
status includes information such as the last time AWS Config invoked the
rule, the last time AWS Config failed to invoke the rule, and the related
error for the last failure.
"""
def describe_config_rule_evaluation_status(client, input, options \\ []) do
request(client, "DescribeConfigRuleEvaluationStatus", input, options)
end
@doc """
Returns details about your AWS Config rules.
"""
def describe_config_rules(client, input, options \\ []) do
request(client, "DescribeConfigRules", input, options)
end
@doc """
Returns status information for sources within an aggregator. The status
includes information about the last time AWS Config verified authorization
between the source account and an aggregator account. In case of a failure,
the status contains the related error code or message.
"""
def describe_configuration_aggregator_sources_status(client, input, options \\ []) do
request(client, "DescribeConfigurationAggregatorSourcesStatus", input, options)
end
@doc """
Returns the details of one or more configuration aggregators. If the
configuration aggregator is not specified, this action returns the details
for all the configuration aggregators associated with the account.
"""
def describe_configuration_aggregators(client, input, options \\ []) do
request(client, "DescribeConfigurationAggregators", input, options)
end
@doc """
Returns the current status of the specified configuration recorder. If a
configuration recorder is not specified, this action returns the status of
all configuration recorders associated with the account.
<note> Currently, you can specify only one configuration recorder per
region in your account.
</note>
"""
def describe_configuration_recorder_status(client, input, options \\ []) do
request(client, "DescribeConfigurationRecorderStatus", input, options)
end
@doc """
Returns the details for the specified configuration recorders. If the
configuration recorder is not specified, this action returns the details
for all configuration recorders associated with the account.
<note> Currently, you can specify only one configuration recorder per
region in your account.
</note>
"""
def describe_configuration_recorders(client, input, options \\ []) do
request(client, "DescribeConfigurationRecorders", input, options)
end
@doc """
Returns compliance details for each rule in that conformance pack.
<note> You must provide exact rule names.
</note>
"""
def describe_conformance_pack_compliance(client, input, options \\ []) do
request(client, "DescribeConformancePackCompliance", input, options)
end
@doc """
Provides one or more conformance packs deployment status.
<note> If there are no conformance packs then you will see an empty result.
</note>
"""
def describe_conformance_pack_status(client, input, options \\ []) do
request(client, "DescribeConformancePackStatus", input, options)
end
@doc """
Returns a list of one or more conformance packs.
"""
def describe_conformance_packs(client, input, options \\ []) do
request(client, "DescribeConformancePacks", input, options)
end
@doc """
Returns the current status of the specified delivery channel. If a delivery
channel is not specified, this action returns the current status of all
delivery channels associated with the account.
<note> Currently, you can specify only one delivery channel per region in
your account.
</note>
"""
def describe_delivery_channel_status(client, input, options \\ []) do
request(client, "DescribeDeliveryChannelStatus", input, options)
end
@doc """
Returns details about the specified delivery channel. If a delivery channel
is not specified, this action returns the details of all delivery channels
associated with the account.
<note> Currently, you can specify only one delivery channel per region in
your account.
</note>
"""
def describe_delivery_channels(client, input, options \\ []) do
request(client, "DescribeDeliveryChannels", input, options)
end
@doc """
Provides organization config rule deployment status for an organization.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are added.
<note> The status is not considered successful until organization config
rule is successfully deployed in all the member accounts with an exception
of excluded accounts.
When you specify the limit and the next token, you receive a paginated
response. Limit and next token are not applicable if you specify
organization config rule names. It is only applicable, when you request all
the organization config rules.
</note>
"""
def describe_organization_config_rule_statuses(client, input, options \\ []) do
request(client, "DescribeOrganizationConfigRuleStatuses", input, options)
end
@doc """
Returns a list of organization config rules.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are
added.

<note> When you specify the limit and the next token, you receive a
paginated response. Limit and next token are not applicable if you specify
organization config rule names. It is only applicable, when you request all
the organization config rules.
</note>
"""
def describe_organization_config_rules(client, input, options \\ []) do
request(client, "DescribeOrganizationConfigRules", input, options)
end
@doc """
Provides organization conformance pack deployment status for an
organization.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are added.
<note> The status is not considered successful until organization
conformance pack is successfully deployed in all the member accounts with
an exception of excluded accounts.
When you specify the limit and the next token, you receive a paginated
response. Limit and next token are not applicable if you specify
organization conformance pack names. They are only applicable, when you
request all the organization conformance packs.
</note>
"""
def describe_organization_conformance_pack_statuses(client, input, options \\ []) do
request(client, "DescribeOrganizationConformancePackStatuses", input, options)
end
@doc """
Returns a list of organization conformance packs.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are added.
<note> When you specify the limit and the next token, you receive a
paginated response.
Limit and next token are not applicable if you specify organization
conformance packs names. They are only applicable, when you request all the
organization conformance packs.
</note>
"""
def describe_organization_conformance_packs(client, input, options \\ []) do
request(client, "DescribeOrganizationConformancePacks", input, options)
end
@doc """
Returns a list of all pending aggregation requests.
"""
def describe_pending_aggregation_requests(client, input, options \\ []) do
request(client, "DescribePendingAggregationRequests", input, options)
end
@doc """
Returns the details of one or more remediation configurations.
"""
def describe_remediation_configurations(client, input, options \\ []) do
request(client, "DescribeRemediationConfigurations", input, options)
end
@doc """
Returns the details of one or more remediation exceptions. A detailed view
of a remediation exception for a set of resources that includes an
explanation of an exception and the time when the exception will be
deleted. When you specify the limit and the next token, you receive a
paginated response.
<note> AWS Config generates a remediation exception when a problem occurs
executing a remediation action to a specific resource. Remediation
exceptions blocks auto-remediation until the exception is cleared.
When you specify the limit and the next token, you receive a paginated
response.
Limit and next token are not applicable if you request resources in batch.
It is only applicable, when you request all resources.
</note>
"""
def describe_remediation_exceptions(client, input, options \\ []) do
request(client, "DescribeRemediationExceptions", input, options)
end
@doc """
Provides a detailed view of a Remediation Execution for a set of resources
including state, timestamps for when steps for the remediation execution
occur, and any error messages for steps that have failed. When you specify
the limit and the next token, you receive a paginated response.
"""
def describe_remediation_execution_status(client, input, options \\ []) do
request(client, "DescribeRemediationExecutionStatus", input, options)
end
@doc """
Returns the details of one or more retention configurations. If the
retention configuration name is not specified, this action returns the
details for all the retention configurations for that account.
<note> Currently, AWS Config supports only one retention configuration per
region in your account.
</note>
"""
def describe_retention_configurations(client, input, options \\ []) do
request(client, "DescribeRetentionConfigurations", input, options)
end
@doc """
Returns the evaluation results for the specified AWS Config rule for a
specific resource in a rule. The results indicate which AWS resources were
evaluated by the rule, when each resource was last evaluated, and whether
each resource complies with the rule.
<note> The results can return an empty result page. But if you have a
`nextToken`, the results are displayed on the next page.
</note>
"""
def get_aggregate_compliance_details_by_config_rule(client, input, options \\ []) do
request(client, "GetAggregateComplianceDetailsByConfigRule", input, options)
end
@doc """
Returns the number of compliant and noncompliant rules for one or more
accounts and regions in an aggregator.
<note> The results can return an empty result page, but if you have a
nextToken, the results are displayed on the next page.
</note>
"""
def get_aggregate_config_rule_compliance_summary(client, input, options \\ []) do
request(client, "GetAggregateConfigRuleComplianceSummary", input, options)
end
@doc """
Returns the resource counts across accounts and regions that are present in
your AWS Config aggregator. You can request the resource counts by
providing filters and GroupByKey.
For example, if the input contains accountID 12345678910 and region
us-east-1 in filters, the API returns the count of resources in account ID
12345678910 and region us-east-1. If the input contains ACCOUNT_ID as a
GroupByKey, the API returns resource counts for all source accounts that
are present in your aggregator.
"""
def get_aggregate_discovered_resource_counts(client, input, options \\ []) do
request(client, "GetAggregateDiscoveredResourceCounts", input, options)
end
@doc """
Returns configuration item that is aggregated for your specific resource in
a specific source account and region.
"""
def get_aggregate_resource_config(client, input, options \\ []) do
request(client, "GetAggregateResourceConfig", input, options)
end
@doc """
Returns the evaluation results for the specified AWS Config rule. The
results indicate which AWS resources were evaluated by the rule, when each
resource was last evaluated, and whether each resource complies with the
rule.
"""
def get_compliance_details_by_config_rule(client, input, options \\ []) do
request(client, "GetComplianceDetailsByConfigRule", input, options)
end
@doc """
Returns the evaluation results for the specified AWS resource. The results
indicate which AWS Config rules were used to evaluate the resource, when
each rule was last used, and whether the resource complies with each rule.
"""
def get_compliance_details_by_resource(client, input, options \\ []) do
request(client, "GetComplianceDetailsByResource", input, options)
end
@doc """
Returns the number of AWS Config rules that are compliant and noncompliant,
up to a maximum of 25 for each.
"""
def get_compliance_summary_by_config_rule(client, input, options \\ []) do
request(client, "GetComplianceSummaryByConfigRule", input, options)
end
@doc """
Returns the number of resources that are compliant and the number that are
noncompliant. You can specify one or more resource types to get these
numbers for each resource type. The maximum number returned is 100.
"""
def get_compliance_summary_by_resource_type(client, input, options \\ []) do
request(client, "GetComplianceSummaryByResourceType", input, options)
end
@doc """
Returns compliance details of a conformance pack for all AWS resources that
are monitered by conformance pack.
"""
def get_conformance_pack_compliance_details(client, input, options \\ []) do
request(client, "GetConformancePackComplianceDetails", input, options)
end
@doc """
Returns compliance details for the conformance pack based on the cumulative
compliance results of all the rules in that conformance pack.
"""
def get_conformance_pack_compliance_summary(client, input, options \\ []) do
request(client, "GetConformancePackComplianceSummary", input, options)
end
@doc """
Returns the resource types, the number of each resource type, and the total
number of resources that AWS Config is recording in this region for your
AWS account.
<p class="title"> **Example**
<ol> <li> AWS Config is recording three resource types in the US East
(Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3
buckets.
</li> <li> You make a call to the `GetDiscoveredResourceCounts` action and
specify that you want all resource types.
</li> <li> AWS Config returns the following:
<ul> <li> The resource types (EC2 instances, IAM users, and S3 buckets).
</li> <li> The number of each resource type (25, 20, and 15).
</li> <li> The total number of all resources (60).
</li> </ul> </li> </ol> The response is paginated. By default, AWS Config
lists 100 `ResourceCount` objects on each page. You can customize this
number with the `limit` parameter. The response includes a `nextToken`
string. To get the next page of results, run the request again and specify
the string for the `nextToken` parameter.
<note> If you make a call to the `GetDiscoveredResourceCounts` action, you
might not immediately receive resource counts in the following situations:
<ul> <li> You are a new AWS Config customer.
</li> <li> You just enabled resource recording.
</li> </ul> It might take a few minutes for AWS Config to record and count
your resources. Wait a few minutes and then retry the
`GetDiscoveredResourceCounts` action.
</note>
"""
def get_discovered_resource_counts(client, input, options \\ []) do
request(client, "GetDiscoveredResourceCounts", input, options)
end
@doc """
Returns detailed status for each member account within an organization for
a given organization config rule.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are added.
"""
def get_organization_config_rule_detailed_status(client, input, options \\ []) do
request(client, "GetOrganizationConfigRuleDetailedStatus", input, options)
end
@doc """
Returns detailed status for each member account within an organization for
a given organization conformance pack.
Only a master account and a delegated administrator account can call this
API. When calling this API with a delegated administrator, you must ensure
AWS Organizations `ListDelegatedAdministrator` permissions are added.
"""
def get_organization_conformance_pack_detailed_status(client, input, options \\ []) do
request(client, "GetOrganizationConformancePackDetailedStatus", input, options)
end
@doc """
Returns a list of configuration items for the specified resource. The list
contains details about each state of the resource during the specified time
interval. If you specified a retention period to retain your
`ConfigurationItems` between a minimum of 30 days and a maximum of 7 years
(2557 days), AWS Config returns the `ConfigurationItems` for the specified
retention period.
The response is paginated. By default, AWS Config returns a limit of 10
configuration items per page. You can customize this number with the
`limit` parameter. The response includes a `nextToken` string. To get the
next page of results, run the request again and specify the string for the
`nextToken` parameter.
<note> Each call to the API is limited to span a duration of seven days. It
is likely that the number of records returned is smaller than the specified
`limit`. In such cases, you can make another call, using the `nextToken`.
</note>
"""
def get_resource_config_history(client, input, options \\ []) do
request(client, "GetResourceConfigHistory", input, options)
end
@doc """
Accepts a resource type and returns a list of resource identifiers that are
aggregated for a specific resource type across accounts and regions. A
resource identifier includes the resource type, ID, (if available) the
custom resource name, source account, and source region. You can narrow the
results to include only resources that have specific resource IDs, or a
resource name, or source account ID, or source region.
For example, if the input consists of accountID 12345678910 and the region
is us-east-1 for resource type `AWS::EC2::Instance` then the API returns
all the EC2 instance identifiers of accountID 12345678910 and region
us-east-1.
"""
def list_aggregate_discovered_resources(client, input, options \\ []) do
request(client, "ListAggregateDiscoveredResources", input, options)
end
@doc """
Accepts a resource type and returns a list of resource identifiers for the
resources of that type. A resource identifier includes the resource type,
ID, and (if available) the custom resource name. The results consist of
resources that AWS Config has discovered, including those that AWS Config
is not currently recording. You can narrow the results to include only
resources that have specific resource IDs or a resource name.
<note> You can specify either resource IDs or a resource name, but not
both, in the same request.
</note> The response is paginated. By default, AWS Config lists 100
resource identifiers on each page. You can customize this number with the
`limit` parameter. The response includes a `nextToken` string. To get the
next page of results, run the request again and specify the string for the
`nextToken` parameter.
"""
def list_discovered_resources(client, input, options \\ []) do
request(client, "ListDiscoveredResources", input, options)
end
@doc """
List the tags for AWS Config resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Authorizes the aggregator account and region to collect data from the
source account and region.
"""
def put_aggregation_authorization(client, input, options \\ []) do
request(client, "PutAggregationAuthorization", input, options)
end
@doc """
Adds or updates an AWS Config rule for evaluating whether your AWS
resources comply with your desired configurations.
You can use this action for custom AWS Config rules and AWS managed Config
rules. A custom AWS Config rule is a rule that you develop and maintain. An
AWS managed Config rule is a customizable, predefined rule that AWS Config
provides.
If you are adding a new custom AWS Config rule, you must first create the
AWS Lambda function that the rule invokes to evaluate your resources. When
you use the `PutConfigRule` action to add the rule to AWS Config, you must
specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the
function. Specify the ARN for the `SourceIdentifier` key. This key is part
of the `Source` object, which is part of the `ConfigRule` object.
If you are adding an AWS managed Config rule, specify the rule's identifier
for the `SourceIdentifier` key. To reference AWS managed Config rule
identifiers, see [About AWS Managed Config
Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html).
For any new rule that you add, specify the `ConfigRuleName` in the
`ConfigRule` object. Do not specify the `ConfigRuleArn` or the
`ConfigRuleId`. These values are generated by AWS Config for new rules.
If you are updating a rule that you added previously, you can specify the
rule by `ConfigRuleName`, `ConfigRuleId`, or `ConfigRuleArn` in the
`ConfigRule` data type that you use in this request.
The maximum number of rules that AWS Config supports is 150.
For information about requesting a rule limit increase, see [AWS Config
Limits](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config)
in the *AWS General Reference Guide*.
For more information about developing and using AWS Config rules, see
[Evaluating AWS Resource Configurations with AWS
Config](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html)
in the *AWS Config Developer Guide*.
"""
def put_config_rule(client, input, options \\ []) do
request(client, "PutConfigRule", input, options)
end
@doc """
Creates and updates the configuration aggregator with the selected source
accounts and regions. The source account can be individual account(s) or an
organization.
<note> AWS Config should be enabled in source accounts and regions you want
to aggregate.
If your source type is an organization, you must be signed in to the master
account and all features must be enabled in your organization. AWS Config
calls `EnableAwsServiceAccess` API to enable integration between AWS Config
and AWS Organizations.
</note>
"""
def put_configuration_aggregator(client, input, options \\ []) do
request(client, "PutConfigurationAggregator", input, options)
end
@doc """
Creates a new configuration recorder to record the selected resource
configurations.
You can use this action to change the role `roleARN` or the
`recordingGroup` of an existing recorder. To change the role, call the
action on the existing configuration recorder and specify a role.
<note> Currently, you can specify only one configuration recorder per
region in your account.
If `ConfigurationRecorder` does not have the **recordingGroup** parameter
specified, the default is to record all supported resource types.
</note>
"""
def put_configuration_recorder(client, input, options \\ []) do
request(client, "PutConfigurationRecorder", input, options)
end
@doc """
Creates or updates a conformance pack. A conformance pack is a collection
of AWS Config rules that can be easily deployed in an account and a region
and across AWS Organization.
This API creates a service linked role `AWSServiceRoleForConfigConforms` in
your account. The service linked role is created only when the role does
not exist in your account.
<note> You must specify either the `TemplateS3Uri` or the `TemplateBody`
parameter, but not both. If you provide both AWS Config uses the
`TemplateS3Uri` parameter and ignores the `TemplateBody` parameter.
</note>
"""
def put_conformance_pack(client, input, options \\ []) do
request(client, "PutConformancePack", input, options)
end
@doc """
Creates a delivery channel object to deliver configuration information to
an Amazon S3 bucket and Amazon SNS topic.
Before you can create a delivery channel, you must create a configuration
recorder.
You can use this action to change the Amazon S3 bucket or an Amazon SNS
topic of the existing delivery channel. To change the Amazon S3 bucket or
an Amazon SNS topic, call this action and specify the changed values for
the S3 bucket and the SNS topic. If you specify a different value for
either the S3 bucket or the SNS topic, this action will keep the existing
value for the parameter that is not changed.
<note> You can have only one delivery channel per region in your account.
</note>
"""
def put_delivery_channel(client, input, options \\ []) do
request(client, "PutDeliveryChannel", input, options)
end
@doc """
Used by an AWS Lambda function to deliver evaluation results to AWS Config.
This action is required in every AWS Lambda function that is invoked by an
AWS Config rule.
"""
def put_evaluations(client, input, options \\ []) do
request(client, "PutEvaluations", input, options)
end
@doc """
Adds or updates organization config rule for your entire organization
evaluating whether your AWS resources comply with your desired
configurations.
Only a master account and a delegated administrator can create or update an
organization config rule. When calling this API with a delegated
administrator, you must ensure AWS Organizations
`ListDelegatedAdministrator` permissions are added.
This API enables organization service access through the
`EnableAWSServiceAccess` action and creates a service linked role
`AWSServiceRoleForConfigMultiAccountSetup` in the master or delegated
administrator account of your organization. The service linked role is
created only when the role does not exist in the caller account. AWS Config
verifies the existence of role with `GetRole` action.
To use this API with delegated administrator, register a delegated
administrator by calling AWS Organization
`register-delegated-administrator` for
`config-multiaccountsetup.amazonaws.com`.
You can use this action to create both custom AWS Config rules and AWS
managed Config rules. If you are adding a new custom AWS Config rule, you
must first create AWS Lambda function in the master account or a delegated
administrator that the rule invokes to evaluate your resources. When you
use the `PutOrganizationConfigRule` action to add the rule to AWS Config,
you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to
the function. If you are adding an AWS managed Config rule, specify the
rule's identifier for the `RuleIdentifier` key.
The maximum number of organization config rules that AWS Config supports is
150 and 3 delegated administrator per organization.
<note> Prerequisite: Ensure you call `EnableAllFeatures` API to enable all
features in an organization.
Specify either `OrganizationCustomRuleMetadata` or
`OrganizationManagedRuleMetadata`.
</note>
"""
def put_organization_config_rule(client, input, options \\ []) do
request(client, "PutOrganizationConfigRule", input, options)
end
@doc """
Deploys conformance packs across member accounts in an AWS Organization.
Only a master account and a delegated administrator can call this API. When
calling this API with a delegated administrator, you must ensure AWS
Organizations `ListDelegatedAdministrator` permissions are added.
This API enables organization service access for
`config-multiaccountsetup.amazonaws.com` through the
`EnableAWSServiceAccess` action and creates a service linked role
`AWSServiceRoleForConfigMultiAccountSetup` in the master or delegated
administrator account of your organization. The service linked role is
created only when the role does not exist in the caller account. To use
this API with delegated administrator, register a delegated administrator
by calling AWS Organization `register-delegate-admin` for
`config-multiaccountsetup.amazonaws.com`.
<note> Prerequisite: Ensure you call `EnableAllFeatures` API to enable all
features in an organization.
You must specify either the `TemplateS3Uri` or the `TemplateBody`
parameter, but not both. If you provide both AWS Config uses the
`TemplateS3Uri` parameter and ignores the `TemplateBody` parameter.
AWS Config sets the state of a conformance pack to CREATE_IN_PROGRESS and
UPDATE_IN_PROGRESS until the conformance pack is created or updated. You
cannot update a conformance pack while it is in this state.
You can create 6 conformance packs with 25 AWS Config rules in each pack
and 3 delegated administrator per organization.
</note>
"""
def put_organization_conformance_pack(client, input, options \\ []) do
request(client, "PutOrganizationConformancePack", input, options)
end
@doc """
Adds or updates the remediation configuration with a specific AWS Config
rule with the selected target or action. The API creates the
`RemediationConfiguration` object for the AWS Config rule. The AWS Config
rule must already exist for you to add a remediation configuration. The
target (SSM document) must exist and have permissions to use the target.
<note> If you make backward incompatible changes to the SSM document, you
must call this again to ensure the remediations can run.
</note>
"""
def put_remediation_configurations(client, input, options \\ []) do
request(client, "PutRemediationConfigurations", input, options)
end
@doc """
A remediation exception is when a specific resource is no longer considered
for auto-remediation. This API adds a new exception or updates an exisiting
exception for a specific resource with a specific AWS Config rule.
<note> AWS Config generates a remediation exception when a problem occurs
executing a remediation action to a specific resource. Remediation
exceptions blocks auto-remediation until the exception is cleared.
</note>
"""
def put_remediation_exceptions(client, input, options \\ []) do
request(client, "PutRemediationExceptions", input, options)
end
@doc """
Records the configuration state for the resource provided in the request.
The configuration state of a resource is represented in AWS Config as
Configuration Items. Once this API records the configuration item, you can
retrieve the list of configuration items for the custom resource type using
existing AWS Config APIs.
<note> The custom resource type must be registered with AWS CloudFormation.
This API accepts the configuration item registered with AWS CloudFormation.
When you call this API, AWS Config only stores configuration state of the
resource provided in the request. This API does not change or remediate the
configuration of the resource.
Write-only schema properites are not recorded as part of the published
configuration item.
</note>
"""
def put_resource_config(client, input, options \\ []) do
request(client, "PutResourceConfig", input, options)
end
@doc """
Creates and updates the retention configuration with details about
retention period (number of days) that AWS Config stores your historical
information. The API creates the `RetentionConfiguration` object and names
the object as **default**. When you have a `RetentionConfiguration` object
named **default**, calling the API modifies the default object.
<note> Currently, AWS Config supports only one retention configuration per
region in your account.
</note>
"""
def put_retention_configuration(client, input, options \\ []) do
request(client, "PutRetentionConfiguration", input, options)
end
@doc """
Accepts a structured query language (SQL) SELECT command and an aggregator
to query configuration state of AWS resources across multiple accounts and
regions, performs the corresponding search, and returns resource
configurations matching the properties.
For more information about query components, see the [ **Query Components**
](https://docs.aws.amazon.com/config/latest/developerguide/query-components.html)
section in the AWS Config Developer Guide.
"""
def select_aggregate_resource_config(client, input, options \\ []) do
request(client, "SelectAggregateResourceConfig", input, options)
end
@doc """
Accepts a structured query language (SQL) `SELECT` command, performs the
corresponding search, and returns resource configurations matching the
properties.
For more information about query components, see the [ **Query Components**
](https://docs.aws.amazon.com/config/latest/developerguide/query-components.html)
section in the AWS Config Developer Guide.
"""
def select_resource_config(client, input, options \\ []) do
request(client, "SelectResourceConfig", input, options)
end
@doc """
Runs an on-demand evaluation for the specified AWS Config rules against the
last known configuration state of the resources. Use
`StartConfigRulesEvaluation` when you want to test that a rule you updated
is working as expected. `StartConfigRulesEvaluation` does not re-record the
latest configuration state for your resources. It re-runs an evaluation
against the last known state of your resources.
You can specify up to 25 AWS Config rules per request.
An existing `StartConfigRulesEvaluation` call for the specified rules must
complete before you can call the API again. If you chose to have AWS Config
stream to an Amazon SNS topic, you will receive a
`ConfigRuleEvaluationStarted` notification when the evaluation starts.
<note> You don't need to call the `StartConfigRulesEvaluation` API to run
an evaluation for a new rule. When you create a rule, AWS Config evaluates
your resources against the rule automatically.
</note> The `StartConfigRulesEvaluation` API is useful if you want to run
on-demand evaluations, such as the following example:
<ol> <li> You have a custom rule that evaluates your IAM resources every 24
hours.
</li> <li> You update your Lambda function to add additional conditions to
your rule.
</li> <li> Instead of waiting for the next periodic evaluation, you call
the `StartConfigRulesEvaluation` API.
</li> <li> AWS Config invokes your Lambda function and evaluates your IAM
resources.
</li> <li> Your custom rule will still run periodic evaluations every 24
hours.
</li> </ol>
"""
def start_config_rules_evaluation(client, input, options \\ []) do
request(client, "StartConfigRulesEvaluation", input, options)
end
@doc """
Starts recording configurations of the AWS resources you have selected to
record in your AWS account.
You must have created at least one delivery channel to successfully start
the configuration recorder.
"""
def start_configuration_recorder(client, input, options \\ []) do
request(client, "StartConfigurationRecorder", input, options)
end
@doc """
Runs an on-demand remediation for the specified AWS Config rules against
the last known remediation configuration. It runs an execution against the
current state of your resources. Remediation execution is asynchronous.
You can specify up to 100 resource keys per request. An existing
StartRemediationExecution call for the specified resource keys must
complete before you can call the API again.
"""
def start_remediation_execution(client, input, options \\ []) do
request(client, "StartRemediationExecution", input, options)
end
@doc """
Stops recording configurations of the AWS resources you have selected to
record in your AWS account.
"""
def stop_configuration_recorder(client, input, options \\ []) do
request(client, "StopConfigurationRecorder", input, options)
end
@doc """
Associates the specified tags to a resource with the specified resourceArn.
If existing tags on a resource are not specified in the request parameters,
they are not changed. When a resource is deleted, the tags associated with
that resource are deleted as well.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Deletes specified tags from a resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "config"}
host = build_host("config", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "StarlingDoveService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/config.ex
| 0.873147
| 0.60842
|
config.ex
|
starcoder
|
defmodule Membrane.Event do
@moduledoc """
Structure representing a single event that flows between elements.
Each event:
- must contain type,
- may contain payload.
Type is used to distinguish event class.
Payload can hold additional information about the event.
Payload should always be a named struct appropriate for given event type.
"""
@type type_t :: atom
@type payload_t :: any
@type t :: %Membrane.Event{
type: type_t,
payload: payload_t,
stick_to: :nothing | :buffer,
mode: :sync | :async
}
defstruct type: nil,
payload: nil,
stick_to: :nothing,
mode: :sync
@spec sos() :: t
def sos() do
%Membrane.Event{type: :sos, stick_to: :buffer}
end
@doc """
Shorthand for creating a generic End of Stream event.
End of Stream event means that all buffers from the stream were processed
and no further buffers are expected to arrive.
"""
@spec eos() :: t
def eos() do
%Membrane.Event{type: :eos}
end
@doc """
Shorthand for creating a generic Discontinuity event.
Discontinuity event means that flow of buffers in the stream was interrupted
but stream itself is not done.
Frequent reasons for this are soundcards' drops while capturing sound, network
data loss etc.
If duration of the discontinuity is known, it can be passed as an argument.
See `Membrane.Event.Discontinuity.Payload` for the full description of the
payload.
"""
@spec discontinuity(Membrane.Event.Discontinuity.Payload.duration_t()) :: t
def discontinuity(duration \\ nil) do
%Membrane.Event{
type: :discontinuity,
payload: %Membrane.Event.Discontinuity.Payload{duration: duration}
}
end
@doc """
Shrothand for creating a generic Underrun event.
Underrun event means that certain element is willing to consume more buffers
but there are none available.
It makes sense to use this event as an upstream event to notify previous
elements in the pipeline that they should generate more buffers.
"""
@spec underrun() :: t
def underrun do
%Membrane.Event{type: :underrun}
end
end
|
lib/membrane/event.ex
| 0.857291
| 0.520679
|
event.ex
|
starcoder
|
defmodule Akd.Build.Phoenix.Npm do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations that build a npm release for a given phoenix app
at a deployment's `build_at` destination. This hook assumes that a package.json
is present.
Ensures to cleanup and remove `node_modules` folder created by this build.
Doesn't have any Rollback operations.
# Options:
* `run_ensure`: `boolean`. Specifies whether to a run a command or not.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `cmd_envs`: `list` of `tuples`. Specifies the environments to provide while
building the distillery release.
* `package_path`: `string`. Path to package.json
# Defaults:
* `run_ensure`: `true`
* `ignore_failure`: `false`
* `package_path`: `"."`
"""
use Akd.Hook
@default_opts [run_ensure: true, ignore_failure: false, package_path: "."]
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to build a npm
release on the `build_at` destination of a deployment.
## Examples
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Build.Phoenix.Npm.get_hooks(deployment, [])
[%Akd.Hook{ensure: [], ignore_failure: false,
main: [%Akd.Operation{cmd: "cd \\n npm install", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
def get_hooks(deployment, opts \\ []) do
opts = uniq_merge(opts, @default_opts)
package_path = Keyword.get(opts, :package)
[build_hook(deployment, opts, package_path)]
end
# This function takes a deployment and options and returns an Akd.Hook.t
# struct using FormHook DSL
defp build_hook(deployment, opts, package_path) do
destination = Akd.DestinationResolver.resolve(:build, deployment)
cmd_envs = Keyword.get(opts, :cmd_envs, [])
form_hook opts do
main "cd #{package_path} \n npm install", destination, cmd_envs: cmd_envs
# ensure "cd #{package_path} \n rm -rf node_modules", destination
end
end
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
|
lib/akd/phx/npm.ex
| 0.877411
| 0.542318
|
npm.ex
|
starcoder
|
defmodule TheElixir.Logic.Game do
@moduledoc """
Module to handle the main menus and text of the game
"""
alias TheElixir.Logic.Game
alias TheElixir.Lobby
alias TheElixir.Components.World
alias TheElixir.Components.Inventory
alias TheElixir.Components.Journal
alias TheElixir.Logic.RoomGame
@doc """
On key press `h`
List of commands that `player` can execute
"""
def command_help(player) do
IO.puts([
"m -> move forward\n",
"i -> inspect surroundings\n",
"w -> enter nearest room\n",
"e -> exit / quit\n",
"inv -> view inventory\n",
"world -> view all rooms in the world\n",
"j -> view journal\n",
"c -> clear screen\n"
])
Game.get_input(player)
end
@doc """
Function that takes the `player`'s next input and sends it
to be interpreted
"""
def get_input(player) do
input = IO.gets("(press h for help) >> ")
player |> Game.match_input(String.strip(input))
end
@doc """
Function that interprets `player` `input`
"""
def match_input(player, input) do
rooms = World.get(:world)
case input do
"m" -> Game.move(player)
"i" -> Game.inspect(player)
"w" -> Game.room(rooms, player)
"e" -> Lobby.exit
"inv" -> Game.get_inventory(player)
"world" -> Game.get_rooms(player)
"j" -> Game.get_journal(player)
"h" -> Game.command_help(player)
"c" -> Game.clear_screen(player)
_ -> IO.puts "We don't know this command ( yet ). Read the prompt!"
Game.get_input(player)
end
end
@doc """
Clears the screen for better vision after a second delay
On key press `c`
"""
def clear_screen(player) do
IO.puts("Clearing screen...")
:timer.sleep(1000)
System.cmd "clear", [], into: IO.stream(:stdio, :line)
Game.get_input(player)
end
@doc """
Gets the current quests in the `player` journal
On key press `j`
"""
def get_journal(player) do
quests = Journal.get(:journal)
case quests do
[] -> IO.puts("No entries yet.")
_ -> IO.puts(quests)
end
Game.get_input(player)
end
@doc """
Gets the current items in the `player` inventory
On key press `inv`
"""
def get_inventory(player) do
items = Inventory.get(:inventory)
case items do
[] -> IO.puts("No items yet.")
_ -> IO.puts(items)
end
Game.get_input(player)
end
@doc """
Gets the current rooms in the world
On key press `world`
"""
def get_rooms(player) do
rooms = World.get(:world)
case rooms do
[] -> IO.puts("No rooms yet.")
_ -> IO.puts("Rooms: \n")
Enum.each rooms, &IO.puts(&1 <> "\n")
end
Game.get_input(player)
end
@doc """
The text that pops the first time you enter the game
"""
def game_introduction(player) do
IO.puts([
"|>|>|>|>|>|>|>|>|>|>|>|>\n",
"Hello #{player.name}, and welcome to the world of The Elixir.\n",
"You are embarking on an exciting adventure down the road of Elixir,\n",
"A functional, concurrent, and fun book of spells to learn!\n",
"And now... let's begin!\n",
"|>|>|>|>|>|>|>|>|>|>|>|>"
])
Game.hallway_introduction(player)
end
@doc """
The hallway introduction text, notice the |>
"""
def hallway_introduction(player) do
IO.puts([
"You find yourself in a hallway, the walls are lined with symbols.\n",
"You try to decipher them with your primitive imperative knowledge\n",
"But they all look so unfamiliar, the one you see the most is |>, and\n",
"you begin to wonder what it is. You seem to like it. You want\n",
"to know more. There is a door to your right. What do you do?\n",
"|>|>|>|>|>|>|>|>|>|>|>|>"
])
Game.get_input(player)
end
@doc """
Move the player forward. Currently doesn't do much.
On key press `m`
"""
def move(player) do
IO.puts("You moved forward.")
Game.get_input(player)
end
@doc """
Inspect your surroundings. Notices doors, mostly.
On key press `i`
"""
def inspect(player) do
IO.puts("There is a door next to you. Maybe it leads somewhere?")
Game.get_input(player)
end
@doc """
Choose a room, takes first one for now
On key press `w`
"""
def room(rooms, _) when rooms == [], do: "There is no room nearby!"
def room(rooms, player) do
[room_name | _] = rooms
RoomGame.pick_room(player, room_name)
end
end
|
lib/the_elixir/logic/game.ex
| 0.632503
| 0.439807
|
game.ex
|
starcoder
|
defmodule Scidata.Squad do
@moduledoc """
Module for downloading the [SQuAD1.1 dataset](https://rajpurkar.github.io/SQuAD-explorer).
"""
require Scidata.Utils
alias Scidata.Utils
@base_url "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
@train_dataset_file "train-v1.1.json"
@test_dataset_file "dev-v1.1.json"
@doc """
Downloads the SQuAD training dataset
## Examples
iex> Scidata.Squad.download()
[
%{
"paragraphs" => [
%{
"context" => "Architecturally, the school has a...",
"qas" => [
%{
"answers" => [%{"answer_start" => 515, "text" => "<NAME>"}],
"id" => "5733be284776f41900661182",
"question" => "To whom did the..."
}, ...
]
}
],
"title" => "University_of_Notre_Dame"
}, ...
]
"""
def download() do
download_dataset(@train_dataset_file)
end
@doc """
Downloads the SQuAD test dataset
## Examples
iex> Scidata.Squad.download_test()
[
%{
"paragraphs" => [
%{
"context" => "Super Bowl 50 was an American football game t...",
"qas" => [
%{
"answers" => [
%{"answer_start" => 177, "text" => "<NAME>"},...
],
"id" => "56be4db0acb8001400a502ec",
"question" => "Which NFL team represented the AFC at Super Bowl 50?"
},
]
}
],
"title" => "Super_Bowl_50"
}, ...
]
"""
def download_test() do
download_dataset(@test_dataset_file)
end
defp download_dataset(dataset_name) do
content =
Utils.get!(@base_url <> dataset_name).body
|> Jason.decode!()
content["data"]
end
@doc """
Convert result of `download/0` or `download_test/0` to map for use with [Explorer.DataFrame](https://github.com/elixir-nx/explorer).
## Examples
iex> columns_for_df = Scidata.Squad.download() |> Scidata.Squad.to_columns()
%{
"answer_start" => [515, ...],
"context" => ["Architecturally, the...", ...],
"id" => ["5733be284776f41900661182", ...],
"question" => ["To whom did the Vir...", ...],
"answer_text" => ["<NAME>", ...],
"title" => ["University_of_Notre_Dame", ...]
}
iex> Explorer.DataFrame.from_map(columns_for_df)
#Explorer.DataFrame<
[rows: 87599, columns: 6]
...
>
"""
def to_columns(entries) do
table = %{
"answer_start" => [],
"context" => [],
"id" => [],
"question" => [],
"answer_text" => [],
"title" => []
}
for %{"paragraphs" => paragraph, "title" => title} <- entries,
%{"context" => context, "qas" => qas} <- paragraph,
%{"id" => id, "question" => question, "answers" => answers} <- qas,
%{"answer_start" => answer_start, "text" => answer_text} <- answers,
reduce: table do
%{
"answer_start" => answer_starts,
"context" => contexts,
"id" => ids,
"question" => questions,
"answer_text" => answer_texts,
"title" => titles
} ->
%{
"answer_start" => [answer_start | answer_starts],
"context" => [context | contexts],
"id" => [id | ids],
"question" => [question | questions],
"answer_text" => [answer_text | answer_texts],
"title" => [title | titles]
}
end
|> Enum.map(fn {key, values} -> {key, :lists.reverse(values)} end)
|> Enum.into(%{})
end
end
|
lib/scidata/squad.ex
| 0.640074
| 0.50653
|
squad.ex
|
starcoder
|
defmodule Oban.Repo do
@moduledoc """
Wrappers around `Ecto.Repo` callbacks.
These functions should be used when working with an Ecto repo inside a plugin. These functions
will resolve the correct repo instance, and set the schema prefix and the log level, according
to the Oban configuration.
"""
alias Ecto.{Changeset, Multi, Query, Queryable, Schema}
alias Oban.Config
@doc "Wraps `c:Ecto.Repo.all/2`."
@doc since: "2.2.0"
@spec all(Config.t(), Queryable.t(), Keyword.t()) :: [Schema.t()]
def all(conf, queryable, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.all(queryable, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.checkout/2`."
@doc since: "2.2.0"
@spec checkout(Config.t(), (() -> result), Keyword.t()) :: result when result: var
def checkout(conf, function, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.checkout(function, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.config/0`."
@doc since: "2.2.0"
@spec config(Config.t()) :: Keyword.t()
def config(conf), do: with_dynamic_repo(conf, &conf.repo.config/0)
@doc "Wraps `c:Ecto.Repo.delete/2`."
@doc since: "2.4.0"
@spec delete(
Config.t(),
struct_or_changeset :: Schema.t() | Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Schema.t()} | {:error, Changeset.t()}
def delete(conf, struct_or_changeset, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.delete(struct_or_changeset, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.delete_all/2`."
@doc since: "2.2.0"
@spec delete_all(Config.t(), Queryable.t(), Keyword.t()) :: {integer(), nil | [term()]}
def delete_all(conf, queryable, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.delete_all(queryable, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.insert/2`."
@doc since: "2.2.0"
@spec insert(Config.t(), Schema.t() | Changeset.t(), Keyword.t()) ::
{:ok, Schema.t()} | {:error, Changeset.t()}
def insert(conf, struct_or_changeset, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.insert(struct_or_changeset, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.insert_all/3`."
@doc since: "2.2.0"
@spec insert_all(
Config.t(),
binary() | {binary(), module()} | module(),
[map() | [{atom(), term() | Query.t()}]],
Keyword.t()
) :: {integer(), nil | [term()]}
def insert_all(conf, schema_or_source, entries, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.insert_all(schema_or_source, entries, query_opts(conf, opts)) end
)
end
@doc "Wraps `c:Ecto.Repo.one/2`."
@doc since: "2.2.0"
@spec one(Config.t(), Queryable.t(), Keyword.t()) :: Schema.t() | nil
def one(conf, queryable, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.one(queryable, query_opts(conf, opts)) end
)
end
@doc "Wraps `Ecto.Adapters.SQL.Repo.query/4`."
@doc since: "2.2.0"
@spec query(Config.t(), String.t(), [term()], Keyword.t()) ::
{:ok,
%{
:rows => nil | [[term()] | binary()],
:num_rows => non_neg_integer(),
optional(atom()) => any()
}}
| {:error, Exception.t()}
def query(conf, sql, params \\ [], opts \\ []) do
with_dynamic_repo(conf, fn -> conf.repo.query(sql, params, query_opts(conf, opts)) end)
end
@doc "Wraps `c:Ecto.Repo.stream/2`"
@doc since: "2.9.0"
@spec stream(Config.t(), Queryable.t(), Keyword.t()) :: Enum.t()
def stream(conf, queryable, opts \\ []) do
with_dynamic_repo(conf, fn -> conf.repo.stream(queryable, query_opts(conf, opts)) end)
end
@doc "Wraps `c:Ecto.Repo.transaction/2`."
@doc since: "2.2.0"
@spec transaction(Config.t(), (... -> any()) | Multi.t(), opts :: Keyword.t()) ::
{:ok, any()}
| {:error, any()}
| {:error, Multi.name(), any(), %{required(Multi.name()) => any()}}
def transaction(conf, fun_or_multi, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.transaction(fun_or_multi, default_opts(conf, opts)) end
)
end
@doc "Wraps `Ecto.Adapters.SQL.Repo.to_sql/2`."
@doc since: "2.2.0"
@spec to_sql(Config.t(), :all | :update_all | :delete_all, Queryable.t()) ::
{String.t(), [term()]}
def to_sql(conf, kind, queryable) do
queryable =
case Map.fetch(conf, :prefix) do
:error -> queryable
{:ok, prefix} -> queryable |> Queryable.to_query() |> Map.put(:prefix, prefix)
end
conf.repo.to_sql(kind, queryable)
end
@doc "Wraps `c:Ecto.Repo.update/2`."
@doc since: "2.2.0"
@spec update(Config.t(), Changeset.t(), Keyword.t()) ::
{:ok, Schema.t()} | {:error, Changeset.t()}
def update(conf, changeset, opts \\ []) do
with_dynamic_repo(conf, fn -> conf.repo.update(changeset, query_opts(conf, opts)) end)
end
@doc "Wraps `c:Ecto.Repo.update_all/3`."
@doc since: "2.2.0"
@spec update_all(Config.t(), Queryable.t(), Keyword.t(), Keyword.t()) ::
{integer(), nil | [term()]}
def update_all(conf, queryable, updates, opts \\ []) do
with_dynamic_repo(
conf,
fn -> conf.repo.update_all(queryable, updates, query_opts(conf, opts)) end
)
end
@doc false
@spec with_dynamic_repo(Config.t(), fun()) :: any()
def with_dynamic_repo(conf, fun) do
case get_dynamic_repo(conf) do
nil ->
fun.()
instance ->
prev_instance = conf.repo.get_dynamic_repo()
maybe_run_in_transaction(conf, fun, instance, prev_instance)
end
end
defp get_dynamic_repo(%{get_dynamic_repo: fun}) when is_function(fun, 0), do: fun.()
defp get_dynamic_repo(_conf), do: nil
defp maybe_run_in_transaction(conf, fun, instance, prev_instance) do
unless in_transaction?(conf, prev_instance) do
conf.repo.put_dynamic_repo(instance)
end
fun.()
after
conf.repo.put_dynamic_repo(prev_instance)
end
defp in_transaction?(conf, instance) when is_pid(instance), do: conf.repo.in_transaction?()
defp in_transaction?(conf, instance) when is_atom(instance) do
case GenServer.whereis(instance) do
pid when is_pid(pid) ->
in_transaction?(conf, pid)
_ ->
false
end
end
defp in_transaction?(_, _), do: false
defp default_opts(conf, opts) do
Keyword.put(opts, :log, conf.log)
end
defp query_opts(conf, opts) do
opts
|> Keyword.put(:log, conf.log)
|> Keyword.put(:prefix, conf.prefix)
end
end
|
lib/oban/repo.ex
| 0.853532
| 0.45847
|
repo.ex
|
starcoder
|
defmodule Flow.Window do
@moduledoc """
Splits a flow into windows that are materialized at certain triggers.
Windows allow developers to split data so we can understand incoming
data as time progresses. Once a window is created, we can specify
triggers that allow us to customize when the data accumulated on every
window is materialized.
Windows must be created by calling one of the window type functions.
The supported window types are as follows:
* Global windows - that's the default window which means all data
belongs to one single window. In other words, the data is not
split in any way. The window finishes when all producers notify
there is no more data
* Fixed windows - splits incoming events into periodic, non-
overlapping windows based on event times. In other words, a given
event belongs to a single window. If data arrives late, a configured
lateness can be specified.
* Periodic windows - splits incoming events into periodic, non-
overlapping windows based on processing times. Similar to fixed
windows, a given event belongs to a single window.
* Count windows - splits incoming events based on a count.
Similar to fixed windows, a given event belongs to a single
window.
* Session windows - splits incoming events into unique windows
which is grouped until there is a configured gap between event
times. Sessions are useful for data that is irregularly
distributed with respect to time.
We discuss all types and include examples below. In the first section,
"Global windows", we build the basic intuition about windows and triggers
as well as discuss the distinction between "Event time and processing time".
Then we explore "Fixed windows" and the concept of lateness before moving
on to other window types.
## Global windows
By default, all events belong to the global window. The global window
is automatically attached to a partition if no window is specified.
The flow below:
Flow.from_stages([some_producer])
|> Flow.partition()
|> Flow.reduce(fn -> 0 end, & &1 + 2)
is equivalent to:
Flow.from_stages([some_producer])
|> Flow.partition(window: Flow.Window.global())
|> Flow.reduce(fn -> 0 end, & &1 + 2)
Even though the global window does not split the data in any way, it
already provides conveniences for working with both bounded (finite)
and unbounded (infinite) via triggers.
For example, the flow below uses a global window with a count-based
trigger to emit the values being summed as we sum them:
iex> window = Flow.Window.global() |> Flow.Window.trigger_every(10)
iex> flow = Flow.from_enumerable(1..100) |> Flow.partition(window: window, stages: 1)
iex> flow |> Flow.reduce(fn -> 0 end, &(&1 + &2)) |> Flow.emit(:state) |> Enum.to_list()
[55, 210, 465, 820, 1275, 1830, 2485, 3240, 4095, 5050, 5050]
Let's explore the types of triggers available next.
### Triggers
Triggers allow us to check point the data processed so far. There
are different triggers we can use:
* Event count triggers - compute state operations every X events
* Processing time triggers - compute state operations every X time
units for every stage
* Punctuation - hand-written triggers based on the data
Flow supports the triggers above via the `trigger_every/3`,
`trigger_periodically/4` and `trigger/3` respectively.
Once a trigger is emitted, the `reduce/3` step halts and invokes
the `on_trigger/2` callback, allowing you to emit events and change
the reducer accumulator.
### Event time and processing time
Before we move to other window types, it is important to discuss
the distinction between event time and processing time. In particular,
triggers created with the `trigger_periodically/4` function are
intrinsically inaccurate and therefore should not be used to split the
data. For example, if you are measuring the frequency that events arrive,
using the event time will always yield the same result, while processing
time will be vulnerable to fluctuations if, for instance, an external
factor causes events to processed slower or faster than usual.
Furthermore, periodic triggers are established per partition and are
message-based, which means partitions will emit the triggers at different
times and possibly with delays based on the partition message queue size.
However, it is exactly this lack of precision which makes them efficient
for checkpointing data.
Flow provides other window types, such as fixed windows, exactly to address
the issues with processing time. Such windows use the event time which is
based on the data itself. When working with event time, we can assign the
data into proper windows even when late or out of order. Such windows can
be used to gather time-based insight from the data (for example, the most
popular hashtags in the last 10 minutes) as well as for checkpointing data.
## Fixed windows (event time)
Fixed windows group the data based on the event times. Regardless if
the data is bounded or not, fixed windows give us time-based insight
about the data.
Fixed windows are created via the `fixed/3` function which specified
the duration of the window and a function that retrieves the event time
from each event:
Flow.Window.fixed(1, :hour, fn {word, timestamp} -> timestamp end)
Let's see an example that will use the window above to count the frequency
of words based on windows that are 1 hour long. The timestamps used by
Flow are integers in milliseconds. For now, we will also set the concurrency
down 1 and max demand down to 5 as it is simpler to reason about the results:
iex> data = [{"elixir", 0}, {"elixir", 1_000}, {"erlang", 60_000},
...> {"concurrency", 3_200_000}, {"elixir", 4_000_000},
...> {"erlang", 5_000_000}, {"erlang", 6_000_000}]
iex> window = Flow.Window.fixed(1, :hour, fn {_word, timestamp} -> timestamp end)
iex> flow = Flow.from_enumerable(data, max_demand: 5, stages: 1)
iex> flow = Flow.partition(flow, window: window, stages: 1)
iex> flow = Flow.reduce(flow, fn -> %{} end, fn {word, _}, acc ->
...> Map.update(acc, word, 1, & &1 + 1)
...> end)
iex> flow |> Flow.emit(:state) |> Enum.to_list
[%{"elixir" => 2, "erlang" => 1, "concurrency" => 1},
%{"elixir" => 1, "erlang" => 2}]
Since the data has been broken in two windows, the first four events belong
to the same window while the last 3 belongs to the second one. Notice that
`reduce/3` is executed per window and that each event belongs to a single
window exclusively.
Similar to global windows, fixed windows can also have triggers, allowing
us to checkpoint the data as the computation happens.
### Data ordering, watermarks and lateness
When working with event time, Flow assumes by default that events are time
ordered. This means that, when we move from one window to another, like
when we received the entry `{"elixir", 4_000_000}` in the example above,
we assume the previous window has been completed.
Let's change the events above to be out of order and move the first event
to the end of the dataset and see what happens:
iex> data = [{"elixir", 1_000}, {"erlang", 60_000},
...> {"concurrency", 3_200_000}, {"elixir", 4_000_000},
...> {"erlang", 5_000_000}, {"erlang", 6_000_000}, {"elixir", 0}]
iex> window = Flow.Window.fixed(1, :hour, fn {_word, timestamp} -> timestamp end)
iex> flow = Flow.from_enumerable(data) |> Flow.partition(window: window, stages: 1, max_demand: 5)
iex> flow = Flow.reduce(flow, fn -> %{} end, fn {word, _}, acc ->
...> Map.update(acc, word, 1, & &1 + 1)
...> end)
iex> flow |> Flow.emit(:state) |> Enum.to_list
[%{"elixir" => 1, "erlang" => 1, "concurrency" => 1},
%{"elixir" => 1, "erlang" => 2}]
Notice that now the first map did not count the "elixir" word twice.
Since the event arrived late, it was marked as lost. However, in many
flows we actually expect data to arrive late or out of order, especially
when talking about concurrent data processing.
Luckily, event time windows include the concept of lateness, which is a
processing time base period we would wait to receive late events.
Let's change the example above once more but now change the window
to also call `allowed_lateness/4`:
iex> data = [{"elixir", 1_000}, {"erlang", 60_000},
...> {"concurrency", 3_200_000}, {"elixir", 4_000_000},
...> {"erlang", 5_000_000}, {"erlang", 6_000_000}, {"elixir", 0}]
iex> window = Flow.Window.fixed(1, :hour, fn {_word, timestamp} -> timestamp end)
iex> window = Flow.Window.allowed_lateness(window, 5, :minute)
iex> flow = Flow.from_enumerable(data) |> Flow.partition(window: window, stages: 1, max_demand: 5)
iex> flow = Flow.reduce(flow, fn -> %{} end, fn {word, _}, acc ->
...> Map.update(acc, word, 1, & &1 + 1)
...> end)
iex> flow |> Flow.emit(:state) |> Enum.to_list
[%{"concurrency" => 1, "elixir" => 1, "erlang" => 1},
%{"concurrency" => 1, "elixir" => 2, "erlang" => 1},
%{"elixir" => 1, "erlang" => 2}]
Now that we allow late events, we can see the first window emitted
twice. Instead of the window being marked as done when 1 hour passes,
we say it emits a **watermark trigger**. The window will be effectively
done only after the allowed lateness period. If desired, we can use
`Flow.on_trigger/2` to get more information about each particular window
and its trigger. Replace the last line above by the following:
flow
|> Flow.on_trigger(fn state, _index, trigger -> {[{state, trigger}], state} end)
|> Enum.to_list()
The trigger parameter will include the type of window, the current
window and what caused the window to be emitted (`:watermark` or
`:done`).
Note that all stages must receive an event that is outside of a specific
window before that window is considered complete. In other words if there are
multiple stages in the partition preceding a reduce operation that has
a window, the reduce step won't release a window until it has seen an event
that is outside of that window from all processes that it receives data from.
This could have an effect on how long events are delayed in the reduce step.
## Periodic windows (processing time)
Periodic windows are similar to fixed windows except triggers are
emitted based on processing time instead of event time. Remember that
relying on periodic windows or triggers is intrinsically inaccurate and
should not be used to split the data, only as a checkpointing device.
Periodic windows are also similar to global windows that use
`trigger_periodically/2` to emit events periodically. The difference is
that periodic windows emit a window in a given interval while a trigger
emits a trigger. This behaviour may affect functions such as `Flow.departition/4`,
which calls the `merge` callback per trigger but the `done` callback per
window. Unless you are relying on functions such as `Flow.departition/4`,
there is no distinction between periodic windows and global windows with
periodic triggers.
## Count windows (event count)
Count windows are simpler versions of fixed windows where windows are split
apart by event count. Since it is not timed-based, it does not provide the
concept of lateness.
iex> window = Flow.Window.count(10)
iex> flow = Flow.from_enumerable(1..100) |> Flow.partition(window: window, stages: 1)
iex> flow |> Flow.reduce(fn -> 0 end, &(&1 + &2)) |> Flow.emit(:state) |> Enum.to_list()
[55, 155, 255, 355, 455, 555, 655, 755, 855, 955, 0]
Count windows are also similar to global windows that use `trigger_every/2`
to emit events per count. The difference is that count windows emit a
window per event count while a trigger belongs to a window. This behaviour
may affect functions such as `Flow.departition/4`, which calls the `merge`
callback per trigger but the `done` callback per window. Unless you are
relying on functions such as `Flow.departition/4`, there is no distinction
between count windows and global windows with count triggers.
"""
@type t :: %{
required(:trigger) => {fun(), fun()} | nil,
required(:periodically) => [trigger],
optional(atom()) => term()
}
@typedoc "The supported window types."
@type type :: :global | :fixed | :periodic | :count | any()
@typedoc """
A function that returns the event time to window by.
It must return an integer representing the time in milliseconds.
Flow does not care if the integer is using the UNIX epoch,
Gregorian epoch or any other as long as it is consistent.
"""
@type by :: (term -> non_neg_integer)
@typedoc """
The window identifier.
It is `:global` for `:global` windows or an integer for fixed windows.
"""
@type id :: :global | non_neg_integer()
@typedoc """
The supported time units for fixed and periodic windows.
"""
@type time_unit :: :millisecond | :second | :minute | :hour
@typedoc "The name of the trigger."
@type trigger :: term
@doc """
Returns a global window.
Global window triggers have the shape of `{:global, :global, trigger_name}`.
See the section on "Global windows" in the module documentation for examples.
"""
@spec global :: t
def global do
%Flow.Window.Global{}
end
@doc """
Returns a count-based window of every `count` elements.
`count` must be a positive integer.
Count window triggers have the shape of `{:count, window, trigger_name}`,
where `window` is an incrementing integer identifying the window.
See the section on "Count windows" in the module documentation for examples.
"""
@spec count(pos_integer) :: t
def count(count) when is_integer(count) and count > 0 do
%Flow.Window.Count{count: count}
end
@doc """
Returns a period-based window of every `count` `unit`.
`count` is a positive integer and `unit` is one of `:millisecond`,
`:second`, `:minute`, or `:hour`. Remember periodic triggers are established
per partition and are message-based, which means partitions will emit the
triggers at different times and possibly with delays based on the partition
message queue size.
Periodic window triggers have the shape of `{:periodic, window, trigger_name}`,
where `window` is an incrementing integer identifying the window.
See the section on "Periodic windows" in the module documentation for examples.
"""
@spec periodic(pos_integer, time_unit) :: t
def periodic(count, unit) when is_integer(count) and count > 0 do
%Flow.Window.Periodic{duration: to_ms(count, unit)}
end
@doc """
Returns a fixed window of duration `count` `unit` where the
event time is calculated by the given function `by`.
`count` is a positive integer and `unit` is one of `:millisecond`,
`:second`, `:minute`, or `:hour`.
Fixed window triggers have the shape of `{:fixed, window, trigger_name}`,
where `window` is an integer that represents the beginning timestamp
for the current window.
If `allowed_lateness/4` is used with fixed windows, the window will
first emit a `{:fixed, window, :watermark}` trigger when the window
terminates and emit `{:fixed, window, :done}` only after the
`allowed_lateness/4` duration has passed.
See the section on "Fixed windows" in the module documentation for examples.
"""
@spec fixed(pos_integer, time_unit, (t -> pos_integer)) :: t
def fixed(count, unit, by) when is_integer(count) and count > 0 and is_function(by, 1) do
%Flow.Window.Fixed{duration: to_ms(count, unit), by: by}
end
@doc """
Sets a duration, in processing time, of how long we will
wait for late events for a given window.
If allowed lateness is configured, once the window is finished,
it won't trigger a `:done` event but instead emit a `:watermark`.
The window will be done only when the allowed lateness time expires,
effectively emitting the `:done` trigger.
`count` is a positive number. The `unit` may be a time unit
(`:millisecond`, `:second`, `:minute`, or `:hour`).
"""
@spec allowed_lateness(t, pos_integer, time_unit) :: t
def allowed_lateness(window, count, unit)
def allowed_lateness(%{lateness: _} = window, count, unit) do
%{window | lateness: to_ms(count, unit)}
end
def allowed_lateness(window, _, _) do
raise ArgumentError, "allowed_lateness/3 not supported for window type #{inspect(window)}"
end
@doc """
Calculates when to emit a trigger.
Triggers are calculated per window and are used to temporarily
halt the window accumulation, typically done with `Flow.reduce/3`,
allowing the next operations to execute before accumulation is
resumed.
This function expects the trigger accumulator function, which will
be invoked at the beginning of every window, and a trigger function
that receives the current batch of events and its own accumulator.
The trigger function must return one of the three values:
* `{:cont, acc}` - the reduce operation should continue as usual.
`acc` is the trigger state.
* `{:cont, events, acc}` - the reduce operation should continue, but
only with the events you want to emit as part of the next state.
`acc` is the trigger state.
* `{:trigger, name, pre, pos, acc}` - where `name` is the trigger `name`,
`pre` are the events to be consumed before the trigger, `pos` controls
events to be processed after the trigger with the `acc` as the new trigger
accumulator.
We recommend looking at the implementation of `trigger_every/3` as
an example of a custom trigger.
"""
@spec trigger(t, (() -> acc), trigger_fun) :: t
when trigger_fun: ([event], acc -> trigger_fun_return),
trigger_fun_return: cont_tuple | cont_tuple_with_emitted_events | trigger_tuple,
cont_tuple: {:cont, acc},
cont_tuple_with_emitted_events: {:cont, [event], acc},
trigger_tuple: {:trigger, trigger(), pre, pos, acc},
pre: [event],
pos: [event],
acc: term(),
event: term()
def trigger(window, acc_fun, trigger_fun) do
if is_function(acc_fun, 0) do
add_trigger(window, {acc_fun, trigger_fun})
else
raise ArgumentError,
"Flow.Window.trigger/3 expects the accumulator to be given as a function"
end
end
@doc """
A trigger emitted every `count` elements in a window.
The trigger will be named `{:every, count}`.
## Examples
Below is an example that checkpoints the sum from 1 to 100, emitting
a trigger with the state every 10 items. The extra 5050 value at the
end is the trigger emitted because processing is done.
iex> window = Flow.Window.global() |> Flow.Window.trigger_every(10)
iex> flow = Flow.from_enumerable(1..100) |> Flow.partition(window: window, stages: 1)
iex> flow |> Flow.reduce(fn -> 0 end, &(&1 + &2)) |> Flow.emit(:state) |> Enum.to_list()
[55, 210, 465, 820, 1275, 1830, 2485, 3240, 4095, 5050, 5050]
"""
@spec trigger_every(t, pos_integer) :: t
def trigger_every(window, count) when is_integer(count) and count > 0 do
name = {:every, count}
trigger(window, fn -> count end, fn events, acc ->
length = length(events)
if length(events) >= acc do
{pre, pos} = Enum.split(events, acc)
{:trigger, name, pre, pos, count}
else
{:cont, acc - length}
end
end)
end
@doc """
Emits a trigger periodically every `count` `unit`.
Such trigger will apply to every window that has changed since the last
periodic trigger.
`count` is a positive integer and `unit` is one of `:millisecond`,
`:second`, `:minute`, or `:hour`. Remember periodic triggers are established
per partition and are message-based, which means partitions will emit the
triggers at different times and possibly with delays based on the partition
message queue size.
The trigger will be named `{:periodically, count, unit}`.
## Message-based triggers (timers)
It is also possible to dispatch a trigger by sending a message to
`self()` with the format of `{:trigger, name}`. This is useful for
custom triggers and timers. One example is to send the message when
building the accumulator for `reduce/3`.
Similar to periodic triggers, message-based triggers will also be
invoked to all windows that have changed since the last trigger.
"""
@spec trigger_periodically(t, pos_integer, time_unit) :: t
def trigger_periodically(%{periodically: periodically} = window, count, unit)
when is_integer(count) and count > 0 do
trigger = {to_ms(count, unit), {:periodically, count, unit}}
%{window | periodically: [trigger | periodically]}
end
@spec to_ms(pos_integer(), time_unit()) :: pos_integer
defp to_ms(count, :millisecond), do: count
defp to_ms(count, :second), do: count * 1000
defp to_ms(count, :minute), do: count * 1000 * 60
defp to_ms(count, :hour), do: count * 1000 * 60 * 60
defp to_ms(_count, unit) do
raise ArgumentError,
"unknown unit #{inspect(unit)} (expected :millisecond, :second, :minute or :hour)"
end
defp add_trigger(%{trigger: nil} = window, trigger) do
%{window | trigger: trigger}
end
defp add_trigger(%{}, _trigger) do
raise ArgumentError,
"Flow.Window.trigger/3 or Flow.Window.trigger_every/3 " <>
"can only be called once per window"
end
end
|
lib/flow/window.ex
| 0.925369
| 0.817028
|
window.ex
|
starcoder
|
defprotocol Transducer do
@type accumulator :: any
@type state :: any
@type annotated_accumulator :: {:cont, accumulator} | {:halt, accumulator} | {:reduce, term, accumulator}
@type stateless_reducer :: (term, accumulator -> annotated_accumulator)
@type stateful_reducer :: (term, {state, accumulator} -> annotated_accumulator)
@type reducer_function :: stateless_reducer | stateful_reducer
@spec reducer(any, reducer_function) :: reducer_function
def reducer(transducer, reducer)
@spec initial_state(any) :: any
def initial_state(transducer)
@spec compose(any, any) :: any
def compose(transducer, other)
end
defmodule StatefulTransducer do
defstruct initial_state: nil, function: nil
end
defmodule ComposedTransducer do
defstruct transducers: []
end
defimpl Transducer, for: Function do
def reducer(transducer, reducer_function), do: transducer.(reducer_function)
def initial_state(_), do: :stateless
def compose(transducer, %StatefulTransducer{}=other) do
%ComposedTransducer{transducers: [transducer, other]}
end
def compose(transducer, %ComposedTransducer{}=other) do
%ComposedTransducer{transducers: [transducer | other.transducers]}
end
def compose(transducer, other), do: fn reducer -> transducer.(other.(reducer)) end
end
defimpl Transducer, for: StatefulTransducer do
def reducer(transducer, reducer_function) do
transducer.function.(reducer_function)
end
def initial_state(transducer), do: transducer.initial_state
def compose(transducer, %ComposedTransducer{}=other) do
%ComposedTransducer{transducers: [transducer | other.transducers]}
end
def compose(transducer, other) do
%ComposedTransducer{transducers: [transducer, other]}
end
end
defimpl Transducer, for: ComposedTransducer do
defp short_circuit(transducer) do
Transducer.reducer(transducer, fn item, acc -> {:reduce, item, acc} end)
end
def reducer(transducer, final_reducer) do
reducers = Enum.map(transducer.transducers, &short_circuit/1) ++ [final_reducer]
fn item, {states, accumulator} ->
reduce_composed(item, accumulator, reducers, states, [])
end
end
defp reduce_composed(item, accumulator, [reducer | reducers], [:stateless | states], used_states) do
case reducer.(item, accumulator) do
{:reduce, item, accumulator} ->
reduce_composed(item, accumulator, reducers, states, [:stateless | used_states])
{:halt, accumulator} ->
{:halt, {Enum.reverse([:stateless | used_states]) ++ states, accumulator}}
{:cont, accumulator} ->
{:cont, {Enum.reverse([:stateless | used_states]) ++ states, accumulator}}
end
end
defp reduce_composed(item, accumulator, [reducer | reducers], [state | states], used_states) do
case reducer.(item, {state, accumulator}) do
{:reduce, item, {state, accumulator}} ->
reduce_composed(item, accumulator, reducers, states, [state | used_states])
{:halt, {state, accumulator}} ->
{:halt, {Enum.reverse([state | used_states]) ++ states, accumulator}}
{:cont, {state, accumulator}} ->
{:cont, {Enum.reverse([state | used_states]) ++ states, accumulator}}
end
end
defp reduce_composed(item, accumulator, [reducer], [], used_states) do
reducer.(item, {Enum.reverse(used_states), accumulator})
end
def initial_state(transducer) do
Enum.map(transducer.transducers, &Transducer.initial_state/1)
end
def compose(transducer, %ComposedTransducer{transducers: [other | transducers]}) do
_compose(transducer.transducers, other, [], transducers)
end
def compose(transducer, other) do
_compose(transducer.transducers, other, [], [])
end
defp _compose([], nil, acc, [head | tail]) do
_compose([], nil, [head | acc], tail)
end
defp _compose([], nil, acc, []) do
%ComposedTransducer{transducers: Enum.reverse(acc)}
end
defp _compose([next], other, acc, tail) when is_function(next) and is_function(other) do
_compose([], nil, [Transducer.compose(next, other) | acc], tail)
end
defp _compose([next], other, acc, tail) do
_compose([], nil, [other | [next | acc]], tail)
end
defp _compose([next | transducers], other, acc, tail) do
_compose(transducers, other, [next | acc], tail)
end
end
defmodule Transduce do
@moduledoc """
Composable algorithmic transformations.
<NAME> introduced the idea in [this
post](http://blog.cognitect.com/blog/2014/8/6/transducers-are-coming).
[This post](http://phuu.net/2014/08/31/csp-and-transducers.html) is a good
conceptual introduction.
The input is always an enumerable. The output can be an enumerable...
iex> import Transduce, only: [transduce: 2, filter: 1, take: 1]
iex> transduce(1..100, [filter(&(&1 > 5)), take(5)])
[6, 7, 8, 9, 10]
...or can also produce another structure:
iex> import Transduce, only: [transduce: 3, filter: 1, take: 1, put: 3]
iex> transduce(
...> [4, 8, 7, 3, 2, 9, 6, 12, 15], [
...> filter(&(&1 > 5)),
...> take(5),
...> put(:min, nil, &min/2),
...> put(:max, 0, &max/2),
...> put(:count, 0, fn _, a -> a+1 end),
...> put(:total, 0, &Kernel.+/2)],
...> %{})
%{count: 5, max: 12, min: 6, total: 42}
You can write two kinds of transducers: stateless and stateful. A stateless
transducer is the most straightforward, since it is just a function. Consider
the `map` transducer.
```
def map(f) do
fn rf ->
fn item, accumulator -> rf.(f.(item), accumulator) end
end
end
```
At its simplest, a transducer takes a reducing function (`rf` above), and
then returns another reducing function that wraps the `rf` with its own
behavior. For `map`, it passes the mapped value to the `rf` for the next
step. This works if the rf eventually returns a value.
The return value is annotated, as defined by the Enumerable protocol.
`{:cont, VALUE}` specifies that the reduction process should continue with
the next item in the enumerable input, but VALUE is the new accumulator.
`{:halt, VALUE}` specifies that the reduction process should stop,
short-circuiting, and return VALUE as the result.
For example, consider the `filter` implementation.
```
def filter(f) do
fn rf ->
fn item, accumulator ->
if f.(item) do rf.(item, accumulator) else {:cont, accumulator} end
end
end
end
```
In that case, if the filter passes the new item from the enumerable, it is
passed through to the inner reducing function. If it doesn't pass, the
code returns `:cont` with an unchanged accumulator, indicating that we should
move on to the next item in the source enumerable without a new accumulator.
For an example of `:halt`, see the `take_while` implementation.
A stateful transducer looks similar, but has some additional features.
Consider the `take` implementation.
```
def take(count) do
%StatefulTransducer{
initial_state: 0,
function: fn rf ->
fn
item, {state, accumulator} when state < count ->
rf.(item, {state+1, accumulator})
item, {state, accumulator} -> {:halt, {state, accumulator}}
end
end
}
end
```
A take transducer returns a StatefulTransducer struct, which specifies an
`initial_state` and a `function`. The function again takes a reducing
function (`rf`) but the wrapping function this time expects that the
accumulator has the shape `{state, accumulator}`. The state is private to
this function, and will not be in the final accumulator result, but must also
be included in the function output, whether it's passed to the wrapped
reducing function or returned to the caller with `:halt` or `:cont`.
"""
@doc ~S"""
Transduce a given enumerable to generate a list.
## Examples
iex> import Transduce, only: [transduce: 2, take: 1, compose: 1, filter: 1]
iex> transduce([2,3,5,7,11], take(3))
[2, 3, 5]
iex> transduce(0..20, compose([filter(&(rem(&1, 2) == 0)), take(5)]))
[0, 2, 4, 6, 8]
iex> transduce(0..20, [filter(&(rem(&1, 3) == 0)), take(6)])
[0, 3, 6, 9, 12, 15]
"""
def transduce(enumerable, transducer) do
transduce(enumerable, transducer, [], &({:cont, [&1 | &2]}), &:lists.reverse/1)
end
@spec transduce(any, any, any, Transducer.stateless_reducer, any) :: any
def transduce(enumerable, transducer, accumulator, stateless_reducer \\ fn _, acc -> {:cont, acc} end, finalizer \\ &(&1))
def transduce(enumerable, transducer, accumulator, stateless_reducer, finalizer) when is_function(transducer) do
{_, result} = Enumerable.reduce(
enumerable, {:cont, accumulator}, transducer.(stateless_reducer))
finalizer.(result)
end
def transduce(enumerable, transducers, accumulator, stateless_reducer, finalizer) when is_list(transducers) do
transduce(enumerable, compose(transducers), accumulator, stateless_reducer, finalizer)
end
def transduce(enumerable, transducer, accumulator, stateless_reducer, finalizer) do
final_reducer = fn element, {state, accumulator} ->
{atom, accumulator} = stateless_reducer.(element, accumulator)
{atom, {state, accumulator}}
end
{_, {_state, result}} = Enumerable.reduce(
enumerable,
{:cont, {Transducer.initial_state(transducer), accumulator}},
Transducer.reducer(transducer, final_reducer))
finalizer.(result)
end
@doc ~S"""
Compose multiple transducers into one.
## Examples
iex> import Transduce, only: [transduce: 2, compose: 1, map: 1, filter: 1]
iex> transduce([2,3,5,7,11,13,17,19,23], compose([map(&(&1+1)), filter(&(rem(&1,3)==0))]))
[3, 6, 12, 18, 24]
Stateless transducers compose into functions.
iex> import Transduce, only: [compose: 1, map: 1]
iex> tr = compose([map(&(&1*2)), map(&(&1+2))])
iex> tr.(fn item, acc -> {item, acc} end).(5, 42)
{12, 42}
If any other kind of transducer enters the mix, it becomes a
ComposedTransducer.
iex> import Transduce, only: [compose: 1, map: 1, take: 1]
iex> tr = compose([map(&(&1*2)), take(5)])
iex> length(tr.transducers)
2
Composed transducers can themselves be composed.
iex> import Transduce, only: [transduce: 2, compose: 1, map: 1, filter: 1, take: 1]
iex> tr1 = compose([filter(&(rem(&1, 3)==0)), map(&(&1*2))])
iex> tr2 = compose([map(&(&1+1)), take(5)])
iex> transduce(0..20, compose([tr1, tr2]))
[1, 7, 13, 19, 25]
"""
def compose([first | rest] = _transducers) do
_compose(first, rest)
end
def _compose(current, [next1, next2 | rest])
when not is_function(current) and is_function(next1) and is_function(next2) do
_compose(current, [Transducer.compose(next1, next2) | rest])
end
def _compose(current, [next | rest]) do
_compose(Transducer.compose(current, next), rest)
end
def _compose(current, []), do: current
@doc ~S"""
Apply a function to each item it receives.
## Examples
iex> import Transduce, only: [transduce: 2, map: 1]
iex> transduce(0..4, map(&(-&1)))
[0, -1, -2, -3, -4]
"""
def map(f) do
fn rf ->
fn item, accumulator -> rf.(f.(item), accumulator) end
end
end
@doc ~S"""
Only include items if the filter function returns true.
## Examples
iex> import Transduce, only: [transduce: 2, filter: 1]
iex> transduce(0..5, filter(&(rem(&1,2)==0)))
[0, 2, 4]
"""
def filter(f) do
fn rf ->
fn item, accumulator ->
if f.(item) do rf.(item, accumulator) else {:cont, accumulator} end
end
end
end
@doc ~S"""
Exclude items if the remove function returns true.
## Examples
iex> import Transduce, only: [transduce: 2, remove: 1]
iex> transduce(0..5, remove(&(rem(&1,2)==0)))
[1, 3, 5]
"""
def remove(f) do
fn rf ->
fn item, accumulator ->
if f.(item) do {:cont, accumulator} else rf.(item, accumulator) end
end
end
end
@doc ~S"""
Only iterate while the function returns true.
## Examples
iex> import Transduce, only: [transduce: 2, take_while: 1]
iex> transduce([0, 1, 2, 10, 11, 4], take_while(&(&1 < 10)))
[0, 1, 2]
"""
def take_while(f) do
fn rf ->
fn item, accumulator ->
if f.(item) do rf.(item, accumulator) else {:halt, accumulator} end
end
end
end
@doc ~S"""
Take the first N items from the enumerable and then stop iteration.
## Examples
iex> import Transduce, only: [transduce: 2, take: 1, filter: 1]
iex> transduce(0..200, take(5))
[0, 1, 2, 3, 4]
iex> transduce(0..200, [filter(&(rem(&1, 5)==0)), take(5)])
[0, 5, 10, 15, 20]
"""
def take(count) do
%StatefulTransducer{
initial_state: 0,
function: fn rf ->
fn
item, {state, accumulator} when state < count ->
rf.(item, {state+1, accumulator})
_, {state, accumulator} -> {:halt, {state, accumulator}}
end
end
}
end
@doc ~S"""
Skip the first N items from the enumerable and then iterate.
## Examples
iex> import Transduce, only: [transduce: 2, skip: 1, take: 1]
iex> transduce(0..10, skip(8))
[8, 9, 10]
iex> transduce(0..20, [skip(4), take(2)])
[4, 5]
"""
def skip(count) do
%StatefulTransducer{
initial_state: 0,
function: fn rf ->
fn
_, {state, accumulator} when state < count ->
{:cont, {state + 1, accumulator}}
item, {state, accumulator} -> rf.(item, {state + 1, accumulator})
end
end
}
end
@doc ~S"""
Call the function with each value and the result of the previous call,
beginning with the initial_value.
## Examples
iex> import Transduce, only: [transduce: 2, scan: 2]
iex> transduce(1..5, scan(0, &(&1 + &2)))
[1, 3, 6, 10, 15]
"""
def scan(initial_value, f) do
%StatefulTransducer{
initial_state: initial_value,
function: fn rf ->
fn item, {state, accumulator} ->
new = f.(item, state)
rf.(new, {new, accumulator})
end
end
}
end
@doc ~S"""
Step over N items in the enumerable, taking 1 between each set. Called with
two arguments, you specify how many to take (take_count, skip_count).
## Examples
iex> import Transduce, only: [transduce: 2, step: 1, step: 2]
iex> transduce(0..10, step(2))
[0, 3, 6, 9]
iex> transduce(0..15, step(2, 3))
[0, 1, 5, 6, 10, 11, 15]
"""
def step(skip_count), do: step(1, skip_count)
def step(take_count, skip_count) do
total = take_count + skip_count
%StatefulTransducer{
initial_state: 0,
function: fn rf ->
fn item, {state, accumulator} ->
position = rem(state, total)
if position < take_count do
rf.(item, {position+1, accumulator})
else
{:cont, {position+1, accumulator}}
end
end
end
}
end
@doc ~S"""
For each item in the enumerable, transform it and the previous result with the
given reducer and then stash the resulting value in the accumulator, which
must be a map.
## Examples
iex> import Transduce, only: [transduce: 3, put: 3]
iex> transduce([6,3,8,2,4,9,5,0,1,7], [put(:min, nil, &min/2), put(:max, 0, &max/2)], %{})
%{max: 9, min: 0}
iex> import Transduce, only: [transduce: 3, filter: 1, put: 3]
iex> transduce(
...> 1..20, [
...> put(:total, 0, &Kernel.+/2),
...> put(:count, 0, fn _, ct -> ct + 1 end),
...> filter(fn v -> rem(v, 2) == 0 end),
...> put(:even, 0, &Kernel.+/2)
...> ],
...> %{})
%{count: 20, even: 110, total: 210}
iex> import Transduce, only: [transduce: 3, filter: 1, put: 4]
iex> transduce(
...> 1..20, [
...> put(:even, 0, filter(&(rem(&1, 2) == 0)), &Kernel.+/2),
...> put(:odd, 0, filter(&(rem(&1, 2) == 1)), &Kernel.+/2)
...> ],
...> %{})
%{even: 110, odd: 100}
iex> import Transduce, only: [transduce: 3, take: 1, put: 4, put: 3]
iex> transduce(
...> 1..20, [
...> put(:first, 0, take(5), &Kernel.+/2),
...> put(:total, 0, &Kernel.+/2)
...> ],
...> %{})
%{first: 15, total: 210}
"""
def put(key, initial_value, reducer) do
fn rf ->
fn item, acc ->
rf.(item, Map.put(acc, key, reducer.(item, Map.get(acc, key, initial_value))))
end
end
end
def put(key, initial_value, transducers, reducer) when is_list(transducers) do
put(key, initial_value, compose(transducers), reducer)
end
def put(key, initial_value, transducer, reducer) when is_function(transducer) do
final_reducer = Transducer.reducer(
transducer,
fn item, accumulator ->
{ :cont,
Map.put(
accumulator,
key,
reducer.(item, Map.get(accumulator, key, initial_value)))
}
end
)
%StatefulTransducer{
initial_state: :cont,
function: fn rf ->
fn
item, {:halt, _} = accumulator -> rf.(item, accumulator)
item, {:cont, accumulator} -> rf.(item, final_reducer.(item, accumulator))
end
end
}
end
def put(key, initial_value, transducer, reducer) do
final_reducer = Transducer.reducer(
transducer,
fn item, {state, accumulator} ->
{ :cont,
{ state,
Map.put(
accumulator,
key,
reducer.(item, Map.get(accumulator, key, initial_value)))
}
}
end)
%StatefulTransducer{
initial_state: {:cont, Transducer.initial_state(transducer)},
function: fn rf ->
fn
item, {{:halt, _}, _}=accumulator -> rf.(item, accumulator)
item, {{:cont, state}, accumulator} ->
{disposition, {new_state, new_accumulator}} =
final_reducer.(item, {state, accumulator})
rf.(item, {{disposition, new_state}, new_accumulator})
end
end
}
end
end
|
lib/transducer.ex
| 0.903709
| 0.490968
|
transducer.ex
|
starcoder
|
defprotocol PayloadType do
@moduledoc """
This protocol indicates a module that is aware of which parser should be used
to handle its body.
"""
@doc """
This function is passed a packet and it returns the parser that should be used
to parse its body.
"""
@spec payload_parser(any) :: PayloadParser.t
def payload_parser(this_type)
@type t :: any
end
defprotocol PayloadParser do
@moduledoc """
This protocol indicates a module that is aware of how to convert binary data
to a parsed packet.
"""
@doc """
Parses the body of a packet into a new packet (presumably of another protocol)
For example a UDP packet body may contain a DNS packet.
"""
@spec from_data(binary) :: any
def from_data(data)
@type t :: any
end
defimpl String.Chars, for: ExPcap do
@spec to_string(ExPcap.t) :: String.t
def to_string(item) do
"""
PCAP
====
Global Header
--------------
#{item.global_header}
Packets
-------
#{item.packets
|> Enum.map(&String.Chars.to_string/1)
|> Enum.join("\n\n")
}
"""
end
end
defmodule ExPcap do
@moduledoc """
This module represents a pcap file that has been parsed.
"""
defstruct global_header: %ExPcap.GlobalHeader{},
packets: [] # %ExPcap.Packet{}
@type t :: %ExPcap{
global_header: ExPcap.GlobalHeader.t,
packets: [ExPcap.Packet.t]
}
@doc """
Parses the content of the packet according to the parser for this packet type.
Then it recurses until the packet has been parsed completely.
It may return something like an ethernet packet that contains an IPv4 packet
that contains a UDP packet that contains a DNS packet.
"""
@spec parse_packet(ExPcap.PacketData.t, ExPcap.GlobalHeader.t) :: [ExPcap.Packet.t]
def parse_packet(packet_data, global_header) do
global_header
|> PayloadType.payload_parser
|> parse_packet(packet_data, [])
end
@doc """
Parses the content of the packet according to the parser for this packet type.
Then it recurses until the packet has been parsed completely.
It may return something like an ethernet packet that contains an IPv4 packet
that contains a UDP packet that contains a DNS packet.
"""
@spec parse_packet(nil, binary, [ExPcap.Packet.t]) :: [ExPcap.Packet.t]
def parse_packet(nil, _payload, acc) do
Enum.reverse acc
end
@spec parse_packet(ExPcap.Parser.t, binary, [ExPcap.Packet.t]) :: [ExPcap.Packet.t]
def parse_packet(parser, payload, acc) do
next_payload = payload.data |> parser.from_data
PayloadType.payload_parser(next_payload)
|> parse_packet(next_payload, [next_payload | acc])
end
@doc """
Reads a packet from a file. This packet is then parsed and the result is
returned.
"""
@spec read_packet(String.t, ExPcap.GlobalHeader.t, ExPcap.PacketHeader.t) :: ExPcap.Packet.t
def read_packet(f, global_header, packet_header) do
packet_data = f |> ExPcap.PacketData.from_file(global_header, packet_header)
payload = packet_data |> parse_packet(global_header)
%ExPcap.Packet{
packet_header: packet_header,
raw_packet_data: packet_data,
parsed_packet_data: payload
}
end
@doc """
Reads a packet from the file and returns it or returns end of file if there
is no data left to be read.
"""
@spec read_packet(String.t, ExPcap.GlobalHeader.t) :: :eof | ExPcap.Packet.t
def read_packet(f, global_header) do
packet_header = ExPcap.PacketHeader.from_file(f, global_header)
case packet_header do
:eof ->
:eof
_ ->
read_packet(f, global_header, packet_header)
end
end
@doc """
Reads all the packets from a file, parses them and returns a list of the
parsed packets.
"""
@spec read_packets(String.t, ExPcap.GlobalHeader.t, list) :: [ExPcap.Packet.t]
def read_packets(f, global_header, acc \\ []) do
next_packet = read_packet(f, global_header)
case next_packet do
:eof ->
acc
_ ->
read_packets(f, global_header, [next_packet | acc])
end
end
@doc """
Reads a pcap file and returns the parsed results.
"""
@spec read_pcap(String.t) :: ExPcap.t
def read_pcap(f) do
magic_number = f |> ExPcap.MagicNumber.from_file
global_header = f |> ExPcap.GlobalHeader.from_file(magic_number)
%ExPcap{
global_header: global_header,
packets: f |> read_packets(global_header)
}
end
@doc """
Reads a file, parses the pcap contents and returns a list of the parsed
packets.
"""
@spec from_file(String.t) :: ExPcap.t
def from_file(filename) do
File.open!(filename, fn(file) ->
file |> read_pcap
end)
end
end
|
lib/expcap.ex
| 0.827026
| 0.467393
|
expcap.ex
|
starcoder
|
defmodule LimitedQueue do
@moduledoc """
An elixir wrapper for erlang's `:queue`, with a constant-time `size/1` and a maximum capacity.
When items pushed on to the `LimitedQueue` put it over its maximum capacity,
it will drop events according to its `drop_strategy/0`.
"""
@typedoc """
The opaque internal state of the `LimitedQueue`.
"""
@opaque t(value) :: %__MODULE__{
queue: :queue.queue(value),
size: non_neg_integer(),
capacity: pos_integer(),
drop_strategy: drop_strategy()
}
@typedoc """
The `drop_strategy/0` determines how the queue handles dropping events when overloaded.
`:drop_newest` (default) drops incoming events and is the most efficient
because it will avoid touching the state when the queue is overloaded.
`:drop_oldest` drops the oldest events from the queue,
which may be better behavior where newer events are more relevant to process than older ones.
"""
@type drop_strategy :: :drop_newest | :drop_oldest
@enforce_keys [:queue, :size, :capacity, :drop_strategy]
defstruct [:queue, :size, :capacity, :drop_strategy]
@doc """
Create a new `LimitedQueue` with the given maximum capacity.
The `drop_strategy` determines how the queue handles dropping events when overloaded.
See `drop_strategy/0` for more information.
"""
@spec new(capacity :: pos_integer()) :: t(value) when value: any()
@spec new(capacity :: pos_integer(), drop_strategy()) :: t(value) when value: any()
def new(capacity, drop_strategy \\ :drop_newest)
when capacity > 0 and drop_strategy in [:drop_newest, :drop_oldest] do
%__MODULE__{queue: :queue.new(), size: 0, capacity: capacity, drop_strategy: drop_strategy}
end
@doc """
Push a value to the back of the `LimitedQueue`.
If the `LimitedQueue` is full, it will drop an event according to the `LimitedQueue`'s `drop_strategy/0`.
"""
@spec push(t(value), value) :: t(value) when value: any()
def push(
%__MODULE__{capacity: capacity, size: capacity, drop_strategy: :drop_newest} = state,
_element
) do
state
end
def push(
%__MODULE__{capacity: capacity, size: capacity, drop_strategy: :drop_oldest} = state,
element
) do
queue = :queue.drop(state.queue)
queue = :queue.in(element, queue)
%__MODULE__{state | queue: queue}
end
def push(%__MODULE__{} = state, element) do
queue = :queue.in(element, state.queue)
%__MODULE__{
state
| queue: queue,
size: state.size + 1
}
end
@doc """
Push multiple values to the back of the `LimitedQueue`.
Returns the number of values that were dropped if the `LimitedQueue` reaches its capacity.
"""
@spec append(t(value), [value]) :: {t(value), dropped :: non_neg_integer()} when value: any()
def append(
%__MODULE__{capacity: capacity, size: capacity, drop_strategy: :drop_newest} = state,
events
) do
{state, length(events)}
end
def append(%__MODULE__{capacity: capacity, size: capacity} = state, [event]) do
state = push(state, event)
{state, 1}
end
def append(%__MODULE__{} = state, [event]) do
state = push(state, event)
{state, 0}
end
def append(%__MODULE__{} = state, events) do
Enum.reduce(events, {state, 0}, fn value, {state, dropped} ->
dropped =
if state.size == state.capacity do
dropped + 1
else
dropped
end
state = push(state, value)
{state, dropped}
end)
end
@doc """
Remove and return a value from the front of the `LimitedQueue`.
If the `LimitedQueue` is empty, {:error, :empty} will be returned.
"""
@spec pop(t(value)) :: {:ok, t(value), value} | {:error, :empty} when value: any()
def pop(%__MODULE__{} = state) do
case :queue.out(state.queue) do
{{:value, value}, queue} ->
state = %__MODULE__{
state
| queue: queue,
size: state.size - 1
}
{:ok, state, value}
{:empty, _queue} ->
{:error, :empty}
end
end
@doc """
Remove and return multiple values from the front of the `LimitedQueue`.
If the `LimitedQueue` runs out of values, fewer values than the requested amount will be returned.
"""
@spec split(t(value), amount :: non_neg_integer()) :: {t(value), [value]} when value: any()
def split(%__MODULE__{size: 0} = state, amount) when amount >= 0 do
{state, []}
end
def split(%__MODULE__{size: size} = state, amount) when amount >= size do
split = state.queue
state = %__MODULE__{state | queue: :queue.new(), size: 0}
{state, :queue.to_list(split)}
end
def split(%__MODULE__{} = state, amount) when amount > 0 do
{split, queue} = :queue.split(amount, state.queue)
state = %__MODULE__{state | queue: queue, size: state.size - amount}
{state, :queue.to_list(split)}
end
def split(%__MODULE__{} = state, 0) do
{state, []}
end
@doc """
The current number of values stored in the `LimitedQueue`.
"""
@spec size(t(value)) :: non_neg_integer() when value: any()
def size(%__MODULE__{} = state) do
state.size
end
@doc """
The maximum capacity of the `LimitedQueue`.
"""
@spec capacity(t(value)) :: non_neg_integer() when value: any()
def capacity(%__MODULE__{} = state) do
state.capacity
end
@doc """
The contents of the `LimitedQueue` as a list.
"""
@spec to_list(t(value)) :: [value] when value: any()
def to_list(%__MODULE__{} = state) do
:queue.to_list(state.queue)
end
end
|
lib/limited_queue/limited_queue.ex
| 0.913175
| 0.622373
|
limited_queue.ex
|
starcoder
|
defmodule Credo.Code do
@moduledoc """
`Credo.Code` contains a lot of utility or helper functions that deal with the
analysis of - you guessed it - code.
Whenever a function serves a general purpose in this area, e.g. getting the
value of a module attribute inside a given module, we want to extract that
function and put it in the `Credo.Code` namespace, so others can utilize them
without reinventing the wheel.
"""
alias Credo.Code.Charlists
alias Credo.Code.Heredocs
alias Credo.Code.Sigils
alias Credo.Code.Strings
alias Credo.SourceFile
defmodule ParserError do
@moduledoc """
This is an internal `Issue` raised by Credo when it finds itself unable to
parse the source code in a file.
"""
end
@doc """
Prewalks a given `Credo.SourceFile`'s AST or a given AST.
Technically this is just a wrapper around `Macro.prewalk/3`.
"""
def prewalk(ast_or_source_file, fun, accumulator \\ [])
def prewalk(%SourceFile{} = source_file, fun, accumulator) do
source_file
|> SourceFile.ast()
|> prewalk(fun, accumulator)
end
def prewalk(source_ast, fun, accumulator) do
{_, accumulated} = Macro.prewalk(source_ast, accumulator, fun)
accumulated
end
@doc """
Postwalks a given `Credo.SourceFile`'s AST or a given AST.
Technically this is just a wrapper around `Macro.postwalk/3`.
"""
def postwalk(ast_or_source_file, fun, accumulator \\ [])
def postwalk(%SourceFile{} = source_file, fun, accumulator) do
source_file
|> SourceFile.ast()
|> postwalk(fun, accumulator)
end
def postwalk(source_ast, fun, accumulator) do
{_, accumulated} = Macro.postwalk(source_ast, accumulator, fun)
accumulated
end
@doc """
Returns an AST for a given `String` or `Credo.SourceFile`.
"""
def ast(string_or_source_file)
def ast(%SourceFile{filename: filename} = source_file) do
source_file
|> SourceFile.source()
|> ast(filename)
end
@doc false
def ast(source, filename \\ "nofilename") when is_binary(source) do
try do
case Code.string_to_quoted(source, line: 1, columns: true, file: filename) do
{:ok, value} ->
{:ok, value}
{:error, error} ->
{:error, [issue_for(error, filename)]}
end
rescue
e in UnicodeConversionError ->
{:error, [issue_for({1, e.message, nil}, filename)]}
end
end
defp issue_for({line_no, error_message, _}, filename) do
%Credo.Issue{
check: ParserError,
category: :error,
filename: filename,
message: error_message,
line_no: line_no
}
end
@doc """
Converts a String or `Credo.SourceFile` into a List of tuples of `{line_no, line}`.
"""
def to_lines(string_or_source_file)
def to_lines(%SourceFile{} = source_file) do
source_file
|> SourceFile.source()
|> to_lines()
end
def to_lines(source) when is_binary(source) do
source
|> String.split("\n")
|> Enum.with_index()
|> Enum.map(fn {line, i} -> {i + 1, line} end)
end
@doc """
Converts a String or `Credo.SourceFile` into a List of tokens using the `:elixir_tokenizer`.
"""
def to_tokens(string_or_source_file)
def to_tokens(%SourceFile{} = source_file) do
source_file
|> SourceFile.source()
|> to_tokens(source_file.filename)
end
def to_tokens(source, filename \\ "nofilename") when is_binary(source) do
result =
source
|> String.to_charlist()
|> :elixir_tokenizer.tokenize(1, file: filename)
case result do
# Elixir < 1.6
{_, _, _, tokens} ->
tokens
# Elixir >= 1.6
{:ok, tokens} ->
tokens
end
end
@doc """
Returns true if the given `child` AST node is part of the larger
`parent` AST node.
"""
def contains_child?(parent, child) do
Credo.Code.prewalk(parent, &find_child(&1, &2, child), false)
end
defp find_child(parent, acc, child), do: {parent, acc || parent == child}
@doc """
Takes a SourceFile and returns its source code stripped of all Strings and
Sigils.
"""
def clean_charlists_strings_and_sigils(source_file_or_source) do
{_source, filename} = Credo.SourceFile.source_and_filename(source_file_or_source)
source_file_or_source
|> Sigils.replace_with_spaces(" ", " ", filename)
|> Strings.replace_with_spaces(" ", " ", filename)
|> Heredocs.replace_with_spaces(" ", " ", "", filename)
|> Charlists.replace_with_spaces(" ", " ", filename)
end
@doc """
Takes a SourceFile and returns its source code stripped of all Strings, Sigils
and code comments.
"""
def clean_charlists_strings_sigils_and_comments(source_file_or_source, sigil_replacement \\ " ") do
{_source, filename} = Credo.SourceFile.source_and_filename(source_file_or_source)
source_file_or_source
|> Heredocs.replace_with_spaces(" ", " ", "", filename)
|> Sigils.replace_with_spaces(sigil_replacement, " ", filename)
|> Strings.replace_with_spaces(" ", " ", filename)
|> Charlists.replace_with_spaces(" ", " ", filename)
|> String.replace(~r/(\A|[^\?])#.+/, "\\1")
end
@doc """
Returns an AST without its metadata.
"""
def remove_metadata(ast) do
Macro.prewalk(ast, &Macro.update_meta(&1, fn _meta -> [] end))
end
end
|
lib/credo/code.ex
| 0.73029
| 0.490114
|
code.ex
|
starcoder
|
defmodule ExsemanticaPhx.Search do
import Ecto.Query
def interests(qstring, opts, operation \\ :count) do
limit = Keyword.get(opts, :limit)
# Construct the query
query = case Keyword.get(opts, :d0) do
# No date bound
nil -> {:ok, from(int in ExsemanticaPhx.Site.Post, where: like(int.title, ^qstring) and int.is_interest, select: [int.title, int.content, int.inserted_at, int.updated_at, int.poster])}
d0 -> case Keyword.get(opts, :d1) do
# Why would you define a bound that's uneven
nil -> {:error, :unsupported}
# Date bound is fully defined, proceed
d1 -> {:ok, from(int in ExsemanticaPhx.Site.Post, where: like(int.title, ^qstring) and int.is_interest and int.inserted_at > ^d0 and int.inserted_at < ^d1, select: [int.title, int.content, int.inserted_at, int.updated_at])}
end
end
# Use the query?
case query do
{:ok, q} ->
case {operation, limit} do
{:count, _} -> ExsemanticaPhx.Repo.all(q) |> length
{:query, nil} -> ExsemanticaPhx.Repo.all(q)
{:query, _} -> ExsemanticaPhx.Repo.all(q |> limit(^limit))
end
end
end
def users(qstring, opts, operation \\ :count) do
limit = Keyword.get(opts, :limit)
# Construct the query
query = case Keyword.get(opts, :d0) do
# No date bound
nil -> {:ok, from(int in ExsemanticaPhx.Site.User, where: like(int.username, ^qstring), select: [int.username, int.biography, int.inserted_at])}
d0 -> case Keyword.get(opts, :d1) do
# Why would you define a bound that's uneven
nil -> {:error, :unsupported}
# Date bound is fully defined, proceed
d1 -> {:ok, from(int in ExsemanticaPhx.Site.User, where: like(int.username, ^qstring) and int.inserted_at > ^d0 and int.inserted_at < ^d1, select: [int.username, int.biography, int.inserted_at])}
end
end
# Use the query?
case query do
{:ok, q} ->
case {operation, limit} do
{:count, _} -> ExsemanticaPhx.Repo.all(q) |> length
{:query, nil} -> ExsemanticaPhx.Repo.all(q)
{:query, _} -> ExsemanticaPhx.Repo.all(q |> limit(^limit))
end
end
end
def max_id() do
nodes = ExsemanticaPhx.Repo.one(from post in ExsemanticaPhx.Site.Post, select: max(post.node_corresponding))
users = ExsemanticaPhx.Repo.one(from user in ExsemanticaPhx.Site.User, select: max(user.node_corresponding))
[nodes, users] |> Enum.map(fn a ->
case a do
nil -> -1
item -> item
end
end) |> Enum.max()
end
end
|
apps/exsemantica_phx/lib/exsemantica_phx/search.ex
| 0.55917
| 0.519217
|
search.ex
|
starcoder
|
defmodule Ueberauth.Strategy.Okta do
@moduledoc """
Provides an Ueberauth strategy for authenticating with Okta.
## Setup
You'll need to register a new application with Okta and get the `client_id` and `client_secret`. That setup is out of the scope of this library, but some notes to remember are:
* Ensure `Authorization Code` grant type is enabled
* You have valid `Login Redirect Urls` listed for the app that correctly reference your callback route(s)
* `user` or `group` permissions may need to be added to your Okta app before successfully authenticating
Include the provider in your configuration for Ueberauth
config :ueberauth, Ueberauth,
providers: [
okta: { Ueberauth.Strategy.Okta, [] }
]
Then include the configuration for okta.
config :ueberauth, Ueberauth.Strategy.Okta.OAuth,
client_id: System.get_env("OKTA_CLIENT_ID"),
client_secret: System.get_env("OKTA_CLIENT_SECRET"),
site: "https://your-doman.okta.com"
If you haven't already, create a pipeline and setup routes for your callback handler
pipeline :auth do
Ueberauth.plug "/auth"
end
scope "/auth" do
pipe_through [:browser, :auth]
get "/:provider/callback", AuthController, :callback
end
Create an endpoint for the callback where you will handle the `Ueberauth.Auth` struct
defmodule MyApp.AuthController do
use MyApp.Web, :controller
def callback_phase(%{ assigns: %{ ueberauth_failure: fails } } = conn, _params) do
# do things with the failure
end
def callback_phase(%{ assigns: %{ ueberauth_auth: auth } } = conn, params) do
# do things with the auth
end
end
You can edit the behaviour of the Strategy by including some options when you register your provider.
To set the `uid_field`: (Default is `:sub`)
config :ueberauth, Ueberauth,
providers: [
okta: { Ueberauth.Strategy.Okta, [uid_field: :email] }
]
To set the params that will be sent in the OAuth request, use the `oauth2_params` key:
config :ueberauth, Ueberauth,
providers: [
okta: { Ueberauth.Strategy.Okta, [oauth2_params: [scope: "openid email", max_age: 3600]] }
]
See [Okta OAuth2 documentation](https://developer.okta.com/docs/api/resources/oidc#authorize) for list of parameters. _Note that not all parameters are compatible with this flow_
"""
use Ueberauth.Strategy, uid_field: :sub,
oauth2_module: Ueberauth.Strategy.Okta.OAuth,
oauth2_params: [scope: "openid email profile"]
alias Ueberauth.Auth.Info
alias Ueberauth.Auth.Credentials
alias Ueberauth.Auth.Extra
alias Plug.Conn
@doc """
Includes the credentials from the Okta response.
"""
def credentials(conn) do
token = conn.private.okta_token
%Credentials{
token: token.access_token,
refresh_token: token.refresh_token,
expires_at: token.expires_at,
token_type: token.token_type,
expires: !!token.expires_at,
scopes: token.other_params["scope"]
}
end
@doc """
Stores the raw information (including the token) obtained from the Okta callback.
"""
def extra(conn) do
%Extra {
raw_info: %{
token: conn.private.okta_token,
user: conn.private.okta_user
}
}
end
@doc """
Handles the initial redirect to the okta authentication page.
Supports `state` and `redirect_uri` params which are required for Okta /authorize request. These will also be generated if omitted.
`redirect_uri` in Ueberauth.Strategy.Okta.OAuth config will take precedence over value provided here
"""
def handle_request!(conn) do
state = conn.params["state"] || "state-#{Base.encode16(:crypto.strong_rand_bytes(12))}"
redirect_uri = conn.params["redirect_uri"] || callback_url(conn)
params = conn
|> option(:oauth2_params)
|> Keyword.put(:state, state)
module = option(conn, :oauth2_module)
url = apply(module, :authorize_url!, [params, [redirect_uri: redirect_uri]])
redirect!(conn, url)
end
@doc """
Handles the callback from Okta.
When there is a failure from Okta the failure is included in the
`ueberauth_failure` struct. Otherwise the information returned from Okta is returned in the `Ueberauth.Auth` struct.
"""
def handle_callback!(%Conn{params: %{"code" => code}} = conn) do
module = option(conn, :oauth2_module)
case apply(module, :get_token, [[code: code], [redirect_uri: callback_url(conn)]]) do
{:ok, %{token: token}} ->
fetch_user(conn, token)
{:error, %{body: %{"error" => key, "error_description" => message}, status_code: status}} ->
set_errors!(conn, error("#{key} [#{status}]", message))
err ->
set_errors!(conn, error("Unknown Error fetching token", inspect(err)))
end
end
@doc false
def handle_callback!(%Conn{params: %{"error" => key, "error_description" => message}} = conn) do
set_errors!(conn, error(key, message))
end
@doc """
Cleans up the private area of the connection used for passing the raw Okta response around during the callback.
"""
def handle_cleanup!(conn) do
conn
|> put_private(:okta_user, nil)
|> put_private(:okta_token, nil)
end
@doc """
Fetches the fields to populate the info section of the `Ueberauth.Auth` struct.
"""
def info(conn) do
user = conn.private.okta_user
%Info{
name: user["name"],
first_name: user["given_name"],
last_name: user["family_name"],
nickname: user["nickname"],
email: user["email"],
location: user["address"],
phone: user["phone_number"],
urls: %{profile: user["profile"]}
}
end
@doc """
Fetches the uid field from the Okta response. This defaults to the option `uid_field` which in-turn defaults to `sub`
"""
def uid(conn) do
conn
|> option(:uid_field)
|> to_string()
|> fetch_uid(conn)
end
defp fetch_uid(field, conn) do
conn.private.okta_user[field]
end
defp fetch_user(conn, token) do
conn = put_private(conn, :okta_token, token)
with {:ok, %OAuth2.Response{status_code: status, body: body}} <- Ueberauth.Strategy.Okta.OAuth.get_user_info(token),
{200, user} <- {status, body}
do
put_private(conn, :okta_user, user)
else
{:error, %OAuth2.Error{reason: reason}} ->
set_errors!(conn, error("OAuth2", inspect(reason)))
{401, _} ->
set_errors!(conn, error("Okta token [401]", "unauthorized"))
{status, body} when status in 400..599 ->
set_errors!(conn, error("Okta [#{status}]", inspect(body)))
end
end
defp option(conn, key) do
Keyword.get(options(conn), key) || Keyword.get(default_options(), key)
end
end
|
lib/ueberauth/strategy/okta.ex
| 0.7324
| 0.422326
|
okta.ex
|
starcoder
|
defmodule BubblewrapEngine.Board do
require BubblewrapEngine.{Bubble, Coordinate}
alias BubblewrapEngine.{Bubble, Coordinate}
@enforce_keys [:max_players, :players, :popped_bubbles]
defstruct [:max_players, :players, :popped_bubbles]
def new(max_players, starting_players \\ []) do
{:ok, %__MODULE__{ max_players: max_players, players: MapSet.new(starting_players), popped_bubbles: %{} }}
end
def add_player(%__MODULE__{ players: current_players, max_players: max_players } = board, player) do
if is_full?(current_players, max_players) do
{:full, board}
else
{:ok, append_player(board, player)}
end
end
defp is_full?(players, max_players) do
(players |> MapSet.size) >= max_players
end
defp append_player(%__MODULE__{ players: players } = board, player) do
%{ board | players: players |> MapSet.put(player) }
end
def remove_player(%__MODULE__{ players: players } = board, player) do
{:ok, %{ board | players: players |> MapSet.delete(player) }}
end
def pop(%__MODULE__{ popped_bubbles: bubbles } = board, player, %Coordinate{} = coord) do
bubble =
board
|> get_bubble(coord)
|> Bubble.set_owner(player)
{:ok, %{ board | popped_bubbles: bubbles |> Map.put(coord |> to_atom, bubble) }, board |> game_status}
end
def game_status(%__MODULE__{} = board) do
if still_playing?(board) do
:keep_playing
else
:game_over
end
end
def still_playing?(%__MODULE__{} = board) do
Coordinate.board_range
|> Enum.any?(fn row ->
Coordinate.board_range
|> Enum.any?(fn col ->
{:ok, coord} = Coordinate.new(row, col)
with {:ok, %Bubble{ owner_id: nil } = board} <- board |> get_bubble(coord)
do
true
else
{:ok, _bubble} -> false
end
end)
end)
end
def get_bubble(%__MODULE__{ popped_bubbles: bubbles }, coord) do
{:ok, bubbles[coord |> to_atom] || %Bubble{ coordinates: coord}}
end
def to_atom(%Coordinate{ row: row, col: col }) do
"#{row}x#{col}" |> String.to_atom
end
end
|
lib/bubblewrap_engine/board.ex
| 0.754373
| 0.40489
|
board.ex
|
starcoder
|
defmodule Graphvix.Graph do
@moduledoc """
Models a directed graph that can be written to disk and displayed using
[Graphviz](http://www.graphviz.org/) notation.
Graphs are created by
* adding vertices of various formats to a graph
* `add_vertex/3`
* `add_record/2`
* `add_html_record/2`
* connecting them with edges
* `add_edge/4`
* grouping them into subgraphs and clusters,
* `add_subgraph/3`
* `add_cluster/3`
* providing styling to all these elements
* `set_graph_property/3`
* `set_global_properties/3`
They can then be
* written to disk in `.dot` format
* `write/2`
* compiled and displayed in any number of image formats (`Graphvix` defaults to `.png`)
* `compile/3`
* `show/2`
"""
import Graphvix.DotHelpers
alias Graphvix.{HTMLRecord, Record}
defstruct [
digraph: nil,
global_properties: [node: [], edge: []],
graph_properties: [],
subgraphs: []
]
@type digraph :: {:digraph, reference(), reference(), reference(), boolean()}
@type t :: %__MODULE__{
digraph: digraph(),
global_properties: keyword(),
graph_properties: keyword(),
subgraphs: list()
}
@doc """
Creates a new `Graph` struct.
A `Graph` struct consists of an Erlang `digraph` record, a list of subgraphs,
and two keyword lists of properties.
## Examples
iex> graph = Graph.new()
iex> Graph.to_dot(graph)
~S(digraph G {
})
iex> graph = Graph.new(graph: [size: "4x4"], node: [shape: "record"])
iex> Graph.to_dot(graph)
~S(digraph G {
size="4x4"
node [shape="record"]
})
"""
def new(attributes \\ []) do
digraph = :digraph.new()
[_, _, ntab] = _digraph_tables(digraph)
true = :ets.insert(ntab, {:"$sid", 0})
%__MODULE__{
digraph: digraph,
global_properties: [
node: Keyword.get(attributes, :node, []),
edge: Keyword.get(attributes, :edge, [])
],
graph_properties: Keyword.get(attributes, :graph, [])
}
end
@doc """
Destructures the references to the ETS tables for vertices, edges, and
neighbours from the `Graph` struct.
## Examples
iex> graph = Graph.new()
iex> Graph.digraph_tables(graph)
[
#Reference<0.4011094290.3698196484.157076>,
#Reference<0.4011094290.3698196484.157077>,
#Reference<0.4011094290.3698196484.157078>
]
"""
def digraph_tables(%__MODULE__{digraph: graph}), do: _digraph_tables(graph)
defp _digraph_tables({:digraph, vtab, etab, ntab, _}) do
[vtab, etab, ntab]
end
@doc """
Adds a vertex to `graph`.
The vertex's label text is the argument `label`, and additional attributes
can be passed in as `attributes`. It returns a tuple of the updated graph
and the `:digraph`-assigned ID for the new vertex.
## Examples
iex> graph = Graph.new()
iex> {_graph, vid} = Graph.add_vertex(graph, "hello", color: "blue")
iex> vid
[:"$v" | 0]
"""
def add_vertex(graph, label, attributes \\ []) do
next_id = get_and_increment_vertex_id(graph)
attributes = Keyword.put(attributes, :label, label)
vertex_id = [:"$v" | next_id]
vid = :digraph.add_vertex(graph.digraph, vertex_id, attributes)
{graph, vid}
end
@doc """
Add an edge between two vertices in a graph.
It takes 3 required arguments and one optional. The first argument is the graph,
the second two arguments are the tail and head of the edge respectively, and the
fourth, optional, argument is a list of layout attributes to apply to the edge.
The arguments for the ends of the edge can each be either the id of a vertex, or
a tuple of a vertex id and a port name to attach the edge to. This second
option is only valid with `Record` or `HTMLRecord` vertices.
## Examples
iex> graph = Graph.new()
iex> {graph, v1id} = Graph.add_vertex(graph, "start")
iex> {graph, v2id} = Graph.add_vertex(graph, "end")
iex> {_graph, eid} = Graph.add_edge(graph, v1id, v2id, color: "green")
iex> eid
[:"$e" | 0]
"""
def add_edge(graph, out_from, in_to, attributes \\ [])
def add_edge(graph, {id = [:"$v" | _], port}, in_to, attributes) do
add_edge(graph, id, in_to, Keyword.put(attributes, :outport, port))
end
def add_edge(graph, out_from, {id = [:"$v" | _], port}, attributes) do
add_edge(graph, out_from, id, Keyword.put(attributes, :inport, port))
end
def add_edge(graph, out_from, in_to, attributes) do
eid = :digraph.add_edge(graph.digraph, out_from, in_to, attributes)
{graph, eid}
end
@doc """
Group a set of vertices into a subgraph within a graph.
In addition to the graph and the vertex ids, you can pass attributes for
`node` and `edge` to apply common styling to the vertices included
in the subgraph, as well as the edges between two vertices both in the subgraph.
## Examples
iex> graph = Graph.new()
iex> {graph, v1id} = Graph.add_vertex(graph, "start")
iex> {graph, v2id} = Graph.add_vertex(graph, "end")
iex> {_graph, sid} = Graph.add_subgraph(
...> graph, [v1id, v2id],
...> node: [shape: "triangle"],
...> edge: [style: "dotted"]
...> )
iex> sid
"subgraph0"
"""
def add_subgraph(graph, vertex_ids, properties \\ []) do
_add_subgraph(graph, vertex_ids, properties, false)
end
@doc """
Group a set of vertices into a cluster in a graph.
In addition to the graph and the vertex ids, you can pass attributes
for `node` and `edge` to apply common styling to the vertices included
in the cluster, as well as the edges between two vertices both in the cluster.
The difference between a cluster and a subgraph is that a cluster can also
accept attributes to style the cluster, such as a border, background color,
and custom label. These attributes can be passed as top-level attributes in
the final keyword list argument to the function.
## Example
iex> graph = Graph.new()
iex> {graph, v1id} = Graph.add_vertex(graph, "start")
iex> {graph, v2id} = Graph.add_vertex(graph, "end")
iex> {_graph, cid} = Graph.add_cluster(
...> graph, [v1id, v2id],
...> color: "blue", label: "cluster0",
...> node: [shape: "triangle"],
...> edge: [style: "dotted"]
...> )
iex> cid
"cluster0"
In `.dot` notation a cluster is specified, as opposed to a subgraph, by
giving the cluster an ID that begins with `"cluster"` as seen in the example
above. Contrast with `Graphvix.Graph.add_subgraph/3`.
"""
def add_cluster(graph, vertex_ids, properties \\ []) do
_add_subgraph(graph, vertex_ids, properties, true)
end
@doc """
Add a vertex built from a `Graphvix.Record` to the graph.
iex> graph = Graph.new()
iex> record = Record.new(["a", "b", "c"])
iex> {_graph, rid} = Graph.add_record(graph, record)
iex> rid
[:"$v" | 0]
See `Graphvix.Record` for details on `Graphvix.Record.new/2`
and the complete module API.
"""
def add_record(graph, record) do
label = Record.to_label(record)
attributes = Keyword.put(record.properties, :shape, "record")
add_vertex(graph, label, attributes)
end
@doc """
Add a vertex built from a `Graphvix.HTMLRecord` to the graph.
iex> graph = Graph.new()
iex> record = HTMLRecord.new([
...> HTMLRecord.tr([
...> HTMLRecord.td("start"),
...> HTMLRecord.td("middle"),
...> HTMLRecord.td("end"),
...> ])
...> ])
iex> {_graph, rid} = Graph.add_html_record(graph, record)
iex> rid
[:"$v" | 0]
See `Graphvix.HTMLRecord` for details on `Graphvix.HTMLRecord.new/2`
and the complete module API.
"""
def add_html_record(graph, record) do
label = HTMLRecord.to_label(record)
attributes = [shape: "plaintext"]
add_vertex(graph, label, attributes)
end
@doc """
Converts a graph to its representation using `.dot` syntax.
## Example
iex> graph = Graph.new(node: [shape: "triangle"], edge: [color: "green"], graph: [size: "4x4"])
iex> {graph, vid} = Graph.add_vertex(graph, "a")
iex> {graph, vid2} = Graph.add_vertex(graph, "b")
iex> {graph, vid3} = Graph.add_vertex(graph, "c")
iex> {graph, eid} = Graph.add_edge(graph, vid, vid2)
iex> {graph, eid2} = Graph.add_edge(graph, vid, vid3)
iex> {graph, clusterid} = Graph.add_cluster(graph, [vid, vid2])
iex> Graph.to_dot(graph)
~S(digraph G {
size="4x4"
node [shape="triange"]
edge [color="green"]
subgraph cluster0 {
v0 [label="a"]
v1 [label="b"]
v0 -> v1
}
v2 [label="c"]
v1 -> v2
})
For more expressive examples, see the `.ex` and `.dot` files in the `examples/` directory of
Graphvix's source code.
"""
def to_dot(graph) do
[
"digraph G {",
graph_properties_to_dot(graph),
global_properties_to_dot(graph),
subgraphs_to_dot(graph),
vertices_to_dot(graph),
edges_to_dot(graph),
"}"
] |> Enum.reject(&is_nil/1)
|> Enum.join("\n\n")
end
@doc """
Writes a `Graph` to a named file in `.dot` format
```
iex> Graph.write(graph, "my_graph")
```
will write a file named `"my_graph.dot"` to your current working directory.
`filename` works as expected in Elixir. Filenames beginning with `/` define
an absolute path on your file system. Filenames otherwise define a path relative
to your current working directory.
"""
def write(graph, filename) do
File.write(filename <> ".dot", to_dot(graph))
end
@doc """
Writes the graph to a `.dot` file and compiles it to the specified output
format (defaults to `.png`).
The following code creates the files `"graph_one.dot"` and `"graph_one.png"`
in your current working directory.
```
iex> Graph.compile(graph, "graph_one")
```
This code creates the files `"graph_one.dot"` and `"graph_one.pdf"`.
```
iex> Graph.compile(graph, "graph_one", :pdf)
```
`filename` works as expected in Elixir. Filenames beginning with `/` define
an absolute path on your file system. Filenames otherwise define a path relative
to your current working directory.
"""
def compile(graph, filename, format \\ :png) do
:ok = write(graph, filename)
{_, 0} = System.cmd("dot", [
"-T", "#{format}", filename <> ".dot",
"-o", filename <> ".#{format}"
])
:ok
end
@doc """
Write a graph to file, compile it, and open the resulting image in your
system's default image viewer.
The following code will write the contents of `graph` to `"graph_one.dot"`,
compile the file to `"graph_one.png"` and open it.
```
iex> Graph.show(graph, "graph_one")
```
`filename` works as expected in Elixir. Filenames beginning with `/` define
an absolute path on your file system. Filenames otherwise define a path relative
to your current working directory.
"""
def show(graph, filename) do
:ok = write(graph, filename <> ".dot")
:ok = compile(graph, filename)
{_, 0} = System.cmd("open", [filename <> ".png"])
:ok
end
@doc """
Adds a top-level graph property.
These attributes affect the overall layout of the graph at a high level.
Use `set_global_properties/3` to modify the global styling for vertices
and edges.
## Example
iex> graph = Graph.new()
iex> graph.graph_properties
[]
iex> graph = Graph.set_graph_property(graph, :rank_direction, "RL")
iex> graph.graph_properties
[
rank_direction: "RL"
]
"""
def set_graph_property(graph, key, value) do
new_properties = Keyword.put(graph.graph_properties, key, value)
%{graph | graph_properties: new_properties}
end
@doc """
Sets a property for a vertex or edge that will apply to all vertices or edges
in the graph.
*NB* `:digraph` uses `vertex` to define the discrete points in
a graph that are connected via edges, while Graphviz and DOT use the word
`node`. `Graphvix` attempts to use "vertex" when the context is constructing
the data for the graph, and "node" in the context of formatting and printing
the graph.
## Example
```
iex> graph = Graph.new()
iex> {graph, vid} = Graph.add_vertex(graph, "label")
iex> graph = Graph.set_global_property(graph, :node, shape: "triangle")
```
When the graph is drawn, the vertex whose id is `vid`, and any other vertices
added to the graph, will have a triangle shape.
Global properties are overwritten by properties added by a subgraph or cluster:
```
{graph, subgraph_id} = Graph.add_subgraph(graph, [vid], shape: "hexagon")
```
Now when the graph is drawn the vertex `vid` will have a hexagon shape.
Properties written directly to a vertex or edge have the highest priority
of all. The vertex created below will have a circle shape despite the global
property set on `graph`.
```
{graph, vid2} = Graph.add_vertex(graph, "this is a circle!")
```
"""
def set_global_properties(graph, attr_for, attrs \\ []) do
Enum.reduce(attrs, graph, fn {k, v}, g ->
_set_global_property(g, attr_for, [{k, v}])
end)
end
## PRIVATE
defp _set_global_property(graph, attr_for, [{key, value}]) do
properties = Keyword.get(graph.global_properties, attr_for)
new_props = Keyword.put(properties, key, value)
new_properties = Keyword.put(graph.global_properties, attr_for, new_props)
%{graph | global_properties: new_properties}
end
defp subgraphs_to_dot(graph) do
case graph.subgraphs do
[] -> nil
subgraphs ->
subgraphs
|> Enum.map(&Graphvix.Subgraph.to_dot(&1, graph))
|> Enum.join("\n\n")
end
end
defp vertices_to_dot(graph) do
[vtab, _, _] = digraph_tables(graph)
elements_to_dot(vtab, fn {vid = [_ | id], attributes} ->
case in_a_subgraph?(vid, graph) do
true -> nil
false ->
[
"v#{id}",
attributes_to_dot(attributes)
] |> compact() |> Enum.join(" ") |> indent()
end
end)
end
defp edge_side_with_port(v_id, nil), do: "v#{v_id}"
defp edge_side_with_port(v_id, port), do: "v#{v_id}:#{port}"
defp edges_to_dot(graph) do
[_, etab, _] = digraph_tables(graph)
elements_to_dot(etab, fn edge = {_, [:"$v" | v1], [:"$v" | v2], attributes} ->
case edge in edges_contained_in_subgraphs(graph) do
true -> nil
false ->
v_out = edge_side_with_port(v1, Keyword.get(attributes, :outport))
v_in = edge_side_with_port(v2, Keyword.get(attributes, :inport))
attributes = attributes |> Keyword.delete(:outport) |> Keyword.delete(:inport)
["#{v_out} -> #{v_in}",
attributes_to_dot(attributes)
] |> compact() |> Enum.join(" ") |> indent()
end
end)
end
defp get_and_increment_vertex_id(graph) do
[_, _, ntab] = digraph_tables(graph)
[{:"$vid", next_id}] = :ets.lookup(ntab, :"$vid")
true = :ets.delete(ntab, :"$vid")
true = :ets.insert(ntab, {:"$vid", next_id + 1})
next_id
end
defp get_and_increment_subgraph_id(graph) do
[_, _, ntab] = digraph_tables(graph)
[{:"$sid", next_id}] = :ets.lookup(ntab, :"$sid")
true = :ets.delete(ntab, :"$sid")
true = :ets.insert(ntab, {:"$sid", next_id + 1})
next_id
end
defp in_a_subgraph?(vertex_id, graph) do
vertex_id in vertex_ids_in_subgraphs(graph)
end
defp vertex_ids_in_subgraphs(%__MODULE__{subgraphs: subgraphs}) do
Enum.reduce(subgraphs, [], fn c, acc ->
acc ++ c.vertex_ids
end)
end
defp edges_contained_in_subgraphs(graph = %__MODULE__{subgraphs: subgraphs}) do
[_, etab, _] = digraph_tables(graph)
edges = :ets.tab2list(etab)
Enum.filter(edges, fn {_, vid1, vid2, _} ->
Enum.any?(subgraphs, fn subgraph ->
Graphvix.Subgraph.both_vertices_in_subgraph?(subgraph.vertex_ids, vid1, vid2)
end)
end)
end
defp graph_properties_to_dot(%{graph_properties: []}), do: nil
defp graph_properties_to_dot(%{graph_properties: properties}) do
properties
|> Enum.map(fn {k, v} ->
attribute_to_dot(k, v)
end)
|> Enum.join("\n") |> indent
end
defp _add_subgraph(graph, vertex_ids, properties, is_cluster) do
next_id = get_and_increment_subgraph_id(graph)
subgraph = Graphvix.Subgraph.new(next_id, vertex_ids, is_cluster, properties)
new_graph = %{graph | subgraphs: graph.subgraphs ++ [subgraph]}
{new_graph, subgraph.id}
end
end
|
lib/graphvix/graph.ex
| 0.943034
| 0.670202
|
graph.ex
|
starcoder
|
defmodule Mix.Tasks.GitHooks.Run do
@shortdoc "Runs all the configured mix tasks for a given git hook."
@moduledoc """
Runs all the configured mix tasks for a given git hook.
Any [git hook](https://git-scm.com/docs/githooks) is supported.
## Examples
You can run any hook by running `mix git_hooks.run hook_name`. For example:
```elixir
mix git_hooks.run pre_commit
```
You can also all the hooks which are configured with `mix git_hooks.run all`.
"""
use Mix.Task
alias GitHooks.Config
alias GitHooks.Printer
@typedoc """
Run options:
* `include_hook_args`: Whether the git hook args should be sent to the
command to be executed. In case of `true`, the args will be amended to the
command. Defaults to `false`.
"""
@type run_opts :: [
{:include_hook_args, String.t()},
{:env, list({String.t(), binary})}
]
@doc """
Runs a task for a given git hook.
The task can be one of three different types:
* `{:cmd, "command arg1 arg2"}`: Runs a command.
* `{:file, "path_to_file"}`: Runs an executable file.
* `"command arg1 arg2"`: Runs a simple command, supports no options.
The first two options above can use a third element in the tuple, see
[here](`t:run_opts/0`) more info about the options.
"""
@impl true
@spec run(list(String.t())) :: :ok | no_return()
def run([]), do: error_exit()
def run(args) do
{[git_hook_name], args} = Enum.split(args, 1)
git_hook_name
|> get_atom_from_arg!()
|> check_is_valid_git_hook!()
|> maybe_run_git_hook!(args)
end
@spec maybe_run_git_hook!(GitHooks.git_hook_type(), args :: list(map)) :: :ok | no_return()
defp maybe_run_git_hook!(git_hook_type, args) do
if Config.current_branch_allowed?(git_hook_type) do
git_hook_type
|> Printer.info("Running hooks for ", append_first_arg: true)
|> Config.tasks()
|> run_tasks(args)
else
Printer.info("skipping git_hooks for #{Config.current_branch()} branch")
end
end
@spec run_tasks({atom, list(GitHooks.allowed_configs())}, GitHooks.git_hook_args()) ::
:ok | no_return
defp run_tasks({git_hook_type, tasks}, git_hook_args) do
Enum.each(tasks, &run_task(&1, git_hook_type, git_hook_args))
end
@spec run_task(GitHooks.allowed_configs(), GitHooks.git_hook_type(), GitHooks.git_hook_args()) ::
:ok | no_return
defp run_task(task_config, git_hook_type, git_hook_args) do
task_config
|> GitHooks.new_task(git_hook_type, git_hook_args)
|> GitHooks.Task.run()
|> GitHooks.Task.print_result()
|> GitHooks.Task.success?()
|> exit_if_failed()
end
@spec get_atom_from_arg!(String.t()) :: atom | no_return
defp get_atom_from_arg!(git_hook_type_arg) do
case git_hook_type_arg do
nil ->
Printer.error("You should provide a git hook type to run")
error_exit()
git_hook_type ->
git_hook_type
|> Recase.to_snake()
|> String.to_atom()
end
end
@spec check_is_valid_git_hook!(atom) :: atom | no_return
defp check_is_valid_git_hook!(git_hook_type) do
unless Enum.any?(Config.supported_hooks(), &(&1 == git_hook_type)) do
Printer.error("Invalid or unsupported hook `#{git_hook_type}`")
Printer.warn("Supported hooks are: #{inspect(Config.supported_hooks())}")
error_exit()
end
git_hook_type
end
@spec exit_if_failed(is_success :: boolean) :: :ok | no_return
defp exit_if_failed(true), do: :ok
defp exit_if_failed(false), do: error_exit()
@spec error_exit(term) :: no_return
@dialyzer {:no_return, error_exit: 0}
defp error_exit(error_code \\ {:shutdown, 1}), do: exit(error_code)
end
|
lib/mix/tasks/git_hooks/run.ex
| 0.843493
| 0.818193
|
run.ex
|
starcoder
|
defmodule Elixium.Validator do
alias Elixium.Block
alias Elixium.Utilities
alias Elixium.KeyPair
alias Elixium.Store.Ledger
alias Elixium.Store.Utxo
alias Decimal, as: D
@moduledoc """
Responsible for implementing the consensus rules to all blocks and transactions
"""
@doc """
A block is considered valid if the index is greater than the index of the previous block,
the previous_hash is equal to the hash of the previous block, and the hash of the block,
when recalculated, is the same as what the listed block hash is
"""
@spec is_block_valid?(Block, number) :: :ok | {:error, any}
def is_block_valid?(%{index: 0} = block, difficulty) do
valid_hash?(block, difficulty)
end
def is_block_valid?(block, difficulty, last_block \\ Ledger.last_block(), pool_check \\ &Utxo.in_pool?/1) do
with :ok <- valid_index(block.index, last_block.index),
:ok <- valid_prev_hash?(block.previous_hash, last_block.hash),
:ok <- valid_hash?(block, difficulty),
:ok <- valid_coinbase?(block),
:ok <- valid_transactions?(block, pool_check) do
:ok
else
err -> err
end
end
@spec valid_index(number, number) :: :ok | {:error, {:invalid_index, number, number}}
defp valid_index(index, prev_index) when index > prev_index, do: :ok
defp valid_index(idx, prev), do: {:error, {:invalid_index, prev, idx}}
@spec valid_prev_hash?(String.t(), String.t()) :: :ok | {:error, {:wrong_hash, {:doesnt_match_last, String.t(), String.t()}}}
defp valid_prev_hash?(prev_hash, last_block_hash) when prev_hash == last_block_hash, do: :ok
defp valid_prev_hash?(phash, lbhash), do: {:error, {:wrong_hash, {:doesnt_match_last, phash, lbhash}}}
@spec valid_hash?(Block, number) :: :ok | {:error, {:wrong_hash, {:too_high, String.t(), number}}}
defp valid_hash?(b, difficulty) do
with :ok <- compare_hash({b.index, b.version, b.previous_hash, b.timestamp, b.nonce, b.merkle_root}, b.hash),
:ok <- beat_target?(b.hash, b.difficulty) do
:ok
else
err -> err
end
end
defp beat_target?(hash, difficulty) do
if Block.hash_beat_target?(%{hash: hash, difficulty: difficulty}) do
:ok
else
{:error, {:wrong_hash, {:too_high, hash, difficulty}}}
end
end
@spec compare_hash({number, number, String.t(), String.t(), number, String.t()}, String.t()) ::
:ok | {:error, {:wrong_hash, {:doesnt_match_provided, String.t(), String.t()}}}
defp compare_hash({index, version, previous_hash, timestamp, nonce, merkle_root}, hash) do
computed =
[Integer.to_string(index), Integer.to_string(version), previous_hash, timestamp, Integer.to_string(nonce), merkle_root]
|> Utilities.sha3_base16()
if computed == hash do
:ok
else
{:error, {:wrong_hash, {:doesnt_match_provided, computed, hash}}}
end
end
@spec valid_coinbase?(Block) :: :ok | {:error, :no_coinbase}
def valid_coinbase?(%{transactions: transactions, index: block_index}) do
coinbase = hd(transactions)
with :ok <- coinbase_exist?(coinbase),
:ok <- is_coinbase?(coinbase),
:ok <- appropriate_coinbase_output?(transactions, block_index) do
:ok
else
err -> err
end
end
def coinbase_exist?(nil), do: {:error, :no_coinbase}
def coinbase_exist?(_coinbase), do: :ok
@doc """
Checks if a transaction is valid. A transaction is considered valid if
1) all of its inputs are currently in our UTXO pool and 2) all of its inputs
have a valid signature, signed by the owner of the private key associated to
the input (the addr). pool_check is a function which tests whether or not a
given input is in a pool (this is mostly used in the case of a fork), and
this function must return a boolean.
"""
@spec valid_transaction?(Transaction, function) :: boolean
def valid_transaction?(%{inputs: inputs}, pool_check \\ &Utxo.in_pool?/1) do
inputs
|> Enum.map(fn input ->
# Ensure that this input is in our UTXO pool
if pool_check.(input) do
{:ok, pub} = Base.decode16(input.addr)
{:ok, sig} = Base.decode16(input.signature)
# Check if this UTXO has a valid signature
KeyPair.verify_signature(pub, sig, input.txoid)
else
false
end
end)
|> Enum.all?()
end
@spec valid_transactions?(Block, function) :: :ok | {:error, :invalid_inputs}
def valid_transactions?(%{transactions: transactions}, pool_check \\ &Utxo.in_pool?/1) do
if Enum.all?(transactions, &valid_transaction?(&1, pool_check)), do: :ok, else: {:error, :invalid_inputs}
end
@spec is_coinbase?(Transaction) :: :ok | {:error, {:not_coinbase, String.t()}}
defp is_coinbase?(%{txtype: "COINBASE"}), do: :ok
defp is_coinbase?(tx), do: {:error, {:not_coinbase, tx.txtype}}
@spec appropriate_coinbase_output?(list, number) :: :ok | {:error, :invalid_coinbase}
defp appropriate_coinbase_output?([coinbase | transactions], block_index) do
total_fees = Block.total_block_fees(transactions)
reward = Block.calculate_block_reward(block_index)
amount = hd(coinbase.outputs).amount
if D.equal?(D.add(total_fees, reward), amount) do
:ok
else
{:error, {:invalid_coinbase, total_fees, reward, amount}}
end
end
end
|
lib/validator.ex
| 0.727201
| 0.458712
|
validator.ex
|
starcoder
|
defmodule Snitch.Seed.Taxonomy do
@moduledoc """
Seeds basic taxonomy.
"""
alias Snitch.Data.Schema.Taxonomy
alias Snitch.Core.Tools.MultiTenancy.Repo
alias Snitch.Tools.Helper.Taxonomy, as: TaxonomyHelper
@product_category {
"Pets",
[
{"Dog",
[
{"Food",
[
{"Dry Food", []},
{"Wet Food", []},
{"Prescription Food", []},
{"Freeze-Dried Food", []},
{"Human-Grade", []},
{"Dehydrated Food", []},
{"Frozen Food", []},
{"Food Toppings", []}
]},
{"Treat",
[
{"Dental & Hard Chews", []},
{"Soft & Chewy Treats", []},
{"Biscuits & Crunchy Treats", []},
{"Bully Sticks & Natural Chews ", []},
{"Jerky Treats", []},
{"Prescription Treats", []},
{"Freeze-Dried Treats", []},
{"Training Treats", []},
{"Dehydrated Treats", []}
]},
{"Toys", []},
{"Healthcare", []},
{"Dental Care", []},
{"Vitamins & Suppliments", []},
{"Cleaning & Potty", []},
{"Creates, Pens and Gates", []},
{"Beds & Mats", []},
{"Carriers & Travel", []},
{"Bowls and Feeders", []},
{"Grooming", []},
{"Flea & Trek", []},
{"Leashes & Collars", []},
{"Training & Behaviour", []},
{"Clothing and Accessories", []},
{"Gifts and Books", []},
{"Technology", []}
]},
{"Cat",
[
{"Food", []},
{"Treat", []},
{"Toys", []},
{"Healthcare", []},
{"Dental Care", []},
{"Vitamins & Suppliments", []},
{"Flea & Trick", []},
{"Training & Cleaning", []},
{"Crates, Pens and Gates", []},
{"Beds & Mats", []},
{"Trees, Condos & Scratchers", []},
{"Carriers & Travel", []},
{"Bowls & Feeders", []},
{"Grooming", []},
{"Leashes & Collars", []},
{"Gifts and Books", []},
{"Clothing and Accessories", []}
]},
{"Fish",
[
{"Food & Treats", []},
{"Aquariums & Starter Kits", []},
{"Heathing and Lighting", []},
{"Water Care", []},
{"Decor & Accessories", []},
{"Filters & Media", []},
{"Cleaning & Maintainance", []},
{"Health & Wellness", []},
{"Gifts & Books", []},
{"New Arrivals", []}
]},
{"Bird",
[
{"Food", []},
{"Treats", []},
{"Cages and Accessories", []},
{"Litter & Nesting", []},
{"Perches & Toys", []},
{"Grooming & Health", []},
{"Gifts & Books", []},
{"New Arrivals", []}
]},
{"Small Pet",
[
{"Food & Treats", []},
{"Habitats & Accessories", []},
{"Bedding and Litter", []},
{"Beds, Hideouts & Toys", []},
{"Harnesses & Health", []},
{"Grooming & Health", []},
{"New Arrivals", []}
]},
{"Reptile",
[
{"Food & Treats", []},
{"Terrariums & Habitats", []},
{"Habitat Accessories", []},
{"Heating & Lighting", []},
{"Cleaning & Maintainance", []},
{"Substrate & Bedding", []},
{"Health & Wellness", []},
{"New Arrivals", []}
]},
{"Horse",
[
{"Health & Wellness", []},
{"Grooming", []},
{"Tack & Stable Supplies", []},
{"Toys", []},
{"Food & Treats", []},
{"Gifts & Books", []},
{"New Arrivals", []}
]}
]
}
@brands {
"Brands",
[
{"A Pet Hub", []},
{"AA Aquarium", []},
{"Absorbine", []},
{"B Air", []},
{"Banixx", []},
{"Cadet", []},
{"Calm Paws", []},
{"Danner", []},
{"Ecovet", []},
{"Inaba", []},
{"Max-Bone", []},
{"Neko Chan", []}
]
}
def seed do
Repo.delete_all(Taxonomy)
TaxonomyHelper.create_taxonomy(@product_category)
TaxonomyHelper.create_taxonomy(@brands)
end
end
|
apps/snitch_core/priv/repo/seed/taxonomy.ex
| 0.524395
| 0.532851
|
taxonomy.ex
|
starcoder
|
defmodule BB84 do
@bases1 {
# |0>
Qubit.new(),
# |1>
Qubit.new() |> Qubit.qnot()
}
@bases2 {
# | + >
Qubit.new() |> Qubit.hadamard(),
# | - >
Qubit.new() |> Qubit.qnot() |> Qubit.hadamard()
}
@doc """
Encodes a list `bits` of 0s and 1s as a list of qubit, using a second list `bases`
"""
def encode(bits, bases) when is_list(bits) and is_list(bases) do
Enum.zip(bits, bases)
|> Enum.map(&encode_qubit/1)
end
@doc """
Decodes a list of qubits `qubits` using a second list `bases`
"""
def decode(qubits, bases) when is_list(qubits) and is_list(bases) do
Enum.zip(qubits, bases)
|> Enum.map(&decode_qubit/1)
end
@doc """
Measures a list `qubits` of qubits using the bases in `bases`
"""
def measure(qubits, bases) when is_list(qubits) and is_list(bases) do
Enum.zip(qubits, bases)
|> Enum.map(&measure_qubit/1)
end
@doc """
Keeps only the qubits (or bits) in `qubits` measured with the same bases (`bases1` and `bases2` are the two bases' lists)
"""
def discard_different_bases(qubits, bases1, bases2) do
Enum.zip([qubits, bases1, bases2])
|> Enum.filter(fn {_, b1, b2} -> b1 == b2 end)
|> Enum.map(&elem(&1, 0))
end
@doc """
It returns a tuple of two subsets of `bits`:
- The first one contains `n` bits and they are the check bits, chosen at random
- The second one are the remaining bits
For example:
iex> BB84.partition_check_bits([0, 1, 1, 0, 0, 1, 0, 0, 0], 4)
{[0, 1, nil, nil, nil, 1, nil, nil, 0], [1, 0, 0, 0, 0]}
"""
def partition_check_bits(bits, n) do
indexes = 0..(length(bits) - 1)
check_indexes =
indexes
|> Enum.take_random(n)
|> Enum.into(MapSet.new())
check_bits =
bits
|> Enum.zip(indexes)
|> Enum.map(fn {b, i} ->
if MapSet.member?(check_indexes, i) do
b
else
nil
end
end)
remaining_bits =
bits
|> Enum.zip(check_bits)
|> Enum.filter(fn {_, cb} -> cb == nil end)
|> Enum.map(&elem(&1, 0))
{check_bits, remaining_bits}
end
defp encode_qubit({0, 0}), do: elem(@bases1, 0)
defp encode_qubit({1, 0}), do: elem(@bases1, 1)
defp encode_qubit({0, 1}), do: elem(@bases2, 0)
defp encode_qubit({1, 1}), do: elem(@bases2, 1)
defp measure_qubit({q, 0}), do: Qubit.measure(q, @bases1)
defp measure_qubit({q, 1}), do: Qubit.measure(q, @bases2)
defp decode_qubit({q, 0}), do: Qubit.to_bit(q, @bases1)
defp decode_qubit({q, 1}), do: Qubit.to_bit(q, @bases2)
end
|
lib/bb84.ex
| 0.902295
| 0.719507
|
bb84.ex
|
starcoder
|
defmodule WebSpell do
@moduledoc """
For a tutorial on how to use WebSpell, see the [README on Github](https://github.com/langalex/web_spell/blob/master/README.md).
"""
@doc """
Add WebSpell to your http client stub module by calling `use WebSpell`.
This adds the following methods to your module:
* `start_link` - call this method in your test before setting up any stubs
* `stub_request` - call this in your test to set up a stubbed http request/response
* `call_stubbed_request` - forward any http calls in your http client module to this method
* `received_request` - use this function in test assertions, i.e. assert MyClient.received_request(…)
* `received_no_request` - use this function in test assertions, i.e. assert MyClient.received_no_request(…)
"""
defmacro __using__(opts) do
server_name = opts[:server_name] || :web_spell
quote do
use GenServer
def start_link do
{:ok, pid} = GenServer.start_link(__MODULE__, :ok, [])
Process.register(pid, unquote(server_name))
end
def stub_request(%WebSpell.Request{} = request, %WebSpell.Response{} = response) do
server = Process.whereis(unquote(server_name))
GenServer.cast(server, {:stub_request, request, response})
end
def call_stubbed_request!(%WebSpell.Request{} = request) do
server = Process.whereis(unquote(server_name))
case GenServer.call(server, {:call_stubbed_request, request}) do
:missing_stub -> raise("missing stub for #{inspect(request)}")
response -> response
end
end
def received_request(%WebSpell.Request{} = request, timeout \\ nil) do
recorded_request = find_recorded_request(request)
if recorded_request do
recorded_request
else
if timeout && timeout > 0 do
:timer.sleep(10)
received_request(request, timeout - 10)
else
IO.puts("\nExpected request #{inspect(request)} to have been made but wasn't.")
print_recorded_requests()
nil
end
end
end
def received_no_request(%WebSpell.Request{} = request) do
recorded_request = find_recorded_request(request)
if recorded_request do
IO.puts("\nExpected request #{inspect(request)} to not have been made but was.")
print_recorded_requests()
nil
else
true
end
end
# genserver
def init(:ok) do
{:ok, %{request_stubs: [], recorded_requests: []}}
end
def handle_cast({:stub_request, request, response}, %{
request_stubs: request_stubs,
recorded_requests: recorded_requests
}) do
{
:noreply,
%{
request_stubs:
request_stubs ++ [%WebSpell.RequestStub{request: request, response: response}],
recorded_requests: recorded_requests
}
}
end
def handle_call({:call_stubbed_request, request}, _from, %{
request_stubs: request_stubs,
recorded_requests: recorded_requests = state
}) do
stub =
request_stubs
|> Enum.filter(fn stub -> WebSpell.Request.match?(stub.request, request) end)
|> Enum.at(-1)
if stub do
{
:reply,
stub.response,
%{request_stubs: request_stubs, recorded_requests: recorded_requests ++ [request]}
}
else
IO.puts("\nNo stub found for request #{inspect(request)}.")
IO.puts("Stubbed requests:")
for stub <- request_stubs do
IO.inspect(stub.request)
end
{:reply, :missing_stub, state}
end
end
def handle_call(
{:fetch_recorded_requests},
_from,
%{recorded_requests: recorded_requests} = state
) do
{:reply, recorded_requests, state}
end
# /genserver
defp print_recorded_requests do
IO.puts("Requests made:")
for request <- recorded_requests() do
IO.inspect(request)
end
end
defp find_recorded_request(request) do
Enum.find(recorded_requests(), fn recorded_request ->
WebSpell.Request.match?(request, recorded_request)
end)
end
defp recorded_requests do
server = Process.whereis(unquote(server_name))
GenServer.call(server, {:fetch_recorded_requests})
end
end
end
end
|
lib/web_spell.ex
| 0.641759
| 0.44571
|
web_spell.ex
|
starcoder
|
defmodule Core.Ecto.TimestampRange do
@moduledoc false
@behaviour Ecto.Type
defstruct [:lower, :upper, lower_inclusive: true, upper_inclusive: true]
@type t :: %__MODULE__{
lower: boundary_t(),
upper: boundary_t(),
lower_inclusive: boolean(),
upper_inclusive: boolean()
}
@type boundary_t :: DateTime.t() | nil
@match_regex ~r{^([^/]+)/([^/]+)$}
@open_boundary ".."
@interval_designator "/"
@spec new(boundary_t(), boundary_t(), Keyword.t()) :: {:ok, t} | {:error, atom}
def new(lower, upper, opts \\ []) do
with {:ok, lower} <- cast_boundary(lower),
{:ok, upper} <- cast_boundary(upper),
:ok <- check_boundaries(lower, upper) do
fields =
opts
|> Keyword.take([:lower_inclusive, :upper_inclusive])
|> Keyword.merge(lower: lower, upper: upper)
{:ok, struct(__MODULE__, fields)}
end
end
defp cast_boundary(%DateTime{} = boundary), do: {:ok, boundary}
defp cast_boundary(nil), do: {:ok, nil}
defp cast_boundary(_), do: {:error, :invalid_boundaries}
def check_boundaries(%DateTime{calendar: calendar} = lower, %DateTime{calendar: calendar} = upper) do
case DateTime.compare(lower, upper) do
:gt -> {:error, :invalid_boundaries}
_ -> :ok
end
end
def check_boundaries(%DateTime{}, %DateTime{}), do: {:error, :calendars_mismatch}
def check_boundaries(nil, nil), do: {:error, :invalid_boundaries}
def check_boundaries(_, _), do: :ok
@spec from_iso8601(String.t()) :: {:ok, t} | {:error, atom}
def from_iso8601(string) do
with {:ok, lower, upper} <- raw_from_iso8601(string),
{:ok, lower} <- boundary_from_string(lower),
{:ok, upper} <- boundary_from_string(upper) do
new(lower, upper)
end
end
defp raw_from_iso8601(string) do
case Regex.run(@match_regex, string, capture: :all_but_first) do
[lower, upper] -> {:ok, lower, upper}
_ -> {:error, :invalid_format}
end
end
defp boundary_from_string(@open_boundary), do: {:ok, nil}
defp boundary_from_string(string) do
with {:ok, datetime, _} <- DateTime.from_iso8601(string) do
{:ok, datetime}
end
end
@spec from_iso8601!(String.t()) :: t
def from_iso8601!(string) do
case from_iso8601(string) do
{:ok, value} ->
value
{:error, reason} ->
raise ArgumentError,
"cannot parse #{inspect(string)} as datetime interval, reason: #{inspect(reason)}"
end
end
@spec to_iso8601(t, :extended | :basic) :: String.t()
def to_iso8601(%__MODULE__{lower: lower, upper: upper}, format \\ :extended) do
boundary_to_string(lower, format) <> @interval_designator <> boundary_to_string(upper, format)
end
defp boundary_to_string(nil, _), do: @open_boundary
defp boundary_to_string(datetime, format), do: DateTime.to_iso8601(datetime, format)
@impl Ecto.Type
def type, do: :tsrange
@impl Ecto.Type
def cast(term)
def cast(%__MODULE__{} = range), do: {:ok, range}
def cast(_), do: :error
@impl Ecto.Type
def load(term)
def load(%Postgrex.Range{
lower: lower,
upper: upper,
lower_inclusive: lower_inclusive,
upper_inclusive: upper_inclusive
}) do
{:ok,
%__MODULE__{
lower: lower,
upper: upper,
lower_inclusive: lower_inclusive,
upper_inclusive: upper_inclusive
}}
end
def load(_), do: :error
@impl Ecto.Type
def dump(term)
def dump(%__MODULE__{
lower: lower,
upper: upper,
lower_inclusive: lower_inclusive,
upper_inclusive: upper_inclusive
}) do
{:ok,
%Postgrex.Range{
lower: lower,
upper: upper,
lower_inclusive: lower_inclusive,
upper_inclusive: upper_inclusive
}}
end
def dump(_), do: :error
end
|
apps/core/lib/core/ecto/timestamp_range.ex
| 0.883594
| 0.470554
|
timestamp_range.ex
|
starcoder
|
defmodule Protocol.Consolidation do
@moduledoc """
Module responsible for consolidating protocols and helpers for
extracting protocols and implementations from code paths for
consolidation.
"""
@doc """
Extract all protocols from the given paths.
The paths can be either a char list or a string. Internally
they are worked on as char lists, so passing them as lists
avoid extra conversion.
## Examples
# Get Elixir's ebin and retrieve all protocols
iex> path = :code.lib_dir(:elixir, :ebin)
iex> mods = Protocol.Consolidation.extract_protocols([path])
iex> Enumerable in mods
true
"""
@spec extract_protocols([char_list | String.t]) :: [atom]
def extract_protocols(paths) do
extract_matching_by_attribute paths, 'Elixir.',
fn module, attributes ->
case attributes[:protocol] do
[fallback_to_any: _, consolidated: _] -> module
_ -> nil
end
end
end
@doc """
Extract all types implemented for the given protocol from
the given paths.
The paths can be either a char list or a string. Internally
they are worked on as char lists, so passing them as lists
avoid extra conversion.
## Examples
# Get Elixir's ebin and retrieve all protocols
iex> path = :code.lib_dir(:elixir, :ebin)
iex> mods = Protocol.Consolidation.extract_impls(Enumerable, [path])
iex> List in mods
true
"""
@spec extract_impls(module, [char_list | String.t]) :: [atom]
def extract_impls(protocol, paths) when is_atom(protocol) do
prefix = atom_to_list(protocol) ++ '.'
extract_matching_by_attribute paths, prefix, fn
_mod, attributes ->
case attributes[:impl] do
[protocol: ^protocol, for: for] -> for
_ -> nil
end
end
end
defp extract_matching_by_attribute(paths, prefix, callback) do
lc path inlist paths,
file inlist list_dir(path),
mod = extract_from_file(path, file, prefix, callback),
do: mod
end
defp list_dir(path) when is_list(path) do
case :file.list_dir(path) do
{ :ok, files } -> files
_ -> []
end
end
defp list_dir(path), do: list_dir(to_char_list(path))
defp extract_from_file(path, file, prefix, callback) do
if :lists.prefix(prefix, file) and Path.extname(file) == '.beam' do
extract_from_beam(Path.join(path, file), callback)
end
end
defp extract_from_beam(file, callback) do
case :beam_lib.chunks(file, [:attributes]) do
{:ok, { module, [attributes: attributes] } } ->
callback.(module, attributes)
_ ->
nil
end
end
defmacrop if_ok(expr, call) do
quote do
case unquote(expr) do
{ :ok, var } -> unquote(Macro.pipe(quote(do: var), call))
other -> other
end
end
end
@doc """
Receives a protocol and a list of implementations and
consolidates the given protocol. Consolidation happens
by changing the protocol `impl_for` in the abstract
format to have fast lookup rules.
It returns the updated version of the protocol bytecode.
A given bytecode or protocol implementation can be checked
to be consolidated or not by analyzing the protocol
attribute:
Enumerable.__info__(:attributes)[:protocol]
If the first element of the tuple is true, it means
the protocol was consolidated.
This function does not load the protocol at any point
nor loads the new bytecode for the compiled module.
"""
@spec apply_to(module, [module]) ::
{ :ok, binary } |
{ :error, :not_a_protocol } |
{ :error, :no_beam_info }
def apply_to(protocol, types) when is_atom(protocol) do
ensure_protocol(protocol)
|> if_ok(change_debug_info types)
|> if_ok(compile)
end
# Ensure the given module is loaded and is a protocol.
defp ensure_protocol(protocol) do
case :beam_lib.chunks(beam_file(protocol), [:abstract_code, :attributes]) do
{ :ok, { ^protocol, [abstract_code: { _raw, abstract_code },
attributes: attributes] } } ->
case attributes[:protocol] do
[fallback_to_any: any, consolidated: _] ->
{ :ok, { protocol, any, abstract_code } }
_ ->
{ :error, :not_a_protocol }
end
_ ->
{ :error, :no_beam_info }
end
end
defp beam_file(module) when is_atom(module) do
case :code.which(module) do
:non_existing -> module
file -> file
end
end
# Change the debug information to the optimized
# impl_for/1 dispatch version.
defp change_debug_info({ protocol, any, code }, types) do
types = if any, do: types, else: List.delete(types, Any)
records = types -- Protocol.builtin
builtin = Protocol.builtin -- (Protocol.builtin -- types)
builtin = if records != [], do: [Record|builtin], else: builtin
change_impl_for(code, protocol, builtin, records, false, [])
end
defp change_impl_for([{ :attribute, line, :protocol, _ }|t], protocol, builtin, records, _, acc) do
attr = [fallback_to_any: Any in builtin, consolidated: true]
change_impl_for(t, protocol, builtin, records, true,
[{ :attribute, line, :protocol, attr }|acc])
end
defp change_impl_for([{ :function, line, :impl_for, 1, _ }|t], protocol, builtin, records, is_protocol, acc) do
clauses = lc type inlist builtin, do: clause_for(type, protocol, line)
unless Any in builtin do
clauses = clauses ++ [fallback_clause_for(nil, protocol, line)]
end
change_impl_for(t, protocol, builtin, records, is_protocol,
[{ :function, line, :impl_for, 1, clauses }|acc])
end
defp change_impl_for([{ :function, line, :rec_impl_for, 1, _ }|t], protocol, builtin, records, is_protocol, acc) do
fallback = if Tuple in builtin, do: Module.concat(protocol, Tuple)
clauses = lc type inlist records, do: record_clause_for(type, protocol, line)
clauses = clauses ++ [fallback_clause_for(fallback, protocol, line)]
change_impl_for(t, protocol, builtin, records, is_protocol,
[{ :function, line, :rec_impl_for, 1, clauses }|acc])
end
defp change_impl_for([h|t], protocol, info, types, is_protocol, acc) do
change_impl_for(t, protocol, info, types, is_protocol, [h|acc])
end
defp change_impl_for([], protocol, _info, _types, is_protocol, acc) do
if is_protocol do
{ :ok, { protocol, Enum.reverse(acc) } }
else
{ :error, :not_a_protocol }
end
end
defp clause_for(Tuple, protocol, line), do: builtin_clause_for(Tuple, :is_tuple, protocol, line)
defp clause_for(Atom, protocol, line), do: builtin_clause_for(Atom, :is_atom, protocol, line)
defp clause_for(List, protocol, line), do: builtin_clause_for(List, :is_list, protocol, line)
defp clause_for(BitString, protocol, line), do: builtin_clause_for(BitString, :is_bitstring, protocol, line)
defp clause_for(Integer, protocol, line), do: builtin_clause_for(Integer, :is_integer, protocol, line)
defp clause_for(Float, protocol, line), do: builtin_clause_for(Float, :is_float, protocol, line)
defp clause_for(Function, protocol, line), do: builtin_clause_for(Function, :is_function, protocol, line)
defp clause_for(PID, protocol, line), do: builtin_clause_for(PID, :is_pid, protocol, line)
defp clause_for(Port, protocol, line), do: builtin_clause_for(Port, :is_port, protocol, line)
defp clause_for(Reference, protocol, line), do: builtin_clause_for(Reference, :is_reference, protocol, line)
defp clause_for(Any, protocol, line) do
{:clause, line, [{:var, line, :_}], [],
[{ :atom, line, Module.concat(protocol, Any) }]}
end
defp clause_for(Record, _protocol, line) do
{:clause, line, [{:var, line, :x}],
[[{:op, line, :andalso,
{:call, line,
{:remote, line, {:atom, line, :erlang}, {:atom, line, :is_tuple}},
[{:var, line, :x}]},
{:call, line,
{:remote, line, {:atom, line, :erlang}, {:atom, line, :is_atom}},
[{:call, line,
{:remote, line, {:atom, line, :erlang}, {:atom, line, :element}},
[{:integer, line, 1}, {:var, line, :x}]
}]},
}]],
[{:call, line,
{:atom, line, :rec_impl_for},
[{:call, line,
{:remote, line, {:atom, line, :erlang}, {:atom, line, :element}},
[{:integer, line, 1}, {:var, line, :x}]}]}]}
end
defp builtin_clause_for(mod, guard, protocol, line) do
{:clause, line,
[{:var, line, :x}],
[[{:call, line,
{:remote, line, {:atom, line, :erlang}, {:atom, line, guard}},
[{:var, line, :x}],
}]],
[{:atom, line, Module.concat(protocol, mod)}]}
end
defp record_clause_for(other, protocol, line) do
{:clause, line, [{:atom, line, other}], [],
[{:atom, line, Module.concat(protocol, other)}]}
end
defp fallback_clause_for(value, _protocol, line) do
{:clause, line, [{:var, line, :_}], [],
[{ :atom, line, value }]}
end
# Finally compile the module and emit its bytecode.
defp compile({ protocol, code }) do
opts = if Code.compiler_options[:debug_info], do: [:debug_info], else: []
{ :ok, ^protocol, binary, _warnings } = :compile.forms(code, [:return|opts])
{ :ok, binary }
end
end
|
lib/elixir/lib/protocol/consolidation.ex
| 0.861217
| 0.461077
|
consolidation.ex
|
starcoder
|
defmodule NervesPack.WiFiWizardButton do
use GenServer
@moduledoc """
Starts the wizard if a button is depressed for long enough.
**Note:** Using this requires `Circuits.GPIO` be included as a dependency in
your project:
```elixir
def deps() do
{:circuits_gpio, "~> 0.4"}
end
```
It is recommended that you start this in your own supervision separate from
NervesPack. This module mainly serves as a convenience and example for simple
management of `VintageNetWizard`:
```elixir
def start(_type, _args) do
children = [
NervesPack.WiFiWizardButton
...
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
```
Though you can also enable this in the config as well which will start it
within `NervesPack.Supervisor` instead:
```
config :nerves_pack, wifi_wizard_button: true
```
GPIO 26 is used for the button and the hold time is 5 seconds.
These defaults can be configured when adding as a supervised child or in the
config if desired:
```
# Supervised child
children = [
{NervesPack.WiFiWizardButton, [pin: 12, hold: 4_000]},
...
]
# config.exs
config :nerves_pack,
wifi_wizard_button_pin: 17,
wifi_wizard_button_hold: 3_000
```
"""
alias Circuits.GPIO
require Logger
@default_hold 5_000
@default_pin 26
@doc """
Start the button monitor
"""
@spec start_link(list()) :: GenServer.on_start()
def start_link(opts \\ []) do
if Code.ensure_compiled?(GPIO) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
else
_ =
Logger.warn("""
[NervesPack] - Skipping WiFiWizardButton: add {:circuits_gpio, "~> 0.4"} to your dependencies to use
""")
:ignore
end
end
@impl true
def init(opts) do
gpio_pin =
opts[:pin] || Application.get_env(:nerves_pack, :wifi_wizard_button_pin, @default_pin)
{:ok, gpio} = GPIO.open(gpio_pin, :input)
:ok = GPIO.set_interrupts(gpio, :both)
hold =
opts[:hold] || Application.get_env(:nerves_pack, :wifi_wizard_button_hold, @default_hold)
hold = validate_timeout(hold)
_ =
Logger.info("""
[NervesPack] WiFi Wizard can be started any time by pressing down the button
on GPIO #{gpio_pin} for #{hold / 1000} seconds.
If no button is connected, you can manually mock a "press" by connecting the
pin to 3.3v power for the required time with a cable.
""")
{:ok, %{hold: hold, pin: gpio_pin, gpio: gpio}}
end
@impl true
def handle_info({:circuits_gpio, gpio_pin, _timestamp, 1}, %{pin: gpio_pin} = state) do
# Button pressed. Start a timer to launch the wizard when it's long enough
{:noreply, state, state.hold}
end
@impl true
def handle_info({:circuits_gpio, gpio_pin, _timestamp, 0}, %{pin: gpio_pin} = state) do
# Button released. The GenServer timer is implicitly cancelled by receiving this message.
{:noreply, state}
end
@impl true
def handle_info(:timeout, state) do
# Timeout occurred before button released which means
# it was held for long enough
:ok = VintageNetWizard.run_wizard()
_ = Logger.info("[NervesPack] WiFi Wizard started...")
{:noreply, state}
end
defp validate_timeout(timeout) when is_integer(timeout), do: timeout
defp validate_timeout(timeout) do
_ =
Logger.warn(
"[NervesPack] Invalid button hold: #{inspect(timeout)}. Must be an integer in ms. Using default 5_000"
)
@default_hold
end
end
|
lib/nerves_pack/wifi_wizard_button.ex
| 0.818229
| 0.781372
|
wifi_wizard_button.ex
|
starcoder
|
defmodule Nifsy do
@moduledoc """
The Nifsy API.
"""
alias Nifsy.{Handle, Native}
@default_buffer_bytes 64 * 1024
@options [:append, :create, :dsync, :exclusive, :lock, :sync, :truncate]
@type option ::
:append |
{:buffer_bytes, pos_integer} |
:create |
:dsync |
:exclusive |
:lock |
:sync |
:truncate
@type mode :: :read | :write
@type options :: [option]
@doc """
Open the file at `path` for `mode` operations with `options`.
A handle is returned, which you should treat as an opaque term and not attempt to modify in any
way.
A handle can be opened in either `:read` or `:write` mode. This mode cannot be changed after
open, and a handle can only be used for the operations associated with its mode.
The option `{:buffer_bytes, pos_integer}` may be provided to alter the buffer size (which is used
for all operations) from the default, which is 64KiB.
The option `:lock` may be provided, which will cause all operations on the given file descriptor
to be mutually exclusive. If you plan to use a single handle from multiple OTP processes, you
should use this option. However, performance may be better if you can perform all operations on a
handle from a single process and then distribute the data to multiple processes.
The rest of the options: `:append`, `:create`, `:dsync`, `:exclusive`, `:sync`, and `:truncate`,
behave according to their corresponding options in the
[POSIX specification](http://pubs.opengroup.org/onlinepubs/9699919799/functions/open.html).
Given the low level nature of this API, behaviour, error codes, and error messages may not be
perfectly identical across operating systems.
"""
@spec open(Path.t, mode, options) :: {:ok, Handle.t} | {:error, term}
def open(path, mode \\ :read, options \\ [])
when is_binary(path) and mode in [:read, :write] and is_list(options) do
path_charlist = to_charlist(path)
case init_options(options, {[], nil}) do
{:ok, {options, buffer_bytes}} ->
buffer_bytes = if buffer_bytes, do: buffer_bytes, else: @default_buffer_bytes
case Native.open(path_charlist, buffer_bytes, [mode | options]) do
{:ok, handle} ->
{:ok, %Handle{buffer_bytes: buffer_bytes, handle: handle, mode: mode, path: path}}
{:error, reason} ->
{:error, reason}
end
{:error, error} ->
{:error, error}
end
end
@doc """
Read (at most) `bytes` bytes from `handle`.
The handle must be in `:read` mode.
A call will return `bytes` bytes, or fewer, if, for instance, there are less bytes than
requested left in the file.
After the last byte has been returned, the function will return `:eof`.
"""
@spec read(Handle.t, pos_integer) :: {:ok, binary} | :eof | {:error, term}
def read(%Handle{mode: :read} = handle, bytes)
when is_integer(bytes) and bytes > 0 do
Native.read(handle.handle, bytes)
end
@doc ~S"""
Read a line from `handle`.
The handle must be in `:read` mode.
Lines are considered delimited by `"\n"`, the delimiter is not returned, and using this
function with files with CRLF line endings will return the CR at the end of the line.
After the final line has been returned, the function will return `:eof`.
"""
@spec read_line(Handle.t) :: {:ok, binary} | :eof | {:error, term}
def read_line(%Handle{mode: :read} = handle) do
Native.read_line(handle.handle)
end
@doc """
Write `data` to `handle`.
The handle must be in `:write` mode.
Since Nifsy uses buffered IO, the data may not actually be written to the file immediately. If
you wish to force the data to be written, you can use `flush/1` or `close/1`.
"""
@spec write(Handle.t, iodata) :: :ok | {:error, term}
def write(%Handle{mode: :write} = handle, data) do
Native.write(handle.handle, data)
end
@doc """
Flush `handle`.
The handle must be in `:write` mode.
It is not required to explicitly flush the handle, as it will be automatically flushed when it is
closed/garbage collected.
"""
@spec flush(Handle.t) :: :ok | {:error, term}
def flush(%Handle{mode: :write} = handle) do
Native.flush(handle.handle)
end
@doc """
Close `handle`.
It is not required to explicitly close the handle, as it will be automatically closed when it is
garbage collected.
If the handle is in `:write` mode, the function will write any buffered data before closing the
handle.
"""
@spec close(Handle.t) :: :ok | {:error, term}
def close(%Handle{} = handle) do
Native.close(handle.handle)
end
@doc """
Return a `Stream` for the file at `path` with `options`.
The `options` are the same as can be provided to `open/3`.
"""
@spec stream!(Path.t, options) :: Stream.t
def stream!(path, options \\ []) do
%Nifsy.Stream{path: path, options: options}
end
defp init_options([], acc) do
{:ok, acc}
end
defp init_options([{:buffer_bytes, buffer_bytes} | rest], {options, curr_buffer_bytes})
when is_integer(buffer_bytes) and buffer_bytes > 0 do
init_options(rest, {options, if(curr_buffer_bytes, do: curr_buffer_bytes, else: buffer_bytes)})
end
defp init_options([option | rest], {options, buffer_bytes})
when option in @options do
init_options(rest, {options ++ [option], buffer_bytes})
end
defp init_options([option | _rest], _acc) do
{:error, "invalid option #{inspect option}"}
end
end
|
lib/nifsy.ex
| 0.830388
| 0.569553
|
nifsy.ex
|
starcoder
|
defmodule Cassette do
@moduledoc """
Library to generate and validate [CAS](http://jasig.github.io/cas/) TGTs/STs
## Client usage
Generate a tgt and a st for some service:
```elixir
iex> Cassette.tgt
{:ok, "TGT-example-abcd"}
iex> Cassette.st("http://some.authenticated/url")
{:ok, "ST-example-1234"}
```
## Validation usage:
```elixir
iex> st = FakeCas.valid_st
iex> Cassette.validate(st, "http://some.authenticated/url")
{:ok, Cassette.User.new("example", "customer", ["ACME_ADMIN"],
%{"cn" => "<NAME>"})}
```
## Customization and multiple configurations
If you need multiple Cassette servers please refer to `Cassette.Support` for
macros that allow to build your own services.
## Running on development without an actual CAS server
The `FakeCas` module we use for testing is available on `:dev` as well.
To set it up and configure the default `Cassette`, add to yor dependencies on
`mix.exs`:
```elixir
{:fake_cas, "~> 1.0"}
```
Then initialize it with:
```elixir
iex> FakeCas.Support.initialize
:ok
```
With the configuration set, `Cassette` will always return the TGT returned by
`FakeCas.valid_tgt/0`:
```
iex> tgt = FakeCas.valid_tgt
iex> {:ok, ^tgt} = Cassette.tgt
{:ok, "TGT-example-abcd"}
```
Using the stored valid TGT, `Casette` will always generate the same ST:
```elixir
iex> st = FakeCas.valid_st
iex> {:ok, ^st} = Cassette.st("some-service")
{:ok, "ST-example-1234"}
```
Trying to validate the ST in `FakeCas.valid_st/0` will always succeed for any
service:
```elixir
iex> st = FakeCas.valid_st
iex> {:ok, _} = Cassette.validate(st, "some-service")
{:ok, Cassette.User.new("example", "customer", ["ACME_ADMIN"],
%{"cn" => "<NAME>"})}
```
And trying to validate any other ST will always fail:
```elixir
iex> Cassette.validate("any-other-st", "some-service")
{:error, "INVALID_SERVICE: ticket 'X' is invalid"}
```
"""
use Cassette.Support
use Application
@impl Application
def start(_, _), do: start()
@impl Application
def stop(_state) do
GenServer.stop(@name)
end
end
|
lib/cassette.ex
| 0.801276
| 0.873485
|
cassette.ex
|
starcoder
|
defmodule Plymio.Fontais.Option.Macro do
@moduledoc ~S"""
Macros for Custom Option ("opts") Accessors and Mutators.
See `Plymio.Fontais` for overview and documentation terms.
These macros define custom versions of a small set of functions from `Plymio.Fontais.Option` such as `Plymio.Fontais.Option.opts_get/3`
The customisation hardcode ("curries") arguments such as the `key` and `default`.
"""
use Plymio.Fontais.Attribute
defmacro def_custom_opts_get(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_spec} <- opts do
fun_spec =
fun_spec
|> case do
x when is_atom(x) -> %{key: x}
x when is_map(x) -> x
end
fun_key = fun_spec |> Map.fetch!(:key)
fun_default = fun_spec |> Map.get(:default)
def unquote(fun_name)(opts, default \\ unquote(fun_default))
def unquote(fun_name)(opts, default) do
opts_get(opts, unquote(fun_key), default)
end
end
end
end
defmacro def_custom_opts_get_values(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_spec} <- opts do
fun_spec =
fun_spec
|> case do
x when is_atom(x) -> %{key: x}
x when is_map(x) -> x
end
fun_key = fun_spec |> Map.fetch!(:key)
fun_default = fun_spec |> Map.get(:default)
def unquote(fun_name)(opts, default \\ unquote(fun_default))
def unquote(fun_name)(opts, default) do
opts_get_values(opts, unquote(fun_key), default)
end
end
end
end
defmacro def_custom_opts_fetch(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_key} <- opts do
def unquote(fun_name)(opts) do
opts_fetch(opts, unquote(fun_key))
end
end
end
end
defmacro def_custom_opts_put(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_key} <- opts do
def unquote(fun_name)(opts, value) do
opts_put(opts, unquote(fun_key), value)
end
end
end
end
defmacro def_custom_opts_put_value(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_spec} <- opts do
fun_key = fun_spec |> Map.fetch!(:key)
fun_value = fun_spec |> Map.fetch!(:value)
def unquote(fun_name)(opts) do
opts_put(opts, unquote(fun_key), unquote(fun_value))
end
end
end
end
defmacro def_custom_opts_delete(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_key} <- opts do
def unquote(fun_name)(opts) do
opts_delete(opts, unquote(fun_key), value)
end
end
end
end
defmacro def_custom_opts_drop(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_key} <- opts do
fun_key =
fun_key
|> List.wrap()
|> Enum.uniq()
def unquote(fun_name)(opts) do
opts_drop(opts, unquote(fun_key))
end
end
end
end
defmacro def_custom_opts_has_key?(opts \\ []) do
quote bind_quoted: [opts: opts] do
for {fun_name, fun_key} <- opts do
def unquote(fun_name)(opts) do
opts
|> Keyword.keyword?()
|> case do
true -> opts |> Keyword.has_key?(unquote(fun_key))
_ -> false
end
end
end
end
end
end
|
lib/fontais/option/macro.ex
| 0.713232
| 0.430387
|
macro.ex
|
starcoder
|
defmodule Cldr.Date do
@moduledoc """
Provides an API for the localization and formatting of a `Date`
struct or any map with the keys `:year`, `:month`,
`:day` and `:calendar`.
`Cldr.Date` provides support for the built-in calendar
`Calendar.ISO`. Use of other calendars may not produce
the expected results.
CLDR provides standard format strings for `Date` which
are reresented by the names `:short`, `:medium`, `:long`
and `:full`. This allows for locale-independent
formatting since each locale may define the underlying
format string as appropriate.
"""
alias Cldr.DateTime.{Formatter, Format}
alias Cldr.LanguageTag
@format_types [:short, :medium, :long, :full]
defmodule Formats do
defstruct Module.get_attribute(Cldr.Date, :format_types)
end
@doc """
Formats a date according to a format string
as defined in CLDR and described in [TR35](http://unicode.org/reports/tr35/tr35-dates.html)
Returns either `{:ok, formatted_string}` or `{:error, reason}`.
* `date` is a `%Date{}` struct or any map that contains the keys
`year`, `month`, `day` and `calendar`
* `options` is a keyword list of options for formatting. The valid options are:
* `format:` `:short` | `:medium` | `:long` | `:full` or a format string. The default is `:medium`
* `locale:` any locale returned by `Cldr.known_locale_names()`. The default is `Cldr.get_current_locale()`
* `number_system:` a number system into which the formatted date digits should be transliterated
## Examples
iex> Cldr.Date.to_string ~D[2017-07-10], format: :medium, locale: Cldr.Locale.new!("en")
{:ok, "Jul 10, 2017"}
iex> Cldr.Date.to_string ~D[2017-07-10], locale: Cldr.Locale.new!("en")
{:ok, "Jul 10, 2017"}
iex> Cldr.Date.to_string ~D[2017-07-10], format: :full, locale: Cldr.Locale.new!("en")
{:ok, "Monday, July 10, 2017"}
iex> Cldr.Date.to_string ~D[2017-07-10], format: :short, locale: Cldr.Locale.new!("en")
{:ok, "7/10/17"}
iex> Cldr.Date.to_string ~D[2017-07-10], format: :short, locale: "fr"
{:ok, "10/07/2017"}
iex> Cldr.Date.to_string ~D[2017-07-10], format: :long, locale: "af"
{:ok, "10 Julie 2017"}
"""
def to_string(date, options \\ [])
def to_string(%{year: _year, month: _month, day: _day, calendar: calendar} = date, options) do
options = Keyword.merge(default_options(), options)
with {:ok, locale} <- Cldr.validate_locale(options[:locale]),
{:ok, cldr_calendar} <- Formatter.type_from_calendar(calendar),
{:ok, format_string} <-
format_string_from_format(options[:format], locale, cldr_calendar),
{:ok, formatted} <- Formatter.format(date, format_string, locale, options) do
{:ok, formatted}
else
{:error, reason} -> {:error, reason}
end
end
def to_string(date, _options) do
error_return(date, [:year, :month, :day, :calendar])
end
defp default_options do
[format: :medium, locale: Cldr.get_current_locale()]
end
@doc """
Formats a date according to a format string
as defined in CLDR and described in [TR35](http://unicode.org/reports/tr35/tr35-dates.html)
## Arguments
* `date` is a `%Date{}` struct or any map that contains the keys
`year`, `month`, `day` and `calendar`
* `options` is a keyword list of options for formatting. The valid options are:
* `format:` `:short` | `:medium` | `:long` | `:full` or a format string.
The default is `:medium`
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `Cldr.get_current_locale/0`
* `number_system:` a number system into which the formatted date digits should
be transliterated
## Returns
* `formatted_date` or
* raises an exception.
## Examples
iex> Cldr.Date.to_string! ~D[2017-07-10], format: :medium, locale: Cldr.Locale.new!("en")
"Jul 10, 2017"
iex> Cldr.Date.to_string! ~D[2017-07-10], locale: Cldr.Locale.new!("en")
"Jul 10, 2017"
iex> Cldr.Date.to_string! ~D[2017-07-10], format: :full, locale: Cldr.Locale.new!("en")
"Monday, July 10, 2017"
iex> Cldr.Date.to_string! ~D[2017-07-10], format: :short, locale: Cldr.Locale.new!("en")
"7/10/17"
iex> Cldr.Date.to_string! ~D[2017-07-10], format: :short, locale: "fr"
"10/07/2017"
iex> Cldr.Date.to_string! ~D[2017-07-10], format: :long, locale: "af"
"10 Julie 2017"
"""
def to_string!(date, options \\ [])
def to_string!(date, options) do
case to_string(date, options) do
{:ok, string} -> string
{:error, {exception, message}} -> raise exception, message
end
end
defp format_string_from_format(format, %LanguageTag{cldr_locale_name: locale_name}, calendar)
when format in @format_types do
with {:ok, date_formats} <- Format.date_formats(locale_name, calendar) do
{:ok, Map.get(date_formats, format)}
end
end
defp format_string_from_format(
%{number_system: number_system, format: format},
locale,
calendar
) do
{:ok, format_string} = format_string_from_format(format, locale, calendar)
{:ok, %{number_system: number_system, format: format_string}}
end
defp format_string_from_format(format, _locale, _calendar) when is_atom(format) do
{:error,
{Cldr.InvalidDateFormatType,
"Invalid date format type. " <> "The valid types are #{inspect(@format_types)}."}}
end
defp format_string_from_format(format_string, _locale, _calendar)
when is_binary(format_string) do
{:ok, format_string}
end
defp error_return(map, requirements) do
{:error,
{ArgumentError,
"Invalid date. Date is a map that requires at least #{inspect(requirements)} fields. " <>
"Found: #{inspect(map)}"}}
end
end
|
lib/cldr/datetime/date.ex
| 0.945362
| 0.755783
|
date.ex
|
starcoder
|
defmodule AWS.GreengrassV2 do
@moduledoc """
AWS IoT Greengrass brings local compute, messaging, data management, sync, and
ML inference capabilities to edge devices.
This enables devices to collect and analyze data closer to the source of
information, react autonomously to local events, and communicate securely with
each other on local networks. Local devices can also communicate securely with
AWS IoT Core and export IoT data to the AWS Cloud. AWS IoT Greengrass developers
can use AWS Lambda functions and components to create and deploy applications to
fleets of edge devices for local operation.
AWS IoT Greengrass Version 2 provides a new major version of the AWS IoT
Greengrass Core software, new APIs, and a new console. Use this API reference to
learn how to use the AWS IoT Greengrass V2 API operations to manage components,
manage deployments, and core devices.
For more information, see [What is AWS IoT Greengrass?](https://docs.aws.amazon.com/greengrass/v2/developerguide/what-is-iot-greengrass.html)
in the *AWS IoT Greengrass V2 Developer Guide*.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2020-11-30",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "greengrass",
global?: false,
protocol: "rest-json",
service_id: "GreengrassV2",
signature_version: "v4",
signing_name: "greengrass",
target_prefix: nil
}
end
@doc """
Cancels a deployment.
This operation cancels the deployment for devices that haven't yet received it.
If a device already received the deployment, this operation doesn't change
anything for that device.
"""
def cancel_deployment(%Client{} = client, deployment_id, input, options \\ []) do
url_path = "/greengrass/v2/deployments/#{URI.encode(deployment_id)}/cancel"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a component.
Components are software that run on AWS IoT Greengrass core devices. After you
develop and test a component on your core device, you can use this operation to
upload your component to AWS IoT Greengrass. Then, you can deploy the component
to other core devices.
You can use this operation to do the following:
* ## Create components from recipes
Create a component from a recipe, which is a file that defines the component's
metadata, parameters, dependencies, lifecycle, artifacts, and platform
capability. For more information, see [AWS IoT Greengrass component recipe reference](https://docs.aws.amazon.com/greengrass/v2/developerguide/component-recipe-reference.html)
in the *AWS IoT Greengrass V2 Developer Guide*.
To create a component from a recipe, specify `inlineRecipe` when you call this
operation.
* ## Create components from Lambda functions
Create a component from an AWS Lambda function that runs on AWS IoT Greengrass.
This creates a recipe and artifacts from the Lambda function's deployment
package. You can use this operation to migrate Lambda functions from AWS IoT
Greengrass V1 to AWS IoT Greengrass V2.
This function only accepts Lambda functions that use the following runtimes:
* Python 2.7 – `python2.7`
* Python 3.7 – `python3.7`
* Python 3.8 – `python3.8`
* Java 8 – `java8`
* Node.js 10 – `nodejs10.x`
* Node.js 12 – `nodejs12.x`
To create a component from a Lambda function, specify `lambdaFunction` when you
call this operation.
"""
def create_component_version(%Client{} = client, input, options \\ []) do
url_path = "/greengrass/v2/createComponentVersion"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Creates a continuous deployment for a target, which is a AWS IoT Greengrass core
device or group of core devices.
When you add a new core device to a group of core devices that has a deployment,
AWS IoT Greengrass deploys that group's deployment to the new device.
You can define one deployment for each target. When you create a new deployment
for a target that has an existing deployment, you replace the previous
deployment. AWS IoT Greengrass applies the new deployment to the target devices.
Every deployment has a revision number that indicates how many deployment
revisions you define for a target. Use this operation to create a new revision
of an existing deployment. This operation returns the revision number of the new
deployment when you create it.
For more information, see the [Create deployments](https://docs.aws.amazon.com/greengrass/v2/developerguide/create-deployments.html)
in the *AWS IoT Greengrass V2 Developer Guide*.
"""
def create_deployment(%Client{} = client, input, options \\ []) do
url_path = "/greengrass/v2/deployments"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Deletes a version of a component from AWS IoT Greengrass.
This operation deletes the component's recipe and artifacts. As a result,
deployments that refer to this component version will fail. If you have
deployments that use this component version, you can remove the component from
the deployment or update the deployment to use a valid version.
"""
def delete_component(%Client{} = client, arn, input, options \\ []) do
url_path = "/greengrass/v2/components/#{URI.encode(arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes a AWS IoT Greengrass core device, which is an AWS IoT thing.
This operation removes the core device from the list of core devices. This
operation doesn't delete the AWS IoT thing. For more information about how to
delete the AWS IoT thing, see
[DeleteThing](https://docs.aws.amazon.com/iot/latest/apireference/API_DeleteThing.html)
in the *AWS IoT API Reference*.
"""
def delete_core_device(%Client{} = client, core_device_thing_name, input, options \\ []) do
url_path = "/greengrass/v2/coreDevices/#{URI.encode(core_device_thing_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Retrieves metadata for a version of a component.
"""
def describe_component(%Client{} = client, arn, options \\ []) do
url_path = "/greengrass/v2/components/#{URI.encode(arn)}/metadata"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets the recipe for a version of a component.
Core devices can call this operation to identify the artifacts and requirements
to install a component.
"""
def get_component(%Client{} = client, arn, recipe_output_format \\ nil, options \\ []) do
url_path = "/greengrass/v2/components/#{URI.encode(arn)}"
headers = []
query_params = []
query_params =
if !is_nil(recipe_output_format) do
[{"recipeOutputFormat", recipe_output_format} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets the pre-signed URL to download a public component artifact.
Core devices call this operation to identify the URL that they can use to
download an artifact to install.
"""
def get_component_version_artifact(%Client{} = client, arn, artifact_name, options \\ []) do
url_path =
"/greengrass/v2/components/#{URI.encode(arn)}/artifacts/#{
AWS.Util.encode_uri(artifact_name, true)
}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves metadata for a AWS IoT Greengrass core device.
"""
def get_core_device(%Client{} = client, core_device_thing_name, options \\ []) do
url_path = "/greengrass/v2/coreDevices/#{URI.encode(core_device_thing_name)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets a deployment.
Deployments define the components that run on AWS IoT Greengrass core devices.
"""
def get_deployment(%Client{} = client, deployment_id, options \\ []) do
url_path = "/greengrass/v2/deployments/#{URI.encode(deployment_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of all versions for a component.
"""
def list_component_versions(
%Client{} = client,
arn,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/greengrass/v2/components/#{URI.encode(arn)}/versions"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of component summaries.
This list includes components that you have permission to view.
"""
def list_components(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
scope \\ nil,
options \\ []
) do
url_path = "/greengrass/v2/components"
headers = []
query_params = []
query_params =
if !is_nil(scope) do
[{"scope", scope} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of AWS IoT Greengrass core devices.
"""
def list_core_devices(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
status \\ nil,
thing_group_arn \\ nil,
options \\ []
) do
url_path = "/greengrass/v2/coreDevices"
headers = []
query_params = []
query_params =
if !is_nil(thing_group_arn) do
[{"thingGroupArn", thing_group_arn} | query_params]
else
query_params
end
query_params =
if !is_nil(status) do
[{"status", status} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of deployments.
"""
def list_deployments(
%Client{} = client,
history_filter \\ nil,
max_results \\ nil,
next_token \\ nil,
target_arn \\ nil,
options \\ []
) do
url_path = "/greengrass/v2/deployments"
headers = []
query_params = []
query_params =
if !is_nil(target_arn) do
[{"targetArn", target_arn} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(history_filter) do
[{"historyFilter", history_filter} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of deployment jobs that AWS IoT Greengrass sends to
AWS IoT Greengrass core devices.
"""
def list_effective_deployments(
%Client{} = client,
core_device_thing_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path =
"/greengrass/v2/coreDevices/#{URI.encode(core_device_thing_name)}/effectiveDeployments"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of the components that a AWS IoT Greengrass core
device runs.
"""
def list_installed_components(
%Client{} = client,
core_device_thing_name,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path =
"/greengrass/v2/coreDevices/#{URI.encode(core_device_thing_name)}/installedComponents"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves the list of tags for an AWS IoT Greengrass resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a list of components that meet the component, version, and platform
requirements of a deployment.
AWS IoT Greengrass core devices call this operation when they receive a
deployment to identify the components to install.
This operation identifies components that meet all dependency requirements for a
deployment. If the requirements conflict, then this operation returns an error
and the deployment fails. For example, this occurs if component `A` requires
version `>2.0.0` and component `B` requires version `<2.0.0` of a component
dependency.
When you specify the component candidates to resolve, AWS IoT Greengrass
compares each component's digest from the core device with the component's
digest in the AWS Cloud. If the digests don't match, then AWS IoT Greengrass
specifies to use the version from the AWS Cloud.
To use this operation, you must use the data plane API endpoint and authenticate
with an AWS IoT device certificate. For more information, see [AWS IoT Greengrass endpoints and
quotas](https://docs.aws.amazon.com/general/latest/gr/greengrass.html).
"""
def resolve_component_candidates(%Client{} = client, input, options \\ []) do
url_path = "/greengrass/v2/resolveComponentCandidates"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds tags to an AWS IoT Greengrass resource.
If a tag already exists for the resource, this operation updates the tag's
value.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes a tag from an AWS IoT Greengrass resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_params, input} =
[
{"tagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
|
lib/aws/generated/greengrass_v2.ex
| 0.868493
| 0.459379
|
greengrass_v2.ex
|
starcoder
|
defmodule Plaid.Transactions do
@moduledoc """
[Plaid Transactions API](https://plaid.com/docs/api/transactions) calls and schema.
"""
defmodule GetResponse do
@moduledoc """
[Plaid API /transactions/get response schema.](https://plaid.com/docs/api/transactions)
"""
@behaviour Plaid.Castable
alias Plaid.Account
alias Plaid.Castable
alias Plaid.Item
alias Plaid.Transactions.Transaction
@type t :: %__MODULE__{
accounts: [Account.t()],
transactions: [Transaction.t()],
item: Item.t(),
total_transactions: integer(),
request_id: String.t()
}
defstruct [
:accounts,
:transactions,
:item,
:total_transactions,
:request_id
]
@impl true
def cast(generic_map) do
%__MODULE__{
accounts: Castable.cast_list(Account, generic_map["accounts"]),
transactions: Castable.cast_list(Transaction, generic_map["transactions"]),
item: Castable.cast(Item, generic_map["item"]),
total_transactions: generic_map["total_transactions"],
request_id: generic_map["request_id"]
}
end
end
@doc """
Get information about transactions.
Does a `POST /transactions/get` call which gives you high level account
data along with transactions from all accounts contained in the
`access_token`'s item.
Params:
* `access_token` - Token to fetch transactions for.
* `start_date` - Start of query for transactions.
* `end_date` - End of query for transactions.
Options:
* `:account_ids` - Specific account ids to fetch balances for.
* `:count` - Amount of transactions to pull.
* `:offset` - Offset to start pulling transactions.
## Example
Transactions.get("access-sandbox-123xxx", "2019-10-10", "2019-10-20", client_id: "123", secret: "abc")
{:ok, %Transactions.GetResponse{}}
"""
@spec get(String.t(), String.t(), String.t(), options, Plaid.config()) ::
{:ok, GetResponse.t()} | {:error, Plaid.Error.t()}
when options: %{
optional(:account_ids) => [String.t()],
optional(:count) => integer(),
optional(:offset) => integer()
}
def get(access_token, start_date, end_date, options \\ %{}, config) do
options_payload = Map.take(options, [:account_ids, :count, :offset])
payload =
%{}
|> Map.put(:access_token, access_token)
|> Map.put(:start_date, start_date)
|> Map.put(:end_date, end_date)
|> Map.put(:options, options_payload)
Plaid.Client.call(
"/transactions/get",
payload,
Plaid.Transactions.GetResponse,
config
)
end
@doc """
Manually refresh transactions.
Does a `POST /transactions/refresh` call which kicks off a manual
transactions extraction for all accounts contained in the `access_token`'s
item.
* `access_token` - Token to fetch transactions for.
## Examples
Transactions.refresh("access-sandbox-123xxx", client_id: "123", secret: "abc")
{:ok, %Plaid.SimpleResponse{}}
"""
@spec refresh(String.t(), Plaid.config()) ::
{:ok, Plaid.SimpleResponse.t()} | {:error, Plaid.Error.t()}
def refresh(access_token, config) do
Plaid.Client.call(
"/transactions/refresh",
%{access_token: access_token},
Plaid.SimpleResponse,
config
)
end
end
|
lib/plaid/transactions.ex
| 0.910264
| 0.455986
|
transactions.ex
|
starcoder
|
defmodule PrometheusParser.Line do
defstruct line_type: nil,
timestamp: nil,
pairs: [],
value: nil,
documentation: nil,
type: nil,
label: nil
end
defimpl String.Chars, for: PrometheusParser.Line do
def pairs_to_string(pairs) do
pairs
|> Enum.map(fn {key, value} -> "#{key}=\"#{value}\"" end)
|> Enum.join(", ")
end
def to_string(%{line_type: "COMMENT", documentation: documentation}),
do: "# #{documentation}"
def to_string(%{line_type: "HELP", label: label, documentation: documentation}),
do: "# HELP #{label} #{documentation}"
def to_string(%{line_type: "TYPE", label: label, type: type}),
do: "# TYPE #{label} #{type}"
def to_string(%{
line_type: "ENTRY",
label: label,
pairs: pairs,
value: value,
timestamp: timestamp
})
when not is_nil(timestamp) do
"#{label}{#{pairs_to_string(pairs)}} #{value} #{timestamp}"
end
def to_string(%{
line_type: "ENTRY",
label: label,
pairs: [],
value: value,
timestamp: timestamp
})
when not is_nil(timestamp) do
"#{label} #{value} #{timestamp}"
end
def to_string(%{
line_type: "ENTRY",
label: label,
pairs: [],
value: value
}) do
"#{label} #{value}"
end
def to_string(%{
line_type: "ENTRY",
label: label,
pairs: pairs,
value: value
}) do
"#{label}{#{pairs_to_string(pairs)}} #{value}"
end
end
defmodule PrometheusParser do
import NimbleParsec
alias PrometheusParser.Line
comment =
string("#")
|> tag(:comment)
|> ignore(string(" "))
documentation =
utf8_string([], min: 1)
|> tag(:documentation)
prom_label = ascii_string([?a..?z] ++ [?_], min: 1)
help =
string("HELP")
|> tag(:help)
|> ignore(string(" "))
|> concat(prom_label)
|> tag(:prom_label)
|> ignore(string(" "))
|> concat(documentation)
type =
string("TYPE")
|> tag(:type)
|> ignore(string(" "))
|> concat(prom_label)
|> ignore(string(" "))
|> choice([
string("gauge"),
string("counter")
])
|> tag(:type)
prom_key_value =
prom_label
|> tag(:pair_key)
|> ignore(string("=\""))
|> optional(ascii_string([?a..?z, ?A..?Z, ?0..?9, ?-..?-, ?_..?_, ?...?:], min: 1))
|> label("expected a-z,A-Z,0-9,\-")
|> tag(:pair_value)
|> ignore(string("\""))
prom_integer = ascii_string([?0..?9], min: 1)
prom_entry =
prom_label
|> tag(:prom_label)
|> ignore(string(" "))
|> concat(prom_integer |> tag(:entry_value))
prom_entry_with_timestamp =
prom_entry
|> ignore(string(" "))
|> concat(prom_integer |> tag(:timestamp))
prom_entry_with_key_and_value =
prom_label
|> tag(:prom_label)
|> ignore(string("{"))
|> repeat(
prom_key_value
|> ignore(optional(string(",")))
|> ignore(optional(string(" ")))
)
|> ignore(string("}"))
|> ignore(string(" "))
|> concat(prom_integer |> tag(:entry_value))
prom_entry_with_key_and_value_and_timestamp =
prom_entry_with_key_and_value
|> ignore(string(" "))
|> concat(prom_integer |> tag(:timestamp))
unsupported =
empty()
|> line()
|> tag(:unsupported)
defparsec(
:parse_line,
choice([
comment |> concat(help),
comment |> concat(type),
comment |> concat(documentation),
prom_entry_with_timestamp,
prom_entry,
prom_entry_with_key_and_value_and_timestamp,
prom_entry_with_key_and_value,
unsupported
])
)
def parse_file(file) do
file
|> File.read!()
|> String.split("\n")
|> Enum.map(&parse/1)
end
def parse(line) do
line
|> parse_line()
|> format()
end
def format({:ok, [{:unsupported, _}], line, %{} = _context, _line, _offset}),
do: {:error, "Unsupported syntax: #{inspect(line)}"}
def format({:ok, acc, "" = _rest, %{} = _context, _line, _offset}),
do: format(acc)
def format(acc) when is_list(acc) do
line =
acc
|> Enum.reduce(%Line{}, fn item, acc ->
case item do
{:comment, ["#"]} ->
%{acc | line_type: "COMMENT"}
{:prom_label, [{:help, ["HELP"]}, label]} ->
%{acc | line_type: "HELP", label: label}
{:prom_label, [label]} ->
%{acc | line_type: "ENTRY", label: label}
{:type, [{:type, ["TYPE"]}, label, type]} ->
%{acc | line_type: "TYPE", label: label, type: type}
{:documentation, [documentation]} ->
%{acc | documentation: documentation}
{:pair_value, [{:pair_key, [key]}]} ->
%{acc | pairs: acc.pairs ++ [{key, ""}]}
{:pair_value, [{:pair_key, [key]}, value]} ->
%{acc | pairs: acc.pairs ++ [{key, value}]}
{:entry_value, [value]} ->
%{acc | value: value}
{:timestamp, [timestamp]} ->
%{acc | timestamp: timestamp}
end
end)
{:ok, line}
end
end
|
lib/prometheus_parser.ex
| 0.640411
| 0.401365
|
prometheus_parser.ex
|
starcoder
|
defmodule AWS.CodeGuruProfiler do
@moduledoc """
This section provides documentation for the Amazon CodeGuru Profiler API
operations.
` Amazon CodeGuru Profiler collects runtime performance data from your live
applications, and provides recommendations that can help you fine-tune your
application performance. Using machine learning algorithms, CodeGuru Profiler
can help you find your most expensive lines of code and suggest ways you can
improve efficiency and remove CPU bottlenecks.
Amazon CodeGuru Profiler provides different visualizations of profiling data to
help you identify what code is running on the CPU, see how much time is
consumed, and suggest ways to reduce CPU utilization.
Amazon CodeGuru Profiler currently supports applications written in all Java
virtual machine (JVM) languages. While CodeGuru Profiler supports both
visualizations and recommendations for applications written in Java, it can also
generate visualizations and a subset of recommendations for applications written
in other JVM languages.
For more information, see [What is Amazon CodeGuru Profiler](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html)
in the *Amazon CodeGuru Profiler User Guide*.
`
"""
@doc """
Add up to 2 anomaly notifications channels for a profiling group.
"""
def add_notification_channels(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/notificationConfiguration"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Returns the time series of values for a requested list of frame metrics from a
time period.
"""
def batch_get_frame_metric_data(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/frames/-/metrics"
headers = []
{query_, input} =
[
{"endTime", "endTime"},
{"period", "period"},
{"startTime", "startTime"},
{"targetResolution", "targetResolution"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Used by profiler agents to report their current state and to receive remote
configuration updates.
For example, `ConfigureAgent` can be used to tell and agent whether to profile
or not and for how long to return profiling data.
"""
def configure_agent(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/configureAgent"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Creates a profiling group.
"""
def create_profiling_group(client, input, options \\ []) do
path_ = "/profilingGroups"
headers = []
{query_, input} =
[
{"clientToken", "<PASSWORD>Token"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 201)
end
@doc """
Deletes a profiling group.
"""
def delete_profiling_group(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Returns a [ `ProfilingGroupDescription`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ProfilingGroupDescription.html)
object that contains information about the requested profiling group.
"""
def describe_profiling_group(client, profiling_group_name, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of [ `FindingsReportSummary`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_FindingsReportSummary.html)
objects that contain analysis results for all profiling groups in your AWS
account.
"""
def get_findings_report_account_summary(client, daily_reports_only \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/internal/findingsReports"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(daily_reports_only) do
[{"dailyReportsOnly", daily_reports_only} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Get the current configuration for anomaly notifications for a profiling group.
"""
def get_notification_configuration(client, profiling_group_name, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/notificationConfiguration"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns the JSON-formatted resource-based policy on a profiling group.
"""
def get_policy(client, profiling_group_name, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/policy"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Gets the aggregated profile of a profiling group for a specified time range.
Amazon CodeGuru Profiler collects posted agent profiles for a profiling group
into aggregated profiles.
` Because aggregated profiles expire over time `GetProfile` is not idempotent.
Specify the time range for the requested aggregated profile using 1 or 2 of the
following parameters: `startTime`, `endTime`, `period`. The maximum time range
allowed is 7 days. If you specify all 3 parameters, an exception is thrown. If
you specify only `period`, the latest aggregated profile is returned.
Aggregated profiles are available with aggregation periods of 5 minutes, 1 hour,
and 1 day, aligned to UTC. The aggregation period of an aggregated profile
determines how long it is retained. For more information, see [
`AggregatedProfileTime`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_AggregatedProfileTime.html).
The aggregated profile's aggregation period determines how long it is retained
by CodeGuru Profiler.
* If the aggregation period is 5 minutes, the aggregated profile is
retained for 15 days.
* If the aggregation period is 1 hour, the aggregated profile is
retained for 60 days.
* If the aggregation period is 1 day, the aggregated profile is
retained for 3 years.
There are two use cases for calling `GetProfile`.
1. If you want to return an aggregated profile that already exists,
use [ `ListProfileTimes`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ListProfileTimes.html)
to view the time ranges of existing aggregated profiles. Use them in a
`GetProfile` request to return a specific, existing aggregated profile.
2. If you want to return an aggregated profile for a time range that
doesn't align with an existing aggregated profile, then CodeGuru Profiler makes
a best effort to combine existing aggregated profiles from the requested time
range and return them as one aggregated profile.
If aggregated profiles do not exist for the full time range requested, then
aggregated profiles for a smaller time range are returned. For example, if the
requested time range is from 00:00 to 00:20, and the existing aggregated
profiles are from 00:15 and 00:25, then the aggregated profiles from 00:15 to
00:20 are returned.
`
"""
def get_profile(client, profiling_group_name, end_time \\ nil, max_depth \\ nil, period \\ nil, start_time \\ nil, accept \\ nil, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/profile"
headers = []
headers = if !is_nil(accept) do
[{"Accept", accept} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(start_time) do
[{"startTime", start_time} | query_]
else
query_
end
query_ = if !is_nil(period) do
[{"period", period} | query_]
else
query_
end
query_ = if !is_nil(max_depth) do
[{"maxDepth", max_depth} | query_]
else
query_
end
query_ = if !is_nil(end_time) do
[{"endTime", end_time} | query_]
else
query_
end
case request(client, :get, path_, query_, headers, nil, options, 200) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Content-Encoding", "contentEncoding"},
{"Content-Type", "contentType"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Returns a list of [ `Recommendation`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_Recommendation.html)
objects that contain recommendations for a profiling group for a given time
period.
A list of [ `Anomaly`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_Anomaly.html)
objects that contains details about anomalies detected in the profiling group
for the same time period is also returned.
"""
def get_recommendations(client, profiling_group_name, end_time, locale \\ nil, start_time, options \\ []) do
path_ = "/internal/profilingGroups/#{URI.encode(profiling_group_name)}/recommendations"
headers = []
query_ = []
query_ = if !is_nil(start_time) do
[{"startTime", start_time} | query_]
else
query_
end
query_ = if !is_nil(locale) do
[{"locale", locale} | query_]
else
query_
end
query_ = if !is_nil(end_time) do
[{"endTime", end_time} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
List the available reports for a given profiling group and time range.
"""
def list_findings_reports(client, profiling_group_name, daily_reports_only \\ nil, end_time, max_results \\ nil, next_token \\ nil, start_time, options \\ []) do
path_ = "/internal/profilingGroups/#{URI.encode(profiling_group_name)}/findingsReports"
headers = []
query_ = []
query_ = if !is_nil(start_time) do
[{"startTime", start_time} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(end_time) do
[{"endTime", end_time} | query_]
else
query_
end
query_ = if !is_nil(daily_reports_only) do
[{"dailyReportsOnly", daily_reports_only} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Lists the start times of the available aggregated profiles of a profiling group
for an aggregation period within the specified time range.
"""
def list_profile_times(client, profiling_group_name, end_time, max_results \\ nil, next_token \\ nil, order_by \\ nil, period, start_time, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/profileTimes"
headers = []
query_ = []
query_ = if !is_nil(start_time) do
[{"startTime", start_time} | query_]
else
query_
end
query_ = if !is_nil(period) do
[{"period", period} | query_]
else
query_
end
query_ = if !is_nil(order_by) do
[{"orderBy", order_by} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(end_time) do
[{"endTime", end_time} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of profiling groups.
The profiling groups are returned as [ `ProfilingGroupDescription`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ProfilingGroupDescription.html)
objects.
"""
def list_profiling_groups(client, include_description \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/profilingGroups"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(include_description) do
[{"includeDescription", include_description} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of the tags that are assigned to a specified resource.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Submits profiling data to an aggregated profile of a profiling group.
To get an aggregated profile that is created with this profiling data, use [
`GetProfile`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_GetProfile.html).
"""
def post_agent_profile(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/agentProfile"
{headers, input} =
[
{"contentType", "Content-Type"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"profileToken", "profileToken"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Adds permissions to a profiling group's resource-based policy that are provided
using an action group.
If a profiling group doesn't have a resource-based policy, one is created for it
using the permissions in the action group and the roles and users in the
`principals` parameter.
` The one supported action group that can be added is `agentPermission` which
grants `ConfigureAgent` and `PostAgent` permissions. For more information, see
[Resource-based policies in CodeGuru Profiler](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/resource-based-policies.html)
in the *Amazon CodeGuru Profiler User Guide*, [ `ConfigureAgent`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ConfigureAgent.html),
and [ `PostAgentProfile`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_PostAgentProfile.html).
The first time you call `PutPermission` on a profiling group, do not specify a
`revisionId` because it doesn't have a resource-based policy. Subsequent calls
must provide a `revisionId` to specify which revision of the resource-based
policy to add the permissions to.
The response contains the profiling group's JSON-formatted resource policy.
`
"""
def put_permission(client, action_group, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/policy/#{URI.encode(action_group)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@doc """
Remove one anomaly notifications channel for a profiling group.
"""
def remove_notification_channel(client, channel_id, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/notificationConfiguration/#{URI.encode(channel_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Removes permissions from a profiling group's resource-based policy that are
provided using an action group.
The one supported action group that can be removed is `agentPermission` which
grants `ConfigureAgent` and `PostAgent` permissions. For more information, see
[Resource-based policies in CodeGuru Profiler](https://docs.aws.amazon.com/codeguru/latest/profiler-ug/resource-based-policies.html)
in the *Amazon CodeGuru Profiler User Guide*, [ `ConfigureAgent`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ConfigureAgent.html),
and [ `PostAgentProfile`
](https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_PostAgentProfile.html).
"""
def remove_permission(client, action_group, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}/policy/#{URI.encode(action_group)}"
headers = []
{query_, input} =
[
{"revisionId", "revisionId"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Sends feedback to CodeGuru Profiler about whether the anomaly detected by the
analysis is useful or not.
"""
def submit_feedback(client, anomaly_instance_id, profiling_group_name, input, options \\ []) do
path_ = "/internal/profilingGroups/#{URI.encode(profiling_group_name)}/anomalies/#{URI.encode(anomaly_instance_id)}/feedback"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Use to assign one or more tags to a resource.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Use to remove one or more tags from a resource.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"tagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Updates a profiling group.
"""
def update_profiling_group(client, profiling_group_name, input, options \\ []) do
path_ = "/profilingGroups/#{URI.encode(profiling_group_name)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "codeguru-profiler"}
host = build_host("codeguru-profiler", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/code_guru_profiler.ex
| 0.913193
| 0.454533
|
code_guru_profiler.ex
|
starcoder
|
defmodule Uniq.UUID do
@moduledoc """
This module provides RFC 4122 compliant universally unique identifiers (UUIDs).
See the [README](README.md) for general usage information.
"""
use Bitwise, skip_operators: true
import Kernel, except: [to_string: 1]
defstruct [:format, :version, :variant, :time, :seq, :node, :bytes]
@type t :: <<_::128>>
@type formatted ::
t
| <<_::360>>
| <<_::288>>
| <<_::256>>
| <<_::176>>
@type format :: :default | :raw | :hex | :urn | :slug
@type namespace :: :dns | :url | :oid | :x500 | nil | formatted
@type info :: %__MODULE__{
format: :raw | :hex | :default | :urn | :slug,
version: 1..8,
variant: bitstring,
time: non_neg_integer,
seq: non_neg_integer,
node: <<_::48>>,
bytes: t
}
@formats [:default, :raw, :hex, :urn, :slug]
@namespaces [:dns, :url, :oid, :x500, nil]
# Namespaces
@dns_namespace_id Base.decode16!("6ba7b8109dad11d180b400c04fd430c8", case: :lower)
@url_namespace_id Base.decode16!("6ba7b8119dad11d180b400c04fd430c8", case: :lower)
@oid_namespace_id Base.decode16!("6ba7b8129dad11d180b400c04fd430c8", case: :lower)
@x500_namespace_id Base.decode16!("6ba7b8149dad11d180b400c04fd430c8", case: :lower)
@nil_id <<0::128>>
# Variants
@reserved_ncs <<0::1>>
@rfc_variant <<2::2>>
@reserved_ms <<6::3>>
@reserved_future <<7::3>>
defmacrop bits(n), do: quote(do: bitstring - size(unquote(n)))
defmacrop bytes(n), do: quote(do: binary - size(unquote(n)))
defmacrop uint(n), do: quote(do: unsigned - integer - size(unquote(n)))
defmacrop biguint(n), do: quote(do: big - unsigned - integer - size(unquote(n)))
@doc """
Generates a UUID using the version 1 scheme, as described in RFC 4122
This scheme is based on a few key properties:
* A timestamp, based on the count of 100-nanosecond intervals since the start of
the Gregorian calendar, i.e. October 15th, 1582, in Coordinated Universal Time (UTC).
* A clock sequence number, used to ensure that UUIDs generated with the same timestamp
are still unique, by incrementing the sequence each time a UUID is generated with the
same timestamp as the last UUID that was generated. This sequence is initialized with
random bytes at startup, to protect against conflicts.
* A node identifier, which is based on the MAC address of one of the network interfaces
on the system, or if unavailable, using random bytes. In our case, we specifically look
for the first network interface returned by `:inet.getifaddrs/0` that is up, broadcastable,
and has a hardware address, otherwise falling back to cryptographically strong random bytes.
"""
@spec uuid1() :: t
@spec uuid1(format) :: t
def uuid1(format \\ :default) do
{time, clock} = Uniq.Generator.next()
uuid1(time, clock, mac_address(), format)
end
@doc """
This function is the same as `uuid/1`, except the caller provides the clock sequence
value and the node identifier (which must be a 6-byte binary).
See `uuid/1` for details.
"""
@spec uuid1(clock_seq :: non_neg_integer, node :: <<_::48>>, format) :: t
def uuid1(clock_seq, <<node::bits(48)>>, format \\ :default)
when is_integer(clock_seq) and format in @formats do
{time, _} = Uniq.Generator.next()
uuid1(time, clock_seq, node, format)
end
defp uuid1(time, clock_seq, node, format) do
<<thi::12, tmid::16, tlo::32>> = <<time::biguint(60)>>
# Encode version into high bits of timestamp
thi = bor(thi, bsl(1, 12))
# Encode variant into high bits of clock sequence
clock_hi = bsr(band(clock_seq, 0x3F00), 8)
clock_hi = bor(clock_hi, 0x80)
clock_lo = band(clock_seq, 0xFF)
raw = <<tlo::32, tmid::16, thi::16, clock_hi::8, clock_lo::8, node::bits(48)>>
format(raw, format)
end
@doc """
Generates a UUID using the version 3 scheme, as described in RFC 4122
This scheme provides the means for generating UUIDs deterministically,
given a namespace and a name. This means that with the same inputs, you
get the same UUID as output.
The main difference between this and the version 5 scheme, is that version 3
uses MD5 for hashing, and version 5 uses SHA1. Both hashes are deprecated these
days, but you should prefer version 5 unless otherwise required.
In this scheme, the timestamp, clock sequence and node value are constructed
from the namespace and name, as described in RFC 4122, Section 4.3.
## Namespaces
You may choose one of several options for namespacing your UUIDs:
1. Use a predefined namespace. These are provided by RFC 4122 in order to provide
namespacing for common types of names. See below.
2. Use your own namespace. For this, simply generate a UUID to represent the namespace.
You may provide this UUID in whatever format is supported by `parse/1`.
3. Use `nil`. This is bound to a special-case UUID that has no intrinsic meaning, but is
valid for use as a namespace.
The set of predefined namespaces consist of the following:
* `:dns`, intended for namespacing fully-qualified domain names
* `:url`, intended for namespacing URLs
* `:oid`, intended for namespacing ISO OIDs
* `:x500`, intended for namespacing X.500 DNs (in DER or text output format)
## Notes
One thing to be aware of with version 3 and 5 UUIDs, is that unlike version 1 and 6,
the lexicographical ordering of UUIDs of generated one after the other, is entirely
random, as the most significant bits are dependent upon the hash of the namespace and
name, and thus not based on time or even the lexicographical ordering of the name.
This is generally worth the tradeoff in favor of determinism, but it is something to
be aware of.
Likewise, since the generation is deterministic, care must be taken to ensure that you
do not try to use the same name for two different objects within the same namespace. This
should be obvious, but since the other schemes are _not_ sensitive in this way, it is worth
calling out.
"""
@spec uuid3(namespace, name :: binary) :: t
@spec uuid3(namespace, name :: binary, format) :: t
def uuid3(namespace, name, format \\ :default)
when (namespace in @namespaces or is_binary(namespace)) and is_binary(name) and
format in @formats do
namespaced_uuid(3, :md5, namespace, name, format)
end
@doc """
Generates a UUID using the version 4 scheme, as described in RFC 4122
This scheme is like the version 1 scheme, except it uses randomly generated data
for the timestamp, clock sequence, and node fields.
This scheme is the closest you can get to truly unique identifiers, as they are based
on truly random (or pseudo-random) data, so the chances of generating the same UUID
twice is astronomically small.
## Notes
The version 4 scheme does have some deficiencies. Namely, since they are based on random
data, the lexicographical ordering of the resulting UUID is itself random, which can play havoc
with database indices should you choose to use UUIDs for primary keys.
It is strongly recommended to consider the version 6 scheme instead. They are almost the
same as a version 1 UUID, but with improved semantics that combine some of the beneficial
traits of version 4 UUIDs without the lexicographical ordering downsides. The only caveat
to that recommendation is if you need to pass them through a system that inspects the UUID
encoding itself and doesn't have preliminary support for version 6.
"""
@spec uuid4() :: t
@spec uuid4(format) :: t
def uuid4(format \\ :default) when format in @formats do
<<tlo_mid::48, _::4, thi::12, _::2, rest::62>> = :crypto.strong_rand_bytes(16)
raw = <<tlo_mid::48, 4::biguint(4), thi::12, @rfc_variant, rest::62>>
format(raw, format)
end
@doc """
Generates a UUID using the version 5 scheme, as described in RFC 4122
This scheme provides the means for generating UUIDs deterministically,
given a namespace and a name. This means that with the same inputs, you
get the same UUID as output.
The main difference between this and the version 5 scheme, is that version 3
uses MD5 for hashing, and version 5 uses SHA1. Both hashes are deprecated these
days, but you should prefer version 5 unless otherwise required.
In this scheme, the timestamp, clock sequence and node value are constructed
from the namespace and name, as described in RFC 4122, Section 4.3.
## Namespaces
You may choose one of several options for namespacing your UUIDs:
1. Use a predefined namespace. These are provided by RFC 4122 in order to provide
namespacing for common types of names. See below.
2. Use your own namespace. For this, simply generate a UUID to represent the namespace.
You may provide this UUID in whatever format is supported by `parse/1`.
3. Use `nil`. This is bound to a special-case UUID that has no intrinsic meaning, but is
valid for use as a namespace.
The set of predefined namespaces consist of the following:
* `:dns`, intended for namespacing fully-qualified domain names
* `:url`, intended for namespacing URLs
* `:oid`, intended for namespacing ISO OIDs
* `:x500`, intended for namespacing X.500 DNs (in DER or text output format)
## Notes
One thing to be aware of with version 3 and 5 UUIDs, is that unlike version 1 and 6,
the lexicographical ordering of UUIDs of generated one after the other, is entirely
random, as the most significant bits are dependent upon the hash of the namespace and
name, and thus not based on time or even the lexicographical ordering of the name.
This is generally worth the tradeoff in favor of determinism, but it is something to
be aware of.
Likewise, since the generation is deterministic, care must be taken to ensure that you
do not try to use the same name for two different objects within the same namespace. This
should be obvious, but since the other schemes are _not_ sensitive in this way, it is worth
calling out.
"""
@spec uuid5(namespace, name :: binary) :: t
@spec uuid5(namespace, name :: binary, format) :: t
def uuid5(namespace, name, format \\ :default)
when (namespace in @namespaces or is_binary(namespace)) and is_binary(name) and
format in @formats do
namespaced_uuid(5, :sha, namespace, name, format)
end
@doc """
Generates a UUID using the proposed version 6 scheme,
found [here](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format).
This is a draft extension of RFC 4122, but has not yet been formally accepted.
Version 6 provides the following benefits over versions 1 and 4:
* Like version 1, it is time-based, but unlike version 1, it is naturally sortable by time
in its raw binary encoded form
* Like version 4, it provides better guarantees of uniqueness and privacy, by basing itself
on random or pseudo-random data, rather than MAC addresses and other potentially sensitive
information.
* Unlike version 4, which tends to interact poorly with database indices due to being derived
entirely from random or psuedo-random data; version 6 ensures that the most significant bits
of the binary encoded form are a 1:1 match with the most significant bits of the timestamp on
which it was derived. This guarantees that version 6 UUIDs are naturally sortable in the order
in which they were generated (with some randomness among those which are generated at the same
time).
There have been a number of similar proposals that address the same set of flaws. For example:
* [KSUID](https://github.com/segmentio/ksuid)
* [ULID](https://github.com/ulid/spec)
"""
@spec uuid6() :: t
@spec uuid6(format) :: t
def uuid6(format \\ :default) when format in @formats do
{time, clock} = Uniq.Generator.next()
node = :crypto.strong_rand_bytes(6)
# Deconstruct timestamp
<<thi::48, tlo::12>> = <<time::biguint(60)>>
# Encode the version to the most significant bits of the last octet of the timestamp
tlo_and_version = <<6::4, tlo::12>>
# Encode the variant in the most significant bits of the clock sequence
clock_seq = <<@rfc_variant, clock::biguint(14)>>
raw = <<thi::48, tlo_and_version::bits(16), clock_seq::bits(16), node::bits(48)>>
format(raw, format)
end
defp namespaced_uuid(version, algorithm, namespace, name, format) do
id = namespace_id(namespace)
<<tlo_mid::48, _::4, thi::12, _::2, rest::62>> = hash(algorithm, id <> name)
raw = <<tlo_mid::48, version::4, thi::12, @rfc_variant, rest::62>>
format(raw, format)
end
@doc """
Like `info/1`, but raises if the input UUID is invalid.
"""
@spec info!(binary, :struct) :: info | no_return
@spec info!(binary, :keyword) :: Keyword.t() | no_return
def info!(bin, style \\ :struct)
def info!(bin, style) when is_binary(bin) do
with {:ok, info} <- info(bin, style) do
info
else
{:error, reason} ->
raise ArgumentError, message: "invalid uuid: #{inspect(reason)}"
end
end
def info!(_, _) do
raise ArgumentError, message: "invalid uuid: :invalid_format"
end
@doc """
This function parses the given UUID, in any of the supported encodings/formats, and produces
the information gleaned from the encoded data.
Two styles of information are supported, depending on whether the function is called via
the compatibility shim for `:elixir_uuid`, or directly. You may pass `:struct` or `:keyword`
manually if you wish to express a preference for one style or the other.
The `:struct` form is the UUID structure used internally by this library, and it contains all
of the information needed to re-encode the UUID as binary.
The `:keyword` form matches 1:1 the keyword list produced by `UUID.info/1` provided by the
`:elixir_uuid` library, and it contains slightly less information, but is useful for compatibility
with legacy code that operates on that structure.
# Examples
iex> Uniq.UUID.info("870df8e8-3107-4487-8316-81e089b8c2cf", :keyword)
{:ok, [uuid: "870df8e8-3107-4487-8316-81e089b8c2cf",
binary: <<135, 13, 248, 232, 49, 7, 68, 135, 131, 22, 129, 224, 137, 184, 194, 207>>,
type: :default,
version: 4,
variant: :rfc4122]}
iex> Uniq.UUID.info("870df8e8-3107-4487-8316-81e089b8c2cf")
{:ok, %Uniq.UUID{
format: :default,
version: 4,
variant: <<2::2>>,
time: 326283406408022248,
seq: 790,
node: <<129, 224, 137, 184, 194, 207>>,
bytes: <<135, 13, 248, 232, 49, 7, 68, 135, 131, 22, 129, 224, 137, 184, 194, 207>>,
}}
"""
@spec info(binary, :struct) :: {:ok, info} | {:error, term}
@spec info(binary, :keyword) :: {:ok, Keyword.t()} | {:error, term}
def info(bin, style \\ :struct)
# Compatibility with :elixir_uuid's info
def info(bin, :keyword) when is_binary(bin) do
with {:ok, uuid} <- parse(bin) do
{:ok,
[
uuid: uuid |> to_string() |> String.downcase(),
binary: uuid.bytes,
type: uuid.format,
version: uuid.version,
variant: format_variant(uuid.variant)
]}
end
end
def info(bin, :struct) when is_binary(bin) do
parse(bin)
end
def info(_, style) when style in [:keyword, :struct],
do: {:error, :invalid_format}
@doc """
Returns true if the given string is a valid UUID.
## Options
* `strict: boolean`, if true, requires strict RFC 4122 conformance,
i.e. version 6 is considered invalid
"""
@spec valid?(binary) :: boolean
@spec valid?(binary, Keyword.t()) :: boolean
def valid?(bin, opts \\ [])
def valid?(bin, opts) do
strict? = Keyword.get(opts, :strict, false)
case parse(bin) do
{:ok, %__MODULE__{version: 6}} when strict? ->
false
{:ok, _} ->
true
{:error, _} ->
false
end
end
@doc """
Parses a `#{__MODULE__}` from a binary.
Supported formats include human-readable strings, as well as
the raw binary form of the UUID.
## Examples
iex> {:ok, uuid} = Uniq.UUID.parse("f81d4fae-7dec-11d0-a765-00a0c91e6bf6")
{:ok, %Uniq.UUID{
bytes: <<248, 29, 79, 174, 125, 236, 17, 208, 167, 101, 0, 160, 201, 30, 107, 246>>,
format: :default,
node: <<0, 160, 201, 30, 107, 246>>,
seq: 10085,
time: 130742845922168750,
variant: <<2::size(2)>>,
version: 1
}}
...> {:ok, %Uniq.UUID{uuid | format: :urn}} == Uniq.UUID.parse("urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6")
true
iex> match?({:ok, %Uniq.UUID{format: :default, version: 1}}, Uniq.UUID.uuid1() |> Uniq.UUID.parse())
true
"""
@spec parse(binary) :: {:ok, info} | {:error, term}
def parse(bin)
def parse("urn:uuid:" <> uuid) do
with {:ok, uuid} <- parse(uuid) do
{:ok, %__MODULE__{uuid | format: :urn}}
end
end
def parse(<<_::128>> = bin),
do: parse_raw(bin, %__MODULE__{format: :raw})
def parse(<<bin::bytes(32)>>) do
bin
|> decode_hex()
|> parse_raw(%__MODULE__{format: :hex})
rescue
ArgumentError ->
{:error, {:invalid_format, :hex}}
end
def parse(<<a::bytes(8), ?-, b::bytes(4), ?-, c::bytes(4), ?-, d::bytes(4), ?-, e::bytes(12)>>) do
with {:ok, bin} <- Base.decode16(a <> b <> c <> d <> e, case: :mixed) do
parse_raw(bin, %__MODULE__{format: :default})
else
:error ->
{:error, {:invalid_format, :default}}
end
end
def parse(<<uuid::bytes(22)>>) do
with {:ok, value} <- Base.url_decode64(uuid <> "==") do
parse_raw(value, %__MODULE__{format: :slug})
else
_ ->
{:error, {:invalid_format, :slug}}
end
end
def parse(_bin), do: {:error, :invalid_format}
# Parse version
defp parse_raw(<<_::48, version::uint(4), _::bitstring>> = bin, acc) do
case version do
v when v in [1, 3, 4, 5, 6] ->
with {:ok, uuid} <- parse_raw(version, bin, acc) do
{:ok, %__MODULE__{uuid | bytes: bin}}
end
_ when bin == @nil_id ->
{:ok, %__MODULE__{acc | bytes: @nil_id}}
_ ->
{:error, {:unknown_version, version}}
end
end
# Parse variant
defp parse_raw(version, <<time::64, @reserved_ncs, rest::bits(63)>>, acc),
do: parse_raw(version, @reserved_ncs, time, rest, acc)
defp parse_raw(version, <<time::64, @rfc_variant, rest::bits(62)>>, acc),
do: parse_raw(version, @rfc_variant, time, rest, acc)
defp parse_raw(version, <<time::64, @reserved_ms, rest::bits(61)>>, acc),
do: parse_raw(version, @reserved_ms, time, rest, acc)
defp parse_raw(version, <<time::64, @reserved_future, rest::bits(61)>>, acc),
do: parse_raw(version, @reserved_future, time, rest, acc)
defp parse_raw(_version, <<_time::64, variant::bits(3), _rest::bits(61)>>, _acc) do
{:error, {:unknown_variant, variant}}
end
# Parses RFC 4122, version 1-5 uuids
defp parse_raw(version, variant, time, rest, acc) when version < 6 do
variant_size = bit_size(variant)
clock_hi_size = 8 - variant_size
clock_size = 8 + clock_hi_size
with <<time_lo::bits(32), time_mid::bits(16), _version::4, time_hi::bits(12)>> <-
<<time::64>>,
<<timestamp::uint(60)>> <-
<<time_hi::bits(12), time_mid::bits(16), time_lo::bits(32)>>,
<<clock_hi::bits(clock_hi_size), clock_lo::bits(8), node::bits(48)>> <-
rest,
<<clock::uint(clock_size)>> <-
<<clock_hi::bits(clock_hi_size), clock_lo::bits(8)>> do
{:ok,
%__MODULE__{
acc
| version: version,
variant: variant,
time: timestamp,
seq: clock,
node: node
}}
else
other ->
{:error, {:invalid_format, other, variant_size, clock_hi_size, clock_size}}
end
end
# Parses proposed version 6 uuids, which are very much like version 1, but with some field ordering changes
defp parse_raw(6, <<fc00:e968:6179::de52:7100, 0::1>> = variant, time, rest, acc) do
with <<time_hi::48, _version::4, time_lo::12>> <- <<time::64>>,
<<timestamp::uint(60)>> <- <<time_hi::48, time_lo::12>>,
<<clock::uint(14), node::bits(48)>> <-
rest do
{:ok,
%__MODULE__{
acc
| version: 6,
variant: variant,
time: timestamp,
seq: clock,
node: node
}}
else
_ ->
{:error, {:invalid_format, :v6}}
end
end
defp parse_raw(6, variant, _time, _rest, _acc), do: {:error, {:invalid_variant, variant}}
# Handles proposed version 7 and 8 uuids
defp parse_raw(version, _variant, _time, _rest, _acc),
do: {:error, {:unsupported_version, version}}
@doc """
Formats a `#{__MODULE__}` as a string, using the format it was originally generated with.
See `to_string/2` if you want to specify what format to produce.
"""
@spec to_string(formatted | info) :: String.t()
def to_string(uuid)
def to_string(<<raw::bits(128)>>),
do: format(raw, :default)
def to_string(uuid) when is_binary(uuid) do
uuid
|> string_to_binary!()
|> format(:default)
end
def to_string(%__MODULE__{bytes: raw, format: format}),
do: format(raw, format)
@doc """
Same as `to_string/1`, except you can specify the desired format.
The `format` can be one of the following:
* `:default`, produces strings like `"f81d4fae-7dec-11d0-a765-00a0c91e6bf6"`
* `:urn`, produces strings like `"urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6"`
* `:hex`, produces strings like `"f81d4fae7dec11d0a76500a0c91e6bf6"`
* `:slug`, produces strings like `"-B1Prn3sEdCnZQCgyR5r9g=="`
* `:raw`, produces the raw binary encoding of the uuid in 128 bits
"""
@spec to_string(formatted | info, format) :: String.t()
def to_string(uuid, format)
def to_string(<<raw::bits(128)>>, format) when format in @formats,
do: format(raw, format)
def to_string(uuid, format) when is_binary(uuid) and format in @formats do
uuid
|> string_to_binary!()
|> format(format)
end
def to_string(%__MODULE__{bytes: raw}, format) when format in @formats,
do: format(raw, format)
@doc """
This function takes a UUID string in any of the formats supported by `to_string/1`,
and returns the raw, binary-encoded form.
"""
@spec string_to_binary!(String.t()) :: t | no_return
def string_to_binary!(str)
def string_to_binary!(<<_::128>> = uuid), do: uuid
def string_to_binary!(<<hex::bytes(32)>>) do
decode_hex(hex)
end
def string_to_binary!(
<<a::bytes(8), ?-, b::bytes(4), ?-, c::bytes(4), ?-, d::bytes(4), ?-, e::bytes(12)>>
) do
decode_hex(a <> b <> c <> d <> e)
end
def string_to_binary!(<<slug::bytes(22)>>) do
with {:ok, value} <- Base.url_decode64(slug <> "==") do
value
else
_ ->
raise ArgumentError, message: "invalid uuid string"
end
end
def string_to_binary!(_) do
raise ArgumentError, message: "invalid uuid string"
end
@doc """
Compares two UUIDs, using their canonical 128-bit integer form, as described in RFC 4122.
You may provide the UUIDs in either string, binary, or as a `Uniq.UUID` struct.
"""
@spec compare(String.t() | info, String.t() | info) :: :lt | :eq | :gt
def compare(a, b)
def compare(%__MODULE__{} = a, %__MODULE__{} = b) do
a = to_canonical_integer(a)
b = to_canonical_integer(b)
do_compare(a, b)
end
def compare(%__MODULE__{} = a, <<b::biguint(128)>>) do
a = to_canonical_integer(a)
do_compare(a, b)
end
def compare(%__MODULE__{} = a, b) when is_binary(b) do
a = to_canonical_integer(a)
b = string_to_binary!(b)
do_compare(a, b)
end
def compare(<<a::biguint(128)>>, %__MODULE__{} = b) do
b = to_canonical_integer(b)
do_compare(a, b)
end
def compare(a, %__MODULE__{} = b) when is_binary(a) do
a = string_to_binary!(a)
b = to_canonical_integer(b)
do_compare(a, b)
end
def compare(<<a::biguint(128)>>, <<b::biguint(128)>>),
do: do_compare(a, b)
def compare(a, b) when is_binary(a) and is_binary(b) do
a = to_string(a)
b = to_string(b)
do_compare(a, b)
end
defp do_compare(a, b) do
cond do
a < b ->
:lt
a == b ->
:eq
:else ->
:gt
end
end
defp to_canonical_integer(%__MODULE__{bytes: <<value::biguint(128)>>}) do
value
end
@doc false
def format(raw, format)
def format(raw, :raw), do: raw
def format(raw, :default), do: format_default(raw)
def format(raw, :hex), do: encode_hex(raw)
def format(raw, :urn), do: "urn:uuid:#{format(raw, :default)}"
def format(raw, :slug), do: Base.url_encode64(raw, padding: false)
@compile {:inline, [format_default: 1]}
defp format_default(<<
fc00:db20:35b:7399::5,
fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,
afd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,
fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b,
fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b,
fc00:db20:35b:7399::5,
fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,
fc00:e968:6179::de52:7100,
fc00:db20:35b:7399::5,
fdf8:f53e:61e4::18,
b3::4,
b4::4,
cfdf8:f53e:61e4::18,
cfc00:db20:35b:7399::5,
c3::4,
c4::4,
dfdf8:f53e:61e4::18,
dfc00:db20:35b:7399::5,
dfd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,
dfc00:e968:6179::de52:7100,
fc00:db20:35b:7399::5,
fdf8:f53e:61e4::18,
fc00:db20:35b:7399::5,
efc00:e968:6179::de52:7100,
efc00:db20:35b:7399::5,
e6::4,
e7::4,
e8::4,
e9::4,
e1fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,
e1fdf8:f53e:61e4::18,
efc00:db20:35b:7399::5
>>) do
<<e(a1), e(a2), e(a3), e(a4), e(a5), e(a6), e(a7), e(a8), ?-, e(b1), e(b2), e(b3), e(b4), ?-,
e(c1), e(c2), e(c3), e(c4), ?-, e(d1), e(d2), e(d3), e(d4), ?-, e(e1), e(e2), e(e3), e(e4),
e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12)>>
end
@doc false
defp format_variant(@reserved_future), do: :reserved_future
defp format_variant(@reserved_ms), do: :reserved_microsoft
defp format_variant(@rfc_variant), do: :rfc4122
defp format_variant(@reserved_ncs), do: :reserved_ncs
defp format_variant(_), do: :unknown
defp mac_address do
candidate_interface? = fn {_if, info} ->
flags = Keyword.get(info, :flags, [])
Enum.member?(flags, :up) and Enum.member?(flags, :broadcast) and
Keyword.has_key?(info, :hwaddr)
end
with {:ok, interfaces} <- :inet.getifaddrs(),
{_if, info} <- Enum.find(interfaces, candidate_interface?) do
IO.iodata_to_binary(info[:hwaddr])
else
_ ->
# In lieu of a MAC address, we can generate an equivalent number of random bytes
<<head::7, _::1, tail::46>> = :crypto.strong_rand_bytes(6)
# Ensure the multicast bit is set, as per RFC 4122
<<head::7, fc00:e968:6179::de52:7100, tail::46>>
end
end
defp hash(:md5, data), do: :crypto.hash(:md5, data)
defp hash(:sha, data), do: :binary.part(:crypto.hash(:sha, data), 0, 16)
defp namespace_id(:dns), do: @dns_namespace_id
defp namespace_id(:url), do: @url_namespace_id
defp namespace_id(:oid), do: @oid_namespace_id
defp namespace_id(:x500), do: @x500_namespace_id
defp namespace_id(nil), do: @nil_id
defp namespace_id(<<_::128>> = ns), do: ns
defp namespace_id(<<ns::bytes(32)>>) do
with {:ok, raw} <- Base.decode16(ns, case: :mixed) do
raw
else
_ ->
invalid_namespace!()
end
end
defp namespace_id(
<<a::bytes(8), ?-, b::bytes(4), ?-, c::bytes(4), ?-, d::bytes(4), ?-, e::bytes(12)>>
) do
with {:ok, raw} <- Base.decode16(a <> b <> c <> d <> e, case: :mixed) do
raw
else
_ ->
invalid_namespace!()
end
end
defp namespace_id(<<ns::bytes(22)>>) do
with {:ok, raw} <- Base.url_decode64(ns <> "==") do
raw
else
_ ->
invalid_namespace!()
end
end
defp namespace_id(_ns), do: invalid_namespace!()
defp invalid_namespace!,
do:
raise(ArgumentError,
message: "expected a valid namespace atom (:dns, :url, :oid, :x500), or a UUID string"
)
otp_version = Application.get_env(:uniq, :otp_version, Version.parse!("1.0.0"))
@compile {:inline, [encode_hex: 1, decode_hex: 1]}
if Version.match?(otp_version, ">= 24.0.0", allow_pre: true) do
defp encode_hex(bin), do: :binary.encode_hex(bin)
defp decode_hex(bin), do: :binary.decode_hex(bin)
else
defp encode_hex(bin), do: IO.iodata_to_binary(for <<bs::4 <- bin>>, do: e(bs))
defp decode_hex(
<<a1, a2, a3, a4, a5, a6, a7, a8, b1, b2, b3, b4, c1, c2, c3, c4, d1, d2, d3, d4, e1,
e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12>>
) do
<<d(a1)::4, d(a2)::4, d(a3)::4, d(a4)::4, d(a5)::4, d(a6)::4, d(a7)::4, d(a8)::4, d(b1)::4,
d(b2)::4, d(b3)::4, d(b4)::4, d(c1)::4, d(c2)::4, d(c3)::4, d(c4)::4, d(d1)::4, d(d2)::4,
d(d3)::4, d(d4)::4, d(e1)::4, d(e2)::4, d(e3)::4, d(e4)::4, d(e5)::4, d(e6)::4, d(e7)::4,
d(e8)::4, d(e9)::4, d(e10)::4, d(e11)::4, d(e12)::4>>
catch
:throw, char ->
raise ArgumentError, message: "#{inspect(<<char::utf8>>)} is not valid hex"
end
@compile {:inline, d: 1}
defp d(?0), do: 0
defp d(?1), do: 1
defp d(?2), do: 2
defp d(?3), do: 3
defp d(?4), do: 4
defp d(?5), do: 5
defp d(?6), do: 6
defp d(?7), do: 7
defp d(?8), do: 8
defp d(?9), do: 9
defp d(?A), do: 10
defp d(?B), do: 11
defp d(?C), do: 12
defp d(?D), do: 13
defp d(?E), do: 14
defp d(?F), do: 15
defp d(?a), do: 10
defp d(?b), do: 11
defp d(?c), do: 12
defp d(?d), do: 13
defp d(?e), do: 14
defp d(?f), do: 15
defp d(char), do: throw(char)
end
@compile {:inline, e: 1}
defp e(0), do: ?0
defp e(1), do: ?1
defp e(2), do: ?2
defp e(3), do: ?3
defp e(4), do: ?4
defp e(5), do: ?5
defp e(6), do: ?6
defp e(7), do: ?7
defp e(8), do: ?8
defp e(9), do: ?9
defp e(10), do: ?a
defp e(11), do: ?b
defp e(12), do: ?c
defp e(13), do: ?d
defp e(14), do: ?e
defp e(15), do: ?f
## Ecto
if Code.ensure_loaded?(Ecto.ParameterizedType) do
use Ecto.ParameterizedType
@doc false
@impl Ecto.ParameterizedType
def init(opts) do
schema = Keyword.fetch!(opts, :schema)
field = Keyword.fetch!(opts, :field)
format = Keyword.get(opts, :format, :default)
dump = Keyword.get(opts, :dump, :raw)
unless format in @formats do
raise ArgumentError,
message:
"invalid :format option, expected one of #{Enum.join(@formats, ",")}; got #{inspect(format)}"
end
unless dump in @formats do
raise ArgumentError,
message:
"invalid :dump option, expected one of #{Enum.join(@formats, ",")}; got #{inspect(format)}"
end
version = Keyword.get(opts, :version, 4)
unless version in [1, 3, 4, 5, 6] do
raise ArgumentError,
message:
"invalid uuid version, expected one of 1, 3, 4, 5, or 6; got #{inspect(version)}"
end
namespace = Keyword.get(opts, :namespace)
case namespace do
nil when version in [3, 5] ->
raise ArgumentError,
message: "you must set :namespace to a valid uuid when :version is 3 or 5"
nil ->
:ok
ns when ns in [:dns, :url, :oid, :x500] ->
raise ArgumentError,
message:
"you must set :namespace to a uuid, the predefined namespaces are not permitted here"
ns when is_binary(ns) ->
:ok
ns ->
raise ArgumentError,
message: "expected :namespace to be a binary, but got #{inspect(ns)}"
end
%{
schema: schema,
field: field,
format: format,
dump: dump,
version: version,
namespace: namespace
}
end
@doc false
@impl Ecto.ParameterizedType
def type(%{dump: :raw}), do: :binary
def type(_), do: :string
# This is provided as a helper for autogenerating version 3 or 5 uuids
@doc false
@impl Ecto.ParameterizedType
def autogenerate(%{format: format, version: version, namespace: namespace}) do
case version do
1 ->
uuid1(format)
4 ->
uuid4(format)
6 ->
uuid6(format)
v when v in [3, 5] ->
# 64 bits of entropy should be more than sufficient, since the total entropy
# of the input here is 192 bits, which we get from the namespace (128 bits) + the name (64 bits).
# That is then represented using only 128 bits (an entire MD5 hash, or 128 of the
# 160 bits of a SHA1 hash). In short, its doubtful that using more than 8 bytes
# of random data is going to have any appreciable benefit on uniqueness. Discounting
# the namespace, the total entropy is only 64 bits, which in practice is constrained
# by the hash itself, which is then further constrained by the fact that 6 bits of the
# UUID are reserved for version and variant information. In short, even though we are
# assuming a namespace that can contain 2^64 unique values, in practice it is less than
# that, though it still leaves room for an astronomical number of unique identifiers.
name = :crypto.strong_rand_bytes(8)
case v do
3 -> uuid3(namespace, name, format)
5 -> uuid5(namespace, name, format)
end
end
end
@doc false
@impl Ecto.ParameterizedType
def cast(data, params)
def cast(uuid, %{format: format}) when is_binary(uuid) do
{:ok, to_string(uuid, format)}
rescue
ArgumentError ->
:error
end
def cast(%__MODULE__{} = uuid, %{format: format}),
do: {:ok, to_string(uuid, format)}
def cast(nil, _params), do: {:ok, nil}
def cast(_, _params), do: :error
@doc false
@impl Ecto.ParameterizedType
def load(value, loader, params)
def load(uuid, _loader, %{format: format}) when is_binary(uuid) do
{:ok, to_string(uuid, format)}
rescue
ArgumentError ->
:error
end
def load(nil, _loader, _params),
do: {:ok, nil}
@doc false
@impl Ecto.ParameterizedType
def dump(value, dumper, params)
def dump(%__MODULE__{} = uuid, _dumper, %{dump: format}),
do: {:ok, to_string(uuid, format)}
def dump(uuid, _dumper, %{dump: format}) when is_binary(uuid) do
{:ok, to_string(uuid, format)}
rescue
ArgumentError ->
:error
end
def dump(nil, _dumper, _params),
do: {:ok, nil}
@doc false
@impl Ecto.ParameterizedType
def embed_as(_format, _params), do: :self
@doc false
@impl Ecto.ParameterizedType
def equal?(a, b, params)
def equal?(nil, nil, _), do: true
def equal?(nil, b, _), do: to_string(b, :raw) == @nil_id
def equal?(a, nil, _), do: to_string(a, :raw) == @nil_id
def equal?(a, b, _), do: compare(to_string(a), to_string(b)) == :eq
end
defimpl String.Chars do
alias Uniq.UUID
def to_string(uuid), do: UUID.to_string(uuid)
end
defimpl Inspect do
import Inspect.Algebra
def inspect(%Uniq.UUID{bytes: bytes, version: version}, opts) do
# Allow overriding the format in which UUIDs are displayed via custom inspect options
format = Keyword.get(opts.custom_options, :format, :default)
uuid = Uniq.UUID.to_string(bytes, format)
concat(["#UUIDv", Kernel.to_string(version), "<", uuid, ">"])
end
end
end
|
lib/uuid.ex
| 0.879419
| 0.608158
|
uuid.ex
|
starcoder
|
defmodule APIacAuthBearer do
@moduledoc """
An `APIac.Authenticator` plug for API authentication using the OAuth2 `Bearer` scheme
The OAuth2 `Bearer` scheme is documented in
[RFC6750 - The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750)
and consists in sending an OAuth2 access token in the HTTP request. Any party
in possession of that token can use it on the API, hence its name: 'Bearer'.
```http
GET /api/accounts HTTP/1.1
Host: example.com
Authorization: Bearer NLdtYEY8Y4Q09kKBUnsYy9mExGQnBy
Accept: */*
```
That bearer token has been granted beforehand by an OAuth2 authorization server to the
client making the API request (typically through one of the 4
[RFC6749](https://tools.ietf.org/html/rfc6749) flows or one of the 3
[OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html) flows).
Note that according to the specification, the bearer can be sent:
- in the `Authorization` HTTP header
- in the request body (assuming the request has one)
- as a query parameter
The `bearer_extract_methods` plug option allows to specify where to seek the bearer.
Bearer tokens are usually:
- opaque tokens, to be validated against the OAuth2 authorization server that has released it
- self-contained signed JWT tokens, that can be verified locally by the API
## Validating the access token
This plug provides with 2 bearer verification implementations:
- `APIacAuthBearer.Validator.Introspect` which implements
[RFC7662 - OAuth 2.0 Token Introspection](https://tools.ietf.org/html/rfc7662), and
which consists in requesting validation of the token on the authorization server
that has issued it
- `APIacAuthBearer.Validator.JWT` which implements
[JSON Web Token (JWT) Profile for OAuth 2.0 Access Tokens](https://tools.ietf.org/html/draft-ietf-oauth-access-token-jwt-07)
and which consists in locally verifying signed (and possibly encrypted)
tokens, using the cryptographic keys of the authorization server and of the current
API (using this plug)
A validator must implement the `APIacAuthBearer.Validator` behaviour.
## Caching
A bearer token may be used many times on an API in a short time-frame,
which is why caching is important
when using `APIacAuthBearer.Validator.Introspect` or a similar mechanism as a
back pressure mechanism for the authorization server. This plug comes with 4 caching
implementations:
| Caching implementation | Repository | Use-case |
|--------------------------------|:----------:|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
| APIacAuthBearer.Cache.NoCache | Built-in | No caching, for testing purpose or when using a custom validator that doesn't require caching |
| APIacAuthBearer.Cache.ETSMock | Built-in | Local cache in ETS table, for testing purpose, development environment, etc. Does not have a token expiration clean-up code: the cache will grow endlessly |
| APIacAuthBearerCacheCachex | [github](https://github.com/tanguilp/apiac_auth_bearer_cache_cachex) | Production ready cache, for a single instance or a small cluster of nodes |
| APIacAuthBearerCacheRiak | Work in progress | Production ready cache, for larger clusters of nodes |
A cache implements the `APIacAuthBearer.Cache` behaviour.
## Validation flow sequence diagram

## Plug options
- `realm`: a mandatory `String.t` that conforms to the HTTP quoted-string syntax,
however without
the surrounding quotes (which will be added automatically when needed).
Defaults to `default_realm`
- `bearer_validator`: a `{validator_module, validator_options}` tuple where
`validator_module` is
a module implementing the `APIacAuthBearer.Validator` behaviour and `validator_options`
module-specific options that will be passed to the validator when called. No default
value, mandatory parameter
- `bearer_extract_methods`: a list of methods that will be tried to extract the bearer
token, among `:header`, `:body` and `:query`. Methods will be tried in the list order.
Defaults to `[:header]`
- `set_error_response`: function called when authentication failed. Defaults to
`APIacAuthBearer.send_error_response/3`
- `error_response_verbosity`: one of `:debug`, `:normal` or `:minimal`.
Defaults to `:normal`
- `required_scopes`: a list of scopes required to access this API. Defaults to `[]`.
When the bearer's granted scope are
not sufficient, an HTTP 403 response is sent with the `insufficient_scope` RFC6750 error
- `forward_bearer`: if set to `true`, the bearer is saved in the `Plug.Conn` APIac
metadata (under the "bearer" key) and can be later be retrieved using `APIac.metadata/1`.
Defaults to `false`
- `forward_metadata`: in addition to the bearer's `client` and `subject`, list of the
validator's response to set in the APIac metadata, or the `:all` atom to forward all
of the response's data.
For example: `["username", "aud"]`. Defaults to `[]`
- `resource_indicator`: the name of the resource server as a String, to be
checked against the `aud` attribute returned by the validator. This is an optional
security mecanism for RFC7662 and mandatory for JWT access tokens. See the security
consideration sections. Defaults to `nil`, i.e. no check of this parameter
- `cache`: a `{cache_module, cache_options}` tuple where `cache_module` is
a module implementing the `APIacAuthBearer.Cache` behaviour and `cache_options`
module-specific options that will be passed to the cache when called.
The cached entry expiration ttl can be set thanks to the `:ttl` option. It is set to
200 seconds by default, but is shortened when the bearer's lifetime is less than 200
seconds (as indicated by its expiration timestamp of the `"exp"` member of bearer
metadata returned by the validator)
Defaults to `{APIacAuthBearer.Cache.NoCache, [ttl: 200]}`
## Error responses
This plug, conforming to RFC6750, responds with the following status and parameters
in case of authentication failure when `:error_response_verbosity` is set to `:normal`:
| Error | HTTP status | Included WWW-Authenticate parameters |
|-----------------------------------------|:-----------:|--------------------------------------|
| No bearer token found | 401 | - realm |
| Invalid bearer | 401 | - realm<br>- error |
| Bearer doesn't have the required scopes | 403 | - realm<br>- error<br>- scope |
For other `:error_response_verbosity` values, see the documentation of the
`send_error_response/3` function.
## Example
```elixir
plug APIacAuthBearer, bearer_validator: {
APIacAuthBearer.Validator.Introspect,
[
issuer: "https://example.com/auth"
tesla_middleware:[
{Tesla.Middleware.BasicAuth, [username: "client_id_123", password: "<PASSWORD>"]}
]
]},
bearer_extract_methods: [:header, :body],
required_scopes: ["article:write", "comments:moderate"],
forward_bearer: true,
resource_indicator: "https://example.com/api/data"
cache: {APIacAuthBearerCacheCachex, []}
```
## Security considerations
### HTTPS
As the bearer token is sent in an HTTP header, use of HTTPS is **mandatory**
(but however not verfified by this Plug).
### Bearer methods
As stated by RFC6750, section 2:
> This section defines three methods of sending bearer access tokens in
> resource requests to resource servers. Clients **MUST NOT** use more
> than one method to transmit the token in each request.
This plug does not check whether several methods are used or not. It will
only deal with the first bearer (valid or not) found following the order
of the `bearer_extract_methods`.
### Form-Encoded Body Parameter
RFC6750, section 2.2, demands that the following conditions are met for
form-encoded body bearer access token:
> o The HTTP request entity-header includes the "Content-Type" header
> field set to "application/x-www-form-urlencoded".
>
> o The entity-body follows the encoding requirements of the
> "application/x-www-form-urlencoded" content-type as defined by
> HTML 4.01 [W3C.REC-html401-19991224].
>
> o The HTTP request entity-body is single-part.
>
> o The content to be encoded in the entity-body MUST consist entirely
> of ASCII [USASCII] characters.
>
> o The HTTP request method is one for which the request-body has
> defined semantics. In particular, this means that the "GET"
> method MUST NOT be used.
This plug, however:
- doesn't verify that the HTTP request entity-body is single-part
- the content is entirely US-ASCCI (the plug parser checks that it
is [utf8](https://github.com/elixir-plug/plug/blob/master/lib/plug/parsers/urlencoded.ex#L31)
### Audience
RFC6750, section 5.2, states that:
> To deal with token redirect, it is important for the authorization
> server to include the identity of the intended recipients (the
> audience), typically a single resource server (or a list of resource
> servers), in the token. Restricting the use of the token to a
> specific scope is also RECOMMENDED.
Consider implementing it using the `resource_indicator` parameter when using the
RFC7662 introspection validator.
### URI Query Parameter
According to RFC6750, section 2.3,:
> Clients using the URI Query Parameter method SHOULD also send a
> Cache-Control header containing the "no-store" option. Server
> success (2XX status) responses to these requests SHOULD contain a
> Cache-Control header with the "private" option.
This plug does set the `cache-control` to `private` when such a method
is used. Beware, however, of not overwriting it later unless you
know what you're doing.
"""
@behaviour Plug
@behaviour APIac.Authenticator
alias OAuth2Utils.Scope, as: Scope
alias OAuth2Utils.Scope.Set, as: ScopeSet
@default_realm_name "default_realm"
@type bearer :: String.t()
@impl Plug
def init(opts) do
realm = Keyword.get(opts, :realm, @default_realm_name)
if not is_binary(realm), do: raise("Invalid realm, must be a string")
if not APIac.rfc7230_quotedstring?("\"#{realm}\""),
do: raise("Invalid realm string (do not conform with RFC7230 quoted string)")
required_scopes = ScopeSet.new(Keyword.get(opts, :required_scopes, []))
Enum.each(
required_scopes,
fn scope ->
if not Scope.oauth2_scope?(scope) do
raise "Invalid scope in list required scopes"
end
end
)
if opts[:bearer_validator] == nil,
do: raise("Missing mandatory option `bearer_validator`")
{validator_module, validator_opts} = opts[:bearer_validator]
validator_opts =
Keyword.take(opts, [:resource_indicator])
|> Keyword.merge(validator_opts)
case validator_module.validate_opts(validator_opts) do
:ok ->
:ok
{:error, reason} ->
raise reason
end
{cache_module, cache_opts} =
Keyword.get(opts, :cache, {APIacAuthBearer.Cache.NoCache, []})
cache_opts = Keyword.put_new(cache_opts, :ttl, 200)
opts
|> Enum.into(%{})
|> Map.put(:realm, realm)
|> Map.put_new(:bearer_extract_methods, [:header])
|> Map.put_new(:set_error_response, &APIacAuthBearer.send_error_response/3)
|> Map.put_new(:error_response_verbosity, :normal)
|> Map.put(:required_scopes, required_scopes)
|> Map.put_new(:forward_bearer, false)
|> Map.put_new(:forward_metadata, [])
|> Map.put(:cache, {cache_module, cache_module.init_opts(cache_opts)})
|> Map.put(:validator_module, validator_module)
|> Map.put(:validator_opts, validator_opts)
end
@impl Plug
@spec call(Plug.Conn.t(), Plug.opts()) :: Plug.Conn.t()
def call(conn, %{} = opts) do
if APIac.authenticated?(conn) do
conn
else
do_call(conn, opts)
end
end
def do_call(conn, opts) do
with {:ok, conn, credentials} <- extract_credentials(conn, opts),
{:ok, conn} <- validate_credentials(conn, credentials, opts) do
conn
else
{:error, conn, %APIac.Authenticator.Unauthorized{} = error} ->
opts[:set_error_response].(conn, error, opts)
end
end
@doc """
`APIac.Authenticator` credential extractor callback
Returns the credentials under the form `String.t()` which
is the bearer token
"""
@impl APIac.Authenticator
def extract_credentials(conn, opts) do
case Enum.reduce_while(
opts[:bearer_extract_methods],
conn,
fn method, conn ->
case extract_bearer(conn, method) do
{:ok, _conn, _bearer} = ret ->
{:halt, ret}
{:error, conn, :credentials_not_found} ->
{:cont, conn}
{:error, _conn, _reason} = ret ->
{:halt, ret}
end
end
) do
%Plug.Conn{} = conn ->
{:error, conn,
%APIac.Authenticator.Unauthorized{
authenticator: __MODULE__,
reason: :credentials_not_found
}}
{:ok, conn, bearer} ->
{:ok, conn, bearer}
{:error, conn, reason} ->
{:error, conn,
%APIac.Authenticator.Unauthorized{
authenticator: __MODULE__,
reason: reason
}}
end
end
defp extract_bearer(conn, :header) do
case Plug.Conn.get_req_header(conn, "authorization") do
# Only one header value should be returned
# (https://stackoverflow.com/questions/29282578/multiple-http-authorization-headers)
["Bearer " <> untrimmed_bearer] ->
# rfc7235 syntax allows multiple spaces before the base64 token
bearer = String.trim_leading(untrimmed_bearer, " ")
if APIac.rfc7235_token68?(bearer) do
{:ok, conn, bearer}
else
{:error, conn, :invalid_bearer_format}
end
_ ->
{:error, conn, :credentials_not_found}
end
end
defp extract_bearer(conn, :body) do
try do
plug_parser_opts =
Plug.Parsers.init(
parsers: [:urlencoded],
pass: ["application/x-www-form-urlencoded"]
)
conn = Plug.Parsers.call(conn, plug_parser_opts)
case conn.body_params["access_token"] do
nil ->
{:error, conn, :credentials_not_found}
bearer ->
if APIac.rfc7235_token68?(bearer) do
{:ok, conn, bearer}
else
{:error, conn, :invalid_bearer_format}
end
{:ok, conn, bearer}
end
rescue
UnsupportedMediaTypeError ->
{:error, conn, :unsupported_media_type}
end
end
defp extract_bearer(conn, :query) do
conn = Plug.Conn.fetch_query_params(conn)
case conn.query_params["access_token"] do
nil ->
{:error, conn, :credentials_not_found}
bearer ->
if APIac.rfc7235_token68?(bearer) do
# RFC6750 - section 2.3:
# Clients using the URI Query Parameter method SHOULD also send a
# Cache-Control header containing the "no-store" option. Server
# success (2XX status) responses to these requests SHOULD contain a
# Cache-Control header with the "private" option.
conn = Plug.Conn.put_resp_header(conn, "cache-control", "private")
{:ok, conn, bearer}
else
{:error, conn, :invalid_bearer_format}
end
end
end
@impl APIac.Authenticator
def validate_credentials(conn, bearer, opts) do
{cache, cache_opts} = opts[:cache]
case cache.get(bearer, cache_opts) do
%{} = bearer_data ->
validate_bearer_data(conn, bearer, bearer_data, opts)
# bearer is not in cache
nil ->
case opts[:validator_module].validate_bearer(bearer, opts[:validator_opts]) do
{:ok, bearer_data} ->
try do
# let's lower the ttl when the "exp" member of the bearer's data
# says the bearer expires before the current cache ttl
exp = String.to_integer(bearer_data["exp"])
ttl = min(cache_opts[:ttl], exp - :os.system_time(:second))
cache.put(bearer, bearer_data, Map.put(cache_opts, :ttl, ttl))
rescue
_ ->
cache.put(bearer, bearer_data, cache_opts)
end
validate_bearer_data(conn, bearer, bearer_data, opts)
{:error, error} ->
{:error, conn,
%APIac.Authenticator.Unauthorized{authenticator: __MODULE__, reason: error}}
end
end
end
defp validate_bearer_data(conn, bearer, bearer_data, opts) do
metadata = if opts[:forward_bearer], do: %{"bearer" => bearer}, else: %{}
with :ok <- verify_scopes(conn, bearer_data, opts),
:ok <- verify_audience(conn, bearer_data, opts) do
metadata =
case opts[:forward_metadata] do
:all ->
Map.merge(bearer_data, metadata)
attrs when is_list(attrs) ->
Enum.reduce(
attrs,
metadata,
fn attr, metadata ->
case bearer_data[attr] do
nil ->
metadata
val ->
# put_new/3 prevents from overwriting "bearer"
Map.put_new(metadata, attr, val)
end
end
)
end
conn =
conn
|> Plug.Conn.put_private(:apiac_authenticator, __MODULE__)
|> Plug.Conn.put_private(:apiac_client, bearer_data["client_id"])
|> Plug.Conn.put_private(:apiac_subject, bearer_data["sub"])
|> Plug.Conn.put_private(:apiac_metadata, metadata)
|> Plug.Conn.put_private(:apiac_realm, opts[:realm])
{:ok, conn}
end
end
defp verify_scopes(conn, bearer_data, opts) do
if ScopeSet.subset?(opts[:required_scopes], ScopeSet.new(bearer_data["scope"])) do
:ok
else
{:error, conn,
%APIac.Authenticator.Unauthorized{authenticator: __MODULE__, reason: :insufficient_scope}}
end
end
defp verify_audience(conn, bearer_data, opts) do
if opts[:resource_indicator] != nil do
case bearer_data["aud"] do
aud when is_binary(aud) ->
if opts[:resource_indicator] == aud do
:ok
else
{:error, conn,
%APIac.Authenticator.Unauthorized{
authenticator: __MODULE__,
reason: :invalid_audience
}}
end
aud_list when is_list(aud_list) ->
if opts[:resource_indicator] in aud_list do
:ok
else
{:error, conn,
%APIac.Authenticator.Unauthorized{
authenticator: __MODULE__,
reason: :invalid_audience
}}
end
_ ->
{:error, conn,
%APIac.Authenticator.Unauthorized{
authenticator: __MODULE__,
reason: :invalid_audience
}}
end
else
:ok
end
end
@doc """
Implementation of the `APIac.Authenticator` callback
## Verbosity
The following elements in the HTTP response are set depending on the value
of the `:error_response_verbosity` option:
### `:error_response_verbosity` set to `:debug`:
| Error | HTTP status | Included WWW-Authenticate parameters |
|-----------------------------------------|:-----------:|-------------------------------------------------------------|
| No bearer token found | 401 | - realm |
| Invalid bearer | 401 | - realm<br>- error<br>- error_description |
| Bearer doesn't have the required scopes | 403 | - realm<br>- error<br>- scope<br>- error_description |
### `:error_response_verbosity` set to `:normal`:
| Error | HTTP status | Included WWW-Authenticate parameters |
|-----------------------------------------|:-----------:|--------------------------------------|
| No bearer token found | 401 | - realm |
| Invalid bearer | 401 | - realm<br>- error |
| Bearer doesn't have the required scopes | 403 | - realm<br>- error<br>- scope |
### `:error_response_verbosity` set to `:minimal`:
| Error | HTTP status | Included WWW-Authenticate parameters |
|-----------------------------------------|:-----------:|--------------------------------------|
| No bearer token found | 401 | |
| Invalid bearer | 401 | |
| Bearer doesn't have the required scopes | 401 | |
Note: does not conform to the specification
"""
@impl APIac.Authenticator
def send_error_response(conn, error, %{:error_response_verbosity => :debug} = opts) do
{resp_status, error_map} =
case error do
%APIac.Authenticator.Unauthorized{reason: :credentials_not_found} ->
{:unauthorized, %{"realm" => opts[:realm]}}
%APIac.Authenticator.Unauthorized{reason: :insufficient_scope} ->
{:forbidden,
%{
"error" => "insufficient_scope",
"scope" => ScopeSet.to_scope_param(opts[:required_scopes]),
"realm" => opts[:realm],
"error_description" => "Insufficient scope"
}}
%APIac.Authenticator.Unauthorized{reason: reason} ->
{:unauthorized,
%{
"error" => "invalid_token",
"realm" => opts[:realm],
"error_description" => Atom.to_string(reason)
}}
end
conn
|> APIac.set_WWWauthenticate_challenge("Bearer", error_map)
|> Plug.Conn.send_resp(resp_status, "")
|> Plug.Conn.halt()
end
@impl APIac.Authenticator
def send_error_response(conn, error, %{:error_response_verbosity => :normal} = opts) do
{resp_status, error_map} =
case error do
%APIac.Authenticator.Unauthorized{reason: :credentials_not_found} ->
{:unauthorized, %{"realm" => opts[:realm]}}
%APIac.Authenticator.Unauthorized{reason: :insufficient_scope} ->
{:forbidden,
%{
"error" => "insufficient_scope",
"scope" => ScopeSet.to_scope_param(opts[:required_scopes]),
"realm" => opts[:realm]
}}
%APIac.Authenticator.Unauthorized{} ->
{:unauthorized, %{"error" => "invalid_token", "realm" => opts[:realm]}}
end
conn
|> APIac.set_WWWauthenticate_challenge("Bearer", error_map)
|> Plug.Conn.send_resp(resp_status, "")
|> Plug.Conn.halt()
end
@impl APIac.Authenticator
def send_error_response(conn, _error, %{:error_response_verbosity => :minimal}) do
conn
|> Plug.Conn.send_resp(:unauthorized, "")
|> Plug.Conn.halt()
end
@doc """
Sets the HTTP `WWW-authenticate` header when no such a scheme is used for
authentication.
Sets the HTTP `WWW-Authenticate` header with the `Bearer` scheme and the realm
name, when the `Bearer` scheme was not used in the request. When this scheme is
used in the request, response will be sent by `#{__MODULE__}.send_error_response/3`.
This allows advertising that the `Bearer` scheme is available, without stopping
the plug pipeline.
Raises an exception when the error response verbosity is set to `:minimal` since
it does not set the `WWW-Authenticate` header.
"""
@spec set_WWWauthenticate_header(
Plug.Conn.t(),
%APIac.Authenticator.Unauthorized{},
any()
) :: Plug.Conn.t()
def set_WWWauthenticate_header(_conn, _err, %{:error_response_verbosity => :minimal}) do
raise "#{__ENV__.function} not accepted when :error_response_verbosity is set to :minimal"
end
def set_WWWauthenticate_header(
conn,
%APIac.Authenticator.Unauthorized{reason: :credentials_not_found},
opts
) do
conn
|> APIac.set_WWWauthenticate_challenge("Bearer", %{"realm" => "#{opts[:realm]}"})
end
def set_WWWauthenticate_header(conn, error, opts) do
send_error_response(conn, error, opts)
end
@doc """
Saves failure in a `Plug.Conn.t()`'s private field and returns the `conn`
See the `APIac.AuthFailureResponseData` module for more information.
"""
@spec save_authentication_failure_response(
Plug.Conn.t(),
%APIac.Authenticator.Unauthorized{},
any()
) :: Plug.Conn.t()
def save_authentication_failure_response(conn, error, opts) do
{resp_status, error_map} =
case error do
%APIac.Authenticator.Unauthorized{reason: :credentials_not_found} ->
{:unauthorized, %{"realm" => opts[:realm]}}
%APIac.Authenticator.Unauthorized{reason: :insufficient_scope} ->
{:forbidden,
%{
"error" => "insufficient_scope",
"scope" => ScopeSet.to_scope_param(opts[:required_scopes]),
"realm" => opts[:realm]
}}
%APIac.Authenticator.Unauthorized{} ->
{:unauthorized, %{"error" => "invalid_token", "realm" => opts[:realm]}}
end
failure_response_data = %APIac.AuthFailureResponseData{
module: __MODULE__,
reason: error.reason,
www_authenticate_header: {"Bearer", error_map},
status_code: resp_status,
body: nil
}
APIac.AuthFailureResponseData.put(conn, failure_response_data)
end
end
|
lib/apiac_auth_bearer.ex
| 0.940626
| 0.835819
|
apiac_auth_bearer.ex
|
starcoder
|
defmodule Adventofcode.Day11SeatingSystem do
use Adventofcode
alias __MODULE__.{Printer, State}
def part_1(input) do
input
|> parse()
|> State.new(tolerant: false)
|> step_until_stable
|> State.count_occupied()
end
def part_2(input) do
input
|> parse()
|> State.new(tolerant: true)
|> step_until_stable
|> State.count_occupied()
end
defmodule State do
@enforce_keys [:grid, :x_range, :y_range, :tolerant]
defstruct grid: %{}, x_range: 0..0, y_range: 0..0, step: 0, tolerant: false
def new(grid, options) do
width = grid |> hd |> Enum.count()
height = grid |> Enum.count()
tolerant = Keyword.get(options, :tolerant, false)
%__MODULE__{
grid: grid |> List.flatten() |> Enum.into(%{}),
x_range: 0..(width - 1),
y_range: 0..(height - 1),
tolerant: tolerant
}
end
def get(%State{grid: grid}, {x, y}), do: Map.get(grid, {x, y})
def put(%State{grid: grid} = state, {x, y}, val) when val in '.L#' do
%{state | grid: Map.put(grid, {x, y}, val)}
end
def count_occupied(%State{grid: grid}) do
Enum.count(grid, &(elem(&1, 1) == ?#))
end
end
def step_until_stable(%State{} = state) do
next = state |> step
if next.grid == state.grid, do: state, else: step_until_stable(next)
end
def step(%State{step: step} = state) do
state.grid
|> Enum.map(&find_neighbours(state, &1))
|> Enum.reduce(%{state | step: step + 1}, &check/2)
end
defp find_neighbours(%State{tolerant: true} = state, {{x, y}, char}),
do: {{x, y}, char, get_occupied_visible({x, y}, state)}
defp find_neighbours(%State{} = state, {{x, y}, char}),
do: {{x, y}, char, get_neighbours({x, y}, state)}
defp check({_pos, ?., _}, acc), do: acc
defp check({pos, ?L, 0}, acc), do: State.put(acc, pos, ?#)
defp check({pos, ?#, n}, %{tolerant: false} = acc) when n >= 4, do: State.put(acc, pos, ?L)
defp check({pos, ?#, n}, acc) when n >= 5, do: State.put(acc, pos, ?L)
defp check({_pos, _, _}, acc), do: acc
@neighbours [
{-1, -1},
{0, -1},
{1, -1},
{-1, 0},
{1, 0},
{-1, 1},
{0, 1},
{1, 1}
]
def get_neighbours({x, y}, state) do
@neighbours
|> Enum.map(fn {dx, dy} -> State.get(state, {x + dx, y + dy}) end)
|> Enum.count(&(&1 == ?#))
end
def get_occupied_visible({x, y}, state) do
@neighbours
|> Enum.count(&find_occupied?({x, y}, &1, state))
end
defp find_occupied?({x, y}, {dx, dy}, state) do
pos = {x + dx, y + dy}
case State.get(state, pos) do
nil -> false
?# -> true
?L -> false
?. -> find_occupied?(pos, {dx, dy}, state)
end
end
def parse(input) do
input
|> String.trim()
|> String.split("\n")
|> Enum.map(&String.to_charlist/1)
|> Enum.with_index()
|> Enum.map(&do_parse/1)
end
defp do_parse({chars, y}) do
chars
|> Enum.with_index()
|> Enum.map(fn {char, x} -> {{x, y}, char} end)
end
defmodule Printer do
def print(%State{} = state) do
Enum.map_join(state.y_range, "\n", fn y ->
Enum.map_join(state.x_range, "", fn x ->
[state.grid[{x, y}]]
end)
end)
end
end
end
defimpl Inspect, for: Adventofcode.Day11SeatingSystem.State do
import Inspect.Algebra
alias Adventofcode.Day11SeatingSystem.{Printer, State}
def inspect(%State{} = state, _opts) do
concat([
"#State{#{state.step}",
break("\n"),
to_string(Printer.print(state)),
break("\n"),
"}"
])
end
end
|
lib/day_11_seating_system.ex
| 0.682362
| 0.623635
|
day_11_seating_system.ex
|
starcoder
|
defmodule Csp.Backtracking do
@moduledoc """
Backtracking algorithm implementation.
"""
alias Csp
alias Csp.AC3
@type variable_selector :: :take_head | ([Csp.variable()] -> {Csp.variable(), [Csp.variable()]})
@doc """
Backtracking implementation for solving CSPs.
## Options
The following `opts` are supported:
- `all`, boolean, `false` by default: if only first, or all variables should be returned.
- `ac3`, boolean, `false` by default: if AC3 runs should be performed during each backtracking step.
- `variable_selector`, either `:take_head` (default), `:minimum_remaining_values`
(will select the variable with the least values remaining in the domain as the next candidate to consider),
or a function accepting a list of unassigned variables, and returning a tuple
of a variable we should consider next and a rest of the unassigned variables list.
"""
@spec solve(Csp.t(), Keyword.t()) :: Csp.solve_result()
def solve(%Csp{} = csp, opts \\ []) do
all = Keyword.get(opts, :all, false)
ac3 = Keyword.get(opts, :ac3, false)
variable_selector = Keyword.get(opts, :variable_selector, :take_head)
case backtrack(%{}, csp.variables, csp, variable_selector, ac3, all) do
[] -> :no_solution
[solution] -> {:solved, solution}
solutions when is_list(solutions) -> {:solved, solutions}
end
end
## Helpers
@spec backtrack(Csp.assignment(), [Csp.variable()], Csp.t(), variable_selector(), boolean(), boolean()) ::
[Csp.assignment()]
defp backtrack(assignment, unassigned_variables, csp, variable_selector, ac3, all)
defp backtrack(assignment, [] = _unassigned, _, _, _, _), do: [assignment]
defp backtrack(assignment, [unassigned_variable | rest], csp, :take_head, ac3, all) do
backtrack_variable_selected(assignment, {unassigned_variable, rest}, csp, :take_head, ac3, all)
end
defp backtrack(assignment, unassigned_variables, csp, :minimum_remaining_values, ac3, all) do
{min_domain_values_variable, _domain} =
Map.take(csp.domains, unassigned_variables)
|> Enum.map(fn {variable, domain} ->
{variable, length(domain)}
end)
|> Enum.min(fn {_, domain_length}, {_, domain_length2} ->
domain_length <= domain_length2
end)
{unassigned_variable, rest} =
{min_domain_values_variable, List.delete(unassigned_variables, min_domain_values_variable)}
backtrack_variable_selected(assignment, {unassigned_variable, rest}, csp, :minimum_remaining_values, ac3, all)
end
defp backtrack(assignment, unassigned_variables, csp, variable_selector, run_ac3, all) do
{unassigned_variable, rest} = variable_selector.(unassigned_variables)
backtrack_variable_selected(assignment, {unassigned_variable, rest}, csp, variable_selector, run_ac3, all)
end
defp backtrack_variable_selected(assignment, {variable, unassigned}, csp, variable_selector, ac3, all) do
domain = Map.fetch!(csp.domains, variable)
Enum.reduce_while(domain, [], fn value, acc ->
assignment = Map.put(assignment, variable, value)
if Csp.consistent?(csp, assignment) do
{inconsistent, csp, assignment, unassigned} =
if ac3 do
case AC3.reduce(csp, assignment, unassigned) do
{:ok, csp, assignment, unassigned} -> {false, csp, assignment, unassigned}
:no_solution -> {true, csp, assignment, unassigned}
end
else
{false, csp, assignment, unassigned}
end
if inconsistent do
{:cont, acc}
else
future_result = backtrack(assignment, unassigned, csp, variable_selector, ac3, all)
case future_result do
[] ->
{:cont, acc}
solutions when is_list(solutions) ->
if all, do: {:cont, acc ++ solutions}, else: {:halt, solutions}
end
end
else
{:cont, acc}
end
end)
end
end
|
lib/csp/backtracking.ex
| 0.82425
| 0.639032
|
backtracking.ex
|
starcoder
|
defmodule Journey do
defstruct data: nil, steps: [], state: :new, result: nil
alias Journey.Step
def new() do
%__MODULE__{}
end
def set_data(%__MODULE__{} = journey, data) do
%__MODULE__{journey | data: data}
end
def run(%__MODULE__{} = journey, spec, args \\ []) do
add_step(journey, {spec, args}, :sync)
end
def run_async(%__MODULE__{} = journey, spec, args \\ [], timeout \\ 5000) do
add_step(journey, {spec, args, timeout}, :async)
end
def await(%__MODULE__{steps: steps} = journey) do
journey
|> update_steps(Enum.map(steps, &await(&1)))
|> check_results()
end
def await(%Step{spec: {_, _, timeout}, transaction: {func, %Task{} = task}} = step) do
result =
case Task.yield(task, timeout) do
{:ok, result} -> result
{:exit, error} -> {:error, error}
nil -> {:error, {:timeout, step}}
end
%Step{step | transaction: {func, result}}
end
def await(step), do: step
def finally(%__MODULE__{} = journey, func) do
func.(await(journey))
end
def finally(%__MODULE__{} = journey) do
finally(journey, fn %{result: result} -> result end)
end
defp add_step(%__MODULE__{} = journey, spec, :sync = type) do
journey
|> await()
|> mk_step(spec, type)
|> await()
end
defp add_step(%__MODULE__{} = journey, spec, type) do
mk_step(journey, spec, type)
end
defp mk_step(%__MODULE__{state: :failed} = journey, _, _), do: journey
defp mk_step(%__MODULE__{} = journey, spec, type) do
{transaction, compensation} = get_funcs(spec)
update_steps(
journey,
journey.steps ++
[
%Step{
spec: spec,
compensation: {compensation, nil, :not_called},
transaction: {transaction, call(transaction, journey, type)}
}
]
)
end
defguardp is_valid_function(func) when is_function(func, 0) or is_function(func, 1)
defguardp is_ok(result) when result == :ok or elem(result, 0) == :ok
defp call(transaction, journey, :async) do
Task.async(fn -> call(transaction, journey, :sync) end)
end
defp call(func, journey, type) when is_function(func, 0) do
call(fn _ -> func.() end, journey, type)
end
defp call(func, %__MODULE__{} = journey, _) when is_function(func, 1) do
try do
func.(journey)
rescue
error -> {:error, error}
end
end
defp get_funcs({spec, args}), do: get_funcs({spec, args, 0})
defp get_funcs({func, _, _}) when is_function(func) do
extract_funcs(func.())
end
defp get_funcs({{module, function_name}, args, _}) do
extract_funcs(apply(module, function_name, args))
end
defp extract_funcs({transaction, compensation} = funcs)
when is_valid_function(transaction) and is_valid_function(compensation),
do: funcs
defp extract_funcs(transaction) when is_valid_function(transaction), do: {transaction, nil}
defp check_results(%__MODULE__{steps: steps} = journey) do
with true <-
Enum.any?(steps, fn
%Step{transaction: {_, result}} when is_ok(result) -> false
_ -> true
end) do
rollback(journey)
else
_ -> journey
end
end
defp rollback(%__MODULE__{steps: steps} = journey) do
journey = %__MODULE__{journey | state: :failed}
steps =
steps
|> Enum.reverse()
|> Enum.map(&call_compensation(&1, journey))
|> Enum.reverse()
%__MODULE__{journey | steps: steps}
end
defp call_compensation(
%Step{compensation: {func, _, :not_called}, transaction: {_, result}} = step,
journey
)
when is_function(func) and is_ok(result) do
%Step{step | compensation: {func, call(func, journey, :sync), :called}}
end
defp call_compensation(step, _journey), do: step
defp update_steps(%__MODULE__{} = journey, []), do: journey
defp update_steps(%__MODULE__{} = journey, steps) do
with true <-
Enum.any?(steps, fn
%Step{transaction: {_, %Task{}}} -> true
_ -> false
end) do
%__MODULE__{journey | steps: steps, state: :running, result: nil}
else
_ ->
%Step{transaction: {_, result}} = List.last(steps)
%__MODULE__{journey | steps: steps, state: :done, result: result}
end
end
end
|
lib/journey.ex
| 0.566139
| 0.717556
|
journey.ex
|
starcoder
|
defmodule Mapail do
@moduledoc ~S"""
Helper library to convert a map into a struct or a struct to a struct.
Convert string-keyed maps to structs by calling the
`map_to_struct/3` function.
Convert atom-keyed and atom/string mixed key maps to
structs by piping the `stringify_map/1` into the `map_to_struct/3` function.
Convert structs to structs by calling the `struct_to_struct/3` function.
## Note
- The [Maptu](https://github.com/lexhide/maptu) library already provides many of the
functions necessary for converting "encoded" maps to Elixir structs. Maptu may be
all you need - see [Maptu](https://github.com/lexhide/maptu). Mapail builds on top
of `Maptu` and incorporates it as a dependency.
- `Mapail` offers a few additional more lenient approaches to the conversion process
to a struct as explained in use cases. Maptu may be all you need though.
## Features
- String keyed maps: Convert maps with string keys to a corresponding struct.
- Transformations: Optionally, string manipulations can be applied to the key of the map so as to attempt to
force the key to match the key of the struct. Currently, the only transformation option is conversion to snake_case.
- Residual maps: Optionally, the part of the map leftover after the struct has been built can be retrieved
or merged back into the returned struct.
- Helper function for converting atom-keyed maps or string/atom mixed keyed maps to string-keyed only maps.
- Helper function for converting a struct to another struct.
## Limitations
- Currently, only converts one level deep, that is, it does not convert nested structs.
This is a potential TODO task.
## Use Cases
- Scenario 1:
Map and Struct has a perfect match on the keys.
map_to_struct(map, MODULE)` returns `{:ok, %MODULE{} = new_struct}
- Scenario 2:
Map and Struct has an imperfect match on the keys
map_to_struct(map, MODULE, rest: :true)` returns `{:ok, %MODULE{} = new_struct, rest}
- Scenario 3:
Map and Struct has an imperfect match on the keys and a struct with and additional
field named `:mapail` is returned. The value for the `:mapail` fields is a
nested map with all non-matching key-pairs.
map_to_struct(map, MODULE, rest: :merge)` returns `{:ok, %MODULE{} = new_struct}
where `new_struct.mapail` contains the non-mathing `key-value` pairs.
- Scenario 4:
Map and Struct has an imperfect match on the keys. After an initial attempt to
match the map keys to those of the struct keys, any non-matching keys are piped
through transformation function(s) which modify the key of the map in an attempt
to make a new match with the modified key. For now, the only transformations supported
are `[:snake_case]`. `:snake_case` converts the non-matching keys to snake_case.
***NOTE***: This approach is lenient and will make matches that
otherwise would not have matched. It might prove useful where a `json` encoded map
returned from a server uses camelcasing and matches are otherwise missed. ***Only
use this approach when it is explicitly desired behaviour***
map_to_struct(map, MODULE, transformations: [:snake_case], rest: :true)
returns `{:ok, new_struct, rest}`
- Scenario 5:
Map and Struct has a perfect match but the keys in the map are mixed case. Mapail
provides a utility function which can help in this situation.
stringify_map(map) |> map_to_struct(map, MODULE, rest: :false)
returns {:ok, %MODULE{} = new_struct}
- Scenario 6:
Struct and Struct has a perfect match but the __struct__ fields are non-matching.
struct_to_struct(%Notifications.Email{}, User.Email)` returns `{:ok, %User.Email{} = new_struct}
## Example - exact key matching (no transformations)
defmodule User do
defstruct [:first_name, :username, :password]
end
user = %{
"FirstName" => "John",
"Username" => "john",
"password" => "<PASSWORD>",
"age" => 30
}
Mapail.map_to_struct(user, User)
{:ok, %User{
first_name: :nil,
username: :nil,
password: "<PASSWORD>"
}
}
## Example - key matching with `transformations: [:snake_case]`
defmodule User do
defstruct [:first_name, :username, :password]
end
user = %{
"FirstName" => "John",
"Username" => "john",
"password" => "<PASSWORD>",
"age" => 30
}
Mapail.map_to_struct(user, User, transformations: [:snake_case])
{:ok, %User{
first_name: "John",
username: "john",
password: "<PASSWORD>"
}
}
## Example - getting unmatched elements in a separate map
defmodule User do
defstruct [:first_name, :username, :password]
end
user = %{
"FirstName" => "John",
"Username" => "john",
"password" => "<PASSWORD>",
"age" => 30
}
{:ok, user_struct, leftover} = Mapail.map_to_struct(user, User, rest: :true)
{:ok, %User{
first_name: :nil,
username: "pass",
password: :<PASSWORD>
},
%{
"FirstName" => "John",
"Username" => "john",
"age" => 30
}
}
## Example - getting unmatched elements in a merged nested map
defmodule User do
defstruct [:first_name, :username, :password]
end
user = %{
"FirstName" => "John",
"Username" => "john",
"password" => "<PASSWORD>",
"age" => 30
}
Mapail.map_to_struct(user, User, rest: :merge)
{:ok, %User{
first_name: :nil,
username: "pass",
password: :<PASSWORD>,
mapail: %{
"FirstName" => "John",
"Username" => "john",
"age" => 30
}
}
## Dependencies
This library has a dependency on the following library:
- [Maptu](https://hex.pm/packages/maptu) v1.0.0 library. For converting a matching map to a struct.
MIT © 2016 <NAME>, <NAME>. [Licence](https://github.com/lexhide/maptu/blob/master/LICENSE.txt)
"""
require Maptu.Extension
@transformations [:snake_case]
@doc """
Convert a map with atom only or atom/string mixed keys
to a map with string keys only.
"""
@spec stringify_map(map) :: {:ok, map} | {:error, String.t}
def stringify_map(map) do
Enum.reduce(map, %{}, fn({k,v}, acc) ->
try do
Map.put(acc, Atom.to_string(k), v)
rescue
_e in ArgumentError ->
is_binary(k) && Map.put(acc, k, v) || {:error, "the key is not an atom nor a binary"}
end
end)
end
@doc """
Convert one form of struct into another struct.
## opts
`[]` - same as `[rest: :false]`, `{:ok, struct}` is returned and any non-matching pairs
will be discarded.
`[rest: :true]`, `{:ok, struct, map}` is returned where map are the non-matching
key-value pairs.
`[rest: :false]`, `{:ok, struct}` is returned and any non-matching pairs
will be discarded.
"""
@spec struct_to_struct(map, atom, list) :: {:ok, struct} | {:ok, struct, map} | {:error, String.t}
def struct_to_struct(old_struct, module, opts \\ []) do
rest = Keyword.get(opts, :rest, :false)
with {:ok, new_struct} <- Map.from_struct(old_struct)
|> Mapail.stringify_map()
|> Mapail.map_to_struct(module, rest: rest) do
{:ok, new_struct}
else
{:ok, new_struct, rest} ->
rest = Enum.reduce(rest, %{}, fn({k,v}, acc) ->
{:ok, nk} = Maptu.Extension.to_existing_atom_safe(k)
Map.put(acc, nk, v)
end)
{:ok, new_struct, rest}
{:error, error} -> {:error, error}
end
end
@doc """
Convert one form of struct into another struct and raises an error on fail.
"""
@spec struct_to_struct!(map, atom) :: struct | no_return
def struct_to_struct!(old_struct, module) do
case struct_to_struct(old_struct, module, rest: :false) do
{:error, error} -> raise(ArgumentError, error)
{:ok, new_struct} -> new_struct
end
end
@doc ~s"""
Converts a string-keyed map to a struct.
## Arguments
- module: The module of the struct to be created.
- map: The map to be converted to a struct.
- opts: See below
- `transformations: [atom]`:
A list of transformations to apply to keys in the map where there are `non-matching`
keys after the inital attempt to match.
Defaults to `transformations: []` ie. no transformations are applied and only exactly matching keys are used to
build a struct.
If set to `transformations: [:snake_case]`, then after an initial run, non-matching keys are converted to
snake_case form and another attempt is made to match the keys with the snake_case keys. This
means less than exactly matching keys are considered a match when building the struct.
- `rest: atom`:
Defaults to `rest: :false`
By setting `rest: :true`, the 'leftover' unmatched key-value pairs of the original map
will also be returned in separate map with the keys in their original form.
Returns as a tuple in the format `{:ok, struct, rest}`
- By setting `rest: :merge`, the 'leftover' unmatched key-value pairs of the original map
will be merged into the struct as a nested map under the key `:mapail`.
Returns as a tuple in the format `{:ok, struct}`
- By setting `rest: :false`, unmatched keys are silently discarded and only the struct
is returned with matching keys. Returns as a tuple in the format `{:ok, struct}`.
Example (matching keys):
iex> Mapail.map_to_struct(%{"first" => 1, "last" => 5}, Range)
{:ok, 1..5}
Example (non-matching keys):
iex> Mapail.map_to_struct(%{"line_or_bytes" => [], "Raw" => :false}, File.Stream)
{:ok, %File.Stream{line_or_bytes: [], modes: [], path: nil, raw: true}}
Example (non-matching keys - with `snake_case` transformations):
iex> Mapail.map_to_struct(%{"first" => 1, "Last" => 5}, Range, transformations: [:snake_case])
{:ok, 1..5}
Example (non-matching keys):
iex> {:ok, r} = Mapail.map_to_struct(%{"first" => 1, "Last" => 5}, Range); Map.keys(r);
[:__struct__, :first, :last]
Example (non-matching keys - with transformations):
iex> {:ok, r} = Mapail.map_to_struct(%{"first" => 1, "Last" => 5}, Range, transformations: [:snake_case]); Map.values(r);
[Range, 1, 5]
Example (non-matching keys):
iex> Mapail.map_to_struct(%{"first" => 1, "last" => 5, "next" => 3}, Range)
{:ok, 1..5}
Example (non-matching keys - capturing excess key-value pairs in separate map called rest):
iex> Mapail.map_to_struct(%{"first" => 1, "last" => 5, "next" => 3}, Range, rest: :true)
{:ok, 1..5, %{"next" => 3}}
Example (non-matching keys - capturing excess key-value pairs and merging into struct under `:mapail` key):
iex> {:ok, r} = Mapail.map_to_struct(%{"first" => 1, "last" => 5, "next" => 3}, Range, rest: :merge); Map.values(r);
[Range, 1, 5, %{"next" => 3}]
iex> {:ok, r} = Mapail.map_to_struct(%{"first" => 1, "last" => 5, "next" => 3}, Range, rest: :merge); Map.keys(r);
[:__struct__, :first, :last, :mapail]
"""
@spec map_to_struct(map, atom, Keyword.t) :: {:error, Maptu.Extension.non_strict_error_reason} |
{:ok, struct} |
{:ok, struct, map}
def map_to_struct(map, module, opts \\ []) do
maptu_fn = if Keyword.get(opts, :rest, :false) in [:true, :merge], do: &Maptu.Extension.struct_rest/2, else: &Maptu.struct/2
map_to_struct(map, module, maptu_fn, opts)
end
@doc ~s"""
Converts a string-keyed map to a struct and raises if it fails.
See `map_to_struct/3`
Example (matching keys):
iex> Mapail.map_to_struct!(%{"first" => 1, "last" => 5}, Range)
1..5
Example (non-matching keys):
iex> Mapail.map_to_struct!(%{"line_or_bytes" => [], "Raw" => :false}, File.Stream)
%File.Stream{line_or_bytes: [], modes: [], path: nil, raw: true}
Example (non-matching keys - with `snake_case` transformations):
iex> Mapail.map_to_struct!(%{"first" => 1, "Last" => 5}, Range, transformations: [:snake_case])
1..5
Example (non-matching keys):
iex> Mapail.map_to_struct!(%{"first" => 1, "Last" => 5}, Range) |> Map.keys();
[:__struct__, :first, :last]
iex> Mapail.map_to_struct!(%{"first" => 1, "Last" => 5}, Range) |> Map.values();
[Range, 1, :nil]
Example (non-matching keys - with transformations):
iex> Mapail.map_to_struct!(%{"first" => 1, "Last" => 5}, Range, transformations: [:snake_case]) |> Map.values();
[Range, 1, 5]
Example (non-matching keys):
iex> Mapail.map_to_struct!(%{"first" => 1, "last" => 5, "next" => 3}, Range)
1..5
Example (non-matching keys - capturing excess key-value pairs in separate map):
iex> Mapail.map_to_struct!(%{"first" => 1, "last" => 5, "next" => 3}, Range, rest: :merge) |> Map.values();
[Range, 1, 5, %{"next" => 3}]
iex> Mapail.map_to_struct!(%{"first" => 1, "last" => 5, "next" => 3}, Range, rest: :merge) |> Map.keys();
[:__struct__, :first, :last, :mapail]
"""
@spec map_to_struct!(map, atom, Keyword.t) :: struct | no_return
def map_to_struct!(map, module, opts \\ []) do
maptu_fn = if Keyword.get(opts, :rest, :false) == :merge, do: &Maptu.Extension.struct_rest/2, else: &Maptu.struct/2
map_to_struct(map, module, maptu_fn, opts)
|> Maptu.Extension.raise_on_error()
end
# private
defp map_to_struct(map, module, maptu_fn, opts) do
map_bin_keys = Map.keys(map)
struct_bin_keys = module.__struct__() |> Map.keys() |> Enum.map(&Atom.to_string/1)
non_matching_keys = non_matching_keys(map_bin_keys, struct_bin_keys)
case non_matching_keys do
[] ->
try do
maptu_fn.(module, map)
rescue
e in FunctionClauseError ->
if e.function == :to_existing_atom_safe && e.module == Maptu && e.arity == 1 do
{:error, :atom_key_not_expected}
else
{:error, :unexpected_error}
end
end
_ ->
{transformed_map, keys_trace} = apply_transformations(map, non_matching_keys, opts)
unmatched_map = get_unmatched_map_with_original_keys(map, keys_trace)
merged_map = Map.merge(transformed_map, unmatched_map)
try do
maptu_fn.(module, merged_map)
rescue
e in FunctionClauseError ->
if e.function == :to_existing_atom_safe&& e.arity == 1 do
{:error, :atom_key_not_expected}
else
{:error, :unexpected_error}
end
end
|> remove_transformed_unmatched_keys(keys_trace)
end
|> case do
{:ok, res, rest} ->
if opts[:rest] == :merge do
{:ok, Map.put(res, :mapail, rest)}
else
{:ok, res, rest}
end
{:ok, res} -> {:ok, res}
{:error, reason} -> {:error, reason}
end
end
defp non_matching_keys(map_bin_keys, struct_bin_keys) do
matching = Enum.filter(struct_bin_keys,
fn(struct_key) -> Enum.member?(map_bin_keys, struct_key) end
)
non_matching = Enum.reject(map_bin_keys,
fn(map_key) -> Enum.member?(matching, map_key) end
)
non_matching
end
defp get_unmatched_map_with_original_keys(map, keys_trace) do
Enum.reduce(keys_trace, %{},
fn({k, v}, acc) ->
if k !== v do
Map.put(acc, k, Map.fetch!(map, k))
else
acc
end
end
)
end
defp apply_transformations(map, non_matching_keys, opts) do
transformations = Keyword.get(opts, :transformations, [])
Enum.any?(transformations, &(Enum.member?(@transformations, &1) == :false)) &&
(msg = "Unknown transformation in #{inspect(transformations)}, allowed transformations: #{inspect(@transformations)}"
raise(ArgumentError, msg))
{transformed_map, keys_trace} =
if :snake_case in transformations do
to_snake_case(map, non_matching_keys)
else
keys_trace = Enum.reduce(map, %{}, fn({k, _v}, acc) -> Map.put(acc, k, k) end)
{map, keys_trace}
end
{transformed_map, keys_trace}
end
defp to_snake_case(map, non_matching_keys) do
Enum.reduce(map, {map, %{}},
fn({k, v}, {mod_map, keys_trace}) ->
case k in non_matching_keys do
:true ->
key =
case is_atom(k) do
:true -> raise ArgumentError, "Mapail expects only maps with string keys."
:false -> Macro.underscore(k) |> String.downcase()
end
{
Map.delete(mod_map, k) |> Map.put(key, v),
Map.put(keys_trace, k, key),
}
:false ->
{
mod_map,
Map.put(keys_trace, k, k)
}
end
end
)
end
defp remove_transformed_unmatched_keys({:error, reason}, _keys_trace) do
{:error, reason}
end
defp remove_transformed_unmatched_keys({:ok, res}, _keys_trace) do
{:ok, res}
end
defp remove_transformed_unmatched_keys({:ok, res, rest}, keys_trace) do
rest =
Enum.reduce(keys_trace, rest,
fn({orig_k, trans_k}, acc) ->
if orig_k !== trans_k && Map.has_key?(acc, trans_k) do
Map.delete(acc, trans_k)
else
acc
end
end
)
{:ok, res, rest}
end
end
|
lib/mapail.ex
| 0.730674
| 0.857171
|
mapail.ex
|
starcoder
|
defmodule EctoIPRange.Util.Inet do
@moduledoc false
@doc """
Convert an IPv4 tuple into an IPv6 tuple.
Wrapper around `:inet.ipv4_mapped_ipv6_address/1` to allow only IPv4-to-IPv6
conversion and provide support for OTP < 21.
## Examples
iex> ipv4_to_ipv6({127, 0, 0, 1})
{:ok, {0, 0, 0, 0, 0, 65_535, 32_512, 1}}
iex> ipv4_to_ipv6({512, 0, 0, 1})
{:error, :einval}
iex> ipv4_to_ipv6({"a", "b", "c", "d"})
{:error, :einval}
iex ipv4_to_ipv6({0, 0, 0, 0, 0, 65_535, 32_512, 1})
{:error, :einval}
"""
@spec ipv4_to_ipv6(:inet.ip4_address()) :: {:ok, :inet.ip6_address()} | {:error, :einval}
if function_exported?(:inet, :ipv4_mapped_ipv6_address, 1) do
def ipv4_to_ipv6({a, b, c, d} = ip4_address)
when a in 0..255 and b in 0..255 and c in 0..255 and d in 0..255 do
{:ok, :inet.ipv4_mapped_ipv6_address(ip4_address)}
end
else
def ipv4_to_ipv6({a, b, c, d} = ip4_address)
when a in 0..255 and b in 0..255 and c in 0..255 and d in 0..255 do
ip4_address
|> :inet.ntoa()
|> :inet.parse_ipv6_address()
end
end
def ipv4_to_ipv6(_), do: {:error, :einval}
@doc """
Convert an IP tuple to a binary address.
## Examples
iex> ntoa({1, 2, 3, 4})
"1.2.3.4"
iex> ntoa({1, 2, 3, 4, 5, 6, 7, 8})
"fc00:e968:6179::de52:7100"
iex> ntoa({"a", "b", "c", "d"})
nil
iex> ntoa({"a", "b", "c", "d", "e", "f", "g", "h"})
nil
iex> ntoa("1.2.3.4")
nil
iex> ntoa("a.b.c.d")
nil
"""
@spec ntoa(:inet.ip_address()) :: binary | nil
def ntoa({a, b, c, d} = ip_address)
when a in 0..255 and b in 0..255 and c in 0..255 and d in 0..255 do
case :inet.ntoa(ip_address) do
address when is_list(address) -> Kernel.to_string(address)
_ -> nil
end
end
# credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity
def ntoa({a, b, c, d, e, f, g, h} = ip_address)
when a in 0..65_535 and b in 0..65_535 and c in 0..65_535 and d in 0..65_535 and
e in 0..65_535 and
f in 0..65_535 and g in 0..65_535 and h in 0..65_535 do
case :inet.ntoa(ip_address) do
address when is_list(address) -> Kernel.to_string(address)
_ -> nil
end
end
def ntoa(_), do: nil
@doc """
Parse a binary IPv4 or IPv6 address.
## Examples
iex> parse_binary("1.2.3.4")
{:ok, {1, 2, 3, 4}}
iex> parse_binary("fc00:e968:6179::de52:7100")
{:ok, {1, 2, 3, 4, 5, 6, 7, 8}}
iex> parse_binary("a.b.c.d")
{:error, :einval}
iex> parse_binary("s:t:u:v:w:x:y:z")
{:error, :einval}
"""
@spec parse_binary(binary) :: {:ok, :inet.ip_address()} | {:error, :einval}
def parse_binary(address) do
address
|> String.to_charlist()
|> :inet.parse_address()
end
@doc """
Parse a binary IPv4 address.
## Examples
iex> parse_ipv4_binary("1.2.3.4")
{:ok, {1, 2, 3, 4}}
iex> parse_ipv4_binary("fc00:e968:6179::de52:7100")
{:error, :einval}
iex> parse_ipv4_binary("a.b.c.d")
{:error, :einval}
iex> parse_ipv4_binary("s:t:u:v:w:x:y:z")
{:error, :einval}
"""
@spec parse_ipv4_binary(binary) :: {:ok, :inet.ip4_address()} | {:error, :einval}
def parse_ipv4_binary(address) do
address
|> String.to_charlist()
|> :inet.parse_ipv4_address()
end
@doc """
Parse a binary IPv6 address.
## Examples
iex> parse_ipv6_binary("1.2.3.4")
{:ok, {0, 0, 0, 0, 0, 65_535, 258, 772}}
iex> parse_ipv6_binary("1:2:3:4:5:6:7:8")
{:ok, {1, 2, 3, 4, 5, 6, 7, 8}}
iex> parse_ipv6_binary("a.b.c.d")
{:error, :einval}
iex> parse_ipv6_binary("s:t:u:v:w:x:y:z")
{:error, :einval}
"""
@spec parse_ipv6_binary(binary) :: {:ok, :inet.ip6_address()} | {:error, :einval}
def parse_ipv6_binary(address) do
address
|> String.to_charlist()
|> :inet.parse_ipv6_address()
end
end
|
lib/ecto_ip_range/util/inet.ex
| 0.854582
| 0.534612
|
inet.ex
|
starcoder
|
defmodule Curve448 do
import Bitwise
@moduledoc """
Curve448 Diffie-Hellman functions
"""
@typedoc """
public or secret key
"""
@type key :: <<_:: 224>>
@p 726838724295606890549323807888004534353641360687318060281490199180612328166730772686396383698676545930088884461843637361053498018365439
@a 156326
defp clamp(c) do
c |> band(~~~3)
|> bor(128 <<< 8 * 55)
end
defp square(x), do: x * x # :math.pow yields floats.. and we only need this one
defp expmod(_b,0,_m), do: 1
defp expmod(b,e,m) do
t = b |> expmod(div(e,2), m) |> square |> rem(m)
if (e &&& 1) == 1, do: (t * b) |> rem(m), else: t
end
defp inv(x), do: x|> expmod(@p - 2, @p)
defp add({xn,zn}, {xm,zm}, {xd,zd}) do
x = (xm * xn - zm * zn) |> square |> (&(&1 * 4 * zd)).()
z = (xm * zn - zm * xn) |> square |> (&(&1 * 4 * xd)).()
{rem(x,@p), rem(z,@p)}
end
defp double({xn,zn}) do
x = (square(xn) - square(zn)) |> square
z = 4 * xn * zn * (square(xn) + @a * xn * zn + square(zn))
{rem(x,@p), rem(z,@p)}
end
def curve448(n, base) do
one = {base,1}
two = double(one)
{{x,z}, _} = nth_mult(n, {one,two})
(x * inv(z)) |> rem(@p)
end
defp nth_mult(1, basepair), do: basepair
defp nth_mult(n, {one,two}) do
{pm, pm1} = n |> div(2) |> nth_mult({one,two})
if (n &&& 1) == 1, do: { add(pm, pm1, one), double(pm1) }, else: { double(pm), add(pm, pm1, one) }
end
@doc """
Generate a secret/public key pair
Returned tuple contains `{random_secret_key, derived_public_key}`
"""
@spec generate_key_pair :: {key,key}
def generate_key_pair do
secret = :crypto.strong_rand_bytes(56) # This algorithm is supposed to be resilient against poor RNG, but use the best we can
{secret, derive_public_key(secret)}
end
@doc """
Derive a shared secret for a secret and public key
Given our secret key and our partner's public key, returns a
shared secret which can be derived by the partner in a complementary way.
"""
@spec derive_shared_secret(key,key) :: key | :error
def derive_shared_secret(our_secret, their_public) when byte_size(our_secret) == 56 and byte_size(their_public) == 56 do
our_secret |> :binary.decode_unsigned(:little)
|> clamp
|> curve448(:binary.decode_unsigned(their_public, :little))
|> :binary.encode_unsigned(:little)
end
def derive_shared_secret(_ours,_theirs), do: :error
@doc """
Derive the public key from a secret key
"""
@spec derive_public_key(key) :: key | :error
def derive_public_key(our_secret) when byte_size(our_secret) == 56 do
our_secret |> :binary.decode_unsigned(:little)
|> clamp
|> curve448(5)
|> :binary.encode_unsigned(:little)
end
def derive_public_key(_ours), do: :error
end
|
lib/curve448.ex
| 0.864268
| 0.451508
|
curve448.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.