code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule Adventofcode.Day10KnotHash do
@moduledoc """
Advent of Code 2017, day 10: Knot Hash.

Simulates "knot tying" rounds over a circular list of values and derives
either the product of the first two values (part 1) or the full 32-character
hexadecimal knot hash (part 2).
"""
require Bitwise
# `size` is the length of the circular list and `values` holds it as a tuple
# for O(1) reads/writes via elem/2 and put_elem/3.
# NOTE: the default `size: 255` is inconsistent with the 256 used everywhere
# else, but it is always overridden by new/2; kept for compatibility.
defstruct current: 0, skip: 0, size: 255, lengths: [], values: {}
@doc """
Part 1: runs a single round over a list of `list_size` values and returns
the product of the first two resulting values.
"""
def first_two_sum(input, list_size \\ 256) do
input
|> build_lengths()
|> new(list_size)
|> process_recursively()
|> sum_of_first_two()
end
@doc """
Part 2: computes the knot hash of `input` — 64 rounds over 256 values using
the ASCII lengths plus the standard suffix, XOR-condensed and hex-encoded.
"""
def knot_hash(input) do
input
|> build_lengths_from_ascii()
|> new(256)
|> process_lengths_many_times(64)
|> sparse_hash_to_dense_hash()
|> dense_hash_to_hexadecimal()
end
defp sum_of_first_two(%{values: values}), do: elem(values, 0) * elem(values, 1)
@doc "Builds the initial state: values 0..list_size-1 stored as a tuple."
def new(lengths, list_size) do
values = 0..(list_size - 1) |> Enum.to_list() |> List.to_tuple()
%__MODULE__{size: list_size, lengths: lengths, values: values}
end
@doc "Parses a comma-separated list of integer lengths (part 1 input format)."
def build_lengths(input) do
input
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
# Suffix mandated by the puzzle for part 2 length sequences.
@standard_length_suffix [17, 31, 73, 47, 23]
@doc "Treats each input byte as a length and appends the standard suffix."
def build_lengths_from_ascii(input) do
String.to_charlist(input) ++ @standard_length_suffix
end
@doc "Applies process/1 until all pending lengths are consumed (one round)."
def process_recursively(%{lengths: []} = state), do: state
def process_recursively(state) do
state
|> process()
|> process_recursively()
end
# Re-runs the same length sequence `times` times, preserving current/skip
# between rounds (required by the 64-round dense hash).
defp process_lengths_many_times(state, times) do
do_process_lengths_many_times(state, times, state.lengths)
end
defp do_process_lengths_many_times(state, 0, _lengths), do: state
defp do_process_lengths_many_times(state, times, lengths) when times > 0 do
%{state | lengths: lengths}
|> process_recursively()
|> do_process_lengths_many_times(times - 1, lengths)
end
@doc """
One knot step: reverses `head` elements starting at `current`, then advances
`current` by length + skip (wrapping around) and increments `skip`.
"""
def process(%{lengths: [head | tail]} = state) do
values = reverse(state, state.current, head)
current = rem(state.current + head + state.skip, state.size)
skip = state.skip + 1
%{state | current: current, skip: skip, lengths: tail, values: values}
end
@doc "Reverses the circular slice of `len` values starting at `current`."
# A 0-length reversal is a no-op. Without this clause, `current..(current - 1)`
# builds a DECREASING 2-element range and wrongly swaps two values.
def reverse(state, _current, 0), do: state.values
def reverse(state, current, len) do
reversed =
current..(current + len - 1)
|> Enum.map(&rem(&1, state.size))
|> Enum.map(&elem(state.values, &1))
|> Enum.reverse()
Enum.reduce(Enum.with_index(reversed), state.values, fn {value, offset}, acc ->
index = rem(state.current + offset, state.size)
put_elem(acc, index, value)
end)
end
# XORs each run of 16 sparse-hash values into one dense-hash byte.
# Enum.chunk/2 is deprecated; Enum.chunk_every/2 is the supported equivalent.
defp sparse_hash_to_dense_hash(%{values: values, size: 256}) do
values
|> Tuple.to_list()
|> Enum.chunk_every(16)
|> Enum.map(fn digits -> Enum.reduce(digits, &Bitwise.bxor/2) end)
end
@doc "Encodes dense-hash bytes as lowercase hex, zero-padded to 2 digits each."
def dense_hash_to_hexadecimal(dense_hash) do
dense_hash
|> Enum.map(&Integer.to_string(&1, 16))
|> Enum.map_join(&String.pad_leading(&1, 2, "0"))
|> String.downcase()
end
@doc "Renders the list with the current position highlighted (debugging aid)."
def pretty(%{current: current} = state) do
Enum.map_join(Enum.with_index(Tuple.to_list(state.values)), fn
{value, ^current} -> "[#{value}]"
{value, _index} -> " #{value} "
end)
end
end
|
lib/day_10_knot_hash.ex
| 0.707506
| 0.627552
|
day_10_knot_hash.ex
|
starcoder
|
defmodule Aja.OrdMap do
base_doc = ~S"""
A map preserving key insertion order, with efficient lookups, updates and enumeration.
It works like regular maps, except that the insertion order is preserved:
iex> %{"one" => 1, "two" => 2, "three" => 3}
%{"one" => 1, "three" => 3, "two" => 2}
iex> Aja.OrdMap.new([{"one", 1}, {"two", 2}, {"three", 3}])
ord(%{"one" => 1, "two" => 2, "three" => 3})
There is an unavoidable overhead compared to natively implemented maps, so
keep using regular maps when you do not care about the insertion order.
`Aja.OrdMap`:
- provides efficient (logarithmic) access: it is not a simple list of tuples
- implements the `Access` behaviour, `Enum` / `Inspect` / `Collectable` protocols
- optionally implements the `Jason.Encoder` protocol if `Jason` is installed
## Examples
`Aja.OrdMap` offers the same API as `Map` :
iex> ord_map = Aja.OrdMap.new([b: "Bat", a: "Ant", c: "Cat"])
ord(%{b: "Bat", a: "Ant", c: "Cat"})
iex> Aja.OrdMap.get(ord_map, :c)
"Cat"
iex> Aja.OrdMap.fetch(ord_map, :a)
{:ok, "Ant"}
iex> Aja.OrdMap.put(ord_map, :d, "Dinosaur")
ord(%{b: "Bat", a: "Ant", c: "Cat", d: "Dinosaur"})
iex> Aja.OrdMap.put(ord_map, :b, "Buffalo")
ord(%{b: "Buffalo", a: "Ant", c: "Cat"})
iex> Enum.to_list(ord_map)
[b: "Bat", a: "Ant", c: "Cat"]
iex> [d: "Dinosaur", b: "Buffalo", e: "Eel"] |> Enum.into(ord_map)
ord(%{b: "Buffalo", a: "Ant", c: "Cat", d: "Dinosaur", e: "Eel"})
## Specific functions
Due to its ordered nature, `Aja.OrdMap` also offers some extra methods not present in `Map`, like:
- `first/1` and `last/1` to efficiently retrieve the first / last key-value pair
- `foldl/3` and `foldr/3` to efficiently fold (reduce) from left-to-right or right-to-left
Examples:
iex> ord_map = Aja.OrdMap.new(b: "Bat", a: "Ant", c: "Cat")
iex> Aja.OrdMap.first(ord_map)
{:b, "Bat"}
iex> Aja.OrdMap.last(ord_map)
{:c, "Cat"}
iex> Aja.OrdMap.foldr(ord_map, [], fn {_key, value}, acc -> [value <> "man" | acc] end)
["Batman", "Antman", "Catman"]
## Access behaviour
`Aja.OrdMap` implements the `Access` behaviour.
iex> ord_map = Aja.OrdMap.new([a: "Ant", b: "Bat", c: "Cat"])
iex> ord_map[:a]
"Ant"
iex> put_in(ord_map[:b], "Buffalo")
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> put_in(ord_map[:d], "Dinosaur")
ord(%{a: "Ant", b: "Bat", c: "Cat", d: "Dinosaur"})
iex> {"Cat", updated} = pop_in(ord_map[:c]); updated
ord(%{a: "Ant", b: "Bat"})
## Convenience [`ord/1`](`Aja.ord/1`) and [`ord_size/1`](`Aja.ord_size/1`) macros
The `Aja.OrdMap` module can be used without any macro.
The `Aja.ord/1` macro does however provide some syntactic sugar to make
it more convenient to work with ordered maps, namely:
- construct new ordered maps without the clutter of an entry list
- pattern match on key-values like regular maps
- update some existing keys
Examples:
iex> import Aja
iex> ord_map = ord(%{"一" => 1, "二" => 2, "三" => 3})
ord(%{"一" => 1, "二" => 2, "三" => 3})
iex> ord(%{"三" => three, "一" => one}) = ord_map
iex> {one, three}
{1, 3}
iex> ord(%{ord_map | "二" => "NI!"})
ord(%{"一" => 1, "二" => "NI!", "三" => 3})
Notes:
- pattern-matching on keys is not affected by insertion order.
- For expressions with constant keys, `Aja.ord/1` is able to generate the AST at compile time like the `Aja.vec/1` macro.
The `Aja.ord_size/1` macro can be used in guards:
iex> import Aja
iex> match?(v when ord_size(v) > 2, ord(%{"一" => 1, "二" => 2, "三" => 3}))
true
## With `Jason`
iex> Aja.OrdMap.new([{"un", 1}, {"deux", 2}, {"trois", 3}]) |> Jason.encode!()
"{\"un\":1,\"deux\":2,\"trois\":3}"
JSON encoding preserves the insertion order. Comparing with a regular map:
iex> Map.new([{"un", 1}, {"deux", 2}, {"trois", 3}]) |> Jason.encode!()
"{\"deux\":2,\"trois\":3,\"un\":1}"
There is no way as of now to decode JSON using `Aja.OrdMap`.
## Key deletion and sparse maps
Due to the underlying structures being used, efficient key deletion implies keeping around some
"holes" to avoid rebuilding the whole structure.
Such an ord map will be called **sparse**, while an ord map that never had a key deleted will be
referred as **dense**.
The implications of sparse structures are multiple:
- unlike dense structures, they cannot be compared as erlang terms
(using either `==/2`, `===/2` or the pin operator `^`)
- `Aja.OrdMap.equal?/2` can safely compare both sparse and dense structures, but is slower for sparse
- enumerating sparse structures is less efficient than dense ones
Calling `Aja.OrdMap.new/1` on a sparse ord map will rebuild a new dense one from scratch (which can be expensive).
iex> dense = Aja.OrdMap.new(a: "Ant", b: "Bat")
ord(%{a: "Ant", b: "Bat"})
iex> sparse = Aja.OrdMap.new(c: "Cat", a: "Ant", b: "Bat") |> Aja.OrdMap.delete(:c)
#Aja.OrdMap<%{a: "Ant", b: "Bat"}, sparse?: true>
iex> dense == sparse
false
iex> match?(^dense, sparse)
false
iex> Aja.OrdMap.equal?(dense, sparse) # works with sparse maps, but less efficient
true
iex> new_dense = Aja.OrdMap.new(sparse) # rebuild a dense map from a sparse one
ord(%{a: "Ant", b: "Bat"})
iex> new_dense === dense
true
In order to avoid having to worry about memory issues when adding and deleting keys successively,
ord maps cannot be more than half sparse, and are periodically rebuilt as dense upon deletion.
iex> sparse = Aja.OrdMap.new(c: "Cat", a: "Ant", b: "Bat") |> Aja.OrdMap.delete(:c)
#Aja.OrdMap<%{a: "Ant", b: "Bat"}, sparse?: true>
iex> Aja.OrdMap.delete(sparse, :a)
ord(%{b: "Bat"})
Note: Deleting the last key does not make a dense ord map sparse. This is not a bug,
but an expected behavior due to how data is stored.
iex> Aja.OrdMap.new([one: 1, two: 2, three: 3]) |> Aja.OrdMap.delete(:three)
ord(%{one: 1, two: 2})
The `dense?/1` and `sparse?/1` functions can be used to check if a `Aja.OrdMap` is dense or sparse.
While this design puts some burden on the developer, the idea behind it is:
- to keep it as convenient and performant as possible unless deletion is necessary
- to be transparent about sparse structures and their limitation
- instead of constantly rebuild new dense structures, let users decide the best timing to do it
- still work fine with sparse structures, but in a degraded mode
- protect users about potential memory leaks and performance issues
## Pattern-matching and opaque type
An `Aja.OrdMap` is represented internally using the `%Aja.OrdMap{}` struct. This struct
can be used whenever there's a need to pattern match on something being an `Aja.OrdMap`:
iex> match?(%Aja.OrdMap{}, Aja.OrdMap.new())
true
Note, however, that `Aja.OrdMap` is an [opaque type](https://hexdocs.pm/elixir/typespecs.html#user-defined-types):
its struct internal fields must not be accessed directly.
As discussed in the previous section, [`ord/1`](`Aja.ord/1`) and [`ord_size/1`](`Aja.ord_size/1`) makes it
possible to pattern match on keys as well as check the type and size.
## Memory overhead
`Aja.OrdMap` takes roughly 2~3x more memory than a regular map depending on the type of data:
"""
module_doc =
if(System.otp_release() |> String.to_integer() >= 24) do
base_doc <>
~S"""
iex> map_size = Map.new(1..100, fn i -> {i, i} end) |> :erts_debug.size()
366
iex> ord_map_size = Aja.OrdMap.new(1..100, fn i -> {i, i} end) |> :erts_debug.size()
1019
iex> Float.round(ord_map_size / map_size, 2)
2.78
"""
else
base_doc
end
@moduledoc module_doc
require Aja.Vector.Raw, as: RawVector
@behaviour Access
@type key :: term
@type value :: term
@typep index :: non_neg_integer
# Internal representation:
# - `__ord_map__` maps each key to a cons cell `[index | value]`, where `index`
#   is the key's slot in the vector (gives logarithmic lookups/updates);
# - `__ord_vector__` stores `{key, value}` pairs in insertion order (with `nil`
#   holes where entries were deleted, in sparse maps).
@typep internals(key, value) :: %__MODULE__{
__ord_map__: %{optional(key) => [index | value]},
__ord_vector__: RawVector.t({key, value})
}
@type t(key, value) :: internals(key, value)
@type t :: t(key, value)
defstruct __ord_map__: %{}, __ord_vector__: RawVector.empty()
@doc false
# A map is dense when no entry was ever deleted: the vector then has exactly
# one slot per key (no nil holes), i.e. map size == vector size.
defguard is_dense(ord_map)
# TODO simplify when stop supporting Elixir 1.10
when :erlang.map_get(:__ord_map__, ord_map) |> map_size() ===
:erlang.map_get(:__ord_vector__, ord_map) |> RawVector.size()
@doc """
Returns the number of keys in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.size(ord_map)
3
iex> Aja.OrdMap.size(Aja.OrdMap.new())
0
"""
@spec size(t) :: non_neg_integer
def size(ord_map)
def size(%__MODULE__{__ord_map__: map}) do
map_size(map)
end
@doc """
Returns all keys from `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", c: "Cat", a: "Ant")
iex> Aja.OrdMap.keys(ord_map)
[:b, :c, :a]
"""
@spec keys(t(k, value)) :: [k] when k: key
def keys(ord_map)
def keys(%__MODULE__{__ord_vector__: vector}) do
RawVector.foldr(vector, [], fn
{key, _value}, acc -> [key | acc]
nil, acc -> acc
end)
end
@doc """
Returns all values from `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", c: "Cat", a: "Ant")
iex> Aja.OrdMap.values(ord_map)
["Bat", "Cat", "Ant"]
"""
@spec values(t(key, v)) :: [v] when v: value
def values(ord_map)
def values(%__MODULE__{__ord_vector__: vector}) do
RawVector.foldr(vector, [], fn
{_key, value}, acc -> [value | acc]
nil, acc -> acc
end)
end
@doc """
Returns all key-values pairs from `ord_map` as a list.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", c: "Cat", a: "Ant")
iex> Aja.OrdMap.to_list(ord_map)
[b: "Bat", c: "Cat", a: "Ant"]
"""
@spec to_list(t(k, v)) :: [{k, v}] when k: key, v: value
def to_list(ord_map)
def to_list(%__MODULE__{__ord_vector__: vector} = ord_map) when is_dense(ord_map) do
RawVector.to_list(vector)
end
def to_list(%__MODULE__{__ord_vector__: vector}) do
RawVector.sparse_to_list(vector)
end
@doc """
Returns a new empty ordered map.
## Examples
iex> Aja.OrdMap.new()
ord(%{})
"""
@spec new :: t
def new() do
%__MODULE__{}
end
@doc """
Creates an ordered map from an `enumerable`.
Preserves the original order of keys.
Duplicated keys are removed; the latest one prevails.
## Examples
iex> Aja.OrdMap.new(b: "Bat", a: "Ant", c: "Cat")
ord(%{b: "Bat", a: "Ant", c: "Cat"})
iex> Aja.OrdMap.new(b: "Bat", a: "Ant", b: "Buffalo", a: "Antelope")
ord(%{b: "Buffalo", a: "Antelope"})
`new/1` will return dense ord maps untouched, but will rebuild sparse ord maps from scratch.
This can be used to build a dense ord map from from a sparse one.
See the [section about sparse structures](#module-key-deletion-and-sparse-maps) for more information.
iex> sparse = Aja.OrdMap.new(c: "Cat", a: "Ant", b: "Bat") |> Aja.OrdMap.delete(:c)
#Aja.OrdMap<%{a: "Ant", b: "Bat"}, sparse?: true>
iex> Aja.OrdMap.new(sparse)
ord(%{a: "Ant", b: "Bat"})
"""
@spec new(Enumerable.t()) :: t(key, value)
def new(%__MODULE__{} = ord_map) when is_dense(ord_map), do: ord_map
def new(enumerable) do
# TODO add from_vector to avoid intermediate list?
enumerable
|> Aja.EnumHelper.to_list()
|> from_list()
end
@doc """
Creates an ordered map from an `enumerable` via the given `transform` function.
Preserves the original order of keys.
Duplicated keys are removed; the latest one prevails.
## Examples
iex> Aja.OrdMap.new([:a, :b], fn x -> {x, x} end)
ord(%{a: :a, b: :b})
"""
@spec new(Enumerable.t(), (term -> {k, v})) :: t(k, v) when k: key, v: value
def new(enumerable, fun) when is_function(fun, 1) do
enumerable
|> Aja.EnumHelper.map(fun)
|> from_list()
end
@doc """
Returns whether the given `key` exists in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.has_key?(ord_map, :a)
true
iex> Aja.OrdMap.has_key?(ord_map, :d)
false
"""
@spec has_key?(t(k, value), k) :: boolean when k: key
def has_key?(ord_map, key)
def has_key?(%__MODULE__{__ord_map__: map}, key) do
Map.has_key?(map, key)
end
@doc ~S"""
Fetches the value for a specific `key` and returns it in a ok-entry.
If the key does not exist, returns :error.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "A", b: "B", c: "C")
iex> Aja.OrdMap.fetch(ord_map, :c)
{:ok, "C"}
iex> Aja.OrdMap.fetch(ord_map, :z)
:error
"""
@impl Access
@spec fetch(t(k, v), k) :: {:ok, v} | :error when k: key, v: value
def fetch(ord_map, key)
def fetch(%__MODULE__{__ord_map__: map}, key) do
case map do
%{^key => [_index | value]} ->
{:ok, value}
_ ->
:error
end
end
@doc ~S"""
Fetches the value for a specific `key` in the given `ord_map`,
erroring out if `ord_map` doesn't contain `key`.
If `ord_map` doesn't contain `key`, a `KeyError` exception is raised.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "A", b: "B", c: "C")
iex> Aja.OrdMap.fetch!(ord_map, :c)
"C"
iex> Aja.OrdMap.fetch!(ord_map, :z)
** (KeyError) key :z not found in: ord(%{a: "A", b: "B", c: "C"})
"""
@spec fetch!(t(k, v), k) :: v when k: key, v: value
def fetch!(%__MODULE__{__ord_map__: map} = ord_map, key) do
case map do
%{^key => [_index | value]} ->
value
_ ->
raise KeyError, key: key, term: ord_map
end
end
@doc """
Puts the given `value` under `key` unless the entry `key`
already exists in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", c: "Cat")
iex> Aja.OrdMap.put_new(ord_map, :a, "Ant")
ord(%{b: "Bat", c: "Cat", a: "Ant"})
iex> Aja.OrdMap.put_new(ord_map, :b, "Buffalo")
ord(%{b: "Bat", c: "Cat"})
"""
@spec put_new(t(k, v), k, v) :: t(k, v) when k: key, v: value
def put_new(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
value
) do
case map do
%{^key => _value} ->
ord_map
_ ->
do_add_new(map, vector, key, value)
end
end
@doc """
Puts a value under `key` only if the `key` already exists in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.replace(ord_map, :b, "Buffalo")
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> Aja.OrdMap.replace(ord_map, :d, "Dinosaur")
ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@spec replace(t(k, v), k, v) :: t(k, v) when k: key, v: value
def replace(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
value
) do
case map do
%{^key => [index | _value]} ->
do_add_existing(map, vector, index, key, value)
_ ->
ord_map
end
end
@doc """
Puts a value under `key` only if the `key` already exists in `ord_map`.
If `key` is not present in `ord_map`, a `KeyError` exception is raised.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.replace!(ord_map, :b, "Buffalo")
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> Aja.OrdMap.replace!(ord_map, :d, "Dinosaur")
** (KeyError) key :d not found in: ord(%{a: \"Ant\", b: \"Bat\", c: \"Cat\"})
"""
@spec replace!(t(k, v), k, v) :: t(k, v) when k: key, v: value
def replace!(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
value
) do
case map do
%{^key => [index | _value]} ->
do_add_existing(map, vector, index, key, value)
_ ->
raise KeyError, key: key, term: ord_map
end
end
@doc """
Evaluates `fun` and puts the result under `key`
in `ord_map` unless `key` is already present.
This function is useful in case you want to compute the value to put under
`key` only if `key` is not already present, as for example, when the value is expensive to
calculate or generally difficult to setup and teardown again.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", c: "Cat")
iex> expensive_fun = fn -> "Ant" end
iex> Aja.OrdMap.put_new_lazy(ord_map, :a, expensive_fun)
ord(%{b: "Bat", c: "Cat", a: "Ant"})
iex> Aja.OrdMap.put_new_lazy(ord_map, :b, expensive_fun)
ord(%{b: "Bat", c: "Cat"})
"""
@spec put_new_lazy(t(k, v), k, (() -> v)) :: t(k, v) when k: key, v: value
def put_new_lazy(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
fun
)
when is_function(fun, 0) do
if has_key?(ord_map, key) do
ord_map
else
do_add_new(map, vector, key, fun.())
end
end
@doc """
Returns a new ordered map with all the key-value pairs in `ord_map` where the key
is in `keys`.
If `keys` contains keys that are not in `ord_map`, they're simply ignored.
Respects the order of the `keys` list.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.take(ord_map, [:c, :e, :a])
ord(%{c: "Cat", a: "Ant"})
"""
@spec get(t(k, v), [k]) :: t(k, v) when k: key, v: value
def take(ord_map, keys)
def take(%__MODULE__{__ord_map__: map}, keys) when is_list(keys) do
do_take(map, keys, [], %{}, 0)
end
# Builds the result of take/2 in one pass over `keys`:
# `kvs` accumulates {key, value} pairs in reverse, `map_acc` accumulates the
# key => [index | value] entries, and `index` is the next vector slot.
defp do_take(_map, _keys = [], kvs, map_acc, _index) do
vector = kvs |> :lists.reverse() |> RawVector.from_list()
%__MODULE__{__ord_map__: map_acc, __ord_vector__: vector}
end
defp do_take(map, [key | keys], kvs, map_acc, index) do
case map do
%{^key => [_index | value]} ->
case map_acc do
%{^key => _} ->
# Duplicate in `keys`: the key was already taken, skip it.
do_take(map, keys, kvs, map_acc, index)
_ ->
new_kvs = [{key, value} | kvs]
new_map_acc = Map.put(map_acc, key, [index | value])
do_take(map, keys, new_kvs, new_map_acc, index + 1)
end
_ ->
# Key absent from the source map: simply ignored.
do_take(map, keys, kvs, map_acc, index)
end
end
@doc """
Gets the value for a specific `key` in `ord_map`.
If `key` is present in `ord_map` then its value `value` is
returned. Otherwise, `default` is returned.
If `default` is not provided, `nil` is used.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.get(ord_map, :a)
"Ant"
iex> Aja.OrdMap.get(ord_map, :z)
nil
iex> Aja.OrdMap.get(ord_map, :z, "Zebra")
"Zebra"
"""
@spec get(t(k, v), k, v) :: v | nil when k: key, v: value
def get(ord_map, key, default \\ nil)
def get(%__MODULE__{__ord_map__: map}, key, default) do
case map do
%{^key => [_index | value]} ->
value
_ ->
default
end
end
@doc """
Gets the value for a specific `key` in `ord_map`.
If `key` is present in `ord_map` then its value `value` is
returned. Otherwise, `fun` is evaluated and its result is returned.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> expensive_fun = fn -> "Zebra" end
iex> Aja.OrdMap.get_lazy(ord_map, :a, expensive_fun)
"Ant"
iex> Aja.OrdMap.get_lazy(ord_map, :z, expensive_fun)
"Zebra"
"""
@spec get_lazy(t(k, v), k, v) :: v | nil when k: key, v: value
def get_lazy(ord_map, key, fun)
def get_lazy(%__MODULE__{__ord_map__: map}, key, fun) when is_function(fun, 0) do
case map do
%{^key => [_index | value]} ->
value
_ ->
fun.()
end
end
@doc """
Puts the given `value` under `key` in `ord_map`.
If the `key` does exist, it overwrites the existing value without
changing its current location.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.put(ord_map, :b, "Buffalo")
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> Aja.OrdMap.put(ord_map, :d, "Dinosaur")
ord(%{a: "Ant", b: "Bat", c: "Cat", d: "Dinosaur"})
"""
@spec put(t(k, v), k, v) :: t(k, v) when k: key, v: value
def put(ord_map, key, value)
def put(
%__MODULE__{__ord_map__: map, __ord_vector__: vector},
key,
value
) do
case map do
%{^key => [index | _value]} ->
do_add_existing(map, vector, index, key, value)
_ ->
do_add_new(map, vector, key, value)
end
end
@doc """
Deletes the entry in `ord_map` for a specific `key`.
If the `key` does not exist, returns `ord_map` unchanged.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.delete(ord_map, :b)
#Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
iex> Aja.OrdMap.delete(ord_map, :z)
ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@spec delete(t(k, v), k) :: t(k, v) when k: key, v: value
def delete(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key
) do
case :maps.take(key, map) do
{[index | _value], new_map} ->
do_delete_existing(new_map, vector, index)
:error ->
ord_map
end
end
@doc """
Merges a map or an ordered map into an `ord_map`.
All keys in `map_or_ord_map` will be added to `ord_map`, overriding any existing one
(i.e., the keys in `map_or_ord_map` "have precedence" over the ones in `ord_map`).
## Examples
iex> Aja.OrdMap.merge(Aja.OrdMap.new(%{a: 1, b: 2}), Aja.OrdMap.new(%{a: 3, d: 4}))
ord(%{a: 3, b: 2, d: 4})
iex> Aja.OrdMap.merge(Aja.OrdMap.new(%{a: 1, b: 2}), %{a: 3, d: 4})
ord(%{a: 3, b: 2, d: 4})
"""
@spec merge(t(k, v), t(k, v) | %{optional(k) => v}) :: t(k, v) when k: key, v: value
def merge(ord_map, map_or_ord_map)
def merge(%__MODULE__{} = ord_map1, %__MODULE__{} = ord_map2) do
merge_list(ord_map1, to_list(ord_map2))
end
def merge(%__MODULE__{}, %_{}) do
raise ArgumentError, "Cannot merge arbitrary structs"
end
def merge(%__MODULE__{} = ord_map1, %{} = map2) do
merge_list(ord_map1, Map.to_list(map2))
end
@doc """
Puts a value under `key` only if the `key` already exists in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.update(ord_map, :b, "N/A", &String.upcase/1)
ord(%{a: "Ant", b: "BAT", c: "Cat"})
iex> Aja.OrdMap.update(ord_map, :z, "N/A", &String.upcase/1)
ord(%{a: "Ant", b: "Bat", c: "Cat", z: "N/A"})
"""
@spec update(t(k, v), k, v, (k -> v)) :: t(k, v) when k: key, v: value
def update(ord_map, key, default, fun)
def update(
%__MODULE__{__ord_map__: map, __ord_vector__: vector},
key,
default,
fun
)
when is_function(fun, 1) do
case map do
%{^key => [index | value]} ->
do_add_existing(map, vector, index, key, fun.(value))
_ ->
do_add_new(map, vector, key, default)
end
end
@doc ~S"""
Returns the value for `key` and the updated ordered map without `key`.
If `key` is present in the ordered map with a value `value`,
`{value, new_ord_map}` is returned.
If `key` is not present in the ordered map, `{default, ord_map}` is returned.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> {"Bat", updated} = Aja.OrdMap.pop(ord_map, :b)
iex> updated
#Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
iex> {nil, updated} = Aja.OrdMap.pop(ord_map, :z)
iex> updated
ord(%{a: "Ant", b: "Bat", c: "Cat"})
iex> {"Z", updated} = Aja.OrdMap.pop(ord_map, :z, "Z")
iex> updated
ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@impl Access
@spec pop(t(k, v), k, v) :: {v, t(k, v)} when k: key, v: value
def pop(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
default \\ nil
) do
case :maps.take(key, map) do
{[index | value], new_map} ->
{value, do_delete_existing(new_map, vector, index)}
:error ->
{default, ord_map}
end
end
@doc ~S"""
Returns the value for `key` and the updated ordered map without `key`.
Behaves the same as `pop/3` but raises if `key` is not present in `ord_map`.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> {"Bat", updated} = Aja.OrdMap.pop!(ord_map, :b)
iex> updated
#Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
iex> Aja.OrdMap.pop!(ord_map, :z)
** (KeyError) key :z not found in: ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@spec pop!(t(k, v), k) :: {v, t(k, v)} when k: key, v: value
def pop!(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key
) do
case :maps.take(key, map) do
{[index | value], new_map} ->
{value, do_delete_existing(new_map, vector, index)}
:error ->
raise KeyError, key: key, term: ord_map
end
end
@doc """
Lazily returns and removes the value associated with `key` in `ord_map`.
If `key` is present in `ord_map`, it returns `{value, new_map}` where `value` is the value of
the key and `new_map` is the result of removing `key` from `ord_map`. If `key`
is not present in `ord_map`, `{fun_result, ord_map}` is returned, where `fun_result`
is the result of applying `fun`.
This is useful if the default value is very expensive to calculate or
generally difficult to setup and teardown again.
## Examples
iex> ord_map = Aja.OrdMap.new(b: "Bat", a: "Ant", c: "Cat")
iex> expensive_fun = fn -> "Zebra" end
iex> {"Ant", updated} = Aja.OrdMap.pop_lazy(ord_map, :a, expensive_fun)
iex> updated
#Aja.OrdMap<%{b: "Bat", c: "Cat"}, sparse?: true>
iex> {"Zebra", not_updated} = Aja.OrdMap.pop_lazy(ord_map, :z, expensive_fun)
iex> not_updated
ord(%{b: "Bat", a: "Ant", c: "Cat"})
"""
@spec pop_lazy(t(k, v), k, (() -> v)) :: {v, t(k, v)} when k: key, v: value
def pop_lazy(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
fun
)
when is_function(fun, 0) do
case :maps.take(key, map) do
{[index | value], new_map} ->
{value, do_delete_existing(new_map, vector, index)}
:error ->
{fun.(), ord_map}
end
end
@doc """
Drops the given `keys` from `ord_map`.
If `keys` contains keys that are not in `ord_map`, they're simply ignored.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.drop(ord_map, [:b, :d])
#Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
"""
@spec drop(t(k, v), [k]) :: t(k, v) when k: key, v: value
def drop(%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map, keys)
when is_list(keys) do
case Map.take(map, keys) do
empty when empty == %{} ->
ord_map
dropped ->
dropped_keys = Map.keys(dropped)
dropped
|> Map.values()
|> Enum.map(fn [index | _value] -> index end)
|> Enum.sort(:desc)
|> do_drop(map, vector, dropped_keys)
end
end
@doc """
Puts a value under `key` only if the `key` already exists in `ord_map`.
If `key` is not present in `ord_map`, a `KeyError` exception is raised.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> Aja.OrdMap.update!(ord_map, :b, &String.upcase/1)
ord(%{a: "Ant", b: "BAT", c: "Cat"})
iex> Aja.OrdMap.update!(ord_map, :d, &String.upcase/1)
** (KeyError) key :d not found in: ord(%{a: \"Ant\", b: \"Bat\", c: \"Cat\"})
"""
@spec update!(t(k, v), k, v) :: t(k, v) when k: key, v: value
def update!(
%__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
key,
fun
)
when is_function(fun, 1) do
case map do
%{^key => [index | value]} ->
do_add_existing(map, vector, index, key, fun.(value))
_ ->
raise KeyError, key: key, term: ord_map
end
end
@doc ~S"""
Gets the value from `key` and updates it, all in one pass.
Mirrors `Map.get_and_update/3`, see its documentation.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> {"bat", updated} = Aja.OrdMap.get_and_update(ord_map, :b, fn current_value ->
...> {current_value && String.downcase(current_value), "Buffalo"}
...> end)
iex> updated
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> {nil, updated} = Aja.OrdMap.get_and_update(ord_map, :z, fn current_value ->
...> {current_value && String.downcase(current_value), "Zebra"}
...> end)
iex> updated
ord(%{a: "Ant", b: "Bat", c: "Cat", z: "Zebra"})
iex> {"Bat", updated} = Aja.OrdMap.get_and_update(ord_map, :b, fn _ -> :pop end)
iex> updated
#Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
iex> {nil, updated} = Aja.OrdMap.get_and_update(ord_map, :z, fn _ -> :pop end)
iex> updated
ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@impl Access
@spec get_and_update(t(k, v), k, (v -> {returned, v} | :pop)) :: {returned, t(k, v)}
when k: key, v: value, returned: term
def get_and_update(%__MODULE__{} = ord_map, key, fun) when is_function(fun, 1) do
current = get(ord_map, key)
do_get_and_update(ord_map, key, fun, current)
end
@doc ~S"""
Gets the value from `key` and updates it, all in one pass.
Mirrors `Map.get_and_update!/3`, see its documentation.
## Examples
iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
iex> {"bat", updated} = Aja.OrdMap.get_and_update!(ord_map, :b, fn current_value ->
...> {current_value && String.downcase(current_value), "Buffalo"}
...> end)
iex> updated
ord(%{a: "Ant", b: "Buffalo", c: "Cat"})
iex> Aja.OrdMap.get_and_update!(ord_map, :z, fn current_value ->
...> {current_value && String.downcase(current_value), "Zebra"}
...> end)
** (KeyError) key :z not found in: ord(%{a: "Ant", b: "Bat", c: "Cat"})
"""
@spec get_and_update!(t(k, v), k, (v -> {returned, v} | :pop)) :: {returned, t(k, v)}
when k: key, v: value, returned: term
def get_and_update!(%__MODULE__{} = ord_map, key, fun) when is_function(fun, 1) do
current = fetch!(ord_map, key)
do_get_and_update(ord_map, key, fun, current)
end
# Shared tail of get_and_update/3 and get_and_update!/3: applies the caller's
# callback to the current value and interprets its result.
defp do_get_and_update(ord_map, key, fun, current) do
case fun.(current) do
{returned, new_value} ->
# Two-element tuple: store the new value, hand back the first element.
{returned, put(ord_map, key, new_value)}
:pop ->
# :pop: remove the key and return the current value.
{current, delete(ord_map, key)}
unexpected ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(unexpected)}"
end
end
@doc """
Converts a `struct` to an ordered map.
It accepts the struct module or a struct itself and
simply removes the `__struct__` field from the given struct
or from a new struct generated from the given module.
## Example
defmodule User do
defstruct [:name, :age]
end
Aja.OrdMap.from_struct(User)
ord(%{age: nil, name: nil})
Aja.OrdMap.from_struct(%User{name: "john", age: 44})
ord(%{age: 44, name: "john"})
"""
@spec from_struct(atom | struct) :: t
def from_struct(struct) do
struct |> Map.from_struct() |> new()
end
@doc """
Checks if two ordered maps are equal, meaning they have the same key-value pairs
in the same order.
## Examples
iex> Aja.OrdMap.equal?(Aja.OrdMap.new(a: 1, b: 2), Aja.OrdMap.new(a: 1, b: 2))
true
iex> Aja.OrdMap.equal?(Aja.OrdMap.new(a: 1, b: 2), Aja.OrdMap.new(b: 2, a: 1))
false
iex> Aja.OrdMap.equal?(Aja.OrdMap.new(a: 1, b: 2), Aja.OrdMap.new(a: 3, b: 2))
false
"""
@spec equal?(t, t) :: boolean
def equal?(ord_map1, ord_map2)
def equal?(%Aja.OrdMap{__ord_map__: map1} = ord_map1, %Aja.OrdMap{__ord_map__: map2} = ord_map2) do
case {map_size(map1), map_size(map2)} do
{size, size} ->
case {RawVector.size(ord_map1.__ord_vector__), RawVector.size(ord_map2.__ord_vector__)} do
{^size, ^size} ->
# both are dense, maps can be compared safely
map1 === map2
{_, _} ->
# one of them is sparse, inefficient comparison
RawVector.sparse_to_list(ord_map1.__ord_vector__) ===
RawVector.sparse_to_list(ord_map2.__ord_vector__)
end
{_, _} ->
# size mismatch: cannot be equal
false
end
end
# Extra specific functions
@doc """
Finds the first `{key, value}` pair in `ord_map`.

Returns a `{key, value}` entry if `ord_map` is non-empty, or `nil` else.

## Examples

    iex> Aja.OrdMap.new([b: "B", d: "D", a: "A", c: "C"]) |> Aja.OrdMap.first()
    {:b, "B"}
    iex> Aja.OrdMap.new([]) |> Aja.OrdMap.first()
    nil
    iex> Aja.OrdMap.new([]) |> Aja.OrdMap.first(:error)
    :error
"""
@spec first(t(k, v), default) :: {k, v} | default when k: key, v: value, default: term

def first(ord_map, default \\ nil)

# Dense map: position 0 of the vector is guaranteed to hold the first entry.
def first(%Aja.OrdMap{__ord_vector__: vector} = ord_map, default) when is_dense(ord_map) do
  case vector do
    RawVector.first_pattern(first) -> first
    _ -> default
  end
end

# Sparse map: deleted slots are `nil`, so scan for the first truthy element.
# (`fn value -> value end` makes `find` skip the `nil` holes.)
def first(%Aja.OrdMap{__ord_vector__: vector}, default) do
  RawVector.find(vector, default, fn value -> value end)
end
@doc """
Finds the last `{key, value}` pair in `ord_map`.

Returns a `{key, value}` entry if `ord_map` is non-empty, or `nil` else.
Can be accessed efficiently due to the underlying vector.

## Examples

    iex> Aja.OrdMap.new([b: "B", d: "D", a: "A", c: "C"]) |> Aja.OrdMap.last()
    {:c, "C"}
    iex> Aja.OrdMap.new([]) |> Aja.OrdMap.last()
    nil
    iex> Aja.OrdMap.new([]) |> Aja.OrdMap.last(:error)
    :error
"""
@spec last(t(k, v), default) :: {k, v} | default when k: key, v: value, default: term

def last(ord_map, default \\ nil)

# Dense map: the last vector slot is the last entry.
def last(%Aja.OrdMap{__ord_vector__: vector} = ord_map, default) when is_dense(ord_map) do
  case vector do
    RawVector.last_pattern(last) -> last
    _ -> default
  end
end

# Sparse map: deleted slots are `nil`. `foldr` walks from the right, so the
# first truthy element it meets is the last live entry; we `throw` it to
# short-circuit the fold and catch it below. If the fold completes without
# throwing, every slot was `nil` and `default` is returned.
def last(%Aja.OrdMap{__ord_vector__: vector}, default) do
  try do
    RawVector.foldr(vector, nil, fn value, _acc ->
      if value, do: throw(value)
    end)

    default
  catch
    value ->
      value
  end
end
@doc """
Folds (reduces) the given `ord_map` from the left with the function `fun`.

Requires an accumulator `acc`. `fun` is invoked with each `{key, value}`
entry, in insertion order.

## Examples

    iex> ord_map = Aja.OrdMap.new([b: "Bat", c: "Cat", a: "Ant"])
    iex> Aja.OrdMap.foldl(ord_map, "", fn {_key, value}, acc -> value <> acc end)
    "AntCatBat"
    iex> Aja.OrdMap.foldl(ord_map, [], fn {key, value}, acc -> [{key, value <> "man"} | acc] end)
    [a: "Antman", c: "Catman", b: "Batman"]
"""
def foldl(ord_map, acc, fun)

def foldl(%__MODULE__{__ord_vector__: vector} = ord_map, acc, fun) when is_function(fun, 2) do
  # Dense vectors can be folded directly; sparse ones first drop their
  # `nil` holes via `sparse_to_list/1`.
  if is_dense(ord_map) do
    RawVector.foldl(vector, acc, fun)
  else
    vector
    |> RawVector.sparse_to_list()
    |> List.foldl(acc, fun)
  end
end
@doc """
Folds (reduces) the given `ord_map` from the right with the function `fun`.

Requires an accumulator `acc`. Unlike linked lists, this is as efficient as
`foldl/3`. This can typically save a call to `Enum.reverse/1` on the result
when building a list.

## Examples

    iex> ord_map = Aja.OrdMap.new([b: "Bat", c: "Cat", a: "Ant"])
    iex> Aja.OrdMap.foldr(ord_map, "", fn {_key, value}, acc -> value <> acc end)
    "BatCatAnt"
    iex> Aja.OrdMap.foldr(ord_map, [], fn {key, value}, acc -> [{key, value <> "man"} | acc] end)
    [b: "Batman", c: "Catman", a: "Antman"]
"""
def foldr(ord_map, acc, fun)

def foldr(%__MODULE__{__ord_vector__: vector} = ord_map, acc, fun) when is_function(fun, 2) do
  # Dense vectors can be folded directly; sparse ones first drop their
  # `nil` holes via `sparse_to_list/1`.
  if is_dense(ord_map) do
    RawVector.foldr(vector, acc, fun)
  else
    vector
    |> RawVector.sparse_to_list()
    |> List.foldr(acc, fun)
  end
end
@doc """
Returns `true` if `ord_map` is dense; otherwise returns `false`.

See the [section about sparse structures](#module-key-deletion-and-sparse-maps) for more information.

## Examples

    iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
    ord(%{a: "Ant", b: "Bat", c: "Cat"})
    iex> Aja.OrdMap.dense?(ord_map)
    true
    iex> sparse = Aja.OrdMap.delete(ord_map, :b)
    #Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
    iex> Aja.OrdMap.dense?(sparse)
    false
"""
def dense?(%__MODULE__{} = ord_map) do
  # Public wrapper around the `is_dense/1` guard.
  is_dense(ord_map)
end
@doc """
Returns `true` if `ord_map` is sparse; otherwise returns `false`.

See the [section about sparse structures](#module-key-deletion-and-sparse-maps) for more information.

## Examples

    iex> ord_map = Aja.OrdMap.new(a: "Ant", b: "Bat", c: "Cat")
    ord(%{a: "Ant", b: "Bat", c: "Cat"})
    iex> Aja.OrdMap.sparse?(ord_map)
    false
    iex> sparse = Aja.OrdMap.delete(ord_map, :b)
    #Aja.OrdMap<%{a: "Ant", c: "Cat"}, sparse?: true>
    iex> Aja.OrdMap.sparse?(sparse)
    true
"""
def sparse?(%__MODULE__{} = ord_map) do
  # Exact negation of `dense?/1`.
  !is_dense(ord_map)
end
# Exposed "private" functions
@doc false
# Merges the `new_kvs` key-value list into an existing ord map.
# `do_add_optimistic/4` assigns vector indexes to brand-new keys and collects
# keys that already existed (or repeat within `new_kvs`) into `duplicates`.
# New entries are appended to the vector in one pass; duplicate keys are then
# overwritten in place at their original index so insertion order is kept.
def merge_list(%__MODULE__{__ord_map__: map, __ord_vector__: vector}, new_kvs) do
  {new_map, reversed_kvs, duplicates} =
    do_add_optimistic(new_kvs, map, [], RawVector.size(vector))

  new_vector =
    vector
    |> RawVector.concat_list(:lists.reverse(reversed_kvs))
    |> do_fix_vector_duplicates(new_map, duplicates)

  %__MODULE__{__ord_map__: new_map, __ord_vector__: new_vector}
end
@doc false
# Replaces the values of several existing keys at once, preserving their
# positions. Raises `KeyError` on the first key that is not already present
# (no partial update is returned in that case).
def replace_many!(
      %__MODULE__{__ord_map__: map, __ord_vector__: vector} = ord_map,
      key_values
    ) do
  case do_replace_many(key_values, map, vector) do
    {:error, key} ->
      raise KeyError, key: key, term: ord_map

    {:ok, map, vector} ->
      %__MODULE__{__ord_map__: map, __ord_vector__: vector}
  end
end
# Private functions
# Inserts a key that is not yet present: appends `{key, value}` to the vector
# and stores `[index | value]` (an improper cons cell: vector index as head,
# raw value as tail) in the map — the encoding used throughout this module.
defp do_add_new(map, vector, key, value) do
  index = RawVector.size(vector)
  new_vector = RawVector.append(vector, {key, value})
  new_map = Map.put(map, key, [index | value])
  %__MODULE__{__ord_map__: new_map, __ord_vector__: new_vector}
end
# Overwrites the value of an existing key in place: the vector slot at the
# key's original `index` is replaced, so insertion order is unchanged.
defp do_add_existing(map, vector, index, key, value) do
  new_vector = RawVector.replace_positive!(vector, index, {key, value})
  new_map = Map.put(map, key, [index | value])
  %__MODULE__{__ord_map__: new_map, __ord_vector__: new_vector}
end
# Deleting the final entry:
defp do_delete_existing(new_map, _vector, _index) when new_map === %{} do
  # always return the same empty ord map, and reset the index to avoid considering it as sparse
  %__MODULE__{}
end

# General case: blank out (or trim) the vector slot, then possibly rebuild
# the whole structure if too many holes have accumulated.
defp do_delete_existing(new_map, vector, index) do
  new_vector = vector_delete_at(vector, index)
  periodic_rebuild(new_map, new_vector)
end
# Amortized compaction: once the vector holds at least twice as many slots as
# there are live entries, rebuild a fresh dense ord map from the live entries.
# NOTE(review): `RawVector.size/1` is used in a guard here, so it must expand
# to a guard-safe expression (macro) — confirm against RawVector.
defp periodic_rebuild(map, vector) when RawVector.size(vector) >= 2 * map_size(map) do
  vector
  |> RawVector.sparse_to_list()
  |> from_list()
end

# Below the threshold: keep the (possibly sparse) vector as-is.
defp periodic_rebuild(map, vector) do
  %__MODULE__{__ord_map__: map, __ord_vector__: vector}
end
# Drops a batch of entries: first blank out every index in the vector, then
# remove the keys from the map (and maybe compact) in one final step.
defp do_drop(_indexes = [], map, vector, dropped_keys) do
  periodic_rebuild_drop_keys(map, vector, dropped_keys)
end

defp do_drop([index | indexes], map, vector, dropped_keys) do
  new_vector = vector_delete_at(vector, index)
  do_drop(indexes, map, new_vector, dropped_keys)
end
# Batch variant of `periodic_rebuild/2`: `map` still contains the dropped
# keys, so the live-entry count is `map_size(map) - length(dropped_keys)`.
# When compacting, `from_list/1` rebuilds the map from the vector's live
# entries, so the dropped keys need not be removed from `map` explicitly.
defp periodic_rebuild_drop_keys(map, vector, dropped_keys)
     when RawVector.size(vector) >= 2 * (map_size(map) - length(dropped_keys)) do
  vector
  |> RawVector.sparse_to_list()
  |> from_list()
end

defp periodic_rebuild_drop_keys(map, vector, dropped_keys) do
  new_map = Map.drop(map, dropped_keys)
  %__MODULE__{__ord_map__: new_map, __ord_vector__: vector}
end
# Deleting the last slot can actually shrink the vector…
defp vector_delete_at(vector, index) when index + 1 == RawVector.size(vector) do
  RawVector.delete_last(vector)
end

# …any other slot is just overwritten with `nil`, leaving a hole
# (this is what makes the ord map "sparse").
defp vector_delete_at(vector, index) do
  RawVector.replace_positive!(vector, index, nil)
end
# No duplicates encountered: the vector is already correct.
defp do_fix_vector_duplicates(vector, _map, _duplicates = nil) do
  vector
end

# For each duplicated key, look up its (first-insertion) index in the map and
# overwrite that slot with the winning (last) value.
defp do_fix_vector_duplicates(vector, map, duplicates) do
  Enum.reduce(duplicates, vector, fn {key, value}, acc ->
    %{^key => [index | _value]} = map
    RawVector.replace_positive!(acc, index, {key, value})
  end)
end
# Recursive worker for `replace_many!/2`: replaces each key's value at its
# existing index, or aborts with `{:error, key}` on the first missing key.
defp do_replace_many([], map, vector) do
  {:ok, map, vector}
end

defp do_replace_many([{key, value} | rest], map, vector) do
  case map do
    %{^key => [index | _value]} ->
      new_map = Map.replace!(map, key, [index | value])
      new_vector = RawVector.replace_positive!(vector, index, {key, value})
      do_replace_many(rest, new_map, new_vector)

    _ ->
      {:error, key}
  end
end
defp from_list([]) do
  new()
end

# Builds an ord map from a key-value list in one pass. The optimistic add
# returns the collected entries in reverse; when duplicate keys occurred,
# the reversal pass also patches each entry with its final (last) value so
# the vector agrees with the map.
defp from_list(list) do
  {map, key_values} =
    case do_add_optimistic(list, %{}, [], 0) do
      {map, reversed_kvs, nil} ->
        {map, :lists.reverse(reversed_kvs)}

      {map, reversed_kvs, duplicates} ->
        {map, do_reverse_and_update_duplicates(reversed_kvs, duplicates, [])}
    end

  vector = RawVector.from_list(key_values)
  %__MODULE__{__ord_map__: map, __ord_vector__: vector}
end
@doc false
# Compile-time constructor used by the `ord/1` macro: turns a list of
# `{key_ast, value_ast}` pairs into the AST of an ord map.
def from_list_ast([], _env) do
  quote do
    unquote(__MODULE__).new()
  end
end

def from_list_ast(kvs_ast, env) do
  cond do
    # Fully-literal pairs: the whole structure can be built at compile time.
    Macro.quoted_literal?(kvs_ast) -> from_list_ast_constant_keys(kvs_ast, env)
    # Literal keys but dynamic values: evaluate values into temp vars first.
    literal_keys?(kvs_ast) -> from_non_literal_values(kvs_ast, env)
    # Dynamic keys: fall back to a runtime `new/1` call.
    true -> quote do: unquote(__MODULE__).new(unquote(kvs_ast))
  end
end
# True when every key in the `{key_ast, value_ast}` pair list is a quoted
# literal (values may still be dynamic expressions).
defp literal_keys?(kvs_ast) do
  not Enum.any?(kvs_ast, fn {key_ast, _value_ast} ->
    not Macro.quoted_literal?(key_ast)
  end)
end
# Literal keys, dynamic values: bind each value expression to a generated
# variable *before* building the map, so every value AST is evaluated exactly
# once and in source order, then build the constant-key structure from the
# now-safe `{key_ast, var}` pairs. Emits a block of assignments followed by
# the construction expression.
defp from_non_literal_values(kvs_ast, env) do
  vars = Macro.generate_arguments(length(kvs_ast), nil)

  {safe_kvs_ast, assigns} =
    kvs_ast
    |> Enum.zip(vars)
    |> Enum.map_reduce([], fn {{key_ast, value_ast}, var}, acc ->
      assign =
        quote do
          unquote(var) = unquote(value_ast)
        end

      {{key_ast, var}, [assign | acc]}
    end)

  # `assigns` was accumulated in reverse; prepend the construction AST and
  # reverse once so assignments run first, in original order.
  instructions = Enum.reverse([from_list_ast_constant_keys(safe_kvs_ast, env) | assigns])
  {:__block__, [], instructions}
end
# Builds the ord-map AST when all keys are known at compile time. Duplicate
# keys are resolved exactly like at runtime (last value wins, first position
# kept), but here each duplicate additionally emits a compile-time warning
# pointing at the caller's location.
defp from_list_ast_constant_keys(kvs_ast, env) do
  {map, key_values} =
    case do_add_optimistic(kvs_ast, %{}, [], 0) do
      {map, reversed_kvs, nil} ->
        {map, :lists.reverse(reversed_kvs)}

      {map, reversed_kvs, duplicates} ->
        for {key, _} <- duplicates do
          IO.warn(
            "key #{inspect(key)} will be overridden in ord map",
            Macro.Env.stacktrace(env)
          )
        end

        {map, do_reverse_and_update_duplicates(reversed_kvs, duplicates, [])}
    end

  vector_ast = RawVector.from_list_ast(key_values)

  # Re-create the `[index | value]` improper cons cells as AST
  # (`{:|, [], [i, v]}` is the quoted form of `[i | v]`).
  map_ast = {:%{}, [], Enum.map(map, fn {k, [i | v]} -> {k, [{:|, [], [i, v]}]} end)}

  quote do
    %unquote(__MODULE__){__ord_map__: unquote(map_ast), __ord_vector__: unquote(vector_ast)}
  end
end
@compile {:inline, do_add_optimistic: 4}

# Single-pass insert of a kv list, "optimistic" in that it assumes no
# duplicate keys: it accumulates entries (reversed) and assigns indexes
# sequentially. Returns `{map, reversed_kvs, nil}` on the happy path.
# On the first duplicate key it switches to the slower
# `do_add_with_duplicates/5`, which also tracks a `duplicates` map.
defp do_add_optimistic([], map, key_values, _next_index) do
  {map, key_values, nil}
end

defp do_add_optimistic([{key, value} | rest], map, key_values, next_index) do
  case map do
    %{^key => [index | _value]} ->
      # Duplicate: keep the original index, record the overriding value.
      duplicates = %{key => value}
      new_map = Map.put(map, key, [index | value])
      do_add_with_duplicates(rest, new_map, key_values, duplicates, next_index)

    _ ->
      new_map = Map.put(map, key, [next_index | value])
      new_kvs = [{key, value} | key_values]
      do_add_optimistic(rest, new_map, new_kvs, next_index + 1)
  end
end
# Slow path entered once a duplicate key has been seen: identical to
# `do_add_optimistic/4` but additionally records, per duplicated key, the
# latest value in `duplicates` so callers can patch the vector afterwards.
defp do_add_with_duplicates([], map, key_values, duplicates, _next_index) do
  {map, key_values, duplicates}
end

defp do_add_with_duplicates([{key, value} | rest], map, key_values, duplicates, next_index) do
  case map do
    %{^key => [index | _value]} ->
      new_duplicates = Map.put(duplicates, key, value)
      new_map = Map.put(map, key, [index | value])
      do_add_with_duplicates(rest, new_map, key_values, new_duplicates, next_index)

    _ ->
      new_map = Map.put(map, key, [next_index | value])
      new_kvs = [{key, value} | key_values]
      do_add_with_duplicates(rest, new_map, new_kvs, duplicates, next_index + 1)
  end
end
# Reverses `reversed_kvs` back into insertion order while substituting the
# value of every key present in `duplicates` with its final (last-seen)
# value, so the resulting list agrees with the map.
defp do_reverse_and_update_duplicates([], _duplicates, acc), do: acc

defp do_reverse_and_update_duplicates([{key, original_value} | rest], duplicates, acc) do
  final_value = Map.get(duplicates, key, original_value)
  do_reverse_and_update_duplicates(rest, duplicates, [{key, final_value} | acc])
end
defimpl Enumerable do
  # O(1): entry count comes straight from the underlying map.
  def count(ord_map) do
    {:ok, Aja.OrdMap.size(ord_map)}
  end

  # Membership is only true for an exact `{key, value}` pair: the key must
  # exist AND map to the same value. Any other shape of `key_value` is false.
  def member?(ord_map, key_value) do
    with {key, value} <- key_value,
         {:ok, ^value} <- Aja.OrdMap.fetch(ord_map, key) do
      {:ok, true}
    else
      _ -> {:ok, false}
    end
  end

  # Delegates slicing to the vector/list representation.
  def slice(ord_map) do
    ord_map
    |> Aja.EnumHelper.to_vec_or_list()
    |> Enumerable.slice()
  end

  # Materializes the entries as a list and reuses the list reducer.
  def reduce(ord_map, acc, fun) do
    ord_map
    |> Aja.OrdMap.to_list()
    |> Enumerable.List.reduce(acc, fun)
  end
end
defimpl Collectable do
  # Collects `{key, value}` pairs into the ord map via `put/3`;
  # later duplicates overwrite earlier values (standard put semantics).
  def into(map) do
    fun = fn
      map_acc, {:cont, {key, value}} ->
        Aja.OrdMap.put(map_acc, key, value)

      map_acc, :done ->
        map_acc

      _map_acc, :halt ->
        :ok
    end

    {map, fun}
  end
end
defimpl Inspect do
  import Inspect.Algebra

  # Dense maps render as `ord(%{...})`; sparse maps use an explicit
  # `#Aja.OrdMap<%{...}, sparse?: true>` marker (see `open_close_marks/1`).
  def inspect(ord_map, opts) do
    {open_mark, close_mark} = open_close_marks(ord_map)

    open = color(open_mark, :map, opts)
    close = color(close_mark, :map, opts)
    sep = color(",", :map, opts)
    as_list = Aja.OrdMap.to_list(ord_map)

    container_doc(open, as_list, close, opts, traverse_fun(as_list, opts),
      separator: sep,
      break: :strict
    )
  end

  # Keyword-style entries render as `key: value`, otherwise `key => value`.
  defp traverse_fun(list, opts) do
    if Inspect.List.keyword?(list) do
      &Inspect.List.keyword/2
    else
      sep = color(" => ", :map, opts)
      &to_map(&1, &2, sep)
    end
  end

  defp to_map({key, value}, opts, sep) do
    concat(concat(to_doc(key, opts), sep), to_doc(value, opts))
  end

  defp open_close_marks(ord_map) do
    if Aja.OrdMap.sparse?(ord_map) do
      {"#Aja.OrdMap<%{", "}, sparse?: true>"}
    else
      {"ord(%{", "})"}
    end
  end
end
# Optional Jason integration: only compiled when Jason is a dependency.
# Entries are encoded in insertion order as a JSON object via the
# keyword encoder.
if Code.ensure_loaded?(Jason.Encoder) do
  defimpl Jason.Encoder do
    def encode(map, opts) do
      map |> Aja.OrdMap.to_list() |> Jason.Encode.keyword(opts)
    end
  end
end
end
|
lib/ord_map.ex
| 0.85183
| 0.553566
|
ord_map.ex
|
starcoder
|
defmodule AMQP.Basic.Async do
  @moduledoc false

  import AMQP.Core

  alias AMQP.Utils
  alias AMQP.Channel

  @doc """
  Publishes a message to an Exchange.

  This method publishes a message to a specific exchange. The message will be routed
  to queues as defined by the exchange configuration and distributed to any subscribers.

  The parameter `exchange` specifies the name of the exchange to publish to. If set to
  empty string, it publishes to the default exchange.

  The `routing_key` parameter specifies the routing key for the message.

  The `payload` parameter specifies the message content as a binary.

  In addition to the previous parameters, the following options can be used:

  # Options

    * `:mandatory` - If set, returns an error if the broker can't route the message to a queue (default `false`);
    * `:immediate` - If set, returns an error if the broker can't deliver the message to a consumer immediately (default `false`);
    * `:content_type` - MIME Content type;
    * `:content_encoding` - MIME Content encoding;
    * `:headers` - Message headers. Can be used with headers Exchanges;
    * `:persistent` - If set, uses persistent delivery mode. Messages marked as `persistent` that are delivered to `durable` \
  queues will be logged to disk;
    * `:correlation_id` - application correlation identifier;
    * `:priority` - message priority, ranging from 0 to 9;
    * `:reply_to` - name of the reply queue;
    * `:expiration` - how long the message is valid (in milliseconds);
    * `:message_id` - message identifier;
    * `:timestamp` - timestamp associated with this message (epoch time);
    * `:type` - message type as a string;
    * `:user_id` - creating user ID. RabbitMQ will validate this against the active connection user;
    * `:app_id` - publishing application ID.

  ## Examples

      iex> AMQP.Basic.publish chan, \"my_exchange\", \"my_routing_key\", \"Hello World!\", persistent: true
      :ok

  """
  @spec publish(Channel.t(), String.t(), String.t(), String.t(), keyword) ::
          :ok | :blocked | :closing
  def publish(%Channel{pid: pid}, exchange, routing_key, payload, options \\ []) do
    basic_publish =
      basic_publish(
        exchange: exchange,
        routing_key: routing_key,
        mandatory: Keyword.get(options, :mandatory, false),
        immediate: Keyword.get(options, :immediate, false)
      )

    p_basic =
      p_basic(
        content_type: Keyword.get(options, :content_type, :undefined),
        content_encoding: Keyword.get(options, :content_encoding, :undefined),
        headers: Keyword.get(options, :headers, :undefined) |> Utils.to_type_tuple(),
        # AMQP delivery modes: 2 = persistent (logged to disk), 1 = transient.
        delivery_mode: if(options[:persistent], do: 2, else: 1),
        priority: Keyword.get(options, :priority, :undefined),
        correlation_id: Keyword.get(options, :correlation_id, :undefined),
        reply_to: Keyword.get(options, :reply_to, :undefined),
        expiration: Keyword.get(options, :expiration, :undefined),
        message_id: Keyword.get(options, :message_id, :undefined),
        timestamp: Keyword.get(options, :timestamp, :undefined),
        type: Keyword.get(options, :type, :undefined),
        user_id: Keyword.get(options, :user_id, :undefined),
        app_id: Keyword.get(options, :app_id, :undefined),
        cluster_id: Keyword.get(options, :cluster_id, :undefined)
      )

    # Fire-and-forget: publish has no broker reply, so a cast is appropriate.
    :amqp_channel.cast(pid, basic_publish, amqp_msg(props: p_basic, payload: payload))
  end

  @doc """
  Sets the message prefetch count or prefetch size (in bytes). If `global` is set to `true` this
  applies to the entire Connection, otherwise it applies only to the specified Channel.
  """
  @spec qos(Channel.t(), keyword) :: :ok
  def qos(%Channel{pid: pid}, options \\ []) do
    # `basic.qos` is a synchronous RPC: the broker answers with `basic.qos_ok`.
    # `:amqp_channel.cast/2` returns `:ok`, so matching the reply record against
    # a cast can never succeed — this must use `:amqp_channel.call/2`.
    basic_qos_ok() =
      :amqp_channel.call(
        pid,
        basic_qos(
          prefetch_size: Keyword.get(options, :prefetch_size, 0),
          prefetch_count: Keyword.get(options, :prefetch_count, 0),
          global: Keyword.get(options, :global, false)
        )
      )

    :ok
  end

  @doc """
  Acknowledges one or more messages. If `multiple` is set to `true`, all messages up to the one
  specified by `delivery_tag` are considered acknowledged by the server.
  """
  @spec ack(Channel.t(), String.t(), keyword) :: :ok | :blocked | :closing
  def ack(%Channel{pid: pid}, delivery_tag, options \\ []) do
    # `basic.ack` has no broker reply; a cast is correct here.
    :amqp_channel.cast(
      pid,
      basic_ack(
        delivery_tag: delivery_tag,
        multiple: Keyword.get(options, :multiple, false)
      )
    )
  end

  @doc """
  Rejects (and, optionally, requeues) a message.
  """
  @spec reject(Channel.t(), String.t(), keyword) :: :ok | :blocked | :closing
  def reject(%Channel{pid: pid}, delivery_tag, options \\ []) do
    :amqp_channel.cast(
      pid,
      basic_reject(
        delivery_tag: delivery_tag,
        requeue: Keyword.get(options, :requeue, true)
      )
    )
  end

  @doc """
  Negative acknowledge of one or more messages. If `multiple` is set to `true`, all messages up to the
  one specified by `delivery_tag` are considered as not acknowledged by the server. If `requeue` is set
  to `true`, the message will be returned to the queue and redelivered to the next available consumer.

  This is a RabbitMQ specific extension to AMQP 0.9.1. It is equivalent to reject, but allows rejecting
  multiple messages using the `multiple` option.
  """
  @spec nack(Channel.t(), String.t(), keyword) :: :ok | :blocked | :closing
  def nack(%Channel{pid: pid}, delivery_tag, options \\ []) do
    :amqp_channel.cast(
      pid,
      basic_nack(
        delivery_tag: delivery_tag,
        multiple: Keyword.get(options, :multiple, false),
        requeue: Keyword.get(options, :requeue, true)
      )
    )
  end

  @doc """
  Polls a queue for an existing message.

  Returns the tuple `{:empty, meta}` if the queue is empty or the tuple {:ok, payload, meta} if at least
  one message exists in the queue. The returned meta map includes the entry `message_count` with the
  current number of messages in the queue.

  Receiving messages by polling a queue is not as efficient as subscribing a consumer to a queue,
  so consideration should be taken when receiving large volumes of messages.

  Setting the `no_ack` option to true will tell the broker that the receiver will not send an acknowledgement of
  the message. Once it believes it has delivered a message, then it is free to assume that the consuming application
  has taken responsibility for it. In general, a lot of applications will not want these semantics, rather, they
  will want to explicitly acknowledge the receipt of a message and have `no_ack` with the default value of false.
  """
  @spec get(Channel.t(), String.t(), keyword) :: {:ok, String.t(), map} | {:empty, map}
  def get(%Channel{pid: pid}, queue, options \\ []) do
    # `basic.get` is a synchronous RPC returning either `basic.get_ok` with a
    # message or `basic.get_empty` — a cast would only yield `:ok` and neither
    # branch below could ever match, so this must use `:amqp_channel.call/2`.
    case :amqp_channel.call(
           pid,
           basic_get(queue: queue, no_ack: Keyword.get(options, :no_ack, false))
         ) do
      {basic_get_ok(
         delivery_tag: delivery_tag,
         redelivered: redelivered,
         exchange: exchange,
         routing_key: routing_key,
         message_count: message_count
       ),
       amqp_msg(
         props:
           p_basic(
             content_type: content_type,
             content_encoding: content_encoding,
             headers: headers,
             delivery_mode: delivery_mode,
             priority: priority,
             correlation_id: correlation_id,
             reply_to: reply_to,
             expiration: expiration,
             message_id: message_id,
             timestamp: timestamp,
             type: type,
             user_id: user_id,
             app_id: app_id,
             cluster_id: cluster_id
           ),
         payload: payload
       )} ->
        {:ok, payload,
         %{
           delivery_tag: delivery_tag,
           redelivered: redelivered,
           exchange: exchange,
           routing_key: routing_key,
           message_count: message_count,
           content_type: content_type,
           content_encoding: content_encoding,
           headers: headers,
           persistent: delivery_mode == 2,
           priority: priority,
           correlation_id: correlation_id,
           reply_to: reply_to,
           expiration: expiration,
           message_id: message_id,
           timestamp: timestamp,
           type: type,
           user_id: user_id,
           app_id: app_id,
           cluster_id: cluster_id
         }}

      basic_get_empty(cluster_id: cluster_id) ->
        {:empty, %{cluster_id: cluster_id}}
    end
  end

  @doc """
  Registers a queue consumer process. The `pid` of the process can be set using
  the `consumer_pid` argument and defaults to the calling process.

  The consumer process will receive the following data structures:

    * `{:basic_deliver, payload, meta}` - This is sent for each message consumed, where \
  `payload` contains the message content and `meta` contains all the metadata set when \
  sending with Basic.publish or additional info set by the broker;
    * `{:basic_consume_ok, %{consumer_tag: consumer_tag}}` - Sent when the consumer \
  process is registered with Basic.consume. The caller receives the same information \
  as the return of Basic.consume;
    * `{:basic_cancel, %{consumer_tag: consumer_tag, no_wait: no_wait}}` - Sent by the \
  broker when the consumer is unexpectedly cancelled (such as after a queue deletion)
    * `{:basic_cancel_ok, %{consumer_tag: consumer_tag}}` - Sent to the consumer process after a call to Basic.cancel
  """
  @spec consume(Channel.t(), String.t(), pid | nil, keyword) :: {:ok, String.t()}
  def consume(%Channel{} = chan, queue, consumer_pid \\ nil, options \\ []) do
    basic_consume =
      basic_consume(
        queue: queue,
        consumer_tag: Keyword.get(options, :consumer_tag, ""),
        no_local: Keyword.get(options, :no_local, false),
        no_ack: Keyword.get(options, :no_ack, false),
        exclusive: Keyword.get(options, :exclusive, false),
        nowait: Keyword.get(options, :no_wait, false),
        arguments: Keyword.get(options, :arguments, [])
      )

    consumer_pid = consumer_pid || self()

    # An adapter process sits between the Erlang client and the Elixir
    # consumer, translating raw records into friendly tuples/maps. It monitors
    # both the consumer and the channel so it can clean up when either dies.
    adapter_pid =
      spawn(fn ->
        Process.flag(:trap_exit, true)
        Process.monitor(consumer_pid)
        Process.monitor(chan.pid)
        do_start_consumer(chan, consumer_pid)
      end)

    # `subscribe/3` is synchronous and returns `basic.consume_ok`.
    basic_consume_ok(consumer_tag: consumer_tag) =
      :amqp_channel.subscribe(chan.pid, basic_consume, adapter_pid)

    {:ok, consumer_tag}
  end

  # Waits for the broker's consume confirmation, forwards it to the consumer,
  # then enters the delivery loop.
  defp do_start_consumer(chan, consumer_pid) do
    receive do
      basic_consume_ok(consumer_tag: consumer_tag) ->
        send(consumer_pid, {:basic_consume_ok, %{consumer_tag: consumer_tag}})
        do_consume(chan, consumer_pid, consumer_tag)
    end
  end

  # Adapter receive loop: converts each broker record into the corresponding
  # `{:basic_*, ...}` message for `consumer_pid`. Exits (after cancelling the
  # subscription) when the consumer dies, and exits when the channel dies.
  defp do_consume(chan, consumer_pid, consumer_tag) do
    receive do
      {basic_deliver(
         consumer_tag: consumer_tag,
         delivery_tag: delivery_tag,
         redelivered: redelivered,
         exchange: exchange,
         routing_key: routing_key
       ),
       amqp_msg(
         props:
           p_basic(
             content_type: content_type,
             content_encoding: content_encoding,
             headers: headers,
             delivery_mode: delivery_mode,
             priority: priority,
             correlation_id: correlation_id,
             reply_to: reply_to,
             expiration: expiration,
             message_id: message_id,
             timestamp: timestamp,
             type: type,
             user_id: user_id,
             app_id: app_id,
             cluster_id: cluster_id
           ),
         payload: payload
       )} ->
        send(
          consumer_pid,
          {:basic_deliver, payload,
           %{
             consumer_tag: consumer_tag,
             delivery_tag: delivery_tag,
             redelivered: redelivered,
             exchange: exchange,
             routing_key: routing_key,
             content_type: content_type,
             content_encoding: content_encoding,
             headers: headers,
             persistent: delivery_mode == 2,
             priority: priority,
             correlation_id: correlation_id,
             reply_to: reply_to,
             expiration: expiration,
             message_id: message_id,
             timestamp: timestamp,
             type: type,
             user_id: user_id,
             app_id: app_id,
             cluster_id: cluster_id
           }}
        )

        do_consume(chan, consumer_pid, consumer_tag)

      basic_consume_ok(consumer_tag: consumer_tag) ->
        send(consumer_pid, {:basic_consume_ok, %{consumer_tag: consumer_tag}})
        do_consume(chan, consumer_pid, consumer_tag)

      basic_cancel_ok(consumer_tag: consumer_tag) ->
        # Cancellation confirmed: forward and let the loop terminate.
        send(consumer_pid, {:basic_cancel_ok, %{consumer_tag: consumer_tag}})

      basic_cancel(consumer_tag: consumer_tag, nowait: no_wait) ->
        # Broker-initiated cancellation (e.g. queue deleted).
        send(consumer_pid, {:basic_cancel, %{consumer_tag: consumer_tag, no_wait: no_wait}})

      {:DOWN, _ref, :process, ^consumer_pid, reason} ->
        # Consumer died: cancel the subscription before propagating the exit.
        AMQP.Basic.cancel(chan, consumer_tag)
        exit(reason)

      {:DOWN, _ref, :process, _pid, reason} ->
        # Channel died: nothing left to cancel, just propagate.
        exit(reason)
    end
  end

  @doc """
  Registers a handler to deal with returned messages. The registered
  process will receive `{:basic_return, payload, meta}` data structures.
  """
  @spec return(Channel.t(), pid) :: :ok
  def return(%Channel{pid: pid}, return_handler_pid) do
    # Same adapter pattern as `consume/4`: translate raw return records into
    # `{:basic_return, payload, meta}` messages for the handler process.
    adapter_pid =
      spawn(fn ->
        Process.flag(:trap_exit, true)
        Process.monitor(return_handler_pid)
        Process.monitor(pid)
        handle_return_messages(pid, return_handler_pid)
      end)

    :amqp_channel.register_return_handler(pid, adapter_pid)
  end

  @doc """
  Removes the return handler, if it exists. Does nothing if there is no
  such handler.
  """
  @spec cancel_return(Channel.t()) :: :ok
  def cancel_return(%Channel{pid: pid}) do
    :amqp_channel.unregister_return_handler(pid)
  end

  # Adapter loop for returned (unroutable) messages; exits when either the
  # handler or the channel goes down.
  defp handle_return_messages(chan_pid, return_handler_pid) do
    receive do
      {basic_return(
         reply_code: reply_code,
         reply_text: reply_text,
         exchange: exchange,
         routing_key: routing_key
       ),
       amqp_msg(
         props:
           p_basic(
             content_type: content_type,
             content_encoding: content_encoding,
             headers: headers,
             delivery_mode: delivery_mode,
             priority: priority,
             correlation_id: correlation_id,
             reply_to: reply_to,
             expiration: expiration,
             message_id: message_id,
             timestamp: timestamp,
             type: type,
             user_id: user_id,
             app_id: app_id,
             cluster_id: cluster_id
           ),
         payload: payload
       )} ->
        send(
          return_handler_pid,
          {:basic_return, payload,
           %{
             reply_code: reply_code,
             reply_text: reply_text,
             exchange: exchange,
             routing_key: routing_key,
             content_type: content_type,
             content_encoding: content_encoding,
             headers: headers,
             persistent: delivery_mode == 2,
             priority: priority,
             correlation_id: correlation_id,
             reply_to: reply_to,
             expiration: expiration,
             message_id: message_id,
             timestamp: timestamp,
             type: type,
             user_id: user_id,
             app_id: app_id,
             cluster_id: cluster_id
           }}
        )

        handle_return_messages(chan_pid, return_handler_pid)

      {:DOWN, _ref, :process, _pid, reason} ->
        exit(reason)
    end
  end
end
|
lib/amqp/basic/async.ex
| 0.922661
| 0.617311
|
async.ex
|
starcoder
|
defmodule PhoenixBricks.Query do
  @moduledoc ~S"""
  Defines a common interface for adding scopes to a Schema

  ## Examples

  ```elixir
  defmodule RecordQuery do
    use PhoenixBricks.Query, schema: Record
  end
  ```

  It provides you a method `scope` that improve the empty scope of provided schema
  with additional scopes.

  `scope/0` returns the empty scope

  ```elixir
  iex> RecordQuery.scope()
  iex> Record
  ```

  With `scope/1` you can provide a list of scopes that improve the starting scope

  ```elixir
  iex> scopes = [field1: {:eq, "value"}, field2: {:gte, 42}]
  iex> RecordQuery.scope(scopes)
  iex> #Ecto.Query<from r0 in Record, where: r0.field1 == ^"value" and r0.field2 >= ^42>
  ```

  If you need to improve an existing Ecto.Query you can use `scope/2`

  ```elixir
  iex> starting_scope = from(r in Record, where: r.field1 == "value")
  iex> scopes = [field2: {:gte, 42}]
  iex> RecordQuery.scope(starting_scope, scopes)
  iex> #Ecto.Query<from r0 in Record, where: r0.field1 == ^"value" and r0.field2 >= ^42>
  ```

  ## Built-in scopes

  ### `:eq`
  ```elixir
  iex> RecordQuery.scope(field: {:eq, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field == ^"value">
  ```

  ### `:gt`
  ```elixir
  iex> RecordQuery.scope(field: {:gt, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field > ^"value">
  ```

  ### `:lt`
  ```elixir
  iex> RecordQuery.scope(field: {:lt, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field < ^"value">
  ```

  ### `:gte`
  ```elixir
  iex> RecordQuery.scope(field: {:gte, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field >= ^"value">
  ```

  ### `:lte`
  ```elixir
  iex> RecordQuery.scope(field: {:lte, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field <= ^"value">
  ```

  ### `:neq`
  ```elixir
  iex> RecordQuery.scope(field: {:neq, "value"})
  iex> #Ecto.Query<from r0 in Record, where: r0.field != ^"value">
  ```

  ### `:matches`
  ```elixir
  iex> RecordQuery.scope(field: {:matches, "value"})
  iex> #Ecto.Query<from r0 in Record, where: ilike(r0.field, ^"%value%")>
  ```

  ## Customize scopes

  If you need to define more comprehensive scopes you can improve the query adding
  new `apply_scope/2` methods

  ```elixir
  defmodule RecordQuery do
    use PhoenixBricks.Query, schema: Record

    def apply_scope(query, {:name_matches, value}) do
      query
      |> apply_scope({:name, {:matches, value}})
    end

    def apply_scope(query, :published) do
      query
      |> where([p], p.status == "published")
    end
  end

  iex> RecordQuery.scope([:published, name_matches: "value"])
  iex> #Ecto.Query<from r0 in Record, where: r0.status == "published" and ilike(r0.name, ^"%value%")>
  ```
  """

  defmacro __using__(schema: schema) do
    quote do
      import Ecto.Query, warn: false

      # The bare schema module is the "empty" scope every chain starts from.
      def starting_scope do
        unquote(schema)
      end

      # Applies each scope in order on top of `starting_scope`.
      def scope(starting_scope, scopes) do
        scopes
        |> Enum.reduce(starting_scope, fn scope, query ->
          apply_scope(query, scope)
        end)
      end

      def scope(scopes) do
        scope(starting_scope(), scopes)
      end

      def scope do
        starting_scope()
      end

      @type query :: Ecto.Query.t()

      # Built-in column scopes. Each clause narrows `query` with a dynamic
      # column comparison; users may add their own `apply_scope/2` clauses.
      @spec apply_scope(query, atom() | {atom(), any()}) :: query
      def apply_scope(query, {column, {:eq, value}}) do
        where(query, [q], field(q, ^column) == ^value)
      end

      def apply_scope(query, {column, {:neq, value}}) do
        where(query, [q], field(q, ^column) != ^value)
      end

      def apply_scope(query, {column, {:lte, value}}) do
        where(query, [q], field(q, ^column) <= ^value)
      end

      def apply_scope(query, {column, {:lt, value}}) do
        where(query, [q], field(q, ^column) < ^value)
      end

      def apply_scope(query, {column, {:gte, value}}) do
        where(query, [q], field(q, ^column) >= ^value)
      end

      def apply_scope(query, {column, {:gt, value}}) do
        where(query, [q], field(q, ^column) > ^value)
      end

      # Substring match: wrap the value in SQL wildcards, as documented
      # (`{:matches, "value"}` -> `ilike(column, "%value%")`).
      def apply_scope(query, {column, {:matches, value}}) do
        where(query, [q], ilike(field(q, ^column), ^"%#{value}%"))
      end
    end
  end
end
|
lib/phoenix_bricks/query.ex
| 0.902298
| 0.904524
|
query.ex
|
starcoder
|
defmodule Day16.Part2 do
def part2(notes_filename, tickets_filename) do
{notes, my_ticket, nearby_tickets} = parse_input(notes_filename, tickets_filename)
valid_values = notes_to_mapset(notes)
valid_tickets =
Enum.filter(nearby_tickets, &([] == ticket_to_invalid_values(&1, valid_values)))
mapping = decipher_fields(tickets_to_value_sets(valid_tickets), %{}, notes_to_mapsets(notes))
[
"departure location",
"departure station",
"departure platform",
"departure track",
"departure date",
"departure time"
]
|> Enum.map(fn field ->
Enum.at(my_ticket, mapping[field])
end)
|> Enum.reduce(&Kernel.*/2)
end
@doc """
iex> Day16.Part2.part2
279139880759
"""
def part2, do: part2("day16-notes.txt", "day16-tickets.txt")
def parse_input(notes_filename, tickets_filename) do
notes =
"inputs/#{notes_filename}"
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Enum.map(&parse_note/1)
tickets =
"inputs/#{tickets_filename}"
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Enum.map(&parse_ticket/1)
[my_ticket] = Enum.take(tickets, 1)
nearby_tickets = Enum.drop(tickets, 1)
{notes, my_ticket, nearby_tickets}
end
def parse_note(str) do
[attr, ranges_str] = String.split(str, ": ")
ranges =
ranges_str
|> String.split(" or ")
|> Enum.map(fn range -> String.split(range, "-") |> Enum.map(&String.to_integer/1) end)
{attr, ranges}
end
def parse_ticket(str) do
str
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
def notes_to_mapset(notes) do
Enum.reduce(notes, MapSet.new(), fn {_attr, [[lb1, ub1], [lb2, ub2]]}, mapset ->
MapSet.union(mapset, MapSet.new(Enum.to_list(lb1..ub1) ++ Enum.to_list(lb2..ub2)))
end)
end
def notes_to_mapsets(notes) do
Enum.map(notes, fn {attr, [[lb1, ub1], [lb2, ub2]]} ->
{attr, MapSet.new(Enum.to_list(lb1..ub1) ++ Enum.to_list(lb2..ub2))}
end)
end
def ticket_to_invalid_values(ticket, valid_values) do
Enum.filter(ticket, &(!MapSet.member?(valid_values, &1)))
end
def tickets_to_value_sets(tickets) do
for col <- 0..(length(List.first(tickets)) - 1) do
for ticket <- tickets do
Enum.at(ticket, col)
end
end
|> Enum.map(&MapSet.new/1)
|> Enum.with_index()
end
def decipher_fields([], known_fields, _), do: known_fields
def decipher_fields([{value_set, pos} | unknown_fields], known_fields, field_mapsets) do
field_candidates = list_candidates(value_set, field_mapsets) -- Map.keys(known_fields)
case field_candidates do
[field] -> decipher_fields(unknown_fields, Map.put(known_fields, field, pos), field_mapsets)
_ -> decipher_fields(unknown_fields ++ [{value_set, pos}], known_fields, field_mapsets)
end
end
# Names of all fields whose valid-value set covers every value in value_set.
def list_candidates(value_set, field_mapsets) do
  for {field, valid_values} <- field_mapsets,
      MapSet.subset?(value_set, valid_values),
      do: field
end
end
|
lib/day16/part2.ex
| 0.507324
| 0.602851
|
part2.ex
|
starcoder
|
defmodule ProxerEx.Api.List do
@moduledoc """
Contains helper methods to build requests for the list api.
"""
# The api_func/api_doc/parameter/paging_parameters macros used below are
# brought into scope by this `use` — presumably defined in ProxerEx.Api.Base;
# each api_func declares one endpoint-builder function on this module.
use ProxerEx.Api.Base, api_class: "list"
# Processing methods
# Joins a list value with "+" (e.g. [60, 67] -> "60+67") and stores it in the
# request's GET arguments. Used by parameters declared with
# `process: :to_plus_separated_string`.
@doc false
def to_plus_separated_string(%ProxerEx.Request{get_args: get_args} = request, name, value)
when is_list(value) do
get_args = get_args |> Map.put(name, Enum.join(value, "+"))
{:ok, %{request | get_args: get_args}}
end
# Non-list values are stored in the GET arguments unchanged.
def to_plus_separated_string(%ProxerEx.Request{get_args: get_args} = request, name, value) do
get_args = get_args |> Map.put(name, value)
{:ok, %{request | get_args: get_args}}
end
# Api function definitions
api_func "characters" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Characters``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.characters(start: "a", contains: "b", search: "c", subject: "skills", p: 1, limit: 20)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "characters",
authorization: false,
extra_header: [],
get_args: %{start: "a", contains: "b", search: "c", subject: "skills", p: 1, limit: 20},
method: :get
}}
iex> ProxerEx.Api.List.characters()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "characters",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("start", :get, optional: true)
parameter("contains", :get, optional: true)
parameter("search", :get, optional: true)
parameter("subject", :get, optional: true)
paging_parameters()
end
api_func "entrylist" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Entry List``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.entrylist(kat: "anime", medium: "movie", isH: false, state: 1, year: 2001, season: 1,
...> season_type: "start", start: "a", sort: "clicks", sort_type: "DESC", p: 4, limit: 17)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "entrylist",
authorization: false,
extra_header: [],
get_args: %{
kat: "anime",
medium: "movie",
isH: false,
state: 1,
year: 2001,
season: 1,
season_type: "start",
start: "a",
sort: "clicks",
sort_type: "DESC",
p: 4,
limit: 17
},
method: :get
}}
iex> ProxerEx.Api.List.entrylist()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "entrylist",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("kat", :get, optional: true)
parameter("medium", :get, optional: true)
parameter("isH", :get, optional: true)
parameter("state", :get, optional: true)
parameter("year", :get, optional: true)
parameter("season", :get, optional: true)
parameter("season_type", :get, optional: true)
parameter("start", :get, optional: true)
parameter("sort", :get, optional: true)
parameter("sort_type", :get, optional: true)
paging_parameters()
end
api_func "entrysearch" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Entry Search``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.entrysearch(name: "test", language: "de", type: "animeseries", genre: ["Action", "Adult"],
...> nogenre: ["Romance"], fsk: ["fsk18"], sort: "rating", length: 356, "length-limit": "up", tags: [60, 67],
...> notags: [4, 246, 85], tagratefilter: "rate_1", tagspoilerfilter: "spoiler_10", p: 2, limit: 3)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "entrysearch",
authorization: false,
extra_header: [],
get_args: %{
name: "test",
language: "de",
type: "animeseries",
genre: "Action+Adult",
nogenre: "Romance",
fsk: "fsk18",
sort: "rating",
length: 356,
"length-limit": "up",
tags: "60+67",
notags: "4+246+85",
tagratefilter: "rate_1",
tagspoilerfilter: "spoiler_10",
p: 2,
limit: 3
},
method: :get
}}
iex> ProxerEx.Api.List.entrysearch()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "entrysearch",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("name", :get, optional: true)
parameter("language", :get, optional: true)
parameter("type", :get, optional: true)
parameter("genre", :get, optional: true, process: :to_plus_separated_string)
parameter("nogenre", :get, optional: true, process: :to_plus_separated_string)
parameter("fsk", :get, optional: true, process: :to_plus_separated_string)
parameter("sort", :get, optional: true)
parameter("length", :get, optional: true)
parameter("length-limit", :get, optional: true)
parameter("tags", :get, optional: true, process: :to_plus_separated_string)
parameter("notags", :get, optional: true, process: :to_plus_separated_string)
parameter("tagratefilter", :get, optional: true)
parameter("tagspoilerfilter", :get, optional: true)
paging_parameters()
end
api_func "industryprojects" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Industry Projects``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.industryprojects(id: 5, type: "record_label", isH: "0", p: 0, limit: 42)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "industryprojects",
authorization: false,
extra_header: [],
get_args: %{id: 5, type: "record_label", isH: "0", p: 0, limit: 42},
method: :get
}}
iex> ProxerEx.Api.List.industryprojects(id: 5)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "industryprojects",
authorization: false,
extra_header: [],
get_args: %{id: 5},
method: :get
}}
""")
parameter("id", :get)
parameter("type", :get, optional: true)
parameter("isH", :get, optional: true)
paging_parameters()
end
api_func "industrys" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Industrys``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.industrys(start: "s", contains: "c", country: "jp", type: "publisher", p: 1, limit: 200)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "industrys",
authorization: false,
extra_header: [],
get_args: %{start: "s", contains: "c", country: "jp", type: "publisher", p: 1, limit: 200},
method: :get
}}
iex> ProxerEx.Api.List.industrys()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "industrys",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("start", :get, optional: true)
parameter("contains", :get, optional: true)
parameter("country", :get, optional: true)
parameter("type", :get, optional: true)
paging_parameters()
end
api_func "persons" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Persons``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.persons(start: "j", contains: "l", search: "k", subject: "awards", p: 9, limit: 120)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "persons",
authorization: false,
extra_header: [],
get_args: %{start: "j", contains: "l", search: "k", subject: "awards", p: 9, limit: 120},
method: :get
}}
iex> ProxerEx.Api.List.persons()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "persons",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("start", :get, optional: true)
parameter("contains", :get, optional: true)
parameter("search", :get, optional: true)
parameter("subject", :get, optional: true)
paging_parameters()
end
api_func "tagids" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Tag IDs``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.tagids(search: "4-Koma Arm -CGI-Animation")
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "tagids",
authorization: false,
extra_header: [],
get_args: %{search: "4-Koma Arm -CGI-Animation"},
method: :get
}}
""")
parameter("search", :get)
end
api_func "tags" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Tags``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.tags(search: "o", type: "entry_tag", sort: "id", sort_type: "DESC", subtype: "misc")
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "tags",
authorization: false,
extra_header: [],
get_args: %{search: "o", type: "entry_tag", sort: "id", sort_type: "DESC", subtype: "misc"},
method: :get
}}
iex> ProxerEx.Api.List.tags()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "tags",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("search", :get, optional: true)
parameter("type", :get, optional: true)
parameter("sort", :get, optional: true)
parameter("sort_type", :get, optional: true)
parameter("subtype", :get, optional: true)
end
api_func "translatorgroupprojects" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Translatorgroup Projects``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.translatorgroupprojects(id: 42, type: 3, isH: 0, p: 91, limit: 1)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "translatorgroupprojects",
authorization: false,
extra_header: [],
get_args: %{id: 42, type: 3, isH: 0, p: 91, limit: 1},
method: :get
}}
iex> ProxerEx.Api.List.translatorgroupprojects(id: 17)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "translatorgroupprojects",
authorization: false,
extra_header: [],
get_args: %{id: 17},
method: :get
}}
""")
parameter("id", :get)
parameter("type", :get, optional: true)
parameter("isH", :get, optional: true)
paging_parameters()
end
api_func "translatorgroups" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```List/Get Translatorgroups``` api.
This method receives an optional keyword list as its only argument which represents the information send to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.List.translatorgroups(start: "start", contains: "p", country: "en", p: 1, limit: 71)
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "translatorgroups",
authorization: false,
extra_header: [],
get_args: %{start: "start", contains: "p", country: "en", p: 1, limit: 71},
method: :get
}}
iex> ProxerEx.Api.List.translatorgroups()
{:ok,
%ProxerEx.Request{
api_class: "list",
api_func: "translatorgroups",
authorization: false,
extra_header: [],
method: :get
}}
""")
parameter("start", :get, optional: true)
parameter("contains", :get, optional: true)
parameter("country", :get, optional: true)
paging_parameters()
end
end
|
lib/api_classes/list_api.ex
| 0.855791
| 0.691224
|
list_api.ex
|
starcoder
|
defmodule Day10 do
# Reads the puzzle input file into a flat list of lines.
def from_file(path) do
Helper.read_file(path)
|> Enum.to_list
|> List.flatten
end
# Converts the textual grid into a flat list of {x, y} asteroid coordinates.
def new_map(input) do
input
|> Enum.map(&String.graphemes/1)
|> Enum.with_index
|> Enum.map(fn {row, ix} -> row |> to_coordinates(ix) end)
|> List.flatten
end
# Emits {x, y} for every "#" cell of a single row at height y.
def to_coordinates(row, y) do
row |> Enum.with_index |> Enum.flat_map(fn {col, x} ->
if col == "#" do
[{x, y}]
else
[]
end
end)
end
# For every asteroid, lists all other asteroids as {origin, target,
# {distance, angle}} tuples, sorted by distance and grouped by origin.
def distance_and_angle(map) do
for origin <- map,
target <- map,
origin != target do
{origin, target, distance(origin, target)}
end
|> Enum.sort_by(fn {_, _, {distance, _}} -> distance end)
|> Enum.group_by(fn {origin, _, _} -> origin end)
end
# Finds the asteroid that can see the most other asteroids; returns
# {origin, visible_count}.
def most_visible(map) do
distance_and_angle(map)
|> Map.to_list
|> Enum.map(fn {origin, targets} -> {origin, visible(targets)} end)
|> Enum.map(fn {origin, targets} -> {origin, length(targets)} end)
|> Enum.max_by(fn {_, nr_visible} -> nr_visible end)
end
# Keeps only targets that are not blocked by a closer target at the same angle.
def visible(targets) do
targets
|> Enum.filter(fn {_, _, da} -> visible(targets, da) end)
end
# A target is visible when no other target shares its angle at a strictly
# smaller distance (the target itself never satisfies d2 < distance).
def visible(targets, {distance, angle}) do
!Enum.any?(targets, fn {_, _, {d2, a2}} -> d2 < distance && a2 == angle end)
end
# Distance metric paired with the firing angle, used for sorting and grouping.
def distance(origin, target) do
{manhattan(origin, target), angle(origin, target)}
end
# Angle in degrees, shifted by +90 so that "up" (decreasing y, as y grows
# downward in the grid) is 0 and angles increase clockwise; normalized to
# the range [0, 360).
def angle({x1, y1}, {x2, y2}) do
angle = :math.atan2(y2 - y1, x2 - x1) * (180 / :math.pi()) + 90
if angle < 0 do
angle + 360
else
angle
end
end
def manhattan({x1, y1}, {x2, y2}), do: Kernel.abs(x1 - x2) + Kernel.abs(y1 - y2)
# Returns the coordinates of the `amount`-th asteroid vaporized by a laser
# rotating clockwise from the best (most visible) asteroid.
def vaporize(map, amount) do
{origin, _} = most_visible(map)
targets = distance_and_angle(map)
|> Map.get(origin)
|> Enum.map(fn {_, target, da} -> {target, da} end)
|> Enum.group_by(fn {_, {_, a}} -> a end)
|> Map.to_list
|> Enum.sort_by(fn {angle, _} -> angle end)
|> Enum.map(fn {_, by_angle} -> Enum.map(by_angle, fn {target, _} -> target end) end)
# Index key `ix + sub_ix * length(targets)` (ix = angle-group index,
# sub_ix = rank within the group, length(targets) = number of angle
# groups) orders targets round-robin: the closest target of every angle
# first (one full rotation), then the second-closest of every angle, etc.
targets
|> Enum.with_index
|> Enum.map(fn {sub, ix} ->
Enum.with_index(sub)
|> Enum.map(fn {target, sub_ix} -> {target, ix + sub_ix * length(targets)} end)
end)
|> List.flatten
|> Enum.sort_by(fn {_, ix} -> ix end)
|> Enum.fetch!(amount - 1)
|> elem(0)
end
# Prints the answers for both puzzle parts.
def solution do
IO.puts("#{inspect from_file("day10_input.txt") |> new_map |> most_visible}")
IO.puts("#{inspect from_file("day10_input.txt") |> new_map |> vaporize(200)}")
end
end
|
lib/day10.ex
| 0.692122
| 0.629177
|
day10.ex
|
starcoder
|
defmodule Eiga.Store do
  @moduledoc """
  Functions for interacting with the database.
  """

  import Ecto.Query

  alias Eiga.Repo
  alias Eiga.Movie
  alias Eiga.Review

  @doc "Get a list of all movies watched."
  def all_movies do
    Movie
    |> select([movie], %{
      id: movie.id,
      title: movie.title,
      year: movie.year,
      country: movie.country,
      short_title: movie.short_title
    })
    |> Repo.all()
  end

  @doc "Get a single movie, by numeric id or by short title. Returns nil when not found."
  def get_movie(id) do
    # Purely numeric identifiers are looked up by primary key; anything else
    # is treated as a short title.
    case Integer.parse(id) do
      {movie_id, ""} -> query_movie_by_id(movie_id)
      _ -> query_movie_by_short_title(id)
    end
  end

  defp query_movie_by_id(id) do
    query =
      from m in Movie,
        where: m.id == ^id,
        select: %{id: m.id, title: m.title, year: m.year, country: m.country}

    # A primary-key lookup yields zero or one row, so this case is exhaustive.
    case Repo.all(query) do
      [movie] -> movie
      [] -> nil
    end
  end

  defp query_movie_by_short_title(short_title) do
    query =
      from m in Movie,
        where: m.short_title == ^short_title,
        select: %{id: m.id, title: m.title, year: m.year, country: m.country}

    case Repo.all(query) do
      [movie] -> movie
      [] -> nil
    end
  end

  @doc "Get a paginated list of all reviews, joined with their movie, ordered by view date."
  def all_reviews(page \\ 1, size \\ 10) do
    query =
      from r in Review,
        join: m in Movie,
        where: r.movie_id == m.id,
        order_by: r.view_date,
        limit: ^size,
        offset: ^((page - 1) * size),
        select: %{
          movie: m.title,
          year: m.year,
          country: m.country,
          location: r.location,
          view_date: r.view_date,
          review: r.text
        }

    Repo.all(query)
  end

  @doc """
  Get a review, by numeric id or by the movie's short title.

  Note: returns a (possibly empty) list of result maps, unlike `get_movie/1`.
  """
  def get_review(id) do
    case Integer.parse(id) do
      {review_id, ""} -> query_review_by_id(review_id)
      _ -> query_review_by_short_title(id)
    end
  end

  defp query_review_by_id(id) do
    query =
      from r in Review,
        where: r.id == ^id,
        join: m in Movie,
        where: r.movie_id == m.id,
        select: %{
          movie: m.title,
          year: m.year,
          country: m.country,
          location: r.location,
          view_date: r.view_date,
          review: r.text
        }

    Repo.all(query)
  end

  defp query_review_by_short_title(short_title) do
    query =
      from r in Review,
        join: m in Movie,
        where: r.movie_id == m.id,
        where: m.short_title == ^short_title,
        select: %{
          movie: m.title,
          year: m.year,
          country: m.country,
          location: r.location,
          view_date: r.view_date,
          review: r.text
        }

    Repo.all(query)
  end

  @doc """
  Insert a movie.
  If it doesn't exist in the database yet, returns {:new, Movie}.
  If the movie already exists, returns {:existing, Movie} without changing.
  Assumes that there is only one movie per year with a given name.
  """
  def insert_movie(%{"title" => title, "short_title" => short_title, "year" => year, "country" => country}) do
    # The year may arrive either as an integer or as a numeric string.
    year_int = if is_integer(year), do: year, else: String.to_integer(year)

    case Repo.get_by(Movie, %{title: title, year: year_int}) do
      nil ->
        {:ok, new_movie} =
          Repo.insert(
            %Movie{title: title, short_title: short_title, year: year_int, country: country},
            # Fixed: the option was misspelled `conflict_taget`, so it was
            # silently ignored and the :ignore conflict strategy never applied.
            on_conflict: :ignore,
            conflict_target: [:title, :year]
          )

        {:new, new_movie}

      existing ->
        {:existing, existing}
    end
  end

  @doc """
  Insert a movie review, but only if it doesn't exist in the database yet.
  Assumes that there is only one review per movie on a given date.
  """
  # Fixed: this head previously mixed string keys with `"text": text` and
  # `"view_date": view_date` (which are the ATOM keys :text / :view_date in
  # disguise), so it never matched all-string-keyed maps and every call fell
  # through to the catch-all clause below — the bug noted in its old FIXME.
  def insert_review(%{
        "location" => location,
        "short_title" => short_title,
        "text" => text,
        "view_date" => view_date
      }) do
    {:ok, date} = Ecto.Date.cast(view_date)
    # Movie must already be in the DB.
    movie = Repo.get_by!(Movie, short_title: short_title)

    case Repo.get_by(Review, %{movie_id: movie.id, view_date: date}) do
      nil ->
        {:ok, new_review} =
          Repo.insert(
            %Review{movie_id: movie.id, location: location, view_date: date, text: text},
            # Fixed: was misspelled `on_confict` and therefore ignored.
            on_conflict: :ignore,
            conflict_target: [:movie_id, :view_date]
          )

        new_review

      existing ->
        existing
    end
  end

  # Fallback for maps that do not carry all four expected string keys.
  # Retained for backward compatibility now that the clause above matches
  # string-keyed input correctly.
  def insert_review(got) do
    {:ok, date} = Ecto.Date.cast(got["view_date"])
    movie = Repo.get_by!(Movie, short_title: got["short_title"])

    case Repo.get_by(Review, %{movie_id: movie.id, view_date: date}) do
      nil ->
        {:ok, new_review} =
          Repo.insert(%Review{
            movie_id: movie.id,
            location: got["location"],
            view_date: date,
            text: got["text"]
          })

        new_review

      existing ->
        existing
    end
  end
end
|
maru_sqlite/lib/eiga/store.ex
| 0.672762
| 0.47457
|
store.ex
|
starcoder
|
defmodule AdventOfCode.Y2020.Day23.Ring do
  @moduledoc """
  A circular arrangement of cups stored as a successor map:
  `content[cup]` is the cup immediately clockwise of `cup`.
  """

  defstruct current: nil, content: %{}, size: 0, min: nil, max: nil

  alias AdventOfCode.Y2020.Day23.Ring

  # Builds a ring from an enumerable of cup labels. The first element becomes
  # `current`, and the last element links back around to the first.
  def new(content) do
    first = content |> Enum.take(1) |> hd()
    {min, max} = Enum.min_max(content)

    links =
      content
      |> Stream.chunk_every(2, 1)
      |> Enum.reduce(%{}, fn
        [a, b], acc -> Map.put(acc, a, b)
        [tail], acc -> Map.put(acc, tail, first)
      end)

    %Ring{current: first, content: links, size: Enum.count(content), min: min, max: max}
  end

  # Reads the entire ring clockwise, starting after `current` (exclusive).
  def to_list(%Ring{} = ring), do: to_list(ring, ring.current, ring.size)

  # Reads the entire ring, starting after `from` (exclusive).
  def to_list(%Ring{} = ring, from), do: to_list(ring, from, ring.size)

  # Reads the `len` cups that follow `from` (exclusive).
  def to_list(cups, from, len), do: collect(cups, from, len, [])

  defp collect(_ring, _cup, 0, acc), do: Enum.reverse(acc)

  defp collect(%Ring{content: content} = ring, cup, remaining, acc) do
    successor = Map.get(content, cup)
    collect(ring, successor, remaining - 1, [successor | acc])
  end

  # Cuts the `len` cups that follow `from` and splices them back in
  # immediately after `to`, rewriting only three successor links.
  def move(%Ring{content: content} = ring, from, len, to) do
    segment = to_list(ring, from, len + 1)
    first = hd(segment)
    [last, successor_of_from] = Enum.take(segment, -2)

    updated =
      content
      |> Map.put(to, first)
      |> Map.put(last, Map.get(content, to))
      |> Map.put(from, successor_of_from)

    %Ring{ring | content: updated}
  end

  # Advances `current` one step clockwise.
  def advance_current(%Ring{content: content, current: cur} = ring) do
    %Ring{ring | current: Map.get(content, cur)}
  end
end
defmodule AdventOfCode.Y2020.Day23 do
  @moduledoc false

  alias AdventOfCode.Y2020.Day23.Ring

  def test_input(), do: "389125467"
  def input(), do: "135468729"

  # Splits the puzzle string into a list of single-digit cup labels.
  def parse(raw) do
    for digit <- String.graphemes(raw), do: String.to_integer(digit)
  end

  # Part 1: 100 moves, then read the cup labels after cup 1.
  def run1(input), do: input |> parse() |> solve1(100)

  # Part 2: one million cups, ten million moves.
  def run2(input), do: input |> parse() |> solve2(1_000_000, 10_000_000)

  def solve1(cups, moves) do
    final = cups |> Ring.new() |> get_nth_move(moves)

    final
    |> Ring.to_list(1)
    |> Enum.drop(-1)
    |> Enum.join()
  end

  def solve2(cups, highest, moves) do
    cups
    |> add_extra_cups(highest)
    |> Ring.new()
    |> get_nth_move(moves)
    |> Ring.to_list(1, 2)
    |> Enum.reduce(&Kernel.*/2)
  end

  # Pads the cup list with labels len+1..upto when upto exceeds the count.
  def add_extra_cups(cups, upto) do
    count = Enum.count(cups)
    padding = if upto > count, do: (count + 1)..upto, else: []
    Enum.concat(cups, padding)
  end

  # Applies `next_move/1` `moves` times and returns the resulting ring.
  def get_nth_move(%Ring{} = ring, moves) do
    ring
    |> Stream.iterate(&next_move/1)
    |> Enum.at(moves)
  end

  # One crab move: pick up the three cups after current, find the
  # destination cup, splice them in after it, then advance current.
  def next_move(%Ring{} = ring) do
    picked_up = Ring.to_list(ring, ring.current, 3)
    destination = get_insert_idx(ring.current - 1, picked_up, ring.max)

    ring
    |> Ring.move(ring.current, 3, destination)
    |> Ring.advance_current()
  end

  # Destination label: count down from current - 1, wrapping from 0 back to
  # the highest label, and skip any label that was just picked up.
  def get_insert_idx(0, removed, highest), do: get_insert_idx(highest, removed, highest)

  def get_insert_idx(idx, removed, highest) do
    if idx in removed do
      get_insert_idx(idx - 1, removed, highest)
    else
      idx
    end
  end
end
|
lib/2020/day23.ex
| 0.802362
| 0.520557
|
day23.ex
|
starcoder
|
defmodule Glock do
@moduledoc """
Glock is a simple websocket client application based on the Gun
HTTP/HTTP2/Websocket Erlang library.
Glock aims to simplify the specific task of starting and configuring
a websocket client connection to a remote server, providing common
default values for all connection settings provided by Gun while still
allowing for full customization.
Glock also provides a set of callbacks for processing messages sent
to and received from the remote server and tracking state across the
life of the connection in whatever way makes sense to your application.
Default callback implementations let you get up and running immediately
for simply sending messages to a server and logging received responses
by implementing the `__using__/1` macro:
### Example
defmodule MySocket do
use Glock.Socket
end
iex> {:ok, conn} = MySocket.start_link(host: "echo.websocket.org", path: "/")
{:ok, #PID<0.260.0>}
iex> :ok = MySocket.push(conn, "hello socket!")
:ok
Implementing the `init_stream/1` callback allows you to create and store
state for the socket connection which can be accessed from subsequent message
send or receive events. A simple example might be to count the number of
messages sent and received from the socket.
### Example
defmodule MySocket do
use Glock.Socket
def init_stream(conn: conn, protocols: _, headers: _) do
%{
"connection" => conn.stream,
"sent" => 0,
"received" => 0
}
end
end
Implementing the `handle_send/2` callback allows for customization of the
message frame types and the encoding or serialization performed on messages
prior to sending. Piggy-backing the prior example, a complex data structure
could be serialized to JSON and counted before being sent to the remote server.
### Example
defmodule MySocket do
...
def handle_send(message, state) do
frame = {:text, JSON.encode(message)}
new_state = Map.put(state, "sent", state["sent"] + 1)
{frame, new_state}
end
end
Finally, implementing the `handle_receive/2` callback allows for custom
handling of messages beyond simply logging them. All messages received
by the gun connection pass through the `handle_receive/2` callback, so you
can decode them, store them, reprocess them or anything else you like.
This handler also covers receiving `:close` control frames and cleaning up/
shutting down the glock process appropriately according to the Websocket
specification.
### Example
defmodule MySocket do
...
def handle_receive(frame, state) do
case frame do
{:text, message} ->
send_to_internal_queue(message)
new_state = Map.put(state, "received", state["received"] + 1)
{:doesnt_matter, {:ok, new_state}}
:close ->
JSON.encode(state) |> write_to_log_service()
{:close, {:close, state}}
_ ->
{:doesnt_matter, {:ok, state}}
end
end
end
"""
@typedoc """
The types of frame structures accepted and returned by underlying gun
client for handling.
"""
@type frame ::
:ping
| :pong
| :close
| {:ping | :pong | :text | :binary | :close, binary}
| {:close, non_neg_integer, binary}
@typedoc """
Keyword list of arguments to initialize a glock socket process.
All keys are optional with default values except for `:host`
and `:path`.
"""
@type init_opts :: [
connect_opts: %{
connect_timeout: non_neg_integer,
retry: non_neg_integer,
retry_timeout: non_neg_integer,
transport: :tcp | :tls
},
handler_init_args: term,
headers: [binary],
host: iodata,
path: iodata,
port: non_neg_integer,
ws_opts: %{
compress: boolean,
closing_timeout: non_neg_integer,
keepalive: non_neg_integer
}
]
@typedoc """
Options available to the `c:init_stream/1` callback for initializing
the state of socket implementing the glock behaviour.
"""
@type stream_init_opts :: [
conn: Glock.Conn.t(),
protocols: [binary],
headers: [{binary, binary}]
]
@doc """
Initialize the state of the socket implementing the glock
behaviour. The state is stored within the glock connection
and passed to the handler callbacks when messages are sent or
received over the socket.
"""
@callback init_stream(stream_init_opts()) :: term
@doc """
Processes messages sent from the client application to the
websocket server and optionally tracks state for the connection.
Messages sent by either the `send/2` or `send_async/2` functions
are passed through `handle_push/2` for processing.
The `c:handle_push/2` callback for the module implementing the
glock behaviour is responsible for packaging messages to be sent
into an appropriate websocket message frame. Terms must be converted
or serialized to an appropriate text (string) or raw binary encoding
and wrapped in a tuple indicating their format, or else a `:close`
control frame can be sent.
"""
# NOTE(review): the moduledoc examples reference `handle_send/2` and a
# `push/2` client function; confirm which names the current Glock.Socket
# implementation actually generates — the docs and the callback name
# (`handle_push`) disagree.
@callback handle_push(message :: term, state :: term) ::
{:ok | :push | :close, frame, term}
@doc """
Processes messages received by the client application from the
remote websocket server. Any application-specific operations that
must be done on received messages are performed by the `c:handle_receive/2`
callback.
Based on the frame received and the optionally tracked state available
to the callback, emits a response frame and triggers the appropriate action.
If an `{:ok, state}` tuple is produced, there is no expectation that a frame
should be sent back to the websocket server. If a `{:send, state}` tuple is
produced, the response frame is sent to the server. If a `{:close, state}`
tuple is produced, a simple `:close` frame is returned to the server to
signal the client's acknowledgement the connection is to be terminated and
the process exits, cleaning up its state.
"""
# NOTE(review): the prose above describes 2-tuples like {:ok, state}, but the
# typespec below declares 3-tuples {:ok | :push | :close, frame, term} —
# confirm which shape is current and align the documentation.
@callback handle_receive(frame, state :: term) ::
{:ok | :push | :close, frame, term}
defdelegate stream(opts), to: Glock.Stream
# Guard-style macro: true for the bare :close atom or any tuple frame tagged
# with :close ({:close, reason} / {:close, code, reason}).
defmacro is_close(frame) do
quote do
unquote(frame) == :close or unquote(frame) |> elem(0) == :close
end
end
defmodule ConnError do
defexception [:message]
end
end
|
lib/glock.ex
| 0.862648
| 0.469642
|
glock.ex
|
starcoder
|
defmodule Clover do
  @moduledoc """
  The Clover application
  """

  use Application

  alias Clover.{
    Adapter,
    Conversation,
    Robot
  }

  @registry Clover.Registry
  @robot_supervisor Clover.Robots

  @doc false
  def start(_type, _args) do
    Clover.Supervisor.start_link(@robot_supervisor, name: Clover.App)
  end

  @doc """
  Start a robot under the `Clover` dynamic supervisor.

  `robot` and `adapter` may each be given as a bare module or as a
  `{module, arg}` tuple; bare modules are normalized to `{module, []}`.
  """
  def start_supervised_robot(name, robot, adapter, opts \\ [])

  def start_supervised_robot(name, robot, adapter, opts) when is_atom(robot) do
    start_supervised_robot(name, {robot, []}, adapter, opts)
  end

  def start_supervised_robot(name, {robot, robot_arg}, adapter, opts) when is_atom(adapter) do
    start_supervised_robot(name, {robot, robot_arg}, {adapter, []}, opts)
  end

  def start_supervised_robot(name, {robot, robot_arg}, {adapter, adapter_arg}, opts) do
    DynamicSupervisor.start_child(
      @robot_supervisor,
      Clover.Robot.Supervisor.child_spec({name, {robot, robot_arg}, {adapter, adapter_arg}}, opts)
    )
  end

  @doc """
  Stop a robot started with `start_supervised_robot/4`.

  Returns `:ok` when the robot is not running.
  """
  @spec stop_supervised_robot(String.t()) :: :ok | {:error, :not_found}
  def stop_supervised_robot(robot) do
    case whereis_robot_supervisor(robot) do
      nil -> :ok
      pid -> DynamicSupervisor.terminate_child(@robot_supervisor, pid)
    end
  end

  @doc """
  Start a robot outside of the `Clover` supervision tree.
  The robot's pid will still be registered in the `Clover` registry, but the processes will not be
  supervised by `Clover`, and you can manage link the robot into your own supervision tree. Returns
  the `pid` of the robot's supervisor.
  To stop the robot, call `Supervisor.stop(pid)`.
  """
  def start_robot(name, robot, adapter, opts \\ [])

  def start_robot(name, robot, adapter, opts) when is_atom(robot) do
    start_robot(name, {robot, []}, adapter, opts)
  end

  def start_robot(name, {robot, robot_arg}, adapter, opts) when is_atom(adapter) do
    start_robot(name, {robot, robot_arg}, {adapter, []}, opts)
  end

  def start_robot(name, {robot, robot_arg}, {adapter, adapter_arg}, opts) do
    Clover.Robot.Supervisor.start_link({name, {robot, robot_arg}, {adapter, adapter_arg}}, opts)
  end

  @doc "The name of the registry Clover registers processes under."
  def registry, do: @registry

  def whereis_conversation(message) do
    whereis(Conversation.via_tuple(message))
  end

  def whereis_robot(robot) do
    whereis(Robot.via_tuple(robot))
  end

  def whereis_robot_adapter(robot) do
    whereis(Adapter.via_tuple(robot))
  end

  # Resolves a :via registry tuple to a pid, or nil when unregistered.
  defp whereis({:via, _, {_, key}}) do
    case Registry.lookup(@registry, key) do
      [{pid, _}] -> pid
      [] -> nil
    end
  end

  # Finds the supervisor (child of @robot_supervisor) that supervises the
  # named robot's process, by scanning each supervisor's children.
  defp whereis_robot_supervisor(robot) do
    case whereis_robot(robot) do
      nil ->
        nil

      pid ->
        @robot_supervisor
        |> DynamicSupervisor.which_children()
        |> find_supervisor_for_robot(pid)
    end
  end

  defp find_supervisor_for_robot(supervisors, robot_pid) do
    supervisors
    |> Enum.find(fn child ->
      child
      |> child_pid()
      |> Supervisor.which_children()
      |> Enum.find(fn child -> child_pid(child) == robot_pid end)
    end)
    |> case do
      nil -> nil
      child -> child_pid(child)
    end
  end

  # Extracts the pid from a Supervisor.which_children/1 entry.
  defp child_pid({_, pid, _, _}), do: pid

  @doc false
  def format_error({:not_exported, {mod, function, arity}}) do
    "#{mod} does not export function #{function}/#{arity}"
  end

  def format_error({:invalid_option, {{mod, function, arity}, option, valid}}) do
    """
    invalid option for #{mod}.#{function}/#{arity} #{inspect(option)}
    valid options: #{inspect(valid)}
    """
  end

  def format_error({:unhandled_message, message}) do
    "unhandled message #{inspect(message)}"
  end

  def format_error({:invalid_script_return, invalid_return}) do
    # Fixed: a stray `")` was baked into the first line of this message.
    """
    invalid script return #{inspect(invalid_return)}
    expected one of:
    %Message{action: :say | :typing}
    {%Message{action: :say | :typing}, data}
    [%Message{action: :say | :typing}]
    {:noreply, data}
    :noreply
    :nomatch
    """
  end

  def format_error(reason) do
    "unexpected error #{inspect(reason)}"
  end
end
|
lib/clover.ex
| 0.806091
| 0.549943
|
clover.ex
|
starcoder
|
defmodule Blanket do
@moduledoc """
This is the facade of the Blanket application. Handles starting/stopping the
application and defines the client API.
"""
alias Blanket.Heir
alias Blanket.Metatable
# -- Application API --------------------------------------------------------
use Application
# Application callback: boots the top-level Blanket supervision tree.
@doc false
def start(_type, _args) do
Blanket.Supervisor.start_link
end
# User API ------------------------------------------------------------------
@doc """
Create an ETS table associated to a table reference, or claims the ownership
of the table, after a restart.
The table reference is used in Blanket and is not the ETS table id. A table
reference can actually be any term but must be unique.
This function must be called by the process that will own the table, best is
to put it in your `c:GenServer.init/1` or `c:Agent.start_link/2` function.
If your process crashes, it must be restarted with the same table reference in
order to retrieve its ETS table. The table reference argument should be in the
supervisor child spec or in the `Supervisor.start_child/2` for instance.
### Available Options
**`:create_table`**, **required**. Determines how to create the ETS table
the first time the heir is created. One of the following :
- A `fn` returning `{:ok, tab}` where `tab` is the identifier of the
created ETS table.
- A tuple `{table_name, table_opts}` which will be used to call
`:ets.new(table_name, table_opts)`.
- A module name, e.g. `MyTableServer` or `__MODULE__`. The module must
export a `create_table/1` function which will be passed the whole
`claim_table` options list and must return `{:ok, tab}` where `tab` is
the identifier of the created ETS table.
Any `{:heir, _, _}` set on the table will be overriden by the Blanket heir.
**`:monitor`**, optional, defaults to false. If true, the calling process will
set a monitor the heir process and receive a `:'DOWN'` message if the latter
crashes. Mostly useless because heir have an extremely rare chance to crash,
as they do basically nothing.
**`:monitor_ref`**, optional, defaults to false. If true, the return of
`claim_table/2` also includ a monitor reference as the third element.
"""
@spec claim_table(term(), Keyword.t()) :: {:ok, :ets.tid()}
| {:ok, :ets.tid(), reference()}
| {:error, term()}
def claim_table(tref, opts) do
# boots a table heir, or get the pid of an existing one, and attempt to set
# the owner. Returns error if the table is already owned.
# The table is created in the heir process so we can then use the same code
# asking foir the table when the heir is owner
{:ok, heir_pid} = Heir.pid_or_create(tref, opts)
# Maybe we want to set a monitor if we expect the heir to crash. This should
# never happen because the heir does nothing, but we offer this safety
monitor = Keyword.get(opts, :monitor, false)
return_monitor_ref = Keyword.get(opts, :monitor_ref, false)
case Heir.claim(heir_pid, self()) do
{:ok, tab} ->
mref = if monitor,
do: Process.monitor(heir_pid)
if monitor and return_monitor_ref do
{:ok, tab, mref}
else
{:ok, tab}
end
other -> other
end
end
@doc """
Creates a new heir for the table.
The calling process must be the table owner. Sets a monitor and return the new
process monitor ref.
This function should not be called if the heir is not dead because the current
heir will not be turned down while booting a new one.
"""
@spec recover_heir(:ets.tid()) :: {:ok, reference()} | {:error, any()}
def recover_heir(tab) do
with {:ok, tref} <- Metatable.get_tab_tref(tab),
{:ok, heir_pid} <- Heir.boot(:recover, tref, :no_opts),
:ok <- Heir.attach(heir_pid, tab) do
{:ok, Process.monitor(heir_pid)}
end
end
@doc """
Finds the heir associated with the table, and stops it.
The calling process must own the table.
"""
@spec abandon_table(:ets.tid()) :: :ok | {:error, any()}
def abandon_table(tab) do
with {:ok, tref} <- Metatable.get_tab_tref(tab),
{:ok, heir_pid} <- Heir.whereis(tref),
:ok <- Heir.detach(heir_pid, tab) do
:ok
end
end
end
|
lib/blanket.ex
| 0.76999
| 0.514156
|
blanket.ex
|
starcoder
|
defmodule Day13.Game do
  # Advent of Code 2019 day 13 (breakout game driven by an Intcode program).
  # State: the Intcode process pid plus the last seen paddle and ball `{x, y}`.
  defstruct pid: nil, paddle: {0, 0}, ball: {0, 0}

  # Part 1: run the program, gather all of its output as flat triples
  # `[x, y, tile]`, and count tiles with id 2 (blocks).
  def output do
    machine = 13 |> InputFile.contents_of() |> Intcode.build
    my_pid = self()
    spawn(fn -> Intcode.execute(machine, {:mailbox, my_pid}) end)
    retrieve_output([])
    |> Enum.chunk_every(3)
    |> build_screen(%{})
    |> Map.values
    |> Enum.count(&(&1 == 2))
    |> IO.inspect
  end

  # Part 2: play the game, rendering to the terminal via ANSI cursor moves.
  def play do
    machine = 13 |> InputFile.contents_of() |> Intcode.build
    my_pid = self()
    IO.ANSI.clear() |> IO.write
    # graphics = spawn(&paint/0)
    prog = spawn(fn -> Intcode.execute(machine, {:mailbox, my_pid}) end)
    game = %__MODULE__{pid: prog}
    # paint/1 recurses indefinitely (recv/0 blocks with no timeout), so this
    # only returns once the Intcode process stops feeding output and the
    # process is torn down externally.
    paint(game)
    IO.ANSI.clear() |> IO.write
  end

  # Manual joystick input loop (j = left, k = neutral, l = right).
  # NOTE(review): not referenced by play/0 — appears to be a leftover from a
  # manual-play mode.
  def input_loop(pid) do
    if Process.alive?(pid) do
      case IO.getn(:stdio, nil, 2) do
        "j\n" ->
          send pid, -1
        "jj\n" ->
          send pid, -1
          send pid, -1
        "jjj\n" ->
          send pid, -1
          send pid, -1
          send pid, -1
        "k\n" ->
          send pid, 0
        "l\n" ->
          send pid, 1
      end
      input_loop(pid)
    end
  end

  # Consumes one output triple `{x, y, tile}` per call, draws it, updates the
  # tracked paddle/ball positions, then tail-recurses forever.
  def paint(%__MODULE__{} = game) do
    case {recv(), recv(), recv()} do
      {-1, 0, score} ->
        paint_score(score)
        game
      {x, y, 0} ->
        paint({x, y}, :empty)
        game
      {x, y, 1} ->
        paint({x, y}, :wall)
        game
      {x, y, 2} ->
        paint({x, y}, :block)
        game
      {x, y, 3} ->
        paint({x, y}, :paddle)
        %__MODULE__{game | paddle: {x, y}}
      {x, y, 4} ->
        paint({x, y}, :ball)
        :timer.sleep(1)
        ball_movement(game, {x, y})
        %__MODULE__{game | ball: {x, y}}
      a -> IO.inspect a
    end
    |> paint
  end

  # Ball moving down or level (toward the paddle's row): keep the paddle still.
  def ball_movement(%__MODULE__{ball: {_, old_y}} = game, {_x, y}) when old_y <= y, do: send game.pid, 0
  # Ball moving up: predict where it will return and steer the paddle there.
  def ball_movement(%__MODULE__{ball: {old_x, old_y}} = game, {x, y}) do
    # Figure out where the ball will hit the paddle's y coordinate and send the paddle there
    delta_x = x - old_x
    # NOTE(review): delta_y is computed but never used (compiler warning);
    # the prediction below assumes a one-cell-per-tick vertical speed.
    delta_y = y - old_y
    future_x = ((elem(game.paddle, 1) - y) * delta_x) + x
    # IO.puts ("MOVE TO #{future_x}")
    case (future_x - elem(game.paddle, 0)) do
      0 -> send game.pid, 0
      moves ->
        # NOTE(review): `0..moves` is inclusive at both ends, so abs(moves)+1
        # inputs are sent, and `moves / abs(moves)` is a float (±1.0) — confirm
        # the Intcode input handler tolerates both.
        for x <- 0..moves do
          send game.pid, moves / abs(moves)
        end
    end
  end

  # Blocks until the next message from the Intcode process arrives.
  def recv do
    receive do
      a -> a
    end
  end

  # Draws the score in the top-left corner of the terminal.
  def paint_score(score) do
    IO.ANSI.cursor(0, 0) |> IO.write
    IO.write(Integer.to_string(score))
  end

  # Tile-id to glyph mapping, then the actual cursor-positioned write.
  def paint(coords, :empty), do: paint(coords, " ")
  def paint(coords, :wall), do: paint(coords, "|")
  def paint(coords, :block), do: paint(coords, "#")
  def paint(coords, :paddle), do: paint(coords, "_")
  def paint(coords, :ball), do: paint(coords, "*")
  def paint({x, y}, c) do
    IO.ANSI.cursor(y, x + 1) |> IO.write
    IO.write(c)
    #IO.puts "#{c} at #{x}, #{y}"
  end

  # Folds `[x, y, tile]` triples into a `{x, y} => tile` map.
  def build_screen([], screen), do: screen
  def build_screen([[x, y, val] | rest], screen) do
    build_screen(rest, Map.put(screen, {x, y}, val))
  end

  # Drains the mailbox into a list, treating one second of silence as
  # end-of-output. NOTE(review): a literal `false` output value would also
  # terminate the drain early.
  def retrieve_output(msgs) do
    msg = receive do
      a -> a
    after
      1_000 -> false
    end
    if msg do
      retrieve_output([msg | msgs])
    else
      Enum.reverse(msgs)
    end
  end
end
|
year_2019/lib/day_13/game.ex
| 0.507324
| 0.451508
|
game.ex
|
starcoder
|
defmodule BigchaindbEx.Condition.Ed25519Sha256 do
  @moduledoc """
  ED25519: Ed25519 signature condition.
  This condition implements Ed25519 signatures.
  ED25519 is assigned the type ID 4. It relies only on the ED25519 feature suite
  which corresponds to a bitmask of 0x20.
  """
  alias BigchaindbEx.{Crypto, Fulfillment, Condition}

  @enforce_keys [:cost, :type_id, :hash]

  # Fix: `Integer.t` is not a defined type (the Integer module exports no t/0);
  # use the built-in integer() type instead.
  @type t :: %__MODULE__{
    cost: integer(),
    type_id: integer(),
    hash: binary
  }

  defstruct [
    :cost,
    :type_id,
    :hash
  ]

  @type_id 4
  @type_name "ed25519-sha-256"
  @asn1 "ed25519Sha256"
  @asn1_condition "ed25519Sha256Condition"
  @asn1_fulfillment "ed25519Sha256Fulfillment"
  @category "simple"
  @constant_cost 131072
  @public_key_length 32
  @signature_length 64

  # Accessors for the module's compile-time constants.
  def type_id, do: @type_id
  def type_name, do: @type_name
  def type_asn1, do: @asn1
  def type_asn1_condition, do: @asn1_condition
  def type_asn1_fulfillment, do: @asn1_fulfillment
  def type_category, do: @category
  def type_cost, do: @constant_cost
  def type_pub_key_length, do: @public_key_length
  def type_signature_length, do: @signature_length

  @doc """
  Derives the condition
  from a given fulfillment.
  """
  @spec from_fulfillment(Fulfillment.Ed25519Sha512.t) :: {:ok, __MODULE__.t} | {:error, String.t}
  # Fix: dropped the unused `= ffl` binding (compile warning).
  def from_fulfillment(%Fulfillment.Ed25519Sha512{public_key: pub_key}) when is_binary(pub_key) do
    case generate_hash(pub_key) do
      {:ok, hash} -> {:ok, %__MODULE__{
        cost: @constant_cost,
        type_id: @type_id,
        hash: hash
      }}
      {:error, reason} -> {:error, "Could not derive condition: #{inspect reason}"}
    end
  end
  def from_fulfillment(_), do: {:error, "The given fulfillment is invalid!"}

  @doc """
  Generates a hash from a
  given public key and the
  condition's fingerprint contents.
  """
  @spec generate_hash(binary) :: {:ok, binary} | {:error, String.t}
  def generate_hash(public_key) when is_binary(public_key) do
    # ASN.1-encode the fingerprint contents, then SHA3-256 hash them.
    # NOTE(review): the error message says "decode" although the failing step
    # is an encode — kept as-is to avoid changing user-visible text.
    with {:ok, asn1_binary} <- :Fingerprints.encode(:Ed25519FingerprintContents, {nil, public_key}),
         {:ok, hash} <- Crypto.sha3_hash256(asn1_binary, false)
    do
      {:ok, hash}
    else
      {:error, reason} -> {:error, "Could not decode public key: #{inspect reason}"}
    end
  end

  @doc """
  Derives a condition from
  an encoded uri.
  """
  @spec from_uri(String.t) :: {:ok, __MODULE__.t} | {:error, String.t}
  def from_uri(uri) when is_binary(uri) do
    with {:ok, hash} <- Condition.decode_hash_from_uri(uri),
         {:ok, type} <- Condition.decode_type_from_uri(uri)
    do
      # Module attributes are valid in match patterns, so the intermediate
      # `cond_type` binding plus pin operator is unnecessary.
      case type do
        @type_name ->
          {:ok, %__MODULE__{
            cost: @constant_cost,
            type_id: @type_id,
            hash: hash
          }}
        _ -> {:error, "Could not decode uri: Type mismatch"}
      end
    else
      # Fix: `reason` was bound but unused (compile warning).
      {:error, _reason} -> {:error, "Could not decode uri: #{inspect uri}"}
    end
  end

  @doc """
  Converts a condition struct
  to an uri.
  """
  @spec to_uri(__MODULE__.t) :: {:ok, String.t} | {:error, String.t}
  def to_uri(%__MODULE__{} = condition), do: {:ok, hash_to_uri(condition.hash)}

  @doc """
  Serializes a given hash
  to an url-friendly format.
  """
  @spec hash_to_uri(bitstring) :: String.t
  def hash_to_uri(hash) when is_bitstring(hash) do
    "ni:///sha-256;" <> Base.url_encode64(hash, padding: false) <> "?fpt=" <> @type_name <> "&cost=" <> to_string(@constant_cost)
  end
end
|
lib/bigchaindb_ex/condition/ed25519Sha256.ex
| 0.848188
| 0.403214
|
ed25519Sha256.ex
|
starcoder
|
defmodule AntlUtilsElixir.DateTime.Period do
  @moduledoc """
  Period
  """
  alias AntlUtilsElixir.DateTime.Comparison

  @type t :: %{start_at: nil | DateTime.t(), end_at: nil | DateTime.t()}

  # Normalizes both maps to `%{start_at: _, end_at: _}` shape using the given
  # keys, then delegates to included?/2.
  @spec included?(map, map, atom, atom) :: boolean
  def included?(inner, outer, start_at_key, end_at_key)
      when is_map(inner) and is_map(outer) and is_atom(start_at_key) and is_atom(end_at_key) do
    included?(
      %{start_at: Map.get(inner, start_at_key), end_at: Map.get(inner, end_at_key)},
      %{start_at: Map.get(outer, start_at_key), end_at: Map.get(outer, end_at_key)}
    )
  end

  # A period with no end contains anything starting at or after its start.
  @spec included?(t, t) :: boolean()
  def included?(%{start_at: %DateTime{}} = inner, %{start_at: %DateTime{}, end_at: nil} = outer) do
    Comparison.gte?(inner.start_at, outer.start_at)
  end

  # An open-ended period can never fit inside a bounded one.
  def included?(%{start_at: %DateTime{}, end_at: nil}, %{
        start_at: %DateTime{},
        end_at: %DateTime{}
      }),
      do: false

  # Both bounded: inclusion means starting no earlier and ending no later.
  def included?(
        %{start_at: %DateTime{}, end_at: %DateTime{}} = inner,
        %{start_at: %DateTime{}, end_at: %DateTime{}} = outer
      ) do
    Comparison.gte?(inner.start_at, outer.start_at) &&
      Comparison.lte?(inner.end_at, outer.end_at)
  end

  # Normalizes a map with arbitrary keys before computing its status.
  @spec get_status(map, DateTime.t(), atom, atom) :: :ended | :ongoing | :scheduled
  def get_status(period, %DateTime{} = datetime, start_at_key, end_at_key)
      when is_map(period) and is_atom(start_at_key) and is_atom(end_at_key) do
    normalized = %{
      start_at: Map.get(period, start_at_key),
      end_at: Map.get(period, end_at_key)
    }

    get_status(normalized, datetime)
  end

  # Classifies `datetime` relative to the period: before start -> :scheduled,
  # between start (inclusive) and end (exclusive) -> :ongoing, else :ended.
  @spec get_status(t, DateTime.t()) :: :ended | :ongoing | :scheduled
  def get_status(%{start_at: start_at, end_at: end_at}, %DateTime{} = datetime) do
    started? = DateTime.compare(datetime, start_at) in [:gt, :eq]

    # A nil end_at means the period never ends, which behaves like :lt.
    end_comparison =
      if is_nil(end_at), do: :lt, else: DateTime.compare(datetime, end_at)

    cond do
      started? and end_comparison == :lt -> :ongoing
      not started? -> :scheduled
      true -> :ended
    end
  end

  # Keeps only the periods whose status (at `datetime`) is among `status`
  # (a single atom or a list of atoms).
  @spec filter_by_status([t], atom() | [atom()], DateTime.t(), atom, atom) :: [any]
  def filter_by_status(
        periods,
        status,
        %DateTime{} = datetime,
        start_key \\ :start_at,
        end_key \\ :end_at
      )
      when is_list(periods) and is_atom(start_key) and is_atom(end_key) do
    wanted = List.wrap(status)

    for period <- periods,
        get_status(period, datetime, start_key, end_key) in wanted,
        do: period
  end
end
|
lib/datetime/period.ex
| 0.854126
| 0.46873
|
period.ex
|
starcoder
|
defmodule FIQLEx.QueryBuilder do
  @moduledoc """
  `QueryBuilder` module has to be used to build queries from FIQL.
  You have at least to use this module and define the functions `init` and `build`.
  Here is the minimal code:
  ```
  defmodule MyQueryBuilder do
    use FIQLEx.QueryBuilder

    @impl true
    def init(ast, _opts) do
      %{}
    end

    @impl true
    def build(_ast, state) do
      {:ok, state}
    end
  end
  ```
  The you'll want to override functions like `handle_and_expression/4`, etc, ... to
  build your final query. See `SQLQueryBuilder` module for an implementation example.
  To build a new query using your module you have to parse a FIQL query and call `FIQLEx.build_query/3`
  ```
  query = "author.age=ge=25;author.name==*Doe"
  {:ok, ast} = FIQLEx.parse(query)
  {:ok, query} = FIQLEx.build_query(ast, MyQueryBuilder)
  ```
  """

  @doc """
  This callback is invoked as soon as you call the function `FIQLEx.build_query/3`.
  Parameters are:
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `options`: The options you passed to `FIQLEx.build_query/3`
  This function must return the initial state of your query builder
  """
  @callback init(ast :: FIQLEx.ast(), options :: Keyword.t()) :: state
            when state: any()

  @doc """
  This callback is invoked at the end of the call to the function `FIQLEx.build_query/3`.
  Parameters are:
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current (and final) state of your query builder
  This function returns `{:ok, final_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  """
  @callback build(ast :: FIQLEx.ast(), state :: any()) :: {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when an OR expression is found in the query.
  Parameters are:
  * `exp1`: left side of the OR expression
  * `exp2`: right side of the OR expression
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age!=25,author.name==*Doe
  ```
  The `exp1` is `author.age!=25`
  The `exp2` is `author.name==*Doe`
  Call `handle_ast(exp, ast, state)` to go deeper in the expressions
  """
  @callback handle_or_expression(
              exp1 :: FIQLEx.ast(),
              exp2 :: FIQLEx.ast(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when an AND expression is found in the query.
  Parameters are:
  * `exp1`: left side of the AND expression
  * `exp2`: right side of the AND expression
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age!=25;author.name==*Doe
  ```
  The `exp1` is `author.age!=25`
  The `exp2` is `author.name==*Doe`
  Call `handle_ast(exp, ast, state)` to go deeper in the expressions
  """
  @callback handle_and_expression(
              exp1 :: FIQLEx.ast(),
              exp2 :: FIQLEx.ast(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when an expression is found in the query.
  An expression is a selector compared to a value, or just a selector.
  Parameters are:
  * `exp`: the expression
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age!=25
  ```
  This is an expression. Call `handle_ast(exp, ast, state)` to go deeper in the expression
  """
  @callback handle_expression(
              exp :: FIQLEx.ast(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when a selector without a value to be compared to is found.
  Parameters are:
  * `selector_name`: the name of the selector
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age
  ```
  The `selector_name` is `author.age`
  """
  @callback handle_selector(
              selector_name :: binary(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when a selector is found with a value it is compared to.
  Parameters are:
  * `selector_name`: the name of the selector
  * `op`: the comparison operator. Either `:equal` or `:not_equal`
  * `value`: The value to compare the selector to
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age==25
  ```
  The `selector_name` is `author.age`
  The `op` is `:equal`
  The `value` is `25`
  """
  @callback handle_selector_and_value(
              selector_name :: binary(),
              op :: :equal | :not_equal,
              value :: any(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  @doc """
  This callback is invoked when a selector is found with a value it is compared to.
  Same as `handle_selector_and_value/5` but with a custom comparison operator
  Parameters are:
  * `selector_name`: the name of the selector
  * `op`: the comparison operator as a string
  * `value`: The value to compare the selector to
  * `ast`: The AST returned by `FIQLEx.parse/1`
  * `state`: The current state of your query builder
  This function returns `{:ok, new_state}` if everything is ok, or `{:error, reason}`
  if there is something wrong
  # Example
  ```
  author.age=ge=25
  ```
  The `selector_name` is `author.age`
  The `op` is `ge`
  The `value` is `25`
  """
  @callback handle_selector_and_value_with_comparison(
              selector_name :: binary(),
              op :: binary(),
              value :: any(),
              ast :: FIQLEx.ast(),
              state
            ) ::
              {:ok, state} | {:error, any()}
            when state: any()

  # NOTE(review): init/2 and build/2 are listed as optional although the
  # moduledoc says implementers must define them, and `use` provides no
  # defaults for them — confirm this is intended.
  @optional_callbacks init: 2,
                      build: 2,
                      handle_or_expression: 4,
                      handle_and_expression: 4,
                      handle_expression: 3,
                      handle_selector: 3,
                      handle_selector_and_value: 5,
                      handle_selector_and_value_with_comparison: 5

  # Injects the behaviour, the AST traversal helpers, and no-op default
  # implementations (all overridable) into the using module.
  defmacro __using__(_opts) do
    quote do
      @behaviour FIQLEx.QueryBuilder
      import FIQLEx.QueryBuilder.Helpers

      @doc """
      This function will go deeper in the ast traversal.
      Parameters are:
      * `curr_ast`: The AST we want to go deeper with
      * `ast`: The global AST
      * `state`: The current state of your query builder
      The function returns `{:ok, state}` if everything is fine, and `{:error, reason}`
      if there is an error
      """
      @spec handle_ast(FIQLEx.ast(), FIQLEx.ast(), any()) ::
              {:ok, any()} | {:error, any()}
      def handle_ast(curr_ast, ast, state) do
        do_handle_ast(curr_ast, ast, __MODULE__, state)
      end

      @doc """
      Same as `handle_ast/3` but returns the `state` or raises an exception.
      """
      @spec handle_ast!(FIQLEx.ast(), FIQLEx.ast(), any()) :: any()
      def handle_ast!(curr_ast, ast, state) do
        do_handle_ast!(curr_ast, ast, __MODULE__, state)
      end

      @doc """
      Returns a list of all selectors for a given AST
      """
      @spec get_selectors(ast :: FIQLEx.ast()) :: [binary()]
      def get_selectors(ast) do
        do_get_selectors(ast)
      end

      # Default pass-through implementations: each callback simply keeps the
      # current state unchanged unless the using module overrides it.
      def handle_or_expression(_exp1, _exp2, _ast, state), do: {:ok, state}
      def handle_and_expression(_exp1, _exp2, _ast, state), do: {:ok, state}
      def handle_expression(_exp, _ast, state), do: {:ok, state}
      def handle_selector(_selector_name, _ast, state), do: {:ok, state}
      def handle_selector_and_value(_selector_name, _op, _value, _ast, state), do: {:ok, state}
      def handle_selector_and_value_with_comparison(_selector_name, _op, _value, _ast, state),
        do: {:ok, state}

      defoverridable handle_or_expression: 4,
                     handle_and_expression: 4,
                     handle_expression: 3,
                     handle_selector: 3,
                     handle_selector_and_value: 5,
                     handle_selector_and_value_with_comparison: 5
    end
  end

  # Thin wrappers around FIQLEx's traversal entry points, imported into every
  # module that `use`s FIQLEx.QueryBuilder.
  defmodule Helpers do
    @spec do_handle_ast(FIQLEx.ast(), FIQLEx.ast(), atom(), any()) ::
            {:ok, any()} | {:error, any()}
    def do_handle_ast(curr_ast, ast, module, state) do
      FIQLEx.handle_ast(curr_ast, ast, module, state)
    end

    @spec do_handle_ast!(FIQLEx.ast(), FIQLEx.ast(), atom(), any()) :: any()
    def do_handle_ast!(curr_ast, ast, module, state) do
      FIQLEx.handle_ast!(curr_ast, ast, module, state)
    end

    # Collects selectors by running the AST through the SelectorsGetter builder.
    @spec do_get_selectors(ast :: FIQLEx.ast()) :: [binary()]
    def do_get_selectors(ast) do
      {:ok, selectors} = FIQLEx.build_query(ast, FIQLEx.SelectorsGetter)
      selectors
    end
  end
end
|
lib/query_builder.ex
| 0.94256
| 0.846578
|
query_builder.ex
|
starcoder
|
defmodule Tammes do
  # Simulation for the Tammes problem: distribute points on the unit sphere so
  # the minimum pairwise arc distance grows. Points are stored in an Agent as
  # `[theta, phi]` angle pairs keyed by integer index.

  # helper function
  # Applies `fun` to the argument list `args`.
  def call(args, fun) do
    apply(fun, args)
  end
  # Applies `module.fun` to the argument list `args`.
  def call(args, fun, module) do
    apply(module, fun, args)
  end

  # Element-wise application of a binary `fun` over two enumerables.
  def vector_fun(e1, e2, fun) do
    Enum.zip(e1, e2)
    |> Enum.map(&Tuple.to_list(&1))
    |> Enum.map(&call(&1, fun))
  end

  def dot_product(c1, c2) do
    vector_fun(c1, c2, &(&1*&2)) |> Enum.sum
  end
  def vector_add(c1, c2) do
    vector_fun(c1, c2, &(&1+&2))
  end
  def vector_minus(c1, c2) do
    vector_fun(c1, c2, &(&1-&2))
  end
  # Scalar multiplication by `k`.
  def vector_k(c, k) do
    c |> Enum.map(&(k*&1))
  end
  # p-norm (defaults to the Euclidean norm, k = 2).
  def vector_norm(c, k \\ 2) do
    c
    |> Enum.map(&(:math.pow(&1, k)))
    |> Enum.sum
    |> :math.pow(1/k)
  end
  # Normalizes `c` to unit length.
  def vector_unit(c) do
    k = vector_norm(c)
    vector_k(c, 1/k)
  end

  # sphere functions
  # Spherical angles `[theta, phi]` -> Cartesian `[x, y, z]` on the unit sphere.
  def angle_to_xyz(point) do
    [theta, phi] = point
    s_t = :math.sin theta
    c_t = :math.cos theta
    s_p = :math.sin phi
    c_p = :math.cos phi
    [s_p*c_t, s_p*s_t, c_p]
  end

  # Cartesian -> spherical angles; sine/cosine are clamped to [-1, 1] before
  # the inverse trig calls to guard against floating-point drift.
  def xyz_to_angle(c) do
    n = vector_norm(c)
    [x, y, z] = c
    phi = :math.acos z/n
    s_t = y/n/(:math.sin phi) |> max(-1) |> min(1)
    c_t = x/n/(:math.sin phi) |> max(-1) |> min(1)
    # Recover theta in the correct quadrant from its sine and cosine.
    theta =
      cond do
        s_t > 0 -> :math.acos c_t
        c_t > 0 -> :math.asin s_t
        true -> -:math.pi - :math.asin s_t
      end
    [theta, phi]
  end

  # Great-circle distance between two angle pairs: acos of the clamped dot
  # product of their unit vectors.
  def arc_distance(point1, point2) do
    [point1, point2]
    |> Enum.map(&angle_to_xyz(&1))
    |> call(:dot_product, Tammes)
    |> min(1)
    |> max(-1)
    |> :math.acos
  end

  # Projects point `p` into the local spherical frame defined by the
  # orthonormal basis `c1` (pole), `c2`, `c3`.
  def project(p, c1, c2, c3) do
    c = angle_to_xyz(p)
    k = dot_product(c1, c) |> min(1) |> max(-1)
    phi = :math.acos k
    c4 = vector_minus(c, vector_k(c1, k)) |> vector_unit
    c_2 = dot_product(c2, c4) |> min(1) |> max(-1)
    c_3 = dot_product(c3, c4) |> min(1) |> max(-1)
    theta =
      cond do
        c_3 > 0 -> :math.acos c_2
        true -> -(:math.acos c_2)
      end
    [theta, phi]
  end

  # Re-centers the frame on point `i`, prints all points (in degrees) and the
  # pairs whose distance falls inside `range`. Used for manual inspection.
  # NOTE(review): assumes `n` matches the number of stored points and that
  # Enum.at(v, i)'s y component is nonzero (division by y in c3).
  def change(v, i, n \\ 15, range \\ [0.935, 0.937]) do
    p = Enum.at(v, i)
    v = List.delete_at(v, i)
    c1 = [x, y, z] = angle_to_xyz(p)
    c2 = [z, 0, -x] |> vector_unit
    c3 = [x, -(x*x+z*z)/y, z] |> vector_unit
    v = v |> Enum.map(&(project(&1, c1, c2, c3))) |> Enum.into([[0,0]])
    [a, b] = range
    k = for x <- 0..n-1, y <- 0..n-1, x < y, do: {x, y, Tammes.arc_distance(Enum.at(v, x), Enum.at(v, y))}
    k = k |> Enum.filter(fn {_, _, d} -> d > a and d < b end)
    |> Enum.map(fn {x, y, _} -> [x, y] end)
    pi = :math.pi
    IO.inspect v = v |> Enum.map(fn [x, y] -> [x/pi*180, 90 - y/pi*180] end)
    IO.inspect k |> Enum.map(fn [i, j] -> [Enum.at(v, i), Enum.at(v, j)] end)
    :ok
  end

  # model functions
  # algorithm1
  # Repulsive force exerted on p1 by p2, projected onto the tangent plane at
  # p1 (the radial component is subtracted out). `rate` sets the power law.
  def force(p1, p2, rate \\ -36) do
    c1 = angle_to_xyz(p1)
    c2 = angle_to_xyz(p2)
    c3 = vector_minus(c1, c2)
    c4 = vector_k(c3, c3 |> vector_norm |> :math.pow(rate-1))
    vector_minus(c4, vector_k(c1, dot_product(c1, c4)))
  end

  # Total displacement of point `index` from the forces of all other points.
  def move(v, index, step \\ 1.0e-8) do
    List.delete_at(v, index)
    |> Enum.map(&(force(Enum.at(v, index), &1)))
    |> List.foldr([0,0,0], &(vector_add(&1, &2)))
    |> Enum.map(&(&1*step))
  end

  # One relaxation step: moves every point along its net force.
  # NOTE(review): relies on Map.values/1 returning values in key order 0..n-1;
  # map iteration order is an implementation detail — confirm.
  def step(bucket, n) do
    v = values(bucket)
    0..n-1
    |> Enum.map(&(xyz_to_angle(vector_add(angle_to_xyz(Enum.at(v, &1)), move(v, &1)))))
    |> Enum.zip(0..n-1)
    |> Enum.map(fn {value, key} -> put(bucket, key, value) end)
  end

  # algorithm2
  # Finds the closest pair of points and pushes them apart by `step` along the
  # tangent direction at each point.
  def adjust(bucket, n, step \\ 1.0e-4) do
    v = values(bucket)
    k = for x <- 0..n-1, y <- 0..n-1, x < y, do: {x, y}
    d = k |> Enum.map(fn {x, y} -> Tammes.arc_distance(Enum.at(v, x), Enum.at(v, y)) end)
    {i, j} = Enum.at(k, Enum.find_index(d, fn(x) -> x == Enum.min(d) end))
    p1 = Enum.at(v, i)
    p2 = Enum.at(v, j)
    c1 = angle_to_xyz(p1)
    c2 = angle_to_xyz(p2)
    c3 = vector_minus(c1, c2)
    k = dot_product(c1, c3) / dot_product(c1, c1)
    c4 = vector_minus(c3, vector_k(c1, k))
    n = vector_norm(c4)
    f1 = c4 |> vector_k(1/n*step)
    f2 = f1 |> vector_k(-1)
    n1 = xyz_to_angle(vector_add(c1, f1))
    n2 = xyz_to_angle(vector_add(c2, f2))
    put(bucket, i, n1)
    put(bucket, j, n2)
  end

  # Agent functions
  # Stores `value` under `key` in the Agent-held map.
  def put(bucket, key, value) do
    Agent.update(bucket, &Map.put(&1, key, value))
  end
  # Returns all stored point values.
  def values(bucket) do
    Agent.get(bucket, &Map.values(&1))
  end

  # Random angle in [0, k*pi).
  def r(k), do: :rand.uniform * :math.pi * k

  # Prints a histogram of the pairwise distances `d` over buckets a/m..b/m.
  def check(d, a, b, m) do
    a..b
    |> Enum.map(&(&1/m))
    |> List.foldl(0, fn (b, a) ->
      IO.inspect d |> Enum.map(&(Enum.count(&1, fn x -> x < b and x >= a end))) |> Enum.sum
      b
    end)
    :ok
  end

  # Entry point: random initial points, then loop1 force relaxation followed by
  # loop2 closest-pair adjustments; prints the resulting configuration.
  def init(n \\ 15, loop1_times \\ 500_000, loop2_times \\ 2_000_000) do
    {:ok, bucket} = Agent.start_link(fn -> %{} end)
    :rand.seed(:exsplus, :os.timestamp())
    0..n-1 |> Enum.map(&(put(bucket, &1, [r(2), r(1)])))
    # 0..n-1 |> Enum.map(&(put(bucket, &1, Enum.at(v, &1))))
    1..loop1_times |> Enum.map(fn _ -> step(bucket, n) end)
    1..loop2_times |> Enum.map(fn _ -> adjust(bucket, n) end)
    v = values(bucket)
    IO.inspect v
    d = v |> Enum.map(&(Enum.map(v, fn x -> Tammes.arc_distance(x, &1) end)))
    check(d, 935, 940, 1000)
    k = for x <- 0..n-1, y <- 0..n-1, x < y, do: Tammes.arc_distance(Enum.at(v, x), Enum.at(v, y))
    IO.inspect k |> Enum.sort
    :ok
  end
end
|
tammes.ex
| 0.5083
| 0.664778
|
tammes.ex
|
starcoder
|
defmodule Bolt.Cogs.Clean do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.Converters
alias Bolt.ErrorFormatters
alias Bolt.Humanizer
alias Bolt.ModLog
alias Bolt.Parsers
alias Nosedrum.Predicates
alias Nostrum.Api
alias Nostrum.Cache.Mapping.ChannelGuild
alias Nostrum.Struct.Message
alias Nostrum.Struct.User
@impl true
def usage, do: ["clean <amount:int>", "clean <options...>"]
@impl true
def description,
do: """
Cleanup messages. The execution of this command can be customized with the following options:
`--bots`: Only clean messages authored by bots
`--no-bots`: Do not clean any messages authored by bots
`--limit <amount:int>`: Specify the limit of messages to delete, capped at 1000
`--channel <channel:textchannel>`: The channel to delete messages in
`--user <user:snowflake|user>`: Only delete messages by this user, can be specified multiple times
`--content <content:str>`: Only delete messages containing `content`
**Examples**:
```rs
// delete 60 messages in the current channel
clean 60
// delete up to 10 messages by
// bots in the current channel
clean --bots --limit 10
// delete up to 30 messages sent
// by 197177484792299522 in the #fsharp channel
clean --user 197177484792299522 --channel #fsharp
// delete up to 50 messages containing
// "lol no generics" in the #golang channel
clean --content "lol no generics" --channel #golang --limit 50
```
"""
@impl true
def predicates,
do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_messages)]
@impl true
def parse_args(args) do
OptionParser.parse(
args,
strict: [
# --bots | --no-bots
# clean only bot messages, or exclude bot messages from cleaning
bots: :boolean,
# --channel <channel:textchannel>
# clean in the given channel instead of the current one
channel: :string,
# --limit <limit:int>
# clean at most `limit` messages
limit: :integer,
# --user <user:snowflake|user>
# clean only messages by `user`, can be specified multiple times
user: [:string, :keep],
# --content <str>
# clean only messages containing `content` (case-insensitive)
content: :string
]
)
end
@impl true
def command(msg, {options, [], []}) when options != [] do
with {:ok, target_channel_id} <-
parse_channel(msg.guild_id, options[:channel], msg.channel_id),
limit <- min(Keyword.get(options, :limit, 100), 1000),
{:ok, messages} when messages != [] <-
Api.get_channel_messages(target_channel_id, limit, {:before, msg.id}),
{:ok, message_stream} <- apply_filter(messages, :bots, options[:bots], msg.guild_id),
{:ok, message_stream} <-
apply_filter(message_stream, :user, options[:user], msg.guild_id),
{:ok, message_stream} <-
apply_filter(message_stream, :content, options[:content], msg.guild_id),
false <- Enum.empty?(message_stream),
messages_to_delete <- Enum.to_list(message_stream),
message_ids <- Enum.map(messages_to_delete, & &1.id),
{:ok} <- Api.bulk_delete_messages(msg.channel_id, message_ids) do
Api.create_reaction(msg.channel_id, msg.id, "👌")
log_content =
messages_to_delete
|> Stream.map(
&"#{String.pad_leading(&1.author.username, 20)}##{&1.author.discriminator}: #{&1.content}"
)
|> Enum.reverse()
|> Enum.join("\n")
ModLog.emit(
msg.guild_id,
"MESSAGE_CLEAN",
"#{Humanizer.human_user(msg.author)} deleted" <>
" #{length(messages_to_delete)} messages in <##{target_channel_id}>",
file: %{
name: "deleted_messages.log",
body: log_content
}
)
else
{:ok, []} ->
# No messages returned from the API call
response = "🚫 no messages found, does the bot have `READ_MESSAGE_HISTORY` "
{:ok, _msg} = Api.create_message(msg.channel_id, response)
# `message_stream` is empty
true ->
# No messages found after filter application
response = "🚫 no messages found matching the given options"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
error ->
response = ErrorFormatters.fmt(msg, error)
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
def command(msg, {[], [], []}) do
response =
"ℹ️ usage: `#{List.first(usage())}` or `#{List.last(usage())}`, " <>
"see `help clean` for options"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, {[], [maybe_amount | []], []}) do
case Integer.parse(maybe_amount) do
{amount, ""} ->
command(msg, {[limit: amount], [], []})
:error ->
response =
"🚫 expected options or limit to prune as sole argument, " <> "see `help clean` for help"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
def command(msg, {[], [_maybe_amount | _unrecognized_args], []}) do
{:ok, _msg} =
Api.create_message(
msg.channel_id,
"🚫 expected the message limit as the sole argument, but got some other unrecognized args"
)
end
def command(msg, {options, args, []}) when options != [] and args != [] do
{:ok, _msg} =
Api.create_message(
msg.channel_id,
"🚫 expected either a sole argument (amount to delete) or exact options, got both"
)
end
def command(msg, {_parsed, _args, invalid}) when invalid != [] do
invalid_args = Parsers.describe_invalid_args(invalid)
{:ok, _msg} =
Api.create_message(
msg.channel_id,
"🚫 unrecognized argument(s) or invalid value: #{invalid_args}"
)
end
@spec parse_channel(
invocation_guild_id :: Guild.id(),
passed_channel :: String.t() | nil,
default_channel_id :: Channel.id()
) :: {:ok, Channel.id()} | {:error, String.t()}
defp parse_channel(_guild_id, nil, default_id), do: {:ok, default_id}
defp parse_channel(guild_id, passed_channel, _default_id) do
case Converters.to_channel(guild_id, passed_channel) do
{:ok, channel} -> {:ok, channel.id}
{:error, reason} -> {:error, "could not parse `channel` argument: #{reason}"}
end
end
@spec apply_filter([Message.t()], atom(), String.t(), Guild.id()) ::
{:ok, [Message.t()]} | {:error, String.t()}
defp apply_filter(messages, option_name, option_val, guild_id)
defp apply_filter(messages, :bots, nil, _guild_id), do: {:ok, messages}
# `--bots` given: exclude non-bots
defp apply_filter(messages, :bots, true, _guild_id),
do: {:ok, Stream.filter(messages, & &1.author.bot)}
# `--no-bots` given: exclude bots
defp apply_filter(messages, :bots, false, _guild_id),
do: {:ok, Stream.reject(messages, & &1.author.bot)}
defp apply_filter(messages, :user, nil, _guild_id), do: {:ok, messages}
# single `--user` flag given, `OptionParser` passes it as a string
defp apply_filter(messages, :user, user, _guild_id) when is_bitstring(user) do
[%Message{channel_id: channel_id}] = Enum.take(messages, 1)
with {:ok, guild_id} <- ChannelGuild.get_guild(channel_id),
{:ok, snowflake} <- parse_snowflake(guild_id, user) do
filtered = Stream.filter(messages, &(&1.author.id == snowflake))
{:ok, filtered}
else
error ->
error
end
end
# multiple `--user` flags given
defp apply_filter(messages, :user, users, guild_id) do
parsed_snowflakes = Enum.map(users, &parse_snowflake(guild_id, &1))
# Did any given flag not parse correctly?
if Enum.any?(parsed_snowflakes, &match?({:error, _reason}, &1)) do
# If yes, build a string of errors and return it.
error_description =
parsed_snowflakes
|> Stream.filter(&match?({:error, _reason}, &1))
|> Stream.map(&elem(&1, 1))
|> Stream.map(&"• #{&1}")
|> Enum.join("\n")
{:error, "🚫 failed to parse `--user` flag:\n#{error_description}"}
else
# If not, return only messages that were sent by the given snowflakes.
filtered_messages = Stream.filter(messages, &(&1.author.id in parsed_snowflakes))
{:ok, filtered_messages}
end
end
defp apply_filter(messages, :content, nil, _guild_id), do: {:ok, messages}
defp apply_filter(messages, :content, content, _guild_id) do
{:ok, Stream.filter(messages, &(content in &1.content))}
end
@spec parse_snowflake(Guild.id(), String.t()) :: {:ok, User.id()} | {:error, String.t()}
defp parse_snowflake(guild_id, user_string) do
  # A fully-numeric string is assumed to be a raw snowflake passed directly
  # to `--user`. The member converter would also handle this, but it only
  # searches members currently on the guild — a raw snowflake is the only
  # way to reference a user who has since left the server.
  case Integer.parse(user_string) do
    {snowflake, ""} -> {:ok, snowflake}
    _not_a_plain_integer -> member_to_snowflake(guild_id, user_string)
  end
end

# Resolve a human-readable member description (name, mention, ...) to the
# matching member's user id via the converter; converter errors pass through.
defp member_to_snowflake(guild_id, user_string) do
  case Converters.to_member(guild_id, user_string) do
    {:ok, member} -> {:ok, member.user.id}
    error -> error
  end
end
end
|
lib/bolt/cogs/clean.ex
| 0.867962
| 0.515376
|
clean.ex
|
starcoder
|
defmodule Oban.Config do
  @moduledoc """
  The Config struct validates and encapsulates Oban instance state.
  Typically, you won't use the Config module directly. Oban automatically creates a Config struct
  on initialization and passes it through to all supervised children with the `:conf` key.
  To fetch a running Oban supervisor's config, see `Oban.config/1`.
  """
  alias Oban.Validation

  @type t :: %__MODULE__{
          dispatch_cooldown: pos_integer(),
          engine: module(),
          get_dynamic_repo: nil | (() -> pid() | atom()),
          log: false | Logger.level(),
          name: Oban.name(),
          node: String.t(),
          notifier: module(),
          peer: false | module(),
          plugins: false | [module() | {module() | Keyword.t()}],
          prefix: String.t(),
          queues: false | [{atom() | binary(), pos_integer() | Keyword.t()}],
          repo: module(),
          shutdown_grace_period: timeout()
        }

  # :node and :repo have no usable defaults; everything else does.
  @enforce_keys [:node, :repo]
  defstruct dispatch_cooldown: 5,
            engine: Oban.Queue.BasicEngine,
            notifier: Oban.Notifiers.Postgres,
            name: Oban,
            node: nil,
            peer: Oban.Peer,
            plugins: [],
            prefix: "public",
            queues: [],
            repo: nil,
            shutdown_grace_period: :timer.seconds(15),
            log: false,
            get_dynamic_repo: nil

  # Legacy top-level options that normalize/1 folds into the Cron plugin.
  @cron_keys [:crontab, :timezone]
  # Accepted values for the :log option; `false` disables logging entirely.
  @log_levels ~w(false emergency alert critical error warning warn notice info debug)a

  @doc """
  Generate a Config struct after normalizing and verifying Oban options.
  See `Oban.start_link/1` for a comprehensive description of available options.
  ## Example
  Generate a minimal config with only a `:repo`:
      iex> Oban.Config.new(repo: Oban.Test.Repo)
  """
  @spec new([Oban.option()]) :: t()
  def new(opts) when is_list(opts) do
    # Normalize legacy/shorthand options first so validation sees the final
    # shape, then validate (raising on failure) before building the struct.
    opts = normalize(opts)

    Validation.validate!(opts, &validate/1)

    opts =
      opts
      |> Keyword.update!(:queues, &normalize_queues/1)
      |> Keyword.update!(:plugins, &normalize_plugins/1)

    struct!(__MODULE__, opts)
  end

  @doc """
  Verify configuration options.
  This helper is used by `new/1`, and therefore by `Oban.start_link/1`, to verify configuration
  options when an Oban supervisor starts. It is provided publicly to aid in configuration testing,
  as `test` config may differ from `prod` config.
  # Example
  Validating top level options:
      iex> Oban.Config.validate(name: Oban)
      :ok
      iex> Oban.Config.validate(name: Oban, log: false)
      :ok
      iex> Oban.Config.validate(node: {:not, :binary})
      {:error, "expected :node to be a non-empty binary, got: {:not, :binary}"}
      iex> Oban.Config.validate(plugins: true)
      {:error, "expected :plugins to be a list, got: true"}
  Validating plugin options:
      iex> Oban.Config.validate(plugins: [{Oban.Plugins.Pruner, max_age: 60}])
      :ok
      iex> Oban.Config.validate(plugins: [{Oban.Plugins.Pruner, max_age: 0}])
      {:error, "expected :max_age to be a positive integer, got: 0"}
  """
  @spec validate([Oban.option()]) :: :ok | {:error, String.t()}
  def validate(opts) when is_list(opts) do
    opts
    |> normalize()
    |> Validation.validate(&validate_opt/1)
  end

  # Determine a stable identifier for this node. Preference order: the
  # distributed-Erlang node name, then the Heroku "DYNO" env var, and
  # finally the OS hostname. `env` is injectable for testing.
  @doc false
  @spec node_name(%{optional(binary()) => binary()}) :: binary()
  def node_name(env \\ System.get_env()) do
    cond do
      Node.alive?() ->
        to_string(node())

      Map.has_key?(env, "DYNO") ->
        Map.get(env, "DYNO")

      true ->
        :inet.gethostname()
        |> elem(1)
        |> to_string()
    end
  end

  # Build a printable "Name.node" identifier for this instance.
  @doc false
  @spec to_ident(t()) :: binary()
  def to_ident(%__MODULE__{name: name, node: node}) do
    inspect(name) <> "." <> to_string(node)
  end

  # Check whether an identifier string refers to this instance.
  @doc false
  @spec match_ident?(t(), binary()) :: boolean()
  def match_ident?(%__MODULE__{} = conf, ident) when is_binary(ident) do
    to_ident(conf) == ident
  end

  # Validation

  defp validate_opt({:dispatch_cooldown, cooldown}) do
    Validation.validate_integer(:dispatch_cooldown, cooldown)
  end

  # Engines must implement at least `init/2` to be usable.
  defp validate_opt({:engine, engine}) do
    if Code.ensure_loaded?(engine) and function_exported?(engine, :init, 2) do
      :ok
    else
      {:error, "expected :engine to be an Oban.Queue.Engine, got: #{inspect(engine)}"}
    end
  end

  # Notifiers must implement at least `listen/2` to be usable.
  defp validate_opt({:notifier, notifier}) do
    if Code.ensure_loaded?(notifier) and function_exported?(notifier, :listen, 2) do
      :ok
    else
      {:error, "expected :notifier to be an Oban.Notifier, got: #{inspect(notifier)}"}
    end
  end

  # Any term is acceptable as a registry name.
  defp validate_opt({:name, _}), do: :ok

  defp validate_opt({:node, node}) do
    if is_binary(node) and String.trim(node) != "" do
      :ok
    else
      {:error, "expected :node to be a non-empty binary, got: #{inspect(node)}"}
    end
  end

  # `false` disables peer leadership entirely.
  defp validate_opt({:peer, peer}) do
    if peer == false or Code.ensure_loaded?(peer) do
      :ok
    else
      {:error, "expected :peer to be false or an Oban.Peer, got: #{inspect(peer)}"}
    end
  end

  defp validate_opt({:plugins, plugins}) do
    Validation.validate(:plugins, plugins, &validate_plugin/1)
  end

  # The prefix becomes part of schema-qualified SQL, so restrict it to a
  # safe alphanumeric/underscore character set.
  defp validate_opt({:prefix, prefix}) do
    if is_binary(prefix) and Regex.match?(~r/^[a-z0-9_]+$/i, prefix) do
      :ok
    else
      {:error, "expected :prefix to be an alphanumeric string, got: #{inspect(prefix)}"}
    end
  end

  defp validate_opt({:queues, queues}) do
    if Keyword.keyword?(queues) do
      Validation.validate(queues, &validate_queue/1)
    else
      {:error, "expected :queues to be a keyword list, got: #{inspect(queues)}"}
    end
  end

  # An Ecto repo is detected via its generated `config/0` function.
  defp validate_opt({:repo, repo}) do
    if Code.ensure_loaded?(repo) and function_exported?(repo, :config, 0) do
      :ok
    else
      {:error, "expected :repo to be an Ecto.Repo, got: #{inspect(repo)}"}
    end
  end

  defp validate_opt({:shutdown_grace_period, period}) do
    Validation.validate_integer(:shutdown_grace_period, period)
  end

  defp validate_opt({:log, log}) do
    if log in @log_levels do
      :ok
    else
      {:error, "expected :log to be one of #{inspect(@log_levels)}, got: #{inspect(log)}"}
    end
  end

  defp validate_opt({:get_dynamic_repo, fun}) do
    if is_nil(fun) or is_function(fun, 0) do
      :ok
    else
      {:error,
       "expected :get_dynamic_repo to be nil or a zero arity function, got: #{inspect(fun)}"}
    end
  end

  # Catch-all: any unrecognized option is an error.
  defp validate_opt(option) do
    {:error, "unknown option provided #{inspect(option)}"}
  end

  # A bare module is shorthand for `{module, []}`.
  defp validate_plugin(plugin) when not is_tuple(plugin), do: validate_plugin({plugin, []})

  defp validate_plugin({plugin, opts}) do
    name = inspect(plugin)

    cond do
      not is_atom(plugin) ->
        {:error, "plugin #{name} is not a valid module"}

      not Code.ensure_loaded?(plugin) ->
        {:error, "plugin #{name} could not be loaded"}

      not function_exported?(plugin, :init, 1) ->
        {:error, "plugin #{name} is invalid because it's missing an `init/1` function"}

      not Keyword.keyword?(opts) ->
        {:error, "expected #{name} options to be a keyword list, got: #{inspect(opts)}"}

      # Delegate to the plugin's own validation when it provides one.
      function_exported?(plugin, :validate, 1) ->
        plugin.validate(opts)

      true ->
        :ok
    end
  end

  # A queue value is either a positive integer limit or a keyword list of
  # queue options (normalize_queues/1 later unifies both forms).
  defp validate_queue({name, opts}) do
    if (is_integer(opts) and opts > 0) or Keyword.keyword?(opts) do
      :ok
    else
      {:error,
       "expected queue #{inspect(name)} opts to be a positive integer limit or a " <>
         "keyword list, got: #{inspect(opts)}"}
    end
  end

  # Normalization

  # Rewrite legacy/shorthand options into their canonical form. Note the
  # ordering: crontab/poll_interval are converted into plugins before the
  # :plugins default is filled in.
  defp normalize(opts) do
    opts
    |> crontab_to_plugin()
    |> poll_interval_to_plugin()
    |> Keyword.put_new(:node, node_name())
    |> Keyword.update(:plugins, [], &(&1 || []))
    |> Keyword.update(:queues, [], &(&1 || []))
    |> Keyword.delete(:circuit_backoff)
    # Drop the legacy `Oban.PostgresNotifier` pair so the struct default
    # applies instead — NOTE(review): presumably the module was renamed to
    # Oban.Notifiers.Postgres; confirm against the changelog.
    |> Enum.reject(&(&1 == {:notifier, Oban.PostgresNotifier}))
  end

  # Convert top-level :crontab/:timezone options into an Oban.Plugins.Cron
  # plugin entry; drop them entirely when no crontab is given.
  defp crontab_to_plugin(opts) do
    case {opts[:plugins], opts[:crontab]} do
      {plugins, [_ | _]} when is_list(plugins) or is_nil(plugins) ->
        {cron_opts, base_opts} = Keyword.split(opts, @cron_keys)

        plugin = {Oban.Plugins.Cron, cron_opts}

        Keyword.update(base_opts, :plugins, [plugin], &[plugin | &1])

      _ ->
        Keyword.drop(opts, @cron_keys)
    end
  end

  # Convert the legacy :poll_interval option into an Oban.Plugins.Stager
  # plugin entry; when absent, still ensure the Stager plugin is present.
  defp poll_interval_to_plugin(opts) do
    case {opts[:plugins], opts[:poll_interval]} do
      {plugins, interval} when (is_list(plugins) or is_nil(plugins)) and is_integer(interval) ->
        plugin = {Oban.Plugins.Stager, interval: interval}

        opts
        |> Keyword.delete(:poll_interval)
        |> Keyword.update(:plugins, [plugin], &[plugin | &1])

      {plugins, nil} when is_list(plugins) or is_nil(plugins) ->
        plugin = Oban.Plugins.Stager

        Keyword.update(opts, :plugins, [plugin], &[plugin | &1])

      _ ->
        Keyword.drop(opts, [:poll_interval])
    end
  end

  # Unify queue values: bare integer limits become `[limit: n]` keyword lists.
  defp normalize_queues(queues) do
    for {name, value} <- queues do
      opts = if is_integer(value), do: [limit: value], else: value

      {name, opts}
    end
  end

  # Manually specified plugins will be overwritten by auto-specified plugins unless we reverse the
  # plugin list. The order doesn't matter as they are supervised one-for-one.
  defp normalize_plugins(plugins) do
    plugins
    |> Enum.reverse()
    |> Enum.uniq_by(fn
      {module, _opts} -> module
      module -> module
    end)
  end
end
|
lib/oban/config.ex
| 0.919163
| 0.46952
|
config.ex
|
starcoder
|
defmodule ChangelogWeb.TimeView do
  @moduledoc """
  View helpers for formatting dates, times, and durations.
  Most helpers accept `nil` and return an empty/zero value so templates
  never have to nil-check timestamps themselves.
  """

  alias Timex.Duration

  @doc """
  Returns the Monday closest to `date`: Tue-Thu shift backwards, Fri-Sun
  shift forwards to the next Monday.
  """
  def closest_monday_to(date) do
    offset =
      case Timex.weekday(date) do
        1 -> 0
        2 -> -1
        3 -> -2
        4 -> -3
        5 -> 3
        6 -> 2
        7 -> 1
      end

    Timex.shift(date, days: offset)
  end

  @doc """
  Formats a duration in seconds as `"MM:SS"`, or `"H:MM:SS"` for an hour or
  more. `nil` is treated as zero seconds.
  """
  def duration(nil), do: duration(0)

  def duration(seconds) when seconds < 3600 do
    minutes = div(seconds, 60)
    seconds = rem(seconds, 60)
    "#{leading_zero(minutes)}:#{leading_zero(seconds)}"
  end

  def duration(seconds) when seconds >= 3600 do
    hours = div(seconds, 3600)
    remaining = rem(seconds, 3600)
    # Hours are not zero-padded; the recursive call renders "MM:SS".
    "#{hours}:#{duration(remaining)}"
  end

  @doc """
  Formats a timestamp (or a `"YYYY-MM-DD HH:MM:SS"` string) as `"YYYY-MM-DD"`.
  """
  def hacker_date(nil), do: ""

  def hacker_date(ts) when is_binary(ts) do
    {:ok, result} = Timex.parse(ts, "{YYYY}-{0M}-{0D} {h24}:{m}:{s}")
    hacker_date(result)
  end

  def hacker_date(ts) do
    {:ok, result} = Timex.format(ts, "{YYYY}-{0M}-{0D}")
    result
  end

  @doc "Returns the moment `hours` hours before now."
  def hours_ago(hours) do
    Timex.subtract(Timex.now(), Duration.from_hours(hours))
  end

  @doc "Returns the moment `hours` hours from now."
  def hours_from_now(hours) do
    Timex.add(Timex.now(), Duration.from_hours(hours))
  end

  @doc """
  Formats a timestamp (or a `"YYYY-MM-DD HH:MM:SS"` string) as `"Mon D, YYYY"`.
  """
  def pretty_date(nil), do: ""

  def pretty_date(ts) when is_binary(ts) do
    {:ok, result} = Timex.parse(ts, "{YYYY}-{0M}-{0D} {h24}:{m}:{s}")
    pretty_date(result)
  end

  def pretty_date(ts) do
    {:ok, result} = Timex.format(ts, "{Mshort} {D}, {YYYY}")
    result
  end

  @doc "Converts seconds to whole minutes, rounding to the nearest minute."
  def rounded_minutes(nil), do: rounded_minutes(0)

  def rounded_minutes(seconds) do
    round(seconds / 60)
  end

  @doc "Formats a timestamp per RFC 1123, as used in RSS feeds."
  def rss(nil), do: ""
  def rss(ts), do: format_to(ts, "{RFC1123}")

  @doc "Formats a timestamp per RFC 3339."
  def rfc3339(nil), do: ""
  def rfc3339(ts), do: format_to(ts, "{RFC3339}")

  defp format_to(ts, format) do
    {:ok, result} = Timex.format(ts, format)
    result
  end

  @doc """
  Parses a `"H:MM:SS"`, `"MM:SS"`, or `"SS"` duration string into total
  seconds. Non-binary input (including `nil`) is treated as zero.
  """
  def seconds(duration) when not is_binary(duration), do: seconds("00")

  def seconds(duration) do
    case String.split(duration, ":") do
      [h, m, s] -> to_seconds(:hours, h) + to_seconds(:minutes, m) + to_seconds(s)
      [m, s] -> to_seconds(:minutes, m) + to_seconds(s)
      [s] -> to_seconds(s)
      _ -> 0
    end
  end

  @doc "Formats a timestamp as `\"MM/DD/YY\"`."
  def terse_date(nil), do: ""

  def terse_date(ts) do
    {:ok, result} = Timex.format(ts, "{0M}/{0D}/{YY}")
    result
  end

  @doc "Builds a time.is link for the given timestamp."
  def time_is_url(nil), do: ""
  def time_is_url(ts), do: "https://time.is/#{DateTime.to_unix(ts)}"

  # BUG FIX: was `@ doc` (stray space after `@`), which only parses by
  # accident of `@` being an operator and is flagged by the formatter.
  @doc """
  Formats a timestamp for js-based relativism and display. See functions *Style
  functions in time.js for possible styles
  """
  def ts(ts, style \\ "admin")
  def ts(ts, _style) when is_nil(ts), do: ""

  def ts(ts, style) do
    {:ok, formatted} = Timex.format(ts, "{ISO:Extended:Z}")
    {:safe, "<span class='time' data-style='#{style}'>#{formatted}</span>"}
  end

  @doc "Returns an interval of `count` weeks starting at `start_date`'s week."
  def weeks(start_date \\ Timex.today(), count \\ 8) do
    Timex.Interval.new(
      from: Timex.beginning_of_week(start_date),
      until: [weeks: count],
      step: [weeks: 1]
    )
  end

  @doc "Formats a week's span as e.g. `\"Jan 01 - Jan 07\"`."
  def week_start_end(date) do
    start_date = Timex.beginning_of_week(date)
    end_date = Timex.end_of_week(date)
    {:ok, pretty_start} = Timex.format(start_date, "{Mshort} {0D}")
    {:ok, pretty_end} = Timex.format(end_date, "{Mshort} {0D}")
    "#{pretty_start} - #{pretty_end}"
  end

  defp to_seconds(:hours, str), do: string_to_rounded_integer(str) * 3600
  defp to_seconds(:minutes, str), do: string_to_rounded_integer(str) * 60
  defp to_seconds(str), do: string_to_rounded_integer(str)

  # Segments may contain fractional values (e.g. "12.5"); round them.
  defp string_to_rounded_integer(str) do
    if String.contains?(str, ".") do
      round(String.to_float(str))
    else
      String.to_integer(str)
    end
  end

  # Zero-pads a non-negative integer to two digits.
  defp leading_zero(integer) do
    if integer < 10 do
      "0#{integer}"
    else
      "#{integer}"
    end
  end
end
|
lib/changelog_web/views/time_view.ex
| 0.606498
| 0.525125
|
time_view.ex
|
starcoder
|
defmodule Mint.HTTP2 do
@moduledoc """
Processless HTTP client with support for HTTP/2.
This module provides a data structure that represents an HTTP/2 connection to
a given server. The connection is represented as an opaque struct `%Mint.HTTP2{}`.
The connection is a data structure and is not backed by a process, and all the
connection handling happens in the process that creates the struct.
This module and data structure work exactly like the ones described in the `Mint.HTTP`
module, with the exception that `Mint.HTTP2` specifically deals with HTTP/2 while
`Mint.HTTP` deals seamlessly with HTTP/1.1 and HTTP/2. For more information on
how to use the data structure and client architecture, see `Mint.HTTP`.
## HTTP/2 streams and requests
HTTP/2 introduces the concept of **streams**. A stream is an isolated conversation
between the client and the server. Each stream is unique and identified by a unique
**stream ID**, which means that there's no order when data comes on different streams
since they can be identified uniquely. A stream closely corresponds to a request, so
in this documentation and client we will mostly refer to streams as "requests".
We mentioned data on streams can come in arbitrary order, and streams are requests,
so the practical effect of this is that performing request A and then request B
does not mean that the response to request A will come before the response to request B.
This is why we identify each request with a unique reference returned by `request/5`.
See `request/5` for more information.
## Closed connection
In HTTP/2, the connection can either be open, closed, or only closed for writing.
When a connection is closed for writing, the client cannot send requests or stream
body chunks, but it can still read data that the server might be sending. When the
connection gets closed on the writing side, a `:server_closed_connection` error is
returned. `{:error, request_ref, error}` is returned for requests that haven't been
processed by the server, with the reason of `error` being `:unprocessed`.
These requests are safe to retry.
## HTTP/2 settings
HTTP/2 supports settings negotiation between servers and clients. The server advertises
its settings to the client and the client advertises its settings to the server. A peer
(server or client) has to acknowledge the settings advertised by the other peer before
those settings come into action (that's why it's called a negotiation).
A first settings negotiation happens right when the connection starts.
Servers and clients can renegotiate settings at any time during the life of the
connection.
Mint users don't need to care about settings acknowledgements directly since they're
handled transparently by `stream/2`.
To retrieve the server settings, you can use `get_server_setting/2`. Doing so is often
useful to be able to tune your requests based on the server settings.
To communicate client settings to the server, use `put_settings/2` or pass them when
starting up a connection with `connect/4`. Note that the server needs to acknowledge
the settings sent through `put_setting/2` before those settings come into effect. The
server ack is processed transparently by `stream/2`, but this means that if you change
a setting through `put_settings/2` and try to retrieve the value of that setting right
after with `get_client_setting/2`, you'll likely get the old value of that setting. Once
the server acknowledges the new settings, the updated value will be returned by
`get_client_setting/2`.
## Server push
HTTP/2 supports [server push](https://en.wikipedia.org/wiki/HTTP/2_Server_Push), which
is a way for a server to send a response to a client without the client needing to make
the corresponding request. The server sends a `:push_promise` response to a normal request:
this creates a new request reference. Then, the server sends normal responses for the newly
created request reference.
Let's see an example. We will ask the server for `"/index.html"` and the server will
send us a push promise for `"/style.css"`.
{:ok, conn} = Mint.HTTP2.connect(:https, "example.com", 443)
{:ok, conn, request_ref} = Mint.HTTP2.request(conn, "GET", "/index.html", _headers = [], _body = "")
next_message =
receive do
msg -> msg
end
{:ok, conn, responses} = Mint.HTTP2.stream(conn, next_message)
[
{:push_promise, ^request_ref, promised_request_ref, promised_headers},
{:status, ^request_ref, 200},
{:headers, ^request_ref, []},
{:data, ^request_ref, "<html>..."},
{:done, ^request_ref}
] = responses
promised_headers
#=> [{":method", "GET"}, {":path", "/style.css"}]
As you can see in the example above, when the server sends a push promise then a
`:push_promise` response is returned as a response to a request. The `:push_promise`
response contains a `promised_request_ref` and some `promised_headers`. The
`promised_request_ref` is the new request ref that pushed responses will be tagged with.
`promised_headers` are headers that tell the client *what request* the promised response
will respond to. The idea is that the server tells the client a request the client will
want to make and then preemptively sends a response for that request. Promised headers
will always include `:method`, `:path`, and `:authority`.
next_message =
receive do
msg -> msg
end
{:ok, conn, responses} = Mint.HTTP2.stream(conn, next_message)
[
{:status, ^promised_request_ref, 200},
{:headers, ^promised_request_ref, []},
{:data, ^promised_request_ref, "body { ... }"},
{:done, ^promised_request_ref}
]
The response to a promised request is like a response to any normal request.
### Disabling server pushes
HTTP/2 exposes a boolean setting for enabling or disabling server pushes with `:enable_push`.
You can pass this option when connecting or in `put_settings/2`. By default server push
is enabled.
"""
use Bitwise, skip_operators: true
import Mint.Core.Util
import Mint.HTTP2.Frame, except: [encode: 1, decode_next: 1]
alias Mint.{HTTPError, TransportError}
alias Mint.Types
alias Mint.Core.Util
alias Mint.HTTP2.Frame
require Logger
require Integer
@behaviour Mint.Core.Conn
## Constants
@connection_preface "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
@transport_opts [alpn_advertised_protocols: ["h2"]]
@default_window_size 65_535
@max_window_size 2_147_483_647
@default_max_frame_size 16_384
@valid_max_frame_size_range @default_max_frame_size..16_777_215
@user_agent "mint/" <> Mix.Project.config()[:version]
  # HTTP/2 connection struct.
  defstruct [
    # Transport things.
    :transport,
    :socket,
    :mode,
    # Host things.
    :hostname,
    :port,
    :scheme,
    # Connection state (open, closed, and so on).
    :state,
    # Fields of the connection.
    buffer: "",
    window_size: @default_window_size,
    # Header (de)compression tables, one per direction (HPAX).
    encode_table: HPAX.new(4096),
    decode_table: HPAX.new(4096),
    # Queue for sent PING frames.
    ping_queue: :queue.new(),
    # Queue for sent SETTINGS frames.
    client_settings_queue: :queue.new(),
    # Stream-set-related things.
    # NOTE(review): client-initiated stream IDs are odd-numbered; starting at
    # 3 suggests ID 1 is reserved (e.g. for an upgraded HTTP/1 request) — confirm.
    next_stream_id: 3,
    streams: %{},
    open_client_stream_count: 0,
    open_server_stream_count: 0,
    # Maps request references (as handed to callers) back to stream IDs.
    ref_to_stream_id: %{},
    # Settings that the server communicates to the client.
    server_settings: %{
      enable_push: true,
      max_concurrent_streams: 100,
      initial_window_size: @default_window_size,
      max_frame_size: @default_max_frame_size,
      max_header_list_size: :infinity,
      # Only supported by the server: https://www.rfc-editor.org/rfc/rfc8441.html#section-3
      enable_connect_protocol: false
    },
    # Settings that the client communicates to the server.
    client_settings: %{
      max_concurrent_streams: 100,
      max_frame_size: @default_max_frame_size,
      enable_push: true
    },
    # Headers being processed (when headers are split into multiple frames with CONTINUATIONS, all
    # the continuation frames must come one right after the other).
    headers_being_processed: nil,
    # Stores the headers returned by the proxy in the `CONNECT` method
    proxy_headers: [],
    # Private store.
    private: %{}
  ]
## Types
@typedoc """
HTTP/2 setting with its value.
This type represents both server settings as well as client settings. To retrieve
server settings use `get_server_setting/2` and to retrieve client settings use
`get_client_setting/2`. To send client settings to the server, see `put_settings/2`.
The supported settings are the following:
* `:header_table_size` - (integer) corresponds to `SETTINGS_HEADER_TABLE_SIZE`.
* `:enable_push` - (boolean) corresponds to `SETTINGS_ENABLE_PUSH`. Sets whether
push promises are supported. If you don't want to support push promises,
use `put_settings/2` to tell the server that your client doesn't want push promises.
* `:max_concurrent_streams` - (integer) corresponds to `SETTINGS_MAX_CONCURRENT_STREAMS`.
Tells what is the maximum number of streams that the peer sending this (client or server)
supports. As mentioned in the module documentation, HTTP/2 streams are equivalent to
requests, so knowing the maximum number of streams that the server supports can be useful
to know how many concurrent requests can be open at any time. Use `get_server_setting/2`
to find out how many concurrent streams the server supports.
* `:initial_window_size` - (integer smaller than `#{inspect(@max_window_size)}`)
corresponds to `SETTINGS_INITIAL_WINDOW_SIZE`. Tells what is the value of
the initial HTTP/2 window size for the peer that sends this setting.
* `:max_frame_size` - (integer in the range `#{inspect(@valid_max_frame_size_range)}`)
corresponds to `SETTINGS_MAX_FRAME_SIZE`. Tells what is the maximum size of an HTTP/2
frame for the peer that sends this setting.
* `:max_header_list_size` - (integer) corresponds to `SETTINGS_MAX_HEADER_LIST_SIZE`.
* `:enable_connect_protocol` - (boolean) corresponds to `SETTINGS_ENABLE_CONNECT_PROTOCOL`.
Sets whether the client may invoke the extended connect protocol which is used to
bootstrap WebSocket connections.
"""
@type setting() ::
{:enable_push, boolean()}
| {:max_concurrent_streams, pos_integer()}
| {:initial_window_size, 1..2_147_483_647}
| {:max_frame_size, 16_384..16_777_215}
| {:max_header_list_size, :infinity | pos_integer()}
| {:enable_connect_protocol, boolean()}
@typedoc """
HTTP/2 settings.
See `t:setting/0`.
"""
@type settings() :: [setting()]
@typedoc """
An HTTP/2-specific error reason.
The values can be:
* `:closed` - when you try to make a request or stream a body chunk but the connection
is closed.
* `:closed_for_writing` - when you try to make a request or stream a body chunk but
the connection is closed for writing. This means you cannot issue any more requests.
See the "Closed connection" section in the module documentation for more information.
* `:too_many_concurrent_requests` - when the maximum number of concurrent requests
allowed by the server is reached. To find out what this limit is, use `get_setting/2`
with the `:max_concurrent_streams` setting name.
* `{:max_header_list_size_exceeded, size, max_size}` - when the maximum size of
the header list is reached. `size` is the actual value of the header list size,
`max_size` is the maximum value allowed. See `get_setting/2` to retrieve the
value of the max size.
* `{:exceeds_window_size, what, window_size}` - when the data you're trying to send
exceeds the window size of the connection (if `what` is `:connection`) or of a request
(if `what` is `:request`). `window_size` is the allowed window size. See
`get_window_size/2`.
* `{:stream_not_found, stream_id}` - when the given request is not found.
* `:unknown_request_to_stream` - when you're trying to stream data on an unknown
request.
* `:request_is_not_streaming` - when you try to send data (with `stream_request_body/3`)
on a request that is not open for streaming.
* `:unprocessed` - when a request was closed because it was not processed by the server.
When this error is returned, it means that the server hasn't processed the request at all,
so it's safe to retry the given request on a different or new connection.
* `{:server_closed_request, error_code}` - when the server closes the request.
`error_code` is the reason why the request was closed.
* `{:server_closed_connection, reason, debug_data}` - when the server closes the connection
gracefully or because of an error. In HTTP/2, this corresponds to a `GOAWAY` frame.
`error` is the reason why the connection was closed. `debug_data` is additional debug data.
* `{:frame_size_error, frame}` - when there's an error with the size of a frame.
`frame` is the frame type, such as `:settings` or `:window_update`.
* `{:protocol_error, debug_data}` - when there's a protocol error.
`debug_data` is a string that explains the nature of the error.
* `{:compression_error, debug_data}` - when there's a header compression error.
`debug_data` is a string that explains the nature of the error.
* `{:flow_control_error, debug_data}` - when there's a flow control error.
`debug_data` is a string that explains the nature of the error.
"""
@type error_reason() :: term()
@opaque t() :: %Mint.HTTP2{}
## Public interface
@doc """
Same as `Mint.HTTP.connect/4`, but forces a HTTP/2 connection.
"""
@spec connect(Types.scheme(), Types.address(), :inet.port_number(), keyword()) ::
{:ok, t()} | {:error, Types.error()}
def connect(scheme, address, port, opts \\ []) do
hostname = Mint.Core.Util.hostname(opts, address)
transport_opts =
opts
|> Keyword.get(:transport_opts, [])
|> Keyword.merge(@transport_opts)
|> Keyword.put(:hostname, hostname)
case negotiate(address, port, scheme, transport_opts) do
{:ok, socket} ->
initiate(scheme, socket, hostname, port, opts)
{:error, reason} ->
{:error, reason}
end
end
@doc false
@spec upgrade(
Types.scheme(),
Mint.Types.socket(),
Types.scheme(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def upgrade(old_scheme, socket, new_scheme, hostname, port, opts) do
transport = scheme_to_transport(new_scheme)
transport_opts =
opts
|> Keyword.get(:transport_opts, [])
|> Keyword.merge(@transport_opts)
with {:ok, socket} <- transport.upgrade(socket, old_scheme, hostname, port, transport_opts) do
initiate(new_scheme, socket, hostname, port, opts)
end
end
@doc """
See `Mint.HTTP.close/1`.
"""
  @impl true
  @spec close(t()) :: {:ok, t()}
  def close(conn)

  # Open connection: attempt a peaceful shutdown. send_connection_error!/3
  # appears to signal via throw (note the trailing `!` and the `catch`
  # clauses below) — the normal return path is through `catch`.
  def close(%__MODULE__{state: :open} = conn) do
    send_connection_error!(conn, :no_error, "connection peacefully closed by client")
  catch
    # The :no_error HTTPError is the expected "peaceful close" signal.
    {:mint, conn, %HTTPError{reason: {:no_error, _}}} ->
      {:ok, conn}

    # We could have an error sending the GOAWAY frame, but we want to ignore that since
    # we're closing the connection anyways.
    {:mint, conn, %TransportError{}} ->
      conn = put_in(conn.state, :closed)
      {:ok, conn}
  end

  # After a GOAWAY, just close the underlying socket (result ignored) and
  # mark the connection closed.
  def close(%__MODULE__{state: {:goaway, _error_code, _debug_data}} = conn) do
    _ = conn.transport.close(conn.socket)
    {:ok, put_in(conn.state, :closed)}
  end

  # Closing an already-closed connection is a no-op.
  def close(%__MODULE__{state: :closed} = conn) do
    {:ok, conn}
  end
@doc """
See `Mint.HTTP.open?/1`.
"""
@impl true
@spec open?(t(), :read | :write | :read_write) :: boolean()
def open?(%Mint.HTTP2{state: state} = _conn, type \\ :read_write)
when type in [:read, :write, :read_write] do
case state do
:open -> true
{:goaway, _error_code, _debug_data} -> type == :read
:closed -> false
end
end
@doc """
See `Mint.HTTP.request/5`.
In HTTP/2, opening a request means opening a new HTTP/2 stream (see the
module documentation). This means that a request could fail because the
maximum number of concurrent streams allowed by the server has been reached.
In that case, the error reason `:too_many_concurrent_requests` is returned.
If you want to avoid incurring in this error, you can retrieve the value of
the maximum number of concurrent streams supported by the server through
`get_server_setting/2` (passing in the `:max_concurrent_streams` setting name).
## Header list size
In HTTP/2, the server can optionally specify a maximum header list size that
the client needs to respect when sending headers. The header list size is calculated
by summing the length (in bytes) of each header name plus value, plus 32 bytes for
each header. Note that pseudo-headers (like `:path` or `:method`) count towards
this size. If the size is exceeded, an error is returned. To check what the size
is, use `get_server_setting/2`.
## Request body size
If the request body size will exceed the window size of the HTTP/2 stream created by the
request or the window size of the connection Mint will return a `:exceeds_window_size`
error.
To ensure you do not exceed the window size it is recommended to stream the request
body by initially passing `:stream` as the body and sending the body in chunks using
`stream_request_body/3` and using `get_window_size/2` to get the window size of the
request and connection.
"""
  @impl true
  @spec request(
          t(),
          method :: String.t(),
          path :: String.t(),
          Types.headers(),
          body :: iodata() | nil | :stream
        ) ::
          {:ok, t(), Types.request_ref()}
          | {:error, t(), Types.error()}
  def request(conn, method, path, headers, body)

  # Fully closed connection: no requests possible.
  def request(%Mint.HTTP2{state: :closed} = conn, _method, _path, _headers, _body) do
    {:error, conn, wrap_error(:closed)}
  end

  # After a GOAWAY the connection is closed for writing, so new requests
  # cannot be opened (see "Closed connection" in the moduledoc).
  def request(
        %Mint.HTTP2{state: {:goaway, _error_code, _debug_data}} = conn,
        _method,
        _path,
        _headers,
        _body
      ) do
    {:error, conn, wrap_error(:closed_for_writing)}
  end

  def request(%Mint.HTTP2{} = conn, method, path, headers, body)
      when is_binary(method) and is_binary(path) and is_list(headers) do
    # Normalize headers: lowercase the names, add pseudo-headers derived from
    # the method/path/conn, add default headers based on the body, and move
    # pseudo-headers to the front of the list.
    headers =
      headers
      |> downcase_header_names()
      |> add_pseudo_headers(conn, method, path)
      |> add_default_headers(body)
      |> sort_pseudo_headers_to_front()

    # Open a new stream for this request, encode and send the frames.
    {conn, stream_id, ref} = open_stream(conn)
    {conn, payload} = encode_request_payload(conn, stream_id, headers, body)
    conn = send!(conn, payload)
    {:ok, conn, ref}
  catch
    :throw, {:mint, _conn, reason} ->
      # The stream is invalid and "_conn" may be tracking it, so we return the original connection instead.
      {:error, conn, reason}
  end
@doc """
See `Mint.HTTP.stream_request_body/3`.
"""
  @impl true
  @spec stream_request_body(
          t(),
          Types.request_ref(),
          iodata() | :eof | {:eof, trailing_headers :: Types.headers()}
        ) :: {:ok, t()} | {:error, t(), Types.error()}
  def stream_request_body(conn, request_ref, chunk)

  # Fully closed connection: nothing can be streamed.
  def stream_request_body(%Mint.HTTP2{state: :closed} = conn, _request_ref, _chunk) do
    {:error, conn, wrap_error(:closed)}
  end

  # After a GOAWAY the connection is closed for writing, so body chunks
  # cannot be sent either.
  def stream_request_body(
        %Mint.HTTP2{state: {:goaway, _error_code, _debug_data}} = conn,
        _request_ref,
        _chunk
      ) do
    {:error, conn, wrap_error(:closed_for_writing)}
  end

  def stream_request_body(%Mint.HTTP2{} = conn, request_ref, chunk)
      when is_reference(request_ref) do
    # Resolve the caller-facing request ref back to its HTTP/2 stream ID;
    # an unknown ref means the request was never opened (or already removed).
    case Map.fetch(conn.ref_to_stream_id, request_ref) do
      {:ok, stream_id} ->
        {conn, payload} = encode_stream_body_request_payload(conn, stream_id, chunk)
        conn = send!(conn, payload)
        {:ok, conn}

      :error ->
        {:error, conn, wrap_error(:unknown_request_to_stream)}
    end
  catch
    :throw, {:mint, _conn, reason} ->
      # The stream is invalid and "_conn" may be tracking it, so we return the original connection instead.
      {:error, conn, reason}
  end
@doc """
Pings the server.

This function is specific to HTTP/2 connections. It sends a **ping** request to
the server `conn` is connected to. A `{:ok, conn, request_ref}` tuple is returned,
where `conn` is the updated connection and `request_ref` is a unique reference that
identifies this ping request. The response to a ping request is returned by `stream/2`
as a `{:pong, request_ref}` tuple. If there's an error, this function returns
`{:error, conn, reason}` where `conn` is the updated connection and `reason` is the
error reason.

`payload` must be an 8-byte binary with arbitrary content. When the server responds to
a ping request, it will use that same payload. By default, the payload is an 8-byte
binary with all bits set to `0`.

Pinging can be used to measure the latency with the server and to ensure the connection
is alive and well.

## Examples

    {:ok, conn, ref} = Mint.HTTP2.ping(conn)

"""
# Fix: the payload is 8 *bytes* (64 bits), so the binary type is <<_::64>>.
# The previous <<_::8>> denoted a 1-byte binary, contradicting the
# byte_size(payload) == 8 guard below.
@spec ping(t(), <<_::64>>) :: {:ok, t(), Types.request_ref()} | {:error, t(), Types.error()}
def ping(%Mint.HTTP2{} = conn, payload \\ :binary.copy(<<0>>, 8))
    when byte_size(payload) == 8 do
  # send_ping/2 writes the PING frame and queues {ref, payload} so stream/2
  # can match the server's echoed payload back to this request_ref.
  {conn, ref} = send_ping(conn, payload)
  {:ok, conn, ref}
catch
  :throw, {:mint, conn, error} -> {:error, conn, error}
end
@doc """
Communicates the given client settings to the server.

This function is HTTP/2-specific.

This function takes a connection and a keyword list of HTTP/2 settings and sends
the values of those settings to the server. The settings won't be effective until
the server acknowledges them, which will be handled transparently by `stream/2`.

This function returns `{:ok, conn}` when sending the settings to the server is
successful, with `conn` being the updated connection. If there's an error, this
function returns `{:error, conn, reason}` with `conn` being the updated connection
and `reason` being the reason of the error.

## Supported settings

See `t:setting/0` for the supported settings. You can see the meaning
of these settings [in the corresponding section in the HTTP/2
RFC](https://http2.github.io/http2-spec/#rfc.section.6.5.2).

See the "HTTP/2 settings" section in the module documentation for more information.

## Examples

    {:ok, conn} = Mint.HTTP2.put_settings(conn, max_frame_size: 100)

"""
@spec put_settings(t(), settings()) :: {:ok, t()} | {:error, t(), Types.error()}
def put_settings(%Mint.HTTP2{} = conn, settings) when is_list(settings) do
  # send_settings/2 validates, writes the SETTINGS frame, and queues the
  # params until the server ACKs them; it throws on transport failure.
  {:ok, send_settings(conn, settings)}
catch
  :throw, {:mint, conn, error} -> {:error, conn, error}
end
@doc """
Gets the value of the given HTTP/2 server settings.

This function returns the value of the given HTTP/2 setting that the server
advertised to the client. This function is HTTP/2 specific.

For more information on HTTP/2 settings, see [the related section in
the RFC](https://http2.github.io/http2-spec/#rfc.section.6.5.2).

See the "HTTP/2 settings" section in the module documentation for more information.

## Supported settings

The possible settings that can be retrieved are described in `t:setting/0`.
Any other atom passed as `name` will raise an error.

## Examples

    Mint.HTTP2.get_server_setting(conn, :max_concurrent_streams)
    #=> 500

"""
@spec get_server_setting(t(), atom()) :: term()
def get_server_setting(%Mint.HTTP2{} = conn, name) when is_atom(name) do
  # Raises ArgumentError (via get_setting/2) for unknown setting names.
  get_setting(conn.server_settings, name)
end
@doc """
Gets the value of the given HTTP/2 client setting.

This function returns the value of the given HTTP/2 setting that the client
advertised to the server. Client settings can be advertised through `put_settings/2`
or when starting up a connection.

Client settings have to be acknowledged by the server before coming into effect.

This function is HTTP/2 specific. For more information on HTTP/2 settings, see
[the related section in the RFC](https://http2.github.io/http2-spec/#rfc.section.6.5.2).

See the "HTTP/2 settings" section in the module documentation for more information.

## Supported settings

The possible settings that can be retrieved are described in `t:setting/0`.
Any other atom passed as `name` will raise an error.

## Examples

    Mint.HTTP2.get_client_setting(conn, :max_concurrent_streams)
    #=> 500

"""
@spec get_client_setting(t(), atom()) :: term()
def get_client_setting(%Mint.HTTP2{} = conn, name) when is_atom(name) do
  # Raises ArgumentError (via get_setting/2) for unknown setting names.
  get_setting(conn.client_settings, name)
end
# Looks up a setting by name, raising on unknown names so typos surface
# immediately instead of silently returning nil.
defp get_setting(settings, name) do
  Map.get_lazy(settings, name, fn ->
    raise ArgumentError, "unknown HTTP/2 setting: #{inspect(name)}"
  end)
end
@doc """
Cancels an in-flight request.

This function is HTTP/2 specific. It cancels an in-flight request. The server could have
already sent responses for the request you want to cancel: those responses will be parsed
by the connection but not returned to the user. No more responses
to a request will be returned after you call `cancel_request/2` on that request.

If there's no error in canceling the request, `{:ok, conn}` is returned where `conn` is
the updated connection. If there's an error, `{:error, conn, reason}` is returned where
`conn` is the updated connection and `reason` is the error reason.

## Examples

    {:ok, conn, ref} = Mint.HTTP2.request(conn, "GET", "/", _headers = [])
    {:ok, conn} = Mint.HTTP2.cancel_request(conn, ref)

"""
@spec cancel_request(t(), Types.request_ref()) :: {:ok, t()} | {:error, t(), Types.error()}
def cancel_request(%Mint.HTTP2{} = conn, request_ref) when is_reference(request_ref) do
  case conn.ref_to_stream_id do
    %{^request_ref => stream_id} ->
      # Sends RST_STREAM with the CANCEL error code and drops the stream.
      {:ok, close_stream!(conn, stream_id, _error_code = :cancel)}

    _no_such_ref ->
      # The request is already gone (finished or previously cancelled), so
      # cancelling it again is a no-op.
      {:ok, conn}
  end
catch
  :throw, {:mint, conn, error} -> {:error, conn, error}
end
@doc """
Returns the window size of the connection or of a single request.

This function is HTTP/2 specific. It returns the window size of
either the connection if `connection_or_request` is `:connection` or of a single
request if `connection_or_request` is `{:request, request_ref}`.

Use this function to check the window size of the connection before sending a
full request. Also use this function to check the window size of both the
connection and of a request if you want to stream body chunks on that request.

For more information on flow control and window sizes in HTTP/2, see the section
below.

## HTTP/2 flow control

In HTTP/2, flow control is implemented through a
window size. When the client sends data to the server, the window size is decreased
and the server needs to "refill" it on the client side. You don't need to take care of
the refilling of the client window as it happens behind the scenes in `stream/2`.

A window size is kept for the entire connection and all requests affect this window
size. A window size is also kept per request.

The only thing that affects the window size is the body of a request, regardless of
if it's a full request sent with `request/5` or body chunks sent through
`stream_request_body/3`. That means that if we make a request with a body that is
five bytes long, like `"hello"`, the window size of the connection and the window size
of that particular request will decrease by five bytes.

If we use all the window size before the server refills it, functions like
`request/5` will return an error.

## Examples

On the connection:

    Mint.HTTP2.get_window_size(conn, :connection)
    #=> 65_536

On a single streamed request:

    {:ok, conn, request_ref} = Mint.HTTP2.request(conn, "GET", "/", [], :stream)
    Mint.HTTP2.get_window_size(conn, {:request, request_ref})
    #=> 65_536

    {:ok, conn} = Mint.HTTP2.stream_request_body(conn, request_ref, "hello")
    Mint.HTTP2.get_window_size(conn, {:request, request_ref})
    #=> 65_531

"""
# Doc fix: the examples previously used `{:request_ref, request_ref}`, which
# does not match the spec or the function clauses below ({:request, ref}),
# and mixed `HTTP.`/`HTTP2.` module names.
@spec get_window_size(t(), :connection | {:request, Types.request_ref()}) :: non_neg_integer()
def get_window_size(conn, connection_or_request)

def get_window_size(%Mint.HTTP2{} = conn, :connection) do
  conn.window_size
end

def get_window_size(%Mint.HTTP2{} = conn, {:request, request_ref}) do
  case Map.fetch(conn.ref_to_stream_id, request_ref) do
    {:ok, stream_id} ->
      conn.streams[stream_id].window_size

    :error ->
      raise ArgumentError,
            "request with request reference #{inspect(request_ref)} was not found"
  end
end
@doc """
See `Mint.HTTP.stream/2`.
"""
@impl true
@spec stream(t(), term()) ::
        {:ok, t(), [Types.response()]}
        | {:error, t(), Types.error(), [Types.response()]}
        | :unknown
def stream(conn, message)

# Transport-level error on our socket: wrap the reason and mark the
# connection closed. Nothing more can be read, so no responses are returned.
def stream(%Mint.HTTP2{socket: socket} = conn, {tag, socket, reason})
    when tag in [:tcp_error, :ssl_error] do
  error = conn.transport.wrap_error(reason)
  {:error, %{conn | state: :closed}, error, _responses = []}
end

# The peer closed the socket; handle_closed/1 decides whether this is an
# error (streams still open) or a clean close.
def stream(%Mint.HTTP2{socket: socket} = conn, {tag, socket})
    when tag in [:tcp_closed, :ssl_closed] do
  handle_closed(conn)
end

def stream(%Mint.HTTP2{transport: transport, socket: socket} = conn, {tag, socket, data})
    when tag in [:tcp, :ssl] do
  case maybe_concat_and_handle_new_data(conn, data) do
    {:ok, %{mode: mode, state: state} = conn, responses}
    when mode == :active and state != :closed ->
      # Active mode uses {:active, :once}: the socket must be re-armed
      # after every message or no further data will be delivered.
      case transport.setopts(socket, active: :once) do
        :ok -> {:ok, conn, responses}
        {:error, reason} -> {:error, put_in(conn.state, :closed), reason, responses}
      end

    other ->
      other
  end
catch
  :throw, {:mint, conn, error, responses} -> {:error, conn, error, responses}
end

# Not a message for this connection's socket.
def stream(%Mint.HTTP2{}, _message) do
  :unknown
end
@doc """
See `Mint.HTTP.open_request_count/1`.

In HTTP/2, the number of open requests is the number of requests **opened by the client**
that have not yet received a `:done` response. It's important to note that only
requests opened by the client (with `request/5`) count towards the number of open
requests, as requests opened from the server with server pushes (see the "Server push"
section in the module documentation) are not considered open requests. We do this because
clients might need to know how many open requests there are because the server limits
the number of concurrent requests the client can open. To know how many requests the client
can open, see `get_server_setting/2` with the `:max_concurrent_streams` setting.
"""
@impl true
@spec open_request_count(t()) :: non_neg_integer()
def open_request_count(%Mint.HTTP2{} = conn) do
  # Server-pushed streams are tracked separately in open_server_stream_count.
  conn.open_client_stream_count
end
@doc """
See `Mint.HTTP.recv/3`.
"""
@impl true
@spec recv(t(), non_neg_integer(), timeout()) ::
        {:ok, t(), [Types.response()]}
        | {:error, t(), Types.error(), [Types.response()]}
def recv(conn, byte_count, timeout)

def recv(%__MODULE__{mode: :passive} = conn, byte_count, timeout) do
  # Blocking read straight from the transport; only legal in passive mode.
  case conn.transport.recv(conn.socket, byte_count, timeout) do
    {:ok, data} ->
      maybe_concat_and_handle_new_data(conn, data)

    {:error, %TransportError{reason: :closed}} ->
      # Peer closed the socket: clean close or error depending on whether
      # any streams are still open (see handle_closed/1).
      handle_closed(conn)

    {:error, error} ->
      {:error, %{conn | state: :closed}, error, _responses = []}
  end
catch
  :throw, {:mint, conn, error, responses} -> {:error, conn, error, responses}
end

def recv(_conn, _byte_count, _timeout) do
  raise ArgumentError,
        "can't use recv/3 to synchronously receive data when the mode is :active. " <>
          "Use Mint.HTTP.set_mode/2 to set the connection to passive mode"
end
@doc """
See `Mint.HTTP.set_mode/2`.
"""
@impl true
@spec set_mode(t(), :active | :passive) :: {:ok, t()} | {:error, Types.error()}
def set_mode(%__MODULE__{} = conn, mode) when mode in [:active, :passive] do
  # Active mode uses {:active, :once} so each delivered message re-arms the
  # socket explicitly; passive mode turns active delivery off entirely.
  active_opt = if mode == :active, do: :once, else: false

  with :ok <- conn.transport.setopts(conn.socket, active: active_opt) do
    {:ok, put_in(conn.mode, mode)}
  end
end
@doc """
See `Mint.HTTP.controlling_process/2`.
"""
@impl true
@spec controlling_process(t(), pid()) :: {:ok, t()} | {:error, Types.error()}
def controlling_process(%__MODULE__{} = conn, new_pid) when is_pid(new_pid) do
  # Hand socket ownership to new_pid; transport errors pass through as-is.
  case conn.transport.controlling_process(conn.socket, new_pid) do
    :ok -> {:ok, conn}
    other -> other
  end
end
@doc """
See `Mint.HTTP.put_private/3`.
"""
@impl true
@spec put_private(t(), atom(), term()) :: t()
def put_private(%Mint.HTTP2{} = conn, key, value) when is_atom(key) do
  # Store a user-defined value in the connection's private map.
  update_in(conn.private, &Map.put(&1, key, value))
end
@doc """
See `Mint.HTTP.get_private/3`.
"""
@impl true
@spec get_private(t(), atom(), term()) :: term()
def get_private(%Mint.HTTP2{} = conn, key, default \\ nil) when is_atom(key) do
  # Read a user-defined value from the connection's private map.
  Map.get(conn.private, key, default)
end
@doc """
See `Mint.HTTP.delete_private/2`.
"""
@impl true
@spec delete_private(t(), atom()) :: t()
def delete_private(%Mint.HTTP2{} = conn, key) when is_atom(key) do
  # Remove a user-defined value from the connection's private map.
  update_in(conn.private, &Map.delete(&1, key))
end
# http://httpwg.org/specs/rfc7540.html#rfc.section.6.5
# SETTINGS parameters are not negotiated. We keep client settings and server settings separate.
@doc false
@impl true
@spec initiate(
        Types.scheme(),
        Types.socket(),
        String.t(),
        :inet.port_number(),
        keyword()
      ) :: {:ok, t()} | {:error, Types.error()}
def initiate(scheme, socket, hostname, port, opts) do
  transport = scheme_to_transport(scheme)
  mode = Keyword.get(opts, :mode, :active)
  client_settings_params = Keyword.get(opts, :client_settings, [])
  validate_settings!(client_settings_params)

  unless mode in [:active, :passive] do
    raise ArgumentError,
          "the :mode option must be either :active or :passive, got: #{inspect(mode)}"
  end

  conn = %Mint.HTTP2{
    hostname: hostname,
    port: port,
    transport: scheme_to_transport(scheme),
    socket: socket,
    mode: mode,
    scheme: Atom.to_string(scheme),
    state: :open
  }

  # HTTP/2 handshake: send the connection preface plus our SETTINGS frame,
  # wait for the server's SETTINGS, then ACK them. Our own settings are
  # queued until the server ACKs them (that ACK is handled in stream/2).
  with :ok <- inet_opts(transport, socket),
       client_settings = settings(stream_id: 0, params: client_settings_params),
       preface = [@connection_preface, Frame.encode(client_settings)],
       :ok <- transport.send(socket, preface),
       conn = update_in(conn.client_settings_queue, &:queue.in(client_settings_params, &1)),
       {:ok, server_settings, buffer, socket} <- receive_server_settings(transport, socket),
       server_settings_ack =
         settings(stream_id: 0, params: [], flags: set_flags(:settings, [:ack])),
       :ok <- transport.send(socket, Frame.encode(server_settings_ack)),
       conn = put_in(conn.buffer, buffer),
       conn = put_in(conn.socket, socket),
       conn = apply_server_settings(conn, settings(server_settings, :params)),
       :ok <- if(mode == :active, do: transport.setopts(socket, active: :once), else: :ok) do
    {:ok, conn}
  else
    # Any step failing aborts the handshake; close the raw socket and
    # return the failing step's error untouched.
    error ->
      transport.close(socket)
      error
  end
catch
  {:mint, conn, error} ->
    {:ok, _conn} = close(conn)
    {:error, error}
end
@doc """
See `Mint.HTTP.get_socket/1`.
"""
@impl true
@spec get_socket(t()) :: Mint.Types.socket()
def get_socket(%Mint.HTTP2{socket: socket} = _conn) do
  # Expose the raw transport socket (e.g. for select/poll-style usage).
  socket
end
@doc """
See `Mint.HTTP.get_proxy_headers/1`.
"""
# The @doc :since attribute is only supported on Elixir >= 1.7, so it is
# attached conditionally at compile time.
if Version.compare(System.version(), "1.7.0") in [:eq, :gt] do
  @doc since: "1.4.0"
end

@impl true
@spec get_proxy_headers(t()) :: Mint.Types.headers()
def get_proxy_headers(%__MODULE__{proxy_headers: proxy_headers}), do: proxy_headers
## Helpers
# Marks the connection closed after the peer closed the socket. Any stream
# still open means the server hung up mid-request, which is an error;
# otherwise the close is clean and we just report no responses.
defp handle_closed(conn) do
  conn = put_in(conn.state, :closed)
  still_open = conn.open_client_stream_count + conn.open_server_stream_count

  if still_open > 0 do
    {:error, conn, conn.transport.wrap_error(:closed), []}
  else
    {:ok, conn, []}
  end
end
defp negotiate(address, port, :http, transport_opts) do
  # We don't support protocol negotiation for TCP connections
  # so currently we just assume the HTTP/2 protocol
  scheme_to_transport(:http).connect(address, port, transport_opts)
end

defp negotiate(address, port, :https, transport_opts) do
  transport = scheme_to_transport(:https)

  # Over TLS the protocol is negotiated via ALPN: anything other than "h2"
  # means the server won't speak HTTP/2 on this connection.
  case transport.connect(address, port, transport_opts) do
    {:ok, socket} ->
      case transport.negotiated_protocol(socket) do
        {:ok, "h2"} -> {:ok, socket}
        {:ok, other_protocol} -> {:error, transport.wrap_error({:bad_alpn_protocol, other_protocol})}
        other -> other
      end

    other ->
      other
  end
end
# The first frame from the server must be its SETTINGS frame; a GOAWAY
# means the server refused the connection, and anything else is a protocol
# error during the handshake.
defp receive_server_settings(transport, socket) do
  case recv_next_frame(transport, socket, _buffer = "") do
    {:ok, settings(), _buffer, _socket} = result ->
      result

    {:ok, goaway(error_code: error_code, debug_data: debug_data), _buffer, _socket} ->
      error = wrap_error({:server_closed_connection, error_code, debug_data})
      {:error, error}

    {:ok, frame, _buffer, _socket} ->
      # elem(frame, 0) is the frame record's tag, i.e. the frame name.
      debug_data = "received invalid frame #{elem(frame, 0)} during handshake"
      {:error, wrap_error({:protocol_error, debug_data})}

    {:error, error} ->
      {:error, error}
  end
end
# Reads from the socket until a complete frame can be decoded from
# "buffer", returning {:ok, frame, leftover_bytes, socket}. Only used
# during the handshake, hence the fixed 10-second read timeout.
defp recv_next_frame(transport, socket, buffer) do
  case Frame.decode_next(buffer, @default_max_frame_size) do
    {:ok, frame, rest} ->
      {:ok, frame, rest, socket}

    :more ->
      # Not enough bytes for a whole frame yet: read more and retry.
      with {:ok, data} <- transport.recv(socket, 0, _timeout = 10_000) do
        data = maybe_concat(buffer, data)
        recv_next_frame(transport, socket, data)
      end

    {:error, {kind, _info} = reason} when kind in [:frame_size_error, :protocol_error] ->
      {:error, wrap_error(reason)}
  end
end
# Allocates the next client-initiated stream, registering it in the conn.
# Throws :too_many_concurrent_requests when the server's advertised
# max_concurrent_streams limit would be exceeded.
defp open_stream(conn) do
  if conn.open_client_stream_count >= conn.server_settings.max_concurrent_streams do
    throw({:mint, conn, wrap_error(:too_many_concurrent_requests)})
  end

  stream_id = conn.next_stream_id
  request_ref = make_ref()

  new_stream = %{
    id: stream_id,
    ref: request_ref,
    state: :idle,
    window_size: conn.server_settings.initial_window_size,
    received_first_headers?: false
  }

  conn = put_in(conn.streams[stream_id], new_stream)
  conn = put_in(conn.ref_to_stream_id[request_ref], stream_id)
  # Client-initiated stream IDs are odd, so we always step by two.
  conn = update_in(conn.next_stream_id, &(&1 + 2))

  {conn, stream_id, request_ref}
end
# Bare :eof ends the stream with an empty DATA frame flagged END_STREAM.
defp encode_stream_body_request_payload(conn, stream_id, :eof) do
  encode_data(conn, stream_id, "", [:end_stream])
end

# {:eof, trailers} ends the stream with a trailing HEADERS frame, after
# checking that no forbidden (non-trailer) header is present.
defp encode_stream_body_request_payload(conn, stream_id, {:eof, trailing_headers}) do
  case trailing_headers |> downcase_header_names() |> Util.find_unallowed_trailing_header() do
    nil ->
      encode_headers(conn, stream_id, trailing_headers, [:end_headers, :end_stream])

    bad_header ->
      throw({:mint, conn, wrap_error({:unallowed_trailing_header, bad_header})})
  end
end

# Any other chunk is body data: DATA frame(s) without END_STREAM.
defp encode_stream_body_request_payload(conn, stream_id, iodata) do
  encode_data(conn, stream_id, iodata, [])
end
# Body will be streamed later: send HEADERS but keep the stream open.
defp encode_request_payload(conn, stream_id, headers, :stream) do
  encode_headers(conn, stream_id, headers, [:end_headers])
end

# No body at all: HEADERS closes our side of the stream right away.
defp encode_request_payload(conn, stream_id, headers, nil) do
  encode_headers(conn, stream_id, headers, [:end_stream, :end_headers])
end

# Full body given up front: HEADERS followed by DATA carrying END_STREAM.
defp encode_request_payload(conn, stream_id, headers, body) do
  {conn, encoded_headers} = encode_headers(conn, stream_id, headers, [:end_headers])
  {conn, encoded_body} = encode_data(conn, stream_id, body, [:end_stream])
  {conn, [encoded_headers, encoded_body]}
end
# Encodes "headers" through the stateful HPACK encode table (updated in the
# conn) and returns {conn, iodata} for one or more HEADERS/CONTINUATION
# frames. Also moves the stream into its post-HEADERS state and bumps the
# open-client-stream counter.
defp encode_headers(conn, stream_id, headers, enabled_flags) do
  assert_headers_smaller_than_max_header_list_size(conn, headers)

  # :store_name tells HPAX to add the header *name* to the dynamic table.
  headers = Enum.map(headers, fn {name, value} -> {:store_name, name, value} end)
  {hbf, conn} = get_and_update_in(conn.encode_table, &HPAX.encode(headers, &1))

  payload = headers_to_encoded_frames(conn, stream_id, hbf, enabled_flags)

  # END_STREAM means we have nothing more to send on this stream.
  stream_state = if :end_stream in enabled_flags, do: :half_closed_local, else: :open

  conn = put_in(conn.streams[stream_id].state, stream_state)
  conn = update_in(conn.open_client_stream_count, &(&1 + 1))

  {conn, payload}
end
# No server-advertised limit: anything goes.
defp assert_headers_smaller_than_max_header_list_size(
       %{server_settings: %{max_header_list_size: :infinity}},
       _headers
     ) do
  :ok
end

defp assert_headers_smaller_than_max_header_list_size(conn, headers) do
  # The limit applies to the *uncompressed* size: name + value lengths in
  # octets plus a fixed 32-octet overhead per header field.
  total_size =
    headers
    |> Enum.map(fn {name, value} -> byte_size(name) + byte_size(value) + 32 end)
    |> Enum.sum()

  max_header_list_size = conn.server_settings.max_header_list_size

  if total_size > max_header_list_size do
    error = wrap_error({:max_header_list_size_exceeded, total_size, max_header_list_size})
    throw({:mint, conn, error})
  else
    :ok
  end
end
# If the header block fragment fits in one frame, emit a single HEADERS
# frame; otherwise split it into a HEADERS frame plus CONTINUATION frames,
# each at most max_frame_size bytes.
defp headers_to_encoded_frames(conn, stream_id, hbf, enabled_flags) do
  if IO.iodata_length(hbf) > conn.server_settings.max_frame_size do
    hbf
    |> IO.iodata_to_binary()
    |> split_payload_in_chunks(conn.server_settings.max_frame_size)
    |> split_hbf_to_encoded_frames(stream_id, enabled_flags)
  else
    Frame.encode(
      headers(stream_id: stream_id, hbf: hbf, flags: set_flags(:headers, enabled_flags))
    )
  end
end
# Builds the frame sequence for an oversized header block: a HEADERS frame
# with the first chunk (END_HEADERS stripped since more chunks follow),
# CONTINUATION frames for the middle chunks, and a final CONTINUATION that
# carries END_HEADERS only if the caller requested it.
defp split_hbf_to_encoded_frames({[first_chunk | chunks], last_chunk}, stream_id, enabled_flags) do
  flags = set_flags(:headers, enabled_flags -- [:end_headers])
  first_frame = Frame.encode(headers(stream_id: stream_id, hbf: first_chunk, flags: flags))

  middle_frames =
    Enum.map(chunks, fn chunk ->
      Frame.encode(continuation(stream_id: stream_id, hbf: chunk))
    end)

  flags =
    if :end_headers in enabled_flags do
      set_flags(:continuation, [:end_headers])
    else
      set_flags(:continuation, [])
    end

  last_frame = Frame.encode(continuation(stream_id: stream_id, hbf: last_chunk, flags: flags))

  [first_frame, middle_frames, last_frame]
end
# Encodes body data into one or more DATA frames, enforcing flow control:
# throws if the data exceeds either the stream window or the connection
# window, and auto-chunks when it exceeds the server's max frame size.
defp encode_data(conn, stream_id, data, enabled_flags) do
  stream = fetch_stream!(conn, stream_id)

  # Only a stream still :open on our side can accept more body data.
  if stream.state != :open do
    error = wrap_error(:request_is_not_streaming)
    throw({:mint, conn, error})
  end

  data_size = IO.iodata_length(data)

  cond do
    data_size > stream.window_size ->
      throw({:mint, conn, wrap_error({:exceeds_window_size, :request, stream.window_size})})

    data_size > conn.window_size ->
      throw({:mint, conn, wrap_error({:exceeds_window_size, :connection, conn.window_size})})

    # If the data size is greater than the max frame size, we chunk automatically based
    # on the max frame size.
    data_size > conn.server_settings.max_frame_size ->
      {chunks, last_chunk} =
        data
        |> IO.iodata_to_binary()
        |> split_payload_in_chunks(conn.server_settings.max_frame_size)

      {encoded_chunks, conn} =
        Enum.map_reduce(chunks, conn, fn chunk, acc ->
          {acc, encoded} = encode_data_chunk(acc, stream_id, chunk, [])
          {encoded, acc}
        end)

      # Only the last chunk carries the caller's flags (e.g. END_STREAM).
      {conn, encoded_last_chunk} = encode_data_chunk(conn, stream_id, last_chunk, enabled_flags)
      {conn, [encoded_chunks, encoded_last_chunk]}

    true ->
      encode_data_chunk(conn, stream_id, data, enabled_flags)
  end
end
# Encodes a single DATA frame for "chunk" and charges its size against both
# the per-stream and the connection flow-control windows.
defp encode_data_chunk(%__MODULE__{} = conn, stream_id, chunk, enabled_flags)
     when is_integer(stream_id) and is_list(enabled_flags) do
  size = IO.iodata_length(chunk)

  conn = update_in(conn.streams[stream_id].window_size, &(&1 - size))
  conn = update_in(conn.window_size, &(&1 - size))

  # END_STREAM closes our side of the stream.
  conn =
    if :end_stream in enabled_flags do
      put_in(conn.streams[stream_id].state, :half_closed_local)
    else
      conn
    end

  frame = data(stream_id: stream_id, flags: set_flags(:data, enabled_flags), data: chunk)
  {conn, Frame.encode(frame)}
end
# Splits a binary into {full_chunks, last_chunk}: every element of
# full_chunks is exactly chunk_size bytes, and last_chunk holds the
# remainder (it may be up to chunk_size bytes, or empty for empty input).
defp split_payload_in_chunks(binary, chunk_size),
  do: split_payload_in_chunks(binary, chunk_size, [])

defp split_payload_in_chunks(binary, chunk_size, acc) do
  if byte_size(binary) <= chunk_size do
    {Enum.reverse(acc), binary}
  else
    <<chunk::size(chunk_size)-binary, rest::binary>> = binary
    split_payload_in_chunks(rest, chunk_size, [chunk | acc])
  end
end
# Writes a PING frame carrying "payload" and queues {ref, payload} so the
# server's echoed PING ACK can be matched back to this ref in stream/2.
defp send_ping(conn, payload) do
  ref = make_ref()
  frame = Frame.ping(stream_id: 0, opaque_data: payload)

  conn = send!(conn, Frame.encode(frame))
  conn = update_in(conn.ping_queue, &:queue.in({ref, payload}, &1))

  {conn, ref}
end
# Validates and sends a SETTINGS frame, queueing the params until the
# server ACKs them (the ACK is processed in stream/2).
defp send_settings(conn, settings) do
  # Raises ArgumentError on any invalid setting before touching the wire.
  validate_settings!(settings)

  frame = settings(stream_id: 0, params: settings)
  conn = send!(conn, Frame.encode(frame))
  update_in(conn.client_settings_queue, &:queue.in(settings, &1))
end
# Raises ArgumentError for any malformed HTTP/2 setting; returns :ok-ish
# (the Enum.each result) when everything validates.
defp validate_settings!(settings) do
  if not Keyword.keyword?(settings) do
    raise ArgumentError, "settings must be a keyword list"
  end

  Enum.each(settings, fn
    {:header_table_size, value} ->
      if not is_integer(value) do
        raise ArgumentError, ":header_table_size must be an integer, got: #{inspect(value)}"
      end

    {:enable_push, value} ->
      if not is_boolean(value) do
        raise ArgumentError, ":enable_push must be a boolean, got: #{inspect(value)}"
      end

    {:max_concurrent_streams, value} ->
      if not is_integer(value) do
        raise ArgumentError,
              ":max_concurrent_streams must be an integer, got: #{inspect(value)}"
      end

    {:initial_window_size, value} ->
      if not (is_integer(value) and value <= @max_window_size) do
        raise ArgumentError,
              ":initial_window_size must be an integer < #{@max_window_size}, " <>
                "got: #{inspect(value)}"
      end

    {:max_frame_size, value} ->
      if not (is_integer(value) and value in @valid_max_frame_size_range) do
        raise ArgumentError,
              ":max_frame_size must be an integer in #{inspect(@valid_max_frame_size_range)}, " <>
                "got: #{inspect(value)}"
      end

    {:max_header_list_size, value} ->
      if not is_integer(value) do
        raise ArgumentError, ":max_header_list_size must be an integer, got: #{inspect(value)}"
      end

    {:enable_connect_protocol, value} ->
      if not is_boolean(value) do
        raise ArgumentError,
              ":enable_connect_protocol must be a boolean, got: #{inspect(value)}"
      end

    {name, _value} ->
      raise ArgumentError, "unknown setting parameter #{inspect(name)}"
  end)
end
# Lowercases every header name (HTTP/2 requires lowercase field names),
# leaving values untouched.
defp downcase_header_names(headers) do
  Enum.map(headers, fn {name, value} -> {Util.downcase_ascii(name), value} end)
end
# Fills in the default headers we always send unless the caller already
# provided them: user-agent, plus content-length when the body is known.
defp add_default_headers(headers, body) do
  headers_with_ua = Util.put_new_header(headers, "user-agent", @user_agent)
  add_default_content_length_header(headers_with_ua, body)
end
# No body, or a body that will be streamed later: its length is unknown,
# so no content-length default can be added.
defp add_default_content_length_header(headers, body) when body in [nil, :stream] do
  headers
end

defp add_default_content_length_header(headers, body) do
  # Lazy so the iodata length is only computed when the header is missing.
  Util.put_new_header_lazy(headers, "content-length", fn ->
    Integer.to_string(IO.iodata_length(body))
  end)
end
# Prepends the HTTP/2 pseudo-headers. CONNECT requests only carry :method
# and :authority; every other method also gets :path and :scheme.
defp add_pseudo_headers(headers, conn, method, path) do
  authority = authority_pseudo_header(conn.scheme, conn.port, conn.hostname)

  case String.upcase(method) do
    "CONNECT" ->
      [{":method", method}, {":authority", authority} | headers]

    _other ->
      [
        {":method", method},
        {":path", path},
        {":scheme", conn.scheme},
        {":authority", authority}
        | headers
      ]
  end
end
# Stable sort that moves ":"-prefixed pseudo-headers ahead of regular
# headers while preserving the relative order within each group.
defp sort_pseudo_headers_to_front(headers) do
  Enum.sort_by(headers, fn
    {":" <> _rest, _value} -> 0
    {_name, _value} -> 1
  end)
end
## Frame handling
# Prepends any partial frame left in conn.buffer from the previous read,
# processes all complete frames, and returns responses in arrival order
# (handle_new_data/3 accumulates them reversed).
defp maybe_concat_and_handle_new_data(conn, data) do
  full_data = maybe_concat(conn.buffer, data)
  {conn, reversed_responses} = handle_new_data(conn, full_data, [])
  {:ok, conn, Enum.reverse(reversed_responses)}
end
# Decodes as many complete frames as possible out of "data", dispatching
# each to handle_frame/3 and accumulating responses (in reverse order).
# Leftover bytes are stashed in conn.buffer until more data arrives.
defp handle_new_data(%Mint.HTTP2{} = conn, data, responses) do
  case Frame.decode_next(data, conn.client_settings.max_frame_size) do
    {:ok, frame, rest} ->
      assert_valid_frame(conn, frame)
      {conn, responses} = handle_frame(conn, frame, responses)
      handle_new_data(conn, rest, responses)

    :more ->
      # Partial frame: buffer it, then check the connection state (GOAWAY
      # with an error must surface now that all readable frames are done).
      conn = put_in(conn.buffer, data)
      handle_consumed_all_frames(conn, responses)

    {:error, :payload_too_big} ->
      # TODO: sometimes, this could be handled with RST_STREAM instead of a GOAWAY frame (for
      # example, if the payload of a DATA frame is too big).
      # http://httpwg.org/specs/rfc7540.html#rfc.section.4.2
      debug_data = "frame payload exceeds connection's max frame size"
      send_connection_error!(conn, :frame_size_error, debug_data)

    {:error, {:frame_size_error, frame}} ->
      debug_data = "error with size of frame: #{inspect(frame)}"
      send_connection_error!(conn, :frame_size_error, debug_data)

    {:error, {:protocol_error, info}} ->
      debug_data = "error when decoding frame: #{inspect(info)}"
      send_connection_error!(conn, :protocol_error, debug_data)
  end
catch
  # 3-element throws come from frame handlers that don't know about the
  # responses gathered so far: attach them before re-throwing.
  :throw, {:mint, conn, error} -> throw({:mint, conn, error, responses})
  :throw, {:mint, _conn, _error, _responses} = thrown -> throw(thrown)
end
# Called once every readable frame has been processed. A graceful GOAWAY
# (:no_error) lets in-flight responses through; a GOAWAY with an error
# code aborts with the gathered responses attached.
# TODO: should we do something with the debug data here, like logging it?
defp handle_consumed_all_frames(%{state: {:goaway, :no_error, _debug_data}} = conn, responses) do
  {conn, responses}
end

defp handle_consumed_all_frames(%{state: {:goaway, error_code, debug_data}} = conn, responses) do
  error = wrap_error({:server_closed_connection, error_code, debug_data})
  throw({:mint, conn, error, responses})
end

defp handle_consumed_all_frames(conn, responses) do
  {conn, responses}
end
# Runs the per-frame protocol assertions; each one either returns :ok or
# throws a connection error via send_connection_error!/3.
defp assert_valid_frame(conn, frame) do
  frame_name = elem(frame, 0)
  stream_id = elem(frame, 1)

  assert_frame_on_right_level(conn, frame_name, stream_id)
  assert_stream_id_is_allowed(conn, stream_id)
  assert_frame_doesnt_interrupt_header_streaming(conn, frame)
end
# http://httpwg.org/specs/rfc7540.html#HttpSequence
# While a header block is in progress (HEADERS without END_HEADERS), the
# only legal next frame is a CONTINUATION on that same stream.
defp assert_frame_doesnt_interrupt_header_streaming(conn, frame) do
  case {conn.headers_being_processed, frame} do
    # No header block in progress: a CONTINUATION is illegal...
    {nil, continuation()} ->
      debug_data = "CONTINUATION received outside of headers streaming"
      send_connection_error!(conn, :protocol_error, debug_data)

    # ...and any other frame is fine.
    {nil, _frame} ->
      :ok

    # The repeated "stream_id" variable forces the CONTINUATION's stream to
    # match the stream whose headers are being processed.
    {{stream_id, _, _}, continuation(stream_id: stream_id)} ->
      :ok

    _other ->
      debug_data =
        "headers are streaming but got a #{inspect(elem(frame, 0))} frame instead " <>
          "of a CONTINUATION frame"

      send_connection_error!(conn, :protocol_error, debug_data)
  end
end
# Module-body bindings (not attributes): frames that must carry a non-zero
# stream ID vs. frames that must be connection-level (stream ID 0). They
# are injected via unquote/1 into the assertions and the handle_frame/3
# dispatch clauses generated below.
stream_level_frames = [:data, :headers, :priority, :rst_stream, :push_promise, :continuation]
connection_level_frames = [:settings, :ping, :goaway]
# Stream-level frames must carry a non-zero stream ID, and connection-level
# frames must use stream ID 0; anything else is a PROTOCOL_ERROR. The
# unquote/1 calls inject the lists bound in the module body above.
defp assert_frame_on_right_level(conn, frame, _stream_id = 0)
     when frame in unquote(stream_level_frames) do
  debug_data = "frame #{inspect(frame)} not allowed at the connection level (stream_id = 0)"
  send_connection_error!(conn, :protocol_error, debug_data)
end

defp assert_frame_on_right_level(conn, frame, stream_id)
     when frame in unquote(connection_level_frames) and stream_id != 0 do
  debug_data = "frame #{inspect(frame)} only allowed at the connection level"
  send_connection_error!(conn, :protocol_error, debug_data)
end

defp assert_frame_on_right_level(_conn, _frame, _stream_id) do
  :ok
end
# Odd stream IDs are client-initiated; one we haven't handed out yet means
# the server referenced a stream that was never opened.
defp assert_stream_id_is_allowed(conn, stream_id) do
  cond do
    Integer.is_odd(stream_id) and stream_id >= conn.next_stream_id ->
      debug_data = "frame with stream ID #{inspect(stream_id)} has not been opened yet"
      send_connection_error!(conn, :protocol_error, debug_data)

    true ->
      :ok
  end
end
# Compile-time generation of one handle_frame/3 clause per frame type,
# dispatching e.g. a data() frame to handle_data/3, a ping() frame to
# handle_ping/3, and so on.
for frame_name <- stream_level_frames ++ connection_level_frames ++ [:window_update] do
  function_name = :"handle_#{frame_name}"

  defp handle_frame(conn, Frame.unquote(frame_name)() = frame, responses) do
    unquote(function_name)(conn, frame, responses)
  end
end
# DATA
defp handle_data(conn, frame, responses) do
  data(stream_id: stream_id, flags: flags, data: data, padding: padding) = frame

  # Regardless of whether we have the stream or not, we need to abide by flow
  # control rules so we still refill the client window for the stream_id we got.
  window_size_increment = byte_size(data) + byte_size(padding || "")

  conn =
    if window_size_increment > 0 do
      refill_client_windows(conn, stream_id, window_size_increment)
    else
      conn
    end

  case Map.fetch(conn.streams, stream_id) do
    {:ok, stream} ->
      assert_stream_in_state(conn, stream, [:open, :half_closed_local])
      responses = [{:data, stream.ref, data} | responses]

      # END_STREAM means the server has no more data for this stream.
      if flag_set?(flags, :data, :end_stream) do
        conn = close_stream!(conn, stream.id, :no_error)
        {conn, [{:done, stream.ref} | responses]}
      else
        {conn, responses}
      end

    :error ->
      # Stream already closed (e.g. cancelled): drop the data silently.
      _ = Logger.debug(fn -> "Received DATA frame on closed stream ID #{stream_id}" end)
      {conn, responses}
  end
end
# Gives the server back the bytes we just consumed, on both the connection
# window (stream ID 0) and the stream's own window.
defp refill_client_windows(conn, stream_id, data_size) do
  frames = [
    Frame.encode(window_update(stream_id: 0, window_size_increment: data_size)),
    Frame.encode(window_update(stream_id: stream_id, window_size_increment: data_size))
  ]

  send!(conn, frames)
end
# HEADERS
defp handle_headers(conn, frame, responses) do
  headers(stream_id: stream_id, flags: flags, hbf: hbf) = frame
  # "stream" may be nil for a stream we already closed; the HBF still has
  # to be decoded later to keep the HPACK dynamic table in sync.
  stream = Map.get(conn.streams, stream_id)
  end_stream? = flag_set?(flags, :headers, :end_stream)

  if stream do
    assert_stream_in_state(conn, stream, [:open, :half_closed_local, :reserved_remote])
  end

  if flag_set?(flags, :headers, :end_headers) do
    decode_hbf_and_add_responses(conn, responses, hbf, stream, end_stream?)
  else
    # The header block continues in CONTINUATION frames: stash the partial
    # HBF plus the callback to run once END_HEADERS finally arrives.
    callback = &decode_hbf_and_add_responses(&1, &2, &3, &4, end_stream?)
    conn = put_in(conn.headers_being_processed, {stream_id, hbf, callback})
    {conn, responses}
  end
end
# "stream" is nil when the stream was already closed. The HBF must still go
# through HPACK decoding (the dynamic table is stateful and shared across
# streams), but the decoded headers then produce no responses.
defp decode_hbf_and_add_responses(conn, responses, hbf, stream, end_stream?) do
  {conn, headers} = decode_hbf(conn, hbf)

  if is_nil(stream) do
    _ = Logger.debug(fn -> "Received HEADERS frame on closed stream ID" end)
    {conn, responses}
  else
    handle_decoded_headers_for_stream(conn, responses, stream, headers, end_stream?)
  end
end
# Turns decoded response headers into user-facing responses, enforcing the
# HTTP/2 rules: the first header block must start with :status, trailers
# must carry END_STREAM, and promised (:reserved_remote) streams count
# against the client's max_concurrent_streams limit.
defp handle_decoded_headers_for_stream(conn, responses, stream, headers, end_stream?) do
  %{ref: ref, received_first_headers?: received_first_headers?} = stream

  case headers do
    # First header block for this stream: must lead with :status.
    [{":status", status} | headers] when not received_first_headers? ->
      conn = put_in(conn.streams[stream.id].received_first_headers?, true)
      status = String.to_integer(status)
      headers = join_cookie_headers(headers)
      new_responses = [{:headers, ref, headers}, {:status, ref, status} | responses]

      cond do
        # :reserved_remote means that this was a promised stream. As soon as headers come,
        # the stream goes in the :half_closed_local state (unless it's not allowed because
        # of the client's max concurrent streams limit, or END_STREAM is set).
        stream.state == :reserved_remote ->
          cond do
            conn.open_server_stream_count >= conn.client_settings.max_concurrent_streams ->
              # Over our advertised limit: refuse the pushed stream and
              # drop its responses.
              conn = close_stream!(conn, stream.id, :refused_stream)
              {conn, responses}

            end_stream? ->
              conn = close_stream!(conn, stream.id, :no_error)
              {conn, [{:done, ref} | new_responses]}

            true ->
              conn = update_in(conn.open_server_stream_count, &(&1 + 1))
              conn = put_in(conn.streams[stream.id].state, :half_closed_local)
              {conn, new_responses}
          end

        end_stream? ->
          conn = close_stream!(conn, stream.id, :no_error)
          {conn, [{:done, ref} | new_responses]}

        true ->
          {conn, new_responses}
      end

    # Trailing headers. We don't care about the :status header here.
    headers when received_first_headers? ->
      if end_stream? do
        conn = close_stream!(conn, stream.id, :no_error)
        headers = headers |> Util.remove_unallowed_trailing_headers() |> join_cookie_headers()
        {conn, [{:done, ref}, {:headers, ref, headers} | responses]}
      else
        # Trailing headers must set the END_STREAM flag because they're
        # the last thing allowed on the stream (other than RST_STREAM and
        # the usual frames).
        conn = close_stream!(conn, stream.id, :protocol_error)
        debug_data = "trailing headers didn't set the END_STREAM flag"
        error = wrap_error({:protocol_error, debug_data})
        responses = [{:error, stream.ref, error} | responses]
        {conn, responses}
      end

    # Non-trailing headers need to have a :status header, otherwise
    # it's a protocol error.
    _headers ->
      conn = close_stream!(conn, stream.id, :protocol_error)
      error = wrap_error(:missing_status_header)
      responses = [{:error, stream.ref, error} | responses]
      {conn, responses}
  end
end
# Decodes a complete header block fragment through the connection's HPACK
# decode table, persisting the updated table. Decoding failure is a
# connection-level COMPRESSION_ERROR (send_connection_error!/3 throws).
defp decode_hbf(conn, hbf) do
  case HPAX.decode(hbf, conn.decode_table) do
    {:ok, headers, decode_table} ->
      {put_in(conn.decode_table, decode_table), headers}

    {:error, reason} ->
      send_connection_error!(
        conn,
        :compression_error,
        "unable to decode headers: #{inspect(reason)}"
      )
  end
end
# Builds the :authority pseudo-header value. The port is omitted when it is
# the scheme's default (e.g. 443 for "https"), per common practice.
defp authority_pseudo_header(scheme, port, hostname) do
  case URI.default_port(scheme) do
    ^port -> hostname
    _other_port -> "#{hostname}:#{port}"
  end
end
# Collapses multiple "cookie" headers (matched case-insensitively) into a
# single header whose values are joined with "; ".
defp join_cookie_headers(headers) do
  case Enum.split_with(headers, fn {name, _value} -> Util.downcase_ascii(name) == "cookie" end) do
    # Two or more Cookie headers: join their values and prepend the result.
    {[_, _ | _] = cookies, other_headers} ->
      joined = Enum.map_join(cookies, "; ", fn {_name, value} -> value end)
      [{"cookie", joined} | other_headers]

    # Zero or one Cookie header: nothing to join, keep the original list.
    _zero_or_one ->
      headers
  end
end
# PRIORITY
# For now we ignore all PRIORITY frames. This shouldn't cause practical trouble.
# The `_ =` match makes the discarded Logger return value explicit.
defp handle_priority(conn, frame, responses) do
  _ = Logger.warn(fn -> "Ignoring PRIORITY frame: #{inspect(frame)}" end)
  {conn, responses}
end
# RST_STREAM
#
# The server aborted (or cleanly terminated) a stream. :no_error yields a
# {:done, ref} response; any other error code is surfaced as
# {:error, ref, :server_closed_request}.
defp handle_rst_stream(conn, frame, responses) do
  rst_stream(stream_id: stream_id, error_code: error_code) = frame

  case Map.fetch(conn.streams, stream_id) do
    :error ->
      # RST_STREAM on a stream we no longer track is simply ignored.
      {conn, responses}

    {:ok, stream} ->
      # The stream is definitely closed now. We won't send anything else on
      # it, so dropping it means later frames (like DATA) on it error out.
      conn = delete_stream(conn, stream)

      response =
        case error_code do
          :no_error -> {:done, stream.ref}
          _other -> {:error, stream.ref, wrap_error({:server_closed_request, error_code})}
        end

      {conn, [response | responses]}
  end
end
# SETTINGS
#
# A SETTINGS frame with the ACK flag acknowledges settings *we* sent earlier:
# we pop those queued params and apply them as our effective client settings.
# A SETTINGS frame without ACK carries the server's settings: we apply them
# and reply with an empty SETTINGS frame that has the ACK flag set.
defp handle_settings(conn, frame, responses) do
  settings(flags: flags, params: params) = frame

  if flag_set?(flags, :settings, :ack) do
    # Asserts the queue is non-empty: an ACK can only follow a SETTINGS frame
    # we sent, whose params were queued on client_settings_queue.
    {{:value, params}, conn} = get_and_update_in(conn.client_settings_queue, &:queue.out/1)
    conn = apply_client_settings(conn, params)
    {conn, responses}
  else
    conn = apply_server_settings(conn, params)
    frame = settings(flags: set_flags(:settings, [:ack]), params: [])
    conn = send!(conn, Frame.encode(frame))
    {conn, responses}
  end
end
# Folds the server's SETTINGS parameters into the connection. Invalid values
# trigger a connection error: send_connection_error!/3 throws, abandoning the
# reduction, so the statement following each validation only runs when the
# value is acceptable.
defp apply_server_settings(conn, server_settings) do
  Enum.reduce(server_settings, conn, fn
    {:header_table_size, header_table_size}, conn ->
      # The server's header table size resizes our *encoding* table.
      update_in(conn.encode_table, &HPAX.resize(&1, header_table_size))

    {:enable_push, enable_push?}, conn ->
      put_in(conn.server_settings.enable_push, enable_push?)

    {:max_concurrent_streams, max_concurrent_streams}, conn ->
      put_in(conn.server_settings.max_concurrent_streams, max_concurrent_streams)

    {:initial_window_size, initial_window_size}, conn ->
      if initial_window_size > @max_window_size do
        debug_data = "INITIAL_WINDOW_SIZE setting of #{initial_window_size} is too big"
        # Throws; the window-size update below never runs in this case.
        send_connection_error!(conn, :flow_control_error, debug_data)
      end

      update_server_initial_window_size(conn, initial_window_size)

    {:max_frame_size, max_frame_size}, conn ->
      if max_frame_size not in @valid_max_frame_size_range do
        debug_data = "MAX_FRAME_SIZE setting parameter outside of allowed range"
        send_connection_error!(conn, :protocol_error, debug_data)
      end

      put_in(conn.server_settings.max_frame_size, max_frame_size)

    {:max_header_list_size, max_header_list_size}, conn ->
      put_in(conn.server_settings.max_header_list_size, max_header_list_size)

    {:enable_connect_protocol, enable_connect_protocol?}, conn ->
      put_in(conn.server_settings.enable_connect_protocol, enable_connect_protocol?)
  end)
end
# Applies our own settings once the server has ACKed them; only the three
# settings below are tracked on the client side.
defp apply_client_settings(conn, client_settings) do
  Enum.reduce(client_settings, conn, fn
    {:max_frame_size, size}, acc ->
      put_in(acc.client_settings.max_frame_size, size)

    {:max_concurrent_streams, max_streams}, acc ->
      put_in(acc.client_settings.max_concurrent_streams, max_streams)

    {:enable_push, enabled?}, acc ->
      put_in(acc.client_settings.enable_push, enabled?)
  end)
end
# Applies a change to the server's INITIAL_WINDOW_SIZE setting: every stream
# in :open or :half_closed_remote state has its window adjusted by the delta
# between the new and old values (the delta may be negative).
defp update_server_initial_window_size(conn, new_iws) do
  diff = new_iws - conn.server_settings.initial_window_size

  conn =
    update_in(conn.streams, fn streams ->
      # `into: streams` overwrites the adjusted entries in place, leaving
      # streams in other states untouched.
      for {stream_id, stream} <- streams,
          stream.state in [:open, :half_closed_remote],
          into: streams do
        window_size = stream.window_size + diff

        if window_size > @max_window_size do
          debug_data =
            "INITIAL_WINDOW_SIZE parameter of #{window_size} makes some window sizes too big"

          # Throws, abandoning the comprehension and this whole function.
          send_connection_error!(conn, :flow_control_error, debug_data)
        end

        {stream_id, %{stream | window_size: window_size}}
      end
    end)

  put_in(conn.server_settings.initial_window_size, new_iws)
end
# PUSH_PROMISE
#
# If we advertised SETTINGS_ENABLE_PUSH = false, receiving any PUSH_PROMISE
# is a connection-level protocol error.
defp handle_push_promise(
       %Mint.HTTP2{client_settings: %{enable_push: false}} = conn,
       push_promise(),
       _responses
     ) do
  debug_data = "received PUSH_PROMISE frame when SETTINGS_ENABLE_PUSH was false"
  send_connection_error!(conn, :protocol_error, debug_data)
end

# PUSH_PROMISE is only valid on a stream in :open or :half_closed_local state,
# and the promised stream ID must be a fresh, even (server-initiated) ID.
# With END_HEADERS set the header block is decoded right away; otherwise the
# partial block plus a continuation callback is stashed in
# conn.headers_being_processed until CONTINUATION frames complete it.
defp handle_push_promise(conn, push_promise() = frame, responses) do
  push_promise(
    stream_id: stream_id,
    flags: flags,
    promised_stream_id: promised_stream_id,
    hbf: hbf
  ) = frame

  assert_valid_promised_stream_id(conn, promised_stream_id)

  stream = fetch_stream!(conn, stream_id)
  assert_stream_in_state(conn, stream, [:open, :half_closed_local])

  if flag_set?(flags, :push_promise, :end_headers) do
    decode_push_promise_headers_and_add_response(
      conn,
      responses,
      hbf,
      stream,
      promised_stream_id
    )
  else
    callback = &decode_push_promise_headers_and_add_response(&1, &2, &3, &4, promised_stream_id)
    conn = put_in(conn.headers_being_processed, {stream_id, hbf, callback})
    {conn, responses}
  end
end
# Decodes a promised stream's request headers and registers the promised
# stream in :reserved_remote state (it moves to :half_closed_local once its
# response HEADERS arrive).
defp decode_push_promise_headers_and_add_response(
       conn,
       responses,
       hbf,
       stream,
       promised_stream_id
     ) do
  {conn, headers} = decode_hbf(conn, hbf)

  promised_stream = %{
    id: promised_stream_id,
    ref: make_ref(),
    state: :reserved_remote,
    window_size: conn.server_settings.initial_window_size,
    received_first_headers?: false
  }

  conn = put_in(conn.streams[promised_stream_id], promised_stream)
  {conn, [{:push_promise, stream.ref, promised_stream.ref, headers} | responses]}
end
# A promised stream ID must be an even integer (server-initiated) and must not
# collide with a stream we already track; anything else is a connection-level
# protocol error (send_connection_error!/3 throws).
defp assert_valid_promised_stream_id(conn, promised_stream_id) do
  cond do
    not is_integer(promised_stream_id) or Integer.is_odd(promised_stream_id) ->
      send_connection_error!(
        conn,
        :protocol_error,
        "invalid promised stream ID: #{inspect(promised_stream_id)}"
      )

    Map.has_key?(conn.streams, promised_stream_id) ->
      send_connection_error!(
        conn,
        :protocol_error,
        "stream with ID #{inspect(promised_stream_id)} already exists and can't be " <>
          "reserved by the server"
      )

    true ->
      :ok
  end
end
# PING
#
# A PING with the ACK flag answers one of our own pings; without it, the frame
# is a server ping that must be echoed back with the same opaque data and the
# ACK flag set.
defp handle_ping(conn, Frame.ping() = frame, responses) do
  Frame.ping(flags: flags, opaque_data: opaque_data) = frame

  if flag_set?(flags, :ping, :ack) do
    handle_ping_ack(conn, opaque_data, responses)
  else
    ack = Frame.ping(stream_id: 0, flags: set_flags(:ping, [:ack]), opaque_data: opaque_data)
    {send!(conn, Frame.encode(ack)), responses}
  end
end
# Matches a PING ack against the oldest in-flight ping we sent. Acks that
# don't match (or arrive with nothing pending) are logged and dropped.
defp handle_ping_ack(conn, opaque_data, responses) do
  case :queue.peek(conn.ping_queue) do
    {:value, {ref, ^opaque_data}} ->
      # Pop the matched ping and emit a :pong for its request ref.
      {update_in(conn.ping_queue, &:queue.drop/1), [{:pong, ref} | responses]}

    {:value, _mismatched} ->
      _ = Logger.warn("Received PING ack that doesn't match next PING request in the queue")
      {conn, responses}

    :empty ->
      _ = Logger.warn("Received PING ack but no PING requests are pending")
      {conn, responses}
  end
end
# GOAWAY
#
# The server is going away: streams with an ID above last_stream_id were
# never processed, so each yields an {:error, ref, :unprocessed} response
# (safe to retry on another connection) and is removed from the stream set.
# The GOAWAY error code and debug data are recorded in the connection state.
defp handle_goaway(conn, frame, responses) do
  goaway(
    last_stream_id: last_stream_id,
    error_code: error_code,
    debug_data: debug_data
  ) = frame

  # We gather all the unprocessed requests and form {:error, _, _} tuples for each one.
  # At the same time, we delete all the unprocessed requests from the stream set.
  {unprocessed_request_responses, conn} =
    Enum.flat_map_reduce(conn.streams, conn, fn
      {stream_id, _stream}, conn_acc when stream_id <= last_stream_id ->
        {[], conn_acc}

      {_stream_id, stream}, conn_acc ->
        conn_acc = delete_stream(conn_acc, stream)
        {[{:error, stream.ref, wrap_error(:unprocessed)}], conn_acc}
    end)

  conn = put_in(conn.state, {:goaway, error_code, debug_data})
  {conn, unprocessed_request_responses ++ responses}
end
# WINDOW_UPDATE
#
# Stream ID 0 targets the connection-level window; overflowing the maximum
# window size is a connection-level flow-control error.
defp handle_window_update(
       conn,
       window_update(stream_id: 0, window_size_increment: wsi),
       responses
     ) do
  new_window_size = conn.window_size + wsi

  if new_window_size > @max_window_size do
    send_connection_error!(conn, :flow_control_error, "window size too big")
  else
    {put_in(conn.window_size, new_window_size), responses}
  end
end

# A non-zero stream ID targets that stream's window; overflow there is a
# stream-level error (the stream is reset, the connection survives).
defp handle_window_update(
       conn,
       window_update(stream_id: stream_id, window_size_increment: wsi),
       responses
     ) do
  stream = fetch_stream!(conn, stream_id)
  new_window_size = stream.window_size + wsi

  if new_window_size > @max_window_size do
    conn = close_stream!(conn, stream_id, :flow_control_error)
    error = wrap_error({:flow_control_error, "window size too big"})
    {conn, [{:error, stream.ref, error} | responses]}
  else
    {put_in(conn.streams[stream_id].window_size, new_window_size), responses}
  end
end
# CONTINUATION
#
# Carries the next chunk of a header block started by a previous HEADERS or
# PUSH_PROMISE frame. conn.headers_being_processed holds
# {stream_id, hbf_iodata_so_far, callback}, where the callback was set by the
# frame that started the block; it runs once END_HEADERS arrives with the
# complete, flattened header block fragment.
defp handle_continuation(conn, frame, responses) do
  continuation(stream_id: stream_id, flags: flags, hbf: hbf_chunk) = frame

  # The stream may be nil if it was closed in the meantime; the callback still
  # runs so the HPACK decode table stays in sync with the server.
  stream = Map.get(conn.streams, stream_id)

  if stream do
    assert_stream_in_state(conn, stream, [:open, :half_closed_local, :reserved_remote])
  end

  # Asserts this CONTINUATION belongs to the header block being accumulated.
  {^stream_id, hbf_acc, callback} = conn.headers_being_processed

  if flag_set?(flags, :continuation, :end_headers) do
    hbf = IO.iodata_to_binary([hbf_acc, hbf_chunk])
    conn = put_in(conn.headers_being_processed, nil)
    callback.(conn, responses, hbf, stream)
  else
    conn = put_in(conn.headers_being_processed, {stream_id, [hbf_acc, hbf_chunk], callback})
    {conn, responses}
  end
end
## General helpers

# Sends a GOAWAY frame, closes the socket, marks the connection :closed, and
# throws {:mint, conn, error} — this function never returns normally.
# NOTE(review): last_stream_id is hard-coded to 2; presumably acceptable since
# we are abandoning the connection anyway — confirm intent.
defp send_connection_error!(conn, error_code, debug_data) do
  frame =
    goaway(stream_id: 0, last_stream_id: 2, error_code: error_code, debug_data: debug_data)

  conn = send!(conn, Frame.encode(frame))
  _ = conn.transport.close(conn.socket)
  conn = put_in(conn.state, :closed)
  throw({:mint, conn, wrap_error({error_code, debug_data})})
end
# Closes a stream by sending RST_STREAM with the given error code; in this
# implementation a :closed stream is simply removed from the stream set.
defp close_stream!(conn, stream_id, error_code) do
  stream = Map.fetch!(conn.streams, stream_id)

  conn
  |> send!(Frame.encode(rst_stream(stream_id: stream_id, error_code: error_code)))
  |> delete_stream(stream)
end
# Removes a stream from both lookup maps and, if it was still open in some
# form, decrements the matching open-stream counter.
defp delete_stream(conn, stream) do
  open? = stream.state in [:open, :half_closed_local, :half_closed_remote]

  conn = update_in(conn.streams, &Map.delete(&1, stream.id))
  conn = update_in(conn.ref_to_stream_id, &Map.delete(&1, stream.ref))

  cond do
    not open? ->
      conn

    # Odd IDs are client-initiated streams; even IDs are server-initiated.
    Integer.is_odd(stream.id) ->
      update_in(conn.open_client_stream_count, &(&1 - 1))

    true ->
      update_in(conn.open_server_stream_count, &(&1 - 1))
  end
end
# Looks up a stream by ID, throwing a {:stream_not_found, id} error tuple
# when the stream is unknown.
defp fetch_stream!(conn, stream_id) do
  case conn.streams do
    %{^stream_id => stream} -> stream
    _streams -> throw({:mint, conn, wrap_error({:stream_not_found, stream_id})})
  end
end
# Raises a connection-level protocol error (via send_connection_error!/3,
# which throws) when the stream's state is outside the expected set.
defp assert_stream_in_state(conn, %{state: state}, expected_states) do
  if state not in expected_states do
    expected = Enum.map_join(expected_states, ", ", &inspect/1)

    send_connection_error!(
      conn,
      :protocol_error,
      "stream was in state #{inspect(state)} and not in one of the expected states: " <> expected
    )
  end
end
# Writes bytes on the socket, returning the (unchanged) conn on success.
# When the transport reports :closed, the conn is marked :closed before
# throwing; any other send error is thrown with the conn untouched. The
# thrown term always has the {:mint, conn, error} shape.
defp send!(%Mint.HTTP2{transport: transport, socket: socket} = conn, bytes) do
  case transport.send(socket, bytes) do
    :ok ->
      conn

    {:error, %TransportError{reason: :closed} = error} ->
      throw({:mint, %{conn | state: :closed}, error})

    {:error, reason} ->
      throw({:mint, conn, reason})
  end
end
# Wraps a raw reason term into an %HTTPError{} attributed to this module.
defp wrap_error(reason) do
  %HTTPError{module: __MODULE__, reason: reason}
end
# Turns the :reason of an %HTTPError{} produced by this module into a
# human-readable message. Clauses are grouped by theme below.
@doc false
def format_error(reason)

# Connection lifecycle.

def format_error(:closed) do
  "the connection is closed"
end

def format_error(:closed_for_writing) do
  "the connection is closed for writing, which means that you cannot issue any more " <>
    "requests on the connection but you can expect responses to still be delivered for " <>
    "part of the requests that are in flight. If a connection is closed for writing, " <>
    "it usually means that you got a :server_closed_request error already."
end

# Request/stream-level errors.

def format_error(:too_many_concurrent_requests) do
  "the number of max concurrent HTTP/2 requests supported by the server has been reached. " <>
    "Use Mint.HTTP2.get_server_setting/2 with the :max_concurrent_streams setting name " <>
    "to find out the maximum number of concurrent requests supported by the server."
end

def format_error({:max_header_list_size_exceeded, size, max_size}) do
  "the given header list (of size #{size}) goes over the max header list size of " <>
    "#{max_size} supported by the server. In HTTP/2, the header list size is calculated " <>
    "by summing up the size in bytes of each header name, value, plus 32 for each header."
end

def format_error({:exceeds_window_size, what, window_size}) do
  what =
    case what do
      :request -> "request"
      :connection -> "connection"
    end

  "the given data exceeds the #{what} window size, which is #{window_size}. " <>
    "The server will refill the window size of the #{what} when ready. This will be " <>
    "handled transparently by stream/2."
end

def format_error({:stream_not_found, stream_id}) do
  "request not found (with stream_id #{inspect(stream_id)})"
end

def format_error(:unknown_request_to_stream) do
  "can't stream chunk of data because the request is unknown"
end

def format_error(:request_is_not_streaming) do
  "can't send more data on this request since it's not streaming"
end

def format_error({:unallowed_trailing_header, {name, value}}) do
  "header #{inspect(name)} (with value #{inspect(value)}) is not allowed as a trailing header"
end

def format_error(:missing_status_header) do
  "the :status pseudo-header (which is required in HTTP/2) is missing from the response"
end

# Errors originating from the server.

def format_error({:server_closed_request, error_code}) do
  "server closed request with error code #{inspect(error_code)}"
end

def format_error({:server_closed_connection, error, debug_data}) do
  "server closed connection with error code #{inspect(error)} and debug data: " <> debug_data
end

def format_error(:unprocessed) do
  "request was not processed by the server, which means that it's safe to retry on a " <>
    "different or new connection"
end

# Protocol-level errors.

def format_error({:frame_size_error, frame}) do
  "frame size error for #{inspect(frame)} frame"
end

def format_error({:protocol_error, debug_data}) do
  "protocol error: " <> debug_data
end

def format_error({:compression_error, debug_data}) do
  "compression error: " <> debug_data
end

def format_error({:flow_control_error, debug_data}) do
  "flow control error: " <> debug_data
end
end
|
lib/mint/http2.ex
| 0.897933
| 0.563438
|
http2.ex
|
starcoder
|
defmodule AWS.SageMaker do
@moduledoc """
Provides APIs for creating and managing Amazon SageMaker resources.
Other Resources:
* [Amazon SageMaker Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user)
* [Amazon Augmented AI Runtime API Reference](https://docs.aws.amazon.com/augmented-ai/2019-11-07/APIReference/Welcome.html)
"""
alias AWS.Client
alias AWS.Request
# Static AWS service descriptor for SageMaker, passed to AWS.Request by every
# wrapper in this module: SigV4 signing as "sagemaker", the "api.sagemaker"
# endpoint prefix, and the JSON 1.1 protocol with target prefix "SageMaker".
def metadata do
  %AWS.ServiceMetadata{
    abbreviation: "SageMaker",
    api_version: "2017-07-24",
    content_type: "application/x-amz-json-1.1",
    credential_scope: nil,
    endpoint_prefix: "api.sagemaker",
    global?: false,
    protocol: "json",
    service_id: "SageMaker",
    signature_version: "v4",
    signing_name: "sagemaker",
    target_prefix: "SageMaker"
  }
end
@doc """
Creates an *association* between the source and the destination.
A source can be associated with multiple destinations, and a destination can be
associated with multiple sources. An association is a lineage tracking entity.
For more information, see [Amazon SageMaker ML Lineage Tracking](https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html).
"""
def add_association(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "AddAssociation", input, opts)
end
@doc """
Adds or overwrites one or more tags for the specified Amazon SageMaker resource.
You can add tags to notebook instances, training jobs, hyperparameter tuning
jobs, batch transform jobs, models, labeling jobs, work teams, endpoint
configurations, and endpoints.
Each tag consists of a key and an optional value. Tag keys must be unique per
resource. For more information about tags, see [AWS Tagging
Strategies](https://aws.amazon.com/answers/account-management/aws-tagging-strategies/).
Tags that you add to a hyperparameter tuning job by calling this API are also
added to any training jobs that the hyperparameter tuning job launches after you
call this API, but not to training jobs that the hyperparameter tuning job
launched before you called this API. To make sure that the tags associated with
a hyperparameter tuning job are also added to all training jobs that the
hyperparameter tuning job launches, add the tags when you first create the
tuning job by specifying them in the `Tags` parameter of
`CreateHyperParameterTuningJob`
"""
def add_tags(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "AddTags", input, opts)
end
@doc """
Associates a trial component with a trial.
A trial component can be associated with multiple trials. To disassociate a
trial component from a trial, call the `DisassociateTrialComponent` API.
"""
def associate_trial_component(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "AssociateTrialComponent", input, opts)
end
@doc """
Creates an *action*.
An action is a lineage tracking entity that represents an action or activity.
For example, a model deployment or an HPO job. Generally, an action involves at
least one input or output artifact. For more information, see [Amazon SageMaker ML Lineage
Tracking](https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html).
"""
def create_action(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateAction", input, opts)
end
@doc """
Create a machine learning algorithm that you can use in Amazon SageMaker and
list in the AWS Marketplace.
"""
def create_algorithm(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateAlgorithm", input, opts)
end
@doc """
Creates a running App for the specified UserProfile.
Supported Apps are JupyterServer and KernelGateway. This operation is
automatically invoked by Amazon SageMaker Studio upon access to the associated
Domain, and when new kernel configurations are selected by the user. A user may
have multiple Apps active simultaneously.
"""
def create_app(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateApp", input, opts)
end
@doc """
Creates a configuration for running a SageMaker image as a KernelGateway app.
The configuration specifies the Amazon Elastic File System (EFS) storage volume
on the image, and a list of the kernels in the image.
"""
def create_app_image_config(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateAppImageConfig", input, opts)
end
@doc """
Creates an *artifact*.
An artifact is a lineage tracking entity that represents a URI addressable
object or data. Some examples are the S3 URI of a dataset and the ECR registry
path of an image. For more information, see [Amazon SageMaker ML Lineage Tracking](https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html).
"""
def create_artifact(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateArtifact", input, opts)
end
@doc """
Creates an Autopilot job.
Find the best performing model after you run an Autopilot job by calling the
`DescribeAutoMLJob` API.
Deploy that model by following the steps described in [Step 6.1: Deploy the Model to Amazon SageMaker Hosting
Services](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html).
For information about how to use Autopilot, see [ Automate Model Development with Amazon SageMaker
Autopilot](https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html).
"""
def create_auto_ml_job(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateAutoMLJob", input, opts)
end
@doc """
Creates a Git repository as a resource in your Amazon SageMaker account.
You can associate the repository with notebook instances so that you can use Git
source control for the notebooks you create. The Git repository is a resource in
your Amazon SageMaker account, so it can be associated with more than one
notebook instance, and it persists independently from the lifecycle of any
notebook instances it is associated with.
The repository can be hosted either in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
or in any other Git repository.
"""
def create_code_repository(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateCodeRepository", input, opts)
end
@doc """
Starts a model compilation job.
After the model has been compiled, Amazon SageMaker saves the resulting model
artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you
specify.
If you choose to host your model using Amazon SageMaker hosting services, you
can use the resulting model artifacts as part of the model. You can also use the
artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.
In the request body, you provide the following:
* A name for the compilation job
* Information about the input model artifacts
* The output location for the compiled model and the device (target)
that the model runs on
* The Amazon Resource Name (ARN) of the IAM role that Amazon
SageMaker assumes to perform the model compilation job.
You can also provide a `Tag` to track the model compilation job's resource use
and costs. The response body contains the `CompilationJobArn` for the compiled
job.
To stop a model compilation job, use `StopCompilationJob`. To get information
about a particular model compilation job, use `DescribeCompilationJob`. To get
information about multiple model compilation jobs, use `ListCompilationJobs`.
"""
def create_compilation_job(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateCompilationJob", input, opts)
end
@doc """
Creates a *context*.
A context is a lineage tracking entity that represents a logical grouping of
other tracking or experiment entities. Some examples are an endpoint and a model
package. For more information, see [Amazon SageMaker ML Lineage Tracking](https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html).
"""
def create_context(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateContext", input, opts)
end
@doc """
Creates a definition for a job that monitors data quality and drift.
For information about model monitor, see [Amazon SageMaker Model Monitor](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html).
"""
def create_data_quality_job_definition(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateDataQualityJobDefinition", input, opts)
end
@doc """
Creates a device fleet.
"""
def create_device_fleet(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateDeviceFleet", input, opts)
end
@doc """
Creates a `Domain` used by Amazon SageMaker Studio.
A domain consists of an associated Amazon Elastic File System (EFS) volume, a
list of authorized users, and a variety of security, application, policy, and
Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to
one domain per region. Users within a domain can share notebook files and other
artifacts with each other.
## EFS storage
When a domain is created, an EFS volume is created for use by all of the users
within the domain. Each user receives a private home directory within the EFS
volume for notebooks, Git repositories, and data files.
SageMaker uses the AWS Key Management Service (AWS KMS) to encrypt the EFS
volume attached to the domain with an AWS managed customer master key (CMK) by
default. For more control, you can specify a customer managed CMK. For more
information, see [Protect Data at Rest Using Encryption](https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html).
## VPC configuration
All SageMaker Studio traffic between the domain and the EFS volume is through
the specified VPC and subnets. For other Studio traffic, you can specify the
`AppNetworkAccessType` parameter. `AppNetworkAccessType` corresponds to the
network access type that you choose when you onboard to Studio. The following
options are available:
* `PublicInternetOnly` - Non-EFS traffic goes through a VPC managed
by Amazon SageMaker, which allows internet access. This is the default value.
* `VpcOnly` - All Studio traffic is through the specified VPC and
subnets. Internet access is disabled by default. To allow internet access, you
must specify a NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or
to train or host models unless your VPC has an interface endpoint to the
SageMaker API and runtime or a NAT gateway and your security groups allow
outbound connections.
For more information, see [Connect SageMaker Studio Notebooks to Resources in a VPC](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html).
"""
def create_domain(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateDomain", input, opts)
end
@doc """
Starts a SageMaker Edge Manager model packaging job.
Edge Manager will use the model artifacts from the Amazon Simple Storage Service
bucket that you specify. After the model has been packaged, Amazon SageMaker
saves the resulting artifacts to an S3 bucket that you specify.
"""
def create_edge_packaging_job(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateEdgePackagingJob", input, opts)
end
@doc """
Creates an endpoint using the endpoint configuration specified in the request.
Amazon SageMaker uses the endpoint to provision resources and deploy models. You
create the endpoint configuration with the `CreateEndpointConfig` API.
Use this API to deploy models using Amazon SageMaker hosting services.
For an example that calls this method when deploying a model to Amazon SageMaker
hosting services, see [Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
You must not delete an `EndpointConfig` that is in use by an endpoint that is
live or while the `UpdateEndpoint` or `CreateEndpoint` operations are being
performed on the endpoint. To update an endpoint, you must create a new
`EndpointConfig`.
The endpoint name must be unique within an AWS Region in your AWS account.
When it receives the request, Amazon SageMaker creates the endpoint, launches
the resources (ML compute instances), and deploys the model(s) on them.
When you call `CreateEndpoint`, a load call is made to DynamoDB to verify that
your endpoint configuration exists. When you read data from a DynamoDB table
supporting [ `Eventually Consistent Reads`
](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html),
the response might not reflect the results of a recently completed write
operation. The response might include some stale data. If the dependent entities
are not yet in DynamoDB, this causes a validation error. If you repeat your read
request after a short time, the response should return the latest data. So retry
logic is recommended to handle these possible issues. We also recommend that
customers call `DescribeEndpointConfig` before calling `CreateEndpoint` to
minimize the potential impact of a DynamoDB eventually consistent read.
When Amazon SageMaker receives the request, it sets the endpoint status to
`Creating`. After it creates the endpoint, it sets the status to `InService`.
Amazon SageMaker can then process incoming requests for inferences. To check the
status of an endpoint, use the `DescribeEndpoint` API.
If any of the models hosted at this endpoint get model data from an Amazon S3
location, Amazon SageMaker uses AWS Security Token Service to download model
artifacts from the S3 path you provided. AWS STS is activated in your IAM user
account by default. If you previously deactivated AWS STS for a region, you need
to reactivate AWS STS for that region. For more information, see [Activating and Deactivating AWS STS in an AWS
Region](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
in the *AWS Identity and Access Management User Guide*.
To add the IAM role policies for using this API operation, go to the [IAM console](https://console.aws.amazon.com/iam/), and choose Roles in the left
navigation pane. Search the IAM role that you want to grant access to use the
`CreateEndpoint` and `CreateEndpointConfig` API operations, add the following
policies to the role.
Option 1: For a full Amazon SageMaker access, search and attach the
`AmazonSageMakerFullAccess` policy.
Option 2: For granting a limited access to an IAM role, paste the
following Action elements manually into the JSON file of the IAM role:
`"Action": ["sagemaker:CreateEndpoint", "sagemaker:CreateEndpointConfig"]` `"Resource": [`
`"arn:aws:sagemaker:region:account-id:endpoint/endpointName"`
`"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName"`
`]`
For more information, see [Amazon SageMaker API Permissions: Actions, Permissions, and Resources
Reference](https://docs.aws.amazon.com/sagemaker/latest/dg/api-permissions-reference.html).
"""
def create_endpoint(%Client{} = client, input, opts \\ []) do
  Request.request_post(client, metadata(), "CreateEndpoint", input, opts)
end
@doc """
Creates an endpoint configuration that Amazon SageMaker hosting services uses to
deploy models.
In the configuration, you identify one or more models, created using the
`CreateModel` API, to deploy and the resources that you want Amazon SageMaker to
provision. Then you call the `CreateEndpoint` API.
Use this API if you want to use Amazon SageMaker hosting services to deploy
models into production.
In the request, you define a `ProductionVariant` for each model that you want
to deploy. Each `ProductionVariant` parameter also describes the resources that
you want Amazon SageMaker to provision. This includes the number and type of ML
compute instances to deploy.
If you are hosting multiple models, you also assign a `VariantWeight` to specify
how much traffic you want to allocate to each model. For example, suppose that
you want to host two models, A and B, and you assign traffic weight 2 for model
A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to
Model A, and one-third to model B.
For an example that calls this method when deploying a model to Amazon SageMaker
hosting services, see [Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
When you call `CreateEndpoint`, a load call is made to DynamoDB to verify that
your endpoint configuration exists. When you read data from a DynamoDB table
supporting [ `Eventually Consistent Reads`
](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html),
the response might not reflect the results of a recently completed write
operation. The response might include some stale data. If the dependent entities
are not yet in DynamoDB, this causes a validation error. If you repeat your read
request after a short time, the response should return the latest data. So retry
logic is recommended to handle these possible issues. We also recommend that
customers call `DescribeEndpointConfig` before calling `CreateEndpoint` to
minimize the potential impact of a DynamoDB eventually consistent read.
"""
def create_endpoint_config(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateEndpointConfig", input, options)
end
@doc """
Creates a SageMaker *experiment*.
An experiment is a collection of *trials* that are observed, compared and
evaluated as a group. A trial is a set of steps, called *trial components*, that
produce a machine learning model.
The goal of an experiment is to determine the components that produce the best
model. Multiple trials are performed, each one isolating and measuring the
impact of a change to one or more inputs, while keeping the remaining inputs
constant.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all
experiments, trials, and trial components are automatically tracked, logged, and
indexed. When you use the AWS SDK for Python (Boto), you must use the logging
APIs provided by the SDK.
You can add tags to experiments, trials, trial components and then use the
`Search` API to search for the tags.
To add a description to an experiment, specify the optional `Description`
parameter. To add a description later, or to change the description, call the
`UpdateExperiment` API.
To get a list of all your experiments, call the `ListExperiments` API. To view
an experiment's properties, call the `DescribeExperiment` API. To get a list of
all the trials associated with an experiment, call the `ListTrials` API. To
create a trial call the `CreateTrial` API.
"""
def create_experiment(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateExperiment", input, options)
end
@doc """
Create a new `FeatureGroup`.
A `FeatureGroup` is a group of `Features` defined in the `FeatureStore` to
describe a `Record`.
The `FeatureGroup` defines the schema and features contained in the
FeatureGroup. A `FeatureGroup` definition is composed of a list of `Features`, a
`RecordIdentifierFeatureName`, an `EventTimeFeatureName` and configurations for
its `OnlineStore` and `OfflineStore`. Check [AWS service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
to see the `FeatureGroup`s quota for your AWS account.
You must include at least one of `OnlineStoreConfig` and `OfflineStoreConfig` to
create a `FeatureGroup`.
"""
def create_feature_group(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateFeatureGroup", input, options)
end
@doc """
Creates a flow definition.
"""
def create_flow_definition(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateFlowDefinition", input, options)
end
@doc """
Defines the settings you will use for the human review workflow user interface.
Reviewers will see a three-panel interface with an instruction area, the item to
review, and an input area.
"""
def create_human_task_ui(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateHumanTaskUi", input, options)
end
@doc """
Starts a hyperparameter tuning job.
A hyperparameter tuning job finds the best version of a model by running many
training jobs on your dataset using the algorithm you choose and values for
hyperparameters within ranges that you specify. It then chooses the
hyperparameter values that result in a model that performs the best, as measured
by an objective metric that you choose.
"""
def create_hyper_parameter_tuning_job(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateHyperParameterTuningJob", input, options)
end
@doc """
Creates a custom SageMaker image.
A SageMaker image is a set of image versions. Each image version represents a
container image stored in Amazon Elastic Container Registry (ECR). For more
information, see [Bring your own SageMaker image](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html).
"""
def create_image(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateImage", input, options)
end
@doc """
Creates a version of the SageMaker image specified by `ImageName`.
The version represents the Amazon Elastic Container Registry (ECR) container
image specified by `BaseImage`.
"""
def create_image_version(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateImageVersion", input, options)
end
@doc """
Creates a job that uses workers to label the data objects in your input dataset.
You can use the labeled data to train machine learning models.
You can select your workforce from one of three providers:
  * A private workforce that you create. It can include employees,
contractors, and outside experts. Use a private workforce when you want the data
to stay within your organization or when a specific set of skills is required.
  * One or more vendors that you select from the AWS Marketplace.
Vendors provide expertise in specific areas.
  * The Amazon Mechanical Turk workforce. This is the largest
workforce, but it should only be used for public data or data that has been
stripped of any personally identifiable information.
You can also use *automated data labeling* to reduce the number of data objects
that need to be labeled by a human. Automated data labeling uses *active
learning* to determine if a data object can be labeled by machine or if it needs
to be sent to a human worker. For more information, see [Using Automated Data Labeling](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html).
The data objects to be labeled are contained in an Amazon S3 bucket. You create
a *manifest file* that describes the location of each object. For more
information, see [Using Input and Output Data](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html).
The output can be used as the manifest file for another labeling job or as
training data for your machine learning models.
You can use this operation to create a static labeling job or a streaming
labeling job. A static labeling job stops if all data objects in the input
manifest file identified in `ManifestS3Uri` have been labeled. A streaming
labeling job runs perpetually until it is manually stopped, or remains idle for
10 days. You can send new data objects to an active (`InProgress`) streaming
labeling job in real time. To learn how to create a static labeling job, see
[Create a Labeling Job (API)
](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-create-labeling-job-api.html)
in the Amazon SageMaker Developer Guide. To learn how to create a streaming
labeling job, see [Create a Streaming Labeling Job](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-streaming-create-job.html).
"""
def create_labeling_job(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateLabelingJob", input, options)
end
@doc """
Creates a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For the
primary container, you specify the Docker image that contains inference code,
artifacts (from prior training), and a custom environment map that the inference
code uses when you deploy the model for predictions.
Use this API to create a model if you want to use Amazon SageMaker hosting
services or run a batch transform job.
To host your model, you create an endpoint configuration with the
`CreateEndpointConfig` API, and then create an endpoint with the
`CreateEndpoint` API. Amazon SageMaker then deploys all of the containers that
you defined for the model in the hosting environment.
For an example that calls this method when deploying a model to Amazon SageMaker
hosting services, see [Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto
3)).](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto)
To run a batch transform using your model, you start a job with the
`CreateTransformJob` API. Amazon SageMaker uses your model and your dataset to
get inferences which are then saved to a specified S3 location.
In the `CreateModel` request, you must define a container with the
`PrimaryContainer` parameter.
In the request, you also provide an IAM role that Amazon SageMaker can assume to
access model artifacts and docker image for deployment on ML compute hosting
instances or for batch transform jobs. In addition, you also use the IAM role to
manage permissions the inference code needs. For example, if the inference code
accesses any other AWS resources, you grant necessary permissions via this role.
"""
def create_model(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateModel", input, options)
end
@doc """
Creates the definition for a model bias job.
"""
def create_model_bias_job_definition(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateModelBiasJobDefinition", input, options)
end
@doc """
Creates the definition for a model explainability job.
"""
def create_model_explainability_job_definition(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateModelExplainabilityJobDefinition", input, options)
end
@doc """
Creates a model package that you can use to create Amazon SageMaker models or
list on AWS Marketplace, or a versioned model that is part of a model group.
Buyers can subscribe to model packages listed on AWS Marketplace to create
models in Amazon SageMaker.
To create a model package by specifying a Docker container that contains your
inference code and the Amazon S3 location of your model artifacts, provide
values for `InferenceSpecification`. To create a model from an algorithm
resource that you created or subscribed to in AWS Marketplace, provide a value
for `SourceAlgorithmSpecification`.
There are two types of model packages:
Versioned - a model that is part of a model group in the model
registry.
Unversioned - a model package that is not part of a model group.
"""
def create_model_package(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateModelPackage", input, options)
end
@doc """
Creates a model group.
A model group contains a group of model versions.
"""
def create_model_package_group(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateModelPackageGroup", input, options)
end
@doc """
Creates a definition for a job that monitors model quality and drift.
For information about model monitor, see [Amazon SageMaker Model Monitor](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html).
"""
def create_model_quality_job_definition(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateModelQualityJobDefinition", input, options)
end
@doc """
Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to
monitor the data captured for an Amazon SageMaker Endpoint.
"""
def create_monitoring_schedule(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateMonitoringSchedule", input, options)
end
@doc """
Creates an Amazon SageMaker notebook instance.
A notebook instance is a machine learning (ML) compute instance running on a
Jupyter notebook.
In a `CreateNotebookInstance` request, specify the type of ML compute instance
that you want to run. Amazon SageMaker launches the instance, installs common
libraries that you can use to explore datasets for model training, and attaches
an ML storage volume to the notebook instance.
Amazon SageMaker also provides a set of example notebooks. Each notebook
demonstrates how to use Amazon SageMaker with a specific algorithm or with a
machine learning framework.
After receiving the request, Amazon SageMaker does the following:
  1. Creates a network interface in the Amazon SageMaker VPC.
  2. (Optional) If you specified `SubnetId`, Amazon SageMaker creates a
network interface in your own VPC, which is inferred from the subnet ID that you
provide in the input. When creating this network interface, Amazon SageMaker
attaches the security group that you specified in the request to the network
interface that it creates in your VPC.
  3. Launches an EC2 instance of the type specified in the request in
the Amazon SageMaker VPC. If you specified `SubnetId` of your VPC, Amazon
SageMaker specifies both network interfaces when launching this instance. This
enables inbound traffic from your own VPC to the notebook instance, assuming
that the security groups allow it.
After creating the notebook instance, Amazon SageMaker returns its Amazon
Resource Name (ARN). You can't change the name of a notebook instance after you
create it.
After Amazon SageMaker creates the notebook instance, you can connect to the
Jupyter server and work in Jupyter notebooks. For example, you can write code to
explore a dataset that you can use for model training, train a model, host
models by creating Amazon SageMaker endpoints, and validate hosted models.
For more information, see [How It Works](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html).
"""
def create_notebook_instance(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateNotebookInstance", input, options)
end
@doc """
Creates a lifecycle configuration that you can associate with a notebook
instance.
A *lifecycle configuration* is a collection of shell scripts that run when you
create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the `$PATH` environment variable that is available to both scripts
is `/sbin:bin:/usr/sbin:/usr/bin`.
View CloudWatch Logs for notebook instance lifecycle configurations in log group
`/aws/sagemaker/NotebookInstances` in log stream
`[notebook-instance-name]/[LifecycleConfigHook]`. Lifecycle configuration scripts cannot run for longer than 5 minutes. If a
script runs for longer than 5 minutes, it fails and the notebook instance is not
created or started.
For information about notebook instance lifecycle configurations, see [Step 2.1:
(Optional) Customize a Notebook
Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html).
"""
def create_notebook_instance_lifecycle_config(%Client{} = client, input, options \\ []) do
  Request.request_post(
    client,
    metadata(),
    "CreateNotebookInstanceLifecycleConfig",
    input,
    options
  )
end
@doc """
Creates a pipeline using a JSON pipeline definition.
"""
def create_pipeline(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreatePipeline", input, options)
end
@doc """
Creates a URL for a specified UserProfile in a Domain.
When accessed in a web browser, the user will be automatically signed in to
Amazon SageMaker Studio, and granted access to all of the Apps and files
associated with the Domain's Amazon Elastic File System (EFS) volume. This
operation can only be called when the authentication mode equals IAM.
The URL that you get from a call to `CreatePresignedDomainUrl` is valid only for
5 minutes. If you try to use the URL after the 5-minute limit expires, you are
directed to the AWS console sign-in page.
"""
def create_presigned_domain_url(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreatePresignedDomainUrl", input, options)
end
@doc """
Returns a URL that you can use to connect to the Jupyter server from a notebook
instance.
In the Amazon SageMaker console, when you choose `Open` next to a notebook
instance, Amazon SageMaker opens a new tab showing the Jupyter server home page
from the notebook instance. The console uses this API to get the URL and show
the page.
The IAM role or user used to call this API defines the permissions to access the
notebook instance. Once the presigned URL is created, no additional permission
is required to access this URL. IAM authorization policies for this API are also
enforced for every HTTP request and WebSocket frame that attempts to connect to
the notebook instance.
You can restrict access to this API and to the URL that it returns to a list of
IP addresses that you specify. Use the `NotIpAddress` condition operator and the
`aws:SourceIP` condition context key to specify the list of IP addresses that
you want to have access to the notebook instance. For more information, see
[Limit Access to a Notebook Instance by IP Address](https://docs.aws.amazon.com/sagemaker/latest/dg/security_iam_id-based-policy-examples.html#nbi-ip-filter).
The URL that you get from a call to `CreatePresignedNotebookInstanceUrl` is
valid only for 5 minutes. If you try to use the URL after the 5-minute limit
expires, you are directed to the AWS console sign-in page.
"""
def create_presigned_notebook_instance_url(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreatePresignedNotebookInstanceUrl", input, options)
end
@doc """
Creates a processing job.
"""
def create_processing_job(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateProcessingJob", input, options)
end
@doc """
Creates a machine learning (ML) project that can contain one or more templates
that set up an ML pipeline from training to deploying an approved model.
"""
def create_project(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateProject", input, options)
end
@doc """
Starts a model training job.
After training completes, Amazon SageMaker saves the resulting model artifacts
to an Amazon S3 location that you specify.
If you choose to host your model using Amazon SageMaker hosting services, you
can use the resulting model artifacts as part of the model. You can also use the
artifacts in a machine learning service other than Amazon SageMaker, provided
that you know how to use them for inference.
In the request body, you provide the following:
  * `AlgorithmSpecification` - Identifies the training algorithm to
use.
  * `HyperParameters` - Specify these algorithm-specific parameters to
enable the estimation of model parameters during training. Hyperparameters can
be tuned to optimize this learning process. For a list of hyperparameters for
each training algorithm provided by Amazon SageMaker, see
[Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
  * `InputDataConfig` - Describes the training dataset and the Amazon
S3, EFS, or FSx location where it is stored.
  * `OutputDataConfig` - Identifies the Amazon S3 bucket where you
want Amazon SageMaker to save the results of model training.
  * `ResourceConfig` - Identifies the resources, ML compute instances,
and ML storage volumes to deploy for model training. In distributed training,
you specify more than one instance.
  * `EnableManagedSpotTraining` - Optimize the cost of training
machine learning models by up to 80% by using Amazon EC2 Spot instances. For
more information, see [Managed Spot
Training](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html).
  * `RoleArn` - The Amazon Resource Name (ARN) that Amazon SageMaker
assumes to perform tasks on your behalf during model training. You must grant
this role the necessary permissions so that Amazon SageMaker can successfully
complete model training.
  * `StoppingCondition` - To help cap training costs, use
`MaxRuntimeInSeconds` to set a time limit for training. Use
`MaxWaitTimeInSeconds` to specify how long you are willing to wait for a managed
spot training job to complete.
For more information about Amazon SageMaker, see [How It Works](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html).
"""
def create_training_job(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "CreateTrainingJob", input, options)
end
@doc """
Starts a transform job.
A transform job uses a trained model to get inferences on a dataset and saves
these results to an Amazon S3 location that you specify.
To perform batch transformations, you create a transform job and use the data
that you have readily available.
In the request body, you provide the following:
  * `TransformJobName` - Identifies the transform job. The name must
be unique within an AWS Region in an AWS account.
  * `ModelName` - Identifies the model to use. `ModelName` must be the
name of an existing Amazon SageMaker model in the same AWS Region and AWS
account. For information on creating a model, see `CreateModel`.
  * `TransformInput` - Describes the dataset to be transformed and the
Amazon S3 location where it is stored.
  * `TransformOutput` - Identifies the Amazon S3 location where you
want Amazon SageMaker to save the results from the transform job.
  * `TransformResources` - Identifies the ML compute instances for the
transform job.
For more information about how batch transformation works, see [Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html).
"""
def create_transform_job(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateTransformJob", input, options)
end
@doc """
Creates an Amazon SageMaker *trial*.
A trial is a set of steps called *trial components* that produce a machine
learning model. A trial is part of a single Amazon SageMaker *experiment*.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all
experiments, trials, and trial components are automatically tracked, logged, and
indexed. When you use the AWS SDK for Python (Boto), you must use the logging
APIs provided by the SDK.
You can add tags to a trial and then use the `Search` API to search for the
tags.
To get a list of all your trials, call the `ListTrials` API. To view a trial's
properties, call the `DescribeTrial` API. To create a trial component, call the
`CreateTrialComponent` API.
"""
def create_trial(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateTrial", input, options)
end
@doc """
Creates a *trial component*, which is a stage of a machine learning *trial*.
A trial is composed of one or more trial components. A trial component can be
used in multiple trials.
Trial components include pre-processing jobs, training jobs, and batch transform
jobs.
When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all
experiments, trials, and trial components are automatically tracked, logged, and
indexed. When you use the AWS SDK for Python (Boto), you must use the logging
APIs provided by the SDK.
You can add tags to a trial component and then use the `Search` API to search
for the tags.
`CreateTrialComponent` can only be invoked from within an Amazon SageMaker
managed environment. This includes Amazon SageMaker training jobs, processing
jobs, transform jobs, and Amazon SageMaker notebooks. A call to
`CreateTrialComponent` from outside one of these environments results in an
error.
"""
def create_trial_component(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateTrialComponent", input, options)
end
@doc """
Creates a user profile.
A user profile represents a single user within a domain, and is the main way to
reference a "person" for the purposes of sharing, reporting, and other
user-oriented features. This entity is created when a user onboards to Amazon
SageMaker Studio. If an administrator invites a person by email or imports them
from SSO, a user profile is automatically created. A user profile is the primary
holder of settings for an individual user and has a reference to the user's
private Amazon Elastic File System (EFS) home directory.
"""
def create_user_profile(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateUserProfile", input, options)
end
@doc """
Use this operation to create a workforce.
This operation will return an error if a workforce already exists in the AWS
Region that you specify. You can only create one workforce in each AWS Region
per AWS account.
If you want to create a new workforce in an AWS Region where a workforce already
exists, use the API operation to delete the existing workforce and then use
`CreateWorkforce` to create a new workforce.
To create a private workforce using Amazon Cognito, you must specify a Cognito
user pool in `CognitoConfig`. You can also create an Amazon Cognito workforce
using the Amazon SageMaker console. For more information, see [ Create a Private Workforce (Amazon
Cognito)](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html).
To create a private workforce using your own OIDC Identity Provider (IdP),
specify your IdP configuration in `OidcConfig`. Your OIDC IdP must support
*groups* because groups are used by Ground Truth and Amazon A2I to create work
teams. For more information, see [ Create a Private Workforce (OIDC IdP)](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private-oidc.html).
"""
def create_workforce(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateWorkforce", input, options)
end
@doc """
Creates a new work team for labeling your data.
A work team is defined by one or more Amazon Cognito user pools. You must first
create the user pools before you can create a work team.
You cannot create more than 25 work teams in an account and region.
"""
def create_workteam(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "CreateWorkteam", input, options)
end
@doc """
Deletes an action.
"""
def delete_action(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteAction", input, options)
end
@doc """
Removes the specified algorithm from your account.
"""
def delete_algorithm(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteAlgorithm", input, options)
end
@doc """
Used to stop and delete an app.
"""
def delete_app(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteApp", input, options)
end
@doc """
Deletes an AppImageConfig.
"""
def delete_app_image_config(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteAppImageConfig", input, options)
end
@doc """
Deletes an artifact.
Either `ArtifactArn` or `Source` must be specified.
"""
def delete_artifact(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteArtifact", input, options)
end
@doc """
Deletes an association.
"""
def delete_association(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteAssociation", input, options)
end
@doc """
Deletes the specified Git repository from your account.
"""
def delete_code_repository(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteCodeRepository", input, options)
end
@doc """
Deletes a context.
"""
def delete_context(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "DeleteContext", input, options)
end
@doc """
Deletes a data quality monitoring job definition.
"""
def delete_data_quality_job_definition(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteDataQualityJobDefinition", input, options)
end
@doc """
Deletes a device fleet.
"""
def delete_device_fleet(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "DeleteDeviceFleet", input, options)
end
@doc """
Used to delete a domain.
If you onboarded with IAM mode, you will need to delete your domain to onboard
again using SSO. Use with caution. All of the members of the domain will lose
access to their EFS volume, including data, notebooks, and other artifacts.
"""
def delete_domain(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteDomain", input, options)
end
@doc """
Deletes an endpoint.
Amazon SageMaker frees up all of the resources that were deployed when the
endpoint was created.
Amazon SageMaker retires any custom KMS key grants associated with the endpoint,
meaning you don't need to use the
[RevokeGrant](http://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html)
API call.
"""
def delete_endpoint(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteEndpoint", input, options)
end
@doc """
Deletes an endpoint configuration.
The `DeleteEndpointConfig` API deletes only the specified configuration. It does
not delete endpoints created using the configuration.
You must not delete an `EndpointConfig` in use by an endpoint that is live or
while the `UpdateEndpoint` or `CreateEndpoint` operations are being performed on
the endpoint. If you delete the `EndpointConfig` of an endpoint that is active
or being created or updated you may lose visibility into the instance type the
endpoint is using. The endpoint must be deleted in order to stop incurring
charges.
"""
def delete_endpoint_config(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteEndpointConfig", input, options)
end
@doc """
Deletes an Amazon SageMaker experiment.
All trials associated with the experiment must be deleted first. Use the
`ListTrials` API to get a list of the trials associated with the experiment.
"""
def delete_experiment(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteExperiment", input, options)
end
@doc """
Delete the `FeatureGroup` and any data that was written to the `OnlineStore` of
the `FeatureGroup`.
Data cannot be accessed from the `OnlineStore` immediately after
`DeleteFeatureGroup` is called.
Data written into the `OfflineStore` will not be deleted. The AWS Glue database
and tables that are automatically created for your `OfflineStore` are not
deleted.
"""
def delete_feature_group(%Client{} = client, input, options \\ []) do
  client
  |> Request.request_post(metadata(), "DeleteFeatureGroup", input, options)
end
@doc """
Deletes the specified flow definition.
"""
def delete_flow_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteFlowDefinition", input, options)
end
@doc """
Use this operation to delete a human task user interface (worker task template).
To see a list of human task user interfaces (work task templates) in your
account, use . When you delete a worker task template, it no longer appears when
you call `ListHumanTaskUis`.
"""
def delete_human_task_ui(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHumanTaskUi", input, options)
end
@doc """
Deletes a SageMaker image and all versions of the image.
The container images aren't deleted.
"""
def delete_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteImage", input, options)
end
@doc """
Deletes a version of a SageMaker image.
The container image the version represents isn't deleted.
"""
def delete_image_version(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteImageVersion", input, options)
end
@doc """
Deletes a model.
The `DeleteModel` API deletes only the model entry that was created in Amazon
SageMaker when you called the `CreateModel` API. It does not delete model
artifacts, inference code, or the IAM role that you specified when creating the
model.
"""
def delete_model(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModel", input, options)
end
@doc """
Deletes an Amazon SageMaker model bias job definition.
"""
def delete_model_bias_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModelBiasJobDefinition", input, options)
end
@doc """
Deletes an Amazon SageMaker model explainability job definition.
"""
def delete_model_explainability_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DeleteModelExplainabilityJobDefinition",
input,
options
)
end
@doc """
Deletes a model package.
A model package is used to create Amazon SageMaker models or list on AWS
Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to
create models in Amazon SageMaker.
"""
def delete_model_package(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModelPackage", input, options)
end
@doc """
Deletes the specified model group.
"""
def delete_model_package_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModelPackageGroup", input, options)
end
@doc """
Deletes a model group resource policy.
"""
def delete_model_package_group_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModelPackageGroupPolicy", input, options)
end
@doc """
Deletes the secified model quality monitoring job definition.
"""
def delete_model_quality_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteModelQualityJobDefinition", input, options)
end
@doc """
Deletes a monitoring schedule.
Also stops the schedule had not already been stopped. This does not delete the
job execution history of the monitoring schedule.
"""
def delete_monitoring_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteMonitoringSchedule", input, options)
end
@doc """
Deletes an Amazon SageMaker notebook instance.
Before you can delete a notebook instance, you must call the
`StopNotebookInstance` API.
When you delete a notebook instance, you lose all of your data. Amazon SageMaker
removes the ML compute instance, and deletes the ML storage volume and the
network interface associated with the notebook instance.
"""
def delete_notebook_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteNotebookInstance", input, options)
end
@doc """
Deletes a notebook instance lifecycle configuration.
"""
def delete_notebook_instance_lifecycle_config(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DeleteNotebookInstanceLifecycleConfig",
input,
options
)
end
@doc """
Deletes a pipeline if there are no in-progress executions.
"""
def delete_pipeline(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePipeline", input, options)
end
@doc """
Delete the specified project.
"""
def delete_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProject", input, options)
end
@doc """
Deletes the specified tags from an Amazon SageMaker resource.
To list a resource's tags, use the `ListTags` API.
When you call this API to delete tags from a hyperparameter tuning job, the
deleted tags are not removed from training jobs that the hyperparameter tuning
job launched before you called this API.
"""
def delete_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTags", input, options)
end
@doc """
Deletes the specified trial.
All trial components that make up the trial must be deleted first. Use the
`DescribeTrialComponent` API to get the list of trial components.
"""
def delete_trial(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTrial", input, options)
end
@doc """
Deletes the specified trial component.
A trial component must be disassociated from all trials before the trial
component can be deleted. To disassociate a trial component from a trial, call
the `DisassociateTrialComponent` API.
"""
def delete_trial_component(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTrialComponent", input, options)
end
@doc """
Deletes a user profile.
When a user profile is deleted, the user loses access to their EFS volume,
including data, notebooks, and other artifacts.
"""
def delete_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUserProfile", input, options)
end
@doc """
Use this operation to delete a workforce.
If you want to create a new workforce in an AWS Region where a workforce already
exists, use this operation to delete the existing workforce and then use to
create a new workforce.
If a private workforce contains one or more work teams, you must use the
operation to delete all work teams before you delete the workforce. If you try
to delete a workforce that contains one or more work teams, you will recieve a
`ResourceInUse` error.
"""
def delete_workforce(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteWorkforce", input, options)
end
@doc """
Deletes an existing work team.
This operation can't be undone.
"""
def delete_workteam(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteWorkteam", input, options)
end
@doc """
Deregisters the specified devices.
After you deregister a device, you will need to re-register the devices.
"""
def deregister_devices(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterDevices", input, options)
end
@doc """
Describes an action.
"""
def describe_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAction", input, options)
end
@doc """
Returns a description of the specified algorithm that is in your account.
"""
def describe_algorithm(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAlgorithm", input, options)
end
@doc """
Describes the app.
"""
def describe_app(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeApp", input, options)
end
@doc """
Describes an AppImageConfig.
"""
def describe_app_image_config(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAppImageConfig", input, options)
end
@doc """
Describes an artifact.
"""
def describe_artifact(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeArtifact", input, options)
end
@doc """
Returns information about an Amazon SageMaker job.
"""
def describe_auto_ml_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAutoMLJob", input, options)
end
@doc """
Gets details about the specified Git repository.
"""
def describe_code_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCodeRepository", input, options)
end
@doc """
Returns information about a model compilation job.
To create a model compilation job, use `CreateCompilationJob`. To get
information about multiple model compilation jobs, use `ListCompilationJobs`.
"""
def describe_compilation_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCompilationJob", input, options)
end
@doc """
Describes a context.
"""
def describe_context(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeContext", input, options)
end
@doc """
Gets the details of a data quality monitoring job definition.
"""
def describe_data_quality_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDataQualityJobDefinition", input, options)
end
@doc """
Describes the device.
"""
def describe_device(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDevice", input, options)
end
@doc """
A description of the fleet the device belongs to.
"""
def describe_device_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDeviceFleet", input, options)
end
@doc """
The description of the domain.
"""
def describe_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDomain", input, options)
end
@doc """
A description of edge packaging jobs.
"""
def describe_edge_packaging_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEdgePackagingJob", input, options)
end
@doc """
Returns the description of an endpoint.
"""
def describe_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEndpoint", input, options)
end
@doc """
Returns the description of an endpoint configuration created using the
`CreateEndpointConfig` API.
"""
def describe_endpoint_config(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEndpointConfig", input, options)
end
@doc """
Provides a list of an experiment's properties.
"""
def describe_experiment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeExperiment", input, options)
end
@doc """
Use this operation to describe a `FeatureGroup`.
The response includes information on the creation time, `FeatureGroup` name, the
unique identifier for each `FeatureGroup`, and more.
"""
def describe_feature_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFeatureGroup", input, options)
end
@doc """
Returns information about the specified flow definition.
"""
def describe_flow_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFlowDefinition", input, options)
end
@doc """
Returns information about the requested human task user interface (worker task
template).
"""
def describe_human_task_ui(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHumanTaskUi", input, options)
end
@doc """
Gets a description of a hyperparameter tuning job.
"""
def describe_hyper_parameter_tuning_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHyperParameterTuningJob", input, options)
end
@doc """
Describes a SageMaker image.
"""
def describe_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeImage", input, options)
end
@doc """
Describes a version of a SageMaker image.
"""
def describe_image_version(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeImageVersion", input, options)
end
@doc """
Gets information about a labeling job.
"""
def describe_labeling_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLabelingJob", input, options)
end
@doc """
Describes a model that you created using the `CreateModel` API.
"""
def describe_model(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeModel", input, options)
end
@doc """
Returns a description of a model bias job definition.
"""
def describe_model_bias_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeModelBiasJobDefinition", input, options)
end
@doc """
Returns a description of a model explainability job definition.
"""
def describe_model_explainability_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeModelExplainabilityJobDefinition",
input,
options
)
end
@doc """
Returns a description of the specified model package, which is used to create
Amazon SageMaker models or list them on AWS Marketplace.
To create models in Amazon SageMaker, buyers can subscribe to model packages
listed on AWS Marketplace.
"""
def describe_model_package(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeModelPackage", input, options)
end
@doc """
Gets a description for the specified model group.
"""
def describe_model_package_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeModelPackageGroup", input, options)
end
@doc """
Returns a description of a model quality job definition.
"""
def describe_model_quality_job_definition(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeModelQualityJobDefinition", input, options)
end
@doc """
Describes the schedule for a monitoring job.
"""
def describe_monitoring_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeMonitoringSchedule", input, options)
end
@doc """
Returns information about a notebook instance.
"""
def describe_notebook_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeNotebookInstance", input, options)
end
@doc """
Returns a description of a notebook instance lifecycle configuration.
For information about notebook instance lifestyle configurations, see [Step 2.1: (Optional) Customize a Notebook
Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html).
"""
def describe_notebook_instance_lifecycle_config(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeNotebookInstanceLifecycleConfig",
input,
options
)
end
@doc """
Describes the details of a pipeline.
"""
def describe_pipeline(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePipeline", input, options)
end
@doc """
Describes the details of an execution's pipeline definition.
"""
def describe_pipeline_definition_for_execution(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribePipelineDefinitionForExecution",
input,
options
)
end
@doc """
Describes the details of a pipeline execution.
"""
def describe_pipeline_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePipelineExecution", input, options)
end
@doc """
Returns a description of a processing job.
"""
def describe_processing_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProcessingJob", input, options)
end
@doc """
Describes the details of a project.
"""
def describe_project(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProject", input, options)
end
@doc """
Gets information about a work team provided by a vendor.
It returns details about the subscription with a vendor in the AWS Marketplace.
"""
def describe_subscribed_workteam(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSubscribedWorkteam", input, options)
end
@doc """
Returns information about a training job.
"""
def describe_training_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTrainingJob", input, options)
end
@doc """
Returns information about a transform job.
"""
def describe_transform_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTransformJob", input, options)
end
@doc """
Provides a list of a trial's properties.
"""
def describe_trial(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTrial", input, options)
end
@doc """
Provides a list of a trials component's properties.
"""
def describe_trial_component(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTrialComponent", input, options)
end
@doc """
Describes a user profile.
For more information, see `CreateUserProfile`.
"""
def describe_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUserProfile", input, options)
end
@doc """
Lists private workforce information, including workforce name, Amazon Resource
Name (ARN), and, if applicable, allowed IP address ranges
([CIDRs](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)).
Allowable IP address ranges are the IP addresses that workers can use to access
tasks.
This operation applies only to private workforces.
"""
def describe_workforce(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeWorkforce", input, options)
end
@doc """
Gets information about a specific work team.
You can see information such as the create date, the last updated date,
membership information, and the work team's Amazon Resource Name (ARN).
"""
def describe_workteam(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeWorkteam", input, options)
end
@doc """
Disables using Service Catalog in SageMaker.
Service Catalog is used to create SageMaker projects.
"""
def disable_sagemaker_servicecatalog_portfolio(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DisableSagemakerServicecatalogPortfolio",
input,
options
)
end
@doc """
Disassociates a trial component from a trial.
This doesn't effect other trials the component is associated with. Before you
can delete a component, you must disassociate the component from all trials it
is associated with. To associate a trial component with a trial, call the
`AssociateTrialComponent` API.
To get a list of the trials a component is associated with, use the `Search`
API. Specify `ExperimentTrialComponent` for the `Resource` parameter. The list
appears in the response under `Results.TrialComponent.Parents`.
"""
def disassociate_trial_component(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateTrialComponent", input, options)
end
@doc """
Enables using Service Catalog in SageMaker.
Service Catalog is used to create SageMaker projects.
"""
def enable_sagemaker_servicecatalog_portfolio(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"EnableSagemakerServicecatalogPortfolio",
input,
options
)
end
@doc """
Describes a fleet.
"""
def get_device_fleet_report(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDeviceFleetReport", input, options)
end
@doc """
Gets a resource policy that manages access for a model group.
For information about resource policies, see [Identity-based policies and resource-based
policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html)
in the *AWS Identity and Access Management User Guide.*.
"""
def get_model_package_group_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetModelPackageGroupPolicy", input, options)
end
@doc """
Gets the status of Service Catalog in SageMaker.
Service Catalog is used to create SageMaker projects.
"""
def get_sagemaker_servicecatalog_portfolio_status(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"GetSagemakerServicecatalogPortfolioStatus",
input,
options
)
end
@doc """
An auto-complete API for the search functionality in the Amazon SageMaker
console.
It returns suggestions of possible matches for the property name to use in
`Search` queries. Provides suggestions for `HyperParameters`, `Tags`, and
`Metrics`.
"""
def get_search_suggestions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSearchSuggestions", input, options)
end
@doc """
Lists the actions in your account and their properties.
"""
def list_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListActions", input, options)
end
@doc """
Lists the machine learning algorithms that have been created.
"""
def list_algorithms(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAlgorithms", input, options)
end
@doc """
Lists the AppImageConfigs in your account and their properties.
The list can be filtered by creation time or modified time, and whether the
AppImageConfig name contains a specified string.
"""
def list_app_image_configs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAppImageConfigs", input, options)
end
@doc """
Lists apps.
"""
def list_apps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListApps", input, options)
end
@doc """
Lists the artifacts in your account and their properties.
"""
def list_artifacts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListArtifacts", input, options)
end
@doc """
Lists the associations in your account and their properties.
"""
def list_associations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAssociations", input, options)
end
@doc """
Request a list of jobs.
"""
def list_auto_ml_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAutoMLJobs", input, options)
end
@doc """
List the Candidates created for the job.
"""
def list_candidates_for_auto_ml_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCandidatesForAutoMLJob", input, options)
end
@doc """
Gets a list of the Git repositories in your account.
"""
def list_code_repositories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCodeRepositories", input, options)
end
@doc """
Lists model compilation jobs that satisfy various filters.
To create a model compilation job, use `CreateCompilationJob`. To get
information about a particular model compilation job you have created, use
`DescribeCompilationJob`.
"""
def list_compilation_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCompilationJobs", input, options)
end
@doc """
Lists the contexts in your account and their properties.
"""
def list_contexts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListContexts", input, options)
end
@doc """
Lists the data quality job definitions in your account.
"""
def list_data_quality_job_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDataQualityJobDefinitions", input, options)
end
@doc """
Returns a list of devices in the fleet.
"""
def list_device_fleets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDeviceFleets", input, options)
end
@doc """
A list of devices.
"""
def list_devices(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDevices", input, options)
end
@doc """
Lists the domains.
"""
def list_domains(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDomains", input, options)
end
@doc """
Returns a list of edge packaging jobs.
"""
def list_edge_packaging_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEdgePackagingJobs", input, options)
end
@doc """
Lists endpoint configurations.
"""
def list_endpoint_configs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEndpointConfigs", input, options)
end
@doc """
Lists endpoints.
"""
def list_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEndpoints", input, options)
end
@doc """
Lists all the experiments in your account.
The list can be filtered to show only experiments that were created in a
specific time range. The list can be sorted by experiment name or creation time.
"""
def list_experiments(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListExperiments", input, options)
end
@doc """
List `FeatureGroup`s based on given filter and order.
"""
def list_feature_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListFeatureGroups", input, options)
end
@doc """
Returns information about the flow definitions in your account.
"""
def list_flow_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListFlowDefinitions", input, options)
end
@doc """
Returns information about the human task user interfaces in your account.
"""
def list_human_task_uis(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHumanTaskUis", input, options)
end
@doc """
Gets a list of `HyperParameterTuningJobSummary` objects that describe the
hyperparameter tuning jobs launched in your account.
"""
def list_hyper_parameter_tuning_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHyperParameterTuningJobs", input, options)
end
@doc """
Lists the versions of a specified image and their properties.
The list can be filtered by creation time or modified time.
"""
def list_image_versions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListImageVersions", input, options)
end
@doc """
Lists the images in your account and their properties.
The list can be filtered by creation time or modified time, and whether the
image name contains a specified string.
"""
def list_images(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListImages", input, options)
end
@doc """
Gets a list of labeling jobs.
"""
def list_labeling_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListLabelingJobs", input, options)
end
@doc """
Gets a list of labeling jobs assigned to a specified work team.
"""
def list_labeling_jobs_for_workteam(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListLabelingJobsForWorkteam", input, options)
end
@doc """
Lists model bias jobs definitions that satisfy various filters.
"""
def list_model_bias_job_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListModelBiasJobDefinitions", input, options)
end
@doc """
Lists model explainability job definitions that satisfy various filters.
"""
def list_model_explainability_job_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListModelExplainabilityJobDefinitions",
input,
options
)
end
@doc """
Gets a list of the model groups in your AWS account.
"""
def list_model_package_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListModelPackageGroups", input, options)
end
@doc """
Lists the model packages that have been created.
"""
def list_model_packages(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListModelPackages", input, options)
end
@doc """
Gets a list of model quality monitoring job definitions in your account.
"""
def list_model_quality_job_definitions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListModelQualityJobDefinitions", input, options)
end
@doc """
Lists models created with the `CreateModel` API.
"""
def list_models(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListModels", input, options)
end
@doc """
Returns list of all monitoring job executions.
"""
def list_monitoring_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListMonitoringExecutions", input, options)
end
@doc """
Returns list of all monitoring schedules.
"""
def list_monitoring_schedules(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListMonitoringSchedules", input, options)
end
@doc """
Lists notebook instance lifestyle configurations created with the
`CreateNotebookInstanceLifecycleConfig` API.
"""
def list_notebook_instance_lifecycle_configs(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListNotebookInstanceLifecycleConfigs",
input,
options
)
end
@doc """
Returns a list of the Amazon SageMaker notebook instances in the requester's
account in an AWS Region.
"""
def list_notebook_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListNotebookInstances", input, options)
end
@doc """
Gets a list of `PipeLineExecutionStep` objects.
"""
def list_pipeline_execution_steps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPipelineExecutionSteps", input, options)
end
@doc """
Gets a list of the pipeline executions.
"""
def list_pipeline_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPipelineExecutions", input, options)
end
@doc """
Gets a list of parameters for a pipeline execution.
"""
def list_pipeline_parameters_for_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPipelineParametersForExecution", input, options)
end
@doc """
Gets a list of pipelines.
"""
def list_pipelines(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPipelines", input, options)
end
@doc """
Lists processing jobs that satisfy various filters.
"""
def list_processing_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProcessingJobs", input, options)
end
@doc """
Gets a list of the projects in an AWS account.
"""
def list_projects(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProjects", input, options)
end
@doc """
Gets a list of the work teams that you are subscribed to in the AWS Marketplace.
The list may be empty if no work team satisfies the filter specified in the
`NameContains` parameter.
"""
def list_subscribed_workteams(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSubscribedWorkteams", input, options)
end
@doc """
Returns the tags for the specified Amazon SageMaker resource.
"""
def list_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTags", input, options)
end
@doc """
Lists training jobs.
"""
def list_training_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTrainingJobs", input, options)
end
@doc """
Gets a list of `TrainingJobSummary` objects that describe the training jobs that
a hyperparameter tuning job launched.
"""
def list_training_jobs_for_hyper_parameter_tuning_job(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListTrainingJobsForHyperParameterTuningJob",
input,
options
)
end
@doc """
Lists transform jobs.
"""
def list_transform_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTransformJobs", input, options)
end
@doc """
Lists the trial components in your account.
You can sort the list by trial component name or creation time. You can filter
the list to show only components that were created in a specific time range. You
can also filter on one of the following:
* `ExperimentName`
* `SourceArn`
* `TrialName`
"""
def list_trial_components(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTrialComponents", input, options)
end
@doc """
Lists the trials in your account.
Specify an experiment name to limit the list to the trials that are part of that
experiment. Specify a trial component name to limit the list to the trials that
associated with that trial component. The list can be filtered to show only
trials that were created in a specific time range. The list can be sorted by
trial name or creation time.
"""
def list_trials(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTrials", input, options)
end
@doc """
Lists user profiles.
"""
def list_user_profiles(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListUserProfiles", input, options)
end
@doc """
Use this operation to list all private and vendor workforces in an AWS Region.
Note that you can only have one private workforce per AWS Region.
"""
def list_workforces(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListWorkforces", input, options)
end
@doc """
Gets a list of private work teams that you have defined in a region.
The list may be empty if no work team satisfies the filter specified in the
`NameContains` parameter.
"""
def list_workteams(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListWorkteams", input, options)
end
@doc """
Adds a resource policy to control access to a model group.

For information about resource policies, see [Identity-based policies and resource-based
policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html)
in the *AWS Identity and Access Management User Guide.*.
"""
def put_model_package_group_policy(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "PutModelPackageGroupPolicy", input, options)
end
@doc """
Register devices.
"""
def register_devices(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterDevices", input, options)
end
@doc """
Renders the UI template so that you can preview the worker's experience.
"""
def render_ui_template(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RenderUiTemplate", input, options)
end
@doc """
Finds Amazon SageMaker resources that match a search query.

Matching resources are returned as a list of `SearchRecord` objects in the
response. You can sort the search results by any resource property in an
ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and
timestamp.
"""
def search(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "Search", input, options)
end
@doc """
Starts a previously stopped monitoring schedule.
By default, when you successfully create a new schedule, the status of a
monitoring schedule is `scheduled`.
"""
def start_monitoring_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartMonitoringSchedule", input, options)
end
@doc """
Launches an ML compute instance with the latest version of the libraries and
attaches your ML storage volume.
After configuring the notebook instance, Amazon SageMaker sets the notebook
instance status to `InService`. A notebook instance's status must be `InService`
before you can connect to your Jupyter notebook.
"""
def start_notebook_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartNotebookInstance", input, options)
end
@doc """
Starts a pipeline execution.
"""
def start_pipeline_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartPipelineExecution", input, options)
end
@doc """
A method for forcing the termination of a running job.
"""
def stop_auto_ml_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopAutoMLJob", input, options)
end
@doc """
Stops a model compilation job.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This
gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL
signal.
When it receives a `StopCompilationJob` request, Amazon SageMaker changes the
`CompilationJobSummary$CompilationJobStatus` of the job to `Stopping`. After
Amazon SageMaker stops the job, it sets the
`CompilationJobSummary$CompilationJobStatus` to `Stopped`.
"""
def stop_compilation_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopCompilationJob", input, options)
end
@doc """
Request to stop an edge packaging job.
"""
def stop_edge_packaging_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopEdgePackagingJob", input, options)
end
@doc """
Stops a running hyperparameter tuning job and all running training jobs that the
tuning job launched.
All model artifacts output from the training jobs are stored in Amazon Simple
Storage Service (Amazon S3). All data that the training jobs write to Amazon
CloudWatch Logs are still available in CloudWatch. After the tuning job moves to
the `Stopped` state, it releases all reserved resources for the tuning job.
"""
def stop_hyper_parameter_tuning_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopHyperParameterTuningJob", input, options)
end
@doc """
Stops a running labeling job.
A job that is stopped cannot be restarted. Any results obtained before the job
is stopped are placed in the Amazon S3 output bucket.
"""
def stop_labeling_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopLabelingJob", input, options)
end
@doc """
Stops a previously started monitoring schedule.
"""
def stop_monitoring_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopMonitoringSchedule", input, options)
end
@doc """
Terminates the ML compute instance.
Before terminating the instance, Amazon SageMaker disconnects the ML storage
volume from it. Amazon SageMaker preserves the ML storage volume. Amazon
SageMaker stops charging you for the ML compute instance when you call
`StopNotebookInstance`.
To access data on the ML storage volume for a notebook instance that has been
terminated, call the `StartNotebookInstance` API. `StartNotebookInstance`
launches another ML compute instance, configures it, and attaches the preserved
ML storage volume so you can continue your work.
"""
def stop_notebook_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopNotebookInstance", input, options)
end
@doc """
Stops a pipeline execution.
"""
def stop_pipeline_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopPipelineExecution", input, options)
end
@doc """
Stops a processing job.
"""
def stop_processing_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopProcessingJob", input, options)
end
@doc """
Stops a training job.

To stop a job, Amazon SageMaker sends the algorithm the `SIGTERM` signal, which
delays job termination for 120 seconds. Algorithms might use this 120-second
window to save the model artifacts, so the results of the training are not lost.

When it receives a `StopTrainingJob` request, Amazon SageMaker changes the
status of the job to `Stopping`. After Amazon SageMaker stops the job, it sets
the status to `Stopped`.
"""
def stop_training_job(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "StopTrainingJob", input, options)
end
@doc """
Stops a transform job.
When Amazon SageMaker receives a `StopTransformJob` request, the status of the
job changes to `Stopping`. After Amazon SageMaker stops the job, the status is
set to `Stopped`. When you stop a transform job before it is completed, Amazon
SageMaker doesn't store the job's output in Amazon S3.
"""
def stop_transform_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopTransformJob", input, options)
end
@doc """
Updates an action.
"""
def update_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAction", input, options)
end
@doc """
Updates the properties of an AppImageConfig.
"""
def update_app_image_config(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAppImageConfig", input, options)
end
@doc """
Updates an artifact.
"""
def update_artifact(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateArtifact", input, options)
end
@doc """
Updates the specified Git repository with the specified values.
"""
def update_code_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateCodeRepository", input, options)
end
@doc """
Updates a context.
"""
def update_context(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateContext", input, options)
end
@doc """
Updates a fleet of devices.
"""
def update_device_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDeviceFleet", input, options)
end
@doc """
Updates one or more devices in a fleet.
"""
def update_devices(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDevices", input, options)
end
@doc """
Updates the default settings for new user profiles in the domain.
"""
def update_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDomain", input, options)
end
@doc """
Deploys the new `EndpointConfig` specified in the request, switches to using
newly created endpoint, and then deletes resources provisioned for the endpoint
using the previous `EndpointConfig` (there is no availability loss).
When Amazon SageMaker receives the request, it sets the endpoint status to
`Updating`. After updating the endpoint, it sets the status to `InService`. To
check the status of an endpoint, use the `DescribeEndpoint` API.
You must not delete an `EndpointConfig` in use by an endpoint that is live or
while the `UpdateEndpoint` or `CreateEndpoint` operations are being performed on
the endpoint. To update an endpoint, you must create a new `EndpointConfig`.
If you delete the `EndpointConfig` of an endpoint that is active or being
created or updated you may lose visibility into the instance type the endpoint
is using. The endpoint must be deleted in order to stop incurring charges.
"""
def update_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEndpoint", input, options)
end
@doc """
Updates variant weight of one or more variants associated with an existing
endpoint, or capacity of one variant associated with an existing endpoint.
When it receives the request, Amazon SageMaker sets the endpoint status to
`Updating`. After updating the endpoint, it sets the status to `InService`. To
check the status of an endpoint, use the `DescribeEndpoint` API.
"""
def update_endpoint_weights_and_capacities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEndpointWeightsAndCapacities", input, options)
end
@doc """
Adds, updates, or removes the description of an experiment.
Updates the display name of an experiment.
"""
def update_experiment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateExperiment", input, options)
end
@doc """
Updates the properties of a SageMaker image.
To change the image's tags, use the `AddTags` and `DeleteTags` APIs.
"""
def update_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateImage", input, options)
end
@doc """
Updates a versioned model.
"""
def update_model_package(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateModelPackage", input, options)
end
@doc """
Updates a previously created schedule.
"""
def update_monitoring_schedule(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateMonitoringSchedule", input, options)
end
@doc """
Updates a notebook instance.
NotebookInstance updates include upgrading or downgrading the ML compute
instance used for your notebook instance to accommodate changes in your workload
requirements.
"""
def update_notebook_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateNotebookInstance", input, options)
end
@doc """
Updates a notebook instance lifecycle configuration created with the
`CreateNotebookInstanceLifecycleConfig` API.
"""
def update_notebook_instance_lifecycle_config(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"UpdateNotebookInstanceLifecycleConfig",
input,
options
)
end
@doc """
Updates a pipeline.
"""
def update_pipeline(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePipeline", input, options)
end
@doc """
Updates a pipeline execution.
"""
def update_pipeline_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePipelineExecution", input, options)
end
@doc """
Update a model training job to request a new Debugger profiling configuration.
"""
def update_training_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTrainingJob", input, options)
end
@doc """
Updates the display name of a trial.
"""
def update_trial(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTrial", input, options)
end
@doc """
Updates one or more properties of a trial component.
"""
def update_trial_component(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTrialComponent", input, options)
end
@doc """
Updates a user profile.
"""
def update_user_profile(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateUserProfile", input, options)
end
@doc """
Use this operation to update your workforce.

You can use this operation to require that workers use specific IP addresses to
work on tasks and to update your OpenID Connect (OIDC) Identity Provider (IdP)
workforce configuration.

Use `SourceIpConfig` to restrict worker access to tasks to a specific range of
IP addresses. You specify allowed IP addresses by creating a list of up to ten
[CIDRs](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html). By
default, a workforce isn't restricted to specific IP addresses. If you specify a
range of IP addresses, workers who attempt to access tasks using any IP address
outside the specified range are denied and get a `Not Found` error message on
the worker portal.

Use `OidcConfig` to update the configuration of a workforce created using your
own OIDC IdP.

You can only update your OIDC IdP configuration when there are no work teams
associated with your workforce. You can delete work teams using the operation.

After restricting access to a range of IP addresses or updating your OIDC IdP
configuration with this operation, you can view details about your updated
workforce using the operation.

This operation only applies to private workforces.
"""
def update_workforce(%Client{} = client, input, options \\ []) do
  Request.request_post(client, metadata(), "UpdateWorkforce", input, options)
end
@doc """
Updates an existing work team with new member definitions or description.
"""
def update_workteam(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateWorkteam", input, options)
end
end
|
lib/aws/generated/sage_maker.ex
| 0.901977
| 0.461563
|
sage_maker.ex
|
starcoder
|
defmodule Phoenix.Socket.Transport do
@moduledoc """
Outlines the Socket <-> Transport communication.
This module specifies a behaviour that all sockets must implement.
`Phoenix.Socket` is just one possible implementation of a socket
that multiplexes events over multiple channels. Developers can
implement their own sockets as long as they implement the behaviour
outlined here.
Developers interested in implementing custom transports must invoke
the socket API defined in this module. This module also provides
many conveniences that invokes the underlying socket API to make
it easier to build custom transports.
## Booting sockets
Whenever your endpoint starts, it will automatically invoke the
`child_spec/1` on each listed socket and start that specification
under the endpoint supervisor.
Since the socket supervision tree is started by the endpoint,
any custom transport must be started after the endpoint in a
supervision tree.
## Operating sockets
Sockets are operated by a transport. When a transport is defined,
it usually receives a socket module and the module will be invoked
when certain events happen at the transport level.
Whenever the transport receives a new connection, it should invoke
the `c:connect/1` callback with a map of metadata. Different sockets
may require different metadata.
If the connection is accepted, the transport can move the connection
to another process, if it so desires, or keep using the same process. The
process responsible for managing the socket should then call `c:init/1`.
For each message received from the client, the transport must call
`c:handle_in/2` on the socket. For each informational message the
transport receives, it should call `c:handle_info/2` on the socket.
Transports can optionally implement `c:handle_control/2` for handling
control frames such as `:ping` and `:pong`.
On termination, `c:terminate/2` must be called. A special atom with
reason `:closed` can be used to specify that the client terminated
the connection.
## Example
Here is a simple echo socket implementation:
defmodule EchoSocket do
@behaviour Phoenix.Socket.Transport
def child_spec(opts) do
# We won't spawn any process, so let's return a dummy task
%{id: __MODULE__, start: {Task, :start_link, [fn -> :ok end]}, restart: :transient}
end
def connect(state) do
# Callback to retrieve relevant data from the connection.
# The map contains options, params, transport and endpoint keys.
{:ok, state}
end
def init(state) do
# Now we are effectively inside the process that maintains the socket.
{:ok, state}
end
def handle_in({text, _opts}, state) do
{:reply, :ok, {:text, text}, state}
end
def handle_info(_, state) do
{:ok, state}
end
def terminate(_reason, _state) do
:ok
end
end
It can be mounted in your endpoint like any other socket:
socket "/socket", EchoSocket, websocket: true, longpoll: true
You can now interact with the socket under `/socket/websocket`
and `/socket/longpoll`.
## Security
This module also provides functions to enable a secure environment
on transports that, at some point, have access to a `Plug.Conn`.
The functionality provided by this module helps in performing "origin"
header checks and ensuring only SSL connections are allowed.
"""
@type state :: term()
@doc """
Returns a child specification for socket management.
This is invoked only once per socket regardless of
the number of transports and should be responsible
for setting up any process structure used exclusively
by the socket regardless of transports.
Each socket connection is started by the transport
and the process that controls the socket likely
belongs to the transport. However, some sockets spawn
new processes, such as `Phoenix.Socket` which spawns
channels, and this gives the ability to start a
supervision tree associated to the socket.
It receives the socket options from the endpoint,
for example:
socket "/my_app", MyApp.Socket, shutdown: 5000
means `child_spec([shutdown: 5000])` will be invoked.
"""
@callback child_spec(keyword) :: :supervisor.child_spec
@doc """
Connects to the socket.
The transport passes a map of metadata and the socket
returns `{:ok, state}` or `:error`. The state must be
stored by the transport and returned in all future
operations.
This function is used for authorization purposes and it
may be invoked outside of the process that effectively
runs the socket.
In the default `Phoenix.Socket` implementation, the
metadata expects the following keys:
* `:endpoint` - the application endpoint
* `:transport` - the transport name
* `:params` - the connection parameters
* `:options` - a keyword list of transport options, often
given by developers when configuring the transport.
It must include a `:serializer` field with the list of
serializers and their requirements
"""
@callback connect(transport_info :: map) :: {:ok, state} | :error
@doc """
Initializes the socket state.
This must be executed from the process that will effectively
operate the socket.
"""
@callback init(state) :: {:ok, state}
@doc """
Handles incoming socket messages.

The message is represented as `{payload, options}`. It must
return one of:

  * `{:ok, state}` - continues the socket with no reply
  * `{:reply, status, reply, state}` - continues the socket with reply
  * `{:stop, reason, state}` - stops the socket

The `reply` is a tuple containing an `opcode` atom and a message that can
be any term. The built-in websocket transport supports both `:text` and
`:binary` opcode and the message must be always iodata. Long polling only
supports text opcode.
"""
@callback handle_in({message :: term, opts :: keyword}, state) ::
            {:ok, state}
            | {:reply, :ok | :error, {opcode :: atom, message :: term}, state}
            | {:stop, reason :: term, state}
@doc """
Handles incoming control frames.

The message is represented as `{payload, options}`. It must
return one of:

  * `{:ok, state}` - continues the socket with no reply
  * `{:reply, status, reply, state}` - continues the socket with reply
  * `{:stop, reason, state}` - stops the socket

Control frames are only supported when using websockets.

The `options` contains an `opcode` key, this will be either `:ping` or
`:pong`.

If a control frame doesn't have a payload, then the payload value
will be `nil`.
"""
@callback handle_control({message :: term, opts :: keyword}, state) ::
            {:ok, state}
            | {:reply, :ok | :error, {opcode :: atom, message :: term}, state}
            | {:stop, reason :: term, state}
@doc """
Handles info messages.

The message is a term. It must return one of:

  * `{:ok, state}` - continues the socket with no reply
  * `{:push, reply, state}` - continues the socket with reply
  * `{:stop, reason, state}` - stops the socket

The `reply` is a tuple containing an `opcode` atom and a message that can
be any term. The built-in websocket transport supports both `:text` and
`:binary` opcode and the message must be always iodata. Long polling only
supports text opcode.
"""
@callback handle_info(message :: term, state) ::
            {:ok, state}
            | {:push, {opcode :: atom, message :: term}, state}
            | {:stop, reason :: term, state}
@doc """
Invoked on termination.
If `reason` is `:closed`, it means the client closed the socket. This is
considered a `:normal` exit signal, so linked process will not automatically
exit. See `Process.exit/2` for more details on exit signals.
"""
@callback terminate(reason :: term, state) :: :ok
@optional_callbacks handle_control: 2
require Logger
# Normalizes the transport configuration given when a socket is mounted:
# `true` means "use the transport module's defaults as-is", while a
# keyword list is merged on top of those defaults. Both paths funnel
# through `load_config/1` for :connect_info validation.
@doc false
def load_config(true, module),
  do: module.default_config()

def load_config(config, module),
  do: module.default_config() |> Keyword.merge(config) |> load_config()
# Validates and normalizes the :connect_info option. Each entry must be
# one of the known atom keys, a `{:session, config}` tuple (resolved once
# here so every connection reuses the prepared store), or a custom
# `{key, value}` pair passed through verbatim. Raises ArgumentError on
# anything else.
@doc false
def load_config(config) do
  {connect_info, config} = Keyword.pop(config, :connect_info, [])

  connect_info =
    Enum.map(connect_info, fn
      key when key in [:peer_data, :trace_context_headers, :uri, :user_agent, :x_headers] ->
        key

      {:session, session} ->
        # Resolve the session store/options at boot time rather than
        # per connection.
        {:session, init_session(session)}

      {_, _} = pair ->
        pair

      other ->
        # Bug fix: the message previously omitted :user_agent, which the
        # clause above accepts as a valid key.
        raise ArgumentError,
              ":connect_info keys are expected to be one of :peer_data, :trace_context_headers, " <>
                ":user_agent, :x_headers, :uri, or {:session, config}, " <>
                "optionally followed by custom keyword pairs, got: #{inspect(other)}"
    end)

  [connect_info: connect_info] ++ config
end
# Resolves a session configuration into the `{cookie_key, store, store_opts}`
# triple consumed by `connect_session/3`. An MFA tuple is tagged for lazy
# resolution at connection time instead.
defp init_session(session_config) when is_list(session_config) do
  cookie_key = Keyword.fetch!(session_config, :key)
  store = session_config |> Keyword.fetch!(:store) |> Plug.Session.Store.get()

  store_opts =
    session_config
    |> Keyword.drop([:store, :key])
    |> store.init()

  {cookie_key, store, store_opts}
end

defp init_session({_, _, _} = mfa), do: {:mfa, mfa}
@doc """
Runs the code reloader if enabled.
"""
def code_reload(conn, endpoint, opts) do
  # The transport option takes precedence over the endpoint config.
  enabled? = Keyword.get(opts, :code_reloader, endpoint.config(:code_reloader))

  if enabled? do
    Phoenix.CodeReloader.reload(endpoint)
  end

  conn
end
@doc """
Forces SSL in the socket connection.
Uses the endpoint configuration to decide so. It is a
noop if the connection has been halted.
"""
def force_ssl(%{halted: true} = conn, _socket, _endpoint, _opts), do: conn

def force_ssl(conn, socket, endpoint, opts) do
  case force_ssl_config(socket, endpoint, opts) do
    # force_ssl disabled: leave the connection untouched.
    nil -> conn
    ssl_opts -> Plug.SSL.call(conn, ssl_opts)
  end
end
# Computes (and caches per {:force_ssl, socket} on the endpoint) the
# initialized Plug.SSL options, or nil when force_ssl is disabled.
defp force_ssl_config(socket, endpoint, opts) do
  Phoenix.Config.cache(endpoint, {:force_ssl, socket}, fn _ ->
    opts =
      if force_ssl = Keyword.get(opts, :force_ssl, endpoint.config(:force_ssl)) do
        force_ssl
        # :host defaults to an MFA-ish tuple resolved from the endpoint
        # config at redirect time.
        |> Keyword.put_new(:host, {endpoint, :host, []})
        |> Plug.SSL.init()
      end

    {:cache, opts}
  end)
end
@doc """
Logs the transport request.
Available for transports that generate a connection.
"""
def transport_log(conn, level) do
  # Any falsy level (nil or false) disables request logging.
  if level,
    do: Plug.Logger.call(conn, Plug.Logger.init(log: level)),
    else: conn
end
@doc """
Checks the origin request header against the list of allowed origins.

Should be called by transports before connecting when appropriate.
If the origin header matches the allowed origins, no origin header was
sent or no origin was configured, it will return the given connection.

Otherwise a 403 Forbidden response will be sent and the connection halted.
It is a noop if the connection has been halted.
"""
def check_origin(conn, handler, endpoint, opts, sender \\ &Plug.Conn.send_resp/1)

# Respect a connection already halted by an earlier plug (e.g. force_ssl).
def check_origin(%Plug.Conn{halted: true} = conn, _handler, _endpoint, _opts, _sender),
  do: conn

def check_origin(conn, handler, endpoint, opts, sender) do
  import Plug.Conn

  origin = conn |> get_req_header("origin") |> List.first()
  check_origin = check_origin_config(handler, endpoint, opts)

  cond do
    # No origin header (non-browser client) or checking disabled: allow.
    is_nil(origin) or check_origin == false ->
      conn

    origin_allowed?(check_origin, URI.parse(origin), endpoint, conn) ->
      conn

    true ->
      Logger.error """
      Could not check origin for Phoenix.Socket transport.
      Origin of the request: #{origin}
      This happens when you are attempting a socket connection to
      a different host than the one configured in your config/
      files. For example, in development the host is configured
      to "localhost" but you may be trying to access it from
      "127.0.0.1". To fix this issue, you may either:
      1. update [url: [host: ...]] to your actual host in the
      config file for your current environment (recommended)
      2. pass the :check_origin option when configuring your
      endpoint or when configuring the transport in your
      UserSocket module, explicitly outlining which origins
      are allowed:
      check_origin: ["https://example.com",
      "//another.com:888", "//other.com"]
      """

      # Reply 403 through the injected sender (test seam) and halt.
      resp(conn, :forbidden, "")
      |> sender.()
      |> halt()
  end
end
@doc """
Checks the Websocket subprotocols request header against the allowed subprotocols.

Should be called by transports before connecting when appropriate.
If the sec-websocket-protocol header matches the allowed subprotocols,
it will put sec-websocket-protocol response header and return the given connection.
If no sec-websocket-protocol header was sent it will return the given connection.

Otherwise a 403 Forbidden response will be sent and the connection halted.
It is a noop if the connection has been halted.
"""
def check_subprotocols(conn, subprotocols)
def check_subprotocols(%Plug.Conn{halted: true} = conn, _subprotocols), do: conn
def check_subprotocols(conn, nil), do: conn

def check_subprotocols(conn, subprotocols) when is_list(subprotocols) do
  case Plug.Conn.get_req_header(conn, "sec-websocket-protocol") do
    [] ->
      # Client did not request a subprotocol: nothing to negotiate.
      conn

    [header | _] ->
      requested = Plug.Conn.Utils.list(header)
      # Pick the first *configured* subprotocol the client also offered.
      chosen = Enum.find(subprotocols, &(&1 in requested))

      if chosen do
        Plug.Conn.put_resp_header(conn, "sec-websocket-protocol", chosen)
      else
        subprotocols_error_response(conn, subprotocols)
      end
  end
end

# Any non-list, non-nil configuration is invalid: reject the connection.
def check_subprotocols(conn, subprotocols), do: subprotocols_error_response(conn, subprotocols)
@doc """
Extracts connection information from `conn` and returns a map.

Keys are retrieved from the optional transport option `:connect_info`.
This functionality is transport specific. Please refer to your transports'
documentation for more information.

The supported keys are:

  * `:peer_data` - the result of `Plug.Conn.get_peer_data/1`
  * `:trace_context_headers` - a list of all trace context headers
  * `:x_headers` - a list of all request headers that have an "x-" prefix
  * `:uri` - a `%URI{}` derived from the conn
  * `:user_agent` - the value of the "user-agent" request header
  * `{:session, config}` - the session loaded from the connection, or `nil`

Any other `{key, value}` pair is included in the map unchanged.
"""
def connect_info(conn, endpoint, keys) do
  for key <- keys, into: %{} do
    case key do
      :peer_data ->
        {:peer_data, Plug.Conn.get_peer_data(conn)}

      :trace_context_headers ->
        {:trace_context_headers, fetch_trace_context_headers(conn)}

      :x_headers ->
        {:x_headers, fetch_x_headers(conn)}

      :uri ->
        {:uri, fetch_uri(conn)}

      :user_agent ->
        {:user_agent, fetch_user_agent(conn)}

      {:session, session} ->
        # `session` is the `{key, store, opts}` tuple or `{:mfa, mfa}`
        # produced by load_config/1.
        {:session, connect_session(conn, endpoint, session)}

      {key, val} ->
        # Custom pairs are passed through as-is.
        {key, val}
    end
  end
end
# Loads the session for this connection, but only when the CSRF token in
# the params matches the state stored in the session cookie — this guards
# against cross-site WebSocket hijacking. Returns the session map, or nil
# when the token, cookie, or stored state is missing/invalid.
defp connect_session(conn, endpoint, {key, store, store_config}) do
  conn = Plug.Conn.fetch_cookies(conn)

  with csrf_token when is_binary(csrf_token) <- conn.params["_csrf_token"],
       cookie when is_binary(cookie) <- conn.cookies[key],
       conn = put_in(conn.secret_key_base, endpoint.config(:secret_key_base)),
       {_, session} <- store.get(conn, cookie, store_config),
       csrf_state when is_binary(csrf_state) <- Plug.CSRFProtection.dump_state_from_session(session["_csrf_token"]),
       true <- Plug.CSRFProtection.valid_state_and_csrf_token?(csrf_state, csrf_token) do
    session
  else
    _ -> nil
  end
end

# MFA variant: resolve the session options at connection time, then
# re-enter the tuple clause above via init_session/1.
defp connect_session(conn, endpoint, {:mfa, {module, function, args}}) do
  case apply(module, function, args) do
    session_config when is_list(session_config) ->
      connect_session(conn, endpoint, init_session(session_config))

    other ->
      raise ArgumentError,
        "the MFA given to `session_config` must return a keyword list, got: #{inspect other}"
  end
end
# Logs a descriptive error and replies 403 Forbidden, halting the
# connection. Used whenever the client's sec-websocket-protocol header
# cannot be matched against the configured subprotocols.
defp subprotocols_error_response(conn, subprotocols) do
  import Plug.Conn

  request_headers = get_req_header(conn, "sec-websocket-protocol")

  Logger.error """
  Could not check Websocket subprotocols for Phoenix.Socket transport.
  Subprotocols of the request: #{inspect(request_headers)}
  Configured supported subprotocols: #{inspect(subprotocols)}
  This happens when you are attempting a socket connection to
  a different subprotocols than the one configured in your endpoint
  or when you incorrectly configured supported subprotocols.
  To fix this issue, you may either:
  1. update websocket: [subprotocols: [..]] to your actual subprotocols
  in your endpoint socket configuration.
  2. check the correctness of the `sec-websocket-protocol` request header
  sent from the client.
  3. remove `websocket` option from your endpoint socket configuration
  if you don't use Websocket subprotocols.
  """

  resp(conn, :forbidden, "")
  |> send_resp()
  |> halt()
end
# Returns all request headers whose name starts with "x-".
defp fetch_x_headers(conn) do
  Enum.filter(conn.req_headers, &match?({"x-" <> _, _}, &1))
end
# Returns the W3C trace context request headers (traceparent/tracestate).
defp fetch_trace_context_headers(conn) do
  Enum.filter(
    conn.req_headers,
    &match?({name, _} when name in ["traceparent", "tracestate"], &1)
  )
end
# Builds a `%URI{}` describing the request; the conn's host doubles as the
# URI authority.
defp fetch_uri(conn) do
  host = conn.host

  %URI{
    scheme: to_string(conn.scheme),
    host: host,
    authority: host,
    port: conn.port,
    path: conn.request_path,
    query: conn.query_string
  }
end
# Returns the value of the "user-agent" request header, or `nil` when absent.
defp fetch_user_agent(conn) do
  case List.keyfind(conn.req_headers, "user-agent", 0) do
    {_, value} -> value
    nil -> nil
  end
end
# Resolves and caches the `:check_origin` setting for `handler` on `endpoint`.
#
# The transport-level option takes precedence over the endpoint config.
# Accepted shapes: a list of origins (pre-parsed into `{scheme, host, port}`
# tuples), a boolean, an `{module, function, arguments}` tuple, or `:conn`.
defp check_origin_config(handler, endpoint, opts) do
  Phoenix.Config.cache(endpoint, {:check_origin, handler}, fn _ ->
    check_origin =
      case Keyword.get(opts, :check_origin, endpoint.config(:check_origin)) do
        origins when is_list(origins) ->
          # Parse once here so per-request checks only compare tuples.
          Enum.map(origins, &parse_origin/1)

        boolean when is_boolean(boolean) ->
          boolean

        {module, function, arguments} ->
          {module, function, arguments}

        :conn ->
          :conn

        invalid ->
          raise ArgumentError, ":check_origin expects a boolean, list of hosts, :conn, or MFA tuple, got: #{inspect(invalid)}"
      end

    # `{:cache, value}` asks Phoenix.Config to memoize the resolved value.
    {:cache, check_origin}
  end)
end
# Parses a configured origin string into a `{scheme, host, port}` triple,
# raising when the origin has no parsable host.
defp parse_origin(origin) do
  uri = URI.parse(origin)

  if uri.host == nil do
    raise ArgumentError,
          "invalid :check_origin option: #{inspect origin}. " <>
            "Expected an origin with a host that is parsable by URI.parse/1. For example: " <>
            "[\"https://example.com\", \"//another.com:888\", \"//other.com\"]"
  end

  {uri.scheme, uri.host, uri.port}
end
# Clause order matters here: MFA and `:conn` checks run first, then an
# origin without a host is always rejected, `true` compares against the
# endpoint's configured URL host, and a list is matched origin-by-origin.
defp origin_allowed?({module, function, arguments}, uri, _endpoint, _conn),
  do: apply(module, function, [uri | arguments])

# `:conn` requires the origin to match the request's own scheme/host/port.
defp origin_allowed?(:conn, uri, _endpoint, %Plug.Conn{} = conn),
  do: uri.host == conn.host and uri.scheme == conn.scheme and uri.port == conn.port

# An origin with no host can never be allowed.
defp origin_allowed?(_check_origin, %{host: nil}, _endpoint, _conn),
  do: false

# `true` means "same host as the endpoint's configured :url host".
defp origin_allowed?(true, uri, endpoint, _conn),
  do: compare?(uri.host, host_to_binary(endpoint.config(:url)[:host]))

defp origin_allowed?(check_origin, uri, _endpoint, _conn) when is_list(check_origin),
  do: origin_allowed?(uri, check_origin)

# origin_allowed?/2 — checks the origin URI against the parsed allow-list
# of `{scheme, host, port}` tuples produced by `parse_origin/1`.
defp origin_allowed?(uri, allowed_origins) do
  %{scheme: origin_scheme, host: origin_host, port: origin_port} = uri

  Enum.any?(allowed_origins, fn {allowed_scheme, allowed_host, allowed_port} ->
    compare?(origin_scheme, allowed_scheme) and
      compare?(origin_port, allowed_port) and
      compare_host?(origin_host, allowed_host)
  end)
end
# A request value satisfies a constraint when the constraint is unset
# (`nil`) or the two values are equal.
defp compare?(request_val, allowed_val),
  do: allowed_val == nil or allowed_val == request_val
# Matches a request host against an allowed host pattern: `nil` allows any
# host, `"*.domain"` allows any host ending in "domain", anything else
# requires exact equality.
defp compare_host?(request_host, allowed_host) do
  case allowed_host do
    nil -> true
    "*." <> suffix -> String.ends_with?(request_host, suffix)
    exact -> request_host == exact
  end
end
# TODO: Deprecate {:system, env_var} once we require Elixir v1.9+
# Resolves a `{:system, env_var}` host tuple via the environment; any other
# host value is returned unchanged.
defp host_to_binary({:system, env_var}), do: env_var |> System.get_env() |> host_to_binary()
defp host_to_binary(host), do: host
end
|
lib/phoenix/socket/transport.ex
| 0.912185
| 0.566498
|
transport.ex
|
starcoder
|
defmodule ExZample do
@moduledoc """
ExZample is a factory library based on Elixir behaviours.
"""
alias ExZample.{Sequence, SequenceSupervisor}
import ExZample.Since
@doc """
Invoked every time you build your data using `ExZample` module.
You need to return a struct with example values.
This callback is optional when the module given is a struct. It will use
the struct default values if no callback is given.
"""
since("0.1.0")
@callback example() :: struct
@doc """
Same as `c:example/0`, but here you have the full control in how will build
your struct given the attributes.
The keyword list given in functions like `build/2` are transformed in map
for your convenience and you need to return a struct.
You can have two scenarios when using this callback:
1. If you define `example/0` and `example/1` in the same factory, `example/0` will
be preferred when you use `build/1`. The `example/1` will be preferred if you
use it with `build/2`.
2. If you only implement `example/1` and use `build/1`, your callback will be
invoked with an empty map.
This callback is optional.
"""
since("0.5.0")
@callback example(attrs :: map) :: struct
@doc """
Invoked every time you insert your data using `ExZample` module.
You need to return the Ecto `Repo` module that ExZample should use
to insert records in database
This callback is optional if the goal is to use only in memory.
"""
since("0.10.0")
@callback ecto_repo :: module
@type factory :: module
@type sequence_fun :: (pos_integer -> term)
@optional_callbacks example: 0, example: 1, ecto_repo: 0
defguardp is_greater_than_0(term) when is_integer(term) and term > 0
@doc false
since("0.3.0")
@deprecated "Use config_aliases/1 instead"
def add_aliases(aliases), do: config_aliases(aliases)
@doc false
since("0.3.0")
@deprecated "Use config_aliases/2 instead"
def add_aliases(scope, aliases), do: config_aliases(scope, aliases)
@doc """
Creates aliases for your factories to simplify the build calls.
A `aliases` should be a map with atom keys and values as `factory` compatible
modules. If you call with repeated keys this function will fail. This function
is ideal to be called once, for example in your `test_helper.ex` file.
## Examples
iex> ExZample.config_aliases(%{user: UserFactory})
...> ExZample.build(:user)
%User{age: 21, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
"""
since("0.4.0")
@spec config_aliases(%{required(atom) => factory}) :: :ok
def config_aliases(aliases) when is_map(aliases), do: config_aliases(:global, aliases)
@doc """
Same as `config_aliases/1`, but you can define a different scope.
This function is specially useful for umbrella apps where each app can define
their factories without leaking any aliases to other apps. You can enforce the
current scope with `ex_zample/1`.
## Examples
iex> ExZample.config_aliases(:my_app, %{user: UserFactory})
...> ExZample.ex_zample(%{ex_zample_scope: :my_app})
...> ExZample.build(:user)
%User{age: 21, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
"""
since("0.4.0")
@spec config_aliases(atom, %{required(atom) => factory}) :: :ok
def config_aliases(scope, aliases) when is_map(aliases) do
config = get_config(scope)
current_aliases = Map.get(config, :aliases, %{})
updated_aliases =
Map.merge(current_aliases, aliases, fn factory_alias, current_factory, new_factory ->
if current_factory == new_factory do
current_factory
else
raise ArgumentError, """
The alias #{inspect(factory_alias)} already exists!
It is registered with the factory #{inspect(current_factory)} and
can't be replaced with the new #{inspect(new_factory)} in #{inspect(scope)} scope.
Rename the alias or add it in a different scope.
"""
end
end)
put_config(scope, Map.put(config, :aliases, updated_aliases))
:ok
end
@doc """
Creates a sequence with the given `name`.
A sequence is global runtime counter that can be invoked with `sequence/1`. The
default counter starts from `1` and increments `1` by `1`.
## Examples
iex> ExZample.create_sequence(:customer_id)
...> Enum.map(1..10, fn _ -> ExZample.sequence(:customer_id) end)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
since("0.4.0")
@spec create_sequence(atom) :: :ok
def create_sequence(name), do: create_sequence(:global, name, & &1)
@doc """
Same as `create_sequence/1`, but you can define a different scope or a
sequence function.
When use with `scope` and `name` you define scoped global counter, it's
useful for umbrella apps for example.
When you use `name` and `sequence_fun`, the given function will receive the
counter and then you can transform in anything you want.
## Examples
iex> ExZample.create_sequence(:my_app, :customer_id)
...> ExZample.ex_zample(%{ex_zample_scope: :my_app})
...> ExZample.sequence(:customer_id)
1
iex> ExZample.create_sequence(:customer_id, &("customer_" <> to_string(&1)))
...> Enum.map(1..3, fn _ -> ExZample.sequence(:customer_id) end)
["customer_1", "customer_2", "customer_3"]
"""
since("0.4.0")
@spec create_sequence(scope_or_name :: atom, sequence_fun_or_name :: sequence_fun | atom) :: :ok
def create_sequence(scope_or_name, sequence_fun_or_name)
def create_sequence(name, sequence_fun)
when is_atom(name) and is_function(sequence_fun, 1),
do: create_sequence(:global, name, sequence_fun)
def create_sequence(scope, name)
when is_atom(scope) and is_atom(name),
do: create_sequence(scope, name, & &1)
@doc """
Same as `create_sequence/1`, but you can define a different scope and a
sequence function.
The `scope` is where your global counter will live, useful for umbrella
apps for example. The given `sequence_fun` will receive the counter and then
you can transform it into anything you want.
## Examples
iex> ExZample.create_sequence(:my_app, :customer_id, &("customer_" <> to_string(&1)))
...> ExZample.ex_zample(%{ex_zample_scope: :my_app})
...> Enum.map(1..3, fn _ -> ExZample.sequence(:customer_id) end)
["customer_1", "customer_2", "customer_3"]
"""
since("0.4.0")
@spec create_sequence(atom, atom, sequence_fun) :: :ok
def create_sequence(scope, name, sequence_fun)
when is_atom(scope) and is_atom(name) and is_function(sequence_fun, 1) do
params = %{sequence_name: sequence_name(scope, name), sequence_fun: sequence_fun}
case DynamicSupervisor.start_child(SequenceSupervisor, {Sequence, params}) do
{:ok, _} ->
:ok
{:error, {:already_started, _}} ->
raise ArgumentError, """
The sequence #{inspect(name)} in #{inspect(scope)} scope already exists!
Rename the sequence or add it in a different scope.
"""
end
catch
:exit, {:noproc, _} ->
raise ArgumentError, """
Looks like :ex_zample application wasn't started.
Make sure you have started it in your `test_helper.exs`:
:ok = Application.ensure_started(:ex_zample)
"""
end
@doc """
Builds a struct with given `factory_or_alias` module.
If the given factory exports the `c:example/0` function it will use to return
the struct and its values. Otherwise, if the module is a struct it will use
its default values.
It will override the generated data with the given `attrs`.
## Examples
iex> ExZample.build(User)
%ExZample.User{}
iex> ExZample.build(UserFactory)
%ExZample.User{age: 21, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
iex> ExZample.build(:book)
%ExZample.Book{code: "1321", title: "The Book's Title"}
iex> ExZample.build(User, age: 45)
%ExZample.User{age: 45}
iex> ExZample.build(UserFactory, age: 45)
%ExZample.User{age: 45, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
iex> ExZample.build(:book, code: "007")
%ExZample.Book{code: "007", title: "The Book's Title"}
"""
since("0.1.0")
@spec build(factory, Enum.t() | nil) :: struct
def build(factory_or_alias, attrs \\ nil) when is_atom(factory_or_alias),
do: try_factory(factory_or_alias, attrs) || try_alias(factory_or_alias, attrs)
# Builds a struct from `factory` when no attributes were given.
#
# Precedence: `example/0`, then `example/1` (called with an empty map),
# then the module's own struct defaults. Returns `nil` when `factory` is
# neither a factory nor a struct module, so `build/2` can fall back to
# alias lookup.
defp try_factory(factory, nil) do
  cond do
    function_exported?(factory, :example, 0) ->
      factory.example()

    function_exported?(factory, :example, 1) ->
      factory.example(%{})

    function_exported?(factory, :__struct__, 1) ->
      struct!(factory)

    true ->
      nil
  end
end

# Builds a struct from `factory`, overriding it with the given attributes.
#
# Precedence flips here: `example/1` (receiving the attrs as a map) wins
# over `example/0` (whose result is merged with the attrs via `struct!/2`).
defp try_factory(factory, enum) do
  cond do
    function_exported?(factory, :example, 1) ->
      enum
      |> Map.new()
      |> factory.example()

    function_exported?(factory, :example, 0) ->
      struct!(factory.example(), enum)

    function_exported?(factory, :__struct__, 1) ->
      struct!(factory, enum)

    true ->
      nil
  end
end
# Resolves `factory_alias` in the current scope's alias registry and
# builds from the resolved factory. Raises with a descriptive message
# when the alias is unknown or the resolved module is not a factory.
defp try_alias(factory_alias, attrs) do
  scope = lookup_scope()
  aliases = get_config(scope)[:aliases] || %{}

  if factory = aliases[factory_alias] do
    inspected_argument = inspect(factory)

    try_factory(factory, attrs) ||
      raise ArgumentError,
        message: """
        #{inspected_argument} is not a factory
        You need to create a `example/0` function
        """
  else
    inspected_argument = inspect(factory_alias)

    raise ArgumentError,
      message: """
      #{inspected_argument} is not a factory in #{inspect(scope)} scope
      If #{inspected_argument} is a module, you need to create a `example/0` function
      If #{inspected_argument} is a alias, you need to register it with `ExZample.config_aliases/1`
      """
  end
end
# The current alias scope for this process, defaulting to `:global`.
defp lookup_scope, do: lookup_in_processes_and_set(:ex_zample_scope, :global)

# Looks up `key` in this process' dictionary first; on a miss, falls back
# to the dictionaries of the processes listed under `:"$callers"` and
# `:"$ancestors"`, then caches whatever was found (or `default`) locally.
defp lookup_in_processes_and_set(key, default) do
  if value = Process.get(key) do
    value
  else
    value =
      lookup_in_processes(key, :"$callers") || lookup_in_processes(key, :"$ancestors") ||
        default

    # NOTE: Faster future lookups
    Process.put(key, value)
    value
  end
end

# Scans the pid/name list stored under `process` (`:"$callers"` or
# `:"$ancestors"`) and returns the first non-nil value of `key` found in
# those processes' dictionaries.
defp lookup_in_processes(key, process),
  do: process |> Process.get() |> List.wrap() |> Enum.find_value(&get_process_key(key, &1))
# Reads `key` from the process dictionary of `name_or_pid`.
#
# Returns `nil` when the name is not registered, the process is no longer
# alive, or the key is absent. The previous version pattern-matched on
# `Process.info/2` directly, which raised when `Process.whereis/1`
# returned `nil` and match-failed on a dead process (`Process.info/2`
# returns `nil` in that case).
defp get_process_key(key, name_or_pid) do
  pid = if is_atom(name_or_pid), do: Process.whereis(name_or_pid), else: name_or_pid

  with pid when is_pid(pid) <- pid,
       {:dictionary, dictionary} <- Process.info(pid, :dictionary) do
    dictionary[key]
  else
    _ -> nil
  end
end
@doc """
Same as `build/2`, but returns a list with where the size is the given
`count`.
## Examples
iex> ExZample.build_list(3, User)
[%ExZample.User{}, %ExZample.User{}, %ExZample.User{}]
iex> ExZample.build_list(3, :book)
[%ExZample.Book{},%ExZample.Book{}, %ExZample.Book{}]
iex> ExZample.build_list(3, User, age: 45)
[%ExZample.User{age: 45}, %ExZample.User{age: 45}, %ExZample.User{age: 45}]
iex> ExZample.build_list(3, :book, code: "007")
[%ExZample.Book{code: "007"},%ExZample.Book{code: "007"}, %ExZample.Book{code: "007"}]
"""
since("0.2.0")
@spec build_list(count :: pos_integer, factory, attrs :: Enum.t() | nil) :: [struct]
def build_list(count, factory, attrs \\ nil)
def build_list(0, _factory, _attrs), do: []
def build_list(count, factory, attrs) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> build(factory, attrs) end)
@doc """
Same as `build/2`, but returns a tuple with a pair of structs.
## Examples
iex> ExZample.build_pair(User)
{%ExZample.User{}, %ExZample.User{}}
iex> ExZample.build_pair(:book)
{%ExZample.Book{},%ExZample.Book{}}
iex> ExZample.build_pair(User, age: 45)
{%ExZample.User{age: 45}, %ExZample.User{age: 45}}
iex> ExZample.build_pair(:book, code: "007")
{%ExZample.Book{code: "007"},%ExZample.Book{code: "007"}}
"""
since("0.2.0")
@spec build_pair(factory, attrs :: Enum.t() | nil) :: {struct, struct}
def build_pair(factory, attrs \\ nil), do: {build(factory, attrs), build(factory, attrs)}
@doc """
Builds a map with given `factory_or_alias` module. Has the same mechanism of
`build/2`.
## Examples
iex> ExZample.map_for(User)
%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil}
iex> ExZample.map_for(UserFactory)
%{age: 21, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
iex> ExZample.map_for(:book)
%{code: "1321", title: "The Book's Title"}
iex> ExZample.map_for(User, age: 45)
%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil}
iex> ExZample.map_for(UserFactory, age: 45)
%{age: 45, email: "<EMAIL>", first_name: "<NAME>", id: 1, last_name: "<NAME>"}
iex> ExZample.map_for(:book, code: "007")
%{code: "007", title: "The Book's Title"}
"""
since("0.8.0")
@spec map_for(factory, Enum.t() | nil) :: map
def map_for(factory, attributes \\ nil), do: factory |> build(attributes) |> to_map()
# Recursively converts a struct into a plain atom-keyed map, descending
# into nested structs and lists.
defp to_map(struct) when is_map(struct) do
  struct
  |> Map.from_struct()
  |> Map.new(fn {key, value} -> {key, to_map(value)} end)
end

defp to_map(list) when is_list(list), do: Enum.map(list, &to_map/1)

defp to_map(other), do: other
@doc """
Same as `map_for/2`, but returns a list with where the size is the given
`count`.
## Examples
iex> ExZample.map_list_for(3, User)
[%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil}]
iex> ExZample.map_list_for(3, :book)
[%{code: "1321", title: "The Book's Title"},
%{code: "1321", title: "The Book's Title"},
%{code: "1321", title: "The Book's Title"}]
iex> ExZample.map_list_for(3, User, age: 45)
[%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil}]
iex> ExZample.map_list_for(3, :book, code: "007")
[%{code: "007", title: "The Book's Title"},
%{code: "007", title: "The Book's Title"},
%{code: "007", title: "The Book's Title"}]
"""
since("0.8.0")
@spec map_list_for(count :: pos_integer, factory, attrs :: Enum.t() | nil) :: [struct]
def map_list_for(count, factory, attrs \\ nil)
def map_list_for(0, _factory, _attrs), do: []
def map_list_for(count, factory, attrs) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> map_for(factory, attrs) end)
@doc """
Same as `map_for/2`, but returns a tuple with a pair of maps.
## Examples
iex> ExZample.map_pair_for(User)
{%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: nil, email: nil, first_name: nil, id: nil, last_name: nil}}
iex> ExZample.map_pair_for(:book)
{%{code: "1321", title: "The Book's Title"},
%{code: "1321", title: "The Book's Title"}}
iex> ExZample.map_pair_for(User, age: 45)
{%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil},
%{age: 45, email: nil, first_name: nil, id: nil, last_name: nil}}
iex> ExZample.map_pair_for(:book, code: "007")
{%{code: "007", title: "The Book's Title"},
%{code: "007", title: "The Book's Title"}}
"""
since("0.8.0")
@spec map_pair_for(factory, attrs :: Enum.t() | nil) :: {struct, struct}
def map_pair_for(factory, attrs \\ nil), do: {map_for(factory, attrs), map_for(factory, attrs)}
@doc """
Builds a map with string keys given `factory_or_alias` module. Has the same mechanism of
`build/2`. Useful to simulate request parameters in a plug or phoenix
controller.
## Examples
iex> ExZample.params_for(User)
%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}
iex> ExZample.params_for(UserFactory)
%{"age" => 21, "email" => "<EMAIL>", "first_name" => "<NAME>", "id" => 1, "last_name" => "<NAME>"}
iex> ExZample.params_for(:book)
%{"code" => "1321", "title" => "The Book's Title"}
iex> ExZample.params_for(User, age: 45)
%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}
iex> ExZample.params_for(UserFactory, age: 45)
%{"age" => 45, "email" => "<EMAIL>", "first_name" => "<NAME>", "id" => 1, "last_name" => "<NAME>"}
iex> ExZample.params_for(:book, code: "007")
%{"code" => "007", "title" => "The Book's Title"}
"""
since("0.9.0")
@spec params_for(factory, Enum.t() | nil) :: map
def params_for(factory, attributes \\ nil), do: factory |> build(attributes) |> to_str_map()
# Recursively converts a struct into a plain string-keyed map, descending
# into nested structs and lists.
defp to_str_map(struct) when is_map(struct) do
  struct
  |> Map.from_struct()
  |> Map.new(fn {key, value} -> {to_string(key), to_str_map(value)} end)
end

defp to_str_map(list) when is_list(list), do: Enum.map(list, &to_str_map/1)

defp to_str_map(other), do: other
@doc """
Same as `params_for/2`, but returns a list with where the size is the given
`count`.
## Examples
iex> ExZample.params_list_for(3, User)
[%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}]
iex> ExZample.params_list_for(3, :book)
[%{"code" => "1321", "title" => "The Book's Title"},
%{"code" => "1321", "title" => "The Book's Title"},
%{"code" => "1321", "title" => "The Book's Title"}]
iex> ExZample.params_list_for(3, User, age: 45)
[%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}]
iex> ExZample.params_list_for(3, :book, code: "007")
[%{"code" => "007", "title" => "The Book's Title"},
%{"code" => "007", "title" => "The Book's Title"},
%{"code" => "007", "title" => "The Book's Title"}]
"""
since("0.9.0")
@spec params_list_for(count :: pos_integer, factory, attrs :: Enum.t() | nil) :: [struct]
def params_list_for(count, factory, attrs \\ nil)
def params_list_for(0, _factory, _attrs), do: []
def params_list_for(count, factory, attrs) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> params_for(factory, attrs) end)
@doc """
Same as `params_for/2`, but returns a tuple with a pair of maps.
## Examples
iex> ExZample.params_pair_for(User)
{%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => nil, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}}
iex> ExZample.params_pair_for(:book)
{%{"code" => "1321", "title" => "The Book's Title"},
%{"code" => "1321", "title" => "The Book's Title"}}
iex> ExZample.params_pair_for(User, age: 45)
{%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil},
%{"age" => 45, "email" => nil, "first_name" => nil, "id" => nil, "last_name" => nil}}
iex> ExZample.params_pair_for(:book, code: "007")
{%{"code" => "007", "title" => "The Book's Title"},
%{"code" => "007", "title" => "The Book's Title"}}
"""
since("0.9.0")
@spec params_pair_for(factory, attrs :: Enum.t() | nil) :: {struct, struct}
def params_pair_for(factory, attrs \\ nil),
do: {params_for(factory, attrs), params_for(factory, attrs)}
@doc """
Utiliy function that you can define several settings that `ExZample` will look
for before executing their functions.
## Options
* `:ex_zample_scope`, the scope that ExZample should look up for aliases. If no
scope is defined, `:global` is the default scope.
* `:ex_zample_ecto_repo`, the Ecto repo that ExZample should use to run their insert
functions.
This function works well with `setup/1` callback of `ExUnit` and `@tags`.
For example:
defmodule MyTest do
use ExUnit.Case
import ExZample
@moduletag ex_zample_scope: :my_app
setup :ex_zample
test "returns a user" do
assert %User{} == build(:user)
end
end
In the example above, `ExZample` will look for a factory registered in alias
`:user` in the `:my_app` scope.
"""
since("0.3.0")
@spec ex_zample(map) :: :ok
def ex_zample(settings) when is_map(settings) do
  # The scope always gets (re)set, falling back to `:global` when absent.
  Process.put(:ex_zample_scope, settings[:ex_zample_scope] || :global)

  # The repo is only stored when explicitly provided.
  if ecto_repo = settings[:ex_zample_ecto_repo] do
    Process.put(:ex_zample_ecto_repo, ecto_repo)
  end

  :ok
end
@doc """
Returns the current counter registered in the given sequence `name`.
## Examples
iex> ExZample.create_sequence(:customer_id, &("customer_" <> to_string(&1)))
...> ExZample.sequence(:customer_id)
"customer_1"
"""
since("0.4.0")
@spec sequence(atom) :: term
def sequence(name) when is_atom(name) do
  lookup_scope()
  |> sequence_name(name)
  |> Sequence.next()
catch
  # A :noproc exit from the sequence call means no sequence process was
  # ever registered under this name in the current scope.
  :exit, {:noproc, _} ->
    scope = lookup_scope()

    # Fixed grammar in the user-facing message ("your created" -> "you created").
    raise ArgumentError, """
    The sequence #{inspect(name)} doesn't exist in the current #{inspect(scope)} scope.
    Make sure you created a sequence using `ExZample.create_sequence/1`.
    """
end
@doc """
Same as `sequence/1`, but returns a list of where the number is determined by
the given `count`.
## Examples
iex> ExZample.create_sequence(:customer_id, &("customer_" <> to_string(&1)))
...> ExZample.sequence_list(3, :customer_id)
["customer_1", "customer_2", "customer_3"]
"""
since("0.4.0")
@spec sequence_list(pos_integer, atom) :: [term]
def sequence_list(0, _name), do: []
def sequence_list(count, name) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> sequence(name) end)
@doc """
Same as `sequence/1`, but returns a pair of sequence items.
## Examples
iex> ExZample.create_sequence(:customer_id, &("customer_" <> to_string(&1)))
...> ExZample.sequence_pair(:customer_id)
{"customer_1", "customer_2"}
"""
since("0.4.0")
@spec sequence_pair(atom) :: {term, term}
def sequence_pair(name), do: {sequence(name), sequence(name)}
if Code.ensure_loaded?(Ecto.Repo) do
@doc """
Inserts in the repository the example built by the `factory_or_alias` module.
If the given factory exports the `c:ecto_repo/0` function it will use it to call the
`insert!` function. Beyond that, it works similarly to `build/2`.
It will override the generated data with the given `attributes`.
## Options
* `ecto_opts`, when given, it will be forwarded to the second argument of
`Ecto.Repo.insert/2`
## Examples
iex> ExZample.insert(:player)
%ExZample.RPG.Player{}
iex> ExZample.insert(:player, email: "testmail")
%ExZample.RPG.Player{email: "testmail"}
"""
since("0.10.0")
@spec insert(factory, Enum.t() | nil) :: struct()
def insert(factory, attributes \\ nil)
def insert(factory, attributes) when is_list(attributes) do
{opts, attributes} = Keyword.split(attributes, [:ecto_opts])
insert(factory, attributes, opts)
end
def insert(factory, attributes), do: insert(factory, attributes, [])
@doc """
Same as `insert/2`, but the `attributes` and `opts` are explicit
separated.
## Options
* `ecto_opts`, when given, it will be forwarded to the second argument of
`Ecto.Repo.insert/2`
## Examples
iex> ExZample.insert(:player, %{email: "testmail"}, ecto_opts: [prefix: "private"])
%ExZample.RPG.Player{email: "testmail"}
"""
since("0.10.0")
@spec insert(factory, Enum.t() | nil, Keyword.t()) :: struct()
def insert(factory, attributes, opts) do
record = build(factory, attributes)
repo = lookup_in_processes_and_set(:ex_zample_ecto_repo, :not_in_processes)
repo = if repo == :not_in_processes, do: lookup_repo(factory), else: repo
repo.insert!(record, Keyword.get(opts, :ecto_opts, []))
end
# Resolves the Ecto repo for `factory`, which may be a factory module that
# exports `ecto_repo/0` or an alias registered in the current scope.
#
# Raises when the resolved module's `ecto_repo/0` returns a falsy value.
defp lookup_repo(factory) do
  factory_module =
    if function_exported?(factory, :ecto_repo, 0) do
      factory
    else
      scope = lookup_scope()
      aliases = get_config(scope)[:aliases]
      aliases[factory]
    end

  # Fixed the error message: the callback is `ecto_repo/0`, not `repo/0`
  # (see the `@callback ecto_repo` declaration and the call above).
  factory_module.ecto_repo() ||
    raise ArgumentError, "Your #{factory_module}.ecto_repo/0 should return an ecto Repo module"
end
@doc """
Same as `insert/2`, but returns a tuple with a pair of structs.
## Examples
iex> ExZample.insert_pair(:character)
{%ExZample.RPG.Character{}, %ExZample.RPG.Character{}}
iex> ExZample.insert_pair(:character, name: "Todd")
{%ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}}
"""
since("0.10.0")
@spec insert_pair(factory, Enum.t() | nil) :: {struct(), struct()}
def insert_pair(factory, attributes \\ nil),
do: {insert(factory, attributes), insert(factory, attributes)}
@doc """
Same as `insert/3`, but returns a tuple with a pair of structs.
## Examples
iex> ExZample.insert_pair(:character, %{name: "Todd"}, ecto_opts: [prefix: "private"])
{%ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}}
"""
since("0.10.0")
@spec insert_pair(factory, Enum.t() | nil, Keyword.t()) :: {struct(), struct()}
def insert_pair(factory, attributes, opts),
do: {insert(factory, attributes, opts), insert(factory, attributes, opts)}
@doc """
Same as `insert/2`, but returns a list with where the size is the given
`count`.
## Examples
iex> ExZample.insert_list(3, :character)
[%ExZample.RPG.Character{}, %ExZample.RPG.Character{}, %ExZample.RPG.Character{}]
iex> ExZample.insert_list(3, :character, name: "Todd")
[%ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}]
"""
since("0.10.0")
@spec insert_list(pos_integer, factory, Enum.t() | nil) :: [struct()]
def insert_list(count, factory, attributes \\ nil)
def insert_list(0, _factory, _attributes), do: []
def insert_list(count, factory, attributes) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> insert(factory, attributes) end)
@doc """
Same as `insert/3`, but returns a list with where the size is the given
`count`.
## Examples
iex> ExZample.insert_list(3, :character, %{name: "Todd"}, ecto_opts: [prefix: "private"])
[%ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}, %ExZample.RPG.Character{name: "Todd"}]
"""
since("0.10.0")
@spec insert_list(pos_integer, factory, Enum.t() | nil, Keyword.t()) :: [struct()]
def insert_list(count, factory, attributes, opts)
def insert_list(0, _factory, _attributes, _opts), do: []
def insert_list(count, factory, attributes, opts) when is_greater_than_0(count),
do: Enum.map(1..count, fn _ -> insert(factory, attributes, opts) end)
end
# Reads this scope's ExZample configuration from the application env,
# defaulting to an empty map.
defp get_config(scope), do: Application.get_env(:ex_zample, scope) || %{}

# Persists this scope's ExZample configuration in the application env.
defp put_config(scope, config),
  do: Application.put_env(:ex_zample, scope, config)

# Builds the registered name for a sequence, e.g. "global.customer_id".
defp sequence_name(scope, name), do: "#{scope}.#{name}"
end
|
lib/ex_zample/ex_zample.ex
| 0.91854
| 0.5425
|
ex_zample.ex
|
starcoder
|
defmodule Application do
@moduledoc """
A module for working with applications and defining application callbacks.
In Elixir (actually, in Erlang/OTP), an application is a component
implementing some specific functionality, that can be started and stopped
as a unit, and which can be re-used in other systems as well.
Applications are defined with an application file named `APP.app` where
`APP` is the APP name, usually in `underscore_case` convention. The
application file must reside in the same `ebin` directory as the
application's modules bytecode.
In Elixir, Mix is responsible for compiling your source code and
generating your application `.app` file. Furthermore, Mix is also
responsible for configuring, starting and stopping your application
and its dependencies. For this reason, this documentation will focus
on the remaining aspects of your application: the application environment,
and the application callback module.
You can learn more about Mix compilation of `.app` files by typing
`mix help compile.app`.
## Application environment
Once an application is started, OTP provides an application environment
that can be used to configure applications.
Assuming you are inside a Mix project, you can edit your application
function in the `mix.exs` file to the following:
def application do
[env: [hello: :world]]
end
In the application function, we can define the default environment values
for our application. By starting your application with `iex -S mix`, you
can access the default value:
Application.get_env(:APP_NAME, :hello)
#=> :world
It is also possible to put and delete values from the application value,
including new values that are not defined in the environment file (although
those should be avoided).
In the future, we plan to support configuration files which allows
developers to configure the environment of their dependencies.
Keep in mind that each application is responsible for its environment.
Do not use the functions in this module for directly access or modify
the environment of other application (as it may lead to inconsistent
data in the application environment).
## Application module callback
Often times, an application defines a supervision tree that must be started
and stopped when the application starts and stops. For such, we need to
define an application module callback. The first step is to define the
module callback in the application definition in the `mix.exs` file:
def application do
[mod: {MyApp, []}]
end
Our application now requires the `MyApp` module to provide an application
callback. This can be done by invoking `use Application` in that module
and defining a `start/2` callback, for example:
defmodule MyApp do
use Application
def start(_type, _args) do
MyApp.Supervisor.start_link()
end
end
`start/2` most commonly returns `{:ok, pid}` or `{:ok, pid, state}` where
`pid` identifies the supervision tree and the state is the application state.
`args` is the second element of the tuple given to the `:mod` option.
The `type` passed into `start/2` is usually `:normal` unless in a distributed
setup where applications takeover and failovers are configured. This particular
aspect of applications can be read with more detail in the OTP documentation:
* http://www.erlang.org/doc/man/application.html
* http://www.erlang.org/doc/design_principles/applications.html
A developer may also implement the `stop/1` callback (automatically defined
by `use Application`) which does any application cleanup. It receives the
application state and can return any value. Notice that shutting down the
supervisor is automatically handled by the VM.
"""
@doc false
defmacro __using__(_) do
  # Injected into any module that does `use Application`: declares the
  # Erlang :application behaviour and supplies a default no-op stop/1,
  # so the using module only has to implement start/2.
  quote location: :keep do
    @behaviour :application

    # Default cleanup callback invoked after the application stops.
    @doc false
    def stop(_state) do
      :ok
    end

    # Allow the using module to provide its own stop/1.
    defoverridable [stop: 1]
  end
end
# The name of an application, e.g. `:my_app`.
@type app :: atom
# A key in an application's environment.
@type key :: atom
# Any term stored under a key in the application environment.
@type value :: term
# Restart type accepted by `start/2` and `ensure_started/2`.
@type start_type :: :permanent | :transient | :temporary
@doc """
Returns every key-value pair stored in the environment of `app`.

Returns an empty list when the application is not loaded or its
environment is empty.
"""
@spec get_all_env(app) :: [{key,value}]
def get_all_env(app), do: :application.get_all_env(app)
@doc """
Returns the value for `key` in `app`'s environment.

Falls back to `default` when the application is not loaded or the
configuration parameter does not exist.
"""
@spec get_env(app, key, value) :: value
def get_env(app, key, default \\ nil) do
  with {:ok, value} <- :application.get_env(app, key) do
    value
  else
    :undefined -> default
  end
end
@doc """
Returns the value for `key` in `app`'s environment wrapped in an
`{:ok, value}` tuple.

Returns `:error` when the application is not loaded or the
configuration parameter does not exist.
"""
@spec fetch_env(app, key) :: {:ok, value} | :error
def fetch_env(app, key) do
  # :application.get_env/2 already yields {:ok, value} on success,
  # so only the :undefined result needs translating.
  result = :application.get_env(app, key)
  if result == :undefined, do: :error, else: result
end
@doc """
Puts the `value` in `key` for the given `app`.

## Options

  * `:timeout` - the timeout for the change (defaults to 5000ms)
  * `:persistent` - persists the given value on application load and reloads

If `put_env/4` is called before the application is loaded, the application
environment values specified in the `.app` file will override the ones
previously set.

Set `:persistent` to `true` to guarantee that parameters set with this
function are not overridden by the ones defined in the application
resource file on load: persistent values remain in place after the
application is loaded and also across application reloads.
"""
@spec put_env(app, key, value, [timeout: timeout, persistent: boolean]) :: :ok
def put_env(app, key, value, opts \\ []), do: :application.set_env(app, key, value, opts)
@doc """
Deletes the `key` from the given `app` environment.

Accepts the same options as `put_env/4`.
"""
@spec delete_env(app, key, [timeout: timeout, persistent: boolean]) :: :ok
def delete_env(app, key, opts \\ []), do: :application.unset_env(app, key, opts)
@doc """
Ensures the given `app` is started.

Like `start/2`, but returns `:ok` when the application has already been
started. Handy in scripts and in test setup, where test dependencies
must be explicitly running:

    :ok = Application.ensure_started(:my_test_dep)

"""
@spec ensure_started(app, start_type) :: :ok | {:error, term}
def ensure_started(app, type \\ :temporary) when is_atom(app),
  do: :application.ensure_started(app, type)
@doc """
Ensures the given `app` and all of its dependency applications are
started.

Like `start/2`, but also starts the applications listed under
`:applications` in the `.app` file in case they were not previously
started.
"""
@spec ensure_all_started(app, start_type) :: {:ok, [app]} | {:error, term}
def ensure_all_started(app, type \\ :temporary) when is_atom(app),
  do: :application.ensure_all_started(app, type)
@doc """
Starts the given `app`.

If the `app` is not loaded, it is first loaded via `load/1`. Any
included application defined in the `:included_applications` key of
the `.app` file is also loaded, but not started.

All applications listed in the `:applications` key must be explicitly
started before this application is. Otherwise
`{:error, {:not_started, app}}` is returned, where `app` is the name
of the missing application. To automatically load **and start** all of
`app`'s dependencies, see `ensure_all_started/2`.

The `type` argument specifies the type of the application:

  * `:permanent` - if `app` terminates, all other applications and the
    entire node are also terminated.

  * `:transient` - if `app` terminates with `:normal` reason, it is
    reported but no other applications are terminated. If a transient
    application terminates abnormally, all other applications and the
    entire node are also terminated.

  * `:temporary` - if `app` terminates, it is reported but no other
    applications are terminated (the default).

Note that it is always possible to stop an application explicitly by
calling `stop/1`; regardless of its type, no other applications are
affected then. Note also that the `:transient` type is of little
practical use, since when a supervision tree terminates, the reason is
set to `:shutdown`, not `:normal`.
"""
@spec start(app, start_type) :: :ok | {:error, term}
def start(app, type \\ :temporary) when is_atom(app), do: :application.start(app, type)
@doc """
Stops the given `app`.

The application remains loaded after being stopped.
"""
@spec stop(app) :: :ok | {:error, term}
def stop(app), do: :application.stop(app)
@doc """
Loads the given `app`.

In order to be loaded, an `.app` file must be in the load paths. All
`:included_applications` are loaded as well.

Loading an application neither starts it nor loads its modules, but it
does load its environment.
"""
@spec load(app) :: :ok | {:error, term}
def load(app) when is_atom(app), do: :application.load(app)
@doc """
Unloads the given `app`, along with all of its
`:included_applications`.

Note that this does not purge the application's modules.
"""
@spec unload(app) :: :ok | {:error, term}
def unload(app) when is_atom(app), do: :application.unload(app)
@doc """
Gets the directory for app.

This information is returned based on the code path. Here is an
example:

    File.mkdir_p!("foo/ebin")
    Code.prepend_path("foo/ebin")
    Application.app_dir(:foo)
    #=> "foo"

Even though the directory is empty and there is no `.app` file it is
considered the application directory based on the name "foo/ebin".
The name may contain a dash `-` which is considered to be the app
version and it is removed for the lookup purposes:

    File.mkdir_p!("bar-123/ebin")
    Code.prepend_path("bar-123/ebin")
    Application.app_dir(:bar)
    #=> "bar-123"

For more information on code paths, check the `Code` module in Elixir
and also Erlang's `:code` module.
"""
@spec app_dir(app) :: String.t
def app_dir(app) when is_atom(app) do
  # :code.lib_dir/1 returns a charlist path on success, or a tagged
  # error when no directory for `app` exists on the code path.
  case :code.lib_dir(app) do
    {:error, :bad_name} ->
      raise ArgumentError, "unknown application: #{inspect app}"

    lib when is_list(lib) ->
      IO.chardata_to_string(lib)
  end
end
@doc """
Returns the given `path` joined onto `app_dir/1` for `app`.
"""
@spec app_dir(app, String.t) :: String.t
def app_dir(app, path) when is_binary(path), do: Path.join(app_dir(app), path)
@doc """
Formats the error reason returned by `start/2`, `ensure_started/2`,
`stop/1`, `load/1` and `unload/1` as a string.
"""
@spec format_error(any) :: String.t
def format_error(reason) do
  impl_format_error(reason)
catch
  # A user could create an error that merely looks like a builtin one,
  # crashing the formatter itself; fall back to inspecting the raw
  # reason in that case.
  :error, _ -> inspect(reason)
end
# Converts the well-known error reasons produced by the :application
# module into human-readable strings. Each clause handles one reason
# shape; the final clause is a catch-all. Clause order matters: the
# more specific tuple patterns must be tried first.

# exit(:normal) call is special cased, undo the special case.
defp impl_format_error({{:EXIT, :normal}, {mod, :start, args}}) do
  Exception.format_exit({:normal, {mod, :start, args}})
end

# {:error, reason} return value
defp impl_format_error({reason, {mod, :start, args}}) do
  Exception.format_mfa(mod, :start, args) <> " returned an error: " <>
    Exception.format_exit(reason)
end

# error or exit(reason) call, use exit reason as reason.
defp impl_format_error({:bad_return, {{mod, :start, args}, {:EXIT, reason}}}) do
  Exception.format_exit({reason, {mod, :start, args}})
end

# bad return value
defp impl_format_error({:bad_return, {{mod, :start, args}, return}}) do
  Exception.format_mfa(mod, :start, args) <>
    " returned a bad value: " <> inspect(return)
end

# Attempted to start an application that is already running.
defp impl_format_error({:already_started, app}) when is_atom(app) do
  "already started application #{app}"
end

# A dependency listed under :applications was not started first.
defp impl_format_error({:not_started, app}) when is_atom(app) do
  "not started application #{app}"
end

defp impl_format_error({:bad_application, app}) do
  "bad application: #{inspect(app)}"
end

defp impl_format_error({:already_loaded, app}) when is_atom(app) do
  "already loaded application #{app}"
end

defp impl_format_error({:not_loaded, app}) when is_atom(app) do
  "not loaded application #{app}"
end

defp impl_format_error({:invalid_restart_type, restart}) do
  "invalid application restart type: #{inspect(restart)}"
end

defp impl_format_error({:invalid_name, name}) do
  "invalid application name: #{inspect(name)}"
end

defp impl_format_error({:invalid_options, opts}) do
  "invalid application options: #{inspect(opts)}"
end

defp impl_format_error({:badstartspec, spec}) do
  "bad application start specs: #{inspect(spec)}"
end

# The .app resource file could not be found; the first element is a
# charlist message coming straight from the Erlang file layer.
defp impl_format_error({'no such file or directory', file}) do
  "could not find application file: #{file}"
end

# Catch-all: delegate to the generic exit-reason formatter.
defp impl_format_error(reason) do
  Exception.format_exit(reason)
end
end
|
lib/elixir/lib/application.ex
| 0.832747
| 0.621914
|
application.ex
|
starcoder
|
defmodule AWS.CloudWatch do
@moduledoc """
Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the
applications you run on AWS in real time. You can use CloudWatch to collect
and track metrics, which are the variables you want to measure for your
resources and applications.
CloudWatch alarms send notifications or automatically change the resources
you are monitoring based on rules that you define. For example, you can
monitor the CPU usage and disk reads and writes of your Amazon EC2
instances. Then, use this data to determine whether you should launch
additional instances to handle increased load. You can also use this data
to stop under-used instances to save money.
In addition to monitoring the built-in metrics that come with AWS, you can
monitor your own custom metrics. With CloudWatch, you gain system-wide
visibility into resource utilization, application performance, and
operational health.
"""
@doc """
Deletes the specified alarms. You can delete up to 100 alarms in one
operation. However, this total can include no more than one composite
alarm. For example, you could delete 99 metric alarms and one composite
alarm with one operation, but you can't delete two composite alarms with
one operation.

In the event of an error, no alarms are deleted.

Note: it is possible to create a loop or cycle of composite alarms,
where composite alarm A depends on composite alarm B, and composite
alarm B also depends on composite alarm A. In this scenario, you can't
delete any composite alarm that is part of the cycle because there is
always still a composite alarm that depends on that alarm that you want
to delete.

To get out of such a situation, you must break the cycle by changing the
rule of one of the composite alarms in the cycle to remove a dependency
that creates the cycle. The simplest change to make to break a cycle is
to change the `AlarmRule` of one of the alarms to `False`.

Additionally, the evaluation of composite alarms stops if CloudWatch
detects a cycle in the evaluation path.
"""
def delete_alarms(client, input, options \\ []) do
  request(client, "DeleteAlarms", input, options)
end
@doc """
Removes the specified anomaly detection model from your account.
"""
def delete_anomaly_detector(client, input, options \\ []),
  do: request(client, "DeleteAnomalyDetector", input, options)
@doc """
Deletes all of the dashboards you specify, up to 100 per call. If an
error occurs during the call, no dashboards are deleted.
"""
def delete_dashboards(client, input, options \\ []),
  do: request(client, "DeleteDashboards", input, options)
@doc """
Permanently deletes the specified Contributor Insights rules.

If you create a rule, delete it, and then re-create it with the same
name, historical data from the first incarnation of the rule might not
be available.
"""
def delete_insight_rules(client, input, options \\ []),
  do: request(client, "DeleteInsightRules", input, options)
@doc """
Retrieves the history for the specified alarm. Results can be filtered
by date range or item type. When no alarm name is given, the histories
of either all metric alarms or all composite alarms are returned.

CloudWatch retains alarm history even after the alarm is deleted.
"""
def describe_alarm_history(client, input, options \\ []),
  do: request(client, "DescribeAlarmHistory", input, options)
@doc """
Retrieves the specified alarms. You can filter the results by specifying
a prefix for the alarm name, the alarm state, or a prefix for any action.
"""
def describe_alarms(client, input, options \\ []) do
  request(client, "DescribeAlarms", input, options)
end
@doc """
Retrieves the alarms for the specified metric. To narrow the results,
specify a statistic, period, or unit.
"""
def describe_alarms_for_metric(client, input, options \\ []),
  do: request(client, "DescribeAlarmsForMetric", input, options)
@doc """
Lists the anomaly detection models created in your account. You can
list every model, or restrict the results to models related to a
particular namespace, metric name, or metric dimension.
"""
def describe_anomaly_detectors(client, input, options \\ []),
  do: request(client, "DescribeAnomalyDetectors", input, options)
@doc """
Returns all of the Contributor Insights rules in your account in a
single operation.

For more information about Contributor Insights, see [Using Contributor
Insights to Analyze High-Cardinality
Data](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html).
"""
def describe_insight_rules(client, input, options \\ []),
  do: request(client, "DescribeInsightRules", input, options)
@doc """
Disables the actions for the specified alarms. While an alarm's actions
are disabled, they do not execute when the alarm changes state.
"""
def disable_alarm_actions(client, input, options \\ []),
  do: request(client, "DisableAlarmActions", input, options)
@doc """
Disables the specified Contributor Insights rules. Disabled rules do
not analyze log groups and do not incur costs.
"""
def disable_insight_rules(client, input, options \\ []),
  do: request(client, "DisableInsightRules", input, options)
@doc """
Enables the actions for the specified alarms.
"""
def enable_alarm_actions(client, input, options \\ []),
  do: request(client, "EnableAlarmActions", input, options)
@doc """
Enables the specified Contributor Insights rules. Once enabled, rules
immediately begin analyzing log data.
"""
def enable_insight_rules(client, input, options \\ []),
  do: request(client, "EnableInsightRules", input, options)
@doc """
Displays the details of the dashboard you specify.

To copy an existing dashboard, call `GetDashboard` and use the data
returned in `DashboardBody` as the template for a new dashboard created
via `PutDashboard`.
"""
def get_dashboard(client, input, options \\ []),
  do: request(client, "GetDashboard", input, options)
@doc """
Returns the time series data collected by a Contributor Insights rule.
The data includes the identity and number of contributors to the log
group.

You can also optionally return one or more statistics about each data
point in the time series. These statistics can include the following:

  * `UniqueContributors` -- the number of unique contributors for each
    data point.

  * `MaxContributorValue` -- the value of the top contributor for each
    data point. The identity of the contributor might change for each
    data point in the graph.

    If this rule aggregates by COUNT, the top contributor for each data
    point is the contributor with the most occurrences in that period.
    If the rule aggregates by SUM, the top contributor is the one with
    the highest sum in the log field specified by the rule's `Value`,
    during that period.

  * `SampleCount` -- the number of data points matched by the rule.

  * `Sum` -- the sum of the values from all contributors during the
    time period represented by that data point.

  * `Minimum` -- the minimum value from a single observation during the
    time period represented by that data point.

  * `Maximum` -- the maximum value from a single observation during the
    time period represented by that data point.

  * `Average` -- the average value from all contributors during the
    time period represented by that data point.
"""
def get_insight_rule_report(client, input, options \\ []),
  do: request(client, "GetInsightRuleReport", input, options)
@doc """
You can use the `GetMetricData` API to retrieve as many as 500 different
metrics in a single request, with a total of as many as 100,800 data
points. You can also optionally perform math expressions on the values
of the returned statistics, to create new time series that represent new
insights into your data. For example, using Lambda metrics, you could
divide the Errors metric by the Invocations metric to get an error rate
time series. For more information about metric math expressions, see
[Metric Math Syntax and
Functions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
in the *Amazon CloudWatch User Guide*.

Calls to the `GetMetricData` API have a different pricing structure than
calls to `GetMetricStatistics`. For more information about pricing, see
[Amazon CloudWatch Pricing](https://aws.amazon.com/cloudwatch/pricing/).

Amazon CloudWatch retains metric data as follows:

  * Data points with a period of less than 60 seconds are available for
    3 hours. These data points are high-resolution metrics and are
    available only for custom metrics that have been defined with a
    `StorageResolution` of 1.

  * Data points with a period of 60 seconds (1-minute) are available for
    15 days.

  * Data points with a period of 300 seconds (5-minute) are available
    for 63 days.

  * Data points with a period of 3600 seconds (1 hour) are available for
    455 days (15 months).

Data points that are initially published with a shorter period are
aggregated together for long-term storage. For example, if you collect
data using a period of 1 minute, the data remains available for 15 days
with 1-minute resolution. After 15 days, this data is still available,
but is aggregated and retrievable only with a resolution of 5 minutes.
After 63 days, the data is further aggregated and is available with a
resolution of 1 hour.

If you omit `Unit` in your request, all data that was collected with any
unit is returned, along with the corresponding units that were specified
when the data was reported to CloudWatch. If you specify a unit, the
operation returns only data that was collected with that unit specified.
If you specify a unit that does not match the data collected, the
results of the operation are null. CloudWatch does not perform unit
conversions.
"""
def get_metric_data(client, input, options \\ []),
  do: request(client, "GetMetricData", input, options)
@doc """
Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440.
If you request more than 1,440 data points, CloudWatch returns an error.
To reduce the number of data points, you can narrow the specified time
range and make multiple requests across adjacent time ranges, or you can
increase the specified period. Data points are not returned in
chronological order.

CloudWatch aggregates data points based on the length of the period that
you specify. For example, if you request statistics with a one-hour
period, CloudWatch aggregates all data points with time stamps that fall
within each one-hour period. Therefore, the number of values aggregated
by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If
you publish data using a statistic set instead, you can only retrieve
percentile statistics for this data if one of the following conditions
is true:

  * The SampleCount value of the statistic set is 1.

  * The Min and the Max values of the statistic set are equal.

Percentile statistics are not available for metrics when any of the
metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

  * Data points with a period of less than 60 seconds are available for
    3 hours. These data points are high-resolution metrics and are
    available only for custom metrics that have been defined with a
    `StorageResolution` of 1.

  * Data points with a period of 60 seconds (1-minute) are available for
    15 days.

  * Data points with a period of 300 seconds (5-minute) are available
    for 63 days.

  * Data points with a period of 3600 seconds (1 hour) are available for
    455 days (15 months).

Data points that are initially published with a shorter period are
aggregated together for long-term storage. For example, if you collect
data using a period of 1 minute, the data remains available for 15 days
with 1-minute resolution. After 15 days, this data is still available,
but is aggregated and retrievable only with a resolution of 5 minutes.
After 63 days, the data is further aggregated and is available with a
resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of
July 9, 2016.

For information about metrics and dimensions supported by AWS services,
see the [Amazon CloudWatch Metrics and Dimensions
Reference](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html)
in the *Amazon CloudWatch User Guide*.
"""
def get_metric_statistics(client, input, options \\ []),
  do: request(client, "GetMetricStatistics", input, options)
@doc """
You can use the `GetMetricWidgetImage` API to retrieve a snapshot graph
of one or more Amazon CloudWatch metrics as a bitmap image. You can then
embed this image into your services and products, such as wiki pages,
reports, and documents. You could also retrieve images regularly, such
as every minute, and create your own custom live dashboard.

The graph you retrieve can include all CloudWatch metric graph features,
including metric math and horizontal and vertical annotations.

There is a limit of 20 transactions per second for this API. Each
`GetMetricWidgetImage` action has the following limits:

  * As many as 100 metrics in the graph.

  * Up to 100 KB uncompressed payload.
"""
def get_metric_widget_image(client, input, options \\ []),
  do: request(client, "GetMetricWidgetImage", input, options)
@doc """
Returns a list of the dashboards for your account. If you include
`DashboardNamePrefix`, only those dashboards with names starting with
the prefix are listed. Otherwise, all dashboards in your account are
listed.

`ListDashboards` returns up to 1000 results on one page. If there are
more than 1000 dashboards, call `ListDashboards` again and include the
`NextToken` value you received from the first call to receive the next
1000 results.
"""
def list_dashboards(client, input, options \\ []),
  do: request(client, "ListDashboards", input, options)
@doc """
List the specified metrics. You can use the returned metrics with
[GetMetricData](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
or
[GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html)
to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional
results, use the returned token with subsequent calls.

After you create a metric, allow up to 15 minutes before the metric
appears. You can see statistics about the metric sooner by using
[GetMetricData](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
or
[GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html).

`ListMetrics` doesn't return information about metrics if those metrics
haven't reported data in the past two weeks. To retrieve those metrics,
use
[GetMetricData](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
or
[GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html).
"""
def list_metrics(client, input, options \\ []),
  do: request(client, "ListMetrics", input, options)
@doc """
Displays the tags associated with a CloudWatch resource. Currently,
alarms and Contributor Insights rules support tagging.
"""
def list_tags_for_resource(client, input, options \\ []),
  do: request(client, "ListTagsForResource", input, options)
@doc """
Creates an anomaly detection model for a CloudWatch metric. The model
can be used to display a band of expected normal values when the metric
is graphed.

For more information, see [CloudWatch Anomaly
Detection](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Anomaly_Detection.html).
"""
def put_anomaly_detector(client, input, options \\ []),
  do: request(client, "PutAnomalyDetector", input, options)
@doc """
Creates or updates a *composite alarm*. When you create a composite
alarm, you specify a rule expression for the alarm that takes into
account the alarm states of other alarms that you have created. The
composite alarm goes into ALARM state only if all conditions of the
rule are met.

The alarms specified in a composite alarm's rule expression can include
metric alarms and other composite alarms.

Using composite alarms can reduce alarm noise. You can create multiple
metric alarms, and also create a composite alarm and set up alerts only
for the composite alarm. For example, you could create a composite
alarm that goes into ALARM state only when more than one of the
underlying metric alarms are in ALARM state.

Currently, the only alarm actions that can be taken by composite alarms
are notifying SNS topics.

Note: it is possible to create a loop or cycle of composite alarms,
where composite alarm A depends on composite alarm B, and composite
alarm B also depends on composite alarm A. In this scenario, you can't
delete any composite alarm that is part of the cycle because there is
always still a composite alarm that depends on that alarm that you want
to delete.

To get out of such a situation, you must break the cycle by changing
the rule of one of the composite alarms in the cycle to remove a
dependency that creates the cycle. The simplest change to make to break
a cycle is to change the `AlarmRule` of one of the alarms to `False`.

Additionally, the evaluation of composite alarms stops if CloudWatch
detects a cycle in the evaluation path.

When this operation creates an alarm, the alarm state is immediately
set to `INSUFFICIENT_DATA`. The alarm is then evaluated and its state
is set appropriately. Any actions associated with the new state are
then executed. For a composite alarm, this initial time after creation
is the only time that the alarm can be in `INSUFFICIENT_DATA` state.

When you update an existing alarm, its state is left unchanged, but the
update completely overwrites the previous configuration of the alarm.
"""
def put_composite_alarm(client, input, options \\ []),
  do: request(client, "PutCompositeAlarm", input, options)
@doc """
Creates a dashboard if it does not already exist, or updates an
existing dashboard. If you update a dashboard, the entire contents are
replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using `PutDashboard` is to copy an
existing dashboard. To copy an existing dashboard using the console,
you can load the dashboard and then use the View/edit source command in
the Actions menu to display the JSON block for that dashboard. Another
way to copy a dashboard is to use `GetDashboard`, and then use the data
returned within `DashboardBody` as the template for the new dashboard
when you call `PutDashboard`.

When you create a dashboard with `PutDashboard`, a good practice is to
add a text widget at the top of the dashboard with a message that the
dashboard was created by script and should not be changed in the
console. This message could also point console users to the location of
the `DashboardBody` script or the CloudFormation template used to
create the dashboard.
"""
def put_dashboard(client, input, options \\ []),
  do: request(client, "PutDashboard", input, options)
@doc """
Creates a Contributor Insights rule. Rules evaluate log events in a
CloudWatch Logs log group, enabling you to find contributor data for
the log events in that log group. For more information, see [Using
Contributor Insights to Analyze High-Cardinality
Data](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html).

If you create a rule, delete it, and then re-create it with the same
name, historical data from the first incarnation of the rule might not
be available.
"""
def put_insight_rule(client, input, options \\ []),
  do: request(client, "PutInsightRule", input, options)
@doc """
Creates or updates an alarm and associates it with the specified metric,
metric math expression, or anomaly detection model.
Alarms based on anomaly detection models cannot have Auto Scaling actions.
When this operation creates an alarm, the alarm state is immediately set to
`INSUFFICIENT_DATA`. The alarm is then evaluated and its state is set
appropriately. Any actions associated with the new state are then executed.
When you update an existing alarm, its state is left unchanged, but the
update completely overwrites the previous configuration of the alarm.
If you are an IAM user, you must have Amazon EC2 permissions for some alarm
operations:
<ul> <li> `iam:CreateServiceLinkedRole` for all alarms with EC2 actions
</li> <li> `ec2:DescribeInstanceStatus` and `ec2:DescribeInstances` for all
alarms on EC2 instance status metrics
</li> <li> `ec2:StopInstances` for alarms with stop actions
</li> <li> `ec2:TerminateInstances` for alarms with terminate actions
</li> <li> No specific permissions are needed for alarms with recover
actions
</li> </ul> If you have read/write permissions for Amazon CloudWatch but
not for Amazon EC2, you can still create an alarm, but the stop or
terminate actions are not performed. However, if you are later granted the
required permissions, the alarm actions that you created earlier are
performed.
If you are using an IAM role (for example, an EC2 instance profile), you
cannot stop or terminate the instance using alarm actions. However, you can
still see the alarm state and perform any other actions such as Amazon SNS
notifications or Auto Scaling policies.
If you are using temporary security credentials granted using AWS STS, you
cannot stop or terminate an EC2 instance using alarm actions.
The first time you create an alarm in the AWS Management Console, the CLI,
or by using the PutMetricAlarm API, CloudWatch creates the necessary
service-linked role for you. The service-linked role is called
`AWSServiceRoleForCloudWatchEvents`. For more information, see [AWS
service-linked
role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role).
"""
# Thin wrapper around the shared `request/4` dispatcher for the
# CloudWatch `PutMetricAlarm` action.
def put_metric_alarm(client, input, options \\ []),
  do: request(client, "PutMetricAlarm", input, options)
@doc """
Publishes metric data points to Amazon CloudWatch. CloudWatch associates
the data points with the specified metric. If the specified metric does not
exist, CloudWatch creates the metric. When CloudWatch creates a metric, it
can take up to fifteen minutes for the metric to appear in calls to
[ListMetrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html).
You can publish either individual data points in the `Value` field, or
arrays of values and the number of times each value occurred during the
period by using the `Values` and `Counts` fields in the `MetricDatum`
structure. Using the `Values` and `Counts` method enables you to publish up
to 150 values per metric with one `PutMetricData` request, and supports
retrieving percentile statistics on this data.
Each `PutMetricData` request is limited to 40 KB in size for HTTP POST
requests. You can send a payload compressed by gzip. Each request is also
limited to no more than 20 different metrics.
Although the `Value` parameter accepts numbers of type `Double`, CloudWatch
rejects values that are either too small or too large. Values must be in
the range of -2^360 to 2^360. In addition, special values (for example,
NaN, +Infinity, -Infinity) are not supported.
You can use up to 10 dimensions per metric to further clarify what data the
metric collects. Each dimension consists of a Name and Value pair. For more
information about specifying dimensions, see [Publishing
Metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
in the *Amazon CloudWatch User Guide*.
Data points with time stamps from 24 hours ago or longer can take at least
48 hours to become available for
[GetMetricData](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
or
[GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html)
from the time they are submitted. Data points with time stamps between 3
and 24 hours ago can take as much as 2 hours to become available for
[GetMetricData](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
or
[GetMetricStatistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html).
CloudWatch needs raw data points to calculate percentile statistics. If you
publish data using a statistic set instead, you can only retrieve
percentile statistics for this data if one of the following conditions is
true:
<ul> <li> The `SampleCount` value of the statistic set is 1 and `Min`,
`Max`, and `Sum` are all equal.
</li> <li> The `Min` and `Max` are equal, and `Sum` is equal to `Min`
multiplied by `SampleCount`.
</li> </ul>
"""
# Thin wrapper around the shared `request/4` dispatcher for the
# CloudWatch `PutMetricData` action.
def put_metric_data(client, input, options \\ []),
  do: request(client, "PutMetricData", input, options)
@doc """
Temporarily sets the state of an alarm for testing purposes. When the
updated state differs from the previous value, the action configured for
the appropriate state is invoked. For example, if your alarm is configured
to send an Amazon SNS message when an alarm is triggered, temporarily
changing the alarm state to `ALARM` sends an SNS message.
Metric alarms return to their actual state quickly, often within seconds.
Because the metric alarm state change happens quickly, it is typically only
visible in the alarm's **History** tab in the Amazon CloudWatch console or
through
[DescribeAlarmHistory](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html).
If you use `SetAlarmState` on a composite alarm, the composite alarm is not
guaranteed to return to its actual state. It returns to its actual state
only once any of its children alarms change state. It is also reevaluated
if you update its configuration.
If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling
policies, you must include information in the `StateReasonData` parameter
to enable the policy to take the correct action.
"""
# Thin wrapper around the shared `request/4` dispatcher for the
# CloudWatch `SetAlarmState` action.
def set_alarm_state(client, input, options \\ []),
  do: request(client, "SetAlarmState", input, options)
@doc """
Assigns one or more tags (key-value pairs) to the specified CloudWatch
resource. Currently, the only CloudWatch resources that can be tagged are
alarms and Contributor Insights rules.
Tags can help you organize and categorize your resources. You can also use
them to scope user permissions by granting a user permission to access or
change only resources with certain tag values.
Tags don't have any semantic meaning to AWS and are interpreted strictly as
strings of characters.
You can use the `TagResource` action with an alarm that already has tags.
If you specify a new tag key for the alarm, this tag is appended to the
list of tags associated with the alarm. If you specify a tag key that is
already associated with the alarm, the new tag value that you specify
replaces the previous value for that tag.
You can associate as many as 50 tags with a CloudWatch resource.
"""
# Thin wrapper around the shared `request/4` dispatcher for the
# CloudWatch `TagResource` action.
def tag_resource(client, input, options \\ []),
  do: request(client, "TagResource", input, options)
@doc """
Removes one or more tags from the specified resource.
"""
# Thin wrapper around the shared `request/4` dispatcher for the
# CloudWatch `UntagResource` action.
def untag_resource(client, input, options \\ []),
  do: request(client, "UntagResource", input, options)
@spec request(AWS.Client.t(), binary(), map(), list()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
# Builds, signs (SigV4) and dispatches one CloudWatch Query-protocol call.
# The action name and fixed API version are folded into the form payload.
defp request(client, action, input, options) do
  client = %{client | service: "monitoring"}
  host = build_host("monitoring", client)
  url = build_url(host, client)

  base_headers = [
    {"Host", host},
    {"Content-Type", "application/x-www-form-urlencoded"}
  ]

  payload =
    input
    |> Map.merge(%{"Action" => action, "Version" => "2010-08-01"})
    |> (fn merged -> encode!(client, merged) end).()

  # Signing must happen after the payload is final: the signature covers the body.
  signed_headers = AWS.Request.sign_v4(client, "POST", url, base_headers, payload)
  post(client, url, payload, signed_headers, options)
end
# Performs the HTTP POST and normalizes the outcome:
#   * 200 with empty body  -> {:ok, nil, response}
#   * 200 with a body      -> {:ok, decoded_xml, response}
#   * any other status     -> {:error, {:unexpected_response, response}}
#   * transport failure    -> passed through as {:error, reason}
defp post(client, url, payload, headers, options) do
  case AWS.Client.request(client, :post, url, payload, headers, options) do
    {:ok, %{status_code: 200, body: ""} = response} ->
      {:ok, nil, response}

    {:ok, %{status_code: 200, body: body} = response} ->
      {:ok, decode!(client, body), response}

    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    {:error, _reason} = error ->
      error
  end
end
# Resolves the request host. The "local" region short-circuits to an explicit
# endpoint (or plain "localhost") for development; otherwise the standard
# AWS host is assembled as "<prefix>.<region>.<endpoint>".
defp build_host(_prefix, %{region: "local", endpoint: endpoint}), do: endpoint
defp build_host(_prefix, %{region: "local"}), do: "localhost"

defp build_host(prefix, %{region: region, endpoint: endpoint}),
  do: "#{prefix}.#{region}.#{endpoint}"
# Assembles the full request URL from the client's protocol and port.
defp build_url(host, %{proto: scheme, port: port}) do
  "#{scheme}://#{host}:#{port}/"
end
# Serializes the request payload using the AWS Query wire format.
defp encode!(client, payload), do: AWS.Client.encode!(client, payload, :query)
# Parses the XML response body returned by the CloudWatch Query API.
defp decode!(client, payload), do: AWS.Client.decode!(client, payload, :xml)
end
|
lib/aws/generated/cloud_watch.ex
| 0.929748
| 0.641605
|
cloud_watch.ex
|
starcoder
|
defmodule Handkit do
  @moduledoc """
  Handkit is an Elixir client for the [Handcash Connect API](https://handcash.dev).
  Handkit offers 100% coverage of the Handcash Connect APIs, so you can build
  blazing fast Bitcoin apps with Elixir in hours.

  ## Installation

  The package can be installed by adding `handkit` to your list of dependencies
  in `mix.exs`.

      def deps do
        [
          {:handkit, "~> #{ Mix.Project.config[:version] }"}
        ]
      end

  You will need to register your application using the Handcash
  [developer dashboard](https://dashboard.handcash.dev) and make a note of your
  app's **app ID**.

  ## User authorization

  Familiarize yourself with the Handcash Connect
  [user authorization flow](https://docs.handcash.dev/authorization/).

  Within your Elixir app, use Handkit to generate a redirection URL and present
  a button in your app's UI for users to click and grant your app's permissions.

      iex> redirect_url = Handkit.get_redirection_url("123456789")
      "https://app.handcash.io/#/authorizeApp?appId=123456789"

  When a user grants your app's permissions, they will be redirected back to the
  URL configured in the developer dashboard. Your app must handle that request
  and capture the `authToken` parameter. For example, a Phoenix action to
  capture the `authToken` and store it in a session may look like this:

      def auth(conn, %{"authToken" => auth_token}) do
        conn
        |> put_session(:auth_token, auth_token)
        |> put_flash(:info, "Successfully authenticated with Handcash")
        |> redirect(to: "/app")
      end

  ## Usage

  Once a user is connected, the Handcash Connect APIs are interfaced with by
  creating a [`Connect Client`](`t:Handkit.Connect.t/0`) struct and passing that
  to all subsequent API calls.

      # Create the client
      iex> client = Handkit.create_connect_client(auth_token)
      %Connect{}

      # Get the full profile of the currently connected user
      iex> Handkit.Profile.get_current_profile(client)
      {:ok, %{
        "private_profile" => %{
          "email" => "<EMAIL>",
          "phone_number" => "+11234567891"
        },
        "public_profile" => %{
          "avatar_url" => "https://handcash.io/avatar/7d399a0c-22cf-40cf-b162-f5511a4645db",
          "bitcoin_unit" => "DUR",
          "display_name" => "<NAME>.",
          "handle" => "stuk_91",
          "id" => "5f15c31c3c177d003028eb97",
          "local_currency_code" => "USD",
          "paymail" => "<EMAIL>"
        }
      }}

      # Create a payment
      iex> payment_params = %{
      ...>   app_action: "test",
      ...>   description: "testing testing...",
      ...>   payments: [%{to: "Libs", amount: 5, currency_code: "DUR"}]
      ...> }
      iex> Handkit.Wallet.pay(client, payment_params)
      {:ok, %{
        "app_action" => "test",
        "attachments" => [],
        "fiat_currency_code" => "GBP",
        "fiat_exchange_rate" => 115.93006376003423,
        "note" => "testing testing...",
        "participants" => [
          %{
            "alias" => "Libs",
            "amount" => 2500,
            "display_name" => "Libitx",
            "profile_picture_url" => "https://www.gravatar.com/avatar/8c69771156957d453f9b74f9d57a523c?d=identicon",
            "response_note" => "",
            "type" => "user"
          }
        ],
        "raw_transaction_hex" => "0100000001019bab35b4d62fceaa6953c6b95ecf9e3c25e40836fc055d7b47addd9ce687a5010000006a4730440220221b6c59751de9576efd9e60cb7c648141e5331602656c97dc8722b740502287022025d9868894f72b4b0bcc70e95dc4820a9cbd79721974d95b184b72933af6cec4412102ed3547b19ce413e2e36c6182737d4d05b0e27022886bec96a7f7f2ea89f8cb78ffffffff03c4090000000000001976a914715779ac130d8ef5668425ce6e8f68ebd6c4596688acc4090000000000001976a914b0e9e9fa4ec584f76e4564d3f646151ce201920e88ac5c040000000000001976a914da5d00c49e00b27d5dbc59204230afa3f059b16888ac00000000",
        "satoshi_amount" => 2500,
        "satoshi_fees" => 128,
        "time" => 1624025595,
        "transaction_id" => "5a139f3f475f48001d733c8c767fa7124bb2835d927e01dd2782effd22f7081b",
        "type" => "send"
      }}

  Refer to the following modules for details of all the available API calls.

  * `Handkit.Profile`
  * `Handkit.Wallet`
  """

  alias Handkit.Connect

  @doc """
  Returns a redirection URL for the given app ID.

  Your app should redirect the user and they will be asked to grant your app
  permissions.

  Once the user selects *accept* or *decline*, they will be redirected back to
  your app's *Authorization Success URL* or *Authorization Failed URL*.
  Your app must handle that request and capture the `authToken` parameter.

  ## Example

      iex> redirect_url = Handkit.get_redirection_url("123456789")
      "https://app.handcash.io/#/authorizeApp?appId=123456789"
  """
  @spec get_redirection_url(String.t, map, Connect.env) :: String.t
  def get_redirection_url(app_id, query \\ %{}, env \\ :prod) when is_map(query) do
    # The app ID always wins over any caller-supplied "appId" entry.
    params =
      query
      |> Map.put("appId", app_id)
      |> Tesla.encode_query()

    Connect.client_url(env) <> "/#/authorizeApp?" <> params
  end

  @doc """
  Creates a Handcash Connect Client from the given auth token.

  The client can then be passed to all subsequent API functions.

  ## Example

      iex> client = Handkit.create_connect_client(auth_token)
      %Connect{}
  """
  @spec create_connect_client(String.t, Connect.env) :: Connect.t
  def create_connect_client(auth_token, env \\ :prod),
    do: Connect.init_client(auth_token, env)
end
|
lib/handkit.ex
| 0.820073
| 0.519826
|
handkit.ex
|
starcoder
|
defmodule Phoenix.LiveView.Router do
  @moduledoc """
  Provides LiveView routing for Phoenix routers.
  """

  # Name of the cookie used to carry flash data across live redirects.
  @cookie_key "__phoenix_flash__"

  @doc """
  Defines a LiveView route.

  A LiveView can be routed to by using the `live` macro with a path and
  the name of the LiveView:

      live "/thermostat", ThermostatLive

  By default, you can generate a route to this LiveView by using the `live_path` helper:

      live_path(@socket, ThermostatLive)

  ## Actions and live navigation

  It is common for a LiveView to have multiple states and multiple URLs.
  For example, you can have a single LiveView that lists all articles on
  your web app. For each article there is an "Edit" button which, when
  pressed, opens up a modal on the same page to edit the article. It is a
  best practice to use live navigation in those cases, so when you click
  edit, the URL changes to "/articles/1/edit", even though you are still
  within the same LiveView. Similarly, you may also want to show a "New"
  button, which opens up the modal to create new entries, and you want
  this to be reflected in the URL as "/articles/new".

  In order to make it easier to recognize the current "action" your
  LiveView is on, you can pass the action option when defining LiveViews
  too:

      live "/articles", ArticleLive.Index, :index
      live "/articles/new", ArticleLive.Index, :new
      live "/articles/:id/edit", ArticleLive.Index, :edit

  When an action is given, the generated route helpers are named after
  the LiveView itself (in the same way as for a controller). For the example
  above, we will have:

      article_index_path(@socket, :index)
      article_index_path(@socket, :new)
      article_index_path(@socket, :edit, 123)

  The current action will always be available inside the LiveView as
  the `@live_action` assign, that can be used to render a LiveComponent:

      <%= if @live_action == :new do %>
        <.live_component module={MyAppWeb.ArticleLive.FormComponent} id="form" />
      <% end %>

  Or can be used to show or hide parts of the template:

      <%= if @live_action == :edit do %>
        <%= render("form.html", user: @user) %>
      <% end %>

  Note that `@live_action` will be `nil` if no action is given on the route definition.

  ## Options

    * `:container` - an optional tuple for the HTML tag and DOM attributes to
      be used for the LiveView container. For example: `{:li, style: "color: blue;"}`.
      See `Phoenix.LiveView.Helpers.live_render/3` for more information and examples.

    * `:as` - optionally configures the named helper. Defaults to `:live` when
      using a LiveView without actions or defaults to the LiveView name when using
      actions.

    * `:metadata` - a map to optional feed metadata used on telemetry events and route info,
      for example: `%{route_name: :foo, access: :user}`.

    * `:private` - an optional map of private data to put in the plug connection.
      for example: `%{route_name: :foo, access: :user}`.

  ## Examples

      defmodule MyApp.Router
        use Phoenix.Router
        import Phoenix.LiveView.Router

        scope "/", MyApp do
          pipe_through [:browser]

          live "/thermostat", ThermostatLive
          live "/clock", ClockLive
          live "/dashboard", DashboardLive, container: {:main, class: "row"}
        end
      end

      iex> MyApp.Router.Helpers.live_path(MyApp.Endpoint, MyApp.ThermostatLive)
      "/thermostat"
  """
  defmacro live(path, live_view, action \\ nil, opts \\ []) do
    # bind_quoted injects the caller's arguments as plain variables inside
    # the quoted block; __live__/4 runs at router compile time and produces
    # the action plus options for a regular Phoenix GET route.
    quote bind_quoted: binding() do
      {action, router_options} =
        Phoenix.LiveView.Router.__live__(__MODULE__, live_view, action, opts)

      Phoenix.Router.get(path, Phoenix.LiveView.Plug, action, router_options)
    end
  end

  @doc """
  Defines a live session for live redirects within a group of live routes.

  `live_session/3` allow routes defined with `live/4` to support
  `live_redirect` from the client with navigation purely over the existing
  websocket connection. This allows live routes defined in the router to
  mount a new root LiveView without additional HTTP requests to the server.

  ## Security Considerations

  You must always perform authentication and authorization in your LiveViews.
  If your application handles both regular HTTP requests and LiveViews, then
  you must perform authentication and authorization on both. This is important
  because `live_redirect`s *do not go through the plug pipeline*.

  `live_session` can be used to draw boundaries between groups of LiveViews.
  Redirecting between `live_session`s will always force a full page reload
  and establish a brand new LiveView connection. This is useful when LiveViews
  require different authentication strategies or simply when they use different
  root layouts (as the root layout is not updated between live redirects).

  Please [read our guide on the security model](security-model.md) for a
  detailed description and general tips on authentication, authorization,
  and more.

  ## Options

    * `:session` - The optional extra session map or MFA tuple to be merged with
      the LiveView session. For example, `%{"admin" => true}`, `{MyMod, :session, []}`.
      For MFA, the function is invoked, passing the `Plug.Conn` struct is prepended
      to the arguments list.

    * `:root_layout` - The optional root layout tuple for the initial HTTP render to
      override any existing root layout set in the router.

    * `:on_mount` - The optional list of hooks to attach to the mount lifecycle _of
      each LiveView in the session_. See `Phoenix.LiveView.on_mount/1`. Passing a
      single value is also accepted.

  ## Examples

      scope "/", MyAppWeb do
        pipe_through :browser

        live_session :default do
          live "/feed", FeedLive, :index
          live "/status", StatusLive, :index
          live "/status/:id", StatusLive, :show
        end

        live_session :admin, on_mount: MyAppWeb.AdminLiveAuth do
          live "/admin", AdminDashboardLive, :index
          live "/admin/posts", AdminPostLive, :index
        end
      end

  In the example above, we have two live sessions. Live navigation between live views
  in the different sessions is not possible and will always require a full page reload.
  This is important in the example above because the `:admin` live session has authentication
  requirements, defined by `on_mount: MyAppWeb.AdminLiveAuth`, that the other LiveViews
  do not have.

  If you have both regular HTTP routes (via get, post, etc) and `live` routes, then
  you need to perform the same authentication and authorization rules in both.
  For example, if you were to add a `get "/admin/health"` entry point inside the
  `:admin` live session above, then you must create your own plug that performs the
  same authentication and authorization rules as `MyAppWeb.AdminLiveAuth`, and then
  pipe through it:

      live_session :admin, on_mount: MyAppWeb.AdminLiveAuth do
        # Regular routes
        pipe_through [MyAppWeb.AdminPlugAuth]
        get "/admin/health"

        # Live routes
        live "/admin", AdminDashboardLive, :index
        live "/admin/posts", AdminPostLive, :index
      end

  The opposite is also true, if you have regular http routes and you want to
  add your own `live` routes, the same authentication and authorization checks
  executed by the plugs listed in `pipe_through` must be ported to LiveViews
  and be executed via `on_mount` hooks.
  """
  defmacro live_session(name, opts \\ [], do: block) do
    # Expand aliases in literal options eagerly so they resolve in the
    # caller's environment; non-literal options are left for runtime.
    opts =
      if Macro.quoted_literal?(opts) do
        Macro.prewalk(opts, &expand_alias(&1, __CALLER__))
      else
        opts
      end

    # Register the session, expand the routes in the block, then clear the
    # "current session" marker so later routes are not claimed by it.
    quote do
      unquote(__MODULE__).__live_session__(__MODULE__, unquote(opts), unquote(name))
      unquote(block)
      Module.delete_attribute(__MODULE__, :phoenix_live_session_current)
    end
  end

  # Expands an alias AST node against the caller env; the fake {:mount, 3}
  # function context keeps expansion consistent for all options.
  defp expand_alias({:__aliases__, _, _} = alias, env),
    do: Macro.expand(alias, %{env | function: {:mount, 3}})

  defp expand_alias(other, _env), do: other

  @doc false
  # Compile-time bookkeeping for live_session/3: validates the name and
  # options, rejects nested and duplicate sessions, and records the session
  # in module attributes consumed by __live__/4.
  def __live_session__(module, opts, name) do
    Module.register_attribute(module, :phoenix_live_sessions, accumulate: true)
    vsn = session_vsn(module)

    unless is_atom(name) do
      raise ArgumentError, """
      expected live_session name to be an atom, got: #{inspect(name)}
      """
    end

    extra = validate_live_session_opts(opts, module, name)

    # A non-nil "current" attribute means we are already inside a session.
    if nested = Module.get_attribute(module, :phoenix_live_session_current) do
      raise """
      attempting to define live_session #{inspect(name)} inside #{inspect(nested.name)}.
      live_session definitions cannot be nested.
      """
    end

    live_sessions = Module.get_attribute(module, :phoenix_live_sessions)
    existing = Enum.find(live_sessions, fn %{name: existing_name} -> name == existing_name end)

    if existing do
      raise """
      attempting to redefine live_session #{inspect(name)}.
      live_session routes must be declared in a single named block.
      """
    end

    Module.put_attribute(module, :phoenix_live_session_current, %{name: name, extra: extra, vsn: vsn})
    Module.put_attribute(module, :phoenix_live_sessions, %{name: name, extra: extra, vsn: vsn})
  end

  # The complete set of options accepted by live_session/3.
  @live_session_opts [:on_mount, :root_layout, :session]

  # Normalizes and validates live_session options into a plain map,
  # raising ArgumentError with a descriptive message on any bad value.
  defp validate_live_session_opts(opts, module, _name) when is_list(opts) do
    opts
    |> Keyword.put_new(:session, %{})
    |> Enum.reduce(%{}, fn
      {:session, val}, acc when is_map(val) or (is_tuple(val) and tuple_size(val) == 3) ->
        Map.put(acc, :session, val)

      {:session, bad_session}, _acc ->
        raise ArgumentError, """
        invalid live_session :session
        expected a map with string keys or an MFA tuple, got #{inspect(bad_session)}
        """

      {:root_layout, {mod, template}}, acc when is_atom(mod) and is_binary(template) ->
        Map.put(acc, :root_layout, {mod, template})

      # An atom template name is normalized to "<name>.html".
      {:root_layout, {mod, template}}, acc when is_atom(mod) and is_atom(template) ->
        Map.put(acc, :root_layout, {mod, "#{template}.html"})

      {:root_layout, false}, acc ->
        Map.put(acc, :root_layout, false)

      {:root_layout, bad_layout}, _acc ->
        raise ArgumentError, """
        invalid live_session :root_layout
        expected a tuple with the view module and template string or atom name, got #{inspect(bad_layout)}
        """

      {:on_mount, on_mount}, acc ->
        hooks = Enum.map(List.wrap(on_mount), &Phoenix.LiveView.Lifecycle.on_mount(module, &1))
        Map.put(acc, :on_mount, hooks)

      {key, _val}, _acc ->
        raise ArgumentError, """
        unknown live_session option "#{inspect(key)}"
        Supported options include: #{inspect(@live_session_opts)}
        """
    end)
  end

  defp validate_live_session_opts(invalid, _module, name) do
    raise ArgumentError, """
    expected second argument to live_session to be a list of options, got:
    live_session #{inspect(name)}, #{inspect(invalid)}
    """
  end

  @doc """
  Fetches the LiveView and merges with the controller flash.

  Replaces the default `:fetch_flash` plug used by `Phoenix.Router`.

  ## Examples

      defmodule MyAppWeb.Router do
        use LiveGenWeb, :router
        import Phoenix.LiveView.Router

        pipeline :browser do
          ...
          plug :fetch_live_flash
        end
        ...
      end
  """
  def fetch_live_flash(%Plug.Conn{} = conn, _) do
    case cookie_flash(conn) do
      {conn, nil} ->
        Phoenix.Controller.fetch_flash(conn, [])

      {conn, flash} ->
        conn
        |> Phoenix.Controller.fetch_flash([])
        |> Phoenix.Controller.merge_flash(flash)
    end
  end

  @doc false
  # Clause for `live path, LiveView, opts` with no action: the keyword list
  # lands in the `action` slot and is merged into opts.
  def __live__(router, live_view, action, opts)
      when is_list(action) and is_list(opts) do
    __live__(router, live_view, nil, Keyword.merge(action, opts))
  end

  # Resolves the route's live session, alias scoping, helper naming and
  # metadata; returns {plug_action, router_options} for Phoenix.Router.get/4.
  def __live__(router, live_view, action, opts)
      when is_atom(action) and is_list(opts) do
    live_session =
      Module.get_attribute(router, :phoenix_live_session_current) ||
        %{name: :default, extra: %{session: %{}}, vsn: session_vsn(router)}

    live_view = Phoenix.Router.scoped_alias(router, live_view)
    {private, metadata, opts} = validate_live_opts!(opts)

    opts =
      opts
      |> Keyword.put(:router, router)
      |> Keyword.put(:action, action)

    {as_helper, as_action} = inferred_as(live_view, opts[:as], action)

    metadata =
      metadata
      |> Map.put(:phoenix_live_view, {live_view, action, opts, live_session})
      |> Map.put_new(:log_module, live_view)
      |> Map.put_new(:log_function, :mount)

    {as_action,
     alias: false,
     as: as_helper,
     private: Map.put(private, :phoenix_live_view, {live_view, opts, live_session}),
     metadata: metadata}
  end

  # Pops :private/:metadata out of the options and validates every remaining
  # option, raising ArgumentError for anything malformed or unknown.
  defp validate_live_opts!(opts) do
    {private, opts} = Keyword.pop(opts, :private, %{})
    {metadata, opts} = Keyword.pop(opts, :metadata, %{})

    Enum.each(opts, fn
      {:container, {tag, attrs}} when is_atom(tag) and is_list(attrs) ->
        :ok

      {:container, val} ->
        raise ArgumentError, """
        expected live :container to be a tuple matching {atom, attrs :: list}, got: #{inspect(val)}
        """

      {:as, as} when is_atom(as) ->
        :ok

      {:as, bad_val} ->
        raise ArgumentError, """
        expected live :as to be an atom, got: #{inspect(bad_val)}
        """

      {key, %{} = meta} when key in [:metadata, :private] and is_map(meta) ->
        :ok

      {key, bad_val} when key in [:metadata, :private] ->
        raise ArgumentError, """
        expected live :#{key} to be a map, got: #{inspect(bad_val)}
        """

      {key, val} ->
        raise ArgumentError, """
        unknown live option :#{key}.
        Supported options include: :container, :as, :metadata, :private.
        Got: #{inspect([{key, val}])}
        """
    end)

    {private, metadata, opts}
  end

  # With no action the helper defaults to :live and the plug "action" is the
  # LiveView module itself.
  defp inferred_as(live_view, as, nil), do: {as || :live, live_view}

  # Derives the helper name from the LiveView module: drop everything before
  # the first "*Live" segment, strip the "Live" suffixes, underscore and join.
  defp inferred_as(live_view, nil, action) do
    live_view
    |> Module.split()
    |> Enum.drop_while(&(not String.ends_with?(&1, "Live")))
    |> Enum.map(&(&1 |> String.replace_suffix("Live", "") |> Macro.underscore()))
    |> Enum.reject(&(&1 == ""))
    |> Enum.join("_")
    |> case do
      "" ->
        raise ArgumentError,
              "could not infer :as option because a live action was given and the LiveView " <>
                "does not have a \"Live\" suffix. Please pass :as explicitly or make sure your " <>
                "LiveView is named like \"FooLive\" or \"FooLive.Index\""

      as ->
        {String.to_atom(as), action}
    end
  end

  defp inferred_as(_live_view, as, action), do: {as, action}

  # Extracts and verifies flash data from the signed cookie, and deletes the
  # cookie from the response so it is consumed exactly once.
  defp cookie_flash(%Plug.Conn{cookies: %{@cookie_key => token}} = conn) do
    endpoint = Phoenix.Controller.endpoint_module(conn)

    flash =
      case Phoenix.LiveView.Utils.verify_flash(endpoint, token) do
        %{} = flash when flash != %{} -> flash
        %{} -> nil
      end

    {Plug.Conn.delete_resp_cookie(conn, @cookie_key), flash}
  end

  defp cookie_flash(%Plug.Conn{} = conn), do: {conn, nil}

  # Memoizes a per-router compilation timestamp in a module attribute so every
  # live_session declared in the same compile run shares one version stamp.
  defp session_vsn(module) do
    if vsn = Module.get_attribute(module, :phoenix_session_vsn) do
      vsn
    else
      vsn = System.system_time()
      Module.put_attribute(module, :phoenix_session_vsn, vsn)
      vsn
    end
  end
end
|
lib/phoenix_live_view/router.ex
| 0.909293
| 0.555315
|
router.ex
|
starcoder
|
defmodule Services.Service do
  @moduledoc "Processes Services, including dates and notes"

  alias JsonApi.Item

  defstruct added_dates: [],
            added_dates_notes: [],
            description: "",
            end_date: nil,
            name: "",
            id: nil,
            removed_dates: [],
            removed_dates_notes: [],
            start_date: nil,
            type: nil,
            typicality: :unknown,
            valid_days: [],
            rating_start_date: nil,
            rating_end_date: nil,
            rating_description: ""

  @type typicality ::
          :unknown
          # Typical service with perhaps minor modifications
          | :typical_service
          # Extra service supplements typical schedules
          | :extra_service
          # Reduced holiday service is provided by typical Saturday or Sunday schedule
          | :holiday_service
          # Major changes in service due to a planned disruption, such as construction
          | :planned_disruption
          # Major reductions in service for weather events or other atypical situations
          | :unplanned_disruption

  @type type :: :weekday | :saturday | :sunday | :other

  # Maps an ISO-8601 date string to its (possibly nil) note text.
  @type date_notes :: %{String.t() => String.t() | nil}

  # 1 = Monday, 7 = Sunday
  @type valid_day :: 1 | 2 | 3 | 4 | 5 | 6 | 7

  @type t :: %__MODULE__{
          added_dates: [String.t()],
          added_dates_notes: date_notes,
          description: String.t(),
          end_date: Date.t() | nil,
          name: String.t(),
          id: String.t(),
          removed_dates: [String.t()],
          removed_dates_notes: date_notes,
          start_date: Date.t() | nil,
          type: type,
          typicality: typicality,
          valid_days: [valid_day],
          rating_start_date: Date.t() | nil,
          rating_end_date: Date.t() | nil,
          rating_description: String.t()
        }

  # Builds a Service struct from a JsonApi.Item of type "service", filling
  # in dates, rating dates, per-date notes and the scalar attributes.
  def new(%Item{id: id, attributes: attributes, type: "service"}) do
    %__MODULE__{}
    |> dates(attributes)
    |> rating_dates(attributes)
    |> date_notes(attributes)
    |> Map.put(:description, Map.get(attributes, "description", ""))
    |> Map.put(:name, Map.get(attributes, "schedule_name", ""))
    |> Map.put(:id, id)
    # NOTE(review): type/1 has no clause for nil or unrecognized
    # "schedule_type" values, so a missing attribute raises
    # FunctionClauseError here (typicality/1 below does handle nil) —
    # confirm the API always provides "schedule_type".
    |> Map.put(:type, attributes |> Map.get("schedule_type") |> type())
    |> Map.put(:typicality, attributes |> Map.get("schedule_typicality") |> typicality())
    |> Map.put(:valid_days, Map.get(attributes, "valid_days", []))
    |> Map.put(:rating_description, Map.get(attributes, "rating_description", ""))
  end

  # Copies "added_dates"/"removed_dates" lists and parses "start_date"/
  # "end_date" ISO-8601 strings into Date structs (nil when absent).
  # The interpolated atom keys are safe: they come from a fixed set.
  defp dates(service, attributes) do
    service =
      Enum.reduce(["added", "removed"], service, fn date_type, acc ->
        %{acc | "#{date_type}_dates": Map.get(attributes, date_type <> "_dates", [])}
      end)

    Enum.reduce(["start", "end"], service, fn date_type, acc ->
      date =
        case Map.get(attributes, date_type <> "_date") do
          nil -> nil
          date_string -> Date.from_iso8601!(date_string)
        end

      %{acc | "#{date_type}_date": date}
    end)
  end

  # Parses "rating_start_date"/"rating_end_date" the same way as dates/2.
  defp rating_dates(service, attributes) do
    Enum.reduce(["rating_start", "rating_end"], service, fn date_type, acc ->
      date =
        case Map.get(attributes, date_type <> "_date") do
          nil -> nil
          date_string -> Date.from_iso8601!(date_string)
        end

      %{acc | "#{date_type}_date": date}
    end)
  end

  # Zips each added/removed date with its positional note into a map; any
  # date beyond the length of the notes list is simply dropped by Enum.zip.
  defp date_notes(service, attributes) do
    Enum.reduce(["added", "removed"], service, fn date_type, acc ->
      dates = Map.get(acc, :"#{date_type}_dates", [])
      notes = Map.get(attributes, date_type <> "_dates_notes", [])

      note_map =
        dates
        |> Enum.zip(notes)
        |> Enum.reduce(%{}, &Map.put(&2, elem(&1, 0), elem(&1, 1)))

      %{acc | "#{date_type}_dates_notes": note_map}
    end)
  end

  # Translates the API's "schedule_type" string into an atom tag.
  defp type("Weekday"), do: :weekday
  defp type("Saturday"), do: :saturday
  defp type("Sunday"), do: :sunday
  defp type("Other"), do: :other

  # Translates the GTFS-style integer "schedule_typicality" into an atom;
  # nil and 0 both map to :unknown.
  defp typicality(nil), do: :unknown
  defp typicality(0), do: :unknown
  defp typicality(1), do: :typical_service
  defp typicality(2), do: :extra_service
  defp typicality(3), do: :holiday_service
  defp typicality(4), do: :planned_disruption
  defp typicality(5), do: :unplanned_disruption
end
|
apps/services/lib/service.ex
| 0.732974
| 0.466542
|
service.ex
|
starcoder
|
use Croma
defmodule RaftKV.Config do
@default_stats_collection_interval (if Mix.env() == :test, do: 2_000, else: 60_000)
@default_workflow_execution_interval (if Mix.env() == :test, do: 2_000, else: 60_000)
@default_workflow_lock_period (if Mix.env() == :test, do: 2_000, else: 30_000)
@default_shard_ineligible_period_after_split_or_merge 2 * @default_stats_collection_interval
@moduledoc """
`:raft_kv` defines the following application configs:
- `:stats_collection_interval`:
- Interval (in milliseconds) between collections of the following metrics of all shards:
- Number of keys in a shard
- Aggregated size of all keys in a shard
- Aggregated load which a shard has experienced since the last stats collection
- By using smaller value you can adjust (split/merge) number of shards more quickly, with higher overhead.
Defaults to `#{@default_stats_collection_interval}`.
- `:workflow_execution_interval`:
- Interval (in milliseconds) between executions of workflow tasks.
By using smaller value you can execute workflow tasks more quickly, with higher overhead.
Defaults to `#{@default_workflow_execution_interval}`.
Workflow task here means:
- removing a keyspace
- splitting 1 shard into 2
- merging 2 consecutive shards into 1
- `:workflow_lock_period`:
- When executing a workflow task it is locked for this period (in milliseconds) in order to avoid running the same task simultaneously.
Defaults to `#{@default_workflow_lock_period}`.
- `:shard_ineligible_period_after_split_or_merge`:
- When a shard has just been split/merged, stats of the affected shard(s) become stale.
To prevent from incorrectly splitting/merging based on the stale stats, shards that have been split/merged within
this period (in milliseconds) are excluded from next split/merge candidates.
Defaults to `#{@default_shard_ineligible_period_after_split_or_merge}`.
Note that each `raft_kv` process uses application configs stored in the local node.
If you want to configure the options above you must set them on all nodes in your cluster.
In addition to the configurations above, the following configurations defined by the underlying libraries are also available:
- `RaftedValue.make_config/2` and `RaftedValue.change_config/2`.
- `RaftFleet.Config`.
## About `:rafted_value_config_maker` option for `:raft_fleet`
`:raft_fleet` provides a way to configure each consensus group by setting an implementation of `RaftFleet.RaftedValueConfigMaker` behaviour
as `:rafted_value_config_maker` option.
`:raft_kv` respects this option; it uses (if any) the provided callback module when creating a `t:RaftedValue.Config.t/0`.
`:raft_kv` defines the following consensus groups:
- `RaftKV.Keyspaces`
- Each shard is impelemented as a Raft consensus group whose name is `:"\#{keyspace_name}_\#{hash_integer}"`
To construct `t:RaftedValue.Config.t/0`s for these consensus groups in your implementation of
`RaftFleet.RaftedValueConfigMaker` behaviour, you can use `RaftKV.Keyspaces.make_rv_config/1` and `RaftKV.Shard.make_rv_config/1`.
As a result implementations of `RaftFleet.RaftedValueConfigMaker` behaviour should look like the following:
defmodule ConfigMaker do
@behaviour RaftFleet.RaftedValueConfigMaker
@options [heartbeat_timeout: 500]
@impl true
def make(name) do
case Atom.to_string(name) do
"Elixir.RaftFleet.Cluster" -> RaftFleet.Cluster.make_rv_config(@options) # consensus group defined by `:raft_fleet` itself
"Elixir.RaftKV.Keyspaces" -> RaftKV.Keyspaces.make_rv_config(@options)
"some_keyspace_" <> _hash -> RaftKV.Shard.make_rv_config(@options)
"another_keyspace_" <> _hash -> RaftKV.Shard.make_rv_config(@options)
end
end
end
"""
# Interval (in milliseconds) between collections of per-shard load statistics.
# Reads the application env of the *local* node on every call, so runtime
# config changes take effect without recompilation (see moduledoc).
defun stats_collection_interval() :: pos_integer do
  Application.get_env(:raft_kv, :stats_collection_interval, @default_stats_collection_interval)
end
# Interval (in milliseconds) between executions of pending workflow tasks
# (keyspace removal, shard split/merge). Read from the local node's env.
defun workflow_execution_interval() :: pos_integer do
  Application.get_env(:raft_kv, :workflow_execution_interval, @default_workflow_execution_interval)
end
# Lock duration (in milliseconds) held while executing a workflow task, to
# prevent the same task from running concurrently on multiple nodes.
defun workflow_lock_period() :: pos_integer do
  Application.get_env(:raft_kv, :workflow_lock_period, @default_workflow_lock_period)
end
# Cooldown (in milliseconds) after a split/merge during which the affected
# shard(s) are excluded from further split/merge candidates, since their
# collected stats are stale immediately after the operation.
defun shard_ineligible_period_after_split_or_merge() :: pos_integer do
  Application.get_env(:raft_kv, :shard_ineligible_period_after_split_or_merge, @default_shard_ineligible_period_after_split_or_merge)
end
end
|
lib/raft_kv/config.ex
| 0.774796
| 0.584983
|
config.ex
|
starcoder
|
defmodule ContentSecurityPolicy.Plug.AddNonce do
  @moduledoc """
  Plug which adds a random nonce to the content security policy. Sets this
  nonce in `Plug.assigns` under the `csp_nonce` key.

  This plug must be run after the `ContentSecurityPolicy.Plug.Setup` plug, or
  it will raise an exception.

  ## Example Usage

  In a controller or router:

      plug ContentSecurityPolicy.Plug.Setup
      plug ContentSecurityPolicy.Plug.AddNonce, directives: [:script_src]

  The nonce is then added to the `script-src` directive and will be sent in the
  "content-security-policy" response header. To access this nonce value when
  rendering a response, check `conn.assigns[:csp_nonce]`.

      conn.assigns[:csp_nonce]
      "EDNnf03nceIOfn39fn3e9h3sdfa"

  If using `.eex` templates to render a response, that might look something
  like:

      <script nonce="<%= @conn.assigns[:csp_nonce] %>">
        ... #JavaScript I'd like to be allowed
      </script>

  When the response is sent to the browser, the `"content-security-policy"`
  response header will contain `"script-src
  'nonce-EDNnf03nceIOfn39fn3e9h3sdfa'"`, which should cause the browser to
  whitelist this specific script.

  Note that the nonce is randomly generated for every single request, which
  ensures that an attacker can't just guess your nonce and get their malicious
  script/resource run.
  """

  import Plug.Conn

  alias ContentSecurityPolicy.Directive
  alias ContentSecurityPolicy.Policy

  @default_directives [:default_src]
  @default_byte_size 32

  @doc """
  Validates the `:directives` option eagerly so that a misconfigured directive
  raises at plug-initialization time rather than on the first request.

  Returns `opts` unchanged; `call/2` re-reads the options itself.
  """
  def init(opts) do
    opts
    |> Keyword.get(:directives, @default_directives)
    |> Enum.each(&Directive.validate_directive!/1)

    opts
  end

  @doc """
  Generates a random nonce (`:byte_size` bytes, default #{@default_byte_size}),
  adds it as a source value to each configured directive of the policy stored
  in `conn.private[:content_security_policy]`, and assigns the raw nonce to
  `conn.assigns[:csp_nonce]`.
  """
  def call(conn, opts) do
    # Deduplicate so the same nonce source value is not added twice to one
    # directive when the caller repeats a directive in the options.
    directives =
      opts
      |> Keyword.get(:directives, @default_directives)
      |> Enum.uniq()

    bytes = Keyword.get(opts, :byte_size, @default_byte_size)
    nonce = ContentSecurityPolicy.generate_nonce(bytes)
    nonce_source_value = "'nonce-#{nonce}'"

    updated_policy =
      Enum.reduce(directives, get_policy!(conn), fn directive, policy ->
        ContentSecurityPolicy.add_source_value(policy, directive, nonce_source_value)
      end)

    conn
    |> put_private(:content_security_policy, updated_policy)
    |> assign(:csp_nonce, nonce)
  end

  # Fetches the policy placed in `conn.private` by the Setup plug; raises a
  # descriptive error when the Setup plug has not run first.
  defp get_policy!(%{private: %{content_security_policy: %Policy{} = policy}}) do
    policy
  end

  defp get_policy!(_) do
    raise """
    Attempted to add a nonce to the content security policy, but the content
    security policy was not initialized.
    Please make sure that the `ContentSecurityPolicy.Plug.Setup` plug is run
    before the `ContentSecurityPolicy.Plug.AddNonce` plug.
    """
  end
end
|
lib/content_security_policy/plug/add_nonce.ex
| 0.840783
| 0.482856
|
add_nonce.ex
|
starcoder
|
defmodule Benchee.Benchmark do
  @moduledoc """
  Functions related to building and running benchmarking scenarios.
  Exposes `benchmark/4` and `measure/3` functions.
  """

  alias Benchee.Benchmark.{Runner, Scenario, ScenarioContext}
  alias Benchee.Output.BenchmarkPrinter, as: Printer
  alias Benchee.Suite
  alias Benchee.Utility.DeepConvert

  @type job_name :: String.t() | atom

  @no_input :__no_input

  @doc """
  Public access for the key representing no input for a scenario.
  """
  def no_input, do: @no_input

  @doc """
  Takes the current suite and adds a new benchmarking scenario (represented by a
  %Scenario{} struct) to the suite's scenarios. If there are inputs in the
  suite's config, a scenario will be added for the given function for each
  input.
  """
  @spec benchmark(Suite.t(), job_name, fun, module) :: Suite.t()
  def benchmark(suite = %Suite{scenarios: scenarios}, job_name, function, printer \\ Printer) do
    name = to_string(job_name)

    if known_name?(scenarios, name) do
      # A job with this name already exists; warn and leave the suite untouched.
      printer.duplicate_benchmark_warning(name)
      suite
    else
      register_job(suite, name, function)
    end
  end

  # True when any existing scenario already carries this job name.
  defp known_name?(scenarios, name) do
    Enum.any?(scenarios, &(&1.name == name))
  end

  # Builds all scenarios for the new job and prepends them onto the suite's list.
  defp register_job(suite = %Suite{scenarios: existing, configuration: config}, name, function) do
    built = scenarios_for(name, function, config)
    %Suite{suite | scenarios: List.flatten([existing | built])}
  end

  # No configuration, or no inputs configured: a single scenario without input.
  defp scenarios_for(name, function, nil), do: [input_free_scenario(name, function)]
  defp scenarios_for(name, function, %{inputs: nil}), do: [input_free_scenario(name, function)]

  # One scenario per configured input.
  defp scenarios_for(name, function, %{inputs: inputs}) do
    for {input_name, input} <- inputs do
      to_scenario(%{
        job_name: name,
        function: function,
        input: input,
        input_name: input_name
      })
    end
  end

  defp input_free_scenario(name, function) do
    to_scenario(%{
      job_name: name,
      function: function,
      input: @no_input,
      input_name: @no_input
    })
  end

  # A `{function, options}` tuple carries per-job options; unpack them into the
  # scenario data and recurse with the bare function.
  defp to_scenario(data = %{function: {function, options}}) do
    data
    |> Map.put(:function, function)
    |> Map.merge(DeepConvert.to_map(options))
    |> to_scenario()
  end

  # Derive the display name and build the actual %Scenario{} struct.
  defp to_scenario(data) do
    struct!(Scenario, Map.put(data, :name, Scenario.display_name(data)))
  end

  @doc """
  Kicks off the benchmarking of all scenarios in the suite by passing the list
  of scenarios and a scenario context to our benchmark runner. For more
  information on how benchmarks are actually run, see
  `Benchee.Benchmark.Runner.run_scenarios/2`.
  """
  @spec measure(Suite.t(), module, module) :: Suite.t()
  def measure(
        suite = %Suite{scenarios: scenarios, configuration: config},
        printer \\ Printer,
        runner \\ Runner
      ) do
    printer.configuration_information(suite)
    context = %ScenarioContext{config: config, printer: printer}
    %Suite{suite | scenarios: runner.run_scenarios(scenarios, context)}
  end
end
|
lib/benchee/benchmark.ex
| 0.816443
| 0.553686
|
benchmark.ex
|
starcoder
|
defmodule Plug.Static do
  @moduledoc """
  A plug for serving static assets.

  It expects two options on initialization:

    * `:at` - the request path to reach for static assets.
      It must be a binary.

    * `:from` - the filesystem path to read static assets from.
      It must be a binary, containing a file system path, or an
      atom representing the application name, where assets will
      be served from the priv/static.

  The preferred form is to use `:from` with an atom, since
  it will make your application independent from the starting
  directory.

  If a static asset cannot be found, it simply forwards
  the connection to the rest of the pipeline.

  ## Options

    * `:gzip` - use `FILE.gz` if it exists in the static directory
      and if `accept-encoding` is set to allow gzipped content
      (defaults to `false`).

    * `:cache` - sets cache headers on response (defaults to: `true`)

  ## Examples

  This filter can be mounted in a Plug.Builder as follow:

      defmodule MyPlug do
        use Plug.Builder

        plug Plug.Static, at: "/public", from: :my_app
        plug :not_found

        def not_found(conn, _) do
          Plug.Conn.send_resp(conn, 404, "not found")
        end
      end
  """

  @behaviour Plug
  @allowed_methods ~w(GET HEAD)

  import Plug.Conn
  alias Plug.Conn

  defmodule InvalidPathError do
    defexception message: "invalid path for static asset", plug_status: 400
  end

  # Validates options and precomputes the split mount path. Returns the
  # `{segments, from, gzip, cache}` tuple consumed by `call/2`.
  def init(opts) do
    at = Keyword.fetch!(opts, :at)
    from = Keyword.fetch!(opts, :from)
    gzip = Keyword.get(opts, :gzip, false)
    cache = Keyword.get(opts, :cache, true)

    unless is_atom(from) or is_binary(from) do
      raise ArgumentError, message: ":from must be an atom or a binary"
    end

    {Plug.Router.Utils.split(at), from, gzip, cache}
  end

  # Only GET/HEAD requests are candidates for static files; everything else
  # passes straight through to the rest of the pipeline.
  def call(conn = %Conn{method: meth}, {at, from, gzip, cache}) when meth in @allowed_methods do
    send_static_file(conn, at, from, gzip, cache)
  end

  def call(conn, _opts), do: conn

  defp send_static_file(conn, at, from, gzip, cache) do
    # Strip the mount prefix; `nil` means the request is not under `at`.
    segments = subset(at, conn.path_info)
    segments = for segment <- List.wrap(segments), do: URI.decode(segment)
    path = path(from, segments)

    cond do
      segments in [nil, []] ->
        conn

      invalid_path?(segments) ->
        raise InvalidPathError

      true ->
        case file_encoding(conn, path, gzip) do
          # `conn`/`path` are rebound here: the gzip variant may have added a
          # content-encoding header and switched to the `.gz` file.
          {conn, path} ->
            # BUG FIX: the original did `conn = put_resp_header(...)` inside
            # `if cache do ... end`. In Elixir >= 1.0 bindings do not leak out
            # of `if`, so the cache-control header was silently dropped.
            # Rebind explicitly instead.
            conn = maybe_put_cache_header(conn, cache)

            conn
            |> put_resp_header("content-type", Plug.MIME.path(List.last(segments)))
            |> send_file(200, path)
            |> halt

          :error ->
            conn
        end
    end
  end

  # Adds a long-lived cache header when `:cache` is enabled; otherwise returns
  # the connection unchanged.
  defp maybe_put_cache_header(conn, cache) do
    if cache do
      put_resp_header(conn, "cache-control", "public, max-age=31536000")
    else
      conn
    end
  end

  # Picks the gzipped variant when enabled, the client accepts gzip, and the
  # `.gz` file exists; otherwise the plain file; `:error` when neither exists.
  defp file_encoding(conn, path, gzip) do
    path_gz = path <> ".gz"

    cond do
      gzip && gzip?(conn) && File.regular?(path_gz) ->
        {put_resp_header(conn, "content-encoding", "gzip"), path_gz}

      File.regular?(path) ->
        {conn, path}

      true ->
        :error
    end
  end

  # True when any accept-encoding header value includes "gzip" or "*".
  defp gzip?(conn) do
    fun = &(:binary.match(&1, ["gzip", "*"]) != :nomatch)

    Enum.any?(get_req_header(conn, "accept-encoding"), fn accept ->
      Enum.any?(Plug.Conn.Utils.list(accept), fun)
    end)
  end

  # Atom `from` means an OTP application: serve from its priv/static.
  defp path(from, segments) when is_atom(from),
    do: Path.join([Application.app_dir(from), "priv/static" | segments])

  defp path(from, segments),
    do: Path.join([from | segments])

  # Returns the path segments remaining after the mount prefix, or nil when
  # the request path does not start with the prefix.
  defp subset([h | expected], [h | actual]),
    do: subset(expected, actual)

  defp subset([], actual),
    do: actual

  defp subset(_, _), do: nil

  # Rejects traversal (".", "..") and separator/drive characters to keep
  # requests from escaping the static root.
  defp invalid_path?([h | _]) when h in [".", "..", ""], do: true

  defp invalid_path?([h | t]) do
    case :binary.match(h, ["/", "\\", ":"]) do
      {_, _} -> true
      :nomatch -> invalid_path?(t)
    end
  end

  defp invalid_path?([]), do: false
end
|
lib/plug/static.ex
| 0.866019
| 0.591251
|
static.ex
|
starcoder
|
defmodule Authex.Token do
  @moduledoc """
  A struct wrapper for token claims.

  Typically, we shouldn't need to directly interact with this module. Rather,
  we should use the `Authex.token/3` function.
  """

  alias Authex.Token

  defstruct nbf: nil,
            exp: nil,
            iat: nil,
            jti: nil,
            sub: nil,
            iss: nil,
            aud: nil,
            scopes: [],
            meta: %{}

  @type t :: %__MODULE__{
          nbf: integer | nil,
          exp: integer | nil,
          iat: integer | nil,
          jti: binary | nil,
          sub: binary | integer | nil,
          iss: binary | nil,
          aud: binary | nil,
          scopes: list,
          meta: map
        }

  @type claim ::
          {:sub, binary | integer}
          | {:aud, binary}
          | {:iss, binary}
          | {:jti, binary}
          | {:scopes, list}
          | {:meta, map}

  @type claims :: [claim]

  @type option ::
          {:time, integer}
          | {:ttl, integer | :infinity}

  @type options :: [option]

  @type compact :: binary

  @doc """
  Creates a new `Authex.Token` struct from the given claims and options.

  ## Options

    * `:time` - the base time (timestamp format) in which to use.
    * `:ttl` - the TTL for the token or `:infinity` if no expiration is required.

  ## Examples

      Authex.Token.new(MyApp.Auth, [sub: 1], [ttl: 60])
  """
  def new(module, claims \\ [], opts \\ []) do
    claims = build_claims(module, claims)
    opts = build_options(module, opts)

    %Token{}
    |> put_iat(opts.time)
    |> put_nbf(opts.time)
    |> put_exp(opts.time, opts.ttl)
    |> put_jti(claims.jti)
    |> put_sub(claims.sub)
    |> put_aud(claims.aud)
    |> put_iss(claims.iss)
    |> put_scopes(claims.scopes)
    |> put_meta(claims.meta)
  end

  @doc false
  # NOTE(review): `String.to_atom/1` runs on claim keys that typically come
  # from a decoded (externally supplied) token; atoms are never garbage
  # collected, so hostile input can grow the atom table. Consider
  # `String.to_existing_atom/1` — all expected keys already exist as struct
  # fields. Left unchanged here because unknown keys are currently silently
  # ignored by `struct/2`, and `to_existing_atom` would raise instead.
  def from_map(claims) when is_map(claims) do
    claims =
      Enum.reduce(claims, %{}, fn {key, val}, acc ->
        Map.put(acc, String.to_atom(key), val)
      end)

    struct(__MODULE__, claims)
  end

  @doc false
  # Converts the struct into a string-keyed claims map, dropping nil values.
  def get_claims(token) do
    token
    |> Map.from_struct()
    |> Map.to_list()
    |> Enum.map(fn {key, val} -> {Atom.to_string(key), val} end)
    |> Enum.reject(fn {_, val} -> val == nil end)
    |> Map.new()
  end

  @doc false
  # "Not before" is set one second in the past to tolerate clock skew.
  def put_nbf(token, time) do
    %{token | nbf: time - 1}
  end

  @doc false
  def put_iat(token, time) do
    %{token | iat: time}
  end

  @doc false
  # `:infinity` TTL means the token never expires (no exp claim).
  def put_exp(token, _time, :infinity) do
    %{token | exp: nil}
  end

  def put_exp(token, time, ttl) do
    %{token | exp: time + ttl}
  end

  @doc false
  # `false` disables the jti claim; an MFA tuple generates it lazily.
  def put_jti(token, false) do
    %{token | jti: nil}
  end

  def put_jti(token, {mod, fun, args}) do
    %{token | jti: apply(mod, fun, args)}
  end

  def put_jti(token, jti) when is_binary(jti) do
    %{token | jti: jti}
  end

  @doc false
  def put_sub(token, sub) do
    %{token | sub: sub}
  end

  @doc false
  def put_iss(token, iss) do
    %{token | iss: iss}
  end

  @doc false
  def put_aud(token, aud) do
    %{token | aud: aud}
  end

  @doc false
  def put_scopes(token, scopes) do
    %{token | scopes: scopes}
  end

  @doc false
  def put_meta(token, meta) do
    %{token | meta: meta}
  end

  @doc false
  # Returns true when the token (or scope list) contains at least one of the
  # requested scopes.
  def has_scope?(%Token{scopes: current_scopes}, scopes) do
    has_scope?(current_scopes, scopes)
  end

  # BUG FIX: the original used `Enum.find(scopes, false, ...)`, which returned
  # the matching *scope value* (merely truthy) rather than a boolean, despite
  # the `?` naming convention. `Enum.any?/2` returns a proper boolean and is
  # backward compatible for all truthiness-based callers.
  def has_scope?(current_scopes, scopes)
      when is_list(current_scopes) and is_list(scopes) do
    Enum.any?(scopes, &(&1 in current_scopes))
  end

  # Any non-list argument pair means "no scope".
  def has_scope?(_, _) do
    false
  end

  # Merges the given claims over the module's configured defaults.
  defp build_claims(module, claims) do
    Enum.into(claims, %{
      jti: Authex.config(module, :default_jti, {Authex.UUID, :generate, []}),
      scopes: Authex.config(module, :default_scopes, []),
      sub: Authex.config(module, :default_sub),
      aud: Authex.config(module, :default_aud),
      iss: Authex.config(module, :default_iss),
      meta: Authex.config(module, :default_meta, %{})
    })
  end

  # Merges the given options over defaults; `:seconds` is a deprecated alias
  # of `:second` in OTP but still accepted — kept for compatibility.
  defp build_options(module, opts) do
    Enum.into(opts, %{
      ttl: Authex.config(module, :default_ttl, 3600),
      time: :os.system_time(:seconds)
    })
  end
end
|
lib/authex/token.ex
| 0.881704
| 0.418133
|
token.ex
|
starcoder
|
defmodule LiveSup.PromEx do
  @moduledoc """
  Be sure to add the following to finish setting up PromEx:
  1. Update your configuration (config.exs, dev.exs, prod.exs, releases.exs, etc) to
  configure the necessary bit of PromEx. Be sure to check out `PromEx.Config` for
  more details regarding configuring PromEx:
  ```
  config :live_sup, LiveSup.PromEx,
    disabled: false,
    manual_metrics_start_delay: :no_delay,
    drop_metrics_groups: [],
    grafana: :disabled,
    metrics_server: :disabled
  ```
  2. Add this module to your application supervision tree. It should be one of the first
  things that is started so that no Telemetry events are missed. For example, if PromEx
  is started after your Repo module, you will miss Ecto's init events and the dashboards
  will be missing some data points:
  ```
  def start(_type, _args) do
    children = [
      LiveSup.PromEx,
      ...
    ]
    ...
  end
  ```
  3. Update your `endpoint.ex` file to expose your metrics (or configure a standalone
  server using the `:metrics_server` config options). Be sure to put this plug before
  your `Plug.Telemetry` entry so that you can avoid having calls to your `/metrics`
  endpoint create their own metrics and logs which can pollute your logs/metrics given
  that Prometheus will scrape at a regular interval and that can get noisy:
  ```
  defmodule LiveSupWeb.Endpoint do
    use Phoenix.Endpoint, otp_app: :live_sup
    ...
    plug PromEx.Plug, prom_ex_module: LiveSup.PromEx
    ...
  end
  ```
  4. Update the list of plugins in the `plugins/0` function return list to reflect your
  application's dependencies. Also update the list of dashboards that are to be uploaded
  to Grafana in the `dashboards/0` function.
  """

  use PromEx, otp_app: :live_sup

  alias PromEx.Plugins

  # PromEx.plugins/0 callback: telemetry plugins to collect metrics from.
  # Commented-out entries are intentionally disabled; keep this list in sync
  # with the dashboards/0 list below so every active plugin has a dashboard.
  @impl true
  def plugins do
    [
      # PromEx built in plugins
      Plugins.Application,
      Plugins.Beam,
      # {Plugins.Phoenix, router: LiveSupWeb.Router, endpoint: LiveSupWeb.Endpoint},
      Plugins.Ecto,
      # Plugins.Oban,
      Plugins.PhoenixLiveView,
      # Plugins.Absinthe
      # Add your own PromEx metrics plugins
      # LiveSup.Users.PromExPlugin
      LiveSup.PromEx.WidgetPlugin
    ]
  end

  # PromEx.dashboard_assigns/0 callback: variables substituted into the
  # Grafana dashboard definitions when they are uploaded.
  @impl true
  def dashboard_assigns do
    [
      datasource_id: "prometheus",
      default_selected_interval: "30s"
    ]
  end

  # PromEx.dashboards/0 callback: Grafana dashboards to upload, one per
  # active plugin above.
  @impl true
  def dashboards do
    [
      # PromEx built in Grafana dashboards
      {:prom_ex, "app.json"},
      {:prom_ex, "beam.json"},
      {:prom_ex, "phoenix.json"},
      {:prom_ex, "ecto.json"},
      # {:prom_ex, "oban.json"},
      {:prom_ex, "phoenix_live_view.json"}
      # {:prom_ex, "absinthe.json"}
      # Add your dashboard definitions here with the format: {:otp_app, "path_in_priv"}
      # {:live_sup, "/grafana_dashboards/user_metrics.json"}
    ]
  end
end
|
lib/live_sup/prom_ex.ex
| 0.825941
| 0.675791
|
prom_ex.ex
|
starcoder
|
defmodule PhoenixToggl.TimeBoundries.TimeEntry do
  # Ecto schema for a time-tracking entry: tracks when work started, stopped,
  # was restarted, and the accumulated duration in seconds.
  use Ecto.Schema
  use Timex
  import Ecto.{ Query, Changeset }

  alias PhoenixToggl.{ TimeBoundries.TimeEntry, Accounts.User, Workspace.Board }

  # Only these fields are serialized to JSON (user/board associations omitted).
  @derive {Poison.Encoder, only: [
    :id, :description, :started_at, :stopped_at, :restarted_at, :duration,
    :updated_at
  ]}

  schema "time_boundries_time_entries" do
    field :description, :string
    # Accumulated duration in seconds (see stop_changeset/2).
    field :duration, :integer
    field :restarted_at, :naive_datetime
    field :started_at, :naive_datetime
    # nil stopped_at marks the entry as currently active (see active_for_user/2).
    field :stopped_at, :naive_datetime
    belongs_to :board, Board
    belongs_to :user, User
    timestamps()
  end

  @doc false
  def changeset(%TimeEntry{} = time_entry, attrs) do
    time_entry
    |> cast(attrs, [
      :description, :started_at, :stopped_at, :restarted_at, :duration,
      :user_id, :board_id
    ])
    |> validate_required([:started_at])
    |> foreign_key_constraint(:user_id)
  end

  @doc """
  Creates a default changeset and sets the first time_range
  """
  # NOTE(review): `params \\ :empty` is the pre-Ecto-2 convention; modern
  # `cast/3` does not accept `:empty` — confirm the Ecto version before
  # relying on the default.
  def start_changeset(model, params \\ :empty) do
    model
    |> changeset(params)
    |> put_change(:duration, 0)
  end

  @doc """
  Creates a default changeset and calculates the duration depending on
  if the TimeEntry has been restarted or not.
  """
  # NOTE(review): accesses `params.stopped_at`, so calling this with the
  # `:empty` default would raise — callers appear to always pass a map
  # (see stop/2 below).
  def stop_changeset(model, params \\ :empty) do
    # If the entry was restarted, add only the time since the restart to the
    # previously accumulated duration; otherwise measure from the start.
    duration = case model.restarted_at do
      nil ->
        Timex.diff(params.stopped_at, model.started_at, :seconds)
      restarted_at ->
        model.duration + Timex.diff(params.stopped_at, restarted_at, :seconds)
    end

    model
    |> changeset(params)
    |> put_change(:duration, duration)
  end

  @doc """
  Creates a default changeset and sets the stop key value
  on the last time_range
  """
  def restart_changeset(model, params \\ :empty) do
    model
    |> changeset(params)
    |> cast(
      params, [:description, :stopped_at, :duration, :board_id, :restarted_at]
    )
    |> validate_required([:restarted_at])
    # Clearing stopped_at marks the entry as active again.
    |> put_change(:stopped_at, nil)
  end

  @doc """
  Returns a start_changeset
  """
  def start(time_entry, attrs) do
    time_entry
    |> start_changeset(attrs)
  end

  @doc """
  Returns a stop_changeset
  """
  def stop(time_entry, date_time \\ Timex.now) do
    time_entry
    |> stop_changeset(%{stopped_at: date_time})
  end

  @doc """
  Returns a restart_changeset
  """
  def restart(time_entry, date_time \\ Timex.now) do
    time_entry
    |> restart_changeset(%{restarted_at: date_time})
  end

  # Query: entries for the user that are still running (no stopped_at).
  def active_for_user(query, user_id) do
    from t in query,
    where: t.user_id == ^user_id and is_nil(t.stopped_at)
  end

  # Query: finished entries for the user.
  def not_active_for_user(query, user_id) do
    from t in query,
    where: t.user_id == ^user_id and not(is_nil(t.stopped_at))
  end

  # Query: most recently stopped first.
  def sorted(query) do
    from t in query,
    order_by: [desc: t.stopped_at]
  end

  # Query: sum of durations for entries started on the given date.
  # NOTE(review): `Ecto.Date` is removed in Ecto 3 — this query only works on
  # Ecto 2; verify against the project's Ecto version.
  def total_duration_for_date(query, date) do
    from t in query,
    select: sum(t.duration),
    where: fragment("date(?) = date(?)", t.started_at, type(^date, Ecto.Date))
  end

  # Query: entries whose id is in the given list.
  def by_ids(query \\ TimeEntry, ids) do
    from t in query,
    where: t.id in ^ids
  end
end
|
lib/phoenix_toggl/time_boundries/time_entry.ex
| 0.765856
| 0.477615
|
time_entry.ex
|
starcoder
|
defmodule Grizzly.Inclusions.InclusionRunner.Inclusion do
  @moduledoc false
  # This module is useful for moving an inclusion process through
  # the various states
  #
  # It is a pure state machine: the transition functions near the bottom are
  # guarded by the current state, so an illegal transition fails fast with a
  # FunctionClauseError rather than silently corrupting the process.
  alias Grizzly.ZWave.Command
  alias Grizzly.ZWave.Commands.{NodeAdd, NodeRemove, NodeAddKeysSet, NodeAddDSKSet, LearnModeSet}

  @type state ::
          :started
          | :complete
          | :node_adding
          | :node_adding_stop
          | :node_removing_stop
          | :node_removing
          | :keys_requested
          | :keys_granted
          | :dsk_requested
          | :dsk_set
          | :learn_mode
          | :learn_mode_stop

  @type t :: %__MODULE__{
          handler: pid() | module() | {module(), keyword},
          current_command_ref: reference(),
          controller_id: Grizzly.node_id(),
          state: state(),
          dsk_input_length: non_neg_integer() | nil
        }

  defstruct handler: nil,
            current_command_ref: nil,
            controller_id: nil,
            state: :started,
            dsk_input_length: nil

  @spec current_command_ref(t()) :: reference()
  def current_command_ref(inclusion), do: inclusion.current_command_ref

  # Records the reference of the most recently issued Z-Wave command.
  def update_command_ref(inclusion, new_command_ref),
    do: %__MODULE__{inclusion | current_command_ref: new_command_ref}

  @spec controller_id(t()) :: Grizzly.node_id()
  def controller_id(inclusion), do: inclusion.controller_id

  @doc """
  Handle incoming command from the Z-Wave network
  This commands are:
  - `:node_add_keys_report` - command to tell Grizzly to add the security keys
  - `:node_add_status` - the report about the node add process
  - `:node_remove_status` - the report about the node removal process
  - `:node_add_dsk_report` - the report to tell Grizzly to add the DSK
  - `:learn_mode_set_status` - the report about setting learn mode
  """
  @spec handle_command(t(), Command.t(), keyword()) :: t()
  # No catch-all clause: an unexpected command name crashes the caller,
  # which is the intended "let it crash" behavior for this state machine.
  def handle_command(inclusion, command, opts \\ []) do
    case command.name do
      :node_add_keys_report -> keys_requested(inclusion)
      :node_add_status -> complete(inclusion)
      :node_remove_status -> complete(inclusion)
      :node_add_dsk_report -> dsk_requested(inclusion, opts)
      :learn_mode_set_status -> complete(inclusion)
    end
  end

  @spec complete?(t()) :: boolean()
  def complete?(%__MODULE__{state: :complete}), do: true
  def complete?(%__MODULE__{}), do: false

  @doc """
  Generate the next command based off the desired state of the inclusion
  This will return the next Z-Wave command to run along with the updated
  inclusion to track the current state of the inclusion
  """
  @spec next_command(t(), state(), Grizzly.seq_number(), keyword()) ::
          {Command.t() | nil, t()} | {:error, :dsk_required}
  def next_command(inclusion, desired_state, seq_number, command_params \\ [])

  def next_command(inclusion, :node_adding, seq_number, _command_params) do
    {:ok, command} = NodeAdd.new(seq_number: seq_number)
    {command, node_adding(inclusion)}
  end

  def next_command(inclusion, :node_adding_stop, seq_number, _) do
    {:ok, command} = NodeAdd.new(seq_number: seq_number, mode: :node_add_stop)
    {command, node_adding_stop(inclusion)}
  end

  def next_command(inclusion, :node_removing, seq_number, _) do
    {:ok, command} = NodeRemove.new(seq_number: seq_number)
    {command, node_removing(inclusion)}
  end

  def next_command(inclusion, :node_removing_stop, seq_number, _) do
    {:ok, command} = NodeRemove.new(seq_number: seq_number, mode: :remove_node_stop)
    {command, node_removing_stop(inclusion)}
  end

  # `nil` command: requesting keys only advances local state, no Z-Wave
  # command is sent at this step.
  def next_command(inclusion, :keys_requested, _seq_number, _) do
    {nil, keys_requested(inclusion)}
  end

  def next_command(inclusion, :keys_granted, seq_number, command_params) do
    {:ok, command} = NodeAddKeysSet.new(command_params ++ [seq_number: seq_number])
    {command, keys_granted(inclusion)}
  end

  # The DSK the user entered must be exactly as long as the length the device
  # reported in its DSK report; otherwise the pairing PIN is wrong/incomplete.
  def next_command(inclusion, :dsk_set, seq_number, command_params) do
    dsk = Keyword.fetch!(command_params, :dsk)
    input_dsk_length = byte_size_for_int(dsk)

    if input_dsk_length == inclusion.dsk_input_length do
      {:ok, command} =
        NodeAddDSKSet.new(
          seq_number: seq_number,
          accept: true,
          input_dsk_length: input_dsk_length,
          input_dsk: dsk
        )

      {command, dsk_set(inclusion)}
    else
      {:error, :dsk_required}
    end
  end

  def next_command(inclusion, :learn_mode, seq_number, _) do
    {:ok, command} =
      LearnModeSet.new(
        seq_number: seq_number,
        mode: :direct_range_only,
        return_interview_status: :off
      )

    {command, learn_mode(inclusion)}
  end

  def next_command(inclusion, :learn_mode_stop, seq_number, _) do
    {:ok, command} =
      LearnModeSet.new(seq_number: seq_number, mode: :disable, return_interview_status: :off)

    {command, learn_mode_stop(inclusion)}
  end

  # State transitions. Each clause matches only the legal predecessor state.
  def node_adding(%__MODULE__{state: :started} = inclusion) do
    %__MODULE__{inclusion | state: :node_adding}
  end

  def node_adding_stop(%__MODULE__{state: :node_adding} = inclusion) do
    %__MODULE__{inclusion | state: :node_adding_stop}
  end

  def node_removing(%__MODULE__{state: :started} = inclusion) do
    %__MODULE__{inclusion | state: :node_removing}
  end

  def node_removing_stop(%__MODULE__{state: :node_removing} = inclusion) do
    %__MODULE__{inclusion | state: :node_removing_stop}
  end

  # Any in-flight state (but not a fresh or already-complete one) may complete.
  def complete(%__MODULE__{state: state} = inclusion)
      when not (state in [:started, :complete]) do
    %__MODULE__{inclusion | state: :complete}
  end

  def keys_requested(%__MODULE__{state: :node_adding} = inclusion) do
    %__MODULE__{inclusion | state: :keys_requested}
  end

  def keys_granted(%__MODULE__{state: :keys_requested} = inclusion) do
    %__MODULE__{inclusion | state: :keys_granted}
  end

  # Also remembers the DSK length the device expects, checked in next_command/4.
  def dsk_requested(%__MODULE__{state: :keys_granted} = inclusion, opts) do
    dsk_input_length = Keyword.fetch!(opts, :dsk_input_length)
    %__MODULE__{inclusion | state: :dsk_requested, dsk_input_length: dsk_input_length}
  end

  def dsk_set(%__MODULE__{state: :dsk_requested} = inclusion) do
    %__MODULE__{inclusion | state: :dsk_set}
  end

  def learn_mode(%__MODULE__{state: :started} = inclusion) do
    %__MODULE__{inclusion | state: :learn_mode}
  end

  def learn_mode_stop(%__MODULE__{state: :learn_mode} = inclusion) do
    %__MODULE__{inclusion | state: :learn_mode_stop}
  end

  # Have not ran into any case for need to check higher than two
  # bytes. If this happens the guards should fail loudly and we can
  # add support quickly
  defp byte_size_for_int(0), do: 0
  defp byte_size_for_int(integer) when integer in 0x00..0xFF, do: 1
  defp byte_size_for_int(integer) when integer in 0x0100..0xFFFF, do: 2
end
|
lib/grizzly/inclusions/inclusion_runner/inclusion.ex
| 0.860384
| 0.512022
|
inclusion.ex
|
starcoder
|
defmodule Appsignal.Span do
alias Appsignal.{Config, Nif, Span}
defstruct [:reference, :pid]
@nif Application.get_env(:appsignal, :appsignal_tracer_nif, Appsignal.Nif)
@type t() :: %__MODULE__{
reference: reference(),
pid: pid()
}
@spec create_root(String.t(), pid()) :: t() | nil
@doc """
Create a root `Appsignal.Span` with a namespace and a pid.
For a description of namespaces, see `set_namespace/2`.
## Example
Appsignal.Span.create_root("http_request", self())
"""
def create_root(namespace, pid) do
if Config.active?() do
{:ok, reference} = @nif.create_root_span(namespace)
%Span{reference: reference, pid: pid}
end
end
@spec create_root(String.t(), pid(), integer()) :: t() | nil
@doc """
Create a root `Appsignal.Span` with a namespace, a pid and an explicit start time.
For a description of namespaces, see `set_namespace/2`.
## Example
Appsignal.Span.create_root("http_request", self(), :os.system_time())
"""
def create_root(namespace, pid, start_time) do
if Config.active?() do
sec = :erlang.convert_time_unit(start_time, :native, :second)
nsec = :erlang.convert_time_unit(start_time, :native, :nanosecond) - sec * 1_000_000_000
{:ok, reference} = @nif.create_root_span_with_timestamp(namespace, sec, nsec)
%Span{reference: reference, pid: pid}
end
end
@spec create_child(t() | nil, pid()) :: t() | nil
@doc """
Create a child `Appsignal.Span`.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.create_child(self())
"""
def create_child(%Span{reference: parent}, pid) do
if Config.active?() do
{:ok, reference} = @nif.create_child_span(parent)
%Span{reference: reference, pid: pid}
end
end
@spec create_child(t() | nil, pid(), integer()) :: t() | nil
@doc """
Create a child `Appsignal.Span` with an explicit start time.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.create_child(self(), :os.system_time())
"""
def create_child(%Span{reference: parent}, pid, start_time) do
if Config.active?() do
sec = :erlang.convert_time_unit(start_time, :native, :second)
nsec = :erlang.convert_time_unit(start_time, :native, :nanosecond) - sec * 1_000_000_000
{:ok, reference} = @nif.create_child_span_with_timestamp(parent, sec, nsec)
%Span{reference: reference, pid: pid}
end
end
@spec set_name(t() | nil, String.t()) :: t() | nil
@doc """
Sets an `Appsignal.Span`'s name.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.set_name("PageController#index")
"""
def set_name(%Span{reference: reference} = span, name)
when is_reference(reference) and is_binary(name) do
if Config.active?() do
:ok = @nif.set_span_name(reference, name)
span
end
end
def set_name(_span, _name), do: nil
@spec set_namespace(t() | nil, String.t()) :: t() | nil
@doc """
Sets an `Appsignal.Span`'s namespace. The namespace is `"http_request"` or
`"background_job'` to add the span to the "web" and "background" namespaces
respectively. Passing another string creates a custom namespace to store the
`Appsignal.Span`'s samples in.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.set_namespace("http_request")
"""
def set_namespace(%Span{reference: reference} = span, namespace) when is_binary(namespace) do
:ok = @nif.set_span_namespace(reference, namespace)
span
end
def set_namespace(_span, _name), do: nil
@spec set_attribute(t() | nil, String.t(), String.t() | integer() | boolean() | float()) ::
t() | nil
@doc """
Sets an `Appsignal.Span` attribute.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.set_attribute("appsignal:category", "query.ecto")
"""
def set_attribute(%Span{reference: reference} = span, key, true) when is_binary(key) do
:ok = Nif.set_span_attribute_bool(reference, key, 1)
span
end
def set_attribute(%Span{reference: reference} = span, key, false) when is_binary(key) do
:ok = Nif.set_span_attribute_bool(reference, key, 0)
span
end
def set_attribute(%Span{reference: reference} = span, key, value)
when is_binary(key) and is_binary(value) do
:ok = Nif.set_span_attribute_string(reference, key, value)
span
end
def set_attribute(%Span{reference: reference} = span, key, value)
when is_binary(key) and is_integer(value) do
:ok = Nif.set_span_attribute_int(reference, key, value)
span
end
def set_attribute(%Span{reference: reference} = span, key, value)
when is_binary(key) and is_float(value) do
:ok = Nif.set_span_attribute_double(reference, key, value)
span
end
def set_attribute(_span, _key, _value), do: nil
@spec set_sql(t() | nil, String.t()) :: t() | nil
@doc """
Sets the `"appsignal:body"` attribute with an SQL query string.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.set_sql("SELECT * FROM users")
"""
def set_sql(%Span{reference: reference} = span, body) when is_binary(body) do
:ok = Nif.set_span_attribute_sql_string(reference, "appsignal:body", body)
span
end
def set_sql(_span, _body), do: nil
@spec set_sample_data(t() | nil, String.t(), map()) :: t() | nil
@doc """
Sets sample data for an `Appsignal.Span`.
## Example
Appsignal.Tracer.root_span()
|> Appsignal.Span.set_sample_data("environment", %{"method" => "GET"})
"""
def set_sample_data(%Span{reference: reference} = span, key, value)
when is_binary(key) and is_map(value) do
data =
value
|> Appsignal.Utils.MapFilter.filter()
|> Appsignal.Utils.DataEncoder.encode()
:ok = Nif.set_span_sample_data(reference, key, data)
span
end
def set_sample_data(_span, _key, _value), do: nil
@spec add_error(t() | nil, Exception.kind(), any(), Exception.stacktrace()) :: t() | nil
@doc """
Add an error to an `Appsignal.Span` by passing a `kind` and `reason` from a
`catch` block, and a stack trace.
## Example
span = Appsignal.Tracer.root_span()
try
raise "Exception!"
catch
kind, reason ->
Appsignal.Span.add_error(span, kind, reason, __STACKTRACE__)
end
"""
def add_error(span, kind, reason, stacktrace) do
{name, message, formatted_stacktrace} = Appsignal.Error.metadata(kind, reason, stacktrace)
do_add_error(span, name, message, formatted_stacktrace)
end
@spec add_error(t() | nil, Exception.t(), Exception.stacktrace()) :: t() | nil
@doc """
Add an error to an `Appsignal.Span` by passing an exception from a `rescue`
block, and a stack trace.
## Example
span = Appsignal.Tracer.root_span()
try
raise "Exception!"
rescue
exception ->
Appsignal.Span.add_error(span, exception, __STACKTRACE__)
end
"""
def add_error(span, %_{__exception__: true} = exception, stacktrace) do
{name, message, formatted_stacktrace} = Appsignal.Error.metadata(exception, stacktrace)
do_add_error(span, name, message, formatted_stacktrace)
end
@doc false
# Writes the error onto the span via the NIF, but only when AppSignal is
# active. NOTE(review): when `Config.active?()` is false this `if` has no
# `else`, so the function returns `nil` rather than the span — callers that
# pipe the result must tolerate `nil`.
def do_add_error(%Span{reference: reference} = span, name, message, stacktrace) do
  if Config.active?() do
    :ok =
      @nif.add_span_error(
        reference,
        name,
        message,
        Appsignal.Utils.DataEncoder.encode(stacktrace)
      )

    span
  end
end

def do_add_error(nil, _name, _message, _stacktrace), do: nil
@spec close(t() | nil) :: t() | nil
@doc """
Close an `Appsignal.Span`.

## Example

    Appsignal.Tracer.root_span()
    |> Span.close()
"""
def close(%Span{reference: reference} = span) do
  :ok = @nif.close_span(reference)
  span
end

# No-op fallback so piping a `nil` span is safe.
def close(nil), do: nil
@spec close(t() | nil, integer()) :: t() | nil
@doc """
Close an `Appsignal.Span` with an explicit end time.

`end_time` is a timestamp in the VM's `:native` time unit, as returned by
`:os.system_time/0`.

## Example

    Appsignal.Tracer.root_span()
    |> Span.close(:os.system_time())
"""
def close(%Span{reference: reference} = span, end_time) do
  # Split the native timestamp into whole seconds and the nanosecond
  # remainder, as expected by the NIF.
  sec = :erlang.convert_time_unit(end_time, :native, :second)
  nsec = :erlang.convert_time_unit(end_time, :native, :nanosecond) - sec * 1_000_000_000
  :ok = @nif.close_span_with_timestamp(reference, sec, nsec)
  span
end

def close(nil, _end_time), do: nil
@doc false
# Serialises the span to JSON via the NIF and decodes it into a map.
# Used internally (e.g. by tests); crashes if the NIF cannot serialise.
def to_map(%Span{reference: reference}) do
  {:ok, json} = Nif.span_to_json(reference)
  Appsignal.Json.decode!(json)
end
end
|
lib/appsignal/span.ex
| 0.873754
| 0.510313
|
span.ex
|
starcoder
|
defmodule Recurly.APIError do
  @moduledoc """
  Module represents a generic APIError with a `symbol` and `description`.
  """
  defstruct ~w(status_code symbol description)a

  defimpl Recurly.XML.Parser do
    import SweetXml

    # Parses the `<error>` element of an error response into a
    # `%Recurly.APIError{}`. Only `symbol` and `description` are read here;
    # `status_code` is not set by this parser.
    #
    # NOTE(review): `String.to_atom/1` creates atoms from server-supplied
    # text and atoms are never garbage-collected. Safe only if Recurly's
    # error symbols are a small closed set — consider
    # `String.to_existing_atom/1`.
    def parse(_error, xml_doc, false) do
      parsed =
        xpath(
          xml_doc,
          ~x"//error",
          symbol: ~x"./symbol/text()"s |> transform_by(&String.to_atom/1),
          description: ~x"./description/text()"s
        )

      struct(Recurly.APIError, parsed)
    end
  end
end
defmodule Recurly.NotFoundError do
  @moduledoc """
  Module represents a not found error (404)

  ## Examples

  ```
  case Recurly.Account.find("accountdoesntexist") do
    {:ok, account} -> account
    {:error, error} -> error
  end
  # %Recurly.NotFoundError{description: "Couldn't find Account with account_code = accountdoesntexist",
  #  path: nil, status_code: 404, symbol: :not_found}
  ```
  """
  defstruct ~w(status_code path symbol description)a

  defimpl Recurly.XML.Parser do
    import SweetXml

    # Parses the `<error>` element into a `%Recurly.NotFoundError{}`.
    # `status_code` and `path` are not set by this parser.
    #
    # NOTE(review): `String.to_atom/1` creates atoms from server-supplied
    # text — see the atom-exhaustion caveat in Recurly.APIError.
    def parse(_error, xml_doc, false) do
      parsed =
        xpath(
          xml_doc,
          ~x"//error",
          symbol: ~x"./symbol/text()"s |> transform_by(&String.to_atom/1),
          description: ~x"./description/text()"s
        )

      struct(Recurly.NotFoundError, parsed)
    end
  end
end
defmodule Recurly.ValidationError do
  @moduledoc """
  Module represents a validation error that you may encounter on a create or update.
  See the [documentation on validation errors](https://dev.recurly.com/docs/api-validation-errors)
  for more details.

  The ValidationError is a struct that contains the `status_code` and an array of field errors called
  `errors` that map 1x1 with the xml tags.

  ## Examples

  ```
  case Recurly.Account.create(account_code: "myaccountcode") do
    {:ok, account} -> account
    {:error, error} -> error
  end
  # %Recurly.ValidationError{errors: [%{field: "account.account_code", lang: "",
  #   symbol: :taken, text: "has already been taken"}], status_code: 422}
  ```
  """
  defstruct ~w(status_code errors)a

  defimpl Recurly.XML.Parser do
    import SweetXml

    # Parses every `<error>` under `<errors>` into a plain map with
    # :symbol, :field, :lang and :text keys (attributes + node text).
    #
    # NOTE(review): `String.to_atom/1` on the `symbol` attribute creates
    # atoms from external input — see the atom-exhaustion caveat in
    # Recurly.APIError.
    def parse(_error, xml_doc, false) do
      errors =
        xml_doc
        |> xpath(~x"//errors")
        |> xpath(~x"./error"l)
        |> Enum.map(fn xml_node ->
          %{
            symbol: xml_node |> xpath(~x"./@symbol"s) |> String.to_atom,
            field: xml_node |> xpath(~x"./@field"s),
            lang: xml_node |> xpath(~x"./@lang"s),
            text: xml_node |> xpath(~x"./text()"s)
          }
        end)

      struct(Recurly.ValidationError, %{errors: errors})
    end
  end
end
|
lib/recurly/errors.ex
| 0.900283
| 0.711631
|
errors.ex
|
starcoder
|
defmodule Playground.Scenario.ETS do
  use Playground.Scenario

  # Iteration sizes: powers of two from 2^10 (1024) up to 2^20 (~1M).
  def scenario_type do
    {:iterations, Stream.map(10..20, &round(:math.pow(2, &1)))}
  end

  def scenario_banner do
    """
    Scenario: use an ETS table.
    Data: {integer}
    Tasks:
    - Bulk Load of <count> items (in one go)
    - Concurrent Load of <count> items
    - Sequential Read of <count> items
    - Randomised Read of <count> items
    - Sequential Update Counter
    - Sequential Update Element
    - Sequential Lookup Element + Update Element
    """
  end

  # Rows are single-integer tuples generated by StreamData.
  def scenario_arguments do
    [StreamData.tuple({StreamData.integer()})]
  end

  # Runs every benchmark task for one iteration of `count` rows.
  # With `concurrent: true` in `options` the tables get read/write
  # concurrency hints; otherwise plain public sets are used.
  # `run_tasks/3` and `random_count/1` are presumably provided by
  # `use Playground.Scenario` — they are not defined in this file.
  def scenario_iteration(count, stream, options \\ []) do
    IO.write("#{String.pad_leading(Integer.to_string(count), 11)}: ")

    table_options =
      case Keyword.get(options, :concurrent, false) do
        true -> [:set, :public, {:read_concurrency, true}, {:write_concurrency, true}]
        false -> [:set, :public]
      end

    # Bulk load: build all rows up front, then insert them with one call
    # into a throwaway table.
    run_tasks("Bulk Load", 1, fn _ ->
      table_ref = :ets.new(__MODULE__, table_options)

      data =
        Stream.map(Stream.zip(1..count, stream), fn {key, values} ->
          List.to_tuple([key | Tuple.to_list(values)])
        end)
        |> Enum.take(count)

      :ets.insert(table_ref, data)
    end)

    # All remaining tasks share this table; the concurrent load below
    # populates keys 1..count that the reads/updates then hit.
    table_ref = :ets.new(__MODULE__, table_options)

    run_tasks("Concurrent Load", count, fn x ->
      [value] = Enum.take(stream, 1)
      tuple = List.to_tuple([x | Tuple.to_list(value)])
      :ets.insert(table_ref, tuple)
    end)

    run_tasks("Sequential Read", count, fn x ->
      :ets.lookup(table_ref, x)
    end)

    run_tasks("Random Read", random_count(count), fn x ->
      :ets.lookup(table_ref, x)
    end)

    # Increment the integer in position 2 of each row.
    run_tasks("Sequential Update Counter", count, fn x ->
      :ets.update_counter(table_ref, x, {2, 1})
    end)

    # Overwrite position 2 with a constant.
    run_tasks("Sequential Update Element", count, fn x ->
      :ets.update_element(table_ref, x, {2, 0})
    end)

    # Read-modify-write round trip on position 2.
    run_tasks("Sequential Lookup Element + Update Element", count, fn x ->
      element = :ets.lookup_element(table_ref, x, 2)
      :ets.update_element(table_ref, x, {2, element})
    end)
  end
end
|
lib/playground/scenario/ets.ex
| 0.758958
| 0.609669
|
ets.ex
|
starcoder
|
defmodule DeltaCrdt.AWLWWMap do
  @moduledoc """
  An add-wins, last-write-wins map delta-CRDT.

  Values are stored per key as `{value, timestamp}` pairs tagged with
  causal "dots" (`{node, counter}` pairs); `read/1` resolves concurrent
  writes by picking the entry with the largest timestamp.
  """
  defstruct keys: MapSet.new(),
            dots: MapSet.new(),
            value: %{}

  require Logger

  @doc false
  def new(), do: %__MODULE__{}

  defmodule Dots do
    @moduledoc false
    # Helpers for dot sets. Dots exist in two representations:
    # a MapSet of `{node, counter}` pairs, or a "compressed" map of
    # `node => max_counter` (a causal context).

    # MapSet of dots -> compressed map keeping the max counter per node.
    def compress(dots = %MapSet{}) do
      Enum.reduce(dots, %{}, fn {c, i}, dots_map ->
        Map.update(dots_map, c, i, fn
          x when x > i -> x
          _x -> i
        end)
      end)
    end

    def decompress(dots = %MapSet{}), do: dots

    # Compressed map -> enumerable of every individual dot it covers.
    def decompress(dots) do
      Enum.flat_map(dots, fn {i, x} ->
        Enum.map(1..x, fn y -> {i, y} end)
      end)
    end

    def next_dot(i, c = %MapSet{}) do
      # NOTE(review): Logger.warn/1 is deprecated in newer Elixir in
      # favour of Logger.warning/1.
      Logger.warn("inefficient next_dot computation")
      next_dot(i, compress(c))
    end

    # Next unused dot for node `i` given a compressed context `c`.
    def next_dot(i, c) do
      {i, Map.get(c, i, 0) + 1}
    end

    def union(dots1 = %MapSet{}, dots2 = %MapSet{}) do
      MapSet.union(dots1, dots2)
    end

    def union(dots1 = %MapSet{}, dots2), do: union(dots2, dots1)

    # Union of two compressed contexts: per-node max of counters.
    def union(dots1, dots2) do
      Enum.reduce(dots2, dots1, fn {c, i}, dots_map ->
        Map.update(dots_map, c, i, fn
          x when x > i -> x
          _x -> i
        end)
      end)
    end

    def difference(dots1 = %MapSet{}, dots2 = %MapSet{}) do
      MapSet.difference(dots1, dots2)
    end

    def difference(_dots1, _dots2 = %MapSet{}), do: raise("this should not happen")

    # Dots from the set that are not covered by the compressed context.
    def difference(dots1, dots2) do
      Enum.reject(dots1, fn dot ->
        member?(dots2, dot)
      end)
      |> MapSet.new()
    end

    def member?(dots = %MapSet{}, dot = {_, _}) do
      MapSet.member?(dots, dot)
    end

    # A compressed context covers {i, x} when its counter for i is >= x.
    def member?(dots, {i, x}) do
      Map.get(dots, i, 0) >= x
    end

    def strict_expansion?(_dots = %MapSet{}, _delta_dots) do
      raise "we should not get here"
    end

    # True when every minimal dot in the delta is at most one step ahead
    # of the local context (i.e. the delta can be applied causally).
    def strict_expansion?(dots, delta_dots) do
      Enum.all?(min_dots(delta_dots), fn {i, x} ->
        Map.get(dots, i, 0) + 1 >= x
      end)
    end

    # Smallest counter per node present in a dot set.
    def min_dots(dots = %MapSet{}) do
      Enum.reduce(dots, %{}, fn {i, x}, min ->
        Map.update(min, i, x, fn
          min when min < x -> min
          _min -> x
        end)
      end)
    end

    def min_dots(_dots) do
      %{}
    end
  end

  # Add (or overwrite) `key` with `value` on behalf of node `i`.
  # Produces a delta that removes the old entry's dots (add-wins remove)
  # and adds a fresh dot tagged with the current nanosecond timestamp,
  # which read/1 later uses for last-write-wins resolution.
  def add(key, value, i, state) do
    rem = remove(key, i, state)

    add =
      fn aw_set, context ->
        aw_set_add(i, {value, System.system_time(:nanosecond)}, {aw_set, context})
      end
      |> apply_op(key, state)

    case MapSet.size(rem.dots) do
      0 -> add
      _ -> join(rem, add)
    end
  end

  @doc false
  def compress_dots(state) do
    %{state | dots: Dots.compress(state.dots)}
  end

  # Adds `el` to the per-key add-wins set, tagging it with the next dot.
  defp aw_set_add(i, el, {aw_set, c}) do
    d = Dots.next_dot(i, c)
    {%{el => MapSet.new([d])}, MapSet.put(Map.get(aw_set, el, MapSet.new()), d)}
  end

  # Applies a per-key operation and wraps the result as a delta.
  defp apply_op(op, key, %{value: m, dots: c}) do
    {val, c_p} = op.(Map.get(m, key, %{}), c)

    %__MODULE__{
      dots: MapSet.new(c_p),
      keys: MapSet.new([key]),
      value: %{key => val}
    }
  end

  # Delta that removes `key`: carries the removed entry's dots but an
  # empty value map.
  def remove(key, _i, state) do
    %{value: val} = state

    to_remove_dots =
      case Map.fetch(val, key) do
        {:ok, value} -> Enum.flat_map(value, fn {_val, to_remove_dots} -> to_remove_dots end)
        :error -> []
      end

    %__MODULE__{
      dots: MapSet.new(to_remove_dots),
      keys: MapSet.new([key]),
      value: %{}
    }
  end

  def clear(_i, state) do
    Map.put(state, :value, %{})
  end

  @doc false
  # Keeps only the single-dot deltas that actually change `state`.
  def minimum_deltas(delta, state) do
    join_decomposition(delta)
    |> Enum.filter(fn delta -> expansion?(delta, state) end)
  end

  @doc false
  def expansion?(%{value: values} = d, state) when map_size(values) == 0 do
    # check remove expansion
    case Enum.to_list(d.dots) do
      [] ->
        false

      [dot] ->
        Dots.member?(state.dots, dot) && !MapSet.disjoint?(d.keys, state.keys) &&
          Map.take(state.value, d.keys)
          |> Enum.any?(fn {_k, val} ->
            Enum.any?(val, fn
              {_v, dots} -> MapSet.member?(dots, dot)
            end)
          end)
    end
  end

  @doc false
  def expansion?(d, state) do
    # check add expansion
    case Enum.to_list(d.dots) do
      [] -> false
      [dot] -> !Dots.member?(state.dots, dot)
    end
  end

  # Inverts the value map into dot => key for join_decomposition/1.
  defp dots_to_deltas(%{value: val}) do
    Enum.flat_map(val, fn {key, dot_map} ->
      Enum.flat_map(dot_map, fn {_key, dots} ->
        Enum.map(dots, fn dot -> {dot, key} end)
      end)
    end)
    |> Map.new()
  end

  @doc false
  # Splits a delta into one single-dot delta per dot it carries; dots
  # with no surviving value become pure removals.
  def join_decomposition(delta) do
    d2d = dots_to_deltas(delta)

    Enum.map(Dots.decompress(delta.dots), fn dot ->
      case Map.get(d2d, dot) do
        nil ->
          %__MODULE__{
            dots: MapSet.new([dot]),
            keys: delta.keys,
            value: %{}
          }

        key ->
          dots = Map.get(delta.value, key)

          %__MODULE__{
            dots: MapSet.new([dot]),
            keys: MapSet.new([key]),
            value: %{key => dots}
          }
      end
    end)
  end

  @doc false
  # Merges two states/deltas: values via the nested or-map join, dots and
  # keys via set union.
  def join(delta1, delta2) do
    new_dots = Dots.union(delta1.dots, delta2.dots)
    new_keys = MapSet.union(delta1.keys, delta2.keys)

    join_or_maps(delta1, delta2, [:join_or_maps, :join_dot_sets])
    |> Map.put(:dots, new_dots)
    |> Map.put(:keys, new_keys)
  end

  @doc false
  # Joins two or-maps level by level; `nested_joins` names the join to use
  # one level down (ending in join_dot_sets for the leaf dot sets).
  def join_or_maps(delta1, delta2, nested_joins) do
    val1 = delta1.value
    val2 = delta2.value

    all_intersecting = Enum.empty?(delta1.keys) || Enum.empty?(delta2.keys)

    intersecting_keys =
      if all_intersecting do
        # "no keys" means that we have to check every key
        MapSet.new(Map.keys(val1) ++ Map.keys(val2))
      else
        MapSet.intersection(delta1.keys, delta2.keys)
      end

    resolved_conflicts =
      Enum.flat_map(intersecting_keys, fn key ->
        sub_delta1 =
          Map.put(delta1, :value, Map.get(delta1.value, key, %{}))
          |> Map.put(:keys, MapSet.new())

        sub_delta2 =
          Map.put(delta2, :value, Map.get(delta2.value, key, %{}))
          |> Map.put(:keys, MapSet.new())

        [next_join | other_joins] = nested_joins

        %{value: new_sub} = apply(__MODULE__, next_join, [sub_delta1, sub_delta2, other_joins])

        if Enum.empty?(new_sub) do
          []
        else
          [{key, new_sub}]
        end
      end)
      |> Map.new()

    new_val =
      if all_intersecting do
        resolved_conflicts
      else
        Map.drop(delta1.value, intersecting_keys)
        |> Map.merge(Map.drop(delta2.value, intersecting_keys))
        |> Map.merge(resolved_conflicts)
      end

    %__MODULE__{
      value: new_val
    }
  end

  @doc false
  # Leaf join: keep dots seen by both sides, plus each side's dots not
  # already covered by the other side's causal context.
  def join_dot_sets(%{value: s1, dots: c1}, %{value: s2, dots: c2}, []) do
    s1 = MapSet.new(s1)
    s2 = MapSet.new(s2)

    new_s =
      [
        MapSet.intersection(s1, s2),
        Dots.difference(s1, c2),
        Dots.difference(s2, c1)
      ]
      |> Enum.reduce(&MapSet.union/2)

    %__MODULE__{value: new_s}
  end

  # Observable value of the map: per key, the entry with the newest
  # timestamp wins (last-write-wins).
  def read(%{value: values}) do
    Map.new(values, fn {key, values} ->
      {{val, _ts}, _c} = Enum.max_by(values, fn {{_val, ts}, _c} -> ts end)
      {key, val}
    end)
  end
end
|
lib/delta_crdt/aw_lww_map.ex
| 0.677581
| 0.442998
|
aw_lww_map.ex
|
starcoder
|
defmodule Broadway.Acknowledger do
  @moduledoc """
  A behaviour used to acknowledge that the received messages
  were successfully processed or failed.
  When implementing a new connector for Broadway, you should
  implement this behaviour and consider how the technology
  you're working with handles message acknowledgement.
  The `c:ack/3` callback must be implemented in order to notify
  the origin of the data that a message can be safely removed
  after been successfully processed and published. In case of
  failed messages or messages without acknowledgement, depending
  on the technology chosen, the messages can be either moved back
  in the queue or, alternatively, moved to a *dead-letter queue*.
  """

  alias Broadway.Message

  require Logger

  @doc """
  Invoked to acknowledge successful and failed messages.
  * `ack_ref` is a term that uniquely identifies how messages
    should be grouped and sent for acknowledgement. Imagine
    you have a scenario where messages are coming from
    different producers. Broadway will use this information
    to correctly identify the acknowledger and pass it among
    with the messages so you can properly communicate with
    the source of the data for acknowledgement.
  * `successful` is the list of messages that were
    successfully processed and published.
  * `failed` is the list of messages that, for some reason,
    could not be processed or published.
  """
  @callback ack(ack_ref :: term, successful :: [Message.t()], failed :: [Message.t()]) ::
              :ok

  @doc """
  Configures the acknowledger with new `options`.
  Every acknowledger can decide how to incorporate the given `options` into its
  `ack_data`. The `ack_data` is the current acknowledger's data. The return value
  of this function is `{:ok, new_ack_data}` where `new_ack_data` is the updated
  data for the acknowledger.
  Note that `options` are different for every acknowledger, as the acknowledger
  is what specifies what are the supported options. Check the documentation for the
  acknowledger you're using to see the supported options.
  """
  @callback configure(ack_ref :: term, ack_data :: term, options :: keyword) ::
              {:ok, new_ack_data :: term}

  @optional_callbacks [configure: 3]

  @doc false
  # Groups messages by their {acknowledger, ack_ref} pair and invokes each
  # acknowledger's ack/3 once with its successful and failed messages.
  #
  # Fix: the previous spec declared `no_return`, which in a typespec means
  # "never returns" (always raises/exits). This function returns `:ok` from
  # Enum.each/2, so the spec now says so.
  @spec ack_messages([Message.t()], [Message.t()]) :: :ok
  def ack_messages(successful, failed) do
    %{}
    |> group_by_acknowledger(successful, :successful)
    |> group_by_acknowledger(failed, :failed)
    |> Enum.each(&call_ack/1)
  end

  # Buckets messages in the process dictionary under {{acknowledger, ack_ref}, key}
  # (prepending, so lists come out reversed) and records each distinct
  # acknowledger in `ackers`. The pdict is used to avoid building many
  # intermediate lists; call_ack/1 drains it.
  defp group_by_acknowledger(ackers, messages, key) do
    Enum.reduce(messages, ackers, fn %{acknowledger: {acknowledger, ack_ref, _}} = msg, acc ->
      ack_info = {acknowledger, ack_ref}
      pdict_key = {ack_info, key}
      Process.put(pdict_key, [msg | Process.get(pdict_key, [])])
      Map.put(acc, ack_info, true)
    end)
  end

  # Drains the pdict buckets for one acknowledger and calls its ack/3.
  # Enum.reverse/1 restores the original message order.
  defp call_ack({{acknowledger, ack_ref} = ack_info, true}) do
    successful = Process.delete({ack_info, :successful}) || []
    failed = Process.delete({ack_info, :failed}) || []
    acknowledger.ack(ack_ref, Enum.reverse(successful), Enum.reverse(failed))
  end

  @doc false
  # Builds a crash reason used in Logger reporting.
  def crash_reason(:throw, reason, stack), do: {{:nocatch, reason}, stack}
  def crash_reason(:error, reason, stack), do: {reason, stack}
  def crash_reason(:exit, reason, stack), do: {reason, stack}

  # Used by the processor and the batcher to maybe call c:handle_failed/2
  # on failed messages.
  @doc false
  def maybe_handle_failed_messages(messages, module, context, size) do
    if function_exported?(module, :handle_failed, 2) and messages != [] do
      handle_failed_messages(messages, module, context, size)
    else
      messages
    end
  end

  # Calls module.handle_failed/2, guarding against crashes (logged, original
  # messages kept) and against callbacks that return the wrong number of
  # messages or a non-list (logged, return value ignored).
  defp handle_failed_messages(messages, module, context, size) do
    module.handle_failed(messages, context)
  catch
    kind, reason ->
      Logger.error(Exception.format(kind, reason, __STACKTRACE__),
        crash_reason: crash_reason(kind, reason, __STACKTRACE__)
      )

      messages
  else
    messages when is_list(messages) ->
      returned_size = length(messages)

      if returned_size != size do
        Logger.error(
          "#{inspect(module)}.handle_failed/2 received #{size} messages and " <>
            "returned only #{returned_size}. All messages given to handle_failed/2 " <>
            "must be returned"
        )
      end

      messages

    _other ->
      Logger.error(
        "#{inspect(module)}.handle_failed/2 didn't return a list of messages, " <>
          "so ignoring its return value"
      )

      messages
  end
end
|
lib/broadway/acknowledger.ex
| 0.847732
| 0.58673
|
acknowledger.ex
|
starcoder
|
defmodule EnvHelper do
  @moduledoc """
  Helpers for environment and application variables.
  """

  @doc """
  creates a method `name/0` which returns either `alt` or the environment variable set for the upcase version `name`.

  ## Parameters
  * `name` :: atom
    The name of a system environment variable, downcase, as an atom
  * `alt` :: any
    The value used if the system variable is not set.

  ## Example
      defmodule EnvSets do
        import EnvHelper
        system_env(:app_url, "localhost:2000")
      end

      > EnvSets.app_url
      "localhost:2000"
      > System.put_env("APP_URL", "app.io")
      :ok
      > EnvSets.app_url
      "app.io"
  """
  defmacro system_env(name, alt) do
    # The env var name is derived at compile time: :app_url -> "APP_URL".
    env_name = Atom.to_string(name) |> String.upcase()

    quote do
      def unquote(name)() do
        System.get_env(unquote(env_name)) || unquote(alt)
      end
    end
  end

  @doc """
  creates a method `name/0` which returns either `alt` or the environment variable set for the upcase version `name`, cast to an integer.

  ## Parameters
  * `name` :: atom
    The name of a system environment variable, downcase, as an atom
  * `alt` :: any
    The value used if the system variable is not set.
  * :string_to_integer
    forces the returned value to be an integer.

  ## Example
      defmodule EnvSets do
        import EnvHelper
        system_env(:app_port, 2000, :string_to_integer)
      end

      > EnvSets.app_port
      2000
      > System.put_env("APP_PORT", "2340")
      :ok
      > EnvSets.app_port
      2340
      > System.put_env("APP_PORT", 2360)
      :ok
      > EnvSets.app_port
      2360
  """
  @spec system_env(atom, any, :string_to_integer) :: integer
  defmacro system_env(name, alt, :string_to_integer) do
    env_name = Atom.to_string(name) |> String.upcase()

    quote do
      def unquote(name)() do
        # Env vars are strings; the fallback `alt` may already be an integer.
        value = System.get_env(unquote(env_name)) || unquote(alt)
        if is_binary(value), do: String.to_integer(value), else: value
      end
    end
  end

  @doc """
  creates a method `name/0` which returns either `alt` or the environment variable set for the upcase version `name`, cast to a boolean.

  ## Parameters
  * `name` :: atom
    The name of a system environment variable, downcase, as an atom
  * `alt` :: any
    The value used if the system variable is not set.
  * :string_to_boolean
    forces the returned value to be a boolean.

  ## Example
      defmodule EnvSets do
        import EnvHelper
        system_env(:auto_connect, false, :string_to_boolean)
      end

      > EnvSets.auto_connect
      false
      > System.put_env("AUTO_CONNECT", "true")
      :ok
      > EnvSets.auto_connect
      true
      > System.put_env("AUTO_CONNECT", "false")
      :ok
      > EnvSets.auto_connect
      false
  """
  @spec system_env(atom, any, :string_to_boolean) :: boolean
  defmacro system_env(name, alt, :string_to_boolean) do
    env_name = Atom.to_string(name) |> String.upcase()

    quote do
      def unquote(name)() do
        value = System.get_env(unquote(env_name)) || unquote(alt)

        if is_binary(value) do
          # "", "false" and "nil" count as false; anything else is true.
          String.downcase(value) not in ["", "false", "nil"]
        else
          # Coerce any non-string fallback to a strict boolean.
          value && true
        end
      end
    end
  end

  @doc """
  defines a method `name/0` which returns either the application variable for `appname[key]` or the provided `alt` value.
  Works best in simple cases, such as `config :appname, :key, value`

  ## Example (simple app variable)
      defmodule AppEnvs do
        import EnvHelper
        app_env(:port, [:appname, :port], 1234)
      end

      > AppEnvs.port
      1234
      > Application.put_env(:appname, :port, 5678)
      :ok
      > AppEnvs.port
      5678

  ## Example (nested app variable)
      defmodule AppEnvs do
        import EnvHelper
        defmodule Section do
          app_env(:secret, [:appname, :section, :secret], "default secret")
        end
      end

      > AppEnvs.Section.secret
      "default secret"
      > Application.put_env(:appname, :section, [secret: "my super secret"])
      :ok
      > AppEnvs.Section.secret
      "my super secret"
  """
  defmacro app_env(name, [appname, key], alt) do
    quote do
      def unquote(name)() do
        Application.get_env(unquote(appname), unquote(key)) || unquote(alt)
      end
    end
  end

  # Three-element path: the value under `key` is expected to be a keyword
  # list which is searched for `subkey`.
  defmacro app_env(name, [appname, key, subkey], alt) do
    quote do
      def unquote(name)() do
        opts = Application.get_env(unquote(appname), unquote(key)) || []

        case List.keyfind(opts, unquote(subkey), 0) do
          {_, value} -> value
          nil -> unquote(alt)
        end
      end
    end
  end
end
|
lib/env_helper.ex
| 0.869535
| 0.430985
|
env_helper.ex
|
starcoder
|
defmodule Dwolla.WebhookSubscription do
  @moduledoc """
  Functions for `webhook-subscriptions` endpoint.
  """

  alias Dwolla.Utils

  defstruct id: nil, created: nil, url: nil, paused: false

  @type t :: %__MODULE__{id: String.t(), created: String.t(), url: String.t(), paused: boolean}
  @type token :: String.t()
  @type id :: String.t()
  @type params :: %{required(atom) => any}
  @type error :: HTTPoison.Error.t() | Dwolla.Errors.t() | tuple
  @type location :: %{id: String.t()}

  @endpoint "webhook-subscriptions"

  @doc """
  Creates a webhook subscription.
  Parameters
  ```
  %{
    url: "http://myapplication.com/webhooks",
    secret: "s3cret"
  }
  ```
  """
  @spec create(token, map) :: {:ok, location} | {:error, error}
  def create(token, params) do
    idempotency_headers = Utils.idempotency_header(params)

    :post
    |> Dwolla.make_request_with_token(@endpoint, token, params, idempotency_headers)
    |> Utils.handle_resp(:webhook_subscription)
  end

  @doc """
  Gets a webhook subscription by id.
  """
  @spec get(token, id) ::
          {:ok, Dwolla.WebhookSubscription.t()} | {:error, error}
  def get(token, id) do
    :get
    |> Dwolla.make_request_with_token(@endpoint <> "/#{id}", token)
    |> Utils.handle_resp(:webhook_subscription)
  end

  @doc """
  Pauses a webhook subscription.
  """
  @spec pause(token, id) ::
          {:ok, Dwolla.WebhookSubscription.t()} | {:error, error}
  def pause(token, id), do: set_paused(token, id, true)

  @doc """
  Resume a webhook subscription.
  """
  @spec resume(token, id) ::
          {:ok, Dwolla.WebhookSubscription.t()} | {:error, error}
  def resume(token, id), do: set_paused(token, id, false)

  # Both pause and resume are a POST to the subscription resource with the
  # desired `paused` flag.
  defp set_paused(token, id, paused?) do
    :post
    |> Dwolla.make_request_with_token(@endpoint <> "/#{id}", token, %{paused: paused?})
    |> Utils.handle_resp(:webhook_subscription)
  end

  @doc """
  Lists webhook subscriptions.
  """
  @spec list(token) ::
          {:ok, [Dwolla.WebhookSubscription.t()]} | {:error, error}
  def list(token) do
    :get
    |> Dwolla.make_request_with_token(@endpoint, token)
    |> Utils.handle_resp(:webhook_subscription)
  end

  @doc """
  Deletes a webhook subscription.
  """
  @spec delete(token, id) ::
          {:ok, Dwolla.WebhookSubscription.t()} | {:error, error}
  def delete(token, id) do
    :delete
    |> Dwolla.make_request_with_token(@endpoint <> "/#{id}", token)
    |> Utils.handle_resp(:webhook_subscription)
  end

  @doc """
  Lists webhooks for a given webhook subscription.
  Parameters (optional)
  ```
  %{
    limit: 50,
    offset: 0
  }
  ```
  """
  @spec webhooks(token, id, params | nil) :: {:ok, [Dwolla.Webhook.t()]} | {:error, error}
  def webhooks(token, id, params \\ %{}) do
    base = @endpoint <> "/#{id}/webhooks"

    # Only append a query string when params were actually given.
    endpoint =
      if map_size(params) == 0 do
        base
      else
        base <> "?" <> Utils.encode_params(params)
      end

    :get
    |> Dwolla.make_request_with_token(endpoint, token)
    |> Utils.handle_resp(:webhook)
  end
end
|
lib/dwolla/webhook_subscription.ex
| 0.811974
| 0.447581
|
webhook_subscription.ex
|
starcoder
|
defprotocol Gringotts.Money do
  @moduledoc """
  Money protocol used by the Gringotts API.
  The `amount` argument required for some of Gringotts' API methods must
  implement this protocol.
  If your application is already using a supported Money library, just pass in
  the Money struct and things will work out of the box.
  Otherwise, just wrap your `amount` with the `currency` together in a `Map`
  like so,

      price = %{value: Decimal.new("20.18"), currency: "USD"}

  and the API will accept it (as long as the currency is valid [ISO 4217
  currency code](https://www.iso.org/iso-4217-currency-codes.html)).

  ## Note on the `Any` implementation
  Both `to_string/1` and `to_integer/1` assume that the precision for the `currency`
  is 2 digits after decimal.
  """

  @fallback_to_any true

  # NOTE(review): this type is defined in terms of itself
  # (`Gringotts.Money.t()` inside `Gringotts.Money`) — effectively opaque.
  @type t :: Gringotts.Money.t()

  @doc """
  Returns the ISO 4217 compliant currency code associated with this sum of money.
  This must be an UPCASE `string`
  """
  @spec currency(t) :: String.t()
  def currency(money)

  @doc """
  Returns a `Decimal.t` representing the "worth" of this sum of money in the
  associated `currency`.
  """
  @spec value(t) :: Decimal.t()
  def value(money)

  @doc """
  Returns the ISO4217 `currency` code as string and `value` as an integer.
  Useful for gateways that require amount as integer (like cents instead of
  dollars).

  ## Note
  Conversion from `Decimal.t` to `integer` is potentially lossy and the rounding
  (if required) is performed (automatically) by the Money library defining the
  type, or in the implementation of this protocol method.
  If you want to implement this method for your custom type, please ensure that
  the rounding strategy (if any rounding is applied) must be
  [`half_even`][wiki-half-even].
  **To keep things predictable and transparent, merchants should round the
  `amount` themselves**, perhaps by explicitly calling the relevant method of
  the Money library in their application _before_ passing it to `Gringotts`'s
  public API.

  ## Examples

      # the money lib is aliased as "MoneyLib"
      iex> usd_price = MoneyLib.new("4.1234", :USD)
      #MoneyLib<4.1234, "USD">
      iex> Gringotts.Money.to_integer(usd_price)
      {"USD", 412, -2}
      iex> bhd_price = MoneyLib.new("4.1234", :BHD)
      #MoneyLib<4.1234, "BHD">
      iex> Gringotts.Money.to_integer(bhd_price)
      {"BHD", 4123, -3}
      # the Bahraini dinar is divided into 1000 fils unlike the dollar which is
      # divided in 100 cents

  [wiki-half-even]: https://en.wikipedia.org/wiki/Rounding#Round_half_to_even
  """
  # NOTE(review): this spec uses `Money.t()` while the other callbacks use
  # the protocol's own `t` — inconsistent, though harmless to Dialyzer-less
  # builds.
  @spec to_integer(Money.t()) ::
          {currency :: String.t(), value :: integer, exponent :: neg_integer}
  def to_integer(money)

  @doc """
  Returns a tuple of ISO4217 `currency` code and the `value` as strings.
  The stringified `value` must match this regex: `~r/-?\\d+\\.\\d\\d{n}/` where
  `n+1` should match the required precision for the `currency`. There should be
  no place value separators except the decimal point (like commas).
  > Gringotts will not (and cannot) validate this of course.

  ## Note
  Conversion from `Decimal.t` to `string` is potentially lossy and the rounding
  (if required) is performed (automatically) by the Money library defining the
  type, or in the implementation of this protocol method.
  If you want to implement this method for your custom type, please ensure that
  the rounding strategy (if any rounding is applied) must be
  [`half_even`][wiki-half-even].
  **To keep things predictable and transparent, merchants should round the
  `amount` themselves**, perhaps by explicitly calling the relevant method of
  the Money library in their application _before_ passing it to `Gringotts`'s
  public API.

  ## Examples

      # the money lib is aliased as "MoneyLib"
      iex> usd_price = MoneyLib.new("4.1234", :USD)
      #MoneyLib<4.1234, "USD">
      iex> Gringotts.Money.to_string(usd_price)
      {"USD", "4.12"}
      iex> bhd_price = MoneyLib.new("4.1234", :BHD)
      #MoneyLib<4.1234, "BHD">
      iex> Gringotts.Money.to_string(bhd_price)
      {"BHD", "4.123"}

  [wiki-half-even]: https://en.wikipedia.org/wiki/Rounding#Round_half_to_even
  """
  @spec to_string(t) :: {currency :: String.t(), value :: String.t()}
  def to_string(money)
end
# this implementation is used for dispatch on ex_money (and will also fire for
# money)
# NOTE(review): `Code.ensure_compiled?/1` is deprecated/removed in newer
# Elixir releases — `Code.ensure_loaded?/1` (or `Code.ensure_compiled/1`)
# is the replacement. Verify against the project's minimum Elixir version.
if Code.ensure_compiled?(Money) do
  defimpl Gringotts.Money, for: Money do
    # ex_money stores the currency as an atom; the protocol demands a string.
    def currency(money), do: money.currency |> Atom.to_string()
    def value(money), do: money.amount

    # Delegates rounding/scaling to Money.to_integer_exp/1.
    def to_integer(money) do
      {_, int_value, exponent, _} = Money.to_integer_exp(money)
      {currency(money), int_value, exponent}
    end

    # Rounds to the currency's official number of digits (via CLDR data)
    # after normalising the decimal with Money.reduce/1.
    def to_string(money) do
      {:ok, currency_data} = Cldr.Currency.currency_for_code(currency(money))
      reduced = Money.reduce(money)

      {
        currency(reduced),
        value(reduced)
        |> Decimal.round(currency_data.digits)
        |> Decimal.to_string()
      }
    end
  end
end
# NOTE(review): same `Code.ensure_compiled?/1` deprecation caveat as above.
# Also, this impl defines only currency/1 and value/1 — calling
# to_integer/1 or to_string/1 on a Monetized.Money value will raise at
# runtime, since protocol impls do not fall back per-function to Any.
if Code.ensure_compiled?(Monetized.Money) do
  defimpl Gringotts.Money, for: Monetized.Money do
    def currency(money), do: money.currency
    def value(money), do: money.amount
  end
end
# Assumes that the currency is subdivided into 100 units
defimpl Gringotts.Money, for: Any do
  def currency(%{currency: currency}), do: currency
  def value(%{value: %Decimal{} = value}), do: value

  # Scales by 100 and rounds to a whole number; the exponent is fixed at
  # -2 (two digits after the decimal point).
  def to_integer(%{value: %Decimal{} = value, currency: currency}) do
    {
      currency,
      value
      |> Decimal.mult(Decimal.new(100))
      |> Decimal.round(0)
      |> Decimal.to_integer(),
      -2
    }
  end

  # Fixed two-digit precision, matching to_integer/1 above.
  def to_string(%{value: %Decimal{} = value, currency: currency}) do
    {
      currency,
      value |> Decimal.round(2) |> Decimal.to_string()
    }
  end
end
|
lib/gringotts/money.ex
| 0.920361
| 0.640959
|
money.ex
|
starcoder
|
defmodule AOC.IEx do
  @moduledoc """
  IEx helpers for advent of code.
  This module contains various helpers that make it easy to call procedures in your solution
  modules. This is particularly useful when you are testing your solutions from within iex.
  In order to avoid prefixing all calls with `AOC.IEx`, we recommend adding `import AOC.IEx` to
  your [`.iex.exs` file](https://hexdocs.pm/iex/IEx.html#module-the-iex-exs-file).

  ## Requirements and `AOC.aoc/3`
  In order to find a module for a given day and year, this module expects the module to have the
  name `Y<year>.D<day>`. This is automatically the case if the `AOC.aoc/3` macro was used to build
  the solution module.
  Besides this, it is expected that the solutions for part 1 and part 2 are defined in non-private
  functions named `p1` and `p2`.

  ## Using this module
  The `p1/0` and `p2/0` functions can be used to call the `p1` and `p2` functions of your solution
  module. By default, these functions are called on the module that represents the current day.
  The current day (and year) is determined by `NaiveDateTime.local_now/0`.
  If it is past midnight, or if you wish to solve an older challenge, there are a few options at
  your disposal:
  - `p1/2` and `p2/2` accept a day and year argument.
  - `p1/1` and `p1/1` accept a day argument and uses the current year by default.
  - The current year and day can be configured through the `:advent_of_code_utils` application
    environment. For instance, you can set the year to `1991` and the day to `8` by placing the
    following in your `config/config.exs`:
  ```
  import Config

  config :advent_of_code_utils,
    day: 8,
    year: 1991
  ```
  To summarise, the day or year is determined according to the following rules:
  1. If year or day was passed as an argument (to `p1/2`, `p1/1`, `p2/2` or `p2/1`) it is always
     used.
  2. If `:year` or `:day` is present in the `:advent_of_code_utils` application environment, it
     is used.
  3. The `year` or `day` returned by `NaiveDateTime.local_now/0` is used.

  ## Automatic recompilation
  It is often necessary to recompile the current `mix` project before running `p1` or `p2`. To
  avoid manually doing this, all the functions in this module will recompile the current mix
  project (with `IEx.Helpers.recompile/1`) before calling `p1` or `p2` when `:auto_compile?` is
  set to `true` in the `:advent_of_code_utils` application environment.
  Auto reload can be enabled by adding the following to your `config/config.exs`:
  ```
  import Config

  config :advent_of_code_utils, auto_compile?: true
  ```
  """

  alias AOC.Helpers

  # NOTE(review): despite the `?` suffix this returns the found
  # `{name, _, _}` tuple or `false`, not a strict boolean. It is only used
  # in truthy position below, so this works, but a strict-boolean
  # `Enum.any?/2` would be more idiomatic.
  defp mix_started? do
    Application.started_applications() |> Enum.find(false, fn {name, _, _} -> name == :mix end)
  end

  # Recompiles the mix project when :auto_compile? is enabled and mix is
  # actually running (recompile/0 is unavailable outside a mix session).
  defp maybe_compile() do
    compile? = Application.get_env(:advent_of_code_utils, :auto_compile?, false)
    if(compile? and mix_started?(), do: IEx.Helpers.recompile())
  end

  @doc """
  Get the module name for `year`, `day`.
  This function may cause recompilation if `auto_compile?` is enabled.

  ## Examples

      iex> mod(1991, 8)
      Y1991.D8
  """
  def mod(year, day) do
    maybe_compile()
    Helpers.module_name(year, day)
  end

  @doc """
  Get the module name for `day`, lookup `year`.
  `year` is fetched from the application environment, otherwise the local time is used.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.

  ## Examples

      iex> Application.put_env(:advent_of_code_utils, :year, 1991)
      iex> mod(8)
      Y1991.D8
      iex> Application.put_env(:advent_of_code_utils, :year, 2000)
      iex> mod(3)
      Y2000.D3
  """
  def mod(day) do
    maybe_compile()
    Helpers.module_name(Helpers.year(), day)
  end

  @doc """
  Get the module name for the current or configured `day` and `year`.
  `day` and `year` are fetched from the application environment, the local time is used if they
  are not available.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.

  ## Examples

      iex> Application.put_env(:advent_of_code_utils, :year, 1991)
      iex> Application.put_env(:advent_of_code_utils, :day, 8)
      iex> mod()
      Y1991.D8
      iex> Application.put_env(:advent_of_code_utils, :year, 2000)
      iex> Application.put_env(:advent_of_code_utils, :day, 3)
      iex> mod()
      Y2000.D3
  """
  def mod do
    maybe_compile()
    Helpers.module_name(Helpers.year(), Helpers.day())
  end

  @doc """
  Call `Y<year>.D<day>.p1()`
  `day` and `year` are fetched from the application environment, the local time is used if they
  are not available.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p1(), do: p1(Helpers.year(), Helpers.day())

  @doc """
  Call `Y<year>.D<day>.p2()`
  `day` and `year` are fetched from the application environment, the local time is used if they
  are not available.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p2(), do: p2(Helpers.year(), Helpers.day())

  @doc """
  Call `Y<year>.D<day>.p1()`
  `year` is fetched from the application environment, the local time is used if it is not
  available.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p1(day), do: p1(Helpers.year(), day)

  @doc """
  Call `Y<year>.D<day>.p2()`
  `year` is fetched from the application environment, the local time is used if it is not
  available.
  Please refer to the module documentation for additional information.
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p2(day), do: p2(Helpers.year(), day)

  @doc """
  Call `Y<year>.D<day>.p1()`
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p1(year, day), do: mod(year, day).p1()

  @doc """
  Call `Y<year>.D<day>.p2()`
  This function may cause recompilation if `auto_compile?` is enabled.
  """
  def p2(year, day), do: mod(year, day).p2()
end
|
lib/aoc/iex.ex
| 0.864925
| 0.89765
|
iex.ex
|
starcoder
|
defmodule Ancestry do
  @moduledoc """
  Documentation for Ancestry.

  `use Ancestry` in an Ecto schema module to gain tree-navigation functions
  (roots, ancestors, children, siblings, descendants, …) backed by a
  materialized-path ("ancestry") column.
  """

  # Stashes the caller's options in a module attribute and defers the real
  # work to `__before_compile__/1`, where user options are merged with the
  # defaults.
  defmacro __using__(opts) do
    quote do
      import unquote(__MODULE__)
      @ancestry_opts unquote(opts)
      @before_compile unquote(__MODULE__)
    end
  end

  defmacro __before_compile__(%{module: module}) do
    ancestry_opts = Module.get_attribute(module, :ancestry_opts)

    default_opts = [
      ancestry_column: :ancestry,
      orphan_strategy: :destroy
    ]

    opts = Keyword.merge(default_opts, ancestry_opts)

    quote do
      import Ecto.Query

      defdelegate delete(record, opts \\ unquote(opts), module \\ unquote(module)),
        to: Ancestry.Repo

      defdelegate arrange(record, opts \\ unquote(opts), module \\ unquote(module)),
        to: Ancestry.Repo

      defdelegate get_ancestry_value(
                    record,
                    relation \\ "children",
                    opts \\ unquote(opts),
                    module \\ unquote(module)
                  ),
                  to: Ancestry.Repo

      @doc """
      Gets Root nodes.
      """
      @spec roots() :: Enum.t()
      def roots(prefix \\ nil) do
        # A root has a NULL or empty ancestry column.
        query =
          from(
            u in unquote(module),
            where:
              fragment(
                unquote("#{opts[:ancestry_column]} IS NULL OR #{opts[:ancestry_column]} = ''")
              )
          )

        unquote(opts[:repo]).all(query, prefix: prefix)
      end

      @doc """
      Gets ancestor ids of the record
      """
      @spec ancestor_ids(Ecto.Schema.t()) :: Enum.t()
      def ancestor_ids(record) do
        record.unquote(opts[:ancestry_column])
        |> parse_ancestry_column()
      end

      @doc """
      Returns ancestors of the record, starting with the root and ending with the parent
      """
      @spec ancestors(Ecto.Schema.t()) :: Enum.t()
      def ancestors(record, prefix \\ nil) do
        case ancestor_ids(record) do
          nil ->
            nil

          ancestors ->
            query =
              from(
                u in unquote(module),
                where: u.id in ^ancestors
              )

            unquote(opts[:repo]).all(query, prefix: prefix)
        end
      end

      @doc """
      Returns true if the record is a root node, false otherwise
      """
      @spec is_root?(Ecto.Schema.t()) :: true | false
      def is_root?(record) do
        case record.unquote(opts[:ancestry_column]) do
          "" -> true
          nil -> true
          _ -> false
        end
      end

      @doc """
      Gets root of the record's tree, self for a root node
      """
      @spec root(Ecto.Schema.t()) :: Ecto.Schema.t()
      def root(record, prefix \\ nil) do
        unquote(opts[:repo]).get!(unquote(module), root_id(record), prefix: prefix)
      end

      @doc """
      Gets root id of the record's tree, self for a root node
      """
      @spec root_id(Ecto.Schema.t()) :: integer
      def root_id(record) do
        case is_root?(record) do
          true ->
            record.id

          false ->
            # The root id is the first component of the materialized path.
            record.unquote(opts[:ancestry_column])
            |> parse_ancestry_column()
            |> hd()
        end
      end

      @doc """
      Direct children of the record
      """
      @spec children(Ecto.Schema.t()) :: Enum.t()
      def children(record, prefix \\ nil) do
        record
        |> do_children_query()
        |> unquote(opts[:repo]).all(prefix: prefix)
      end

      @doc """
      Direct children's ids
      """
      @spec child_ids(Ecto.Schema.t()) :: Enum.t()
      def child_ids(record) do
        record
        |> children()
        |> Enum.map(fn child -> Map.get(child, :id) end)
      end

      @doc """
      Returns true if the record has any children, false otherwise
      """
      @spec has_children?(Ecto.Schema.t()) :: true | false
      def has_children?(record) do
        # Comparing to [] avoids the O(n) traversal `length/1` would do.
        children(record) != []
      end

      @doc """
      Returns true is the record has no children, false otherwise
      """
      @spec is_childless?(Ecto.Schema.t()) :: true | false
      def is_childless?(record) do
        not has_children?(record)
      end

      @doc """
      Gets parent of the record, nil for a root node
      """
      @spec parent(Ecto.Schema.t()) :: nil | Ecto.Schema.t()
      def parent(record, prefix \\ nil) do
        case parent_id(record) do
          nil -> nil
          id -> unquote(opts[:repo]).get!(unquote(module), id, prefix: prefix)
        end
      end

      @doc """
      Gets parent id of the record, nil for a root node
      """
      @spec parent_id(Ecto.Schema.t()) :: nil | integer
      def parent_id(record) do
        case ancestor_ids(record) do
          nil ->
            nil

          ancestors ->
            # The parent is the last component of the materialized path.
            ancestors |> List.last()
        end
      end

      @doc """
      Returns true if the record has a parent, false otherwise
      """
      @spec has_parent?(Ecto.Schema.t()) :: true | false
      def has_parent?(record) do
        case parent_id(record) do
          nil -> false
          _ -> true
        end
      end

      @doc """
      Gets siblings of the record, the record itself is included
      """
      @spec siblings(Ecto.Schema.t()) :: Enum.t()
      def siblings(record, prefix \\ nil) do
        record
        |> do_siblings_query()
        |> unquote(opts[:repo]).all(prefix: prefix)
      end

      @doc """
      Gets sibling ids
      """
      @spec sibling_ids(Ecto.Schema.t()) :: Enum.t()
      def sibling_ids(record) do
        record
        |> siblings()
        |> Enum.map(fn x -> Map.get(x, :id) end)
      end

      @doc """
      Returns true if the record's parent has more than one child
      """
      @spec has_siblings?(Ecto.Schema.t()) :: true | false
      def has_siblings?(record) do
        # siblings/1 includes the record itself, so "has siblings" means the
        # parent has MORE than one child. The previous `> 0` check was always
        # true for any persisted record.
        record
        |> siblings()
        |> length()
        |> Kernel.>(1)
      end

      @doc """
      Returns true if the record is the only child of its parent.
      """
      @spec is_only_child?(Ecto.Schema.t()) :: true | false
      def is_only_child?(record) do
        siblings(record) == [record]
      end

      @doc """
      Gets direct and indirect children of the record
      """
      @spec descendants(Ecto.Schema.t()) :: Enum.t()
      def descendants(record, prefix \\ nil) do
        record
        |> descendants_query()
        |> unquote(opts[:repo]).all(prefix: prefix)
      end

      @doc """
      Gets direct and indirect children's ids of the record
      """
      @spec descendant_ids(Ecto.Schema.t()) :: Enum.t()
      def descendant_ids(record, prefix \\ nil) do
        record
        |> descendants(prefix)
        |> Enum.map(fn x -> Map.get(x, :id) end)
      end

      @doc false
      def descendants_query(record, _prefix \\ nil) do
        # A descendant either has this record's subtree path as its exact
        # ancestry (a direct child) or as a path prefix (a deeper descendant).
        query_string = child_ancestry(record)

        from(
          u in unquote(module),
          where:
            fragment(unquote("#{opts[:ancestry_column]} = ?"), ^"#{query_string}") or
              fragment(
                unquote("#{opts[:ancestry_column]} LIKE ?"),
                ^"#{query_string}/%"
              )
        )
      end

      @doc """
      Gets the model on descendants and itself.
      """
      @spec subtree(Ecto.Schema.t()) :: Enum.t()
      def subtree(record, prefix \\ nil) do
        [record | descendants(record, prefix)]
      end

      @doc """
      Returns path of the record, starting with the root and ending with self
      """
      @spec path(Ecto.Schema.t()) :: Enum.t()
      def path(record, prefix \\ nil) do
        case is_root?(record) do
          true -> [record]
          false -> ancestors(record, prefix) ++ [record]
        end
      end

      @doc """
      a list the path ids, starting with the root id and ending with the node's own id
      """
      @spec path_ids(Ecto.Schema.t()) :: Enum.t()
      def path_ids(record) do
        case is_root?(record) do
          true -> [record.id]
          false -> ancestor_ids(record) ++ [record.id]
        end
      end

      @doc """
      the depth of the node, root nodes are at depth 0
      """
      @spec depth(Ecto.Schema.t()) :: integer
      def depth(record) do
        path_ids(record)
        |> length()
        |> Kernel.-(1)
      end

      @doc """
      Gets a list of all ids in the record's subtree
      """
      @spec subtree_ids(Ecto.Schema.t()) :: Enum.t()
      def subtree_ids(record) do
        record
        |> subtree()
        |> Enum.map(fn x -> Map.get(x, :id) end)
      end

      @doc false
      def child_ancestry(record) do
        # The path a child of this record would carry in its ancestry column.
        case is_root?(record) do
          true -> "#{record.id}"
          false -> "#{record.unquote(opts[:ancestry_column])}/#{record.id}"
        end
      end

      # Query for rows sharing this record's ancestry value, i.e. the same
      # parent (includes the record itself).
      defp do_siblings_query(record) do
        from(
          u in unquote(module),
          where:
            fragment(
              unquote("#{opts[:ancestry_column]} = ?"),
              ^record.unquote(opts[:ancestry_column])
            )
        )
      end

      # Query for rows whose ancestry is exactly this record's subtree path,
      # i.e. the direct children.
      defp do_children_query(record) do
        from(
          u in unquote(module),
          where:
            fragment(
              unquote("#{opts[:ancestry_column]} = ?"),
              ^child_ancestry(record)
            )
        )
      end

      # Splits an "id/id/id" materialized path into a list of integer ids;
      # nil for an empty/NULL column (a root node).
      defp parse_ancestry_column(""), do: nil
      defp parse_ancestry_column(nil), do: nil

      defp parse_ancestry_column(field) do
        field
        |> String.split("/")
        |> Enum.map(fn x -> String.to_integer(x) end)
      end
    end
  end
end
|
lib/ancestry.ex
| 0.797911
| 0.414662
|
ancestry.ex
|
starcoder
|
defmodule Cldr.Locale do
@moduledoc """
Functions to parse and normalize locale names into a structure
locale represented by a `Cldr.LanguageTag`.
CLDR represents localisation data organized into locales, with
each locale being identified by a locale name that is formatted
according to [RFC5646](https://tools.ietf.org/html/rfc5646).
In practise, the CLDR data utilizes a simple subset of locale name
formats being:
* a Language code such as `en` or `fr`
* a Language code and Territory code such as `en-GB`
* a Language code and Script such as `zh-Hant`
* and in only two cases a Language code, Territory code and Variant
such as `ca-ES-VALENCIA` and `en-US-POSIX`.
The RFC defines a language tag as:
> A language tag is composed from a sequence of one or more "subtags",
each of which refines or narrows the range of language identified by
the overall tag. Subtags, in turn, are a sequence of alphanumeric
characters (letters and digits), distinguished and separated from
other subtags in a tag by a hyphen ("-", [Unicode] U+002D)
Therefore `Cldr` uses the hyphen ("-", [Unicode] U+002D) as the subtag
separator. On certain platforms, including POSIX platforms, the
subtag separator is a "_" (underscore) rather than a "-" (hyphen). Where
appropriate, `Cldr` will transliterate any underscore into a hyphen before
parsing or processing.
### Locale name validity
When validating a locale name, `Cldr` will attempt to match the requested
locale name to a configured locale. Therefore `Cldr.Locale.new/2` may
return an `{:ok, language_tag}` tuple even when the locale returned does
not exactly match the requested locale name. For example, the following
attempts to create a locale matching the non-existent "english as spoken
in Spain" local name. Here `Cldr` will match to the nearest configured
locale, which in this case will be "en".
iex> Cldr.Locale.new("en-ES", TestBackend.Cldr)
{:ok, %Cldr.LanguageTag{
canonical_locale_name: "en-Latn-ES",
cldr_locale_name: "en",
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en-ES",
script: "Latn",
territory: "ES",
transform: %{},
language_variant: nil
}}
### Matching locales to requested locale names
When attempting to match the requested locale name to a configured
locale, `Cldr` attempts to match against a set of reductions in the
following order and will return the first match:
* language, script, territory, variant
* language, territory, variant
* language, script, variant
* language, variant
* language, script, territory
* language, territory
* language, script
* language
* requested locale name
* nil
Therefore matching is tolerant of a request for unknown scripts,
territories and variants. Only the requested language is a
requirement to be matched to a configured locale.
### Substitutions for Obsolete and Deprecated locale names
CLDR provides data to help manage the transition from obsolete
or deprecated locale names to current names. For example, the
following requests the locale name "mo" which is the deprecated
code for "Moldovian". The replacement code is "ro" (Romanian).
iex> Cldr.Locale.new("mo", TestBackend.Cldr)
{:ok, %Cldr.LanguageTag{extensions: %{},
gettext_locale_name: nil,
language: "ro",
language_subtags: [],
language_variant: nil,
locale: %{}, private_use: [],
rbnf_locale_name: "ro",
requested_locale_name: "mo",
script: "Latn",
transform: %{},
canonical_locale_name: "ro-Latn-RO",
cldr_locale_name: "ro",
territory: "RO"
}}
### Likely subtags
CLDR also provides data to identify the most likely subtags for a
requested locale name. This data is based on the default content data,
the population data, and the suppress-script data in [BCP47]. It is
heuristically derived, and may change over time. For example, when
requesting the locale "en", the following is returned:
iex> Cldr.Locale.new("en", TestBackend.Cldr)
{:ok, %Cldr.LanguageTag{
canonical_locale_name: "en-Latn-US",
cldr_locale_name: "en",
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en",
script: "Latn",
territory: "US",
transform: %{},
language_variant: nil
}}
Which shows that the likely subtag for the script is "Latn" and the likely
territory is "US".
Using the example for Substitutions above, we can see the
result of combining substitutions and likely subtags for locale name "mo"
returns the current language code of "ro" as well as the likely
territory code of "MD" (Moldova).
### Unknown territory codes
Whilst `Cldr` is tolerant of invalid territory codes, it is also important
that such invalid codes not shadow the potential replacement of deprecated
codes nor the insertion of likely subtags. Therefore invalid territory
codes are ignored during this process. For example requesting a locale
name "en-XX" which requests the invalid territory "XX", the following
will be returned:
iex> Cldr.Locale.new("en-XX", TestBackend.Cldr)
{:ok, %Cldr.LanguageTag{
canonical_locale_name: "en-Latn-US",
cldr_locale_name: "en",
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en",
script: "Latn",
territory: "US",
transform: %{},
language_variant: nil
}}
### Locale extensions
Unicode defines the [U extension](https://unicode.org/reports/tr35/#Locale_Extension_Key_and_Type_Data)
which support defining the requested treatment of CLDR data formats. For example, a locale name
can configure the requested:
* calendar to be used for dates
* collation
* currency
* currency format
* number system
* first day of the week
* 12-hour or 24-hour time
* time zone
* and many other items
For example, the following locale name will request the use of the timezone `Australia/Sydney`,
and request the use of `accounting` format when formatting currencies:
iex> MyApp.Cldr.validate_locale "en-AU-u-tz-ausyd-cf-account"
{:ok,
%Cldr.LanguageTag{
canonical_locale_name: "en-Latn-AU",
cldr_locale_name: "en-AU",
extensions: %{},
gettext_locale_name: "en",
language: "en",
language_subtags: [],
language_variant: nil,
locale: %{currency_format: :accounting, timezone: "Australia/Sydney"},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en-AU",
script: "Latn",
territory: "AU",
transform: %{}
}}
"""
alias Cldr.LanguageTag
import Cldr.Helpers
@typedoc "The name of a locale in a string format"
@type locale_name() :: String.t()
@type language :: String.t() | nil
@type script :: String.t() | nil
@type territory :: String.t() | nil
@type variant :: String.t() | nil
@type subtags :: [String.t(), ...] | []
@doc false
def define_locale_new(config) do
  # Builds the `<backend>.Locale` module at backend-compilation time. It
  # curries the backend module into `Cldr.Locale.new/2` and `new!/2`.
  quote location: :keep do
    defmodule Locale do
      @moduledoc false
      if Cldr.Config.include_module_docs?(unquote(config.generate_docs)) do
        @moduledoc """
        Backend module that provides functions
        to define new locales.
        """
      end

      def new(locale_name), do: Cldr.Locale.new(locale_name, unquote(config.backend))
      def new!(locale_name), do: Cldr.Locale.new!(locale_name, unquote(config.backend))
    end
  end
end

# `new/2` and `new!/2` are the public constructors; both are aliases for
# `canonical_language_tag/2` and its raising variant.
defdelegate new(locale_name, backend), to: __MODULE__, as: :canonical_language_tag
defdelegate new!(locale_name, backend), to: __MODULE__, as: :canonical_language_tag!

# Conversions between POSIX locale names ("en_US") and the hyphenated
# BCP47 form ("en-US") used throughout CLDR.
defdelegate locale_name_to_posix(locale_name), to: Cldr.Config
defdelegate locale_name_from_posix(locale_name), to: Cldr.Config
@doc """
Parses a locale name and returns a `Cldr.LanguageTag` struct
that represents a locale.
## Arguments
* `language_tag` is any language tag returned by `Cldr.Locale.new/2`
or any `locale_name` returned by `Cldr.known_locale_names/1`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Returns
* `{:ok, language_tag}` or
* `{:eror, reason}`
## Method
1. The language tag is parsed in accordance with [RFC5646](https://tools.ietf.org/html/rfc5646)
2. Any language, script or region aliases are replaced. This
will replace any obsolete elements with current versions
3. If a territory or script is not specified, a default is provided
using the CLDR information returned by `Cldr.Locale.likely_subtags/1`
4. A `Cldr` locale name is selected that is the nearest fit to the
requested locale.
## Example
iex> Cldr.Locale.canonical_language_tag("en", TestBackend.Cldr)
{
:ok,
%Cldr.LanguageTag{
canonical_locale_name: "en-Latn-US",
cldr_locale_name: "en",
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en",
script: "Latn",
territory: "US",
transform: %{},
language_variant: nil
}
}
"""
def canonical_language_tag(locale_name, backend)
when is_binary(locale_name) do
if locale_name in backend.known_locale_names do
Cldr.validate_locale(locale_name, backend)
else
case LanguageTag.parse(locale_name) do
{:ok, language_tag} ->
canonical_language_tag(language_tag, backend)
{:error, reason} ->
{:error, reason}
end
end
end
# Canonicalises an already-parsed tag: validates the territory, applies alias
# substitutions and likely subtags, then resolves the CLDR, RBNF and Gettext
# locale names against the backend.
def canonical_language_tag(%LanguageTag{} = language_tag, backend) do
  # When no language could be parsed, keep the requested name exactly as the
  # caller supplied it rather than rebuilding it from (empty) subtags.
  suppress_requested_name? = !language_tag.language

  resolved =
    language_tag
    |> check_valid_territory()
    |> put_requested_locale_name(suppress_requested_name?)
    |> substitute_aliases()
    |> add_likely_subtags()

  canonical =
    resolved
    |> Map.put(:canonical_locale_name, locale_name_from(resolved))
    |> put_cldr_locale_name(backend)
    |> put_rbnf_locale_name(backend)
    |> put_gettext_locale_name(backend)

  {:ok, canonical}
end
@doc """
Parses a locale name and returns a `Cldr.LanguageTag` struct
that represents a locale or raises on error.
## Arguments
* `language_tag` is any language tag returned by `Cldr.Locale.new/2`
or any `locale_name` returned by `Cldr.known_locale_names/1`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
See `Cldr.Locale.canonical_language_tag/2` for more information.
"""
@spec canonical_language_tag!(locale_name | Cldr.LanguageTag.t(), Cldr.backend()) ::
Cldr.LanguageTag.t() | none()
def canonical_language_tag!(language_tag, backend) do
case canonical_language_tag(language_tag, backend) do
{:ok, canonical_tag} -> canonical_tag
{:error, {exception, reason}} -> raise exception, reason
end
end
# Records the requested locale name on the tag unless suppressed (i.e. when
# no language was parsed from the original input).
@spec put_requested_locale_name(Cldr.LanguageTag.t(), boolean()) :: Cldr.LanguageTag.t()
defp put_requested_locale_name(language_tag, true), do: language_tag

defp put_requested_locale_name(language_tag, false),
  do: Map.put(language_tag, :requested_locale_name, locale_name_from(language_tag))

# Resolves and stores the best-matching configured CLDR locale name.
@spec put_cldr_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: Cldr.LanguageTag.t()
defp put_cldr_locale_name(%LanguageTag{} = tag, backend),
  do: %{tag | cldr_locale_name: cldr_locale_name(tag, backend)}

# Resolves and stores the best-matching RBNF locale name.
@spec put_rbnf_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: Cldr.LanguageTag.t()
defp put_rbnf_locale_name(%LanguageTag{} = tag, backend),
  do: %{tag | rbnf_locale_name: rbnf_locale_name(tag, backend)}

# Resolves and stores the best-matching Gettext locale name. The nil clause
# exists because callers may thread a missing tag through this step.
@spec put_gettext_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: Cldr.LanguageTag.t()
def put_gettext_locale_name(nil, _backend), do: nil

def put_gettext_locale_name(%LanguageTag{} = tag, backend),
  do: %{tag | gettext_locale_name: gettext_locale_name(tag, backend)}
@spec cldr_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: locale_name() | nil
# Best configured-locale match for the tag's subtags, falling back to a
# match on the originally requested name, otherwise nil.
defp cldr_locale_name(%LanguageTag{} = language_tag, backend) do
  first_match(language_tag, &Cldr.known_locale_name(&1, backend)) ||
    Cldr.known_locale_name(language_tag.requested_locale_name, backend) || nil
end

@spec rbnf_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: locale_name | nil
# Best RBNF (rule-based number format) locale match, or nil.
defp rbnf_locale_name(%LanguageTag{} = language_tag, backend) do
  first_match(language_tag, &Cldr.known_rbnf_locale_name(&1, backend))
end

@spec gettext_locale_name(Cldr.LanguageTag.t(), Cldr.backend()) :: locale_name | nil
# Best Gettext locale match, converted to POSIX form ("en_US") since that is
# the naming convention Gettext uses.
defp gettext_locale_name(%LanguageTag{} = language_tag, backend) do
  language_tag
  |> first_match(&known_gettext_locale_name(&1, backend))
  |> locale_name_to_posix
end

@spec known_gettext_locale_name(locale_name(), Cldr.backend() | Cldr.Config.t()) ::
        locale_name() | false
def known_gettext_locale_name(locale_name, backend) when is_atom(backend) do
  gettext_locales = backend.known_gettext_locale_names
  Enum.find(gettext_locales, &Kernel.==(&1, locale_name)) || false
end

# This clause is only called at compile time when we're
# building a backend. In normal use it should not be called.
@doc false
def known_gettext_locale_name(locale_name, config) when is_map(config) do
  gettext_locales = Cldr.Config.known_gettext_locale_names(config)
  Enum.find(gettext_locales, &Kernel.==(&1, locale_name)) || false
end

# Tries progressively less specific locale names built from the tag's
# subtags and returns the first one accepted by `fun`, or nil. All
# reductions that include the variant are tried before those without it,
# matching the order documented in the module docs.
defp first_match(
       %LanguageTag{
         language: language,
         script: script,
         territory: territory,
         language_variant: variant
       },
       fun
     )
     when is_function(fun) do
  # Including variant
  fun.(locale_name_from(language, script, territory, variant)) ||
    fun.(locale_name_from(language, nil, territory, variant)) ||
    fun.(locale_name_from(language, script, nil, variant)) ||
    fun.(locale_name_from(language, nil, nil, variant)) ||
    # Not including variant
    fun.(locale_name_from(language, script, territory, nil)) ||
    fun.(locale_name_from(language, nil, territory, nil)) ||
    fun.(locale_name_from(language, script, nil, nil)) ||
    fun.(locale_name_from(language, nil, nil, nil)) || nil
end
@doc """
Normalize the casing of a locale name.
## Options
* `locale_name` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct
## Returns
* The normalized locale name as a `String.t`
## Method
Locale names are case insensitive but certain common
casing is followed in practise:
* lower case for a language
* capital case for a script
* upper case for a region/territory
**Note** this function is intended to support only the CLDR
locale names which have a format that is a subset of the full
langauge tag specification.
For proper parsing of local names and language tags, see
`Cldr.Locale.canonical_language_tag/2`
## Examples
iex> Cldr.Locale.normalize_locale_name "zh_hant"
"zh-Hant"
iex> Cldr.Locale.normalize_locale_name "en_us"
"en-US"
iex> Cldr.Locale.normalize_locale_name "EN"
"en"
iex> Cldr.Locale.normalize_locale_name "ca_es_valencia"
"ca-ES-VALENCIA"
"""
@spec normalize_locale_name(locale_name) :: locale_name
def normalize_locale_name(locale_name) when is_binary(locale_name) do
case String.split(locale_name, ~r/[-_]/) do
[lang, other] ->
if String.length(other) == 4 do
String.downcase(lang) <> "-" <> String.capitalize(other)
else
String.downcase(lang) <> "-" <> String.upcase(other)
end
[lang, script, region] ->
# Its a lang-script-region
# Its lang-region-variant
if String.length(script) == 4 do
String.downcase(lang) <>
"-" <> String.capitalize(script) <> "-" <> String.upcase(region)
else
String.downcase(lang) <> "-" <> String.upcase(script) <> "-" <> String.upcase(region)
end
[lang] ->
String.downcase(lang)
_ ->
locale_name_from_posix(locale_name)
end
end
@doc """
Return a locale name from a `Cldr.LanguageTag`
## Options
* `locale_name` is any `Cldr.LanguageTag` struct returned by
`Cldr.Locale.new!/2`
## Example
iex> Cldr.Locale.locale_name_from Cldr.Locale.new!("en", TestBackend.Cldr)
"en-Latn-US"
"""
@spec locale_name_from(Cldr.LanguageTag.t()) :: locale_name()
def locale_name_from(%LanguageTag{
language: language,
script: script,
territory: territory,
language_variant: variant
}) do
locale_name_from(language, script, territory, variant)
end
@doc """
Return a locale name by combining language, script, territory and variant
parameters
## Arguments
* `language`, `script`, `territory` and `variant` are string
representations, or `nil`, of the language subtags
## Returns
* The locale name constructed from the non-nil arguments joined
by a "-"
## Example
iex> Cldr.Locale.locale_name_from("en", "Latn", "001", nil)
"en-Latn-001"
"""
@spec locale_name_from(language(), script(), territory(), variant()) :: locale_name()
def locale_name_from(language, script, territory, variant) do
[language, script, territory, variant]
|> Enum.reject(&is_nil/1)
|> Enum.join("-")
end
@doc """
Substitute deprectated subtags with a `Cldr.LanguageTag` with their
non-deprecated alternatives.
## Arguments
* `language_tag` is any language tag returned by `Cldr.Locale.new/2`
## Method
* Replace any deprecated subtags with their canonical values using the alias
data. Use the first value in the replacement list, if
it exists. Language tag replacements may have multiple parts, such as
`sh` ➞ `sr_Latn` or `mo` ➞ `ro_MD`. In such a case, the original script and/or
region/territory are retained if there is one. Thus `sh_Arab_AQ` ➞ `sr_Arab_AQ`, not
`sr_Latn_AQ`.
* Remove the script code 'Zzzz' and the territory code 'ZZ' if they occur.
* Get the components of the cleaned-up source tag (languages, scripts, and
regions/territories), plus any variants and extensions.
## Example
iex> Cldr.Locale.substitute_aliases Cldr.LanguageTag.Parser.parse!("mo")
%Cldr.LanguageTag{
canonical_locale_name: nil,
cldr_locale_name: nil,
extensions: %{},
gettext_locale_name: nil,
language: "ro",
language_subtags: [],
language_variant: nil,
locale: %{}, private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "mo",
script: nil, transform: %{},
territory: nil
}
"""
def substitute_aliases(%LanguageTag{} = language_tag) do
language_tag
|> substitute(:language)
|> substitute(:script)
|> substitute(:territory)
|> merge_language_tags(language_tag)
|> remove_unknown(:script)
|> remove_unknown(:territory)
end
defp substitute(%LanguageTag{language: language}, :language) do
aliases(language, :language) || %LanguageTag{}
end
defp substitute(%LanguageTag{script: script} = language_tag, :script) do
%{language_tag | script: aliases(script, :script) || script}
end
defp substitute(%LanguageTag{territory: territory} = language_tag, :territory) do
%{language_tag | territory: aliases(territory, :region) || territory}
end
defp merge_language_tags(alias_tag, original_language_tag) do
Map.merge(alias_tag, original_language_tag, fn
:language, v_alias, v_original ->
if empty?(v_alias), do: v_original, else: v_alias
_k, v_alias, v_original ->
if empty?(v_original), do: v_alias, else: v_original
end)
end
defp remove_unknown(%LanguageTag{script: "Zzzz"} = language_tag, :script) do
%{language_tag | script: nil}
end
defp remove_unknown(%LanguageTag{} = language_tag, :script), do: language_tag
defp remove_unknown(%LanguageTag{territory: "ZZ"} = language_tag, :territory) do
%{language_tag | territory: nil}
end
defp remove_unknown(%LanguageTag{} = language_tag, :territory), do: language_tag
@doc """
Replace empty subtags within a `t:Cldr.LanguageTag.t/0` with the most likely
subtag.
## Options
* `language_tag` is any language tag returned by `Cldr.Locale.new/2`
A subtag is called empty if it has a missing script or territory subtag, or it is
a base language subtag with the value `und`. In the description below,
a subscript on a subtag x indicates which tag it is from: x<sub>s</sub> is in the
source, x<sub>m</sub> is in a match, and x<sub>r</sub> is in the final result.
## Lookup
Lookup each of the following in order, and stops on the first match:
* language<sub>s</sub>-script<sub>s</sub>-region<sub>s</sub>
* language<sub>s</sub>-region<sub>s</sub>
* language<sub>s</sub>-script<sub>s</sub>
* language<sub>s</sub>
* und-script<sub>s</sub>
## Returns
* If there is no match,either return
* an error value, or
* the match for `und`
* Otherwise there is a match = language<sub>m</sub>-script<sub>m</sub>-region<sub>m</sub>
* Let x<sub>r</sub> = x<sub>s</sub> if x<sub>s</sub> is not empty, and x<sub>m</sub> otherwise.
* Return the language tag composed of language<sub>r</sub>-script<sub>r</sub>-region<sub>r</sub> + variants + extensions .
## Example
iex> Cldr.Locale.add_likely_subtags Cldr.LanguageTag.parse!("zh-SG")
%Cldr.LanguageTag{
canonical_locale_name: nil,
cldr_locale_name: nil,
language_subtags: [],
extensions: %{},
gettext_locale_name: nil,
language: "zh",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "zh-SG",
script: "Hans",
territory: "SG",
transform: %{},
language_variant: nil
}
"""
def add_likely_subtags(
%LanguageTag{language: language, script: script, territory: territory} = language_tag
) do
subtags =
likely_subtags(locale_name_from(language, script, territory, nil)) ||
likely_subtags(locale_name_from(language, nil, territory, nil)) ||
likely_subtags(locale_name_from(language, script, nil, nil)) ||
likely_subtags(locale_name_from(language, nil, nil, nil)) ||
likely_subtags(locale_name_from("und", script, nil, nil)) ||
likely_subtags(locale_name_from("und", nil, nil, nil))
Map.merge(subtags, language_tag, fn _k, v1, v2 -> if empty?(v2), do: v1, else: v2 end)
end
@doc """
Returns an error tuple for an invalid locale.
## Arguments
* `locale_name` is any locale name returned by `Cldr.known_locale_names/1`
## Returns
* `{:error, {Cldr.UnknownLocaleError, message}}`
## Examples
iex> Cldr.Locale.locale_error :invalid
{Cldr.UnknownLocaleError, "The locale :invalid is not known."}
"""
@spec locale_error(locale_name() | LanguageTag.t()) :: {Cldr.UnknownLocaleError, String.t()}
def locale_error(%LanguageTag{requested_locale_name: requested_locale_name}) do
locale_error(requested_locale_name)
end
def locale_error(locale_name) do
{Cldr.UnknownLocaleError, "The locale #{inspect(locale_name)} is not known."}
end
@doc """
Returns an error tuple for an invalid gettext locale.
## Options
* `locale_name` is any locale name returned by `Cldr.known_gettext_locale_names/1`
## Returns
* `{:error, {Cldr.UnknownLocaleError, message}}`
## Examples
iex> Cldr.Locale.gettext_locale_error :invalid
{Cldr.UnknownLocaleError, "The gettext locale :invalid is not known."}
"""
@spec gettext_locale_error(locale_name() | LanguageTag.t()) ::
{Cldr.UnknownLocaleError, String.t()}
def gettext_locale_error(%LanguageTag{gettext_locale_name: gettext_locale_name}) do
gettext_locale_error(gettext_locale_name)
end
def gettext_locale_error(locale_name) do
{Cldr.UnknownLocaleError, "The gettext locale #{inspect(locale_name)} is not known."}
end
@doc """
Returns the map of likely subtags.
Note that not all locales are guaranteed
to have likely subtags.
## Example
Cldr.Locale.likely_subtags
%{
"bez" => %Cldr.LanguageTag{
canonical_locale_name: nil,
cldr_locale_name: nil,
extensions: %{},
language: "bez",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: nil,
script: "Latn",
territory: "TZ",
transform: %{},
language_variant: nil
},
"fuf" => %Cldr.LanguageTag{
canonical_locale_name: nil,
cldr_locale_name: nil,
extensions: %{},
language: "fuf",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: nil,
script: "Latn",
territory: "GN",
transform: %{},
language_variant: nil
},
...
"""
@likely_subtags Cldr.Config.likely_subtags()
def likely_subtags do
@likely_subtags
end
@doc """
Returns the likely substags, as a `Cldr.LanguageTag`,
for a given locale name.
## Options
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct
## Examples
iex> Cldr.Locale.likely_subtags "en"
%Cldr.LanguageTag{
canonical_locale_name: nil,
cldr_locale_name: nil,
extensions: %{},
gettext_locale_name: nil,
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: nil,
requested_locale_name: "en-Latn-US",
script: "Latn",
territory: "US",
transform: %{},
language_variant: nil
}
"""
@spec likely_subtags(locale_name) :: LanguageTag.t() | nil
def likely_subtags(locale_name) when is_binary(locale_name) do
Map.get(likely_subtags(), locale_name)
end
def likely_subtags(%LanguageTag{requested_locale_name: requested_locale_name}) do
likely_subtags(requested_locale_name)
end
@doc """
Return a map of the known aliases for Language, Script and Territory
"""
@aliases Cldr.Config.aliases()
@spec aliases :: map()
def aliases do
@aliases
end
@doc """
Return a map of the aliases for a given alias key and type
## Options
* `type` is one of `[:language, :region, :script, :variant, :zone]`
* `key` is the substitution key (a language, region, script, variant or zone)
"""
@alias_keys Map.keys(@aliases)
@spec aliases(locale_name(), atom()) :: map() | nil
def aliases(key, type) when type in @alias_keys do
aliases()
|> Map.get(type)
|> Map.get(key)
end
@doc """
Returns an error tuple for an invalid locale alias.
## Options
* `locale_name` is any locale name returned by `Cldr.known_locale_names/1`
"""
@spec alias_error(locale_name() | LanguageTag.t(), String.t()) ::
{Cldr.UnknownLocaleError, String.t()}
def alias_error(locale_name, alias_name) when is_binary(locale_name) do
{
Cldr.UnknownLocaleError,
"The locale #{inspect(locale_name)} and its " <>
"alias #{inspect(alias_name)} are not known."
}
end
def alias_error(%LanguageTag{requested_locale_name: requested_locale_name}, alias_name) do
alias_error(requested_locale_name, alias_name)
end
# Nothing to validate when the tag carries no territory.
defp check_valid_territory(%LanguageTag{territory: nil} = language_tag), do: language_tag

# Keeps the territory string only when it names a known territory atom;
# otherwise (unknown name, or no such atom exists) it is replaced by nil.
defp check_valid_territory(%LanguageTag{territory: territory} = language_tag) do
  validated =
    try do
      # `if` without `else` yields nil for unknown territories.
      if String.to_existing_atom(territory) in Cldr.known_territories(), do: territory
    rescue
      # String.to_existing_atom/1 raises when the atom was never created.
      ArgumentError -> nil
    end

  %{language_tag | territory: validated}
end
end
|
lib/cldr/locale.ex
| 0.926416
| 0.739322
|
locale.ex
|
starcoder
|
defmodule Borsh do
  @moduledoc """
  BORSH, binary serializer for security-critical projects.

  Borsh stands for `Binary` `Object` `Representation` `Serializer` for `Hashing`.
  It is meant to be used in security-critical projects as it prioritizes consistency,
  safety, speed; and comes with a strict specification.

  In short, Borsh is a non self-describing binary serialization format. It is designed
  to serialize any objects to canonical and deterministic set of bytes.

  General principles:

  - integers are little endian;
  - sizes of dynamic containers are written before values as u32;
  - all unordered containers (hashmap/hashset) are ordered in lexicographic order by key
    (in tie breaker case on value);
  - structs are serialized in the order of fields in the struct;
  - enums are serialized with using u8 for the enum ordinal and then storing data inside
    the enum value (if present).

  This is Elixir implementation of the serializer.
  Official specification: https://github.com/near/borsh#specification

  ## Usage

  ```elixir
  use Borsh,
    schema: [
      signer_id: :string,
      public_key: :borsh,
      nonce: :u64,
      receiver_id: :string,
      block_hash: [32],
      actions: [:borsh]
    ]
  ```

  ### Options

  `schema`:
  Borsh schema itself, structure of fields for serialisation with serialisation formats.

  ### Borsh literal formats

  `:string` - String representation of a value. Borsh encodes it as is, with a
  little-endian 32bit (4 bytes) header of a string byte size

  `:borsh` - Struct of the borsh-ed module. The serializer will take this struct and
  executes struct's module `.borsh_encode` against this struct and assign binary result
  to the literal.

  `[:borsh]` - Enum of borsh-ed structs. Each element of this list of `:borsh` struct
  must have a Borsh schema

  `:u64` - Unsigned integer 64-bit size. There are also `:u8`, `:u16`, `:u32` and `:u128`

  `[32]` or `[64]` - A string with 32/64 chars length.
  """

  defmacro __using__(opts) do
    schema = opts[:schema]

    quote do
      # Marker so other code can detect Borsh-serialisable modules.
      def is_borsh, do: true

      # The schema this module was compiled with, in declaration order.
      def borsh_schema do
        unquote(schema)
      end

      @doc """
      Encodes objects according to the schema into the bytestring.

      Fields are encoded in schema order and concatenated; the result is a
      single binary.
      """
      @spec borsh_encode(obj :: map()) :: bitstring()
      def borsh_encode(obj) do
        # Plain Enum.map replaces the previous map_reduce with `acc ++ [..]`,
        # which rebuilt the accumulator list on every field (O(n²)).
        borsh_schema()
        |> Enum.map(&extract_encode_item(obj, &1))
        |> List.flatten()
        |> :erlang.list_to_binary()
      end

      # Encode

      # Looks the field up on the object and encodes it per its schema format.
      defp extract_encode_item(obj, {key, format}) do
        obj
        |> Map.get(key)
        |> encode_item({key, format})
      end

      # Enum of borsh-ed structs: u32 little-endian element count, then each
      # element encoded by its own module.
      defp encode_item(value, {_key, [:borsh]}) do
        [
          # 4 bytes binary length of the List
          value |> length() |> binarify(32),
          Enum.map(value, fn item -> item.__struct__.borsh_encode(item) end)
        ]
      end

      # Single borsh-ed struct: delegate to its module's encoder.
      defp encode_item(value, {_key, :borsh}) do
        value.__struct__.borsh_encode(value)
      end

      # Fixed-length 32/64 char string: emitted as-is, no length header.
      # TODO: add string length validation
      defp encode_item(value, {_key, format}) when format in [[32], [64]], do: value

      # Unsigned integers given as strings are parsed first, then re-dispatched.
      defp encode_item(value, {key, size})
           when size in [:u8, :u16, :u32, :u64, :u128] and is_binary(value) do
        value
        |> String.to_integer()
        |> encode_item({key, size})
      end

      # Unsigned integer: little-endian, width taken from the format atom.
      defp encode_item(value, {_key, size}) when size in [:u8, :u16, :u32, :u64, :u128] do
        binarify(value, convert_size(size))
      end

      # Dynamic string: u32 little-endian byte size header, then the bytes.
      defp encode_item(string_value, {_key, :string}) do
        # 4 bytes of the string length
        [string_value |> byte_size() |> binarify(32), string_value]
      end

      # Encodes an integer as `size` bits, little-endian, unsigned.
      defp binarify(int_value, size \\ 32) do
        <<int_value::size(size)-integer-unsigned-little>>
      end

      # :u8 -> 8, :u16 -> 16, ... :u128 -> 128.
      defp convert_size(size) do
        size |> Atom.to_string() |> String.trim_leading("u") |> String.to_integer()
      end
    end
  end
end
|
lib/borsh.ex
| 0.852383
| 0.925903
|
borsh.ex
|
starcoder
|
defmodule Interp.SubprogramInterp do
  @moduledoc """
  Dispatch for commands whose behaviour is parameterised by a nested
  subprogram (`subcommands`): loops, maps, filters, sorts, reductions and
  recursive list generation. Each branch pops its arguments from the stack,
  runs the subprogram as needed, and returns `{stack, environment}`.
  """

  alias Interp.Stack
  alias Interp.Interpreter
  alias Interp.Globals
  alias Commands.ListCommands
  alias Commands.GeneralCommands
  import Interp.Functions
  use Bitwise

  @doc """
  Executes the subprogram command `op` with body `subcommands` against
  `stack`/`environment`, returning `{new_stack, new_environment}`.

  Loop-style branches save `environment.range_variable` (and
  `range_element` where used) before looping and restore it afterwards, so
  nested loops do not clobber the outer N/y values.
  """
  def interp_step(op, subcommands, stack, environment) do
    case op do
      # For N in range [0, n)
      "F" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        current_n = environment.range_variable
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 0, to_integer!(a) - 1)
        {new_stack, %{new_env | range_variable: current_n}}

      # For N in range [1, n]
      "E" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        current_n = environment.range_variable
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 1, to_integer!(a))
        {new_stack, %{new_env | range_variable: current_n}}

      # For N in range [1, n)
      "G" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        current_n = environment.range_variable
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 1, to_integer!(a) - 1)
        {new_stack, %{new_env | range_variable: current_n}}

      # For N in range [0, n]
      "ƒ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        current_n = environment.range_variable
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 0, to_integer!(a))
        {new_stack, %{new_env | range_variable: current_n}}

      # Infinite loop
      "[" ->
        current_n = environment.range_variable
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 0, :infinity)
        {new_stack, %{new_env | range_variable: current_n}}

      # Iterate through string
      "v" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        current_n = environment.range_variable
        current_y = environment.range_element
        {new_stack, new_env} = GeneralCommands.loop(subcommands, stack, environment, 0, if is_iterable(a) do a else to_non_number(a) end)
        {new_stack, %{new_env | range_variable: current_n, range_element: current_y}}

      # Filter by
      # Keeps the elements for which the subprogram yields exactly 1.
      "ʒ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Stream.transform(environment, fn ({x, index}, curr_env) ->
                      {result_stack, new_env} = Interpreter.interp(subcommands, %Stack{elements: [x]}, %{curr_env | range_variable: index, range_element: x})
                      {result, _, new_env} = Stack.pop(result_stack, new_env)
                      case to_number(result) do
                        1 -> {[x], new_env}
                        _ -> {[], new_env}
                      end
                    end)
                 |> Stream.map(fn x -> x end)
                 |> Globals.lazy_safe
        {Stack.push(stack, normalize_to(result, a)), environment}

      # Filter by command
      "w" ->
        interp_step("ʒ", subcommands, stack, environment)

      # Map for each
      # Pushes the list of the subprogram's single result per element.
      "ε" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Stream.transform(environment, fn ({x, index}, curr_env) ->
                      {result_stack, new_env} = Interpreter.interp(subcommands, %Stack{elements: [x]}, %{curr_env | range_variable: index, range_element: x})
                      {result, _, new_env} = Stack.pop(result_stack, new_env)
                      {[result], new_env} end)
                 |> Stream.map(fn x -> x end)
                 |> Globals.lazy_safe
        {Stack.push(stack, result), environment}

      # Sort by (finite lists only)
      # Decorates each element with the subprogram's evaluated result,
      # sorts on that key, then strips the decoration.
      "Σ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Stream.transform(environment, fn ({x, index}, curr_env) ->
                      {result_stack, new_env} = Interpreter.interp(subcommands, %Stack{elements: [x]}, %{curr_env | range_variable: index, range_element: x})
                      {result, _, new_env} = Stack.pop(result_stack, new_env)
                      {[{eval(result), x}], new_env} end)
                 |> Enum.sort_by(fn {a, _} -> a end)
                 |> Stream.map(fn {_, x} -> x end)
                 |> Globals.lazy_safe
        {Stack.push(stack, normalize_to(result, a)), environment}

      # Run until a doesn't change
      "Δ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        {result, new_env} = GeneralCommands.run_while(a, subcommands, environment, 0)
        {Stack.push(stack, result), new_env}

      # Run until a doesn't change and return all intermediate results
      ".Γ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        {result, new_env} = GeneralCommands.run_while(a, subcommands, environment, 0, [])
        {Stack.push(stack, result), new_env}

      # Find first
      # Pushes the first element for which the subprogram yields 1, or -1.
      ".Δ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Enum.find(-1, fn {x, index} ->
                      result = Interpreter.flat_interp(subcommands, [x], %{environment | range_variable: index, range_element: x})
                      GeneralCommands.equals(result, 1) end)
        case result do
          {res, _} -> {Stack.push(stack, res), environment}
          _ -> {Stack.push(stack, -1), environment}
        end

      # Find first index
      "ÅΔ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Enum.find_index(fn {x, index} ->
                      result = Interpreter.flat_interp(subcommands, [x], %{environment | range_variable: index, range_element: x})
                      GeneralCommands.equals(result, 1) end)
        result = case result do
          nil -> -1
          _ -> result
        end
        {Stack.push(stack, result), environment}

      # Counter variable loop
      # NOTE(review): unlike the other loops, range_variable is not restored
      # here — confirm this is intended before changing.
      "µ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        GeneralCommands.counter_loop(subcommands, stack, environment, 1, to_integer!(a))

      # Map for each
      # Unlike "ε", this splices the subprogram's whole result stack in.
      "€" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Stream.transform(environment, fn ({x, index}, curr_env) ->
                      {result_stack, new_env} = Interpreter.interp(subcommands, %Stack{elements: [x]}, %{curr_env | range_variable: index, range_element: x})
                      {result_stack.elements, new_env} end)
                 |> Stream.map(fn x -> x end)
                 |> Globals.lazy_safe
        {Stack.push(stack, result), environment}

      # 2-arity map for each
      # Two numbers become 1..a and 1..b ranges; then the subprogram is
      # applied over the cartesian product (both iterable), or zipped with
      # the scalar side held fixed.
      "δ" ->
        {b, stack, environment} = Stack.pop(stack, environment)
        {a, stack, environment} = Stack.pop(stack, environment)
        {a, b} = cond do
          is_number?(a) and is_number?(b) -> {1..to_integer!(a), 1..to_integer!(b)}
          true -> {a, b}
        end
        result = cond do
          is_iterable(a) and is_iterable(b) -> a |> Stream.with_index |> Stream.map(fn {x, x_index} -> Stream.map(b |> Stream.with_index, fn {y, y_index} ->
            Interpreter.flat_interp(subcommands, [x, y], %{environment | range_variable: [y_index, x_index], range_element: [x, y]}) end) end)
          is_iterable(a) -> a |> Stream.with_index |> Stream.map(fn {x, x_index} -> Interpreter.flat_interp(subcommands, [x, b], %{environment | range_variable: x_index, range_element: x}) end)
          is_iterable(b) -> b |> Stream.with_index |> Stream.map(fn {y, y_index} -> Interpreter.flat_interp(subcommands, [a, y], %{environment | range_variable: y_index, range_element: y}) end)
          true -> Interpreter.flat_interp(subcommands, [a, b], environment)
        end
        {Stack.push(stack, Globals.lazy_safe(result)), environment}

      # Pairwise command
      # With a single numeric subcommand: overlapping chunks of that size;
      # otherwise: apply the subprogram to each adjacent pair.
      "ü" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = case subcommands do
          [{:number, number}] ->
            cond do
              is_iterable(a) ->
                a |> Stream.chunk_every(to_integer!(number), 1, :discard)
                  |> Stream.map(fn x -> x end)
              true ->
                String.graphemes(to_string(a)) |> Stream.chunk_every(to_integer!(number), 1, :discard)
                                               |> Stream.map(fn x -> Enum.join(Enum.to_list(x), "") end)
            end
          _ -> to_list(a) |> Stream.chunk_every(2, 1, :discard)
                          |> Stream.map(fn [x, y] -> Interpreter.flat_interp(subcommands, [x, y], environment) end)
        end
        {Stack.push(stack, Globals.lazy_safe(result)), environment}

      # Recursive list generation
      # Pops the base cases, peels off an optional mode flag from the front
      # of the subprogram, and builds the infinite recursive sequence.
      "λ" ->
        {base_cases, stack, environment} = Stack.pop(stack, environment)

        # If there are no base cases specified, assume that a(0) = 1
        base_cases = cond do
          base_cases == [] or base_cases == "" or base_cases == nil -> [1]
          is_iterable(base_cases) -> Enum.to_list to_number(base_cases)
          true -> [to_number(base_cases)]
        end

        {flag, subcommands} = case subcommands do
          [{_, "j"} | remaining] -> {:contains, remaining}
          [{_, "£"} | remaining] -> {:first_n, remaining}
          [{_, "è"} | remaining] -> {:at_n, remaining}
          _ -> {:normal, subcommands}
        end

        result = ListCommands.listify(0, :infinity) |> Stream.map(fn x -> GeneralCommands.recursive_program(subcommands, base_cases, x) end) |> Globals.lazy_safe

        case flag do
          :normal -> {Stack.push(stack, result), environment}
          :contains ->
            {b, stack, environment} = Stack.pop(stack, environment)
            {Stack.push(stack, to_number(ListCommands.increasing_contains(result, to_number(b)))), environment}
          :first_n ->
            {b, stack, environment} = Stack.pop(stack, environment)
            {Stack.push(stack, ListCommands.take_first(result, to_integer(b))), environment}
          :at_n ->
            {b, stack, environment} = Stack.pop(stack, environment)
            {Stack.push(stack, GeneralCommands.element_at(result, to_integer(b))), environment}
        end

      # Group by function
      # Chunks adjacent elements that map to the same subprogram result;
      # non-iterable input gets its chunks re-joined into strings.
      ".γ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a)
                 |> Stream.with_index
                 |> Stream.chunk_by(
                      fn {x, index} ->
                        {result_stack, _} = Interpreter.interp(subcommands, %Stack{elements: [x]}, %{environment | range_variable: index, range_element: x})
                        {result_elem, _, _} = Stack.pop(result_stack, environment)
                        to_number(result_elem)
                      end)
                 |> Stream.map(fn x -> x |> Stream.map(fn {element, _} -> element end) end)
                 |> Globals.lazy_safe
        result = cond do
          is_iterable(a) -> result
          true -> result |> Stream.map(fn x -> Enum.join(x, "") end)
        end
        {Stack.push(stack, result), environment}

      # Split with
      # First pass collects each distinct subprogram outcome (in first-seen
      # order); second pass groups the elements per outcome.
      ".¡" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        a = to_list(a)
        result = a
                 |> Stream.with_index
                 |> Stream.transform([],
                      fn ({x, index}, acc) ->
                        result_elem = Interpreter.flat_interp(subcommands, [x], %{environment | range_variable: index, range_element: x})
                        if Enum.any?(acc, fn n -> GeneralCommands.equals(n, result_elem) end) do {[], acc} else {[result_elem], [result_elem | acc]} end
                      end)
                 |> Stream.map(
                      fn outcome ->
                        a |> Stream.with_index
                          |> Stream.filter(fn {element, index} -> GeneralCommands.equals(
                               Interpreter.flat_interp(subcommands, [element], %{environment | range_variable: index, range_element: element}),
                               outcome)
                             end)
                          |> Stream.map(fn {element, _} -> element end)
                      end)
                 |> Globals.lazy_safe
        {Stack.push(stack, result), environment}

      # Left reduce
      ".»" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a) |> Enum.reduce(fn (x, acc) -> Interpreter.flat_interp(subcommands, [acc, x], environment) end)
        {Stack.push(stack, result), environment}

      # Right reduce
      ".«" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a) |> Enum.reverse |> Enum.reduce(fn (x, acc) -> Interpreter.flat_interp(subcommands, [x, acc], environment) end)
        {Stack.push(stack, result), environment}

      # Cumulative left reduce
      "Å»" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a) |> Enum.scan(fn (x, acc) -> Interpreter.flat_interp(subcommands, [acc, x], environment) end)
        {Stack.push(stack, result), environment}

      # Cumulative right reduce
      "Å«" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = to_list(a) |> Enum.reverse |> Enum.scan(fn (x, acc) -> Interpreter.flat_interp(subcommands, [x, acc], environment) end) |> Enum.reverse
        {Stack.push(stack, result), environment}

      # Map function on every nth element
      "Å€" ->
        {b, stack, environment} = Stack.pop(stack, environment)
        {a, stack, environment} = Stack.pop(stack, environment)
        result = GeneralCommands.map_every(subcommands, environment, to_list(a), to_integer(b))
        {Stack.push(stack, result), environment}

      # Permute by function
      ".æ" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        result = ListCommands.permute_by_function(Enum.to_list(to_list(a)), subcommands, environment)
        {Stack.push(stack, result), environment}

      # Split on function
      # Runs the subprogram on each adjacent pair; truthy outcomes mark
      # split points (index 0 is always prepended as a split).
      ".¬" ->
        {a, stack, environment} = Stack.pop(stack, environment)
        a = to_list(a)
        result = a |> Stream.chunk_every(2, 1, :discard)
                   |> Stream.with_index
                   |> Stream.map(fn {items, index} -> Interpreter.flat_interp(subcommands, items, %{environment | range_variable: index, range_element: items}) end)
                   |> Globals.lazy_safe
        {Stack.push(stack, ListCommands.split_on_truthy_indices(a, Stream.concat([0], result))), environment}

      # Apply function at indices
      # `b` is an (infinitely 0-padded) mask: the subprogram is applied only
      # where the mask element equals 1; other elements pass through.
      "ÅÏ" ->
        {b, stack, environment} = Stack.pop(stack, environment)
        b = Stream.concat(to_list(b), Stream.cycle([0]))
        {a, stack, environment} = Stack.pop(stack, environment)
        a_list = to_list(a)
        result = a_list |> Stream.zip(b)
                        |> Stream.with_index
                        |> Stream.map(
                             fn {{item, val}, index} ->
                               if GeneralCommands.equals(val, 1) do
                                 Interpreter.flat_interp(subcommands, [item], %{environment | range_variable: index, range_element: [item]})
                               else
                                 item
                               end
                             end)
                        |> Globals.lazy_safe
        {Stack.push(stack, normalize_to(result, a)), environment}
    end
  end
end
|
lib/interp/commands/subprogram_interp.ex
| 0.601242
| 0.464234
|
subprogram_interp.ex
|
starcoder
|
defmodule Jobbit do
  @moduledoc """
  Supervised, unlinked tasks with simplified yield/shutdown semantics.

  `Jobbit` wraps `Task.Supervisor.async_nolink/3,5`: tasks run unlinked from
  the caller, and `yield/2` collapses the many possible
  `Task.yield/2`/`Task.shutdown/2` outcomes into `:ok`, an ok/error tuple the
  task returned, a plain `{:ok, value}`, or
  `{:error, TimeoutError.t() | TaskError.t() | ExitError.t()}`.
  """

  alias Jobbit.Configuration
  alias Jobbit.ExitError
  alias Jobbit.TimeoutError
  alias Jobbit.TaskError

  @type supervisor_t :: Task.Supervisor
  @type supervisor :: atom() | pid() | {atom, any} | {:via, atom, any}
  @type args :: list(any)
  @type closure :: (() -> any())
  @type option :: Task.Supervisor.option()
  @type on_start :: Task.Supervisor.on_start()
  @type func_name :: atom()
  @type error :: TimeoutError.t() | TaskError.t() | ExitError.t()
  @type result :: :ok | {:ok, any()} | {:error, any()} | tuple() | {:error, error}
  @type shutdown :: :brutal_kill | :infinity | non_neg_integer()

  @enforce_keys [:task]
  @type t :: %Jobbit{task: Task.t()}
  defstruct [:task]

  @doc """
  The child spec for a Jobbit.

  The `child_spec` for a `Jobbit` task supervisor. This `child_spec`
  forwards the provided `jobbit_opts` to `Jobbit.start_link/1` when
  the `child_spec` is applied as a child by a supervisor.
  """
  @spec child_spec([option]) :: Supervisor.child_spec()
  def child_spec(jobbit_opts \\ []) do
    %{
      id: Jobbit,
      start: {Jobbit, :start_link, [jobbit_opts]},
      type: :supervisor,
      restart: :permanent,
      shutdown: 1_000,
    }
  end

  @doc """
  Starts a `Task.Supervisor`, forwarding the provided `option` list.

  ## Examples

      iex> {:ok, pid} = Jobbit.start_link()
      iex> is_pid(pid)
      true

      iex> {:ok, pid} = Jobbit.start_link(name: :jobbit_test_task_sup)
      iex> is_pid(pid)
      true
      iex> Process.whereis(:jobbit_test_task_sup) == pid
      true
  """
  @spec start_link([option]) :: on_start()
  def start_link(opts \\ []), do: Task.Supervisor.start_link(opts)

  @doc """
  Runs the given `closure` as a task on the given `supervisor`.

  The task runs as an unlinked, asynchronous, task process supervised by the
  `supervisor` (default: the configured default supervisor).

  See `Task.Supervisor.async_nolink/3` for `opts` details.
  """
  @spec async(supervisor, closure(), Keyword.t()) :: t()
  def async(supervisor \\ default_supervisor(), closure, opts \\ [])

  def async(supervisor, closure, opts) when is_function(closure, 0) do
    wrap(Task.Supervisor.async_nolink(supervisor, closure, opts))
  end

  @doc """
  Runs the given `module`, `func`, and `args` as a task on the given `supervisor`.

  The task runs as an unlinked, asynchronous, task process supervised by the
  `supervisor` (default: the configured default supervisor).

  See `Task.Supervisor.async_nolink/5` for `opts` details.
  """
  @spec async_apply(supervisor(), module(), func_name(), args(), opts :: Keyword.t()) :: t()
  def async_apply(supervisor \\ default_supervisor(), module, func, args, opts \\ []) do
    wrap(Task.Supervisor.async_nolink(supervisor, module, func, args, opts))
  end

  defguardp is_ok_tuple(t) when is_tuple(t) and elem(t, 0) == :ok
  defguardp is_error_tuple(t) when is_tuple(t) and elem(t, 0) == :error

  @doc """
  Synchronously blocks the caller waiting for the Jobbit task to finish.

  Calls both `Task.yield/2` and, on timeout, `Task.shutdown/1`, so the task
  never outlives its timeout. Exactly one of four outcomes results:

    - success: the task's own `:ok`/ok-tuple/error-tuple is returned as-is;
      any other return value comes back as `{:ok, value}`.
    - exception: the task process crashed — `{:error, TaskError.t()}`,
      wrapping the exception and stacktrace.
    - timeout: the task took too long and was shut down —
      `{:error, TimeoutError.t()}`.
    - exit: the task was terminated by an exit signal —
      `{:error, ExitError.t()}`.
  """
  @spec yield(Jobbit.t(), timeout) :: result()
  def yield(%Jobbit{task: task}, timeout \\ 5_000) do
    case Task.yield(task, timeout) do
      # Not finished in time: stop it and interpret whatever shutdown saw.
      nil -> handle_result(Task.shutdown(task), task, timeout)
      outcome -> handle_result(outcome, task, timeout)
    end
  end

  @doc """
  Shuts down a Jobbit task.
  """
  @spec shutdown(t(), shutdown()) :: result()
  def shutdown(%Jobbit{task: task}, shutdown \\ 5_000) do
    case Task.shutdown(task, shutdown) do
      nil -> :ok
      outcome -> handle_result(outcome, task, nil)
    end
  end

  @spec default_supervisor :: atom()
  def default_supervisor, do: Configuration.default_supervisor()

  defp wrap(%Task{} = task), do: %Jobbit{task: task}

  # Collapses Task.yield/Task.shutdown outcomes into a `result()`.
  # The task returned :ok itself.
  defp handle_result({:ok, :ok}, _task, _timeout), do: :ok
  # The task returned an ok- or error-tuple: pass it through unchanged.
  defp handle_result({:ok, inner}, _task, _timeout) when is_ok_tuple(inner), do: inner
  defp handle_result({:ok, inner}, _task, _timeout) when is_error_tuple(inner), do: inner
  # Any other finished value arrives as {:ok, value} and is returned as such.
  defp handle_result(outcome, _task, _timeout) when is_ok_tuple(outcome), do: outcome
  # nil from yield+shutdown means the task never finished within `timeout`.
  defp handle_result(nil, task, timeout), do: {:error, TimeoutError.build(task, timeout)}
  # Crash with an exception and stacktrace.
  defp handle_result({:exit, {exception, stacktrace}}, task, _timeout) do
    {:error, TaskError.build(task, exception, stacktrace)}
  end
  # Plain exit signal (e.g. :killed).
  defp handle_result({:exit, signal}, task, _timeout) when is_atom(signal) do
    {:error, ExitError.build(task, signal)}
  end
end
|
lib/jobbit.ex
| 0.861203
| 0.474631
|
jobbit.ex
|
starcoder
|
defmodule ReWeb.Types.Interest do
  @moduledoc """
  GraphQL types for interests
  """
  use Absinthe.Schema.Notation

  import Absinthe.Resolution.Helpers, only: [dataloader: 1]

  alias ReWeb.Resolvers.Interests, as: InterestsResolver

  # Output type: a registered interest in a listing, including the UTM-style
  # campaign/medium/source attribution fields (current and initial).
  object :interest do
    field :id, :id
    field :uuid, :uuid
    field :name, :string
    field :email, :string
    field :phone, :string
    field :message, :string
    field :campaign, :string
    field :medium, :string
    field :source, :string
    field :initial_campaign, :string
    field :initial_medium, :string
    field :initial_source, :string
    # Listing is batch-loaded through dataloader to avoid N+1 queries.
    field :listing, :listing, resolve: dataloader(Re.Listings)
  end

  # Input for interest_create; only the listing reference is mandatory.
  input_object :interest_input do
    field :name, :string
    field :phone, :string
    field :email, :string
    field :message, :string
    field :campaign, :string
    field :medium, :string
    field :source, :string
    field :initial_campaign, :string
    field :initial_medium, :string
    field :initial_source, :string
    field :interest_type_id, :id
    field :listing_id, non_null(:id)
  end

  # Output type for a contact request (no listing attached).
  object :contact do
    field :id, :id
    field :name, :string
    field :email, :string
    field :phone, :string
    field :message, :string
    field :state, :string
    field :city, :string
    field :neighborhood, :string
  end

  # Funding simulation result: total effective cost (CET) and monthly
  # effective cost (CEM), returned as preformatted strings.
  object :simulation do
    field :cem, :string
    field :cet, :string
  end

  # Input for the funding simulation; coparticipant fields are only
  # meaningful when include_coparticipant is true.
  input_object :simulation_request do
    field :mutuary, non_null(:string)
    field :birthday, non_null(:date)
    field :include_coparticipant, non_null(:boolean)
    field :net_income, non_null(:decimal)
    field :net_income_coparticipant, :decimal
    field :birthday_coparticipant, :date
    field :fundable_value, non_null(:decimal)
    field :term, non_null(:integer)
    field :amortization, :boolean
    field :annual_interest, :float
    field :home_equity_annual_interest, :float
    field :calculate_tr, :boolean
    field :evaluation_rate, :decimal
    field :itbi_value, :decimal
    field :listing_price, :decimal
    field :listing_type, :string
    field :product_type, :string
    field :sum, :boolean
    field :insurer, :string
  end

  object :interest_queries do
    @desc "Request funding simulation"
    field :simulate, type: :simulation do
      arg :input, non_null(:simulation_request)
      resolve &InterestsResolver.simulate/2
    end
  end

  object :interest_mutations do
    @desc "Show interest in listing"
    field :interest_create, type: :interest do
      arg :input, non_null(:interest_input)
      resolve &InterestsResolver.create_interest/2
    end

    @desc "Request contact"
    field :request_contact, type: :contact do
      arg :name, :string
      arg :phone, :string
      arg :email, :string
      arg :message, :string
      resolve &InterestsResolver.request_contact/2
    end
  end

  # Both subscriptions are restricted to the :system user.
  object :interest_subscriptions do
    @desc "Subscribe to email change"
    field :interest_created, :interest do
      config(fn _args, %{context: %{current_user: current_user}} ->
        case current_user do
          :system -> {:ok, topic: "interest_created"}
          _ -> {:error, :unauthorized}
        end
      end)

      trigger :interest_create,
        topic: fn _ ->
          "interest_created"
        end
    end

    # NOTE(review): this @desc looks copy-pasted from :interest_created and
    # probably should describe contact requests. It is a runtime-visible
    # schema string, so it is left unchanged here — confirm before editing.
    @desc "Subscribe to email change"
    field :contact_requested, :contact do
      config(fn _args, %{context: %{current_user: current_user}} ->
        case current_user do
          :system -> {:ok, topic: "contact_requested"}
          _ -> {:error, :unauthorized}
        end
      end)

      trigger :request_contact,
        topic: fn _ ->
          "contact_requested"
        end
    end
  end
end
|
apps/re_web/lib/graphql/types/interest.ex
| 0.593138
| 0.526647
|
interest.ex
|
starcoder
|
defmodule Murmur do
@moduledoc ~S"""
This module implements the x86_32, x86_128 and x64_128 variants of the
non-cryptographic hash Murmur3.
## Examples
iex> Murmur.hash_x86_32("b2622f5e1310a0aa14b7f957fe4246fa", 2147368987)
3297211900
iex> Murmur.hash_x86_128("some random data")
5586633072055552000169173700229798482
iex> Murmur.hash_x64_128([:yes, :you, :can, :use, :any, :erlang, :term!])
300414073828138369336317731503972665325
"""
use Bitwise

# Murmur3 mixing constants for the three variants. The values match the
# reference implementation's magic numbers.

# x86 32-bit variant
@c1_32 0xCC9E2D51
@c2_32 0x1B873593
@n_32 0xE6546B64

# x86 128-bit variant (four 32-bit lanes, one constant pair per lane)
@c1_32_128 0x239B961B
@c2_32_128 0xAB0E9789
@c3_32_128 0x38B34AE5
@c4_32_128 0xA1E38B93
@n1_32_128 0x561CCD1B
@n2_32_128 0x0BCAA747
@n3_32_128 0x96CD1C35
@n4_32_128 0x32AC3B17

# x64 128-bit variant (two 64-bit lanes)
@c1_64_128 0x87C37B91114253D5
@c2_64_128 0x4CF5AD432745937F
@n1_64_128 0x52DCE729
@n2_64_128 0x38495AB5

# since erlang/elixir integers are variable-length we have to guarantee them
# to be 32 or 64 bit long
defmacrop mask_32(x), do: quote(do: unquote(x) &&& 0xFFFFFFFF)
defmacrop mask_64(x), do: quote(do: unquote(x) &&& 0xFFFFFFFFFFFFFFFF)
@doc """
Returns the hashed erlang term `data` using an optional `seed` which defaults to `0`.
This function uses the x64 128bit variant.
"""
@spec hash_x64_128(binary | term, non_neg_integer) :: non_neg_integer
def hash_x64_128(data, seed \\ 0)
def hash_x64_128(data, seed) when is_binary(data) do
hashes =
[seed, seed]
|> hash_64_128_aux(data)
|> Stream.zip([
{31, @c1_64_128, @c2_64_128},
{33, @c2_64_128, @c1_64_128}
])
|> Stream.map(fn {x, {r, a, b}} ->
case x do
{h, []} ->
h ^^^ byte_size(data)
{h, t} ->
h ^^^
(t
|> swap_uint()
|> Kernel.*(a)
|> mask_64
|> rotl64(r)
|> Kernel.*(b)
|> mask_64) ^^^ byte_size(data)
end
end)
|> Enum.to_list()
[h1, h2] =
hashes
|> hash_64_128_intermix
|> Enum.map(&fmix64/1)
|> hash_64_128_intermix
h1 <<< 64 ||| h2
end
def hash_x64_128(data, seed) do
hash_x64_128(:erlang.term_to_binary(data), seed)
end
@doc """
Returns the hashed erlang term `data` using an optional `seed` which defaults to `0`.
This function uses the x86 128bit variant.
"""
@spec hash_x86_128(binary | term, non_neg_integer) :: non_neg_integer
def hash_x86_128(data, seed \\ 0)
def hash_x86_128(data, seed) when is_binary(data) do
hashes =
[seed, seed, seed, seed]
|> hash_32_128_aux(data)
|> Stream.zip([
{15, @c1_32_128, @c2_32_128},
{16, @c2_32_128, @c3_32_128},
{17, @c3_32_128, @c4_32_128},
{18, @c4_32_128, @c1_32_128}
])
|> Stream.map(fn {x, {r, a, b}} ->
case x do
{h, []} ->
h ^^^ byte_size(data)
{h, t} ->
h ^^^
(t
|> swap_uint()
|> Kernel.*(a)
|> mask_32
|> rotl32(r)
|> Kernel.*(b)
|> mask_32) ^^^ byte_size(data)
end
end)
|> Enum.to_list()
[h1, h2, h3, h4] =
hashes
|> hash_32_128_intermix
|> Enum.map(&fmix32/1)
|> hash_32_128_intermix
h1 <<< 96 ||| h2 <<< 64 ||| h3 <<< 32 ||| h4
end
def hash_x86_128(data, seed) do
hash_x86_128(:erlang.term_to_binary(data), seed)
end
@doc """
Returns the hashed erlang term `data` using an optional `seed` which defaults to `0`.
This function uses the x86 32bit variant.
"""
@spec hash_x86_32(binary | term, non_neg_integer) :: non_neg_integer
def hash_x86_32(data, seed \\ 0)
def hash_x86_32(data, seed) when is_binary(data) do
hash =
case hash_32_aux(seed, data) do
{h, []} ->
h
{h, t} ->
h ^^^
(t
|> swap_uint()
|> Kernel.*(@c1_32)
|> mask_32
|> rotl32(15)
|> Kernel.*(@c2_32)
|> mask_32)
end
fmix32(hash ^^^ byte_size(data))
end
def hash_x86_32(data, seed) do
hash_x86_32(:erlang.term_to_binary(data), seed)
end
# x64_128 helper functions
# Cross-feeds the two 64-bit lanes: h2 is folded into h1, then the updated
# h1 is folded back into h2 (order matters).
@spec hash_64_128_intermix([non_neg_integer]) :: [non_neg_integer]
defp hash_64_128_intermix([h1, h2]) do
  mixed_h1 = mask_64(h1 + h2)
  [mixed_h1, mask_64(h2 + mixed_h1)]
end
# Per-block scramble of a 64-bit word: multiply by c1, rotate left, multiply
# by c2, masking back to 64 bits after each step.
@spec k_64_op(
        non_neg_integer,
        5_545_529_020_109_919_103 | 9_782_798_678_568_883_157,
        31 | 33,
        5_545_529_020_109_919_103 | 9_782_798_678_568_883_157
      ) :: non_neg_integer
defp k_64_op(k, c1, rotl, c2) do
  scaled = mask_64(k * c1)
  rotated = scaled |> rotl64(rotl) |> mask_64()
  mask_64(rotated * c2)
end
# Folds a scrambled word `k` into lane state h1: xor, rotate, add the other
# lane, then the `h = h * const + n` step, masked back to 64 bits.
@spec h_64_op(
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer
      ) :: non_neg_integer
defp h_64_op(h1, k, rotl, h2, const, n) do
  rotated = h1 |> Bitwise.^^^(k) |> rotl64(rotl)
  mask_64((rotated + h2) * const + n)
end
@spec hash_64_128_aux([non_neg_integer], binary) :: [{non_neg_integer, [binary]}]
# Main block loop: consumes two 64-bit little-endian words per iteration
# (size(16)-little-unit(4) = 16 nibbles = 64 bits), folding one into each lane.
defp hash_64_128_aux(
       [h1, h2],
       <<k1::size(16)-little-unit(4), k2::size(16)-little-unit(4), t::binary>>
     ) do
  k1 = k_64_op(k1, @c1_64_128, 31, @c2_64_128)
  h1 = h_64_op(h1, k1, 27, h2, 5, @n1_64_128)
  k2 = k_64_op(k2, @c2_64_128, 33, @c1_64_128)
  h2 = h_64_op(h2, k2, 31, h1, 5, @n2_64_128)
  hash_64_128_aux([h1, h2], t)
end

# 8-15 tail bytes remain: first 8 go to lane 1, the rest to lane 2.
defp hash_64_128_aux([h1, h2], <<t1::size(8)-binary, t::binary>>) do
  [{h1, t1}, {h2, t}]
end

# 0-7 tail bytes remain: all of them belong to lane 1
# ("" yields a zero word via swap_uint/1).
defp hash_64_128_aux([h1, h2], t) when is_binary(t) do
  [{h1, t}, {h2, []}]
end

# Fallback when the remainder is not a binary: both lanes get an empty tail.
defp hash_64_128_aux([h1, h2], _) do
  [{h1, []}, {h2, []}]
end
# x86_128 helper functions

# Cross-mixes the four 32-bit lanes: h1 absorbs the other three lanes,
# then each remaining lane is folded against the updated h1.
@spec hash_32_128_intermix([non_neg_integer]) :: [non_neg_integer]
defp hash_32_128_intermix([h1, h2, h3, h4]) do
  folded = Enum.reduce([h2, h3, h4], h1, fn lane, acc -> mask_32(acc + lane) end)
  [folded, mask_32(h2 + folded), mask_32(h3 + folded), mask_32(h4 + folded)]
end
@spec k_32_op(
        non_neg_integer(),
        597_399_067 | 951_274_213 | 2_716_044_179 | 2_869_860_233 | 3_432_918_353,
        15 | 16 | 17 | 18,
        461_845_907 | 597_399_067 | 951_274_213 | 2_716_044_179 | 2_869_860_233
      ) :: non_neg_integer
# Standard murmur3 k-mixing step: multiply, rotate, multiply, all in 32 bits.
defp k_32_op(k, c1, rotl, c2) do
  scaled = mask_32(k * c1)
  rotated = mask_32(rotl32(scaled, rotl))
  mask_32(rotated * c2)
end
@spec h_32_op(
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer,
        non_neg_integer
      ) :: non_neg_integer
# Standard murmur3 h-mixing step: fold a mixed key word into a lane hash.
defp h_32_op(h1, k, rotl, h2, const, n) do
  rotated = rotl32(h1 ^^^ k, rotl)
  mask_32((rotated + h2) * const + n)
end
@spec hash_32_128_aux([non_neg_integer], binary) :: [{non_neg_integer, [binary]}]
# Main block loop: consumes four 32-bit little-endian words per iteration
# (size(8)-little-unit(4) = 8 nibbles = 32 bits), one word per lane. The
# lanes are mixed in a ring: h2 folds into h1's step, ..., h1 into h4's.
defp hash_32_128_aux(
       [h1, h2, h3, h4],
       <<k1::size(8)-little-unit(4), k2::size(8)-little-unit(4), k3::size(8)-little-unit(4),
         k4::size(8)-little-unit(4), t::binary>>
     ) do
  k1 = k_32_op(k1, @c1_32_128, 15, @c2_32_128)
  h1 = h_32_op(h1, k1, 19, h2, 5, @n1_32_128)
  k2 = k_32_op(k2, @c2_32_128, 16, @c3_32_128)
  h2 = h_32_op(h2, k2, 17, h3, 5, @n2_32_128)
  k3 = k_32_op(k3, @c3_32_128, 17, @c4_32_128)
  h3 = h_32_op(h3, k3, 15, h4, 5, @n3_32_128)
  k4 = k_32_op(k4, @c4_32_128, 18, @c1_32_128)
  h4 = h_32_op(h4, k4, 13, h1, 5, @n4_32_128)
  hash_32_128_aux([h1, h2, h3, h4], t)
end

# 12-15 tail bytes: lanes 1-3 take 4 bytes each, lane 4 the remainder.
defp hash_32_128_aux(
       [h1, h2, h3, h4],
       <<t1::size(4)-binary, t2::size(4)-binary, t3::size(4)-binary, t::binary>>
     ) do
  [{h1, t1}, {h2, t2}, {h3, t3}, {h4, t}]
end

# 8-11 tail bytes: lanes 1-2 take 4 bytes each, lane 3 the remainder.
defp hash_32_128_aux([h1, h2, h3, h4], <<t1::size(4)-binary, t2::size(4)-binary, t3::binary>>) do
  [{h1, t1}, {h2, t2}, {h3, t3}, {h4, []}]
end

# 4-7 tail bytes: lane 1 takes 4 bytes, lane 2 the remainder.
defp hash_32_128_aux([h1, h2, h3, h4], <<t1::size(4)-binary, t2::binary>>) do
  [{h1, t1}, {h2, t2}, {h3, []}, {h4, []}]
end

# 0-3 tail bytes: everything goes to lane 1 ("" yields a zero word).
defp hash_32_128_aux([h1, h2, h3, h4], t1) when is_binary(t1) do
  [{h1, t1}, {h2, []}, {h3, []}, {h4, []}]
end

# Fallback when the remainder is not a binary: all lanes get an empty tail.
defp hash_32_128_aux([h1, h2, h3, h4], _) do
  [{h1, []}, {h2, []}, {h3, []}, {h4, []}]
end
# x86_32 helper functions
@spec hash_32_aux(non_neg_integer, binary) :: {non_neg_integer, [binary] | binary}
# Consumes one 32-bit little-endian word (8 nibbles) per step, folding it
# into the running hash with the standard murmur3 x86_32 round.
defp hash_32_aux(h0, <<k::size(8)-little-unit(4), t::binary>>) do
  k1 = k_32_op(k, @c1_32, 15, @c2_32)

  h0
  |> Bitwise.^^^(k1)
  |> rotl32(13)
  |> Kernel.*(5)
  |> Kernel.+(@n_32)
  |> mask_32
  |> hash_32_aux(t)
end

# 1-3 bytes remain: return them as the tail for the caller to mix in.
defp hash_32_aux(h, t) when byte_size(t) > 0, do: {h, t}

# Empty (or non-binary) remainder: [] marks "no tail".
defp hash_32_aux(h, _), do: {h, []}
# 32 bit helper functions

# Final 32-bit avalanche: two xor-shift/multiply rounds plus a closing
# xor-shift, forcing every input bit to affect every output bit.
@spec fmix32(non_neg_integer) :: non_neg_integer
defp fmix32(h0) do
  first = mask_32(xorbsr(h0, 16) * 0x85EBCA6B)
  second = mask_32(xorbsr(first, 13) * 0xC2B2AE35)
  xorbsr(second, 16)
end
@spec rotl32(non_neg_integer, non_neg_integer) :: non_neg_integer
# Rotates `x` left by `r` bits within a 32-bit word.
defp rotl32(x, r) do
  high = x <<< r
  low = x >>> (32 - r)
  mask_32(high ||| low)
end
# 64bit helper functions

# Final 64-bit avalanche: two xor-shift/multiply rounds plus a closing
# xor-shift, forcing every input bit to affect every output bit.
@spec fmix64(non_neg_integer) :: non_neg_integer
defp fmix64(h0) do
  first = mask_64(xorbsr(h0, 33) * 0xFF51AFD7ED558CCD)
  second = mask_64(xorbsr(first, 33) * 0xC4CEB9FE1A85EC53)
  xorbsr(second, 33)
end
@spec rotl64(non_neg_integer, non_neg_integer) :: non_neg_integer
# Rotates `x` left by `r` bits within a 64-bit word.
defp rotl64(x, r) do
  high = x <<< r
  low = x >>> (64 - r)
  mask_64(high ||| low)
end
# generic helper functions
@spec swap_uint(binary) :: non_neg_integer
# Decodes up to 8 bytes as a little-endian unsigned integer: byte i
# contributes its value shifted left by 8 * i bits. XOR combines the
# disjoint byte lanes exactly as the previous clause-per-width version
# did; the empty binary decodes to 0, and anything longer than 8 bytes
# still raises FunctionClauseError.
defp swap_uint(bin) when is_binary(bin) and byte_size(bin) <= 8 do
  bin
  |> :binary.bin_to_list()
  |> Enum.with_index()
  |> Enum.reduce(0, fn {byte, index}, acc -> acc ^^^ (byte <<< (index * 8)) end)
end
@spec xorbsr(non_neg_integer, non_neg_integer) :: non_neg_integer
# XOR-shift: XORs a value with itself shifted right by `v` bits.
defp xorbsr(h, v) do
  h ^^^ (h >>> v)
end
end
|
lib/murmur.ex
| 0.862598
| 0.53279
|
murmur.ex
|
starcoder
|
defmodule Fluex do
  @moduledoc """
  The `Fluex` module provides a localization system for natural-sounding
  translations using [fluent-rs](https://github.com/projectfluent/fluent-rs).
  Fluex uses [NIFs](https://github.com/rusterlium/rustler) to make calls to fluent-rs.

  ## Installation

  Add `Fluex` to your list of dependencies in mix.exs:

      def deps do
        [{:fluex, ">= 0.0.0"}]
      end

  Then run mix deps.get to fetch the new dependency.

  ## Translations

  Translations are stored inside Fluent files, with a `.ftl`
  extension. For example, this is a snippet from a .ftl file:

      # Simple things are simple.
      hello-user = Hello, {$userName}!

      # Complex things are possible.
      shared-photos =
          {$userName} {$photoCount ->
              [one] added a new photo
             *[other] added {$photoCount} new photos
          } to {$userGender ->
              [male] his stream
              [female] her stream
             *[other] their stream
          }.

  For more information visit [Project Fluent](https://projectfluent.org/).

  Fluex loads `.ftl` files (resources) at compile time.
  These resource files must be available for every locale. The resource paths must be provided
  as compile-time configuration (see "Translator configuration") The directory structure
  could look like this:

      priv/fluex/
      ├── en
      │   ├── second
      │   │   └── resource.ftl
      │   ├── fluex.ftl
      │   └── other.ftl
      └── it
          ├── second
          │   └── resource.ftl
          ├── fluex.ftl
          └── other.ftl

  ## Configuration

  ### `:fluex` configuration

  Fluex uses a similar configuration to [Gettext](https://hexdocs.pm/gettext/Gettext.html#module-configuration)
  It supports the following configuration options:

    * `:default_locale` - see [Module Gettext Configuration](https://hexdocs.pm/gettext/Gettext.html#module-gettext-configuration)

  ### Translator configuration

  A Fluex translator (backend) supports some compile-time options. These options
  can be configured in two ways: either by passing them to `use Fluex` (hence
  at compile time):

      defmodule MyApp.Fluex do
        use Fluex, options
      end

  or by using Mix configuration, configuring the key corresponding to the
  backend in the configuration for your application:

      # For example, in config/config.exs
      config :my_app, MyApp.Fluex, options

  Note that the `:otp_app` option (an atom representing an OTP application) has
  to always be present and has to be passed to `use Fluex` because it's used
  to determine the application to read the configuration of (`:my_app` in the
  example above); for this reason, `:otp_app` can't be configured via the Mix
  configuration. This option is also used to determine the Fluex resources.

  The following is a comprehensive list of supported options:

    * `:dir` - a string representing the directory where translations will be
      searched. The directory is relative to the directory of the application
      specified by the `:otp_app` option. By default it's
      `"priv/fluex"`.

    * `:resources` - a list of resources which should be used for translation.
      Pathnames are relative to the locale directory, e.g. `["fluex.ftl", "other.ftl", "second/resource.ftl"]`.
      By default, it uses the opt app name with a `.ftl` extension, e.g. `["my_app.ftl"]`.

    * `:locales` - a list of requested locales to be considered for the application. During
      compile time the list is compared with available locales. Only locales available in
      both lists are considered. By default, all available locales are considered.

  ### Fluex API

  Fluex provides `translate/3` and `ltranslate/3` macros to your own Fluex module, like `MyApp.Fluex`.
  These macros call the `translate/3` and `ltranslate/3` functions from the `Fluex` module
  A simple example is:

      defmodule MyApp.Fluex do
        use Fluex, otp_app: :my_app
      end

      Fluex.put_locale(MyApp.Fluex, "pt_BR")
      msgid = "Hello"
      MyApp.Fluex.translate!(msgid, %{user: "mundo"})
      #=> "Olá \u{2068}mundo\u{2069}"
      MyApp.Fluex.ltranslate!("en", msgid, %{user: "world"})
      #=> "Hello \u{2068}world\u{2069}"

  The result string contains FSI/PDI isolation marks to ensure that
  the direction of the text from the variable is not affected by the
  translation.
  """

  alias Fluex.FluentNIF

  @doc false
  defmacro __using__(opts) do
    quote do
      @fluex_opts unquote(opts)
      @before_compile Fluex.Compiler
    end
  end

  @doc false
  def child_spec(translator, opts) do
    %{
      id: translator,
      start: {translator, :start_link, [opts]},
      type: :supervisor
    }
  end

  @doc false
  def start_link(translator, opts \\ []) do
    Fluex.Supervisor.start_link(
      translator,
      opts
    )
  end

  @doc """
  Same as `translate/3`, but raises a `RuntimeError` when no bundle of the
  given `translator` contains a message with the given `id`.
  """
  def translate!(translator, id, bindings \\ %{}) do
    ltranslate!(translator, get_locale(translator), id, bindings)
  end

  @doc """
  Translates the message `id` using the current locale of `translator`
  (see `get_locale/1`).

  Returns `{:ok, message}`, or `{:error, :not_found}` when the message is
  missing for both the current and the default locale.
  """
  def translate(translator, id, bindings \\ %{}) do
    ltranslate(translator, get_locale(translator), id, bindings)
  end

  @doc """
  Same as `ltranslate/4`, but raises a `RuntimeError` when no bundle of the
  given `translator` contains a message with the given `id`.
  """
  def ltranslate!(translator, locale, id, bindings \\ %{}) do
    case ltranslate(translator, locale, id, bindings) do
      {:ok, msg} ->
        msg

      {:error, _} ->
        # Fixed message typo: "do no contain" -> "do not contain".
        raise(
          RuntimeError,
          "bundles in translator #{translator} do not contain a message with id: #{id}"
        )
    end
  end

  @doc """
  Translates the message `id` into `locale` using the bundles of `translator`,
  falling back to the translator's default locale when the requested locale
  does not provide the message.

  Returns `{:ok, message}` or `{:error, :not_found}`.
  """
  def ltranslate(translator, locale, id, bindings \\ %{}) do
    bundles = Fluex.Registry.lookup(translator)
    locale = Map.get(bundles, locale)
    fallback = Map.get(bundles, translator.__fluex__(:default_locale))

    cond do
      locale && FluentNIF.has_message?(locale, id) ->
        {:ok, FluentNIF.format_pattern(locale, id, stringify(bindings))}

      fallback && FluentNIF.has_message?(fallback, id) ->
        {:ok, FluentNIF.format_pattern(fallback, id, stringify(bindings))}

      true ->
        {:error, :not_found}
    end
  end

  # fluent-rs expects both keys and values as strings, so normalise the
  # bindings map before crossing the NIF boundary.
  defp stringify(bindings) when is_map(bindings) do
    Map.new(bindings, fn
      {key, val} -> {to_string(key), to_string(val)}
    end)
  end

  @doc false
  def __fluex__(:default_locale) do
    # If this is not set by the user, it's still set in mix.exs (to "en").
    Application.fetch_env!(:fluex, :default_locale)
  end

  @doc """
  Returns the locale of `translator` for the current process, falling back
  first to the process-wide `Fluex` locale and then to the translator's
  default locale.
  """
  def get_locale(translator \\ Fluex) do
    with nil <- Process.get(translator),
         nil <- Process.get(Fluex) do
      translator.__fluex__(:default_locale)
    end
  end

  @doc """
  Sets the locale of `translator` for the current process. Only binary
  locales are accepted; anything else raises an `ArgumentError`.
  """
  def put_locale(translator \\ Fluex, locale)

  def put_locale(translator, locale) when is_binary(locale),
    do: Process.put(translator, locale)

  def put_locale(_translator, locale),
    do: raise(ArgumentError, "put_locale/1 only accepts binary locales, got: #{inspect(locale)}")
end
|
lib/fluex.ex
| 0.843154
| 0.534916
|
fluex.ex
|
starcoder
|
defmodule Exdn do
  @moduledoc """
  Exdn is a two-way translator between Elixir data structures and data
  following the [edn specification](https://github.com/edn-format/edn);
  it wraps the [erldn edn parser](https://github.com/marianoguerra/erldn)
  for Erlang, with some changes in the data formats.

  ## Examples

      iex> Exdn.to_elixir! "[1 :foo]"
      [1, :foo]

      iex> Exdn.to_elixir "{1 :foo, 2 :bar}"
      {:ok, %{1 => :foo, 2 => :bar}}

      iex> Exdn.from_elixir! %{1 => :foo, 2 => :bar}
      "{1 :foo 2 :bar}"

      iex> Exdn.from_elixir %{:foo => {:char, ?a}, {:char, ?b} => {:tag, :inst, "1985-04-12T23:20:50.52Z"} }
      {:ok, "{:foo \\a \\b #inst \"1985-04-12T23:20:50.52Z\"}" }

  ## Type mappings:

  edn             | Elixir generated by `to_elixir` functions (when no custom converter is provided)
  --------------- | -----------------------------------------
  integer         | integer
  float           | float
  boolean         | boolean
  nil             | nil (atom)
  char            | string
  string          | string
  list            | tagged list `{:list, [...]}`
  vector          | list
  map             | map
  set             | mapset
  symbol          | tagged atom `{:symbol, atom}`
  tagged elements | call registered handler for that tag, fail if not found

  Elixir accepted by `from_elixir` functions              | edn
  ------------------------------------------------------- | ---------------
  integer                                                  | integer
  float                                                    | float
  boolean                                                  | boolean
  nil (atom)                                               | nil
  tagged integer `{:char, <integer>}`                      | char
  string                                                   | string
  tagged list `{:list, [...]}`                             | list
  list                                                     | vector
  map                                                      | map
  struct                                                   | map
  mapset                                                   | set
  tagged atom `{:symbol, atom}`                            | symbol
  tagged tuple with tag and value `{:tag, Symbol, Value}`  | tagged elements
  """

  alias Calendar.DateTime.Parse

  @type exdn ::
          atom | boolean | number | String.t() | tuple | [exdn] | %{exdn => exdn} | MapSet.t()
  @type converter :: (exdn -> term)
  @type handler :: (atom, term, converter, [{atom, handler}] -> term)

  @doc """
  parses an edn string into an Elixir data structure; this is not a reversible
  conversion as chars are converted to strings, and tagged expressions are
  interpreted. This function can throw exceptions; for example, if a tagged
  expression cannot be interpreted.

  The second (optional) argument is a converter function that is applied to
  each intermediate data structure as it is converted; it defaults to the
  identity function.

  The third (optional) argument allows you to supply your own handlers for
  the interpretation of tagged expressions. These should be in the form of a
  keyword list. The first element of each pair should be a keyword corresponding
  to the tag, and the second element a function of four parameters
  (tag, value, converter, handlers) that handles the tagged values.
  The one-argument version provides default handlers for #inst and #uuid.

  ## Examples:

      iex> Exdn.to_elixir! "41.2"
      41.2

      iex> Exdn.to_elixir! ":foo"
      :foo

      iex> Exdn.to_elixir! "true"
      true

      iex> Exdn.to_elixir! "nil"
      nil

      iex> Exdn.to_elixir! "\"asd\""
      "asd"

      # Char
      iex> Exdn.to_elixir! "\\a"
      "a"

      # Symbol
      iex> Exdn.to_elixir! "foo"
      {:symbol, :foo}

      # edn vectors become Elixir lists:
      iex> Exdn.to_elixir! "[1 :foo]"
      [1, :foo]

      # edn lists are always tagged. Since Datomic is a principal use of edn, and since lists are
      # used in Datomic primarily for executable expressions rather than as data structures, we
      # use Elixir lists to represent vectors and keep edn lists specially tagged:
      iex> Exdn.to_elixir! "(1, :foo)"
      {:list, [1, :foo]}

      # edn sets become Elixir sets:
      iex> Exdn.to_elixir! "\#{1 \\a 1}"
      #MapSet<[1, "a"]>

      # Maps become Elixir maps:
      iex> Exdn.to_elixir! "{1 :foo, 2 :bar}"
      %{1 => :foo, 2 => :bar}

      # You can also transform maps to Elixir structs by providing your own converter in the second argument:
      iex> defmodule FooStruct do
      ...>   defstruct foo: "default"
      ...> end
      iex> converter = fn map ->
      ...>   case map do
      ...>     %{:foo => _} -> struct(FooStruct, map)
      ...>     anything_else -> anything_else
      ...>   end
      ...> end
      iex> Exdn.to_elixir! "{:foo 1, :bar 2}", converter
      %FooStruct{foo: 1}

      # Tagged expressions are converted. Standard converters for #inst and #uuid are included:
      iex> Exdn.to_elixir! "#inst \"1985-04-12T23:20:50.52Z\""
      %Calendar.DateTime{abbr: "UTC", day: 12, hour: 23, min: 20, month: 4, sec: 50,
       std_off: 0, timezone: "Etc/UTC", usec: 520000, utc_off: 0, year: 1985}

      iex> Exdn.to_elixir! "#uuid \"f81d4fae-7dec-11d0-a765-00a0c91e6bf6\""
      "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"

      # You can provide your own handlers for tagged expressions (handlers take
      # four arguments: tag, value, converter and the handler list):
      iex> handler = fn(_tag, val, _converter, _handlers) -> val <> "-converted" end
      iex> Exdn.to_elixir!("#foo \"blarg\"", fn x -> x end, [{:foo, handler}])
      "blarg-converted"
  """
  @spec to_elixir!(String.t(), converter, [{atom, handler}, ...]) :: term
  def to_elixir!(edn_str, converter \\ fn x -> x end, handlers \\ standard_handlers()) do
    erlang_str = edn_str |> to_charlist
    {:ok, erlang_intermediate} = :erldn.parse_str(erlang_str)
    erldn_to_elixir!(erlang_intermediate, converter, handlers)
  end

  @doc """
  parses an edn string into an Elixir data structure, but does not throw
  exceptions. The parse result is returned as the second element of a pair
  whose first element is `:ok` -- if there is an error the first element will
  be `:error` and the second the error that was raised.

  ## Examples:

      iex> Exdn.to_elixir "{1 :foo, 2 :bar}"
      {:ok, %{1 => :foo, 2 => :bar}}

      iex> Exdn.to_elixir "{:foo, \\a, \\b #foo \"blarg\" }"
      {:error, %RuntimeError{:message => "Handler not found for tag foo with tagged expression blarg"}}
  """
  @spec to_elixir(String.t(), (exdn -> exdn), [{atom, handler}, ...]) ::
          {:ok, term} | {:error, term}
  def to_elixir(edn_str, converter \\ fn x -> x end, handlers \\ standard_handlers()) do
    try do
      {:ok, to_elixir!(edn_str, converter, handlers)}
    rescue
      e -> {:error, e}
    end
  end

  # Recursively converts erldn's intermediate format to Elixir terms, applying
  # `converter` at each level. (Renamed from the misspelled `elrldn_to_elixir!`.)
  defp erldn_to_elixir!({:char, char}, converter, _handlers) do
    case converter.({:char, char}) do
      {:char, char} -> to_string([char])
      anything_else -> anything_else
    end
  end

  defp erldn_to_elixir!({:keyword, nil}, converter, _handlers), do: converter.(nil)

  defp erldn_to_elixir!({:tag, tag, val}, converter, handlers) do
    case converter.({:tag, tag, val}) do
      {:tag, atag, aval} ->
        converted_val = erldn_to_elixir!(aval, converter, handlers)
        evaluate_tagged_expr({:tag, atag, converted_val}, converter, handlers)

      anything_else ->
        anything_else
    end
  end

  defp erldn_to_elixir!({:vector, items}, converter, handlers) do
    Enum.map(items, fn item -> erldn_to_elixir!(item, converter, handlers) end)
    |> (fn x -> converter.(x) end).()
  end

  defp erldn_to_elixir!({:set, items}, converter, handlers) do
    convert_set(items, fn x -> erldn_to_elixir!(x, converter, handlers) end)
    |> (fn x -> converter.(x) end).()
  end

  defp erldn_to_elixir!({:map, pairs}, converter, handlers) do
    convert_map(pairs, fn x -> erldn_to_elixir!(x, converter, handlers) end)
    |> (fn x -> converter.(x) end).()
  end

  defp erldn_to_elixir!(items, converter, handlers) when is_list(items) do
    {:list,
     Enum.map(items, fn item -> erldn_to_elixir!(item, converter, handlers) end)
     |> (fn x -> converter.(x) end).()}
  end

  defp erldn_to_elixir!(val, converter, _handlers), do: converter.(val)

  @doc """
  parses an edn string into an Elixir data structure, but in a reversible way --
  chars and tagged expressions are represented using tuples whose first element
  is `:char` or `:tag`, respectively.

  ## Examples:

      iex> Exdn.to_reversible( "\\a" )
      {:char, ?a}

      iex> Exdn.to_reversible "#inst \"1985-04-12T23:20:50.52Z\""
      {:tag, :inst, "1985-04-12T23:20:50.52Z"}

      # An unknown tag raises no error when using the reversible conversion:
      iex> Exdn.to_reversible "#foo \"blarg\""
      {:tag, :foo, "blarg"}
  """
  @spec to_reversible(String.t()) :: exdn
  def to_reversible(edn_str) do
    erlang_str = edn_str |> to_charlist
    {:ok, erlang_intermediate} = :erldn.parse_str(erlang_str)
    reversible(erlang_intermediate)
  end

  # Same recursion as erldn_to_elixir!/3, but chars and tags are kept as
  # tagged tuples so the conversion can be round-tripped by from_elixir!/1.
  defp reversible({:char, char}), do: {:char, char}
  defp reversible({:keyword, nil}), do: nil
  defp reversible({:tag, tag, val}), do: {:tag, tag, val}
  defp reversible({:vector, items}), do: Enum.map(items, fn item -> reversible(item) end)
  defp reversible({:set, items}), do: convert_set(items, fn x -> reversible(x) end)
  defp reversible({:map, pairs}), do: convert_map(pairs, fn x -> reversible(x) end)

  defp reversible(items) when is_list(items),
    do: {:list, Enum.map(items, fn item -> reversible(item) end)}

  defp reversible(val), do: val

  # Converts erldn's pair list into a map, applying `converter` to keys and values.
  defp convert_map(pairs, converter) do
    convert_pair = fn {key, val} -> {converter.(key), converter.(val)} end
    pairs |> Enum.map(convert_pair) |> Map.new()
  end

  # Converts erldn's item list into a MapSet, applying `converter` to each item.
  defp convert_set(items, converter) do
    convert_item = fn item -> converter.(item) end
    items |> Enum.map(convert_item) |> MapSet.new()
  end

  @doc """
  converts an Elixir data structure in the "reversible" format (see below) into
  an edn string. Will raise exceptions if the data structure cannot be converted.

  ## Examples:

      # The intermediate representation can be converted back to edn:
      iex> Exdn.from_elixir! 41.2
      "41.2"

      iex> Exdn.from_elixir! :foo
      ":foo"

      iex> Exdn.from_elixir! true
      "true"

      iex> Exdn.from_elixir! nil
      "nil"

      iex> Exdn.from_elixir! "asd"
      "\"asd\""

      iex> Exdn.from_elixir! {:char, ?a}
      "\\a"

      iex> Exdn.from_elixir! {:symbol, :foo}
      "foo"

      iex> Exdn.from_elixir! [1, :foo]
      "[1 :foo]"

      iex> Exdn.from_elixir! {:list, [1, :foo]}
      "(1 :foo)"

      iex> Exdn.from_elixir! MapSet.new([1, :foo])
      "\#{1 :foo}"

      iex> Exdn.from_elixir! %{1 => :foo, 2 => :bar}
      "{1 :foo 2 :bar}"

      iex> Exdn.from_elixir! %SomeStruct{foo: 1, bar: 2}
      "{:foo 1 :bar 2}"

      iex> Exdn.from_elixir! {:tag, :inst, "1985-04-12T23:20:50.52Z"}
      "#inst \"1985-04-12T23:20:50.52Z\""
  """
  @spec from_elixir!(exdn) :: String.t()
  def from_elixir!(elixir_data) do
    erldn_intermediate = to_erldn_intermediate(elixir_data)
    :erldn.to_string(erldn_intermediate) |> to_string
  end

  @doc """
  safe version of `from_elixir!/1` -- the edn string is returned as the second
  element of a pair whose first element is `:ok` -- if there is an error the first
  element will be `:error` and the second the error that was raised.

  ## Example:

      iex> Exdn.from_elixir %{:foo => {:char, ?a}, {:char, ?b} => {:tag, :inst, "1985-04-12T23:20:50.52Z"} }
      {:ok, "{:foo \\a \\b #inst \"1985-04-12T23:20:50.52Z\"}" }
  """
  @spec from_elixir(exdn) :: {:ok, String.t()} | {:error, term}
  def from_elixir(elixir_data) do
    try do
      {:ok, from_elixir!(elixir_data)}
    rescue
      e -> {:error, e}
    end
  end

  # Plain lists map to edn vectors; tagged {:list, ...} maps to edn lists.
  defp to_erldn_intermediate(items) when is_list(items) do
    {:vector, Enum.map(items, fn x -> to_erldn_intermediate(x) end)}
  end

  defp to_erldn_intermediate({:list, items}) do
    Enum.map(items, fn x -> to_erldn_intermediate(x) end)
  end

  defp to_erldn_intermediate(%MapSet{} = set) do
    items = Enum.map(set, fn x -> to_erldn_intermediate(x) end)
    {:set, items}
  end

  # Works on structs or maps
  defp to_erldn_intermediate(pairs) when is_map(pairs) do
    convert_pair = fn {key, val} -> {to_erldn_intermediate(key), to_erldn_intermediate(val)} end
    keyword_list = pairs |> to_map |> Enum.map(convert_pair)
    {:map, keyword_list}
  end

  defp to_erldn_intermediate({:tag, tag, val}), do: {:tag, tag, to_erldn_intermediate(val)}
  defp to_erldn_intermediate(val), do: val

  # Structs are flattened to plain maps so erldn can serialise them.
  defp to_map(struct_or_map) do
    case struct_or_map do
      %{__struct__: _} -> Map.from_struct(struct_or_map)
      _ -> struct_or_map
    end
  end

  @doc """
  extracts a list from the tagged reversible representation; does not operate at all
  on the contents of the extracted list.

  ## Example:

      iex> Exdn.tagged_list_to_list {:list, [:foo]}
      [:foo]
  """
  @spec tagged_list_to_list({:list, [term]}) :: [term]
  def tagged_list_to_list({:list, list}), do: list

  @doc """
  extracts a char (as a string) from the tagged reversible representation.

  ## Example:

      iex> Exdn.tagged_char_to_string {:char, ?a}
      "a"
  """
  @spec tagged_char_to_string({:char, integer}) :: String.t()
  def tagged_char_to_string({:char, code}), do: to_string([code])

  @doc """
  interprets a tagged expression using the tagged reversible representation and
  handlers passed in as a keyword list. Assumes the expression inside the tag has
  already been translated from edn.

  ## Example:

      iex> tagged = {:tag, :foo, "blarg"}
      iex> handler = fn(_tag, val, _converter, _handlers) -> val <> "-converted" end
      iex> Exdn.evaluate_tagged_expr(tagged, fn x -> x end, [{:foo, handler}])
      "blarg-converted"
  """
  @spec evaluate_tagged_expr({:tag, atom, exdn}, converter, [{atom, handler}, ...]) :: term
  def evaluate_tagged_expr({:tag, tag, expr}, converter, handlers) do
    handler = handlers[tag]

    if handler do
      handler.(tag, expr, converter, handlers)
    else
      expr_string = inspect(expr)
      raise "Handler not found for tag #{tag} with tagged expression #{expr_string}"
    end
  end

  @doc """
  handlers for standard edn tagged expressions #inst and #uuid. If you need to supply
  your own custom handlers for other tags, you may wish to append them to this list of
  handlers.
  """
  @spec standard_handlers() :: [{:inst, handler} | {:uuid, handler}, ...]
  def standard_handlers do
    timestamp_handler = {:inst, fn _tag, val, _converter, _handlers -> inst_handler(val) end}
    uuid_handler = {:uuid, fn _tag, val, _converter, _handlers -> val |> to_string end}
    # TODO Discard Handler This shouldn't return nil; it should swallow the val.
    # discard_handler = { :_, fn(tag, val, _converter, _handlers) -> ??? end }
    [timestamp_handler, uuid_handler]
  end

  # Parses an RFC 3339 timestamp (charlist or binary) into a Calendar.DateTime.
  defp inst_handler(char_list) do
    {:ok, result} = char_list |> to_string |> Parse.rfc3339_utc()
    result
  end
end
|
lib/exdn.ex
| 0.913295
| 0.531088
|
exdn.ex
|
starcoder
|
defmodule EV.ChangesetHelper do
  @moduledoc """
  Helper meant for retrieving normalised changes from Ecto.Changeset.
  """

  @doc """
  Gets changes from a given changeset, recursively normalised.

  ## Options

    * `:carry_fields` - full_path [`:changeset_helper_opts`, :carry_fields]; optional; atom or list of atoms;
      specifies which, if any, fields should be taken from data when not present in changes; defaults to `[:id]`

  ## Examples

  ```elixir
  iex> changeset = %Ecto.Changeset{valid?: true, data: %{id: 1, foo: "abc"}, changes: %{foo: "cde", bar: "efg"}}
  iex> EV.ChangesetHelper.fetch_changes(changeset)
  {:ok, %{id: 1, foo: "cde", bar: "efg"}}

  iex> changeset = %Ecto.Changeset{valid?: true, data: %{id: 1, foo: "abc", baz: "123"}, changes: %{foo: "cde", bar: "efg"}}
  iex> EV.ChangesetHelper.fetch_changes(changeset, carry_fields: [:id, :baz])
  {:ok, %{id: 1, foo: "cde", bar: "efg", baz: "123"}}
  ```
  """
  @spec fetch_changes(Ecto.Changeset.t(), opts :: Keyword.t()) ::
          {:ok, any()} | {:error, Ecto.Changeset.t()}
  def fetch_changes(changeset, opts \\ [])

  def fetch_changes(%{valid?: true} = changeset, opts) do
    # :carry_fields may be a single atom or a list; List.wrap/1 normalises it.
    carry_fields =
      opts
      |> EV.ConfigHelper.get_config(:carry_fields, [:id], :changeset_helper_opts)
      |> List.wrap()

    {:ok, do_get_changes(changeset, carry_fields)}
  end

  # Invalid changesets are returned unchanged so callers can inspect errors.
  def fetch_changes(%{valid?: false} = changeset, _opts) do
    {:error, changeset}
  end

  # Changesets: copy each carried field from `data` into the changes (unless
  # already changed - Map.put_new/3), then normalise the result recursively.
  defp do_get_changes(%Ecto.Changeset{data: data, changes: changes}, carry_fields) do
    carry_fields
    |> Enum.reduce(changes, fn carried_field, acc ->
      case Map.fetch(data, carried_field) do
        {:ok, carried_field_value} -> Map.put_new(acc, carried_field, carried_field_value)
        :error -> acc
      end
    end)
    |> do_get_changes(carry_fields)
  end

  # Lists (e.g. a cast_assoc with many nested changesets) normalise element-wise.
  defp do_get_changes(list, carry_fields) when is_list(list) do
    Enum.map(list, &do_get_changes(&1, carry_fields))
  end

  # Plain maps normalise value-wise; structs other than Ecto.Changeset fall
  # through to the catch-all clause below and are returned untouched.
  defp do_get_changes(map, carry_fields) when is_map(map) and not is_struct(map) do
    map
    |> Enum.map(fn {k, v} -> {k, do_get_changes(v, carry_fields)} end)
    |> Enum.into(%{})
  end

  # Non-printable binaries are Base64-encoded so the result stays serialisable.
  defp do_get_changes(binary, _carry_fields) when is_binary(binary) do
    if String.printable?(binary) do
      binary
    else
      Base.encode64(binary)
    end
  end

  # Any other term is returned as-is.
  defp do_get_changes(term, _carry_fields), do: term

  @doc """
  Casts the given `params` onto `module`'s schema fields and returns a plain
  map containing only those fields. Raises (via `apply_action!/2`) when
  casting fails.
  """
  @spec cast_params!(map(), module()) :: map()
  def cast_params!(params, module) do
    fields = module.__schema__(:fields)

    module
    |> struct()
    |> Ecto.Changeset.cast(params, fields)
    |> Ecto.Changeset.apply_action!(:insert)
    |> Map.take(fields)
  end
end
|
lib/helpers/changeset_helper.ex
| 0.894091
| 0.586878
|
changeset_helper.ex
|
starcoder
|
defmodule HTMLAssertion.Matcher do
  @moduledoc false

  alias HTMLAssertion
  alias HTMLAssertion.{Selector}

  @compile {:inline, raise_match: 3}

  @typep assert_or_refute :: :assert | :refute

  @doc """
  Finds `selector` in `html` and returns the matched fragment (or `nil`).
  Raises `ExUnit.AssertionError` when the outcome contradicts `matcher`.
  """
  @spec selector(assert_or_refute, binary, binary()) :: nil | HTMLAssertion.html()
  def selector(matcher, html, selector) when is_binary(html) and is_binary(selector) do
    sub_html = Selector.find(html, selector)

    raise_match(matcher, sub_html == nil, fn
      :assert -> "Element `#{selector}` not found.\n\n\t#{html}\n"
      :refute -> "Selector `#{selector}` succeeded, but should have failed.\n\n\t#{html}\n"
    end)

    sub_html
  end

  @doc """
  Checks every `{attribute, expected_value}` pair against `html`,
  raising per attribute when the comparison contradicts `matcher`.
  """
  @spec attributes(assert_or_refute, HTMLAssertion.html(), HTMLAssertion.attributes()) :: any()
  def attributes(matcher, html, attributes) when is_list(attributes) do
    attributes
    |> Enum.into(%{}, fn {k, v} -> {to_string(k), v} end)
    |> Enum.each(fn {attribute, check_value} ->
      attr_value = Selector.attribute(html, attribute)
      match_attribute(matcher, attribute, check_value, attr_value, html)
    end)
  end

  @doc """
  Checks whether `html` matches the given regex / contains the given
  substring, raising when the result contradicts `matcher`.
  """
  @spec contain(assert_or_refute, binary(), Regex.t()) :: any()
  def contain(matcher, html, %Regex{} = value) when is_binary(html) do
    raise_match(matcher, !Regex.match?(value, html), fn
      :assert -> [message: "Value not matched.", left: html, right: value]
      :refute -> [message: "Value `#{inspect(value)}` matched, but shouldn't.", left: html, right: value]
    end)
  end

  @spec contain(assert_or_refute, HTMLAssertion.html(), HTMLAssertion.html()) :: any()
  def contain(matcher, html, value) when is_binary(html) and is_binary(value) do
    raise_match(matcher, !String.contains?(html, value), fn
      :assert -> [message: "Value not found.", left: html, right: value]
      :refute -> [message: "Value `#{inspect(value)}` found, but shouldn't.", left: html, right: value]
    end)
  end

  # Fixed spec: these clauses return normally when the check passes, so the
  # previous `no_return` annotation was wrong.
  @spec match_attribute(assert_or_refute, HTMLAssertion.attribute_name, HTMLAssertion.value, binary() | nil, HTMLAssertion.html) :: any()
  defp match_attribute(matcher, attribute, check_value, attr_value, html)

  # attribute should exist (check_value true) / should not exist (nil or false)
  defp match_attribute(matcher, attribute, check_value, attr_value, html) when check_value in [nil, true, false] do
    raise_match(matcher, (if check_value, do: attr_value == nil, else: attr_value != nil), fn
      :assert ->
        if check_value,
          do: "Attribute `#{attribute}` should exist.\n\n\t#{html}\n",
          else: "Attribute `#{attribute}` shouldn't exist.\n\n\t#{html}\n"

      :refute ->
        if check_value,
          do: "Attribute `#{attribute}` shouldn't exist.\n\n\t#{html}\n",
          else: "Attribute `#{attribute}` should exist.\n\n\t#{html}\n"
    end)
  end

  # a value was expected but the attribute is absent altogether
  defp match_attribute(matcher, attribute, _check_value, nil = _attr_value, html) do
    raise_match(matcher, matcher == :assert, fn
      _ -> "Attribute `#{attribute}` not found.\n\n\t#{html}\n"
    end)
  end

  # regex expectation: match the attribute value against the pattern
  defp match_attribute(matcher, attribute, %Regex{} = check_value, attr_value, html) do
    raise_match(matcher, !Regex.match?(check_value, attr_value), fn _ ->
      [
        message: "Matching `#{attribute}` attribute failed.\n\n\t#{html}.\n",
        left: check_value,
        right: attr_value
      ]
    end)
  end

  # class is special-cased: every expected class must appear individually
  defp match_attribute(matcher, "class", check_value, attr_value, html) do
    for check_class <- String.split(to_string(check_value), " ") do
      raise_match(matcher, !String.contains?(attr_value, check_class), fn
        :assert -> "Class `#{check_class}` not found in `#{attr_value}` class attribute\n\n\t#{html}\n"
        :refute -> "Class `#{check_class}` found in `#{attr_value}` class attribute\n\n\t#{html}\n"
      end)
    end
  end

  # any other attribute: compare the stringified expected value exactly
  defp match_attribute(matcher, attribute, check_value, attr_value, html) do
    str_check_value = to_string(check_value)

    raise_match(matcher, str_check_value != attr_value, fn _ ->
      [
        message: "Comparison `#{attribute}` attribute failed.\n\n\t#{html}.\n",
        left: str_check_value,
        right: attr_value
      ]
    end)
  end

  # Raises ExUnit.AssertionError when `condition` holds under :assert, or when
  # its negation holds under :refute. `message_fn` is only invoked on failure.
  defp raise_match(check, condition, message_fn) when check in [:assert, :refute] do
    cond do
      check == :assert -> condition
      check == :refute -> !condition
      true -> false
    end
    |> if do
      message_or_args = message_fn.(check)
      args = (is_list(message_or_args) && message_or_args) || [message: message_or_args]
      raise ExUnit.AssertionError, args
    end
  end
end
|
lib/html_assertion/matcher.ex
| 0.801004
| 0.566648
|
matcher.ex
|
starcoder
|
defmodule ScrapyCloudEx.Endpoints do
  @moduledoc """
  Documents commonalities between all endpoint-related functions.

  ## Options

  The last argument provided to most endpoint functions is a keyword list
  of options. These options are made available to the HttpAdapter during the
  API request.

  - `:decoder` - specifies how the response body should be processed. Can be
    a module implementing the `ScrapyCloudEx.Decoder` behaviour, or a function
    conforming to the `t:ScrapyCloudEx.Decoder.decoder_function/0` typespec.
    Defaults to `ScrapyCloudEx.Decoders.Default`.

  - `:headers` - list of headers that are added to the `ScrapyCloudEx.HttpAdapter.RequestConfig`
    `headers` attribute provided to the HttpAdapter instance making the API call.
    The default HttpAdapter does not make use of this option.

  - `:http_adapter` - specifies the module to use to make the HTTP request to
    the API. This module is expected to implement the `ScrapyCloudEx.HttpAdapter`
    behaviour. Defaults to `ScrapyCloudEx.HttpAdapters.Default`.
  """

  require Logger

  # warns on unscoped params and puts relevant parameters into scope
  @doc false
  @spec scope_params(Keyword.t(), atom, [atom, ...]) :: Keyword.t()
  def scope_params(params, scope_name, expected_scoped_params) do
    unscoped = params |> get_params(expected_scoped_params)
    scoped = Keyword.get(params, scope_name, [])

    warn_on_unscoped_params(scoped, unscoped, scope_name)

    # explicitly scoped values win over top-level ones
    scoped_params = unscoped |> Keyword.merge(scoped)

    params
    |> Enum.reject(fn {k, _} -> k in expected_scoped_params end)
    |> Keyword.put(scope_name, scoped_params)
  end

  # flattens the params nested under `scope` back onto the top level
  @doc false
  @spec merge_scope(Keyword.t(), atom()) :: Keyword.t()
  def merge_scope(params, scope) do
    scoped_params = Keyword.get(params, scope, [])

    params
    |> Keyword.merge(scoped_params)
    |> Keyword.delete(scope)
  end

  # Returns the subset of `params` whose keys are in `keys`, dropping nil values.
  @spec get_params(Keyword.t(), [atom]) :: Keyword.t()
  defp get_params(params, keys) do
    keys
    |> Enum.map(&{&1, Keyword.get(params, &1)})
    |> Enum.reject(fn {_, v} -> v == nil end)
  end

  @spec warn_on_unscoped_params(Keyword.t(), Keyword.t(), atom) :: any
  defp warn_on_unscoped_params(scoped, unscoped, scope_name) do
    # `!= []` instead of `length(...) > 0`: avoids an O(n) traversal.
    if unscoped != [] do
      Logger.warn(
        "values `#{inspect(unscoped)}` should be provided within the `#{scope_name}` parameter"
      )

      common_params = intersection(Keyword.keys(unscoped), Keyword.keys(scoped))

      if common_params != [] do
        Logger.warn(
          "top-level #{scope_name} params `#{inspect(common_params)}` will be overridden by values provided in `#{scope_name}` parameter"
        )
      end
    end
  end

  # Order-preserving intersection: elements of `a` that also appear in `b`.
  @spec intersection(list, list) :: list
  defp intersection(a, b) when is_list(a) and is_list(b) do
    items_only_in_a = a -- b
    a -- items_only_in_a
  end
end
|
lib/endpoints.ex
| 0.870961
| 0.607459
|
endpoints.ex
|
starcoder
|
defmodule Routemaster.Drains.Siphon do
  @moduledoc """
  Plucks the events belonging to one or more topics out of the current drain
  payload and hands them off to a dedicated siphon module.

  The siphon module must implement a `call/1` function, which is invoked with
  the list of siphoned `Routemaster.Drain.Event` structs as argument. The
  invocation happens inside a supervised `Task`, so all siphons are
  asynchronous and independent of each other.

  This drain plug is very similar to `Routemaster.Drains.Notify`, with the
  difference that the matched events are removed from the payload before it
  is passed downstream to the next drain in the pipeline.

  It's meant to be used when some topic should not be processed with the rest
  of the drain pipeline, and you want to extract it from the stream before it
  reaches the next drains. A common use case is when you care about every
  single event for a topic (e.g. fast changing resources where each event
  carries a data payload), and these need to be processed separately before a
  `Routemaster.Drains.Dedup` or `Routemaster.Drains.IgnoreStale` drain can
  discard any of them.

  ### Options

  * `:topic` (or `:topics`, plural): either a binary or a list of binaries.
    This is the topic or topics that will be removed from the current payload
    and sent to the siphon module.
  * `:to`: the siphon module that will receive the events.

  ### Examples

  ```elixir
  alias Routemaster.Drains.Siphon

  drain Siphon, topic: "burgers", to: BurgerSiphon
  drain Siphon, topics: ~w(coke fanta), to: DrinksSiphon
  ```
  """

  @supervisor DrainEvents.TaskSupervisor

  # Normalizes the drain options into `[topic: ..., siphon: ...]`.
  def init(opts) do
    [topic: fetch_topic!(opts), siphon: Keyword.fetch!(opts, :to)]
  end

  # Splits the payload, ships matching events to the siphon, and passes the
  # remainder downstream.
  def call(conn, topic: topic, siphon: siphon) do
    {siphoned, remaining} = partition(conn.assigns.events, topic)
    send_to_siphon(siphoned, siphon)
    Plug.Conn.assign(conn, :events, remaining)
  end

  # Nothing matched: skip spawning a task entirely.
  defp send_to_siphon([], _siphon), do: nil

  defp send_to_siphon(events, siphon) do
    Task.Supervisor.start_child(@supervisor, fn -> siphon.call(events) end)
  end

  # Reads the topic(s) under `:topic` or `:topics`, raising when neither is set.
  defp fetch_topic!(kw) do
    case Keyword.get(kw, :topic, Keyword.get(kw, :topics)) do
      nil -> raise KeyError, key: ":topic or :topics", term: kw
      topic -> topic
    end
  end

  # Membership test when multiple topics were configured...
  defp partition(events, topics) when is_list(topics) do
    Enum.split_with(events, &(&1.topic in topics))
  end

  # ...and straight equality for a single topic.
  defp partition(events, topic) do
    Enum.split_with(events, &(&1.topic == topic))
  end
end
|
lib/routemaster/drain/drains/siphon.ex
| 0.792665
| 0.779909
|
siphon.ex
|
starcoder
|
defmodule Populate do
  @moduledoc """
  The `Populate` module provides helpers for creating a builder
  interface which can be used with population specs.

  ## Usage

      defmodule Builder do
        use Populate

        def create(:frog, _opts), do: :frog
        def create(:lizard, _opts), do: :lizard
      end

      schema = [{:frog, 2}, :lizard]
      %{frog: [:frog, :frog], lizard: :lizard} = Builder.populate(schema)

  NOTE(review): this module uses the long-removed `Dict` module and the
  `Behaviour`/`defcallback` machinery, so it only compiles on old Elixir
  releases — confirm the target Elixir version before modernizing.
  """
  require Logger

  @type type :: Atom.t
  # A schema item: a bare type, a type with a count, or a type with options.
  @type item :: type
  | {type, Integer.t}
  | {type, Dict.t}
  @type schema :: [item]
  # A bare module (builder function assumed to be `create`), or an explicit
  # module/function pair.
  # NOTE(review): `{Atom.t | Atom.t}` looks like it was meant to be the tuple
  # type `{Atom.t, Atom.t}` — confirm before fixing.
  @type mod_fun :: Atom.t
  | {Atom.t | Atom.t}

  defmodule Interface do
    @moduledoc """
    The interface for a population builder.
    """
    use Behaviour

    @doc """
    Create the given item.
    """
    defcallback create(item :: Atom.t, opts :: Dict.t) ::
      {:ok, any} | {:error, any}
  end

  defmacro __using__(_env) do
    quote do
      @behaviour Populate.Interface

      # Injected convenience wrapper: `Builder.populate/1` delegates back to
      # `Populate.populate/2` with the using module as the builder.
      @spec populate(Populate.schema) :: Dict.t
      def populate(populace) do
        Populate.populate(populace, __MODULE__)
      end
    end
  end

  @doc """
  Using the schema and builder create function, build the population.
  """
  @spec populate(schema, mod_fun) :: Dict.t
  def populate(schema, mod) when is_atom(mod) do
    populate(schema, {mod, :create})
  end
  def populate(schema, mod_fun) when is_tuple(mod_fun) do
    populate(schema, mod_fun, [])
  end

  # Recursive worker: walks the schema, normalizing each item to `{name, opts}`
  # and accumulating `{name, items}` pairs into the final map.
  @doc false
  @spec populate(schema, mod_fun, Keyword.t) :: Dict.t
  defp populate([], _, acc), do: Enum.into(acc, %{})
  defp populate([type | rest], mod_fun, acc) when is_atom(type) do
    # Handle the type being specified without options
    populate([{type, []} | rest], mod_fun, acc)
  end
  defp populate([{type, count} | rest], mod_fun, acc) when is_integer(count) do
    # Handle the count being passed in as a bare integer
    populate([{type, [count: count]} | rest], mod_fun, acc)
  end
  defp populate([{name, opts} | rest], {mod, fun} = mod_fun, acc) do
    {%{type: type, count: count, all: all, each: each}, opts} =
      extract_opts(name, opts)
    # If the all spec is passed in, populate it and merge it with the
    # create options (built once, shared by every generated item)
    opts = Dict.merge(opts, populate(all, mod_fun))
    # Build `count` items lazily; the `:each` spec is re-populated inside the
    # closure so every item gets freshly built dependencies.
    items = fn ->
      opts = Dict.merge(opts, populate(each, mod_fun))
      apply(mod, fun, [type, Enum.into(opts, %{})])
    end
    |> Stream.repeatedly
    |> Enum.take(count)
    # If there is only 1 item (count == 1), unwrap it from the list
    items = if length(items) == 1, do: hd(items), else: items
    populate(rest, mod_fun, [{name, items} | acc])
  end

  # Splits the builder control options (`:type`, `:count`, `:all`, `:each`)
  # out of the user-supplied options, applying their defaults.
  @doc false
  @spec extract_opts(Populate.type, Dict.t) :: {Dict.t, Dict.t}
  defp extract_opts(type, opts) do
    # Extract and return options
    {type, opts} = Dict.pop(opts, :type, type)
    {count, opts} = Dict.pop(opts, :count, 1)
    {all, opts} = Dict.pop(opts, :all, [])
    {each, opts} = Dict.pop(opts, :each, [])
    {%{type: type, count: count, all: all, each: each}, opts}
  end
end
|
lib/populate.ex
| 0.798344
| 0.494568
|
populate.ex
|
starcoder
|
defmodule BrainWall.Solvers.MarksSolver do
  alias BrainWall.Solution

  @moduledoc """
  this is what I think the logic is for placing a point:

  1) of the edges that this point has, which edges are fixed on the other end?
  2) for each of those edges, compute the possible places where the unfixed endpoint
     could go, treat these possible places as a set
  3) find the intersection of those sets, these are the only places where the new
     point can be fixed
  4) if the set is empty, backtrack to the previous point placement and try the
     next possibility
  5) if the set is not empty, try each point, making sure that the point is within
     the hole, and that any edges that it completes do not cross any boundaries
     of the hole
  6) if there are no more points to place, compute the score and if it is better
     than the previous score (we need to pass this up and down the call chain),
     return the score and the solution as the new best solution

  the logic for what point to try next is: for all the fixed points, get the list
  of points that they connect to that are not yet fixed, and compute the possible
  locations for those points, try fixing the point with the fewest possible
  locations. If there is a point that has 0 possible locations, we need to
  backtrack because that point can never be filled
  """

  # Entry point: tries every candidate location for the first fixed point and
  # keeps the best-scoring solution found (per `Solution.get_best_solution/2`).
  def solve(problem) do
    solution = BrainWall.Solution.new(problem)
    fix_points = compute_initial_fix_points(solution)
    first_fixed_index = compute_first_fix_index(solution)
    fix_points
    |> Enum.reduce(solution, fn fp, acc ->
      # {x,y} = fp
      # IO.puts("Trying first point #{x},#{y}")
      new_solution = fix_point_and_solve(first_fixed_index, fp, solution)
      Solution.get_best_solution(acc, new_solution)
    end)
  end

  # Enumerates every integer lattice point inside the hole's bounding box,
  # inclusive on both axes. Points outside the hole polygon itself are
  # filtered out by `compute_initial_fix_points/1`.
  def get_points_in_hole(hole_points) do
    {{minx,_},{maxx,_}} = Enum.min_max_by(hole_points, fn {x,_y} -> x end)
    {{_,miny},{_,maxy}} = Enum.min_max_by(hole_points, fn {_x,y} -> y end)
    List.flatten(
      for x <- :lists.seq(minx, maxx, 1) do
        for y <- :lists.seq(miny, maxy, 1) do
          {x,y}
        end
      end)
  end

  # Candidate positions for the first fixed point: all lattice points of the
  # bounding box that actually lie inside the hole polygon.
  def compute_initial_fix_points(solution) do
    get_points_in_hole(solution.problem.hole.points)
    |> Enum.filter(fn p -> BrainWall.Cartesian.point_in_polygon?(p, solution.problem.hole.edges) end)
  end

  # Which figure vertex to fix first. Currently always index 0; kept as a
  # function so smarter heuristics can be slotted in.
  def compute_first_fix_index(_solution) do
    0
  end

  # Fixes point `fp` at `first_fixed_index` and recursively solves the rest,
  # backtracking when a candidate has no legal placements. Returns the best
  # complete solution reachable from this placement (or the partial solution
  # unchanged when the branch dead-ends).
  def fix_point_and_solve(first_fixed_index, fp, in_solution) do
    # IO.puts("Fixing point at index #{first_fixed_index}")
    solution = Solution.fix_point(in_solution, first_fixed_index, fp)
    solution
    |> Solution.get_unfixed_point_indices_connected_to_fixed_points()
    |> case do
      [] ->
        # No frontier left: all reachable points are fixed, so score the result.
        IO.puts("Computing score")
        Solution.compute_score(solution)
      unfixed ->
        # IO.puts("There are #{Enum.count(unfixed)} unfixed points left")
        # Try the most-constrained vertex first (fewest legal placements).
        points_to_try = rank_unfixed_indices(unfixed, solution)
        {unfixed_index, points} = List.first(points_to_try)
        if Enum.empty?(points) do
          # Dead end: some vertex has no legal placement, so backtrack.
          solution
        else
          points
          |> Enum.reduce(solution, fn point, acc ->
            Solution.get_best_solution(acc,
              fix_point_and_solve(unfixed_index, point, solution))
          end)
        end
    end
  end

  # Pairs each unfixed vertex index with its legal placements and sorts
  # ascending by placement count, implementing the most-constrained-first
  # heuristic described in the moduledoc.
  def rank_unfixed_indices(unfixed_indices, solution) do
    index_pairs = Enum.map(unfixed_indices, fn idx ->
      points = Solution.get_possible_fixed_point_for_unfixed_index(solution, idx)
      {idx,points}
    end)
    Enum.sort(index_pairs, fn {_,points1}, {_, points2} ->
      Enum.count(points1) < Enum.count(points2)
    end)
  end
end
|
brain_wall/lib/brain_wall/solvers/marks_solver.ex
| 0.720368
| 0.665566
|
marks_solver.ex
|
starcoder
|
defmodule Liaison.Strategy.Epmd do
  @moduledoc """
  Strategy for node connections via EPMD

  ```elixir
  config :liaison,
    strategy: [
      [
        strategy: #{__MODULE__},
        reconnect_period: 10,
        nodes: []
      ]
    ]
  ```

  ## Staying Connected

  Staying connected via the Epmd strategy is now easier than ever, just specify
  the nodes you want to connect to, and how often you want to attempt re-connect
  """

  @type strategy_config :: Keyword.t()

  # Defaults merged under any user-supplied configuration.
  @default_config [
    nodes: [],
    reconnect_period: 10
  ]

  alias Liaison.Logger
  alias Liaison.NodeHelper

  use GenServer

  @doc false
  def child_spec(opts) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, [opts]}
    }
  end

  @spec start_link(keyword) :: :ignored | {:error, any()} | {:ok, pid}
  def start_link(strategy_config) do
    config =
      merge_config(strategy_config)
      |> expand_nodes()

    GenServer.start_link(__MODULE__, config, name: __MODULE__)
  end

  # Overlays `config2` on top of `config1` (the defaults, unless given).
  defp merge_config(config1 \\ @default_config, config2) do
    Keyword.merge(config1, config2)
  end

  # Normalizes the configured node entries into node-name atoms.
  defp expand_nodes(config) do
    nodes = Enum.map(config[:nodes], &NodeHelper.to_nodename/1)
    Keyword.put(config, :nodes, nodes)
  end

  @doc false
  @spec init(any) :: {:ok, any}
  def init(state) do
    log(:info, "Monitoring started")
    Process.flag(:trap_exit, true)
    # Subscribe to :nodeup/:nodedown messages, then trigger the first
    # connection pass immediately.
    :net_kernel.monitor_nodes(true)
    send(self(), :ensure_connected)
    {:ok, state}
  end

  # ---------------- node up and down management --------------

  def handle_info(:ensure_connected, state) do
    {_already_con, connected, not_connected} = connect_all(state[:nodes])

    case connected do
      [] -> nil
      con -> log(:info, "Connected: #{inspect(con)}")
    end

    case not_connected do
      [] ->
        nil

      not_connected ->
        # Retry only while some configured nodes remain unreachable.
        log(:warn, "Couldnt connect: #{inspect(not_connected)}")
        schedule(:ensure_connected, state[:reconnect_period])
    end

    {:noreply, state}
  end

  def handle_info({:nodedown, node}, state) do
    log(:debug, "node #{node} down")
    send(self(), :ensure_connected)
    display_nodes()
    {:noreply, state}
  end

  def handle_info({:nodeup, node}, state) do
    log(:debug, "node #{node} up")
    display_nodes()
    {:noreply, state}
  end

  # --------------------- server calls --------------

  def handle_call(:get_config, _from, state) do
    {:reply, state, state}
  end

  def handle_call({:update_strategy, strategy}, _from, old_state) do
    new_state =
      merge_config(old_state, strategy)
      |> expand_nodes()

    {:reply, {old_state, new_state}, new_state}
  end

  # ---------------------- helper functions ------------

  defp log(level, msg) do
    Logger.log(level, "Epmd", msg)
  end

  defp schedule(msg, seconds) do
    Process.send_after(self(), msg, seconds * 1000)
  end

  defp display_nodes() do
    log(:debug, "node list: #{inspect(get_nodes())}")
  end

  # Attempts to connect every configured node.
  # Returns {already_connected, connected, not_connected}.
  defp connect_all(nodes) do
    default = %{already_connected: [], connected: [], not_connected: []}

    result =
      Enum.reduce(nodes, default, fn node, acc ->
        cond do
          node in get_nodes() ->
            Map.update!(acc, :already_connected, &[node | &1])

          connect(node) == true ->
            Map.update!(acc, :connected, &[node | &1])

          true ->
            Map.update!(acc, :not_connected, &[node | &1])
        end
      end)

    # Build the tuple explicitly. The previous implementation used
    # `Map.values/1 |> List.to_tuple()`, which relies on map traversal order —
    # an ordering Elixir/Erlang does not guarantee — and only happened to
    # match the documented {already_connected, connected, not_connected}
    # order for this particular key set.
    {result.already_connected, result.connected, result.not_connected}
  end

  defp connect(node), do: Node.connect(node)

  defp get_nodes(), do: Node.list()

  # -------------------- client calls ----------------

  @doc """
  Returns the current configuration of the strategy
  """
  @spec get_config :: keyword
  def get_config() do
    GenServer.call(__MODULE__, :get_config)
  end

  @doc """
  Updates the given module to adopt a new strategy. Returns {old config, new config}
  """
  @spec update_strategy(strategy_config) :: {:ok, {strategy_config, strategy_config}}
  def update_strategy(strat_config) do
    change = GenServer.call(__MODULE__, {:update_strategy, strat_config})
    {:ok, change}
  end
end
|
lib/strategy/empd.ex
| 0.620392
| 0.546496
|
empd.ex
|
starcoder
|
defmodule Flex.EngineAdapter.Mamdani do
  @moduledoc """
  Mamdani fuzzy inference was first introduced as a method to create a control system by synthesizing a set of linguistic control rules obtained from experienced human operators.

  In a Mamdani system, the output of each rule is a fuzzy set. Since Mamdani systems have more intuitive and easier to understand rule bases,
  they are well-suited to expert system applications where the rules are created from human expert knowledge, such as medical diagnostics.
  """
  alias Flex.{EngineAdapter, EngineAdapter.State, Variable}

  @behaviour EngineAdapter

  import Flex.Rule, only: [statement: 2, get_rule_parameters: 3]

  # No extra validation is required for Mamdani systems; the state passes
  # through unchanged.
  @impl EngineAdapter
  def validation(engine_state, _antecedent, _rules, _consequent),
    do: engine_state

  # Fuzzifies the crisp input vector against the antecedent variables using
  # the adapter-shared default fuzzification.
  @impl EngineAdapter
  def fuzzification(%State{input_vector: input_vector} = engine_state, antecedent) do
    fuzzy_antecedent = EngineAdapter.default_fuzzification(input_vector, antecedent, %{})
    %{engine_state | fuzzy_antecedent: fuzzy_antecedent}
  end

  # Evaluates every rule against the fuzzified inputs, then aggregates the
  # per-set rule strengths into the consequent's rule output.
  @impl EngineAdapter
  def inference(%State{fuzzy_antecedent: fuzzy_antecedent} = engine_state, rules, consequent) do
    fuzzy_consequent =
      fuzzy_antecedent
      |> inference_engine(rules, consequent)
      |> output_combination()
    %{engine_state | fuzzy_consequent: fuzzy_consequent}
  end

  # Defuzzifies the aggregated consequent with the centroid method to yield a
  # single crisp output.
  @impl EngineAdapter
  def defuzzification(%State{fuzzy_consequent: fuzzy_consequent} = engine_state) do
    %{engine_state | crisp_output: centroid_method(fuzzy_consequent)}
  end

  # Folds the rule list into the consequent variable. Rules may be either a
  # function (called with the rule's parameters plus the consequent) or a
  # declarative statement evaluated via `Flex.Rule.statement/2`.
  def inference_engine(_fuzzy_antecedent, [], consequent), do: consequent

  def inference_engine(fuzzy_antecedent, [rule | tail], consequent) do
    rule_parameters = get_rule_parameters(rule.antecedent, fuzzy_antecedent, []) ++ [consequent]
    consequent =
      if is_function(rule.statement) do
        rule.statement.(rule_parameters)
      else
        args = Map.merge(fuzzy_antecedent, %{consequent.tag => consequent})
        statement(rule.statement, args)
      end
    inference_engine(fuzzy_antecedent, tail, consequent)
  end

  # Aggregates each output fuzzy set's membership values with a
  # root-sum-square, producing one strength per fuzzy set.
  defp output_combination(cons_var) do
    output = Enum.map(cons_var.fuzzy_sets, fn x -> root_sum_square(cons_var.mf_values[x.tag]) end)
    %{cons_var | rule_output: output}
  end

  # A set no rule fired for has no mf values; treat its strength as 0.
  defp root_sum_square(nil), do: 0.0

  defp root_sum_square(mf_value) do
    mf_value
    |> Enum.map(fn x -> x * x end)
    |> Enum.sum()
    |> :math.sqrt()
  end

  @doc """
  Turns an consequent fuzzy variable (output) from a fuzzy value to a crisp value (centroid method).
  """
  @spec centroid_method(Flex.Variable.t()) :: float
  def centroid_method(%Variable{type: type} = fuzzy_var) when type == :consequent do
    fuzzy_to_crisp(fuzzy_var.fuzzy_sets, fuzzy_var.rule_output, 0, 0)
  end

  # Weighted average of the fuzzy-set centers, weighted by rule strength.
  # NOTE(review): if every rule output is 0 the denominator is 0 and the final
  # `nom / den` raises ArithmeticError — confirm whether upstream guarantees
  # at least one non-zero strength.
  defp fuzzy_to_crisp([], _input, nom, den), do: nom / den

  defp fuzzy_to_crisp([fs | f_tail], [input | i_tail], nom, den) do
    nom = nom + fs.mf_center * input
    den = den + input
    fuzzy_to_crisp(f_tail, i_tail, nom, den)
  end
end
|
lib/engine_adapters/mamdani.ex
| 0.846974
| 0.585753
|
mamdani.ex
|
starcoder
|
defmodule Kalevala.World.Room.Private do
  @moduledoc """
  Store private information for a room, e.g. characters in the room
  """

  # characters: characters currently present in the room
  # item_instances: item instances currently held by the room
  defstruct characters: [], item_instances: []
end
defmodule Kalevala.World.Room.Feature do
  @moduledoc """
  A room feature is a highlighted part of a room
  """

  # NOTE(review): field semantics inferred from names — :keyword is presumably
  # the term used to reference the feature, with short/long description text.
  # Confirm against callers.
  defstruct [:id, :keyword, :short_description, :description]
end
defmodule Kalevala.World.Room do
  @moduledoc """
  Rooms are the base unit of space in Kalevala
  """

  use GenServer

  require Logger

  alias Kalevala.Event
  alias Kalevala.Event.Message
  alias Kalevala.World.Room.Callbacks
  alias Kalevala.World.Room.Context
  alias Kalevala.World.Room.Events
  alias Kalevala.World.Room.Handler
  alias Kalevala.World.Room.Private

  @doc """
  Confirm movement for a character

  Voting events that were already aborted short-circuit and are returned
  unchanged; all other events are confirmed by the room process.
  """
  # Bug fix: this clause previously matched `topic: Voting`. With no `Voting`
  # alias in scope that expands to the unrelated atom `Elixir.Voting`, so the
  # clause could never match the `Kalevala.Event.Movement.Voting` events that
  # the `handle_call/3` clause below handles.
  def confirm_movement(event = %Event{topic: Event.Movement.Voting, data: %{aborted: true}}, _room_id) do
    event
  end

  def confirm_movement(event, room_id) do
    GenServer.call(global_name(room_id), event)
  end

  @doc """
  Replace internal room state
  """
  def update(pid, room) do
    GenServer.call(pid, {:update, room})
  end

  @doc """
  Replace internal room items state
  """
  def update_items(pid, item_instances) do
    GenServer.call(pid, {:update_items, item_instances})
  end

  @doc false
  def global_name(%{id: id}), do: global_name(id)

  def global_name(room_id), do: {:global, {__MODULE__, room_id}}

  @doc false
  def start_link(options) do
    # `:genserver_options` is split off so the remaining options can be
    # handed to `init/1` untouched.
    genserver_options = options.genserver_options
    options = Map.delete(options, :genserver_options)
    GenServer.start_link(__MODULE__, options, genserver_options)
  end

  @impl true
  def init(options) do
    Logger.info("Room starting - #{options.room.id}")

    config = options.config
    room = Callbacks.init(options.room)

    state = %{
      data: room,
      supervisor_name: config.supervisor_name,
      private: %Private{
        item_instances: options.item_instances
      }
    }

    # Run the `initialized` callback after start-up via handle_continue.
    {:ok, state, {:continue, :initialized}}
  end

  @impl true
  def handle_continue(:initialized, state) do
    Callbacks.initialized(state.data)
    {:noreply, state}
  end

  @impl true
  def handle_call(event = %Event{topic: Event.Movement.Voting}, _from, state) do
    # Let the room implementation vote on the movement, apply any context
    # side effects, and reply with the (possibly aborted) event.
    {context, event} = Handler.confirm_movement(state, event)
    Context.handle_context(context)
    state = Map.put(state, :data, context.data)
    {:reply, event, state}
  end

  def handle_call({:update, room}, _from, state) do
    state = %{state | data: room}
    {:reply, :ok, state}
  end

  def handle_call({:update_items, item_instances}, _from, state) do
    state = %{state | private: %{state.private | item_instances: item_instances}}
    {:reply, :ok, state}
  end

  @impl true
  def handle_info(event = %Event{}, state) do
    Events.handle_event(event, state)
  end

  def handle_info(message = %Message{}, state) do
    context =
      state
      |> Handler.event(message)
      |> Context.handle_context()

    state = Map.put(state, :data, context.data)
    {:noreply, state}
  end
end
defmodule Kalevala.World.Room.Handler do
  @moduledoc false

  # Thin wrappers around the `Kalevala.World.Room.Callbacks` protocol that
  # build a fresh `Context` from the room's GenServer state before dispatching
  # to the room implementation (`state.data`).

  alias Kalevala.World.Room.Callbacks
  alias Kalevala.World.Room.Context

  # Dispatches a generic event to the room implementation.
  def event(state, event) do
    Callbacks.event(state.data, Context.new(state), event)
  end

  # Items

  def load_item(state, item_instance) do
    Callbacks.load_item(state.data, item_instance)
  end

  def item_request_drop(state, event, item_instance) do
    Callbacks.item_request_drop(state.data, Context.new(state), event, item_instance)
  end

  def item_request_pickup(state, event, item_instance) do
    Callbacks.item_request_pickup(state.data, Context.new(state), event, item_instance)
  end

  # Movement

  def exits(state), do: Callbacks.exits(state.data)

  def movement_request(state, event, room_exit) do
    Callbacks.movement_request(state.data, Context.new(state), event, room_exit)
  end

  def confirm_movement(state, event) do
    Callbacks.confirm_movement(state.data, Context.new(state), event)
  end
end
defprotocol Kalevala.World.Room.Callbacks do
  @doc """
  Called when the room is initializing
  """
  def init(room)

  @doc """
  Called after the room process is started

  Directly after `init` is completed.
  """
  def initialized(room)

  @doc """
  Callback for when a new event is received
  """
  def event(room, context, event)

  @doc """
  Load the exits for a given room

  Used when a character is trying to move, the appropriate exit is chosen
  and forwarded into movement request callbacks. Since this is a common thing
  that will happen 99% of the time, Kalevala handles it.
  """
  def exits(room)

  @doc """
  Convert item instances into items
  """
  def load_item(room, item_instance)

  @doc """
  Callback for allowing an item drop off

  A character is requesting to drop an item, this lets the room
  accept or reject the request.
  """
  def item_request_drop(room, context, item_request_drop, item_instance)

  @doc """
  Callback for allowing an item pick up

  A character is requesting to pick up an item, this lets the room
  accept or reject the request.
  """
  def item_request_pickup(room, context, item_request_pickup, item_instance)

  @doc """
  Callback for the room to hook into movement between exits

  The character is requesting to move via an exit, a tuple allowing or rejecting
  the movement before being pitched up to the Zone should be returned.

  Can immediately terminate a room before being checked in a more detailed fashion
  with `confirm_movement/2` below.
  """
  def movement_request(room, context, movement_request, room_exit)

  @doc """
  Callback for confirming or aborting character movement

  Called while the Zone is checking each side of the exit to know if the movement
  is indeed allowed. Returning the original event allows movement to proceed, otherwise
  return an aborted event to prevent movement.

  Hook to allow for the room to reject movement for custom reasons, e.g. an NPC
  is blocking the exit and needs to be convinced first, or there is a trap blocking
  the exit.
  """
  def confirm_movement(room, context, event)
end
defmodule Kalevala.World.BasicRoom do
  @moduledoc """
  A basic room

  These are the minimum fields a room should have. You likely want more, so
  we have a protocol `Kalevala.World.Room.Callbacks` to let you create your own
  local struct.

  The following functions provide default implementations you can use for the
  `defimpl` of that protocol.

  ```elixir
  defimpl Kalevala.World.Room.Callbacks do
    alias Kalevala.World.BasicRoom

    @impl true
    def movement_request(_room, context, event, room_exit),
      do: BasicRoom.movement_request(context, event, room_exit)

    @impl true
    def confirm_movement(_room, context, event),
      do: BasicRoom.confirm_movement(context, event)

    @impl true
    def item_request_drop(_room, context, event, item_instance),
      do: BasicRoom.item_request_drop(context, event, item_instance)

    @impl true
    def item_request_pickup(_room, context, event, item_instance),
      do: BasicRoom.item_request_pickup(context, event, item_instance)

    # ...
  end
  ```
  """

  defstruct [:id]

  @doc """
  Default movement request handling: abort with `:no_exit` when no exit
  matched, otherwise proceed through the given exit.
  """
  def movement_request(_context, event, nil) do
    {:abort, event, :no_exit}
  end

  def movement_request(_context, event, room_exit) do
    {:proceed, event, room_exit}
  end

  @doc "Default movement confirmation: always allow the movement."
  def confirm_movement(context, event) do
    {context, event}
  end

  @doc "Default item drop handling: always proceed."
  def item_request_drop(_context, event, item_instance) do
    {:proceed, event, item_instance}
  end

  @doc """
  Default item pickup handling: abort with `:no_item` when the item is
  missing, otherwise proceed.
  """
  def item_request_pickup(_context, event, nil) do
    {:abort, event, :no_item, nil}
  end

  def item_request_pickup(_context, event, item_instance) do
    {:proceed, event, item_instance}
  end
end
|
lib/kalevala/world/room.ex
| 0.794943
| 0.451206
|
room.ex
|
starcoder
|
defmodule ArbejdQ.Job do
  @moduledoc """
  Job queued or running within ArbejdQ.

  When a job is `:running` it is taken by a worker node, and the job
  may not be executed by other nodes.

  `:status_updated` is to be updated at regular intervals. If `:status_updated` has not
  been updated for too long, other nodes are free to execute the job.

  There are two configuration options controlling these intervals:
  - `:update_interval` controls the interval between updating `:status_updated` (in seconds).
    Default: 60 (1 minute).
  - `:stale_job_period` is the period after which a job can be considered stale, and may be
    executed on a new worker node (in seconds).
    Default: 300 (5 minutes).
  """
  use TypedEctoSchema
  import Ecto.Changeset
  import Ecto.Query

  alias ArbejdQ.{
    Types.Term,
    Job
  }

  @primary_key {:id, Ecto.UUID, autogenerate: true}
  @foreign_key_type Ecto.UUID
  @timestamps_opts [type: :utc_datetime_usec]

  typed_schema "arbejdq_jobs" do
    field(:queue, :string)
    field(:worker_module, ArbejdQ.Types.Atom)
    field(:result, Term)
    field(:progress, Term)
    field(:worker_pid, Term)
    field(:status, ArbejdQ.Types.Status)
    field(:status_updated, :utc_datetime_usec)
    field(:expiration_time, :utc_datetime_usec)
    field(:completion_time, :utc_datetime_usec)
    # Optimistic-locking counter, bumped by `optimistic_lock/2` in changeset/2.
    field(:lock_version, :integer, default: 1)
    field(:stale_counter, :integer, default: 0)
    embeds_many(:resource_requirements, ArbejdQ.ResourceRequirement)
    timestamps()
  end

  # Casts the mutable job fields, requires queue/worker_module, and guards
  # concurrent updates with optimistic locking on :lock_version.
  @spec changeset(Job.t(), map) :: Ecto.Changeset.t()
  def changeset(struct, params) do
    struct
    |> cast(params, [
      :queue,
      :worker_module,
      :result,
      :progress,
      :worker_pid,
      :status,
      :status_updated,
      :expiration_time,
      :completion_time,
      :stale_counter
    ])
    |> validate_required([:queue, :worker_module])
    |> optimistic_lock(:lock_version)
  end

  # Builds an Ecto.Multi that inserts the job and then writes the job
  # parameters directly to the table in a second step.
  @spec build(String.t(), atom, term, map) :: Ecto.Multi.t()
  def build(queue, worker_module, parameters, params \\ %{}) do
    Ecto.Multi.new()
    |> Ecto.Multi.insert(
      :insert,
      %__MODULE__{}
      |> changeset(
        Map.merge(
          %{
            queue: queue,
            worker_module: worker_module
          },
          params
        )
      )
      |> maybe_put_resource_requirements(params)
    )
    |> Ecto.Multi.run(:update, fn _repo, changes ->
      job = changes[:insert]
      # Writes `parameters` via a schemaless query with a manual
      # `term_to_binary` — presumably because `parameters` is not a field of
      # this schema. NOTE(review): the `job` query binding shadows the outer
      # `job` variable; the pinned `job.id` is evaluated in the caller scope,
      # but the shadowing makes this easy to misread — consider renaming.
      Ecto.Query.from(job in "arbejdq_jobs", where: job.id == ^UUID.string_to_binary!(job.id))
      |> ArbejdQ.repo().update_all(set: [parameters: :erlang.term_to_binary(parameters)])
      {:ok, job}
    end)
  end

  # Attaches the `:resource_requirements` embeds only when provided.
  @spec maybe_put_resource_requirements(Ecto.Changeset.t(), map()) :: Ecto.Changeset.t()
  defp maybe_put_resource_requirements(changeset, %{resource_requirements: resource_requirements}) do
    changeset
    |> put_embed(:resource_requirements, resource_requirements)
  end

  defp maybe_put_resource_requirements(changeset, _), do: changeset

  # Query: all queued jobs of a queue, oldest first.
  @spec list_queued_jobs(String.t()) :: %Ecto.Query{}
  def list_queued_jobs(queue_name) do
    from(job in Job,
      where: job.queue == ^queue_name and job.status == ^:queued,
      order_by: job.inserted_at
    )
  end

  # Same as list_queued_jobs/1, capped at `max_jobs` results.
  @spec list_queued_jobs(String.t(), non_neg_integer) :: %Ecto.Query{}
  def list_queued_jobs(queue_name, max_jobs) do
    from(job in Job,
      where: job.queue == ^queue_name and job.status == ^:queued,
      order_by: job.inserted_at,
      limit: ^max_jobs
    )
  end

  # Query: a single job by id.
  @spec get_job(String.t()) :: %Ecto.Query{}
  def get_job(job_id) do
    from(job in Job,
      where: job.id == ^job_id
    )
  end

  # Query: running jobs whose status_updated is older than the given
  # timestamp, i.e. candidates for takeover by another worker node.
  @spec list_stale_jobs(String.t(), DateTime.t()) :: %Ecto.Query{}
  def list_stale_jobs(queue_name, stale_progress_timestamp) do
    from(job in Job,
      where: job.queue == ^queue_name and job.status == ^:running,
      where: job.status_updated < ^stale_progress_timestamp,
      order_by: job.inserted_at
    )
  end

  # Query: completed jobs whose expiration_time has passed.
  @spec list_expired_jobs(String.t(), DateTime.t()) :: %Ecto.Query{}
  def list_expired_jobs(queue_name, expiration_time) do
    from(job in Job,
      where: job.queue == ^queue_name and job.status == ^:done,
      where: job.expiration_time < ^expiration_time,
      order_by: job.inserted_at
    )
  end

  # Query: every job, oldest first.
  @spec list_all :: %Ecto.Query{}
  def list_all do
    from(job in Job,
      order_by: job.inserted_at
    )
  end
end
|
lib/arbejd_q/job.ex
| 0.820397
| 0.403802
|
job.ex
|
starcoder
|
defmodule Proj do
  @moduledoc """
  Provides functions to transform coordinates between given coordinate systems.

      iex> {:ok, bng} = Proj.from_epsg(27700) # British National Grid CRS is EPSG:27700
      {:ok, #Proj<+init=epsg:27700 ...>}
      iex> Proj.to_lat_lng!({529155, 179699}, bng)
      {51.50147938477216, -0.1406319210455952}
  """

  # Degree <-> radian conversion factors used by to_deg/1 and to_rad/1.
  @deg_rad :math.pi / 180
  @rad_deg 180 / :math.pi

  # Load the NIF as soon as this module is loaded; the native implementations
  # replace the `raise "NIF not loaded"` stubs below.
  @on_load :load

  # :pj wraps the opaque NIF projection resource.
  defstruct [:pj]

  defimpl Inspect, for: Proj do
    # Render the (trimmed) PROJ.4 definition instead of the raw NIF resource.
    def inspect(proj, _opts) do
      "#Proj<#{String.trim(Proj.get_def(proj))}>"
    end
  end

  # Loads the `proj_nif` shared library from the :proj application's priv dir.
  def load do
    filename = :filename.join(:code.priv_dir(:proj), 'proj_nif')
    :ok = :erlang.load_nif(filename, 0)
  end

  @doc """
  Returns a new Proj projection specification object for a given PROJ.4
  parameter list.

  Returns `{:ok, proj}` on success, or `{:error, "reason"}` if the PROJ.4
  parameter string is invalid.

  ## Examples

      Proj.from_def("+init=epsg:4326")
      Proj.from_def("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
      Proj.from_def("+init=world:bng")
      Proj.from_def("+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000
      +y_0=-100000 +ellps=airy +datum=OSGB36 +units=m +no_defs")

  See https://trac.osgeo.org/proj/wiki/GenParms for documentation on how these
  parameter lists work.

  One way of finding the PROJ.4 parameter list you require is to search
  http://spatialreference.org/ for your desired CRS and find the PROJ.4
  parameter list under the "Proj4" link on a CRS's page.
  """
  def from_def(_def) do
    raise "NIF not loaded"
  end

  @doc """
  Transforms coordinates from one Proj CRS to another.

  Coordinates are given in the order `{x, y, z}`, or for geographic coordinates,
  `{longitude, latitude, z}`, where `z` is the altitude above the geoid of the
  CRS. `longitude` and `latitude` must be given in radians. `Proj.to_rad/1`
  may be helpful if you have coordinates in degrees.

  Returns `{:ok, {x, y, z}}` on success, or `{:error, "reason"}` if the PROJ.4
  library was unable to perform a transformation. If geographic coordinates are
  returned, they will be in the order `{longitude, latitude, z}`, and will be in
  radians.
  """
  def transform({_, _, _}, _from_proj, _to_proj) do
    raise "NIF not loaded"
  end

  @doc """
  Returns the `def` string given to create the given Proj object, expanded to
  its fullest form if possible.

      iex> Proj.get_def(Proj.wgs84)
      " +init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
  """
  def get_def(_proj) do
    raise "NIF not loaded"
  end

  @doc """
  Returns a Proj object for the WGS84 geographic coordinate reference system.

  WGS84 is the standard coordinate system used for GPS and is most likely what
  you need when working with `{latitude, longitude}` coordinates.
  """
  def wgs84 do
    raise "NIF not loaded"
  end

  @doc """
  Turns a `{longitude_radians, latitude_radians, z}` tuple into
  `{longitude_degrees, latitude_degrees, z}`.
  """
  def to_deg({lon, lat, z}) do
    {lon * @rad_deg, lat * @rad_deg, z}
  end

  @doc """
  Turns a `{longitude_degrees, latitude_degrees, z}` tuple into
  `{longitude_radians, latitude_radians, z}`.
  """
  def to_rad({lon, lat, z}) do
    {lon * @deg_rad, lat * @deg_rad, z}
  end

  @doc """
  Returns a Proj object for a given known PROJ.4 init file definition.

  Returns `{:ok, proj}` on success, or `{:error, "reason"}` if the definition is
  not found.

  On Linux, by default, these definitions should be stored in `/usr/share/proj/`
  with your PROJ.4 installation.

      Proj.from_known_def("epsg", "4326") # WGS84
      Proj.from_known_def("world", "bng") # British National Grid
  """
  def from_known_def(file, name) do
    from_def("+init=#{file}:#{name}")
  end

  @doc """
  Returns a Proj object for a given EPSG code from the EPSG Geodetic Parameter
  Dataset.

  Returns `{:ok, proj}` on success, or `{:error, "reason"}` if the EPSG
  definition is not found.

      Proj.from_epsg(4326) # WGS84
      Proj.from_epsg(27700) # British National Grid
  """
  def from_epsg(name) do
    from_known_def("epsg", name)
  end

  @doc """
  Converts a given `{easting, northing}` pair and its CRS `proj` to a WGS84
  `{latitude, longitude}` pair, where `latitude` and `longitude` are in degrees.
  This function raises on error, unlike `Proj.transform/3`.

  This is a convenience function for a common use case of Proj.

      iex> {:ok, bng} = Proj.from_epsg(27700)
      {:ok, #Proj<+init=epsg:27700 ...>}
      iex> Proj.to_lat_lng!({529155, 179699}, bng)
      {51.50147938477216, -0.1406319210455952}
  """
  def to_lat_lng!({x, y}, proj) do
    case transform({x, y, 0}, proj, wgs84()) do
      {:ok, coords} ->
        # PROJ returns {lng, lat, z}; swap into the conventional {lat, lng}.
        {lng, lat, _z} = to_deg(coords)
        {lat, lng}
      {:error, error} ->
        raise error
    end
  end

  @doc """
  Converts a given WGS84 `{latitude, longitude}` pair in degrees to the
  equivalent `{easting, northing}` in the CRS `proj`.
  This function raises on error, unlike `Proj.transform/3`.

  This is a convenience function for a common use case of Proj.

      iex> {:ok, bng} = Proj.from_epsg(27700)
      {:ok, #Proj<+init=epsg:27700 ...>}
      iex> Proj.from_lat_lng!({51.501479, -0.140631}, bng)
      {529155.0658918166, 179698.9583449281}
  """
  def from_lat_lng!({lat, lng}, proj) do
    case transform(to_rad({lng, lat, 0}), wgs84(), proj) do
      {:ok, {x, y, _z}} ->
        {x, y}
      {:error, error} ->
        raise error
    end
  end
end
|
lib/proj.ex
| 0.899636
| 0.581719
|
proj.ex
|
starcoder
|
# Functional lenses: composable "pointers" into data structures that support
# both reading (`to_list/2`, `one!/2`) and updating (`map/3`, `put/3`,
# `get_and_map/3`) the focused parts. Lenses compose with `seq/2` (or `|>`),
# `both/2` and `multiple/1`. Definitions below use the `deflens`/`deflens_raw`
# macros from `Lens.Macros`, which also generate the pipeable two-argument
# forms (e.g. `Lens.key(:a) |> Lens.key(:b)`).
defmodule Lens do
  use Lens.Macros

  # A lens is callable in two modes: `:get` returns the list of focused parts;
  # `:get_and_update` returns `{focused_results, updated_data}`.
  @opaque t :: (:get, any, function -> list(any)) | (:get_and_update, any, function -> {list(any), any})

  @doc ~S"""
  Returns a lens that does not focus on any part of the data.
  iex> Lens.empty |> Lens.to_list(:anything)
  []
  iex> Lens.empty |> Lens.map(1, &(&1 + 1))
  1
  """
  @spec empty :: t
  deflens_raw empty, do: fn data, _fun -> {[], data} end

  @doc ~S"""
  Returns a lens that ignores the data and always focuses on the given value.
  iex> Lens.const(3) |> Lens.one!(:anything)
  3
  iex> Lens.const(3) |> Lens.map(1, &(&1 + 1))
  4
  iex> import Integer
  iex> lens = Lens.keys([:a, :b]) |> Lens.match(fn v -> if is_odd(v), do: Lens.root, else: Lens.const(0) end)
  iex> Lens.map(lens, %{a: 11, b: 12}, &(&1 + 1))
  %{a: 12, b: 1}
  """
  @spec const(any) :: t
  deflens_raw const(value) do
    fn _data, fun ->
      # The update result replaces the whole data, not a part of it.
      {res, updated} = fun.(value)
      {[res], updated}
    end
  end

  @doc ~S"""
  Returns a lens that yields the entirety of the data currently under focus.
  iex> Lens.to_list(Lens.root, :data)
  [:data]
  iex> Lens.map(Lens.root, :data, fn :data -> :other_data end)
  :other_data
  iex> Lens.key(:a) |> Lens.both(Lens.root, Lens.key(:b)) |> Lens.to_list(%{a: %{b: 1}})
  [%{b: 1}, 1]
  """
  @spec root :: t
  deflens_raw root do
    fn data, fun ->
      {res, updated} = fun.(data)
      {[res], updated}
    end
  end

  @doc ~S"""
  Select the lens to use based on a matcher function
  iex> selector = fn
  ...>   {:a, _} -> Lens.at(1)
  ...>   {:b, _, _} -> Lens.at(2)
  ...> end
  iex> Lens.match(selector) |> Lens.one!({:b, 2, 3})
  3
  """
  @spec match((any -> t)) :: t
  deflens_raw match(matcher_fun) do
    fn data, fun ->
      # The matcher picks a lens at traversal time, based on the actual data.
      get_and_map(matcher_fun.(data), data, fun)
    end
  end

  @doc ~S"""
  Returns a lens that focuses on the n-th element of a list or tuple.
  iex> Lens.at(2) |> Lens.one!({:a, :b, :c})
  :c
  iex> Lens.at(1) |> Lens.map([:a, :b, :c], fn :b -> :d end)
  [:a, :d, :c]
  """
  @spec at(non_neg_integer) :: t
  deflens_raw at(index) do
    fn data, fun ->
      {res, updated} = fun.(get_at_index(data, index))
      {[res], set_at_index(data, index, updated)}
    end
  end

  @doc ~S"""
  An alias for `at`.
  """
  @spec index(non_neg_integer) :: t
  deflens index(index), do: at(index)

  @doc ~S"""
  Returns a lens that focuses on all of the supplied indices.
  iex> Lens.indices([0, 2]) |> Lens.to_list([:a, :b, :c])
  [:a, :c]
  iex> Lens.indices([0, 2]) |> Lens.map([1, 2, 3], &(&1 + 1))
  [2, 2, 4]
  """
  @spec indices([non_neg_integer]) :: t
  deflens indices(indices), do: indices |> Enum.map(&index/1) |> multiple

  @doc ~S"""
  Returns a lens that focuses between a given index and the previous one in a list. It will always return a nil when
  accessing, but can be used to insert elements.
  iex> Lens.before(2) |> Lens.one!([:a, :b, :c])
  nil
  iex> Lens.before(2) |> Lens.map([:a, :b, :c], fn nil -> :d end)
  [:a, :b, :d, :c]
  """
  @spec before(non_neg_integer) :: t
  deflens_raw before(index) do
    fn data, fun ->
      # There is no existing element "between" indices, so the callback gets nil
      # and its update result is spliced in at the given position.
      {res, item} = fun.(nil)
      {init, tail} = Enum.split(data, index)
      {[res], init ++ [item] ++ tail}
    end
  end

  @doc ~S"""
  Returns a lens that focuses between a given index and the next one in a list. It will always return a nil when
  accessing, but can be used to insert elements.
  iex> Lens.behind(1) |> Lens.one!([:a, :b, :c])
  nil
  iex> Lens.behind(1) |> Lens.map([:a, :b, :c], fn nil -> :d end)
  [:a, :b, :d, :c]
  """
  @spec behind(non_neg_integer) :: t
  deflens_raw behind(index) do
    fn data, fun ->
      {res, item} = fun.(nil)
      # Split one past the index so the new item lands after it.
      {init, tail} = Enum.split(data, index + 1)
      {[res], init ++ [item] ++ tail}
    end
  end

  @doc ~S"""
  Returns a lens that focuses before the first element of a list. It will always return a nil when accessing, but can
  be used to prepend elements.
  iex> Lens.front |> Lens.one!([:a, :b, :c])
  nil
  iex> Lens.front |> Lens.map([:a, :b, :c], fn nil -> :d end)
  [:d, :a, :b, :c]
  """
  @spec front :: t
  deflens front, do: before(0)

  @doc ~S"""
  Returns a lens that focuses after the last element of a list. It will always return a nil when accessing, but can
  be used to append elements.
  iex> Lens.back |> Lens.one!([:a, :b, :c])
  nil
  iex> Lens.back |> Lens.map([:a, :b, :c], fn nil -> :d end)
  [:a, :b, :c, :d]
  """
  @spec back :: t
  deflens_raw back do
    fn data, fun ->
      # "Behind the last element" — the position is computed lazily from the
      # current length of the data.
      data |> Enum.count() |> behind |> get_and_map(data, fun)
    end
  end

  @doc ~S"""
  Returns a lens that focuses on the value under `key`.
  iex> Lens.to_list(Lens.key(:foo), %{foo: 1, bar: 2})
  [1]
  iex> Lens.map(Lens.key(:foo), %{foo: 1, bar: 2}, fn x -> x + 10 end)
  %{foo: 11, bar: 2}
  If the key doesn't exist in the map a nil will be returned or passed to the update function.
  iex> Lens.to_list(Lens.key(:foo), %{})
  [nil]
  iex> Lens.map(Lens.key(:foo), %{}, fn nil -> 3 end)
  %{foo: 3}
  """
  @spec key(any) :: t
  deflens_raw key(key) do
    fn data, fun ->
      {res, updated} = fun.(get_at_key(data, key))
      {[res], set_at_key(data, key, updated)}
    end
  end

  @doc ~S"""
  Returns a lens that focuses on the value under the given key. If the key does not exist an error will be raised.
  iex> Lens.key!(:a) |> Lens.one!(%{a: 1, b: 2})
  1
  iex> Lens.key!(:a) |> Lens.one!([a: 1, b: 2])
  1
  iex> Lens.key!(:c) |> Lens.one!(%{a: 1, b: 2})
  ** (KeyError) key :c not found in: %{a: 1, b: 2}
  """
  @spec key!(any) :: t
  deflens_raw key!(key) do
    fn data, fun ->
      {res, updated} = fun.(fetch_at_key!(data, key))
      {[res], set_at_key(data, key, updated)}
    end
  end

  @doc ~S"""
  Returns a lens that focuses on the value under the given key. If they key does not exist it focuses on nothing.
  iex> Lens.key?(:a) |> Lens.to_list(%{a: 1, b: 2})
  [1]
  iex> Lens.key?(:a) |> Lens.to_list([a: 1, b: 2])
  [1]
  iex> Lens.key?(:c) |> Lens.to_list(%{a: 1, b: 2})
  []
  """
  @spec key?(any) :: t
  deflens_raw key?(key) do
    fn data, fun ->
      case fetch_at_key(data, key) do
        :error ->
          # Missing key: focus on nothing and leave the data untouched.
          {[], data}
        {:ok, value} ->
          {res, updated} = fun.(value)
          {[res], set_at_key(data, key, updated)}
      end
    end
  end

  @doc ~S"""
  Returns a lens that focuses on the values of all the keys.
  iex> Lens.keys([:a, :c]) |> Lens.to_list(%{a: 1, b: 2, c: 3})
  [1, 3]
  iex> Lens.keys([:a, :c]) |> Lens.map([a: 1, b: 2, c: 3], &(&1 + 1))
  [a: 2, b: 2, c: 4]
  If any of the keys doesn't exist the update function will receive a nil.
  iex> Lens.keys([:a, :c]) |> Lens.map(%{a: 1, b: 2}, fn nil -> 3; x -> x end)
  %{a: 1, b: 2, c: 3}
  """
  @spec keys(nonempty_list(any)) :: t
  deflens keys(keys), do: keys |> Enum.map(&Lens.key/1) |> multiple

  @doc ~S"""
  Returns a lens that focuses on the values of all the keys. If any of the keys does not exist, an error is raised.
  iex> Lens.keys!([:a, :c]) |> Lens.to_list(%{a: 1, b: 2, c: 3})
  [1, 3]
  iex> Lens.keys!([:a, :c]) |> Lens.map([a: 1, b: 2, c: 3], &(&1 + 1))
  [a: 2, b: 2, c: 4]
  iex> Lens.keys!([:a, :c]) |> Lens.to_list(%{a: 1, b: 2})
  ** (KeyError) key :c not found in: %{a: 1, b: 2}
  """
  @spec keys!(nonempty_list(any)) :: t
  deflens keys!(keys), do: keys |> Enum.map(&Lens.key!/1) |> multiple

  @doc ~S"""
  Returns a lens that focuses on the values of all the keys. If any of the keys does not exist, it is ignored.
  iex> Lens.keys?([:a, :c]) |> Lens.to_list(%{a: 1, b: 2, c: 3})
  [1, 3]
  iex> Lens.keys?([:a, :c]) |> Lens.map([a: 1, b: 2, c: 3], &(&1 + 1))
  [a: 2, b: 2, c: 4]
  iex> Lens.keys?([:a, :c]) |> Lens.to_list(%{a: 1, b: 2})
  [1]
  """
  @spec keys?(nonempty_list(any)) :: t
  deflens keys?(keys), do: keys |> Enum.map(&Lens.key?/1) |> multiple

  @doc ~S"""
  Returns a lens that focuses on all the values in an enumerable.
  iex> Lens.all |> Lens.to_list([1, 2, 3])
  [1, 2, 3]
  Does work with updates but produces a list from any enumerable by default:
  iex> Lens.all |> Lens.map(MapSet.new([1, 2, 3]), &(&1 + 1))
  [2, 3, 4]
  See [into](#into/2) on how to rectify this.
  """
  @spec all :: t
  deflens_raw all do
    fn data, fun ->
      # Accumulate by prepending, then reverse once — O(n) overall.
      {res, updated} =
        Enum.reduce(data, {[], []}, fn item, {res, updated} ->
          {res_item, updated_item} = fun.(item)
          {[res_item | res], [updated_item | updated]}
        end)
      {Enum.reverse(res), Enum.reverse(updated)}
    end
  end

  @doc ~S"""
  Compose a pair of lens by applying the second to the result of the first
  iex> Lens.seq(Lens.key(:a), Lens.key(:b)) |> Lens.one!(%{a: %{b: 3}})
  3
  Piping lenses has the exact same effect:
  iex> Lens.key(:a) |> Lens.key(:b) |> Lens.one!(%{a: %{b: 3}})
  3
  """
  @spec seq(t, t) :: t
  deflens_raw seq(lens1, lens2) do
    fn data, fun ->
      # Each lens1 focus yields a list of lens2 results; flatten them at the end.
      {res, changed} =
        get_and_map(lens1, data, fn item ->
          get_and_map(lens2, item, fun)
        end)
      {Enum.concat(res), changed}
    end
  end

  @doc ~S"""
  Combine the composition of both lens with the first one.
  iex> Lens.seq_both(Lens.key(:a), Lens.key(:b)) |> Lens.to_list(%{a: %{b: :c}})
  [:c, %{b: :c}]
  """
  @spec seq_both(t, t) :: t
  deflens seq_both(lens1, lens2), do: both(seq(lens1, lens2), lens1)

  @doc ~S"""
  Given a lens L this creates a lens that applies L, then applies L to the results of that application and so on,
  focusing on all the results encountered on the way.
  iex> data = %{
  ...>   items: [
  ...>     %{id: 1, items: []},
  ...>     %{id: 2, items: [
  ...>       %{id: 3, items: []}
  ...>     ]}
  ...>   ]}
  iex> lens = Lens.recur(Lens.key(:items) |> Lens.all) |> Lens.key(:id)
  iex> Lens.to_list(lens, data)
  [1, 3, 2]
  Note that it does not focus on the root item. You can remedy that with `Lens.root`:
  iex> data = %{
  ...>   id: 4,
  ...>   items: [
  ...>     %{id: 1, items: []},
  ...>     %{id: 2, items: [
  ...>       %{id: 3, items: []}
  ...>     ]}
  ...>   ]
  ...> }
  iex> lens = Lens.both(Lens.recur(Lens.key(:items) |> Lens.all), Lens.root) |> Lens.key(:id)
  iex> Lens.to_list(lens, data)
  [1, 3, 2, 4]
  """
  @spec recur(t) :: t
  deflens_raw recur(lens), do: &do_recur(lens, &1, &2)

  @doc ~S"""
  Just like `recur` but also focuses on the root of the data.
  iex> data = {:x, [{:y, []}, {:z, [{:w, []}]}]}
  iex> Lens.recur_root(Lens.at(1) |> Lens.all()) |> Lens.at(0) |> Lens.to_list(data)
  [:y, :w, :z, :x]
  """
  deflens recur_root(lens), do: Lens.both(Lens.recur(lens), Lens.root())

  @doc ~s"""
  Returns a lens that focuses on what both the lenses focus on.
  iex> Lens.both(Lens.key(:a), Lens.key(:b) |> Lens.at(1)) |> Lens.to_list(%{a: 1, b: [2, 3]})
  [1, 3]
  Bear in mind that what the first lens focuses on will be processed first. Other functions in the library are designed
  so that the part is processed before the whole and it is advisable to do the same when using this function directly.
  Not adhering to this principle might lead to the second lens not being able to perform its traversal on a changed
  version of the structure.
  iex> Lens.both(Lens.root, Lens.key(:a)) |> Lens.get_and_map(%{a: 1}, fn x -> {x, :foo} end)
  ** (FunctionClauseError) no function clause matching in Access.fetch/2
  iex> Lens.both(Lens.key(:a), Lens.root) |> Lens.get_and_map(%{a: 1}, fn x -> {x, :foo} end)
  {[1, %{a: :foo}], :foo}
  """
  @spec both(t, t) :: t
  deflens_raw both(lens1, lens2) do
    fn data, fun ->
      # lens2 traverses the data already updated by lens1 — order matters.
      {res1, changed1} = get_and_map(lens1, data, fun)
      {res2, changed2} = get_and_map(lens2, changed1, fun)
      {res1 ++ res2, changed2}
    end
  end

  @doc """
  Combines the two provided lenses in a way similar to `seq`. However instead of only focusing on what the final lens
  would focus on, it focuses on pairs of the form `{context, part}`, where context is the focus of the first lens in
  which the focus of the second lens was found.
  iex> lens = Lens.context(Lens.keys([:a, :c]), Lens.key(:b) |> Lens.all())
  iex> Lens.to_list(lens, %{a: %{b: [1, 2]}, c: %{b: [3]}})
  [{%{b: [1, 2]}, 1}, {%{b: [1, 2]}, 2}, {%{b: [3]}, 3}]
  iex> Lens.map(lens, %{a: %{b: [1, 2]}, c: %{b: [3]}}, fn({%{b: bs}, value}) ->
  ...>   length(bs) + value
  ...> end)
  %{a: %{b: [3, 4]}, c: %{b: [4]}}
  """
  @spec context(t, t) :: t
  deflens_raw context(context_lens, item_lens) do
    fn data, fun ->
      {results, changed} =
        get_and_map(context_lens, data, fn context ->
          get_and_map(item_lens, context, fn item -> fun.({context, item}) end)
        end)
      {Enum.concat(results), changed}
    end
  end

  @doc ~S"""
  Returns a lens that focuses on what all of the supplied lenses focus on.
  iex> Lens.multiple([Lens.key(:a), Lens.key(:b), Lens.root]) |> Lens.to_list(%{a: 1, b: 2})
  [1, 2, %{a: 1, b: 2}]
  """
  @spec multiple([t]) :: t
  deflens multiple(lenses), do: lenses |> Enum.reverse() |> Enum.reduce(empty(), &both/2)

  @doc ~S"""
  Returns a lens that focuses on what the first lens focuses on, unless it's nothing. In that case the
  lens will focus on what the second lens focuses on.
  iex(1)> get_in(%{a: 1}, [Lens.either(Lens.key?(:a), Lens.key?(:b))])
  [1]
  iex(2)> get_in(%{b: 2}, [Lens.either(Lens.key?(:a), Lens.key?(:b))])
  [2]
  It can be used to return a default value:
  iex> get_in([%{id: 8}], [Lens.all |> Lens.filter(&(&1.id == 8)) |> Lens.either(Lens.const(:default))])
  [%{id: 8}]
  iex> get_in([%{id: 8}], [Lens.all |> Lens.filter(&(&1.id == 1)) |> Lens.either(Lens.const(:default))])
  [:default]
  Or to upsert:
  iex> upsert = Lens.all() |> Lens.filter(&(&1[:id] == 1)) |> Lens.either(Lens.front())
  iex> update_in([%{id: 0}, %{id: 1}], [upsert], fn _ -> %{id: 1, x: :y} end)
  [%{id: 0}, %{id: 1, x: :y}]
  iex> update_in([%{id: 0}, %{id: 2}], [upsert], fn _ -> %{id: 1, x: :y} end)
  [%{id: 1, x: :y}, %{id: 0}, %{id: 2}]
  """
  @spec either(t, t) :: t
  deflens_raw either(lens, other_lens) do
    fn data, fun ->
      case get_and_map(lens, data, fun) do
        # Empty focus: fall back to the second lens on the ORIGINAL data.
        {[], _updated} -> get_and_map(other_lens, data, fun)
        {res, updated} -> {res, updated}
      end
    end
  end

  @doc ~S"""
  Returns a lens that does not change the focus of of the given lens, but puts the results into the given collectable
  when updating.
  iex> Lens.into(Lens.all(), MapSet.new) |> Lens.map(MapSet.new([-2, -1, 1, 2]), &(&1 * &1))
  MapSet.new([1, 4])
  Notice that collectable composes in a somewhat surprising way, for example:
  iex> Lens.map_values() |> Lens.all() |> Lens.into(%{}) |>
  ...> Lens.map(%{key1: %{key2: :value}}, fn {k, v} -> {v, k} end)
  %{key1: [{:value, :key2}]}
  To prevent this, avoid using `|>` with `into`:
  iex> Lens.map_values() |> Lens.into(Lens.all(), %{}) |>
  ...> Lens.map(%{key1: %{key2: :value}}, fn {k, v} -> {v, k} end)
  %{key1: %{value: :key2}}
  """
  @spec into(t, Collectable.t()) :: t
  deflens_raw into(lens, collectable) do
    fn data, fun ->
      {res, updated} = get_and_map(lens, data, fun)
      {res, Enum.into(updated, collectable)}
    end
  end

  @doc ~S"""
  Returns a lens that focuses on a subset of elements focused on by the given lens that satisfy the given condition.
  iex> Lens.map_values() |> Lens.filter(&Integer.is_odd/1) |> Lens.to_list(%{a: 1, b: 2, c: 3, d: 4})
  [1, 3]
  """
  @spec filter(t, (any -> boolean)) :: t
  # Single-argument form: filter the root itself.
  def filter(predicate), do: Lens.root() |> filter(predicate)

  deflens_raw filter(lens, predicate) do
    fn data, fun ->
      {res, changed} =
        get_and_map(lens, data, fn item ->
          if predicate.(item) do
            {res, changed} = fun.(item)
            {[res], changed}
          else
            # Rejected items contribute no results and stay unchanged.
            {[], item}
          end
        end)
      {Enum.concat(res), changed}
    end
  end

  @doc false
  @deprecated "Use filter/2 instead"
  @spec satisfy(t, (any -> boolean)) :: t
  def satisfy(lens, predicate), do: filter(lens, predicate)

  @doc ~S"""
  Returns a lens that focuses on a subset of elements focused on by the given lens that don't satisfy the given
  condition.
  iex> Lens.map_values() |> Lens.reject(&Integer.is_odd/1) |> Lens.to_list(%{a: 1, b: 2, c: 3, d: 4})
  [2, 4]
  """
  @spec reject(t, (any -> boolean)) :: t
  def reject(lens, predicate), do: filter(lens, &(not predicate.(&1)))

  @doc ~S"""
  Returns a lens that focuses on all values of a map.
  iex> Lens.map_values() |> Lens.to_list(%{a: 1, b: 2})
  [1, 2]
  iex> Lens.map_values() |> Lens.map(%{a: 1, b: 2}, &(&1 + 1))
  %{a: 2, b: 3}
  """
  @spec map_values :: t
  # all/0 yields {key, value} pairs; at(1) targets the value; into(%{}) rebuilds the map.
  deflens map_values, do: all() |> into(%{}) |> at(1)

  @doc ~S"""
  Returns a lens that focuses on all keys of a map.
  iex> Lens.map_keys() |> Lens.to_list(%{a: 1, b: 2})
  [:a, :b]
  iex> Lens.map_keys() |> Lens.map(%{1 => :a, 2 => :b}, &(&1 + 1))
  %{2 => :a, 3 => :b}
  """
  @spec map_keys :: t
  deflens map_keys, do: all() |> into(%{}) |> at(0)

  @doc ~S"""
  Returns a list of values that the lens focuses on in the given data.
  iex> Lens.keys([:a, :c]) |> Lens.to_list(%{a: 1, b: 2, c: 3})
  [1, 3]
  """
  @spec to_list(t, any) :: list(any)
  def to_list(lens, data), do: get_in(data, [lens])

  @doc ~S"""
  Performs a side effect for each values this lens focuses on in the given data.
  iex> data = %{a: 1, b: 2, c: 3}
  iex> fun = fn -> Lens.keys([:a, :c]) |> Lens.each(data, &IO.inspect/1) end
  iex> import ExUnit.CaptureIO
  iex> capture_io(fun)
  "1\n3\n"
  """
  @spec each(t, any, (any -> any)) :: :ok
  def each(lens, data, fun), do: to_list(lens, data) |> Enum.each(fun)

  @doc ~S"""
  Returns an updated version of the data by applying the given function to each value the lens focuses on and building
  a data structure of the same shape with the updated values in place of the original ones.
  iex> data = [1, 2, 3, 4]
  iex> Lens.all() |> Lens.filter(&Integer.is_odd/1) |> Lens.map(data, fn v -> v + 10 end)
  [11, 2, 13, 4]
  """
  @spec map(t, any, (any -> any)) :: any
  def map(lens, data, fun), do: update_in(data, [lens], fun)

  @doc ~S"""
  Returns an updated version of the data by replacing each spot the lens focuses on with the given value.
  iex> data = [1, 2, 3, 4]
  iex> Lens.all() |> Lens.filter(&Integer.is_odd/1) |> Lens.put(data, 0)
  [0, 2, 0, 4]
  """
  @spec put(t, any, any) :: any
  def put(lens, data, value), do: put_in(data, [lens], value)

  @doc ~S"""
  Returns an updated version of the data and a transformed value from each location the lens focuses on. The
  transformation function must return a tuple `{value_to_return, value_to_update}`.
  iex> data = %{a: 1, b: 2, c: 3}
  iex> Lens.keys([:a, :b, :c])
  ...> |> Lens.filter(&Integer.is_odd/1)
  ...> |> Lens.get_and_map(data, fn v -> {v + 1, v + 10} end)
  {[2, 4], %{a: 11, b: 2, c: 13}}
  """
  @spec get_and_map(t, any, (any -> {any, any})) :: {list(any), any}
  def get_and_map(lens, data, fun), do: get_and_update_in(data, [lens], fun)

  @doc ~S"""
  Executes `to_list` and returns the single item that the given lens focuses on for the given data. Crashes if there
  is more than one item.
  """
  @spec one!(t, any) :: any
  def one!(lens, data) do
    # Assertive match: raises MatchError unless exactly one item is focused.
    [result] = to_list(lens, data)
    result
  end

  # Depth-first traversal for recur/1: each item's children are processed
  # before the item itself (results ++ [res_parent]), so results come out
  # bottom-up — see the [1, 3, 2] ordering in the recur/1 doctest.
  defp do_recur(lens, data, fun) do
    {res, changed} =
      get_and_map(lens, data, fn item ->
        {results, changed1} = do_recur(lens, item, fun)
        {res_parent, changed2} = fun.(changed1)
        {results ++ [res_parent], changed2}
      end)
    {Enum.concat(res), changed}
  end

  # Like fetch_at_key/2 but returns nil for a missing key (used by key/1).
  defp get_at_key(data, key) do
    case fetch_at_key(data, key) do
      :error -> nil
      {:ok, value} -> value
    end
  end

  # Maps are written directly; anything else goes through the Access behaviour.
  defp set_at_key(data, key, value) when is_map(data), do: Map.put(data, key, value)

  defp set_at_key(data, key, value) do
    {_, updated} = Access.get_and_update(data, key, fn _ -> {nil, value} end)
    updated
  end

  # Raises KeyError for a missing key (used by key!/1).
  defp fetch_at_key!(data, key) do
    case fetch_at_key(data, key) do
      :error -> raise(KeyError, key: key, term: data)
      {:ok, value} -> value
    end
  end

  defp fetch_at_key(data, key) when is_map(data), do: Map.fetch(data, key)
  defp fetch_at_key(data, key), do: Access.fetch(data, key)

  # Index access supports both tuples (elem/put_elem) and enumerables/lists.
  defp get_at_index(data, index) when is_tuple(data), do: elem(data, index)
  defp get_at_index(data, index), do: Enum.at(data, index)

  defp set_at_index(data, index, value) when is_tuple(data), do: put_elem(data, index, value)

  defp set_at_index(data, index, value) when is_list(data) do
    List.update_at(data, index, fn _ -> value end)
  end
end
|
lib/lens.ex
| 0.836921
| 0.572484
|
lens.ex
|
starcoder
|
# Behaviour and helpers for building custom LiveDashboard pages. A page module
# `use`s this module, implements the callbacks below, and renders built-in
# components such as `table/1` and `nav_bar/1` from `render_page/1`.
defmodule Phoenix.LiveDashboard.PageBuilder do
  # Per-page state kept by the dashboard:
  #   info   - extra info for the page (nil when absent)
  #   module - the page module implementing this behaviour
  #   node   - the node the page is inspecting
  #   params - current request params
  #   route  - the route the page was mounted under
  #   tick   - refresh counter, starts at 0
  defstruct info: nil,
            module: nil,
            node: nil,
            params: nil,
            route: nil,
            tick: 0

  # A renderable component: the component module plus its assigns.
  @opaque component :: {module, map}
  @type session :: map
  @type requirements :: [{:application | :process | :module, atom()}]
  @type unsigned_params :: map
  @type capabilities :: %{
          applications: [atom()],
          modules: [atom()],
          processes: [atom()],
          dashboard_running?: boolean(),
          system_info: nil | binary()
        }

  alias Phoenix.LiveDashboard.{TableComponent, NavBarComponent}

  # NOTE(review): the callback specs below reference `Socket.t()` without an
  # alias in this module — presumably `Phoenix.LiveView.Socket`; confirm.
  @doc """
  Callback invoked when a page is declared in the router.
  It receives the router options and it must return the
  tuple `{:ok, session, requirements}`.
  The page session will be serialized to the client and
  received on `mount`.
  The requirements is an optional keyword to detect the
  state of the node.
  The result of this detection will be passed as second
  argument in the `c:menu_link/2` callback.
  The possible values are:
  * `:applications` list of applications that are running or not.
  * `:modules` list of modules that are loaded or not.
  * `:pids` list of processes that alive or not.
  """
  @callback init(term()) :: {:ok, session()} | {:ok, session(), requirements()}

  @doc """
  Callback invoked when a page is declared in the router.
  It receives the session returned by the `c:init/1` callback
  and the capabilities of the current node.
  The possible return values are:
  * `{:ok, text}` when the link should be enable and text to be shown.
  * `{:disabled, text}` when the link should be disable and text to be shown.
  * `{:disabled, text, more_info_url}` similar to the previous one but
  it also includes a link to provide more information to the user.
  * `:skip` when the link should not be shown at all.
  """
  @callback menu_link(session(), capabilities()) ::
              {:ok, String.t()}
              | {:disabled, String.t()}
              | {:disabled, String.t(), String.t()}
              | :skip

  @callback mount(unsigned_params(), session(), socket :: Socket.t()) ::
              {:ok, Socket.t()} | {:ok, Socket.t(), keyword()}

  @callback render_page(assigns :: Socket.assigns()) :: component()

  @callback handle_params(unsigned_params(), uri :: String.t(), socket :: Socket.t()) ::
              {:noreply, Socket.t()}

  @callback handle_event(event :: binary, unsigned_params(), socket :: Socket.t()) ::
              {:noreply, Socket.t()} | {:reply, map, Socket.t()}

  @callback handle_info(msg :: term, socket :: Socket.t()) ::
              {:noreply, Socket.t()}

  @callback handle_refresh(socket :: Socket.t()) ::
              {:noreply, Socket.t()}

  @optional_callbacks mount: 3,
                      handle_params: 3,
                      handle_event: 3,
                      handle_info: 2,
                      handle_refresh: 1

  @doc """
  Renders a table component.
  This component is used in different pages like applications or sockets.
  It can be used in a `Phoenix.LiveView` in the `render/1` function:
  def render_page(assigns) do
  table(
  columns: columns(),
  id: @table_id,
  row_attrs: &row_attrs/1,
  row_fetcher: &fetch_applications/2,
  title: "Applications"
  )
  end
  # Options
  These are the options supported by the component:
  * `:id` - Required. Because is a stateful `Phoenix.LiveComponent` an unique id is needed.
  * `:columns` - Required. A `Keyword` list with the following keys:
  * `:field` - Required. An identifier for the column.
  * `:header` - Label to show in the current column. Default value is calculated from `:field`.
  * `:header_attrs` - A list with HTML attributes for the column header.
  More info: `Phoenix.HTML.Tag.tag/1`. Default `[]`.
  * `:format` - Function which receives the row data and returns the cell information.
  Default is calculated from `:field`: `row[:field]`.
  * `:cell_attrs` - A list with HTML attributes for the table cell.
  It also can be a function which receives the row data and returns an attribute list.
  More info: `Phoenix.HTML.Tag.tag/1`. Default: `[]`.
  * `:sortable` - Either `:asc` or `:desc` with the default sorting. When set, the column
  header is clickable and it fetches again rows with the new order. At least one column
  should be sortable. Default: `nil`
  * `:limit_options` - A list of integers to limit the number of rows to show.
  Default: `[50, 100, 500, 1000, 5000]`
  * `:params` - Required. All the params received by the parent `Phoenix.LiveView`,
  so the table can handle its own parameters.
  * `:row_fetcher` - Required. A function which receives the params and the node and
  returns a tuple with the rows and the total number:
  `(params(), node()) -> {list(), integer() | binary()}`
  * `:rows_name` - A string to name the representation of the rows.
  Default is calculated from the current page.
  * `:title` - The title of the table.
  Default is calculated with the current page.
  """
  @spec table(keyword()) :: component()
  def table(assigns) do
    # Convert the keyword options to a map and let the component validate /
    # fill in defaults before rendering.
    assigns =
      assigns
      |> Map.new()
      |> TableComponent.normalize_params()

    {TableComponent, assigns}
  end

  @spec nav_bar(keyword()) :: component()
  def nav_bar(assigns) do
    # Same normalization pattern as table/1, delegated to NavBarComponent.
    assigns =
      assigns
      |> Map.new()
      |> NavBarComponent.normalize_params()

    {NavBarComponent, assigns}
  end

  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      import Phoenix.LiveView
      import Phoenix.LiveView.Helpers
      import Phoenix.LiveDashboard.Helpers
      import Phoenix.LiveDashboard.PageBuilder

      @behaviour Phoenix.LiveDashboard.PageBuilder

      # Pages opt out of periodic refresh with `use ..., refresher?: false`;
      # the dashboard reads this back via __page_live__(:refresher?).
      refresher? = Keyword.get(opts, :refresher?, true)

      def __page_live__(:refresher?) do
        unquote(refresher?)
      end

      # Default init/1 just wraps the router options; overridable per page.
      def init(opts), do: {:ok, opts}

      defoverridable init: 1
    end
  end
end
|
lib/phoenix/live_dashboard/page_builder.ex
| 0.856512
| 0.547041
|
page_builder.ex
|
starcoder
|
defmodule StaffNotesApi.TokenAuthentication do
  @moduledoc """
  A module `Plug` for validating the API token in the authorization request header.
  Looks for the token in the request `Authorization` header in the format
  `token [big-long-token-thing]`. When found, it verifies the token (using
  `Phoenix.Token.verify/4`). If the token is valid, it extracts the user ID from the token and
  verifies that there is a user in the database with the given user ID. If any step fails,
  a `StaffNotesApi.AuthenticationError` is raised which causes a `401 Unauthorized` HTTP
  status to be returned with a message.
  ## Options
  * `:api_access_salt` — Salt to use when verifying the token _(**default:**
  "api-access-salt")_
  * `:max_age` — Maximum allowable age of the token in seconds _(**default:** one day or
  86,400 seconds)_
  """
  @behaviour Plug

  import Plug.Conn, only: [get_req_header: 2]

  require Logger

  alias Phoenix.Token
  alias StaffNotes.Accounts
  alias StaffNotesApi.AuthenticationError

  @doc """
  Initialize the plug with the supplied options.
  ## Examples
  Default options:
  ```
  iex> StaffNotesApi.TokenAuthentication.init()
  [api_access_salt: "api-access-salt", max_age: 86_400]
  ```
  Overriding options:
  ```
  iex> StaffNotesApi.TokenAuthentication.init(api_access_salt: "test", max_age: 1_000)
  [api_access_salt: "test", max_age: 1_000]
  ```
  """
  def init(options \\ []) do
    # Precedence (lowest to highest): hard-coded defaults, application env
    # for this module, then the options given directly to the plug.
    default_options()
    |> Keyword.merge(Application.get_env(get_app(), __MODULE__) || [])
    |> Keyword.merge(options)
  end

  @doc """
  Call the plug with the initialized options.
  """
  def call(conn, options), do: validate_token!(conn, options)

  defp get_app, do: Application.get_application(__MODULE__)

  defp default_options do
    [
      api_access_salt: "api-access-salt",
      max_age: 86_400
    ]
  end

  # Raises AuthenticationError unless the request carries a valid token for an
  # existing user; returns the conn unchanged on success.
  defp validate_token!(conn, options) do
    conn
    |> verify_token!(options)
    |> verify_user!()

    conn
  end

  # Returns the user id embedded in the token, or raises.
  defp verify_token!(conn, options) do
    conn
    |> get_token!(options)
    |> do_verify_token!()
  end

  # Extracts and cryptographically verifies the "token <value>" header.
  # The full options keyword is passed to Token.verify/4, which reads
  # `:max_age` from it; raises when the header is absent or malformed.
  defp get_token!(conn, options) do
    case get_req_header(conn, "authorization") do
      ["token " <> token] ->
        Token.verify(conn, options[:api_access_salt], token, options)

      _ ->
        raise AuthenticationError, message: "`Authorization` request header missing or malformed"
    end
  end

  # A token that verified but carries no user id means nobody is logged in.
  defp do_verify_token!({:ok, nil}), do: raise(AuthenticationError, message: "Not logged in")
  defp do_verify_token!({:ok, user_id}), do: user_id

  # `reason` comes from Phoenix.Token (e.g. :invalid or :expired).
  defp do_verify_token!({:error, reason}) do
    raise AuthenticationError, message: "Authentication token is #{reason}"
  end

  # Confirms the user id from the token still maps to a user in the database.
  defp verify_user!(id) do
    case Accounts.get_user(id) do
      nil -> raise AuthenticationError, message: "Authentication token is invalid"
      _ -> nil
    end
  end
end
|
lib/staff_notes_api/plugs/token_authentication.ex
| 0.850717
| 0.70791
|
token_authentication.ex
|
starcoder
|
defmodule Crux.Structs.Member do
  @moduledoc """
  Represents a Discord [Guild Member Object](https://discordapp.com/developers/docs/resources/guild#guild-member-object-guild-member-structure).
  Differences opposed to the Discord API Object:
  - `:user` is just the user id
  """
  @behaviour Crux.Structs

  alias Crux.Structs.{Member, Snowflake, User, Util}
  require Util

  Util.modulesince("0.1.0")

  defstruct(
    user: nil,
    nick: nil,
    roles: nil,
    joined_at: nil,
    deaf: nil,
    mute: nil,
    guild_id: nil
  )

  Util.typesince("0.1.0")

  @type t :: %__MODULE__{
          user: Snowflake.t(),
          nick: String.t() | nil,
          roles: MapSet.t(Snowflake.t()),
          joined_at: String.t(),
          deaf: boolean() | nil,
          mute: boolean() | nil,
          guild_id: Snowflake.t() | nil
        }

  @typedoc """
  All available types that can be resolved into a user id.
  """
  Util.typesince("0.2.1")
  @type id_resolvable() :: User.id_resolvable()

  @doc """
  Resolves the id of a `t:Crux.Structs.Member.t/0`.
  > Automatically invoked by `Crux.Structs.resolve_id/2`.
  For examples see `Crux.Structs.User.resolve_id/1`.
  """
  @spec resolve_id(id_resolvable()) :: Snowflake.t() | nil
  Util.since("0.2.1")
  # Members resolve exactly like users, so delegate to User.resolve_id/1.
  defdelegate resolve_id(resolvable), to: User

  @doc """
  Creates a `t:Crux.Structs.Member.t/0` struct from raw data.
  > Automatically invoked by `Crux.Structs.create/2`.
  """
  @spec create(data :: map()) :: t()
  Util.since("0.1.0")
  def create(data) do
    member =
      data
      # String keys from the API become atoms.
      |> Util.atomify()
      # Collapse the nested user object to just its id (see moduledoc).
      |> Map.update!(:user, Util.map_to_id())
      # Role ids become a MapSet of snowflakes.
      |> Map.update!(:roles, &MapSet.new(&1, fn role_id -> Snowflake.to_snowflake(role_id) end))
      # :guild_id is optional; default to nil when absent.
      |> Map.update(:guild_id, nil, &Snowflake.to_snowflake/1)

    struct(__MODULE__, member)
  end

  @doc ~S"""
  Converts a `t:Crux.Structs.Member.t/0` into its discord mention format.
  ## Examples
  ```elixir
  # Without nickname
  iex> %Crux.Structs.Member{user: 218348062828003328, nick: nil}
  ...> |> Crux.Structs.Member.to_mention()
  "<@218348062828003328>"
  # With nickname
  iex> %Crux.Structs.Member{user: 218348062828003328, nick: "weltraum"}
  ...> |> Crux.Structs.Member.to_mention()
  "<@!218348062828003328>"
  ```
  """
  @spec to_mention(user :: Crux.Structs.Member.t()) :: String.t()
  Util.since("0.1.1")
  # The "<@!id>" form is used when the member has a nickname set.
  def to_mention(%__MODULE__{user: id, nick: nil}), do: "<@#{id}>"
  def to_mention(%__MODULE__{user: id}), do: "<@!#{id}>"

  defimpl String.Chars, for: Crux.Structs.Member do
    @spec to_string(Member.t()) :: String.t()
    def to_string(%Member{} = data), do: Member.to_mention(data)
  end
end
|
lib/structs/member.ex
| 0.846689
| 0.519948
|
member.ex
|
starcoder
|
defmodule Cloak.AES.CTR do
@moduledoc """
A `Cloak.Cipher` which encrypts values with the AES cipher in CTR (stream) mode.
Internally relies on Erlang's `:crypto.stream_encrypt/2`.
## Configuration
In addition to the normal `:default` and `:tag` configuration options, this
cipher takes a `:keys` option to support using multiple AES keys at the same
time.
config :cloak, Cloak.AES.CTR,
default: true,
tag: "AES",
keys: [
%{tag: <<1>>, key: Base.decode64!("..."), default: true},
%{tag: <<2>>, key: Base.decode64!("..."), default: false}
]
If you want to store your key in the environment variable, you can use
`{:system, "VAR"}` syntax:
config :cloak, Cloak.AES.CTR,
default: true,
tag: "AES",
keys: [
%{tag: <<1>>, key: {:system, "CLOAK_KEY_PRIMARY"}, default: true},
%{tag: <<2>>, key: {:system, "CLOAK_KEY_SECONDARY"}, default: false}
]
If you want to store your key in the OTP app environment, you can use
`{:app_env, :otp_app, :env_key}` syntax:
config :cloak, Cloak.AES.CTR,
default: true,
tag: "AES",
keys: [
%{tag: <<1>>, key: {:app_env, :my_app, :env_primary_key}, default: true},
%{tag: <<2>>, key: {:app_env, :my_app, :env_secondary_key}, default: false}
]
### Key Configuration Options
A key may have the following attributes:
- `:tag` - The ID of the key. This is included in the ciphertext, and should be
only a single byte. See `encrypt/2` for more details.
- `:key` - The AES key to use, in binary. If you store your keys in Base64
format you will need to decode them first. The key must be 128, 192, or 256 bits
long (16, 24 or 32 bytes, respectively).
- `:default` - Boolean. Whether to use this key by default or not.
## Upgrading to a New Key
To upgrade to a new key, simply add the key to the `:keys` array, and set it
as `default: true`.
keys: [
%{tag: <<1>>, key: "old key", default: false},
%{tag: <<2>>, key: "new key", default: true}
]
After this, your new key will automatically be used for all new encyption,
while the old key will be used to decrypt legacy values.
To migrate everything proactively to the new key, see the `mix cloak.migrate`
mix task defined in `Mix.Tasks.Cloak.Migrate`.
"""
@behaviour Cloak.Cipher
@doc """
Callback implementation for `Cloak.Cipher.encrypt`. Encrypts a value using
AES in CTR mode.
Generates a random IV for every encryption, and prepends the key tag and IV to
the beginning of the ciphertext. The format can be diagrammed like this:
+----------------------------------+----------------------+
| HEADER | BODY |
+------------------+---------------+----------------------+
| Key Tag (1 byte) | IV (16 bytes) | Ciphertext (n bytes) |
+------------------+---------------+----------------------+
When this function is called through `Cloak.encrypt/1`, the module's `:tag`
will be added, and the resulting binary will be in this format:
+---------------------------------------------------------+----------------------+
| HEADER | BODY |
+----------------------+------------------+---------------+----------------------+
| Module Tag (n bytes) | Key Tag (1 byte) | IV (16 bytes) | Ciphertext (n bytes) |
+----------------------+------------------+---------------+----------------------+
The header information allows Cloak to know enough about each ciphertext to
ensure a successful decryption. See `decrypt/1` for more details.
**Important**: Because a random IV is used for every encryption, `encrypt/2`
will not produce the same ciphertext twice for the same value.
### Parameters
- `plaintext` - Any type of value to encrypt.
- `key_tag` - Optional. The tag of the key to use for encryption.
### Example
iex> encrypt("Hello") != "Hello"
true
iex> encrypt("Hello") != encrypt("Hello")
true
"""
def encrypt(plaintext, key_tag \\ nil) do
  # A fresh 16-byte IV on every call ensures the same plaintext never
  # produces the same ciphertext twice.
  iv = :crypto.strong_rand_bytes(16)

  # Fall back to the configured default key when no explicit tag is given.
  key = Cloak.Ciphers.Util.config(__MODULE__, key_tag) || default_key()

  # NOTE(review): the legacy :crypto.stream_* API is kept for compatibility
  # with the OTP versions this library targets (removed in newer OTP).
  {_new_state, ciphertext} =
    :aes_ctr
    |> :crypto.stream_init(Cloak.Ciphers.Util.key_value(key), iv)
    |> :crypto.stream_encrypt(to_string(plaintext))

  # Prepend the header (key tag + IV) so decrypt/1 can recover both.
  key.tag <> iv <> ciphertext
end
@doc """
Callback implementation for `Cloak.Cipher.decrypt/2`. Decrypts a value
encrypted with AES in CTR mode.
Uses the key tag to find the correct key for decryption, and the IV included
in the header to decrypt the body of the ciphertext.
### Parameters
- `ciphertext` - Binary ciphertext generated by `encrypt/2`.
### Examples
iex> encrypt("Hello") |> decrypt
"Hello"
"""
def decrypt(<<key_tag::binary-size(1), iv::binary-size(16), body::binary>>) do
  # The key tag from the header selects which configured key to use.
  key = Cloak.Ciphers.Util.config(__MODULE__, key_tag)

  # CTR mode decryption mirrors encryption: same stream state, same keystream.
  decryptor = :crypto.stream_init(:aes_ctr, Cloak.Ciphers.Util.key_value(key), iv)
  {_new_state, plaintext} = :crypto.stream_decrypt(decryptor, body)

  plaintext
end
@doc """
Callback implementation for `Cloak.Cipher.version/0`. Returns the tag of the
current default key.
"""
# Returns the tag of the current default key, identifying the "version" of
# ciphertexts produced right now.
def version, do: default_key().tag
# Looks up the key configured with `default: true` for this cipher module.
defp default_key, do: Cloak.Ciphers.Util.default_key(__MODULE__)
end
|
lib/cloak/ciphers/aes_ctr.ex
| 0.892878
| 0.573081
|
aes_ctr.ex
|
starcoder
|
defmodule Shapeshifter.TXO do
  @moduledoc """
  Module for converting to and from [`TXO`](`t:Shapeshifter.txo/0`) structured
  maps.
  Usually used internally, although can be used directly for specific use cases
  such as converting single inputs and outputs to and from [`TXO`](`t:Shapeshifter.txo/0`)
  formatted maps.
  """
  # Presumably provides `script_address/1` used below — TODO confirm against
  # Shapeshifter.Shared.
  import Shapeshifter.Shared

  @doc """
  Creates a new [`TXO`](`t:Shapeshifter.txo/0`) formatted map from the given
  [`Shapeshifter`](`t:Shapeshifter.t/0`) struct.
  """
  @spec new(Shapeshifter.t) :: Shapeshifter.txo
  def new(%Shapeshifter{src: tx, format: :tx} = _shifter) do
    txid = BSV.Transaction.get_txid(tx)

    # Pair every input/output with its position so each cast entry can carry
    # its "i" index attribute.
    ins = tx.inputs
    |> Enum.with_index
    |> Enum.map(&cast_input/1)

    outs = tx.outputs
    |> Enum.with_index
    |> Enum.map(&cast_output/1)

    %{
      "tx" => %{"h" => txid},
      "in" => ins,
      "out" => outs,
      "lock" => 0
    }
  end

  # Source is already TXO formatted — returned unchanged.
  def new(%Shapeshifter{src: src, format: :txo}), do: src

  # BOB source: convert each input/output tape, dropping the "_id" attribute
  # (presumably a database artifact — TODO confirm).
  def new(%Shapeshifter{src: src, format: :bob}) do
    ins = Enum.map(src["in"], &cast_input/1)
    outs = Enum.map(src["out"], &cast_output/1)

    src
    |> Map.delete("_id")
    |> Map.put("in", ins)
    |> Map.put("out", outs)
  end

  @doc """
  Converts the given input parameters to a [`TXO`](`t:Shapeshifter.txo/0`)
  formatted input.
  Accepts either a [`BSV Input`](`t:BSV.Transaction.Input.t/0`) struct or a
  [`BOB`](`t:Shapeshifter.bob/0`) formatted input.
  """
  @spec cast_input({BSV.Transaction.Input.t | map, integer}) :: map
  def cast_input({%BSV.Transaction.Input{} = src, index}) do
    input = %{
      "i" => index,
      "seq" => src.sequence,
      "e" => %{
        "h" => src.output_txid,
        "i" => src.output_index,
        "a" => script_address(src.script.chunks)
      },
      "len" => length(src.script.chunks)
    }

    # Fold each script chunk into the map as "oN" or "sN"/"bN"/"hN" attributes.
    src.script.chunks
    |> Enum.with_index
    |> Enum.reduce(input, &from_script_chunk/2)
  end

  def cast_input(%{"tape" => _tape} = src),
    do: from_bob_tape(src)

  @doc """
  Converts the given output parameters to a [`TXO`](`t:Shapeshifter.txo/0`)
  formatted output.
  Accepts either a [`BSV Output`](`t:BSV.Transaction.Output.t/0`) struct or a
  [`BOB`](`t:Shapeshifter.bob/0`) formatted output.
  """
  @spec cast_output({BSV.Transaction.Output.t | map, integer}) :: map
  def cast_output({%BSV.Transaction.Output{} = src, index}) do
    output = %{
      "i" => index,
      "e" => %{
        "v" => src.satoshis,
        "i" => index,
        "a" => script_address(src.script.chunks)
      },
      "len" => length(src.script.chunks)
    }

    # Fold each script chunk into the map as "oN" or "sN"/"bN"/"hN" attributes.
    src.script.chunks
    |> Enum.with_index
    |> Enum.reduce(output, &from_script_chunk/2)
  end

  def cast_output(%{"tape" => _tape} = src),
    do: from_bob_tape(src)

  @doc """
  Converts the given [`TXO`](`t:Shapeshifter.txo/0`) formatted transaction back
  to a [`BSV Transaction`](`t:BSV.Transaction.t/0`) struct.
  """
  @spec to_tx(%Shapeshifter{
    src: Shapeshifter.txo,
    format: :txo
  } | Shapeshifter.txo) :: BSV.Transaction.t
  def to_tx(%Shapeshifter{src: src, format: :txo}),
    do: to_tx(src)

  def to_tx(%{"in" => ins, "out" => outs} = src) do
    %BSV.Transaction{
      inputs: Enum.map(ins, &to_tx_input/1),
      outputs: Enum.map(outs, &to_tx_output/1),
      lock_time: src["lock"]
    }
  end

  @doc """
  Converts the given [`TXO`](`t:Shapeshifter.txo/0`) formatted input back to a
  [`BSV Input`](`t:BSV.Transaction.Input.t/0`) struct.
  """
  @spec to_tx_input(map) :: BSV.Transaction.Input.t
  def to_tx_input(%{} = src) do
    %BSV.Transaction.Input{
      output_index: get_in(src, ["e", "i"]),
      output_txid: get_in(src, ["e", "h"]),
      sequence: src["seq"],
      script: to_tx_script(src)
    }
  end

  @doc """
  Converts the given [`TXO`](`t:Shapeshifter.txo/0`) formatted output back to a
  [`BSV Output`](`t:BSV.Transaction.Output.t/0`) struct.
  """
  @spec to_tx_output(map) :: BSV.Transaction.Output.t
  def to_tx_output(%{} = src) do
    %BSV.Transaction.Output{
      satoshis: get_in(src, ["e", "v"]),
      script: to_tx_script(src)
    }
  end

  # Converts a BSV Script chunk to TXO parameters. The index is given with the
  # script chunk. Opcodes become "oN"; data pushes become "sN"/"bN"/"hN"
  # (raw / base64 / lowercase hex).
  defp from_script_chunk({opcode, index}, target) when is_atom(opcode),
    do: Map.put(target, "o#{index}", Atom.to_string(opcode))

  defp from_script_chunk({data, index}, target) when is_binary(data) do
    Map.merge(target, %{
      "s#{index}" => data,
      "b#{index}" => Base.encode64(data),
      "h#{index}" => Base.encode16(data, case: :lower),
    })
  end

  # Converts a BOB formatted tape to TXO parameters. "len" doubles as the
  # running expected chunk index while cells are folded in.
  defp from_bob_tape(%{"tape" => tape} = src) do
    target = src
    |> Map.delete("tape")
    |> Map.put("len", 0)

    tape
    |> Enum.flat_map(& &1["cell"])
    |> Enum.reduce(target, &from_bob_cell/2)
  end

  # Converts a BOB formatted cell to TXO parameters. Cells carrying "ops" are
  # opcodes; all others carry raw/base64/hex data attributes.
  defp from_bob_cell(%{"ops" => opcode, "ii" => index}, target) do
    target
    |> check_expected_index(index)
    |> Map.put("o#{index}", opcode)
  end

  defp from_bob_cell(%{"ii" => index} = cell, target) do
    target
    |> check_expected_index(index)
    |> Map.merge(%{
      "s#{index}" => cell["s"],
      "b#{index}" => cell["b"],
      "h#{index}" => cell["h"]
    })
  end

  # Checks the expected index when iterating over a BOB tape. If the expected
  # index is less, then we know to add a pipe character into the TXO map —
  # presumably restoring "|" protocol separators that BOB strips out (TODO
  # confirm).
  defp check_expected_index(%{"len" => expected_index} = target, index)
    when expected_index == index,
    do: Map.put(target, "len", expected_index+1)

  defp check_expected_index(%{"len" => expected_index} = target, index)
    when expected_index < index
  do
    # Insert one "|" chunk, bump "len", then recurse until indexes align.
    Map.merge(target, %{
      "s#{expected_index}" => "|",
      "b#{expected_index}" => Base.encode64("|"),
      "h#{expected_index}" => Base.encode16("|", case: :lower),
      "len" => expected_index+1
    })
    |> check_expected_index(index)
  end

  # Converts TXO formatted attributes into a BSV Script struct by walking the
  # oN / bN / hN attributes in index order.
  # NOTE(review): when "len" is 0 the range `0..-1` still enumerates values
  # and the `cond` below has no fallback clause — presumably "len" is always
  # >= 1 here; verify.
  # NOTE(review): `String.to_atom/1` creates atoms from external data; opcode
  # names are presumably a closed set — consider `String.to_existing_atom/1`.
  defp to_tx_script(%{} = src) do
    0..src["len"]-1
    |> Enum.reduce(%BSV.Script{}, fn i, script ->
      data = cond do
        Map.has_key?(src, "o#{i}") ->
          Map.get(src, "o#{i}") |> String.to_atom
        Map.has_key?(src, "b#{i}") ->
          Map.get(src, "b#{i}") |> Base.decode64!
        Map.has_key?(src, "h#{i}") ->
          Map.get(src, "h#{i}") |> Base.decode16!(case: :mixed)
      end
      BSV.Script.push(script, data)
    end)
  end
end
|
lib/shapeshifter/txo.ex
| 0.767733
| 0.672601
|
txo.ex
|
starcoder
|
# NOTE(review): these modules appear to be auto-generated protobuf definitions
# (TensorFlow's struct.proto, via the `protobuf` library). Do not edit by
# hand — regenerate from the .proto source instead.
defmodule Tensorflow.TypeSpecProto.TypeSpecClass do
  @moduledoc false
  use Protobuf, enum: true, syntax: :proto3

  @type t ::
    integer
    | :UNKNOWN
    | :SPARSE_TENSOR_SPEC
    | :INDEXED_SLICES_SPEC
    | :RAGGED_TENSOR_SPEC
    | :TENSOR_ARRAY_SPEC
    | :DATA_DATASET_SPEC
    | :DATA_ITERATOR_SPEC
    | :OPTIONAL_SPEC
    | :PER_REPLICA_SPEC
    | :VARIABLE_SPEC
    | :ROW_PARTITION_SPEC
    | :NDARRAY_SPEC

  field(:UNKNOWN, 0)
  field(:SPARSE_TENSOR_SPEC, 1)
  field(:INDEXED_SLICES_SPEC, 2)
  field(:RAGGED_TENSOR_SPEC, 3)
  field(:TENSOR_ARRAY_SPEC, 4)
  field(:DATA_DATASET_SPEC, 5)
  field(:DATA_ITERATOR_SPEC, 6)
  field(:OPTIONAL_SPEC, 7)
  field(:PER_REPLICA_SPEC, 8)
  field(:VARIABLE_SPEC, 9)
  field(:ROW_PARTITION_SPEC, 10)
  field(:NDARRAY_SPEC, 11)
end

# A tagged union ("oneof") over all structured value kinds.
defmodule Tensorflow.StructuredValue do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
    kind: {atom, any}
  }
  defstruct [:kind]

  oneof(:kind, 0)
  field(:none_value, 1, type: Tensorflow.NoneValue, oneof: 0)
  field(:float64_value, 11, type: :double, oneof: 0)
  field(:int64_value, 12, type: :sint64, oneof: 0)
  field(:string_value, 13, type: :string, oneof: 0)
  field(:bool_value, 14, type: :bool, oneof: 0)
  field(:tensor_shape_value, 31, type: Tensorflow.TensorShapeProto, oneof: 0)
  field(:tensor_dtype_value, 32,
    type: Tensorflow.DataType,
    enum: true,
    oneof: 0
  )
  field(:tensor_spec_value, 33, type: Tensorflow.TensorSpecProto, oneof: 0)
  field(:type_spec_value, 34, type: Tensorflow.TypeSpecProto, oneof: 0)
  field(:bounded_tensor_spec_value, 35,
    type: Tensorflow.BoundedTensorSpecProto,
    oneof: 0
  )
  field(:list_value, 51, type: Tensorflow.ListValue, oneof: 0)
  field(:tuple_value, 52, type: Tensorflow.TupleValue, oneof: 0)
  field(:dict_value, 53, type: Tensorflow.DictValue, oneof: 0)
  field(:named_tuple_value, 54, type: Tensorflow.NamedTupleValue, oneof: 0)
end

defmodule Tensorflow.NoneValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{}
  defstruct []
end

defmodule Tensorflow.ListValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    values: [Tensorflow.StructuredValue.t()]
  }
  defstruct [:values]
  field(:values, 1, repeated: true, type: Tensorflow.StructuredValue)
end

defmodule Tensorflow.TupleValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    values: [Tensorflow.StructuredValue.t()]
  }
  defstruct [:values]
  field(:values, 1, repeated: true, type: Tensorflow.StructuredValue)
end

# Map-entry message backing Tensorflow.DictValue's `fields` map.
defmodule Tensorflow.DictValue.FieldsEntry do
  @moduledoc false
  use Protobuf, map: true, syntax: :proto3
  @type t :: %__MODULE__{
    key: String.t(),
    value: Tensorflow.StructuredValue.t() | nil
  }
  defstruct [:key, :value]
  field(:key, 1, type: :string)
  field(:value, 2, type: Tensorflow.StructuredValue)
end

defmodule Tensorflow.DictValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    fields: %{String.t() => Tensorflow.StructuredValue.t() | nil}
  }
  defstruct [:fields]
  field(:fields, 1,
    repeated: true,
    type: Tensorflow.DictValue.FieldsEntry,
    map: true
  )
end

defmodule Tensorflow.PairValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    key: String.t(),
    value: Tensorflow.StructuredValue.t() | nil
  }
  defstruct [:key, :value]
  field(:key, 1, type: :string)
  field(:value, 2, type: Tensorflow.StructuredValue)
end

defmodule Tensorflow.NamedTupleValue do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    name: String.t(),
    values: [Tensorflow.PairValue.t()]
  }
  defstruct [:name, :values]
  field(:name, 1, type: :string)
  field(:values, 2, repeated: true, type: Tensorflow.PairValue)
end

defmodule Tensorflow.TensorSpecProto do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    name: String.t(),
    shape: Tensorflow.TensorShapeProto.t() | nil,
    dtype: Tensorflow.DataType.t()
  }
  defstruct [:name, :shape, :dtype]
  field(:name, 1, type: :string)
  field(:shape, 2, type: Tensorflow.TensorShapeProto)
  field(:dtype, 3, type: Tensorflow.DataType, enum: true)
end

defmodule Tensorflow.BoundedTensorSpecProto do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    name: String.t(),
    shape: Tensorflow.TensorShapeProto.t() | nil,
    dtype: Tensorflow.DataType.t(),
    minimum: Tensorflow.TensorProto.t() | nil,
    maximum: Tensorflow.TensorProto.t() | nil
  }
  defstruct [:name, :shape, :dtype, :minimum, :maximum]
  field(:name, 1, type: :string)
  field(:shape, 2, type: Tensorflow.TensorShapeProto)
  field(:dtype, 3, type: Tensorflow.DataType, enum: true)
  field(:minimum, 4, type: Tensorflow.TensorProto)
  field(:maximum, 5, type: Tensorflow.TensorProto)
end

defmodule Tensorflow.TypeSpecProto do
  @moduledoc false
  use Protobuf, syntax: :proto3
  @type t :: %__MODULE__{
    type_spec_class: Tensorflow.TypeSpecProto.TypeSpecClass.t(),
    type_state: Tensorflow.StructuredValue.t() | nil,
    type_spec_class_name: String.t()
  }
  defstruct [:type_spec_class, :type_state, :type_spec_class_name]
  field(:type_spec_class, 1,
    type: Tensorflow.TypeSpecProto.TypeSpecClass,
    enum: true
  )
  field(:type_state, 2, type: Tensorflow.StructuredValue)
  field(:type_spec_class_name, 3, type: :string)
end
|
lib/tensorflow/core/protobuf/struct.pb.ex
| 0.790126
| 0.456228
|
struct.pb.ex
|
starcoder
|
defmodule RGG.Shared do
  @doc """
  Calculates the maximum number of buckets we can use when connecting nodes,
  so that node connection runs in linear time.
  The only parameter is `r`: nodes within radius `r` of each other must land
  at most one bucket apart.
  ## Examples
      iex>RGG.Square.calculate_radius_square(1000, 25) |> RGG.Shared.num_buckets()
      10
  """
  def num_buckets(r) do
    (1 / r)
    |> :math.floor()
    |> Kernel.-(1)
    |> max(1)
    |> round()
  end

  @doc """
  Selects the bucket coordinates for a node based on its `x` location, `y`
  location, and the number of buckets.
      iex>RGG.Shared.get_bucket_from_node(%RGG.Node{x: 0, y: 0}, 10)
      {0, 0}
      iex>RGG.Shared.get_bucket_from_node(%RGG.Node{x: 1, y: 0.5}, 10)
      {10, 5}
  """
  def get_bucket_from_node(%RGG.Node{x: x, y: y}, number_buckets),
    do: {round(x * number_buckets), round(y * number_buckets)}

  @doc """
  Returns the nodes that must be tested in order to connect a node in the
  graph: the contents of the node's own bucket plus the eight surrounding ones.
  """
  def get_adjacent_nodes_for_bucket(node, buckets) do
    {col, row} = get_bucket_from_node(node, map_size(buckets))

    # Walk the 3x3 neighbourhood around the node's bucket; missing buckets
    # contribute an empty list.
    for dx <- -1..1, dy <- -1..1 do
      buckets
      |> Map.get(col + dx, %{})
      |> Map.get(row + dy, [])
    end
    |> List.flatten()
  end

  @doc """
  Maps nodes into their appropriate buckets before connection.
  """
  def create_buckets(nodes, number_of_buckets),
    do: Enum.reduce(nodes, %{}, curry_put_node_in_bucket(number_of_buckets))

  # Returns a reducer function that files a single node into the two-level
  # (column -> row -> nodes) bucket map.
  def curry_put_node_in_bucket(n) do
    fn node, buckets ->
      {col, row} = get_bucket_from_node(node, n)
      inner = Map.get(buckets, col, %{})
      updated_inner = Map.put(inner, row, [node | Map.get(inner, row, [])])
      Map.put(buckets, col, updated_inner)
    end
  end

  @doc """
  Connects a node to its appropriate neighbors, returning the ids of every
  candidate within radius `r` (excluding the node itself).
  """
  def connect_to_neighbors(node = %RGG.Node{id: id}, buckets, r) do
    for %RGG.Node{id: neighbor_id} = candidate <- get_adjacent_nodes_for_bucket(node, buckets),
        neighbor_id != id,
        RGG.Util.distance(node, candidate) <= r do
      neighbor_id
    end
  end
end
|
lib/rgg/shared.ex
| 0.832373
| 0.764935
|
shared.ex
|
starcoder
|
defmodule Flamelex.Fluxus.RadixReducer do
  @moduledoc """
  The RootReducer for all flamelex actions.
  These pure-functions are called by ActionListener, to handle specific
  actions within the application. Every action that gets processed, is
  routed down to the sub-reducers, through this module. Every possible
  action, must also be declared inside this file.
  A reducer is a function that determines changes to an application's state.
  All the reducers in Flamelex.Fluxus (and this includes both action
  handlers, and user-input handlers) work the same way - they take in
  the application state, & an action, & return an updated state. They
  may also fire off side-effects along the way, including further actions.
  ```
  A reducer is a function that determines changes to an application's state.
  It uses the action it receives to determine this change. We have tools,
  like Redux, that help manage an application's state changes in a single
  store so that they behave consistently.
  ```
  https://css-tricks.com/understanding-how-reducers-are-used-in-redux/
  Here we have the function which `reduces` a radix_state and an action.
  Our main way of handling actions is simply to broadcast them on to the
  `:actions` broker, which will forward it to all the main Manager processes
  in turn (GUiManager, BufferManager, AgentManager, etc.)
  The reason for this is, what's going to happen is, say I send a command
  like `open_buffer` to open my journal. We spin up this action handler
  task - say that takes 2 seconds to run for some reason. If I send the
  same action again, another process will spin up. Eventually, they're
  both going to finish, and whoever is getting the results (FluxusRadix)
  is going to get 2 messages, and then have to handle the situation of
  dealing with double-processes of actions (yuck!)
  what we want to do instead is, the reducer broadcasts the message to
  the "actions" channel - all the managers are able to react to this event.
  """
  require Logger

  @doc """
  Reduces a radix_state and an action. Returns whatever the matched
  sub-reducer returns, `:ignore` for memex actions while the memex is
  inactive, or `{:error, reason}` when no clause matches.
  """
  # Explicit `{reducer_module, action}` tuples are dispatched straight to
  # that reducer module.
  def process(radix_state, {reducer, action}) when is_atom(reducer) do
    reducer.process(radix_state, action)
  end

  @memex_actions [
    :open_memex, :close_memex
  ]

  # Memex actions are dropped (with a warning) while the memex is inactive.
  # NOTE(review): `Logger.warn/1` is deprecated in recent Elixir versions in
  # favour of `Logger.warning/1` — switch once the minimum Elixir version is
  # confirmed to support it.
  def process(%{memex: %{active?: false}}, action) when action in @memex_actions do
    Logger.warn "#{__MODULE__} ignoring a memex action, because the memex is set to `inactive`"
    :ignore
  end

  def process(%{memex: %{active?: true}} = radix_state, action) when action in @memex_actions do
    Flamelex.Fluxus.Reducers.Memex.process(radix_state, action)
  end

  # Catch-all clause: parameters are underscore-prefixed because they are
  # intentionally unused (fixes compiler "unused variable" warnings).
  def process(_radix_state, _action) do
    {:error, "RootReducer bottomed-out! No match was found."}
  end
end
|
lib/flamelex/fluxus/reducers/radix_reducer.ex
| 0.714728
| 0.698946
|
radix_reducer.ex
|
starcoder
|
defmodule Wand.CLI.Commands.Core do
  # `use Wand.CLI.Command` presumably injects the command behaviour and the
  # `Result` struct alias referenced in execute/2 — TODO confirm against
  # Wand.CLI.Command.
  use Wand.CLI.Command
  alias Wand.CLI.Display
  alias Wand.CLI.CoreValidator

  # IO indirection, resolved once at compile time — presumably so tests can
  # configure a mock implementation.
  @io Wand.Interfaces.IO.impl()

  @moduledoc """
  # Core
  Manage the related wand_core package
  ### Usage
  ```
  wand core install
  wand core version
  ```
  Wand comes in two parts, the CLI and the wand.core tasks.
  In order to run mix deps.get, only the wand.core tasks are needed. For everything else, the CLI is needed.
  Wand validates to make sure the CLI is using a compatible version of wand_core. If they get out of sync, you can type wand core upgrade to fix the issue.
  """
  # NOTE(review): the moduledoc mentions `wand core upgrade`, but only
  # `install` and `version` are parsed below — confirm which is intended.

  @doc false
  def help(:banner) do
    """
    Manage the related wand_core package
    ### Usage
    ```
    wand core install
    wand core version
    ```
    """
    |> Display.print()
  end

  @doc false
  def help(:verbose), do: Display.print(@moduledoc)

  @doc false
  def help(:wrong_command) do
    """
    The command is invalid.
    The correct commands are:
    <pre>
    wand core install
    wand core version
    </pre>
    See wand help core --verbose for more information
    """
    |> Display.print()
  end

  @doc false
  # Validates the raw CLI args. `[_ | commands]` drops the leading "core"
  # token itself, leaving just the subcommand.
  def validate(args) do
    {switches, [_ | commands], errors} = OptionParser.parse(args, strict: get_flags(args))

    case Wand.CLI.Command.parse_errors(errors) do
      :ok -> parse(commands, switches)
      error -> error
    end
  end

  @doc false
  # Prints the installed wand_core version, or reports it missing.
  def execute(:version, _extras) do
    case CoreValidator.core_version() do
      {:ok, version} ->
        @io.puts(version)
        {:ok, %Result{message: nil}}

      {:error, _} ->
        {:error, :wand_core_missing, nil}
    end
  end

  @doc false
  # Installs the wand_core archive via mix.
  def execute(:install, _extras) do
    case Wand.CLI.Mix.install_core() do
      :ok -> {:ok, %Result{}}
      {:error, _} -> {:error, :wand_core_api_error, nil}
    end
  end

  @doc false
  def handle_error(:wand_core_api_error, _extra) do
    """
    # Error
    Could not install the wand_core archive. Please check the error message and then run wand core install again.
    """
  end

  @doc false
  def handle_error(:wand_core_missing, _extra) do
    """
    # Error
    Could not determine the version for wand_core.
    You can try installing it with wand core install
    """
  end

  # No subcommand given: only `--version` is meaningful here.
  defp parse([], switches) do
    case Keyword.get(switches, :version) do
      true -> {:ok, :version}
      _ -> {:error, :wrong_command}
    end
  end

  defp parse(commands, _switches) do
    case commands do
      ["install"] -> {:ok, :install}
      ["version"] -> {:ok, :version}
      _ -> {:error, :wrong_command}
    end
  end

  # First pass over args (non-strict) just to see which subcommand was given,
  # so validate/1 knows which flags to accept strictly.
  defp get_flags(args) do
    {_switches, [_ | commands], _errors} = OptionParser.parse(args)

    case commands do
      ["version"] -> [version: :boolean]
      [] -> [version: :boolean]
      _ -> []
    end
  end
end
|
lib/cli/commands/core.ex
| 0.558327
| 0.655274
|
core.ex
|
starcoder
|
defmodule Timex do
@moduledoc File.read!("README.md")
defmacro __using__(_) do
quote do
alias Timex.DateTime
alias Timex.AmbiguousDateTime
alias Timex.Date
alias Timex.Time
alias Timex.Interval
alias Timex.TimezoneInfo
alias Timex.AmbiguousTimezoneInfo
alias Timex.Timezone
alias Timex.Convertable
end
end
alias Timex.Date
alias Timex.DateTime
alias Timex.AmbiguousDateTime
alias Timex.Timezone
alias Timex.TimezoneInfo
alias Timex.AmbiguousTimezoneInfo
alias Timex.Types
alias Timex.Helpers
alias Timex.Convertable
alias Timex.Comparable
alias Timex.Translator
use Timex.Constants
import Timex.Macros
@doc """
Creates a new Date value, which represents the first day of year zero.
If a date/time value is provided, it will convert it to a Date struct.
"""
@spec date(Timex.Convertable) :: Date.t | {:error, term}
defdelegate date(from), to: Timex.Convertable, as: :to_date
@doc """
Creates a new DateTime value, which represents the first moment of the first day of year zero.
The provided date/time value will be converted via the `Timex.Convertable` protocol.
"""
@spec datetime(Convertable) :: DateTime.t | {:error, term}
defdelegate datetime(from), to: Timex.Convertable, as: :to_datetime
@doc """
Same as `datetime/1`, except this version returns a DateTime or AmbiguousDateTime in the provided timezone.
"""
@spec datetime(Convertable, Types.valid_timezone) :: DateTime.t | AmbiguousDateTime.t | {:error, term}
def datetime(from, timezone) do
case Convertable.to_datetime(from) do
{:error, _} = err ->
err
%DateTime{} = datetime ->
case Timezone.name_of(timezone) do
{:error, _} = err ->
err
name ->
seconds_from_zeroyear = DateTime.to_seconds(datetime, :zero)
case Timezone.resolve(name, seconds_from_zeroyear) do
{:error, _} = err ->
err
%TimezoneInfo{} = tzinfo ->
%{datetime | :timezone => tzinfo}
%AmbiguousTimezoneInfo{:before => b, :after => a} ->
%AmbiguousDateTime{:before => %{datetime | :timezone => b},
:after => %{datetime | :timezone => a}}
end
end
end
end
@doc """
WARNING: Added to ease the migration to 2.x, but it is deprecated.
Returns a DateTime, like the old `Date.from/1` API
"""
def from(from) do
IO.write :stderr, "warning: Timex.from/1 is deprecated, use Timex.date/1 or Timex.datetime/1 instead\n"
Convertable.to_datetime(from)
end
@doc """
WARNING: Added to ease the migration to 2.x, but it is deprecated.
Use Timex.date/1 or Timex.datetime/2 instead.
Returns a DateTime, like the old `Date.from/2` API
"""
def from(from, timezone) do
IO.write :stderr, "warning: Timex.from/1 is deprecated, use Timex.date/1 or Timex.datetime/1 instead\n"
Timex.datetime(from, timezone)
end
@doc """
Convert a date/time value to a Gregorian calendar datetme+timezone tuple.
i.e. { {year, month, day}, {hour, minute, second}, {offset_hours, timezone_abbreviation}}
"""
@spec to_gregorian(Convertable) :: Types.gregorian | {:error, term}
defdelegate to_gregorian(datetime), to: Convertable
@doc """
Convert a date/time value to a Julian calendar date number
"""
@spec to_julian(Convertable) :: float
defdelegate to_julian(datetime), to: Convertable
@doc """
Convert a date/time value to gregorian seconds (seconds since start of year zero)
"""
@spec to_gregorian_seconds(Convertable) :: non_neg_integer | {:error, term}
defdelegate to_gregorian_seconds(datetime), to: Convertable
@doc """
Convert a date/time value to a standard Erlang datetme tuple.
i.e. { {year, month, day}, {hour, minute, second} }
"""
@spec to_erlang_datetime(Convertable) :: Types.datetime | {:error, term}
defdelegate to_erlang_datetime(datetime), to: Convertable
@doc """
Convert a date/time value to a Date struct
"""
@spec to_date(Convertable) :: Date.t | {:error, term}
defdelegate to_date(datetime), to: Convertable
@doc """
Convert a date/time value to a DateTime struct
"""
@spec to_datetime(Convertable) :: DateTime.t | {:error, term}
defdelegate to_datetime(datetime), to: Convertable
@doc """
Convert a date/time value to seconds since the UNIX epoch
"""
@spec to_unix(Convertable) :: non_neg_integer | {:error, term}
defdelegate to_unix(datetime), to: Convertable
@doc """
Convert a date/time value to an Erlang timestamp
"""
@spec to_timestamp(Convertable) :: Types.timestamp | {:error, term}
defdelegate to_timestamp(datetime), to: Convertable
@doc """
Formats a date/time value using the given format string (and optional formatter).
See Timex.Format.DateTime.Formatters.Default or Timex.Format.DateTime.Formatters.Strftime
for documentation on the syntax supported by those formatters.
To use the Default formatter, simply call format/2. To use the Strftime formatter, you
can either alias and pass Strftime by module name, or as a shortcut, you can pass :strftime
instead.
Formatting uses the Convertable protocol to convert non-DateTime structs to DateTime structs.
## Examples
iex> date = Timex.date({2016, 2, 29})
...> Timex.format!(date, "{YYYY}-{0M}-{D}")
"2016-02-29"
iex> Timex.format!({{2016,2,29},{22,25,0}}, "{ISO:Extended}")
"2016-02-29T22:25:00+00:00"
"""
@spec format(Convertable, format :: String.t) :: {:ok, String.t} | {:error, term}
defdelegate format(datetime, format_string), to: Timex.Format.DateTime.Formatter
@doc """
Same as format/2, except using a custom formatter
## Examples
iex> use Timex
...> datetime = Timex.datetime({{2016,2,29},{22,25,0}}, "America/Chicago")
iex> Timex.format!(datetime, "%FT%T%:z", :strftime)
"2016-02-29T22:25:00-06:00"
"""
@spec format(Convertable, format :: String.t, formatter :: atom) :: {:ok, String.t} | {:error, term}
defdelegate format(datetime, format_string, formatter), to: Timex.Format.DateTime.Formatter
@doc """
Same as format/2, except takes a locale name to translate text to.
Translations only apply to units, relative time phrases, and only for the locales in the
list of supported locales in the Timex documentation.
"""
@spec lformat(Convertable, format :: String.t, locale :: String.t) :: {:ok, String.t} | {:error, term}
defdelegate lformat(datetime, format_string, locale), to: Timex.Format.DateTime.Formatter
@doc """
Same as lformat/3, except takes a formatter as it's last argument.
Translations only apply to units, relative time phrases, and only for the locales in the
list of supported locales in the Timex documentation.
"""
@spec lformat(Convertable, format :: String.t, locale :: String.t, formatter :: atom) :: {:ok, String.t} | {:error, term}
defdelegate lformat(datetime, format_string, locale, formatter), to: Timex.Format.DateTime.Formatter
@doc """
Same as format/2, except format! raises on error.
See format/2 docs for usage examples.
"""
@spec format!(Convertable, format :: String.t) :: String.t | no_return
defdelegate format!(datetime, format_string), to: Timex.Format.DateTime.Formatter
@doc """
Same as format/3, except format! raises on error.
See format/3 docs for usage examples
"""
@spec format!(Convertable, format :: String.t, formatter :: atom) :: String.t | no_return
defdelegate format!(datetime, format_string, formatter), to: Timex.Format.DateTime.Formatter
@doc """
Same as lformat/3, except local_format! raises on error.
See lformat/3 docs for usage examples.
"""
@spec lformat!(Convertable, format :: String.t, locale :: String.t) :: String.t | no_return
defdelegate lformat!(datetime, format_string, locale), to: Timex.Format.DateTime.Formatter
@doc """
Same as lformat/4, except local_format! raises on error.
See lformat/4 docs for usage examples
"""
@spec lformat!(Convertable, format :: String.t, locale :: String.t, formatter :: atom) :: String.t | no_return
defdelegate lformat!(datetime, format_string, locale, formatter), to: Timex.Format.DateTime.Formatter
@doc """
Formats a DateTime using a fuzzy relative duration, from now.
## Examples
iex> use Timex
...> Timex.from_now(Timex.shift(DateTime.now, days: 2))
"in 2 days"
iex> use Timex
...> Timex.from_now(Timex.shift(DateTime.now, days: -2))
"2 days ago"
"""
@spec from_now(Convertable) :: String.t | {:error, term}
def from_now(datetime), do: from_now(datetime, Timex.Translator.default_locale)
@doc """
Formats a DateTime using a fuzzy relative duration, translated using given locale
## Examples
iex> use Timex
...> Timex.from_now(Timex.shift(DateTime.now, days: 2), "ru")
"через 2 дней"
iex> use Timex
...> Timex.from_now(Timex.shift(DateTime.now, days: -2), "ru")
"2 дня назад"
"""
@spec from_now(Convertable, String.t) :: String.t | {:error, term}
def from_now(datetime, locale) when is_binary(locale) do
case Convertable.to_datetime(datetime) do
{:error, _} = err -> err
%DateTime{} = dt ->
case lformat(dt, "{relative}", locale, :relative) do
{:ok, formatted} -> formatted
{:error, _} = err -> err
end
end
end
@doc """
Formats a DateTime using a fuzzy relative duration, with a reference datetime other than now
"""
@spec from_now(Convertable, Convertable) :: String.t | {:error, term}
def from_now(datetime, reference_date), do: from_now(datetime, reference_date, Timex.Translator.default_locale)
@doc """
Formats a DateTime using a fuzzy relative duration, with a reference datetime other than now,
translated using the given locale
"""
@spec from_now(Convertable, Convertable, String.t) :: String.t | {:error, term}
def from_now(datetime, reference_date, locale) when is_binary(locale) do
case Convertable.to_datetime(datetime) do
{:error, _} = err -> err
%DateTime{} = dt ->
case Convertable.to_datetime(reference_date) do
{:error, _} = err -> err
%DateTime{} = ref ->
case Timex.Format.DateTime.Formatters.Relative.relative_to(dt, ref, "{relative}", locale) do
{:ok, formatted} -> formatted
{:error, _} = err -> err
end
end
end
end
@doc """
Formats an Erlang timestamp using the ISO-8601 duration format, or optionally, with a custom
formatter of your choosing.
See Timex.Format.Time.Formatters.Default or Timex.Format.Time.Formatters.Humanized
for documentation on the specific formatter behaviour.
To use the Default formatter, simply call format_time/2. To use the Humanized formatter, you
can either alias and pass Humanized by module name, or as a shortcut, you can pass :humanized
instead.
## Examples
iex> use Timex
...> date = Date.to_timestamp(Timex.date({2016, 2, 29}), :epoch)
...> Timex.format_time(date)
"P46Y2M10D"
iex> use Timex
...> date = Date.to_timestamp(Timex.date({2016, 2, 29}), :epoch)
...> Timex.format_time(date, :humanized)
"46 years, 2 months, 1 week, 3 days"
iex> use Timex
...> datetime = Timex.datetime({{2016, 2, 29}, {22, 25, 0}}) |> DateTime.to_timestamp
...> Timex.format_time(datetime, :humanized)
"46 years, 2 months, 1 week, 3 days, 22 hours, 25 minutes"
"""
@spec format_time(Types.timestamp) :: String.t | {:error, term}
# Uses the Default (ISO-8601 duration) formatter.
defdelegate format_time(timestamp), to: Timex.Format.Time.Formatter, as: :format
@doc """
Same as format_time/1, except it also accepts a formatter
"""
@spec format_time(Types.timestamp, atom) :: String.t | {:error, term}
defdelegate format_time(timestamp, formatter),
  to: Timex.Format.Time.Formatter, as: :format
@doc """
Same as format_time/1, except takes a locale for use in translation
"""
@spec lformat_time(Types.timestamp, locale :: String.t) :: String.t | {:error, term}
defdelegate lformat_time(timestamp, locale),
  to: Timex.Format.Time.Formatter, as: :lformat
@doc """
Same as lformat_time/2, except takes a formatter as an argument
"""
@spec lformat_time(Types.timestamp, locale :: String.t, atom) :: String.t | {:error, term}
defdelegate lformat_time(timestamp, locale, formatter),
  to: Timex.Format.Time.Formatter, as: :lformat
@doc """
Parses a datetime string into a DateTime struct, using the provided format string (and optional tokenizer).
See Timex.Format.DateTime.Formatters.Default or Timex.Format.DateTime.Formatters.Strftime
for documentation on the syntax supported in format strings by their respective tokenizers.
To use the Default tokenizer, simply call parse/2. To use the Strftime tokenizer, you
can either alias and pass Timex.Parse.DateTime.Tokenizer.Strftime by module name,
or as a shortcut, you can pass :strftime instead.
## Examples
iex> use Timex
...> expected = Timex.datetime({2016, 2, 29})
...> {:ok, result} = Timex.parse("2016-02-29", "{YYYY}-{0M}-{D}")
...> result == expected
true
iex> use Timex
...> expected = Timex.datetime({{2016, 2, 29}, {22, 25, 0}}, "America/Chicago")
...> {:ok, result} = Timex.parse("2016-02-29T22:25:00-06:00", "{ISO:Extended}")
...> Timex.equal?(expected, result)
true
iex> use Timex
...> expected = Timex.datetime({{2016, 2, 29}, {22, 25, 0}}, "America/Chicago")
...> {:ok, result} = Timex.parse("2016-02-29T22:25:00-06:00", "%FT%T%:z", :strftime)
...> Timex.equal?(expected, result)
true
"""
@spec parse(String.t, String.t) :: {:ok, Timex.DateTime.t} | {:error, term}
@spec parse(String.t, String.t, atom) :: {:ok, Timex.DateTime.t} | {:error, term}
defdelegate parse(datetime_string, format_string), to: Timex.Parse.DateTime.Parser
defdelegate parse(datetime_string, format_string, tokenizer), to: Timex.Parse.DateTime.Parser
@doc """
Same as parse/2 and parse/3, except parse! raises on error.
See parse/2 or parse/3 docs for usage examples.
"""
@spec parse!(String.t, String.t) :: Timex.DateTime.t | no_return
@spec parse!(String.t, String.t, atom) :: Timex.DateTime.t | no_return
defdelegate parse!(datetime_string, format_string), to: Timex.Parse.DateTime.Parser
defdelegate parse!(datetime_string, format_string, tokenizer), to: Timex.Parse.DateTime.Parser
@doc """
Given a format string, validates that the format string is valid for the Default formatter.
Given a format string and a formatter, validates that the format string is valid for that formatter.
## Examples
iex> use Timex
...> Timex.validate_format("{YYYY}-{M}-{D}")
:ok
iex> use Timex
...> Timex.validate_format("{YYYY}-{M}-{V}")
{:error, "Expected end of input at line 1, column 11"}
iex> use Timex
...> Timex.validate_format("%FT%T%:z", :strftime)
:ok
"""
@spec validate_format(String.t) :: :ok | {:error, term}
@spec validate_format(String.t, atom) :: :ok | {:error, term}
defdelegate validate_format(format_string), to: Timex.Format.DateTime.Formatter, as: :validate
defdelegate validate_format(format_string, formatter), to: Timex.Format.DateTime.Formatter, as: :validate
@doc """
Gets the current century
## Examples
iex> #{__MODULE__}.century
21
"""
@spec century() :: non_neg_integer
# Uses today's date as the reference point and delegates to century/1.
def century(), do: century(Date.today)
@doc """
Given a date, get the century this date is in.
## Examples
iex> Timex.Date.today |> #{__MODULE__}.century
21
iex> Timex.DateTime.now |> #{__MODULE__}.century
21
iex> #{__MODULE__}.century(2016)
21
"""
@spec century(Convertable | Types.year) :: non_neg_integer | {:error, term}
def century(date) when not is_integer(date) do
  # Non-integer input: coerce to a Date first, then use its year.
  case Convertable.to_date(date) do
    {:error, _} = err -> err
    %Date{:year => year} -> century(year)
  end
end
def century(year) when is_integer(year) do
  # Years evenly divisible by 100 (e.g. 2000) belong to the century given by
  # year/100; every later year in that hundred-year span belongs to the next.
  # (The original cond compared `base_century == base_century - years_past`,
  # which is just an obfuscated `rem(year, 100) == 0`.)
  base_century = div(year, 100)
  if rem(year, 100) == 0 do
    base_century
  else
    base_century + 1
  end
end
@doc """
Convert an iso ordinal day number to the day it represents in the current year.
## Examples
iex> use Timex
iex> %Date{:year => year} = Timex.from_iso_day(180)
...> %Date{:year => todays_year} = Date.today
...> year == todays_year
true
"""
@spec from_iso_day(non_neg_integer) :: Date.t | {:error, term}
def from_iso_day(day) when is_day_of_year(day) do
  # No year given: default to the current year (UTC).
  {{year,_,_},_} = :calendar.universal_time
  from_iso_day(day, year)
end
def from_iso_day(_), do: {:error, {:from_iso_day, :invalid_iso_day}}
@doc """
Same as from_iso_day/1, except you can expect the following based on the second parameter:
- If an integer year is given, the result will be a Date struct
- If a Date struct is given, the result will be a Date struct
- If a DateTime struct is given, the result will be a DateTime struct
- If a Convertable is given, the result will be a DateTime struct
In all cases, the resulting value will be the date representation of the provided ISO day in that year
## Examples
### Creating a Date from the given day
iex> use Timex
...> expected = Timex.date({2015, 6, 29})
...> (expected === Timex.from_iso_day(180, 2015))
true
### Creating a Date/DateTime from the given day
iex> use Timex
...> expected = Timex.datetime({{2015, 6, 29}, {0,0,0}})
...> (expected === Timex.from_iso_day(180, Timex.datetime({{2015,1,1}, {0,0,0}})))
true
### Shifting a Date/DateTime to the given day
iex> use Timex
...> date = Timex.datetime({{2015,6,26}, {12,0,0}})
...> expected = Timex.datetime({{2015, 6, 29}, {12,0,0}})
...> (Timex.from_iso_day(180, date) === expected)
true
"""
@spec from_iso_day(non_neg_integer, Types.year | Date.t | DateTime.t | Convertable) :: Date.t | DateTime.t | {:error, term}
def from_iso_day(day, year) when is_day_of_year(day) and is_year(year) do
  # Integer year: build a fresh Date for that ordinal day.
  datetime = Helpers.iso_day_to_date_tuple(year, day)
  Timex.date(datetime)
end
def from_iso_day(day, %Date{year: year} = date) when is_day_of_year(day) and is_year(year) do
  # Date given: keep the struct, replace its calendar fields in place.
  {year, month, day_of_month} = Helpers.iso_day_to_date_tuple(year, day)
  %{date | :year => year, :month => month, :day => day_of_month}
end
def from_iso_day(day, %DateTime{year: year} = date) when is_day_of_year(day) and is_year(year) do
  # DateTime given: time and timezone fields are preserved unchanged.
  {year, month, day_of_month} = Helpers.iso_day_to_date_tuple(year, day)
  %{date | :year => year, :month => month, :day => day_of_month}
end
def from_iso_day(day, date) when is_day_of_year(day) do
  # Anything else: coerce to a DateTime, then re-dispatch to the clause above.
  case Convertable.to_datetime(date) do
    {:error, _} = err -> err
    %DateTime{} = datetime ->
      from_iso_day(day, datetime)
  end
end
def from_iso_day(_, _),
  do: {:error, {:from_iso_day, :invalid_iso_day}}
@doc """
Return a pair {year, week number} (as defined by ISO 8601) that the given
Date/DateTime value falls on.
## Examples
iex> #{__MODULE__}.iso_week({1970, 1, 1})
{1970,1}
"""
@spec iso_week(Convertable) :: {Types.year, Types.weeknum} | {:error, term}
def iso_week(%Date{:year => y, :month => m, :day => d}) when is_date(y,m,d),
  do: iso_week(y, m, d)
def iso_week(%DateTime{:year => y, :month => m, :day => d}) when is_date(y,m,d),
  do: iso_week(y, m, d)
def iso_week(date) do
  # Fallback: coerce to a Date, then re-dispatch to a clause above.
  case Convertable.to_date(date) do
    {:error, _} = err ->
      err
    %Date{} = d ->
      iso_week(d)
  end
end
@doc """
Same as iso_week/1, except this takes a year, month, and day as distinct arguments.
## Examples
iex> #{__MODULE__}.iso_week(1970, 1, 1)
{1970,1}
"""
@spec iso_week(Types.year, Types.month, Types.day) :: {Types.year, Types.weeknum} | {:error, term}
# The actual computation is done by OTP's :calendar module.
def iso_week(year, month, day) when is_date(year, month, day),
  do: :calendar.iso_week_number({year, month, day})
def iso_week(_, _, _),
  do: {:error, {:iso_week, :invalid_date}}
@doc """
Return a 3-tuple {year, week number, weekday} for the given Date/DateTime.
## Examples
iex> #{__MODULE__}.iso_triplet(Timex.DateTime.epoch)
{1970, 1, 4}
"""
@spec iso_triplet(Convertable) :: {Types.year, Types.weeknum, Types.weekday} | {:error, term}
def iso_triplet(datetime) do
  case Convertable.to_date(datetime) do
    {:error, _} = err ->
      err
    %Date{} = d ->
      # Combine the ISO year/week pair with the ISO weekday number.
      {iso_year, iso_week} = iso_week(d)
      {iso_year, iso_week, Timex.weekday(d)}
  end
end
@doc """
Given an ISO triplet `{year, week number, weekday}`, convert it to a Date struct.
## Examples
iex> expected = Timex.date({2014, 1, 28})
iex> Timex.from_iso_triplet({2014, 5, 2}) === expected
true
"""
@spec from_iso_triplet(Types.iso_triplet) :: Date.t | {:error, term}
def from_iso_triplet({year, week, weekday})
  when is_year(year) and is_week_of_year(week) and is_day_of_week(weekday, :mon)
do
  # Anchor on January 4th, which always falls in ISO week 1, then convert the
  # requested week/weekday into an ordinal day of the year.
  {_, _, jan4weekday} = Date.from({year, 1, 4}) |> iso_triplet
  offset = jan4weekday + 3
  ordinal_date = ((week * 7) + weekday) - offset
  date = Helpers.iso_day_to_date_tuple(year, ordinal_date)
  Timex.date(date)
end
# Fix: the fallback clause was previously written as `from_iso_triplet(_, _, _)`,
# defining an unrelated arity-3 function — so invalid triplets raised
# FunctionClauseError instead of returning this error tuple.
def from_iso_triplet(_), do: {:error, {:from_iso_triplet, :invalid_triplet}}
@doc """
Returns a list of all valid timezone names in the Olson database
"""
@spec timezones() :: [String.t]
def timezones(), do: Tzdata.zone_list
@doc """
Get a TimezoneInfo object for the specified offset or name.
When offset or name is invalid, exception is raised.
If no DateTime value is given for the second parameter, the current date/time
will be used (in other words, it will return the current timezone info for the
given zone). If one is provided, the timezone info returned will be based on
the provided DateTime (or Erlang datetime tuple) value.
## Examples
iex> date = Timex.datetime({2015, 4, 12})
...> tz = Timex.timezone(:utc, date)
...> tz.full_name
"UTC"
iex> tz = Timex.timezone("America/Chicago", {2015,4,12})
...> {tz.full_name, tz.abbreviation}
{"America/Chicago", "CDT"}
iex> tz = #{__MODULE__}.timezone(+2, {2015, 4, 12})
...> {tz.full_name, tz.abbreviation}
{"Etc/GMT-2", "GMT-2"}
"""
@spec timezone(Types.valid_timezone, Convertable) :: TimezoneInfo.t | AmbiguousTimezoneInfo.t
# :utc needs no lookup; the default struct represents UTC.
def timezone(:utc, _), do: %TimezoneInfo{}
def timezone(%TimezoneInfo{full_name: name}, datetime) do
  # Re-resolve an existing zone for the given point in time, since the
  # offset/abbreviation can differ across the year (e.g. DST transitions).
  case Convertable.to_datetime(datetime) do
    {:error, _} = err ->
      err
    %DateTime{} = d ->
      seconds_from_zeroyear = DateTime.to_seconds(d, :zero)
      Timezone.resolve(name, seconds_from_zeroyear)
  end
end
def timezone(:local, datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err ->
      err
    %DateTime{} = d ->
      Timezone.local(d)
  end
end
def timezone(tz, datetime) do
  # Names, offsets, and other shorthand values are handled by Timezone.get/2.
  case Convertable.to_datetime(datetime) do
    {:error, _} = err ->
      err
    %DateTime{} = d ->
      Timezone.get(tz, d)
  end
end
@doc """
Return a boolean indicating whether the given date is valid.
## Examples
iex> use Timex
...> Timex.is_valid?({{1,1,1},{1,1,1}})
true
iex> use Timex
...> %DateTime{} |> #{__MODULE__}.set([month: 13, validate: false]) |> #{__MODULE__}.is_valid?
false
iex> use Timex
...> %DateTime{} |> #{__MODULE__}.set(hour: -1) |> #{__MODULE__}.is_valid?
false
"""
@spec is_valid?(Convertable) :: boolean
def is_valid?(%Date{:year => y, :month => m, :day => d}) do
  :calendar.valid_date({y,m,d})
end
def is_valid?(%DateTime{:year => y, :month => m, :day => d, :hour => h, :minute => min, :second => sec, :timezone => tz}) do
  # A DateTime must have a valid date, a valid time, and a resolvable timezone.
  :calendar.valid_date({y,m,d}) and is_valid_time?({h,min,sec}) and is_valid_timezone?(tz)
end
def is_valid?(datetime) do
  # For other inputs, validity means "can be converted to a DateTime at all".
  case Convertable.to_datetime(datetime) do
    {:error, _} -> false
    %DateTime{} -> true
  end
end
@doc """
Returns a boolean indicating whether the provided term represents a valid time,
valid times are one of:
- `{hour, min, sec}`
- `{hour, min, sec, ms}`
"""
@spec is_valid_time?(term) :: boolean
def is_valid_time?({hour,min,sec}) when is_time(hour,min,sec), do: true
def is_valid_time?({hour,min,sec,ms}) when is_time(hour,min,sec,ms), do: true
def is_valid_time?(_), do: false
@doc """
Returns a boolean indicating whether the provided term represents a valid timezone,
valid timezones are one of:
- TimezoneInfo struct
- A timezone name as a string
- `:utc` as a shortcut for the UTC timezone
- `:local` as a shortcut for the local timezone
- A number representing an offset from UTC
"""
@spec is_valid_timezone?(term) :: boolean
# A term is a valid timezone iff Timezone.name_of/1 can resolve it to a name.
def is_valid_timezone?(timezone) do
  not match?({:error, _}, Timezone.name_of(timezone))
end
@doc """
Returns a boolean indicating whether the first `Timex.Comparable` occurs before the second
"""
@spec before?(Comparable.comparable, Comparable.comparable) :: boolean | {:error, term}
def before?(a, b) do
  # Propagate comparison errors instead of silently treating them as `false`:
  # the spec advertises `{:error, term}`, and between?/3 pattern-matches error
  # tuples from this function (previously dead code, since `{:error, _} == -1`
  # simply evaluated to false).
  case Comparable.compare(a, b) do
    {:error, _} = err -> err
    result -> result == -1
  end
end
@doc """
Returns a boolean indicating whether the first `Timex.Comparable` occurs after the second
"""
@spec after?(Comparable.comparable, Comparable.comparable) :: boolean | {:error, term}
def after?(a, b) do
  # Propagate comparison errors instead of silently treating them as `false`,
  # mirroring before?/2 and matching both the spec and between?/3's handling.
  case Comparable.compare(a, b) do
    {:error, _} = err -> err
    result -> result == 1
  end
end
@doc """
Returns a boolean indicating whether the first `Timex.Comparable` occurs between the second and third
"""
@spec between?(Comparable.comparable, Comparable.comparable, Comparable.comparable) :: boolean | {:error, term}
def between?(a, start, ending) do
  # Both comparisons are always evaluated; an error from the start-bound check
  # takes precedence over one from the end-bound check.
  after_start = after?(a, start)
  before_end = before?(a, ending)
  cond do
    match?({:error, _}, after_start) -> after_start
    match?({:error, _}, before_end) -> before_end
    true -> after_start == true and before_end == true
  end
end
@doc """
Returns a boolean indicating whether the two `Timex.Comparable` values are equivalent.
Equality here implies that the two Comparables represent the same moment in time,
not equality of the data structure.
## Examples
iex> date1 = Timex.date({2014, 3, 1})
...> date2 = Timex.date({2014, 3, 1})
...> #{__MODULE__}.equal?(date1, date2)
true
iex> date1 = Timex.date({2014, 3, 1})
...> date2 = Timex.datetime({2014, 3, 1})
...> #{__MODULE__}.equal?(date1, date2)
true
"""
@spec equal?(Date.t | DateTime.t, Date.t | DateTime.t) :: boolean | {:error, :badarg}
# Fast path: structurally identical terms are trivially equal.
def equal?(a, a), do: true
# Otherwise compare the two values at second-level granularity.
def equal?(a, b), do: Comparable.compare(a, b, :seconds) == 0
@doc """
See docs for `compare/3`
"""
@spec compare(Comparable.comparable, Comparable.comparable) :: Comparable.compare_result | {:error, term}
defdelegate compare(a, b), to: Timex.Comparable
@doc """
Compare two `Timex.Comparable` values, returning one of the following values:
* `-1` -- the first date comes before the second one
* `0` -- both arguments represent the same date when coalesced to the same timezone.
* `1` -- the first date comes after the second one
You can provide a few reference constants for the second argument:
- :epoch will compare the first parameter against the Date/DateTime of the first moment of the UNIX epoch
- :zero will compare the first parameter against the Date/DateTime of the first moment of year zero
- :distant_past will compare the first parameter against a date/time infinitely in the past (i.e. it will always return 1)
- :distant_future will compare the first parameter against a date/time infinitely in the future (i.e. it will always return -1)
You can optionally specify a comparison granularity, any of the following:
- :years
- :months
- :weeks
- :calendar_weeks (weeks of the calendar as opposed to actual weeks in terms of days)
- :days
- :hours
- :minutes
- :seconds
- :timestamp
and the dates will be compared with the corresponding accuracy.
The default granularity is :seconds.
## Examples
iex> date1 = Timex.date({2014, 3, 4})
iex> date2 = Timex.date({2015, 3, 4})
iex> Timex.compare(date1, date2, :years)
-1
iex> Timex.compare(date2, date1, :years)
1
iex> Timex.compare(date1, date1)
0
"""
@spec compare(Comparable.comparable, Comparable.comparable, Comparable.granularity) :: Comparable.compare_result | {:error, term}
defdelegate compare(a, b, granularity), to: Timex.Comparable
@doc """
See docs for `diff/3`
"""
@spec diff(Comparable.comparable, Comparable.comparable) :: Types.timestamp | {:error, term}
defdelegate diff(a, b), to: Timex.Comparable
@doc """
Calculate time interval between two dates. The result will always be a non-negative integer
You must specify one of the following units:
- :years
- :months
- :calendar_weeks (weeks of the calendar as opposed to actual weeks in terms of days)
- :weeks
- :days
- :hours
- :minutes
- :seconds
- :timestamp
and the result will be an integer value of those units or a timestamp.
"""
@spec diff(Timex.Comparable.comparable, Timex.Comparable.comparable, Timex.Comparable.granularity) :: Types.timestamp | non_neg_integer | {:error, term}
defdelegate diff(a, b, granularity), to: Timex.Comparable
@doc """
Add time to a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :minutes), :timestamp).
"""
@spec add(Convertable, Types.timestamp) :: DateTime.t | {:error, term}
# NOTE: the microseconds component of the timestamp is discarded here.
def add(%Date{} = date, {mega,sec,_}), do: shift(date, [seconds: (mega * @million) + sec])
def add(%DateTime{} = date, {mega,sec,_}), do: shift(date, [seconds: (mega * @million) + sec])
def add(datetime, {_,_,_} = timestamp) do
  # Anything else is coerced to a DateTime first, then re-dispatched.
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> add(d, timestamp)
  end
end
@doc """
Subtract time from a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :minutes) |> Time.invert, :timestamp).
"""
@spec subtract(Convertable, Types.timestamp) :: DateTime.t | {:error, term}
# NOTE: as with add/2, the microseconds component is discarded.
def subtract(%Date{} = date, {mega,sec,_}), do: shift(date, [seconds: (-mega * @million) - sec])
def subtract(%DateTime{} = date, {mega,sec,_}), do: shift(date, [seconds: (-mega * @million) - sec])
def subtract(datetime, {_,_,_} = timestamp) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> subtract(d, timestamp)
  end
end
@doc """
A single function for adjusting the date using various units: timestamp,
milliseconds, seconds, minutes, hours, days, weeks, months, years.
TODO: When shifting by timestamps, microseconds are ignored.
The result of applying the shift will either be:
- a Date
- a DateTime
- an AmbiguousDateTime, which will require you to make a choice about which DateTime to use
- an error tuple, which should only occur if something goes wrong with timezone resolution
## Examples
### Shifting across timezone changes
iex> use Timex
...> %DateTime{} = datetime = Timex.datetime({{2016,3,13}, {1,0,0}}, "America/Chicago")
...> # 2-3 AM doesn't exist
...> shifted = Timex.shift(datetime, hours: 1)
...> {datetime.timezone.abbreviation, shifted.timezone.abbreviation, shifted.hour}
{"CST", "CDT", 3}
### Shifting into an ambiguous time period
iex> use Timex
...> %DateTime{} = datetime = Timex.datetime({{1895,12,31}, {0,0,0}}, "Asia/Taipei")
...> %AmbiguousDateTime{} = expected = Timex.datetime({{1895,12,31}, {23,55,0}}, "Asia/Taipei")
...> expected == Timex.shift(datetime, hours: 23, minutes: 53, seconds: 120)
true
### Shifting and leap days
iex> use Timex
...> %DateTime{} = datetime = Timex.datetime({2016,2,29})
...> Timex.shift(datetime, years: -1)
Timex.datetime({2015, 2, 28})
"""
@spec shift(Date.t | DateTime.t, list({Types.shift_units, term})) :: DateTime.t | {:error, term}
# Dispatch on struct type; anything else is coerced to a DateTime first.
def shift(%Date{} = date, options), do: Date.shift(date, options)
def shift(%DateTime{} = datetime, options), do: DateTime.shift(datetime, options)
def shift(datetime, options) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> shift(d, options)
  end
end
@doc """
Get the day of the week corresponding to the given name.
## Examples
iex> #{__MODULE__}.day_to_num("Monday")
1
iex> #{__MODULE__}.day_to_num("monday")
1
iex> #{__MODULE__}.day_to_num("Mon")
1
iex> #{__MODULE__}.day_to_num("mon")
1
iex> #{__MODULE__}.day_to_num(:mon)
1
"""
@spec day_to_num(binary | atom()) :: integer | {:error, :invalid_day_name}
# Compile-time generation: for every entry in @weekdays this emits clauses
# matching the full name, the lowercased name, the 3-letter abbreviation
# (original and lowercase), and the lowercase abbreviation as an atom.
Enum.each(@weekdays, fn {day_name, day_num} ->
  lower = day_name |> String.downcase
  abbr_cased = day_name |> String.slice(0..2)
  abbr_lower = lower |> String.slice(0..2)
  symbol = abbr_lower |> String.to_atom
  day_quoted = quote do
    def day_to_num(unquote(day_name)), do: unquote(day_num)
    def day_to_num(unquote(lower)), do: unquote(day_num)
    def day_to_num(unquote(abbr_cased)), do: unquote(day_num)
    def day_to_num(unquote(abbr_lower)), do: unquote(day_num)
    def day_to_num(unquote(symbol)), do: unquote(day_num)
  end
  Module.eval_quoted __MODULE__, day_quoted, [], __ENV__
end)
# Fallback for any name not covered by the generated clauses above.
def day_to_num(_), do: {:error, :invalid_day_name}
@doc """
Get the name of the day corresponding to the provided number
## Examples
iex> #{__MODULE__}.day_name(1)
"Monday"
iex> #{__MODULE__}.day_name(0)
{:error, :invalid_weekday_number}
"""
@spec day_name(Types.weekday) :: String.t | {:error, :invalid_weekday_number}
def day_name(num) when num in 1..7 do
  # Look up the full weekday name for the default locale.
  Translator.default_locale
  |> Translator.get_weekdays()
  |> Map.get(num)
end
def day_name(_), do: {:error, :invalid_weekday_number}
@doc """
Get the short name of the day corresponding to the provided number
## Examples
iex> #{__MODULE__}.day_shortname(1)
"Mon"
iex> #{__MODULE__}.day_shortname(0)
{:error, :invalid_weekday_number}
"""
@spec day_shortname(Types.weekday) :: String.t | {:error, :invalid_weekday_number}
def day_shortname(num) when num in 1..7 do
  # Look up the abbreviated weekday name for the default locale.
  Translator.default_locale
  |> Translator.get_weekdays_abbreviated()
  |> Map.get(num)
end
def day_shortname(_), do: {:error, :invalid_weekday_number}
@doc """
Get the number of the month corresponding to the given name.
## Examples
iex> #{__MODULE__}.month_to_num("January")
1
iex> #{__MODULE__}.month_to_num("january")
1
iex> #{__MODULE__}.month_to_num("Jan")
1
iex> #{__MODULE__}.month_to_num("jan")
1
iex> #{__MODULE__}.month_to_num(:jan)
1
"""
@spec month_to_num(binary) :: integer | {:error, :invalid_month_name}
# Compile-time generation, as with day_to_num/1; months additionally accept
# charlist forms of the full and abbreviated names.
# NOTE(review): String.to_char_list is the pre-Elixir-1.3 spelling of
# String.to_charlist — kept for compatibility with the file's target version.
Enum.each(@months, fn {month_name, month_num} ->
  lower = month_name |> String.downcase
  abbr_cased = month_name |> String.slice(0..2)
  abbr_lower = lower |> String.slice(0..2)
  symbol = abbr_lower |> String.to_atom
  full_chars = month_name |> String.to_char_list
  abbr_chars = abbr_cased |> String.to_char_list
  month_quoted = quote do
    def month_to_num(unquote(month_name)), do: unquote(month_num)
    def month_to_num(unquote(lower)), do: unquote(month_num)
    def month_to_num(unquote(abbr_cased)), do: unquote(month_num)
    def month_to_num(unquote(abbr_lower)), do: unquote(month_num)
    def month_to_num(unquote(symbol)), do: unquote(month_num)
    def month_to_num(unquote(full_chars)), do: unquote(month_num)
    def month_to_num(unquote(abbr_chars)), do: unquote(month_num)
  end
  Module.eval_quoted __MODULE__, month_quoted, [], __ENV__
end)
# Fallback for any name not covered by the generated clauses above.
def month_to_num(_), do: {:error, :invalid_month_name}
@doc """
Get the name of the month corresponding to the provided number
## Examples
iex> #{__MODULE__}.month_name(1)
"January"
iex> #{__MODULE__}.month_name(0)
{:error, :invalid_month_number}
"""
@spec month_name(Types.month) :: String.t | {:error, :invalid_month_number}
def month_name(num) when num in 1..12 do
  months = Translator.get_months(Translator.default_locale)
  Map.get(months, num)
end
def month_name(_), do: {:error, :invalid_month_number}
@doc """
Get the short name of the month corresponding to the provided number
## Examples
iex> #{__MODULE__}.month_shortname(1)
"Jan"
iex> #{__MODULE__}.month_shortname(0)
{:error, :invalid_month_number}
"""
@spec month_shortname(Types.month) :: String.t | {:error, :invalid_month_number}
def month_shortname(num) when num in 1..12 do
  months = Translator.get_months_abbreviated(Translator.default_locale)
  Map.get(months, num)
end
def month_shortname(_), do: {:error, :invalid_month_number}
@doc """
Return weekday number (as defined by ISO 8601) of the specified date.
## Examples
iex> Timex.Date.epoch |> #{__MODULE__}.weekday
4 # (i.e. Thursday)
"""
@spec weekday(Convertable) :: Types.weekday | {:error, term}
def weekday(%Date{year: year, month: month, day: day}),
  do: :calendar.day_of_the_week({year, month, day})
def weekday(%DateTime{year: year, month: month, day: day}),
  do: :calendar.day_of_the_week({year, month, day})
def weekday(other) do
  # Fallback: coerce to a Date, then re-dispatch to a clause above.
  case Convertable.to_date(other) do
    {:error, _} = err -> err
    %Date{} = converted -> weekday(converted)
  end
end
@doc """
Returns the ordinal day number of the date.
## Examples
iex> Timex.datetime({{2015,6,26},{0,0,0}}) |> Timex.day
177
"""
@spec day(Convertable) :: Types.daynum | {:error, term}
def day(%Date{} = date), do: day(to_datetime(date))
def day(%DateTime{} = date) do
  # Day-of-year = 1 + whole days elapsed since January 1st of the same year.
  start_of_year = DateTime.set(date, [month: 1, day: 1])
  1 + diff(start_of_year, date, :days)
end
def day(datetime) do
  case Convertable.to_date(datetime) do
    {:error, _} = err -> err
    %Date{} = d -> day(d)
  end
end
@doc """
Return the number of days in the month which the date falls on.
## Examples
iex> Timex.Date.epoch |> Timex.days_in_month
31
"""
@spec days_in_month(Convertable) :: Types.num_of_days | {:error, term}
def days_in_month(%DateTime{:year => y, :month => m}), do: days_in_month(y, m)
def days_in_month(%Date{:year => y, :month => m}), do: days_in_month(y, m)
def days_in_month(date) do
  # Fallback: coerce to a Date, then re-dispatch to a clause above.
  case Convertable.to_date(date) do
    {:error, _} = err -> err
    %Date{} = d -> days_in_month(d)
  end
end
@doc """
Same as days_in_month/1, except takes year and month as distinct arguments
"""
@spec days_in_month(Types.year, Types.month) :: Types.num_of_days | {:error, term}
defdelegate days_in_month(year, month), to: Timex.Helpers
@doc """
Given a Convertable, this function returns the week number of the date provided, starting at 1.
## Examples
iex> Timex.week_of_month({2016,3,5})
1
iex> Timex.week_of_month(Timex.datetime({2016, 3, 14}))
3
"""
@spec week_of_month(Convertable) :: Types.week_of_month
def week_of_month(%DateTime{:year => y, :month => m, :day => d}), do: week_of_month(y,m,d)
def week_of_month(%Date{:year => y, :month => m, :day => d}), do: week_of_month(y,m,d)
def week_of_month(datetime) do
  case Convertable.to_date(datetime) do
    {:error, _} = err -> err
    %Date{} = d -> week_of_month(d)
  end
end
@doc """
Same as week_of_month/1, except takes year, month, and day as distinct arguments
## Examples
iex> Timex.week_of_month(2016, 3, 30)
5
"""
@spec week_of_month(Types.year, Types.month, Types.day) :: Types.week_of_month
# NOTE(review): this subtracts ISO week numbers directly, which looks wrong for
# dates whose ISO week belongs to the neighboring year (e.g. late-December days
# that fall in ISO week 1 of the following year) — verify against callers.
def week_of_month(year, month, day) when is_date(year, month, day) do
  {_, week_index_of_given_date} = iso_week(year, month, day)
  {_, week_index_of_first_day_of_given_month} = iso_week(year, month, 1)
  week_index_of_given_date - week_index_of_first_day_of_given_month + 1
end
def week_of_month(_, _, _), do: {:error, :invalid_date}
@doc """
Given a date returns a date at the beginning of the month.
iex> date = Timex.datetime({{2015, 6, 15}, {12,30,0}}, "Europe/Paris")
iex> #{__MODULE__}.beginning_of_month(date)
Timex.datetime({{2015, 6, 1}, {0, 0, 0}}, "Europe/Paris")
"""
@spec beginning_of_month(Date.t | DateTime.t | Comparable) :: Date.t | DateTime.t | {:error, term}
def beginning_of_month(%Date{year: year, month: month}),
  do: Timex.date({year, month, 1})
# When the DateTime carries a timezone, preserve it in the result.
def beginning_of_month(%DateTime{year: year, month: month, timezone: tz}) when not is_nil(tz),
  do: Timex.datetime({{year, month, 1},{0, 0, 0}}, tz)
def beginning_of_month(%DateTime{year: year, month: month}),
  do: Timex.datetime({{year, month, 1},{0, 0, 0}})
def beginning_of_month(datetime) do
  # Convertable fallback: reset day and all time fields in place.
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d ->
      %{d | :day => 1, :hour => 0, :minute => 0, :second => 0, :millisecond => 0}
  end
end
@doc """
Same as beginning_of_month/1, except takes year and month as distinct arguments
"""
@spec beginning_of_month(Types.year, Types.month) :: Date.t | {:error, term}
# Fix: the guard previously read `is_year(month) and is_month(month)`,
# validating the month twice and never validating the year.
def beginning_of_month(year, month) when is_year(year) and is_month(month),
  do: Timex.date({year, month, 1})
def beginning_of_month(_, _),
  do: {:error, :invalid_year_or_month}
@doc """
Given a date returns a date at the end of the month.
iex> date = Timex.datetime({{2015, 6, 15}, {12, 30, 0}}, "Europe/London")
iex> Timex.end_of_month(date)
Timex.datetime({{2015, 6, 30}, {23, 59, 59}}, "Europe/London")
"""
@spec end_of_month(Date.t | DateTime.t) :: Date.t | DateTime.t | {:error, term}
def end_of_month(%Date{year: year, month: month} = date),
  do: Timex.date({year, month, days_in_month(date)})
# When the DateTime carries a timezone, preserve it in the result.
def end_of_month(%DateTime{year: year, month: month, timezone: tz} = date) when not is_nil(tz),
  do: Timex.datetime({{year, month, days_in_month(date)},{23, 59, 59}}, tz)
def end_of_month(%DateTime{year: year, month: month} = date),
  do: Timex.datetime({{year, month, days_in_month(date)},{23, 59, 59}})
def end_of_month(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> end_of_month(d)
  end
end
@doc """
Same as end_of_month/1, except takes year and month as distinct arguments
## Examples
iex> Timex.end_of_month(2016, 2)
Timex.date({2016, 2, 29})
"""
@spec end_of_month(Types.year, Types.month) :: Date.t
def end_of_month(year, month) when is_year(year) and is_month(month),
  do: end_of_month(Timex.date({year, month, 1}))
def end_of_month(_, _),
  do: {:error, :invalid_year_or_month}
# Internal: maps a month number (or anything date-like) to its quarter (1-4).
@spec quarter(Convertable | Types.month) :: integer | {:error, term}
defp quarter(month) when is_month(month), do: div(month - 1, 3) + 1
defp quarter(m) when is_integer(m), do: {:error, :invalid_month}
defp quarter(%Date{month: month}), do: quarter(month)
defp quarter(%DateTime{month: month}), do: quarter(month)
defp quarter(datetime) do
  # Fallback: coerce to a Date, then use its month.
  case Convertable.to_date(datetime) do
    {:error, _} = err -> err
    %Date{month: month} -> quarter(month)
  end
end
@doc """
Given a date returns a date at the beginning of the quarter.
iex> date = Timex.datetime({{2015, 6, 15}, {12,30,0}}, "CST")
iex> Timex.beginning_of_quarter(date)
Timex.datetime({{2015, 4, 1}, {0, 0, 0}}, "CST")
"""
@spec beginning_of_quarter(Date.t | Convertable) :: Date.t | DateTime.t | {:error, term}
def beginning_of_quarter(%Date{year: year, month: month}) when is_year(year) and is_month(month) do
  # First month of the quarter: 1, 4, 7, or 10.
  month = 1 + (3 * (quarter(month) - 1))
  Timex.date({year, month, 1})
end
def beginning_of_quarter(%DateTime{year: year, month: month, timezone: tz})
  when is_year(year) and is_month(month) and not is_nil(tz)
do
  month = 1 + (3 * (quarter(month) - 1))
  Timex.datetime({{year, month, 1},{0, 0, 0}}, tz)
end
def beginning_of_quarter(%DateTime{year: year, month: month}) when is_year(year) and is_month(month) do
  month = 1 + (3 * (quarter(month) - 1))
  Timex.datetime({{year, month, 1},{0, 0, 0}})
end
def beginning_of_quarter(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> beginning_of_quarter(d)
  end
end
@doc """
Given a date or a year and month returns a date at the end of the quarter.
iex> date = Timex.datetime({{2015, 6, 15}, {12,30,0}}, "CST")
iex> Timex.end_of_quarter(date)
Timex.datetime({{2015, 6, 30}, {23, 59, 59}}, "CST")
iex> Timex.end_of_quarter(2015, 4)
Timex.date({{2015, 6, 30}, {23, 59, 59}})
"""
@spec end_of_quarter(Convertable) :: Date.t | DateTime.t | {:error, term}
def end_of_quarter(%Date{year: year, month: month}) when is_year(year) and is_month(month) do
  # Last month of the quarter: 3, 6, 9, or 12.
  month = 3 * quarter(month)
  end_of_month(Timex.date({year, month, 1}))
end
def end_of_quarter(%DateTime{year: year, month: month, timezone: tz})
  when is_year(year) and is_month(month) and not is_nil(tz)
do
  month = 3 * quarter(month)
  # Timezone resolution may be ambiguous at this point in time; in that case
  # apply end_of_month to both candidate DateTimes.
  case Timex.datetime({{year,month,1},{0,0,0}}, tz) do
    {:error, _} = err -> err
    %DateTime{} = d -> end_of_month(d)
    %AmbiguousDateTime{:before => b, :after => a} ->
      %AmbiguousDateTime{:before => end_of_month(b),
                         :after => end_of_month(a)}
  end
end
def end_of_quarter(%DateTime{year: year, month: month}) when is_year(year) and is_month(month) do
  month = 3 * quarter(month)
  end_of_month(Timex.datetime({year, month, 1}))
end
def end_of_quarter(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> end_of_quarter(d)
    %AmbiguousDateTime{:before => b, :after => a} ->
      %AmbiguousDateTime{:before => end_of_quarter(b),
                         :after => end_of_quarter(a)}
  end
end
@doc """
Same as end_of_quarter/1, except takes year and month as distinct arguments
"""
@spec end_of_quarter(Types.year, Types.month) :: Date.t | {:error, term}
def end_of_quarter(year, month) when is_year(year) and is_month(month) do
  end_of_month(Timex.date({year, 3 * quarter(month), 1}))
end
def end_of_quarter(_, _), do: {:error, :invalid_year_or_month}
@doc """
Given a date or a number create a date at the beginning of that year

Examples

    iex> date = Timex.datetime({{2015, 6, 15}, {0, 0, 0, 0}})
    iex> Timex.beginning_of_year(date)
    Timex.datetime({{2015, 1, 1}, {0, 0, 0, 0}})

    iex> Timex.beginning_of_year(2015)
    Timex.date({{2015, 1, 1}, {0, 0, 0, 0}})

    iex> Timex.beginning_of_year(2015, "Europe/London")
    Timex.datetime({{2015, 1, 1}, {0, 0, 0, 0}}, "Europe/London")
"""
@spec beginning_of_year(Date.t | Comparable | Types.year) :: Date.t | DateTime.t | {:error, term}
def beginning_of_year(%Date{year: year}) when is_year(year),
  do: Timex.date({year, 1, 1})
# Preserve the timezone of a zoned DateTime.
def beginning_of_year(%DateTime{year: year, timezone: tz}) when is_year(year) and not is_nil(tz),
  do: Timex.datetime({year, 1, 1}, tz)
def beginning_of_year(%DateTime{year: year}) when is_year(year),
  do: Timex.datetime({year, 1, 1})
# A bare integer year produces a Date.
def beginning_of_year(year) when is_year(year),
  do: Timex.date({year, 1, 1})
# Fallback: coerce any convertible value, then re-dispatch.
def beginning_of_year(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> beginning_of_year(d)
  end
end
@doc """
Same as beginning_of_year, except takes an integer year + timezone as arguments.
"""
@spec beginning_of_year(Types.year, Types.valid_timezone) :: DateTime.t | {:error, term}
# BUG FIX: this clause previously also required `is_binary(tz)` in the guard,
# which can never be true for a %TimezoneInfo{} struct, making the clause
# unreachable — a TimezoneInfo argument fell through to the next clauses.
# Guarding only on the year mirrors end_of_year/2 below.
def beginning_of_year(year, %TimezoneInfo{} = tz) when is_year(year),
  do: Timex.datetime({year, 1, 1}, tz)
# Timezone given as a name/atom value (whatever is_tz_value/1 accepts).
def beginning_of_year(year, tz) when is_year(year) and is_tz_value(tz),
  do: Timex.datetime({year, 1, 1}, tz)
def beginning_of_year(_, _),
  do: {:error, :badarg}
@doc """
Given a date or a number create a date at the end of that year

Examples

    iex> date = Timex.datetime({{2015, 6, 15}, {0, 0, 0, 0}})
    iex> Timex.end_of_year(date)
    Timex.datetime({{2015, 12, 31}, {23, 59, 59}})

    iex> Timex.end_of_year(2015)
    Timex.date({{2015, 12, 31}, {23, 59, 59}})

    iex> Timex.end_of_year(2015, "Europe/London")
    Timex.datetime {{2015, 12, 31}, {23, 59, 59}}, "Europe/London"
"""
@spec end_of_year(Date.t | Types.year | Comparable) :: Date.t | DateTime.t | {:error, term}
# A Date has no time component, so Dec 31 alone suffices.
def end_of_year(%Date{year: year}) when is_year(year),
  do: Timex.date({year, 12, 31})
# Zoned DateTime: keep the timezone and pin the last second of the year.
def end_of_year(%DateTime{year: year, timezone: tz}) when is_year(year) and not is_nil(tz),
  do: Timex.datetime({{year, 12, 31}, {23, 59, 59}}, tz)
def end_of_year(%DateTime{year: year}) when is_year(year),
  do: Timex.datetime({{year, 12, 31}, {23, 59, 59}})
# NOTE(review): this integer-year clause passes a datetime tuple to
# Timex.date/1, unlike the %Date{} clause above — presumably Timex.date
# accepts both forms; confirm before changing.
def end_of_year(year) when is_year(year),
  do: Timex.date({{year, 12, 31}, {23, 59, 59}})
# Fallback: coerce any convertible value, then re-dispatch.
def end_of_year(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> end_of_year(d)
  end
end
@doc """
Same as end_of_year/1, except takes an integer year + timezone as arguments
"""
@spec end_of_year(Types.year, Types.valid_timezone) :: DateTime.t | {:error, term}
def end_of_year(year, %TimezoneInfo{} = tz) when is_year(year),
  do: Timex.datetime({{year, 12, 31}, {23, 59, 59}}, tz)
def end_of_year(year, tz) when is_year(year) and is_tz_value(tz),
  do: Timex.datetime({{year, 12, 31}, {23, 59, 59}}, tz)
def end_of_year(_, _),
  do: {:error, :badarg}
@doc """
Number of days to the beginning of the week
The weekstart can between 1..7, an atom e.g. :mon, or a string e.g. "Monday"

## Examples

Week starting Monday

    iex> date = Timex.datetime({2015, 11, 30}) # Monday 30th November
    iex> Timex.days_to_beginning_of_week(date)
    0

Week starting Sunday

    iex> date = Timex.date({2015, 11, 30}) # Monday 30th November
    iex> Timex.days_to_beginning_of_week(date, :sun)
    1
"""
@spec days_to_beginning_of_week(Types.valid_datetime, Types.weekday) :: integer | {:error, term}
def days_to_beginning_of_week(date, weekstart \\ 1)
# Accept atom (:mon) or string ("Monday") week starts by converting to 1..7
# via Timex.day_to_num/1, then re-dispatch.
def days_to_beginning_of_week(date, weekstart) when is_atom(weekstart) or is_binary(weekstart) do
  days_to_beginning_of_week(date, Timex.day_to_num(weekstart))
end
def days_to_beginning_of_week(date, weekstart) when is_day_of_week(weekstart, :mon) do
  case weekday(date) do
    {:error, _} = err ->
      err
    wd ->
      # Offset from week start; wrap negative differences around the 7-day week.
      case wd - weekstart do
        diff when diff < 0 ->
          7 + diff
        diff ->
          diff
      end
  end
end
# Propagate an error tuple produced by Timex.day_to_num/1 above.
def days_to_beginning_of_week(_, {:error, _} = err), do: err
def days_to_beginning_of_week(_, _), do: {:error, :badarg}
@doc """
Number of days to the end of the week.
The weekstart can between 1..7, an atom e.g. :mon, or a string e.g. "Monday"

## Examples

Week starting Monday

    iex> date = Timex.datetime({2015, 11, 30}) # Monday 30th November
    iex> Timex.days_to_end_of_week(date)
    6

Week starting Sunday

    iex> date = Timex.date({2015, 11, 30}) # Monday 30th November
    iex> Timex.days_to_end_of_week(date, :sun)
    5
"""
@spec days_to_end_of_week(Convertable, Types.weekday) :: integer | {:error, term}
def days_to_end_of_week(date, weekstart \\ :mon) do
  # The distance to the end of the week is the mirror image (within the
  # 7-day span) of the distance back to its beginning; errors pass through.
  with days when is_integer(days) <- days_to_beginning_of_week(date, weekstart) do
    abs(days - 6)
  end
end
@doc """
Shifts the date to the beginning of the week
The weekstart can between 1..7, an atom e.g. :mon, or a string e.g. "Monday"

## Examples

    iex> date = Timex.datetime({{2015, 11, 30}, {13, 30, 30}}) # Monday 30th November
    iex> Timex.beginning_of_week(date)
    Timex.datetime({2015, 11, 30})

    iex> date = Timex.date({{2015, 11, 30}, {13, 30, 30}}) # Monday 30th November
    iex> Timex.beginning_of_week(date, :sun)
    Timex.date({2015, 11, 29})
"""
@spec beginning_of_week(Types.valid_datetime, Types.weekday) :: Date.t | DateTime.t | {:error, term}
def beginning_of_week(date, weekstart \\ :mon)
# Shift a Date back by the computed number of days, then snap to day start.
def beginning_of_week(%Date{} = date, weekstart) do
  days_to_beginning = days_to_beginning_of_week(date, weekstart)
  case days_to_beginning do
    {:error, _} = err -> err
    _ ->
      date
      |> Date.shift([days: -days_to_beginning])
      |> beginning_of_day
  end
end
# Same for a DateTime (also zeroes out the time via beginning_of_day/1).
def beginning_of_week(%DateTime{} = datetime, weekstart) do
  days_to_beginning = days_to_beginning_of_week(datetime, weekstart)
  case days_to_beginning do
    {:error, _} = err -> err
    _ ->
      datetime
      |> DateTime.shift([days: -days_to_beginning])
      |> beginning_of_day
  end
end
# Fallback: coerce any convertible value, then re-dispatch.
def beginning_of_week(datetime, weekstart) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d ->
      beginning_of_week(d, weekstart)
  end
end
@doc """
Returns a Date or a DateTime representing the end of the week, depending on the input,
i.e. if you pass a date/time value which represents just a date, you will get back a Date,
if both a date and time are present, you will get back a DateTime
The weekstart can between 1..7, an atom e.g. :mon, or a string e.g. "Monday"

## Examples

    iex> date = Timex.datetime({{2015, 11, 30}, {13, 30, 30}}) # Monday 30th November
    ...> Timex.end_of_week(date)
    Timex.datetime({{2015, 12, 6}, {23, 59, 59}})

    iex> date = Timex.date({{2015, 11, 30}, {13, 30, 30}}) # Monday 30th November
    ...> Timex.end_of_week(date, :sun)
    Timex.date({2015, 12, 5})
"""
@spec end_of_week(Convertable, Types.weekday) :: Date.t | DateTime.t | {:error, term}
# NOTE: the default here is 1 (numeric Monday) while beginning_of_week/2
# defaults to :mon — both resolve to the same week start.
def end_of_week(datetime, weekstart \\ 1)
# Shift a Date forward to the last day of its week, then snap to day end.
def end_of_week(%Date{} = date, weekstart) do
  days_to_end = days_to_end_of_week(date, weekstart)
  case days_to_end do
    {:error, _} = err -> err
    _ ->
      date
      |> Date.shift([days: days_to_end])
      |> end_of_day
  end
end
# Same for a DateTime (also sets the time to 23:59:59 via end_of_day/1).
def end_of_week(%DateTime{} = datetime, weekstart) do
  days_to_end = days_to_end_of_week(datetime, weekstart)
  case days_to_end do
    {:error, _} = err -> err
    _ ->
      datetime
      |> DateTime.shift([days: days_to_end])
      |> end_of_day
  end
end
# Fallback: coerce any convertible value, then re-dispatch.
def end_of_week(datetime, weekstart) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> end_of_week(d, weekstart)
  end
end
@doc """
Returns a DateTime representing the beginning of the day

## Examples

    iex> date = Timex.datetime({{2015, 1, 1}, {13, 14, 15}})
    iex> Timex.beginning_of_day(date)
    Timex.datetime({{2015, 1, 1}, {0, 0, 0}})

    iex> date = Timex.date({{2015, 1, 1}, {13, 14, 15}})
    ...> Timex.beginning_of_day(date)
    Timex.date({{2015,1,1}, {0,0,0}})
"""
@spec beginning_of_day(Convertable) :: DateTime.t | {:error, term}
# A bare Date carries no time component, so it already is the day's start.
def beginning_of_day(%Date{} = date), do: date

# Zero out the time fields, leaving the date and timezone untouched.
def beginning_of_day(%DateTime{} = datetime),
  do: DateTime.set(datetime, hour: 0, minute: 0, second: 0)

# Fallback: coerce any convertible value, then re-dispatch.
def beginning_of_day(other) do
  converted = Convertable.to_datetime(other)

  case converted do
    %DateTime{} = datetime -> beginning_of_day(datetime)
    {:error, _} = error -> error
  end
end
@doc """
Returns a DateTime representing the end of the day

## Examples

    iex> date = Timex.datetime({{2015, 1, 1}, {13, 14, 15}})
    ...> Timex.end_of_day(date)
    Timex.datetime({{2015, 1, 1}, {23, 59, 59}})

    iex> date = Timex.date({{2015, 1, 1}, {13, 14, 15}})
    ...> Timex.end_of_day(date)
    Timex.date({{2015,1,1}, {23,59,59}})
"""
@spec end_of_day(Convertable) :: DateTime.t | {:error, term}
# A bare Date carries no time component, so there is nothing to move.
def end_of_day(%Date{} = date), do: date

# Pin the time to the last whole second of the day.
def end_of_day(%DateTime{} = datetime),
  do: DateTime.set(datetime, hour: 23, minute: 59, second: 59)

# Fallback: coerce any convertible value, then re-dispatch.
def end_of_day(other) do
  converted = Convertable.to_datetime(other)

  case converted do
    %DateTime{} = datetime -> end_of_day(datetime)
    {:error, _} = error -> error
  end
end
@doc """
Return a boolean indicating whether the given year is a leap year. You may
pass a date or a year number.

## Examples

    iex> DateTime.epoch |> #{__MODULE__}.is_leap?
    false

    iex> #{__MODULE__}.is_leap?(2012)
    true
"""
@spec is_leap?(Types.valid_datetime | Types.year) :: boolean | {:error, term}
# Delegate the actual calculation to Erlang's calendar module.
def is_leap?(year) when is_year(year),
  do: :calendar.is_leap_year(year)
def is_leap?(%Date{:year => year}),
  do: is_leap?(year)
def is_leap?(%DateTime{:year => year}),
  do: is_leap?(year)
# Fallback: anything convertible to a Date (tuples, etc.).
def is_leap?(datetime) do
  case Convertable.to_date(datetime) do
    {:error, _} = err -> err
    %Date{:year => y} -> :calendar.is_leap_year(y)
  end
end
@doc """
Produces a valid Date or DateTime object based on a date or datetime tuple respectively.
All date's components will be clamped to the minimum or maximum valid value.

## Examples

    iex> use Timex
    ...> localtz = Timezone.local({{1,12,31},{0,59,59}})
    ...> Timex.normalize({{1,12,31},{0,59,59}, localtz})
    Timex.datetime({{1,12,31},{0,59,59}}, :local)

    iex> use Timex
    ...> Timex.normalize({1,12,31})
    Timex.date({1,12,31})
"""
@spec normalize(Types.valid_datetime) :: Date.t | DateTime.t | {:error, term}
def normalize({{_,_,_} = date, {_,_,_} = time}),
  do: Timex.datetime({normalize(:date, date), normalize(:time, time)})
def normalize({y,m,d} = date) when is_integer(y) and is_integer(m) and is_integer(d),
  do: Timex.date(normalize(:date, date))
# Timezone given as an {offset, name} tuple — only the name is used.
def normalize({{_,_,_}=date, time, {_offset, tz}}),
  do: Timex.datetime({normalize(:date, date), normalize(:time, time), tz})
def normalize({{_,_,_}=date, time, %TimezoneInfo{} = tz}),
  do: Timex.datetime({normalize(:date, date), normalize(:time, time), tz})
def normalize({{_,_,_}=date, time, tz}) when is_tz_value(tz),
  do: Timex.datetime({normalize(:date, date), normalize(:time, time), tz})
def normalize(%Date{:year => y, :month => m, :day => d}),
  do: Timex.date(normalize(:date, {y,m,d}))
# BUG FIX: this clause previously bound the variable `m` to BOTH :month and
# :minute, so it only matched when month == minute; any other DateTime fell
# through to the catch-all below, which converts back to a DateTime and
# recurses — an infinite loop. Distinct bindings restore the intended match.
def normalize(%DateTime{:year => y, :month => mo, :day => d, :hour => h, :minute => min, :second => s, :millisecond => ms, :timezone => tz}),
  do: Timex.datetime({normalize(:date, {y,mo,d}), normalize(:time, {h,min,s,ms}), normalize(:timezone, tz)})
# Fallback: coerce any convertible value, then re-dispatch.
def normalize(datetime) do
  case Convertable.to_datetime(datetime) do
    {:error, _} = err -> err
    %DateTime{} = d -> normalize(d)
  end
end
@doc """
Like normalize/1, but for specific types of values.
"""
@spec normalize(:date, {integer,integer,integer}) :: {Types.year, Types.month, Types.day}
@spec normalize(:time, {integer,integer,integer} | {integer,integer,integer,integer}) :: Types.time
@spec normalize(:year | :month | :day | :hour | :minute | :second | :millisecond, integer) :: integer
@spec normalize(:timezone, term) :: TimezoneInfo.t
# Normalize each date component; day clamping needs the already-normalized
# year and month to know how many days that month has.
def normalize(:date, {year, month, day}) do
  y = normalize(:year, year)
  m = normalize(:month, month)
  {y, m, normalize(:day, {y, m, day})}
end

# Years below zero are clamped to 0; anything else is left as-is.
def normalize(:year, year) when year < 0, do: 0
def normalize(:year, year), do: year

def normalize(:month, month), do: month |> max(1) |> min(12)

def normalize(:time, {hour, min, sec}),
  do: {normalize(:hour, hour), normalize(:minute, min), normalize(:second, sec)}

def normalize(:time, {hour, min, sec, ms}) do
  {h, m, s} = normalize(:time, {hour, min, sec})
  {h, m, s, normalize(:millisecond, ms)}
end

def normalize(:hour, hour), do: hour |> max(0) |> min(23)
def normalize(:minute, min), do: min |> max(0) |> min(59)
def normalize(:second, sec), do: sec |> max(0) |> min(59)
def normalize(:millisecond, ms), do: ms |> max(0) |> min(999)

# Timezones are passed through untouched.
def normalize(:timezone, tz), do: tz

# Clamp the day into 1..(days in the normalized month).
def normalize(:day, {year, month, day}) do
  y = normalize(:year, year)
  m = normalize(:month, month)
  day |> max(1) |> min(Timex.days_in_month(y, m))
end
@doc """
Return a new Date/DateTime with the specified fields replaced by new values.
Values are automatically validated and clamped to good values by default. If
you wish to skip validation, perhaps for performance reasons, pass `validate: false`.
Values are applied in order, so if you pass `[datetime: dt, date: d]`, the date value
from `date` will override `datetime`'s date value.

## Example

    iex> use Timex
    ...> expected = Timex.date({2015, 2, 28})
    ...> result = Timex.set(expected, [month: 2, day: 30])
    ...> result == expected
    true

    iex> use Timex
    ...> expected = Timex.datetime({{2016, 2, 29}, {23, 30, 0}})
    ...> result = Timex.set(expected, [hour: 30])
    ...> result === expected
    true
"""
# Dispatch to the type-specific set/2 implementation.
def set(%Date{} = date, options), do: Date.set(date, options)
def set(%DateTime{} = datetime, options), do: DateTime.set(datetime, options)

# Fallback: coerce any convertible value first; conversion errors pass through.
def set(other, options) do
  converted = Convertable.to_datetime(other)

  case converted do
    %DateTime{} = datetime -> DateTime.set(datetime, options)
    {:error, _} = error -> error
  end
end
end
|
lib/timex.ex
| 0.912723
| 0.550366
|
timex.ex
|
starcoder
|
defmodule Versioned.Schema do
  @moduledoc """
  Enhances `Ecto.Schema` modules to track a full history of changes.

  The `versioned_schema` macro works just like `schema` in `Ecto.Schema` but it
  also builds an `OriginalModule.Version` schema module as well to represent a
  version at a particular point in time.

  In addition to options allowed by `Ecto.Schema`, new ones are also allowed.

  ## Additional `belongs_to` Options

  * `:versioned` - If `true`, the version schema will include an extra field
    of the same name but with `_version` appended. The parent record which
    existed when this version was created can be loaded into this field via
    `Versioned.preload/2`.

  Example:

      versioned_schema "people" do
        belongs_to :car, Car, type: :binary_id, versioned: true
      end

  ## Additional `has_many` Options

  * `:versioned` - If `true`, the version schema will include an extra field
    of the same name but with `_version` appended. If defined as another
    truthy atom, then that field name will be used instead. The child records
    which existed when this version was created can be loaded into this field
    via `Versioned.preload/2`.

  Example:

      versioned_schema "cars" do
        has_many :people, Person, on_replace: :delete, versioned: :person_versions
      end

  ## I already have an integer primary key.

  While Versioned generally operates with binary_ids, it is possible to adopt it
  for an existing table which uses integers. First, you'll need to create a
  migration to create the versions table. Then, define your own `@primary_key`
  attribute. Versioned will preserve it. (Note that the versions table must
  still use UUIDs.)

      @primary_key {:id, :id, autogenerate: true}
      versioned_schema "cars" do
        ...
      end

  ## Add Code to the Version Module

  To add additional functions to the version module, auto-generated by
  `versioned_schema/2`, add a `version` block somewhere before
  `versioned_schema`.

      version do
        def hello_world, do: :ok
      end

      versioned_schema "cars" do
        ...
      end

  ## Example

      defmodule MyApp.Car do
        use Versioned.Schema

        versioned_schema "cars" do
          field :name, :string
          has_many :people, MyApp.Person, versioned: true
        end
      end
  """
  alias Versioned.Helpers

  defmacro __using__(opts) do
    # `:singular` is our option; everything else is forwarded to Ecto.Schema.
    {singular_opt, ecto_opts} = Keyword.pop(opts, :singular)

    quote do
      use Ecto.Schema, unquote(ecto_opts)
      import unquote(__MODULE__)
      # Remember the Ecto options so the generated Version module can reuse them.
      @ecto_opts unquote(ecto_opts)
      @source_singular unquote(singular_opt && to_string(singular_opt))
    end
  end

  @doc "Register some code to be injected into the Version module."
  defmacro version(do: block) do
    # Stored on the calling module; picked up later by versioned_schema/2.
    Module.put_attribute(__CALLER__.module, :extra_version_ast, block)
  end

  @doc "Create a versioned schema."
  defmacro versioned_schema(source, do: block) do
    {:__block__, _m, lines_ast} = Helpers.normalize_block(block)
    mod = __CALLER__.module

    quote do
      # Default the singular name by naively stripping a trailing "s" from the
      # table name, unless the user supplied `:singular` in `use`.
      @source_singular Module.get_attribute(__MODULE__, :source_singular) ||
                         unquote(String.trim_trailing(source, "s"))

      @doc """
      Get some information about this versioned schema.
      The argument, an atom, can be one of:
      * `:entity_fk` - `:#{@source_singular}_id` will be returned, the foreign
        key column on the versions table which points at the real record.
      * `:source_singular` - String `"#{@source_singular}"` will be returned.
      * `:has_many_fields` - List of field name atoms which are has_many.
      """
      @spec __versioned__(:entity_fk | :source_singular | :has_many_fields) ::
              atom | [atom] | String.t()
      def __versioned__(:entity_fk), do: :"#{@source_singular}_id"
      def __versioned__(:source_singular), do: @source_singular
      def __versioned__(:has_many_fields), do: __MODULE__.Version.has_many_fields()

      @doc """
      Given the has_many `field` name, get the has_many field name for the
      versioned schema.
      """
      @spec __versioned__(:has_many_field, atom) :: atom
      def __versioned__(:has_many_field, field), do: __MODULE__.Version.has_many_field(field)

      # Using module can set @primary_key as an exit hatch, but uuid by default.
      unless Module.get_attribute(__MODULE__, :primary_key) do
        @primary_key {:id, :binary_id, autogenerate: true}
      end

      schema unquote(source) do
        # Virtual: carries the id of the version a loaded struct corresponds to.
        field :version_id, :binary_id, virtual: true
        has_many :versions, __MODULE__.Version
        timestamps type: :utc_datetime_usec
        # Original schema body, minus our `:versioned` options (Ecto rejects them).
        unquote(remove_versioned_opts(block))
      end

      defmodule Version do
        @moduledoc "A single version in history."
        use Ecto.Schema, @ecto_opts

        @before_compile {unquote(__MODULE__), :version_before_compile}
        @source_singular Module.get_attribute(unquote(mod), :source_singular)

        # The version's FK column must match the parent's primary key type.
        {_id_key, parent_primary_key_type, _opts} =
          Module.get_attribute(unquote(mod), :primary_key)

        Module.register_attribute(__MODULE__, :has_many_fields, accumulate: true)

        # Set @foreign_key_type if the main module did.
        fkt = Module.get_attribute(unquote(mod), :foreign_key_type)
        fkt && @foreign_key_type fkt

        @typedoc """
        #{String.capitalize(@source_singular)} version. See
        `#{unquote(inspect(mod))}` for base fields. Additionally, this schema
        has `:is_deleted` (true if the record is deleted) and
        `:#{@source_singular}_id` which holds id of the #{@source_singular}
        in the main table to which this version belongs. Note that it is just
        a field and not a true relationship so that the main record can be
        deleted while preserving the versions.
        """
        @type t :: %__MODULE__{}

        @primary_key {:id, :binary_id, autogenerate: true}
        schema "#{unquote(source)}_versions" do
          field :is_deleted, :boolean
          belongs_to :"#{@source_singular}", unquote(mod), type: parent_primary_key_type
          timestamps type: :utc_datetime_usec, updated_at: false
          # Mirror the original schema's lines, rewritten for the versions table.
          version_lines(unquote(lines_ast))
        end

        @doc "Get the Ecto.Schema module for which this version module belongs."
        @spec entity_module :: module
        def entity_module, do: unquote(mod)

        # Splice in any user code registered via the `version/1` macro.
        unquote(Module.get_attribute(mod, :extra_version_ast))
      end
    end
  end

  # This ast is added to the end of the Version module.
  defmacro version_before_compile(_env) do
    quote do
      @doc "List of field name atoms in the main schema which are has_many."
      @spec has_many_fields :: [atom]
      def has_many_fields, do: Keyword.keys(@has_many_fields)

      @doc """
      Given the has_many `field` name in the main schema, get the has_many field
      name for the versioned schema.
      """
      @spec has_many_field(atom) :: atom
      def has_many_field(field), do: @has_many_fields[field]
    end
  end

  @doc """
  Convert a list of ast lines from the main schema into ast lines to be used
  for the version schema.
  """
  defmacro version_lines(lines_ast) do
    lines_ast
    |> Enum.reduce([], &do_version_line/2)
    |> Enum.reverse()
  end

  # Take the original schema declaration ast and attach to the accumulator the
  # corresponding version schema ast to use.
  @spec do_version_line(Macro.t(), Macro.t()) :: Macro.t()
  defp do_version_line({:belongs_to, m, [field, entity]}, acc),
    do: do_version_line({:belongs_to, m, [field, entity, []]}, acc)

  defp do_version_line({:belongs_to, _m, [field, entity, field_opts]} = orig_ast, acc) do
    # Emit both the plain association and a `<field>_version` association that
    # shares the same FK column (define_field: false avoids a duplicate column).
    do_belongs_to = fn key ->
      quote do
        belongs_to unquote(key),
                   unquote(entity),
                   unquote(Keyword.delete(field_opts, :versioned))

        belongs_to :"#{unquote(key)}_version", Versioned.version_mod(unquote(entity)),
          define_field: false,
          foreign_key: :"#{unquote(field)}_id"
      end
    end

    line =
      if field_opts[:versioned] in [nil, false],
        do: orig_ast,
        else: do_belongs_to.(field)

    [line | acc]
  end

  defp do_version_line({:has_many, m, [field, entity]}, acc),
    do: do_version_line({:has_many, m, [field, entity, []]}, acc)

  defp do_version_line({:has_many, _m, [field, entity, field_opts]}, acc) do
    # Versioned has_many: point at the child's Version module, joined through
    # the singular `<source>_id` column that both tables carry.
    do_has_many = fn key ->
      quote do
        @has_many_fields {unquote(field), unquote(key)}
        ver_mod = Versioned.version_mod(unquote(entity))
        foreign_key = unquote(field_opts[:foreign_key]) || :"#{@source_singular}_id"

        has_many unquote(key), ver_mod,
          foreign_key: foreign_key,
          references: :"#{@source_singular}_id"
      end
    end

    line =
      case field_opts[:versioned] do
        # Field is not versioned.
        v when v in [nil, false] ->
          quote do
            @has_many_fields {unquote(field), unquote(field)}

            has_many :"#{unquote(field)}", unquote(entity),
              foreign_key: :"#{@source_singular}_id",
              references: :"#{@source_singular}_id"
          end

        # has_many declaration used `versioned: true` -- just use an obvious name.
        true ->
          do_has_many.(quote do: :"#{unquote(entity).__versioned__(:source_singular)}_versions")

        # `:versioned` option used a proper key name -- use that.
        versions_key ->
          do_has_many.(versions_key)
      end

    [line | acc]
  end

  # Any other schema line (field, timestamps, etc.) is carried over unchanged.
  defp do_version_line(line, acc) do
    [line | acc]
  end

  # Drop our options from the AST for Ecto.Schema because it croaks otherwise.
  @spec remove_versioned_opts(Macro.t()) :: Macro.t()
  defp remove_versioned_opts({:__block__, top_m, lines}) do
    lines =
      Enum.map(lines, fn
        {:has_many, m, [a, b, opts]} ->
          {:has_many, m, [a, b, Keyword.delete(opts, :versioned)]}

        {:belongs_to, m, [a, b, opts]} ->
          {:belongs_to, m, [a, b, Keyword.delete(opts, :versioned)]}

        other ->
          other
      end)

    {:__block__, top_m, lines}
  end
end
|
lib/versioned/schema.ex
| 0.908686
| 0.554893
|
schema.ex
|
starcoder
|
defmodule Zest do
  @doc """
  Add some debug information to the context for the duration of a
  block or expression. If a `raise`, `throw` or `exit` occurs, the
  context will be pretty printed to the screen to aid with debugging.

  Examples:

      scope [foo: :bar], assert(true == false)

      scope [foo: :bar] do
        assert true == false
      end
  """
  defmacro scope(list, do: block) when is_list(list) do
    quote do
      Zest.in_scope(unquote(list), fn -> unquote(block) end)
    end
  end

  defmacro scope(list, expr) when is_list(list) do
    quote do
      Zest.in_scope(unquote(list), fn -> unquote(expr) end)
    end
  end

  @doc "Wrap a function such that it is as if its body was wrapped in `scope/2`"
  @spec scoped(Keyword.t(), region :: function) :: function
  def scoped(scopes, fun) when is_function(fun) do
    hijack(fun, fn _, args -> in_scope(scopes, fn -> apply(fun, args) end) end)
  end

  @doc """
  Add some debug information to the context for the duration of a
  function's execution. If a `raise`, `throw` or `exit` occurs, the
  context will be pretty printed to the screen to aid with debugging.
  """
  @spec in_scope(Keyword.t(), function) :: term
  def in_scope(scopes, fun) when is_list(scopes) and is_function(fun, 0) do
    old = push_scopes(scopes)

    if old == [] do
      # Outermost scope: install the interceptor that dumps the context on
      # failure (then rethrows); on success, clear the scope stack.
      intercept(
        fn ->
          ret = fun.()
          put_scopes([])
          ret
        end,
        rethrowing(fn -> dump_scopes() end)
      )
    else
      # Nested scope: the outermost frame already intercepts; just restore
      # the previous scope stack on the way out.
      ret = fun.()
      put_scopes(old)
      ret
    end
  end

  @doc """
  You take on the role of the `apply` function in this exciting
  function that wraps execution of a function such that your function
  is responsible for calling it.
  """
  @spec hijack(function, jack :: (function, [term] -> term)) :: function
  # One clause per arity: the wrapper must expose the same arity as `fun`.
  def hijack(fun, jack) when is_function(fun, 0) and is_function(jack, 2) do
    fn -> jack.(fun, []) end
  end

  def hijack(fun, jack) when is_function(fun, 1) and is_function(jack, 2) do
    fn a -> jack.(fun, [a]) end
  end

  def hijack(fun, jack) when is_function(fun, 2) and is_function(jack, 2) do
    fn a, b -> jack.(fun, [a, b]) end
  end

  def hijack(fun, jack) when is_function(fun, 3) and is_function(jack, 2) do
    fn a, b, c -> jack.(fun, [a, b, c]) end
  end

  def hijack(fun, jack) when is_function(fun, 4) and is_function(jack, 2) do
    fn a, b, c, d -> jack.(fun, [a, b, c, d]) end
  end

  def hijack(fun, jack) when is_function(fun, 5) and is_function(jack, 2) do
    fn a, b, c, d, e -> jack.(fun, [a, b, c, d, e]) end
  end

  def hijack(fun, jack) when is_function(fun, 6) and is_function(jack, 2) do
    fn a, b, c, d, e, f -> jack.(fun, [a, b, c, d, e, f]) end
  end

  def hijack(fun, jack) when is_function(fun, 7) and is_function(jack, 2) do
    fn a, b, c, d, e, f, g -> jack.(fun, [a, b, c, d, e, f, g]) end
  end

  def hijack(fun, jack) when is_function(fun, 8) and is_function(jack, 2) do
    fn a, b, c, d, e, f, g, h -> jack.(fun, [a, b, c, d, e, f, g, h]) end
  end

  def hijack(fun, jack) when is_function(fun, 9) and is_function(jack, 2) do
    fn a, b, c, d, e, f, g, h, i -> jack.(fun, [a, b, c, d, e, f, g, h, i]) end
  end

  @type intercept_type :: :rescue | :catch | :exit
  @type interceptor :: (intercept_type, error :: term, maybe_stacktrace :: term -> term)

  @doc "Catches errors and exceptions, invoking an interceptor function"
  # FIX: the previous spec declared a `function` return, but intercept/2
  # executes `fun` immediately and returns its result (or the interceptor's).
  @spec intercept((() -> term), interceptor) :: term
  def intercept(fun, interceptor)
      when is_function(fun, 0) and is_function(interceptor, 3) do
    try do
      fun.()
    rescue
      e -> interceptor.(:rescue, e, __STACKTRACE__)
    catch
      # A stacktrace is only meaningful for `rescue`; pass nil otherwise.
      e -> interceptor.(:catch, e, nil)
      :exit, e -> interceptor.(:exit, e, nil)
    end
  end

  @doc """
  Wraps an interceptor or nullary function into an interceptor
  function such that after the execution of the provided function, the
  error or exception will be rethrown.
  """
  @spec rethrowing(function) :: interceptor
  def rethrowing(fun) when is_function(fun, 0) do
    rethrowing(fn _, _, _ -> fun.() end)
  end

  def rethrowing(fun) when is_function(fun, 3) do
    fn type, e, stack ->
      fun.(type, e, stack)
      rethrow(type, e, stack)
    end
  end

  @doc "An interceptor function which simply rethrows/reraises/re-exits"
  @spec rethrow(intercept_type, error :: term, maybe_stacktrace :: term) :: none
  def rethrow(:rescue, e, stacktrace), do: reraise(e, stacktrace)
  def rethrow(:catch, e, _), do: throw(e)
  def rethrow(:exit, e, _), do: exit(e)

  @doc """
  Iterates over a collection, calling the provided effectful
  function with each item.
  """
  # FIX (consistency with each/3): accept any enumerable by converting it to a
  # list first; previously non-list enumerables were silently ignored.
  def each(enum, fun) when not is_list(enum), do: each(Enum.to_list(enum), fun)

  def each([l | list], fun) do
    scope [each: l] do
      fun.(l)
      each(list, fun)
    end
  end

  def each(_, _), do: nil

  @doc """
  Iterates over two collections, calling the provided effectful
  function with each pair of items
  """
  def each(a, b, fun) when not is_list(a), do: each(Enum.to_list(a), b, fun)
  def each(a, b, fun) when not is_list(b), do: each(a, Enum.to_list(b), fun)

  def each([a | as], [b | bs], fun) do
    scope [each: %{a: a, b: b}] do
      fun.(a, b)
      each(as, bs, fun)
    end
  end

  def each(_, _, _), do: nil

  ### implementation

  # Scope frames live in the process dictionary under this key.
  @scopes_key Zest.Context

  defp get_scopes(), do: Process.get(@scopes_key, [])
  defp put_scopes(scope), do: Process.put(@scopes_key, scope)

  # Push new frames onto the stack, returning the previous stack so the
  # caller can restore it.
  defp push_scopes(new) when is_list(new) do
    old = get_scopes()
    put_scopes(Enum.reduce(new, old, fn {k, v}, acc -> [{k, v} | acc] end))
    old
  end

  # Pretty-print the scope stack, outermost frame first.
  defp dump_scopes(scopes \\ get_scopes()) do
    IO.puts("Zest Context:")

    Enum.each(Enum.reverse(scopes), fn {k, v} ->
      IO.puts("* #{k}: #{inspect(v, pretty: true)}")
    end)
  end
end
|
lib/zest.ex
| 0.788949
| 0.86431
|
zest.ex
|
starcoder
|
defmodule Kernel.Typespec do
@moduledoc false
## Deprecated API moved to Code.Typespec
@doc false
@deprecated "Use Code.Typespec.spec_to_quoted/2 instead"
# Thin backwards-compatibility shim over the relocated implementation.
def spec_to_ast(name, spec), do: Code.Typespec.spec_to_quoted(name, spec)
@doc false
@deprecated "Use Code.Typespec.type_to_quoted/1 instead"
# Thin backwards-compatibility shim over the relocated implementation.
def type_to_ast(type), do: Code.Typespec.type_to_quoted(type)
@doc false
@deprecated "Use Code.fetch_docs/1 instead"
# Extracts type docs from the docs_v1 chunk: nil = no doc, false = hidden.
def beam_typedocs(module) when is_atom(module) or is_binary(module) do
  with {:docs_v1, _, _, _, _, _, docs} <- Code.fetch_docs(module) do
    docs
    |> Enum.filter(&match?({{:type, _, _}, _, _, _, _}, &1))
    |> Enum.map(fn {{:type, name, arity}, _, _, doc, _} ->
      case doc do
        :none -> {{name, arity}, nil}
        :hidden -> {{name, arity}, false}
        %{"en" => doc_string} -> {{name, arity}, doc_string}
      end
    end)
  else
    {:error, _} -> nil
  end
end
@doc false
@deprecated "Use Code.Typespec.fetch_types/1 instead"
# Returns the list of types, or nil when the chunk cannot be fetched.
def beam_types(module) when is_atom(module) or is_binary(module) do
  with {:ok, types} <- Code.Typespec.fetch_types(module) do
    types
  else
    :error -> nil
  end
end
@doc false
@deprecated "Use Code.Typespec.fetch_specs/1 instead"
# Returns the list of specs, or nil when the chunk cannot be fetched.
def beam_specs(module) when is_atom(module) or is_binary(module) do
  with {:ok, specs} <- Code.Typespec.fetch_specs(module) do
    specs
  else
    :error -> nil
  end
end
@doc false
@deprecated "Use Code.Typespec.fetch_callbacks/1 instead"
# Returns the list of callbacks, or nil when the chunk cannot be fetched.
def beam_callbacks(module) when is_atom(module) or is_binary(module) do
  with {:ok, callbacks} <- Code.Typespec.fetch_callbacks(module) do
    callbacks
  else
    :error -> nil
  end
end
## Hooks for Module functions

# Returns true if `module` (currently being compiled) has defined the type
# `name/arity` via @type, @opaque or @typep. Reads the module's compile-time
# accumulator tables, so this only works during compilation.
def defines_type?(module, {name, arity} = signature)
    when is_atom(module) and is_atom(name) and arity in 0..255 do
  {_set, bag} = :elixir_module.data_tables(module)

  finder = fn {_kind, expr, _caller} ->
    type_to_signature(expr) == signature
  end

  :lists.any(finder, get_typespecs(bag, [:type, :opaque, :typep]))
end

# Promotes an existing @spec for `name/arity` into a @callback, copying the
# function's @doc over as the callback doc. Returns true if a spec was found.
def spec_to_callback(module, {name, arity} = signature)
    when is_atom(module) and is_atom(name) and arity in 0..255 do
  {set, bag} = :elixir_module.data_tables(module)

  filter = fn {:spec, expr, pos} ->
    if spec_to_signature(expr) == signature do
      kind = :callback
      store_typespec(bag, kind, expr, pos)

      # Copy the function's stored doc (if any) onto the new callback entry.
      case :ets.lookup(set, {:function, name, arity}) do
        [{{:function, ^name, ^arity}, line, _, doc, doc_meta}] ->
          store_doc(set, kind, name, arity, line, :doc, doc, doc_meta)

        _ ->
          nil
      end

      true
    else
      false
    end
  end

  :lists.filter(filter, get_typespecs(bag, :spec)) != []
end
## Typespec definition and storage

@doc """
Defines a typespec.
Invoked by `Kernel.@/1` expansion.
"""
# @spec: nothing to document, just accumulate the expression.
def deftypespec(:spec, expr, _line, _file, module, pos) do
  {_set, bag} = :elixir_module.data_tables(module)
  store_typespec(bag, :spec, expr, pos)
end

# @callback / @macrocallback: also attach the pending @doc to the callback.
def deftypespec(kind, expr, line, _file, module, pos)
    when kind in [:callback, :macrocallback] do
  {set, bag} = :elixir_module.data_tables(module)

  case spec_to_signature(expr) do
    {name, arity} ->
      # store doc only once in case callback has multiple clauses
      unless :ets.member(set, {kind, name, arity}) do
        {line, doc} = get_doc_info(set, :doc, line)
        store_doc(set, kind, name, arity, line, :doc, doc, %{})
      end

    :error ->
      :error
  end

  store_typespec(bag, kind, expr, pos)
end

# @type / @typep / @opaque: attach the pending @typedoc, except for private
# types, where a @typedoc triggers a warning and is discarded.
def deftypespec(kind, expr, line, file, module, pos)
    when kind in [:type, :typep, :opaque] do
  {set, bag} = :elixir_module.data_tables(module)

  case type_to_signature(expr) do
    {name, arity} when kind == :typep ->
      {line, doc} = get_doc_info(set, :typedoc, line)

      if doc do
        warning =
          "type #{name}/#{arity} is private, @typedoc's are always discarded for private types"

        :elixir_errors.erl_warn(line, file, warning)
      end

    {name, arity} ->
      {line, doc} = get_doc_info(set, :typedoc, line)
      # Opaque types are flagged in the doc metadata for the docs chunk.
      spec_meta = if kind == :opaque, do: %{opaque: true}, else: %{}
      store_doc(set, :type, name, arity, line, :typedoc, doc, spec_meta)

    :error ->
      :error
  end

  store_typespec(bag, kind, expr, pos)
end
# Fetch accumulated typespec entries for one key (or several) without
# removing them from the module's compile-time bag table.
defp get_typespecs(bag, keys) when is_list(keys) do
  :lists.flatmap(&get_typespecs(bag, &1), keys)
end

defp get_typespecs(bag, key) do
  :ets.lookup_element(bag, {:accumulate, key}, 2)
catch
  # No entries were ever stored under this key.
  :error, :badarg -> []
end

# Like get_typespecs/2, but removes the entries from the table as it reads.
defp take_typespecs(bag, keys) when is_list(keys) do
  :lists.flatmap(&take_typespecs(bag, &1), keys)
end

defp take_typespecs(bag, key) do
  :lists.map(&elem(&1, 1), :ets.take(bag, {:accumulate, key}))
end

# Append a typespec entry under `key` in the module's accumulator table.
defp store_typespec(bag, key, expr, pos) do
  :ets.insert(bag, {{:accumulate, key}, {key, expr, pos}})
  :ok
end

# Record a doc entry for the given kind/name/arity in the set table.
defp store_doc(set, kind, name, arity, line, doc_kind, doc, spec_meta) do
  doc_meta = get_doc_meta(spec_meta, doc_kind, set)
  :ets.insert(set, {{kind, name, arity}, line, doc, doc_meta})
end

# Take (and clear) the pending @doc/@typedoc; defaults to {line, nil} when
# no doc attribute was set.
defp get_doc_info(set, attr, line) do
  case :ets.take(set, attr) do
    [{^attr, {line, doc}, _}] -> {line, doc}
    [] -> {line, nil}
  end
end

# Merge any pending doc metadata (e.g. from `@doc meta: ...`) with the
# kind-specific metadata, clearing the stored entry.
defp get_doc_meta(spec_meta, doc_kind, set) do
  case :ets.take(set, {doc_kind, :meta}) do
    [{{^doc_kind, :meta}, metadata, _}] -> Map.merge(metadata, spec_meta)
    [] -> spec_meta
  end
end
# A spec of the form `foo(...) :: ... when ...` — the signature comes from
# the part before `when`.
defp spec_to_signature({:when, _, [spec, _]}), do: type_to_signature(spec)
defp spec_to_signature(other), do: type_to_signature(other)

# Extract {name, arity} from a `name :: definition` AST. When the head's
# third element is an atom it is a var context, i.e. a 0-arity definition;
# `:::` itself is rejected as a name.
defp type_to_signature({:::, _, [{name, _, context}, _]})
     when is_atom(name) and name != ::: and is_atom(context),
     do: {name, 0}

defp type_to_signature({:::, _, [{name, _, args}, _]}) when is_atom(name) and name != :::,
  do: {name, length(args)}

defp type_to_signature(_), do: :error
## Translation from Elixir AST to typespec AST

@doc false
# Drains all typespecs stored for the module being compiled and translates
# them to Erlang abstract format, returning
# {types, specs, callbacks, macrocallbacks, optional_callbacks}.
def translate_typespecs_for_module(_set, bag) do
  type_typespecs = take_typespecs(bag, [:type, :opaque, :typep])
  defined_type_pairs = collect_defined_type_pairs(type_typespecs)

  # Translation state threaded through every mapfoldl below; it tracks which
  # locally-defined types get referenced so unused ones can be dropped.
  state = %{
    defined_type_pairs: defined_type_pairs,
    used_type_pairs: [],
    local_vars: %{},
    undefined_type_error_enabled?: true
  }

  {types, state} = :lists.mapfoldl(&translate_type/2, state, type_typespecs)
  {specs, state} = :lists.mapfoldl(&translate_spec/2, state, take_typespecs(bag, :spec))
  {callbacks, state} = :lists.mapfoldl(&translate_spec/2, state, take_typespecs(bag, :callback))

  {macrocallbacks, state} =
    :lists.mapfoldl(&translate_spec/2, state, take_typespecs(bag, :macrocallback))

  optional_callbacks = :lists.flatten(get_typespecs(bag, :optional_callbacks))
  used_types = filter_used_types(types, state)

  {used_types, specs, callbacks, macrocallbacks, optional_callbacks}
end
# Builds a map from {name, arity} to {file, line} for every defined type,
# raising a compile error on redefinition of a built-in type, on duplicate
# definitions, and on malformed type expressions.
defp collect_defined_type_pairs(type_typespecs) do
  fun = fn {_kind, expr, pos}, type_pairs ->
    %{file: file, line: line} = env = :elixir_locals.get_cached_env(pos)

    case type_to_signature(expr) do
      {name, arity} = type_pair ->
        if built_in_type?(name, arity) do
          message = "type #{name}/#{arity} is a built-in type and it cannot be redefined"
          compile_error(env, message)
        end

        if Map.has_key?(type_pairs, type_pair) do
          compile_error(env, "type #{name}/#{arity} is already defined")
        end

        Map.put(type_pairs, type_pair, {file, line})

      :error ->
        compile_error(env, "invalid type specification: #{Macro.to_string(expr)}")
    end
  end

  :lists.foldl(fun, %{}, type_typespecs)
end
# Drops non-exported (private) types that were never referenced by any other
# translated type or spec, emitting an "unused" warning for each one dropped.
defp filter_used_types(types, state) do
  fun = fn {_kind, {name, arity} = type_pair, _line, _type, export} ->
    if not export and not :lists.member(type_pair, state.used_type_pairs) do
      # The defining location was recorded in collect_defined_type_pairs/1.
      %{^type_pair => {file, line}} = state.defined_type_pairs
      :elixir_errors.erl_warn(line, file, "type #{name}/#{arity} is unused")
      false
    else
      true
    end
  end

  :lists.filter(fun, types)
end
# Translates one @type/@typep/@opaque definition into
# {kind, {name, arity}, line, erlang_type, export?} and validates that all
# type arguments are variables and that none of them go unused.
defp translate_type({kind, {:::, _, [{name, _, args}, definition]}, pos}, state) do
  caller = :elixir_locals.get_cached_env(pos)
  state = clean_local_state(state)

  # An atom in the args position means the type was written without
  # parentheses, i.e. it takes no arguments.
  args =
    if is_atom(args) do
      []
    else
      for(arg <- args, do: variable(arg))
    end

  vars = :lists.filter(&match?({:var, _, _}, &1), args)
  var_names = :lists.map(&elem(&1, 2), vars)
  # Each type parameter starts as :used_once; a use in the body bumps it.
  state = :lists.foldl(&update_local_vars(&2, &1), state, var_names)
  {spec, state} = typespec(definition, var_names, caller, state)
  type = {name, spec, vars}
  arity = length(args)
  ensure_no_unused_local_vars!(caller, state.local_vars)

  # Only :type and :opaque are exported; :typep stays private.
  {kind, export} =
    case kind do
      :type -> {:type, true}
      :typep -> {:type, false}
      :opaque -> {:opaque, true}
    end

  invalid_args = :lists.filter(&(not valid_variable_ast?(&1)), args)

  unless invalid_args == [] do
    invalid_args = :lists.join(", ", :lists.map(&Macro.to_string/1, invalid_args))

    message =
      "@type definitions expect all arguments to be variables. The type " <>
        "#{name}/#{arity} has an invalid argument(s): #{invalid_args}"

    compile_error(caller, message)
  end

  if underspecified?(kind, arity, spec) do
    message = "@#{kind} type #{name}/#{arity} is underspecified and therefore meaningless"
    :elixir_errors.erl_warn(caller.line, caller.file, message)
  end

  {{kind, {name, arity}, caller.line, type, export}, state}
end

# A valid type argument is a plain variable AST node: {atom, meta, atom}.
defp valid_variable_ast?({variable_name, _, context})
     when is_atom(variable_name) and is_atom(context),
     do: true

defp valid_variable_ast?(_), do: false

# A zero-arity opaque type defined as any()/term() reveals no information.
defp underspecified?(:opaque, 0, {:type, _, type, []}) when type in [:any, :term], do: true
defp underspecified?(_kind, _arity, _spec), do: false
# Entry points for @spec/@callback/@macrocallback translation; the first
# clause unwraps an optional `when` guard, the second supplies an empty one.
defp translate_spec({kind, {:when, _meta, [spec, guard]}, pos}, state) do
  caller = :elixir_locals.get_cached_env(pos)
  translate_spec(kind, spec, guard, caller, state)
end

defp translate_spec({kind, spec, pos}, state) do
  caller = :elixir_locals.get_cached_env(pos)
  translate_spec(kind, spec, [], caller, state)
end

defp translate_spec(kind, {:::, meta, [{name, _, args}, return]}, guard, caller, state)
     when is_atom(name) and name != ::: do
  translate_spec(kind, meta, name, args, return, guard, caller, state)
end

# A bare call with no `:: return_type` part is rejected.
defp translate_spec(_kind, {name, _meta, _args} = spec, _guard, caller, _state)
     when is_atom(name) and name != ::: do
  spec = Macro.to_string(spec)
  compile_error(caller, "type specification missing return type: #{spec}")
end

defp translate_spec(_kind, spec, _guard, caller, _state) do
  spec = Macro.to_string(spec)
  compile_error(caller, "invalid type specification: #{spec}")
end

# An atom in the args position means no parentheses, i.e. zero arguments.
defp translate_spec(kind, meta, name, args, return, guard, caller, state) when is_atom(args),
  do: translate_spec(kind, meta, name, [], return, guard, caller, state)

defp translate_spec(kind, meta, name, args, return, guard, caller, state) do
  ensure_no_defaults!(args)
  state = clean_local_state(state)

  unless Keyword.keyword?(guard) do
    error = "expected keywords as guard in type specification, got: #{Macro.to_string(guard)}"
    compile_error(caller, error)
  end

  line = line(meta)
  vars = Keyword.keys(guard)
  {fun_args, state} = fn_args(meta, args, return, vars, caller, state)
  spec = {:type, line, :fun, fun_args}

  # When the guard produced constraints, wrap the fun type in a bounded_fun.
  {spec, state} =
    case guard_to_constraints(guard, vars, meta, caller, state) do
      {[], state} -> {spec, state}
      {constraints, state} -> {{:type, line, :bounded_fun, [spec, constraints]}, state}
    end

  ensure_no_unused_local_vars!(caller, state.local_vars)
  arity = length(args)
  {{kind, {name, arity}, caller.line, spec}, state}
end
# Elixir-specific built-in types, layered on top of Erlang's own built-ins
# (the final clause delegates to :erl_internal).
# TODO: Remove char_list type by v2.0
defp built_in_type?(:char_list, 0), do: true
defp built_in_type?(:charlist, 0), do: true
defp built_in_type?(:as_boolean, 1), do: true
defp built_in_type?(:struct, 0), do: true
defp built_in_type?(:nonempty_charlist, 0), do: true
defp built_in_type?(:keyword, 0), do: true
defp built_in_type?(:keyword, 1), do: true
defp built_in_type?(:var, 0), do: true
defp built_in_type?(name, arity), do: :erl_internal.is_type(name, arity)

# Rejects `arg \\ default` in spec arguments, on either side of `::`.
defp ensure_no_defaults!(args) do
  fun = fn
    {:::, _, [left, right]} ->
      ensure_not_default(left)
      ensure_not_default(right)
      left

    other ->
      ensure_not_default(other)
      other
  end

  :lists.foreach(fun, args)
end

defp ensure_not_default({:\\, _, [_, _]}) do
  raise ArgumentError, "default arguments \\\\ not supported in typespecs"
end

defp ensure_not_default(_), do: :ok
# Converts a spec's `when` guard into Erlang :constraint nodes. A guard entry
# whose value is already a plain var (e.g. `x: var`) contributes no
# constraint; every other entry becomes an is_subtype constraint.
defp guard_to_constraints(guard, vars, meta, caller, state) do
  line = line(meta)

  fun = fn
    {_name, {:var, _, context}}, {constraints, state} when is_atom(context) ->
      {constraints, state}

    {name, type}, {constraints, state} ->
      {spec, state} = typespec(type, vars, caller, state)
      constraint = [{:atom, line, :is_subtype}, [{:var, line, name}, spec]]
      state = update_local_vars(state, name)
      {[{:type, line, :constraint, constraint} | constraints], state}
  end

  # Prepend while folding, then reverse once to preserve guard order.
  {constraints, state} = :lists.foldl(fun, {[], state}, guard)
  {:lists.reverse(constraints), state}
end

## To typespec conversion

# Extracts the line number from AST metadata, defaulting to 0.
defp line(meta) do
  Keyword.get(meta, :line, 0)
end
# typespec/4 converts an Elixir type AST into Erlang abstract typespec
# format, threading the translation state (defined/used types, local vars,
# error flags) through every clause. Clause order matters: special forms and
# sugar are matched before the generic local-call clause.

# Handle unions
defp typespec({:|, meta, [_, _]} = exprs, vars, caller, state) do
  exprs = collect_union(exprs)
  {union, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, exprs)
  {{:type, line(meta), :union, union}, state}
end

# Handle binaries
# <<>> — the empty binary (size 0, unit 0).
defp typespec({:<<>>, meta, []}, _, _, state) do
  line = line(meta)
  {{:type, line, :binary, [{:integer, line, 0}, {:integer, line, 0}]}, state}
end

# <<_::_*unit>>
defp typespec(
       {:<<>>, meta, [{:::, unit_meta, [{:_, _, ctx1}, {:*, _, [{:_, _, ctx2}, unit]}]}]},
       _,
       _,
       state
     )
     when is_atom(ctx1) and is_atom(ctx2) and is_integer(unit) and unit >= 0 do
  line = line(meta)
  {{:type, line, :binary, [{:integer, line, 0}, {:integer, line(unit_meta), unit}]}, state}
end

# <<_::size>>
defp typespec({:<<>>, meta, [{:::, size_meta, [{:_, _, ctx}, size]}]}, _, _, state)
     when is_atom(ctx) and is_integer(size) and size >= 0 do
  line = line(meta)
  {{:type, line, :binary, [{:integer, line(size_meta), size}, {:integer, line, 0}]}, state}
end

# <<_::size, _::_*unit>>
defp typespec(
       {
         :<<>>,
         meta,
         [
           {:::, size_meta, [{:_, _, ctx1}, size]},
           {:::, unit_meta, [{:_, _, ctx2}, {:*, _, [{:_, _, ctx3}, unit]}]}
         ]
       },
       _,
       _,
       state
     )
     when is_atom(ctx1) and is_atom(ctx2) and is_atom(ctx3) and is_integer(size) and
            is_integer(unit) and size >= 0 and unit >= 0 do
  args = [{:integer, line(size_meta), size}, {:integer, line(unit_meta), unit}]
  {{:type, line(meta), :binary, args}, state}
end

# Any other binary form is rejected with an explanatory error.
defp typespec({:<<>>, _meta, _args}, _vars, caller, _state) do
  message =
    "invalid binary specification, expected <<_::size>>, <<_::_*unit>>, " <>
      "or <<_::size, _::_*unit>> with size and unit being non-negative integers"

  compile_error(caller, message)
end

## Handle maps and structs
defp typespec({:map, meta, args}, _vars, _caller, state) when args == [] or is_atom(args) do
  {{:type, line(meta), :map, :any}, state}
end

defp typespec({:%{}, meta, fields} = map, vars, caller, state) do
  fun = fn
    # required(key) => value
    {{:required, meta2, [k]}, v}, state ->
      {arg1, state} = typespec(k, vars, caller, state)
      {arg2, state} = typespec(v, vars, caller, state)
      {{:type, line(meta2), :map_field_exact, [arg1, arg2]}, state}

    # optional(key) => value
    {{:optional, meta2, [k]}, v}, state ->
      {arg1, state} = typespec(k, vars, caller, state)
      {arg2, state} = typespec(v, vars, caller, state)
      {{:type, line(meta2), :map_field_assoc, [arg1, arg2]}, state}

    # A plain key => value pair defaults to an exact (required) field.
    {k, v}, state ->
      {arg1, state} = typespec(k, vars, caller, state)
      {arg2, state} = typespec(v, vars, caller, state)
      {{:type, line(meta), :map_field_exact, [arg1, arg2]}, state}

    {:|, _, [_, _]}, _state ->
      error =
        "invalid map specification. When using the | operator in the map key, " <>
          "make sure to wrap the key type in parentheses: #{Macro.to_string(map)}"

      compile_error(caller, error)

    _, _state ->
      compile_error(caller, "invalid map specification: #{Macro.to_string(map)}")
  end

  {fields, state} = :lists.mapfoldl(fun, state, fields)
  {{:type, line(meta), :map, fields}, state}
end

defp typespec({:%, _, [name, {:%{}, meta, fields}]}, vars, caller, state) do
  # We cannot set a function name to avoid tracking
  # as a compile time dependency, because for structs it actually is one.
  module = Macro.expand(name, caller)

  struct =
    module
    |> Macro.struct!(caller)
    |> Map.delete(:__struct__)
    |> Map.to_list()

  unless Keyword.keyword?(fields) do
    compile_error(caller, "expected key-value pairs in struct #{Macro.to_string(name)}")
  end

  # Every struct field gets a type: the user-provided one or term().
  types =
    :lists.map(
      fn {field, _} -> {field, Keyword.get(fields, field, quote(do: term()))} end,
      struct
    )

  fun = fn {field, _} ->
    unless Keyword.has_key?(struct, field) do
      compile_error(
        caller,
        "undefined field #{inspect(field)} on struct #{Macro.to_string(name)}"
      )
    end
  end

  :lists.foreach(fun, fields)
  typespec({:%{}, meta, [__struct__: module] ++ types}, vars, caller, state)
end

# Handle records
defp typespec({:record, meta, [atom]}, vars, caller, state) do
  typespec({:record, meta, [atom, []]}, vars, caller, state)
end

defp typespec({:record, meta, [tag, field_specs]}, vars, caller, state)
     when is_atom(tag) and is_list(field_specs) do
  # We cannot set a function name to avoid tracking
  # as a compile time dependency because for records it actually is one.
  case Macro.expand({tag, [], [{:{}, [], []}]}, caller) do
    {_, _, [name, fields | _]} when is_list(fields) ->
      types =
        :lists.map(
          fn {field, _} -> Keyword.get(field_specs, field, quote(do: term())) end,
          fields
        )

      fun = fn {field, _} ->
        unless Keyword.has_key?(fields, field) do
          compile_error(caller, "undefined field #{field} on record #{inspect(tag)}")
        end
      end

      :lists.foreach(fun, field_specs)
      # A record type is represented as a tagged tuple of its field types.
      typespec({:{}, meta, [name | types]}, vars, caller, state)

    _ ->
      compile_error(caller, "unknown record #{inspect(tag)}")
  end
end

defp typespec({:record, _meta, [_tag, _field_specs]}, _vars, caller, _state) do
  message = "invalid record specification, expected the record name to be an atom literal"
  compile_error(caller, message)
end

# Handle ranges
defp typespec({:.., meta, [left, right]}, vars, caller, state) do
  {left, state} = typespec(left, vars, caller, state)
  {right, state} = typespec(right, vars, caller, state)
  :ok = validate_range(left, right, caller)
  {{:type, line(meta), :range, [left, right]}, state}
end

# Handle special forms
defp typespec({:__MODULE__, _, atom}, vars, caller, state) when is_atom(atom) do
  typespec(caller.module, vars, caller, state)
end

defp typespec({:__aliases__, _, _} = alias, vars, caller, state) do
  # We set a function name to avoid tracking
  # aliases in typespecs as compile time dependencies.
  atom = Macro.expand(alias, %{caller | function: {:typespec, 0}})
  typespec(atom, vars, caller, state)
end

# Handle funs
defp typespec([{:->, meta, [args, return]}], vars, caller, state)
     when is_list(args) do
  {args, state} = fn_args(meta, args, return, vars, caller, state)
  {{:type, line(meta), :fun, args}, state}
end

# Handle type operator
defp typespec(
       {:::, meta, [{var_name, var_meta, context}, expr]} = ann_type,
       vars,
       caller,
       state
     )
     when is_atom(var_name) and is_atom(context) do
  case typespec(expr, vars, caller, state) do
    {{:ann_type, _, _}, _state} ->
      message =
        "invalid type annotation. Type annotations cannot be nested: " <>
          "#{Macro.to_string(ann_type)}"

      # TODO: Make this an error on v2.0 and remove the code below
      :elixir_errors.erl_warn(caller.line, caller.file, message)

      # This may be generating an invalid typespec but we need to generate it
      # to avoid breaking existing code that was valid but only broke dialyzer
      {right, state} = typespec(expr, vars, caller, state)
      {{:ann_type, line(meta), [{:var, line(var_meta), var_name}, right]}, state}

    {right, state} ->
      {{:ann_type, line(meta), [{:var, line(var_meta), var_name}, right]}, state}
  end
end

defp typespec({:::, meta, [left, right]} = expr, vars, caller, state) do
  message =
    "invalid type annotation. When using the | operator to represent the union of types, " <>
      "make sure to wrap type annotations in parentheses: #{Macro.to_string(expr)}"

  # TODO: Make this an error on v2.0, and remove the code below and
  # the :undefined_type_error_enabled? key from the state
  :elixir_errors.erl_warn(caller.line, caller.file, message)

  # This may be generating an invalid typespec but we need to generate it
  # to avoid breaking existing code that was valid but only broke dialyzer
  state = %{state | undefined_type_error_enabled?: false}
  {left, state} = typespec(left, vars, caller, state)
  state = %{state | undefined_type_error_enabled?: true}
  {right, state} = typespec(right, vars, caller, state)
  {{:ann_type, line(meta), [left, right]}, state}
end

# Handle unary ops
defp typespec({op, meta, [integer]}, _, _, state) when op in [:+, :-] and is_integer(integer) do
  line = line(meta)
  {{:op, line, op, {:integer, line, integer}}, state}
end

# Handle remote calls in the form of @module_attribute.type.
# These are not handled by the general remote type clause as calling
# Macro.expand/2 on the remote does not expand module attributes (but expands
# things like __MODULE__).
defp typespec(
       {{:., meta, [{:@, _, [{attr, _, _}]}, name]}, _, args} = orig,
       vars,
       caller,
       state
     ) do
  remote = Module.get_attribute(caller.module, attr)

  unless is_atom(remote) and remote != nil do
    message =
      "invalid remote in typespec: #{Macro.to_string(orig)} (@#{attr} is #{inspect(remote)})"

    compile_error(caller, message)
  end

  {remote_spec, state} = typespec(remote, vars, caller, state)
  {name_spec, state} = typespec(name, vars, caller, state)
  type = {remote_spec, meta, name_spec, args}
  remote_type(type, vars, caller, state)
end

# Handle remote calls
defp typespec({{:., meta, [remote, name]}, _, args} = orig, vars, caller, state) do
  # We set a function name to avoid tracking
  # aliases in typespecs as compile time dependencies.
  remote = Macro.expand(remote, %{caller | function: {:typespec, 0}})

  unless is_atom(remote) do
    compile_error(caller, "invalid remote in typespec: #{Macro.to_string(orig)}")
  end

  {remote_spec, state} = typespec(remote, vars, caller, state)
  {name_spec, state} = typespec(name, vars, caller, state)
  type = {remote_spec, meta, name_spec, args}
  remote_type(type, vars, caller, state)
end

# Handle tuples
defp typespec({:tuple, meta, []}, _vars, _caller, state) do
  {{:type, line(meta), :tuple, :any}, state}
end

defp typespec({:{}, meta, t}, vars, caller, state) when is_list(t) do
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, t)
  {{:type, line(meta), :tuple, args}, state}
end

# Two-element tuples are plain AST pairs; normalize them to the {:{}, ...} form.
defp typespec({left, right}, vars, caller, state) do
  typespec({:{}, [], [left, right]}, vars, caller, state)
end

# Handle blocks
defp typespec({:__block__, _meta, [arg]}, vars, caller, state) do
  typespec(arg, vars, caller, state)
end

# Handle variables or local calls
defp typespec({name, meta, atom}, vars, caller, state) when is_atom(atom) do
  if :lists.member(name, vars) do
    state = update_local_vars(state, name)
    {{:var, line(meta), name}, state}
  else
    # Not a known var: treat it as a zero-argument local call.
    typespec({name, meta, []}, vars, caller, state)
  end
end

# Handle local calls
defp typespec({:string, meta, args}, vars, caller, state) do
  warning =
    "string() type use is discouraged. " <>
      "For character lists, use charlist() type, for strings, String.t()\n" <>
      Exception.format_stacktrace(Macro.Env.stacktrace(caller))

  :elixir_errors.erl_warn(caller.line, caller.file, warning)
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  {{:type, line(meta), :string, args}, state}
end

defp typespec({:nonempty_string, meta, args}, vars, caller, state) do
  warning =
    "nonempty_string() type use is discouraged. " <>
      "For non-empty character lists, use nonempty_charlist() type, for strings, String.t()\n" <>
      Exception.format_stacktrace(Macro.Env.stacktrace(caller))

  :elixir_errors.erl_warn(caller.line, caller.file, warning)
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  {{:type, line(meta), :nonempty_string, args}, state}
end

# charlist()/char_list() expand to the :elixir.charlist() remote type.
defp typespec({type, _meta, []}, vars, caller, state) when type in [:charlist, :char_list] do
  if type == :char_list do
    warning = "the char_list() type is deprecated, use charlist()"
    :elixir_errors.erl_warn(caller.line, caller.file, warning)
  end

  typespec(quote(do: :elixir.charlist()), vars, caller, state)
end

defp typespec({:nonempty_charlist, _meta, []}, vars, caller, state) do
  typespec(quote(do: :elixir.nonempty_charlist()), vars, caller, state)
end

defp typespec({:struct, _meta, []}, vars, caller, state) do
  typespec(quote(do: :elixir.struct()), vars, caller, state)
end

defp typespec({:as_boolean, _meta, [arg]}, vars, caller, state) do
  typespec(quote(do: :elixir.as_boolean(unquote(arg))), vars, caller, state)
end

defp typespec({:keyword, _meta, args}, vars, caller, state) when length(args) <= 1 do
  typespec(quote(do: :elixir.keyword(unquote_splicing(args))), vars, caller, state)
end

defp typespec({:fun, meta, args}, vars, caller, state) do
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  {{:type, line(meta), :fun, args}, state}
end

# Any other local call is either a built-in Erlang type or a user type.
defp typespec({name, meta, args}, vars, caller, state) do
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  arity = length(args)

  case :erl_internal.is_type(name, arity) do
    true ->
      {{:type, line(meta), name, args}, state}

    false ->
      if state.undefined_type_error_enabled? and
           not Map.has_key?(state.defined_type_pairs, {name, arity}) do
        compile_error(caller, "type #{name}/#{arity} undefined")
      end

      # Record the usage so unused private types can be detected later.
      state =
        if :lists.member({name, arity}, state.used_type_pairs) do
          state
        else
          %{state | used_type_pairs: [{name, arity} | state.used_type_pairs]}
        end

      {{:user_type, line(meta), name, args}, state}
  end
end

# Handle literals
defp typespec(atom, _, _, state) when is_atom(atom) do
  {{:atom, 0, atom}, state}
end

defp typespec(integer, _, _, state) when is_integer(integer) do
  {{:integer, 0, integer}, state}
end

# [] is the nil (empty list) type.
defp typespec([], vars, caller, state) do
  typespec({nil, [], []}, vars, caller, state)
end

# [...] and [t, ...] are the nonempty list sugar forms.
defp typespec([{:..., _, atom}], vars, caller, state) when is_atom(atom) do
  typespec({:nonempty_list, [], []}, vars, caller, state)
end

defp typespec([spec, {:..., _, atom}], vars, caller, state) when is_atom(atom) do
  typespec({:nonempty_list, [], [spec]}, vars, caller, state)
end

defp typespec([spec], vars, caller, state) do
  typespec({:list, [], [spec]}, vars, caller, state)
end

# A literal keyword list becomes list(pair1 | pair2 | ...); every element
# must be an atom-keyed pair (validated by validate_kw/3).
defp typespec(list, vars, caller, state) when is_list(list) do
  [head | tail] = :lists.reverse(list)

  union =
    :lists.foldl(
      fn elem, acc -> {:|, [], [validate_kw(elem, list, caller), acc]} end,
      validate_kw(head, list, caller),
      tail
    )

  typespec({:list, [], [union]}, vars, caller, state)
end

defp typespec(other, _vars, caller, _state) do
  compile_error(caller, "unexpected expression in typespec: #{Macro.to_string(other)}")
end
## Helpers

defp compile_error(caller, desc) do
  raise CompileError, file: caller.file, line: caller.line, description: desc
end

# Builds an Erlang :remote_type node, translating the call arguments first.
defp remote_type({remote, meta, name, args}, vars, caller, state) do
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  {{:remote_type, line(meta), [remote, name, args]}, state}
end

# Flattens a right-nested `a | b | c` union AST into a flat list.
defp collect_union({:|, _, [a, b]}), do: [a | collect_union(b)]
defp collect_union(v), do: [v]

# Literal lists in typespecs may only contain atom-keyed pairs.
defp validate_kw({key, _} = t, _, _caller) when is_atom(key), do: t

defp validate_kw(_, original, caller) do
  compile_error(caller, "unexpected list in typespec: #{Macro.to_string(original)}")
end

# Range bounds must be integer literals; negative literals arrive as unary
# minus ops and are folded into negative integers before comparing.
defp validate_range({:op, _, :-, {:integer, meta, first}}, last, caller) do
  validate_range({:integer, meta, -first}, last, caller)
end

defp validate_range(first, {:op, _, :-, {:integer, meta, last}}, caller) do
  validate_range(first, {:integer, meta, -last}, caller)
end

defp validate_range({:integer, _, first}, {:integer, _, last}, _caller) when first < last do
  :ok
end

defp validate_range(_, _, caller) do
  message =
    "invalid range specification, expected both sides to be integers, " <>
      "with the left side lower than the right side"

  compile_error(caller, message)
end

# Builds fun argument/return types; `(... -> any())` collapses to [] (any fun).
defp fn_args(meta, args, return, vars, caller, state) do
  {fun_args, state} = fn_args(meta, args, vars, caller, state)
  {spec, state} = typespec(return, vars, caller, state)

  case [fun_args, spec] do
    [{:type, _, :any}, {:type, _, :any, []}] -> {[], state}
    x -> {x, state}
  end
end

defp fn_args(meta, [{:..., _, _}], _vars, _caller, state) do
  {{:type, line(meta), :any}, state}
end

defp fn_args(meta, args, vars, caller, state) do
  {args, state} = :lists.mapfoldl(&typespec(&1, vars, caller, &2), state, args)
  {{:type, line(meta), :product, args}, state}
end

# Converts a variable AST node to an Erlang :var node; leaves others intact.
defp variable({name, meta, args}) when is_atom(name) and is_atom(args) do
  {:var, line(meta), name}
end

defp variable(expr), do: expr

# Resets the per-definition local type variable tracking.
defp clean_local_state(state) do
  %{state | local_vars: %{}}
end

# Tracks how often a local type variable has been seen (:used_once is bumped
# to :used_multiple on the second sighting).
defp update_local_vars(%{local_vars: local_vars} = state, var_name) do
  case Map.fetch(local_vars, var_name) do
    {:ok, :used_once} -> %{state | local_vars: Map.put(local_vars, var_name, :used_multiple)}
    {:ok, :used_multiple} -> state
    :error -> %{state | local_vars: Map.put(local_vars, var_name, :used_once)}
  end
end

# A type variable seen exactly once was declared but never referenced again.
defp ensure_no_unused_local_vars!(caller, local_vars) do
  fun = fn
    {name, :used_once} -> compile_error(caller, "type variable #{name} is unused")
    _ -> :ok
  end

  :lists.foreach(fun, :maps.to_list(local_vars))
end
end
|
lib/elixir/lib/kernel/typespec.ex
| 0.537284
| 0.496338
|
typespec.ex
|
starcoder
|
defmodule AshPolicyAuthorizer.SatSolver do
  @moduledoc false

  # Enumerates every satisfying scenario (fact => boolean assignment) of the
  # boolean policy expression. Returns {:error, :unsatisfiable} when no
  # assignment satisfies it; otherwise {:ok, scenarios} with irrelevant facts
  # pruned and the two static checks stripped from each scenario.
  def solve(expression) do
    expression
    |> add_negations_and_solve([])
    |> get_all_scenarios(expression)
    |> case do
      [] ->
        {:error, :unsatisfiable}

      scenarios ->
        static_checks = [
          {AshPolicyAuthorizer.Check.Static, [result: true]},
          {AshPolicyAuthorizer.Check.Static, [result: false]}
        ]

        {:ok,
         scenarios
         |> Enum.uniq()
         |> remove_irrelevant_clauses()
         |> Enum.map(&Map.drop(&1, static_checks))}
    end
  end

  defp get_all_scenarios(scenario_result, expression, scenarios \\ [])
  defp get_all_scenarios({:error, :unsatisfiable}, _, scenarios), do: scenarios

  # Each found scenario is negated and conjoined back onto the expression so
  # the solver must produce a different scenario on the next pass; iteration
  # stops once the augmented expression becomes unsatisfiable.
  defp get_all_scenarios({:ok, scenario}, expression, scenarios) do
    expression
    |> add_negations_and_solve([Map.drop(scenario, [true, false]) | scenarios])
    |> get_all_scenarios(expression, [Map.drop(scenario, [true, false]) | scenarios])
  end

  def remove_irrelevant_clauses([scenario]), do: [scenario]

  # Repeatedly deletes facts whose value cannot matter (see
  # find_unnecessary_fact/2) until the scenario set reaches a fixed point.
  def remove_irrelevant_clauses(scenarios) do
    new_scenarios =
      scenarios
      |> Enum.uniq()
      |> Enum.map(fn scenario ->
        unnecessary_fact = find_unnecessary_fact(scenario, scenarios)

        # When no such fact exists this is Map.delete(scenario, nil): a no-op.
        Map.delete(scenario, unnecessary_fact)
      end)
      |> Enum.uniq()

    if new_scenarios == scenarios do
      scenarios
    else
      remove_irrelevant_clauses(new_scenarios)
    end
  end

  # A fact is unnecessary in a scenario when some other scenario is identical
  # except for that fact carrying the opposite boolean value.
  defp find_unnecessary_fact(scenario, scenarios) do
    Enum.find_value(scenario, fn
      {fact, value_in_this_scenario} ->
        matching =
          Enum.find(scenarios, fn potential_irrelevant_maker ->
            potential_irrelevant_maker != scenario &&
              Map.delete(scenario, fact) == Map.delete(potential_irrelevant_maker, fact)
          end)

        case matching do
          %{^fact => value} when is_boolean(value) and value != value_in_this_scenario ->
            fact

          _ ->
            false
        end
    end)
  end

  # Solves the requirements expression ANDed with the negation of every
  # already-seen scenario, yielding either a new scenario or :unsatisfiable.
  @spec add_negations_and_solve(term, term) :: term | no_return()
  defp add_negations_and_solve(requirements_expression, negations) do
    negations =
      Enum.reduce(negations, nil, fn negation, expr ->
        negation_statement =
          negation
          |> Map.drop([true, false])
          |> facts_to_statement()

        if expr do
          {:and, expr, {:not, negation_statement}}
        else
          {:not, negation_statement}
        end
      end)

    full_expression =
      if negations do
        {:and, requirements_expression, negations}
      else
        requirements_expression
      end

    solve_expression(full_expression)
  end

  # Conjoins a map of fact => boolean into an expression tree, negating the
  # facts that must be false. Returns nil for an empty map.
  def facts_to_statement(facts) do
    Enum.reduce(facts, nil, fn {fact, true?}, expr ->
      expr_component =
        if true? do
          fact
        else
          {:not, fact}
        end

      if expr do
        {:and, expr, expr_component}
      else
        expr_component
      end
    end)
  end

  defp solve_expression(expression) do
    Ash.SatSolver.solve_expression(expression)
  end
end
|
lib/ash_policy_authorizer/sat_solver.ex
| 0.66769
| 0.40116
|
sat_solver.ex
|
starcoder
|
defmodule Serum.Page do
  @moduledoc """
  Defines a struct describing a normal page.

  ## Fields

  * `file`: Source path
  * `type`: Type of source file
  * `title`: Page title
  * `label`: Page label
  * `group`: A group the page belongs to
  * `order`: Order of the page within its group
  * `url`: Absolute URL of the page within the website
  * `output`: Destination path
  * `data`: Source data
  """

  @type t :: %__MODULE__{
          file: binary(),
          type: binary(),
          title: binary(),
          label: binary(),
          group: binary(),
          order: integer(),
          url: binary(),
          output: binary(),
          data: binary()
        }

  alias Serum.Fragment
  alias Serum.Markdown
  alias Serum.Plugin
  alias Serum.Renderer
  alias Serum.Result
  alias Serum.Template
  alias Serum.Template.Compiler, as: TC

  defstruct [:file, :type, :title, :label, :group, :order, :url, :output, :data]

  # Builds a page struct from the source path, parsed header, raw source
  # data, and project settings. The URL and output path are derived from the
  # source path relative to the project's "pages" directory, with the source
  # extension replaced by ".html".
  @spec new(binary(), map(), binary(), map()) :: t()
  def new(path, header, data, proj) do
    # Avoid a leading "./" when the project source is the current directory.
    page_dir = (proj.src == "." && "pages") || Path.join(proj.src, "pages")
    filename = Path.relative_to(path, page_dir)
    type = get_type(filename)

    {url, output} =
      with name <- String.replace_suffix(filename, type, ".html") do
        {Path.join(proj.base_url, name), Path.join(proj.dest, name)}
      end

    __MODULE__
    |> struct(header)
    |> Map.merge(%{
      file: path,
      type: type,
      url: url,
      output: output,
      data: data
    })
  end

  # Converts the page into a plain map suitable for template bindings,
  # dropping internal fields and tagging the map with type: :page.
  @spec compact(t()) :: map()
  def compact(%__MODULE__{} = page) do
    page
    |> Map.drop(~w(__struct__ data file output type)a)
    |> Map.put(:type, :page)
  end

  # Returns the source-type extension of a file. For ".eex" files the inner
  # extension is preserved as well, e.g. "post.html.eex" yields ".html.eex".
  @spec get_type(binary) :: binary
  defp get_type(filename) do
    case Path.extname(filename) do
      ".eex" ->
        filename
        |> Path.basename(".eex")
        |> Path.extname()
        |> Kernel.<>(".eex")

      ext ->
        ext
    end
  end

  # Preprocesses the page source, renders it into the "page" template, and
  # hands the resulting fragment to plugins. Any step may return {:error, _}.
  @spec to_fragment(t(), map()) :: Result.t(Fragment.t())
  def to_fragment(page, proj) do
    metadata = compact(page)

    with {:ok, temp} <- preprocess(page, proj),
         {:ok, html} <- render(temp, metadata) do
      fragment = Fragment.new(page.file, page.output, metadata, html)
      Plugin.rendered_fragment(fragment)
    else
      {:error, _} = error -> error
    end
  end

  # Converts the raw page data to HTML depending on the source type:
  # Markdown is compiled, plain HTML passes through, and EEx is evaluated.
  @spec preprocess(t(), Project.t()) :: Result.t(binary())
  defp preprocess(page, proj)

  defp preprocess(%__MODULE__{type: ".md"} = page, proj) do
    {:ok, Markdown.to_html(page.data, proj)}
  end

  defp preprocess(%__MODULE__{type: ".html"} = page, _proj) do
    {:ok, page.data}
  end

  defp preprocess(%__MODULE__{type: ".html.eex"} = page, _proj) do
    case TC.compile_string(page.data, :template) do
      {:ok, ast} ->
        template = Template.new(ast, :template, page.file)
        Renderer.render_fragment(template, [])

      {:ct_error, msg, line} ->
        {:error, {msg, page.file, line}}
    end
  end

  # Renders the preprocessed HTML into the site-wide "page" template.
  @spec render(binary(), map()) :: Result.t(binary())
  defp render(html, metadata) do
    template = Template.get("page")
    bindings = [page: metadata, contents: html]
    Renderer.render_fragment(template, bindings)
  end

  defimpl Fragment.Source do
    alias Serum.Page
    alias Serum.Project
    alias Serum.Result

    @spec to_fragment(Page.t(), Project.t()) :: Result.t(Fragment.t())
    def to_fragment(page, proj), do: Page.to_fragment(page, proj)
  end
end
|
lib/serum/page.ex
| 0.808861
| 0.468791
|
page.ex
|
starcoder
|
defmodule AWS.Backup do
@moduledoc """
AWS Backup
AWS Backup is a unified backup service designed to protect AWS services and
their associated data. AWS Backup simplifies the creation, migration,
restoration, and deletion of backups, while also providing reporting and
auditing.
"""
@doc """
Backup plans are documents that contain information that AWS Backup uses to
schedule tasks that create recovery points of resources.

If you call `CreateBackupPlan` with a plan that already exists, an
`AlreadyExistsException` is returned.
"""
def create_backup_plan(client, input, options \\ []) do
  # PUT with no extra headers or query parameters.
  request(client, :put, "/backup/plans/", [], [], input, options, nil)
end
@doc """
Creates a JSON document that specifies a set of resources to assign to a
backup plan. Resources can be included by specifying patterns for a
`ListOfTags` and selected `Resources`.

For example, consider the following patterns:

<ul> <li> `Resources: "arn:aws:ec2:region:account-id:volume/volume-id"`

</li> <li> `ConditionKey:"department"`

`ConditionValue:"finance"`

`ConditionType:"STRINGEQUALS"`

</li> <li> `ConditionKey:"importance"`

`ConditionValue:"critical"`

`ConditionType:"STRINGEQUALS"`

</li> </ul> Using these patterns would back up all Amazon Elastic Block
Store (Amazon EBS) volumes that are tagged as `"department=finance"`,
`"importance=critical"`, in addition to an EBS volume with the specified
volume Id.

Resources and conditions are additive in that all resources that match the
pattern are selected. This shouldn't be confused with a logical AND, where
all conditions must match. The matching patterns are logically 'put
together using the OR operator. In other words, all patterns that match are
selected for backup.
"""
def create_backup_selection(client, backup_plan_id, input, options \\ []) do
  url = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Creates a logical container where backups are stored. A `CreateBackupVault`
request includes a name, optionally one or more resource tags, an
encryption key, and a request ID.

<note> Sensitive data, such as passport numbers, should not be included the
name of a backup vault.

</note>
"""
def create_backup_vault(client, backup_vault_name, input, options \\ []) do
  url = "/backup-vaults/#{URI.encode(backup_vault_name)}"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Deletes a backup plan. A backup plan can only be deleted after all
associated selections of resources have been deleted. Deleting a backup
plan deletes the current version of a backup plan. Previous versions, if
any, will still exist.
"""
def delete_backup_plan(client, backup_plan_id, input, options \\ []) do
  url = "/backup/plans/#{URI.encode(backup_plan_id)}"
  request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes the resource selection associated with a backup plan that is
specified by the `SelectionId`.
"""
def delete_backup_selection(client, backup_plan_id, selection_id, input, options \\ []) do
  plan = URI.encode(backup_plan_id)
  selection = URI.encode(selection_id)
  request(client, :delete, "/backup/plans/#{plan}/selections/#{selection}", [], [], input, options, nil)
end
@doc """
Deletes the backup vault identified by its name. A vault can be deleted
only if it is empty.
"""
def delete_backup_vault(client, backup_vault_name, input, options \\ []) do
  url = "/backup-vaults/#{URI.encode(backup_vault_name)}"
  request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes the policy document that manages permissions on a backup vault.
"""
def delete_backup_vault_access_policy(client, backup_vault_name, input, options \\ []) do
path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes event notifications for the specified backup vault.
"""
def delete_backup_vault_notifications(client, backup_vault_name, input, options \\ []) do
path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes the recovery point specified by a recovery point ID.
"""
def delete_recovery_point(client, backup_vault_name, recovery_point_arn, input, options \\ []) do
path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Returns metadata associated with creating a backup of a resource.
"""
def describe_backup_job(client, backup_job_id, options \\ []) do
  request(client, :get, "/backup-jobs/#{URI.encode(backup_job_id)}", [], [], nil, options, nil)
end

@doc """
Returns metadata about a backup vault specified by its name.
"""
def describe_backup_vault(client, backup_vault_name, options \\ []) do
  request(client, :get, "/backup-vaults/#{URI.encode(backup_vault_name)}", [], [], nil, options, nil)
end

@doc """
Returns metadata associated with creating a copy of a resource.
"""
def describe_copy_job(client, copy_job_id, options \\ []) do
  request(client, :get, "/copy-jobs/#{URI.encode(copy_job_id)}", [], [], nil, options, nil)
end

@doc """
Returns information about a saved resource, including the last time it was
backed up, its Amazon Resource Name (ARN), and the AWS service type of the
saved resource.
"""
def describe_protected_resource(client, resource_arn, options \\ []) do
  request(client, :get, "/resources/#{URI.encode(resource_arn)}", [], [], nil, options, nil)
end

@doc """
Returns metadata associated with a recovery point, including ID, status,
encryption, and lifecycle.
"""
def describe_recovery_point(client, backup_vault_name, recovery_point_arn, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns the current service opt-in settings for the Region.

If the service has a value set to `true`, AWS Backup attempts to protect
that service's resources in this Region, when included in an on-demand
backup or scheduled backup plan. If the value is set to `false` for a
service, AWS Backup does not attempt to protect that service's resources
in this Region.
"""
def describe_region_settings(client, options \\ []) do
  request(client, :get, "/account-settings", [], [], nil, options, nil)
end

@doc """
Returns metadata associated with a restore job that is specified by a job
ID.
"""
def describe_restore_job(client, restore_job_id, options \\ []) do
  request(client, :get, "/restore-jobs/#{URI.encode(restore_job_id)}", [], [], nil, options, nil)
end

@doc """
Returns the backup plan that is specified by the plan ID as a backup
template.
"""
def export_backup_plan_template(client, backup_plan_id, options \\ []) do
  request(client, :get, "/backup/plans/#{URI.encode(backup_plan_id)}/toTemplate/", [], [], nil, options, nil)
end
@doc """
Returns the body of a backup plan in JSON format, in addition to plan
metadata.
"""
def get_backup_plan(client, backup_plan_id, version_id \\ nil, options \\ []) do
  # Only send the versionId query parameter when the caller supplied one.
  query = Enum.reject([{"versionId", version_id}], fn {_name, value} -> is_nil(value) end)
  request(client, :get, "/backup/plans/#{URI.encode(backup_plan_id)}/", query, [], nil, options, nil)
end
@doc """
Returns a valid JSON document specifying a backup plan or an error.
"""
def get_backup_plan_from_j_s_o_n(client, input, options \\ []) do
  request(client, :post, "/backup/template/json/toPlan", [], [], input, options, nil)
end

@doc """
Returns the template specified by its `templateId` as a backup plan.
"""
def get_backup_plan_from_template(client, backup_plan_template_id, options \\ []) do
  path = "/backup/template/plans/#{URI.encode(backup_plan_template_id)}/toPlan"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns selection metadata and a document in JSON format that specifies a
list of resources that are associated with a backup plan.
"""
def get_backup_selection(client, backup_plan_id, selection_id, options \\ []) do
  path = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/#{URI.encode(selection_id)}"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns the access policy document that is associated with the named backup
vault.
"""
def get_backup_vault_access_policy(client, backup_vault_name, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns event notifications for the specified backup vault.
"""
def get_backup_vault_notifications(client, backup_vault_name, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns a set of metadata key-value pairs that were used to create the
backup.
"""
def get_recovery_point_restore_metadata(client, backup_vault_name, recovery_point_arn, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}/restore-metadata"
  request(client, :get, path, [], [], nil, options, nil)
end

@doc """
Returns the AWS resource types supported by AWS Backup.
"""
def get_supported_resource_types(client, options \\ []) do
  request(client, :get, "/supported-resource-types", [], [], nil, options, nil)
end
@doc """
Returns metadata about your backup jobs.
"""
def list_backup_jobs(client, by_account_id \\ nil, by_backup_vault_name \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, by_state \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Build the query string declaratively; nil values mean "parameter not
  # supplied" and are dropped. The pair order mirrors the order produced by
  # the original prepend-style construction.
  query =
    [
      {"accountId", by_account_id},
      {"backupVaultName", by_backup_vault_name},
      {"createdAfter", by_created_after},
      {"createdBefore", by_created_before},
      {"resourceArn", by_resource_arn},
      {"resourceType", by_resource_type},
      {"state", by_state},
      {"maxResults", max_results},
      {"nextToken", next_token}
    ]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup-jobs/", query, [], nil, options, nil)
end
@doc """
Returns metadata of your saved backup plan templates, including the
template ID, name, and the creation and deletion dates.
"""
def list_backup_plan_templates(client, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup/template/plans", query, [], nil, options, nil)
end

@doc """
Returns version metadata of your backup plans, including Amazon Resource
Names (ARNs), backup plan IDs, creation and deletion dates, plan names, and
version IDs.
"""
def list_backup_plan_versions(client, backup_plan_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup/plans/#{URI.encode(backup_plan_id)}/versions/", query, [], nil, options, nil)
end

@doc """
Returns metadata of your saved backup plans, including Amazon Resource
Names (ARNs), plan IDs, creation and deletion dates, version IDs, plan
names, and creator request IDs.
"""
def list_backup_plans(client, include_deleted \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Note: include_deleted may be a boolean, so nil (not truthiness) is the
  # "absent" test.
  query =
    [{"includeDeleted", include_deleted}, {"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup/plans/", query, [], nil, options, nil)
end

@doc """
Returns an array containing metadata of the resources associated with the
target backup plan.
"""
def list_backup_selections(client, backup_plan_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup/plans/#{URI.encode(backup_plan_id)}/selections/", query, [], nil, options, nil)
end

@doc """
Returns a list of recovery point storage containers along with information
about them.
"""
def list_backup_vaults(client, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/backup-vaults/", query, [], nil, options, nil)
end
@doc """
Returns metadata about your copy jobs.
"""
def list_copy_jobs(client, by_account_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_destination_vault_arn \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, by_state \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
  # nil values mean the filter was not supplied; drop them from the query.
  query =
    [
      {"accountId", by_account_id},
      {"createdAfter", by_created_after},
      {"createdBefore", by_created_before},
      {"destinationVaultArn", by_destination_vault_arn},
      {"resourceArn", by_resource_arn},
      {"resourceType", by_resource_type},
      {"state", by_state},
      {"maxResults", max_results},
      {"nextToken", next_token}
    ]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/copy-jobs/", query, [], nil, options, nil)
end
@doc """
Returns an array of resources successfully backed up by AWS Backup,
including the time the resource was saved, an Amazon Resource Name (ARN) of
the resource, and a resource type.
"""
def list_protected_resources(client, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/resources/", query, [], nil, options, nil)
end

@doc """
Returns detailed information about the recovery points stored in a backup
vault.
"""
def list_recovery_points_by_backup_vault(client, backup_vault_name, by_backup_plan_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [
      {"backupPlanId", by_backup_plan_id},
      {"createdAfter", by_created_after},
      {"createdBefore", by_created_before},
      {"resourceArn", by_resource_arn},
      {"resourceType", by_resource_type},
      {"maxResults", max_results},
      {"nextToken", next_token}
    ]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/"
  request(client, :get, path, query, [], nil, options, nil)
end
@doc """
Returns detailed information about recovery points of the type specified by
a resource Amazon Resource Name (ARN).
"""
def list_recovery_points_by_resource(client, resource_arn, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/resources/#{URI.encode(resource_arn)}/recovery-points/", query, [], nil, options, nil)
end

@doc """
Returns a list of jobs that AWS Backup initiated to restore a saved
resource, including metadata about the recovery process.
"""
def list_restore_jobs(client, by_account_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_status \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [
      {"accountId", by_account_id},
      {"createdAfter", by_created_after},
      {"createdBefore", by_created_before},
      {"status", by_status},
      {"maxResults", max_results},
      {"nextToken", next_token}
    ]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/restore-jobs/", query, [], nil, options, nil)
end

@doc """
Returns a list of key-value pairs assigned to a target recovery point,
backup plan, or backup vault.

<note> `ListTags` are currently only supported with Amazon EFS backups.
</note>
"""
def list_tags(client, resource_arn, max_results \\ nil, next_token \\ nil, options \\ []) do
  query =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/tags/#{URI.encode(resource_arn)}/", query, [], nil, options, nil)
end
@doc """
Sets a resource-based policy that is used to manage access permissions on
the target backup vault. Requires a backup vault name and an access policy
document in JSON format.
"""
def put_backup_vault_access_policy(client, backup_vault_name, input, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy"
  request(client, :put, path, [], [], input, options, nil)
end

@doc """
Turns on notifications on a backup vault for the specified topic and
events.
"""
def put_backup_vault_notifications(client, backup_vault_name, input, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration"
  request(client, :put, path, [], [], input, options, nil)
end

@doc """
Starts a job to create a one-time backup of the specified resource.
"""
def start_backup_job(client, input, options \\ []) do
  request(client, :put, "/backup-jobs", [], [], input, options, nil)
end

@doc """
Starts a job to create a one-time copy of the specified resource.
"""
def start_copy_job(client, input, options \\ []) do
  request(client, :put, "/copy-jobs", [], [], input, options, nil)
end

@doc """
Recovers the saved resource identified by an Amazon Resource Name (ARN).

If the resource ARN is included in the request, then the last complete
backup of that resource is recovered. If the ARN of a recovery point is
supplied, then that recovery point is restored.
"""
def start_restore_job(client, input, options \\ []) do
  request(client, :put, "/restore-jobs", [], [], input, options, nil)
end

@doc """
Attempts to cancel a job to create a one-time backup of a resource.
"""
def stop_backup_job(client, backup_job_id, input, options \\ []) do
  request(client, :post, "/backup-jobs/#{URI.encode(backup_job_id)}", [], [], input, options, nil)
end

@doc """
Assigns a set of key-value pairs to a recovery point, backup plan, or
backup vault identified by an Amazon Resource Name (ARN).
"""
def tag_resource(client, resource_arn, input, options \\ []) do
  request(client, :post, "/tags/#{URI.encode(resource_arn)}", [], [], input, options, nil)
end

@doc """
Removes a set of key-value pairs from a recovery point, backup plan, or
backup vault identified by an Amazon Resource Name (ARN).
"""
def untag_resource(client, resource_arn, input, options \\ []) do
  request(client, :post, "/untag/#{URI.encode(resource_arn)}", [], [], input, options, nil)
end

@doc """
Replaces the body of a saved backup plan identified by its `backupPlanId`
with the input document in JSON format. The new version is uniquely
identified by a `VersionId`.
"""
def update_backup_plan(client, backup_plan_id, input, options \\ []) do
  request(client, :post, "/backup/plans/#{URI.encode(backup_plan_id)}", [], [], input, options, nil)
end
@doc """
Sets the transition lifecycle of a recovery point.

The lifecycle defines when a protected resource is transitioned to cold
storage and when it expires. AWS Backup transitions and expires backups
automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a
minimum of 90 days. Therefore, the "expire after days" setting must be 90
days greater than the "transition to cold after days" setting. The
"transition to cold after days" setting cannot be changed after a backup
has been transitioned to cold.
"""
def update_recovery_point_lifecycle(client, backup_vault_name, recovery_point_arn, input, options \\ []) do
  path = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}"
  request(client, :post, path, [], [], input, options, nil)
end

@doc """
Updates the current service opt-in settings for the Region.

If the service has a value set to `true`, AWS Backup attempts to protect
that service's resources in this Region, when included in an on-demand
backup or scheduled backup plan. If the value is set to `false` for a
service, AWS Backup does not attempt to protect that service's resources
in this Region.
"""
def update_region_settings(client, input, options \\ []) do
  request(client, :put, "/account-settings", [], [], input, options, nil)
end
# Builds, signs (AWS Signature V4), and dispatches a single HTTP request for
# the "backup" service. NOTE(review): the statement order matters — the URL
# (including the query string) must be final before sign_v4 runs, and the
# Host/Content-Type headers must be present before signing; do not reorder.
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, Poison.Parser.t(), Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
# Pin the service name so the signer scopes credentials to "backup".
client = %{client | service: "backup"}
host = build_host("backup", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
# nil input encodes to "" (see encode_payload/1); the payload is part of
# the signature, so it is computed before sign_v4.
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
# Executes the HTTP request. First clause: no explicit success status was
# given, so 200/202/204 are treated as success. NOTE(review): a 200 with an
# empty body returns a 2-tuple {:ok, response}, while every other success
# returns a 3-tuple {:ok, parsed, response} — callers must handle both
# shapes; this asymmetry looks intentional (generated code) but is worth
# confirming.
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, Poison.Parser.parse!(body, %{}), response}
# Any other status is treated as an error whose body is a JSON document.
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
# Second clause: the caller pinned an exact success status code; only that
# status counts as success (empty body yields an empty map).
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
# Resolve the target host; the special "local" region maps to localhost for
# development against a local endpoint.
defp build_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
  Enum.join([endpoint_prefix, region, endpoint], ".")
end

# Assemble the full URL from the client's protocol/port plus host and path.
defp build_url(host, path, %{proto: proto, port: port}) do
  "#{proto}://#{host}:#{port}#{path}"
end

# Append an encoded query string, or leave the URL untouched when there are
# no query parameters.
defp add_query(url, []), do: url

defp add_query(url, query) do
  url <> "?" <> AWS.Util.encode_query(query)
end

# A nil input encodes to an empty payload; anything else is JSON-encoded.
defp encode_payload(nil), do: ""
defp encode_payload(input), do: Poison.Encoder.encode(input, %{})
end
|
lib/aws/backup.ex
| 0.893379
| 0.494446
|
backup.ex
|
starcoder
|
defmodule OT.Text.Transformation do
@moduledoc """
The transformation of two concurrent operations such that they satisfy the
[TP1][tp1] property of operational transformation.
[tp1]: https://en.wikipedia.org/wiki/Operational_transformation#Convergence_properties
"""
alias OT.Text.{Component, Operation, Scanner}
@doc """
Transform an operation against another operation.
Given an operation A that occurred at the same time as operation B against the
same text state, transform the components of operation A such that the state
of the text after applying operation A and then operation B is the same as
after applying operation B and then the transformation of operation A against
operation B:
*S ○ Oa ○ transform(Ob, Oa) = S ○ Ob ○ transform(Oa, Ob)*
This function also takes a third `side` argument that indicates which
operation came later. This is important when deciding whether it is acceptable
to break up insert components from one operation or the other.
"""
@spec transform(Operation.t, Operation.t, OT.Type.side) :: Operation.t
def transform(op_a, op_b, side) do
{op_a, op_b}
|> next
|> do_transform(side)
end
# Recursively consumes head components from both operations.
# `next/1` (Scanner.next with :insert) yields {{head_a, tail_a}, {head_b, tail_b}}
# pairs; a nil head means that operation is exhausted. The clauses below are
# order-sensitive — each matches one (component-type-A, component-type-B, side)
# combination, so do not reorder them.
@spec do_transform(Scanner.output, OT.Type.side, Operation.t) :: Operation.t
defp do_transform(next_pair, side, result \\ [])
# Operation A is exhausted: nothing further to transform; B's remainder is
# intentionally dropped (it doesn't affect A's transformed components).
defp do_transform({{nil, _}, _}, _, result) do
result
end
# Operation B is exhausted: the rest of A passes through unchanged.
defp do_transform({{head_a, tail_a}, {nil, _}}, _, result) do
result
|> Operation.append(head_a)
|> Operation.join(tail_a)
end
# insert / insert / left: the :left side wins the tie — A's insert is emitted
# first and B's insert is reconsidered on the next pass.
defp do_transform({{head_a = %{i: _}, tail_a},
{head_b = %{i: _}, tail_b}}, :left, result) do
{tail_a, [head_b | tail_b]}
|> next
|> do_transform(:left, Operation.append(result, head_a))
end
# insert / insert / right: B's insert happens first, so A must retain over
# B's inserted text (append a retain of head_b's length) before continuing.
defp do_transform({{head_a = %{i: _}, tail_a},
{head_b = %{i: _}, tail_b}}, :right, result) do
{[head_a | tail_a], tail_b}
|> next
|> do_transform(:right, Operation.append(result, Component.length(head_b)))
end
# insert / retain: A's insert passes through; B's retain is kept for the
# next comparison.
defp do_transform({{head_a = %{i: _}, tail_a},
{head_b, tail_b}}, side, result) when is_integer(head_b) do
{tail_a, [head_b | tail_b]}
|> next
|> do_transform(side, Operation.append(result, head_a))
end
# insert / delete: A's insert still applies even though B deleted text here.
defp do_transform({{head_a = %{i: _}, tail_a},
{head_b = %{d: _}, tail_b}}, side, result) do
{tail_a, [head_b | tail_b]}
|> next
|> do_transform(side, Operation.append(result, head_a))
end
# retain / insert: A must additionally retain over the text B inserted.
defp do_transform({{head_a, tail_a},
{head_b = %{i: _}, tail_b}}, side, result)
when is_integer(head_a) do
{[head_a | tail_a], tail_b}
|> next
|> do_transform(side, Operation.append(result, Component.length(head_b)))
end
# retain / retain: both skip the same span; Scanner has split the heads to
# equal lengths, so A's retain is appended as-is.
defp do_transform({{head_a, tail_a},
{head_b, tail_b}}, side, result)
when is_integer(head_a) and is_integer(head_b) do
{tail_a, tail_b}
|> next
|> do_transform(side, Operation.append(result, head_a))
end
# retain / delete: the text A would have retained no longer exists after B's
# delete, so A's retain is dropped entirely.
defp do_transform({{head_a, tail_a},
{%{d: _}, tail_b}}, side, result)
when is_integer(head_a) do
{tail_a, tail_b}
|> next
|> do_transform(side, result)
end
# delete / insert: retain over B's insert, then retry A's delete.
defp do_transform({{head_a = %{d: _}, tail_a},
{head_b = %{i: _}, tail_b}}, side, result) do
{[head_a | tail_a], tail_b}
|> next
|> do_transform(side, Operation.append(result, Component.length(head_b)))
end
# delete / retain: A's delete still applies to the retained text.
defp do_transform({{head_a = %{d: _}, tail_a},
{head_b, tail_b}}, side, result) when is_integer(head_b) do
{tail_a, tail_b}
|> next
|> do_transform(side, Operation.append(result, head_a))
end
# delete / delete: both sides removed the same text; emit nothing.
defp do_transform({{%{d: _}, tail_a},
{%{d: _}, tail_b}}, side, result) do
{tail_a, tail_b}
|> next
|> do_transform(side, result)
end
# Advance the scanner, splitting oversized heads so compared components have
# equal lengths (insert components are exempt from splitting).
@spec next(Scanner.input) :: Scanner.output
defp next(scanner_input), do: Scanner.next(scanner_input, :insert)
end
|
lib/ot/text/transformation.ex
| 0.845481
| 0.742095
|
transformation.ex
|
starcoder
|
defmodule Logger.Backends.Gelf do
  @moduledoc """
  GELF Logger Backend
  # GelfLogger [](https://travis-ci.org/jschniper/gelf_logger)
  A logger backend that will generate Graylog Extended Log Format messages. The
  current version only supports UDP messages.
  ## Configuration
  In the config.exs, add gelf_logger as a backend like this:
  ```
  config :logger,
  backends: [:console, {Logger.Backends.Gelf, :gelf_logger}]
  ```
  In addition, you'll need to pass in some configuration items to the backend
  itself:
  ```
  config :logger, :gelf_logger,
  host: "127.0.0.1",
  port: 12201,
  application: "myapp",
  compression: :gzip, # Defaults to :gzip, also accepts :zlib or :raw
  metadata: [:request_id, :function, :module, :file, :line],
  hostname: "hostname-override",
  tags: [
  list: "of",
  extra: "tags"
  ]
  ```
  In addition to the backend configuration, you might want to check the
  [Logger configuration](https://hexdocs.pm/logger/Logger.html) for other
  options that might be important for your particular environment. In
  particular, modifying the `:utc_log` setting might be necessary
  depending on your server configuration.
  This backend supports `metadata: :all`.
  ## Usage
  Just use Logger as normal.
  ## Improvements
  - [x] Tests
  - [ ] TCP Support
  - [x] Options for compression (none, zlib)
  - [x] Send timestamp instead of relying on the Graylog server to set it
  - [x] Find a better way of pulling the hostname
  And probably many more. This is only out here because it might be useful to
  someone in its current state. Pull requests are always welcome.
  ## Notes
  Credit where credit is due, this would not exist without
  [protofy/erl_graylog_sender](https://github.com/protofy/erl_graylog_sender).
  """
  use GenEvent

  # GELF UDP transport limits: messages above @max_size are rejected outright;
  # messages above @max_packet_size are chunked, each chunk carrying at most
  # @max_payload_size bytes of payload (packet size minus the 12-byte chunk
  # header: 2 magic bytes + 8-byte id + seq + count).
  @max_size 1047040
  @max_packet_size 8192
  @max_payload_size 8180
  # Gregorian seconds of the Unix epoch, used to convert log timestamps.
  @epoch :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})

  # Attach to the :user group leader so output survives the caller's death;
  # refuse to start if :user is not registered yet.
  def init({__MODULE__, name}) do
    if user = Process.whereis(:user) do
      Process.group_leader(self(), user)
      {:ok, configure(name, [])}
    else
      {:error, :ignore}
    end
  end

  # Runtime reconfiguration via Logger.configure_backend/2.
  def handle_call({:configure, options}, state) do
    {:ok, :ok, configure(state[:name], options)}
  end

  # Ignore events originating on other nodes.
  def handle_event({_level, gl, _event}, state) when node(gl) != node() do
    {:ok, state}
  end

  def handle_event({level, _gl, {Logger, msg, ts, md}}, %{level: min_level} = state) do
    if is_nil(min_level) or Logger.compare_levels(level, min_level) != :lt do
      log_event(level, msg, ts, md, state)
    end
    {:ok, state}
  end

  # Nothing is buffered, so :flush is a no-op.
  def handle_event(:flush, state) do
    {:ok, state}
  end

  ## Helpers

  # Merges application env with runtime options, opens the UDP socket, and
  # normalizes the configuration into the backend state map.
  defp configure(name, options) do
    config = Keyword.merge(Application.get_env(:logger, name, []), options)
    Application.put_env(:logger, name, config)
    {:ok, socket} = :gen_udp.open(0)
    {:ok, hostname} = :inet.gethostname
    hostname = Keyword.get(config, :hostname, hostname)
    # :gen_udp.send/4 wants a charlist host. (Was the deprecated
    # to_char_list/1; to_charlist/1 is the supported spelling.)
    gl_host = Keyword.get(config, :host) |> to_charlist
    port = Keyword.get(config, :port)
    application = Keyword.get(config, :application)
    level = Keyword.get(config, :level)
    metadata = Keyword.get(config, :metadata, [])
    compression = Keyword.get(config, :compression, :gzip)
    tags = Keyword.get(config, :tags, [])
    # Accept the port as a binary (e.g. from env vars) or an integer.
    port =
      cond do
        is_binary(port) ->
          {val, ""} = Integer.parse(to_string(port))
          val
        true ->
          port
      end
    %{name: name, gl_host: gl_host, host: to_string(hostname), port: port, metadata: metadata, level: level, application: application, socket: socket, compression: compression, tags: tags}
  end

  # Builds the GELF document for one log event and sends it, chunking when it
  # exceeds a single UDP packet.
  defp log_event(level, msg, ts, md, state) do
    # Map Logger levels onto syslog severities (GELF convention).
    int_level =
      case level do
        :debug -> 7
        :info -> 6
        :warn -> 4
        :error -> 3
      end
    # Additional GELF fields must be prefixed with "_"; values without a
    # String.Chars implementation are inspected instead.
    fields =
      state[:metadata]
      |> case do
        :all -> md # Use all metadata
        keys -> Keyword.take(md, keys) # Use only configured metadata keys
      end
      |> Keyword.merge(state[:tags])
      |> Map.new(fn({k, v}) ->
        case String.Chars.impl_for(v) do
          nil ->
            {"_#{k}", inspect(v)}
          _ ->
            {"_#{k}", to_string(v)}
        end
      end)
    {{year, month, day}, {hour, min, sec, milli}} = ts
    epoch_seconds = :calendar.datetime_to_gregorian_seconds({{year, month, day}, {hour, min, sec}}) - @epoch
    # Zero-pad the millisecond component to three digits: interpolating it
    # directly turned e.g. 5 ms into ".5" (500 ms), skewing timestamps.
    milli_str = String.pad_leading("#{milli}", 3, "0")
    {timestamp, _remainder} = "#{epoch_seconds}.#{milli_str}" |> Float.parse
    gelf = %{
      short_message: String.slice(to_string(msg), 0..79),
      long_message: to_string(msg),
      version: "1.1",
      host: state[:host],
      level: int_level,
      timestamp: Float.round(timestamp, 3),
      _application: state[:application]
    } |> Map.merge(fields)
    data = Poison.encode!(gelf) |> compress(state[:compression])
    size = byte_size(data)
    cond do
      size > @max_size ->
        raise ArgumentError, message: "Message too large"
      size > @max_packet_size ->
        # Ceiling division: number of chunks needed to carry `size` bytes.
        num = div(size, @max_packet_size)
        num =
          if (num * @max_packet_size) < size do
            num + 1
          else
            num
          end
        id = :crypto.strong_rand_bytes(8)
        send_chunks(state[:socket], state[:gl_host], state[:port], data, id, :binary.encode_unsigned(num), 0, size)
      true ->
        :gen_udp.send(state[:socket], state[:gl_host], state[:port], data)
    end
  end

  # Emits successive chunks of @max_payload_size bytes; the final (smaller)
  # remainder falls through to the second clause.
  defp send_chunks(socket, host, port, data, id, num, seq, size) when size > @max_payload_size do
    <<payload :: binary - size(@max_payload_size), rest :: binary>> = data
    :gen_udp.send(socket, host, port, make_chunk(payload, id, num, seq))
    send_chunks(socket, host, port, rest, id, num, seq + 1, byte_size(rest))
  end

  defp send_chunks(socket, host, port, data, id, num, seq, _size) do
    :gen_udp.send(socket, host, port, make_chunk(data, id, num, seq))
  end

  # GELF chunk frame: 0x1e 0x0f magic, 8-byte message id, sequence number,
  # total chunk count, then the payload.
  defp make_chunk(payload, id, num, seq) do
    bin = :binary.encode_unsigned(seq)
    <<0x1e, 0x0f, id :: binary - size(8), bin :: binary - size(1), num :: binary - size(1), payload :: binary>>
  end

  # Applies the configured compression; any unknown type sends raw bytes.
  defp compress(data, type) do
    case type do
      :gzip ->
        :zlib.gzip(data)
      :zlib ->
        :zlib.compress(data)
      _ ->
        data
    end
  end
end
|
lib/gelf_logger.ex
| 0.795658
| 0.757458
|
gelf_logger.ex
|
starcoder
|
defmodule Prometheus do
  @moduledoc """
  [Prometheus.io](https://prometheus.io) client library powered by
  [prometheus.erl](https://hexdocs.pm/prometheus)
  Prometheus.ex is a thin, mostly macro-based wrapper around prometheus.erl.
  While it's pretty straightforward to use prometheus.erl from Elixir,
  you might prefer prometheus.ex because it gives you:
  - native Elixir syntax;
  - native Elixir exceptions;
  - configuration helpers that are really handy if you plan to write your custom
  instrumenter.
  ```elixir
  defmodule ExampleInstrumenter do
  use Prometheus ## require common Prometheus modules, also alias metrics.
  def setup do
  Histogram.new([name: :http_request_duration_milliseconds,
  labels: [:method],
  buckets: [100, 300, 500, 750, 1000],
  help: "Http Request execution time"])
  end
  def instrument(%{time: time, method: method}) do
  Histogram.observe([name: :http_request_duration_milliseconds,
  labels: [method]],
  time)
  end
  end
  ```
  ## Integrations
  - [Ecto Instrumenter](https://hex.pm/packages/prometheus_ecto);
  - [Elixir plugs Instrumenters and Exporter](https://hex.pm/packages/prometheus_plugs);
  - [Fuse plugin](https://github.com/jlouis/fuse#fuse_stats_prometheus)
  - [OS process info Collector](https://hex.pm/packages/prometheus_process_collector)
  (linux-only);
  - [Phoenix Instrumenter](https://hex.pm/packages/prometheus_phoenix);
  - [RabbitMQ Exporter](https://github.com/deadtrickster/prometheus_rabbitmq_exporter).
  ## Erlang VM Collectors
  - [Memory Collector](vm-memory-collector.html);
  - [Statistics Collector](vm-statistics-collector.html);
  - [System Information Collector](vm-system-info-collector.html).
  ## API
  API can be grouped like this:
  ### Standard Metrics & Registry
  - `Prometheus.Metric.Counter` - counter metric, to track counts of events or running
  totals;
  - `Prometheus.Metric.Gauge` - gauge metric, to report instantaneous values;
  - `Prometheus.Metric.Histogram` - histogram metric, to track distributions of events;
  - `Prometheus.Metric.Summary` - summary metric, to track the size of events;
  - `Prometheus.Registry` - working with Prometheus registries.
  All metrics created via `new/1` or `declare/1` macros. The difference is that `new/1`
  actually wants metric to be new and raises `Prometheus.MFAlreadyExistsError`
  if it isn't.
  Both `new/1` and `declare/1` accept options as
  [Keyword](http://elixir-lang.org/docs/stable/elixir/Keyword.html).
  Common options are:
  - name - metric name, can be an atom or a string (required);
  - help - metric help, string (required);
  - labels - metric labels, label can be an atom or a string (default is []);
  - registry - Prometheus registry for the metric, can be any term. (default is :default)
  Histogram also accepts `buckets` option. Please refer to respective modules docs
  for the more information.
  ### General Helpers
  - `Prometheus.Buckets` - linear or exponential bucket generators;
  - `Prometheus.Contrib.HTTP` - helpers for HTTP instrumenters.
  ### Integration Helpers
  - `Prometheus.Config` - provides standard configuration mechanism
  for custom instrumenters/exporters.
  ### Exposition Formats
  - `Prometheus.Format.Text` - renders metrics for a given registry
  (default is `:default`) in text format;
  - `Prometheus.Format.Protobuf` - renders metrics for a given registry
  (default is `:default`) in protobuf v2 format.
  ### Advanced
  You will need this modules only if you're writing custom collector for app/lib
  that can't be instrumented directly.
  - `Prometheus.Collector` - exports macros for managing/creating collectors;
  - `Prometheus.Model` - provides API for working with underlying Prometheus models.
  You'll use that if you want to create custom collector.
  """

  # Injected into any module that does `use Prometheus`: makes the macro
  # modules (`require`) and the metric shortcuts available to the caller.
  defmacro __using__(_opts) do
    quote do
      require Prometheus.Collector
      require Prometheus.Registry
      require Prometheus.Buckets
      # `use` (not `require`) so Prometheus.Metric can also inject its own
      # aliases/requires into the caller.
      use Prometheus.Metric
      require Prometheus.Contrib.HTTP
      require Prometheus.Contrib.Mnesia
    end
  end
end
|
astreu/deps/prometheus_ex/lib/prometheus.ex
| 0.916992
| 0.783036
|
prometheus.ex
|
starcoder
|
defmodule Absinthe.Blueprint.Transform do
  @moduledoc false

  alias Absinthe.Blueprint

  @doc """
  Apply `fun` to a node, then walk to its children and do the same
  """
  @spec prewalk(
          Blueprint.node_t(),
          (Blueprint.node_t() -> Blueprint.node_t() | {:halt, Blueprint.node_t()})
        ) :: Blueprint.node_t()
  def prewalk(node, fun) when is_function(fun, 1) do
    # Adapt the arity-1 fun to the accumulator-based prewalk/3 by
    # threading a dummy `nil` accumulator through the traversal.
    {node, _} =
      prewalk(node, nil, fn x, nil ->
        case fun.(x) do
          {:halt, x} -> {:halt, x, nil}
          x -> {x, nil}
        end
      end)

    node
  end

  @doc """
  Same as `prewalk/2` but takes and returns an accumulator
  The supplied function must be arity 2.
  """
  @spec prewalk(
          Blueprint.node_t(),
          acc,
          (Blueprint.node_t(), acc ->
             {Blueprint.node_t(), acc} | {:halt, Blueprint.node_t(), acc})
        ) :: {Blueprint.node_t(), acc}
        when acc: var
  def prewalk(node, acc, fun) when is_function(fun, 2) do
    # Pre-order: `fun` runs on the way down, the post step is a no-op.
    walk(node, acc, fun, &pass/2)
  end

  @doc """
  Apply `fun` to all children of a node, then apply `fun` to node
  """
  @spec postwalk(Blueprint.node_t(), (Blueprint.node_t() -> Blueprint.node_t())) ::
          Blueprint.node_t()
  def postwalk(node, fun) when is_function(fun, 1) do
    {node, _} = postwalk(node, nil, fn x, nil -> {fun.(x), nil} end)
    node
  end

  @doc """
  Same as `postwalk/2` but takes and returns an accumulator
  """
  @spec postwalk(Blueprint.node_t(), acc, (Blueprint.node_t(), acc -> {Blueprint.node_t(), acc})) ::
          {Blueprint.node_t(), acc}
        when acc: var
  def postwalk(node, acc, fun) when is_function(fun, 2) do
    # Post-order: the pre step is a no-op, `fun` runs on the way back up.
    walk(node, acc, &pass/2, fun)
  end

  # Identity step used when only one of pre/post is supplied.
  defp pass(x, acc), do: {x, acc}

  # Compile-time table: for each blueprint node struct, the struct keys
  # that hold child nodes and therefore must be walked. Used below to
  # generate one `walk/4` clause per node type.
  nodes_with_children = %{
    Blueprint => [:fragments, :operations, :types, :directives],
    Blueprint.Directive => [:arguments],
    Blueprint.Document.Field => [:selections, :arguments, :directives],
    Blueprint.Document.Operation => [:selections, :variable_definitions, :directives],
    Blueprint.TypeReference.List => [:of_type],
    Blueprint.TypeReference.NonNull => [:of_type],
    Blueprint.Document.Fragment.Inline => [:selections, :directives],
    Blueprint.Document.Fragment.Named => [:selections, :directives],
    Blueprint.Document.Fragment.Spread => [:directives],
    Blueprint.Document.VariableDefinition => [:type, :default_value],
    Blueprint.Input.Argument => [:input_value],
    Blueprint.Input.Field => [:input_value],
    Blueprint.Input.Object => [:fields],
    Blueprint.Input.List => [:items],
    Blueprint.Input.Value => [:normalized, :literal],
    Blueprint.Schema.DirectiveDefinition => [:directives, :types],
    Blueprint.Schema.EnumTypeDefinition => [:directives, :values],
    Blueprint.Schema.EnumValueDefinition => [:directives],
    Blueprint.Schema.FieldDefinition => [:type, :arguments, :directives],
    Blueprint.Schema.InputObjectTypeDefinition => [:interfaces, :fields, :directives],
    Blueprint.Schema.InputValueDefinition => [:type, :default_value, :directives],
    Blueprint.Schema.InterfaceTypeDefinition => [:fields, :directives],
    Blueprint.Schema.ObjectTypeDefinition => [:interfaces, :fields, :directives],
    Blueprint.Schema.ScalarTypeDefinition => [:directives],
    Blueprint.Schema.SchemaDefinition => [:directives, :fields],
    Blueprint.Schema.UnionTypeDefinition => [:directives, :types]
  }

  @spec walk(
          Blueprint.node_t(),
          acc,
          (Blueprint.node_t(), acc ->
             {Blueprint.node_t(), acc} | {:halt, Blueprint.node_t(), acc}),
          (Blueprint.node_t(), acc -> {Blueprint.node_t(), acc})
        ) :: {Blueprint.node_t(), acc}
        when acc: var
  def walk(blueprint, acc, pre, post)

  # Generate a walk/4 clause per node type from the table above.
  for {node_name, children} <- nodes_with_children do
    if :selections in children do
      # Nodes flagged `flat` have already had their selections flattened,
      # so skip the :selections key and walk only the remaining children.
      def walk(%unquote(node_name){flags: %{flat: _}} = node, acc, pre, post) do
        node_with_children(node, unquote(children -- [:selections]), acc, pre, post)
      end
    end

    def walk(%unquote(node_name){} = node, acc, pre, post) do
      node_with_children(node, unquote(children), acc, pre, post)
    end
  end

  def walk(nodes, acc, pre, post) when is_list(nodes) do
    Enum.map_reduce(nodes, acc, &walk(&1, &2, pre, post))
  end

  # Fallback for leaf values (scalars, nils, anything without children):
  # still run pre and post, but there is nothing to descend into.
  def walk(leaf_node, acc, pre, post) do
    {leaf_node, acc} =
      case pre.(leaf_node, acc) do
        {:halt, leaf_node, acc} -> {leaf_node, acc}
        val -> val
      end

    post.(leaf_node, acc)
  end

  # Run pre on the node; unless pre halted, walk its children; then run post.
  defp node_with_children(node, children, acc, pre, post) do
    {node, acc} =
      case pre.(node, acc) do
        {:halt, node, acc} ->
          {node, acc}

        {node, acc} ->
          walk_children(node, children, acc, pre, post)
      end

    post.(node, acc)
  end

  # Walk each child key in turn, writing the transformed children back
  # into the node and threading the accumulator through.
  defp walk_children(node, children, acc, pre, post) do
    Enum.reduce(children, {node, acc}, fn child_key, {node, acc} ->
      {children, acc} =
        node
        |> Map.fetch!(child_key)
        |> walk(acc, pre, post)

      {Map.put(node, child_key, children), acc}
    end)
  end
end
|
lib/absinthe/blueprint/transform.ex
| 0.728265
| 0.544014
|
transform.ex
|
starcoder
|
defmodule EEx.TransformerEngine do
  @moduledoc """
  An abstract engine that is meant to be used and
  built upon in other modules. This engine implements
  the `EEx.Engine` behavior and provides a `transform`
  overridable directive that allows a developer to
  customize the expression returned by the engine.
  Check `EEx.AssignsEngine` and `EEx.SmartEngine` for
  examples of using this module.
  """

  @doc false
  defmacro __using__(_) do
    quote do
      # Fixed: the attribute is `@behaviour` (British spelling, inherited
      # from Erlang); `@behavior` was a silent typo that registered an
      # unused attribute instead of declaring the behaviour.
      @behaviour EEx.Engine

      def handle_text(buffer, text) do
        EEx.Engine.handle_text(buffer, text)
      end

      # Each expression is passed through the overridable transform/1
      # before being handed to the default engine.
      def handle_expr(buffer, mark, expr) do
        EEx.Engine.handle_expr(buffer, mark, transform(expr))
      end

      # Recursively walk the quoted expression: 3-element AST tuples
      # ({form, meta, args} — meta is left untouched), 2-element tuples
      # and lists are descended into; everything else passes through.
      defp transform({ a, b, c }) do
        { transform(a), b, transform(c) }
      end

      defp transform({ a, b }) do
        { transform(a), transform(b) }
      end

      defp transform(list) when is_list(list) do
        # `for` replaces the long-removed `lc ... inlist` comprehension
        # syntax, which no current Elixir compiler accepts.
        for i <- list, do: transform(i)
      end

      defp transform(other) do
        other
      end

      defoverridable [transform: 1, handle_expr: 3, handle_text: 2]
    end
  end
end
defmodule EEx.AssignsEngine do
  @moduledoc """
  An abstract engine that, when used with the
  `TransformerEngine`, allows a developer to access
  assigns using `@` as syntax.
  This engine is included by default on the SmartEngine.
  ## Examples
  defmodule MyEngine do
  use EEx.TransformerEngine
  use EEx.AssignsEngine
  end
  EEx.eval_string("<%= @foo %>", assigns: [foo: 1])
  #=> 1
  In the example above, we can access the value `foo` under
  the binding `assigns` using `@foo`. This is useful when
  a template, after compiled, may receive different assigns
  and the developer don't want to recompile it for each
  variable set.
  Assigns can also be used when compiled to a function:
  # sample.eex
  <%= @a + @b %>
  # sample.ex
  defmodule Sample do
  require EEx
  EEx.function_from_file :def, :sample, "sample.eex", [:assigns]
  end
  # iex
  Sample.sample(a: 1, b: 2) #=> "3"
  """

  @doc false
  defmacro __using__(_) do
    # `unquote: false` keeps the inner `unquote(name)` literal so it is
    # resolved when the generated transform/1 clause runs, not here.
    quote unquote: false do
      # Rewrites `@name` nodes in the template AST into lookups on the
      # `assigns` binding of the caller (hence `var!`).
      # NOTE(review): `Dict` is deprecated/removed in modern Elixir —
      # this code predates that; confirm target Elixir version before reuse.
      defp transform({ :@, line, [{ name, _, atom }] }) when is_atom(name) and is_atom(atom) do
        quote do: Dict.get(var!(assigns), unquote(name))
      end

      # Anything that is not an `@` node falls back to the transform/1
      # defined by EEx.TransformerEngine (via super).
      defp transform(arg) do
        super(arg)
      end

      defoverridable [transform: 1]
    end
  end
end
defmodule EEx.SmartEngine do
  @moduledoc """
  An engine meant for end-user usage that includes
  `EEx.AssignsEngine` and other conveniences. Read
  `EEx.AssignsEngine` for examples.
  """

  # Order matters: AssignsEngine overrides the transform/1 made
  # overridable by TransformerEngine, so it must come second.
  use EEx.TransformerEngine
  use EEx.AssignsEngine
end
|
lib/eex/lib/eex/smart_engine.ex
| 0.841517
| 0.514156
|
smart_engine.ex
|
starcoder
|
defmodule DeltaCrdt do
  @moduledoc """
  Start and interact with the Delta CRDTs provided by this library.
  A CRDT is a conflict-free replicated data-type. That is to say, it is a distributed data structure that automatically resolves conflicts in a way that is consistent across all replicas of the data. In other words, your distributed data is guaranteed to eventually converge globally.
  Normal CRDTs (otherwise called "state CRDTs") require transmission of the entire CRDT state with every change. This clearly doesn't scale, but there has been exciting research in the last few years into "Delta CRDTs", CRDTs that only transmit their deltas. This has enabled a whole new scale of applications for CRDTs, and it's also what this library is based on.
  A Delta CRDT is made of two parts. First, the data structure itself, and second, an anti-entropy algorithm, which is responsible for ensuring convergence. `DeltaCrdt` implements Algorithm 2 from ["Delta State Replicated Data Types – Almeida et al. 2016"](https://arxiv.org/pdf/1603.01529.pdf) which is an anti-entropy algorithm for δ-CRDTs. `DeltaCrdt` also implements join decomposition to ensure that deltas aren't transmitted unnecessarily in the cluster.
  While it is certainly interesting to have a look at this paper and spend time grokking it, in theory I've done the hard work so that you don't have to, and this library is the result.
  With this library, you can build distributed applications that share some state. [`Horde.Supervisor`](https://hexdocs.pm/horde/Horde.Supervisor.html) and [`Horde.Registry`](https://hexdocs.pm/horde/Horde.Registry.html) are both built atop `DeltaCrdt`, but there are certainly many more possibilities.
  Here's a simple example for illustration:
  ```
  iex> {:ok, crdt1} = DeltaCrdt.start_link(DeltaCrdt.AWLWWMap, sync_interval: 3)
  iex> {:ok, crdt2} = DeltaCrdt.start_link(DeltaCrdt.AWLWWMap, sync_interval: 3)
  iex> DeltaCrdt.set_neighbours(crdt1, [crdt2])
  iex> DeltaCrdt.set_neighbours(crdt2, [crdt1])
  iex> DeltaCrdt.to_map(crdt1)
  %{}
  iex> DeltaCrdt.put(crdt1, "CRDT", "is magic!")
  iex> Process.sleep(10) # need to wait for propagation for the doctest
  iex> DeltaCrdt.to_map(crdt2)
  %{"CRDT" => "is magic!"}
  ```
  """

  @default_sync_interval 200
  @default_max_sync_size 200
  @default_timeout 5_000

  @type t :: GenServer.server()
  @type key :: any()
  @type value :: any()
  @type diff :: {:add, key :: any(), value :: any()} | {:remove, key :: any()}
  @type crdt_option ::
          {:on_diffs, ([diff()] -> any()) | {module(), function(), [any()]}}
          | {:sync_interval, pos_integer()}
          | {:max_sync_size, pos_integer() | :infinite}
          | {:storage_module, DeltaCrdt.Storage.t()}
  @type crdt_options :: [crdt_option()]

  @doc """
  Start a DeltaCrdt and link it to the calling process.
  There are a number of options you can specify to tweak the behaviour of DeltaCrdt:
  - `:sync_interval` - the delta CRDT will attempt to sync its local changes with its neighbours at this interval (specified in milliseconds). Default is 200.
  - `:on_diffs` - function which will be invoked on every diff
  - `:max_sync_size` - maximum size of synchronization (specified in number of items to sync)
  - `:storage_module` - module which implements `DeltaCrdt.Storage` behaviour
  """
  @spec start_link(
          crdt_module :: module(),
          opts :: crdt_options()
        ) :: GenServer.on_start()
  def start_link(crdt_module, opts \\ []) do
    init_arg =
      Keyword.put(opts, :crdt_module, crdt_module)
      |> Keyword.put_new(:sync_interval, @default_sync_interval)
      |> Keyword.put_new(:max_sync_size, @default_max_sync_size)

    # Only :name is forwarded as a GenServer option; everything else is init state.
    GenServer.start_link(DeltaCrdt.CausalCrdt, init_arg, Keyword.take(opts, [:name]))
  end

  @doc """
  Include DeltaCrdt in a supervision tree with `{DeltaCrdt, [crdt: DeltaCrdt.AWLWWMap, name: MyCRDTMap]}`
  """
  def child_spec(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    crdt_module = Keyword.get(opts, :crdt, nil)
    shutdown = Keyword.get(opts, :shutdown, 5000)

    if is_nil(crdt_module) do
      raise "must specify :crdt in options, got: #{inspect(opts)}"
    end

    %{
      id: name,
      start: {DeltaCrdt, :start_link, [crdt_module, opts]},
      shutdown: shutdown
    }
  end

  @doc """
  Notify a CRDT of its neighbours.
  This function allows CRDTs to communicate with each other and sync their states.
  **Note: this sets up a unidirectional sync, so if you want bidirectional syncing (which is normally desirable), then you must call this function twice (or thrice for 3 nodes, etc):**
  ```
  DeltaCrdt.set_neighbours(c1, [c2, c3])
  DeltaCrdt.set_neighbours(c2, [c1, c3])
  DeltaCrdt.set_neighbours(c3, [c1, c2])
  ```
  """
  @spec set_neighbours(crdt :: t(), neighbours :: list(t())) :: :ok
  def set_neighbours(crdt, neighbours) when is_list(neighbours) do
    # Fire-and-forget: delivered as a plain message, not a GenServer call.
    send(crdt, {:set_neighbours, neighbours})
    :ok
  end

  @doc """
  Put a single `key`/`value` pair into the CRDT. Blocks until the
  operation is acknowledged; returns the crdt reference for chaining.
  """
  @spec put(t(), key(), value(), timeout()) :: t()
  def put(crdt, key, value, timeout \\ @default_timeout) do
    :ok = GenServer.call(crdt, {:operation, {:add, [key, value]}}, timeout)
    crdt
  end

  @doc """
  Add every key/value pair of `map` to the CRDT in one bulk operation.
  Returns the crdt reference for chaining.
  """
  @spec merge(t(), map(), timeout()) :: t()
  def merge(crdt, map, timeout \\ @default_timeout) do
    :ok =
      GenServer.call(
        crdt,
        {:bulk_operation, Enum.map(map, fn {key, value} -> {:add, [key, value]} end)},
        timeout
      )

    crdt
  end

  @doc """
  Remove every key in `keys` from the CRDT in one bulk operation.
  Returns the crdt reference for chaining.
  """
  @spec drop(t(), [key()], timeout()) :: t()
  def drop(crdt, keys, timeout \\ @default_timeout) do
    :ok =
      GenServer.call(
        crdt,
        {:bulk_operation, Enum.map(keys, fn key -> {:remove, [key]} end)},
        timeout
      )

    crdt
  end

  @doc """
  Remove a single `key` from the CRDT. Returns the crdt reference for chaining.
  """
  @spec delete(t(), key(), timeout()) :: t()
  def delete(crdt, key, timeout \\ @default_timeout) do
    :ok = GenServer.call(crdt, {:operation, {:remove, [key]}}, timeout)
    crdt
  end

  @doc """
  Read the value stored under `key`, or `nil` when the key is absent.
  """
  @spec get(t(), key(), timeout()) :: value()
  def get(crdt, key, timeout \\ @default_timeout) do
    case GenServer.call(crdt, {:read, [key]}, timeout) do
      %{^key => elem} -> elem
      _ -> nil
    end
  end

  @doc """
  Read only the entries under the given `keys`.
  """
  @spec take(t(), [key()], timeout()) :: [{key(), value()}]
  def take(crdt, keys, timeout \\ @default_timeout) when is_list(keys) do
    GenServer.call(crdt, {:read, keys}, timeout)
  end

  @doc """
  Read the entire CRDT state as a map.
  """
  @spec to_map(t(), timeout()) :: map()
  def to_map(crdt, timeout \\ @default_timeout) do
    GenServer.call(crdt, :read, timeout)
  end

  @spec mutate(
          crdt :: t(),
          function :: atom,
          arguments :: list(),
          timeout :: timeout()
        ) :: :ok
  @doc """
  Mutate the CRDT synchronously.
  For the asynchronous version of this function, see `mutate_async/3`.
  To see which operations are available, see the documentation for the crdt module that was provided in `start_link/3`.
  For example, `DeltaCrdt.AWLWWMap` has a function `add` that takes 4 arguments. The last 2 arguments are supplied by DeltaCrdt internally, so you have to provide only the first two arguments: `key` and `val`. That would look like this: `DeltaCrdt.mutate(crdt, :add, ["CRDT", "is magic!"])`. This pattern is repeated for all mutation functions. Another example: to call `DeltaCrdt.AWLWWMap.clear`, use `DeltaCrdt.mutate(crdt, :clear, [])`.
  """
  @deprecated "Use put/4 instead"
  def mutate(crdt, f, a, timeout \\ @default_timeout)
      when is_atom(f) and is_list(a) do
    GenServer.call(crdt, {:operation, {f, a}}, timeout)
  end

  @spec mutate_async(crdt :: t(), function :: atom, arguments :: list()) :: :ok
  @doc """
  Mutate the CRDT asynchronously.
  """
  @deprecated "Will be removed without replacement in a future version"
  def mutate_async(crdt, f, a)
      when is_atom(f) and is_list(a) do
    GenServer.cast(crdt, {:operation, {f, a}})
  end

  @doc """
  Read the state of the CRDT.
  Forwards arguments to the used crdt module, so `read(crdt, ["my-key"])` would call `crdt_module.read(state, ["my-key"])`.
  For example, `DeltaCrdt.AWLWWMap` accepts a list of keys to limit the returned values instead of returning everything.
  """
  @spec read(crdt :: t()) :: crdt_state :: term()
  @spec read(crdt :: t(), timeout :: timeout()) :: crdt_state :: term()
  @spec read(crdt :: t(), keys :: list()) :: crdt_state :: term()
  @spec read(crdt :: t(), keys :: list(), timeout :: timeout()) ::
          crdt_state :: term()
  @deprecated "Use get/2 or take/3 or to_map/2"
  def read(crdt), do: read(crdt, @default_timeout)

  # A list second argument means keys; anything else is treated as a timeout.
  def read(crdt, keys) when is_list(keys), do: read(crdt, keys, @default_timeout)

  def read(crdt, timeout) do
    GenServer.call(crdt, :read, timeout)
  end

  def read(crdt, keys, timeout) when is_list(keys) do
    GenServer.call(crdt, {:read, keys}, timeout)
  end
end
|
lib/delta_crdt.ex
| 0.916661
| 0.819424
|
delta_crdt.ex
|
starcoder
|
defmodule Freecodecamp.BasicAlgo do
  @moduledoc """
  Documentation for Freecodecamp (Basic Alogrithmic Scripting).
  """
  @moduledoc since: "0.1.0"

  @doc """
  Convert Celsius to Fahrenheit
  ## Examples
      iex> BasicAlgo.convert_to_f(30)
      86
  """
  @spec convert_to_f(integer) :: integer
  def convert_to_f(celsius), do: div(celsius * 9, 5) + 32

  @doc """
  Reverses a string
  ## Examples
      iex> BasicAlgo.reverse_string("hello")
      "olleh"
  """
  @spec reverse_string(String.t()) :: String.t()
  # defdelegate reverse_string(str), to: String, as: :reverse
  def reverse_string(""), do: ""

  # Peel off the first UTF-8 codepoint and append it after the reversed rest.
  def reverse_string(<<letter::utf8, rest::binary>> = _string),
    do: reverse_string(rest) <> <<letter>>

  @doc """
  Factorialize a number
  ## Examples
      iex> BasicAlgo.factorialize(0)
      1
      iex> BasicAlgo.factorialize(5)
      120
  """
  @spec factorialize(integer) :: integer
  def factorialize(0), do: 1

  def factorialize(number) when is_integer(number) do
    1..number
    |> Stream.filter(&(&1 !== 0))
    |> Enum.to_list()
    |> do_factorialize()
  end

  # Multiply the list down recursively; empty list is the base case (1).
  @spec do_factorialize([integer]) :: integer
  defp do_factorialize(list) when list === [], do: 1
  defp do_factorialize([head | tail]), do: head * do_factorialize(tail)

  @doc """
  Find the longest word and returns the length of it
  ## Examples
      iex> BasicAlgo.find_longest_word_length("")
      0
      iex> BasicAlgo.find_longest_word_length("May the force be with you")
      5
  """
  @spec find_longest_word_length(String.t()) :: integer
  def find_longest_word_length(""), do: 0

  def find_longest_word_length(string) when is_binary(string) do
    string
    |> String.splitter([" "])
    |> Enum.map(&String.length(&1))
    |> Enum.max()
  end

  @doc """
  Return largest numbers in lists
  ## Examples
      iex> BasicAlgo.largest_of_four([])
      []
      iex> BasicAlgo.largest_of_four([[17, 23, 25, 12], [25, 7, 34, 48], [4, -10, 18, 21], [-72, -3, -17, -10]])
      [25, 48, 21, -3]
  """
  @spec largest_of_four([integer]) :: [integer]
  def largest_of_four(list) do
    # Empty sub-lists are dropped before picking each list's maximum.
    list
    |> Stream.filter(&(&1 !== []))
    |> Enum.to_list()
    |> do_largest_of_four()
  end

  # For each sub-list, sort descending and keep the head (its maximum).
  @spec do_largest_of_four([integer]) :: [integer]
  defp do_largest_of_four([]), do: []

  defp do_largest_of_four([head | tail] = _list) do
    sorted_head = head |> Enum.sort(:desc) |> hd()
    [sorted_head | do_largest_of_four(tail)]
  end

  @doc """
  Return repeated string
  ## Examples
      iex> BasicAlgo.repeat_string_num_times("abc", 2)
      "abcabc"
      iex> BasicAlgo.repeat_string_num_times("abc", 0)
      ""
      iex> BasicAlgo.repeat_string_num_times("abc", -1)
      ""
  """
  @spec repeat_string_num_times(String.t(), integer) :: String.t()
  def repeat_string_num_times(_string, num) when num <= 0, do: ""
  def repeat_string_num_times("", _num), do: ""
  def repeat_string_num_times(string, 1), do: string

  def repeat_string_num_times(string, num) when num > 1 do
    string <> repeat_string_num_times(string, num - 1)
  end

  @doc """
  Returns true if the string in the first element of the array
  contains all of the letters of the string in the second
  element of the array.
  ## Examples
      iex> BasicAlgo.mutation(["hello", "Hey"])
      false
      iex> BasicAlgo.mutation(["hello", "neo"])
      false
      iex> BasicAlgo.mutation(["Noel", "Ole"])
      true
  """
  @spec mutation([String.t()]) :: boolean()
  def mutation(["", ""]), do: false
  def mutation(["", _source]), do: false
  def mutation([_target, ""]), do: false

  def mutation([target, source] = _list) do
    # Normalize a string into its sorted set of unique lowercase letters.
    list =
      &(String.downcase(&1)
        |> String.split("", trim: true)
        |> Enum.uniq()
        |> Enum.sort())

    # Keep target letters also found in source; all source letters must remain.
    new_list = list.(target) |> Enum.filter(&(&1 in list.(source)))
    new_list == list.(source)
  end

  @doc """
  Truncate a string (first argument) if it is longer than
  the given maximum string length (second argument). Return
  the truncated string with a `...` ending.
  ## Examples
      iex> BasicAlgo.truncate_string("A-tisket a-tasket A green and yellow basket", 8)
      "A-tisket..."
      iex> BasicAlgo.truncate_string("Absolutely Longer", 2)
      "Ab..."
      iex> BasicAlgo.truncate_string("A-", 1)
      "A..."
      iex> BasicAlgo.truncate_string("A-tisket", -1)
      "..."
      iex> BasicAlgo.truncate_string("Hello", 50)
      "Hello..."
  """
  @spec truncate_string(String.t(), integer) :: String.t()
  def truncate_string(_words, len) when len <= 0, do: "..."
  def truncate_string("", _len), do: "..."

  def truncate_string(words, len) do
    case String.length(words) < len do
      true ->
        words <> "..."

      false ->
        words
        |> String.to_charlist()
        |> do_truncate_string(len)
        |> to_string()
        |> Kernel.<>("...")
    end
  end

  # Take the first `len` characters of the charlist (nested-list output is
  # flattened by to_string/1 above).
  @spec do_truncate_string([char()], integer) :: list()
  defp do_truncate_string(_letter, 0), do: []

  defp do_truncate_string([head | tails] = _list, len),
    do: [[head] | do_truncate_string(tails, len - 1)]

  @doc """
  Return the lowest index at which a value (second argument) should be inserted into an array (first argument) once it has been **sorted**. The returned value should be a number.
  For example, `get_index_to_ins([1,2,3,4], 1.5)` should return 1 because it is greater than 1 (index 0), but less than 2 (index 1).
  Likewise, `get_index_to_ins([20,3,5], 19)` should return 2 because once the array has been sorted it will look like `[3,5,20]` and 19 is less than 20 (index 2) and greater than 5 (index 1).
  ## Examples
      iex> BasicAlgo.get_index_to_ins([1, 2, 3, 4], 1.5)
      1
      iex> BasicAlgo.get_index_to_ins([20, 3, 5], 19)
      2
      iex> BasicAlgo.get_index_to_ins([3, 10, 5], 3)
      0
  """
  @spec get_index_to_ins([integer], integer) :: integer
  def get_index_to_ins([], _value), do: 0

  def get_index_to_ins(list, value) do
    sorted_list = Enum.sort(list)

    # Collect, for every sorted element >= value, its index in the sorted
    # list; the first such index is the insertion point.
    result =
      for element <- sorted_list,
          element >= value,
          do:
            sorted_list
            |> Enum.find_index(&(&1 == round(element)))

    # nil (no element >= value) maps to 0 via do_get_index_to_ins/1.
    List.first(result) |> do_get_index_to_ins()
  end

  # NOTE(review): public despite the do_ prefix — callers may rely on it,
  # so it is left as `def`.
  @spec do_get_index_to_ins(non_neg_integer | nil) :: non_neg_integer
  def do_get_index_to_ins(nil), do: 0
  def do_get_index_to_ins(result), do: result

  @doc """
  Check if a string (first argument, `string`) ends with the
  given target string (second argument, `target`).
  ## Examples
      iex> BasicAlgo.confirm_ending("Bastian", "n")
      true
      iex> BasicAlgo.confirm_ending("Congratulation", "on")
      true
      iex> BasicAlgo.confirm_ending("Connor", "n")
      false
  """
  @spec confirm_ending(String.t(), String.t()) :: boolean()
  def confirm_ending(string, target)
      when byte_size(string) < byte_size(target) do
    false
  end

  def confirm_ending(string, target) do
    # Skip the leading part and compare the tail to the target directly.
    length = String.length(string) - String.length(target)
    <<_substr::binary-size(length), rest::binary>> = string
    rest === target
  end

  @doc """
  Returns the first element thats passes the `truth test` from a given function.
  ## Examples
      iex> BasicAlgo.find_element([1, 3, 5, 8, 9, 10], &Integer.mod(&1, 2) === 0)
      8
      iex> BasicAlgo.find_element([1, 3, 5, 9], &(Integer.mod(&1, 2) === 0))
      nil
      iex> BasicAlgo.find_element([], & &1 === 0)
      nil
  """
  @spec find_element(list(), function()) :: any()
  def find_element([], _fun), do: nil

  def find_element([head | _tail] = list, fun) do
    do_find_element(fun.(head), list, fun)
  end

  # Branch on the already-computed test result for the head element.
  @spec do_find_element(boolean(), list(), function()) :: any()
  defp do_find_element(true, [head | _tail], _fun), do: head
  defp do_find_element(false, [_head | tail], fun), do: find_element(tail, fun)

  @doc """
  Check if a value is classified as a boolean primitive. Return true or false.
  ## Examples
      iex> BasicAlgo.boo_who(true)
      true
      iex> BasicAlgo.boo_who(false)
      true
      iex> BasicAlgo.boo_who([])
      false
      iex> BasicAlgo.boo_who("a")
      false
  """
  @spec boo_who(any()) :: boolean()
  def boo_who(any) when is_boolean(any), do: true
  def boo_who(_not_boolean), do: false

  @doc """
  Capitalize each word in a sentence
  ## Examples
      iex> BasicAlgo.title_case("I'm a little tea pot")
      "I'm A Little Tea Pot"
      iex> BasicAlgo.title_case("sHoRt AnD sToUt")
      "Short And Stout"
      iex> BasicAlgo.title_case("HERE IS MY HANDLE HERE IS MY SPOUT")
      "Here Is My Handle Here Is My Spout"
  """
  @spec title_case(String.t()) :: String.t()
  def title_case(""), do: ""

  def title_case(string) do
    do_title_case(~w(#{string}), "")
  end

  # Accumulates " Word" per word; the base case strips the leading space.
  @spec do_title_case(String.t() | [], String.t()) :: String.t()
  defp do_title_case([], <<_::binary-size(1), string::binary>>), do: string

  defp do_title_case([head | tail] = _list, string) do
    <<first_letter::binary-size(1), rest::binary>> = head
    capitalized = ~s(#{string} #{String.upcase(first_letter)}#{String.downcase(rest)})
    do_title_case(tail, capitalized)
  end

  @doc """
  Inserts the 1st list in 2nd list at its index position (3rd param).
  Also an [SO link](https://stackoverflow.com/a/27420592/10250774) why doing
  binary search on linked list is slower. Used linear search instead.
  ## Examples
      iex> BasicAlgo.franken_splice([1, 2, 3], [4, 5], 1)
      [4, 1, 2, 3, 5]
      iex> BasicAlgo.franken_splice([1, 2], ["a", "b"], 1)
      ["a", 1, 2, "b"]
      iex> BasicAlgo.franken_splice(["claw", "tentacle"], ["head", "shoulders", "knees", "toes"], 2)
      ["head", "shoulders", "claw", "tentacle", "knees", "toes"]
  """
  @spec franken_splice(Enumerable.t(), Enumerable.t(), integer) :: Enumerable.t()
  def franken_splice(list_a, list_b, el) when el >= 0 do
    do_franken_splice(el, list_a, [], list_b)
    |> List.flatten()
  end

  def franken_splice(list_a, list_b, el) when el < 0 do
    # Negative index counts from the end of list_b.
    (length(list_b) + (el + 1))
    |> do_franken_splice(list_a, [], list_b)
    |> List.flatten()
  end

  # Walk list_b, moving `counter` elements into the accumulator; at 0 the
  # spliced list_a is inserted and the rest of list_b appended (the nested
  # improper-looking lists are flattened by the caller).
  @spec do_franken_splice(integer, list(), list(), list()) :: list()
  defp do_franken_splice(counter, list_a, list, list_b)
       when counter === 0,
       do: [list | [list_a | [list_b]]]

  defp do_franken_splice(counter, list_a, list, [h | t] = _list_b)
       when counter > 0,
       do: do_franken_splice(counter - 1, list_a, [list | [h]], t)

  defp do_franken_splice(_counter, list_a, list, list_b)
       when list_b === [],
       do: [list | [list_b | [list_a]]]

  defp do_franken_splice(_counter, list_a, list, list_b)
       when list === [],
       do: [list_a | [list | [list_b]]]

  @doc """
  Remove all falsy values from an array. Falsy values
  in JavaScript are `false, null, 0, "", undefined, and NaN`,
  only "", false, nil or 0 were implemented for simplicity
  reasons.
  ## Examples
      iex> BasicAlgo.bouncer([7, "ate", "", false, 9])
      [7, "ate", 9]
      iex> BasicAlgo.bouncer(["a", "b", "c"])
      ["a", "b", "c"]
      iex> BasicAlgo.bouncer([false, nil, 0, ""])
      []
      iex> BasicAlgo.bouncer([7, [], false, ""])
      [7, []]
  """
  @spec bouncer(list()) :: list()
  def bouncer(list), do: do_bouncer(list, [])

  # Accumulate non-falsy elements, preserving order via List.flatten append.
  @spec do_bouncer(list(), list()) :: list()
  defp do_bouncer(list, filtered_list) when list === [] do
    filtered_list
  end

  defp do_bouncer([head | tails] = _list, list) when head in ["", false, nil, 0] do
    do_bouncer(tails, list)
  end

  defp do_bouncer([head | tails] = _list, list) do
    do_bouncer(tails, List.flatten(list, [head]))
  end

  @doc """
  Splits a list (first argument) into groups the length
  of size (second argument) and returns them as a
  two-dimensional list.
  ## Examples
      iex> BasicAlgo.chunk_array_in_groups(["a", "b", "c", "d"], 2)
      [["a", "b"], ["c", "d"]]
      iex> BasicAlgo.chunk_array_in_groups([0, 1, 2, 3, 4, 5], 3)
      [[0, 1, 2], [3, 4, 5]]
      iex> BasicAlgo.chunk_array_in_groups([0, 1, 2, 3, 4, 5], 2)
      [[0, 1], [2, 3], [4, 5]]
  """
  @spec chunk_array_in_groups(list(), integer) :: [list()]
  def chunk_array_in_groups([], _size), do: []
  # NOTE(review): size < 1 returns the list unchunked (not wrapped) —
  # inconsistent with the [list()] return type, but preserved as-is.
  def chunk_array_in_groups(list, size) when size < 1, do: list

  def chunk_array_in_groups(list, size) do
    do_chunk_array_in_groups([], [], list, size, 0)
  end

  # list_a holds completed chunks, list_b the chunk being filled,
  # counter how many elements the current chunk already has.
  @spec do_chunk_array_in_groups(list(), list(), list(), integer, integer) :: list()
  defp do_chunk_array_in_groups(list_a, list_b, [], _, _) do
    list_a ++ [list_b]
  end

  defp do_chunk_array_in_groups(list_a, list_b, [h | t] = _, size, counter)
       when counter < size do
    do_chunk_array_in_groups(list_a, list_b ++ [h], t, size, counter + 1)
  end

  defp do_chunk_array_in_groups(list_a, list_b, [h | t] = _, size, counter)
       when counter === size do
    do_chunk_array_in_groups(list_a ++ [list_b], [h], t, size, 1)
  end
end
|
lib/freecodecamp/basic_algo.ex
| 0.850142
| 0.413625
|
basic_algo.ex
|
starcoder
|
defmodule SweetXpath do
  defmodule Priv do
    @moduledoc false

    # Identity function — the default transform applied to xpath results.
    @doc false
    def self_val(val), do: val
  end

  # Parsed representation of an ~x sigil: the xpath itself plus the
  # modifier flags (value/list/keyword/optional), an optional cast,
  # a result transform and the namespace bindings.
  defstruct path: ".",
            is_value: true,
            is_list: false,
            is_keyword: false,
            is_optional: false,
            cast_to: false,
            transform_fun: &Priv.self_val/1,
            namespaces: []
end
defmodule SweetXml do
@moduledoc ~S"""
`SweetXml` is a thin wrapper around `:xmerl`. It allows you to convert a
string or xmlElement record as defined in `:xmerl` to an elixir value such
as `map`, `list`, `char_list`, or any combination of these.
For normal sized documents, `SweetXml` primarily exposes 3 functions
* `SweetXml.xpath/2` - return a value based on the xpath expression
* `SweetXml.xpath/3` - similar to above but allowing nesting of mapping
* `SweetXml.xmap/2` - return a map with keywords mapped to values returned
from xpath
For something larger, `SweetXml` mainly exposes 1 function
* `SweetXml.stream_tags/3` - stream a given tag or a list of tags, and
optionally "discard" some dom elements in order to free memory during
streaming for big files which cannot fit entirely in memory
## Examples
Simple Xpath
iex> import SweetXml
iex> doc = "<h1><a>Some linked title</a></h1>"
iex> doc |> xpath(~x"//a/text()")
'Some linked title'
Nested Mapping
iex> import SweetXml
iex> doc = "<body><header><p>Message</p><ul><li>One</li><li><a>Two</a></li></ul></header></body>"
iex> doc |> xpath(~x"//header", message: ~x"./p/text()", a_in_li: ~x".//li/a/text()"l)
%{a_in_li: ['Two'], message: 'Message'}
Streaming
iex> import SweetXml
iex> doc = ["<ul><li>l1</li><li>l2", "</li><li>l3</li></ul>"]
iex> SweetXml.stream_tags(doc, :li)
...> |> Stream.map(fn {:li, doc} ->
...> doc |> SweetXml.xpath(~x"./text()")
...> end)
...> |> Enum.to_list
['l1', 'l2', 'l3']
For more examples please see help for each individual functions
## The ~x Sigil
Notice in the above examples, we used the expression `~x"//a/text()"` to
define the path. The reason is it allows us to more precisely specify what
is being returned.
* `~x"//some/path"`
without any modifiers, `xpath/2` will return the value of the entity if
the entity is of type `xmlText`, `xmlAttribute`, `xmlPI`, `xmlComment`
as defined in `:xmerl`
* `~x"//some/path"e`
`e` stands for (e)ntity. This forces `xpath/2` to return the entity with
which you can further chain your `xpath/2` call
* `~x"//some/path"l`
'l' stands for (l)ist. This forces `xpath/2` to return a list. Without
`l`, `xpath/2` will only return the first element of the match
* `~x"//some/path"el` - mix of the above
* `~x"//some/path"k`
'k' stands for (K)eyword. This forces `xpath/2` to return a Keyword instead of a Map.
* `~x"//some/path"s`
's' stands for (s)tring. This forces `xpath/2` to return the value as
string instead of a char list.
* `x"//some/path"o`
'o' stands for (O)ptional. This allows the path to not exist, and will return nil.
* `~x"//some/path"sl` - string list.
Notice also in the examples section, we always import SweetXml first. This
makes `x_sigil` available in the current scope. Without it, instead of using
`~x`, you can do the following
iex> doc = "<h1><a>Some linked title</a></h1>"
iex> doc |> SweetXml.xpath(%SweetXpath{path: '//a/text()', is_value: true, cast_to: false, is_list: false, is_keyword: false})
'Some linked title'
Note the use of char_list in the path definition.
"""
require Record
Record.defrecord :xmlDecl, Record.extract(:xmlDecl, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlAttribute, Record.extract(:xmlAttribute, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlNamespace, Record.extract(:xmlNamespace, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlNsNode, Record.extract(:xmlNsNode, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlElement, Record.extract(:xmlElement, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlText, Record.extract(:xmlText, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlComment, Record.extract(:xmlComment, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlPI, Record.extract(:xmlPI, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlDocument, Record.extract(:xmlDocument, from_lib: "xmerl/include/xmerl.hrl")
Record.defrecord :xmlObj, Record.extract(:xmlObj, from_lib: "xmerl/include/xmerl.hrl")
@doc ~s"""
`sigil_x/2` simply returns a `SweetXpath` struct, with modifiers converted to
boolean fields
iex> SweetXml.sigil_x("//some/path", 'e')
%SweetXpath{path: '//some/path', is_value: false, cast_to: false, is_list: false, is_keyword: false}
or you can simply import and use the `~x` expression
iex> import SweetXml
iex> ~x"//some/path"e
%SweetXpath{path: '//some/path', is_value: false, cast_to: false, is_list: false, is_keyword: false}
Valid modifiers are `e`, `s`, `l` and `k`. Below is the full explanation
* `~x"//some/path"`
without any modifiers, `xpath/2` will return the value of the entity if
the entity is of type `xmlText`, `xmlAttribute`, `xmlPI`, `xmlComment`
as defined in `:xmerl`
* `~x"//some/path"e`
`e` stands for (e)ntity. This forces `xpath/2` to return the entity with
which you can further chain your `xpath/2` call
* `~x"//some/path"l`
'l' stands for (l)ist. This forces `xpath/2` to return a list. Without
`l`, `xpath/2` will only return the first element of the match
* `~x"//some/path"el` - mix of the above
* `~x"//some/path"k`
'k' stands for (K)eyword. This forces `xpath/2` to return a Keyword instead of a Map.
* `~x"//some/path"s`
's' stands for (s)tring. This forces `xpath/2` to return the value as
string instead of a char list.
* `x"//some/path"o`
'o' stands for (O)ptional. This allows the path to not exist, and will return nil.
* `~x"//some/path"sl` - string list.
* `~x"//some/path"i`
'i' stands for (i)nteger. This forces `xpath/2` to return the value as
integer instead of a char list.
* `~x"//some/path"f`
'f' stands for (f)loat. This forces `xpath/2` to return the value as
float instead of a char list.
* `~x"//some/path"il` - integer list
"""
def sigil_x(path, modifiers \\ '') do
  # Resolve the cast modifier first; the first matching character wins,
  # mirroring the documented precedence (s/S before i/I before f/F).
  cast =
    cond do
      ?s in modifiers -> :string
      ?S in modifiers -> :soft_string
      ?i in modifiers -> :integer
      ?I in modifiers -> :soft_integer
      ?f in modifiers -> :float
      ?F in modifiers -> :soft_float
      true -> false
    end

  # `e` is the only modifier that turns value-extraction off and returns
  # the raw xmerl entity instead.
  %SweetXpath{
    path: String.to_charlist(path),
    is_value: ?e not in modifiers,
    is_list: ?l in modifiers,
    is_keyword: ?k in modifiers,
    is_optional: ?o in modifiers,
    cast_to: cast
  }
end
def add_namespace(xpath, prefix, uri) do
  # Prepend the {prefix, uri} pair, converted to the charlists xmerl
  # expects, onto the xpath's namespace list.
  entry = {to_charlist(prefix), to_charlist(uri)}
  %SweetXpath{xpath | namespaces: [entry | xpath.namespaces]}
end
@doc """
`doc` can be
- a byte list (iodata)
- a binary
- any enumerable of binaries (for instance `File.stream!/3` result)
`options` are `xmerl` options described here [http://www.erlang.org/doc/man/xmerl_scan.html](http://www.erlang.org/doc/man/xmerl_scan.html),
see [the erlang tutorial](http://www.erlang.org/doc/apps/xmerl/xmerl_examples.html) for usage.
When `doc` is an enumerable, the `:cont_fun` option cannot be given.
Return an `xmlElement` record
"""
# Parse `doc` (binary, charlist, or enumerable of binaries) with no options.
def parse(doc), do: parse(doc, [])
# Binary input: xmerl scans charlists, so convert first.
def parse(doc, options) when is_binary(doc) do
  doc |> :erlang.binary_to_list |> parse(options)
end
# Charlist input (head is an integer codepoint): scan it directly.
def parse([c | _] = doc, options) when is_integer(c) do
  {parsed_doc, _} = :xmerl_scan.string(doc, options)
  parsed_doc
end
# Any other enumerable: start the scanner on an empty charlist and pull
# chunks lazily through xmerl's continuation mechanism (continuation_opts/2).
def parse(doc_enum, options) do
  {parsed_doc, _} = :xmerl_scan.string('', options ++ continuation_opts(doc_enum))
  parsed_doc
end
@doc """
Most common usage of streaming: stream a given tag or a list of tags, and
optionally "discard" some dom elements in order to free memory during streaming
for big files which cannot fit entirely in memory.
Note that each matched tag produces it's own tree. If a given tag appears in
the discarded options, it is ignored.
- `doc` is an enumerable, data will be pulled during the result stream
enumeration. e.g. `File.stream!("some_file.xml")`
- `tags` is an atom or a list of atoms you want to extract. Each stream element
will be `{:tagname, xmlelem}`. e.g. :li, :header
- `options[:discard]` is the list of tag which will be discarded:
not added to its parent DOM.
Examples:
iex> import SweetXml
iex> doc = ["<ul><li>l1</li><li>l2", "</li><li>l3</li></ul>"]
iex> SweetXml.stream_tags(doc, :li, discard: [:li])
...> |> Stream.map(fn {:li, doc} -> doc |> SweetXml.xpath(~x"./text()") end)
...> |> Enum.to_list
['l1', 'l2', 'l3']
iex> SweetXml.stream_tags(doc, [:ul, :li])
...> |> Stream.map(fn {_, doc} -> doc |> SweetXml.xpath(~x"./text()") end)
...> |> Enum.to_list
['l1', 'l2', 'l3', nil]
Becareful if you set `options[:discard]`. If any of the discarded tags is nested
inside a kept tag, you will not be able to access them.
Examples:
iex> import SweetXml
iex> doc = ["<header>", "<title>XML</title", "><header><title>Nested</title></header></header>"]
iex> SweetXml.stream_tags(doc, :header)
...> |> Stream.map(fn {_, doc} -> SweetXml.xpath(doc, ~x".//title/text()") end)
...> |> Enum.to_list
['Nested', 'XML']
iex> SweetXml.stream_tags(doc, :header, discard: [:title])
...> |> Stream.map(fn {_, doc} -> SweetXml.xpath(doc, ~x"./title/text()") end)
...> |> Enum.to_list
[nil, nil]
"""
def stream_tags(doc, tags, options \\ []) do
  # Normalize a single tag atom into a list so the membership check below works.
  tags = if is_atom(tags), do: [tags], else: tags

  # Split our own :discard option off from the options forwarded to xmerl.
  {discard_tags, xmerl_options} =
    if options[:discard] do
      {options[:discard], Keyword.delete(options, :discard)}
    else
      {[], options}
    end

  doc
  |> stream(fn emit ->
    [
      # Emit every completed element whose name matches; an empty tag list
      # means "match everything". `tags == []` replaces the original
      # `length(tags) == 0`, which walked the whole list on every element.
      hook_fun: fn
        entity, xstate when Record.is_record(entity, :xmlElement) ->
          name = xmlElement(entity, :name)
          if tags == [] or name in tags do
            emit.({name, entity})
          end
          {entity, xstate}
        entity, xstate ->
          {entity, xstate}
      end,
      # Drop discarded elements instead of accumulating them into their
      # parent, freeing memory while streaming documents that do not fit
      # in memory.
      acc_fun: fn
        entity, acc, xstate when Record.is_record(entity, :xmlElement) ->
          if xmlElement(entity, :name) in discard_tags do
            {acc, xstate}
          else
            {[entity | acc], xstate}
          end
        entity, acc, xstate ->
          {[entity | acc], xstate}
      end
    ] ++ xmerl_options
  end)
end
@doc """
Create an element stream from a xml `doc`.
This is a lower level API compared to `SweetXml.stream_tags`. You can use
the `options_callback` argument to get fine control of what data to be streamed.
- `doc` is an enumerable, data will be pulled during the result stream
enumeration. e.g. `File.stream!("some_file.xml")`
- `options_callback` is an anonymous function `fn emit -> xmerl_opts` use it to
define your :xmerl callbacks and put data into the stream using
`emit.(elem)` in the callbacks.
For example, here you define a stream of all `xmlElement` :
iex> import Record
iex> doc = ["<h1", "><a>Som", "e linked title</a><a>other</a></h1>"]
iex> SweetXml.stream(doc, fn emit ->
...> [
...> hook_fun: fn
...> entity, xstate when is_record(entity, :xmlElement)->
...> emit.(entity)
...> {entity, xstate}
...> entity, xstate ->
...> {entity,xstate}
...> end
...> ]
...> end) |> Enum.count
3
"""
# Binary doc: wrap in a single-element list and stream that.
def stream(doc, options_callback) when is_binary(doc) do
  stream([doc], options_callback)
end
# Charlist doc (head is an integer codepoint): flatten to one binary first.
def stream([c | _] = doc, options_callback) when is_integer(c) do
  stream([IO.iodata_to_binary(doc)], options_callback)
end
# General case: run the xmerl scanner in a spawned, linked process and
# bridge its emitted elements into a Stream.resource/3.
#
# Protocol: the parser sends {:event, ref, e} for each element the
# user's hooks emit, and {:wait, ref} when it has consumed its input
# chunk; the stream answers {:continue, ref} to resume parsing. A
# monitor on the parser pid detects the end of parsing.
def stream(doc, options_callback) do
  Stream.resource fn ->
    {parent, ref} = waiter = {self(), make_ref()}
    opts = options_callback.(fn e -> send(parent, {:event, ref, e}) end)
    pid = spawn_link fn -> :xmerl_scan.string('', opts ++ continuation_opts(doc, waiter)) end
    {ref, pid, Process.monitor(pid)}
  end, fn {ref, pid, monref} = acc ->
    receive do
      {:DOWN, ^monref, _, _, _} ->
        {:halt, :parse_ended} ## !!! maybe do something when reason !== :normal
      {:event, ^ref, event} ->
        {[event], acc}
      {:wait, ^ref} ->
        # Parser paused for more input: tell it to continue, emit nothing yet.
        send(pid, {:continue, ref})
        {[], acc}
    end
  end, fn
    :parse_ended -> :ok
    {ref, pid, monref} ->
      # Stream halted before the parser finished: stop watching it and
      # drain/halt the parser process (see flush_halt/2).
      Process.demonitor(monref)
      flush_halt(pid, ref)
  end
end
@doc ~S"""
`xpath` allows you to query an xml document with xpath.
The second argument to xpath is a `SweetXpath` struct. The optional third
argument is a keyword list, such that the value of each keyword is also
either a `SweetXpath` or a list with head being a `SweetXpath` and tail being
another keyword list exactly like before. Please see examples below for better
understanding.
## Examples
Simple
iex> import SweetXml
iex> doc = "<h1><a>Some linked title</a></h1>"
iex> doc |> xpath(~x"//a/text()")
'Some linked title'
With optional mapping
iex> import SweetXml
iex> doc = "<body><header><p>Message</p><ul><li>One</li><li><a>Two</a></li></ul></header></body>"
iex> doc |> xpath(~x"//header", message: ~x"./p/text()", a_in_li: ~x".//li/a/text()"l)
%{a_in_li: ['Two'], message: 'Message'}
With optional mapping and nesting
iex> import SweetXml
iex> doc = "<body><header><p>Message</p><ul><li>One</li><li><a>Two</a></li></ul></header></body>"
iex> doc
...> |> xpath(
...> ~x"//header",
...> ul: [
...> ~x"./ul",
...> a: ~x"./li/a/text()"
...> ]
...> )
%{ul: %{a: 'Two'}}
"""
# Anything that is not already an xmerl record tuple is parsed first.
def xpath(parent, spec) when not is_tuple(parent) do
  parent |> parse |> xpath(spec)
end
# List of values: extract each entity's value and cast it.
# NOTE: `&` captures the whole pipe, so this reads as
# `&(_value(&1) |> to_cast(cast, is_opt?))` — same form as below.
def xpath(parent, %SweetXpath{is_list: true, is_value: true, cast_to: cast, is_optional: is_opt?} = spec) do
  get_current_entities(parent, spec) |> Enum.map(&(_value(&1)) |> to_cast(cast,is_opt?)) |> spec.transform_fun.()
end
# List of raw entities (modifier `e` + `l`): no value extraction.
def xpath(parent, %SweetXpath{is_list: true, is_value: false} = spec) do
  get_current_entities(parent, spec) |> spec.transform_fun.()
end
# Single string value: collect all matches as a list and join them, so
# e.g. adjacent text nodes become one string; empty match set -> nil.
def xpath(parent, %SweetXpath{is_list: false, is_value: true, cast_to: string_type, is_optional: is_opt?} = spec) when string_type in [:string,:soft_string] do
  spec = %SweetXpath{spec | is_list: true}
  get_current_entities(parent, spec)
  |> Enum.map(&(_value(&1) |> to_cast(string_type, is_opt?)))
  |> case do
    [] -> nil
    items -> Enum.join(items)
  end
  |> spec.transform_fun.()
end
# Single value with any other cast: first match, extracted and cast.
def xpath(parent, %SweetXpath{is_list: false, is_value: true, cast_to: cast, is_optional: is_opt?} = spec) do
  get_current_entities(parent, spec) |> _value |> to_cast(cast, is_opt?) |> spec.transform_fun.()
end
# Single raw entity (modifier `e`).
def xpath(parent, %SweetXpath{is_list: false, is_value: false} = spec) do
  get_current_entities(parent, spec) |> spec.transform_fun.()
end
# xpath/3: resolve the outer path, then apply the nested mapping
# (`subspec`) to each resulting entity via xmap/3.
def xpath(parent, sweet_xpath, subspec) do
  if sweet_xpath.is_list do
    current_entities = xpath(parent, sweet_xpath)
    Enum.map(current_entities, fn (entity) -> xmap(entity, subspec, sweet_xpath) end)
  else
    current_entity = xpath(parent, sweet_xpath)
    xmap(current_entity, subspec, sweet_xpath)
  end
end
@doc ~S"""
`xmap` returns a mapping with each value being the result of `xpath`
Just as `xpath`, you can nest the mapping structure. Please see `xpath` for
more detail.
## Examples
Simple
iex> import SweetXml
iex> doc = "<h1><a>Some linked title</a></h1>"
iex> doc |> xmap(a: ~x"//a/text()")
%{a: 'Some linked title'}
With optional mapping
iex> import SweetXml
iex> doc = "<body><header><p>Message</p><ul><li>One</li><li><a>Two</a></li></ul></header></body>"
iex> doc |> xmap(message: ~x"//p/text()", a_in_li: ~x".//li/a/text()"l)
%{a_in_li: ['Two'], message: 'Message'}
With optional mapping and nesting
iex> import SweetXml
iex> doc = "<body><header><p>Message</p><ul><li>One</li><li><a>Two</a></li></ul></header></body>"
iex> doc
...> |> xmap(
...> message: ~x"//p/text()",
...> ul: [
...> ~x"//ul",
...> a: ~x"./li/a/text()"
...> ]
...> )
%{message: 'Message', ul: %{a: 'Two'}}
iex> doc
...> |> xmap(
...> message: ~x"//p/text()",
...> ul: [
...> ~x"//ul"k,
...> a: ~x"./li/a/text()"
...> ]
...> )
%{message: 'Message', ul: [a: 'Two']}
iex> doc
...> |> xmap([
...> message: ~x"//p/text()",
...> ul: [
...> ~x"//ul",
...> a: ~x"./li/a/text()"
...> ]
...> ], true)
[message: 'Message', ul: %{a: 'Two'}]
"""
# Default: produce a map (not a keyword list).
def xmap(parent, mapping), do: xmap(parent, mapping, %{is_keyword: false})
# Optional parent missing: propagate nil instead of mapping.
def xmap(nil, _, %{is_optional: true}), do: nil
# Third argument given as a bare boolean/atom (public xmap/3 form from
# the docs): normalize it into the internal map shape.
def xmap(parent, [], atom) when is_atom(atom), do: xmap(parent, [], %{is_keyword: atom})
# Base cases: empty mapping yields the empty container of the right kind.
def xmap(_, [], %{is_keyword: false}), do: %{}
def xmap(_, [], %{is_keyword: true}), do: []
# A spec given as a list is [sweet_xpath | nested_mapping]: recurse via
# xpath/3. The tail is mapped first so labels keep insertion order.
def xmap(parent, [{label, spec} | tail], is_keyword) when is_list(spec) do
  [sweet_xpath | subspec] = spec
  result = xmap(parent, tail, is_keyword)
  put_in result[label], xpath(parent, sweet_xpath, subspec)
end
# Plain spec: resolve the xpath and store it under `label`.
def xmap(parent, [{label, sweet_xpath} | tail], is_keyword) do
  result = xmap(parent, tail, is_keyword)
  put_in result[label], xpath(parent, sweet_xpath)
end
@doc """
Tags `%SweetXpath{}` with `fun` to be applied at the end of `xpath` query.
## Examples
iex> import SweetXml
iex> string_to_range = fn str ->
...> [first, last] = str |> String.split("-", trim: true) |> Enum.map(&String.to_integer/1)
...> first..last
...> end
iex> doc = "<weather><zone><name>north</name><wind-speed>5-15</wind-speed></zone></weather>"
iex> doc
...> |> xpath(
...> ~x"//weather/zone"l,
...> name: ~x"//name/text()"s |> transform_by(&String.capitalize/1),
...> wind_speed: ~x"./wind-speed/text()"s |> transform_by(string_to_range)
...> )
[%{name: "North", wind_speed: 5..15}]
"""
def transform_by(%SweetXpath{} = sweet_xpath, fun) when is_function(fun) do
  # Store `fun` so it is applied to the final result of the xpath query.
  %SweetXpath{sweet_xpath | transform_fun: fun}
end
# Extract the `:value` field from any value-carrying xmerl record;
# entities that carry no value (e.g. xmlElement) pass through unchanged.
defp _value(entity) do
  cond do
    is_record? entity, :xmlText ->
      xmlText(entity, :value)
    is_record? entity, :xmlComment ->
      xmlComment(entity, :value)
    is_record? entity, :xmlPI ->
      xmlPI(entity, :value)
    is_record? entity, :xmlAttribute ->
      xmlAttribute(entity, :value)
    is_record? entity, :xmlObj ->
      xmlObj(entity, :value)
    true ->
      entity
  end
end
# Cheap record-kind check: a record is a tuple whose first element is
# its tag atom. Avoids requiring the Record guard at every call site.
defp is_record?(data, kind) do
  is_tuple(data) and tuple_size(data) > 0 and :erlang.element(1, data) == kind
end
# Build the xmerl options that feed the scanner from `enum` lazily.
#
# The suspended Enumerable.reduce/3 is stored as xmerl's continuation
# state; each time the scanner runs out of bytes it resumes the reduce
# to pull one more chunk. When `waiter` ({parent_pid, ref}) is given,
# the parser pauses before each pull and waits for the stream's
# :continue/:halt decision (see stream/2 and flush_halt/2).
defp continuation_opts(enum, waiter \\ nil) do
  [{
    :continuation_fun,
    fn xcont, xexc, xstate ->
      case :xmerl_scan.cont_state(xstate).({:cont, []}) do
        {:halted, _acc} ->
          xexc.(xstate)
        {:suspended, bin, cont}->
          case waiter do
            nil -> :ok
            {parent, ref} ->
              send(parent, {:wait, ref}) # continuation behaviour, pause and wait stream decision
              receive do
                {:continue, ^ref} -> # stream continuation fun has been called: parse to find more elements
                  :ok
                {:halt, ^ref} -> # stream halted: halt the underlying stream and exit parsing process
                  cont.({:halt, []})
                  exit(:normal)
              end
          end
          xcont.(bin, :xmerl_scan.cont_state(cont, xstate))
        {:done, _} -> xexc.(xstate)
      end
    end,
    &Enumerable.reduce(split_by_whitespace(enum), &1, fn bin, _ -> {:suspend, bin} end)
  },
  {
    :close_fun,
    fn xstate -> # make sure the XML end halts the binary stream (if more bytes are available after XML)
      :xmerl_scan.cont_state(xstate).({:halt,[]})
      xstate
    end
  }]
end
# Re-chunk a stream of binaries so every emitted charlist ends on a
# whitespace boundary; the carried accumulator is the unsplit remainder.
# xmerl receives charlists, hence the binary_to_list conversions.
defp split_by_whitespace(enum) do
  reducer = fn
    # Sentinel appended below: flush whatever remainder is left.
    :last, prev ->
      {[:erlang.binary_to_list(prev)], :done}
    bin, prev ->
      bin = if (prev === ""), do: bin, else: IO.iodata_to_binary([prev, bin])
      case split_last_whitespace(bin) do
        # No whitespace found: keep accumulating until a boundary appears.
        :white_bin -> {[], bin}
        {head, tail} -> {[:erlang.binary_to_list(head)], tail}
      end
  end
  Stream.concat(enum, [:last]) |> Stream.transform("", reducer)
end
# Split `bin` after its LAST whitespace byte; :white_bin when none found.
# NOTE(review): an empty binary would start the scan at index -1 and
# never hit the `0` base clause — assumed unreachable because the
# reducer above only passes non-empty accumulations; confirm if reused.
defp split_last_whitespace(bin), do: split_last_whitespace(byte_size(bin) - 1, bin)
defp split_last_whitespace(0, _), do: :white_bin
defp split_last_whitespace(size, bin) do
  case bin do
    # Byte at `size` is whitespace: head keeps it, tail is the rest.
    <<_::binary - size(size), h>> <> tail when h == ?\s or h == ?\n or h == ?\r or h == ?\t ->
      {head, _} = :erlang.split_binary(bin, size + 1)
      {head, tail}
    _ ->
      split_last_whitespace(size - 1, bin)
  end
end
# Called when the result stream halts early: discard any elements the
# parser already emitted, then answer its next {:wait, ref} with :halt
# so the parsing process stops and exits (see continuation_opts/2).
defp flush_halt(pid, ref) do
  receive do
    {:event, ^ref, _} ->
      flush_halt(pid, ref) # flush all emitted elems after :halt
    {:wait, ^ref} ->
      send(pid, {:halt, ref}) # tell the continuation function to halt the underlying stream
  end
end
# List queries always yield a list, even when xmerl returns a bare term.
defp get_current_entities(parent, %SweetXpath{path: path, is_list: true, namespaces: namespaces}) do
  path
  |> :xmerl_xpath.string(parent, namespace: namespaces)
  |> List.wrap()
end

# Single-value queries unwrap to the first match, unless xmerl produced
# an xmlObj record (a scalar result), which is returned as-is.
defp get_current_entities(parent, %SweetXpath{path: path, is_list: false, namespaces: namespaces}) do
  result = :xmerl_xpath.string(path, parent, namespace: namespaces)

  case is_record?(result, :xmlObj) do
    true -> result
    false -> List.first(result)
  end
end
# No cast requested: pass the raw value through.
defp to_cast(value, false, _is_opt?), do: value
# Optional queries propagate nil instead of attempting any cast.
defp to_cast(nil, _cast, true), do: nil
# Strict casts: raise on values that cannot be converted.
defp to_cast(value, :string, _is_opt?), do: to_string(value)
defp to_cast(value, :integer, _is_opt?), do: String.to_integer(to_string(value))

defp to_cast(value, :float, _is_opt?) do
  {float, _rest} = Float.parse(to_string(value))
  float
end

# "Soft" casts never raise: unconvertible values fall back to nil when
# the query is optional, otherwise to a type-appropriate default.
defp to_cast(value, :soft_string, is_opt?) do
  cond do
    String.Chars.impl_for(value) -> to_string(value)
    is_opt? -> nil
    true -> ""
  end
end

defp to_cast(value, :soft_integer, is_opt?) do
  with impl when not is_nil(impl) <- String.Chars.impl_for(value),
       {int, _rest} <- Integer.parse(to_string(value)) do
    int
  else
    _ -> if is_opt?, do: nil, else: 0
  end
end

defp to_cast(value, :soft_float, is_opt?) do
  with impl when not is_nil(impl) <- String.Chars.impl_for(value),
       {float, _rest} <- Float.parse(to_string(value)) do
    float
  else
    _ -> if is_opt?, do: nil, else: 0.0
  end
end
end
|
lib/sweet_xml.ex
| 0.766294
| 0.550124
|
sweet_xml.ex
|
starcoder
|
defmodule PhoenixActiveLink do
@moduledoc """
PhoenixActiveLink provides helpers to add active links in views.
## Configuration
Default options can be customized in the configuration:
```elixir
use Mix.Config
config :phoenix_active_link, :defaults,
wrap_tag: :li,
class_active: "enabled",
class_inactive: "disabled"
```
## Integrate in Phoenix
The simplest way to add the helpers to Phoenix is to `import PhoenixActiveLink`
either in your `web.ex` under views to have it available under every views,
or under for example `App.LayoutView` to have it available in your layout.
"""
use Phoenix.HTML
import Plug.Conn
import Phoenix.LiveView.Helpers
alias Plug.Conn.Query
@opts ~w(active wrap_tag class_active class_inactive active_disable wrap_tag_opts using)a
@doc """
`active_link/3` is a wrapper around `Phoenix.HTML.Link.link/2`.
It generates a link and adds an `active` class depending on the
desired state. It can be customized using the following options.
## Options
* `:active` - See `active_path?/2` documentation for more information
* `:wrap_tag` - Wraps the link in another tag which will also have the same active class.
This options is useful for usage with `li` in bootstrap for example.
* `:class_active` - The class to add when the link is active. Defaults to `"active"`
* `:class_inactive` - The class to add when the link is not active. Empty by default.
* `:active_disable` - Uses a `span` element instead of an anchor when not active.
* `:using` - Define which function to use. Accepts `:link` (default), `:live_redirect` and `:live_patch`.
## Examples
```elixir
<%= active_link(@conn, "Link text", to: "/my/path") %>
<%= active_link(@conn, "Link text", to: "/my/path", wrap_tag: :li) %>
<%= active_link(@conn, "Link text", to: "/my/path", active: :exact) %>
<%= active_link(@conn, "Link text", to: "/my/live-view", using: :live_redirect) %>
```
"""
# Block form: `active_link conn, to: "/x" do ... end`.
def active_link(conn, opts, do: contents) when is_list(opts) do
  active_link(conn, contents, opts)
end

def active_link(conn, text, opts) do
  opts = Keyword.merge(default_opts(), opts)
  active? = active_path?(conn, opts)
  extra_class = extra_class(active?, opts)
  opts = append_class(opts, extra_class)
  link = make_link(active?, text, opts)

  # When :wrap_tag is set, nest the link inside that tag; the wrapper
  # gets the same active/inactive class as the link itself.
  if tag = opts[:wrap_tag] do
    content_tag(tag, link, wrap_tag_opts(extra_class, opts))
  else
    link
  end
end
@doc """
`active_path?/2` is a helper to determine if the element should be in active state or not.
The `:opts` should contain the `:to` option and the active detection can be customized
using by passing `:active` one of the following values.
* `true` - Will always return `true`
* `false` - Will always return `false`
* `:inclusive` - Will return `true` if the current path starts with the link path.
For example, `active_path?(conn, to: "/foo")` will return `true` if the path is `"/foo"` or `"/foobar"`.
* `:exclusive` - Will return `true` if the current path and the link path are the same,
but will ignore the trailing slashes
For example, `active_path?(conn, "/foo")` will return `true`
when the path is `"/foo/"`
* `:exact` - Will return `true` if the current path and the link path are exactly the same,
including trailing slashes.
* a `%Regex{}` - Will return `true` if the current path matches the regex.
Beware that `active?(conn, active: ~r/foo/)` will return `true` if the path is `"/bar/foo"`, so
you must use `active?(conn, active: ~r/^foo/)` if you want to match the beginning of the path.
* a `{controller, action}` list - A list of tuples with a controller module and an action symbol.
Both can be the `:any` symbol to match any controller or action.
* a `{live_view, action}` list - A list of tuples with a live view module and an action symbol.
Both can be the `:any` symbol to match any live view module or action.
* `:exact_with_params` - Will return `true` if the current path and the link path are exactly the same,
including trailing slashes and query string as is.
* `:inclusive_with_params` - Will return `true` if the current path is equal to the link path and the query params of the current path are included to the link path.
For example, `active_path?(conn, to: "/foo?bar=2")` will return `true` if the path is `"/foo?bar=2"` or `"/foo?baz=2&bar=2"`.
For example, `active_path?(conn, to: "/foo?bar=2")` will return `false` if the path is `"/foobaz?bar=2"`.
## Examples
```elixir
active_path?(conn, to: "/foo")
active_path?(conn, to: "/foo", active: false)
active_path?(conn, to: "/foo", active: :exclusive)
active_path?(conn, to: "/foo", active: ~r(^/foo/[0-9]+))
active_path?(conn, to: "/foo", active: [{MyController, :index}, {OtherController, :any}])
active_path?(conn, to: "/foo", active: [{MyLive, :index}, {OtherLive, :any}])
active_path?(conn, to: "/foo?baz=2", active: :inclusive_with_params)
```
"""
def active_path?(conn, opts) do
  to = Keyword.get(opts, :to, "")
  # Dispatch on the :active option (default :inclusive); each mode is
  # described in the @doc above.
  case Keyword.get(opts, :active, :inclusive) do
    true ->
      true
    false ->
      false
    :inclusive ->
      # Current path starts with the link path (root "/" special-cased).
      starts_with_path?(conn.request_path, to)
    :exclusive ->
      # Same path, ignoring trailing slashes.
      String.trim_trailing(conn.request_path, "/") == String.trim_trailing(to, "/")
    :exact ->
      conn.request_path == to
    :exact_with_params ->
      # Compare path plus raw query string verbatim.
      request_path_with_params(conn) == to
    :inclusive_with_params ->
      compare_path_and_params(conn, to)
    %Regex{} = regex ->
      Regex.match?(regex, conn.request_path)
    module_actions when is_list(module_actions) ->
      # List of {controller_or_live_view, action} tuples (:any wildcards).
      module_actions_active?(conn, module_actions)
    _ ->
      # Unknown :active value: treat as inactive rather than raising.
      false
  end
end
# NOTE: root path is an exception, otherwise it would be active all the time
defp starts_with_path?(request_path, "/") when request_path != "/", do: false
# True when the current request path begins with the link path.
defp starts_with_path?(request_path, to) do
  # Parse both paths to strip any query parameters
  %{path: request_path} = URI.parse(request_path)
  %{path: to_path} = URI.parse(to)
  # Trailing slash on the link path is ignored so "/foo/" matches "/foo…".
  String.starts_with?(request_path, String.trim_trailing(to_path, "/"))
end
# True when the dispatched controller/live-view module and action match
# any of the given {module, action} pairs; :any is a wildcard on either
# side. Falls back to {nil, nil} when the conn was not routed.
defp module_actions_active?(conn, module_actions) do
  {current_module, current_action} =
    case conn.private do
      %{phoenix_controller: module, phoenix_action: action} -> {module, action}
      %{phoenix_live_view: {module, opts}} -> {module, Keyword.get(opts, :action)}
      %{} -> {nil, nil}
    end
  Enum.any?(module_actions, fn {module, action} ->
    (module == :any or module == current_module) and
      (action == :any or action == current_action)
  end)
end
# Rebuild the request path with its raw query string appended, if any.
defp request_path_with_params(%{request_path: path, query_string: ""}), do: path

defp request_path_with_params(%{request_path: path, query_string: query}) do
  path <> "?" <> query
end
# :inclusive_with_params mode: the link is active when the current
# request path starts with the link path AND every query parameter of
# the link is present (with the same value, nested maps included) in
# the request's query parameters.
defp compare_path_and_params(conn, to) do
  %{query_params: request_params} = fetch_query_params(conn)

  # `parts: 2` guards against "?" characters inside query values: the
  # previous unbounded String.split/2 could return three or more parts,
  # which matched no clause of the `with`/`else` and crashed.
  with [path, query_params] <- String.split(to, "?", parts: 2),
       true <- starts_with_path?(conn.request_path, path) do
    decoded_params = Query.decode(query_params)
    map_include?(request_params, decoded_params)
  else
    # No "?" in the link at all: plain exact path comparison.
    [path] -> conn.request_path == path
    false -> false
  end
end
# Recursively checks that every {key, value} pair of the second map is
# present in the first, descending into nested maps for nested params.
defp map_include?(map, {key, %{} = value}), do: map_include?(map[key], value)
defp map_include?(map, {key, value}), do: map[key] == value
defp map_include?(in_map, %{} = map), do: Enum.all?(map, &map_include?(in_map, &1))
# Options for the wrapping tag: user-supplied :wrap_tag_opts plus the
# same active/inactive class applied to the link itself.
defp wrap_tag_opts(extra_class, opts) do
  Keyword.get(opts, :wrap_tag_opts, [])
  |> append_class(extra_class)
end
# Active links with :active_disable render as a non-navigating <span>;
# otherwise delegate to the link function selected by :using.
defp make_link(active?, text, opts) do
  if active? and opts[:active_disable] do
    content_tag(:span, text, span_opts(opts))
  else
    link_fun = link_fun(opts)
    link_fun.(text, link_opts(opts))
  end
end
# Resolve the :using option to the Phoenix link/live-navigation builder.
defp link_fun(opts) do
  case Keyword.get(opts, :using, :link) do
    :live_redirect ->
      &live_redirect/2
    :live_patch ->
      &live_patch/2
    :link ->
      &link/2
    value ->
      raise "Value #{inspect(value)} is invalid, valids using are: :link, :live_redirect, :live_patch"
  end
end
# Class to add to the link (and wrapper) for the given active state,
# with "active" as the default active class and "" as the inactive one.
defp extra_class(active?, opts) do
  case active? do
    true -> opts[:class_active] || "active"
    false -> opts[:class_inactive] || ""
  end
end

# Prepend `class` to the :class option, keeping any classes already
# present and dropping empty fragments.
defp append_class(opts, class) do
  existing = opts |> Keyword.get(:class, "") |> String.split(" ")

  merged =
    [class | existing]
    |> Enum.reject(&(&1 == ""))
    |> Enum.join(" ")

  Keyword.put(opts, :class, merged)
end
# Strip this library's own options so only genuine HTML/link attributes
# reach Phoenix.HTML's builders.
defp link_opts(opts) do
  Enum.reject(opts, &(elem(&1, 0) in @opts))
end
# A disabled (span) link additionally drops navigation-only attributes.
defp span_opts(opts) do
  opts |> link_opts() |> Enum.reject(&(elem(&1, 0) in ~w(to form method)a))
end
# App-configured defaults; merged under per-call options in active_link/3.
defp default_opts do
  Application.get_env(:phoenix_active_link, :defaults, [])
end
end
|
lib/phoenix_active_link.ex
| 0.921865
| 0.822296
|
phoenix_active_link.ex
|
starcoder
|
defmodule Pique do
  @moduledoc """
  Main Pique application. Starts the `gen_smtp_server` with the
  default configuration. If the configuration states
  that `auth` is `true` then the application will not start unless
  it is configured with `sessionoptions` that specify a cert and key
  file as well as listening on `:ssl` vs. `:tcp`.
  Example SSL configuration:
  ```
  config :pique,
    auth: true,
    smtp_opts: [
      port: 4646,
      protocol: :ssl,
      sessionoptions: [
        certfile: "foo",
        keyfile: "bar"
      ]
    ]
  ```
  """
  require Logger
  use Application

  @doc """
  Application entry point: validates the SSL configuration, then starts
  a supervisor with a single `:gen_smtp_server` child using the
  configured callback module (default `Pique.Smtp`) and `:smtp_opts`.
  """
  @spec start(any, any) :: {:error, any} | {:ok, pid}
  def start(_type, _args) do
    smtp_options = Application.get_env(:pique, :smtp_opts, [])

    children = [
      %{
        id: :gen_smtp_server,
        start: {:gen_smtp_server, :start_link, [
          Application.get_env(:pique, :callback, Pique.Smtp),
          [smtp_options]
        ]}
      }
    ]

    # Abort startup early, with a clear log message, when auth is enabled
    # but the SSL options are incomplete.
    validate_ssl_options(smtp_options)

    opts = [strategy: :one_for_one, name: Pique.Supervisor]
    Supervisor.start_link(children, opts)
  end

  @doc """
  Validates that `smtp_options` is SSL-ready when `:pique, :auth` is `true`.

  Checks, in order: `:protocol` must be present and not `:tcp`,
  `:sessionoptions` must be present and must contain both `:certfile`
  and `:keyfile`. The first failing check logs an error and exits with
  `:shutdown`; otherwise returns `nil`. Replaces the previous four
  duplicated log-and-exit `if` blocks with one ordered `cond`.
  """
  @spec validate_ssl_options(any) :: nil
  def validate_ssl_options(smtp_options) do
    if Application.get_env(:pique, :auth) == true do
      # cond evaluates lazily top-to-bottom, so the sessionoptions lookups
      # below only run once :sessionoptions is known to exist.
      error =
        cond do
          !Keyword.has_key?(smtp_options, :protocol) or Keyword.get(smtp_options, :protocol) == :tcp ->
            "Pique auth set to true, but protocol needs to be :ssl"

          !Keyword.has_key?(smtp_options, :sessionoptions) ->
            "Pique auth set to true, but no sessionoptions defined"

          !Keyword.has_key?(smtp_options[:sessionoptions], :certfile) ->
            "Pique auth set to true, but no certfile specified"

          !Keyword.has_key?(smtp_options[:sessionoptions], :keyfile) ->
            "Pique auth set to true, but no keyfile specified"

          true ->
            nil
        end

      if error do
        Logger.error(error)
        exit(:shutdown)
      end
    end
  end
end
|
lib/pique.ex
| 0.826081
| 0.693642
|
pique.ex
|
starcoder
|
defmodule OpenGraph do
@moduledoc """
Fetch and parse websites to extract Open Graph meta tags.
The example above shows how to fetch the GitHub Open Graph rich objects.
```
OpenGraph.fetch("https://github.com")
%OpenGraph{description: "GitHub is where people build software. More than 15 million...",
image: "https://assets-cdn.github.com/images/modules/open_graph/github-octocat.png",
site_name: "GitHub", title: "Build software better, together", type: nil,
url: "https://github.com"}
```
"""
# Basic fields
defstruct [
:title,
:type,
:image,
:url,
# Optional fields
:description,
:audio,
:determiner,
:locale,
:site_name,
:video,
# Image fields
:"image:secure_url",
:"image:type",
:"image:width",
:"image:height",
:"image:alt",
# Video fields
:"video:secure_url",
:"video:type",
:"video:width",
:"video:height",
:"video:alt",
# Audio fields
:"audio:secure_url",
:"audio:type",
# Book fields
:"book:author",
:"book:isbn",
:"book:release_date",
:"book:tag",
:"price:amount",
:"price:currency"
]
@type t :: %OpenGraph{
title: String.t() | nil,
type: String.t() | nil,
url: String.t() | nil,
description: String.t() | nil,
audio: String.t() | nil,
determiner: String.t() | nil,
locale: String.t() | nil,
site_name: String.t() | nil,
video: String.t() | nil,
"image:secure_url": String.t() | nil,
"image:type": String.t() | nil,
"image:width": String.t() | nil,
"image:height": String.t() | nil,
"image:alt": String.t() | nil,
"video:secure_url": String.t() | nil,
"video:type": String.t() | nil,
"video:width": String.t() | nil,
"video:height": String.t() | nil,
"video:alt": String.t() | nil,
"audio:secure_url": String.t() | nil,
"audio:type": String.t() | nil,
"book:author": list(String.t()) | nil,
"book:isbn": String.t() | nil,
"book:release_date": String.t() | nil,
"book:tag": list(String.t()) | nil,
"price:amount": String.t() | nil,
"price:currency": String.t() | nil
}
@type html :: String.t() | charlist()
@doc """
Parses the given HTML to extract the Open Graph objects.
Args:
* `html` - raw HTML as a binary string or char list
This functions returns an OpenGraph struct.
"""
@spec parse(String.t()) :: t()
def parse(html) when is_binary(html) or is_list(html) do
{:ok, document} = Floki.parse_document(html)
allowed_keys = get_allowed_keys()
data =
document
|> Floki.find("meta")
|> Stream.filter(fn metatag ->
Floki.attribute(metatag, "property") != nil
end)
|> Stream.filter(fn metatag ->
property = Floki.attribute(metatag, "property") |> List.first()
filter_og_metatags(property)
end)
|> Stream.flat_map(fn x -> format(x) end)
|> Stream.flat_map(fn x -> replace_books_with_book(x) end)
|> Enum.reduce(%{}, fn {key, value}, acc ->
if Enum.member?(["book:tag", "book:author"], key) do
array = Map.get(acc, key, [])
Map.merge(acc, %{key => Enum.concat(array, [value])})
else
Map.merge(acc, %{key => value})
end
end)
|> Enum.filter(fn {key, value} ->
value != nil && Enum.member?(allowed_keys, key)
end)
|> Enum.map(fn {key, value} ->
{String.to_atom(key), value}
end)
struct(OpenGraph, data)
end
defp format(metatag) do
property = Floki.attribute(metatag, "property") |> List.first() |> drop_og_prefix()
content = Floki.attribute(metatag, "content") |> List.first()
[{property, content}]
end
defp replace_books_with_book({key_string, value}) do
if String.starts_with?(key_string, "books:") do
new_key = key_string |> String.replace(~r/^books:/, "book:")
[{new_key, value}]
else
[{key_string, value}]
end
end
defp filter_og_metatags("og:" <> _property), do: true
defp filter_og_metatags("book:" <> _property), do: true
defp filter_og_metatags("books:" <> _property), do: true
defp filter_og_metatags(_), do: false
  # Strips the "og:" namespace prefix; any other property is returned as-is
  # (book:/books: properties keep their namespace as part of the key).
  defp drop_og_prefix("og:" <> property), do: property
  defp drop_og_prefix(property), do: property
defp get_allowed_keys do
Map.keys(OpenGraph.__struct__())
|> Enum.map(&Atom.to_string(&1))
|> Enum.filter(fn x -> x !== "__struct__" end)
end
end
|
lib/open_graph.ex
| 0.796609
| 0.885136
|
open_graph.ex
|
starcoder
|
defmodule DBConnection.Sojourn do
  @moduledoc """
  A `DBConnection.Pool` using sbroker.

  ### Options

    * `:pool_size` - The number of connections (default: `10`)
    * `:broker` - The sbroker callback module (see `:sbroker`,
    default: `DBConnection.Sojourn.Timeout`)
    * `:broker_start_opts` - Start options for the broker (see
    `:sbroker`, default: `[]`)
    * `:max_restarts` - the maximum amount of connection restarts allowed in a
    time frame (default `3`)
    * `:max_seconds` - the time frame in which `:max_restarts` applies (default
    `5`)
    * `:shutdown` - the shutdown strategy for connections (default `5_000`)

  All options are passed as the argument to the sbroker callback module.
  """

  @behaviour DBConnection.Pool

  @broker DBConnection.Sojourn.Timeout
  @time_unit :micro_seconds

  import Supervisor.Spec

  @doc false
  def start_link(mod, opts) do
    Supervisor.start_link(children(mod, opts), [strategy: :rest_for_one])
  end

  @doc false
  def child_spec(mod, opts, child_opts \\ []) do
    args = [children(mod, opts), [strategy: :rest_for_one]]
    supervisor(Supervisor, args, child_opts)
  end

  @doc false
  def checkout(broker, opts) do
    case ask(broker, opts) do
      {:go, ref, {pid, mod, state}, _, _} -> {:ok, {pid, ref}, mod, state}
      {drop, _} when drop in [:drop, :retry] -> :error
    end
  end

  @doc false
  defdelegate checkin(ref, state, opts), to: DBConnection.Connection

  @doc false
  defdelegate disconnect(ref, err, state, opts), to: DBConnection.Connection

  @doc false
  defdelegate stop(ref, reason, state, opts), to: DBConnection.Connection

  ## Helpers

  # The broker starts first; the connection supervisor and starter depend on
  # it, which is why the supervisor strategy is :rest_for_one.
  defp children(mod, opts) do
    [broker(opts), conn_sup(mod, opts), starter(opts)]
  end

  # Registers the broker under `:name` when given; a bare atom is registered
  # with :local scope, any other term is passed to :sbroker as-is.
  defp broker(opts) do
    case Keyword.get(opts, :name, nil) do
      nil ->
        worker(:sbroker, broker_args(opts))

      name when is_atom(name) ->
        worker(:sbroker, [{:local, name} | broker_args(opts)])

      name ->
        worker(:sbroker, [name | broker_args(opts)])
    end
  end

  defp broker_args(opts) do
    mod = Keyword.get(opts, :broker, @broker)

    # The documented option is `:broker_start_opts`; the previous code read
    # the misspelled `:broker_start_opt`, silently ignoring the documented
    # key. Honor both, preferring the documented spelling.
    start_opts =
      Keyword.get(opts, :broker_start_opts) ||
        Keyword.get(opts, :broker_start_opt, [time_unit: @time_unit])

    [mod, opts, start_opts]
  end

  # Supervisor for the pooled connections; restart intensity and shutdown
  # are configurable via :max_restarts/:max_seconds/:shutdown.
  defp conn_sup(mod, opts) do
    child_opts = Keyword.take(opts, [:shutdown])
    conn = DBConnection.Connection.child_spec(mod, opts, :sojourn, child_opts)
    sup_opts = Keyword.take(opts, [:max_restarts, :max_seconds])
    sup_opts = [strategy: :simple_one_for_one] ++ sup_opts
    supervisor(Supervisor, [[conn], sup_opts])
  end

  # Transient one-shot worker that starts the initial connections.
  defp starter(opts) do
    worker(DBConnection.Sojourn.Starter, [opts], [restart: :transient])
  end

  # Asks the broker for a connection. `:queue` controls whether the caller is
  # willing to wait in the broker queue (ask) or wants an immediate answer
  # (nb_ask).
  defp ask(broker, opts) do
    timeout = Keyword.get(opts, :timeout, 5_000)
    info = {self(), timeout}

    case Keyword.get(opts, :queue, true) do
      true -> :sbroker.ask(broker, info)
      false -> :sbroker.nb_ask(broker, info)
    end
  end
end
|
deps/db_connection/lib/db_connection/sojourn.ex
| 0.827759
| 0.468487
|
sojourn.ex
|
starcoder
|
defmodule Dwolla.Transfer do
  @moduledoc """
  Functions for `transfers` endpoint.
  """

  alias Dwolla.Utils

  defstruct id: nil, created: nil, status: nil, amount: nil, metadata: nil,
            source_resource: nil, source_resource_id: nil,
            source_funding_source_id: nil, dest_resource: nil,
            dest_resource_id: nil, can_cancel: false

  @type t :: %__MODULE__{id: String.t,
                         created: String.t,
                         status: String.t, # "pending" | "processed" | "cancelled" | "failed" | "reclaimed"
                         amount: Dwolla.Transfer.Amount.t,
                         metadata: Dwolla.Transfer.Metadata.t,
                         source_resource: String.t,
                         source_resource_id: String.t,
                         source_funding_source_id: String.t,
                         dest_resource: String.t,
                         dest_resource_id: String.t,
                         can_cancel: boolean
                        }
  @type token :: String.t
  @type id :: String.t
  @type params :: %{required(atom) => any}
  @type error :: HTTPoison.Error.t | Dwolla.Errors.t | tuple
  @type location :: %{id: String.t}

  @endpoint "transfers"

  defmodule Amount do
    @moduledoc """
    Dwolla Transfer Amount data structure.
    """
    defstruct value: nil, currency: nil
    @type t :: %__MODULE__{value: String.t, currency: String.t}
  end

  defmodule Metadata do
    @moduledoc """
    Dwolla Transfer Metadata data structure.
    """
    defstruct vendor: nil, origin_trans_id: nil, title: nil, note: nil
    @type t :: %__MODULE__{vendor: String.t,
                           origin_trans_id: String.t,
                           title: String.t,
                           note: String.t
                          }
  end

  defmodule Failure do
    @moduledoc """
    Dwolla Transfer Failure data structure.
    """
    defstruct code: nil, description: nil
    @type t :: %__MODULE__{code: String.t, description: String.t}
  end

  @doc """
  Initiates a transfer.

  The parameters are verbose because of the many options available to the user
  for setting the source and destination of the funds in the `href` field.

  Parameters
  ```
  %{
    _links: %{
      source: %{
        href: "https://api-sandbox.dwolla.com/funding-sources/..."
      },
      destination: %{
        href: "https://api-sandbox.dwolla.com/funding-sources/..."
      }
    },
    amount: %{
      value: 100.00,
      currency: "USD"
    },
    metadata: %{
      vendor: "Acme Inc.",
      note: "Invoice #12314"
    }
  }
  ```
  """
  @spec initiate(token, params, any | nil) :: {:ok, location} | {:error, error}
  def initiate(token, params, idempotency_key \\ nil) do
    # When no explicit idempotency key is given, the params themselves seed
    # the idempotency header, so identical requests are deduplicated.
    headers = Utils.idempotency_header(idempotency_key || params)
    Dwolla.make_request_with_token(:post, @endpoint, token, params, headers)
    |> Utils.handle_resp(:transfer)
  end

  @doc """
  Gets a transfer by id.
  """
  @spec get(token, id) :: {:ok, Dwolla.Transfer.t} | {:error, error}
  def get(token, id) do
    endpoint = @endpoint <> "/#{id}"
    Dwolla.make_request_with_token(:get, endpoint, token)
    |> Utils.handle_resp(:transfer)
  end

  @doc """
  Gets reason for a transfer's failure.
  """
  # Spec fix: the previous spec said `Dwolla.Transfer.Failure` (a module
  # atom), not the struct type `Failure.t` that is actually returned.
  @spec get_transfer_failure_reason(token, id) :: {:ok, Dwolla.Transfer.Failure.t} | {:error, error}
  def get_transfer_failure_reason(token, id) do
    endpoint = @endpoint <> "/#{id}/failure"
    Dwolla.make_request_with_token(:get, endpoint, token)
    |> Utils.handle_resp(:failure)
  end

  @doc """
  Cancels a transfer.
  """
  @spec cancel(token, id) :: {:ok, Dwolla.Transfer.t} | {:error, error}
  def cancel(token, id) do
    endpoint = @endpoint <> "/#{id}"
    params = %{status: "cancelled"}
    headers = Utils.idempotency_header(params)
    Dwolla.make_request_with_token(:post, endpoint, token, params, headers)
    |> Utils.handle_resp(:transfer)
  end
end
|
lib/dwolla/transfer.ex
| 0.801509
| 0.568116
|
transfer.ex
|
starcoder
|
defmodule ElixirSense.Providers.Suggestion do
  @moduledoc """
  Provider responsible for finding suggestions for auto-completing.

  It provides suggestions based on a list of pre-defined reducers.

  ## Reducers

  A reducer is a function with the following spec:

      @spec reducer(
              String.t(),
              String.t(),
              State.Env.t(),
              Metadata.t(),
              acc()
            ) :: {:cont | :halt, acc()}

  ## Examples

  Adding suggestions:

      def my_reducer(hint, prefix, env, buffer_metadata, acc) do
        suggestions = ...
        {:cont, %{acc | result: acc.result ++ suggestions}}
      end

  Defining the only set of suggestions to be provided:

      def my_reducer(hint, prefix, env, buffer_metadata, acc) do
        suggestions = ...
        {:halt, %{acc | result: suggestions}}
      end

  Defining a list of suggestions to be provided and allow an extra
  limited set of additional reducers to run next:

      def my_reducer(hint, prefix, env, buffer_metadata, acc) do
        suggestions = ...
        {:cont, %{acc | result: fields, reducers: [:populate_common, :variables]}}
      end

  """

  alias ElixirSense.Core.Metadata
  alias ElixirSense.Core.State
  alias ElixirSense.Providers.Suggestion.Reducers

  # Catch-all suggestion shape used by plugins that don't fit a reducer type.
  @type generic :: %{
          type: :generic,
          label: String.t(),
          detail: String.t() | nil,
          documentation: String.t() | nil,
          insert_text: String.t() | nil,
          filter_text: String.t() | nil,
          snippet: String.t() | nil,
          priority: integer() | nil,
          kind: atom(),
          command: map()
        }

  @type suggestion ::
          generic()
          | Reducers.Common.attribute()
          | Reducers.Common.variable()
          | Reducers.Struct.field()
          | Reducers.Returns.return()
          | Reducers.Callbacks.callback()
          | Reducers.Protocol.protocol_function()
          | Reducers.Common.func()
          | Reducers.Common.mod()
          | Reducers.Params.param_option()
          | Reducers.TypeSpecs.type_spec()

  @type acc :: %{result: [suggestion], reducers: [atom], context: map}

  @type cursor_context :: %{
          text_before: String.t(),
          text_after: String.t(),
          at_module_body?: boolean()
        }

  # Reducers run in this exact order. A reducer can halt the chain or shrink
  # the set of reducers allowed to run after it (via acc.reducers), so the
  # ordering here is semantically significant — do not reorder casually.
  @reducers [
    ecto: &ElixirSense.Plugins.Ecto.reduce/5,
    structs_fields: &Reducers.Struct.add_fields/5,
    returns: &Reducers.Returns.add_returns/5,
    callbacks: &Reducers.Callbacks.add_callbacks/5,
    protocol_functions: &Reducers.Protocol.add_functions/5,
    overridable: &Reducers.Overridable.add_overridable/5,
    param_options: &Reducers.Params.add_options/5,
    typespecs: &Reducers.TypeSpecs.add_types/5,
    populate_common: &Reducers.Common.populate/5,
    variables: &Reducers.Common.add_variables/5,
    modules: &Reducers.Common.add_modules/5,
    functions: &Reducers.Common.add_functions/5,
    macros: &Reducers.Common.add_macros/5,
    variable_fields: &Reducers.Common.add_fields/5,
    attributes: &Reducers.Common.add_attributes/5,
    docs_snippets: &Reducers.DocsSnippets.add_snippets/5
  ]

  # Post-processing functions applied to every suggestion after reduction.
  @decorators [
    &ElixirSense.Plugins.Ecto.decorate/1
  ]

  @doc """
  Finds all suggestions for a hint based on context information.
  """
  @spec find(String.t(), State.Env.t(), Metadata.t(), cursor_context) :: [suggestion()]
  def find(hint, env, buffer_metadata, cursor_context) do
    # Start with every reducer enabled; reducers themselves may prune
    # acc.reducers to restrict which ones run after them.
    acc = %{result: [], reducers: Keyword.keys(@reducers), context: %{}}

    %{result: result} =
      Enum.reduce_while(@reducers, acc, fn {key, fun}, acc ->
        # A reducer only runs if it is still in the allowed set; otherwise it
        # is skipped without consuming the chain.
        if key in acc.reducers do
          fun.(hint, env, buffer_metadata, cursor_context, acc)
        else
          {:cont, acc}
        end
      end)

    # Let every decorator see every suggestion (currently only Ecto).
    for item <- result do
      Enum.reduce(@decorators, item, fn d, item -> d.(item) end)
    end
  end
end
|
lib/elixir_sense/providers/suggestion.ex
| 0.85022
| 0.409427
|
suggestion.ex
|
starcoder
|
defmodule Ash.Query.Function.GetPath do
  @moduledoc """
  Gets the value at the provided path in the value, which must be a map or embed.

  If you are using a datalayer that provides a `type` function (like AshPostgres), it is a good idea to
  wrap your call in that function, e.g `type(author[:bio][:title], :string)`, since data layers that depend
  on knowing types may not be able to infer the type from the path. Ash may eventually be able to figure out
  the type, in the case that the path consists of only embedded attributes.

  If an atom key is provided, access is *indiscriminate* of atoms vs strings. The atom key is checked first.
  If a string key is provided, that is the only thing that is checked. If the value will or may be a struct, be sure to use atoms.

  The data layer may handle this differently, for example, AshPostgres only checks
  strings at the data layer (because thats all it can be in the database anyway).

  Available in query expressions using bracket syntax, e.g `foo[:bar][:baz]`.
  """
  use Ash.Query.Function, name: :get_path, predicate?: true, no_inspect?: true

  # Argument types: a map value and a list path.
  def args,
    do: [
      [:map, {:array, :any}]
    ]

  # Collapses nested bracket access: (foo[:a])[:b] is folded into a single
  # GetPath with path [:a, :b]. Clause order matters — this must be tried
  # before the generic clauses below.
  def new([%__MODULE__{arguments: [inner_left, inner_right]} = get_path, right])
      when is_list(inner_right) and is_list(right) do
    {:ok, %{get_path | arguments: [inner_left, inner_right ++ right]}}
  end

  # Reject anything that cannot be (part of) a path.
  def new([_, right]) when not (is_list(right) or is_atom(right) or is_binary(right)) do
    {:error, "#{inspect(right)} is not a valid path to get"}
  end

  # Normalize a single atom/binary key into a one-element path.
  def new([left, right]) when not is_list(right) do
    new([left, [right]])
  end

  # Fall through to the default constructor provided by `use Ash.Query.Function`.
  def new([left, right]) do
    super([left, right])
  end

  # Walks the path through nested maps at evaluation time. Returns
  # {:known, value} when the lookup can be fully resolved, :unknown when a
  # non-map is encountered mid-path (the data layer must resolve it instead).
  def evaluate(%{arguments: [%{} = obj, path]}) when is_list(path) do
    Enum.reduce_while(path, {:known, obj}, fn key, {:known, obj} ->
      if is_map(obj) do
        value =
          if is_atom(key) do
            # Atom key is checked first, then its string form (see moduledoc).
            # NOTE(review): a `false`/`nil` value stored under the atom key
            # falls through to the string key here — confirm that is intended.
            Map.get(obj, key) || Map.get(obj, to_string(key))
          else
            Map.get(obj, to_string(key))
          end

        case value do
          nil ->
            # Missing key: the whole path resolves to a known nil.
            {:halt, {:known, nil}}

          value ->
            {:cont, {:known, value}}
        end
      else
        {:halt, :unknown}
      end
    end)
  end

  def evaluate(_), do: :unknown

  defimpl Inspect do
    import Inspect.Algebra

    # Renders the expression back in bracket syntax, e.g. `author[:bio][:title]`.
    def inspect(%{arguments: [value, path]}, opts) do
      path_items =
        path
        |> Enum.map(fn item ->
          concat(["[", to_doc(item, opts), "]"])
        end)
        |> concat()

      value
      |> to_doc(opts)
      |> concat(path_items)
    end
  end
end
|
lib/ash/query/function/get_path.ex
| 0.855323
| 0.634883
|
get_path.ex
|
starcoder
|
defmodule Aecore.Channel.Updates.ChannelTransferUpdate do
  @moduledoc """
  State channel update implementing transfers in the state channel. This update can be included in ChannelOffchainTx.
  This update allows for transfering tokens between peers in the state channel(later for transfers to offchain contract accounts)..
  """

  alias Aecore.Channel.Updates.ChannelTransferUpdate
  alias Aecore.Channel.ChannelOffChainUpdate
  alias Aecore.Chain.Chainstate
  alias Aecore.Account.AccountStateTree
  alias Aecore.Account.Account

  @behaviour ChannelOffChainUpdate

  @typedoc """
  Structure of the ChannelTransferUpdate type
  """
  @type t :: %ChannelTransferUpdate{
          from: binary(),
          to: binary(),
          amount: non_neg_integer()
        }

  @typedoc """
  The type of errors returned by this module
  """
  @type error :: {:error, String.t()}

  @doc """
  Definition of ChannelTransferUpdate structure

  ## Parameters
  - from: the offchain account where the transfer originates
  - to: the offchain account which is the destination of the transfer
  - amount: number of the tokens transfered between the peers
  """
  defstruct [:from, :to, :amount]

  @doc """
  Creates an ChannelTransferUpdate
  """
  @spec new(binary(), binary(), non_neg_integer()) :: ChannelTransferUpdate.t()
  def new(from, to, amount) do
    %ChannelTransferUpdate{
      from: from,
      to: to,
      amount: amount
    }
  end

  @doc """
  Deserializes ChannelTransferUpdate.
  """
  @spec decode_from_list(list(binary())) :: ChannelTransferUpdate.t()
  def decode_from_list([from, to, amount]) do
    %ChannelTransferUpdate{
      from: from,
      to: to,
      # Amount travels on the wire as an unsigned big-endian binary.
      amount: :binary.decode_unsigned(amount)
    }
  end

  @doc """
  Serializes ChannelTransferUpdate.
  """
  @spec encode_to_list(ChannelTransferUpdate.t()) :: list(binary())
  def encode_to_list(%ChannelTransferUpdate{
        from: from,
        to: to,
        amount: amount
      }) do
    [from, to, :binary.encode_unsigned(amount)]
  end

  @doc """
  Performs the transfer on the offchain chainstate. Returns an error if the transfer failed.
  """
  @spec update_offchain_chainstate!(Chainstate.t(), ChannelTransferUpdate.t()) ::
          Chainstate.t() | no_return()
  def update_offchain_chainstate!(
        %Chainstate{
          accounts: accounts
        } = chainstate,
        %ChannelTransferUpdate{
          from: from,
          to: to,
          amount: amount
        }
      ) do
    updated_accounts =
      accounts
      # Debit the sender (and bump their nonce) ...
      |> AccountStateTree.update(
        from,
        &update_initiator_account!(&1, amount)
      )
      # ... then credit the receiver.
      |> AccountStateTree.update(
        to,
        &Account.apply_transfer!(&1, nil, amount)
      )

    %Chainstate{chainstate | accounts: updated_accounts}
  end

  # Debits `amount` from the initiator and increments their nonce.
  @spec update_initiator_account!(Account.t(), non_neg_integer()) :: Account.t() | no_return()
  defp update_initiator_account!(account, amount) do
    account
    |> Account.apply_transfer!(nil, -amount)
    |> Account.apply_nonce!(account.nonce + 1)
  end

  @spec half_signed_preprocess_check(ChannelTransferUpdate.t(), map()) :: :ok | error()
  def half_signed_preprocess_check(
        %ChannelTransferUpdate{
          from: from,
          to: to,
          amount: amount
        },
        %{
          our_pubkey: correct_to,
          foreign_pubkey: correct_from
        }
      ) do
    cond do
      amount <= 0 ->
        {:error, "#{__MODULE__}: Can't transfer zero or negative amount of tokens"}

      from != correct_from ->
        {:error,
         "#{__MODULE__}: Transfer must originate from the initiator of the update (#{
           inspect(correct_from)
         }), got #{inspect(from)}"}

      to != correct_to ->
        {:error,
         "#{__MODULE__}: Transfer must be to the peer responding to the update (#{
           inspect(correct_to)
         }), got #{inspect(to)}"}

      true ->
        :ok
    end
  end

  def half_signed_preprocess_check(%ChannelTransferUpdate{}, _) do
    {:error,
     "#{__MODULE__}: Missing keys in the opts dictionary. This probably means that the update was unexpected."}
  end

  @doc """
  Validates an update considering state before applying it to the provided chainstate.
  """
  @spec fully_signed_preprocess_check(
          Chainstate.t() | nil,
          ChannelTransferUpdate.t(),
          non_neg_integer()
        ) :: :ok | error()
  def fully_signed_preprocess_check(
        %Chainstate{accounts: accounts},
        %ChannelTransferUpdate{from: from, to: to, amount: amount},
        channel_reserve
      ) do
    # BUGFIX: the balance was previously fetched with a struct match BEFORE
    # the has_key? checks, so a missing `from` account raised a MatchError
    # instead of returning the intended {:error, _}. Check membership first,
    # then fetch the balance.
    cond do
      !AccountStateTree.has_key?(accounts, from) ->
        {:error, "#{__MODULE__}: Transfer initiator is not a party of this channel"}

      !AccountStateTree.has_key?(accounts, to) ->
        {:error, "#{__MODULE__}: Transfer responder is not a party of this channel"}

      true ->
        check_initiator_balance(accounts, from, amount, channel_reserve)
    end
  end

  def fully_signed_preprocess_check(nil, %ChannelTransferUpdate{}, _channel_reserve) do
    {:error, "#{__MODULE__}: OffChain Chainstate must exist"}
  end

  # The initiator must keep at least `channel_reserve` tokens after the
  # transfer. Only called once membership of `from` has been verified.
  @spec check_initiator_balance(
          AccountStateTree.accounts_state(),
          binary(),
          non_neg_integer(),
          non_neg_integer()
        ) :: :ok | error()
  defp check_initiator_balance(accounts, from, amount, channel_reserve) do
    %Account{balance: from_balance} = AccountStateTree.get(accounts, from)

    if from_balance - amount < channel_reserve do
      {:error,
       "#{__MODULE__}: Transfer initiator tried to transfer #{amount} tokens but can transfer at most #{
         from_balance - channel_reserve
       } tokens"}
    else
      :ok
    end
  end
end
|
apps/aecore/lib/aecore/channel/updates/channel_transfer_update.ex
| 0.852798
| 0.415936
|
channel_transfer_update.ex
|
starcoder
|
defmodule MerklePatriciaTree.Trie.Node do
  @moduledoc """
  This module encodes and decodes nodes from a
  trie encoding back into RLP form. We effectively implement
  `c(I, i)` from the Yellow Paper.

  TODO: Add richer set of tests, esp. in re: storage and branch values.
  """

  alias MerklePatriciaTree.Trie
  alias MerklePatriciaTree.Trie.Storage

  @type trie_node ::
          :empty
          | {:leaf, [integer()], binary()}
          | {:ext, [integer()], binary()}
          | {:branch, [binary()]}

  @doc """
  Given a node, this function will encode the node
  and put the value to storage (for nodes that are
  greater than 32 bytes encoded). This implements
  `c(I, i)`, Eq.(179) of the Yellow Paper.

  ## Examples

      iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
      iex> MerklePatriciaTree.Trie.Node.encode_node(:empty, trie)
      <<>>

      iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
      iex> MerklePatriciaTree.Trie.Node.encode_node({:leaf, [5,6,7], "ok"}, trie)
      ["5g", "ok"]

      iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
      iex> MerklePatriciaTree.Trie.Node.encode_node({:branch, [<<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>]}, trie)
      ["", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""]

      iex> trie = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
      iex> MerklePatriciaTree.Trie.Node.encode_node({:ext, [1, 2, 3], <<>>}, trie)
      [<<17, 35>>, ""]
  """
  @spec encode_node(trie_node, Trie.t()) :: binary()
  def encode_node(trie_node, trie) do
    trie_node
    |> encode_node_type()
    |> Storage.put_node(trie)
  end

  # Node bodies per Eq.(179): the empty node is the empty binary, leaf and
  # extension nodes pair a hex-prefix-encoded key (leaf flag true/false)
  # with their payload, and a branch node is simply its 17 slots.
  defp encode_node_type(:empty), do: <<>>

  defp encode_node_type({:leaf, key, value}) do
    [HexPrefix.encode({key, true}), value]
  end

  defp encode_node_type({:ext, shared_prefix, next_node}) do
    [HexPrefix.encode({shared_prefix, false}), next_node]
  end

  defp encode_node_type({:branch, branches}) when length(branches) == 17 do
    branches
  end

  @doc """
  Decodes the root of a given trie, effectively
  inverting the encoding from `c(I, i)` defined in
  Eq.(179) fo the Yellow Paper.

  ## Examples

      iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db(), <<128>>)
      iex> |> MerklePatriciaTree.Trie.Node.decode_trie()
      :empty

      iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db(), <<198, 130, 53, 103, 130, 111, 107>>)
      iex> |> MerklePatriciaTree.Trie.Node.decode_trie()
      {:leaf, [5,6,7], "ok"}

      iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db(), <<209, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128>>)
      iex> |> MerklePatriciaTree.Trie.Node.decode_trie()
      {:branch, [<<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>]}

      iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db(), <<196, 130, 17, 35, 128>>)
      iex> |> MerklePatriciaTree.Trie.Node.decode_trie()
      {:ext, [1, 2, 3], <<>>}
  """
  @spec decode_trie(Trie.t()) :: trie_node
  def decode_trie(trie) do
    case Storage.get_node(trie) do
      # Absent, empty, or not-found storage entries all decode to the
      # empty node.
      empty when empty in [nil, <<>>, :not_found] ->
        :empty

      branches when length(branches) == 17 ->
        {:branch, branches}

      # A two-element body is a leaf or extension node; the hex-prefix
      # flag distinguishes the two.
      [encoded_key, value] ->
        case HexPrefix.decode(encoded_key) do
          {prefix, true} -> {:leaf, prefix, value}
          {prefix, false} -> {:ext, prefix, value}
        end
    end
  end
end
|
apps/merkle_patricia_tree/lib/trie/node.ex
| 0.665845
| 0.576333
|
node.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.