defmodule Day11 do
@doc """
  Calculate the square, of any size, with the largest total power.
## Examples
iex> Day11.largest_total_square(7803)
{{230, 272, 17}, 125}
"""
  def largest_total_square(serial) do
    grid = build_grid(serial)
acc = {{0, 0, 0}, 0}
Enum.reduce(1..299, acc, fn x, acc ->
Enum.reduce(1..299, acc, fn y, acc ->
find_largest(x, y, grid, acc)
end)
end)
end
  def find_largest(x, y, grid, acc) do
    max_square_size = min(301 - x, 301 - y)
level = grid[{x, y}]
{best, _} =
Enum.reduce(2..max_square_size, {acc, level},
fn square_size, {{_coord, prev_level} = prev, level} ->
level = sum_square(x, y, square_size, grid, level)
if level > prev_level do
{{{x, y, square_size}, level}, level}
else
{prev, level}
end
end)
best
end
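  # sum_square/5 grows an (n - 1)-sized square anchored at {x, y} into an
  # n-sized one incrementally: it adds the new bottom row (x0..x0 + n - 2 at
  # the new y) and the new right column (y0..y0 + n - 1 at the new x) to the
  # running total, so each extra square size costs O(n) work instead of O(n^2).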
def sum_square(x0, y0, square_size, grid, acc) do
    y = y0 + square_size - 1
acc = Enum.reduce(x0..x0 + square_size-2, acc,
fn x, acc ->
acc + grid[{x, y}]
end)
    x = x0 + square_size - 1
acc = Enum.reduce(y0..y0 + square_size-1, acc,
fn y, acc ->
acc + grid[{x, y}]
end)
acc
end
@doc """
  Calculate the 3 x 3 square with the largest total power.
## Examples
iex> Day11.largest_total_power(18)
{{33, 45}, 29}
iex> Day11.largest_total_power(42)
{{21, 61}, 30}
iex> Day11.largest_total_power(7803)
{{20, 51}, 31}
"""
  def largest_total_power(serial) do
    grid = build_grid(serial)
for x <- 1..298,
y <- 1..298 do
{{x, y}, sum_three_by_three(x, y, grid)}
end
|> Enum.max_by(&elem(&1, 1))
end
  def build_grid(serial) do
for x <- 1..300,
y <- 1..300,
into: %{} do
{{x, y}, cell_power_level(x, y, serial)}
end
end
@doc """
  Sum of the total cell power of a 3 x 3 square.
## Examples
iex> Day11.sum_three_by_three(21, 61, Day11.build_grid(42))
30
"""
  def sum_three_by_three(x0, y0, grid) do
Enum.reduce(x0..x0+2, 0, fn x, acc ->
Enum.reduce(y0..y0+2, acc, fn y, acc ->
acc + grid[{x, y}]
end)
end)
end
@doc """
Calculate cell power level.
## Examples
iex> Day11.cell_power_level(3, 5, 8)
4
iex> Day11.cell_power_level(122, 79, 57)
-5
iex> Day11.cell_power_level(217, 196, 39)
0
iex> Day11.cell_power_level(101, 153, 71)
4
iex> Day11.cell_power_level(21, 61, 42)
4
iex> Day11.cell_power_level(21, 62, 42)
3
iex> Day11.cell_power_level(21, 63, 42)
3
iex> Day11.cell_power_level(22, 61, 42)
3
iex> Day11.cell_power_level(22, 62, 42)
3
iex> Day11.cell_power_level(22, 63, 42)
3
iex> Day11.cell_power_level(23, 61, 42)
3
iex> Day11.cell_power_level(23, 62, 42)
4
iex> Day11.cell_power_level(23, 63, 42)
4
"""
  def cell_power_level(x, y, serial) do
    rack_id = x + 10
    # ((rack_id * y) + serial) * rack_id; keep only the hundreds digit, then subtract 5
    power = rack_id * (rack_id * y + serial)
    rem(div(power, 100), 10) - 5
end
end
# Source: day11/lib/day11.ex
defmodule Posexional.Field.TypedField do
@moduledoc """
  This module represents a single field in a row of a positional file.
"""
use Timex
alias Posexional.Field
alias Posexional.Field.Value
defstruct [:name, :field_value, :type, :opts]
@type field_type() ::
:id
| :binary_id
| :integer
| :float
| :boolean
| :string
| :binary
| :array
| :list
| :decimal
| :date
| :time
| :datetime
| :naive_datetime
| :utc_datetime
@type t :: %__MODULE__{}
@spec new(atom(), field_type(), integer(), Keyword.t()) :: %Posexional.Field.TypedField{}
def new(name, type, size, opts \\ []) do
value = Value.new(name, size, opts)
%__MODULE__{name: name, field_value: value, type: type, opts: opts}
end
@spec parse!(String.t(), field_type, map) ::
integer()
| String.t()
| float()
| Date.t()
| Time.t()
| NaiveDateTime.t()
| DateTime.t()
| boolean()
| list()
| any()
defp parse!(value_str, type, opts)
defp parse!(value_str, _type, %{parser: parser}) when is_function(parser, 1) do
parser.(value_str)
rescue
_ ->
reraise ArgumentError, [message: "The provided parser could not parse value #{value_str}"], __STACKTRACE__
end
defp parse!(value_str, type, %{parser: parser}) when is_function(parser, 2) do
parser.(value_str, type)
rescue
_ ->
reraise ArgumentError,
[message: "The provided parser could not parse value #{value_str} of type #{type}"],
__STACKTRACE__
end
defp parse!(value_str, :id, opts), do: parse!(value_str, :integer, opts)
defp parse!(value_str, :binary_id, _opts), do: value_str
defp parse!(value_str, :integer, opts) do
base = Map.get(opts, :base, 10)
case Integer.parse(value_str, base) do
{int, ""} -> int
      _ -> raise ArgumentError, "#{value_str} is not a valid integer. Provide the :base option if the base is not 10."
end
end
defp parse!(value_str, :float, _opts) do
case Float.parse(value_str) do
{float, ""} -> float
_ -> raise ArgumentError, "#{value_str} is not a valid float number."
end
end
defp parse!("true", :boolean, _opts), do: true
defp parse!("false", :boolean, _opts), do: false
defp parse!(value_str, :boolean, _opts), do: raise(ArgumentError, "#{value_str} is not a valid boolean.")
defp parse!(value_str, :string, _opts), do: value_str
defp parse!(value_str, :binary, _opts), do: value_str
defp parse!(value_str, :array, opts), do: parse!(value_str, :list, opts)
defp parse!(value_str, :decimal, opts), do: parse!(value_str, :float, opts)
defp parse!(value_str, :list, %{separator: separator}) when is_binary(separator) do
value_str
|> String.split(separator)
|> Enum.reject(&(&1 == ""))
end
defp parse!(_value_str, :list, _opts), do: raise(ArgumentError, "Provide a :separator option")
defp parse!(value_str, :date, _opts = %{format_in: format_in}) do
datetime =
case Timex.parse(value_str, format_in) do
{:ok, datetime} -> datetime
{:error, _reason} -> Timex.parse!(value_str, format_in, :strftime)
end
NaiveDateTime.to_date(datetime)
rescue
_ ->
reraise ArgumentError,
[message: "#{value_str} could not be parsed as date with format #{format_in}"],
__STACKTRACE__
end
defp parse!(_value_str, :date, _opts), do: raise(ArgumentError, "Provide a :format_in option")
defp parse!(value_str, :time, _opts = %{format_in: format_in}) do
datetime =
case Timex.parse(value_str, format_in) do
{:ok, datetime} -> datetime
{:error, _reason} -> Timex.parse!(value_str, format_in, :strftime)
end
NaiveDateTime.to_time(datetime)
rescue
_ ->
reraise ArgumentError,
[message: "#{value_str} could not be parsed as time with format #{format_in}"],
__STACKTRACE__
end
defp parse!(_value_str, :time, _opts), do: raise(ArgumentError, "Provide a :format_in option")
defp parse!(value_str, :naive_datetime, _opts = %{format_in: format_in}) do
case Timex.parse(value_str, format_in) do
{:ok, datetime} -> datetime
{:error, _reason} -> Timex.parse!(value_str, format_in, :strftime)
end
rescue
_ ->
reraise ArgumentError,
[message: "#{value_str} could not be parsed as naive_datetime with format #{format_in}"],
__STACKTRACE__
end
defp parse!(value_str, :datetime, opts = %{format_in: format_in}) do
case Timex.parse(value_str, format_in) do
{:ok, datetime} -> datetime
{:error, _reason} -> Timex.parse!(value_str, format_in, :strftime)
end
|> Timex.to_datetime(Map.get(opts, :timezone, :utc))
|> case do
{:error, reason} -> raise ArgumentError, inspect(reason)
datetime -> datetime
end
rescue
_ ->
reraise ArgumentError,
[message: "#{value_str} could not be parsed as datetime with format #{format_in}"],
__STACKTRACE__
end
defp parse!(value_str, :utc_datetime, opts), do: parse!(value_str, :datetime, Map.put(opts, :timezone, :utc))
defp parse!(value_str, type, _opts),
do: raise(ArgumentError, "Provide a :parser option as a function to customize type #{type} to value #{value_str}")
@spec read(t, binary) :: any()
def read(field, content) do
content
|> Field.depositionalize(field.field_value)
|> String.trim()
|> parse!(field.type, Map.new(field.opts))
end
end
defimpl Posexional.Protocol.FieldLength, for: Posexional.Field.TypedField do
def length(%Posexional.Field.TypedField{field_value: value}), do: Posexional.Protocol.FieldLength.length(value)
end
defimpl Posexional.Protocol.FieldName, for: Posexional.Field.TypedField do
def name(%Posexional.Field.TypedField{field_value: value}), do: Posexional.Protocol.FieldName.name(value)
end
defimpl Posexional.Protocol.FieldSize, for: Posexional.Field.TypedField do
def size(%Posexional.Field.TypedField{field_value: value}), do: Posexional.Protocol.FieldSize.size(value)
end
defimpl Posexional.Protocol.FieldWrite, for: Posexional.Field.TypedField do
def write(field, value), do: Posexional.Field.Value.write(field.field_value, value)
end
defimpl Posexional.Protocol.FieldRead, for: Posexional.Field.TypedField do
def read(field, content), do: Posexional.Field.TypedField.read(field, content)
end
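# Usage sketch for the module above. Hedged: `Posexional.Field.depositionalize/2`
# is defined elsewhere in the library, and the directives shown assume Timex's
# default format syntax; only the constructor and the option maps consumed by
# parse!/3 are taken from this file.
#
#     field = Posexional.Field.TypedField.new(:born_on, :date, 10, format_in: "{YYYY}-{0M}-{0D}")
#     Posexional.Field.TypedField.read(field, "1990-01-02")
#     #=> ~D[1990-01-02]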
# Source: lib/posexional/field/typed_field.ex
defmodule P7777776.ConfusionMatrix do
@moduledoc """
  A multi-class confusion matrix.
  Accumulates counts of {actual, predicted} class pairs and derives metrics
  from them: accuracy, per-class precision and recall, and their macro and
  frequency-weighted averages.
"""
alias P7777776.ConfusionMatrix
defstruct classes: MapSet.new(), counts: %{}, tp: 0, total: 0, n_actual: %{}, n_predicted: %{}
def new(), do: %ConfusionMatrix{}
def add(%ConfusionMatrix{} = cm, actual, predicted, n \\ 1) do
%ConfusionMatrix{
classes: cm.classes |> MapSet.put(actual) |> MapSet.put(predicted),
counts: inc(cm.counts, {actual, predicted}, n),
total: cm.total + n,
tp:
cm.tp +
if actual == predicted do
n
else
0
end,
n_actual: inc(cm.n_actual, actual, n),
n_predicted: inc(cm.n_predicted, predicted, n)
}
end
defp inc(map, key, n), do: Map.update(map, key, n, fn c -> c + n end)
def classes(%ConfusionMatrix{classes: classes}), do: MapSet.to_list(classes)
def empty?(%ConfusionMatrix{total: total}), do: total == 0
def total(%ConfusionMatrix{total: total}), do: total
def tp(%ConfusionMatrix{tp: tp}), do: tp
def tp(%ConfusionMatrix{counts: counts}, class), do: Map.get(counts, {class, class}, 0)
def count(%ConfusionMatrix{counts: counts}, actual, predicted),
do: Map.get(counts, {actual, predicted}, 0)
def n_predicted(%ConfusionMatrix{n_predicted: n_predicted}, class),
do: Map.get(n_predicted, class, 0)
# aka support
def n_actual(%ConfusionMatrix{n_actual: n_actual}, class), do: Map.get(n_actual, class, 0)
# precision for a class: tp(class) / n_predicted(class)
def precision(%ConfusionMatrix{} = cm, class) do
case n_predicted(cm, class) do
n when n > 0 -> tp(cm, class) / n
_ -> nil
end
end
# recall for a class: tp(class) / n_actual(class)
def recall(%ConfusionMatrix{} = cm, class) do
case n_actual(cm, class) do
n when n > 0 -> tp(cm, class) / n
_ -> nil
end
end
def accuracy(%ConfusionMatrix{tp: tp, total: total}) when total > 0, do: tp / total
def accuracy(_), do: nil
def macro_avg_precision(cm), do: weighted_sum(cm, &precision/2, &uniformly_weighted/2)
def macro_avg_recall(cm), do: weighted_sum(cm, &recall/2, &uniformly_weighted/2)
def weighted_avg_precision(cm), do: weighted_sum(cm, &precision/2, &frequency_weighted/2)
def weighted_avg_recall(cm), do: weighted_sum(cm, &recall/2, &frequency_weighted/2)
defp uniformly_weighted(%ConfusionMatrix{classes: classes}, _), do: 1 / MapSet.size(classes)
defp frequency_weighted(%ConfusionMatrix{total: total} = cm, class),
do: n_actual(cm, class) / total
defp weighted_sum(%ConfusionMatrix{classes: classes} = cm, metric, weight) do
case MapSet.size(classes) do
n when n > 0 ->
classes
|> Enum.map(fn class ->
case metric.(cm, class) do
nil -> 0
m -> m * weight.(cm, class)
end
end)
|> Enum.sum()
_ ->
nil
end
end
end
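# Usage sketch, hand-traced against the functions above:
#
#     alias P7777776.ConfusionMatrix, as: CM
#     cm =
#       CM.new()
#       |> CM.add(:cat, :cat, 3)
#       |> CM.add(:cat, :dog, 1)
#       |> CM.add(:dog, :dog, 2)
#     CM.accuracy(cm)         #=> 5 / 6
#     CM.precision(cm, :cat)  #=> 3 / 3 = 1.0
#     CM.recall(cm, :cat)     #=> 3 / 4 = 0.75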
# Source: lib/p7777776_confusion_matrix.ex
defmodule Brainfux.Preprocessor do
@moduledoc """
Preprocessor of brainfuck code.
  `process!/1` runs every preprocessing step.
"""
alias Brainfux.Preprocessor.Base
alias Brainfux.State
@spec process!(String.t) :: {State.t, String.t} | none
def process!(raw_code) do
raw_code
|> Base.check_brackets!()
|> Base.strip_noncode_chars()
|> Base.trim_trailing_reducible_part()
|> Base.sumup_plusminus()
|> Base.remove_plus_or_minus_before_read()
|> Base.compute_deterministic_part()
end
end
defmodule Brainfux.Preprocessor.Base do
@moduledoc """
Basic functions of preprocessing code.
These functions are used by `Brainfux.Preprocessor.process!/1`.
"""
alias Brainfux.{Executor, State}
@spec check_brackets!(String.t) :: String.t | none
def check_brackets!(code) do
check_brackets!(0, code, 0)
code
end
@spec check_brackets!(non_neg_integer, String.t, non_neg_integer) ::
:ok | none
defp check_brackets!(_, "", 0), do: :ok
defp check_brackets!(_, "", depth) do
raise CompileError, description: ~s(There are #{depth} unmatched "[")
end
defp check_brackets!(position, "]" <> _, 0) do
raise CompileError, description: ~s(Unexpected "]" at position: #{position})
end
defp check_brackets!(position, "]" <> rest, depth) do
check_brackets!(position + 1, rest, depth - 1)
end
defp check_brackets!(position, "[" <> rest, depth) do
check_brackets!(position + 1, rest, depth + 1)
end
defp check_brackets!(position, <<_::binary-size(1)>> <> rest, depth) do
check_brackets!(position + 1, rest, depth)
end
@spec strip_noncode_chars(String.t) :: String.t
def strip_noncode_chars(code) do
String.replace(code, ~R/[^+\-<>,\.\[\]]/, "")
end
@spec trim_trailing_reducible_part(String.t) :: String.t
def trim_trailing_reducible_part(code) do
last_part = code |> String.split(".") |> List.last
case skip_to_close_bracket(last_part, 0, "", "") do
"" -> code
reducible_part -> String.trim_trailing(code, reducible_part)
end
end
@spec skip_to_close_bracket(String.t, integer, String.t, String.t) :: String.t
defp skip_to_close_bracket(code, depth, inner, outer) do
case Regex.run(~R/^([^\[\]]*([\[\]]))(.*)/, code) do
nil ->
outer <> code
[_, match, "[", rest] ->
skip_to_close_bracket(rest, depth + 1, inner, outer <> match)
[_, match, "]", rest] ->
if depth == 0 do
skip_to_close_bracket(rest, 0, inner <> outer <> match, "")
else
skip_to_close_bracket(rest, depth - 1, inner, outer <> match)
end
end
end
@spec sumup_plusminus(String.t) :: String.t
def sumup_plusminus(code) do
case Regex.replace(~R/\+\-|\-\+|><|<>/, code, "") do
^code -> code
stripped_once -> sumup_plusminus(stripped_once)
end
end
@spec remove_plus_or_minus_before_read(String.t) :: String.t
def remove_plus_or_minus_before_read(code) do
Regex.replace(~R/([\+\-]+),/, code, ",")
end
@spec compute_deterministic_part(String.t) :: {State.t, String.t}
def compute_deterministic_part(code) do
case Regex.run(~R/^[\+\-<>\.]+/, code) do
nil ->
{%State{}, code}
[deterministic_part] ->
state = Executor.execute(%State{}, deterministic_part)
rest = String.trim_leading(code, deterministic_part)
{state, rest}
end
end
end
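# Worked examples for the individual steps, traced against the regexes and
# recursion above:
#
#     Base.strip_noncode_chars("a+b-c.")             #=> "+-."
#     Base.sumup_plusminus("++--+<><>")              #=> "+"  (pairs cancel until a fixpoint)
#     Base.remove_plus_or_minus_before_read("+++,")  #=> ","
#     Base.check_brackets!("[[]")                    # raises CompileError (1 unmatched "[")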
# Source: lib/brainfux/preprocessor.ex
defmodule Staxx.Proxy.Chain.Storage.Record do
@moduledoc """
  Details of a chain process that will be stored in the DB
"""
alias Staxx.Proxy.Chain.State
alias Staxx.Proxy.Chain.Storage
@type t :: %__MODULE__{
id: binary,
status: State.status(),
config: map(),
chain_details: map(),
deploy_data: map(),
deploy_step: map(),
deploy_hash: binary
}
defstruct id: nil,
status: :initializing,
config: nil,
chain_details: nil,
deploy_data: nil,
deploy_step: nil,
deploy_hash: nil
@doc """
  Tries to load existing data from the DB and applies the state status
"""
@spec from_state(State.t()) :: t()
def from_state(%State{id: id, status: status}) do
case Storage.get(id) do
nil ->
%__MODULE__{id: id, status: status}
record ->
%__MODULE__{record | status: status}
end
end
@spec status(t(), State.status()) :: t()
def status(%__MODULE__{} = record, status),
do: %__MODULE__{record | status: status}
@spec config(t(), map()) :: t()
def config(%__MODULE__{} = record, config),
do: %__MODULE__{record | config: config}
@spec chain_details(t(), term()) :: t()
def chain_details(%__MODULE__{} = record, details),
do: %__MODULE__{record | chain_details: details}
@spec deploy_step(t(), map()) :: t()
def deploy_step(%__MODULE__{} = record, step),
do: %__MODULE__{record | deploy_step: step}
@spec deploy_hash(t(), binary()) :: t()
def deploy_hash(%__MODULE__{} = record, hash),
do: %__MODULE__{record | deploy_hash: hash}
@spec deploy_data(t(), term()) :: t()
def deploy_data(%__MODULE__{} = record, data),
do: %__MODULE__{record | deploy_data: data}
@spec merge_deploy_details(t(), t()) :: t()
def merge_deploy_details(%__MODULE__{} = record, %__MODULE__{
deploy_data: deploy_data,
deploy_step: deploy_step,
deploy_hash: deploy_hash
}) do
%__MODULE__{
record
| deploy_data: deploy_data,
deploy_step: deploy_step,
deploy_hash: deploy_hash
}
end
def merge_deploy_details(%__MODULE__{} = rec, _),
do: rec
@spec store(t()) :: t()
def store(%__MODULE__{} = rec) do
Storage.store(rec)
rec
end
end
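# Usage sketch. Hedged: `Storage.get/1` and `Storage.store/1` hit the real
# storage layer, and the hash below is a placeholder; this only shows how the
# setters compose:
#
#     alias Staxx.Proxy.Chain.Storage.Record
#     state
#     |> Record.from_state()
#     |> Record.status(:ready)
#     |> Record.deploy_hash("0x...")
#     |> Record.store()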
# Source: apps/proxy/lib/proxy/chain/storage/record.ex
defmodule LfuCache do
@moduledoc """
  This module implements a simple LFU (least frequently used) cache, using
  two ETS tables for it.
  To use it, you need to start it:
      iex> LfuCache.start_link(:my_cache, 1000)
  Or add it to your supervisor tree, like: `worker(LfuCache, [:my_cache, 1000])`
  ## Using
      iex> LfuCache.start_link(:my_cache, 1000)
      {:ok, #PID<0.60.0>}
      iex> LfuCache.put(:my_cache, "id", "value")
      :ok
      iex> LfuCache.get(:my_cache, "id", touch = false)
      "value"
  ## Design
  The first ETS table stores the key-value pairs; the second stores the access
  frequency of each key, so the least frequently used entry can be found first.
"""
use GenServer
@table LfuCache
defstruct table: nil, freq_table: nil, size: 0
@doc """
  Creates an LFU cache of the given size as part of a supervision tree with a registered name
"""
def start_link(name, size) do
Agent.start_link(__MODULE__, :init, [name, size], [name: name])
end
@doc """
Stores the given `value` under `key` in `cache`. If `cache` already has `key`, the stored
  `value` is replaced by the new one and its access frequency is incremented.
"""
def put(name, key, value), do: Agent.get(name, __MODULE__, :handle_put, [key, value])
@doc """
Updates a `value` in `cache`. If `key` is not present in `cache` then nothing is done.
  `touch` defines whether the access frequency in the LFU should be updated.
  The function assumes that the element exists in the cache.
"""
def update(name, key, value, touch \\ true) do
if :ets.update_element(name, key, {3, value}) do
touch && Agent.get(name, __MODULE__, :handle_touch, [key])
end
:ok
end
@doc """
Returns the `value` associated with `key` in `cache`. If `cache` does not contain `key`,
  returns nil. `touch` defines whether the access frequency in the LFU should be updated.
"""
def get(name, key, touch \\ true) do
case :ets.lookup(name, key) do
[{_, _, value}] ->
touch && Agent.get(name, __MODULE__, :handle_touch, [key])
value
[] ->
nil
end
end
@doc """
Removes the entry stored under the given `key` from cache.
"""
def delete(name, key), do: Agent.get(name, __MODULE__, :handle_delete, [key])
@doc """
Returns the contents of the LFU Cache.
  For debugging / testing use only.
"""
def debug(name) do
Agent.get(name, __MODULE__, :handle_debug, [])
end
def handle_debug(state) do
get_all(state, :ets.first(state.freq_table), [])
end
@doc false
defp get_all(state, lastresult, result) do
case lastresult do
:"$end_of_table" -> result
uniq ->
[{^uniq, key}] = :ets.lookup(state.freq_table, uniq)
case :ets.lookup(state.table, key) do
[{^key, ^uniq, value}] ->
get_all(state, :ets.next(state.freq_table, uniq), result ++ [{key, value}])
end
end
end
@doc false
def init(name, size) do
freq_table = :"#{name}_freq"
:ets.new(freq_table, [:named_table, :ordered_set])
:ets.new(name, [:named_table, :public, {:read_concurrency, true}])
%LfuCache{freq_table: freq_table, table: name, size: size}
end
@doc false
def handle_put(state, key, value) do
case :ets.lookup(state.table, key) do
[{_, old_freq, _}] ->
update_entry(state, key, old_freq, value)
_ ->
new_entry(state, key, value)
clean_oversize(state)
end
:ok
end
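  # New entries are seeded with a random float in (0, 1) rather than 0: the
  # frequency table is an ordered_set keyed by frequency, so a practically
  # unique float avoids key collisions between entries that share an access
  # count, while still sorting below any touched entry (frequency >= 1.0).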
defp new_entry(state, key, value) do
    freq = :rand.uniform()
:ets.insert(state.freq_table, {freq, key})
:ets.insert(state.table, {key, freq, value})
end
defp update_entry(state, key, freq, value) do
:ets.delete(state.freq_table, freq)
newfreq = freq + 1.0
:ets.insert(state.freq_table, {newfreq, key})
:ets.update_element(state.table, key, [{2, newfreq}, {3, value}])
end
@doc false
def handle_touch(state, key) do
case :ets.lookup(state.table, key) do
[{_, old_freq, _}] ->
new_freq = old_freq + 1.0
:ets.delete(state.freq_table, old_freq)
:ets.insert(state.freq_table, {new_freq, key})
:ets.update_element(state.table, key, [{2, new_freq}])
_ ->
nil
end
:ok
end
@doc false
def handle_delete(state, key) do
case :ets.lookup(state.table, key) do
[{_, old_freq, _}] ->
:ets.delete(state.freq_table, old_freq)
:ets.delete(state.table, key)
_ ->
nil
end
:ok
end
defp clean_oversize(%{freq_table: freq_table, table: table, size: size}) do
if :ets.info(table, :size) > size do
least_used = :ets.first(freq_table)
[{_, old_key}] = :ets.lookup(freq_table, least_used)
:ets.delete(freq_table, least_used)
:ets.delete(table, old_key)
true
else nil end
end
end
# Source: lib/lfu_cache.ex
defmodule Geolix do
@moduledoc """
IP location lookup provider.
## Usage
Fetching information for a single IP is done by passing it as a binary or
tuple to `Geolix.lookup/1`:
iex> Geolix.lookup("127.0.0.1")
%{
city: %{...},
country: %{...}
}
The result will be a map with the `:id` of each configured database as the
key and the lookup result as the value.
If you are only interested in the result of a specific database you can use
`Geolix.lookup/2`:
iex> Geolix.lookup({127, 0, 0, 1}, where: :my_database)
%{...}
The result structure of each database is specific to the adapter used.
### Lookup Options
There are some options you can pass to `Geolix.lookup/2` to modify the lookup
behaviour:
- `:where` - Lookup information in a single registered database
The adapter used can require and/or understand additional options. To
accommodate this the options are passed unmodified to the adapter's own
lookup function.
## Database Configuration
To get started you need to define one or more `:databases` to use for
lookups. Each database definition is a map with at least two fields:
- `:id` - an identifier for this database, usable to limit lookups to a
single database if you have defined more than one
- `:adapter` - the adapter module used to handle lookup requests. See the
part "Adapters" in for additional information
Depending on the adapter you may need to provide additional values.
### Configuration (static)
One option for configuration is using a static configuration, i.e. for two
databases handled by the adapter `MyAdapter`:
config :geolix,
databases: [
%{
id: :city,
adapter: MyAdapter,
source: "/absolute/path/to/city.db"
},
%{
id: :country,
adapter: MyAdapter,
source: "/absolute/path/to/country.db"
}
]
### Configuration (dynamic)
If there are any reasons you cannot use a pre-defined configuration you can
also configure an initializer module to be called before starting the
top-level supervisor or alternatively for each individual database.
This may be the most suitable configuration if you have the database located
in the `:priv_dir` of your application:
# {mod, fun}
config :geolix,
init: {MyInitModule, :my_init_mf_toplevel}
config :geolix,
databases: [
%{
id: :dynamic_country,
adapter: MyAdapter,
init: {MyInitModule, :my_init_mf_database}
}
]
# {mod, fun, args}
config :geolix,
init: {MyInitModule, :my_init_mfargs_toplevel, [:foo, :bar]}
config :geolix,
databases: [
%{
id: :dynamic_country,
adapter: MyAdapter,
init: {MyInitModule, :my_init_mfargs_database, [:foo, :bar]}
}
]
# initializer module
defmodule MyInitModule do
@spec my_init_mf_toplevel() :: :ok
def my_init_mf_toplevel(), do: my_init_mfargs_toplevel(:foo, :bar)
@spec my_init_mfargs_toplevel(atom, atom) :: :ok
def my_init_mfargs_toplevel(:foo, :bar) do
priv_dir = Application.app_dir(:my_app, "priv")
databases = [
%{
id: :dynamic_city,
adapter: MyAdapter,
source: Path.join([priv_dir, "city.db"])
}
| Application.get_env(:geolix, :databases, [])
]
Application.put_env(:geolix, :databases, databases)
end
@spec my_init_mf_database(map) :: map
def my_init_mf_database(database) do
my_init_mfargs_database(database, :foo, :bar)
end
@spec my_init_mfargs_database(map, atom, atom) :: map
def my_init_mfargs_database(%{id: :dynamic_country} = database, :foo, :bar) do
priv_dir = Application.app_dir(:my_app, "priv")
%{database | source: Path.join([priv_dir, "country.db"])}
end
end
  The example above illustrates both types of dynamic initialization.
The top-level initializer is called as defined (`{mod, fun}` or
`{mod, fun, args}`) and expected to always return `:ok`. At the database
level the current database configuration is passed as the first parameter
with additional parameters following. It is expected that this
function returns the new, complete configuration.
If you choose to use the dynamic database initialization the only requirement
for your config file is a plain `%{init: {MyInitModule, :my_init_fun}}` entry.
Every additional field in the example is only used for illustration and only
required for the complete return value.
### Configuration (runtime)
If you do not want to use a pre-defined or dynamically initialized
configuration you can also define and start adapters at runtime:
iex(1)> Geolix.load_database(%{
...(1)> id: :runtime_city,
...(1)> adapter: MyAdapter,
...(1)> source: "/absolute/path/to/city.db"
...(1)> })
:ok
Please be aware that these databases will not be reloaded if,
for any reason, the supervisor/application is restarted.
Running `load_database/1` on an already configured database (matched by `:id`)
will reload/replace it without persisting the configuration. On success a
result of `:ok` will be returned otherwise a tuple in the style of
`{:error, message}`. The individual errors are defined by the adapter.
## Adapters
All the work done by Geolix is handled using adapters. These adapters can
use a database, a webservice or any other means available to handle your
lookup requests.
Known adapters:
- `Geolix.Adapter.Fake`
- [`Geolix.Adapter.LookupCache`](https://github.com/elixir-geolix/adapter_lookup_cache)
- [`Geolix.Adapter.MMDB2`](https://github.com/elixir-geolix/adapter_mmdb2)
For detailed information how to configure the adapter of your choice please
read the adapter's configuration.
### Custom Adapters
Adapters are expected to adhere to the `Geolix.Adapter` behaviour.
As a starting point for writing a custom adapter you can look at the
packaged `Geolix.Adapter.Fake`.
## Database Loading
Currently databases are loaded asynchronously upon startup. This behaviour
can be changed via configuration:
config :geolix, startup_sync: true
Configuring a synchronous startup can prevent potential "no data found"
lookup results. If your adapter configuration requires more time than
expected (think of downloading a database from a remote location via HTTP)
this might result in application startup delays and/or failures.
### Loading Errors
If the configuration is erroneous a message will be sent to `Logger` with
the level `:error`. Any other error during the load process is expected to
be defined and logged by the adapter itself.
### State Retrieval
All databases are loaded, unless you called `Geolix.load_database/1`,
asynchronously. This includes configured databases loaded upon application
start.
The database loader allows you to access the current state of loading:
iex(1)> Geolix.Database.Loader.loaded_databases()
[:city]
iex(2)> Geolix.Database.Loader.registered_databases()
[:city, :country]
  The example above demonstrates a state where the database `:country` is known but
not completely loaded yet. Please be aware that both lists are unsorted.
### Reloading
To trigger a forceful reload of all databases configured in the application
environment you can use `Geolix.reload_databases/0` to do so. This uses an
internal `GenServer.cast/2` so a slight delay will occur.
### Unloading
Calling `Geolix.unload_database/1` with a database id will unload this
database. As this is done in a lazy fashion it will still be kept in memory
while not being reloaded or used for lookups. If the database is configured
via application environment it will still be reloaded as usual in case of a
supervisor or application restart.
"""
alias Geolix.Database.Loader
@typedoc """
Minimal type specification for a database.
Every adapter can require additional values to be set.
"""
@type database :: %{
required(:id) => atom,
required(:adapter) => module,
optional(:init) => {module, atom} | {module, atom, [term]},
optional(term) => term
}
@doc """
Looks up IP information.
"""
@spec lookup(ip :: :inet.ip_address() | binary, opts :: Keyword.t()) :: map | nil
def lookup(ip, opts \\ [])
def lookup(ip, opts) when is_binary(ip) do
case :inet.parse_address(Kernel.to_charlist(ip)) do
{:ok, parsed} -> lookup(parsed, opts)
{:error, _} -> nil
end
end
def lookup(ip, opts) when is_tuple(ip) do
case opts[:where] do
nil -> lookup_all(ip, opts)
where -> lookup_single(ip, opts, where)
end
end
@doc """
Fetch metadata from one or multiple databases.
"""
@spec metadata(opts :: Keyword.t()) :: map | nil
def metadata(opts \\ []) do
case opts[:where] do
nil -> metadata_all()
where -> metadata_single(where)
end
end
@doc """
Loads a database according to its specification.
Requires at least the fields `:id` and `:adapter`. Any other required
fields depend on the adapter's requirements.
"""
@spec load_database(database) :: :ok | {:error, term}
def load_database(database) do
GenServer.call(Loader, {:load_database, database}, :infinity)
end
@doc """
Reloads all configured databases in the background.
"""
@spec reload_databases() :: :ok
def reload_databases, do: GenServer.cast(Loader, :reload_databases)
@doc """
Unloads a database.
This operation is lazy. The database will stay loaded but won't be reloaded
or used for lookups.
"""
@spec unload_database(atom | database) :: :ok
def unload_database(id) when is_atom(id), do: GenServer.call(Loader, {:unload_database, id})
def unload_database(%{id: id}), do: unload_database(id)
defp lookup_all(ip, opts) do
lookup_all(ip, opts, Loader.loaded_databases())
end
defp lookup_all(ip, opts, databases) do
databases
|> Task.async_stream(
fn database ->
{database, lookup_single(ip, opts, database)}
end,
ordered: false
)
|> Enum.into(%{}, fn {:ok, result} -> result end)
end
defp lookup_single(ip, opts, where) do
case Loader.get_database(where) do
nil -> nil
%{adapter: adapter} = database -> adapter.lookup(ip, opts, database)
end
end
defp metadata_all do
metadata_all(Loader.loaded_databases())
end
defp metadata_all(databases) do
databases
|> Task.async_stream(
fn database ->
{database, metadata_single(database)}
end,
ordered: false
)
|> Enum.into(%{}, fn {:ok, result} -> result end)
end
defp metadata_single(where) do
with %{adapter: adapter} = database <- Loader.get_database(where),
true <- function_exported?(adapter, :metadata, 1) do
adapter.metadata(database)
else
_ -> nil
end
end
end
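# A minimal custom adapter sketch, grounded only in how adapters are invoked
# above: `adapter.lookup(ip, opts, database)` in lookup_single/3 and the
# optional `adapter.metadata(database)` in metadata_single/1. The module name
# and return values are hypothetical; a real adapter should implement the
# `Geolix.Adapter` behaviour and would be registered via `Geolix.load_database/1`
# as documented above.
defmodule MyStaticAdapter do
  def lookup({127, 0, 0, 1}, _opts, _database), do: %{label: :loopback}
  def lookup(_ip, _opts, _database), do: nil
  def metadata(_database), do: %{type: :static}
end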
# Source: lib/geolix.ex
defmodule BubbleLib.MapUtil.AutoMap do
@moduledoc """
Automagic map — creates deep elements inside. All map keys are
coerced to strings except for list indexes.
"""
alias BubbleLib.MapUtil.AutoMap
alias BubbleLib.MapUtil.AutoMap.ETS
import Kernel, except: [get_in: 2]
def put_in(data, keys, value) do
elem(kernel_get_and_update_in(data, keys, fn _ -> {nil, value} end), 1)
end
defp kernel_get_and_update_in(data, [head], fun) when is_function(head, 3),
do: head.(:get_and_update, data, fun)
defp kernel_get_and_update_in(data, [head | tail], fun) when is_function(head, 3),
do: head.(:get_and_update, data, &kernel_get_and_update_in(&1, tail, fun))
defp kernel_get_and_update_in(data, [head], fun) when is_function(fun, 1),
do: access_get_and_update(data, head, fun)
defp kernel_get_and_update_in(data, [head | tail], fun) when is_function(fun, 1),
do: access_get_and_update(data, head, &kernel_get_and_update_in(&1, tail, fun))
defp access_get_and_update(%{__struct__: _} = struct, key, fun) when is_binary(key) do
try do
key = String.to_existing_atom(key)
case Map.fetch(struct, key) do
{:ok, _} ->
{nil, _value} = Map.get_and_update(struct, key, fun)
:error ->
{nil, struct}
end
rescue
ArgumentError ->
{nil, struct}
end
end
defp access_get_and_update(map, key, fun) when is_map(map) and is_atom(key) do
access_get_and_update(map, Atom.to_string(key), fun)
end
defp access_get_and_update(map, key, fun) when is_map(map) do
{nil, _value} = Map.get_and_update(map, key, fun)
end
defp access_get_and_update(nil, key, fun) when is_integer(key) do
{_, value} = fun.(nil)
{nil, ensure_list(nil, key, value)}
end
defp access_get_and_update(list, key, fun) when is_list(list) and is_integer(key) do
{_, value} = fun.(Enum.at(list, key))
{nil, ensure_list(list, key, value)}
end
defp access_get_and_update(_any, key, fun) do
m = AutoMap.put_in(%{}, [key], elem(fun.(nil), 1))
{nil, m}
end
def get_in(%{} = map, [head | tail]) when is_atom(head) do
get_in(map, [to_string(head) | tail])
end
def get_in(%ETS{} = struct, path) do
ETS.get_in(struct, path)
end
def get_in(%{__struct__: _} = struct, [head | tail]) do
try do
value = Kernel.get_in(struct, [head])
get_in(value, tail)
rescue
UndefinedFunctionError ->
try do
get_in(Map.get(struct, String.to_existing_atom(head), nil), tail)
rescue
ArgumentError ->
nil
end
end
end
def get_in(list, [head | tail]) when is_list(list) and is_integer(head) do
get_in(Enum.at(list, head), tail)
end
def get_in(list, [query | tail]) when is_list(list) and is_list(query) do
get_in(MatchEngine.filter_all(list, query), tail)
end
def get_in(map, [[{:id, key}] | tail]) when is_map(map) do
case Map.fetch(map, key) do
{:ok, value} -> [value]
:error -> []
end
|> get_in(tail)
end
def get_in(%{} = map, [head | tail]) do
case Map.get(map, head, :undefined) do
:undefined ->
nil
value ->
get_in(value, tail)
end
end
def get_in(value, []) do
value
end
def get_in(_value, _rest) do
nil
end
def loop_start(%ETS{} = struct) do
ETS.loop_start(struct)
end
def loop_start(values) when is_list(values) do
values
end
def loop_start(_) do
:unsupported
end
def loop_next([head | tail]) do
{:next, head, tail}
end
def loop_next([]) do
:stop
end
def loop_next(%ETS{} = struct) do
ETS.loop_next(struct)
end
def loop_next(:unsupported) do
:stop
end
defp ensure_list(list, index, value) when index < 0 do
ensure_list(list, index + length(list), value)
end
defp ensure_list(nil, 0, value) do
[value]
end
defp ensure_list(nil, max_index, value) when max_index > 0 do
0..max_index
|> Enum.map(fn
^max_index -> value
_ -> nil
end)
end
defp ensure_list(list, index, value) when is_list(list) do
value = recombine_list(Enum.at(list, index), value)
list =
case length(list) > index do
true ->
list
false ->
list ++ (length(list)..index |> Enum.map(fn _ -> nil end))
end
list
|> Enum.zip(0..(length(list) - 1))
|> Enum.map(fn
{_, ^index} -> value
{v, _} -> v
end)
end
defp recombine_list(old, new) when is_list(new) and is_list(old) do
list_indices(old, new)
|> Enum.map(fn idx ->
oldval = Enum.at(old, idx)
newval = Enum.at(new, idx)
recombine_list(oldval, newval)
end)
end
defp recombine_list(old, new) do
new || old
end
defp list_indices(a, b) do
0..(max(length(a), length(b)) - 1)
end
def remove(map, []) do
map
end
def remove(list, [index]) when is_list(list) and is_integer(index) do
List.delete_at(list, index)
end
def remove(%{} = map, [key]) do
Map.delete(map, key)
end
def remove(%{} = map, [key | tail]) do
value = remove(Map.get(map, key), tail)
case empty(value) do
true ->
Map.delete(map, key)
false ->
Map.put(map, key, value)
end
end
def remove(list, [index | tail]) when is_list(list) and is_integer(index) do
value = remove(Enum.at(list, index), tail)
case empty(value) do
true ->
List.delete_at(list, index)
false ->
List.replace_at(list, index, value)
end
end
defp empty([]), do: true
defp empty(m) when is_list(m) do
Enum.reduce(m, true, fn v, acc -> acc and v == nil end)
end
defp empty(m), do: m == %{}
end
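# Worked example, traced through the clauses above:
#
#     alias BubbleLib.MapUtil.AutoMap
#     AutoMap.put_in(%{}, ["a", 2], "x")
#     #=> %{"a" => [nil, nil, "x"]}    (intermediate list auto-created)
#     AutoMap.get_in(%{"a" => [nil, nil, "x"]}, [:a, 2])
#     #=> "x"                          (atom key coerced to string, 2 used as list index)
#     AutoMap.remove(%{"a" => [nil, nil, "x"]}, ["a", 2])
#     #=> %{}                          (containers left empty are pruned)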
# Source: lib/bubble_lib/map_util/auto_map.ex
defmodule RobotSimulator do
defstruct direction: nil, position: nil
defguard is_direction(direction) when direction in [:north, :east, :south, :west]
defguard is_position(x, y) when is_integer(x) and is_integer(y)
@doc """
Create a Robot Simulator given an initial direction and position.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
  @spec create() :: any
  def create() do
    %{direction: :north, position: {0, 0}}
  end
  @spec create(direction :: atom, position :: {integer, integer}) :: any
def create(direction, position = {x, y}) when is_direction(direction) and is_position(x, y) do
%{direction: direction, position: position}
end
def create(direction, _position) when is_direction(direction), do: {:error, "invalid position"}
def create(_direction, _position = {x, y}) when is_position(x, y), do: {:error, "invalid direction"}
@doc """
Simulate the robot's movement given a string of instructions.
  Valid instructions are: "R" (turn right), "L" (turn left), and "A" (advance)
"""
@spec simulate(robot :: any, instructions :: String.t()) :: any
def simulate(robot, instructions) do
run(instructions, robot)
end
defp run("", robot), do: robot
defp run("R" <> instructions, %{direction: :north} = robot) do
run(instructions, %{robot | direction: :east})
end
defp run("L" <> instructions, %{direction: :north} = robot) do
run(instructions, %{robot | direction: :west})
end
defp run("A" <> instructions, %{direction: :north, position: {x, y}} = robot) do
run(instructions, %{robot | position: {x, y+1}})
end
defp run("R" <> instructions, %{direction: :east} = robot) do
run(instructions, %{robot | direction: :south})
end
defp run("L" <> instructions, %{direction: :east} = robot) do
run(instructions, %{robot | direction: :north})
end
defp run("A" <> instructions, %{direction: :east, position: {x, y}} = robot) do
run(instructions, %{robot | position: {x+1, y}})
end
defp run("R" <> instructions, %{direction: :south} = robot) do
run(instructions, %{robot | direction: :west})
end
defp run("L" <> instructions, %{direction: :south} = robot) do
run(instructions, %{robot | direction: :east})
end
defp run("A" <> instructions, %{direction: :south, position: {x, y}} = robot) do
run(instructions, %{robot | position: {x, y-1}})
end
defp run("R" <> instructions, %{direction: :west} = robot) do
run(instructions, %{robot | direction: :north})
end
defp run("L" <> instructions, %{direction: :west} = robot) do
run(instructions, %{robot | direction: :south})
end
defp run("A" <> instructions, %{direction: :west, position: {x, y}} = robot) do
run(instructions, %{robot | position: {x-1, y}})
end
defp run(_, _), do: {:error, "invalid instruction"}
@doc """
Return the robot's direction.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec direction(robot :: any) :: atom
def direction(%{direction: direction} = _robot) do
direction
end
@doc """
Return the robot's position.
"""
@spec position(robot :: any) :: {integer, integer}
def position(%{position: {x, y}} = _robot) do
{x, y}
end
end
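# Usage sketch, traced against the clauses above:
#
#     robot = RobotSimulator.create(:north, {0, 0})
#     RobotSimulator.simulate(robot, "RAAL")
#     #=> %{direction: :north, position: {2, 0}}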
# Source: elixir/robot-simulator/lib/robot_simulator.ex
defmodule Ecto.Adapters.DynamoDB.Cache do
@moduledoc """
An Elixir agent to cache DynamoDB table schemas and the first page of results for selected tables
"""
@typep table_name_t :: String.t
@typep dynamo_response_t :: %{required(String.t) => term}
@spec start_link() :: Agent.on_start
def start_link do
cached_table_list = Application.get_env(:ecto_adapters_dynamodb, :cached_tables)
Agent.start_link(fn -> %{
schemas: %{},
tables: (for table_name <- cached_table_list, into: %{}, do: {table_name, nil})
} end, name: __MODULE__)
end
@doc """
  Returns the cached result of a DynamoDB describe-table call. Performs the describe-table request if not yet cached and raises any errors resulting from the request. The raw JSON is presented as an Elixir map.
"""
@spec describe_table!(table_name_t) :: dynamo_response_t | no_return
def describe_table!(table_name) do
case describe_table(table_name) do
{:ok, schema} -> schema
{:error, error} -> raise error.type, message: error.message
end
end
@spec describe_table(table_name_t) :: {:ok, dynamo_response_t} | {:error, term}
def describe_table(table_name),
do: Agent.get_and_update(__MODULE__, &do_describe_table(&1, table_name))
@doc """
  Performs a DynamoDB describe-table and caches (without returning) the result. Raises any errors resulting from the request
"""
@spec update_table_info!(table_name_t) :: :ok | no_return
def update_table_info!(table_name) do
case update_table_info(table_name) do
:ok -> :ok
{:error, error} -> raise error.type, message: error.message
end
end
@spec update_table_info(table_name_t) :: :ok | {:error, term}
def update_table_info(table_name),
do: Agent.get_and_update(__MODULE__, &do_update_table_info(&1, table_name))
@doc """
Returns the cached first page of results for a table. Performs a DynamoDB scan if not yet cached and raises any errors as a result of the request
"""
@spec scan!(table_name_t) :: dynamo_response_t | no_return
def scan!(table_name) do
case scan(table_name) do
{:ok, scan_result} -> scan_result
{:error, error} -> raise error.type, message: error.message
end
end
@spec scan(table_name_t) :: {:ok, dynamo_response_t} | {:error, term}
def scan(table_name),
do: Agent.get_and_update(__MODULE__, &do_scan(&1, table_name))
@doc """
Performs a DynamoDB scan and caches (without returning) the first page of results. Raises any errors as a result of the request
"""
@spec update_cached_table!(table_name_t) :: :ok | no_return
def update_cached_table!(table_name) do
case update_cached_table(table_name) do
:ok -> :ok
{:error, error} -> raise error.type, message: error.message
end
end
@spec update_cached_table(table_name_t) :: :ok | {:error, term}
def update_cached_table(table_name),
do: Agent.get_and_update(__MODULE__, &do_update_cached_table(&1, table_name))
@doc """
Returns the current cache of table schemas, and cache of first page of results for selected tables, as an Elixir map
"""
# For testing and debugging use only:
def get_cache,
do: Agent.get(__MODULE__, &(&1))
defp do_describe_table(cache, table_name) do
case cache.schemas[table_name] do
nil ->
result = ExAws.Dynamo.describe_table(table_name) |> ExAws.request
case result do
{:ok, %{"Table" => schema}} ->
updated_cache = put_in(cache.schemas[table_name], schema)
{{:ok, schema}, updated_cache}
{:error, error} ->
{{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect error}"}}, cache}
end
schema ->
{{:ok, schema}, cache}
end
end
defp do_update_table_info(cache, table_name) do
result = ExAws.Dynamo.describe_table(table_name) |> ExAws.request
case result do
{:ok, %{"Table" => schema}} ->
updated_cache = put_in(cache.schemas[table_name], schema)
{:ok, updated_cache}
{:error, error} ->
{{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect error}"}}, cache}
end
end
defp do_scan(cache, table_name) do
table_name_in_config = Map.has_key?(cache.tables, table_name)
case cache.tables[table_name] do
nil when table_name_in_config ->
result = ExAws.Dynamo.scan(table_name) |> ExAws.request
case result do
{:ok, scan_result} ->
updated_cache = put_in(cache.tables[table_name], scan_result)
{{:ok, scan_result}, updated_cache}
{:error, error} ->
{{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect error}"}}, cache}
end
nil ->
{{:error, %{type: ArgumentError, message: "Could not confirm the table, #{inspect table_name}, as listed for caching in the application's configuration. Please see README file for details."}}, cache}
cached_scan ->
{{:ok, cached_scan}, cache}
end
end
defp do_update_cached_table(cache, table_name) do
table_name_in_config = Map.has_key?(cache.tables, table_name)
case cache.tables[table_name] do
nil when not table_name_in_config ->
{{:error, %{type: ArgumentError, message: "Could not confirm the table, #{inspect table_name}, as listed for caching in the application's configuration. Please see README file for details."}}, cache}
_ ->
result = ExAws.Dynamo.scan(table_name) |> ExAws.request
case result do
{:ok, scan_result} ->
updated_cache = put_in(cache.tables[table_name], scan_result)
{:ok, updated_cache}
{:error, error} ->
{{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect error}"}}, cache}
end
end
end
end
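# Usage sketch. Hedged: assumes `:cached_tables` is set in the application
# config (e.g. `config :ecto_adapters_dynamodb, cached_tables: ["people"]`)
# and that ExAws can reach DynamoDB; the table name is a placeholder:
#
#     {:ok, _pid} = Ecto.Adapters.DynamoDB.Cache.start_link()
#     schema = Ecto.Adapters.DynamoDB.Cache.describe_table!("people")
#     first_page = Ecto.Adapters.DynamoDB.Cache.scan!("people")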
# Source: lib/ecto_adapters_dynamodb/cache.ex
defmodule Axon.Shared do
@moduledoc false
# Collection of private helper functions and
# macros for enforcing shape/type constraints,
# doing shape calculations, and even some
# helper numerical definitions.
import Nx.Defn
@doc """
Asserts `lhs` has same shape as `rhs`.
"""
defn assert_shape!(lhs, rhs) do
transform(
{lhs, rhs},
fn {lhs, rhs} ->
lhs = Nx.shape(lhs)
rhs = Nx.shape(rhs)
unless Elixir.Kernel.==(lhs, rhs) do
raise ArgumentError,
"expected input shapes to be equal," <>
" got #{inspect(lhs)} != #{inspect(rhs)}"
end
end
)
end
@doc """
Asserts `lhs` has same rank as `rhs`.
"""
defn assert_equal_rank!(lhs, rhs) do
transform(
{lhs, rhs},
fn {x, y} ->
x = if is_integer(x), do: x, else: Nx.rank(x)
y = if is_integer(y), do: y, else: Nx.rank(y)
        unless Elixir.Kernel.==(x, y) do
raise ArgumentError, "expected input ranks to be equal, got #{x} != #{y}"
end
end
)
end
@doc """
Asserts `lhs` has at least rank `rhs`.
"""
defn assert_greater_equal_rank!(lhs, rhs) do
transform(
{lhs, rhs},
fn {x, y} ->
x = if is_integer(x), do: x, else: Nx.rank(x)
y = if is_integer(y), do: y, else: Nx.rank(y)
unless Elixir.Kernel.>=(x, y) do
raise ArgumentError, "expected input shape to have at least rank #{y}, got rank #{x}"
end
end
)
end
@doc """
Transforms the given Elixir value into a scalar predicate.
"""
defn to_predicate(term) do
transform(term, fn term -> if term, do: 1, else: 0 end)
end
@doc """
  Applies `fun` across a composite (possibly nested) container of arguments.
"""
defn apply_map(args, fun) do
transform(
{args, fun},
fn {args, fun} -> Nx.Defn.Tree.composite(args, fun) end
)
end
@doc """
Creates a zeros-like structure which matches the structure
of the input.
"""
defn zeros_like(params) do
transform(
params,
&deep_new(&1, fn {k, x} ->
{k, Axon.Initializers.zeros(shape: Nx.shape(x))}
end)
)
end
@doc """
Creates a fulls-like tuple of inputs.
"""
defn fulls_like(params, value) do
transform(
params,
&deep_new(&1, fn {k, x} ->
{k, Axon.Initializers.full(value, shape: Nx.shape(x))}
end)
)
end
@doc """
Deep merges two possibly nested maps, applying fun to leaf values.
"""
def deep_merge(left, right, fun) do
Map.merge(left, right, &deep_resolve(&1, &2, &3, fun))
end
defp deep_resolve(_key, left = %Nx.Tensor{}, right = %Nx.Tensor{}, fun) do
fun.(left, right)
end
defp deep_resolve(_key, left = %{}, right = %{}, fun) do
deep_merge(left, right, fun)
end
@doc """
Creates a new map-like structure from a possible nested map, applying `fun`
to each leaf.
"""
def deep_new(map, fun) do
map
|> Map.new(&recur_deep_new(&1, fun))
end
defp recur_deep_new({key, value}, fun) do
case value do
%Nx.Tensor{} = val ->
fun.({key, val})
%{} = val ->
{key, deep_new(val, fun)}
end
end
@doc """
JIT given function with args and opts or apply it inside defn.
"""
def jit_or_apply(caller, fun, args, opts \\ []) do
if Nx.Defn.Compiler.current() do
if opts != [] do
raise ArgumentError,
"cannot pass execution options to Axon.#{caller} inside defn, got: #{inspect(opts)}"
end
apply(fun, args)
else
Nx.Defn.jit(fun, args, opts)
end
end
## Numerical Helpers
# TODO: These should be contained somewhere else, like another library
defn logsumexp(x, opts \\ []) do
opts = keyword!(opts, axes: [], keep_axes: false)
x
|> Nx.exp()
|> Nx.sum(opts)
|> Nx.log()
end
defn xlogy(x, y) do
x_ok = Nx.not_equal(x, 0.0)
safe_x = Nx.select(x_ok, x, Nx.tensor(1, type: Nx.type(x)))
safe_y = Nx.select(x_ok, y, Nx.tensor(1, type: Nx.type(y)))
Nx.select(x_ok, safe_x * Nx.log(safe_y), Nx.tensor(0, type: Nx.type(x)))
end
defn reciprocal(x), do: Nx.divide(1, x)
defn normalize(input, mean, variance, gamma, bias, opts \\ []) do
opts = keyword!(opts, epsilon: 1.0e-6)
scale =
variance
|> Nx.add(opts[:epsilon])
|> Nx.rsqrt()
|> Nx.multiply(gamma)
input
|> Nx.subtract(mean)
|> Nx.multiply(scale)
|> Nx.add(bias)
end
defn mean_and_variance(input, opts \\ []) do
opts = keyword!(opts, [:axes])
mean = Nx.mean(input, axes: opts[:axes], keep_axes: true)
mean_of_squares = Nx.mean(input * input, axes: opts[:axes], keep_axes: true)
{mean, mean_of_squares - mean * mean}
end
end
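# Sketch of why xlogy/2 above selects "safe" operands (requires Nx): a plain
# `x * Nx.log(y)` yields NaN at x == 0, y == 0, while xlogy returns 0 there.
#
#     Axon.Shared.xlogy(Nx.tensor([0.0, 2.0]), Nx.tensor([0.0, 4.0]))
#     #=> approximately [0.0, 2.7726]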
# Source: lib/axon/shared.ex
defmodule Phoenix.Controller.Pipeline do
@moduledoc """
This module implements the controller pipeline responsible for handling requests.
## The pipeline
The goal of a controller is to receive a request and invoke the desired
action. The whole flow of the controller is managed by a single pipeline:
defmodule UserController do
use Phoenix.Controller
require Logger
plug :log_message, "before action"
def show(conn, _params) do
Logger.debug "show/2"
send_resp(conn, 200, "OK")
end
defp log_message(conn, msg) do
Logger.debug msg
conn
end
end
When invoked, this pipeline will print:
before action
show/2
As any other Plug pipeline, we can halt at any step by calling
`Plug.Conn.halt/1` (which is by default imported into controllers).
If we change `log_message/2` to:
def log_message(conn, msg) do
Logger.debug msg
halt(conn)
end
it will print only:
before action
As the rest of the pipeline (the action and the after action plug)
will never be invoked.
## Guards
`plug/2` supports guards, allowing a developer to configure a plug to only
run in some particular action:
plug :log_message, "before show and edit" when action in [:show, :edit]
plug :log_message, "before all but index" when not action in [:index]
The first plug will run only when action is show or edit.
The second plug will always run, except for the index action.
Those guards work like regular Elixir guards and the only variables accessible
in the guard are `conn`, the `action` as an atom and the `controller` as an
alias.
## Controllers are plugs
Like routers, controllers are plugs, but they are wired to dispatch
to a particular function which is called an action.
For example, the route:
get "/users/:id", UserController, :show
will invoke `UserController` as a plug:
UserController.call(conn, :show)
which will trigger the plug pipeline and which will eventually
invoke the inner action plug that dispatches to the `show/2`
function in the `UserController`.
As controllers are plugs, they implement both `init/1` and
`call/2`, and it also provides a function named `action/2`
which is responsible for dispatching the appropriate action
after the plug stack (and is also overridable).
"""
@doc false
defmacro __using__(_) do
quote do
@behaviour Plug
import Phoenix.Controller.Pipeline
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@before_compile Phoenix.Controller.Pipeline
def init(action) when is_atom(action) do
action
end
def call(conn, action) do
conn = update_in conn.private,
&(&1 |> Map.put(:phoenix_controller, __MODULE__)
|> Map.put(:phoenix_action, action))
phoenix_controller_pipeline(conn, action)
end
def action(%{private: %{phoenix_action: action}} = conn, _options) do
apply(__MODULE__, action, [conn, conn.params])
end
defoverridable [init: 1, call: 2, action: 2]
end
end
@doc false
defmacro __before_compile__(env) do
action = {:action, [], true}
plugs = [action|Module.get_attribute(env.module, :plugs)]
{conn, body} = Plug.Builder.compile(env, plugs, log_on_halt: :debug)
quote do
defoverridable [action: 2]
def action(conn, opts) do
try do
super(conn, opts)
catch
kind, reason ->
Phoenix.Controller.Pipeline.__catch__(
              kind, reason, __MODULE__, conn.private.phoenix_action, __STACKTRACE__
)
end
end
defp phoenix_controller_pipeline(unquote(conn), var!(action)) do
var!(conn) = unquote(conn)
var!(controller) = __MODULE__
_ = var!(conn)
_ = var!(controller)
_ = var!(action)
unquote(body)
end
end
end
@doc false
def __catch__(:error, :function_clause, controller, action,
[{controller, action, [%Plug.Conn{} | _], _loc} | _] = stack) do
args = [controller: controller, action: action]
reraise Phoenix.ActionClauseError, args, stack
end
def __catch__(kind, reason, _controller, _action, stack) do
:erlang.raise(kind, reason, stack)
end
@doc """
Stores a plug to be executed as part of the plug pipeline.
"""
defmacro plug(plug)
defmacro plug({:when, _, [plug, guards]}), do:
plug(plug, [], guards)
defmacro plug(plug), do:
plug(plug, [], true)
@doc """
Stores a plug with the given options to be executed as part of
the plug pipeline.
"""
defmacro plug(plug, opts)
defmacro plug(plug, {:when, _, [opts, guards]}), do:
plug(plug, opts, guards)
defmacro plug(plug, opts), do:
plug(plug, opts, true)
defp plug(plug, opts, guards) do
quote do
@plugs {unquote(plug), unquote(opts), unquote(Macro.escape(guards))}
end
end
end
# Source: lib/phoenix/controller/pipeline.ex
defmodule Path do
@moduledoc """
This module provides conveniences for manipulating or
retrieving file system paths.
The functions in this module may receive a chardata as
argument (i.e. a string or a list of characters / string)
and will always return a string (encoded in UTF-8).
The majority of the functions in this module do not
interact with the file system, except for a few functions
that require it (like `wildcard/2` and `expand/1`).
"""
@type t :: IO.chardata()
@doc """
Converts the given path to an absolute one. Unlike
`expand/1`, no attempt is made to resolve `..`, `.` or `~`.
## Examples
### Unix-like operating systems
Path.absname("foo")
#=> "/usr/local/foo"
Path.absname("../x")
#=> "/usr/local/../x"
### Windows
Path.absname("foo")
#=> "D:/usr/local/foo"
Path.absname("../x")
#=> "D:/usr/local/../x"
"""
@spec absname(t) :: binary
def absname(path) do
absname(path, File.cwd!())
end
@doc """
Builds a path from `relative_to` to `path`.
If `path` is already an absolute path, `relative_to` is ignored. See also
`relative_to/2`.
Unlike `expand/2`, no attempt is made to
resolve `..`, `.` or `~`.
## Examples
iex> Path.absname("foo", "bar")
"bar/foo"
iex> Path.absname("../x", "bar")
"bar/../x"
"""
@spec absname(t, t) :: binary
def absname(path, relative_to) do
path = IO.chardata_to_string(path)
case type(path) do
:relative ->
absname_join(relative_to, path)
:absolute ->
absname_join([path])
:volumerelative ->
relative_to = IO.chardata_to_string(relative_to)
absname_vr(split(path), split(relative_to), relative_to)
end
end
# Absolute path on current drive
defp absname_vr(["/" | rest], [volume | _], _relative), do: absname_join([volume | rest])
# Relative to current directory on current drive.
defp absname_vr([<<x, ?:>> | rest], [<<x, _::binary>> | _], relative),
do: absname(absname_join(rest), relative)
# Relative to current directory on another drive.
defp absname_vr([<<x, ?:>> | name], _, _relative) do
cwd =
case :file.get_cwd([x, ?:]) do
{:ok, dir} -> IO.chardata_to_string(dir)
{:error, _} -> <<x, ?:, ?/>>
end
absname(absname_join(name), cwd)
end
@slash [?/, ?\\]
# Joins a list
defp absname_join([name1, name2 | rest]), do: absname_join([absname_join(name1, name2) | rest])
defp absname_join([name]),
do: do_absname_join(IO.chardata_to_string(name), <<>>, [], major_os_type())
# Joins two paths
defp absname_join(left, right),
do: do_absname_join(IO.chardata_to_string(left), relative(right), [], major_os_type())
defp do_absname_join(<<uc_letter, ?:, rest::binary>>, relativename, [], :win32)
when uc_letter in ?A..?Z do
do_absname_join(rest, relativename, [?:, uc_letter + ?a - ?A], :win32)
end
defp do_absname_join(<<c1, c2, rest::binary>>, relativename, [], :win32)
when c1 in @slash and c2 in @slash do
do_absname_join(rest, relativename, '//', :win32)
end
defp do_absname_join(<<?\\, rest::binary>>, relativename, result, :win32),
do: do_absname_join(<<?/, rest::binary>>, relativename, result, :win32)
defp do_absname_join(<<?/, rest::binary>>, relativename, [?., ?/ | result], os_type),
do: do_absname_join(rest, relativename, [?/ | result], os_type)
defp do_absname_join(<<?/, rest::binary>>, relativename, [?/ | result], os_type),
do: do_absname_join(rest, relativename, [?/ | result], os_type)
defp do_absname_join(<<>>, <<>>, result, os_type),
do: IO.iodata_to_binary(reverse_maybe_remove_dir_sep(result, os_type))
defp do_absname_join(<<>>, relativename, [?: | rest], :win32),
do: do_absname_join(relativename, <<>>, [?: | rest], :win32)
defp do_absname_join(<<>>, relativename, [?/ | result], os_type),
do: do_absname_join(relativename, <<>>, [?/ | result], os_type)
defp do_absname_join(<<>>, relativename, result, os_type),
do: do_absname_join(relativename, <<>>, [?/ | result], os_type)
defp do_absname_join(<<char, rest::binary>>, relativename, result, os_type),
do: do_absname_join(rest, relativename, [char | result], os_type)
defp reverse_maybe_remove_dir_sep([?/, ?:, letter], :win32), do: [letter, ?:, ?/]
defp reverse_maybe_remove_dir_sep([?/], _), do: [?/]
defp reverse_maybe_remove_dir_sep([?/ | name], _), do: :lists.reverse(name)
defp reverse_maybe_remove_dir_sep(name, _), do: :lists.reverse(name)
@doc """
Converts the path to an absolute one and expands
any `.` and `..` characters and a leading `~`.
## Examples
Path.expand("/foo/bar/../baz")
#=> "/foo/baz"
"""
@spec expand(t) :: binary
def expand(path) do
expand_dot(absname(expand_home(path), File.cwd!()))
end
@doc """
Expands the path relative to the path given as the second argument,
expanding any `.` and `..` components.
If the path is already an absolute path, `relative_to` is ignored.
Note that this function treats a `path` with a leading `~` as
an absolute one.
The second argument is first expanded to an absolute path.
## Examples
# Assuming that the absolute path to baz is /quux/baz
Path.expand("foo/bar/../bar", "baz")
#=> "/quux/baz/foo/bar"
Path.expand("foo/bar/../bar", "/baz")
#=> "/baz/foo/bar"
Path.expand("/foo/bar/../bar", "/baz")
#=> "/foo/bar"
"""
@spec expand(t, t) :: binary
def expand(path, relative_to) do
expand_dot(absname(absname(expand_home(path), expand_home(relative_to)), File.cwd!()))
end
@doc """
Returns the path type.
## Examples
### Unix-like operating systems
Path.type("/") #=> :absolute
Path.type("/usr/local/bin") #=> :absolute
Path.type("usr/local/bin") #=> :relative
Path.type("../usr/local/bin") #=> :relative
Path.type("~/file") #=> :relative
### Windows
Path.type("D:/usr/local/bin") #=> :absolute
Path.type("usr/local/bin") #=> :relative
Path.type("D:bar.ex") #=> :volumerelative
Path.type("/bar/foo.ex") #=> :volumerelative
"""
@spec type(t) :: :absolute | :relative | :volumerelative
def type(name)
when is_list(name)
when is_binary(name) do
pathtype(name, major_os_type()) |> elem(0)
end
@doc """
Forces the path to be a relative path.
## Examples
### Unix-like operating systems
Path.relative("/usr/local/bin") #=> "usr/local/bin"
Path.relative("usr/local/bin") #=> "usr/local/bin"
Path.relative("../usr/local/bin") #=> "../usr/local/bin"
### Windows
Path.relative("D:/usr/local/bin") #=> "usr/local/bin"
Path.relative("usr/local/bin") #=> "usr/local/bin"
Path.relative("D:bar.ex") #=> "bar.ex"
Path.relative("/bar/foo.ex") #=> "bar/foo.ex"
"""
@spec relative(t) :: binary
def relative(name) do
relative(name, major_os_type())
end
defp relative(name, os_type) do
pathtype(name, os_type)
|> elem(1)
|> IO.chardata_to_string()
end
defp pathtype(name, os_type) do
case os_type do
:win32 -> win32_pathtype(name)
_ -> unix_pathtype(name)
end
end
defp unix_pathtype(path) when path in ["/", '/'], do: {:absolute, "."}
defp unix_pathtype(<<?/, relative::binary>>), do: {:absolute, relative}
defp unix_pathtype([?/ | relative]), do: {:absolute, relative}
defp unix_pathtype([list | rest]) when is_list(list), do: unix_pathtype(list ++ rest)
defp unix_pathtype(relative), do: {:relative, relative}
defp win32_pathtype([list | rest]) when is_list(list), do: win32_pathtype(list ++ rest)
defp win32_pathtype([char, list | rest]) when is_list(list),
do: win32_pathtype([char | list ++ rest])
defp win32_pathtype(<<c1, c2, relative::binary>>) when c1 in @slash and c2 in @slash,
do: {:absolute, relative}
defp win32_pathtype(<<char, relative::binary>>) when char in @slash,
do: {:volumerelative, relative}
defp win32_pathtype(<<_letter, ?:, char, relative::binary>>) when char in @slash,
do: {:absolute, relative}
defp win32_pathtype(<<_letter, ?:, relative::binary>>), do: {:volumerelative, relative}
defp win32_pathtype([c1, c2 | relative]) when c1 in @slash and c2 in @slash,
do: {:absolute, relative}
defp win32_pathtype([char | relative]) when char in @slash, do: {:volumerelative, relative}
defp win32_pathtype([c1, c2, list | rest]) when is_list(list),
do: win32_pathtype([c1, c2 | list ++ rest])
defp win32_pathtype([_letter, ?:, char | relative]) when char in @slash,
do: {:absolute, relative}
defp win32_pathtype([_letter, ?: | relative]), do: {:volumerelative, relative}
defp win32_pathtype(relative), do: {:relative, relative}
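# Illustration (added comment): win32_pathtype("D:foo") matches the
# <<_letter, ?:, relative::binary>> clause and returns {:volumerelative, "foo"},
# which is why Path.type/1 reports "D:bar.ex" as :volumerelative in the doc above.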
@doc """
Returns the given `path` relative to the given `from` path.
In other words, this function tries to strip the `from` prefix from `path`.
This function does not query the file system, so it assumes
no symlinks between the paths.
In case a direct relative path cannot be found, it returns
the original path.
## Examples
iex> Path.relative_to("/usr/local/foo", "/usr/local")
"foo"
iex> Path.relative_to("/usr/local/foo", "/")
"usr/local/foo"
iex> Path.relative_to("/usr/local/foo", "/etc")
"/usr/local/foo"
"""
@spec relative_to(t, t) :: binary
def relative_to(path, from) do
path = IO.chardata_to_string(path)
relative_to(split(path), split(from), path)
end
defp relative_to([h | t1], [h | t2], original) do
relative_to(t1, t2, original)
end
defp relative_to([_ | _] = l1, [], _original) do
join(l1)
end
defp relative_to(_, _, original) do
original
end
@doc """
Convenience to get the path relative to the current working
directory.
If, for some reason, the current working directory
cannot be retrieved, this function returns the given `path`.
"""
@spec relative_to_cwd(t) :: binary
def relative_to_cwd(path) do
case :file.get_cwd() do
{:ok, base} -> relative_to(path, IO.chardata_to_string(base))
_ -> path
end
end
@doc """
Returns the last component of the path or the path
itself if it does not contain any directory separators.
## Examples
iex> Path.basename("foo")
"foo"
iex> Path.basename("foo/bar")
"bar"
iex> Path.basename("/")
""
"""
@spec basename(t) :: binary
def basename(path) do
:filename.basename(IO.chardata_to_string(path))
end
@doc """
Returns the last component of `path` with the `extension`
stripped.
This function should be used to remove a specific
extension which may or may not be there.
## Examples
iex> Path.basename("~/foo/bar.ex", ".ex")
"bar"
iex> Path.basename("~/foo/bar.exs", ".ex")
"bar.exs"
iex> Path.basename("~/foo/bar.old.ex", ".ex")
"bar.old"
"""
@spec basename(t, t) :: binary
def basename(path, extension) do
:filename.basename(IO.chardata_to_string(path), IO.chardata_to_string(extension))
end
@doc """
Returns the directory component of `path`.
## Examples
iex> Path.dirname("/foo/bar.ex")
"/foo"
iex> Path.dirname("/foo/bar/baz.ex")
"/foo/bar"
iex> Path.dirname("/foo/bar/")
"/foo/bar"
iex> Path.dirname("bar.ex")
"."
"""
@spec dirname(t) :: binary
def dirname(path) do
:filename.dirname(IO.chardata_to_string(path))
end
@doc """
Returns the extension of the last component of `path`.
## Examples
iex> Path.extname("foo.erl")
".erl"
iex> Path.extname("~/foo/bar")
""
"""
@spec extname(t) :: binary
def extname(path) do
:filename.extension(IO.chardata_to_string(path))
end
@doc """
Returns the `path` with the `extension` stripped.
## Examples
iex> Path.rootname("/foo/bar")
"/foo/bar"
iex> Path.rootname("/foo/bar.ex")
"/foo/bar"
"""
@spec rootname(t) :: binary
def rootname(path) do
:filename.rootname(IO.chardata_to_string(path))
end
@doc """
Returns the `path` with the `extension` stripped.
This function should be used to remove a specific extension which may
or may not be there.
## Examples
iex> Path.rootname("/foo/bar.erl", ".erl")
"/foo/bar"
iex> Path.rootname("/foo/bar.erl", ".ex")
"/foo/bar.erl"
"""
@spec rootname(t, t) :: binary
def rootname(path, extension) do
:filename.rootname(IO.chardata_to_string(path), IO.chardata_to_string(extension))
end
@doc """
Joins a list of paths.
This function should be used to convert a list of paths to a path.
Note that any trailing slash is removed when joining.
## Examples
iex> Path.join(["~", "foo"])
"~/foo"
iex> Path.join(["foo"])
"foo"
iex> Path.join(["/", "foo", "bar/"])
"/foo/bar"
"""
@spec join(nonempty_list(t)) :: binary
def join([name1, name2 | rest]), do: join([join(name1, name2) | rest])
def join([name]), do: IO.chardata_to_string(name)
@doc """
Joins two paths.
The right path will always be expanded to its relative format
and any trailing slash will be removed when joining.
## Examples
iex> Path.join("foo", "bar")
"foo/bar"
iex> Path.join("/foo", "/bar/")
"/foo/bar"
The functions in this module support chardata, so giving a list will
treat it as a single entity:
iex> Path.join("foo", ["bar", "fiz"])
"foo/barfiz"
iex> Path.join(["foo", "bar"], "fiz")
"foobar/fiz"
"""
@spec join(t, t) :: binary
def join(left, right) do
left = IO.chardata_to_string(left)
os_type = major_os_type()
do_join(left, right, os_type) |> remove_dir_sep(os_type)
end
defp do_join(left, "/", os_type), do: remove_dir_sep(left, os_type)
defp do_join("", right, os_type), do: relative(right, os_type)
defp do_join("/", right, os_type), do: "/" <> relative(right, os_type)
defp do_join(left, right, os_type),
do: remove_dir_sep(left, os_type) <> "/" <> relative(right, os_type)
defp remove_dir_sep("", _os_type), do: ""
defp remove_dir_sep("/", _os_type), do: "/"
defp remove_dir_sep(bin, os_type) do
last = :binary.last(bin)
if last == ?/ or (last == ?\\ and os_type == :win32) do
binary_part(bin, 0, byte_size(bin) - 1)
else
bin
end
end
@doc ~S"""
Splits the path into a list at the path separator.
If an empty string is given, returns an empty list.
On Windows, path is split on both "\" and "/" separators
and the drive letter, if there is one, is always returned
in lowercase.
## Examples
iex> Path.split("")
[]
iex> Path.split("foo")
["foo"]
iex> Path.split("/foo/bar")
["/", "foo", "bar"]
"""
@spec split(t) :: [binary]
def split(path) do
:filename.split(IO.chardata_to_string(path))
end
defmodule Wildcard do
@moduledoc false
def read_link_info(file) do
call({:read_link_info, file})
end
def list_dir(dir) do
case call({:list_dir, dir}) do
{:ok, files} ->
{:ok, for(file <- files, hd(file) != ?., do: file)}
other ->
other
end
end
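# Note (added comment): the hd(file) != ?. filter above is what makes the
# default wildcard skip files starting with a dot; passing match_dot: true
# bypasses this module entirely in favor of :file (see wildcard/2 below).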
@compile {:inline, call: 1}
defp call(tuple) do
x = :erlang.dt_spread_tag(true)
y = :gen_server.call(:file_server_2, tuple)
:erlang.dt_restore_tag(x)
y
end
end
@doc """
Traverses paths according to the given `glob` expression and returns a
list of matches.
The wildcard looks like an ordinary path, except that the following
"wildcard characters" are interpreted in a special way:
* `?` - matches one character.
* `*` - matches any number of characters up to the end of the filename, the
next dot, or the next slash.
* `**` - two adjacent `*`'s used as a single pattern will match all
files and zero or more directories and subdirectories.
* `[char1,char2,...]` - matches any of the characters listed; two
characters separated by a hyphen will match a range of characters.
Do not add spaces before and after the comma as it would then match
paths containing the space character itself.
* `{item1,item2,...}` - matches one of the alternatives.
Do not add spaces before and after the comma as it would then match
paths containing the space character itself.
Other characters represent themselves. Only paths that have
exactly the same character in the same position will match. Note
that matching is case-sensitive: `"a"` will not match `"A"`.
Directory separators must always be written as `/`, even on Windows.
You may call `Path.expand/1` to normalize the path before invoking
this function.
By default, the patterns `*` and `?` do not match files starting
with a dot `.`. See the `:match_dot` option in the "Options" section
below.
## Options
* `:match_dot` - (boolean) if `false`, the special wildcard characters `*` and `?`
will not match files starting with a dot (`.`). If `true`, files starting with
a `.` will not be treated specially. Defaults to `false`.
## Examples
Imagine you have a directory called `projects` with three Elixir projects
inside of it: `elixir`, `ex_doc`, and `plug`. You can find all `.beam` files
inside the `ebin` directory of each project as follows:
Path.wildcard("projects/*/ebin/**/*.beam")
If you want to search for both `.beam` and `.app` files, you could do:
Path.wildcard("projects/*/ebin/**/*.{beam,app}")
"""
@spec wildcard(t, keyword) :: [binary]
def wildcard(glob, opts \\ []) do
mod = if Keyword.get(opts, :match_dot), do: :file, else: Path.Wildcard
glob
|> chardata_to_list!()
|> :filelib.wildcard(mod)
|> Enum.map(&IO.chardata_to_string/1)
end
defp chardata_to_list!(chardata) do
case :unicode.characters_to_list(chardata) do
result when is_list(result) ->
if 0 in result do
raise ArgumentError,
"cannot execute Path.wildcard/2 for path with null byte, got: #{inspect(chardata)}"
else
result
end
{:error, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :invalid
{:incomplete, encoded, rest} ->
raise UnicodeConversionError, encoded: encoded, rest: rest, kind: :incomplete
end
end
defp expand_home(type) do
case IO.chardata_to_string(type) do
"~" <> rest -> resolve_home(rest)
rest -> rest
end
end
defp resolve_home(""), do: System.user_home!()
defp resolve_home(rest) do
case {rest, major_os_type()} do
{"\\" <> _, :win32} -> System.user_home!() <> rest
{"/" <> _, _} -> System.user_home!() <> rest
_ -> "~" <> rest
end
end
# Expands "." and ".." in the given path; a leading "~" is handled earlier by expand_home/1.
defp expand_dot(<<"/", rest::binary>>), do: "/" <> do_expand_dot(rest)
defp expand_dot(<<letter, ":/", rest::binary>>) when letter in ?a..?z,
do: <<letter, ":/">> <> do_expand_dot(rest)
defp expand_dot(path), do: do_expand_dot(path)
defp do_expand_dot(path), do: do_expand_dot(:binary.split(path, "/", [:global]), [])
defp do_expand_dot([".." | t], [_, _ | acc]), do: do_expand_dot(t, acc)
defp do_expand_dot([".." | t], []), do: do_expand_dot(t, [])
defp do_expand_dot(["." | t], acc), do: do_expand_dot(t, acc)
defp do_expand_dot([h | t], acc), do: do_expand_dot(t, ["/", h | acc])
defp do_expand_dot([], []), do: ""
defp do_expand_dot([], ["/" | acc]), do: IO.iodata_to_binary(:lists.reverse(acc))
defp major_os_type do
:os.type() |> elem(0)
end
end
|
lib/elixir/lib/path.ex
| 0.80112
| 0.478955
|
path.ex
|
starcoder
|
defmodule Robolia.Games.TicTacToes do
alias Robolia.Games.TicTacToes.{TicTacToeMatch, TicTacToeMoviment, Queries}
alias Robolia.{GameError, Repo}
alias TicTacToeBoard
def refresh(%TicTacToeMatch{} = match) do
TicTacToeMatch
|> Queries.for_match(%{id: match.id})
|> Repo.one!()
end
def create_match!(attrs) do
%TicTacToeMatch{}
|> TicTacToeMatch.changeset(attrs)
|> Repo.insert!()
rescue
e in Ecto.InvalidChangesetError -> error(e, __STACKTRACE__)
end
def update_match!(%TicTacToeMatch{} = match, attrs) do
Ecto.Changeset.change(match, attrs)
|> Repo.update!()
end
def add_moviment!(%TicTacToeMatch{id: match_id} = match, %{position: position} = attrs) do
unless match |> player_playing?(attrs),
do: raise(GameError, message: "Given player is not playing this match")
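# Note (added comment): an invalid position forfeits the match. The
# {:error, detail} branch below records the moviment as invalid and
# awards the win to the player determined by TicTacToeBoard.fetch_next_player/3.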
case match |> current_state |> TicTacToeBoard.valid_position?(position) do
{:ok, true} ->
moviment =
%TicTacToeMoviment{}
|> TicTacToeMoviment.changeset(Map.merge(attrs, %{tic_tac_toe_match_id: match_id}))
|> Repo.insert!()
case match_finished?(match) do
{true, nil} ->
match |> update_match!(%{next_player_id: nil, status: TicTacToeMatch.draw()})
{true, winner_id} ->
match
|> update_match!(%{
next_player_id: nil,
winner_id: winner_id,
status: TicTacToeMatch.winner()
})
{false, _} ->
next_player_id =
match
|> current_state
|> TicTacToeBoard.fetch_next_player(match.first_player_id, match.second_player_id)
match |> update_match!(%{next_player_id: next_player_id})
end
moviment
{:error, detail} ->
moviment =
%TicTacToeMoviment{}
|> TicTacToeMoviment.changeset(
Map.merge(attrs, %{
tic_tac_toe_match_id: match_id,
valid: false,
details: detail |> to_string
})
)
|> Repo.insert!()
winner_id =
match
|> current_state
|> TicTacToeBoard.fetch_next_player(match.first_player_id, match.second_player_id)
match
|> update_match!(%{
next_player_id: nil,
winner_id: winner_id,
status: TicTacToeMatch.winner()
})
moviment
end
rescue
e in Ecto.InvalidChangesetError -> error(e, __STACKTRACE__)
e in Ecto.ConstraintError -> error(e, __STACKTRACE__)
end
def current_state(%TicTacToeMatch{} = match) do
match = match |> Repo.preload(:moviments)
match.moviments
|> Enum.map(fn %{player_id: player_id} = mov ->
mark = if player_id == match.first_player_id, do: :x, else: :o
{String.to_atom("p#{mov.position}"), mark}
end)
|> Map.new()
|> Enum.into(%{
p1: nil,
p2: nil,
p3: nil,
p4: nil,
p5: nil,
p6: nil,
p7: nil,
p8: nil,
p9: nil
})
end
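# Shape sketch (added comment): for a match where the first player has marked
# position 1 and the second player position 5, current_state/1 returns
# %{p1: :x, p5: :o, p2: nil, p3: nil, p4: nil, p6: nil, p7: nil, p8: nil, p9: nil}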
def match_finished?(%TicTacToeMatch{winner_id: nil} = match) do
match
|> current_state
|> TicTacToeBoard.match_finished?(match.first_player_id, match.second_player_id)
end
def match_finished?(%TicTacToeMatch{winner_id: winner_id}) do
{true, winner_id}
end
defp player_playing?(%TicTacToeMatch{} = match, %{player_id: player_id}) do
import Robolia.Games.TicTacToes.Queries, only: [for_match: 2, for_player: 2, count: 1]
TicTacToeMatch
|> for_match(%{id: match.id})
|> for_player(%{id: player_id})
|> count() > 0
end
defp error(exception, stacktrace) do
reraise GameError, [exception: exception], stacktrace
end
end
|
lib/robolia/games/tic_tac_toes.ex
| 0.508544
| 0.434341
|
tic_tac_toes.ex
|
starcoder
|
defmodule Antikythera.Router do
@moduledoc """
Defines the antikythera routing DSL.
## Routing macros
This module defines macros to be used in each gear's Router module.
The names of the macros are the same as the HTTP verbs: `get`, `post`, etc.
The macros take the following 4 arguments (although you can omit the last and just pass 3 of them):
- URL path pattern which consists of '/'-separated segments. The 1st character must be '/'.
To match an incoming request path against a pattern, you can use placeholders. See examples below for the usage.
- Controller module.
Antikythera expects that the module name given here does not contain `GearName.Controller.` as a prefix;
it's automatically prepended by antikythera.
- Name of the controller action as an atom.
- Keyword list of options.
Currently available options are `:from` and `:as`. See below for further explanations.
## Example
If you define the following router module,
defmodule MyGear.Router do
use Antikythera.Router
static_prefix "/static"
websocket "/ws"
get "/foo" , Hello, :exact_match
post "/foo/:a/:b" , Hello, :placeholders
put "/foo/bar/*w", Hello, :wildcard
end
Then the following requests are routed as:
- `GET "/foo"` => `MyGear.Controller.Hello.exact_match/1` is invoked with `path_matches`: `%{}`
- `POST "/foo/bar/baz"` => `MyGear.Controller.Hello.placeholders/1` is invoked with `path_matches`: `%{a: "bar", b: "baz"}`
- `PUT "/foo/bar/abc/def/ghi"` => `MyGear.Controller.Hello.wildcard/1` is invoked with `path_matches`: `%{w: "abc/def/ghi"}`
Note that
- Each controller action is expected to receive a `Antikythera.Conn` struct and returns a `Antikythera.Conn` struct.
- `Antikythera.Conn` struct has a field `request` which is a `Antikythera.Request` struct.
- Matched segments are URL-decoded and stored in `path_matches` field in `Antikythera.Request`.
If the result of URL-decoding is a nonprintable binary, the request is rejected.
## Websocket endpoint
To enable websocket interaction with clients, you must first define `MyGear.Websocket` module.
See `Antikythera.Websocket` for more details about websocket handler module.
Then invoke `websocket/1` macro in your router.
websocket "/ws_path_pattern"
The path pattern may have placeholders in the same way as normal routes.
GET request with appropriate headers to this path will initialize a websocket connection using the HTTP 1.1 upgrade mechanism.
If your gear does not interact with clients via websocket, simply don't invoke `websocket/1` macro in your router.
## Static file serving
You can serve your static assets by placing them under `/priv/static` directory in your gear project.
The endpoint to be used can be specified by `static_prefix/1` macro.
For example, if you add
static_prefix "/assets"
to your router, you can download `/priv/static/html/index.html` file by sending GET request to the path `/assets/html/index.html`.
If you don't need to serve static assets, just don't call `static_prefix/1` macro in your router.
Currently, static assets served in this way are NOT automatically gzip-compressed,
even if the `accept-encoding: gzip` request header is set.
It is recommended to use CDN to deliver large static assets in production.
See also `Antikythera.Asset` for usage of CDN in delivery of static assets.
## Web requests and gear-to-gear (g2g) requests
Antikythera treats both web requests and g2g requests in basically the same way.
This means that if you define a route in your gear one can send request to the route using both HTTP and g2g communication.
If you want to define a route that can be accessible only via g2g communication, specify `from: :gear` option.
get "/foo", Hello, :action1, from: :gear
post "/bar", Hello, :action2, from: :gear
Similarly passing `from: :web` makes the route accessible only from web request.
When dealing with multiple routes, `only_from_web/1` and `only_from_gear/1` macros can be used.
For example, the following routes definition is the same as above one.
only_from_gear do
get "/foo", Hello, :action1
post "/bar", Hello, :action2
end
## Reverse routing
To generate URL path of a route (e.g. a link in HTML), you will want to refer to the route's path.
For this purpose you can specify `:as` option.
For example, you have the following router module
defmodule MyGear.Router do
use Antikythera.Router
get "/foo/:a/:b/*c", Hello, :placeholders, as: :myroute
end
By writing this the router automatically defines a function `myroute_path/4`,
which receives segments that fill placeholders and an optional map for query parameters.
MyGear.Router.myroute_path("segment_a", "segment_b", ["wildcard", "part"])
=> "/foo/segment_a/segment_b/wildcard/part
MyGear.Router.myroute_path("segment_a", "segment_b", ["wildcard", "part"], %{"query" => "param"})
=> "/foo/segment_a/segment_b/wildcard/part?query=param
Reverse routing helper functions automatically URI-encode all given arguments.
If websocket endpoint is enabled, you can get its path with `MyGear.Router.websocket_path/0`.
Also if static file serving is enabled, path prefix for static files can be obtained by `MyGear.Router.static_prefix/0`.
"""
alias Antikythera.Router.Impl
defmacro __using__(_) do
quote do
import Antikythera.Router
Module.register_attribute(__MODULE__, :antikythera_web_routes , accumulate: true)
Module.register_attribute(__MODULE__, :antikythera_gear_routes, accumulate: true)
Module.put_attribute(__MODULE__, :from_option, nil)
@before_compile Antikythera.Router
end
end
defmacro __before_compile__(%Macro.Env{module: module}) do
web_routing_source = Module.get_attribute(module, :antikythera_web_routes ) |> Enum.reverse()
gear_routing_source = Module.get_attribute(module, :antikythera_gear_routes) |> Enum.reverse()
routing_quotes(module, web_routing_source, gear_routing_source) ++ reverse_routing_quotes(web_routing_source, gear_routing_source)
end
defp routing_quotes(module, web_source, gear_source) do
Impl.generate_route_function_clauses(module, :web, web_source) ++ Impl.generate_route_function_clauses(module, :gear, gear_source)
end
defp reverse_routing_quotes(web_source, gear_source) do
alias Antikythera.Router.Reverse
Enum.uniq(web_source ++ gear_source)
|> Enum.reject(fn {_verb, _path, _controller, _action, opts} -> is_nil(opts[:as]) end)
|> Enum.map(fn {_verb, path, _controller, _action, opts} -> Reverse.define_path_helper(opts[:as], path) end)
end
for from <- [:web, :gear] do
defmacro unquote(:"only_from_#{from}")(do: block) do
current_from = unquote(from)
quote do
if @from_option, do: raise "nested invocation of `only_from_*` is not allowed"
@from_option unquote(current_from)
unquote(block)
@from_option nil
end
end
end
for verb <- Antikythera.Http.Method.all() do
defmacro unquote(verb)(path, controller, action, opts \\ []) do
%Macro.Env{module: router_module} = __CALLER__
add_route(router_module, unquote(verb), path, controller, action, opts)
end
end
defp add_route(router_module, verb, path, controller_given, action, opts) do
quote bind_quoted: [r_m: router_module, verb: verb, path: path, c_g: controller_given, action: action, opts: opts] do
controller = Antikythera.Router.fully_qualified_controller_module(r_m, c_g, opts)
from_grouped = Module.get_attribute(__MODULE__, :from_option)
from_per_route = opts[:from]
if from_grouped && from_per_route, do: raise "using :from option within `only_from_*` block is not allowed"
opts_without_from_option = Keyword.delete(opts, :from)
routing_info = {verb, path, controller, action, opts_without_from_option}
case from_grouped || from_per_route do
:web -> @antikythera_web_routes routing_info
:gear -> @antikythera_gear_routes routing_info
nil ->
@antikythera_web_routes routing_info
@antikythera_gear_routes routing_info
end
end
end
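# Note (added comment): `bind_quoted` evaluates the routing data once in the
# caller's module, where the @antikythera_web_routes/@antikythera_gear_routes
# attributes registered in __using__/1 are accumulating.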
def fully_qualified_controller_module(router_module, controller, opts) do
if opts[:websocket?] do
controller
else
[
Module.split(router_module) |> hd(),
"Controller",
Macro.expand(controller, __ENV__), # `{:__aliases__, meta, atoms}` must be expanded
] |> Module.concat() # Executed during compilation; `Module.concat/1` causes no problem
end
end
defmacro websocket(path, opts \\ []) do
%Macro.Env{module: router_module} = __CALLER__
ws_module = Module.split(router_module) |> hd() |> Module.concat("Websocket") # during compilation, it's safe to call `Module.concat/2`
quote do
get unquote(path), unquote(ws_module), :connect, [only_from: :web, websocket?: true] ++ unquote(opts)
end
end
defmacro static_prefix(prefix) do
quote bind_quoted: [prefix: prefix] do
if prefix =~ ~R|\A(/[0-9A-Za-z.~_-]+)+\z| do
def static_prefix(), do: unquote(prefix)
else
raise "invalid path prefix given to `static_prefix/1`: #{prefix}"
end
end
end
end
|
lib/web/router/router.ex
| 0.858807
| 0.464112
|
router.ex
|
starcoder
|
defmodule ExConfig.Type.EldapFilter do
@moduledoc """
LDAP filter parser for Erlang `:eldap` library.
"""
use ExConfig.Type
alias ExConfig.Type.EldapFilter.Parser
defstruct []
@type result() :: term()
@impl true
def handle(data, _opts), do: do_handle(data)
@doc """
Parses the human-readable representation of LDAP filter to the internal format
used by `:eldap` Erlang library.
## Example
iex> ExConfig.Type.EldapFilter.parse("(&(givenName=John)(sn=Doe))")
{:ok,
{:and,
[
equalityMatch: {:AttributeValueAssertion, 'givenName', 'John'},
equalityMatch: {:AttributeValueAssertion, 'sn', 'Doe'}
]}}
"""
@spec parse(String.t) :: {:ok, result} | {:error, String.t}
def parse(str) when byte_size(str) > 0, do: do_handle(str)
@spec parse!(String.t) :: result | no_return
def parse!(str) when byte_size(str) > 0 do
case parse(str) do
{:ok, result} -> result
{:error, reason} -> raise(reason)
end
end
@doc false
@spec error(:bad_data, any) :: {:error, String.t}
def error(:bad_data, data), do: {:error, "Bad LDAP filter: '#{data}'"}
@spec do_handle(String.t) :: {:ok, result} | {:error, String.t}
defp do_handle(data) when byte_size(data) > 0 do
case Parser.filter(data) do
{:ok, [result], _, _, _, _} -> {:ok, result}
_ -> error(:bad_data, data)
end
end
defp do_handle(data), do: error(:bad_data, data)
end
defmodule ExConfig.Type.EldapFilter.Parser do
@moduledoc """
LDAP filter parser helpers.
References: RFC 2254, RFC 2251, https://ldap.com
"""
import NimbleParsec
to_list = &map(&1, {:binary, :bin_to_list, []})
single_chl_tag = &unwrap_and_tag(to_list.(&1), &2)
attr = utf8_string([?A..?Z, ?a..?z, ?0..?9, ?-, ?;, ?.], min: 1)
value = utf8_string([not: ?*, not: ?(, not: ?), not: 0], min: 1)
b_par = string("(")
e_par = string(")")
#---------------------------------------------------------
equal = string("=") |> replace(:equalityMatch)
approx = string("~=") |> replace(:approxMatch)
greater = string(">=") |> replace(:greaterOrEqual)
less = string("<=") |> replace(:lessOrEqual)
filtertype = choice([equal, approx, greater, less])
simple =
empty()
|> concat(to_list.(attr))
|> concat(filtertype)
|> concat(to_list.(value))
|> lookahead(e_par)
|> reduce({:simple_item_eldap, []})
defp simple_item_eldap([attr, type, value]),
do: apply(:eldap, type, [attr, value])
#---------------------------------------------------------
present =
empty()
|> concat(to_list.(attr))
|> ignore(string("=*"))
|> lookahead(e_par)
|> reduce({:present_item_eldap, []})
defp present_item_eldap([attr]), do: :eldap.present(attr)
#---------------------------------------------------------
sub_any = single_chl_tag.(value, :any) |> ignore(string("*"))
substring =
empty()
|> concat(to_list.(attr))
|> ignore(string("="))
|> optional(single_chl_tag.(value, :initial))
|> ignore(string("*"))
|> repeat(sub_any)
|> optional(single_chl_tag.(value, :final))
|> lookahead(e_par)
|> reduce({:substring_item_eldap, []})
defp substring_item_eldap([attr | subs]), do: :eldap.substrings(attr, subs)
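# Sketch (added comment, traced from the combinators above): "(cn=Ja*es)"
# parses roughly to :eldap.substrings('cn', initial: 'Ja', final: 'es'),
# while "(cn=*)" is handled by the `present` combinator above instead.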
#---------------------------------------------------------
ext_attr = single_chl_tag.(attr, :type)
ext_dn = replace(string(":dn"), {:dnAttributes, true})
ext_rule = ignore(string(":")) |> concat(single_chl_tag.(attr, :matchingRule))
ext_value = single_chl_tag.(value, :value)
extensible =
choice([
# attr [":dn"] [":" matchingrule] ":=" value
ext_attr
|> optional(ext_dn)
|> optional(ext_rule)
|> ignore(string(":="))
|> concat(ext_value)
|> lookahead(e_par),
# [":dn"] ":" matchingrule ":=" value
optional(ext_dn)
|> concat(ext_rule)
|> ignore(string(":="))
|> concat(ext_value)
|> lookahead(e_par),
])
|> reduce({:extensibleitem_eldap, []})
defp extensibleitem_eldap(attrs) do
{value, attrs} = Keyword.pop(attrs, :value)
:eldap.extensibleMatch(value, attrs)
end
#---------------------------------------------------------
item = choice([simple, present, substring, extensible])
container =
choice([
replace(string("&"), :and) |> repeat(parsec(:filter)),
replace(string("|"), :or ) |> repeat(parsec(:filter)),
replace(string("!"), :not) |> parsec(:filter),
])
|> reduce({:container_eldap, []})
defp container_eldap([:and | rest]), do: :eldap.and(rest)
defp container_eldap([:or | rest]), do: :eldap.or (rest)
defp container_eldap([:not, rest]), do: :eldap.not(rest)
filtercomp = choice([container, item])
defparsec :filter,
empty()
|> ignore(b_par)
|> concat(filtercomp)
|> ignore(e_par)
end
|
lib/ex_config/type/eldap_filter.ex
| 0.874218
| 0.483405
|
eldap_filter.ex
|
starcoder
|
defmodule AWS.ECR do
@moduledoc """
Amazon Elastic Container Registry
Amazon Elastic Container Registry (Amazon ECR) is a managed container image
registry service.
Customers can use the familiar Docker CLI, or their preferred client, to push,
pull, and manage images. Amazon ECR provides a secure, scalable, and reliable
registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR
supports private repositories with resource-based permissions using IAM so that
specific users or Amazon EC2 instances can access repositories and images.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Amazon ECR",
api_version: "2015-09-21",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "api.ecr",
global?: false,
protocol: "json",
service_id: "ECR",
signature_version: "v4",
signing_name: "ecr",
target_prefix: "AmazonEC2ContainerRegistry_V20150921"
}
end
@doc """
Checks the availability of one or more image layers in a repository.
When an image is pushed to a repository, each image layer is checked to verify
if it has been uploaded before. If it has been uploaded, then the image layer is
skipped.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def batch_check_layer_availability(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchCheckLayerAvailability", input, options)
end
@doc """
Deletes a list of specified images within a repository.
Images are specified with either an `imageTag` or `imageDigest`.
You can remove a tag from an image by specifying the image's tag in your
request. When you remove the last tag from an image, the image is deleted from
your repository.
You can completely delete an image (and all of its tags) by specifying the
image's digest in your request.
"""
def batch_delete_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDeleteImage", input, options)
end
@doc """
Gets detailed information for an image.
Images are specified with either an `imageTag` or `imageDigest`.
When an image is pulled, the BatchGetImage API is called once to retrieve the
image manifest.
"""
def batch_get_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchGetImage", input, options)
end
@doc """
Informs Amazon ECR that the image layer upload has completed for a specified
registry, repository name, and upload ID.
You can optionally provide a `sha256` digest of the image layer for data
validation purposes.
When an image is pushed, the CompleteLayerUpload API is called once for each new
image layer to verify that the upload has completed.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def complete_layer_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CompleteLayerUpload", input, options)
end
@doc """
Creates a repository.
For more information, see [Amazon ECR Repositories](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def create_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateRepository", input, options)
end
@doc """
Deletes the lifecycle policy associated with the specified repository.
"""
def delete_lifecycle_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLifecyclePolicy", input, options)
end
@doc """
Deletes the registry permissions policy.
"""
def delete_registry_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRegistryPolicy", input, options)
end
@doc """
Deletes a repository.
If the repository contains images, you must either delete all images in the
repository or use the `force` option to delete the repository.
"""
def delete_repository(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRepository", input, options)
end
@doc """
Deletes the repository policy associated with the specified repository.
"""
def delete_repository_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteRepositoryPolicy", input, options)
end
@doc """
Returns the scan findings for the specified image.
"""
def describe_image_scan_findings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeImageScanFindings", input, options)
end
@doc """
Returns metadata about the images in a repository.
Beginning with Docker version 1.9, the Docker client compresses image layers
before pushing them to a V2 Docker registry. The output of the `docker images`
command shows the uncompressed image size, so it may return a larger image size
than the image sizes returned by `DescribeImages`.
"""
def describe_images(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeImages", input, options)
end
@doc """
Describes the settings for a registry.
The replication configuration for a repository can be created or updated with
the `PutReplicationConfiguration` API action.
"""
def describe_registry(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRegistry", input, options)
end
@doc """
Describes image repositories in a registry.
"""
def describe_repositories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeRepositories", input, options)
end
@doc """
Retrieves an authorization token.
An authorization token represents your IAM authentication credentials and can be
used to access any Amazon ECR registry that your IAM principal has access to.
The authorization token is valid for 12 hours.
The `authorizationToken` returned is a base64-encoded string that can be decoded
and used in a `docker login` command to authenticate to a registry. The AWS CLI
offers a `get-login-password` command that simplifies the login process. For
more information, see [Registry Authentication](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth)
in the *Amazon Elastic Container Registry User Guide*.
"""
def get_authorization_token(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAuthorizationToken", input, options)
end
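# Sketch (added comment): decoding the token from a successful response.
# The response shape below follows the documented ECR JSON API; the field
# names are from AWS, while the variable names are illustrative.
#
#   {:ok, %{"authorizationData" => [%{"authorizationToken" => token} | _]}, _http} =
#     AWS.ECR.get_authorization_token(client, %{})
#
#   [user, password] = token |> Base.decode64!() |> String.split(":", parts: 2)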
@doc """
Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer.
You can only get URLs for image layers that are referenced in an image.
When an image is pulled, the GetDownloadUrlForLayer API is called once per image
layer that is not already cached.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def get_download_url_for_layer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetDownloadUrlForLayer", input, options)
end
@doc """
Retrieves the lifecycle policy for the specified repository.
"""
def get_lifecycle_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLifecyclePolicy", input, options)
end
@doc """
Retrieves the results of the lifecycle policy preview request for the specified
repository.
"""
def get_lifecycle_policy_preview(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetLifecyclePolicyPreview", input, options)
end
@doc """
Retrieves the permissions policy for a registry.
"""
def get_registry_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRegistryPolicy", input, options)
end
@doc """
Retrieves the repository policy for the specified repository.
"""
def get_repository_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRepositoryPolicy", input, options)
end
@doc """
Notifies Amazon ECR that you intend to upload an image layer.
When an image is pushed, the InitiateLayerUpload API is called once per image
layer that has not already been uploaded. Whether or not an image layer has been
uploaded is determined by the BatchCheckLayerAvailability API action.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def initiate_layer_upload(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "InitiateLayerUpload", input, options)
end
@doc """
Lists all the image IDs for the specified repository.
You can filter images based on whether or not they are tagged by using the
`tagStatus` filter and specifying either `TAGGED`, `UNTAGGED` or `ANY`. For
example, you can filter your results to return only `UNTAGGED` images and then
pipe that result to a `BatchDeleteImage` operation to delete them. Or, you can
filter your results to return only `TAGGED` images to list all of the tags in
your repository.
"""
def list_images(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListImages", input, options)
end
@doc """
List the tags for an Amazon ECR resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Creates or updates the image manifest and tags associated with an image.
When an image is pushed and all new image layers have been uploaded, the
PutImage API is called once to create or update the image manifest and the tags
associated with the image.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def put_image(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutImage", input, options)
end
@doc """
Updates the image scanning configuration for the specified repository.
"""
def put_image_scanning_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutImageScanningConfiguration", input, options)
end
@doc """
Updates the image tag mutability settings for the specified repository.
For more information, see [Image Tag Mutability](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def put_image_tag_mutability(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutImageTagMutability", input, options)
end
@doc """
Creates or updates the lifecycle policy for the specified repository.
For more information, see [Lifecycle Policy Template](https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html).
"""
def put_lifecycle_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutLifecyclePolicy", input, options)
end
@doc """
Creates or updates the permissions policy for your registry.
A registry policy is used to specify permissions for another AWS account and is
used when configuring cross-account replication. For more information, see
[Registry permissions](https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def put_registry_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutRegistryPolicy", input, options)
end
@doc """
Creates or updates the replication configuration for a registry.
The existing replication configuration for a repository can be retrieved with
the `DescribeRegistry` API action. The first time the
PutReplicationConfiguration API is called, a service-linked IAM role is created
in your account for the replication process. For more information, see [Using Service-Linked Roles for Amazon
ECR](https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html)
in the *Amazon Elastic Container Registry User Guide*.
When configuring cross-account replication, the destination account must grant
the source account permission to replicate. This permission is controlled using
a registry permissions policy. For more information, see `PutRegistryPolicy`.
"""
def put_replication_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutReplicationConfiguration", input, options)
end
@doc """
Applies a repository policy to the specified repository to control access
permissions.
For more information, see [Amazon ECR Repository Policies](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def set_repository_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetRepositoryPolicy", input, options)
end
@doc """
Starts an image vulnerability scan.
An image scan can only be started once per day on an individual image. This
limit includes if an image was scanned on initial push. For more information,
see [Image Scanning](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def start_image_scan(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartImageScan", input, options)
end
@doc """
Starts a preview of a lifecycle policy for the specified repository.
This allows you to see the results before associating the lifecycle policy with
the repository.
"""
def start_lifecycle_policy_preview(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartLifecyclePolicyPreview", input, options)
end
@doc """
Adds specified tags to a resource with the specified ARN.
Existing tags on a resource are not changed if they are not specified in the
request parameters.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Deletes specified tags from a resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Uploads an image layer part to Amazon ECR.
When an image is pushed, each new image layer is uploaded in parts. The maximum
size of each image layer part can be 20971520 bytes (or about 20MB). The
UploadLayerPart API is called once for each new image layer part.
This operation is used by the Amazon ECR proxy and is not generally used by
customers for pulling and pushing images. In most cases, you should use the
`docker` CLI to pull, tag, and push images.
"""
def upload_layer_part(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UploadLayerPart", input, options)
end
end
|
lib/aws/generated/ecr.ex
| 0.884495
| 0.491578
|
ecr.ex
|
starcoder
|
defmodule Neo4j.Sips.Transaction do
@moduledoc """
This module is the main implementation for running Cypher commands using
transactions. It is using the transactional HTTP endpoint for Cypher and has
the ability to let you use the same transaction across multiple HTTP requests.
Every Cypher operation is executed in a transaction.
Example:
test "execute statements in an open transaction" do
conn = Neo4j.tx_begin(Neo4j.conn)
books = Neo4j.query(conn, "CREATE (b:Book {title:\"The Game Of Trolls\"}) return b")
assert {:ok, rows} = books
assert List.first(rows)["b"]["title"] == "The Game Of Trolls"
assert {:ok, conn} = Neo4j.tx_rollback(conn)
assert String.length(conn.commit_url) == 0
end
To do:
- let the user override the default TX timeout; the default timeout is 60 seconds.
- improve the errors handling
- Reset transaction timeout of an open transaction
- add support for returning results in graph format
"""
alias Neo4j.Sips.Connection
alias Neo4j.Sips.Utils
require Logger
# URL suffix used for composing Neo4j transactional endpoints
@commit "/commit"
@doc """
Begins a new transaction. If there is no need to keep a
transaction open across multiple HTTP requests, you can begin a transaction,
execute statements, and commit with just a single HTTP request.
"""
@spec tx_begin(Neo4j.Sips.Connection) :: Neo4j.Sips.Connection
def tx_begin(conn) do
case Connection.send(:post, conn.transaction_url) do
{:ok, response} ->
Map.put(conn, :commit_url, String.replace(response["commit"], ~r{/commit}, ""))
{:error, reason} -> {:error, List.first(reason)}
end
end
@spec tx_rollback(Neo4j.Sips.Connection) :: Neo4j.Sips.Connection
def tx_rollback(conn) do
case Connection.send(:delete, conn.commit_url) do
{:ok, _response} -> {:ok, Map.put(conn, :commit_url, "")}
{:error, reason} ->
case reason do
{:error, :invalid} -> {:error, "invalid url: #{conn.commit_url}"}
_ -> {:error, List.first(reason)}
end
end
end
@doc """
commit an open transaction
"""
@spec tx_commit(Neo4j.Sips.Connection) :: Neo4j.Sips.Response
def tx_commit(conn) do
tx_commit(conn, "")
end
@doc """
send a list of cypher commands to the server. Each command will have this form:
{query, params}, where the query is a valid Cypher command and the params are a
map of optional parameters.
"""
@spec tx_commit(Neo4j.Sips.Connection, [{String.t, map}]) :: Neo4j.Sips.Response
def tx_commit(conn, statements) when is_list(statements) do
Connection.send(:post, commit_url(conn), Utils.neo4j_statements(statements, conn.options))
end
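# Usage sketch (added comment; statement tuples as described in the @doc above):
#
#   conn
#   |> tx_commit([
#     {"CREATE (b:Book {title: {title}}) RETURN b", %{title: "Elixir in Action"}},
#     {"MATCH (b:Book) RETURN count(b) AS n", %{}}
#   ])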
@doc """
send a single cypher command to the server, and an optional map of parameters
"""
@spec tx_commit(Neo4j.Sips.Connection, String.t, Map.t) :: Neo4j.Sips.Response
def tx_commit(conn, statement, params \\ %{}) do
Connection.send(:post, commit_url(conn), Utils.neo4j_statements([{statement, params}], conn.options))
end
@doc """
Same as `tx_commit/3`, but raises a `Neo4j.Sips.Error` on failure.
"""
@spec tx_commit!(Neo4j.Sips.Connection, String.t, Map.t) :: Neo4j.Sips.Response
def tx_commit!(conn, query, params \\ %{}) do
case tx_commit(conn, query, params) do
{:error, reason} -> raise Neo4j.Sips.Error, code: reason["code"],
message: reason["message"]
{:ok, response} -> response
end
end
@doc """
This differs from the same-named function in the Query module: the `/commit` suffix is always appended here, because `tx_commit` always commits.
"""
def commit_url(conn) do
if( String.length(conn.commit_url) > 0, do: conn.commit_url, else: conn.transaction_url) <> @commit
end
end
|
lib/neo4j_sips/transaction.ex
| 0.86411
| 0.535159
|
transaction.ex
|
starcoder
|
defmodule AWS.Cognito do
@moduledoc """
Amazon Cognito
Amazon Cognito is a web service that delivers scoped temporary credentials
to mobile devices and other untrusted environments. Amazon Cognito uniquely
identifies a device and supplies the user with a consistent identity over
the lifetime of an application.
Using Amazon Cognito, you can enable authentication with one or more
third-party identity providers (Facebook, Google, or Login with Amazon),
and you can also choose to support unauthenticated access from your app.
Cognito delivers a unique identifier for each user and acts as an OpenID
token provider trusted by AWS Security Token Service (STS) to access
temporary, limited-privilege AWS credentials.
To provide end-user credentials, first make an unsigned call to `GetId`. If
the end user is authenticated with one of the supported identity providers,
set the `Logins` map with the identity provider token. `GetId` returns a
unique identifier for the user.
Next, make an unsigned call to `GetCredentialsForIdentity`. This call
expects the same `Logins` map as the `GetId` call, as well as the
`IdentityID` originally returned by `GetId`. Assuming your identity pool
has been configured via the `SetIdentityPoolRoles` operation,
`GetCredentialsForIdentity` will return AWS credentials for your use. If
your pool has not been configured with `SetIdentityPoolRoles`, or if you
want to follow legacy flow, make an unsigned call to `GetOpenIdToken`,
which returns the OpenID token necessary to call STS and retrieve AWS
credentials. This call expects the same `Logins` map as the `GetId` call,
as well as the `IdentityID` originally returned by `GetId`. The token
returned by `GetOpenIdToken` can be passed to the STS operation
[AssumeRoleWithWebIdentity](http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html)
to retrieve AWS credentials.
If you want to use Amazon Cognito in an Android, iOS, or Unity application,
you will probably want to make API calls via the AWS Mobile SDK. To learn
more, see the [AWS Mobile SDK Developer
Guide](http://docs.aws.amazon.com/mobile/index.html).
"""
@doc """
Creates a new identity pool. The identity pool is a store of user identity
information that is specific to your AWS account. The limit on identity
pools is 60 per account. The keys for `SupportedLoginProviders` are as
follows:
<ul> <li> Facebook: `graph.facebook.com`
</li> <li> Google: `accounts.google.com`
</li> <li> Amazon: `www.amazon.com`
</li> <li> Twitter: `api.twitter.com`
</li> <li> Digits: `www.digits.com`
</li> </ul> You must use AWS Developer credentials to call this API.
"""
def create_identity_pool(client, input, options \\ []) do
request(client, "CreateIdentityPool", input, options)
end
@doc """
Deletes identities from an identity pool. You can specify a list of 1-60
identities that you want to delete.
You must use AWS Developer credentials to call this API.
"""
def delete_identities(client, input, options \\ []) do
request(client, "DeleteIdentities", input, options)
end
@doc """
Deletes an identity pool. Once a pool is deleted, users will not be able to
authenticate with the pool.
You must use AWS Developer credentials to call this API.
"""
def delete_identity_pool(client, input, options \\ []) do
request(client, "DeleteIdentityPool", input, options)
end
@doc """
Returns metadata related to the given identity, including when the identity
was created and any associated linked logins.
You must use AWS Developer credentials to call this API.
"""
def describe_identity(client, input, options \\ []) do
request(client, "DescribeIdentity", input, options)
end
@doc """
Gets details about a particular identity pool, including the pool name, ID,
description, creation date, and current number of users.
You must use AWS Developer credentials to call this API.
"""
def describe_identity_pool(client, input, options \\ []) do
request(client, "DescribeIdentityPool", input, options)
end
@doc """
Returns credentials for the provided identity ID. Any provided logins will
be validated against supported login providers. If the token is for
cognito-identity.amazonaws.com, it will be passed through to AWS Security
Token Service with the appropriate role for the token.
This is a public API. You do not need any credentials to call this API.
"""
def get_credentials_for_identity(client, input, options \\ []) do
request(client, "GetCredentialsForIdentity", input, options)
end
@doc """
Generates (or retrieves) a Cognito ID. Supplying multiple logins will
create an implicit linked account.
This is a public API. You do not need any credentials to call this API.
"""
def get_id(client, input, options \\ []) do
request(client, "GetId", input, options)
end
@doc """
Gets the roles for an identity pool.
You must use AWS Developer credentials to call this API.
"""
def get_identity_pool_roles(client, input, options \\ []) do
request(client, "GetIdentityPoolRoles", input, options)
end
@doc """
Gets an OpenID token, using a known Cognito ID. This known Cognito ID is
returned by `GetId`. You can optionally add additional logins for the
identity. Supplying multiple logins creates an implicit link.
The OpenId token is valid for 15 minutes.
This is a public API. You do not need any credentials to call this API.
"""
def get_open_id_token(client, input, options \\ []) do
request(client, "GetOpenIdToken", input, options)
end
@doc """
Registers (or retrieves) a Cognito `IdentityId` and an OpenID Connect token
for a user authenticated by your backend authentication process. Supplying
multiple logins will create an implicit linked account. You can only
specify one developer provider as part of the `Logins` map, which is linked
to the identity pool. The developer provider is the "domain" by which
Cognito will refer to your users.
You can use `GetOpenIdTokenForDeveloperIdentity` to create a new identity
and to link new logins (that is, user credentials issued by a public
provider or developer provider) to an existing identity. When you want to
create a new identity, the `IdentityId` should be null. When you want to
associate a new login with an existing authenticated/unauthenticated
identity, you can do so by providing the existing `IdentityId`. This API
will create the identity in the specified `IdentityPoolId`.
You must use AWS Developer credentials to call this API.
"""
def get_open_id_token_for_developer_identity(client, input, options \\ []) do
request(client, "GetOpenIdTokenForDeveloperIdentity", input, options)
end
@doc """
Lists the identities in a pool.
You must use AWS Developer credentials to call this API.
"""
def list_identities(client, input, options \\ []) do
request(client, "ListIdentities", input, options)
end
@doc """
Lists all of the Cognito identity pools registered for your account.
You must use AWS Developer credentials to call this API.
"""
def list_identity_pools(client, input, options \\ []) do
request(client, "ListIdentityPools", input, options)
end
@doc """
Retrieves the `IdentityID` associated with a `DeveloperUserIdentifier` or
the list of `DeveloperUserIdentifier`s associated with an `IdentityId` for
an existing identity. Either `IdentityID` or `DeveloperUserIdentifier` must
not be null. If you supply only one of these values, the other value will
be searched in the database and returned as a part of the response. If you
supply both, `DeveloperUserIdentifier` will be matched against
`IdentityID`. If the values are verified against the database, the response
returns both values and is the same as the request. Otherwise a
`ResourceConflictException` is thrown.
You must use AWS Developer credentials to call this API.
"""
def lookup_developer_identity(client, input, options \\ []) do
request(client, "LookupDeveloperIdentity", input, options)
end
@doc """
Merges two users having different `IdentityId`s, existing in the same
identity pool, and identified by the same developer provider. You can use
this action to request that discrete users be merged and identified as a
single user in the Cognito environment. Cognito associates the given source
user (`SourceUserIdentifier`) with the `IdentityId` of the
`DestinationUserIdentifier`. Only developer-authenticated users can be
merged. If the users to be merged are associated with the same public
provider, but as two different users, an exception will be thrown.
You must use AWS Developer credentials to call this API.
"""
def merge_developer_identities(client, input, options \\ []) do
request(client, "MergeDeveloperIdentities", input, options)
end
@doc """
Sets the roles for an identity pool. These roles are used when making calls
to `GetCredentialsForIdentity` action.
You must use AWS Developer credentials to call this API.
"""
def set_identity_pool_roles(client, input, options \\ []) do
request(client, "SetIdentityPoolRoles", input, options)
end
@doc """
Unlinks a `DeveloperUserIdentifier` from an existing identity. Unlinked
developer users will be considered new identities next time they are seen.
If, for a given Cognito identity, you remove all federated identities as
well as the developer user identifier, the Cognito identity becomes
inaccessible.
You must use AWS Developer credentials to call this API.
"""
def unlink_developer_identity(client, input, options \\ []) do
request(client, "UnlinkDeveloperIdentity", input, options)
end
@doc """
Unlinks a federated identity from an existing account. Unlinked logins will
be considered new identities next time they are seen. Removing the last
linked login will make this identity inaccessible.
This is a public API. You do not need any credentials to call this API.
"""
def unlink_identity(client, input, options \\ []) do
request(client, "UnlinkIdentity", input, options)
end
@doc """
Updates an identity pool.
You must use AWS Developer credentials to call this API.
"""
def update_identity_pool(client, input, options \\ []) do
request(client, "UpdateIdentityPool", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "cognito-identity"}
host = get_host("cognito-identity", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSCognitoIdentityService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/cognito.ex
| 0.88754
| 0.616474
|
cognito.ex
|
starcoder
|
defmodule Gateway.Kafka do
@moduledoc """
Produce/send Kafka messages.
## About the Kafka Integration
In order to scale horizontally, [Kafka Consumer
Groups](https://kafka.apache.org/documentation/#distributionimpl) are used.
[Brod](https://github.com/klarna/brod), which is the library used for communicating
with Kafka, has its client supervised by `Gateway.Kafka.Sup`, which also takes care
of the group subscriber. It uses delays between restarts, in order to delay
reconnects in the case of connection errors.
`Gateway.Kafka.Sup` is itself supervised by `Gateway.Kafka.SupWrapper`. The
wrapper's sole purpose is to allow the application to come up even if there is not a
single broker online. Without it, the failure to connect to any broker would
propagate all the way to the Phoenix application, bringing it down in the process.
Having the wrapper makes the application startup more reliable.
The consumer setup is done in `Gateway.Kafka.GroupSubscriber`; take a look at its
moduledoc for more information. Finally, `Gateway.Kafka.MessageHandler` hosts the
code for the actual processing of incoming messages.
"""
use Gateway.Config, [:brod_client_id, :log_topic]
require Logger
alias Gateway.Utils.Jwt
alias Gateway.ApiProxy.Proxy
@doc """
Log proxied API calls to Kafka.
Among other data, the log message includes the payload, the JWT jti and the current
timestamp. Messages are produced to the Kafka broker synchronously.
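## Example
An illustrative call from the proxy layer (`route` and `conn` stand for the route
map and the `Plug.Conn` available there; a custom producer function can be
injected for tests):
:ok = Gateway.Kafka.log_proxy_api_call(route, conn)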
"""
@type producer_sync_t :: (any, any, any, any, any -> :ok | {:error, any})
@spec log_proxy_api_call(Proxy.route_map, %Plug.Conn{}, producer_sync_t) :: :ok | {:error, any}
def log_proxy_api_call(route, conn, produce_sync \\ &:brod.produce_sync/5) do
claims = extract_claims!(conn)
username = Map.fetch!(claims, "username")
jti =
case Map.get(claims, "jti") do
nil ->
Logger.warn("jti not found in claims (#{inspect claims})")
nil
jti -> jti
end
message =
%{
id: UUID.uuid4(),
username: username,
jti: jti,
type: "PROXY_API_CALL",
version: "1.0",
timestamp: Timex.now |> Timex.to_unix,
level: 0,
payload: %{
service_def: inspect(route),
request_path: conn.request_path,
remote_ip: conn.remote_ip |> format_ip,
},
}
message_json = message |> Poison.encode!
# If topic does not exist, it will be created automatically, provided the server is
# configured that way. However, this call then returns with {:error, :LeaderNotAvailable},
# as at that point there won't be a partition leader yet.
conf = config()
:ok = produce_sync.(
conf.brod_client_id,
conf.log_topic,
_partition = &compute_kafka_partition/4,
_key = username,
_value = message_json
)
rescue
err ->
case err do
%KeyError{key: "username", term: claims} ->
Logger.warn("""
A username is required for publishing to the right Kafka topic, \
but no such field is found in the given claims: #{inspect claims}
""")
_ ->
Logger.error("""
Failed to log API call: #{inspect err}
ROUTE=#{inspect route}
CONN=#{inspect conn}
""")
end
{:error, err}
end
@spec extract_claims!(%Plug.Conn{}) :: Jwt.claim_map
defp extract_claims!(conn) do
# we assume there is exactly one valid token:
[token] =
conn
|> Plug.Conn.get_req_header("authorization")
|> Stream.filter(&Jwt.valid?/1)
|> Enum.take(1)
{:ok, claims} = Jwt.decode(token)
claims
end
@spec format_ip({integer, integer, integer, integer}) :: String.t
defp format_ip(ip_tuple) do
ip_tuple
|> Tuple.to_list
|> Enum.map(&Integer.to_string/1)
|> Enum.join(".")
end
defp compute_kafka_partition(_topic, n_partitions, key, _value) do
partition =
key
|> Murmur.hash_x86_32
|> abs
|> rem(n_partitions)
{:ok, partition}
end
end
|
lib/gateway/kafka.ex
| 0.822403
| 0.50592
|
kafka.ex
|
starcoder
|
defmodule Grax.ValidationError do
@moduledoc """
Raised when the validation of a Grax fails.
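## Example
A minimal illustration of the default message produced by `message/1` below:
iex> Grax.ValidationError.exception() |> Exception.message()
"validation failed"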
"""
defexception [:errors, :context]
@type t :: %__MODULE__{errors: list, context: any}
def exception(opts \\ []) do
errors = Keyword.get(opts, :errors, []) |> List.wrap()
context = Keyword.get(opts, :context)
%__MODULE__{errors: errors, context: context}
end
def message(validation_error) do
"validation failed" <>
if(validation_error.context, do: " in #{inspect(validation_error.context)}", else: "") <>
if Enum.empty?(validation_error.errors) do
""
else
"""
:
- #{Enum.map_join(validation_error.errors, "\n- ", fn {property, error} -> "#{property}: #{Exception.message(error)}" end)}
"""
end
end
def add_error(%__MODULE__{} = validation_error, property, error) do
%__MODULE__{validation_error | errors: [{property, error} | validation_error.errors]}
end
end
defmodule Grax.Schema.TypeError do
@moduledoc """
Raised when a property value doesn't match the specified type during decoding from RDF.
"""
defexception [:message, :type, :value]
def exception(opts) do
type = Keyword.fetch!(opts, :type)
value = Keyword.fetch!(opts, :value)
msg = opts[:message] || "value #{inspect(value)} does not match type #{inspect(type)}"
%__MODULE__{message: msg, type: type, value: value}
end
end
defmodule Grax.Schema.CardinalityError do
@moduledoc """
Raised when the number of property values doesn't match the specified cardinality during decoding from RDF.
"""
defexception [:message, :cardinality, :value]
def exception(opts) do
cardinality = Keyword.fetch!(opts, :cardinality)
value = Keyword.fetch!(opts, :value)
msg = opts[:message] || "#{inspect(value)} does not match cardinality #{inspect(cardinality)}"
%__MODULE__{message: msg, cardinality: cardinality, value: value}
end
end
defmodule Grax.Schema.InvalidProperty do
@moduledoc """
Raised when accessing a property that is not defined on a schema.
"""
defexception [:message, :property]
def exception(opts) do
property = Keyword.fetch!(opts, :property)
msg = opts[:message] || "undefined property #{inspect(property)}"
%__MODULE__{message: msg, property: property}
end
end
defmodule Grax.InvalidIdError do
@moduledoc """
Raised when a Grax has an invalid subject id.
"""
defexception [:message, :id]
def exception(opts) do
id = Keyword.fetch!(opts, :id)
msg = opts[:message] || "invalid subject id: #{inspect(id)}"
%__MODULE__{message: msg, id: id}
end
end
defmodule Grax.InvalidValueError do
@moduledoc """
Raised when an invalid literal is encountered during decoding from RDF.
"""
defexception [:message, :value]
def exception(opts) do
value = Keyword.fetch!(opts, :value)
msg = opts[:message] || "invalid value: #{inspect(value)}"
%__MODULE__{message: msg, value: value}
end
end
defmodule Grax.InvalidResourceTypeError do
@moduledoc """
Raised when a linked resource doesn't match any of the specified classes.
"""
defexception [:message, :type, :resource_types]
def exception(opts) do
type = Keyword.fetch!(opts, :type)
resource_types = Keyword.fetch!(opts, :resource_types) |> List.wrap()
msg =
opts[:message] ||
"invalid type of linked resource: " <>
case type do
:no_match -> "none of the types #{inspect(resource_types)} matches"
:multiple_matches -> "multiple matches for types #{inspect(resource_types)}"
end
%__MODULE__{message: msg, type: type, resource_types: resource_types}
end
end
|
lib/grax/exceptions.ex
| 0.852629
| 0.486454
|
exceptions.ex
|
starcoder
|
defmodule Infer.Audio do
@moduledoc """
Audio type matchers based on the [magic number](https://en.wikipedia.org/wiki/Magic_number_(programming))
"""
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a midi.
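## Examples
A binary beginning with the `MThd` magic bytes matches (constructed inline here rather than read from a file):
iex> Infer.Audio.midi?(<<0x4D, 0x54, 0x68, 0x64>>)
true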
"""
@spec midi?(binary()) :: boolean()
def midi?(<<0x4D, 0x54, 0x68, 0x64, _rest::binary>>), do: true
def midi?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a mp3.
## Examples
iex> binary = File.read!("test/audio/sample.mp3")
iex> Infer.Audio.mp3?(binary)
true
"""
@spec mp3?(binary()) :: boolean()
def mp3?(<<0x49, 0x44, 0x33, _rest::binary>>), do: true
def mp3?(<<0xFF, 0xFB, _rest::binary>>), do: true
def mp3?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a m4a.
"""
@spec m4a?(binary()) :: boolean()
def m4a?(<<_data::binary-size(4), 0x66, 0x74, 0x79, 0x70, 0x4D, 0x34, 0x41, _rest::binary>>), do: true
def m4a?(<<0x4D, 0x34, 0x41, 0x20, _rest::binary>>), do: true
def m4a?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a ogg.
"""
@spec ogg?(binary()) :: boolean()
def ogg?(<<0x4F, 0x67, 0x67, 0x53, _rest::binary>>), do: true
def ogg?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a flac.
"""
@spec flac?(binary()) :: boolean()
def flac?(<<0x66, 0x4C, 0x61, 0x43, _rest::binary>>), do: true
def flac?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a wav.
"""
@spec wav?(binary()) :: boolean()
def wav?(<<0x52, 0x49, 0x46, 0x46, _data::binary-size(4), 0x57, 0x41, 0x56, 0x45, _rest::binary>>), do: true
def wav?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a amr.
"""
@spec amr?(binary()) :: boolean()
def amr?(<<0x23, 0x21, 0x41, 0x4D, 0x52, 0x0A, _rest::binary>>), do: true
def amr?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's a aac.
"""
@spec aac?(binary()) :: boolean()
def aac?(<<0xFF, 0xF1, _rest::binary>>), do: true
def aac?(<<0xFF, 0xF9, _rest::binary>>), do: true
def aac?(_binary), do: false
@doc """
Takes the binary file contents as arguments. Returns `true` if it's an aiff.
"""
@spec aiff?(binary()) :: boolean()
def aiff?(<<0x46, 0x4F, 0x52, 0x4D, _data::binary-size(4), 0x41, 0x49, 0x46, 0x46, _rest::binary>>), do: true
def aiff?(_binary), do: false
end
|
lib/matchers/audio.ex
| 0.883142
| 0.558809
|
audio.ex
|
starcoder
|
defmodule AWS.EBS do
@moduledoc """
You can use the Amazon Elastic Block Store (Amazon EBS) direct APIs to create
EBS snapshots, write data directly to your snapshots, read data on your
snapshots, and identify the differences or changes between two snapshots.
If you’re an independent software vendor (ISV) who offers backup services for
Amazon EBS, the EBS direct APIs make it more efficient and cost-effective to
track incremental changes on your EBS volumes through snapshots. This can be
done without having to create new volumes from snapshots, and then use Amazon
Elastic Compute Cloud (Amazon EC2) instances to compare the differences.
You can create incremental snapshots directly from data on-premises into EBS
volumes and the cloud to use for quick disaster recovery. With the ability to
write and read snapshots, you can write your on-premises data to an EBS snapshot
during a disaster. Then after recovery, you can restore it back to AWS or
on-premises from the snapshot. You no longer need to build and maintain complex
mechanisms to copy data to and from Amazon EBS.
This API reference provides detailed information about the actions, data types,
parameters, and errors of the EBS direct APIs. For more information about the
elements that make up the EBS direct APIs, and examples of how to use them
effectively, see [Accessing the Contents of an EBS Snapshot](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-accessing-snapshot.html)
in the *Amazon Elastic Compute Cloud User Guide*. For more information about the
supported AWS Regions, endpoints, and service quotas for the EBS direct APIs,
see [Amazon Elastic Block Store Endpoints and Quotas](https://docs.aws.amazon.com/general/latest/gr/ebs-service.html) in the
*AWS General Reference*.
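## Example
A minimal sketch using one of the functions defined below (the snapshot ID is an
illustrative placeholder; `client` is assumed to be an already-configured
`%AWS.Client{}`):
AWS.EBS.list_snapshot_blocks(client, "snap-0123456789abcdef0")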
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2019-11-02",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "ebs",
global?: false,
protocol: "rest-json",
service_id: "EBS",
signature_version: "v4",
signing_name: "ebs",
target_prefix: nil
}
end
@doc """
Seals and completes the snapshot after all of the required blocks of data have
been written to it.
Completing the snapshot changes the status to `completed`. You cannot write new
blocks to a snapshot after it has been completed.
"""
def complete_snapshot(%Client{} = client, snapshot_id, input, options \\ []) do
url_path = "/snapshots/completion/#{URI.encode(snapshot_id)}"
{headers, input} =
[
{"ChangedBlocksCount", "x-amz-ChangedBlocksCount"},
{"Checksum", "x-amz-Checksum"},
{"ChecksumAggregationMethod", "x-amz-Checksum-Aggregation-Method"},
{"ChecksumAlgorithm", "x-amz-Checksum-Algorithm"}
]
|> Request.build_params(input)
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Returns the data in a block in an Amazon Elastic Block Store snapshot.
"""
def get_snapshot_block(%Client{} = client, block_index, snapshot_id, block_token, options \\ []) do
url_path = "/snapshots/#{URI.encode(snapshot_id)}/blocks/#{URI.encode(block_index)}"
headers = []
query_params = []
query_params =
if !is_nil(block_token) do
[{"blockToken", block_token} | query_params]
else
query_params
end
options =
Keyword.put(
options,
:response_header_parameters,
[
{"x-amz-Checksum", "Checksum"},
{"x-amz-Checksum-Algorithm", "ChecksumAlgorithm"},
{"x-amz-Data-Length", "DataLength"}
]
)
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns information about the blocks that are different between two Amazon
Elastic Block Store snapshots of the same volume/snapshot lineage.
"""
def list_changed_blocks(
%Client{} = client,
second_snapshot_id,
first_snapshot_id \\ nil,
max_results \\ nil,
next_token \\ nil,
starting_block_index \\ nil,
options \\ []
) do
url_path = "/snapshots/#{URI.encode(second_snapshot_id)}/changedblocks"
headers = []
query_params = []
query_params =
if !is_nil(starting_block_index) do
[{"startingBlockIndex", starting_block_index} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"pageToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(first_snapshot_id) do
[{"firstSnapshotId", first_snapshot_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns information about the blocks in an Amazon Elastic Block Store snapshot.
"""
def list_snapshot_blocks(
%Client{} = client,
snapshot_id,
max_results \\ nil,
next_token \\ nil,
starting_block_index \\ nil,
options \\ []
) do
url_path = "/snapshots/#{URI.encode(snapshot_id)}/blocks"
headers = []
query_params = []
query_params =
if !is_nil(starting_block_index) do
[{"startingBlockIndex", starting_block_index} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"pageToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Writes a block of data to a snapshot.
If the specified block contains data, the existing data is overwritten. The
target snapshot must be in the `pending` state.
Data written to a snapshot must be aligned with 512-byte sectors.
"""
def put_snapshot_block(%Client{} = client, block_index, snapshot_id, input, options \\ []) do
url_path = "/snapshots/#{URI.encode(snapshot_id)}/blocks/#{URI.encode(block_index)}"
{headers, input} =
[
{"Checksum", "x-amz-Checksum"},
{"ChecksumAlgorithm", "x-amz-Checksum-Algorithm"},
{"DataLength", "x-amz-Data-Length"},
{"Progress", "x-amz-Progress"}
]
|> Request.build_params(input)
query_params = []
options =
Keyword.put(
options,
:response_header_parameters,
[
{"x-amz-Checksum", "Checksum"},
{"x-amz-Checksum-Algorithm", "ChecksumAlgorithm"}
]
)
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Creates a new Amazon EBS snapshot.
The new snapshot enters the `pending` state after the request completes.
After creating the snapshot, use [
PutSnapshotBlock](https://docs.aws.amazon.com/ebs/latest/APIReference/API_PutSnapshotBlock.html)
to write blocks of data to the snapshot.
"""
def start_snapshot(%Client{} = client, input, options \\ []) do
url_path = "/snapshots"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
end
|
lib/aws/generated/ebs.ex
| 0.822439
| 0.582194
|
ebs.ex
|
starcoder
|
defmodule Absinthe.Type.Interface do
@moduledoc """
A defined interface type that represent a list of named fields and their
arguments.
Fields on an interface have the same rules as fields on an
`Absinthe.Type.Object`.
If an `Absinthe.Type.Object` lists an interface in its `:interfaces` entry,
it guarantees that it defines the same fields and arguments that the
interface does.
Because sometimes it's necessary for the interface to determine the implementing type of
a resolved object, you must either:
* Provide a `:resolve_type` function on the interface
* Provide a `:is_type_of` function on each implementing type
```
interface :named_entity do
field :name, :string
resolve_type fn
%{age: _}, _ -> :person
%{employee_count: _}, _ -> :business
_, _ -> nil
end
end
object :person do
field :name, :string
field :age, :string
interface :named_entity
end
object :business do
field :name, :string
field :employee_count, :integer
interface :named_entity
end
```
"""
use Absinthe.Introspection.Kind
alias Absinthe.Type
alias Absinthe.Schema
@typedoc """
* `:name` - The name of the interface type. Should be a TitleCased `binary`. Set automatically.
* `:description` - A nice description for introspection.
* `:fields` - A map of `Absinthe.Type.Field` structs. See `Absinthe.Schema.Notation.field/1`.
* `:args` - A map of `Absinthe.Type.Argument` structs. See `Absinthe.Schema.Notation.arg/2`.
* `:resolve_type` - A function used to determine the implementing type of a resolved object. See also `Absinthe.Type.Object`'s `:is_type_of`.
The `:resolve_type` function will be passed two arguments; the object whose type needs to be identified, and the `Absinthe.Execution` struct providing the full execution context.
The `__private__` and `:__reference__` keys are for internal use.
"""
@type t :: %__MODULE__{
name: binary,
description: binary,
fields: map,
identifier: atom,
resolve_type: (any, Absinthe.Resolution.t() -> atom | nil),
__private__: Keyword.t(),
__reference__: Type.Reference.t()
}
defstruct name: nil,
description: nil,
fields: nil,
identifier: nil,
resolve_type: nil,
__private__: [],
__reference__: nil,
field_imports: []
def build(%{attrs: attrs}) do
fields =
(attrs[:fields] || [])
|> Type.Field.build()
|> Type.Object.handle_imports(attrs[:field_imports])
attrs = Keyword.put(attrs, :fields, fields)
quote do
%unquote(__MODULE__){unquote_splicing(attrs)}
end
end
@spec resolve_type(Type.Interface.t(), any, Absinthe.Resolution.t(), Keyword.t()) :: Type.t() | nil
def resolve_type(type, obj, env, opts \\ [lookup: true])
def resolve_type(
%{resolve_type: nil, __reference__: %{identifier: ident}},
obj,
%{schema: schema},
opts
) do
implementors = Schema.implementors(schema, ident)
type_name =
Enum.find(implementors, fn
%{is_type_of: nil} ->
false
type ->
type.is_type_of.(obj)
end)
if opts[:lookup] do
Absinthe.Schema.lookup_type(schema, type_name)
else
type_name
end
end
def resolve_type(%{resolve_type: resolver}, obj, %{schema: schema} = env, opts) do
case resolver.(obj, env) do
nil ->
nil
ident when is_atom(ident) ->
if opts[:lookup] do
Absinthe.Schema.lookup_type(schema, ident)
else
ident
end
end
end
@doc """
Whether the interface (or implementors) are correctly configured to resolve
objects.
"""
@spec type_resolvable?(Schema.t(), t) :: boolean
def type_resolvable?(schema, %{resolve_type: nil} = iface) do
Schema.implementors(schema, iface)
|> Enum.all?(& &1.is_type_of)
end
def type_resolvable?(_, %{resolve_type: _}) do
true
end
@doc false
@spec member?(t, Type.t()) :: boolean
def member?(%{__reference__: %{identifier: ident}}, %{interfaces: ifaces}) do
ident in ifaces
end
def member?(_, _) do
false
end
@spec implements?(Type.Interface.t(), Type.Object.t(), Type.Schema.t()) :: boolean
def implements?(interface, type, schema) do
covariant?(interface, type, schema)
end
defp covariant?(%wrapper{of_type: inner_type1}, %wrapper{of_type: inner_type2}, schema) do
covariant?(inner_type1, inner_type2, schema)
end
defp covariant?(%{name: name}, %{name: name}, _schema) do
true
end
defp covariant?(%Type.Interface{fields: ifields}, %{fields: type_fields}, schema) do
Enum.all?(ifields, fn {field_ident, ifield} ->
case Map.get(type_fields, field_ident) do
nil ->
false
field ->
covariant?(ifield.type, field.type, schema)
end
end)
end
defp covariant?(nil, _, _), do: false
defp covariant?(_, nil, _), do: false
defp covariant?(itype, type, schema) when is_atom(itype) do
itype = schema.__absinthe_type__(itype)
covariant?(itype, type, schema)
end
defp covariant?(itype, type, schema) when is_atom(type) do
type = schema.__absinthe_type__(type)
covariant?(itype, type, schema)
end
end
|
lib/absinthe/type/interface.ex
| 0.877431
| 0.888469
|
interface.ex
|
starcoder
|
defmodule Advent20.Bags do
@moduledoc """
Day 7: Handy Haversacks
"""
# Parse the bag input
defp parse_bags(bag_input) do
bag_input
|> String.split("\n", trim: true)
|> Stream.map(&String.replace(&1, ~r/ bags?/, ""))
|> Stream.map(&String.replace(&1, ~r/\.$/, ""))
|> Stream.map(&String.split(&1, " contain "))
|> Enum.into(%{}, fn [title, bags_contained] -> {title, parse_bags_contained(bags_contained)} end)
end
# when a bag contains no other bags, it states "no other bags"
defp parse_bags_contained("no other"), do: []
# parse e.g. "2 golden" into [2, "golden"] (the " bags" suffixes were already stripped in parse_bags/1)
defp parse_bags_contained(bags) do
bags
|> String.split(", ", trim: true)
|> Enum.map(&Regex.run(~r/(\d+) (.+)$/, &1, capture: :all_but_first))
|> Enum.map(fn [count, title] -> [String.to_integer(count), title] end)
end
@doc """
Part 1: How many bag colors can eventually contain at least one shiny gold bag?
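## Examples
A minimal illustration (both "light red" and "bright white" bags can eventually contain a shiny gold bag):
iex> input = "light red bags contain 1 bright white bag.\nbright white bags contain 1 shiny gold bag.\nshiny gold bags contain no other bags.\n"
iex> Advent20.Bags.bags_eventually_containing_one_shiny_gold_bag(input)
2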
"""
def bags_eventually_containing_one_shiny_gold_bag(bag_input) do
# We just remove the count from the contained bags for simplicity
all_bags_without_count = parse_bags(bag_input) |> remove_count_from_subbags()
all_bags_without_count
|> Enum.filter(&contains_shiny_gold_bag?(&1, all_bags_without_count))
|> Enum.count()
end
defp remove_count_from_subbags(bags) do
Enum.into(bags, %{}, fn {title, subbags} ->
{title, Enum.map(subbags, fn [_count, title] -> title end)}
end)
end
defp contains_shiny_gold_bag?({_title, bags_contained}, all_bags) do
if "shiny gold" in bags_contained do
true
else
all_bags
|> Map.take(bags_contained)
|> Enum.find(false, &contains_shiny_gold_bag?(&1, all_bags))
end
end
@doc """
Part 2: How many individual bags are required inside your single shiny gold bag?
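## Examples
A minimal illustration (a shiny gold bag holding two empty dark red bags):
iex> input = "shiny gold bags contain 2 dark red bags.\ndark red bags contain no other bags.\n"
iex> Advent20.Bags.bags_inside_a_shiny_gold_bag(input)
2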
"""
def bags_inside_a_shiny_gold_bag(bag_input) do
all_bags = parse_bags(bag_input)
shiny_gold_bag_deps = Map.fetch!(all_bags, "shiny gold")
count_bags(shiny_gold_bag_deps, all_bags)
end
defp count_bags([], _all_bags), do: 0
defp count_bags([[count, title] | tail], all_bags) do
next_dep = Map.fetch!(all_bags, title)
count + count * count_bags(next_dep, all_bags) + count_bags(tail, all_bags)
end
end
|
lib/advent20/07_bags.ex
| 0.713332
| 0.453262
|
07_bags.ex
|
starcoder
|
defmodule OMG.Utils.HttpRPC.Validator.Base do
@moduledoc """
Implements simple validation engine with basic validators provided and allows to chain them
to make more comprehensive one.
"""
alias OMG.Utils.HttpRPC.Encoding
@type validation_error_t() :: {:error, {:validation_error, binary(), any()}}
# Creates a named chain of basic validators (an alias) for easier use.
# IMPORTANT: Alias can use already defined validators, not other aliases (no backtracking)
@aliases %{
address: [:hex, length: 20],
hash: [:hex, length: 32],
signature: [:hex, length: 65],
pos_integer: [:integer, greater: 0],
non_neg_integer: [:integer, greater: -1]
}
@doc """
Validates value of given key in the map with provided list of validators.
First validator list is preprocessed which replaces aliases with its definitions.
Then value is fetched from the map and each validator is run passing a tuple
where first element is a value and second validation error from previous validator.
If all validators succeed on the value the second element is empty list (no validation errors).
Last result of the validation is translated to {:ok, value} or error.
## Examples
* `expect(args, "arg_name", [:integer, greater: 1000])`
Validates a positive integer greater than 1000
* `expect(args, "arg_name", [:integer, :optional])`
Validates an integer value; when the `arg_name` key is missing, `{:ok, nil}` is returned
* `expect(args, "arg_name", [:optional, :integer])`
NOTE: **invalid order**; it's the same as just `:integer`.
To validate optional integer values, use `[:integer, :optional]`.
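Concrete, runnable illustrations of the above (derived from the validators in this module):
iex> OMG.Utils.HttpRPC.Validator.Base.expect(%{"n" => 1234}, "n", [:integer, greater: 1000])
{:ok, 1234}
iex> OMG.Utils.HttpRPC.Validator.Base.expect(%{}, "n", [:integer, :optional])
{:ok, nil}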
"""
@spec expect(map(), atom() | binary(), atom() | list()) :: {:ok, any()} | validation_error_t()
def expect(map, key, atom) when is_atom(atom), do: expect(map, key, [atom])
def expect(map, key, opts) do
opts
|> replace_aliases()
|> Enum.reduce(
get(map, key),
&validate/2
)
|> case do
{val, []} -> {:ok, val}
{_, [err | _]} -> error(key, err)
end
end
@doc """
Creates custom validation error
"""
@spec error(binary(), any()) :: validation_error_t()
def error(parent_name, {:validation_error, child_name, reason}),
do: error(parent_name <> "." <> child_name, reason)
def error(param_name, reason) when is_binary(param_name),
do: {:error, {:validation_error, param_name, reason}}
@doc """
Unwraps elements from the results list: `[{:ok, elt} | {:error, any()}]` or returns the first error
"""
@spec all_success_or_error([{:ok, any()} | {:error, any()}]) :: list() | {:error, any()}
def all_success_or_error(result_list) do
with nil <- Enum.find(result_list, &(:error == Kernel.elem(&1, 0))),
do: Enum.map(result_list, fn {:ok, elt} -> elt end)
end
@doc """
`integer` function is an example of basic validator used by the engine.
Validators are passed to the `expect` function in `opts` parameter as a keyword list.
Each validator expects a tuple, where first element is value of specified `key` in `map`
possibly processed by previous validators in `opts` list. Second element is a validator list
which fails on the value.
It depends on validator but usually if some previous validator returns error on value, others
just pass the error through and do not add themselves to the list.
"""
@spec integer({any(), list()}) :: {any(), list()}
def integer({_, [_ | _]} = err), do: err
def integer({val, []} = acc) when is_integer(val), do: acc
def integer({val, []}), do: {val, [:integer]}
@spec optional({any(), list()}) :: {any(), list()}
def optional({val, _}) when val in [:missing, nil], do: {nil, []}
def optional(acc), do: acc
@spec optional({any(), list()}, atom()) :: {any(), list()}
def optional({val, _}, true) when val in [:missing, nil], do: {nil, []}
def optional(acc, _), do: acc
@spec hex({any(), list()}) :: {any(), list()}
def hex({_, [_ | _]} = err), do: err
def hex({str, []}) do
case Encoding.from_hex(str) do
{:ok, bin} ->
{bin, []}
_ ->
{str, [:hex]}
end
end
@spec length({any(), list()}, non_neg_integer()) :: {any(), list()}
def length({_, [_ | _]} = err, _len), do: err
def length({str, []}, len) when is_binary(str) do
if Kernel.byte_size(str) == len,
do: {str, []},
else: {str, length: len}
end
def length({val, []}, len), do: {val, length: len}
@spec max_length({any(), list()}, non_neg_integer()) :: {any(), list()}
def max_length({_, [_ | _]} = err, _len), do: err
def max_length({list, []}, len) when is_list(list) and length(list) <= len, do: {list, []}
def max_length({val, []}, len), do: {val, max_length: len}
@spec greater({any(), list()}, integer()) :: {any(), list()}
def greater({_, [_ | _]} = err, _b), do: err
def greater({val, []}, bound) when is_integer(val) and val > bound, do: {val, []}
def greater({val, []}, _b) when not is_integer(val), do: {val, [:integer]}
def greater({val, []}, bound), do: {val, greater: bound}
@spec lesser({any(), list()}, integer()) :: {any(), list()}
def lesser({_, [_ | _]} = err, _b), do: err
def lesser({val, []}, bound) when is_integer(val) and val < bound, do: {val, []}
def lesser({val, []}, _b) when not is_integer(val), do: {val, [:integer]}
def lesser({val, []}, bound), do: {val, lesser: bound}
@spec list({any(), list()}, function() | nil) :: {any(), list()}
def list(tuple, mapper \\ nil)
def list({_, [_ | _]} = err, _), do: err
def list({val, []}, nil) when is_list(val), do: {val, []}
def list({val, []}, mapper) when is_list(val), do: list_processor(val, mapper)
def list({val, _}, _), do: {val, [:list]}
@spec map({any(), list()}, function() | nil) :: {any(), list()}
def map(tuple, parser \\ nil)
def map({_, [_ | _]} = err, _), do: err
def map({val, []}, nil) when is_map(val), do: {val, []}
def map({val, []}, parser) when is_map(val),
do:
(case parser.(val) do
{:error, err} -> {val, [err]}
{:ok, map} -> {map, []}
end)
def map({val, _}, _), do: {val, [:map]}
defp list_processor(val, mapper) do
list_reducer = fn
{:error, map_err}, _acc -> {:halt, map_err}
{:ok, elt}, acc -> {:cont, [elt | acc]}
elt, acc -> {:cont, [elt | acc]}
end
val
|> Enum.reduce_while([], fn elt, acc -> list_reducer.(mapper.(elt), acc) end)
|> case do
list when is_list(list) ->
{Enum.reverse(list), []}
err ->
{val, [err]}
end
end
# provides initial value to the validators reducer, see: `expect`
defp get(map, key), do: {Map.get(map, key, :missing), []}
defp validate(validator, acc) when is_atom(validator), do: Kernel.apply(__MODULE__, validator, [acc])
defp validate({validator, args}, acc), do: Kernel.apply(__MODULE__, validator, [acc, args])
defp replace_aliases(validators) do
Enum.reduce(
validators,
[],
fn v, acc ->
key = validator_name(v)
pre = Map.get(@aliases, key, [v])
[_ | _] = acc ++ pre
end
)
end
defp validator_name(v) when is_atom(v), do: v
defp validator_name({v, _}), do: v
end
|
apps/omg_utils/lib/omg_utils/http_rpc/validators/base.ex
| 0.908168
| 0.672547
|
base.ex
|
starcoder
|
defmodule Saucexages.SauceBlock do
@moduledoc """
Represents a full SAUCE block, including both a SAUCE record and comments.
Used for in-memory representations of SAUCE data when reading and writing SAUCE. Can also be used to extract further metadata from SAUCE data such as font information, detailed data type and file type info, flags, and more.
Overall, a SAUCE Block serves to provide an Elixir-centric SAUCE representation and a common format and shape for working with other SAUCE related tasks and APIs. The SAUCE block stores the minimum fields for full encoding and decoding, while avoiding extra decoding or encoding steps when possible. For instance, more detailed information for file types can be obtained by using the `data_type` and `file_type` fields, but a SAUCE block avoids forcing this information on the consumer until required.
## Fields
See `Saucexages.Sauce` for a full description of each field.
The main difference in terms of fields between a SAUCE binary or SAUCE record is that the SAUCE comment block is translated and encapsulated within a SAUCE block as a `comments` field. This allows for working with comments in a more natural way as a simple list of strings. `comment_lines` is no longer required as a specific field as it can be dynamically obtained from a SAUCE block. This is done to avoid error-prone synchronization of the comment contents with the comment lines.
The other major difference is that type dependent fields are encapsulated under `media_info`. This is to distinguish between the SAUCE spec's notion of file type (an integer field) and the conceptual grouping of fields that are dependent on the combination of data type and file type. See `Saucexages.MediaInfo` for more info.
"""
alias __MODULE__, as: SauceBlock
require Saucexages.Sauce
alias Saucexages.{Sauce, SauceRecord, MediaInfo, DataType}
@enforce_keys [:version, :media_info]
@type t :: %SauceBlock {
version: String.t,
title: String.t | nil,
author: String.t | nil,
group: String.t | nil,
date: DateTime.t | nil,
media_info: MediaInfo.t(),
comments: [String.t()],
}
defstruct [
:title,
:author,
:group,
:date,
:media_info,
comments: [],
version: Sauce.sauce_version()
]
@doc """
Creates a new `SauceBlock` struct from a `MediaInfo` and an optional field enumerable.
"""
@spec new(MediaInfo.t(), Enum.t()) :: t()
def new(%MediaInfo{} = media_info, opts \\ []) do
struct(%SauceBlock{version: Sauce.sauce_version(), media_info: media_info}, opts)
end
@doc """
Creates a SAUCE block from a SAUCE record and optional list of SAUCE comments.
"""
@spec from_sauce_record(SauceRecord.t(), [String.t()]) :: t() | nil
def from_sauce_record(sauce_record, sauce_comments \\ [])
def from_sauce_record(%{version: _version, file_type: file_type, data_type: data_type} = sauce_record, sauce_comments) when is_list(sauce_comments) do
sauce_map = sauce_record |> Map.from_struct() |> Map.put(:comments, sauce_comments)
file_type_struct = MediaInfo.new(file_type, data_type, sauce_map)
new(file_type_struct, sauce_map)
end
def from_sauce_record(_sauce_record, _sauce_comments) do
nil
end
@doc """
Calculates the number of comment lines in a SAUCE block or list of comments.
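## Examples
iex> Saucexages.SauceBlock.comment_lines(%{comments: ["200 lines of blood", "80 columns of sweat"]})
2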
"""
@spec comment_lines(t()) :: non_neg_integer()
def comment_lines(%{comments: comments}) do
do_comment_lines(comments)
end
@spec comment_lines([String.t()]) :: non_neg_integer()
def comment_lines(comments) when is_list(comments) do
do_comment_lines(comments)
end
defp do_comment_lines([_ | _] = comments) do
Enum.count(comments)
end
defp do_comment_lines(_comments) do
0
end
@doc """
Returns a formatted version of comment lines using the given separator between comment lines.
## Examples
iex> Saucexages.SauceBlock.formatted_comments(["200 lines of blood", "80 columns of sweat"], ", ")
"200 lines of blood, 80 columns of sweat"
"""
@spec formatted_comments(t(), String.t()) :: String.t()
def formatted_comments(sauce_block, separator \\ "\n")
def formatted_comments(%{comments: comments}, separator) when is_list(comments) do
do_format_comments(comments, separator)
end
@spec formatted_comments([String.t()], String.t()) :: String.t()
def formatted_comments(comments, separator) when is_list(comments) do
do_format_comments(comments, separator)
end
defp do_format_comments(comments, separator) do
Enum.join(comments, separator)
end
@doc """
Adds a comment to the beginning of the SAUCE comments.
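## Examples
A minimal example, using a plain map for `media_info` as the other examples in this module do:
iex> block = %Saucexages.SauceBlock{version: "00", media_info: %{file_type: 1, data_type: 1}, comments: ["80 columns of sweat"]}
iex> Saucexages.SauceBlock.prepend_comment(block, "200 lines of blood").comments
["200 lines of blood", "80 columns of sweat"]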
"""
@spec prepend_comment(t(), String.t()) :: t()
def prepend_comment(%SauceBlock{} = sauce_block, comment) when is_binary(comment) do
case sauce_block do
%{:comments => [_ | _] = comments} ->
%{sauce_block | comments: [comment | comments]}
_ ->
Map.put(sauce_block, :comments, [comment])
end
end
@doc """
Adds comments to a SAUCE block.
"""
@spec add_comments(t(), [String.t()] | String.t()) :: t()
def add_comments(sauce_block, comments)
def add_comments(%SauceBlock{} = sauce_block, comments) when is_list(comments) do
case sauce_block do
%{:comments => [_ | _] = existing_comments} ->
%{sauce_block | comments: Enum.into(comments, existing_comments)}
_ ->
Map.put(sauce_block, :comments, comments)
end
end
def add_comments(%SauceBlock{} = sauce_block, comment) when is_binary(comment) do
add_comments(sauce_block, [comment])
end
@doc """
Removes all comments from a SAUCE block.
"""
@spec clear_comments(t()) :: t()
def clear_comments(%SauceBlock{} = sauce_block) do
Map.put(sauce_block, :comments, [])
end
@doc """
Returns the `media_type_id` for the given SAUCE block.
"""
@spec media_type_id(t()) :: MediaInfo.media_type_id()
def media_type_id(%{media_info: %{file_type: file_type, data_type: data_type}}) do
MediaInfo.media_type_id(file_type, data_type)
end
@doc """
Returns the `data_type_id` for the given SAUCE block.
"""
@spec data_type_id(t()) :: DataType.data_type_id()
def data_type_id(%{media_info: %{data_type: data_type}}) do
DataType.data_type_id(data_type)
end
@doc """
Returns a detailed map of any file type info that can be converted per-file type. Only the detailed information is returned.
Useful for editors or specialized processing.
## Examples
iex> sauce_block = %Saucexages.SauceBlock{version: "00", media_info: %{file_type: 1, data_type: 1, t_flags: 17, t_info_1: 80, t_info_2: 250, t_info_s: "IBM VGA"}}
iex> Saucexages.SauceBlock.media_details(sauce_block)
%{
ansi_flags: %Saucexages.AnsiFlags{
aspect_ratio: :modern,
letter_spacing: :none,
non_blink_mode?: true
},
character_width: 80,
data_type: 1,
file_type: 1,
font_id: :ibm_vga,
number_of_lines: 250
}
"""
@spec media_details(t()) :: map()
def media_details(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.media_details(media_info)
end
@doc """
Returns a detailed map of all SAUCE block data.
Useful for editors or specialized processing.
## Examples
iex> sauce_block = %Saucexages.SauceBlock{version: "00", title: "cheese platter", author: "No Cursor", group: "Inconsequential", date: ~D[1994-01-01], media_info: %{file_type: 1, data_type: 1, t_flags: 17, t_info_1: 80, t_info_2: 250, t_info_s: "IBM VGA"}}
iex> Saucexages.SauceBlock.details(sauce_block)
%{
ansi_flags: %Saucexages.AnsiFlags{
aspect_ratio: :modern,
letter_spacing: :none,
non_blink_mode?: true
},
author: "No Cursor",
character_width: 80,
comments: [],
data_type: 1,
data_type_id: :character,
date: ~D[1994-01-01],
file_type: 1,
font_id: :ibm_vga,
group: "Inconsequential",
media_type_id: :ansi,
name: "ANSi",
number_of_lines: 250,
title: "cheese platter",
version: "00"
}
"""
@spec details(t()) :: map()
def details(%SauceBlock{media_info: media_info} = sauce_block) do
block_details = Map.take(sauce_block, [:title, :author, :group, :date, :comments, :version])
MediaInfo.details(media_info) |> Map.merge(block_details)
end
@doc """
Returns a mapped version of the t_info_1 field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_info_1(t()) :: {atom, term()}
def t_info_1(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_info_1(media_info)
end
@doc """
Returns a mapped version of the t_info_2 field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_info_2(t()) :: {atom, term()}
def t_info_2(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_info_2(media_info)
end
@doc """
Returns a mapped version of the t_info_3 field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_info_3(t()) :: {atom, term()}
def t_info_3(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_info_3(media_info)
end
@doc """
Returns a mapped version of the t_info_4 field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_info_4(t()) :: {atom, term()}
def t_info_4(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_info_4(media_info)
end
@doc """
Returns a mapped version of the t_flags field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_flags(t()) :: {atom, term()}
def t_flags(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_flags(media_info)
end
@doc """
Returns a mapped version of the t_info_s field as a tuple containing the field type as the first element and the field value as the second element.
"""
@spec t_info_s(t()) :: {atom, term()}
def t_info_s(%SauceBlock{media_info: media_info} = _sauce_block) do
MediaInfo.t_info_s(media_info)
end
@doc """
Returns the type handle for the given SAUCE block. A type handle consists of a tuple of `{file_type, data_type}`, where there is a valid mapping between the two. Invalid types will be coerced to a type handle for `:none` by default.
"""
@spec type_handle(t()) :: MediaInfo.type_handle()
def type_handle(sauce_block) do
media_type_id(sauce_block) |> MediaInfo.type_handle()
end
end
|
lib/saucexages/sauce_block.ex
| 0.88573
| 0.688226
|
sauce_block.ex
|
starcoder
|
defmodule Telehashname do
@moduledoc """
Telehash hashname handling
https://github.com/telehash/telehash.org/blob/master/v3/hashname.md
"""
@typedoc """
Cipher Set ID
As an affordance, many functions will take a 4-byte CSID (the CSID with a prepended `cs`).
These will be normalized in most return values. It is not recommended to depend upon this
behavior.
"""
@type csid :: binary
@typedoc """
Cipher Set Key
"""
@type csk_tuple :: {csid, binary}
@typedoc """
Sort direction control
- :asc == ascending
- :dsc == descending
"""
@type sort_dir :: :asc | :dsc
@typedoc """
A list of CSKs
Maps will generally be transformed to a list of CSK tuples in the
return values.
"""
@type csk_list :: [csk_tuple] | map
@typedoc """
A list of CSIDs
"""
@type csid_list :: csk_list | [csid]
@doc """
Generate a hashname from a list of CSKs
As an affordance, an intermediates map may also be provided.
The return value is a tuple with the hashname and a map of the intermediate
values used for generation.
`nil` is returned when no valid CSKs are found in the list.
"""
@spec from_csks(csk_list, map) :: {binary, map} | nil
def from_csks(csks, im \\ %{}),
do: csks |> fill_intermediates(im) |> hash_intermediates({"", %{}})
defp fill_intermediates([], map), do: map |> ids(:asc)
defp fill_intermediates([{csid, csk} | rest], map) do
map =
case Map.fetch(map, csid) do
:error ->
map
|> Map.put(
csid,
:sha256 |> :crypto.hash(Base.decode32!(csk, bp())) |> Base.encode32(bp())
)
_ ->
map
end
fill_intermediates(rest, map)
end
@doc """
Generate a hashname from intermediates
"""
@spec from_intermediates(csk_list | map) :: {binary, map} | nil
def from_intermediates(ims), do: ims |> ids(:asc) |> hash_intermediates({"", %{}})
defp hash_intermediates([], {h, ims}) when byte_size(h) > 0, do: {h |> Base.encode32(bp()), ims}
defp hash_intermediates([], _empty_tuple), do: nil
defp hash_intermediates([{csid, im} | rest], {h, m}) do
hash = :crypto.hash(:sha256, h <> Base.decode16!(csid, bp()))
hash_intermediates(
rest,
{:crypto.hash(:sha256, hash <> Base.decode32!(im, bp())), Map.put(m, csid, im)}
)
end
@doc """
Validate and sort a CSID list
This can handle multiple forms of provided CSIDs. The return value
will be appropriate to the input parameter.
Invalid CSIDs are removed, remaining IDs are normalized and sorted
in the requested order.
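## Examples
Invalid entries are dropped and 4-byte forms are normalized:
iex> Telehashname.ids(["cs3a", "1a", "zz"], :asc)
["1a", "3a"]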
"""
@spec ids(csid_list, sort_dir) :: [csid | csk_tuple]
def ids(ids, dir \\ :dsc)
def ids(ids, dir) when is_map(ids), do: ids |> Map.to_list() |> ids(dir)
def ids(ids, dir) when is_list(ids) do
sort_func =
case dir do
:asc -> &(&1 <= &2)
:dsc -> &(&1 >= &2)
_ -> raise("Improper sort direction")
end
ids |> valid_ids([]) |> Enum.sort(sort_func)
end
@spec valid_ids(csid_list, csid_list) :: [csk_tuple | csid]
defp valid_ids([], acc), do: acc
defp valid_ids([{csid, data} | rest], acc) do
newacc =
case csid |> String.downcase() |> valid_csid do
nil -> acc
id -> [{id, data} | acc]
end
valid_ids(rest, newacc)
end
defp valid_ids([csid | rest], acc) do
newacc =
case csid |> String.downcase() |> valid_csid do
nil -> acc
id -> [id | acc]
end
valid_ids(rest, newacc)
end
@spec valid_csid(term) :: csid | nil
defp valid_csid(csid) do
csid =
if byte_size(csid) == 4 and binary_part(csid, 0, 2) == "cs",
do: binary_part(csid, 2, 2),
else: csid
if is_valid_csid?(csid), do: csid, else: nil
end
@doc """
Find the highest rated match among two CSK lists
The value is returned from the `outs` list. Selecting
which list to use for `check` and `outs` may provide
some useful information "for free."
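## Examples
For example, with plain CSID lists the matching CSID from `outs` is returned:
iex> Telehashname.best_match(["1a", "2a"], ["3a", "1a"])
"1a"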
"""
@spec best_match(csid_list, csid_list) :: csid | csk_tuple | nil
def best_match(check, outs) do
cids = ids(check)
oids = ids(outs)
find_fun_fun =
case {is_tuple_list(cids), is_tuple_list(oids)} do
{true, true} ->
fn check ->
c = elem(check, 0)
fn x -> elem(x, 0) == c end
end
{true, false} ->
fn check ->
c = elem(check, 0)
fn x -> x == c end
end
{false, true} ->
fn c ->
fn x -> elem(x, 0) == c end
end
{false, false} ->
fn c ->
fn x -> x == c end
end
end
match(cids, oids, find_fun_fun)
end
@spec is_tuple_list(list) :: boolean
defp is_tuple_list(list), do: list |> List.first() |> is_tuple
@spec match(csid_list, csid_list, function) :: csk_tuple | csid | nil
defp match([], _outs, _fff), do: nil
defp match([c | check], outs, fff) do
case Enum.find(outs, fff.(c)) do
nil -> match(check, outs, fff)
hit -> hit
end
end
@doc """
Determine if something looks like a valid hashname
Confirms form only, not validity
"""
@spec is_valid?(term) :: boolean
def is_valid?(hn) when is_binary(hn) and byte_size(hn) == 52 do
case Base.decode32(hn, bp()) do
# I think this is superfluous, but why not?
{:ok, b} ->
byte_size(b) == 32
:error ->
false
end
end
def is_valid?(_), do: false
@doc """
Determine if something looks like a valid CSID
Confirms form only, not validity. Some functions be more liberal in
what they accept, but confirming validity is always better.
"""
@spec is_valid_csid?(term) :: boolean
def is_valid_csid?(id) when is_binary(id) and byte_size(id) == 2 do
case Base.decode16(id, bp()) do
{:ok, h} -> Base.encode16(h, bp()) == id
:error -> false
end
end
def is_valid_csid?(_), do: false
# Base parameters, just so they are all used in the same way.
defp bp(), do: [case: :lower, padding: false]
end
|
lib/telehashname.ex
| 0.885749
| 0.54353
|
telehashname.ex
|
starcoder
|
defmodule Membrane.RTP.TWCCReceiver.PacketInfoStore do
@moduledoc false
# The module stores TWCC sequence numbers along with their arrival timestamps, handling sequence
# number rollovers if necessary. Stored packet info can be used for generating the statistics
# needed for assembling a TWCC feedback packet.
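# A sketch of the intended flow (illustrative; assumes `Utils.from_which_rollover/3`
# treats a `nil` base sequence number as belonging to the current rollover):
#
#   store = insert_packet_info(%__MODULE__{}, seq_num)
#   stats = get_stats(store)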
alias Membrane.Time
alias Membrane.RTP.Utils
require Bitwise
defstruct base_seq_num: nil,
max_seq_num: nil,
seq_to_timestamp: %{}
@type t :: %__MODULE__{
base_seq_num: non_neg_integer(),
max_seq_num: non_neg_integer(),
seq_to_timestamp: %{non_neg_integer() => Time.t()}
}
@type stats_t :: %{
base_seq_num: non_neg_integer(),
packet_status_count: non_neg_integer(),
receive_deltas: [Time.t() | :not_received],
reference_time: Time.t()
}
@seq_number_limit Bitwise.bsl(1, 16)
@spec empty?(__MODULE__.t()) :: boolean
def empty?(%__MODULE__{base_seq_num: base_seq_num}), do: base_seq_num == nil
@spec insert_packet_info(__MODULE__.t(), non_neg_integer()) :: __MODULE__.t()
def insert_packet_info(store, seq_num) do
arrival_ts = Time.vm_time()
{store, seq_num} = maybe_handle_rollover(store, seq_num)
%{
store
| base_seq_num: min(store.base_seq_num, seq_num) || seq_num,
max_seq_num: max(store.max_seq_num, seq_num) || seq_num,
seq_to_timestamp: Map.put(store.seq_to_timestamp, seq_num, arrival_ts)
}
end
@spec get_stats(__MODULE__.t()) :: stats_t()
def get_stats(store) do
{reference_time, receive_deltas} = make_receive_deltas(store)
packet_status_count = store.max_seq_num - store.base_seq_num + 1
%{
base_seq_num: store.base_seq_num,
packet_status_count: packet_status_count,
reference_time: reference_time,
receive_deltas: receive_deltas
}
end
defp maybe_handle_rollover(store, new_seq_num) do
%{
base_seq_num: base_seq_num,
max_seq_num: max_seq_num,
seq_to_timestamp: seq_to_timestamp
} = store
case Utils.from_which_rollover(base_seq_num, new_seq_num, @seq_number_limit) do
:current ->
{store, new_seq_num}
:next ->
{store, new_seq_num + @seq_number_limit}
:previous ->
shifted_seq_to_timestamp =
Map.new(seq_to_timestamp, fn {seq_num, timestamp} ->
{seq_num + @seq_number_limit, timestamp}
end)
store = %{
store
| base_seq_num: new_seq_num,
max_seq_num: max_seq_num + @seq_number_limit,
seq_to_timestamp: shifted_seq_to_timestamp
}
{store, new_seq_num}
end
end
defp make_receive_deltas(store) do
%{
base_seq_num: base_seq_num,
max_seq_num: max_seq_num,
seq_to_timestamp: seq_to_timestamp
} = store
# reference time has to be in 64ms resolution
# https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01#section-3.1
reference_time =
seq_to_timestamp
|> Map.fetch!(base_seq_num)
|> make_divisible_by_64ms()
receive_deltas =
base_seq_num..max_seq_num
|> Enum.map_reduce(reference_time, fn seq_num, previous_timestamp ->
case Map.get(seq_to_timestamp, seq_num) do
nil ->
{:not_received, previous_timestamp}
timestamp ->
delta = timestamp - previous_timestamp
{delta, timestamp}
end
end)
|> elem(0)
{reference_time, receive_deltas}
end
defp make_divisible_by_64ms(timestamp) do
timestamp - rem(timestamp, Time.milliseconds(64))
end
end
|
lib/membrane/rtp/twcc_receiver/packet_info_store.ex
| 0.830216
| 0.421641
|
packet_info_store.ex
|
starcoder
|
defmodule TextBasedFPS.ServerState.Rooms do
alias TextBasedFPS.{Notification, Player, Room, ServerState, Text}
@spec add_room(ServerState.t(), String.t()) :: ServerState.t()
def add_room(state, room_name) do
put_in(state.rooms[room_name], Room.new(room_name))
end
@spec add_room(ServerState.t(), String.t(), Player.key_t()) :: ServerState.t()
def add_room(state, room_name, player_key) do
state
|> remove_player_from_current_room(player_key)
|> add_room(room_name)
|> join_room!(room_name, player_key)
end
@spec get_room(ServerState.t(), String.t()) :: Room.t() | nil
def get_room(state, room_name), do: state.rooms[room_name]
@spec update_room(ServerState.t(), String.t(), function) :: ServerState.t()
def update_room(state, room_name, fun) when is_function(fun) do
room = state.rooms[room_name]
updated_room = fun.(room)
put_in(state.rooms[room_name], updated_room)
end
@spec update_room(ServerState.t(), Room.t()) :: ServerState.t()
def update_room(state, room) when is_map(room) do
put_in(state.rooms[room.name], room)
end
@spec join_room(ServerState.t(), String.t(), Player.key_t()) ::
{:ok, ServerState.t()} | {:error, ServerState.t(), :room_full}
def join_room(state, room_name, player_key) do
draft_state = remove_player_from_current_room(state, player_key)
room = draft_state.rooms[room_name]
case Room.add_player(room, player_key) do
{:ok, room} ->
state =
draft_state
|> update_room(room)
|> ServerState.Players.update_player(player_key, &Map.put(&1, :room, room_name))
{:ok, state}
{:error, _room, reason} ->
{:error, state, reason}
end
end
@spec join_room!(ServerState.t(), String.t(), Player.key_t()) :: ServerState.t()
def join_room!(state, room_name, player_key) do
case join_room(state, room_name, player_key) do
{:ok, updated_state} ->
updated_state
{:error, _state, reason} ->
raise("'#{player_key}' cannot join '#{room_name}'. Reason: #{reason}")
end
end
@spec remove_player_from_current_room(ServerState.t(), Player.key_t()) :: ServerState.t()
def remove_player_from_current_room(state, player_key) do
player = state.players[player_key]
if player do
remove_player_from_room(state, player_key, player.room)
else
state
end
end
@spec notify_room(ServerState.t(), String.t(), String.t()) :: ServerState.t()
def notify_room(state, room_name, notification_body) do
notify_room_except_player(state, room_name, nil, notification_body)
end
@spec notify_room_except_player(ServerState.t(), String.t(), Player.key_t() | nil, String.t()) ::
ServerState.t()
def notify_room_except_player(state, room_name, except_player_key, notification_body) do
notifications =
state.rooms[room_name].players
|> Enum.filter(fn {player_key, _} -> player_key != except_player_key end)
|> Enum.map(fn {player_key, _} -> Notification.new(player_key, notification_body) end)
ServerState.Notifications.add_notifications(state, notifications)
end
defp remove_player_from_room(state, _player_key, nil), do: state
defp remove_player_from_room(state, player_key, room_name) do
updated_room = state |> get_room(room_name) |> Room.remove_player(player_key)
state
|> update_room(updated_room)
|> notify_player_leaving_room(updated_room, player_key)
|> remove_room_if_empty(updated_room)
|> ServerState.Players.update_player(player_key, fn player -> Map.put(player, :room, nil) end)
end
defp remove_room_if_empty(state, room) do
if Enum.count(room.players) == 0 do
updated_rooms = Map.delete(state.rooms, room.name)
Map.put(state, :rooms, updated_rooms)
else
state
end
end
defp notify_player_leaving_room(state, room, leaving_player_key) do
leaving_player = state.players[leaving_player_key]
notify_room(state, room.name, Text.highlight("#{leaving_player.name} left the room"))
end
end
|
lib/text_based_fps/server_state/rooms.ex
| 0.671686
| 0.492371
|
rooms.ex
|
starcoder
|
defmodule Kalevala.Event.Movement do
@moduledoc """
An event to move from one room to another
"""
defstruct [:character, :direction, :reason, :room_id]
@typedoc """
A movement event
- `character` is the character performing the movement
- `direction` is one of two options, `:to` or `:from`, depending if the character
is moving `:to` the room, or moving `:from` the room
- `reason` is what will be sent to other characters in the room and displayed (to players)
- `room_id` is the room the event is intended for
"""
@type t() :: %__MODULE__{}
end
defmodule Kalevala.Event.Movement.Notice do
@moduledoc """
Event to send a notice to other characters in the room
"""
defstruct [:character, :direction, :reason]
end
defmodule Kalevala.Event.Movement.Commit do
@moduledoc """
Struct for committing movement between two rooms
"""
defstruct [:character, :to, :from, :exit_name]
end
defmodule Kalevala.Event.Movement.Abort do
@moduledoc """
Struct for aborting movement between two rooms
"""
defstruct [:character, :to, :from, :exit_name, :reason]
end
defmodule Kalevala.Event.Movement.Voting do
@moduledoc """
A voting event tracks the state of a character wishing to change rooms
"""
alias Kalevala.Event
alias Kalevala.Event.Movement.Abort
alias Kalevala.Event.Movement.Commit
defstruct [
:character,
:to,
:from,
:exit_name,
:reason,
aborted: false
]
@typedoc """
An event to allow for rooms to abort or commit the character moving.
Each room has a chance to reject movement
- `state` is an enum, one of the following atoms: `:request`, `:commit`, or `:abort`
- `character` is the character performing the action
- `to` is the room the character is going towards
- `from` is the room the character is going away from
- `exit_name` is the name of the exit that the player is using
- `reason` is an atom such as `:no_exit` for why the movement is aborted
"""
@type t() :: %__MODULE__{}
@doc """
Generate a commit event after voting has occurred
"""
def commit(event) do
%Event{
topic: Commit,
metadata: event.metadata,
data: %Commit{
character: event.data.character,
to: event.data.to,
from: event.data.from,
exit_name: event.data.exit_name
}
}
end
@doc """
Generate an abort event after voting has occurred
"""
def abort(event) do
%Event{
topic: Abort,
metadata: event.metadata,
data: %Abort{
character: event.data.character,
to: event.data.to,
from: event.data.from,
exit_name: event.data.exit_name,
reason: event.data.reason
}
}
end
end
defmodule Kalevala.Event.Movement.Request do
@moduledoc """
Character requesting to move from their current room through a named exit
A move request transitions through several stages before committing or aborting.
The character asks its current room to move through a named exit.
```
%Kalevala.Event{
topic: Kalevala.Event.Movement.Request,
data: %Kalevala.Event.Movement.Request{
character: character,
exit_name: "north"
}
}
```
The room process sends a voting event to the Zone after determining that there is
a valid exit with this name.
```
%Kalevala.Event{
topic: Kalevala.Event.Movement.Voting,
data: %Kalevala.Event.Movement.Voting{
character: character,
from: start_room_id,
to: end_room_id,
exit_name: "north"
}
}
```
The zone then asks the `to` and `from` rooms if they are OK with the character moving. Each
room will be `GenServer.call`ed to block and keep this synchronous. The room `movement/2`
callback will be called for each room, so they can vote on the movement.
`Kalevala.Event.Movement.Commit` - After both rooms agree that the player can move,
the zone sends this event to the character.
```
%Kalevala.Event{
topic: Kalevala.Event.Movement.Commit,
data: %Movement.Commit{
character: character,
from: start_room_id,
to: end_room_id,
exit_name: "north"
}
}
```
`Kalevala.Event.Movement.Abort` - If either room rejects the movement, the zone will
respond with an abort.
```
%Kalevala.Event{
topic: Kalevala.Event.Movement.Abort,
data: %Kalevala.Event.Movement.Abort{
character: character,
from: start_room_id,
to: end_room_id,
exit_name: "north",
reason: :door_locked
}
}
%Kalevala.Event{
topic: Kalevala.Event.Movement.Abort,
data: %Kalevala.Event.Movement.Abort{
character: character,
from: start_room_id,
exit_name: "north",
reason: :no_exit
}
}
```
On a commit, the player leaves the old room, and enters the new one.
```
%Kalevala.Event{
topic: Kalevala.Event.Movement,
data: %Kalevala.Event.Movement{
character: character,
direction: :to,
reason: "Player enters from the south."
}
}
%Kalevala.Event{
topic: Kalevala.Event.Movement,
data: %Kalevala.Event.Movement{
character: character,
direction: :from,
reason: "Player leaves to the north."
}
}
```
"""
defstruct [:character, :exit_name, metadata: %Kalevala.Event.Metadata{}]
@typedoc """
Signal that a character wishes to move to another location
- `character` is the character moving
- `exit_name` is the name of the exit the player wants to move through
"""
@type t() :: %__MODULE__{}
end
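# A hedged sketch of a room voting on movement with the helpers above. The
# `movement/2` callback contract lives in the room behaviour (not shown in
# this file), and `valid_exit?/2` is a hypothetical helper:
#
#     def movement(context, event) do
#       if valid_exit?(context, event.data.exit_name) do
#         Kalevala.Event.Movement.Voting.commit(event)
#       else
#         event = %{event | data: %{event.data | reason: :no_exit}}
#         Kalevala.Event.Movement.Voting.abort(event)
#       end
#     end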
|
lib/kalevala/event/movement.ex
| 0.840619
| 0.855308
|
movement.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.MultiChannelCapabilityReport do
@moduledoc """
This command is used to advertise the Generic and Specific Device Class and the supported command
classes of an End Point.
Params:
* `:end_point` - the end point capabilities are being reported about (required)
* `:dynamic?` - whether the end point is dynamic (required - true or false)
* `:generic_device_class` - the generic device class for the end point (required)
* `:specific_device_class` - the specific device class for the end point (required)
* `:command_classes` - the command classes supported by the end point (required)
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.{Command, DecodeError, CommandClasses, DeviceClasses}
alias Grizzly.ZWave.CommandClasses.MultiChannel
@type param ::
{:end_point, MultiChannel.end_point()}
| {:dynamic?, boolean()}
| {:generic_device_class, DeviceClasses.generic_device_class()}
| {:specific_device_class, DeviceClasses.specific_device_class()}
| {:command_classes, [CommandClasses.command_class()]}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :multi_channel_capability_report,
command_byte: 0x0A,
command_class: MultiChannel,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
def encode_params(command) do
end_point = Command.param!(command, :end_point)
dynamic_bit = encode_dynamic?(Command.param!(command, :dynamic?))
command_classes = Command.param!(command, :command_classes)
generic_device_class = Command.param!(command, :generic_device_class)
generic_device_class_byte = DeviceClasses.generic_device_class_to_byte(generic_device_class)
specific_device_class_byte =
DeviceClasses.specific_device_class_to_byte(
generic_device_class,
Command.param!(command, :specific_device_class)
)
<<dynamic_bit::size(1), end_point::size(7), generic_device_class_byte,
specific_device_class_byte>> <>
encode_command_classes(command_classes)
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(
<<dynamic_bit::size(1), end_point::size(7), generic_device_class_byte,
specific_device_class_byte, command_classes_binary::binary>>
) do
command_classes = decode_command_classes(command_classes_binary)
dynamic? = decode_dynamic?(dynamic_bit)
with {:ok, generic_device_class} <-
MultiChannel.decode_generic_device_class(generic_device_class_byte),
{:ok, specific_device_class} <-
MultiChannel.decode_specific_device_class(
generic_device_class,
specific_device_class_byte
) do
{:ok,
[
end_point: end_point,
dynamic?: dynamic?,
generic_device_class: generic_device_class,
specific_device_class: specific_device_class,
command_classes: command_classes
]}
else
{:error, %DecodeError{}} = error ->
error
end
end
defp encode_dynamic?(false), do: 0x00
defp encode_dynamic?(true), do: 0x01
defp decode_dynamic?(0x00), do: false
defp decode_dynamic?(0x01), do: true
defp encode_command_classes(command_classes) do
for command_class <- command_classes, into: <<>> do
<<CommandClasses.to_byte(command_class)>>
end
end
defp decode_command_classes(binary) do
for byte <- :erlang.binary_to_list(binary) do
{:ok, cc} = CommandClasses.from_byte(byte)
cc
end
end
end
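# A hedged construction/encode round trip (the device-class atoms below are
# illustrative; real values come from Grizzly.ZWave.DeviceClasses):
#
#     {:ok, cmd} =
#       Grizzly.ZWave.Commands.MultiChannelCapabilityReport.new(
#         end_point: 1,
#         dynamic?: false,
#         generic_device_class: :switch_binary,
#         specific_device_class: :power_switch_binary,
#         command_classes: [:basic]
#       )
#
#     # the first byte packs dynamic? (1 bit) and end_point (7 bits): 0x01 here
#     <<0x01, _generic, _specific, _ccs::binary>> =
#       Grizzly.ZWave.Commands.MultiChannelCapabilityReport.encode_params(cmd)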
|
lib/grizzly/zwave/commands/multi_channel_capability_report.ex
| 0.903348
| 0.498779
|
multi_channel_capability_report.ex
|
starcoder
|
defmodule Routeguide.RouteGuide.Server do
use Falco.Server, service: Routeguide.RouteGuide.Service
alias Falco.Server
alias RouteGuide.Data
@spec get_feature(Routeguide.Point.t(), Falco.Server.Stream.t()) :: Routeguide.Feature.t()
def get_feature(point, _stream) do
features = Data.fetch_features()
default_feature = Routeguide.Feature.new(location: point)
Enum.find(features, default_feature, fn feature ->
feature.location == point
end)
end
@spec list_features(Routeguide.Rectangle.t(), Falco.Server.Stream.t()) :: any
def list_features(rect, stream) do
features = Data.fetch_features()
features
|> Enum.filter(fn %{location: loc} -> in_range?(loc, rect) end)
|> Enum.each(fn feature -> Server.send_reply(stream, feature) end)
end
@spec record_route(Enumerable.t(), Falco.Server.Stream.t()) :: Routeguide.RouteSummary.t()
def record_route(req_enum, _stream) do
features = Data.fetch_features()
start_time = now_ts()
{_, distance, point_count, feature_count} =
Enum.reduce(req_enum, {nil, 0, 0, 0}, fn point,
{last, distance, point_count, feature_count} ->
point_count = point_count + 1
found_feature = Enum.find(features, fn f -> f.location == point end)
feature_count = if found_feature, do: feature_count + 1, else: feature_count
distance = if last, do: distance + calc_distance(last, point), else: distance
{point, distance, point_count, feature_count}
end)
Routeguide.RouteSummary.new(
point_count: point_count,
feature_count: feature_count,
distance: distance,
elapsed_time: now_ts() - start_time
)
end
@spec route_chat(Enumerable.t(), Falco.Server.Stream.t()) :: any
def route_chat(req_enum, stream) do
notes =
Enum.reduce(req_enum, Data.fetch_notes(), fn note, notes ->
key = serialize_location(note.location)
new_notes = Map.update(notes, key, [note], &(&1 ++ [note]))
Enum.each(new_notes[key], fn note ->
IO.inspect(note)
Server.send_reply(stream, note)
end)
new_notes
end)
Data.update_notes(notes)
end
defp in_range?(%{longitude: long, latitude: lat}, %{lo: low, hi: high}) do
left = min(low.longitude, high.longitude)
right = max(low.longitude, high.longitude)
bottom = min(low.latitude, high.latitude)
top = max(low.latitude, high.latitude)
long >= left && long <= right && lat >= bottom && lat <= top
end
defp now_ts do
DateTime.utc_now() |> DateTime.to_unix()
end
# calc_distance calculates the distance between two points using the "haversine" formula.
# This code was taken from http://www.movable-type.co.uk/scripts/latlong.html.
defp calc_distance(p1, p2) do
cord_factor = 1.0e7
r = 6_371_000.0
lat1 = (p1.latitude || 0) / cord_factor
lat2 = (p2.latitude || 0) / cord_factor
lng1 = (p1.longitude || 0) / cord_factor
lng2 = (p2.longitude || 0) / cord_factor
phi1 = to_radians(lat1)
phi2 = to_radians(lat2)
delta_phi = to_radians(lat2 - lat1)
delta_lambda = to_radians(lng2 - lng1)
a =
sqr(:math.sin(delta_phi / 2)) +
:math.cos(phi1) * :math.cos(phi2) * sqr(:math.sin(delta_lambda / 2))
c = 2 * :math.atan2(:math.sqrt(a), :math.sqrt(1 - a))
round(r * c)
end
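# Worked example (coordinates are 1e7-scaled integers, matching cord_factor):
#   p_nyc = %{latitude: 407_128_000, longitude: -740_060_000}
#   p_la = %{latitude: 340_522_000, longitude: -1_182_437_000}
#   calc_distance(p_nyc, p_la) #=> on the order of 3_940_000 meters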
defp to_radians(num) do
num * :math.pi() / 180
end
defp sqr(num) do
num * num
end
def serialize_location(p) do
"#{p.latitude} #{p.longitude}"
end
end
|
examples/route_guide/lib/server.ex
| 0.790004
| 0.467332
|
server.ex
|
starcoder
|
defmodule Grizzly.CommandClass.ThermostatSetpoint do
@moduledoc """
Module for generating the thermostat setpoint command
"""
import Bitwise
@typedoc """
Type for the different setpoint types.
Possible to send raw byte for a different setpoint type if
needed. Otherwise, `:cooling` and `:heating` atoms are the
explicitly supported setpoint types.
"""
@type setpoint_type ::
:cooling
| :heating
| :furnace
| :dry_air
| :moist_air
| :auto_changeover
| :energy_save_heating
| :energy_save_cooling
| :away_heating
| :away_cooling
| :full_power
| byte
@type setpoint_value :: non_neg_integer
@spec encode_opts(opts :: keyword) :: {:ok, byte} | {:error, :invalid_arg, any()}
def encode_opts(opts) do
if Enum.all?(Keyword.values(opts), &(&1 in 0..255)) do
encoded =
opts
|> Enum.reduce(0, fn {_, byte}, mask -> mask ||| byte end)
{:ok, encoded}
else
{:error, :invalid_arg, opts}
end
end
@spec encode_setpoint_type(setpoint_type) :: {:ok, byte} | {:error, :invalid_arg, any()}
def encode_setpoint_type(:heating), do: {:ok, 0x01}
def encode_setpoint_type(:cooling), do: {:ok, 0x02}
def encode_setpoint_type(:furnace), do: {:ok, 0x07}
def encode_setpoint_type(:dry_air), do: {:ok, 0x08}
def encode_setpoint_type(:moist_air), do: {:ok, 0x09}
def encode_setpoint_type(:auto_changeover), do: {:ok, 0x0A}
def encode_setpoint_type(:energy_save_heating), do: {:ok, 0x0B}
def encode_setpoint_type(:energy_save_cooling), do: {:ok, 0x0C}
def encode_setpoint_type(:away_heating), do: {:ok, 0x0D}
def encode_setpoint_type(:away_cooling), do: {:ok, 0x0E}
def encode_setpoint_type(:full_power), do: {:ok, 0x0F}
def encode_setpoint_type(0x00), do: {:error, :invalid_arg, 0x00}
def encode_setpoint_type(byte) when byte in 0x03..0x06, do: {:error, :invalid_arg, byte}
def encode_setpoint_type(byte), do: {:ok, byte}
@spec decode_setpoint_type(byte) :: setpoint_type
def decode_setpoint_type(0x01), do: :heating
def decode_setpoint_type(0x02), do: :cooling
def decode_setpoint_type(0x07), do: :furnace
def decode_setpoint_type(0x08), do: :dry_air
def decode_setpoint_type(0x09), do: :moist_air
def decode_setpoint_type(0x0A), do: :auto_changeover
def decode_setpoint_type(0x0B), do: :energy_save_heating
def decode_setpoint_type(0x0C), do: :energy_save_cooling
def decode_setpoint_type(0x0D), do: :away_heating
def decode_setpoint_type(0x0E), do: :away_cooling
def decode_setpoint_type(0x0F), do: :full_power
def decode_setpoint_type(byte), do: byte
end
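# Round trip for the supported values, derived from the clauses above:
#
#     {:ok, 0x01} = Grizzly.CommandClass.ThermostatSetpoint.encode_setpoint_type(:heating)
#     :heating = Grizzly.CommandClass.ThermostatSetpoint.decode_setpoint_type(0x01)
#     {:error, :invalid_arg, 0x05} =
#       Grizzly.CommandClass.ThermostatSetpoint.encode_setpoint_type(0x05)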
|
lib/grizzly/command_class/thermostat_setpoint.ex
| 0.851753
| 0.438004
|
thermostat_setpoint.ex
|
starcoder
|
defmodule Joystick do
@moduledoc """
Simple wrapper to get Linux Joystick events.
# Usage
```
iex()> {:ok, js} = Joystick.start_link(0, self())
iex()> flush()
{:joystick, %Joystick.Event{number: 1, timestamp: 1441087318, type: :axis, value: -60}}
{:joystick, %Joystick.Event{number: 4, timestamp: 1441087318, type: :axis, value: -5}}
iex()> Joystick.info(js)
%{axes: 8, buttons: 11, name: 'Microsoft X-Box One pad', version: 131328}
```
"""
use GenServer
require Logger
defmodule Event do
@moduledoc false
defstruct [:number, :timestamp, :type, :value]
@compile {:inline, decode: 1}
@doc false
def decode(%{timestamp: _, number: _, type: 0x01, value: _} = data) do
struct(__MODULE__, %{data | type: :button})
end
def decode(%{timestamp: _, number: _, type: 0x02, value: _} = data) do
struct(__MODULE__, %{data | type: :axis})
end
def decode(%{timestamp: _, number: _, type: 0x81, value: _} = data) do
# Init Button
struct(__MODULE__, %{data | type: :button})
end
def decode(%{timestamp: _, number: _, type: 0x82, value: raw_value} = data) do
# Init Axis
# must scale axis value
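# e.g. a raw value of 16_384 on a -32_767..32_767 axis becomes ~500,
# and extremes are clamped to the -999..999 range below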
value_scaled = raw_value / 32.767
value =
cond do
value_scaled > 999 -> 999
value_scaled < -999 -> -999
true -> value_scaled
end
struct(__MODULE__, %{data | type: :axis, value: value})
end
end
@doc """
Start listening to joystick events.
* `device` - a number pointing to the js file.
* for example 0 would evaluate to "/dev/input/js0"
* `listener` - pid to receive events
* `callback` - a message will be sent to `listener` with the form `{callback, joystick}`, where
`joystick` is the pid of the Joystick GenServer. The default value of `callback` is `nil`, which means the
`Joystick` pid will be returned immediately, but the GenServer will crash if a joystick connection cannot be opened.
"""
def start_link(device, listener, callback \\ nil) do
GenServer.start_link(__MODULE__, [device, listener, callback])
end
@doc "Get information about a joystick"
def info(joystick) do
GenServer.call(joystick, :info)
end
@doc """
Stop a running joystick instance.
"""
def stop(joystick, reason \\ :normal) do
GenServer.stop(joystick, reason)
end
@doc false
def init([device, listener, callback]) do
state =
if is_nil(callback) do
{:ok, res} = start_js(device)
js = get_info(res)
:ok = poll(res)
%{res: res, listener: listener, last_ts: 0, joystick: js}
else
GenServer.cast(self(), {:connect_to_joystick, device, listener, callback})
%{joystick: nil}
end
{:ok, state}
end
def handle_cast({:connect_to_joystick, device, listener, callback}, state) do
Logger.debug("Joystick attempting to connect to #{device}")
state =
case start_js(device) do
{:ok, res} ->
js = get_info(res)
:ok = poll(res)
GenServer.cast(listener, {callback, js})
%{res: res, listener: listener, last_ts: 0, joystick: js}
{:error, error} ->
Logger.warn("Joystick could not connect: #{inspect(error)}")
Logger.warn("Retrying in 1000ms.")
Process.sleep(1000)
GenServer.cast(self(), {:connect_to_joystick, device, listener, callback})
state
other ->
raise "Joystick should not have reached here: #{inspect(other)}"
end
{:noreply, state}
end
@doc false
def terminate(_, state) do
# the state may not have a :res key if the connection was never established
if Map.get(state, :res) do
stop_js(state.res)
end
end
@doc false
def handle_call(:info, _, state), do: {:reply, state.joystick, state}
@doc false
def handle_info({:select, res, _ref, :ready_input}, %{last_ts: last_ts} = state) do
{time, raw_input} = :timer.tc(fn -> Joystick.receive_input(res) end)
case raw_input do
{:error, reason} ->
{:stop, {:input_error, reason}, state}
input = %{timestamp: current_ts} when current_ts >= last_ts ->
event = {:joystick, Event.decode(input)}
send(state.listener, event)
:ok = poll(res)
# Logger.debug "Event (#{time}µs): #{inspect event}"
{:noreply, %{state | last_ts: current_ts}}
event = %{timestamp: current_ts} ->
Logger.warn("Got late event (#{time}µs): #{inspect(event)}")
:ok = poll(res)
{:noreply, %{state | last_ts: current_ts}}
end
end
@on_load :load_nif
@doc false
def load_nif do
nif_file = '#{:code.priv_dir(:joystick)}/joystick'
case :erlang.load_nif(nif_file, 0) do
:ok -> :ok
{:error, {:reload, _}} -> :ok
{:error, reason} -> Logger.warn("Failed to load nif: #{inspect(reason)}")
end
end
## These functions get replaced by the nif.
@doc false
def start_js(_device), do: do_exit_no_nif()
@doc false
def stop_js(_handle), do: do_exit_no_nif()
@doc false
def poll(_handle), do: do_exit_no_nif()
@doc false
def receive_input(_handle), do: do_exit_no_nif()
@doc false
def get_info(_handle), do: do_exit_no_nif()
## Private stuff
defp do_exit_no_nif, do: exit("nif not loaded.")
end
|
lib/joystick.ex
| 0.750964
| 0.610395
|
joystick.ex
|
starcoder
|
defmodule Crit.Assertions.Changeset do
import Crit.Assertions.Defchain
import ExUnit.Assertions
import Crit.Extras.ChangesetT, only: [errors_on: 1]
import Crit.Assertions.Map
alias Ecto.Changeset
defchain assert_valid(%Changeset{} = changeset),
do: assert changeset.valid?
defchain assert_invalid(%Changeset{} = changeset),
do: refute changeset.valid?
@doc """
The elements of `list` must be present in the `Changeset`'s changes.
The simple case just checks whether fields have been changed:
assert_changes(changeset, [:name, :tags])
Alternately, you can check that the listed keys have particular values:
assert_changes(changeset, name: "Bossie", tags: [])
"""
defchain assert_changes(%Changeset{} = changeset, list),
do: assert_fields(changeset.changes, list)
@doc """
`assert_change` can be used when only a single field is to have
been changed. Its second argument is usually an atom, but can also
be a list that's given directly to `assert_changes`.
assert_change(changeset, :name)
assert_change(changeset, name: "Bossie")
"""
def assert_change(cs, arg2) when not is_list(arg2),
do: assert_changes(cs, [arg2])
def assert_change(cs, arg2),
do: assert_changes(cs, arg2)
@doc """
The changeset must contain no changes.
"""
defchain assert_no_changes(%Changeset{} = changeset) do
changes = changeset.changes
assert changes == %{}, "Fields have changed: `#{Map.keys(changes) |> inspect}`"
end
@doc """
Require that particular fields have no changes. Unmentioned fields may
have changes. When there's only a single field, it needn't be enclosed in
a list.
assert_unchanged(changeset, :name)
assert_unchanged(changeset, [:name, :tags])
"""
defchain assert_unchanged(%Changeset{} = changeset, field) when is_atom(field) do
assert_no_typo_in_struct_key(changeset.data, field)
refute Map.has_key?(changeset.changes, field),
"Field `#{inspect field}` has changed"
end
defchain assert_unchanged(%Changeset{} = changeset, fields) when is_list(fields),
do: Enum.map fields, &(assert_unchanged changeset, &1)
@doc """
Assert that a changeset contains specific errors. In the simplest case,
it requires that the fields have at least one error, but doesn't require
any specific message:
assert_errors(changeset, [:name, :tags])
A message may also be required:
assert_errors(changeset,
name: "may not be blank",
tags: "is invalid")
The given string must be an exact match for one of the field's errors.
(It is not a failure for others to be unmentioned.)
If you want to list more than one message, enclose them in a list:
assert_errors(changeset,
name: "may not be blank",
tags: ["is invalid",
"has something else wrong"])
The list need not be a complete list of errors.
"""
defchain assert_errors(%Changeset{} = changeset, list) do
errors = errors_on(changeset)
any_error_check = fn field ->
assert Map.has_key?(errors, field),
"There are no errors for field `#{inspect field}`"
end
message_check = fn field, expected ->
any_error_check.(field)
msg = """
`#{inspect field}` is missing an error message.
expected: #{inspect expected}
actual: #{inspect errors[field]}
"""
assert expected in errors[field], msg
end
Enum.map(list, fn
field when is_atom(field) ->
assert any_error_check.(field)
{field, expected} when is_binary(expected) ->
message_check.(field, expected)
{field, expecteds} when is_list(expecteds) ->
Enum.map expecteds, &(message_check.(field, &1))
end)
end
@doc """
Like `assert_error` but reads better when there's only a single error
to be checked:
assert_error(changeset, name: "is invalid")
If the message isn't to be checked, you can use a single atom:
assert_error(changeset, :name)
"""
defchain assert_error(cs, arg2) when is_atom(arg2), do: assert_errors(cs, [arg2])
defchain assert_error(cs, arg2), do: assert_errors(cs, arg2)
@doc """
Require that none of the named fields have an associated error:
assert_error_free(changeset, [:in_service_datestring, :name])
There can also be a singleton field:
assert_error_free(changeset, :in_service_datestring)
"""
defchain assert_error_free(changeset, field) when is_atom(field),
do: assert_error_free(changeset, [field])
defchain assert_error_free(changeset, fields) do
errors = errors_on(changeset)
check = fn(field) ->
assert_no_typo_in_struct_key(changeset.data, field)
refute Map.has_key?(errors, field),
"There is an error for field `#{inspect field}`"
end
Enum.map(fields, check)
end
defchain assert_original_data(changeset, keylist) when is_list(keylist) do
assert_fields(changeset.data, keylist)
end
defchain assert_original_data(changeset, expected) do
assert changeset.data == expected
end
end
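# Because these assertions are built with `defchain`, they can be piped:
#
#     changeset
#     |> assert_valid
#     |> assert_changes(name: "Bossie")
#     |> assert_unchanged(:tags)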
|
test/support/assertions/changeset.ex
| 0.791741
| 0.601155
|
changeset.ex
|
starcoder
|
defmodule ATECC508A.DataZone do
@moduledoc """
This module handles operations on the data zone.
"""
alias ATECC508A.{Request, Transport}
@doc """
Read a slot
"""
@spec read(Transport.t(), Request.slot()) :: {:ok, binary()} | {:error, atom()}
def read(transport, slot) do
do_read(transport, slot, 0, slot_size(slot), [])
end
defp do_read(_transport, _slot, _offset, 0, data) do
result =
data
|> Enum.reverse()
|> IO.iodata_to_binary()
{:ok, result}
end
defp do_read(transport, slot, offset, left, data) when left > 32 do
addr = Request.to_data_addr(slot, offset)
case Request.read_zone(transport, :data, addr, 32) do
{:ok, part} -> do_read(transport, slot, offset + 32, left - 32, [part | data])
error -> error
end
end
defp do_read(transport, slot, offset, left, data) do
addr = Request.to_data_addr(slot, offset)
case Request.read_zone(transport, :data, addr, 4) do
{:ok, part} -> do_read(transport, slot, offset + 4, left - 4, [part | data])
error -> error
end
end
@doc """
Write a slot in the data zone.
This can use 4 byte writes if the data is not a multiple of 32 bytes. These
are only allowed under some conditions. Most notably, 4-byte writes aren't
allowed when the data zone is UNLOCKED.
"""
@spec write(Transport.t(), Request.slot(), binary()) :: :ok | {:error, atom()}
def write(transport, slot, data) do
check_data_size(slot, data)
do_write(transport, slot, 0, data)
end
@doc """
Write a slot in the data zone and pad to a multiple of 32-bytes
This is useful to get around 32-byte write limitations. The padded bytes are
set to 0.
"""
@spec write_padded(Transport.t(), Request.slot(), binary()) :: :ok | {:error, atom()}
def write_padded(transport, slot, data) do
check_data_size(slot, data)
# pad the data up to a multiple of 32
padded_data = pad_to_32(data)
do_write(transport, slot, 0, padded_data)
end
@doc """
Pad the specified data to the exact size of the slot.
"""
@spec pad_to_slot_size(Request.slot(), binary()) :: binary()
def pad_to_slot_size(slot, data) do
to_pad = slot_size(slot) - byte_size(data)
cond do
to_pad == 0 -> data
to_pad > 0 -> <<data::binary, 0::unit(8)-size(to_pad)>>
# data longer than the slot is a caller error; fail with a clear message
to_pad < 0 -> raise "Data (#{byte_size(data)} bytes) does not fit in slot #{slot}"
end
end
@doc """
Pad the passed in data to a multiple of 32-bytes
This is useful when 4-byte writes aren't allowed.
"""
@spec pad_to_32(binary()) :: binary()
def pad_to_32(data) do
case rem(byte_size(data), 32) do
0 ->
data
fraction ->
pad_count = 32 - fraction
data <> <<0::size(pad_count)-unit(8)>>
end
end
defp check_data_size(slot, data) do
byte_size(data) <= slot_size(slot) ||
raise "Invalid data size (#{byte_size(data)}) for slot #{slot} (#{slot_size(slot)})"
end
defp do_write(_transport, _slot, _offset, <<>>), do: :ok
defp do_write(transport, slot, offset, <<part::32-bytes, rest::binary>>) do
addr = Request.to_data_addr(slot, offset)
case Request.write_zone(transport, :data, addr, part) do
:ok -> do_write(transport, slot, offset + 32, rest)
error -> error
end
end
defp do_write(transport, slot, offset, <<part::4-bytes, rest::binary>>) do
addr = Request.to_data_addr(slot, offset)
case Request.write_zone(transport, :data, addr, part) do
:ok -> do_write(transport, slot, offset + 4, rest)
error -> error
end
end
@doc """
Lock the data and OTP zones.
The expected contents concatenated together for the non-private key data slots and
the OTP need to be passed for a CRC calculation. They are not
written by design. The logic is that this is a final chance before it's too
late to check that the device is programmed correctly.
"""
@spec lock(Transport.t(), ATECC508A.crc16()) :: :ok | {:error, atom()}
def lock(transport, expected_contents) do
crc = ATECC508A.CRC.crc(expected_contents)
Request.lock_zone(transport, :data, crc)
end
@doc """
Return the size in bytes of the specified slot.
"""
@spec slot_size(Request.slot()) :: pos_integer()
def slot_size(0), do: 36
def slot_size(1), do: 36
def slot_size(2), do: 36
def slot_size(3), do: 36
def slot_size(4), do: 36
def slot_size(5), do: 36
def slot_size(6), do: 36
def slot_size(7), do: 36
def slot_size(8), do: 416
def slot_size(9), do: 72
def slot_size(10), do: 72
def slot_size(11), do: 72
def slot_size(12), do: 72
def slot_size(13), do: 72
def slot_size(14), do: 72
def slot_size(15), do: 72
end
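# Padding examples derived from the functions above:
#
#     iex> ATECC508A.DataZone.pad_to_32(<<1, 2, 3>>) |> byte_size()
#     32
#     iex> ATECC508A.DataZone.pad_to_slot_size(9, <<1, 2, 3>>) |> byte_size()
#     72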
|
lib/atecc508a/data_zone.ex
| 0.857753
| 0.593521
|
data_zone.ex
|
starcoder
|
defmodule Mimic do
@moduledoc """
Mimic is a library that simplifies the usage of mocks in Elixir.
Mimic is mostly API compatible with [mox](https://hex.pm/packages/mox) but
doesn't require explicit contract checking with behaviours. It's also faster.
You're welcome.
Mimic works by copying your module out of the way and replacing it with one of
its own, which can delegate calls back to the original or to a mock function
as required.
In order to prepare a module for mocking you must call `copy/1` with the
module as an argument. We suggest that you do this in your
`test/test_helper.exs`:
```elixir
Mimic.copy(Calculator)
ExUnit.start()
```
Importantly calling `copy/1` will not change the behaviour of the module. When
writing tests you can then use `stub/3` or `expect/3` to add mocks and
assertions.
## Multi-process collaboration
Mimic supports multi-process collaboration via two mechanisms:
1. Explicit allows.
2. Global mode.
Using explicit allows is generally preferred as these stubs can be run
concurrently, whereas global mode tests must be run exclusively.
## Explicit allows
Using `allow/3` you can give other processes permission to use stubs and
expectations from where they were not defined.
```elixir
test "invokes add from a process" do
Calculator
|> expect(:add, fn x, y -> x + y end)
parent_pid = self()
spawn_link(fn ->
Calculator |> allow(parent_pid, self())
assert Calculator.add(2, 3) == 5
send parent_pid, :ok
end)
assert_receive :ok
end
```
If you are using `Task`, the expectations and stubs are automatically allowed.
## Global mode
When set in global mode any process is able to call the stubs and expectations
defined in your tests.
**Warning: If using global mode you should remove `async: true` from your tests**
Enable global mode using `set_mimic_global/1`.
```elixir
setup :set_mimic_global
setup :verify_on_exit!
test "invokes add from a task" do
Calculator
|> expect(:add, fn x, y -> x + y end)
Task.async(fn ->
assert Calculator.add(2, 3) == 5
end)
|> Task.await
end
```
"""
alias ExUnit.Callbacks
alias Mimic.{Server, VerificationError}
@doc false
defmacro __using__(_opts \\ []) do
quote do
import Mimic
setup :verify_on_exit!
end
end
@doc """
Define a stub function for a copied module.
## Arguments:
* `module` - the name of the module in which we're adding the stub.
* `function_name` - the name of the function we're stubbing.
* `function` - the function to use as a replacement.
## Raises:
* If `module` is not copied.
* If `function_name` is not publicly exported from `module` with the same arity.
## Example
iex> Calculator.add(2, 4)
6
iex> Mimic.stub(Calculator, :add, fn x, y -> x * y end)
...> Calculator.add(2, 4)
8
"""
@spec stub(module(), atom(), function()) :: module
def stub(module, function_name, function) do
arity = :erlang.fun_info(function)[:arity]
raise_if_not_copied!(module)
raise_if_not_exported_function!(module, function_name, arity)
module
|> Server.stub(function_name, arity, function)
|> validate_server_response(
"Stub cannot be called by the current process. Only the global owner is allowed."
)
end
@doc """
Replace all public functions in `module` with stubs.
The stubbed functions will raise if they are called.
## Arguments:
* `module` - The name of the module to stub.
## Raises:
* If `module` is not copied.
* If `function` is not called by the stubbing process.
## Example
iex> Mimic.stub(Calculator)
...> Calculator.add(2, 4)
** (ArgumentError) Module Calculator has not been copied. See docs for Mimic.copy/1
"""
@spec stub(module()) :: module()
def stub(module) do
raise_if_not_copied!(module)
module
|> Server.stub()
|> validate_server_response(
"Stub cannot be called by the current process. Only the global owner is allowed."
)
end
@doc """
Define a stub which must be called within an example.
This function is almost identical to `stub/3` except that the replacement
function must be called within the lifetime of the calling `pid` (i.e. the
test example).
## Arguments:
* `module` - the name of the module in which we're adding the stub.
* `function_name` - the name of the function we're stubbing.
* `function` - the function to use as a replacement.
## Raises:
* If `module` is not copied.
* If `function_name` is not publicly exported from `module` with the same
arity.
* If `function` is not called by the stubbing process.
## Example
iex> Calculator.add(2, 4)
6
iex> Mimic.expect(Calculator, :add, fn x, y -> x * y end)
...> Calculator.add(2, 4)
8
"""
@spec expect(atom, atom, non_neg_integer, function) :: module
def expect(module, fn_name, num_calls \\ 1, func)
def expect(_module, _fn_name, 0, _func) do
raise ArgumentError, "Expecting 0 calls should be done through Mimic.reject/1"
end
def expect(module, fn_name, num_calls, func)
when is_atom(module) and is_atom(fn_name) and is_integer(num_calls) and num_calls >= 1 and
is_function(func) do
arity = :erlang.fun_info(func)[:arity]
raise_if_not_copied!(module)
raise_if_not_exported_function!(module, fn_name, arity)
module
|> Server.expect(fn_name, arity, num_calls, func)
|> validate_server_response(
"Expect cannot be called by the current process. Only the global owner is allowed."
)
end
@doc """
Define a stub which must not be called.
This function allows you to define a stub which must not be called during the
course of this test. If it is called then the verification step will raise.
## Arguments:
* `function` - A capture of the function which must not be called.
## Raises:
* If `function` is not called by the stubbing process while calling `verify!/1`.
## Example:
iex> Mimic.reject(&Calculator.add/2)
Calculator
"""
@spec reject(function) :: module
def reject(function) when is_function(function) do
fun_info = :erlang.fun_info(function)
arity = fun_info[:arity]
module = fun_info[:module]
fn_name = fun_info[:name]
raise_if_not_copied!(module)
raise_if_not_exported_function!(module, fn_name, arity)
module
|> Server.expect(fn_name, arity, 0, function)
|> validate_server_response(
"Reject cannot be called by the current process. Only the global owner is allowed."
)
end
@doc """
Define a stub which must not be called.
This function allows you to define a stub which must not be called during the
course of this test. If it is called then the verification step will raise.
## Arguments:
* `module` - the name of the module in which we're adding the stub.
* `function_name` - the name of the function we're stubbing.
* `arity` - the arity of the function we're stubbing.
## Raises:
* If `function` is not called by the stubbing process while calling `verify!/1`.
## Example:
iex> Mimic.reject(Calculator, :add, 2)
Calculator
"""
@spec reject(module, atom, non_neg_integer) :: module
def reject(module, function_name, arity) do
raise_if_not_copied!(module)
raise_if_not_exported_function!(module, function_name, arity)
func = :erlang.make_fun(module, function_name, arity)
module
|> Server.expect(function_name, arity, 0, func)
|> validate_server_response(
"Reject cannot be called by the current process. Only the global owner is allowed."
)
end
@doc """
Allow other processes to share expectations and stubs defined by another
process.
## Arguments:
* `module` - the copied module.
* `owner_pid` - the process ID of the process which created the stub.
* `allowed_pid` - the process ID of the process which should also be allowed
to use this stub.
## Raises:
* If Mimic is running in global mode.
## Example
```elixir
test "invokes add from a task" do
Calculator
|> expect(:add, fn x, y -> x + y end)
parent_pid = self()
Task.async(fn ->
Calculator |> allow(parent_pid, self())
assert Calculator.add(2, 3) == 5
end)
|> Task.await
end
```
"""
@spec allow(module(), pid(), pid()) :: module() | {:error, atom()}
def allow(module, owner_pid, allowed_pid) do
module
|> Server.allow(owner_pid, allowed_pid)
|> validate_server_response("Allow must not be called when mode is global.")
end
@doc """
Prepare `module` for mocking.
## Arguments:
* `module` - the name of the module to copy.
"""
@spec copy(module()) :: :ok
def copy(module) do
case Code.ensure_compiled(module) do
{:error, _} ->
raise ArgumentError,
"Module #{inspect(module)} is not available"
{:module, module} ->
Mimic.Module.replace!(module)
ExUnit.after_suite(fn _ -> Server.reset(module) end)
:ok
end
end
@doc """
Verifies the current process after it exits.
If you want to verify expectations for all tests, you can use
`verify_on_exit!/1` as a setup callback:
```elixir
setup :verify_on_exit!
```
"""
@spec verify_on_exit!(map()) :: :ok | no_return()
def verify_on_exit!(_context \\ %{}) do
pid = self()
Server.verify_on_exit(pid)
Callbacks.on_exit(Mimic, fn ->
verify!(pid)
Server.exit(pid)
end)
end
@doc """
Sets the mode to private. Mocks can be set and used by the process
```elixir
setup :set_mimic_private
```
"""
@spec set_mimic_private(map()) :: :ok
def set_mimic_private(_context \\ %{}), do: Server.set_private_mode()
@doc """
Sets the mode to global. Mocks can be set and used by all processes
```elixir
setup :set_mimic_global
```
"""
@spec set_mimic_global(map()) :: :ok
def set_mimic_global(_context \\ %{}), do: Server.set_global_mode(self())
@doc """
Chooses the mode based on ExUnit context. If `async` is `true` then
the mode is private, otherwise global.
```elixir
setup :set_mimic_from_context
```
"""
@spec set_mimic_from_context(map()) :: :ok
def set_mimic_from_context(%{async: true}), do: set_mimic_private()
def set_mimic_from_context(_context), do: set_mimic_global()
@doc """
Verify if expectations were fulfilled for a process `pid`
"""
@spec verify!(pid()) :: :ok
def verify!(pid \\ self()) do
pending = Server.verify(pid)
messages =
for {{module, name, arity}, num_calls, num_applied_calls} <- pending do
mfa = Exception.format_mfa(module, name, arity)
" * expected #{mfa} to be invoked #{num_calls} time(s) " <>
"but it has been called #{num_applied_calls} time(s)"
end
if messages != [] do
raise VerificationError,
"error while verifying mocks for #{inspect(pid)}:\n\n" <> Enum.join(messages, "\n")
end
:ok
end
@doc "Returns the current mode (`:global` or `:private`)"
@spec mode() :: :private | :global
def mode do
Server.get_mode()
|> validate_server_response("Couldn't get the current mode.")
end
defp raise_if_not_copied!(module) do
unless function_exported?(module, :__mimic_info__, 0) do
raise ArgumentError,
"Module #{inspect(module)} has not been copied. See docs for Mimic.copy/1"
end
end
defp raise_if_not_exported_function!(module, fn_name, arity) do
unless function_exported?(module, fn_name, arity) do
raise ArgumentError, "Function #{fn_name}/#{arity} not defined for #{inspect(module)}"
end
end
defp validate_server_response({:ok, module}, _), do: module
defp validate_server_response({:error, _}, error_message),
do: raise(ArgumentError, error_message)
end
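# A hedged test sketch combining reject/3 with automatic verification
# (assumes Mimic.copy(Calculator) already ran in test_helper.exs):
#
#     defmodule CalculatorTest do
#       use ExUnit.Case, async: true
#       use Mimic
#
#       test "add/2 is never called" do
#         Mimic.reject(Calculator, :add, 2)
#         # any call to Calculator.add/2 here would fail verification
#         # when the test process exits
#       end
#     end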
|
lib/mimic.ex
| 0.866345
| 0.894881
|
mimic.ex
|
starcoder
|
defmodule Cloak do
@moduledoc """
Cloak makes it easy to encrypt and decrypt database fields using
[Ecto](http://hexdocs.pm/ecto).
This `Cloak` module is Cloak's main entry point. It wraps the encryption
and decryption process, ensuring that everything works smoothly without
downtime, even when there are multiple encryption ciphers and keys in play
at the same time.
## Configuration
The actual encryption work is delegated to the cipher module that you specify
in Cloak's configuration. Cipher modules must adhere to the `Cloak.Cipher`
behaviour. You can configure a cipher module like so:
config :cloak, ModuleName,
default: true,
tag: "TAG",
# any other attributes required by the cipher
You can also have multiple ciphers configured at the same time, provided that
they are not both set to `default: true`.
config :cloak, CipherOne,
default: true,
tag: "one",
# ...
config :cloak, CipherTwo,
default: false,
tag: "two",
# ...
### Options
Both of these options are required for every cipher:
- `:default` - Boolean. Determines whether this module will be the default
module for encryption or decryption. The default module will be used to
generate all new encrypted values.
- `:tag` - Binary. Used to tag any ciphertext that the cipher module
generates. This allows Cloak to decrypt a ciphertext with the correct module
when you have multiple ciphers in use at the same time.
If your cipher module requires additional configuration options, you can also
add those keys and values to this configuration.
# Example of custom settings for a cipher module
config :cloak, MyCustomCipher,
default: true,
tag: "tag",
custom_setting1: "...",
custom_setting2: "..."
It will be the responsibility of the cipher module to read these values from
the `:cloak` application configuration and use them.
## Provided Ciphers
- `Cloak.AES.GCM` (recommended) - AES encryption in Galois Counter Mode (GCM).
- `Cloak.AES.CTR` - AES encryption in CTR stream mode.
If you don't see what you need here, you can use your own cipher module,
provided it adheres to the `Cloak.Cipher` behaviour.
(And [open a PR](https://github.com/danielberkompas/cloak), please!)
## Ecto Integration
Once Cloak is configured with a Cipher module, you can use it seamlessly with
[Ecto](http://hex.pm/ecto) with these `Ecto.Type` modules:
| Type | Ecto Type | Field |
| --------------- | --------------------- | ----------------------------------- |
| `String` | `:string` / `:binary` | `Cloak.EncryptedBinaryField` |
| `Date` | `:date` | `Cloak.EncryptedDateField` |
| `DateTime` | `:utc_datetime` | `Cloak.EncryptedDateTimeField` |
| `Float` | `:float` | `Cloak.EncryptedFloatField` |
| `Integer` | `:integer` | `Cloak.EncryptedIntegerField` |
| `Map` | `:map` | `Cloak.EncryptedMapField` |
| `NaiveDateTime` | `:naive_datetime` | `Cloak.EncryptedNaiveDateTimeField` |
| `Time` | `:time` | `Cloak.EncryptedTimeField` |
| `List(Integer)` | `{:array, :integer}` | `Cloak.EncryptedIntegerListField` |
| `List(String)` | `{:array, :string}` | `Cloak.EncryptedStringListField` |
You can also use the following `Ecto.Type` modules in order to hash fields:
| Type | Ecto Type | Field |
| --------- | ---------------------- | ------------------- |
| `String` | `:string` / `:binary` | `Cloak.SHA256Field` |
For example, to encrypt a binary field, change your schema from this:
schema "users" do
field :name, :binary
end
To this:
schema "users" do
field :name, Cloak.EncryptedBinaryField
end
The `name` field will then be encrypted whenever it is saved to the database,
using your configured cipher module. It will also be transparently decrypted
whenever the user is loaded from the database.
## Examples
The `Cloak` module can be called directly to generate ciphertext using the
current default cipher module.
iex> Cloak.encrypt("Hello") != "Hello"
true
iex> Cloak.encrypt("Hello") |> Cloak.decrypt
"Hello"
iex> Cloak.version
<<"AES", 1>>
"""
@doc """
Encrypt a value using the cipher module associated with the tag.
The `:tag` of the cipher will be prepended to the output. So, if the cipher
was `Cloak.AES.CTR`, and the tag was "AES", the output would be in this
format:
+-------+---------------+
| "AES" | Cipher output |
+-------+---------------+
This tagging allows Cloak to delegate decryption of a ciphertext to the
correct module when you have multiple ciphers in use at the same time. (For
example, this can occur while you migrate your encrypted data to a new
cipher.)
### Parameters
- `plaintext` - The value to be encrypted.
### Optional Parameters
- `tag` - The tag of the cipher to use for encryption. If omitted,
will default to the default cipher.
### Example
Cloak.encrypt("Hello, World!")
<<"AES", ...>>
Cloak.encrypt("Hello, World!", "AES")
<<"AES", ...>>
"""
@spec encrypt(term, String.t() | nil) :: String.t()
def encrypt(plaintext, tag \\ nil)
def encrypt(plaintext, nil) do
default_tag() <> default_cipher().encrypt(plaintext)
end
def encrypt(plaintext, tag) do
tag <> cipher(tag).encrypt(plaintext)
end
@doc """
Decrypt a ciphertext with the cipher module it was encrypted with.
`encrypt/1` includes the `:tag` of the cipher module that generated the
encryption in the ciphertext it outputs. `decrypt/1` can then use this tag to
find the right module on decryption.
### Parameters
- `ciphertext` - A binary of ciphertext generated by `encrypt/1`.
### Example
If the cipher module responsible had the tag "AES", Cloak will find the module
using that tag, strip it off, and hand the remaining ciphertext to the module
for decryption.
iex> ciphertext = Cloak.encrypt("Hello world!")
...> <<"AES", _ :: bitstring>> = ciphertext
...> Cloak.decrypt(ciphertext)
"Hello world!"
"""
def decrypt(ciphertext) do
plaintexts =
Cloak.Config.all()
|> Enum.filter(fn {_cipher, config} ->
tag = config[:tag]
String.starts_with?(ciphertext, tag)
end)
|> Enum.map(fn {cipher, config} ->
tag = config[:tag]
tag_size = byte_size(tag)
ciphertext = binary_part(ciphertext, tag_size, byte_size(ciphertext) - tag_size)
cipher.decrypt(ciphertext)
end)
case plaintexts do
[plaintext | _] ->
plaintext
_ ->
raise ArgumentError, "No cipher found to decrypt #{inspect(ciphertext)}."
end
end
@doc """
Returns the default cipher module's tag combined with the result of that
cipher's `version/0` function.
It is used in changesets to record which cipher was used to encrypt a row
in a database table. This is very useful when migrating to a new cipher or new
encryption key, because you'd be able to query your database to find records
that need to be migrated.
"""
@spec version() :: String.t()
def version() do
default_tag() <> default_cipher().version()
end
@spec default_cipher() :: module
defp default_cipher do
{cipher, _config} = Cloak.Config.default_cipher()
cipher
end
@spec default_tag() :: String.t()
defp default_tag do
{_cipher, config} = Cloak.Config.default_cipher()
config[:tag]
end
@spec version(String.t()) :: String.t()
def version(tag) do
tag <> cipher(tag).version()
end
@spec cipher(String.t()) :: module
defp cipher(tag) do
{cipher, _config} = Cloak.Config.cipher(tag)
cipher
end
end
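# A hedged sketch of the migration query that the `version/0` docs allude to
# (the `encryption_version` column and `Repo` module are assumptions):
#
#     import Ecto.Query
#
#     current = Cloak.version()
#
#     from(u in "users", where: u.encryption_version != ^current, select: u.id)
#     |> Repo.all()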
|
lib/cloak.ex
| 0.928002
| 0.688622
|
cloak.ex
|
starcoder
|
defmodule Livebook.Storage.Ets do
@moduledoc """
Ets implementation of `Livebook.Storage` behaviour.
The module is supposed to be started just once as it
is responsible for managing a named ets table.
`insert` and `delete` operations are supposed to be called using a GenServer
while all the lookups can be performed by directly accessing the named table.
"""
@behaviour Livebook.Storage
@table_name __MODULE__
use GenServer
@impl Livebook.Storage
def all(namespace) do
@table_name
|> :ets.match({{namespace, :"$1"}, :"$2", :"$3", :_})
|> Enum.group_by(
fn [entity_id, _attr, _val] -> entity_id end,
fn [_id, attr, val] -> {attr, val} end
)
|> Enum.map(fn {entity_id, attributes} ->
attributes
|> Map.new()
|> Map.put(:id, entity_id)
end)
end
@impl Livebook.Storage
def fetch(namespace, entity_id) do
@table_name
|> :ets.lookup({namespace, entity_id})
|> case do
[] ->
:error
entries ->
entries
|> Enum.map(fn {_key, attr, val, _timestamp} -> {attr, val} end)
|> Map.new()
|> Map.put(:id, entity_id)
|> then(&{:ok, &1})
end
end
@impl Livebook.Storage
def fetch_key(namespace, entity_id, key) do
@table_name
|> :ets.match({{namespace, entity_id}, key, :"$1", :_})
|> case do
[[value]] -> {:ok, value}
[] -> :error
end
end
@impl Livebook.Storage
def insert(namespace, entity_id, attributes) do
GenServer.call(__MODULE__, {:insert, namespace, entity_id, attributes})
end
@impl Livebook.Storage
def delete(namespace, entity_id) do
GenServer.call(__MODULE__, {:delete, namespace, entity_id})
end
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@impl GenServer
def init(_opts) do
table = :ets.new(@table_name, [:named_table, :protected, :duplicate_bag])
{:ok, %{table: table}}
end
@impl GenServer
def handle_call({:insert, namespace, entity_id, attributes}, _from, %{table: table} = state) do
match_head = {{namespace, entity_id}, :"$1", :_, :_}
guards =
Enum.map(attributes, fn {key, _val} ->
{:==, :"$1", key}
end)
:ets.select_delete(table, [{match_head, guards, [true]}])
timestamp = System.os_time(:millisecond)
attributes =
Enum.map(attributes, fn {attr, val} ->
{{namespace, entity_id}, attr, val, timestamp}
end)
:ets.insert(table, attributes)
{:reply, :ok, state}
end
@impl GenServer
def handle_call({:delete, namespace, entity_id}, _from, %{table: table} = state) do
:ets.delete(table, {namespace, entity_id})
{:reply, :ok, state}
end
end
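# A minimal round trip (the :settings namespace is illustrative):
#
#     {:ok, _pid} = Livebook.Storage.Ets.start_link([])
#     :ok = Livebook.Storage.Ets.insert(:settings, "id1", name: "My Hub")
#     {:ok, %{id: "id1", name: "My Hub"}} = Livebook.Storage.Ets.fetch(:settings, "id1")
#     {:ok, "My Hub"} = Livebook.Storage.Ets.fetch_key(:settings, "id1", :name)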
|
lib/livebook/storage/ets.ex
| 0.79534
| 0.465448
|
ets.ex
|
starcoder
|
defmodule MonEx.Result do
@moduledoc """
The Result module provides the Result type along with utility functions.
"""
alias MonEx.Option
defmacro ok(res) do
quote do
{:ok, unquote(res)}
end
end
defmacro error(err) do
quote do
{:error, unquote(err)}
end
end
@typedoc """
Result type.
`ok(res)` or `error(err)` unwraps into `{:ok, res}` or `{:error, err}`
"""
@type t(res, err) :: {:ok, res} | {:error, err}
@doc """
Returns true if argument is `ok()`, false if `error()`
## Examples
iex> is_ok(ok(5))
true
iex> is_error(ok(5))
false
"""
@spec is_ok(t(any, any)) :: boolean
def is_ok(ok(_)), do: true
def is_ok(error(_)), do: false
@doc """
Returns true if argument is `error()`, false if `ok()`
## Examples
iex> is_error(error("Error"))
true
iex> is_ok(error("Error"))
false
"""
@spec is_error(t(any, any)) :: boolean
def is_error(x), do: !is_ok(x)
@doc """
Returns value `x` if argument is `ok(x)`, raises `e` if `error(e)`.
The second argument is a fallback. It can be a lambda accepting the error, or some precomputed default value.
## Examples
iex> unwrap(ok(5))
5
iex> unwrap(error(:uh_oh), fn _ -> 10 end)
10
iex> unwrap(error(:uh_oh), 10)
10
"""
@spec unwrap(t(res, err), res | (err -> res)) :: res when res: any, err: any
def unwrap(result, fallback \\ nil)
def unwrap(ok(x), _), do: x
def unwrap(error(m), nil), do: raise m
def unwrap(error(m), f) when is_function(f, 1), do: f.(m)
def unwrap(error(_), fallback), do: fallback
@doc """
Converts Result into Option: `ok(val)` -> `some(val)`, `error(e)` -> `none()`.
Useful when you don't care about the error value and only want to emphasize that
nothing has been found.
## Examples
iex> unwrap_option(ok(5))
{:some, 5} # same as some(5)
iex> unwrap_option(error(:uh_oh))
{:none} # none()
"""
@spec unwrap_option(t(res, any)) :: Option.t(res) when res: any
def unwrap_option(ok(x)), do: {:some, x}
def unwrap_option(error(_)), do: {:none}
@doc """
Returns self if it is `ok(x)`, or evaluates the supplied lambda, which is expected
to return another result. Returns the supplied fallback result if the second argument is not a function.
## Examples
iex> ok(5) |> fallback(fn _ -> 1 end)
ok(5)
iex> error("WTF") |> fallback(fn m -> ok("\#{m}LOL") end)
ok("WTFLOL")
iex> error("WTF") |> fallback(ok(5))
ok(5)
"""
@spec fallback(t(res, err), t(res, err) | (err -> t(res, err))) :: t(res, err) when res: any, err: any
def fallback(ok(x), _), do: ok(x)
def fallback(error(m), f) when is_function(f, 1) do
f.(m)
end
def fallback(error(_), any), do: any
@doc """
Filters and unwraps the collection of results, leaving only ok's
## Examples
iex> [ok(1), error("oops")] |> collect_ok
[1]
"""
@spec collect_ok([t(res, any)]) :: [res] when res: any
def collect_ok(results) when is_list(results) do
Enum.reduce(results, [], fn
ok(res), acc -> [res | acc]
error(_), acc -> acc
end) |> Enum.reverse
end
@doc """
Filters and unwraps the collection of results, leaving only errors:
## Examples
iex> [ok(1), error("oops")] |> collect_error
["oops"]
"""
@spec collect_error([t(res, err)]) :: [err] when res: any, err: any
def collect_error(results) when is_list(results) do
Enum.reduce(results, [], fn
ok(_), acc -> acc
error(err), acc -> [err | acc]
end) |> Enum.reverse
end
@doc """
Groups and unwraps the collection of results, forming a Map with keys `:ok` and `:error`:
## Examples
iex> [ok(1), error("oops"), ok(2)] |> partition
%{ok: [1, 2], error: ["oops"]}
iex> [ok(1)] |> partition
%{ok: [1], error: []}
"""
@spec partition([t(res, err)]) :: %{ok: [res], error: [err]} when res: any, err: any
def partition(results) when is_list(results) do
base = %{ok: [], error: []}
results = Enum.group_by(results, fn
ok(_) -> :ok
error(_) -> :error
end, fn
ok(res) -> res
error(err) -> err
end)
Map.merge(base, results)
end
@doc """
Retry in case of error.
Possible options:
* `:n` - times to retry
* `:delay` — delay between retries
## Examples
result = retry n: 3, delay: 3000 do
remote_service()
end
This will call `remote_service()` 4 times (1 time + 3 retries) with an interval of 3 seconds.
"""
defmacro retry(opts \\ [], do: exp) do
quote do
n = Keyword.get(unquote(opts), :n, 5)
delay = Keyword.get(unquote(opts), :delay, 0)
retry_rec(n, delay, fn -> unquote(exp) end)
end
end
@doc false
@spec retry_rec(integer, integer, (() -> t(res, err))) :: t(res, err) when res: any, err: any
def retry_rec(0, _delay, lambda), do: lambda.()
def retry_rec(n, delay, lambda) do
case lambda.() do
error(_) ->
:timer.sleep(delay)
retry_rec(n - 1, delay, lambda)
ok -> ok
end
end
@doc """
Wraps expression and returns exception wrapped into `error()` if it happens,
otherwise `ok(result of expression)`. If the expression already returns a result
type, it won't be wrapped again.
Possible modes:
* `:full` - returns exception struct intact (default)
* `:message` — returns error message only
* `:module` — returns error module only
## Examples
iex> try_result do
...> 5 + 5
...> end
ok(10)
iex> broken = fn -> raise ArithmeticError, [message: "bad argument"] end
...> try_result do
...> broken.()
...> end
error(%ArithmeticError{message: "bad argument"})
...> try_result :message do
...> broken.()
...> end
error("bad argument")
...> try_result :module do
...> broken.()
...> end
error(ArithmeticError)
"""
defmacro try_result(mode \\ :full, do: exp) do
error_handler = case mode do
:message -> quote do e -> error(e.message) end
:module -> quote do e -> error(e.__struct__) end
_ -> quote do e -> error(e) end
end
quote do
try do
case unquote(exp) do
ok(res) -> ok(res)
error(e) -> error(e)
x -> ok(x)
end
rescue
unquote(error_handler)
end
end
end
end
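# Composing the helpers above (`fetch_user/1` is a hypothetical function
# returning ok/error tuples):
#
#     import MonEx.Result
#
#     fetch_user(id)
#     |> fallback(fn _reason -> ok(%{name: "guest"}) end)
#     |> unwrap()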
|
lib/monex/result.ex
| 0.930718
| 0.507385
|
result.ex
|
starcoder
|
defmodule Cadet.SharedHelper do
@moduledoc """
Contains utility functions that may be commonly used across Cadet and CadetWeb.
"""
defmacro is_ecto_id(id) do
quote do
is_integer(unquote(id)) or is_binary(unquote(id))
end
end
def rename_keys(map, key_map) do
Enum.reduce(key_map, map, fn {from, to}, map ->
if Map.has_key?(map, from) do
{v, map} = Map.pop!(map, from)
Map.put(map, to, v)
else
map
end
end)
end
@doc """
Snake-casifies string keys.
Meant for use when accepting a JSON map from the frontend, where keys are
usually camel-case.
"""
def snake_casify_string_keys(map = %{}) do
for {key, val} <- map,
into: %{},
do: {if(is_binary(key), do: Recase.to_snake(key), else: key), val}
end
def to_snake_case_atom_keys(map = %{}) do
map
|> snake_casify_string_keys()
|> (&for({key, val} <- &1, into: %{}, do: {String.to_atom(key), val})).()
end
@doc """
Snake-casifies string keys, recursively.
Meant for use when accepting a JSON map from the frontend, where keys are
usually camel-case.
"""
def snake_casify_string_keys_recursive(map = %{}) when not is_struct(map) do
for {key, val} <- map,
into: %{},
do:
{if(is_binary(key), do: Recase.to_snake(key), else: key),
snake_casify_string_keys_recursive(val)}
end
def snake_casify_string_keys_recursive(list) when is_list(list) do
for e <- list, do: snake_casify_string_keys_recursive(e)
end
def snake_casify_string_keys_recursive(other), do: other
@doc """
Camel-casifies atom keys and converts them to strings.
Meant for use when sending an Elixir map, which usually has snake-case keys,
to the frontend.
"""
def camel_casify_atom_keys(map = %{}) do
for {key, val} <- map,
into: %{},
do: {if(is_atom(key), do: key |> Atom.to_string() |> Recase.to_camel(), else: key), val}
end
@doc """
Converts a map like `%{"a" => 123}` into a keyword list like [a: 123]. Returns
nil if any keys are not existing atoms.
Meant for use for GET endpoints that filter based on the query string.
"""
def try_keywordise_string_keys(map) do
for {key, val} <- map,
do: {if(is_binary(key), do: String.to_existing_atom(key), else: key), val}
rescue
ArgumentError -> nil
end
@doc """
Sends an error to Sentry. The error can be anything.
"""
def send_sentry_error(error) do
{_, stacktrace} = Process.info(self(), :current_stacktrace)
# drop 2 elements off the stacktrace: the frame of Process.info, and the
# frame of this function
stacktrace = stacktrace |> Enum.drop(2)
error = Exception.normalize(:error, error, stacktrace)
Sentry.capture_exception(error, stacktrace: stacktrace)
end
end
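# Key-handling examples derived from the functions above:
#
#     iex> Cadet.SharedHelper.rename_keys(%{"a" => 1}, %{"a" => "b"})
#     %{"b" => 1}
#     iex> Cadet.SharedHelper.snake_casify_string_keys(%{"myKey" => 1})
#     %{"my_key" => 1}
#     iex> Cadet.SharedHelper.try_keywordise_string_keys(%{"ok" => 1})
#     [ok: 1]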
|
lib/cadet/helpers/shared_helper.ex
| 0.754644
| 0.41404
|
shared_helper.ex
|
starcoder
|
defmodule Riak do
@moduledoc """
# Riak Elixir Client
[](https://travis-ci.org/drewkerrigan/riak-elixir-client)
A Riak client written in Elixir. Now includes connection pooling with [pooler](http://github.com/seth/pooler) and a variety of other improvements from [riex](https://github.com/edgurgel/riex).
## Setup
### Prerequisites
* Riak 2.0+
* Elixir 1.0+
#### In an Elixir application
Add the following to mix.exs
```elixir
...
def application do
[ applications: [ :riak ]]
end
...
defp deps do
[ {:riak, "~> 1.0"} ]
end
...
```
## Usage
### Establishing a Riak connection
```elixir
{:ok, pid} = Riak.Connection.start_link('127.0.0.1', 8087) # Default values
```
### Connection Pooling
Most functions in this module can be called by passing the pid of the established connection or using a pool of connections (provided by pooler). Define pools by using the group `riak`. Following is an example `config/config.exs`:
```elixir
[pooler: [pools: [
[ name: :riaklocal1,
group: :riak,
max_count: 10,
init_count: 5,
start_mfa: {Riak.Connection, :start_link, []}
],
[ name: :riaklocal2,
group: :riak,
max_count: 15,
init_count: 2,
start_mfa: {Riak.Connection, :start_link, ['127.0.0.1', 9090]}
] ]
]]
```
For an example using this functionality with a local Riak instance, check [`config/config.exs`](https://github.com/drewkerrigan/riak-elixir-client/blob/master/config/config.exs). More information about Elixir configuration can be found on [elixir-lang.org](http://elixir-lang.org): [Application environment and configuration](http://elixir-lang.org/getting_started/mix_otp/10.html#toc_6).
Once a pool configuration is properly defined in a project, calls to Riak can omit the pid. For example:
This call uses a pid from the pool of connections provided by pooler:
```elixir
Riak.delete("user", key)
```
This call requires a pid obtained by first calling `Riak.Connection.start_link`:
```elixir
Riak.delete(pid, "user", key)
```
### Save a value
```elixir
o = Riak.Object.create(bucket: "user", key: "my_key", data: "Han Solo")
Riak.put(pid, o)
```
### Find an object
```elixir
o = Riak.find(pid, "user", "my_key")
```
### Update an object
```elixir
o = %{o | data: "Something Else"}
Riak.put(pid, o)
```
### Delete an object
Using key
```elixir
Riak.delete(pid, "user", key)
```
Using object
```elixir
Riak.delete(pid, o)
```
### Datatypes
Riak Datatypes (a.k.a. CRDTs) are available in [Riak versions 2.0](http://basho.com/introducing-riak-2-0/) and greater. The types included are: maps, sets, counters, registers and flags.
#### Setup
Datatypes require the use of bucket-types. Maps, sets, and counters can be used as top-level bucket-type datatypes; Registers and flags may only be used within maps.
The following examples assume the presence of 3 datatype enabled bucket-types. You can create these bucket-types by running the following commands on a single Riak node in your cluster:
Bucket-Type: `counters`
```
riak-admin bucket-type create counters '{"props":{"datatype":"counter"}}'
riak-admin bucket-type activate counters
```
Bucket-Type: `sets`
```
riak-admin bucket-type create sets '{"props":{"datatype":"set"}}'
riak-admin bucket-type activate sets
```
Bucket-Type: `maps`
```
riak-admin bucket-type create maps '{"props":{"datatype":"map"}}'
riak-admin bucket-type activate maps
```
#### Counters
Create a counter (`alias Riak.CRDT.Counter`):
```elixir
Counter.new
|> Counter.increment
|> Counter.increment(2)
|> Riak.update("counters", "my_counter_bucket", "my_key")
```
Fetch a counter:
```elixir
counter = Riak.find("counters", "my_counter_bucket", "my_key")
|> Counter.value
```
`counter` will be 3.
***NOTE***: "Counter drift" is a possibility that needs to be accounted for with any distributed system such as Riak. The problem can manifest itself during failure states in either your application or Riak itself. If an increment operation fails from the client's point of view, there is not sufficient information available to know whether or not that call made it to zero or all of the replicas for that counter object. As such, if the client attempts to retry the increment after receiving something like an error code 500 from Riak, that counter object is at risk of drifting positive. Similarly if the client decides not to retry, that counter object is at risk of drifting negative.
For these reasons, counters are only suggested for use-cases that can handle some (albeit small) amount of counter drift. Good examples of appropriate use-cases are: Facebook likes, Twitter retweet counts, YouTube view counts, etc. Some examples of poor use-cases for Riak counters are: bank account balances, anything related to money. It is possible to implement these types of solutions using Riak, but more client side logic is necessary. For an example of a client-side ledger with tunable retry options, check [github.com/drewkerrigan/riak-ruby-ledger](https://github.com/drewkerrigan/riak-ruby-ledger). Another approach could be the client-side implementation of a HAT (Highly Available Transaction) algorithm.
#### Sets
Create a set (`alias Riak.CRDT.Set`):
```elixir
Set.new
|> Set.put("foo")
|> Set.put("bar")
|> Riak.update("sets", "my_set_bucket", "my_key")
```
And fetch the set:
```elixir
set = Riak.find("sets", "my_set_bucket", "my_key")
|> Set.value
```
Where `set` is an `orddict`.
#### Maps
Maps handle binary keys with any other datatype (map, set, flag, register and counter).
Create a map (`alias Riak.CRDT.{Map, Register, Flag}`):
```elixir
register = Register.new("some string")
flag = Flag.new |> Flag.enable
Map.new
|> Map.put("k1", register)
|> Map.put("k2", flag)
|> Riak.update("maps", "my_map_bucket", "map_key")
```
And fetch the map:
```elixir
map = Riak.find("maps", "my_map_bucket", "map_key") |> Map.value
```
Where `map` is an `orddict`.
## Examples
Check the `examples/` directory for a few example elixir applications using the riak client.
For more functionality, check `test/` directory.
## Tests
```
MIX_ENV=test mix do deps.get, test
```
## License
Copyright 2015 <NAME>.
Copyright 2014 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import Riak.Pool
require Record
@doc """
Ping the server.
"""
defpool ping(pid) when is_pid(pid), do: :riakc_pb_socket.ping(pid)
@doc """
Put the metadata/value in the object under bucket-type/bucket/key.
"""
defpool put(pid, obj) when is_pid(pid) do
case :riakc_pb_socket.put(pid, Riak.Object.to_robj(obj)) do
{:ok, new_object} -> %{obj | key: :riakc_obj.key(new_object)}
:ok -> obj
_ -> nil
end
end
@doc """
Updates the convergent datatype in Riak with local
modifications stored in the container type.
"""
defpool update(pid, datatype, type, bucket, key) when is_pid(pid) do
:riakc_pb_socket.update_type(pid, {type, bucket},
key, to_op(datatype))
end
defp to_op(datatype) do
case datatype do
datatype when Record.is_record(datatype, :set) ->
:riakc_set.to_op(datatype)
datatype when Record.is_record(datatype, :counter) ->
:riakc_counter.to_op(datatype)
datatype when Record.is_record(datatype, :map) ->
:riakc_map.to_op(datatype)
_ -> :undefined
end
end
@doc """
Get bucket-type/bucket/key from the server.
"""
defpool find(pid, bucket, key) when is_pid(pid) do
case :riakc_pb_socket.get(pid, bucket, key) do
{:ok, object} ->
if :riakc_obj.value_count(object) > 1 do
build_sibling_list(:riakc_obj.get_contents(object),[])
else
Riak.Object.from_robj(object)
end
_ -> nil
end
end
@doc """
Fetches the representation of a convergent datatype from Riak.
TODO: In the current implementation, it's very easy to confuse working
with regular k/v objects and datatypes. Clarify so that these aren't
conflated by assuming that any object with a type is a datatype.
"""
defpool find(pid, type, bucket, key) when is_pid(pid) do
case :riakc_pb_socket.fetch_type(pid, {type, bucket}, key) do
{:ok, object} -> object
_ -> nil
end
end
defp build_sibling_list([{_md, val}|t], final_list), do: build_sibling_list(t,[val|final_list])
defp build_sibling_list([], final_list), do: final_list
@doc """
Picks the sibling to "win" over the other siblings via a list index.
"""
defpool resolve(pid, bucket, key, index) when is_pid(pid) do
case :riakc_pb_socket.get(pid, bucket, key) do
{:ok, object} ->
new_object = :riakc_obj.select_sibling(index, object)
:riakc_pb_socket.put(pid, new_object)
_ -> nil
end
end
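  # Hypothetical end-to-end sibling handling built from find/3 and resolve/4
  # above (bucket/key are illustrative; riakc's select_sibling index is 1-based):
  #
  #     case Riak.find(pid, "user", "my_key") do
  #       siblings when is_list(siblings) ->
  #         # conflicting values detected; keep the first sibling
  #         Riak.resolve(pid, "user", "my_key", 1)
  #       object_or_nil ->
  #         object_or_nil
  #     end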
@doc """
Delete the key/value.
"""
defpool delete(pid, obj) when is_pid(pid), do: delete(pid, obj.bucket, obj.key)
defpool delete(pid, bucket, key) when is_pid(pid), do: :riakc_pb_socket.delete(pid, bucket, key)
defpool delete(pid, type, bucket, key) when is_pid(pid), do: :riakc_pb_socket.delete(pid, {type, bucket}, key)
end
|
lib/riak.ex
| 0.908256
| 0.943295
|
riak.ex
|
starcoder
|
defmodule Ash.Query.Operator do
@moduledoc """
An operator is a predicate with a `left` and a `right`
For more information on being a predicate, see `Ash.Filter.Predicate`. Most of the complexities
are there. An operator must meet both behaviours.
"""
@doc """
Create a new predicate. There are various return types possible:
* `{:ok, left, right}` - Return the left/right values of the operator
* `{:ok, operator}` - Return the operator itself; either this or the form above is acceptable
* `{:known, boolean}` - If the value is already known, e.g. `1 == 1`
* `{:error, error}` - If there was an error creating the operator
"""
@callback new(term, term) ::
{:ok, term, term} | {:ok, term} | {:known, boolean} | {:error, term}
@doc """
The implementation of the inspect protocol.
If not defined, it will be inferred
"""
@callback to_string(struct, Inspect.Opts.t()) :: term
alias Ash.Query.{Call, Expression, Not, Ref}
@doc "Create a new operator. Pass the module and the left and right values"
def new(mod, %Ref{} = left, right) do
try_cast_with_ref(mod, left, right)
end
def new(mod, left, %Ref{} = right) do
try_cast_with_ref(mod, left, right)
end
def new(mod, %{__struct__: struct} = left, right)
when struct in [Call, Expression, Not] do
mod.new(left, right)
end
def new(mod, left, %{__struct__: struct} = right)
when struct in [Call, Expression, Not] do
mod.new(left, right)
end
def new(mod, %{__predicate__?: _} = left, right) do
mod.new(left, right)
end
def new(mod, left, %{__predicate__?: _} = right) do
mod.new(left, right)
end
def new(mod, left, right) do
case mod.new(left, right) do
{:ok, val} ->
case mod.evaluate(val) do
{:known, value} -> {:ok, value}
:unknown -> {:ok, val}
end
{:error, error} ->
{:error, error}
end
end
defp try_cast_with_ref(mod, left, right) do
Enum.find_value(mod.types(), fn type ->
try_cast(left, right, type)
end)
|> case do
nil ->
{:error, "Could not cast expression"}
{:ok, left, right} ->
mod.new(left, right)
end
end
defp try_cast(%{__predicate__?: _} = left, right, _) do
{:ok, left, right}
end
defp try_cast(left, %{__predicate__?: _} = right, _) do
{:ok, left, right}
end
defp try_cast(%Ref{attribute: %{type: type}} = left, right, :same) do
case Ash.Query.Type.try_cast(right, type) do
{:ok, new_right} ->
{:ok, left, new_right}
_ ->
nil
end
end
defp try_cast(left, %Ref{attribute: %{type: type}} = right, :same) do
case Ash.Query.Type.try_cast(left, type) do
{:ok, new_left} ->
{:ok, new_left, right}
_ ->
nil
end
end
defp try_cast(%Ref{attribute: %{type: type}} = left, right, [:any, {:array, :same}]) do
case Ash.Query.Type.try_cast(right, {:array, type}) do
{:ok, new_right} ->
{:ok, left, new_right}
_ ->
nil
end
end
defp try_cast(left, %Ref{attribute: %{type: {:array, type}}} = right, [:any, {:array, :same}]) do
case Ash.Query.Type.try_cast(left, type) do
{:ok, new_left} ->
{:ok, new_left, right}
_ ->
nil
end
end
# We don't have a way to infer types from values right now
defp try_cast(left, right, [:same, :same]) do
{:ok, left, right}
end
defp try_cast(left, right, [:same, {:array, :same}]) do
{:ok, left, right}
end
defp try_cast(left, right, [{:array, :same}, :same]) do
{:ok, left, right}
end
defp try_cast(left, right, [{:array, :same}, {:array, :same}]) do
{:ok, left, right}
end
defp try_cast(left, right, [:same, type]) do
try_cast(left, right, [type, type])
end
defp try_cast(left, right, [type, :same]) do
try_cast(left, right, [type, type])
end
defp try_cast(left, right, [{:array, :same}, type]) do
try_cast(left, right, [{:array, type}, type])
end
defp try_cast(left, right, [type, {:array, :same}]) do
try_cast(left, right, [type, {:array, type}])
end
defp try_cast(left, right, [left_type, right_type]) do
with {:ok, left} <- cast_one(left, left_type),
{:ok, right} <- cast_one(right, right_type) do
{:ok, left, right}
else
{:error, error} ->
{:error, error}
end
end
defp try_cast(left, right, :any) do
{:ok, left, right}
end
defp try_cast(left, right, {:array, :any}) do
{:ok, left, right}
end
defp try_cast(_, _, _), do: nil
defp cast_one(value, {:array, :any}) do
{:ok, value}
end
defp cast_one(value, :any) do
{:ok, value}
end
defp cast_one(value, type) do
case Ash.Query.Type.try_cast(value, type) do
{:ok, casted} ->
{:ok, casted}
_ ->
{:error, "Could not cast #{inspect(type)} as #{value}"}
end
end
def operators do
[
Ash.Query.Operator.Eq,
Ash.Query.Operator.GreaterThanOrEqual,
Ash.Query.Operator.GreaterThan,
Ash.Query.Operator.In,
Ash.Query.Operator.IsNil,
Ash.Query.Operator.LessThanOrEqual,
Ash.Query.Operator.LessThan,
Ash.Query.Operator.NotEq
] ++ Ash.Query.Operator.Basic.operator_modules()
end
def operator_symbols do
Enum.map(operators(), & &1.operator)
end
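  # A minimal sketch of a custom operator built on the __using__ macro below.
  # The module name and semantics are illustrative, not part of Ash; it assumes
  # an `evaluate/1` callback is supplied, since `new/3` above calls
  # `mod.evaluate/1` for plain values.
  #
  #     defmodule MyApp.Query.Operator.Contains do
  #       use Ash.Query.Operator, operator: :contains, predicate?: true
  #
  #       def evaluate(%{left: left, right: right})
  #           when is_binary(left) and is_binary(right) do
  #         {:known, String.contains?(left, right)}
  #       end
  #
  #       def evaluate(_), do: :unknown
  #     end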
defmacro __using__(opts) do
unless opts[:operator] do
raise "Operator is required!"
end
quote do
defstruct [
:left,
:right,
operator: unquote(opts[:operator]),
embedded?: false,
__operator__?: true,
__predicate__?: unquote(opts[:predicate?] || false)
]
if unquote(opts[:predicate?]) do
@behaviour Ash.Filter.Predicate
end
alias Ash.Query.Ref
def operator, do: unquote(opts[:operator])
def name, do: unquote(opts[:name] || opts[:operator])
def predicate? do
unquote(opts[:predicate?])
end
def types do
unquote(opts[:types] || [:same, :any])
end
def new(left, right), do: {:ok, struct(__MODULE__, left: left, right: right)}
import Inspect.Algebra
def to_string(%{left: left, right: right, operator: operator}, opts) do
concat([
to_doc(left, opts),
" ",
to_string(operator),
" ",
to_doc(right, opts)
])
end
defoverridable to_string: 2, new: 2
defimpl Inspect do
def inspect(%mod{} = op, opts) do
mod.to_string(op, opts)
end
end
end
end
end
|
lib/ash/query/operator/operator.ex
| 0.881251
| 0.767625
|
operator.ex
|
starcoder
|
defmodule OliWeb.Curriculum.HierarchyPicker do
@moduledoc """
Hierarchy Picker Component
A general purpose curriculum location picker. When a new location is selected,
this component will trigger a "HierarchyPicker.select" event to the parent liveview
with the new selection.
Example:
```
def handle_event("HierarchyPicker.select", %{"slug" => slug}, socket) do
...
end
```
"""
use Phoenix.LiveComponent
use Phoenix.HTML
alias Oli.Resources.Numbering
def render(assigns) do
~L"""
<div id="hierarchy-picker" class="hierarchy-picker">
<div class="hierarchy-navigation">
<%= render_breadcrumb assigns, @breadcrumbs %>
</div>
<div class="hierarchy">
<div class="text-center text-secondary mt-2">
<b><%= @revision.title %></b> will be placed here
</div>
<%# filter out the item being moved from the options, sort all containers first %>
<%= for child <- @children |> Enum.filter(&(&1.id != @revision.id)) |> Enum.sort(&sort_containers_first/2) do %>
<div id="hierarchy_item_<%= child.resource_id %>"
phx-click="select"
phx-value-slug="<%= child.slug %>">
<div class="flex-1">
<%= OliWeb.Curriculum.EntryLive.icon(%{child: child}) %>
<%= resource_link assigns, child %>
</div>
</div>
<% end %>
</div>
</div>
"""
end
def render_breadcrumb(assigns, breadcrumbs) do
~L"""
<ol class="breadcrumb custom-breadcrumb p-1 px-2">
<button class="btn btn-sm btn-link" phx-click="HierarchyPicker.select" phx-value-slug="<%= previous_slug(breadcrumbs) %>"><i class="las la-arrow-left"></i></button>
<%= for {breadcrumb, index} <- Enum.with_index(breadcrumbs) do %>
<%= render_breadcrumb_item Enum.into(%{
breadcrumb: breadcrumb,
show_short: length(breadcrumbs) > 3,
is_last: length(breadcrumbs) - 1 == index,
}, assigns) %>
<% end %>
</ol>
"""
end
defp render_breadcrumb_item(
%{breadcrumb: breadcrumb, show_short: show_short, is_last: is_last} = assigns
) do
~L"""
<li class="breadcrumb-item align-self-center">
<button class="btn btn-xs btn-link px-0" <%= if is_last, do: "disabled" %> phx-click="HierarchyPicker.select" phx-value-slug="<%= breadcrumb.slug %>">
<%= get_title(breadcrumb, show_short) %>
</button>
</li>
"""
end
defp get_title(breadcrumb, true = _show_short), do: breadcrumb.short_title
defp get_title(breadcrumb, false = _show_short), do: breadcrumb.full_title
defp resource_link(%{numberings: numberings} = assigns, revision) do
with resource_type <- Oli.Resources.ResourceType.get_type_by_id(revision.resource_type_id) do
case resource_type do
"container" ->
numbering = Map.get(numberings, revision.id)
title =
if numbering do
Numbering.prefix(numbering) <> ": " <> revision.title
else
revision.title
end
~L"""
<button class="btn btn-link ml-1 mr-1 entry-title" phx-click="HierarchyPicker.select" phx-value-slug="<%= revision.slug %>">
<%= title %>
</button>
"""
_ ->
~L"""
<button class="btn btn-link ml-1 mr-1 entry-title" disabled><%= revision.title %></button>
"""
end
end
end
defp sort_containers_first(a, b) do
case {
Oli.Resources.ResourceType.get_type_by_id(a.resource_type_id),
Oli.Resources.ResourceType.get_type_by_id(b.resource_type_id)
} do
{"container", "container"} -> true
{"container", _} -> true
_ -> false
end
end
defp previous_slug(breadcrumbs) do
previous = Enum.at(breadcrumbs, length(breadcrumbs) - 2)
previous.slug
end
end
|
lib/oli_web/live/curriculum/hierarchy_picker.ex
| 0.671901
| 0.596844
|
hierarchy_picker.ex
|
starcoder
|
defmodule DeltaCrdt.CausalCrdt do
use GenServer
require Logger
require BenchmarkHelper
BenchmarkHelper.inject_in_dev()
@type delta :: {k :: integer(), delta :: any()}
@type delta_interval :: {a :: integer(), b :: integer(), delta :: delta()}
@moduledoc false
defstruct node_id: nil,
name: nil,
on_diffs: nil,
storage_module: nil,
crdt_module: nil,
crdt_state: nil,
merkle_map: MerkleMap.new(),
sequence_number: 0,
neighbours: MapSet.new(),
neighbour_monitors: %{},
outstanding_syncs: %{},
sync_interval: nil,
max_sync_size: nil
defmodule(Diff, do: defstruct(continuation: nil, dots: nil, originator: nil, from: nil, to: nil))
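  # A sketch of the sync handshake implied by the handle_info clauses below
  # (my reading of this module, not an official protocol description):
  #
  #   A -> B  {:diff, %Diff{continuation: ...}}  partial merkle-tree diff
  #   B -> A  {:diff, %Diff{...}}                continued until keys resolve
  #   B -> A  {:get_diff, diff, keys}            when B originated the sync
  #   A -> B  {:diff, crdt_delta, keys}          CRDT state for the diffed keys
  #   B -> A  {:ack_diff, to}                    clears the outstanding sync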
### GenServer callbacks
def init(opts) do
send(self(), :sync)
Process.flag(:trap_exit, true)
crdt_module = Keyword.get(opts, :crdt_module)
max_sync_size =
case Keyword.get(opts, :max_sync_size) do
:infinite ->
:infinite
size when is_integer(size) and size > 0 ->
size
invalid_size ->
raise ArgumentError, "#{inspect(invalid_size)} is not a valid max_sync_size"
end
initial_state = %__MODULE__{
node_id: :rand.uniform(1_000_000_000),
name: Keyword.get(opts, :name),
on_diffs: Keyword.get(opts, :on_diffs),
storage_module: Keyword.get(opts, :storage_module),
sync_interval: Keyword.get(opts, :sync_interval),
max_sync_size: max_sync_size,
crdt_module: crdt_module,
crdt_state: crdt_module.new() |> crdt_module.compress_dots()
}
{:ok, read_from_storage(initial_state)}
end
def handle_info({:ack_diff, to}, state) do
{:noreply, %{state | outstanding_syncs: Map.delete(state.outstanding_syncs, to)}}
end
def handle_info({:diff, diff, keys}, state) do
new_state = update_state_with_delta(state, diff, keys)
{:noreply, new_state}
end
def handle_info({:diff, diff}, state) do
diff = reverse_diff(diff)
new_merkle_map = MerkleMap.update_hashes(state.merkle_map)
case MerkleMap.continue_partial_diff(diff.continuation, new_merkle_map, 8) do
{:continue, continuation} ->
%Diff{diff | continuation: truncate(continuation, state.max_sync_size)}
|> send_diff_continue()
{:ok, []} ->
ack_diff(diff)
{:ok, keys} ->
send_diff(diff, truncate(keys, state.max_sync_size), state)
ack_diff(diff)
end
{:noreply, Map.put(state, :merkle_map, new_merkle_map)}
end
def handle_info({:get_diff, diff, keys}, state) do
diff = reverse_diff(diff)
send(
diff.to,
{:diff,
%{state.crdt_state | dots: diff.dots, value: Map.take(state.crdt_state.value, keys)}, keys}
)
ack_diff(diff)
{:noreply, state}
end
def handle_info({:EXIT, _pid, :normal}, state), do: {:noreply, state}
def handle_info({:DOWN, ref, :process, _object, _reason}, state) do
{neighbour, _ref} =
Enum.find(state.neighbour_monitors, fn
{_neighbour, ^ref} -> true
_ -> false
end)
new_neighbour_monitors = Map.delete(state.neighbour_monitors, neighbour)
new_outstanding_syncs = Map.delete(state.outstanding_syncs, neighbour)
new_state = %{
state
| neighbour_monitors: new_neighbour_monitors,
outstanding_syncs: new_outstanding_syncs
}
{:noreply, new_state}
end
def handle_info({:set_neighbours, neighbours}, state) do
new_neighbours = MapSet.new(neighbours)
ex_neighbours = MapSet.difference(state.neighbours, new_neighbours)
for n <- ex_neighbours do
case Map.get(state.neighbour_monitors, n) do
ref when is_reference(ref) -> Process.demonitor(ref, [:flush])
_ -> nil
end
end
new_outstanding_syncs =
Enum.filter(state.outstanding_syncs, fn {neighbour, 1} ->
MapSet.member?(new_neighbours, neighbour)
end)
|> Map.new()
new_neighbour_monitors =
Enum.filter(state.neighbour_monitors, fn {neighbour, _ref} ->
MapSet.member?(new_neighbours, neighbour)
end)
|> Map.new()
state = %{
state
| neighbours: new_neighbours,
outstanding_syncs: new_outstanding_syncs,
neighbour_monitors: new_neighbour_monitors
}
{:noreply, sync_interval_or_state_to_all(state)}
end
def handle_info(:sync, state) do
state = sync_interval_or_state_to_all(state)
Process.send_after(self(), :sync, state.sync_interval)
{:noreply, state}
end
def handle_call(:read, _from, state) do
{:reply, state.crdt_module.read(state.crdt_state), state}
end
def handle_call({:read, keys}, _from, state) do
{:reply, state.crdt_module.read(state.crdt_state, keys), state}
end
def handle_call({:bulk_operation, operations}, _from, state) do
{:reply, :ok,
Enum.reduce(operations, state, fn operation, state ->
handle_operation(operation, state)
end)}
end
def handle_call({:operation, operation}, _from, state) do
{:reply, :ok, handle_operation(operation, state)}
end
def handle_cast({:operation, operation}, state) do
{:noreply, handle_operation(operation, state)}
end
# TODO this won't sync everything anymore, since syncing is now a 2-step process.
# Figure out how to do this properly. Maybe with a `receive` block.
def terminate(_reason, state) do
sync_interval_or_state_to_all(state)
end
defp truncate(list, :infinite), do: list
defp truncate(list, size) when is_list(list) and is_integer(size) do
Enum.take(list, size)
end
defp truncate(diff, size) when is_integer(size) do
MerkleMap.truncate_diff(diff, size)
end
defp read_from_storage(%{storage_module: nil} = state) do
state
end
defp read_from_storage(state) do
case state.storage_module.read(state.name) do
nil ->
state
{node_id, sequence_number, crdt_state, merkle_map} ->
Map.put(state, :sequence_number, sequence_number)
|> Map.put(:crdt_state, crdt_state)
|> Map.put(:merkle_map, merkle_map)
|> Map.put(:node_id, node_id)
|> remove_crdt_state_keys()
end
end
defp remove_crdt_state_keys(state) do
%{state | crdt_state: Map.put(state.crdt_state, :keys, MapSet.new())}
end
defp write_to_storage(%{storage_module: nil} = state) do
state
end
defp write_to_storage(state) do
:ok =
state.storage_module.write(
state.name,
{state.node_id, state.sequence_number, state.crdt_state, state.merkle_map}
)
state
end
defp sync_interval_or_state_to_all(state) do
state = monitor_neighbours(state)
new_merkle_map = MerkleMap.update_hashes(state.merkle_map)
{:continue, continuation} = MerkleMap.prepare_partial_diff(new_merkle_map, 8)
diff = %Diff{
continuation: continuation,
dots: state.crdt_state.dots,
from: self(),
originator: self()
}
new_outstanding_syncs =
Enum.map(state.neighbour_monitors, fn {neighbour, _monitor} -> neighbour end)
|> Enum.reject(fn pid -> self() == pid end)
|> Enum.reduce(state.outstanding_syncs, fn neighbour, outstanding_syncs ->
Map.put_new_lazy(outstanding_syncs, neighbour, fn ->
try do
send(neighbour, {:diff, %Diff{diff | to: neighbour}})
1
rescue
_ in ArgumentError ->
# This happens when we attempt to sync with a neighbour that is dead.
# This can happen, and is not a big deal, since syncing is idempotent.
Logger.debug(
"tried to sync with a dead neighbour: #{inspect(neighbour)}, ignore the error and move on"
)
0
end
end)
end)
|> Enum.reject(&match?({_, 0}, &1))
|> Map.new()
Map.put(state, :outstanding_syncs, new_outstanding_syncs)
|> Map.put(:merkle_map, new_merkle_map)
end
defp monitor_neighbours(state) do
new_neighbour_monitors =
Enum.reduce(state.neighbours, state.neighbour_monitors, fn neighbour, monitors ->
Map.put_new_lazy(monitors, neighbour, fn ->
try do
Process.monitor(neighbour)
rescue
_ in ArgumentError ->
# This can happen when we attempt to monitor a neighbour that is dead.
# This can happen, and is not a big deal, since we'll re-add the monitor later
# to get notified when the neighbour comes back online.
Logger.debug(
"tried to monitor a dead neighbour: #{inspect(neighbour)}, ignore the error and move on"
)
:error
end
end)
end)
|> Enum.reject(&match?({_, :error}, &1))
|> Map.new()
Map.put(state, :neighbour_monitors, new_neighbour_monitors)
end
defp reverse_diff(diff) do
%Diff{diff | from: diff.to, to: diff.from}
end
defp send_diff_continue(diff) do
send(diff.to, {:diff, diff})
end
defp send_diff(diff, keys, state) do
if diff.originator == diff.to do
send(diff.to, {:get_diff, diff, keys})
else
send(
diff.to,
{:diff,
%{state.crdt_state | dots: diff.dots, value: Map.take(state.crdt_state.value, keys)},
keys}
)
end
end
defp handle_operation({function, [key | rest_args]}, state) do
delta =
apply(state.crdt_module, function, [key | rest_args] ++ [state.node_id, state.crdt_state])
update_state_with_delta(state, delta, [key])
end
defp diff(old_state, new_state, keys) do
Enum.flat_map(keys, fn key ->
case {Map.get(old_state.crdt_state.value, key), Map.get(new_state.crdt_state.value, key)} do
{old, old} -> []
{_old, nil} -> [{:remove, key}]
{_old, new} -> [{:add, key, new}]
end
end)
end
defp diffs_keys(diffs) do
Enum.map(diffs, fn
{:add, key, _val} -> key
{:remove, key} -> key
end)
end
defp diffs_to_callback(_old_state, _new_state, []), do: nil
defp diffs_to_callback(old_state, new_state, keys) do
old = new_state.crdt_module.read(old_state.crdt_state, keys)
new = new_state.crdt_module.read(new_state.crdt_state, keys)
diffs =
Enum.flat_map(keys, fn key ->
case {Map.get(old, key), Map.get(new, key)} do
{old, old} -> []
{_old, nil} -> [{:remove, key}]
{_old, new} -> [{:add, key, new}]
end
end)
case new_state.on_diffs do
function when is_function(function) -> function.(diffs)
{module, function, args} -> apply(module, function, args ++ [diffs])
nil -> nil
end
end
defp update_state_with_delta(state, delta, keys) do
new_crdt_state = state.crdt_module.join(state.crdt_state, delta, keys)
new_state = Map.put(state, :crdt_state, new_crdt_state)
diffs = diff(state, new_state, keys)
{new_merkle_map, count} =
Enum.reduce(diffs, {state.merkle_map, 0}, fn
{:add, key, value}, {mm, count} -> {MerkleMap.put(mm, key, value), count + 1}
{:remove, key}, {mm, count} -> {MerkleMap.delete(mm, key), count + 1}
end)
:telemetry.execute([:delta_crdt, :sync, :done], %{keys_updated_count: count}, %{
name: state.name
})
diffs_to_callback(state, new_state, diffs_keys(diffs))
Map.put(new_state, :merkle_map, new_merkle_map)
|> write_to_storage()
end
defp ack_diff(%{originator: originator, from: originator, to: to}) do
send(originator, {:ack_diff, to})
end
defp ack_diff(%{originator: originator, from: from, to: originator}) do
send(originator, {:ack_diff, from})
end
end
|
lib/delta_crdt/causal_crdt.ex
| 0.543833
| 0.482734
|
causal_crdt.ex
|
starcoder
|
defmodule Faker do
@moduledoc """
Main module to start application with some helper functions.
"""
@doc """
Starts Faker with default locale.
"""
@spec start() :: :ok
def start do
:application.start(:faker)
end
@doc """
Starts Faker with `lang` locale.
"""
@spec start(atom) :: :ok
def start(lang) when is_atom(lang) do
:application.start(:faker)
locale(lang)
:ok
end
@doc """
Internal function to format string.
It replaces `"#"` with a random digit and `"?"` with a random Latin letter.
"""
@spec format(String.t()) :: String.t()
def format(str) when is_binary(str) do
format(str, "")
end
defp format(<<"#"::utf8, tail::binary>>, acc) do
format(tail, <<acc::binary, "#{random_between(0, 9)}">>)
end
defp format(<<"?"::utf8, tail::binary>>, acc) do
format(tail, <<acc::binary, letter()>>)
end
defp format(<<other::utf8, tail::binary>>, acc) do
format(tail, <<acc::binary, other>>)
end
defp format("", acc) do
acc
end
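  # Illustrative, non-deterministic uses of format/1 (exact output differs
  # per run, so these cannot be doctests):
  #
  #     Faker.format("###-###-####") #=> e.g. "294-817-0365"
  #     Faker.format("??##")         #=> e.g. "Ab42"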
@alphabet 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
defp letter do
Enum.at(@alphabet, random_between(0, Enum.count(@alphabet) - 1))
end
@doc """
Returns application locale ready for module construct.
"""
@spec mlocale() :: String.t()
def mlocale do
if Faker.country() do
String.capitalize(to_string(Faker.locale())) <>
String.capitalize(to_string(Faker.country()))
else
String.capitalize(to_string(Faker.locale()))
end
end
@doc """
Returns application locale.
"""
@spec locale() :: atom
def locale do
Application.get_env(:faker, :locale)
end
@doc """
Returns application country.
"""
@spec country() :: atom
def country do
Application.get_env(:faker, :country)
end
@doc """
Sets application locale.
"""
@spec locale(atom) :: :ok
def locale(lang) when is_atom(lang) do
Application.put_env(:faker, :locale, lang)
end
@doc """
Returns a random float in the value range 0.0 <= x < 1.0.
## Examples
iex> is_float(random_uniform())
true
"""
@spec random_uniform() :: float
def random_uniform do
Application.get_env(:faker, :random_module).random_uniform
end
@doc """
Returns a (pseudo) random number as an integer between the range intervals.
## Examples
iex> random_between(3, 7) in [3, 4, 5, 6, 7]
true
"""
@spec random_between(integer, integer) :: integer
def random_between(left, right) do
Application.get_env(:faker, :random_module).random_between(left, right)
end
@doc """
Returns random bytes.
"""
@spec random_bytes(pos_integer) :: binary
def random_bytes(total) do
Application.get_env(:faker, :random_module).random_bytes(total)
end
defmacro localize(function) do
quote do
def unquote(function)() do
module = Module.concat(unquote(__CALLER__.module), Faker.mlocale())
module_enus = Module.concat(unquote(__CALLER__.module), EnUs)
cond do
function_exported?(module, unquote(function), 0) ->
apply(module, unquote(function), [])
function_exported?(module_enus, unquote(function), 0) ->
apply(module_enus, unquote(function), [])
true ->
apply(Module.concat(unquote(__CALLER__.module), En), unquote(function), [])
end
end
end
end
defmacro sampler(name, data) do
count = Enum.count(data)
mapped_data =
data |> Enum.with_index() |> Enum.into(%{}, fn {k, v} -> {v, k} end) |> Macro.escape()
quote do
def unquote(name)() do
unquote(mapped_data)
|> Map.get(Faker.random_between(0, unquote(count - 1)))
end
end
end
defmacro samplerp(name, data) do
count = Enum.count(data)
mapped_data =
data |> Enum.with_index() |> Enum.into(%{}, fn {k, v} -> {v, k} end) |> Macro.escape()
quote do
defp unquote(name)() do
unquote(mapped_data)
|> Map.get(Faker.random_between(0, unquote(count - 1)))
end
end
end
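  # Hypothetical use of the sampler/2 macro above (module and data are
  # illustrative, not shipped with Faker; the data must be a literal list
  # because the macro counts it at compile time):
  #
  #     defmodule MyFakes.Color do
  #       import Faker, only: [sampler: 2]
  #
  #       sampler :name, ["red", "green", "blue"]
  #     end
  #
  #     MyFakes.Color.name() #=> "green" (a random pick from the list)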
end
|
lib/faker.ex
| 0.804214
| 0.406597
|
faker.ex
|
starcoder
|
defmodule Theriac do
import Enum
@moduledoc """
Theriac is an implementation of clojure style transducers in elixir.
"""
@doc ~S"""
Transduces a given enumerable into a list.
## Examples
iex> Theriac.transduce([1,2,7,9], Theriac.take(1))
[1]
"""
def transduce enum, {id, initial_state, transducer} do
transduce enum, {[{id, initial_state}], transducer}
end
def transduce enum, {initial_state, transducer} do
reducer = transducer.(fn
{:reduced, rs}, _input -> {:reduced, rs}
{result, state}, input -> {result ++ [input], state}
end)
case reduce(enum,{[],initial_state}, fn(input,rs) -> reducer.(rs,input) end) do
{:reduced, {result, _state}} -> result
{r, _s} -> r
end
end
@doc ~S"""
Combines multiple transducers into one combined transducers.
The functions are applied in the order they appear in the given list.
## Examples
iex> Theriac.transduce([1,2,7,9,11,22,3,10], Theriac.comb [Theriac.take(3), Theriac.map(fn i -> i * 2 end)])
[2,4,14]
"""
def comb list do
reduce(list, {[], fn i -> i end},
fn({id, initial_state, f},{cummulated_initial_state, c}) ->
{cummulated_initial_state ++ [{id, initial_state}], fn i -> c.(f.(i)) end} end)
end
@doc ~S"""
Maps the given values over the given function and passes the result to the step function.
## Examples
iex> Theriac.transduce([1,2,3],Theriac.map(fn inp -> inp + 1 end))
[2,3,4]
"""
def map f do
transducer new_id, :stateless, fn
rf, result, input -> rf.(result, f.(input))
end
end
@doc ~S"""
Skips every element for which the given function returns true; the step function is only called for the remaining elements.
## Examples
iex>Theriac.transduce([1,2,3],Theriac.remove(fn inp -> inp > 2 end))
[1,2]
"""
def remove f do
transducer new_id, :stateless, fn
rf, result, input -> unless f.(input), do: rf.(result, input), else: result
end
end
@doc ~S"""
Calls the step function for every element for which the given function returns true.
## Examples
iex>Theriac.transduce([1,2,3],Theriac.filter(fn inp -> inp > 2 end))
[3]
"""
def filter f do
transducer new_id, :stateless, fn
rf, result, input -> if f.(input), do: rf.(result, input), else: result
end
end
@doc ~S"""
Calls the step function for each element until the given function returns false for the first time.
## Examples
iex>Theriac.transduce([1,3,5,2,4],Theriac.take_while(fn inp -> inp < 5 end))
[1,3]
"""
def take_while f do
transducer new_id, :stateless, fn
rf, result, input -> if f.(input), do: rf.(result, input), else: {:reduced, result}
end
end
@doc ~S"""
Calls the step function for the first n elements.
## Examples
iex>Theriac.transduce([1,11,3,4,5,6,7,8,9], Theriac.take(2))
[1,11]
"""
def take count do
stateful_transduce 0, fn step, _skip, state, input ->
if state < count do
step.(state+1, input)
else
step.({:reduced, state+1}, input)
end
end
end
@doc ~S"""
Skips the first n elements and calls the step function for the n+1th element.
## Examples
iex>Theriac.transduce([1,11,3,4,5,6,7,8,9], Theriac.skip(5))
[6,7,8,9]
"""
def skip count do
stateful_transduce 0, fn step, skip, state, input ->
if state >= count do
step.(state+1, input)
else
skip.(state+1)
end
end
end
@doc ~S"""
Calls the given function with every element and the result of calling
the function with the previous element or the given initial value if it's the first element.
Calls the step function with every result.
## Examples
iex>Theriac.transduce([1,2,3,4,5], Theriac.scan(0, fn inp, acc -> inp + acc end))
[1,3,6,10,15]
"""
def scan initialVal, f do
stateful_transduce initialVal, fn step, _skip, state, input ->
current = f.(input, state)
step.(current,current)
end
end
@doc ~S"""
Calls the step function for every element, if no previous element was equal to the current element.
## Examples
iex>Theriac.transduce([1,3,2,3,2,4,5], Theriac.distinct)
[1,3,2,4,5]
"""
def distinct do
stateful_transduce [], fn step, skip, state, input ->
if(all?(state, fn e -> e != input end)) do
new_state = state ++ [input]
step.(new_state, input)
else
skip.(state)
end
end
end
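  # How the private helpers below fit together: stateful_transduce/2 assigns
  # each transducer a unique id and keeps its state in the shared keyword
  # list, read via get_state/2 and written back via update_state/3. The
  # `step` continuation advances the pipeline with new state; `skip` (bound
  # to `ret` below) only records state without emitting a value.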
defp stateful_transduce initialVal, func do
id = new_id
transducer id, initialVal,
fn rf, {result, states}, input ->
state = get_state states, id
continuation = fn
{:reduced, new_state}, input ->
rf.({:reduced, {result, update_state(states, id, new_state)}}, input)
new_state, input ->
rf.({result, update_state(states, id, new_state)}, input)
end
ret = fn new_state ->
{result, update_state(states, id, new_state)}
end
func.(continuation, ret, state, input)
end
end
defp transducer id, initialState, reduction do
{id, initialState, fn rf ->
fn
{:reduced, rs}, _input -> {:reduced, rs}
rs, input -> reduction.(rf, rs, input)
end
end}
end
defp get_state states, given_id do
{_id, s} = List.keyfind(states, given_id, 0)
s
end
defp update_state states, given_id, new_state do
List.keyreplace(states, given_id, 0, {given_id, new_state})
end
defp new_id do
UUID.uuid1()
end
end
|
lib/theriac.ex
| 0.619126
| 0.58062
|
theriac.ex
|
starcoder
|
defmodule EctoEnum do
@moduledoc """
Provides `defenum/2` and `defenum/3` macro for defining an Enum Ecto type.
This module can also be `use`d to create an Ecto Enum like:
defmodule CustomEnum do
use EctoEnum, ready: 0, set: 1, go: 2
end
Or in place of using `EctoEnum.Postgres` like:
defmodule PostgresType do
use EctoEnum, type: :new_type, enums: [:ready, :set, :go]
end
The difference between the above two examples is that the previous one would use an
integer column in the database while the latter one would use a custom type in PostgreSQL.
Note that only PostgreSQL is supported for custom data types at the moment.
"""
@doc """
Defines an enum custom `Ecto.Type`.
For second argument, it accepts either a list of strings or a keyword list with keyword
values that are either strings or integers. Below are examples of a valid argument:
[registered: 0, active: 1, inactive: 2, archived: 3]
[registered: "registered", active: "active", inactive: "inactive", archived: "archived"]
["registered", "active", "inactive", "archived"]
It can be used like any other `Ecto.Type` by passing it to a field in your model's
schema block. For example:
import EctoEnum
defenum StatusEnum, registered: 0, active: 1, inactive: 2, archived: 3
defmodule User do
use Ecto.Schema
schema "users" do
field :status, StatusEnum
end
end
In the above example, the `:status` will behave like an enum and will allow you to
pass an `integer`, `atom` or `string` to it. This applies to saving the model,
invoking `Ecto.Changeset.cast/4`, or performing a query on the status field. Let's
do a few examples:
iex> user = Repo.insert!(%User{status: 0})
iex> Repo.get(User, user.id).status
:registered
iex> %{changes: changes} = cast(%User{}, %{"status" => "active"}, ~w(status), [])
iex> changes.status
:active
iex> from(u in User, where: u.status == :registered) |> Repo.all() |> length
1
Passing an invalid value to a `Ecto.Changeset.cast/3` will add an error to `changeset.errors`
field.
iex> changeset = cast(%User{}, %{"status" => "retroactive"}, ~w(status), [])
iex> changeset.errors
[status: "is invalid"]
Passing an invalid value directly into a model struct will result in an error when calling
`Repo` functions.
iex> Repo.insert!(%User{status: :none})
** (Ecto.ChangeError) `"none"` is not a valid enum value for `EctoEnumTest.StatusEnum`.
Valid enum values are `[0, 1, 2, 3, :registered, :active, :inactive, :archived, "active",
"archived", "inactive", "registered"]`
The enum type `StatusEnum` will also have a reflection function for inspecting the
enum map in runtime.
iex> StatusEnum.__enum_map__()
[registered: 0, active: 1, inactive: 2, archived: 3]
Enums also generate a typespec for use with dialyzer, available as the `t()` type
iex> t(StatusEnum)
@type t() :: :registered | :active | :inactive | :archived
"""
defmacro __using__(opts) do
quote do
opts = unquote(opts)
if opts[:type] && opts[:enums] do
use EctoEnum.Postgres.Use, unquote(opts)
else
use EctoEnum.Use, unquote(opts)
end
end
end
defmacro defenum(module, type, enum, options \\ []) do
EctoEnum.Postgres.defenum(module, type, enum, options)
end
defmacro defenum(module, enum) do
quote do
enum = Macro.escape(unquote(enum))
defmodule unquote(module) do
use EctoEnum.Use, enum
end
end
end
alias Ecto.Changeset
@spec validate_enum(
Ecto.Changeset.t(),
atom,
(atom, String.t(), list(String.t() | integer | atom) -> String.t())
) :: Ecto.Changeset.t()
def validate_enum(changeset, field, error_msg \\ &default_error_msg/3) do
Changeset.validate_change(changeset, field, :validate_enum, fn field, value ->
type = changeset.types[field]
error_msg = error_msg.(field, value, type.__valid_values__())
if type.valid_value?(value) do
[]
else
Keyword.put([], field, error_msg)
end
end)
end
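  # A hedged usage sketch for validate_enum/3 (schema, field, and value are
  # illustrative; assumes a `User` schema whose :status field is an EctoEnum):
  #
  #     %User{}
  #     |> Ecto.Changeset.change(%{status: "bogus"})
  #     |> EctoEnum.validate_enum(:status)
  #     # => a changeset whose errors include a message like
  #     #    "Value `\"bogus\"` is not a valid enum for `:status` field. ..."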
defp default_error_msg(field, value, valid_values) do
"Value `#{inspect(value)}` is not a valid enum for `#{inspect(field)}` field. " <>
"Valid enums are `#{inspect(valid_values)}`"
end
end
|
lib/ecto_enum.ex
| 0.875747
| 0.550184
|
ecto_enum.ex
|
starcoder
|
defmodule ToyRobot.Server do
@moduledoc """
The ToyRobot.Server module manages state for the ToyRobot simulation.
The `ToyRobot.Logic` module is aliased to provide the logic for valid moves and
rules for state changes.
"""
use GenServer, restart: :transient
alias ToyRobot.Logic
@doc """
Starts a supervised process instance of the `ToyRobot.Server`
"""
def start_link do
GenServer.start_link(__MODULE__, [])
end
def init([]) do
initial_state = Logic.new()
{:ok, initial_state}
end
@doc """
Sync call to the server that matches on :get_state
Returns current state of the toyrobot as a `%ToyRobot.Logic{x: x, y: y, face: face}` struct.
"""
def current_state(server_pid) do
GenServer.call(server_pid, :get_state)
end
@doc """
Sync call to the server that matches on :update
"""
def update_state(new_state, server_pid) do
GenServer.call(server_pid, {:update, new_state})
end
@doc """
Common code to handle each of the `["MOVE", "LEFT", "RIGHT"]` commands.
- Gets current state from `ToyRobot.Server.current_state/1`.
- Calls `ToyRobot.Logic` to get an updated value for the state then
delegates the actual update to `ToyRobot.Server.update_state/2`
"""
def do_command(cmd, server_pid) do
state = current_state(server_pid)
apply(Logic, cmd, [state])
|> update_state(server_pid)
:ok
end
@doc """
Specific handler for the `"PLACE x,y,facing"` command.
Delegates to `ToyRobot.Logic.place/3` and performs a sync
call to `ToyRobot.Server.update_state/2`
"""
def place(server_pid, args) do
[x, y, face] = args
Logic.place(x, y, face)
|> update_state(server_pid)
end
@doc """
Gets current state from `ToyRobot.Server.current_state/1`.
Then delegates actual reporting to `ToyRobot.Logic.report/1` with the current state.
"""
def report(server_pid) do
state = current_state(server_pid)
Logic.report(state)
end
@doc """
Moves the ToyRobot 1 space in the direction it is currently facing.
**see:** `ToyRobot.Server.do_command/2`
"""
def move(server_pid) do
do_command(:move, server_pid)
end
@doc """
Turns the ToyRobot 90 degrees in a **counter clockwise** direction to **Face** in a new direction.
**see:** `ToyRobot.Server.do_command/2`
"""
def left(server_pid) do
do_command(:left, server_pid)
end
@doc """
Turns the ToyRobot 90 degrees in a **clockwise** direction to **Face** in a new direction.
**see:** `ToyRobot.Server.do_command/2`
"""
def right(server_pid) do
do_command(:right, server_pid)
end
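  # An illustrative session (the coordinate and facing formats assume
  # ToyRobot.Logic's conventions, which live outside this file):
  #
  #     {:ok, pid} = ToyRobot.Server.start_link()
  #     ToyRobot.Server.place(pid, [0, 0, "NORTH"])
  #     ToyRobot.Server.move(pid)
  #     ToyRobot.Server.left(pid)
  #     ToyRobot.Server.report(pid)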
@doc false
def handle_call(:get_state, _from, state) do
{:reply, state, state}
end
@doc false
def handle_call({:update, new_state}, _from, state) do
{:reply, state, new_state}
end
end
|
lib/toy_robot/server.ex
| 0.8818
| 0.702186
|
server.ex
|
starcoder
|
defmodule Stripe.Customers do
@moduledoc """
Main API for working with Customers at Stripe. Through this API you can:
- change subscriptions
- create invoices
- create customers
- delete a single customer
- delete all customers
- count customers
Supports the Connect workflow by allowing you to pass in any API key explicitly (vs using the one from env/config).
(API ref: https://stripe.com/docs/api/curl#customer_object)
"""
@endpoint "customers"
@doc """
Creates a Customer with the given parameters - all of which are optional.
## Example
```
new_customer = [
email: "<EMAIL>",
description: "An Test Account",
metadata:[
app_order_id: "ABC123"
app_state_x: "xyz"
],
card: [
number: "4111111111111111",
exp_month: 01,
exp_year: 2018,
cvc: 123,
name: "<NAME>"
]
]
{:ok, res} = Stripe.Customers.create new_customer
```
"""
def create(params) do
create params, Stripe.config_or_env_key
end
@doc """
Creates a Customer with the given parameters - all of which are optional.
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, res} = Stripe.Customers.create new_customer, key
```
"""
def create(params, key) do
Stripe.make_request_with_key(:post, @endpoint, key, params)
|> Stripe.Util.handle_stripe_response
end
@doc """
Retrieves a given Customer with the specified ID. Returns 404 if not found.
## Example
```
{:ok, cust} = Stripe.Customers.get "customer_id"
```
"""
def get(id) do
get id, Stripe.config_or_env_key
end
@doc """
Retrieves a given Customer with the specified ID. Returns 404 if not found.
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, cust} = Stripe.Customers.get "customer_id", key
```
"""
def get(id, key) do
Stripe.make_request_with_key(:get, "#{@endpoint}/#{id}", key)
|> Stripe.Util.handle_stripe_response
end
@doc """
Updates a Customer with the given parameters - all of which are optional.
## Example
```
new_fields = [
email: "<EMAIL>",
description: "New description",
]
{:ok, res} = Stripe.Customers.update(customer_id, new_fields)
```
"""
def update(customer_id, params) do
update(customer_id, params, Stripe.config_or_env_key)
end
@doc """
Updates a Customer with the given parameters - all of which are optional.
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, res} = Stripe.Customers.update(customer_id, new_fields, key)
```
"""
def update(customer_id, params, key) do
Stripe.make_request_with_key(:post, "#{@endpoint}/#{customer_id}", key, params)
|> Stripe.Util.handle_stripe_response
end
@doc """
Returns a list of Customers with a default limit of 10, which you can override via the `limit` argument.
## Example
```
{:ok, customers} = Stripe.Customers.list(starting_after, 20)
```
"""
def list(starting_after, limit \\ 10) do
list Stripe.config_or_env_key, starting_after, limit
end
@doc """
Returns a list of Customers, up to `limit` entries, starting after the given customer id.
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, customers} = Stripe.Customers.list(key,starting_after,20)
```
"""
def list(key, starting_after, limit) do
Stripe.Util.list @endpoint, key, starting_after, limit
end
@doc """
Deletes a Customer with the specified ID
## Example
```
{:ok, resp} = Stripe.Customers.delete "customer_id"
```
"""
def delete(id) do
delete id, Stripe.config_or_env_key
end
@doc """
Deletes a Customer with the specified ID
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, resp} = Stripe.Customers.delete "customer_id", key
```
"""
def delete(id,key) do
Stripe.make_request_with_key(:delete, "#{@endpoint}/#{id}", key)
|> Stripe.Util.handle_stripe_response
end
@doc """
Deletes all Customers
## Example
```
Stripe.Customers.delete_all
```
"""
def delete_all do
case all() do
{:ok, customers} ->
Enum.each customers, fn c -> delete(c["id"]) end
{:error, err} -> raise err
end
end
@doc """
Deletes all Customers
Using a given stripe key to apply against the account associated.
## Example
```
Stripe.Customers.delete_all key
```
"""
def delete_all key do
case all() do
{:ok, customers} ->
Enum.each customers, fn c -> delete(c["id"], key) end
{:error, err} -> raise err
end
end
@max_fetch_size 100
@doc """
List all customers.
##Example
```
{:ok, customers} = Stripe.Customers.all
```
"""
def all( accum \\ [], starting_after \\ "") do
all Stripe.config_or_env_key, accum, starting_after
end
@doc """
List all customers.
Using a given stripe key to apply against the account associated.
##Example
```
{:ok, customers} = Stripe.Customers.all key, accum, starting_after
```
"""
def all( key, accum, starting_after) do
case Stripe.Util.list_raw("#{@endpoint}",key, @max_fetch_size, starting_after) do
{:ok, resp} ->
case resp[:has_more] do
true ->
last_sub = List.last( resp[:data] )
all( key, resp[:data] ++ accum, last_sub["id"] )
false ->
result = resp[:data] ++ accum
{:ok, result}
end
{:error, err} -> raise err
end
end
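  # Pagination note: all/3 pages through customers @max_fetch_size at a time,
  # feeding the id of the last record back in as `starting_after`, and
  # accumulates pages until Stripe reports `has_more: false`.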
@doc """
Count total number of customers.
## Example
```
{:ok, count} = Stripe.Customers.count
```
"""
def count do
count Stripe.config_or_env_key
end
@doc """
Count total number of customers.
Using a given stripe key to apply against the account associated.
## Example
```
{:ok, count} = Stripe.Customers.count key
```
"""
def count(key) do
Stripe.Util.count "#{@endpoint}", key
end
end
|
lib/stripe/customers.ex
| 0.828245
| 0.794943
|
customers.ex
|
starcoder
|
defmodule Day0601 do
def parse_line(line) do
line
|> String.split(",")
|> Enum.map(&String.trim/1)
|> Enum.map(&String.to_integer/1)
|> List.to_tuple
end
def taxi_dist({px, py}, {x, y}), do: abs(px-x) + abs(py-y)
def taxi_dist_closest(coordinates, p) do
{_d, winners} = coordinates
|> Enum.group_by(&(taxi_dist(&1, p)))
|> Enum.min_by(fn {d, _} -> d end)
winners
end
def bounding_box(coordinates) do
{x, y} = Enum.unzip(coordinates)
%{
x1: Enum.min(x) - 1,
y1: Enum.min(y) - 1,
x2: Enum.max(x) + 1,
y2: Enum.max(y) + 1
}
end
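  # The box is padded by 1 on every side so that, in process/1 below, any
  # region that reaches the border can be flagged as extending to infinity.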
def parse_coordinates(file) do
file
|> File.stream!
|> Enum.map(&parse_line/1)
end
def process(file) do
coordinates = parse_coordinates(file)
bb = bounding_box(coordinates)
closest = Enum.reduce (bb.y1..bb.y2), Map.new, fn(y, acc) ->
Enum.reduce (bb.x1..bb.x2), acc, fn(x, acc) ->
Map.put acc, {x,y},
%{
closest: taxi_dist_closest(coordinates, {x,y}),
border: (x == bb.x1) || (x == bb.x2) || (y == bb.y1) || (y == bb.y2)
}
end
end
closest = closest
|> Enum.reject(fn {_,%{closest: c}} -> Enum.count(c) > 1 end)
infinites = closest
|> Enum.filter(fn {_,v} -> v.border end)
|> Enum.map(fn {_,v} -> v.closest end)
|> List.flatten
|> Enum.uniq
closest
|> Enum.group_by(fn {_, %{closest: [c]}} -> c end)
|> Map.drop(infinites)
|> Enum.map(fn {_,v} -> Enum.count(v) end)
|> Enum.max
end
def process_02(file, max_dist \\ 32) do
coordinates = parse_coordinates(file)
bb = bounding_box(coordinates)
pixels = for x <- (bb.x1..bb.x2), y <- (bb.y1..bb.y2), do: {x,y}
Enum.reduce(pixels, 0, fn(p, area_count) ->
sum_dist = Enum.reduce(coordinates, 0, fn(c, d) ->
d + taxi_dist(c, p)
end)
if sum_dist < max_dist do
area_count + 1
else
area_count
end
end)
end
end
|
lib/day_0601.ex
| 0.581303
| 0.525186
|
day_0601.ex
|
starcoder
|
defmodule Cldr.Number.Backend.Number do
@moduledoc false
def define_number_module(config) do
backend = config.backend
quote location: :keep, bind_quoted: [backend: backend, config: Macro.escape(config)] do
defmodule Number do
@moduledoc false
if Cldr.Config.include_module_docs?(config.generate_docs) do
@moduledoc """
Formats numbers and currencies based upon CLDR's decimal formats specification.
The format specification is documented in [Unicode TR35](http://unicode.org/reports/tr35/tr35-numbers.html#Number_Formats).
There are several classes of formatting including non-scientific, scientific,
rules based (for spelling and ordinal formats), compact formats that display `1k`
rather than `1,000` and so on. See `Cldr.Number.to_string/2` for specific formatting
options.
### Non-Scientific Notation Formatting
The following description applies to formats that do not use scientific
notation or significant digits:
* If the number of actual integer digits exceeds the maximum integer digits,
then only the least significant digits are shown. For example, 1997 is
formatted as "97" if the maximum integer digits is set to 2.
* If the number of actual integer digits is less than the minimum integer
digits, then leading zeros are added. For example, 1997 is formatted as
"01997" if the minimum integer digits is set to 5.
* If the number of actual fraction digits exceeds the maximum fraction
digits, then half-even rounding is performed to the maximum fraction
digits. For example, 0.125 is formatted as "0.12" if the maximum fraction
digits is 2. This behavior can be changed by specifying a rounding
increment and a rounding mode.
* If the number of actual fraction digits is less than the minimum fraction
digits, then trailing zeros are added. For example, 0.125 is formatted as
"0.1250" if the minimum fraction digits is set to 4.
* Trailing fractional zeros are not displayed if they occur j positions after
the decimal, where j is less than the maximum fraction digits. For example,
0.10004 is formatted as "0.1" if the maximum fraction digits is four or
less.
### Scientific Notation Formatting
Numbers in scientific notation are expressed as the product of a mantissa and
a power of ten, for example, 1234 can be expressed as 1.234 x 10^3. The
mantissa is typically in the half-open interval [1.0, 10.0) or sometimes
[0.0, 1.0), but it need not be. In a pattern, the exponent character
immediately followed by one or more digit characters indicates scientific
notation. Example: "0.###E0" formats the number 1234 as "1.234E3".
* The number of digit characters after the exponent character gives the
minimum exponent digit count. There is no maximum. Negative exponents are
formatted using the localized minus sign, not the prefix and suffix from
the pattern. This allows patterns such as "0.###E0 m/s". To prefix positive
exponents with a localized plus sign, specify '+' between the exponent and
the digits: "0.###E+0" will produce formats "1E+1", "1E+0", "1E-1", and so
on. (In localized patterns, use the localized plus sign rather than '+'.)
* The minimum number of integer digits is achieved by adjusting the exponent.
Example: 0.00123 formatted with "00.###E0" yields "12.3E-4". This only
happens if there is no maximum number of integer digits. If there is a
maximum, then the minimum number of integer digits is fixed at one.
* The maximum number of integer digits, if present, specifies the exponent
grouping. The most common use of this is to generate engineering notation,
in which the exponent is a multiple of three, for example, "##0.###E0". The
number 12345 is formatted using "##0.####E0" as "12.345E3".
* When using scientific notation, the formatter controls the digit counts
using significant digits logic. The maximum number of significant digits
limits the total number of integer and fraction digits that will be shown
in the mantissa; it does not affect parsing. For example, 12345 formatted
with "##0.##E0" is "12.3E3". Exponential patterns may not contain grouping
separators.
### Significant Digits
There are two ways of controlling how many digits are shown: (a)
significant digits counts, or (b) integer and fraction digit counts. Integer
and fraction digit counts are described above. When a formatter is using
significant digits counts, it uses however many integer and fraction digits
are required to display the specified number of significant digits. It may
ignore min/max integer/fraction digits, or it may use them to the extent
possible.
"""
end
alias Cldr.Number.System
alias Cldr.Locale
@doc """
Return a valid number system from a provided locale and number
system name or type.
The number system or number system type must be valid for the
given locale. If a number system type is provided, the
underlying number system is returned.
## Arguments
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
* `system_name` is any number system name returned by
`Cldr.known_number_systems/0` or a number system type
returned by `Cldr.known_number_system_types/0`
## Examples
iex> #{inspect(__MODULE__)}.validate_number_system "en", :latn
{:ok, :latn}
iex> #{inspect(__MODULE__)}.validate_number_system "en", :default
{:ok, :latn}
iex> #{inspect(__MODULE__)}.validate_number_system "en", :unknown
{:error,
{Cldr.UnknownNumberSystemError, "The number system :unknown is unknown"}}
iex> #{inspect(__MODULE__)}.validate_number_system "zz", :default
{:error, {Cldr.InvalidLanguageError, "The language \\"zz\\" is invalid"}}
"""
@spec validate_number_system(
Cldr.Locale.locale_name() | Cldr.LanguageTag.t(),
System.system_name() | System.types()
) ::
{:ok, System.system_name()} | {:error, {module(), String.t()}}
def validate_number_system(locale, number_system) do
System.system_name_from(number_system, locale, unquote(backend))
end
@doc """
Returns a number formatted into a string according to a format pattern and options.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted.
## Options
* `format`: the format style or a format string defining how the number is
formatted. See `Cldr.Number.Format` for how format strings can be constructed.
See `Cldr.Number.Format.format_styles_for/3` to return available format styles
for a locale. The default `format` is `:standard`.
* If `:format` is set to `:long` or `:short` then the formatting depends on
whether `:currency` is specified. If not specified then the number is
formatted as `:decimal_long` or `:decimal_short`. If `:currency` is
specified the number is formatted as `:currency_long` or
`:currency_short` and `:fractional_digits` is set to 0 as a default.
* `:format` may also be a format defined by CLDR's Rules Based Number
Formats (RBNF). Further information is found in the module `Cldr.Rbnf`.
The most commonly used formats in this category are to spell out the
number in a the locales language. The applicable formats are `:spellout`,
`:spellout_year`, `:ordinal`. A number can also be formatted as roman
numbers by using the format `:roman` or `:roman_lower`.
* `currency`: is the currency for which the number is formatted. For
available currencies see `Cldr.Currency.known_currencies/0`. This option
is required if `:format` is set to `:currency`. If `currency` is set
and no `:format` is set, `:format` will be set to `:currency` as well.
* `currency_symbol`: Allows overriding a currency symbol. The alternatives
are:
* `:iso` the ISO currency code will be used instead of the default
currency symbol.
* `:narrow` uses the narrow symbol defined for the locale. The same
narrow symbol can be defined for more than one currency and therefore this
should be used with care. If no narrow symbol is defined, the standard
symbol is used.
* `:symbol` uses the standard symbol defined in CLDR. A symbol is unique
for each currency and can be safely used.
* "string" uses `string` as the currency symbol
* `:standard` (the default and recommended) uses the CLDR-defined symbol
based upon the currency format for the locale.
* `:cash`: a boolean which indicates whether a number being formatted as a
`:currency` is to be considered a cash value or not. Currencies can be
rounded differently depending on whether `:cash` is `true` or `false`.
*This option is deprecated in favour of `currency_digits: :cash`.*
* `:currency_digits` indicates which of the rounding and digits should be
used. The options are `:accounting` which is the default, `:cash` or
`:iso`
* `:rounding_mode`: determines how a number is rounded to meet the precision
of the format requested. The available rounding modes are `:down`,
`:half_up`, `:half_even`, `:ceiling`, `:floor`, `:half_down`, `:up`. The default is
`:half_even`.
* `:number_system`: determines which of the number systems for a locale
should be used to define the separators and digits for the formatted
number. If `:number_system` is an `atom` it is interpreted as a number
system. If it is a `binary` it is interpreted as a number system name. See
`Cldr.Number.System.number_system_names_for/2`. The default is `:default`.
* `:locale`: determines the locale in which the number is formatted. See
`Cldr.known_locale_names/0`. The default is `Cldr.get_locale/0`, which is the
locale currently in effect for this `Process` and which is set by
`Cldr.put_locale/1`.
* If `:fractional_digits` is set to a positive integer value then the number
will be rounded to that number of digits and displayed accordingly - overriding
settings that would be applied by default. For example, currencies have
fractional digits defined reflecting each currencies minor unit. Setting
`:fractional_digits` will override that setting.
* If `:maximum_integer_digits` is set to a positive integer value then the
number is left truncated before formatting. For example if the number `1234`
is formatted with the option `maximum_integer_digits: 2`, the number is
truncated to `34` and formatted.
* If `:round_nearest` is set to a positive integer value then the number
will be rounded to nearest increment of that value - overriding
settings that would be applied by default.
* `:minimum_grouping_digits` overrides the CLDR definition of minimum grouping
digits. For example, in the locale `es` the number `1234` is formatted by default
as `1234` because the locale defines the `minimum_grouping_digits` as `2`. If
`minimum_grouping_digits: 1` is set as an option the number is formatted as
`1.234`. The `:minimum_grouping_digits` is added to the grouping defined by
the number format. If the sum of these two digits is greater than the number
of digits in the integer (or fractional) part of the number then no grouping
is performed.
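To illustrate two of these options (illustrative examples, assuming the
`en` locale defaults):
    iex> #{inspect(__MODULE__)}.to_string 1234.5678, fractional_digits: 2
    {:ok, "1,234.57"}
    iex> #{inspect(__MODULE__)}.to_string 1234, maximum_integer_digits: 2
    {:ok, "34"}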
## Locale extensions affecting formatting
A locale identifier can specify options that affect number formatting.
These options are:
* `cu`: defines what currency is implied when no currency is specified in
the call to `to_string/2`.
* `cf`: defines whether to use currency or accounting format for
formatting currencies. This overrides the `format: :currency` and `format: :accounting`
options.
* `nu`: defines the number system to be used if none is specified by the `:number_system`
option to `to_string/2`
These keys are part of the [u extension](https://unicode.org/reports/tr35/#u_Extension) and
that document should be consulted for details on how to construct a locale identifier with these
extensions.
## Returns
* `{:ok, string}` or
* `{:error, {exception, message}}`
## Examples
iex> #{inspect(__MODULE__)}.to_string 12345
{:ok, "12,345"}
iex> #{inspect(__MODULE__)}.to_string 12345, locale: "fr"
{:ok, "12 345"}
iex> #{inspect(__MODULE__)}.to_string 1345.32, currency: :EUR, locale: "es", minimum_grouping_digits: 1
{:ok, "1.345,32 €"}
iex> #{inspect(__MODULE__)}.to_string 1345.32, currency: :EUR, locale: "es"
{:ok, "1345,32 €"}
iex> #{inspect(__MODULE__)}.to_string 12345, locale: "fr", currency: "USD"
{:ok, "12 345,00 $US"}
iex> #{inspect(__MODULE__)}.to_string 12345, format: "#E0"
{:ok, "1.2345E4"}
iex> #{inspect(__MODULE__)}.to_string 12345, format: :accounting, currency: "THB"
{:ok, "THB 12,345.00"}
iex> #{inspect(__MODULE__)}.to_string -12345, format: :accounting, currency: "THB"
{:ok, "(THB 12,345.00)"}
iex> #{inspect(__MODULE__)}.to_string 12345, format: :accounting, currency: "THB",
...> locale: "th"
{:ok, "฿12,345.00"}
iex> #{inspect(__MODULE__)}.to_string 12345, format: :accounting, currency: "THB",
...> locale: "th", number_system: :native
{:ok, "฿๑๒,๓๔๕.๐๐"}
iex> #{inspect(__MODULE__)}.to_string 1244.30, format: :long
{:ok, "1 thousand"}
iex> #{inspect(__MODULE__)}.to_string 1244.30, format: :long, currency: "USD"
{:ok, "1,244 US dollars"}
iex> #{inspect(__MODULE__)}.to_string 1244.30, format: :short
{:ok, "1K"}
iex> #{inspect(__MODULE__)}.to_string 1244.30, format: :short, currency: "EUR"
{:ok, "€1K"}
iex> #{inspect(__MODULE__)}.to_string 1234, format: :spellout
{:ok, "one thousand two hundred thirty-four"}
iex> #{inspect(__MODULE__)}.to_string 1234, format: :spellout_verbose
{:ok, "one thousand two hundred and thirty-four"}
iex> #{inspect(__MODULE__)}.to_string 1989, format: :spellout_year
{:ok, "nineteen eighty-nine"}
iex> #{inspect(__MODULE__)}.to_string 123, format: :ordinal
{:ok, "123rd"}
iex> #{inspect(__MODULE__)}.to_string 123, format: :roman
{:ok, "CXXIII"}
iex> #{inspect(__MODULE__)}.to_string 123, locale: "th-u-nu-thai"
{:ok, "๑๒๓"}
iex> #{inspect(__MODULE__)}.to_string 123, format: :currency, locale: "en-u-cu-thb"
{:ok, "THB 123.00"}
## Errors
An error tuple `{:error, reason}` will be returned if an error is detected.
The two most likely causes of an error return are:
* A format cannot be compiled. In this case the error tuple will look like:
```
iex> #{inspect(__MODULE__)}.to_string(12345, format: "0#")
{:error, {Cldr.FormatCompileError,
"Decimal format compiler: syntax error before: \\"#\\""}}
```
* The format style requested is not defined for the `locale` and
`number_system`. This happens typically when the number system is
`:algorithmic` rather than the more common `:numeric`. In this case the error
return looks like:
```
iex> #{inspect(__MODULE__)}.to_string(1234, locale: "he", number_system: "hebr")
{:error, {Cldr.UnknownFormatError,
"The locale :he with number system :hebr does not define a format :standard"}}
```
"""
@spec to_string(number | Decimal.t(), Keyword.t() | map()) ::
{:ok, String.t()} | {:error, {atom, String.t()}}
def to_string(number, options \\ default_options()) do
Cldr.Number.to_string(number, unquote(backend), options)
end
@doc """
Same as the execution of `to_string/2` but raises an exception if an error would be
returned.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted. See
`#{inspect(__MODULE__)}.to_string/2`
## Returns
* a formatted number as a string or
* raises an exception
## Examples
iex> #{inspect(__MODULE__)}.to_string! 12345
"12,345"
iex> #{inspect(__MODULE__)}.to_string! 12345, locale: "fr"
"12 345"
"""
@spec to_string!(number | Decimal.t(), Keyword.t() | map()) ::
String.t() | no_return()
def to_string!(number, options \\ default_options()) do
Cldr.Number.to_string!(number, unquote(backend), options)
end
@doc """
Formats a number and applies the `:at_least` format for
a locale and number system.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted.
See `#{inspect(__MODULE__)}.to_string/2` for a description of the available
options.
## Example
iex> #{inspect(__MODULE__)}.to_at_least_string 1234
{:ok, "1,234+"}
"""
@spec to_at_least_string(number | Decimal.t(), Keyword.t() | map()) ::
{:ok, String.t()} | {:error, {module(), String.t()}}
def to_at_least_string(number, options \\ []) do
Cldr.Number.to_at_least_string(number, unquote(backend), options)
end
@doc """
Formats a number and applies the `:at_most` format for
a locale and number system.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted.
See `Cldr.Number.to_string/3` for a description of the available
options.
## Example
iex> #{inspect(__MODULE__)}.to_at_most_string 1234
{:ok, "≤1,234"}
"""
@spec to_at_most_string(number | Decimal.t(), Keyword.t() | map()) ::
{:ok, String.t()} | {:error, {module(), String.t()}}
def to_at_most_string(number, options \\ []) do
Cldr.Number.to_at_most_string(number, unquote(backend), options)
end
@doc """
Formats a number and applies the `:approximately` format for
a locale and number system.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted.
See `Cldr.Number.to_string/3` for a description of the available
options.
## Example
iex> #{inspect(__MODULE__)}.to_approx_string 1234
{:ok, "~1,234"}
"""
@spec to_approx_string(number | Decimal.t(), Keyword.t() | map()) ::
{:ok, String.t()} | {:error, {module(), String.t()}}
def to_approx_string(number, options \\ []) do
Cldr.Number.to_approx_string(number, unquote(backend), options)
end
@doc """
Formats the first and last numbers of a range and applies
the `:range` format for a locale and number system.
## Arguments
* `number` is an integer, float or Decimal to be formatted
* `options` is a keyword list defining how the number is to be formatted.
See `Cldr.Number.to_string/3` for a description of the available
options.
## Example
iex> #{inspect(__MODULE__)}.to_range_string 1234..5678
{:ok, "1,234–5,678"}
"""
@spec to_range_string(Range.t(), Keyword.t() | map()) ::
{:ok, String.t()} | {:error, {module(), String.t()}}
def to_range_string(range, options \\ []) do
Cldr.Number.to_range_string(range, unquote(backend), options)
end
@doc """
Scans a string in a locale-aware manner and returns
a list of strings and numbers.
## Arguments
* `string` is any `String.t`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:locale` is any locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag.t`. The default is `#{inspect backend}.get_locale/0`.
## Returns
* A list of strings and numbers
## Notes
Number parsing is performed by `Cldr.Number.Parser.parse/2`
and any options provided are passed to that function.
## Examples
iex> #{inspect(__MODULE__)}.scan("£1_000_000.34")
["£", 1000000.34]
iex> #{inspect(__MODULE__)}.scan("I want £1_000_000 dollars")
["I want £", 1000000, " dollars"]
iex> #{inspect(__MODULE__)}.scan("The prize is 23")
["The prize is ", 23]
iex> #{inspect(__MODULE__)}.scan("The lottery number is 23 for the next draw")
["The lottery number is ", 23, " for the next draw"]
iex> #{inspect(__MODULE__)}.scan("The loss is -1.000 euros", locale: "de", number: :integer)
["The loss is ", -1000, " euros"]
"""
def scan(string, options \\ []) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.scan(string, options)
end
@doc """
Parses a string in a locale-aware manner and returns
a number.
## Arguments
* `string` is any `String.t`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:locale` is any locale returned by
`#{inspect backend}.known_locale_names/0`
or a `Cldr.LanguageTag.t`. The default is
`#{inspect backend}.get_locale/0`.
## Returns
* A number of the requested or default type or
* `{:error, {exception, error}}` if no number could be determined
## Notes
This function parses a string to return a number in a
locale-aware manner. It will normalise grouping
characters and decimal separators, different forms of
the `+` and `-` symbols that appear in Unicode and
strips any `_` characters that might be used for
formatting in a string. It then parses the number
using the Elixir standard library functions.
## Examples
iex> #{inspect(__MODULE__)}.parse("+1.000,34", locale: "de")
{:ok, 1000.34}
iex> #{inspect(__MODULE__)}.parse("-1_000_000.34")
{:ok, -1000000.34}
iex> #{inspect(__MODULE__)}.parse("1.000", locale: "de", number: :integer)
{:ok, 1000}
iex> #{inspect(__MODULE__)}.parse("+1.000,34", locale: "de", number: :integer)
{:error,
{Cldr.Number.ParseError,
"The string \\"+1.000,34\\" could not be parsed as a number"}}
"""
def parse(string, options \\ []) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.parse(string, options)
end
@doc """
Resolves currencies from strings within
a list.
## Arguments
* `list` is any list in which currency
names and symbols are expected
* `options` is a keyword list of options
## Options
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`.
The default is `#{inspect backend}.get_locale()`.
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
`String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which also is a known ISO4217 currency
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> #{inspect(__MODULE__)}.scan("100 US dollars")
...> |> #{inspect(__MODULE__)}.resolve_currencies
[100, :USD]
iex> #{inspect(__MODULE__)}.scan("100 eurosports")
...> |> #{inspect(__MODULE__)}.resolve_currencies(fuzzy: 0.75)
[100, :EUR]
iex> #{inspect(__MODULE__)}.scan("100 dollars des États-Unis")
...> |> #{inspect(__MODULE__)}.resolve_currencies(locale: "fr")
[100, :USD]
"""
def resolve_currencies(list, options \\ []) when is_list(list) and is_list(options) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.resolve_currencies(list, options)
end
@doc """
Resolves a currency from a string.
## Arguments
* `string` is any string in which a currency
name or symbol is expected
* `options` is a keyword list of options
## Options
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`.
The default is `#{inspect backend}.get_locale()`.
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
`String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Returns
* An ISO4217 currency code as an atom or
* `{:error, {exception, message}}`
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which also is a known ISO4217 currency
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> #{inspect(__MODULE__)}.resolve_currency("US dollars")
[:USD]
iex> #{inspect(__MODULE__)}.resolve_currency("100 eurosports", fuzzy: 0.75)
[:EUR]
iex> #{inspect(__MODULE__)}.resolve_currency("dollars des États-Unis", locale: "fr")
[:USD]
iex> #{inspect(__MODULE__)}.resolve_currency("not a known currency", locale: "fr")
{:error,
{Cldr.UnknownCurrencyError,
"The currency \\"not a known currency\\" is unknown or not supported"}}
"""
def resolve_currency(string, options \\ []) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.resolve_currency(string, options)
end
@doc """
Resolves and tokenizes percent and permille
symbols from strings within a list.
Percent and permille symbols can be identified
at the beginning and/or the end of a string.
## Arguments
* `list` is any list in which percent and
permille symbols are expected
* `options` is a keyword list of options
## Options
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`.
The default is `options[:backend].get_locale()`.
## Examples
iex> #{inspect(__MODULE__)}.scan("100%")
...> |> #{inspect(__MODULE__)}.resolve_pers()
[100, :percent]
"""
@doc since: "2.22.0"
@spec resolve_pers([String.t(), ...], Keyword.t()) ::
list(Cldr.Number.Parser.per() | String.t())
def resolve_pers(list, options \\ []) when is_list(list) and is_list(options) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.resolve_pers(list, options)
end
@doc """
Resolves and tokenizes a percent or permille symbol
at the beginning and/or the end of a string.
## Arguments
* `string` is any string in which percent
and permille symbols are expected
* `options` is a keyword list of options
## Options
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`.
The default is `options[:backend].get_locale()`.
## Returns
* `:percent` or `:permille` or
* `{:error, {exception, message}}`
## Examples
iex> #{inspect(__MODULE__)}.resolve_per "11%"
["11", :percent]
iex> #{inspect(__MODULE__)}.resolve_per "% of linguists"
[:percent, " of linguists"]
iex> #{inspect(__MODULE__)}.resolve_per "% of linguists %"
[:percent, " of linguists ", :percent]
"""
@doc since: "2.22.0"
@spec resolve_per(String.t(), Keyword.t()) ::
Cldr.Number.Parser.per() | list(Cldr.Number.Parser.per() | String.t()) |
{:error, {module(), String.t()}}
def resolve_per(string, options \\ []) when is_binary(string) do
options = Keyword.put(options, :backend, unquote(backend))
Cldr.Number.Parser.resolve_per(string, options)
end
@doc false
def default_options do
[
format: :standard,
currency: nil,
currency_digits: :accounting,
minimum_grouping_digits: 0,
rounding_mode: :half_even,
number_system: :default,
locale: unquote(backend).get_locale()
]
end
end
end
end
end
|
lib/cldr/number/backend/number.ex
| 0.926864
| 0.635053
|
number.ex
|
starcoder
|
defmodule Membrane.RTP.SilenceDiscarder do
@moduledoc """
Element responsible for dropping silent audio packets.
For a packet to be discarded it needs to contain a `Membrane.RTP.Header.Extension` struct in its
metadata under the `:rtp` key. The extension should carry audio level information (a VAD extension is required).
The element will only drop packets whose audio level is at or above the given silence threshold (muted audio has the value 127).
`#{__MODULE__}` will drop as many silent packets as possible and on reaching the dropping limit it will send the current buffer,
reset the dropped-packet counter and emit `Membrane.RTP.PacketsDiscardedEvent` with the number of packets that have been dropped until that point.
The event gets sent both on reaching the dropping limit and when a non-silent packet arrives.
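## Example
A minimal configuration sketch (the option values below are illustrative):
    %Membrane.RTP.SilenceDiscarder{
      max_consecutive_drops: 500,
      silence_threshold: 120,
      vad_id: 1
    }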
"""
use Membrane.Filter
alias Membrane.RTP.Header
alias Membrane.RTP.PacketsDiscardedEvent
require Membrane.Logger
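# One-byte header extensions (RFC 8285) encode the payload length minus one,
# so the one-byte audio-level (VAD) payload carries a length field of 0.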
@vad_len 0
def_input_pad :input, caps: :any, demand_unit: :buffers
def_output_pad :output, caps: :any
def_options max_consecutive_drops: [
spec: non_neg_integer() | :infinity,
default: 1000,
description: """
A number indicating how many consecutive silent packets can be dropped before
a single packet will be passed and a dropped-packet event will be emitted.
Passing a single packet once in a while is necessary for elements such as a jitter buffer or an encryptor, as they update their ROCs
based on sequence numbers, and dropping too many packets may roll a ROC over unnoticed.
"""
],
silence_threshold: [
spec: 1..127,
default: 127,
description: """
Audio level threshold that will be compared against incoming packets. Packet will be dropped if its audio level
is above or equal to the given threshold.
"""
],
vad_id: [
spec: 1..14,
default: 6,
description: """
ID of a VAD extension.
"""
]
@impl true
def handle_init(opts) do
{:ok, Map.from_struct(opts) |> Map.put(:dropped, 0)}
end
@impl true
def handle_demand(:output, size, :buffers, _ctx, state) do
{{:ok, demand: {:input, size}}, state}
end
@impl true
def handle_event(pad, other, ctx, state), do: super(pad, other, ctx, state)
# Note: `max_consecutive_drops: :infinity` needs no dedicated clause. The
# `dropped == max_drops` guard below can never match an integer counter, so
# with `:infinity` silent packets simply keep being dropped by the general
# clause.
@impl true
def handle_process(
:input,
buffer,
_ctx,
%{dropped: dropped, max_consecutive_drops: max_drops} = state
)
when dropped == max_drops do
stop_dropping(buffer, state)
end
@impl true
def handle_process(:input, buffer, _ctx, state) do
%{dropped: dropped, vad_id: vad_id, silence_threshold: silence_threshold} = state
case buffer.metadata.rtp do
%{
extension: %Header.Extension{
# profile specific for one-byte extensions
profile_specific: <<0xBE, 0xDE>>,
data: data
}
} ->
silent? = is_silent_packet(vad_id, silence_threshold, data)
cond do
silent? ->
{{:ok, redemand: :output}, %{state | dropped: dropped + 1}}
dropped > 0 ->
stop_dropping(buffer, state)
true ->
{{:ok, buffer: {:output, buffer}}, state}
end
_header ->
{{:ok, buffer: {:output, buffer}}, state}
end
end
defp stop_dropping(buffer, state) do
{{:ok,
event: {:output, %PacketsDiscardedEvent{discarded: state.dropped}},
buffer: {:output, buffer}}, %{state | dropped: 0}}
end
defp is_silent_packet(_vad_id, _threshold, <<>>), do: false
# vad extension
defp is_silent_packet(
vad_id,
threshold,
<<vad_id::4, @vad_len::4, _v::1, audio_level::7, _rest::binary>>
) do
audio_level >= threshold
end
# extension padding
defp is_silent_packet(vad_id, threshold, <<0::8, rest::binary>>),
do: is_silent_packet(vad_id, threshold, rest)
# unknown extension
defp is_silent_packet(
vad_id,
threshold,
<<_id::4, len::4, _data0::8, _data1::binary-size(len), rest::binary>>
),
do: is_silent_packet(vad_id, threshold, rest)
end
|
lib/membrane/rtp/silence_discarder.ex
| 0.817101
| 0.427606
|
silence_discarder.ex
|
starcoder
|
defmodule Spf do
@moduledoc """
Check SPF for a specific `sender` and possible options.
The `Spf.check/2` function takes a sender and possible options and returns an
evaluation [`context`](`t:Spf.Context.t/0`) that contains the verdict and
some statistics of the evaluation.
## Example
iex> unless File.dir?("tmp"), do: File.mkdir("tmp")
iex> File.write("tmp/zone.txt", \"""
...> example.com TXT v=spf1 -all exp=why.%{d}
...> why.example.com TXT %{d}: %{i} is not one of our MTA's
...> \""")
:ok
iex> ctx = Spf.check("example.com", dns: "tmp/zone.txt")
iex> {ctx.verdict, ctx.reason, ctx.explanation}
{:fail, "spf[0] -all", "example.com: 127.0.0.1 is not one of our MTA's"}
"""
@doc """
Check SPF for given `sender` and possible options.
Options include:
- `:dns` filepath or zonedata to pre-populate the context's DNS cache
- `:helo` the helo presented by sending MTA, defaults to `sender`
- `:ip` an IPv4 or IPv6 address, as a binary, of the sending MTA, defaults to `127.0.0.1`
- `:log` a user log/4 function to relay notifications, defaults to `nil`
- `:verbosity` how verbose the notifications should be (0..5), defaults to `3`
- `:nameserver` an IPv4 or IPv6 address to use as a recursive nameserver
The keyword list may contain multiple entries of the `:nameserver` option, in
which case they will be tried in the order listed.
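For example (the addresses below are illustrative):
    Spf.check("example.com", nameserver: "9.9.9.9", nameserver: "149.112.112.112")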
The user supplied callback `log/4` is called with:
- `context`, the current evaluation context
- `facility`, an atom denoting which part of Spf logged the message
- `severity`, one of: `:error`, `:warn`, `:info`, `:debug`
- `message`, the log message as a binary
This is what `Spfcheck` uses to log information to stderr during an SPF
evaluation.
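A minimal `log/4` callback might look like this (a sketch that simply writes
to standard error):
    log = fn _context, facility, severity, message ->
      IO.puts(:stderr, "[\#{severity}] \#{facility}: \#{message}")
    end
    Spf.check("example.com", log: log)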
This function returns the evaluation context.
## Examples
iex> zone = \"""
...> example.com TXT v=spf1 +all
...> \"""
iex> Spf.check("example.com", dns: zone) |> Map.get(:verdict)
:pass
"""
@spec check(binary, Keyword.t()) :: Spf.Context.t()
def check(sender, opts \\ []) do
Spf.Context.new(sender, opts)
|> Spf.Eval.evaluate()
|> add_owner()
end
@spec add_owner(Spf.Context.t()) :: Spf.Context.t()
defp add_owner(ctx) do
{owner, email} =
case Spf.DNS.authority(ctx, ctx.domain) do
{:ok, _, owner, email} -> {owner, email}
{:error, reason} -> {"DNS error", "#{reason}"}
end
Map.put(ctx, :owner, owner)
|> Map.put(:contact, email)
end
end
|
lib/spf.ex
| 0.813794
| 0.630045
|
spf.ex
|
starcoder
|
defmodule Spear.PersistentSubscription do
@moduledoc """
A struct representing a persistent subscription and its settings
"""
require Spear.Records.Shared, as: Shared
require Spear.Records.Persistent, as: Persistent
@typedoc """
The action the EventStoreDB should take for an event's nack
* `:park` - stops the EventStoreDB from re-sending the event and appends a
reference to the event to the persistent subscription's parked events
stream. These events can be retried later by clicking the "Replay
Parked Messages" button in the EventStoreDB dashboard's persistent
subscriptions section for each stream+group combination. The EventStoreDB
parking system is conceptually similar to dead letter queues.
* `:retry` - retries the event up to `:max_retry_count` tries. This option is
a reasonable default so that if a consumer hits a transient error condition
such as a network timeout, it will retry the event before giving up and
parking. Note that once a consumer has retried an event more than
`:max_retry_count` times, the event is parked, even if the `:retry` action
is given to `Spear.nack/4`.
* `:skip` - skips the event without moving it to the parked events stream.
Practically this is no different than simply `Spear.ack/3`ing the event(s)
and performing a no-op in the consumer. Skipping may be a good option for
dealing with poison messages: malformed or otherwise completely
unhandleable events.
* `:stop` - stops the EventStoreDB from re-sending the event. The event is
not parked or retried. It is unclear how this differs from the `:skip`
action. This action does not stop the persistent subscription:
use `Spear.cancel_subscription/3` to shut down the subscription.
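For example, parking a batch of events might look like this (a sketch where
`conn`, `subscription` and `event_ids` are assumed to be in scope):
    Spear.nack(conn, subscription, event_ids, action: :park)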
"""
@typedoc since: "0.6.0"
@type nack_action :: :unknown | :park | :retry | :skip | :stop
@typedoc """
A persistent subscription.
These are generally returned from `Spear.list_persistent_subscriptions/2`.
Persistent subscriptions are considered unique by their stream name and
group name pairings. A stream may have many different persistent
subscriptions with different group names and settings and a group name
may be used for multiple subscriptions to different streams.
## Examples
iex> Spear.create_persistent_subscription(conn, "my_stream", "my_group", %Spear.PersistentSubscription.Settings{})
:ok
iex> {:ok, subscriptions} = Spear.list_persistent_subscriptions(conn)
iex> subscriptions |> Enum.to_list()
[
%Spear.PersistentSubscription{
group_name: "my_group",
settings: %Spear.PersistentSubscription.Settings{
checkpoint_after: 3000,
extra_statistics?: nil,
history_buffer_size: 300,
live_buffer_size: 100,
max_checkpoint_count: 100,
max_retry_count: 10,
max_subscriber_count: 1,
message_timeout: 5000,
min_checkpoint_count: 1,
named_consumer_strategy: :RoundRobin,
read_batch_size: 100,
resolve_links?: true,
revision: 0
},
stream_name: "my_stream"
}
]
"""
@typedoc since: "0.6.0"
@type t :: %__MODULE__{
stream_name: String.t(),
group_name: String.t(),
settings: Spear.PersistentSubscription.Settings.t()
}
defstruct ~w[stream_name group_name settings]a
@doc false
def from_map(map) do
%__MODULE__{
stream_name: map_stream_name(map["stream"]),
group_name: map["group"],
settings: %Spear.PersistentSubscription.Settings{
resolve_links?: map["resolveLinkTos"],
extra_statistics?: nil,
message_timeout: map["messageTimeout"],
live_buffer_size: map["liveBufferSize"],
history_buffer_size: map["historyBufferSize"],
max_retry_count: map["maxRetryCount"],
read_batch_size: map["readBatchSize"],
checkpoint_after: map["checkPointAfter"],
min_checkpoint_count: map["minCheckPointCount"],
max_checkpoint_count: map["maxCheckPointCount"],
max_subscriber_count: map["maxSubscriberCount"],
named_consumer_strategy: map["namedConsumerStrategy"] |> to_atom()
}
}
end
defp to_atom(string) when is_binary(string), do: String.to_atom(string)
defp to_atom(_), do: nil
defp map_stream_name("$all"), do: :all
defp map_stream_name(name), do: name
@doc false
def map_nack_action(:park), do: :Park
def map_nack_action(:retry), do: :Retry
def map_nack_action(:skip), do: :Skip
def map_nack_action(:stop), do: :Stop
def map_nack_action(_), do: :Unknown
@doc false
def build_create_request(stream_name, group_name, settings, opts) do
Persistent.create_req(
options:
Persistent.create_req_options(
stream_identifier: map_create_request_stream_identifier(stream_name),
stream_option: map_create_stream_option(stream_name, opts),
group_name: group_name,
settings: Spear.PersistentSubscription.Settings.to_record(settings, :create)
)
)
end
@doc false
def map_short_stream_option(stream_name) when is_binary(stream_name) do
{:stream_identifier, Shared.stream_identifier(stream_name: stream_name)}
end
# coveralls-ignore-start
def map_short_stream_option(:all) do
{:all, Shared.empty()}
end
# coveralls-ignore-stop
@doc false
def map_create_stream_option(stream_name, opts) when is_binary(stream_name) do
{:stream,
Persistent.create_req_stream_options(
stream_identifier: Shared.stream_identifier(stream_name: stream_name),
revision_option: map_revision(opts)
)}
end
# coveralls-ignore-start
def map_create_stream_option(:all, opts) do
from = Keyword.get(opts, :from, :start)
position =
with {:position, commit, prepare} <- map_all_position(from) do
{:position,
Persistent.create_req_position(commit_position: commit, prepare_position: prepare)}
end
{:all,
Persistent.create_req_all_options(
all_option: position,
filter_option: map_filter(Keyword.get(opts, :filter))
)}
end
# coveralls-ignore-stop
defp map_create_request_stream_identifier(:all), do: :undefined
defp map_create_request_stream_identifier(stream_name) do
Shared.stream_identifier(stream_name: stream_name)
end
def map_update_stream_option(stream_name, opts) when is_binary(stream_name) do
{:stream,
Persistent.update_req_stream_options(
stream_identifier: Shared.stream_identifier(stream_name: stream_name),
revision_option: map_revision(opts)
)}
end
# this is roughly the same as the creation options but doesn't include
# the filter option, just the position option
def map_update_stream_option(:all, opts) do
# coveralls-ignore-start
from = Keyword.get(opts, :from, :start)
position =
with {:position, commit, prepare} <- map_all_position(from) do
{:position,
Persistent.update_req_position(commit_position: commit, prepare_position: prepare)}
end
{:all, Persistent.update_req_all_options(all_option: position)}
# coveralls-ignore-stop
end
# coveralls-ignore-start
defp map_revision(opts) do
case Keyword.get(opts, :from, :start) do
:start -> {:start, Shared.empty()}
:end -> {:end, Shared.empty()}
revision when is_integer(revision) -> {:revision, revision}
end
end
defp map_all_position(Persistent.read_resp() = read_resp) do
read_resp
|> Spear.Event.from_read_response(link?: true)
|> map_all_position()
end
defp map_all_position(%Spear.Event{link: %Spear.Event{} = link}) do
map_all_position(link)
end
defp map_all_position(%Spear.Event{
metadata: %{commit_position: commit, prepare_position: prepare}
}) do
{:position, commit, prepare}
end
defp map_all_position(%Spear.Filter.Checkpoint{
commit_position: commit,
prepare_position: prepare
}) do
{:position, commit, prepare}
end
defp map_all_position(:start), do: {:start, Shared.empty()}
defp map_all_position(:end), do: {:end, Shared.empty()}
defp map_filter(%Spear.Filter{} = filter),
do: {:filter, Spear.Filter._to_persistent_filter_options(filter)}
defp map_filter(nil), do: {:no_filter, Shared.empty()}
# coveralls-ignore-stop
end
|
lib/spear/persistent_subscription.ex
| 0.886758
| 0.531392
|
persistent_subscription.ex
|
starcoder
|
defmodule Nebulex.Adapters.Local do
@moduledoc ~S"""
Adapter module for Local Generational Cache; inspired by
[epocxy](https://github.com/duomark/epocxy).
Generational caching using an ets table (or multiple ones when used with
`:shards`) for each generation of cached data. Accesses hit the newer
generation first, and migrate from the older generation to the newer
generation when retrieved from the stale table. When a new generation
is started, the oldest one is deleted. This is a form of mass garbage
collection which avoids using timers and expiration of individual
cached elements.
This implementation of generation cache uses only two generations
(which is more than enough) also referred like the `newer` and
the `older`.
## Features
* Configurable backend (`ets` or `:shards`).
* Expiration – A status based on TTL (Time To Live) option. To maintain
cache performance, expired entries may not be immediately flushed or
evicted, they are expired or evicted on-demand, when the key is read.
* Eviction – [Generational Garbage Collection](http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.Generation.html).
* Sharding – For intensive workloads, the Cache may also be partitioned
(by using `:shards` backend and specifying the `:partitions` option).
* Support for transactions via Erlang global name registration facility.
## Options
This adapter supports the following options and all of them can be given via
the cache configuration:
* `:backend` - Defines the backend or storage to be used for the adapter.
Supported backends are: `:ets` and `:shards`. Defaults to `:ets`.
* `:read_concurrency` - Since this adapter uses ETS tables internally,
this option is used when a new table is created. See `:ets.new/2`.
Defaults to `true`.
* `:write_concurrency` - Since this adapter uses ETS tables internally,
this option is used when a new table is created. See `:ets.new/2`.
Defaults to `true`.
* `:compressed` - This option is used when a new ETS table is created and
it defines whether or not the table is created with the `:compressed` option. See `:ets.new/2`.
Defaults to `false`.
* `:backend_type` - This option defines the type of ETS to be used
(Defaults to `:set`). However, it is highly recommended to keep the
default value, since there are commands not supported (unexpected
exception may be raised) for types like `:bag` or `:duplicate_bag`.
Please see the [ETS](https://erlang.org/doc/man/ets.html) docs
for more information.
* `:partitions` - The number of partitions in the Cache. This option is only
available for `:shards` backend. Defaults to `System.schedulers_online()`.
* `:gc_interval` - Interval time in milliseconds for garbage collection to
run, delete the oldest generation and create a new one. If this option is
not set, garbage collection is never executed, so new generations must be
created explicitly, e.g.: `Generation.new_generation(cache_name, [])`.
* `:max_size` - Max number of cached entries (cache limit). If it is not
set (`nil`), the check to release memory is not performed (the default).
* `:allocated_memory` - Max size in bytes allocated for a cache generation.
If this option is set and the configured value is reached, a new cache
generation is created so the oldest is deleted and force releasing memory
space. If it is not set (`nil`), the cleanup check to release memory is
not performed (the default).
* `:gc_cleanup_min_timeout` - The min timeout in milliseconds for triggering
the next cleanup and memory check. This will be the timeout to use when
the max allocated memory is reached. Defaults to `30_000`.
* `:gc_cleanup_max_timeout` - The max timeout in milliseconds for triggering
the next cleanup and memory check. This is the timeout used when the cache
starts or the consumed memory is `0`. Defaults to `300_000`.
## Example
`Nebulex.Cache` is the wrapper around the cache. We can define a
local cache as follows:
defmodule MyApp.LocalCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
Where the configuration for the cache must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, MyApp.LocalCache,
backend: :shards,
gc_interval: :timer.seconds(3600),
max_size: 200_000,
allocated_memory: 2_000_000_000,
gc_cleanup_min_timeout: 10_000,
gc_cleanup_max_timeout: 600_000
For intensive workloads, the Cache may also be partitioned (by using `:shards`
backend and specifying the `:partitions` option). If partitioning is required
then a good default is to set the number of partitions to the number of
schedulers available (the default):
config :my_app, MyApp.LocalCache,
backend: :shards,
gc_interval: :timer.seconds(3600),
max_size: 200_000,
allocated_memory: 2_000_000_000,
gc_cleanup_min_timeout: 10_000,
gc_cleanup_max_timeout: 600_000,
partitions: System.schedulers_online() * 2
For more information about the usage, check out `Nebulex.Cache`.
## Queryable API
The adapter supports as query parameter the following values:
* `query` - `nil | :unexpired | :expired | :ets.match_spec()`
Internally, an entry is represented by the tuple `{key, val, vsn, exp}`,
which means the match pattern within the `:ets.match_spec()` must be
something like `{:"$1", :"$2", :"$3", :"$4"}`. In order to make query
building easier, you can use `Ex2ms` library.
## Examples
# built-in queries
MyCache.all()
MyCache.all(:unexpired)
MyCache.all(:expired)
# using a custom match spec (all values > 10)
spec = [{{:"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}]
MyCache.all(spec)
# using Ex2ms
import Ex2ms
spec =
fun do
{key, value, _version, _expire_at} when value > 10 -> {key, value}
end
MyCache.all(spec)
The `:return` option applies only to the built-in queries
(`nil | :unexpired | :expired`); if you are using a custom `:ets.match_spec()`,
the return value depends on it.
The same applies to the `stream` function.
## Extended API (convenience functions)
This adapter provides some additional convenience functions to the
`Nebulex.Cache` API.
Creating new generations:
MyCache.new_generation()
MyCache.new_generation(name: :my_cache)
Retrieving the current generations:
MyCache.generations()
MyCache.generations(:my_cache)
Retrieving the newer generation:
MyCache.newer_generation()
MyCache.newer_generation(:my_cache)
"""
# Provide Cache Implementation
@behaviour Nebulex.Adapter
@behaviour Nebulex.Adapter.Queryable
# Inherit default transaction implementation
use Nebulex.Adapter.Transaction
# Inherit default persistence implementation
use Nebulex.Adapter.Persistence
import Record
alias Nebulex.Adapters.Local.{Backend, Generation}
alias Nebulex.Cache.Stats
alias Nebulex.{Entry, Time}
# Cache Entry
defrecord(:entry,
key: nil,
value: nil,
touched: nil,
ttl: nil
)
# Supported Backends
@backends ~w(ets shards)a
## Adapter
@impl true
defmacro __before_compile__(_env) do
quote do
@doc """
A convenience function for creating new generations.
"""
def new_generation(opts \\ []) do
opts
|> Keyword.get(:name, __MODULE__)
|> Generation.new(opts)
end
@doc """
A convenience function for retrieving the current generations.
"""
defdelegate generations(name \\ __MODULE__), to: Generation, as: :list
@doc """
A convenience function for retrieving the newer generation.
"""
defdelegate newer_generation(name \\ __MODULE__), to: Generation, as: :newer
end
end
@impl true
def init(opts) do
# required cache name
name = opts[:name] || Keyword.fetch!(opts, :cache)
# resolve the backend to be used
backend =
opts
|> Keyword.get(:backend, :ets)
|> case do
val when val in @backends ->
val
val ->
raise "expected the :backend option to be one of the supported backends " <>
"#{inspect(@backends)}, got: #{inspect(val)}"
end
child =
Backend.child_spec(
backend,
name,
[name: name, backend: backend] ++ opts
)
meta = %{
name: name,
backend: backend,
stat_counter: opts[:stat_counter] || Stats.init(opts)
}
{:ok, child, meta}
end
@impl true
def get(%{name: name, backend: backend, stat_counter: ref}, key, _opts) do
name
|> Generation.list()
|> do_get(key, backend, ref)
|> return(:value)
|> update_stats(:get, ref)
end
defp do_get([newer], key, backend, ref) do
gen_fetch(newer, key, backend, ref)
end
defp do_get([newer, older], key, backend, ref) do
with nil <- gen_fetch(newer, key, backend, ref),
entry(key: ^key) = cached <- gen_fetch(older, key, backend, ref, &pop_entry/4) do
true = backend.insert(newer, cached)
cached
end
end
defp gen_fetch(gen, key, backend, ref, fun \\ &get_entry/4) do
gen
|> fun.(key, nil, backend)
|> validate_ttl(gen, backend, ref)
end
@impl true
def get_all(adapter_meta, keys, _opts) do
Enum.reduce(keys, %{}, fn key, acc ->
if obj = get(adapter_meta, key, []),
do: Map.put(acc, key, obj),
else: acc
end)
end
@impl true
def put(%{name: name, backend: backend, stat_counter: ref}, key, value, ttl, on_write, _opts) do
do_put(
on_write,
name,
backend,
ref,
entry(
key: key,
value: value,
touched: Time.now(),
ttl: ttl
)
)
end
defp do_put(:put, name, backend, ref, entry) do
name
|> put_entries(backend, entry)
|> update_stats(:put, ref)
end
defp do_put(:put_new, name, backend, ref, entry) do
name
|> put_new_entries(backend, entry)
|> update_stats(:put, ref)
end
defp do_put(:replace, name, backend, ref, entry(key: key, value: value, ttl: ttl)) do
name
|> update_entry(backend, key, [{3, value}, {4, nil}, {5, ttl}])
|> update_stats(:put, ref)
end
@impl true
def put_all(%{name: name, backend: backend, stat_counter: ref}, entries, ttl, on_write, _opts) do
touched = Time.now()
entries =
for {key, value} <- entries, value != nil do
entry(key: key, value: value, touched: touched, ttl: ttl)
end
do_put_all(on_write, name, backend, ref, entries)
end
defp do_put_all(:put, name, backend, ref, entries) do
name
|> put_entries(backend, entries)
|> update_stats(:put_all, {ref, entries})
end
defp do_put_all(:put_new, name, backend, ref, entries) do
name
|> put_new_entries(backend, entries)
|> update_stats(:put_all, {ref, entries})
end
@impl true
def delete(%{name: name, backend: backend, stat_counter: ref}, key, _opts) do
name
|> Generation.list()
|> Enum.each(&backend.delete(&1, key))
|> update_stats(:delete, ref)
end
@impl true
def take(%{name: name, backend: backend, stat_counter: ref}, key, _opts) do
name
|> Generation.list()
|> Enum.reduce_while(nil, fn gen, acc ->
case pop_entry(gen, key, nil, backend) do
nil ->
{:cont, acc}
res ->
value =
res
|> validate_ttl(gen, backend, ref)
|> return(:value)
{:halt, value}
end
end)
|> update_stats(:take, ref)
end
@impl true
def incr(%{name: name, backend: backend, stat_counter: ref}, key, incr, ttl, _opts) do
name
|> Generation.newer()
|> backend.update_counter(
key,
{3, incr},
entry(key: key, value: 0, touched: Time.now(), ttl: ttl)
)
|> update_stats(:write, ref)
end
@impl true
def has_key?(adapter_meta, key) do
case get(adapter_meta, key, []) do
nil -> false
_ -> true
end
end
@impl true
def ttl(%{name: name, backend: backend, stat_counter: ref}, key) do
name
|> Generation.list()
|> do_get(key, backend, ref)
|> return()
|> entry_ttl()
|> update_stats(:get, ref)
end
defp entry_ttl(nil), do: nil
defp entry_ttl(entry(ttl: :infinity)), do: :infinity
defp entry_ttl(entry(ttl: ttl, touched: touched)) do
ttl - (Time.now() - touched)
end
defp entry_ttl(entries) when is_list(entries) do
for entry <- entries, do: entry_ttl(entry)
end
@impl true
def expire(%{name: name, backend: backend, stat_counter: ref}, key, ttl) do
name
|> update_entry(backend, key, [{4, Time.now()}, {5, ttl}])
|> update_stats(:put, ref)
end
@impl true
def touch(%{name: name, backend: backend, stat_counter: ref}, key) do
name
|> update_entry(backend, key, [{4, Time.now()}])
|> update_stats(:put, ref)
end
@impl true
def size(%{name: name, backend: backend}) do
name
|> Generation.list()
|> Enum.reduce(0, fn gen, acc ->
gen
|> backend.info(:size)
|> Kernel.+(acc)
end)
end
@impl true
def flush(%{name: name, stat_counter: ref}) do
name
|> Generation.flush()
|> update_stats(:flush, ref)
end
## Queryable
@impl true
def all(%{name: name, backend: backend}, query, opts) do
query = validate_match_spec(query, opts)
for gen <- Generation.list(name),
elems <- backend.select(gen, query),
do: elems
end
@impl true
def stream(adapter_meta, query, opts) do
query
|> validate_match_spec(opts)
|> do_stream(adapter_meta, Keyword.get(opts, :page_size, 10))
end
defp do_stream(match_spec, %{name: name, backend: backend}, page_size) do
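# Lazily page through the generations from newest to oldest: `backend.select/3`
# returns a `{results, continuation}` page of up to `page_size` matches, and
# `:"$end_of_table"` signals that the next (older) generation should be read.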
Stream.resource(
fn ->
[newer | _] = generations = Generation.list(name)
result = backend.select(newer, match_spec, page_size)
{result, generations}
end,
fn
{:"$end_of_table", [_gen]} ->
{:halt, []}
{:"$end_of_table", [_gen | generations]} ->
result =
generations
|> hd()
|> backend.select(match_spec, page_size)
{[], {result, generations}}
{{elements, cont}, [_ | _] = generations} ->
{elements, {backend.select(cont), generations}}
end,
& &1
)
end
## Transaction
@impl true
def transaction(adapter_meta, opts, fun) do
super(adapter_meta, Keyword.put(opts, :nodes, [node()]), fun)
end
## Helpers
defp get_entry(tab, key, default, backend) do
case backend.lookup(tab, key) do
[] -> default
[entry] -> entry
entries -> entries
end
end
defp pop_entry(tab, key, default, backend) do
case backend.take(tab, key) do
[] -> default
[entry] -> entry
entries -> entries
end
end
defp put_entries(name, backend, entry_or_entries) do
name
|> Generation.newer()
|> backend.insert(entry_or_entries)
end
defp put_new_entries(name, backend, entry_or_entries) do
name
|> Generation.newer()
|> backend.insert_new(entry_or_entries)
end
defp update_entry(name, backend, key, updates) do
name
|> Generation.newer()
|> backend.update_element(key, updates)
end
defp return(entry_or_entries, field \\ nil)
defp return(nil, _field), do: nil
defp return(entry(value: value), :value), do: value
defp return(entry(key: _) = entry, _field), do: entry
defp return(entries, field) when is_list(entries) do
for entry <- entries, do: return(entry, field)
end
defp validate_ttl(nil, _, _, _), do: nil
defp validate_ttl(entry(ttl: :infinity) = entry, _, _, _), do: entry
defp validate_ttl(entry(key: key, touched: touched, ttl: ttl) = entry, gen, backend, ref) do
if Time.now() - touched >= ttl do
true = backend.delete(gen, key)
update_stats(nil, :expired, ref)
else
entry
end
end
defp validate_ttl(entries, gen, backend, ref) when is_list(entries) do
Enum.filter(entries, fn entry ->
not is_nil(validate_ttl(entry, gen, backend, ref))
end)
end
defp validate_match_spec(spec, opts) when spec in [nil, :unexpired, :expired] do
[
{
entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"),
if(spec = comp_match_spec(spec), do: [spec], else: []),
ret_match_spec(opts)
}
]
end
defp validate_match_spec(spec, _opts) do
case :ets.test_ms({nil, nil, nil, :infinity}, spec) do
{:ok, _result} ->
spec
{:error, _result} ->
raise Nebulex.QueryError, message: "invalid match spec", query: spec
end
end
defp comp_match_spec(nil),
do: nil
defp comp_match_spec(:unexpired),
do: {:orelse, {:==, :"$4", :infinity}, {:<, {:-, Time.now(), :"$3"}, :"$4"}}
defp comp_match_spec(:expired),
do: {:not, comp_match_spec(:unexpired)}
defp ret_match_spec(opts) do
case Keyword.get(opts, :return, :key) do
:key -> [:"$1"]
:value -> [:"$2"]
:entry -> [%Entry{key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"}]
end
end
defp update_stats(value, _action, nil), do: value
defp update_stats(value, _action, {nil, _}), do: value
defp update_stats(nil, :get, counter_ref) do
:ok = Stats.incr(counter_ref, :misses)
nil
end
defp update_stats(value, :get, counter_ref) do
:ok = Stats.incr(counter_ref, :hits)
value
end
defp update_stats(value, :expired, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions)
:ok = Stats.incr(counter_ref, :expirations)
value
end
defp update_stats(value, :write, counter_ref) do
:ok = Stats.incr(counter_ref, :writes)
value
end
defp update_stats(true, :put, counter_ref) do
:ok = Stats.incr(counter_ref, :writes)
true
end
defp update_stats(true, :put_all, {counter_ref, entries}) do
:ok = Stats.incr(counter_ref, :writes, length(entries))
true
end
defp update_stats(value, :delete, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions)
value
end
defp update_stats(nil, :take, counter_ref) do
:ok = Stats.incr(counter_ref, :misses)
nil
end
defp update_stats(value, :take, counter_ref) do
:ok = Stats.incr(counter_ref, :hits)
:ok = Stats.incr(counter_ref, :evictions)
value
end
defp update_stats(value, :flush, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions, value)
value
end
defp update_stats(value, _action, _counter_ref) do
value
end
end
|
lib/nebulex/adapters/local.ex
| 0.920337
| 0.754553
|
local.ex
|
starcoder
|
defmodule SurveyTool.ContentParser do
@moduledoc """
Responsible for parsing content in question and answer CSV files.
"""
alias SurveyTool.{RatingQuestion, SingleSelect, Survey}
@answers_range 3..-1
@rows_per_chunk 1
@question_error_message """
Could not generate report. Responses file contained \
unknown question type of: \
"""
@timestamp_index 2
@doc """
Reads and parses the set of questions from a given CSV file
and initialises a `%Survey{}` struct to hold the resulting list
of questions.
## Parameters:
- `csv_filepath`: The filepath to the questions CSV file.
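A typical flow combines this with `populate_survey/2` (a sketch; the file
paths are illustrative):
    "questions.csv"
    |> SurveyTool.ContentParser.generate_survey()
    |> SurveyTool.ContentParser.populate_survey("responses.csv")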
"""
@spec generate_survey(String.t()) :: Survey.t()
def generate_survey(csv_filepath) do
questions =
csv_filepath
|> Path.expand()
|> File.stream!()
|> CSV.decode(headers: true)
|> Enum.map(&to_question/1)
%Survey{questions: questions}
end
@doc """
Maps responses contained in a given CSV file to questions in
a `survey` and populates the survey with any relevant answers
to the questions.
## Parameters:
- `survey`: The survey to populate
- `csv_filepath`: The filepath to the responses CSV file.
"""
@spec populate_survey(Survey.t(), String.t()) :: Survey.t()
def populate_survey(survey, csv_filepath) do
csv_filepath
|> Path.expand()
|> File.stream!()
|> CSV.decode()
|> Stream.chunk_every(@rows_per_chunk)
|> Enum.reduce(survey, &add_response/2)
end
defp add_response(response, survey) do
survey = increment(survey, :response_count)
case check_timestamp_validity(response) do
{:ok, _date, _offset} ->
survey
|> increment(:participant_count)
|> populate_answers(response)
{:error, _message} ->
survey
end
end
defp populate_answers(survey, row) do
answered_questions =
survey.questions
|> Stream.zip(answers(row))
|> Enum.map(&add_answer/1)
%Survey{survey | questions: answered_questions}
end
defp add_answer({question, answer}) do
question
|> question.__struct__.add_answer(answer)
end
defp increment(survey, key) do
Map.update!(survey, key, fn current_value ->
current_value + 1
end)
end
defp answers([{:ok, row}]) do
row
|> Enum.slice(@answers_range)
end
defp check_timestamp_validity([{:ok, response}]) do
response
|> Enum.at(@timestamp_index)
|> DateTime.from_iso8601()
end
defp to_question({:ok, row} = {:ok, %{"type" => "ratingquestion"}}) do
%RatingQuestion{text: row["text"], theme: row["theme"]}
end
defp to_question({:ok, row} = {:ok, %{"type" => "singleselect"}}) do
%SingleSelect{text: row["text"], theme: row["theme"]}
end
defp to_question({:ok, %{"type" => type}}) do
throw({:halt, error: @question_error_message <> type})
end
end
|
lib/survey_tool/content_parser.ex
| 0.813127
| 0.468365
|
content_parser.ex
|
starcoder
|
defmodule ExIcal.Recurrence do
@moduledoc """
Adds support for recurring events.
Events can recur by frequency, count, interval, and/or start/end date. To
see the specific rules and examples, see `add_recurring_events/2` below.
"""
alias ExIcal.Event
@doc """
Add recurring events to events list
## Parameters
- `events`: List of events that each may contain an rrule. See `ExIcal.Event`.
- `end_date` *(optional)*: A date time that represents the fallback end date
for a recurring event. This value is only used when the options specified
in rrule result in an infinite recurrence (i.e. when neither `count` nor
`until` is set). If no end_date is set, it will default to
`DateTime.utc_now()`.
## Event rrule options
Event recurrence details are specified in the `rrule`. The following options
are considered:
- `freq`: Represents how frequently the event recurs. Allowed frequencies
are `DAILY`, `WEEKLY`, `MONTHLY` and `YEARLY`. These can be further modified by
the `interval` option.
- `count` *(optional)*: Represents the number of times that an event will
recur. This takes precedence over the `end_date` parameter and the
`until` option.
- `interval` *(optional)*: Represents the interval at which events occur.
This option works in concert with `freq` above; by using the `interval`
option, an event could recur every 5 days or every 3 weeks.
- `until` *(optional)*: Represents the end date for a recurring event.
This takes precedence over the `end_date` parameter.
The `freq` option is required for a valid rrule, but the others are
optional. They may be used either individually (e.g. just `freq`) or in
concert (e.g. `freq` + `interval` + `until`).
## Examples
iex> dt = Timex.Date.from({2016,8,13})
iex> dt_end = Timex.Date.from({2016, 8, 23})
iex> events = [%ExIcal.Event{rrule:%{freq: "DAILY"}, start: dt, end: dt}]
iex> ExIcal.Recurrence.add_recurring_events(events, dt_end) |> length
10
"""
@spec add_recurring_events([%Event{}]) :: [%Event{}]
@spec add_recurring_events([%Event{}], %DateTime{}) :: [%Event{}]
def add_recurring_events(events, end_date \\ DateTime.utc_now()) do
events ++ (events |> Enum.reduce([], fn(event, revents) ->
case event.rrule do
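# Clauses are ordered from most to least specific: `count`/`until` combined
# with `interval` first, then `count` or `until` alone, then `interval`
# alone, then the bare frequency.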
nil ->
revents
%{freq: "DAILY", count: count, interval: interval} ->
revents ++ (event |> add_recurring_events_count(count, [days: interval]))
%{freq: "DAILY", until: until, interval: interval} ->
revents ++ (event |> add_recurring_events_until(until, [days: interval]))
%{freq: "DAILY", count: count} ->
revents ++ (event |> add_recurring_events_count(count, [days: 1]))
%{freq: "DAILY", until: until} ->
revents ++ (event |> add_recurring_events_until(until, [days: 1]))
%{freq: "DAILY", interval: interval} ->
revents ++ (event |> add_recurring_events_until(end_date, [days: interval]))
%{freq: "DAILY"} ->
revents ++ (event |> add_recurring_events_until(end_date, [days: 1]))
%{freq: "WEEKLY", count: count, interval: interval} ->
revents ++ (event |> add_recurring_events_count(count, [days: interval * 7]))
%{freq: "WEEKLY", until: until, interval: interval} ->
revents ++ (event |> add_recurring_events_until(until, [days: interval * 7]))
%{freq: "WEEKLY", count: count} ->
revents ++ (event |> add_recurring_events_count(count, [days: 7]))
%{freq: "WEEKLY", until: until} ->
revents ++ (event |> add_recurring_events_until(until, [days: 7]))
%{freq: "WEEKLY", interval: interval} ->
revents ++ (event |> add_recurring_events_until(end_date, [days: interval * 7]))
%{freq: "WEEKLY"} ->
revents ++ (event |> add_recurring_events_until(end_date, [days: 7]))
%{freq: "MONTHLY", count: count, interval: interval} ->
revents ++ (event |> add_recurring_events_count(count, [months: interval]))
%{freq: "MONTHLY", until: until, interval: interval} ->
revents ++ (event |> add_recurring_events_until(until, [months: interval]))
%{freq: "MONTHLY", count: count} ->
revents ++ (event |> add_recurring_events_count(count, [months: 1]))
%{freq: "MONTHLY", until: until} ->
revents ++ (event |> add_recurring_events_until(until, [months: 1]))
%{freq: "MONTHLY", interval: interval} ->
revents ++ (event |> add_recurring_events_until(end_date, [months: interval]))
%{freq: "MONTHLY"} ->
revents ++ (event |> add_recurring_events_until(end_date, [months: 1]))
%{freq: "YEARLY", count: count, interval: interval} ->
revents ++ (event |> add_recurring_events_count(count, [years: interval]))
%{freq: "YEARLY", until: until, interval: interval} ->
revents ++ (event |> add_recurring_events_until(until, [years: interval]))
%{freq: "YEARLY", count: count} ->
revents ++ (event |> add_recurring_events_count(count, [years: 1]))
%{freq: "YEARLY", until: until} ->
revents ++ (event |> add_recurring_events_until(until, [years: 1]))
%{freq: "YEARLY", interval: interval} ->
revents ++ (event |> add_recurring_events_until(end_date, [years: interval]))
%{freq: "YEARLY"} ->
revents ++ (event |> add_recurring_events_until(end_date, [years: 1]))
end
end))
end
defp add_recurring_events_until(event, until, shift_opts) do
new_event = shift_event(event, shift_opts)
case Timex.compare(new_event.start, until) do
-1 -> [new_event] ++ add_recurring_events_until(new_event, until, shift_opts)
0 -> [new_event]
1 -> []
end
end
defp add_recurring_events_count(event, count, shift_opts) do
new_event = shift_event(event, shift_opts)
if count > 1 do
[new_event] ++ add_recurring_events_count(new_event, count - 1, shift_opts)
else
[new_event]
end
end
defp shift_event(event, shift_opts) do
new_event = event
new_event = %{new_event | start: shift_date(event.start, shift_opts)}
new_event = %{new_event | end: shift_date(event.end, shift_opts)}
new_event
end
defp shift_date(date, shift_opts) do
case Timex.shift(date, shift_opts) do
%Timex.AmbiguousDateTime{} = new_date ->
new_date.after
new_date ->
new_date
end
end
end
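# A minimal usage sketch (same `ExIcal.Event` struct and Timex setup as the
# doctest above): a COUNT-based rule ignores the `end_date` argument.
#
#     iex> dt = Timex.Date.from({2016, 8, 13})
#     iex> events = [%ExIcal.Event{rrule: %{freq: "WEEKLY", count: 3}, start: dt, end: dt}]
#     iex> ExIcal.Recurrence.add_recurring_events(events) |> length
#     4  # the original event plus three weekly recurrences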
|
lib/ex_ical/recurrence.ex
| 0.91764
| 0.563918
|
recurrence.ex
|
starcoder
|
defmodule Monad.Result do
@moduledoc """
A monad that represents success and failure conditions.
In a series of bind operations, if any function returns an error monad, the
following binds are skipped. This allows for easy flow control for both
success and error cases.
"""
@typedoc """
The possible types of results that can occur (i.e. success and failure).
"""
@type result_type :: :ok | :error
@typedoc """
The standard tuple format for representing success and error states.
These tuples are easily converted to a `Monad.Result`.
"""
@type result_tuple :: {result_type, term}
use Monad.Behaviour
@opaque t :: %__MODULE__{type: result_type, value: term, error: term}
@doc false
defstruct type: :ok, value: nil, error: nil
@doc """
Wraps a value in a success monad.
iex> s = success 42
iex> s.value
42
iex> s.error
nil
"""
@spec success(term) :: t
def success(value), do: %Monad.Result{type: :ok, value: value}
@doc """
Wraps a value in an error monad.
iex> e = error "Failed"
iex> e.value
nil
iex> e.error
"Failed"
"""
@spec error(term) :: t
def error(error), do: %Monad.Result{type: :error, error: error}
@doc """
Returns true if the given monad contains a success state.
iex> s = success 42
iex> success? s
true
"""
@spec success?(t) :: boolean
def success?(result), do: result.type == :ok
@doc """
Returns true if the given monad contains an error state.
iex> e = error "Failed"
iex> error? e
true
"""
@spec error?(t) :: boolean
def error?(result), do: result.type == :error
@doc """
Converts a standard success/failure tuple to a `Monad.Result`.
iex> s = from_tuple {:ok, 42}
iex> s.value
42
iex> s.error
nil
iex> e = from_tuple {:error, "Failed"}
iex> e.value
nil
iex> e.error
"Failed"
"""
@spec from_tuple(result_tuple) :: t
def from_tuple({:ok, value}), do: success(value)
def from_tuple({:error, reason}), do: error(reason)
@doc """
Converts the `Monad.Result` to a tagged tuple.
iex> s = success 42
iex> to_tuple s
{:ok, 42}
iex> e = error :badarg
iex> to_tuple e
{:error, :badarg}
"""
@spec to_tuple(t) :: result_tuple
def to_tuple(%__MODULE__{type: :ok, value: value}), do: {:ok, value}
def to_tuple(%__MODULE__{type: :error, error: error}), do: {:error, error}
@doc """
Unwraps the value from a success monad.
Does not work with error monads.
iex> s = success 42
iex> unwrap! s
42
"""
@spec unwrap!(t) :: term
def unwrap!(%Monad.Result{type: :ok, value: value}), do: value
@doc """
Callback implementation of `Monad.Behaviour.return/1`.
Wraps a value in a success monad.
iex> s = return 42
iex> s.value
42
iex> s.error
nil
"""
@spec return(term) :: t
def return(value), do: success(value)
@doc """
Callback implementation of `Monad.Behaviour.bind/2`.
If the monad contains a success state, then the value is unwrapped and applied
to `fun`.
For monads containing an error state, the error is returned as is.
iex> s = success 42
iex> r = bind s, (& success &1 * 2)
iex> r.value
84
iex> r.error
nil
iex> s = success 42
iex> r = bind s, fn _ -> error "Failed" end
iex> r.value
nil
iex> r.error
"Failed"
"""
@spec bind(t, (term -> t)) :: t
def bind(result = %Monad.Result{type: :error}, _fun), do: result
def bind(result = %Monad.Result{type: :ok}, fun) when is_function(fun, 1) do
result |> unwrap! |> fun.()
end
end
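# A minimal usage sketch (hypothetical validation pipeline): once any step
# returns `error/1`, the remaining binds are skipped.
#
#     iex> import Monad.Result
#     iex> {:ok, "42"}
#     ...> |> from_tuple()
#     ...> |> bind(&success(String.to_integer(&1)))
#     ...> |> bind(fn n -> if n > 0, do: success(n), else: error(:not_positive) end)
#     ...> |> to_tuple()
#     {:ok, 42}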
|
lib/monad/result.ex
| 0.892454
| 0.588061
|
result.ex
|
starcoder
|
defmodule Scidata.MNIST do
@moduledoc """
Module for downloading the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).
"""
alias Scidata.Utils
@base_url "https://storage.googleapis.com/cvdf-datasets/mnist/"
@train_image_file "train-images-idx3-ubyte.gz"
@train_label_file "train-labels-idx1-ubyte.gz"
@test_image_file "t10k-images-idx3-ubyte.gz"
@test_label_file "t10k-labels-idx1-ubyte.gz"
@doc """
Downloads the MNIST training dataset or fetches it locally.
## Options
* `:transform_images` - A function that transforms images, defaults to
`& &1`.
It accepts a tuple like `{binary_data, tensor_type, data_shape}` which
can be used for converting the `binary_data` to a tensor with a function
like:
fn {labels_binary, type, _shape} ->
labels_binary
|> Nx.from_binary(type)
|> Nx.new_axis(-1)
|> Nx.equal(Nx.tensor(Enum.to_list(0..9)))
|> Nx.to_batched_list(32)
end
* `:transform_labels` - similar to `:transform_images` but applied to
dataset labels
"""
def download(opts \\ []) do
transform_images = opts[:transform_images] || (& &1)
transform_labels = opts[:transform_labels] || (& &1)
{download_images(@train_image_file, transform_images),
download_labels(@train_label_file, transform_labels)}
end
@doc """
Downloads the MNIST test dataset or fetches it locally.
Accepts the same options as `download/1`.
"""
def download_test(opts \\ []) do
transform_images = opts[:transform_images] || (& &1)
transform_labels = opts[:transform_labels] || (& &1)
{download_images(@test_image_file, transform_images),
download_labels(@test_label_file, transform_labels)}
end
defp download_images(image_file, transform) do
data = Utils.get!(@base_url <> image_file).body
<<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = data
transform.({images, {:u, 8}, {n_images, n_rows, n_cols}})
end
defp download_labels(label_file, transform) do
data = Utils.get!(@base_url <> label_file).body
<<_::32, n_labels::32, labels::binary>> = data
transform.({labels, {:u, 8}, {n_labels}})
end
end
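# A minimal usage sketch (assumes Nx is available): turn the raw training
# images into an {n_images, n_rows, n_cols} u8 tensor via `:transform_images`.
#
#     {images, {labels_bin, labels_type, _shape}} =
#       Scidata.MNIST.download(
#         transform_images: fn {bin, type, shape} ->
#           bin |> Nx.from_binary(type) |> Nx.reshape(shape)
#         end
#       )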
|
lib/scidata/mnist.ex
| 0.776792
| 0.694626
|
mnist.ex
|
starcoder
|
defmodule Nx.Shared do
# A collection of **private** helpers and macros shared in Nx.
@moduledoc false
alias Nx.Tensor, as: T
## Macros
@doc """
Match the cartesian product of all given types.
  A macro that allows us to write all possible match types
  in the most efficient format. This is done by looking at @0,
  @1, etc., and replacing them with the currently matched type at the
given position. In other words, this:
match_types [input_type, output_type] do
for <<match!(seg, 0) <- data>>, into: <<>>, do: <<write!(read!(seg, 0) + right, 1)>>
end
Is compiled into:
for <<seg::float-native-size(...) <- data>>, into: <<>>, do: <<seg+right::float-native-size(...)>>
  for all possible valid types between input and output types.
`match!` is used in matches and must be always followed by a `read!`.
`write!` is used to write to the binary.
The implementation unfolds the loops at the top level. In particular,
  note that an unrolled case such as:
for <<seg::size(size)-signed-integer <- data>>, into: <<>> do
<<seg+number::signed-integer-size(size)>>
end
  is twice as fast and uses half as much memory as:
for <<seg::size(size)-signed-integer <- data>>, into: <<>> do
case output_type do
{:s, size} ->
<<seg+number::signed-integer-size(size)>>
{:f, size} ->
<<seg+number::float-native-size(size)>>
{:u, size} ->
<<seg+number::unsigned-integer-size(size)>>
end
end
"""
defmacro match_types([_ | _] = args, do: block) do
sizes = Macro.generate_arguments(length(args), __MODULE__)
matches = match_types(sizes)
clauses =
Enum.flat_map(matches, fn match ->
block =
Macro.prewalk(block, fn
{:match!, _, [var, pos]} when is_integer(pos) ->
{type, size} = Enum.fetch!(match, pos)
match_bin_modifier(var, type, size)
{:read!, _, [var, pos]} when is_integer(pos) ->
{type, size} = Enum.fetch!(match, pos)
read_bin_modifier(var, type, size)
{:write!, _, [var, pos]} when is_integer(pos) ->
{type, size} = Enum.fetch!(match, pos)
write_bin_modifier(var, type, size)
other ->
other
end)
quote do
{unquote_splicing(match)} -> unquote(block)
end
end)
quote do
case {unquote_splicing(args)}, do: unquote(clauses)
end
end
@all_types [:s, :f, :bf, :u]
defp match_types([h | t]) do
for type <- @all_types, t <- match_types(t) do
[{type, h} | t]
end
end
defp match_types([]), do: [[]]
defp match_bin_modifier(var, :bf, _),
do: quote(do: unquote(var) :: binary - size(2))
defp match_bin_modifier(var, type, size),
do: shared_bin_modifier(var, type, size)
defp read_bin_modifier(var, :bf, _) do
if System.endianness() == :little do
quote do
<<x::float-little-32>> = <<0::16, unquote(var)::binary>>
x
end
else
quote do
<<x::float-big-32>> = <<unquote(var)::binary, 0::16>>
x
end
end
end
defp read_bin_modifier(var, _, _),
do: var
defp write_bin_modifier(var, :bf, _) do
if System.endianness() == :little do
quote(do: binary_part(<<unquote(var)::float-native-32>>, 2, 2) :: binary)
else
quote(do: binary_part(<<unquote(var)::float-native-32>>, 0, 2) :: binary)
end
end
defp write_bin_modifier(var, type, size),
do: shared_bin_modifier(var, type, size)
defp shared_bin_modifier(var, :s, size),
do: quote(do: unquote(var) :: signed - integer - native - size(unquote(size)))
defp shared_bin_modifier(var, :u, size),
do: quote(do: unquote(var) :: unsigned - integer - native - size(unquote(size)))
defp shared_bin_modifier(var, :f, size),
do: quote(do: unquote(var) :: float - native - size(unquote(size)))
@doc """
Converts an Erlang float (float64) to float32 precision.
"""
def to_float32(float64) when is_float(float64) do
<<float32::float-32>> = <<float64::float-32>>
float32
end
## Reflection
@doc """
Returns the definition of mathematical unary funs.
"""
def unary_math_funs,
do: [
exp: {"exponential", quote(do: :math.exp(var!(x)))},
expm1: {"exponential minus one", quote(do: :math.exp(var!(x)) - 1)},
log: {"natural log", quote(do: :math.log(var!(x)))},
log1p: {"natural log plus one", quote(do: :math.log(var!(x) + 1))},
logistic: {"standard logistic (a sigmoid)", quote(do: 1 / (1 + :math.exp(-var!(x))))},
cos: {"cosine", quote(do: :math.cos(var!(x)))},
sin: {"sine", quote(do: :math.sin(var!(x)))},
tan: {"tangent", quote(do: :math.tan(var!(x)))},
cosh: {"hyperbolic cosine", quote(do: :math.cosh(var!(x)))},
sinh: {"hyperbolic sine", quote(do: :math.sinh(var!(x)))},
tanh: {"hyperbolic tangent", quote(do: :math.tanh(var!(x)))},
acos: {"inverse cosine", quote(do: :math.acos(var!(x)))},
asin: {"inverse sine", quote(do: :math.asin(var!(x)))},
atan: {"inverse tangent", quote(do: :math.atan(var!(x)))},
acosh: {"inverse hyperbolic cosine", acosh_formula()},
asinh: {"inverse hyperbolic sine", asinh_formula()},
atanh: {"inverse hyperbolic tangent", atanh_formula()},
sqrt: {"square root", quote(do: :math.sqrt(var!(x)))},
rsqrt: {"reverse square root", quote(do: 1 / :math.sqrt(var!(x)))},
cbrt: {"cube root", quote(do: :math.pow(var!(x), 1 / 3))},
erf: {"error function", erf_formula()},
erfc: {"one minus error function", erfc_formula()},
erf_inv: {"inverse error function", quote(do: Nx.Shared.erf_inv(var!(x)))}
]
defp atanh_formula do
if Code.ensure_loaded?(:math) and math_fun_supported?(:atanh, 1) do
quote(do: :math.atanh(var!(x)))
else
quote(do: :math.log((1 + var!(x)) / (1 - var!(x))) / 2)
end
end
defp asinh_formula do
if Code.ensure_loaded?(:math) and math_fun_supported?(:asinh, 1) do
quote(do: :math.asinh(var!(x)))
else
quote(do: :math.log(var!(x) + :math.sqrt(1 + var!(x) * var!(x))))
end
end
defp acosh_formula do
if Code.ensure_loaded?(:math) and math_fun_supported?(:acosh, 1) do
quote(do: :math.acosh(var!(x)))
else
quote(do: :math.log(var!(x) + :math.sqrt(var!(x) + 1) * :math.sqrt(var!(x) - 1)))
end
end
defp erf_formula do
if Code.ensure_loaded?(:math) and math_fun_supported?(:erf, 1) do
quote(do: :math.erf(var!(x)))
else
quote(do: Nx.Shared.erf(var!(x)))
end
end
defp erfc_formula do
if Code.ensure_loaded?(:math) and math_fun_supported?(:erfc, 1) do
quote(do: :math.erfc(var!(x)))
else
quote(do: 1.0 - Nx.Shared.erf(var!(x)))
end
end
@doc """
Checks if a given function is supported in the `:math` module.
"""
def math_fun_supported?(fun, arity) do
args =
case {fun, arity} do
{:atan, 1} -> [3.14]
{:atanh, 1} -> [0.9]
{_, 1} -> [1.0]
{_, 2} -> [1.0, 1.0]
end
_ = apply(:math, fun, args)
true
rescue
UndefinedFunctionError ->
false
end
@doc """
Approximation for the error function.
## Examples
iex> Nx.Shared.erf(0.999)
0.8422852791811658
iex> Nx.Shared.erf(0.01)
0.011283414826762329
"""
def erf(x) do
x = x |> max(-4.0) |> min(4.0)
x2 = x * x
alpha =
0.0
|> muladd(x2, -2.72614225801306e-10)
|> muladd(x2, 2.77068142495902e-08)
|> muladd(x2, -2.10102402082508e-06)
|> muladd(x2, -5.69250639462346e-05)
|> muladd(x2, -7.34990630326855e-04)
|> muladd(x2, -2.95459980854025e-03)
|> muladd(x2, -1.60960333262415e-02)
beta =
0.0
|> muladd(x2, -1.45660718464996e-05)
|> muladd(x2, -2.13374055278905e-04)
|> muladd(x2, -1.68282697438203e-03)
|> muladd(x2, -7.37332916720468e-03)
|> muladd(x2, -1.42647390514189e-02)
min(x * alpha / beta, 1.0)
end
defp muladd(acc, t, n) do
acc * t + n
end
@doc """
Approximation for the inverse error function.
## Examples
iex> Nx.Shared.erf_inv(0.999)
2.326753756865462
iex> Nx.Shared.erf_inv(0.01)
0.008862500728738846
"""
def erf_inv(x) do
w = -:math.log((1 - x) * (1 + x))
erf_inv_p(w) * x
end
defp erf_inv_p(w) when w < 5 do
w = w - 2.5
2.81022636e-08
|> muladd(w, 3.43273939e-07)
|> muladd(w, -3.5233877e-06)
|> muladd(w, -4.39150654e-06)
|> muladd(w, 0.00021858087)
|> muladd(w, -0.00125372503)
|> muladd(w, -0.00417768164)
|> muladd(w, 0.246640727)
|> muladd(w, 1.50140941)
end
defp erf_inv_p(w) do
w = :math.sqrt(w) - 3
-0.000200214257
|> muladd(w, 0.000100950558)
|> muladd(w, 0.00134934322)
|> muladd(w, -0.00367342844)
|> muladd(w, 0.00573950773)
|> muladd(w, -0.0076224613)
|> muladd(w, 0.00943887047)
|> muladd(w, 1.00167406)
|> muladd(w, 2.83297682)
end
## Types
@doc """
Builds the type of an element-wise binary operation.
"""
def binary_type(a, b) when is_number(a) and is_number(b), do: Nx.Type.infer(a + b)
def binary_type(a, b) when is_number(a), do: Nx.Type.merge_scalar(type(b), a)
def binary_type(a, b) when is_number(b), do: Nx.Type.merge_scalar(type(a), b)
def binary_type(a, b), do: Nx.Type.merge(type(a), type(b))
defp type(%T{type: type}), do: type
defp type(type), do: type
## Helpers
@doc """
Gets the implementation of a tensor.
"""
def impl!(%T{data: %struct{}}), do: struct
def impl!(%T{data: %struct1{}}, %T{data: %struct2{}}),
do: pick_struct(struct1, struct2)
def impl!(%T{data: %struct1{}}, %T{data: %struct2{}}, %T{data: %struct3{}}),
do: struct1 |> pick_struct(struct2) |> pick_struct(struct3)
@doc """
Gets the implementation of a list of maybe tensors.
"""
def find_impl!(list) do
Enum.reduce(list, Nx.BinaryBackend, fn
%T{data: %struct{}}, acc -> pick_struct(struct, acc)
_, acc -> acc
end)
end
defp pick_struct(Nx.BinaryBackend, struct), do: struct
defp pick_struct(struct, Nx.BinaryBackend), do: struct
defp pick_struct(struct, struct), do: struct
defp pick_struct(struct1, struct2) do
raise "cannot invoke Nx function because it relies on two incompatible tensor implementations: " <>
"#{inspect(struct1)} and #{inspect(struct2)}. You may need to call Nx.backend_transfer/1 " <>
"(or Nx.backend_copy/1) on one or both of them to transfer them to a common implementation"
end
end
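# A rough sanity check for the polynomial approximations above: erf_inv/1
# approximately inverts erf/1 (both are approximations, so agreement is only
# to a few decimal places).
#
#     iex> Nx.Shared.erf_inv(Nx.Shared.erf(0.5))
#     # ≈ 0.5, up to approximation error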
|
lib/nx/shared.ex
| 0.800731
| 0.573619
|
shared.ex
|
starcoder
|
defmodule Legolas do
@moduledoc """
  Legolas is a process message interceptor for debugging purposes. Under the hood
  it uses `dbg` to trace a single process: all messages received by the process
  are intercepted and forwarded to the designated collector processes.
## Adding targets
In order to start, first add a target process (pid) to intercept messages.
iex(1)> Legolas.add_target self()
:ok
The above code will intercept all messages sent to the `self` process.
## Adding collectors
A collector is a process (pid) that will receive all messages intercepted in the target processes.
iex(2)> Legolas.add_collector self()
In the above code `self` will receive all messages sent to targets processes.
## Adding structs
  Structs are the main pattern used to intercept and filter messages. Add multiple structs to Legolas:
iex(3)> Legolas.add_struct Middle.Earth.Orc
## Legolas in action
Now send a message to the target process and check how the collectors will receive the same message.
  @TODO: We need to support multiple pattern matches for messages; for now Legolas only
  intercepts messages with a defined struct (defstruct).
iex(4)> send self(), %Middle.Earth.Orc{}
%Middle.Earth.Orc{name: "Azog"}
iex(5)> IEx.Helpers.flush()
%Middle.Earth.Orc{name: "Azog"}
{:message, %Middle.Earth.Orc{name: "Azog"}}
:ok
  When a new message is intercepted, each collector process receives the same message as the target process, and a log is emitted.
"""
use GenServer
require Logger
@doc false
def start_link(_) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@doc """
Add a target process to catch messages sent and received to it.
## Example
iex(1)> Legolas.add_target self()
:ok
"""
@spec add_target(pid(), Keyword.t()) :: :ok
def add_target(pid_to, opts \\ [])
def add_target(pid_to, opts) when is_pid(pid_to) do
GenServer.call(__MODULE__, {:add_target, pid_to, opts})
end
def add_target(pid_to, _opts), do: raise ArgumentError, message: "[#{inspect __MODULE__}] the process `#{inspect pid_to}` is not a valid pid."
@doc """
Add a collector process to receive intercepted messages from targets.
## Example
iex(1)> Legolas.add_collector self()
:ok
"""
@spec add_collector(pid()) :: :ok
def add_collector(pid) when is_pid(pid) do
GenServer.call(__MODULE__, {:add_collector, pid})
end
def add_collector(pid), do: raise ArgumentError, message: "[#{inspect __MODULE__}] the process `#{inspect pid}` is not a valid pid."
@doc """
Add a struct to handle into patterns, so only messages with that struct
are added into buffer.
## Example
iex(1)> Legolas.add_struct Middle.Earth.Orc
:ok
"""
@spec add_struct(atom()) :: :ok
def add_struct(struct) do
GenServer.call(__MODULE__, {:add_struct, struct})
end
@impl true
def init([]) do
# initializes dbg and other deps in order
# to handle with dbg tracer
case :dbg.tracer(:process, {&handle_trace/2, :none}) do
{:error, :already_started} ->
Logger.warn "[#{inspect __MODULE__}] already started dbg tracer"
{:ok, dbg_pid} ->
Logger.debug "[#{inspect __MODULE__}] started dbg at #{inspect dbg_pid}"
end
{:ok, []}
end
@impl true
def handle_call({:add_target, pid_to, _opts}, _from, state) do
{:ok, _} = :dbg.p(pid_to, :m)
{:reply, :ok, state}
end
def handle_call({:add_collector, collector_pid}, _from, state) do
new_state = case Keyword.get(state, :collectors, []) do
[] -> Keyword.put(state, :collectors, [collector_pid])
_ -> Keyword.update!(state, :collectors, &(&1 ++ [collector_pid]))
end
{:reply, :ok, new_state}
end
def handle_call({:add_struct, struct}, _from, state) do
new_state = case Keyword.get(state, :structs, []) do
[] -> Keyword.put(state, :structs, [struct])
_ -> Keyword.update!(state, :structs, &(&1 ++ [struct]))
end
{:reply, :ok, new_state}
end
@impl true
def handle_cast({:handle_trace, _pid, :receive, message}, state) do
:ok = case Enum.member?(state[:structs], message.__struct__) do
true ->
:ok = state
|> Keyword.get(:collectors)
|> Enum.each(&send(&1, {:message, message}))
false -> :ok
end
{:noreply, state}
end
defp handle_trace({:trace, pid, :receive, %_{} = message}, _acc) do
GenServer.cast(__MODULE__, {:handle_trace, pid, :receive, message})
true
end
defp handle_trace(_ignored_messages, _acc), do: true
end
|
lib/legolas.ex
| 0.763307
| 0.614322
|
legolas.ex
|
starcoder
|
defmodule PhoenixFeathers.SideBar do
use PhoenixFeathers.LiveComponent
alias PhoenixFeathers.Icon
@doc """
Example
```
<%= live_component @socket,
PhoenixFeathers.SideBar,
id: "phoenix_feathers_side_bar",
is_open: false
do %>
<div>
My inner content
</div>
<% end %>
```
```
<%= live_component @socket,
PhoenixFeathers.SideBar,
id: "phoenix_feathers_side_bar",
is_open: false,
open_icon: %PhoenixFeathers.Icon{name: :menu, color: "#fff"},
close_icon: %PhoenixFeathers.Icon{name: :cheveron_left, color: "#fff"}
do %>
<div>
My inner content
</div>
<% end %>
```
"""
def render(%{open_icon: open_icon, close_icon: close_icon} = assigns) do
~L"""
<div class="phx_feathers_side_bar" data-is-open="<%= @is_open %>">
<div class="top">
<a
href="#"
phx-click="toggle_side_bar"
phx-target="<%= @myself %>"
class="toggle_icon"
>
<%= toggle_icon(assigns, @is_open, open_icon, close_icon) %>
</a>
</div>
<div class="side_bar_content">
<%= render_block(@inner_block) %>
</div>
</div>
"""
end
def render(assigns) do
~L"""
<div class="phx_feathers_side_bar" data-is-open="<%= @is_open %>">
<div class="top">
<a
href="#"
phx-click="toggle_side_bar"
phx-target="<%= @myself %>"
class="toggle_icon"
>
<%= toggle_icon(assigns, @is_open) %>
</a>
</div>
<div class="side_bar_content">
<%= render_block(@inner_block) %>
</div>
</div>
"""
end
def mount(socket) do
{:ok, socket}
end
def update(assigns, socket) do
{:ok, socket |> assign(assigns)}
end
def handle_event("toggle_side_bar", _params, %{assigns: %{is_open: is_open}} = socket) do
{:noreply, socket |> assign(is_open: !is_open)}
end
defp toggle_icon(assigns, true), do: svg(assigns, default_icon(:close))
defp toggle_icon(assigns, false), do: svg(assigns, default_icon(:open))
defp toggle_icon(assigns, true, _open_icon, close_icon), do: svg(assigns, close_icon)
defp toggle_icon(assigns, false, open_icon, _close_icon), do: svg(assigns, open_icon)
defp default_icon(:close), do: %Icon{name: :cheveron_left, color: "#fff"}
defp default_icon(:open), do: %Icon{name: :cheveron_right, color: "#fff"}
end
|
lib/phoenix_feathers/components/side_bar.ex
| 0.629433
| 0.541106
|
side_bar.ex
|
starcoder
|
defmodule Peptide do
  @key_value_delimiter "="
@moduledoc """
Provides explicit and auto loading of env files.
## Example
The following will set the `FOO` environment variable with the value of `bar`.
```
foo=bar
```
You can define comments with `#` but can't use `#` in values without wrapping
the value in double quotes.
```
foo="#bar" # Comment
```
"""
@doc """
  Loads the `.env` file from the current working directory.
"""
def config do
Application.ensure_started(:mix)
[".env"] |> load
end
def http_config(url) do
case HTTPoison.get(url) do
{:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
parse_http(body)
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, reason}
end
end
@doc """
Loads a list of env files.
"""
def load(env_files) do
for path <- env_files do
if File.exists?(path) do
File.read!(path) |> parse
end
end
end
def parse_http(items) do
{:ok, data} = Jason.decode(items)
Enum.each(data, fn {key, value} ->
set_env(key, value)
end)
end
@doc """
Parses env formatted file.
"""
def parse(content) do
content |> get_pairs |> load_env
end
defp get_pairs(content) do
content
|> String.split("\n")
|> Enum.reject(&blank_entry?/1)
|> Enum.reject(&comment_entry?/1)
|> Enum.map(&parse_line/1)
end
defp parse_line(line) do
[key, value] =
line
|> String.trim()
      |> String.split(@key_value_delimiter, parts: 2)
[key, parse_value(value)]
end
defp parse_value(value) do
if String.starts_with?(value, "\"") do
unquote_string(value)
else
value |> String.split("#", parts: 2) |> List.first()
end
end
defp unquote_string(value) do
value
|> String.split(~r{(?<!\\)"}, parts: 3)
|> Enum.drop(1)
|> List.first()
|> String.replace(~r{\\"}, ~S("))
end
defp load_env(pairs) when is_list(pairs) do
Enum.each(pairs, fn [key, value] ->
set_env(key, value)
end)
end
defp set_env(key, value) do
System.put_env(String.upcase(key), value)
end
defp blank_entry?(string) do
string == ""
end
defp comment_entry?(string) do
String.match?(string, ~r(^\s*#))
end
end
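# A minimal sketch of the parsing rules above: quotes allow `#` inside a
# value, while unquoted values are truncated at the first `#`.
#
#     iex> Peptide.parse(~s(foo="#bar" # comment\nbaz=qux))
#     iex> System.get_env("FOO")
#     "#bar"
#     iex> System.get_env("BAZ")
#     "qux"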
|
lib/peptide.ex
| 0.807195
| 0.840062
|
peptide.ex
|
starcoder
|
defmodule Sherbet.Service.Contact.Communication.Method.Email.VerificationKey.Template do
@moduledoc """
Template used to create the email for verification email requests.
## Configuration
There are two configuration options for this template. The first is customising
the fields (sender, subject, link formats) used in the default template. The
second is overriding the default template and replacing it with a completely
custom one.
The configuration falls under `:email`, `:verification`. e.g. `[email: [verification: ...]]`.
### Customising Fields
Expects a keyword list configuring each field.
* `:sender` - The from address of the email, can take the form of `email :: String.t`
or `{ name :: String.t, email :: String.t }`.
* `:subject` - The subject of the email, takes the form of `String.t`.
* `:verify_link` - The verify link, takes the form of `((email, key) -> String.t)`.
* `:request_removal_link` - The request removal link, takes the form of `(email -> String.t)`.
config :sherbet_service,
email: [verification: [
sender: { "Foo", "foo@bar" },
subject: "Approve Email Verification",
verify_link: &("https://example.com/verify/email/\#{&1}?key=\#{&2}"),
request_removal_link: &("https://example.com/removal_request/email/\#{&1}")
]]
### Custom Formatter
Expects a function returning the email. The function should be of the type
`((email, key) -> Cake.Email)`.
config :sherbet_service,
email: [verification: fn email, key ->
struct Cake.Email,
from: "foo@bar",
to: email
#...
end]
"""
alias Cake.Email
alias Sherbet.Service.Contact.Communication.Method.Email.VerificationKey.Template
defstruct [
formatter: &Template.format/1,
email: nil,
key: nil
]
def format(%{ email: email, key: key }) do
case Application.get_env(:sherbet_service, :email, [verification: [
sender: { "example", "<EMAIL>" },
subject: "Verify Email",
verify_link: &("https://example.com/verify?email=#{&1}&key=#{&2}"),
request_removal_link: &("https://example.com/removal_request?email=#{&1}")
]])[:verification] do
formatter when is_function(formatter, 2) -> formatter.(email, key)
state ->
%Email{
from: state[:sender],
to: email,
subject: state[:subject],
body: %Email.Body{
text: """
Hello,
            If you recently requested a verification link for #{email}, please verify it by following this link: #{state[:verify_link].(email, key)}
            If you didn't, you may request that the email be removed by following this link: #{state[:request_removal_link].(email)}
"""
}
}
end
end
end
|
apps/sherbet_service/lib/sherbet.service/contact/communication/method/email/verification_key/template.ex
| 0.807916
| 0.409044
|
template.ex
|
starcoder
|
defmodule Auctoritas do
@moduledoc """
# Auctoritas
## Basic Usage
```elixir
  iex> alias Auctoritas, as: Auth
  Auctoritas
  iex> auth_data = %{user_id: 123}
  %{user_id: 123}
  iex> {:ok, token, _data} = Auth.authenticate(auth_data)
  {:ok, "<KEY>", %Auctoritas.DataStorage.Data{...}}
  iex> {:ok, data} = Auth.get_token_data(token)
  {:ok,
  %Auctoritas.DataStorage.Data{
    data: %{user_id: 123},
    metadata: %{
      expires_in: 86385250,
      inserted_at: 1547158890,
      updated_at: 1547158890
    }
  }}
  iex> {:ok, true} = Auth.deauthenticate(token, :token)
  {:ok, true}
```
"""
use GenServer
alias Auctoritas.Config
alias Auctoritas.DataStorage.Data
alias Auctoritas.DataStorage.RefreshTokenData
@default_name "auctoritas_default"
@typedoc "Authentication token"
@type token() :: String.t()
@typedoc "Name from config (Auctoritas supervisor name)"
@type name() :: String.t()
@typedoc "Token expiration in seconds"
@type expiration() :: non_neg_integer()
@doc """
Start Auctoritas GenServer with specified config (from `Auctoritas.Config`)
"""
def start_link(%Config{} = config) do
GenServer.start_link(__MODULE__, config, name: auctoritas_name(config))
end
defp auctoritas_name(name) when is_bitstring(name) do
("auctoritas_authentication_manager_" <> name)
|> String.to_atom()
end
defp auctoritas_name(%Config{} = config) do
("auctoritas_authentication_manager_" <> config.name)
|> String.to_atom()
end
def init(%Config{} = config) do
{:ok, config}
end
@doc """
Authenticate with supplied arguments to default authentication_manager;
* authentication_data is checked and then used to generate token
* data is stored inside data_storage with token as the key
## Examples
      iex> Auctoritas.authenticate(%{username: "username", user_id: 1})
      {:ok, "ec4eecaff1cc7e9daa511620e47203424e70b9c9785d51d11f246f27fab33a0b", %Auctoritas.DataStorage.Data{...}}
"""
def authenticate(authentication_data) do
authenticate(auctoritas_name(@default_name), authentication_data)
end
@doc """
Authenticate with supplied arguments to custom authentication_manager;
* authentication_data is checked and then used to generate token
* data is stored inside data_storage with token as the key
## Examples
iex> Auctoritas.AuthenticationManager.authenticate("custom_name", %{username: "username"}, %{user_id: 1})
{:ok, "<KEY>"}
"""
def authenticate(name, authentication_data) when is_bitstring(name) do
authenticate(auctoritas_name(name), authentication_data)
end
def authenticate(pid, authentication_data) do
case GenServer.call(pid, {:authenticate, authentication_data}) do
{:ok, token, data} ->
{:ok, token, data}
{:ok, token, refresh_token, data, auth_data} ->
{:ok, token, refresh_token, data, auth_data}
{:error, error} ->
{:error, error}
end
end
def refresh_token(token) do
refresh_token(auctoritas_name(@default_name), token)
end
def refresh_token(name, token) when is_bitstring(name) do
refresh_token(auctoritas_name(name), token)
end
def refresh_token(pid, token) do
case GenServer.call(pid, {:refresh_token, token}) do
{:ok, token, refresh_token, data, auth_data} ->
{:ok, token, refresh_token, data, auth_data}
{:error, error} ->
{:error, error}
end
end
@spec authenticate_check(%Config{}, map()) ::
{:ok, token :: token(), %Data{}}
| {:ok, token :: token(), refresh_token :: token(), %Data{}, %RefreshTokenData{}}
| {:error, any()}
defp authenticate_check(config, authentication_data) do
case config.token_manager.generate_token_and_data(config.name, authentication_data) do
{:ok, token, data_map} ->
case config.token_type do
:refresh_token ->
with {:ok, refresh_token} <- config.token_manager.generate_refresh_token(config.name, authentication_data),
{:ok, _token, %Data{} = data} <-
config.data_storage.insert_token(config.name, config.expiration, token, refresh_token, data_map),
{:ok, _refresh_token, %RefreshTokenData{} = refresh_token_data} <-
config.data_storage.insert_refresh_token(
config.name,
config.refresh_token_expiration,
refresh_token,
token,
authentication_data
) do
{:ok, token, refresh_token, data, refresh_token_data}
else
{:error, error} -> {:error, error}
end
_ ->
with {:ok, token, %Data{} = data} <-
config.data_storage.insert_token(
config.name,
config.expiration,
token,
data_map
) do
{:ok, token, data}
else
{:error, error} -> {:error, error}
end
end
{:error, error} ->
{:error, error}
end
end
@spec refresh_token_check(%Config{token_type: :refresh_token}, refresh_token :: token()) ::
{:ok, token :: token(), refresh_token :: token(), %Data{}} | {:error, any()}
defp refresh_token_check(%Config{token_type: :refresh_token} = config, refresh_token) do
with {:ok, %RefreshTokenData{:auth_data => auth_data, :token => token}} <-
config.data_storage.get_refresh_token_data(config.name, refresh_token),
{:ok, new_token, new_refresh_token, data, refresh_token_data} <- authenticate_check(config, auth_data),
{:ok, true} <- config.data_storage.delete_token(config.name, token),
{:ok, true} <- config.data_storage.delete_refresh_token(config.name, refresh_token) do
{:ok, new_token, new_refresh_token, data, refresh_token_data}
else
{:error, error} -> {:error, error}
end
end
def get_token_data(token, :silent) do
get_token_data(@default_name, token, :silent)
end
def get_token_data(name, token, :silent) when is_bitstring(name) do
get_token_data(auctoritas_name(name), token, :silent)
end
def get_token_data(pid, token, :silent) do
case GenServer.call(pid, {:get_token_data, :silent, token}) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
@doc """
Get associated token data;
## Examples
      iex> {:ok, token, _data} = Auctoritas.authenticate(%{user_id: 1})
      {:ok, "<KEY>", %Auctoritas.DataStorage.Data{...}}
      iex> Auctoritas.get_token_data(token)
      {:ok,
      %Auctoritas.DataStorage.Data{
        data: %{user_id: 1},
        metadata: %{
          expires_in: 86310242,
          inserted_at: 1547201115,
          updated_at: 1547201115
        }
      }}
"""
def get_token_data(token) do
get_token_data(@default_name, token)
end
@doc """
Get associated token data;
## Examples
iex> Auctoritas.AuthenticationManager.authenticate("custom_name", %{username: "username"}, %{user_id: 1})
{:ok, "<KEY>"}
iex> Auctoritas.AuthenticationManager.get_token_data("custom_name", "<KEY>")
{:ok,
%Auctoritas.AuthenticationManager.DataStorage.Data{
data: %{user_id: 1},
metadata: %{
expires_in: 86310242,
inserted_at: 1547201115,
updated_at: 1547201115
}
}}
"""
def get_token_data(name, token) when is_bitstring(name) do
get_token_data(auctoritas_name(name), token)
end
def get_token_data(pid, token) do
case GenServer.call(pid, {:get_token_data, :normal, token}) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
def get_refresh_token_data(refresh_token) do
get_refresh_token_data(@default_name, refresh_token)
end
def get_refresh_token_data(name, refresh_token) when is_bitstring(name) do
get_refresh_token_data(auctoritas_name(name), refresh_token)
end
def get_refresh_token_data(pid, refresh_token) do
case GenServer.call(pid, {:get_refresh_token_data, refresh_token}) do
{:ok, auth_data} -> {:ok, auth_data}
{:error, error} -> {:error, error}
end
end
def get_tokens(start, amount) do
get_tokens(@default_name, start, amount)
end
def get_tokens(name, start, amount) when is_bitstring(name) do
get_tokens(auctoritas_name(name), start, amount)
end
def get_tokens(pid, start, amount) do
case GenServer.call(pid, {:get_tokens, start, amount}) do
{:ok, tokens} -> {:ok, tokens}
{:error, error} -> {:error, error}
end
end
def get_refresh_tokens(start, amount) do
get_refresh_tokens(@default_name, start, amount)
end
def get_refresh_tokens(name, start, amount) when is_bitstring(name) do
get_refresh_tokens(auctoritas_name(name), start, amount)
end
def get_refresh_tokens(pid, start, amount) do
case GenServer.call(pid, {:get_refresh_tokens, start, amount}) do
{:ok, tokens} -> {:ok, tokens}
{:error, error} -> {:error, error}
end
end
def get_tokens_with_data(start, amount) do
get_tokens_with_data(@default_name, start, amount)
end
def get_tokens_with_data(name, start, amount) when is_bitstring(name) do
get_tokens_with_data(auctoritas_name(name), start, amount)
end
def get_tokens_with_data(pid, start, amount) do
case get_tokens(pid, start, amount) do
{:ok, tokens} ->
tokens =
tokens
|> Enum.map(fn token ->
case get_token_data(pid, token, :silent) do
{:ok, token_data} -> token_data
{:error, error} -> {:error, error}
end
end)
{:ok, tokens}
{:error, error} ->
{:error, error}
end
end
@doc """
Deauthenticate supplied token from default authentication_manager
## Examples
      iex> {:ok, token, _data} = Auctoritas.authenticate(%{user_id: 1})
      {:ok, "<KEY>", %Auctoritas.DataStorage.Data{...}}
      iex> Auctoritas.deauthenticate(token, :token)
      {:ok, true}
"""
def deauthenticate(token, :token) when is_bitstring(token) do
deauthenticate(auctoritas_name(@default_name), token, :token)
end
@doc """
Deauthenticate supplied token from custom authentication_manager
## Examples
iex> Auctoritas.AuthenticationManager.authenticate("custom_name", %{username: "username"}, %{user_id: 1})
{:ok, "<KEY>"}
iex> Auctoritas.AuthenticationManager.deauthenticate("custom_name", "<KEY>")
{:ok, true}
"""
def deauthenticate(name, token, :token) when is_bitstring(name) and is_bitstring(token) do
deauthenticate(auctoritas_name(name), token, :token)
end
def deauthenticate(pid, token, :token) do
case GenServer.call(pid, {:deauthenticate, token, :token}) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
def deauthenticate(token, :refresh_token) when is_bitstring(token) do
deauthenticate(auctoritas_name(@default_name), token, :refresh_token)
end
def deauthenticate(name, token, :refresh_token)
when is_bitstring(name) and is_bitstring(token) do
deauthenticate(auctoritas_name(name), token, :refresh_token)
end
def deauthenticate(pid, token, :refresh_token) do
case GenServer.call(pid, {:deauthenticate, token, :refresh_token}) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
defp get_token_data_from_data_store(config, token) do
case config.data_storage.get_token_data(config.name, token) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
defp get_refresh_token_data_from_data_store(config, refresh_token) do
case config.data_storage.get_refresh_token_data(config.name, refresh_token) do
{:ok, auth_data} -> {:ok, auth_data}
{:error, error} -> {:error, error}
end
end
defp reset_token_expiration(config, token) do
case config.data_storage.reset_expiration(config.name, token, config.expiration) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
defp delete_token_from_data_store(config, token) do
case config.data_storage.delete_token(config.name, token) do
{:ok, data} -> {:ok, data}
{:error, error} -> {:error, error}
end
end
defp delete_refresh_token_from_data_store(config, token) do
with {:ok, data} <- config.data_storage.get_refresh_token_data(config.name, token),
{:ok, true} <- config.data_storage.delete_refresh_token(config.name, token) do
config.data_storage.delete_token(config.name, data.token)
{:ok, true}
else
{:error, error} -> {:error, error}
end
end
defp get_tokens_from_data_store(config, start, amount) do
case config.data_storage.get_tokens(config.name, start, amount) do
{:ok, tokens} -> {:ok, tokens}
{:error, error} -> {:error, error}
end
end
defp get_refresh_tokens_from_data_store(config, start, amount) do
case config.data_storage.get_refresh_tokens(config.name, start, amount) do
{:ok, tokens} -> {:ok, tokens}
{:error, error} -> {:error, error}
end
end
def handle_call(
{:refresh_token, refresh_token},
_from,
%Config{token_type: :refresh_token} = config
) do
case refresh_token_check(config, refresh_token) do
{:ok, token, refresh_token, data, refresh_token_data} ->
{:reply, {:ok, token, refresh_token, data, refresh_token_data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call(
{:authenticate, authentication_data},
_from,
%Config{token_type: :refresh_token} = config
) do
case authenticate_check(config, authentication_data) do
{:ok, token, refresh_token, data, refresh_token_data} ->
{:reply, {:ok, token, refresh_token, data, refresh_token_data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:authenticate, authentication_data}, _from, %Config{} = config) do
case authenticate_check(config, authentication_data) do
{:ok, token, data} ->
{:reply, {:ok, token, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call(
{:get_refresh_token_data, refresh_token},
_from,
%Config{token_type: :refresh_token} = config
) do
with {:ok, auth_data} <- get_refresh_token_data_from_data_store(config, refresh_token) do
{:reply, {:ok, auth_data}, config}
else
{:ok, false} -> {:reply, {:error, "Refresh token expired or doesn't exist"}, config}
{:error, error} -> {:reply, {:error, error}, config}
end
end
def handle_call(
{:get_token_data, :normal, token},
_from,
%Config{token_type: :sliding} = config
) do
with {:ok, true} <- reset_token_expiration(config, token),
{:ok, data} <- get_token_data_from_data_store(config, token) do
{:reply, {:ok, data}, config}
else
{:ok, false} -> {:reply, {:error, "Token expired or doesn't exist"}, config}
{:error, error} -> {:reply, {:error, error}, config}
end
end
def handle_call({:get_token_data, :normal, token}, _from, %Config{} = config) do
case get_token_data_from_data_store(config, token) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:get_token_data, :silent, token}, _from, %Config{} = config) do
case get_token_data_from_data_store(config, token) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:deauthenticate, token, :refresh_token}, _from, %Config{} = config) do
case delete_refresh_token_from_data_store(config, token) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:deauthenticate, token, :token}, _from, %Config{} = config) do
case delete_token_from_data_store(config, token) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:get_tokens, start, amount}, _from, %Config{} = config) do
case get_tokens_from_data_store(config, start, amount) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
def handle_call({:get_refresh_tokens, start, amount}, _from, %Config{} = config) do
case get_refresh_tokens_from_data_store(config, start, amount) do
{:ok, data} ->
{:reply, {:ok, data}, config}
{:error, error} ->
{:reply, {:error, error}, config}
end
end
end
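# A minimal sketch of the refresh-token flow above (assumes a started default
# instance configured with `token_type: :refresh_token`):
#
#     {:ok, token, refresh_token, _data, _refresh_data} =
#       Auctoritas.authenticate(%{user_id: 123})
#
#     # refresh_token_check/2 issues a fresh token pair and deletes the old
#     # token and refresh token on success.
#     {:ok, _new_token, _new_refresh_token, _data, _auth_data} =
#       Auctoritas.refresh_token(refresh_token)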
|
lib/auctoritas.ex
| 0.787482
| 0.410874
|
auctoritas.ex
|
starcoder
|
defmodule SFSObject.Binary.Encoder do
def encode(data_wrapper, output \\ <<>>)
def encode({:null, _}, output) do
output <> <<0>>
end
def encode({:bool, v}, output) do
output <> <<1, encode_bool(v)>>
end
def encode({:byte, v}, output) do
output <> <<2, v::signed-size(8)>>
end
def encode({:short, v}, output) do
output <> <<3, v::signed-size(16)>>
end
def encode({:int, v}, output) do
output <> <<4, v::signed-size(32)>>
end
def encode({:long, v}, output) do
output <> <<5, v::signed-size(64)>>
end
def encode({:float, v}, output) do
output <> <<6, v::float-signed-size(32)>>
end
def encode({:double, v}, output) do
output <> <<7, v::float-signed-size(64)>>
end
def encode({:string, v}, output) do
size = byte_size(v)
output <> <<8, size::size(16), v::binary>>
end
  def encode({:bool_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<encode_bool(val)::size(8)>> end)
    output <> <<9, size::size(16), data::binary>>
  end
  def encode({:byte_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::signed-size(8)>> end)
    output <> <<10, size::size(32), data::binary>>
  end
  def encode({:short_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::signed-size(16)>> end)
    output <> <<11, size::size(16), data::binary>>
  end
  def encode({:int_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::signed-size(32)>> end)
    output <> <<12, size::size(16), data::binary>>
  end
  def encode({:long_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::signed-size(64)>> end)
    output <> <<13, size::size(16), data::binary>>
  end
  def encode({:float_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::float-signed-size(32)>> end)
    output <> <<14, size::size(16), data::binary>>
  end
  def encode({:double_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val -> <<val::float-signed-size(64)>> end)
    output <> <<15, size::size(16), data::binary>>
  end
  def encode({:string_array, v}, output) do
    size = length(v)
    data = transform(v, <<>>, fn val ->
      <<byte_size(val)::signed-size(16), val::binary>> end)
    output <> <<16, size::size(16), data::binary>>
  end
  def encode({:array, v}, output) do
    size = length(v)
    # Each element is encoded standalone; the accumulator starts empty so the
    # existing output is not duplicated into the element data.
    data = transform(v, <<>>, fn val -> encode(val) end)
    output <> <<17, size::size(16), data::binary>>
  end
  def encode({:object, v}, output) do
    size = Map.size(v)
    data = transform(Map.to_list(v), <<>>, fn {key, val} ->
      encode_key(key) <> encode(val) end)
    output <> <<18, size::size(16), data::binary>>
  end
  defp encode_key(key) do
    # Key length is in bytes (byte_size/1), not graphemes, so UTF-8 keys encode correctly.
    <<byte_size(key)::size(16), key::binary>>
  end
defp encode_bool(true), do: 1
defp encode_bool(false), do: 0
defp transform([], output, _fun), do: output
defp transform([val|rest], output, fun) do
transform(rest, output <> fun.(val), fun)
end
end
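# A minimal wire-format sketch: a type tag byte, then a length for
# variable-size values, then the payload.
#
#     iex> SFSObject.Binary.Encoder.encode({:int, 7})
#     <<4, 0, 0, 0, 7>>
#     iex> SFSObject.Binary.Encoder.encode({:string, "hi"})
#     <<8, 0, 2, 104, 105>>  # 104/105 are the bytes of "hi"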
|
lib/sfsobject/binary/encoder.ex
| 0.513912
| 0.536434
|
encoder.ex
|
starcoder
|
defmodule Keyboards.Switch.Cherry do
use OpenSCAD
## Square Dimensions of a Cherry MX compatible switch
@keyswitch_width 14.4
@keyswitch_depth 14.4
## how wide/deep the walls are around the switch
@border 3
@y_translate (@border/2)+(@keyswitch_depth/2)
@x_translate (@border/2)+(@keyswitch_width/2)
def plate(width_u \\ 1, depth_u \\ 1, height_mm \\ 4) do
union([
mount(height_mm),
difference([
## Solid Plate
cube(size: [uToMillis(width_u), uToMillis(depth_u), height_mm], center: true),
cube(size: [@keyswitch_width+@border*2, @keyswitch_depth+@border*2, height_mm], center: true)
]) |> translate(v: [0,0, height_mm/2])
])
end
def mount(height_mm \\ 4) do
plate_width = @keyswitch_width + @border*2
plate_depth = @keyswitch_depth + @border*2
plate_height = height_mm
## Top Wall
top_wall =
cube(size: [plate_width, @border, plate_height], center: true)
|> translate(v: [0, @y_translate, (plate_height/2)])
## Bottom Wall
bottom_wall =
cube(size: [plate_width, @border, plate_height], center: true)
|> translate(v: [0, -@y_translate, (plate_height/2)])
## Right Wall
right_wall =
cube(size: [@border, plate_depth, plate_height], center: true)
|> translate(v: [@x_translate, 0, (plate_height/2)])
right_nub =
[
cube(size: [1.5, 2.75, plate_height], center: true)
|> translate(v: [7.95, 0, plate_height/2]),
cylinder(_fn: 30, h: 2.75, r: 1, center: true)
|> rotate(a: [90,0,0])
|> translate(v: [7.2, 0, 1])
] |> hull
## Left Wall
left_wall =
cube(size: [@border, plate_depth, plate_height], center: true)
|> translate(v: [-@x_translate, 0, (plate_height/2)])
left_nub =
[
cube(size: [1.5, 2.75, plate_height], center: true)
|> translate(v: [-7.95, 0, plate_height/2]),
cylinder(_fn: 30, h: 2.75, r: 1, center: true)
|> rotate(a: [90, 0, 0])
|> translate(v: [-7.2, 0, 1])
] |> hull
[top_wall, bottom_wall, left_wall, left_nub, right_wall, right_nub]
|> union
end
def uToMillis(u) do
(18 * (u-1)) + 20.4
end
end
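# Spacing sanity check for uToMillis/1: each key unit beyond the first adds
# 18mm to the base 1u plate size of 20.4mm.
#
#     iex> Keyboards.Switch.Cherry.uToMillis(1)
#     20.4
#     iex> Keyboards.Switch.Cherry.uToMillis(2)
#     38.4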
|
lib/switch/cherry.ex
| 0.541409
| 0.456289
|
cherry.ex
|
starcoder
|
defmodule YNAB.ParseResponse do
alias YNAB.ParseData
def parse(raw_response = %HTTPoison.Response{body: %{data: data}}) do
case data do
%{accounts: account_maps} ->
format_response(raw_response, ParseData.parse_accounts(account_maps))
%{account: account_map} ->
format_response(raw_response, ParseData.parse_account(account_map))
%{budgets: budget_maps} ->
format_response(raw_response, ParseData.parse_budgets(budget_maps))
%{budget: budget_map} ->
format_response(raw_response, ParseData.parse_budget(budget_map))
%{category_groups: category_group_maps} ->
format_response(raw_response, ParseData.parse_category_groups(category_group_maps))
%{category: category_map} ->
format_response(raw_response, ParseData.parse_category(category_map))
%{months: month_maps} ->
format_response(raw_response, ParseData.parse_months(month_maps))
%{month: month_map} ->
format_response(raw_response, ParseData.parse_month(month_map))
%{payee: payee_map} ->
format_response(raw_response, ParseData.parse_payee(payee_map))
%{payee_locations: payee_location_maps} ->
format_response(raw_response, ParseData.parse_payee_locations(payee_location_maps))
%{payee_location: payee_location_map} ->
format_response(raw_response, ParseData.parse_payee_location(payee_location_map))
%{payees: payee_maps} ->
format_response(raw_response, ParseData.parse_payees(payee_maps))
%{scheduled_transactions: scheduled_transaction_maps} ->
format_response(
raw_response,
ParseData.parse_scheduled_transactions(scheduled_transaction_maps)
)
%{scheduled_transaction: scheduled_transaction_map} ->
format_response(
raw_response,
ParseData.parse_scheduled_transaction(scheduled_transaction_map)
)
%{settings: budget_settings_map} ->
format_response(raw_response, ParseData.parse_budget_settings(budget_settings_map))
%{transactions: transaction_maps} ->
format_response(raw_response, ParseData.parse_transactions(transaction_maps))
%{transaction: transaction_map} ->
format_response(raw_response, ParseData.parse_transaction(transaction_map))
end
end
defp format_response(raw_response, data) do
{:ok, raw_response, data}
end
end
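# A minimal dispatch sketch: the single key under `body.data` selects which
# ParseData function runs (struct shapes come from ParseData, not this file):
#
#     {:ok, raw_response, budgets} =
#       YNAB.ParseResponse.parse(%HTTPoison.Response{body: %{data: %{budgets: budget_maps}}})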
|
lib/ynab/parse_response.ex
| 0.534127
| 0.516717
|
parse_response.ex
|
starcoder
|
defmodule IndifferentAccess.Plug do
@moduledoc """
Several modes of indifferent access can be configured via the opts passed in to
a pipeline plug.
The default constructs a new IndifferentAccess.Params struct in place of Conn.params and Conn.query_params.
That struct has two behaviors, the default where any map returned from its Access methods
will also be a Params struct, or if `strategy: :static` is passed in here, it will only set the top level
params as a struct and any returned values will be unchanged.
Alternatively, if you want to retain a bare map in your params, pass the option `as: :map` and
it will recursively replace string keys with atom keys in params maps if the strings have existing atoms.
Note that this option isn't truly "indifferent" and will only support atom access because it has replaced the
strings with atoms as the keys.
  There is another option, not recommended: passing `strategy: :augment`
  in addition to `as: :map` will leave string keys in place alongside additional atom keys pointing to the
same value. Note this basically renders enumeration on the map useless/confusing, and also makes updates to the map
problematic as the two values will diverge. This version may not be supported long term, but since this is an experimental
library and it was easy to support as an option, it was left in place.
"""
import IndifferentAccess, only: [indifferentize: 2]
def init(opts) do
[strategy: Keyword.get(opts, :strategy, :replace), as: Keyword.get(opts, :as, :struct)]
end
@doc """
This is meant to be called in a Plug pipeline, and assumes that params and query_params have already been fetched prior to this call.
The valid opts are `:as` and `:strategy`, which are set to default values of `:struct` and `:replace` by init/1
"""
def call(conn, opts) do
conn
|> Map.put(:params, indifferentize(conn.params, opts))
|> Map.put(:query_params, indifferentize(conn.query_params, opts))
end
end
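# A minimal pipeline sketch (hypothetical router; assumes params and
# query_params were fetched earlier in the pipeline, as call/2 requires):
#
#     plug IndifferentAccess.Plug, as: :struct, strategy: :replace
#
#     # With the default struct mode, both of these resolve to the same value:
#     #   conn.params["user"]
#     #   conn.params[:user]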
|
lib/plug.ex
| 0.793666
| 0.559049
|
plug.ex
|
starcoder
|
defmodule ExTimeAgo do
@moduledoc """
"xxx ago" past indicator from a previous project in 2019, ported to Elixir
"""
defmodule Timespan do
@moduledoc """
Millisecond-precise date and timespan
"""
@enforce_keys [:dt]
defstruct dt: {{0, 0, 0}, {0, 0, 0}}, ms: nil
@type t :: %__MODULE__{
dt: {{integer, integer, integer}, {integer, integer, integer}},
ms: nil | integer
}
end
@spec string!(Timespan.t()) :: binary
def string!(span) do
string(span, false)
end
defp determine_separator(val) when val, do: " "
defp determine_separator(_), do: ""
# n < 1ms
defp string(%Timespan{dt: {{0, 1, 1}, {0, 0, 0}}, ms: 0}, false) do
"<1ms"
end
# 1sec > n > 1ms (or if we bail out)
defp string(%Timespan{dt: {{0, 1, 1}, {0, 0, 0}}, ms: ms}, true) when ms == 0 or ms == nil do
""
end
defp string(%Timespan{dt: {{0, 1, 1}, {0, 0, 0}}, ms: ms}, sep) when ms > 0 and ms < 1000 do
determine_separator(sep) <> "#{ms}ms"
end
# 1min > n > 1sec
defp string(span = %Timespan{dt: {{0, 1, 1}, {0, 0, 0}}, ms: _}, _) do
string(span, true)
end
defp string(span = %Timespan{dt: {{0, 1, 1}, {0, 0, dsec}}, ms: _}, sep)
when dsec > 0 and dsec < 60 do
determine_separator(sep) <> "#{dsec}sec" <> string(%{span | dt: {{0, 1, 1}, {0, 0, 0}}}, true)
end
# 1min > n > 1sec
defp string(span = %Timespan{dt: {{0, 1, 1}, {0, 0, _}}, ms: _}, _) do
string(%{span | dt: {{0, 1, 1}, {0, 0, 0}}}, true)
end
defp string(span = %Timespan{dt: {{0, 1, 1}, {0, dmin, dsec}}, ms: _}, sep)
when dmin > 0 and dmin < 60 do
determine_separator(sep) <>
"#{dmin}min" <> string(%{span | dt: {{0, 1, 1}, {0, 0, dsec}}}, true)
end
# 1hr > n > 1min
defp string(span = %Timespan{dt: {{0, 1, 1}, {0, _, dsec}}, ms: _}, _) do
string(%{span | dt: {{0, 1, 1}, {0, 0, dsec}}}, true)
end
defp string(span = %Timespan{dt: {{0, 1, 1}, {dhr, dmin, dsec}}, ms: _}, sep)
when dhr > 0 and dhr < 24 do
determine_separator(sep) <>
"#{dhr}hr" <> string(%{span | dt: {{0, 1, 1}, {0, dmin, dsec}}}, true)
end
# 1d > n > 1hr
defp string(span = %Timespan{dt: {{0, 1, 1}, {_, dmin, dsec}}, ms: _}, _) do
string(%{span | dt: {{0, 1, 1}, {0, dmin, dsec}}}, true)
end
defp string(span = %Timespan{dt: {{0, 1, dd}, {dhr, dmin, dsec}}, ms: _}, sep) when dd > 1 do
determine_separator(sep) <>
"#{dd - 1}dy" <> string(%{span | dt: {{0, 1, 1}, {dhr, dmin, dsec}}}, true)
end
  # 1mo > n > 1dy
defp string(span = %Timespan{dt: {{0, dm, dd}, {dhr, dmin, dsec}}, ms: _}, sep) when dm > 1 do
determine_separator(sep) <>
"#{dm - 1}mo" <> string(%{span | dt: {{0, 1, dd}, {dhr, dmin, dsec}}}, true)
end
# n > 1y
defp string(span = %Timespan{dt: {{dy, dm, dd}, {dhr, dmin, dsec}}, ms: _}, _) when dy > 0 do
"#{dy}yr" <> string(%{span | dt: {{0, dm, dd}, {dhr, dmin, dsec}}}, true)
end
@spec unix_span!(Timespan.t(), Timespan.t()) :: Timespan.t()
def unix_span!(d1, d0) when d1.ms >= d0.ms do
sc = :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{dt: :calendar.gregorian_seconds_to_datetime(s1 - s0 - sc), ms: d1.ms - d0.ms}
end
def unix_span!(d1, d0) when d1.ms < d0.ms do
sc = :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{
dt: :calendar.gregorian_seconds_to_datetime(s1 - s0 - sc - 1),
ms: d1.ms - d0.ms + 1000
}
end
def unix_span!(d1, d0) do
sc = :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{dt: :calendar.gregorian_seconds_to_datetime(s1 - s0 - sc)}
end
@spec span!(Timespan.t(), Timespan.t()) :: Timespan.t()
def span!(d1, d0) when d1.ms >= d0.ms do
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{dt: :calendar.gregorian_seconds_to_datetime(s1 - s0), ms: d1.ms - d0.ms}
end
def span!(d1, d0) when d1.ms < d0.ms do
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{dt: :calendar.gregorian_seconds_to_datetime(s1 - s0 - 1), ms: d1.ms - d0.ms + 1000}
end
def span!(d1, d0) do
s1 = :calendar.datetime_to_gregorian_seconds(d1.dt)
s0 = :calendar.datetime_to_gregorian_seconds(d0.dt)
%Timespan{dt: :calendar.gregorian_seconds_to_datetime(s1 - s0)}
end
@spec now :: Timespan.t()
def now do
ts = :erlang.timestamp()
tsd = :calendar.now_to_datetime(ts)
{_, _, tsu} = ts
%Timespan{dt: tsd, ms: div(tsu, 1000)}
end
end
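# A minimal usage sketch: the span between two datetimes, rendered as a
# compact duration string by the clauses above.
#
#     iex> d0 = %ExTimeAgo.Timespan{dt: {{2019, 1, 1}, {0, 0, 0}}, ms: 0}
#     iex> d1 = %ExTimeAgo.Timespan{dt: {{2019, 1, 2}, {3, 4, 5}}, ms: 0}
#     iex> ExTimeAgo.span!(d1, d0) |> ExTimeAgo.string!()
#     "1dy 3hr 4min 5sec"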
|
apps/extimeago/lib/extimeago.ex
| 0.768993
| 0.552751
|
extimeago.ex
|
starcoder
|
defmodule SMPPEX.ESME do
@moduledoc """
Module for implementing custom SMPP ESME entities.
`SMPPEX.ESME` represents a `GenServer` process which spawns and interacts with `SMPPEX.Session`
`ranch_protocol`. The session is spawned under control of `ranch` supervision tree.
  The session makes all requests to the ESME process *synchronously* (via `GenServer.call`),
  while the ESME process makes only *asynchronous* (via `GenServer.cast`) requests to the session.
  This is intentional, since it allows:
  * avoiding any kind of deadlock while the session and the ESME process interact actively;
  * controlling the incoming SMPP message rate to avoid flooding;
  * keeping control over the connection despite the asynchronous nature of the TCP implementation in OTP.
  To implement an ESME entity, one should implement several callbacks (`SMPPEX.ESME` behaviour).
The most proper way to do it is to `use` `SMPPEX.ESME`:
```
defmodule MyESME do
use SMPPEX.ESME
# ...Callback implementation
end
```
In this case all callbacks have reasonable defaults.
"""
alias :erlang, as: Erlang
alias SMPPEX.ClientPool
alias SMPPEX.ESME
alias SMPPEX.Pdu
alias SMPPEX.PduStorage
alias SMPPEX.Session
alias SMPPEX.SMPPTimers
use GenServer
require Logger
defstruct [
:client_pool,
:smpp_session,
:module,
:module_state,
:pdu_storage,
:timers,
:response_limit,
:time,
:timer_resolution,
:tick_timer_ref
]
@default_timeout 5000
@default_enquire_link_limit 30000
@default_enquire_link_resp_limit 30000
@default_inactivity_limit :infinity
@default_response_limit 60000
@default_timer_resolution 100
@default_call_timeout 5000
@default_transport :ranch_tcp
@default_pool_size 2
@type state :: term
@type request :: term
@doc """
Invoked when the ESME is started after a connection to the SMSC has been successfully established.
The `args` argument is taken directly from the `start_link` call, which does not return until `init` finishes.
The return value should be either `{:ok, state}`, in which case the ESME starts successfully and the returned
state is later passed to the other callbacks, or `{:stop, reason}`, in which case the ESME `GenServer` stops
with the returned reason.
"""
@callback init(args :: term) :: {:ok, state} | {:stop, reason :: term}
@doc """
Invoked when the ESME receives an incoming PDU (which is not a response PDU).
The returned value is used as the new state.
"""
@callback handle_pdu(pdu :: Pdu.t, state) :: state
@doc """
Invoked when the ESME receives a response to a previously sent PDU.
The `pdu` argument contains the received response PDU, while `original_pdu` contains
the previously sent PDU to which the response corresponds.
The returned value is used as the new state.
"""
@callback handle_resp(pdu :: Pdu.t, original_pdu :: Pdu.t, state) :: state
@doc """
Invoked when the ESME does not receive a response to a previously sent PDU
within the specified timeout.
The `pdu` argument contains the PDU for which no response was received. If the response
is received later, it will be dropped (with an `info` log message).
The returned value is used as the new state.
"""
@callback handle_resp_timeout(pdu :: Pdu.t, state) :: state
@doc """
Invoked when the SMPP session has successfully sent a PDU to the transport or failed to do so.
`pdu` argument contains the PDU for which send status is reported. `send_pdu_result` can be
either `:ok` or `{:error, reason}`.
The returned value is used as the new state.
"""
@callback handle_send_pdu_result(pdu :: Pdu.t, send_pdu_result :: SMPPEX.SMPPHandler.send_pdu_result, state) :: state
@doc """
Invoked when the SMPP session is about to stop.
The returned value is ignored.
"""
@callback handle_stop(state) :: any
@doc """
Invoked for handling `call/3` calls.
The callback is invoked synchronously.
The returned values have the same meaning as in `GenServer` `handle_call` callback
(but note that only two kinds of responses are possible). In case of delaying a reply (`{:noreply, state}` callback result)
it can be later send using `GenServer.reply(from, reply)`
"""
@callback handle_call(request, from :: GenServer.from, state) :: {:reply, reply :: term, state} | {:noreply, state}
@doc """
Invoked for handling `cast/2` calls.
The callback is called asynchronously.
The returned value is used as the new state.
"""
@callback handle_cast(request, state) :: state
@doc """
Invoked for handling generic messages sent to the ESME process.
The returned value is used as the new state.
"""
@callback handle_info(request, state) :: state
defmacro __using__(_) do
quote location: :keep do
@behaviour SMPPEX.ESME
@doc false
def init(args) do
{:ok, args}
end
@doc false
def handle_pdu(_pdu, state), do: state
@doc false
def handle_resp(_pdu, _original_pdu, state), do: state
@doc false
def handle_resp_timeout(_pdu, state), do: state
@doc false
def handle_send_pdu_result(_pdu, _result, state), do: state
@doc false
def handle_stop(_state), do: nil
@doc false
def handle_call(_request, _from, state), do: {:reply, :ok, state}
@doc false
def handle_cast(_request, state), do: state
@doc false
def handle_info(_request, state), do: state
defoverridable [
init: 1,
handle_pdu: 2,
handle_resp: 3,
handle_resp_timeout: 2,
handle_send_pdu_result: 3,
handle_stop: 1,
handle_call: 3,
handle_cast: 2,
handle_info: 2
]
end
end
# Public interface
@spec start_link(host :: term, port :: non_neg_integer, {module, args :: term}, opts :: Keyword.t) :: GenServer.on_start
@doc """
Starts the ESME entity.
The function does not return until the ESME successfully connects to the specified
`host` and `port` and finishes initialization, or until it fails.
`module` is the callback module which should implement `SMPPEX.ESME` behaviour.
`args` is the argument passed to the `init` callback.
`opts` is a keyword list of different options:
* `:transport` is the Ranch transport used for the TCP connection: either `ranch_tcp` (the default) or
`ranch_ssl`;
* `:gen_server_opts` is a list of options passed directly to the underlying `GenServer.start_link` call,
the default is `[]`;
* `:timeout` is the timeout for the whole connect and initialization process. The default is #{@default_timeout} ms;
* `:esme_opts` is a keyword list of ESME options:
- `:timer_resolution` is the interval of internal `ticks` on which time related events happen, like checking timeouts
for pdus, checking SMPP timers, etc. The default is #{@default_timer_resolution} ms;
- `:enquire_link_limit` is the value for the enquire_link SMPP timer, i.e. the interval of SMPP session inactivity after which
an enquire_link PDU is sent to "ping" the connection. The default value is #{@default_enquire_link_limit} ms;
- `:enquire_link_resp_limit` is the maximum time for which the ESME waits for an enquire_link PDU response. If the
response is not received within this interval and no other activity from the peer occurs, the session is considered
dead and the ESME stops. The default value is #{@default_enquire_link_resp_limit} ms;
- `:inactivity_limit` is the maximum time for which the peer is allowed not to send PDUs (which are not response PDUs).
If no such PDUs are received within this interval, the ESME stops. The default is #{@default_inactivity_limit} ms;
- `:response_limit` is the maximum time to wait for a response to a previously sent PDU. If the response is
not received within this interval, the `handle_resp_timeout` callback is triggered for the original pdu. If the response
is received later, it is discarded. The default value is #{@default_response_limit} ms.
If the `:esme_opts` list of options is omitted, all options take their default values.
The whole `opts` argument may also be omitted in order to start the ESME with the defaults.
The returned value is either `{:ok, pid}` or `{:error, reason}`.
"""
def start_link(host, port, {module, args}, opts \\ []) do
transport = Keyword.get(opts, :transport, @default_transport)
gen_server_opts = Keyword.get(opts, :gen_server_opts, [])
timeout = Keyword.get(opts, :timeout, @default_timeout)
esme_opts = Keyword.get(opts, :esme_opts, [])
GenServer.start_link(
__MODULE__,
[
convert_host(host),
port,
{module, args},
transport,
timeout,
esme_opts
],
gen_server_opts
)
end
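# Illustrative only (the host, port and MyESME module are hypothetical):
#
#     {:ok, esme} = SMPPEX.ESME.start_link("smsc.example.com", 2775, {MyESME, []})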
@spec send_pdu(esme :: pid, pdu :: Pdu.t) :: :ok
@doc """
Sends an outgoing PDU from the ESME.
The command is sent to the ESME asynchronously. The further lifecycle of the PDU
can be traced through the callbacks.
"""
def send_pdu(esme, pdu) do
GenServer.cast(esme, {:send_pdu, pdu})
end
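# Sketch (assumes SMPPEX.Pdu.Factory.submit_sm/3 from this library; the
# addresses and message are hypothetical):
#
#     pdu = SMPPEX.Pdu.Factory.submit_sm({"from", 1, 1}, {"to", 1, 1}, "hello")
#     :ok = SMPPEX.ESME.send_pdu(esme, pdu)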
@spec reply(esme :: pid, pdu :: Pdu.t, reply_pdu :: Pdu.t) :: :ok
@doc """
Sends a reply to a previously received PDU.
The command is sent to the ESME asynchronously. The further lifecycle of the response PDU
can be traced through the callbacks.
"""
def reply(esme, pdu, reply_pdu) do
GenServer.cast(esme, {:reply, pdu, reply_pdu})
end
@spec stop(esme :: pid) :: :ok
@doc """
Stops the ESME asynchronously.
The exact moment of SMPP session termination can be traced via the `handle_stop` callback.
"""
def stop(esme) do
GenServer.cast(esme, :stop)
end
@spec call(esme :: pid, request :: term, timeout) :: term
@doc """
Makes a synchronous call to the ESME.
The call is handled by `handle_call/3` ESME callback.
"""
def call(esme, request, timeout \\ @default_call_timeout) do
GenServer.call(esme, {:call, request}, timeout)
end
@spec cast(pid, term) :: :ok
@doc """
Makes an asynchronous request to the ESME.
The call is handled by `handle_cast/2` ESME callback.
"""
def cast(esme, request) do
GenServer.cast(esme, {:cast, request})
end
@spec with_session(esme :: pid, (smpp_session :: pid -> any)) :: :ok
@doc """
Asynchronously executes the given lambda, passing the SMPP session (`SMPPEX.Session`) to it directly.
This function can be used for uncommon cases like sending PDUs while bypassing timers or
sequence_number assignment.
"""
def with_session(esme, fun) do
GenServer.cast(esme, {:with_session, fun})
end
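# For example, to send a PDU bypassing the ESME's PDU storage and timers (sketch):
#
#     SMPPEX.ESME.with_session(esme, fn session ->
#       SMPPEX.Session.send_pdu(session, pdu)
#     end)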
@spec handle_pdu(pid, Pdu.t) :: :ok
@doc false
def handle_pdu(esme, pdu) do
GenServer.call(esme, {:handle_pdu, pdu})
end
@spec handle_stop(pid) :: :ok
@doc false
def handle_stop(esme) do
GenServer.call(esme, :handle_stop)
end
@type send_pdu_result :: :ok | {:error, term}
@spec handle_send_pdu_result(pid, Pdu.t, send_pdu_result) :: :ok
@doc false
def handle_send_pdu_result(esme, pdu, send_pdu_result) do
GenServer.call(esme, {:handle_send_pdu_result, pdu, send_pdu_result})
end
# GenServer callbacks
@doc false
def init([host, port, mod_with_args, transport, timeout, esme_opts]) do
esme = self()
handler = fn(ref, _socket, _transport, session) ->
Process.link(esme)
Kernel.send esme, {ref, session}
{:ok, SMPPEX.ESME.SMPPHandler.new(esme)}
end
pool_size = Keyword.get(esme_opts, :pool_size, @default_pool_size)
case start_session(handler, host, port, transport, timeout, pool_size, esme_opts) do
{:ok, pool, session} ->
init_esme(mod_with_args, pool, session, esme_opts)
{:error, reason} -> {:stop, reason}
end
end
@doc false
def handle_call({:handle_pdu, pdu}, _from, st) do
case Pdu.resp?(pdu) do
true -> do_handle_resp(pdu, st)
false -> do_handle_pdu(pdu, st)
end
end
def handle_call(:handle_stop, _from, st) do
do_handle_stop(st)
end
def handle_call({:handle_send_pdu_result, pdu, send_pdu_result}, _from, st) do
do_handle_send_pdu_result(pdu, send_pdu_result, st)
end
def handle_call({:call, request}, from, st) do
case st.module.handle_call(request, from, st.module_state) do
{:reply, reply, new_module_state} ->
new_st = %ESME{st | module_state: new_module_state}
{:reply, reply, new_st}
{:noreply, new_module_state} ->
new_st = %ESME{st | module_state: new_module_state}
{:noreply, new_st}
end
end
@doc false
def handle_cast({:send_pdu, pdu}, st) do
new_st = do_send_pdu(pdu, st)
{:noreply, new_st}
end
def handle_cast({:reply, pdu, reply_pdu}, st) do
new_st = do_reply(pdu, reply_pdu, st)
{:noreply, new_st}
end
def handle_cast({:with_session, fun}, st) do
fun.(st.smpp_session)
{:noreply, st}
end
def handle_cast(:stop, st) do
Session.stop(st.smpp_session)
{:noreply, st}
end
def handle_cast({:cast, request}, st) do
new_module_state = st.module.handle_cast(request, st.module_state)
new_st = %ESME{st | module_state: new_module_state}
{:noreply, new_st}
end
@doc false
def handle_info({:timeout, _timer_ref, :emit_tick}, st) do
new_tick_timer_ref = Erlang.start_timer(st.timer_resolution, self(), :emit_tick)
Erlang.cancel_timer(st.tick_timer_ref)
Kernel.send self(), {:tick, SMPPEX.Time.monotonic}
{:noreply, %ESME{st | tick_timer_ref: new_tick_timer_ref}}
end
def handle_info({:tick, time}, st) do
do_handle_tick(time, st)
end
def handle_info(request, st) do
new_module_state = st.module.handle_info(request, st.module_state)
new_st = %ESME{st | module_state: new_module_state}
{:noreply, new_st}
end
@default_options [:binary, {:packet, 0}, {:active, :once}]
defp socket_options(:ranch_ssl, options) do
cert_file = Keyword.get(options, :certfile)
key_file = Keyword.get(options, :keyfile)
ssl_options = cond do
cert_file != nil and key_file != nil ->
[{:certfile, cert_file},{:keyfile, key_file}]
cert_file != nil ->
[{:certfile, cert_file}]
true -> []
end
@default_options ++ ssl_options
end
defp socket_options(_, _), do: @default_options
# Private functions
defp start_session(handler, host, port, transport, timeout, pool_size, opts \\ []) do
socket_opts = socket_options(transport, opts)
case transport.connect(host, port, socket_opts, timeout) do
{:ok, socket} ->
pool = ClientPool.start(handler, pool_size, transport, timeout)
ClientPool.start_session(pool, socket)
ref = ClientPool.ref(pool)
receive do
{^ref, session} ->
{:ok, pool, session}
after timeout ->
{:error, :session_init_timeout}
end
{:error, _} = err -> err
end
end
defp init_esme({module, args}, pool, session, esme_opts) do
case module.init(args) do
{:ok, state} ->
timer_resolution = Keyword.get(esme_opts, :timer_resolution, @default_timer_resolution)
timer_ref = Erlang.start_timer(timer_resolution, self(), :emit_tick)
enquire_link_limit = Keyword.get(esme_opts, :enquire_link_limit, @default_enquire_link_limit)
enquire_link_resp_limit = Keyword.get(esme_opts, :enquire_link_resp_limit, @default_enquire_link_resp_limit)
inactivity_limit = Keyword.get(esme_opts, :inactivity_limit, @default_inactivity_limit)
time = SMPPEX.Time.monotonic
timers = SMPPTimers.new(
time,
:infinity,
enquire_link_limit,
enquire_link_resp_limit,
inactivity_limit
)
pdu_storage_pid = case Keyword.get(esme_opts, :pdu_storage_pid, nil) do
nil ->
{:ok, pid} = PduStorage.start_link()
pid
pid -> pid
end
response_limit = Keyword.get(esme_opts, :response_limit, @default_response_limit)
{:ok, %ESME{
client_pool: pool,
smpp_session: session,
module: module,
module_state: state,
pdu_storage: pdu_storage_pid,
timers: timers,
response_limit: response_limit,
time: time,
timer_resolution: timer_resolution,
tick_timer_ref: timer_ref
}}
{:stop, _} = stop ->
ClientPool.stop(pool)
stop
end
end
defp do_handle_pdu(pdu, st) do
new_module_state = st.module.handle_pdu(pdu, st.module_state)
new_timers = SMPPTimers.handle_peer_transaction(st.timers, st.time)
{:reply, :ok, %ESME{st | module_state: new_module_state, timers: new_timers}}
end
defp do_handle_resp(pdu, st) do
sequence_number = Pdu.sequence_number(pdu)
new_timers = SMPPTimers.handle_peer_action(st.timers, st.time)
new_st = %ESME{st | timers: new_timers}
case PduStorage.fetch(st.pdu_storage, sequence_number) do
[] ->
# Don't drop response PDUs whose sequence numbers are not recognized:
# with_session may be in use, and PDUs sent through it bypass PduStorage.
# Return nil as the original_pdu and let the client handle it via pattern matching.
do_handle_resp_for_pdu(pdu, nil, new_st)
[original_pdu] ->
do_handle_resp_for_pdu(pdu, original_pdu, new_st)
end
end
defp do_handle_resp_for_pdu(pdu, original_pdu, st) do
new_module_state = st.module.handle_resp(pdu, original_pdu, st.module_state)
new_st = %ESME{st | module_state: new_module_state}
case Pdu.bind_resp?(pdu) do
true -> do_handle_bind_resp(pdu, new_st)
false -> {:reply, :ok, new_st}
end
end
defp do_handle_bind_resp(pdu, st) do
case Pdu.bind_success_resp?(pdu) do
true ->
new_timers = SMPPTimers.handle_bind(st.timers, st.time)
new_st = %ESME{st | timers: new_timers}
{:reply, :ok, new_st}
false ->
Logger.info("esme #{inspect self()}, bind failed with status #{Pdu.command_status(pdu)}, stopping")
Session.stop(st.smpp_session)
{:reply, :ok, st}
end
end
defp do_handle_stop(st) do
_ = st.module.handle_stop(st.module_state)
Process.unlink(st.smpp_session)
ClientPool.stop(st.client_pool)
{:stop, :normal, :ok, st}
end
defp do_handle_send_pdu_result(pdu, send_pdu_result, st) do
new_module_state = st.module.handle_send_pdu_result(pdu, send_pdu_result, st.module_state)
new_st = %ESME{st | module_state: new_module_state}
{:reply, :ok, new_st}
end
defp do_handle_tick(time, st) do
expired_pdus = PduStorage.fetch_expired(st.pdu_storage, time)
new_st = do_handle_expired_pdus(expired_pdus, st)
do_handle_timers(time, new_st)
end
defp do_handle_expired_pdus([], st), do: st
defp do_handle_expired_pdus([pdu | pdus], st) do
new_module_state = st.module.handle_resp_timeout(pdu, st.module_state)
new_st = %ESME{st | module_state: new_module_state}
do_handle_expired_pdus(pdus, new_st)
end
defp do_handle_timers(time, st) do
case SMPPTimers.handle_tick(st.timers, time) do
{:ok, new_timers} ->
new_st = %ESME{st | timers: new_timers, time: time}
{:noreply, new_st}
{:stop, reason} ->
Logger.info("esme #{inspect self()}, being stopped by timers(#{reason})")
Session.stop(st.smpp_session)
{:noreply, st}
{:enquire_link, new_timers} ->
new_st = %ESME{st | timers: new_timers, time: time}
do_send_enquire_link(new_st)
end
end
defp do_send_enquire_link(st) do
enquire_link = SMPPEX.Pdu.Factory.enquire_link
new_st = do_send_pdu(enquire_link, st)
{:noreply, new_st}
end
defp do_send_pdu(pdu, st) do
pdu = case pdu.sequence_number do
0 -> %Pdu{pdu | sequence_number: PduStorage.reserve_sequence_number(st.pdu_storage)}
_ -> pdu
end
true = PduStorage.store(st.pdu_storage, pdu, st.time + st.response_limit)
Session.send_pdu(st.smpp_session, pdu)
st
end
defp do_reply(pdu, reply_pdu, st) do
new_reply_pdu = %Pdu{reply_pdu | sequence_number: pdu.sequence_number}
Session.send_pdu(st.smpp_session, new_reply_pdu)
st
end
defp convert_host(host) when is_binary(host), do: to_charlist(host)
defp convert_host(host), do: host
end
|
lib/smppex/esme.ex
| 0.840701
| 0.85318
|
esme.ex
|
starcoder
|
defmodule Analyzer do
import SumMag
@moduledoc """
Provides optimizer for anonymous functions.
"""
@doc """
iex> var = quote do [x] end
iex> Analyzer.parse(var)
[var: {:x, [], AnalyzerTest}]
"""
def parse(args) when is_list(args) do
func = fn node, asm ->
[supported?(node) | asm]
end
args
|> Enum.reduce([], func)
|> Enum.reverse()
|> List.flatten()
end
def parse(other), do: [var: other]
@doc """
Checks if an expression can be optimized.
When the expression can be optimized, a keyword list such as `[var: var]` or `[func: map]` is returned,
where the map has the following shape: %{args: _, operators: _}.
iex> var = quote do x end
...> Analyzer.supported?(var)
[var: {:x, [], AnalyzerTest}]
iex> quote do
...> fn x -> x + 1 end
...> end |> Analyzer.supported?
[func: %{args: [{:x, [], AnalyzerTest}, 1], operators: [:+]}]
"""
def supported?({_, _, atom} = var) when is_atom(atom) do
[var: var]
end
def supported?({:@, _, [{_, _, nil}]} = var) do
[var: var]
end
def supported?({:fn, _, [{:->, _, [_arg, expr]}]}) do
polynomial_map(expr)
end
def supported?({:&, _, expr}) do
expr |> hd |> polynomial_map
end
def supported?({:{}, [], list} = tuple)
when is_list(list) do
[var: tuple]
end
def supported?(num) when is_number(num) do
[var: [num]]
end
def supported?(other) do
other
|> Macro.quoted_literal?()
|> case do
false -> {:error, other}
true -> [var: other]
end
end
def polynomial_map(ast) do
acc = %{
operators: [],
args: []
}
polymap = Macro.prewalk(ast, acc, &numerical?/2) |> elem(1)
[func: polymap]
end
defp numerical?({:., _, _} = aliases, acc), do: {aliases, acc}
defp numerical?({:__aliases__, _, _} = aliases, acc), do: {aliases, acc}
defp numerical?({:&, _, _} = cap_val, acc), do: {cap_val, acc}
defp numerical?({_atom, _, context} = val, acc) when is_atom(context) do
{val, acc}
end
defp numerical?({atom, _, args} = ast, acc) when is_list(args) do
%{
operators: operators,
args: map_args
} = acc
operators =
case operator(atom) do
false -> operators
atom -> [atom | operators]
end
map_args =
args
|> Enum.reverse()
|> Enum.reduce(
map_args,
fn x, acc ->
listing_literal(x, acc)
end
)
ret = %{
operators: operators,
args: map_args
}
{ast, ret}
end
defp numerical?(other, acc), do: {other, acc}
def listing_literal(term, acc) do
if Macro.quoted_literal?(term) do
[term | acc]
else
case quoted_var?(term) do
false -> acc
_ -> [term | acc]
end
end
end
defp operator(:+), do: :+
defp operator(:-), do: :-
defp operator(:/), do: :/
defp operator(:*), do: :*
defp operator(:rem), do: :rem
# Logical Operator
defp operator(:>=), do: :>=
defp operator(:<=), do: :<=
defp operator(:!=), do: :!=
defp operator(:<), do: :<
defp operator(:>), do: :>
defp operator(:==), do: :==
defp operator(:!), do: :!
defp operator(:!==), do: :!==
defp operator(:<>), do: :<>
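# Qualified calls (e.g. Enum.sum(list)) are recorded as "Module.function" strings.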
defp operator({:., _, [{:__aliases__, _, [module]}, func]}) do
Atom.to_string(module) <> "." <> Atom.to_string(func)
end
defp operator(_), do: false
end
|
lib/pelemay/analyzer.ex
| 0.556882
| 0.482429
|
analyzer.ex
|
starcoder
|
defmodule Crux.Gateway.Command do
# credo:disable-for-this-file Credo.Check.Readability.SinglePipe
@moduledoc """
Builds [Gateway Commands](https://discord.com/developers/docs/topics/gateway#commands-and-events-gateway-commands).
Note: Only the sent ones can be found here.
A list of available op codes:
| OP Code | Name | Direction |
| ------- | ---------------------- | ------------- |
| 0 | dispatch | received only |
| 1 | heartbeat | two way |
| 2 | identify | sent only |
| 3 | status_update | sent only |
| 4 | voice_state_update | sent only |
| 5 | Removed / Not for bots | neither |
| 6 | resume | sent only |
| 7 | reconnect | received only |
| 8 | request_guild_members | sent only |
| 9 | invalid_session | received only |
| 10 | hello | received only |
| 11 | heartbeat_ack | received only |
[Gateway Lifecycle Flowchart](https://s.gus.host/flowchart.svg)
"""
alias :erlang, as: Erlang
alias :os, as: OS
@typedoc """
Encoded command ready to be sent to the gateway via `Crux.Gateway.send_command/3`.
If you want to build custom commands (read: new commands not yet supported by crux_gateway),
build a valid [Gateway Payload Structure](https://discord.com/developers/docs/topics/gateway#payloads-gateway-payload-structure)
using string keys(!) and pass it to `encode_command/1`.
"""
@opaque command :: {:binary, iodata()}
@doc """
Builds a [Heartbeat](https://discord.com/developers/docs/topics/gateway#heartbeat) command.
Used to signal to the server that the client is still alive and able to receive messages.
> Internally handled by `Crux.Gateway` already.
"""
@spec heartbeat(sequence :: non_neg_integer() | nil) :: command()
def heartbeat(sequence), do: finalize(sequence, 1)
@doc """
Builds an [Identify](https://discord.com/developers/docs/topics/gateway#identify) command.
Used to identify the gateway connection and "log in".
> Internally handled by `Crux.Gateway` already.
"""
@spec identify(
data :: %{
:shard_id => non_neg_integer(),
:shard_count => pos_integer(),
:token => String.t(),
:intents => non_neg_integer(),
optional(:presence) => Crux.Gateway.presence()
}
) :: command()
def identify(
%{shard_id: shard_id, shard_count: shard_count, token: token, intents: intents} = data
) do
presence =
data
|> _get_presence()
|> _update_status()
{os, name} = OS.type()
%{
"token" => token,
"properties" => %{
"$os" => Atom.to_string(os) <> " " <> Atom.to_string(name),
"$browser" => "Crux",
"$device" => "Crux"
},
"compress" => true,
"large_threshold" => 250,
"shard" => [shard_id, shard_count],
"presence" => presence,
"intents" => intents
}
|> finalize(2)
end
defp _get_presence(%{presence: fun, shard_id: shard_id, shard_count: shard_count})
when is_function(fun, 2) do
fun.(shard_id, shard_count)
end
defp _get_presence(%{presence: presence})
when is_map(presence) do
presence
end
defp _get_presence(%{presence: nil}) do
%{}
end
defp _get_presence(data)
when not is_map_key(data, :presence) do
%{}
end
@doc """
Builds a [Update Voice State](https://discord.com/developers/docs/topics/gateway#update-voice-state) command.
Used to join, switch between, and leave voice channels and/or change self_mute or self_deaf states.
"""
@spec update_voice_state(
guild_id :: Crux.Structs.Snowflake.t(),
channel_id :: Crux.Structs.Snowflake.t() | nil,
states :: [{:self_mute, boolean()} | {:self_deaf, boolean()}]
) :: command()
def update_voice_state(guild_id, channel_id \\ nil, states \\ []) do
%{
"guild_id" => guild_id,
"channel_id" => channel_id,
"self_mute" => Keyword.get(states, :self_mute, false),
"self_deaf" => Keyword.get(states, :self_deaf, false)
}
|> finalize(4)
end
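# For example (guild_id and channel_id are hypothetical snowflakes):
#
#     # join a voice channel self-deafened
#     update_voice_state(guild_id, channel_id, self_deaf: true)
#     # disconnect from voice in that guild
#     update_voice_state(guild_id)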
@typedoc """
Used to set an activity via `update_status/2`.
`:type` must be a valid [Activity Type](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-types)
> Note that the streaming activity type requires a Twitch URL pointing to a valid channel!
"""
@type activity :: %{
:name => String.t(),
:type => non_neg_integer(),
optional(:url) => String.t()
}
@doc """
Builds a [Update Status](https://discord.com/developers/docs/topics/gateway#update-status) command.
Used to update the status of the client, including activity.
"""
@spec update_status(status :: String.t(), activities :: [activity()] | []) :: command()
def update_status(status, activities \\ []) do
%{status: status, activities: activities}
|> _update_status()
|> finalize(3)
end
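# Sketch (activity type 0 is "Playing" in the Activity Types table linked above):
#
#     update_status("online", [%{name: "with crux", type: 0}])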
# Helper function used from within identify/1.
defp _update_status(presence) do
presence
|> Map.new(fn
{k, v} when is_list(v) ->
stringify_key = fn {k, v} -> {to_string(k), v} end
activities = Enum.map(v, &Map.new(&1, stringify_key))
{to_string(k), activities}
{k, v} ->
{to_string(k), v}
end)
|> Map.merge(%{"afk" => false, "since" => 0})
end
@doc """
Builds a [Request Guild Members](https://discord.com/developers/docs/topics/gateway#request-guild-members) command.
Used to request guild members for a specific guild.
> Note: This must be sent to the connection handling the guild, otherwise the request will just be ignored.
The gateway will respond with `:GUILD_MEMBERS_CHUNK` packets until all (requested) members have been received.
"""
@spec request_guild_members(
guild_id :: Crux.Structs.Snowflake.t(),
opts ::
[
{:query, String.t()}
| {:limit, non_neg_integer()}
| {:presences, boolean()}
| {:user_ids, Crux.Structs.Snowflake.t() | [Crux.Structs.Snowflake.t()]}
| {:nonce, String.t()}
]
| map()
) :: command()
def request_guild_members(guild_id, opts \\ %{})
def request_guild_members(guild_id, %{} = opts) do
other_opts =
case opts do
%{query: query, user_ids: user_ids} -> %{"query" => query, "user_ids" => user_ids}
%{query: query} -> %{"query" => query}
%{user_ids: user_ids} -> %{"user_ids" => user_ids}
%{} -> %{"query" => ""}
end
other_opts =
case opts do
%{nonce: nonce} -> Map.put(other_opts, :nonce, nonce)
_ -> other_opts
end
%{
"guild_id" => guild_id,
"limit" => Map.get(opts, :limit, 0),
"presences" => Map.get(opts, :presences, false)
}
|> Map.merge(other_opts)
|> finalize(8)
end
def request_guild_members(guild_id, opts), do: request_guild_members(guild_id, Map.new(opts))
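# For example, to request up to 10 members whose names start with "Wum"
# (guild_id is a hypothetical snowflake):
#
#     request_guild_members(guild_id, query: "Wum", limit: 10)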
@doc """
Builds a [Resume](https://discord.com/developers/docs/topics/gateway#resume) command.
Used to resume a session which was unexpectedly disconnected and may be resumable.
> Internally handled by `Crux.Gateway` already.
"""
@spec resume(
data :: %{
seq: non_neg_integer(),
token: String.t(),
session_id: String.t()
}
) :: command()
def resume(%{seq: seq, token: token, session_id: session_id}) do
%{
"seq" => seq,
"token" => token,
"session_id" => session_id
}
|> finalize(6)
end
@doc """
Encodes the given command map to a term that can be sent using `Crux.Gateway.send_command/3`.
"""
@spec encode_command(map()) :: command()
def encode_command(command) do
{:binary, Erlang.term_to_binary(command)}
end
@spec finalize(
data :: %{String.t() => map() | String.t()} | non_neg_integer() | nil,
op :: integer()
) :: command()
defp finalize(data, op) do
%{
"op" => op,
"d" => data
}
|> encode_command()
end
end
|
lib/gateway/command.ex
| 0.819207
| 0.463262
|
command.ex
|
starcoder
|
defmodule CloudflareStream do
require Logger
@opts_keys %{
# media
"TYPE" => :type,
"GROUP-ID" => :group_id,
"NAME" => :name,
"LANGUAGE" => :language,
"DEFAULT" => :default,
"AUTOSELECT" => :autoselect,
"URI" => :uri,
# extension
"EXT" => :extension,
# track info
"RESOLUTION" => :resolution,
"CODECS" => :codecs,
"BANDWIDTH" => :bandwidth,
"FRAME-RATE" => :frame_rate,
"AUDIO" => :audio
}
@opts_types %{
codecs: :list,
autoselect: :boolean,
default: :boolean,
bandwidth: :integer
}
@track_key :track
@doc """
Example usage:
```
CloudflareStream.parse_metadata(metadata, :m3u)
```
Extracts all available data from M3U video metadata
Example .m3u8 metadata:
```
#EXTM3U
#EXT-X-VERSION:6
#EXT-X-INDEPENDENT-SEGMENTS
#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="group_audio",NAME="eng",LANGUAGE="en",DEFAULT=YES,AUTOSELECT=YES,URI="stream_tf1bea3e54501931464e543bb005e9d0d_r15218387.m3u8"
#EXT-X-STREAM-INF:RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2",BANDWIDTH=3728000,FRAME-RATE=30.000,AUDIO="group_audio"
stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218383.m3u8
#EXT-X-STREAM-INF:RESOLUTION=1920x1080,CODECS="avc1.4d4028,mp4a.40.2",BANDWIDTH=5328000,FRAME-RATE=30.000,AUDIO="group_audio"
stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218385.m3u8
#EXT-X-STREAM-INF:RESOLUTION=854x480,CODECS="avc1.4d401f,mp4a.40.2",BANDWIDTH=1928000,FRAME-RATE=30.000,AUDIO="group_audio"
stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218382.m3u8
#EXT-X-STREAM-INF:RESOLUTION=640x360,CODECS="avc1.4d401e,mp4a.40.2",BANDWIDTH=928000,FRAME-RATE=30.000,AUDIO="group_audio"
stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218381.m3u8
#EXT-X-STREAM-INF:RESOLUTION=426x240,CODECS="avc1.42c015,mp4a.40.2",BANDWIDTH=528000,FRAME-RATE=30.000,AUDIO="group_audio"
stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218380.m3u8
```
Parse result:
```
%{
extension: "M3U",
media: %{
autoselect: true,
default: true,
group_id: "group_audio",
language: "en",
name: "eng",
type: "AUDIO",
uri: "stream_tf1bea3e54501931464e543bb005e9d0d_r15218387.m3u8"
},
tracks: [
%{
audio: "group_audio",
bandwidth: 3728000,
codecs: ["avc1.4d401f", "mp4a.40.2"],
frame_rate: "30.000",
resolution: "1280x720",
uri: "stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218383.m3u8"
},
%{
audio: "group_audio",
bandwidth: 5328000,
codecs: ["avc1.4d4028", "mp4a.40.2"],
frame_rate: "30.000",
resolution: "1920x1080",
uri: "stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218385.m3u8"
},
%{
audio: "group_audio",
bandwidth: 1928000,
codecs: ["avc1.4d401f", "mp4a.40.2"],
frame_rate: "30.000",
resolution: "854x480",
uri: "stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218382.m3u8"
},
%{
audio: "group_audio",
bandwidth: 928000,
codecs: ["avc1.4d401e", "mp4a.40.2"],
frame_rate: "30.000",
resolution: "640x360",
uri: "stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218381.m3u8"
},
%{
audio: "group_audio",
bandwidth: 528000,
codecs: ["avc1.42c015", "mp4a.40.2"],
frame_rate: "30.000",
resolution: "426x240",
uri: "stream_ta2d3c5d56a9f8537daa3b2b7ddabd5b0_r15218380.m3u8"
}
],
version: "6"
}
```
"""
@spec parse_metadata(String.t(), :m3u) :: map()
def parse_metadata(metadata, :m3u) do
split =
metadata
|> String.split("\n")
split
|> Enum.map(fn x -> parse_line(x, metadata) end)
|> parse_format(split)
|> format()
end
defp format(opts) do
opts
|> Enum.filter(&is_map/1)
|> collect_keys(@track_key, :tracks)
|> Enum.reduce(%{}, fn x, acc ->
Map.merge(acc, x)
end)
end
defp collect_keys(opts, key, put_key) do
collected =
opts
|> Enum.filter(fn x ->
Map.get(x, key) != nil
end)
|> Enum.reduce(%{put_key => []}, fn x, acc ->
v = Map.get(x, key)
Map.put(acc, put_key, acc[put_key] ++ [v])
end)
opts
|> Enum.reject(fn x ->
Map.get(x, key) != nil
end)
|> List.insert_at(-1, collected)
end
defp opts_atom_key(key) do
@opts_keys
|> Map.get(key, key)
end
defp parse_format(x, [h | _t]) when is_binary(h) do
format =
h
|> String.split("#EXT")
|> List.last()
format_opts =
"EXT"
|> opts_atom_key()
|> List.wrap()
|> List.insert_at(1, format)
|> List.to_tuple()
|> List.wrap()
|> Map.new()
|> List.wrap()
format_opts ++ x
end
defp parse_line("#EXT-X-VERSION:" <> opts, _) do
opts
|> version()
end
defp parse_line("#EXT-X-MEDIA:" <> opts, _) do
opts
|> fragment("URI")
|> fragment("LANGUAGE")
|> fragment("GROUP-ID")
|> fragment("NAME")
|> extract_opts([:media])
end
defp parse_line("#EXT-X-STREAM-INF:" <> opts, raw) do
opts
|> uri(raw)
|> fragment("CODECS")
|> fragment("AUDIO")
|> extract_opts([@track_key])
end
defp parse_line("stream_" <> _track_path = x, _) do
x
end
defp parse_line(x, _) do
Logger.debug("Ignored metadata string >> #{x}")
x
end
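# A track's URI is the first non-empty line following its #EXT-X-STREAM-INF
# line in the raw metadata; append it to the attribute string so that
# extract_opts/2 picks it up as a URI=... pair.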
defp uri(line, raw) do
uri =
raw
|> String.split(line)
|> List.last()
|> String.split("\n")
|> Enum.filter(fn x -> String.length(x) > 0 end)
|> List.first()
line
|> String.replace_suffix("", "," <> "URI=#{uri}")
end
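# Quoted attribute values may contain commas (e.g. CODECS="a,b"); rewrite those
# commas to semicolons and drop the quotes, so the later split on "," keeps the
# value in one piece. parse_opt_value/2 splits :list values back on ";".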
defp fragment(line, key) do
fragment =
line
|> String.split("#{key}=\"")
|> List.last()
|> String.split("\"")
|> List.first()
new_fragment =
fragment
|> String.replace(",", ";", [])
replace_fragment =
fragment
|> String.replace_prefix("", "\"")
|> String.replace_suffix("", "\"")
line
|> String.replace(replace_fragment, new_fragment)
end
defp version(version) do
%{version: version}
end
defp extract_opts(opts, path) when is_list(path) do
opts
|> String.split(",")
|> Enum.flat_map(fn x ->
String.split(x, "=")
end)
|> Enum.chunk_every(2)
|> parse_opts(path)
end
defp parse_opts(opts, path) do
opts
|> Enum.map(&List.to_tuple/1)
|> Enum.reduce(%{}, fn ({key, value}, acc) ->
key = opts_atom_key(key)
value = parse_opt_value(key, value)
put_opts(acc, path ++ [key], value)
end)
end
defp parse_opt_value(key, value) do
case Map.get(@opts_types, key) do
:list -> String.split(value, ";")
:boolean -> bool_value(value)
:integer -> integer_value(value)
_ -> value
end
end
defp integer_value(value) do
case Integer.parse(value) do
{x, _} -> x
_ -> value
end
end
defp bool_value("YES"), do: true
defp bool_value(_), do: false
defp put_opts(opts, path, value) do
last = List.last(path)
path
|> Enum.reduce(opts, fn (x, acc) ->
acc =
case Map.get(acc, x) do
nil when last != x -> put_in(acc, [x], %{})
_ -> acc
end
case x do
k when k == last -> put_in(acc, path, value)
_ -> acc
end
end)
end
end
|
lib/cloudflare_stream.ex
| 0.611962
| 0.58599
|
cloudflare_stream.ex
|
starcoder
|
defmodule ExPlasma do
@moduledoc """
Documentation for ExPlasma.
"""
alias ExPlasma.Transaction
alias ExPlasma.Transaction.TypeMapper
# Constants that identify payment types. Make sure that when a new payment
# type is introduced, it is named `paymentV2`.
# https://github.com/omisego/plasma-contracts/blob/6ab35256b805e25cfc30d85f95f0616415220b20/plasma_framework/docs/design/tx-types-dependencies.md
@payment_v1 TypeMapper.tx_type_for(:tx_payment_v1)
@fee TypeMapper.tx_type_for(:tx_fee_token_claim)
@type transaction_type :: non_neg_integer()
@doc """
Simple payment type V1
## Example
iex> ExPlasma.payment_v1()
1
"""
@spec payment_v1() :: 1
def payment_v1(), do: @payment_v1
@doc """
Transaction fee claim V1
## Example
iex> ExPlasma.fee()
3
"""
@spec fee() :: 3
def fee(), do: @fee
@doc """
Transaction types
## Example
iex> ExPlasma.transaction_types()
[1, 3]
"""
@spec transaction_types :: [1 | 3, ...]
def transaction_types(), do: [payment_v1(), fee()]
@doc """
Encode the given Transaction into its RLP-encoded binary representation.
If `signed: false` is given in the list of opts, will encode the transaction without its signatures.
## Example
iex> txn =
...> %ExPlasma.Transaction{
...> inputs: [
...> %ExPlasma.Output{
...> output_data: nil,
...> output_id: %{blknum: 0, oindex: 0, position: 0, txindex: 0},
...> output_type: nil
...> }
...> ],
...> metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
...> outputs: [
...> %ExPlasma.Output{
...> output_data: %{
...> amount: 1,
...> output_guard: <<29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153,
...> 217, 206, 65, 226, 241, 55, 0, 110>>,
...> token: <<46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206,
...> 65, 226, 241, 55, 0, 110>>
...> },
...> output_id: nil,
...> output_type: 1
...> }
...> ],
...> sigs: [],
...> tx_data: 0,
...> tx_type: 1
...> }
iex> ExPlasma.encode(txn, signed: false)
{:ok, <<248, 116, 1, 225, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 237, 1, 235, 148, 29, 246, 47,
41, 27, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110,
148, 46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226,
241, 55, 0, 110, 1, 128, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>}
"""
defdelegate encode(transaction, opts \\ []), to: Transaction
@doc """
Throwing version of encode/2
"""
defdelegate encode!(transaction, opts \\ []), to: Transaction
@doc """
Attempt to decode the given RLP list into a Transaction.
If `signed: false` is given in the list of opts, expects the underlying RLP to not contain signatures.
Only validates that the RLP is structurally correct and that the tx type is supported.
Does not perform any other kind of validation, use validate/1 for that.
## Example
iex> rlp = <<248, 116, 1, 225, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
...> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 237, 1, 235, 148,
...> 29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241,
...> 55, 0, 110, 148, 46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217,
...> 206, 65, 226, 241, 55, 0, 110, 1, 128, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
...> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
iex> ExPlasma.decode(rlp, signed: false)
{:ok,
%ExPlasma.Transaction{
inputs: [
%ExPlasma.Output{
output_data: nil,
output_id: %{blknum: 0, oindex: 0, position: 0, txindex: 0},
output_type: nil
}
],
metadata: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
outputs: [
%ExPlasma.Output{
output_data: %{
amount: 1,
output_guard: <<29, 246, 47, 41, 27, 46, 150, 159, 176, 132, 157, 153,
217, 206, 65, 226, 241, 55, 0, 110>>,
token: <<46, 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206,
65, 226, 241, 55, 0, 110>>
},
output_id: nil,
output_type: 1
}
],
witnesses: [],
sigs: [],
tx_data: 0,
tx_type: 1
}
}
"""
defdelegate decode(tx_bytes, opts \\ []), to: Transaction
@doc """
Keccak hash the Transaction. This is used in the contracts and events to reference transactions.
## Example
iex> rlp = <<248, 74, 192, 1, 193, 128, 239, 174, 237, 1, 235, 148, 29, 246, 47, 41, 27,
...> 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55, 0, 110, 148, 46,
...> 38, 45, 41, 28, 46, 150, 159, 176, 132, 157, 153, 217, 206, 65, 226, 241, 55,
...> 0, 110, 1, 128, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
...> 0>>
iex> ExPlasma.hash(rlp)
{:ok, <<87, 132, 239, 36, 144, 239, 129, 88, 63, 88, 116, 147, 164, 200, 113, 191,
124, 14, 55, 131, 119, 96, 112, 13, 28, 178, 251, 49, 16, 127, 58, 96>>}
"""
defdelegate hash(transaction), to: Transaction
@doc """
Statelessly validate a transaction.
Returns :ok if valid or {:error, {atom, atom}} otherwise
"""
defdelegate validate(transaction), to: Transaction
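# Typical stateless round-trip (sketch; tx_bytes is an RLP-encoded signed transaction):
#
#     {:ok, txn} = ExPlasma.decode(tx_bytes)
#     :ok = ExPlasma.validate(txn)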
end
|
lib/ex_plasma.ex
| 0.862482
| 0.456834
|
ex_plasma.ex
|
starcoder
|
defmodule XDR do
@moduledoc """
Basic XDR usage
"""
@typedoc """
Because the protocol function signatures must match for all types,
arguments are sometimes unused. The `ignored()` type is used to annotate them.
"""
@type ignored() :: any()
alias XDR.Type.Const
@doc """
Build a concrete XDR type by providing the type module and any configuration options
for that type.
### Examples
Some types don't take any configuration.
These include
- `XDR.Type.Bool`
- `XDR.Type.Double`
- `XDR.Type.Float`
- `XDR.Type.HyperInt`
- `XDR.Type.Int`
- `XDR.Type.UnsignedHyperInt`
- `XDR.Type.UnsignedInt`
- `XDR.Type.Void`
Examples:
iex> XDR.build_type(XDR.Type.Int)
%XDR.Type.Int{type_name: "Int", value: nil}
iex> XDR.build_type(XDR.Type.Float)
%XDR.Type.Float{type_name: "Float", value: nil}
`XDR.Type.VariableOpaque` and `XDR.Type.String` have an optional `max_length` option,
with a default max defined in `XDR.Size`.
iex> XDR.build_type(XDR.Type.VariableOpaque)
%XDR.Type.VariableOpaque{type_name: "VariableOpaque", max_length: XDR.Size.max(), value: nil}
iex> XDR.build_type(XDR.Type.VariableOpaque, 100)
%XDR.Type.VariableOpaque{type_name: "VariableOpaque", max_length: 100, value: nil}
`XDR.Type.VariableArray` has a required `type` and an optional `max_length`.
iex> XDR.build_type(XDR.Type.VariableArray, type: XDR.build_type(XDR.Type.Int))
%XDR.Type.VariableArray{type_name: "VariableArray", data_type: %XDR.Type.Int{}, max_length: XDR.Size.max(), values: []}
`XDR.Type.Array` and `XDR.Type.Opaque` are fixed-length, so the length is required when building the type:
iex> XDR.build_type(XDR.Type.Array, type: XDR.build_type(XDR.Type.Bool), length: 4)
%XDR.Type.Array{type_name: "Array", data_type: %XDR.Type.Bool{}, length: 4, values: []}
iex> XDR.build_type(XDR.Type.Opaque, 4)
%XDR.Type.Opaque{type_name: "Opaque", length: 4, value: nil}
iex> XDR.build_type(XDR.Type.Opaque)
** (XDR.Error) A valid size must be provided
`XDR.Type.Enum` is an enumeration with atom keys and signed int values, provided as a keyword list
iex> _enum_type = XDR.build_type(XDR.Type.Enum, metric: 0, imperial: 1, other: 2)
%XDR.Type.Enum{type_name: "Enum", options: [metric: 0, imperial: 1, other: 2], value: nil}
Creating a concrete `XDR.Type.Optional` type requires that a fully built base type be provided:
iex> XDR.build_type(XDR.Type.Optional, XDR.build_type(XDR.Type.Float))
%XDR.Type.Optional{type_name: "Optional", data_type: %XDR.Type.Float{}, value: nil}
`XDR.Type.Struct` allows the nesting of data into trees, by associating a key with an XDR
data type to be stored under that key.
iex> _user_type = XDR.build_type(
...> XDR.Type.Struct,
...> first_name: XDR.build_type(XDR.Type.VariableOpaque),
...> last_name: XDR.build_type(XDR.Type.VariableOpaque),
...> email: XDR.build_type(XDR.Type.VariableOpaque)
...> )
%XDR.Type.Struct{type_name: "Struct", fields: [
first_name: %XDR.Type.VariableOpaque{},
last_name: %XDR.Type.VariableOpaque{},
email: %XDR.Type.VariableOpaque{}
]}
`XDR.Type.Union` is a discriminated union, with its main data type determined
by the value of its switch. The switch can optionally be given a name, and must be
of type `XDR.Type.Bool`, `XDR.Type.Enum`, `XDR.Type.Int`, or `XDR.Type.UnsignedInt`.
Each entry in the `switches` list maps the switch values to one of the `arms`,
or in some cases directly to `XDR.Type.Void` if no value is needed. The `arms`
themselves store the XDR type the union will take in that case.
iex> XDR.build_type(
...> XDR.Type.Union,
...> switch_name: "UserType",
...> switch_type: XDR.build_type(XDR.Type.Enum, business: 0, consumer: 1, none: 2),
...> switches: [
...> business: :business_account,
...> consumer: :consumer_account,
...> none: XDR.Type.Void
...> ],
...> arms: [
...> business_account: XDR.build_type(XDR.Type.Opaque, 16),
...> consumer_account: XDR.build_type(XDR.Type.Int)
...> ]
...> )
%XDR.Type.Union{
switch_name: "UserType",
switch: %XDR.Type.Enum{options: [business: 0, consumer: 1, none: 2]},
switches: [
business: :business_account,
consumer: :consumer_account,
none: XDR.Type.Void
],
arms: [
business_account: %XDR.Type.Opaque{length: 16},
consumer_account: %XDR.Type.Int{}
],
type_name: "Union"
}
Building data types on the fly isn't suitable for a complex problem domain, so
`XDR.Base` is provided to allow an application to pre-define named XDR types for
use throughout the application. See the [readme](readme.html#custom-xdr-type-definitions) for more info.
"""
@spec build_type(XDR.Type.Array, XDR.Type.Array.options()) :: XDR.Type.Array.t()
@spec build_type(XDR.Type.Bool, ignored()) :: XDR.Type.Bool.t()
@spec build_type(XDR.Type.Const, any()) :: XDR.Type.Const.t()
@spec build_type(XDR.Type.Double, ignored()) :: XDR.Type.Double.t()
@spec build_type(XDR.Type.Enum, XDR.Type.Enum.options()) :: XDR.Type.Enum.t()
@spec build_type(XDR.Type.Float, ignored()) :: XDR.Type.Float.t()
@spec build_type(XDR.Type.HyperInt, ignored()) :: XDR.Type.HyperInt.t()
@spec build_type(XDR.Type.Int, ignored()) :: XDR.Type.Int.t()
@spec build_type(XDR.Type.Opaque, XDR.Size.t()) :: XDR.Type.Opaque.t()
@spec build_type(XDR.Type.Optional, XDR.Type.t()) :: XDR.Type.Optional.t()
@spec build_type(XDR.Type.String, XDR.Size.t() | []) :: XDR.Type.String.t()
@spec build_type(XDR.Type.Struct, XDR.Type.Struct.fields()) :: XDR.Type.Struct.t()
@spec build_type(XDR.Type.Union, XDR.Type.Union.options()) :: XDR.Type.Union.t()
@spec build_type(XDR.Type.UnsignedHyperInt, ignored()) :: XDR.Type.UnsignedHyperInt.t()
@spec build_type(XDR.Type.UnsignedInt, ignored()) :: XDR.Type.UnsignedInt.t()
@spec build_type(XDR.Type.VariableArray, XDR.Type.VariableArray.options()) ::
XDR.Type.VariableArray.t()
@spec build_type(XDR.Type.VariableOpaque, XDR.Size.t() | []) :: XDR.Type.VariableOpaque.t()
@spec build_type(XDR.Type.Void, ignored()) :: XDR.Type.Void.t()
def build_type(type, options \\ []) do
XDR.Type.build_type(struct(type), options)
end
@doc """
To build a concrete value, supply the fully-built type and a value appropriate
to that type's definition. For simple types, just supply the raw value:
iex> int_type = XDR.build_type(XDR.Type.Int)
...> {:ok, int_val} = XDR.build_value(int_type, 123)
...> int_val.value
123
iex> us_zip_type = XDR.build_type(XDR.Type.Opaque, 5)
...> {:ok, zip_val} = XDR.build_value(us_zip_type, "70119")
...> zip_val.value
"70119"
iex> enum_type = XDR.build_type(XDR.Type.Enum, metric: 0, imperial: 1, other: 2)
...> {:ok, enum_val} = XDR.build_value(enum_type, :metric)
...> enum_val.value
:metric
iex> bool_type = XDR.build_type(XDR.Type.Bool)
...> {:ok, bool_value} = XDR.build_value(bool_type, true)
...> bool_value.value
true
Arrays work similarly. Just supply a list of appropriate values:
iex> scores_type = XDR.build_type(XDR.Type.VariableArray, type: XDR.build_type(XDR.Type.Int))
...> {:ok, scores} = XDR.build_value(scores_type, [1, 2, 3, 4, 5, 6])
...> Enum.map(scores.values, & &1.value)
[1, 2, 3, 4, 5, 6]
When building a struct's value, we supply the raw values of the inner types:
iex> user_type = XDR.build_type(XDR.Type.Struct,
...> name: XDR.build_type(XDR.Type.VariableOpaque),
...> email: XDR.build_type(XDR.Type.VariableOpaque)
...> )
...> {:ok, value} = XDR.build_value(user_type, name: "Marvin", email: "<EMAIL>")
...> value.fields[:name].value
"Marvin"
An optional type can be specified in a few different ways for convenience:
iex> int_type = XDR.build_type(XDR.Type.Int)
...> optional_int = XDR.build_type(XDR.Type.Optional, int_type)
...> {:ok, no_val_1} = XDR.build_value(optional_int, nil)
...> {:ok, no_val_2} = XDR.build_value(optional_int, false)
...> {:ok, no_val_3} = XDR.build_value(optional_int, {false, "ignored"})
...> {:ok, with_val_1} = XDR.build_value(optional_int, {true, 123})
...> {:ok, with_val_2} = XDR.build_value(optional_int, 123)
...> [no_val_1.value, no_val_2.value, no_val_3.value, with_val_1.value.value, with_val_2.value.value]
[%XDR.Type.Void{}, %XDR.Type.Void{}, %XDR.Type.Void{}, 123, 123]
To build a value for `XDR.Type.Union`, supply a tuple including the switch value (an int or atom),
followed by the value of the corresponding inner type. If the inner type is `XDR.Type.Void`, then
the switch value alone is enough.
iex> account_id_type = XDR.build_type(
...> XDR.Type.Union,
...> switch_name: "UserType",
...> switch_type: XDR.build_type(XDR.Type.Enum, business: 0, consumer: 1, none: 2),
...> switches: [
...> business: :business_account,
...> consumer: :consumer_account,
...> none: XDR.Type.Void
...> ],
...> arms: [
...> business_account: XDR.build_type(XDR.Type.Opaque, 16),
...> consumer_account: XDR.build_type(XDR.Type.Int)
...> ]
...> )
...> {:ok, business_id} = XDR.build_value(account_id_type, {:business, "<KEY>"})
...> {:ok, consumer_id} = XDR.build_value(account_id_type, {:consumer, 23456})
...> {:ok, no_id} = XDR.build_value(account_id_type, {:none, nil})
...> {:ok, no_id_2} = XDR.build_value(account_id_type, :none)
...> [business_id.value.value, consumer_id.value.value, no_id.value, no_id_2.value]
["0123456789abcdef", 23456, %XDR.Type.Void{}, %XDR.Type.Void{}]
NOTE: in all of these examples, the underlying values are accessed directly, which
requires some knowledge of the underlying `XDR.Type` structs. In practice,
it's better to use `XDR.extract_value/1` rather than reaching into these structs.
"""
@spec build_value(XDR.Type.Array.t(), list()) :: {:ok, XDR.Type.Array.t()} | {:error, any()}
@spec build_value(XDR.Type.Bool.t(), XDR.Type.Bool.value()) ::
{:ok, XDR.Type.Bool.t()} | {:error, any()}
@spec build_value(XDR.Type.Double.t(), XDR.Type.Double.value()) ::
{:ok, XDR.Type.Double.t()} | {:error, any()}
@spec build_value(XDR.Type.Enum.t(), atom()) :: {:ok, XDR.Type.Enum.t()} | {:error, any()}
@spec build_value(XDR.Type.Float.t(), XDR.Type.Float.value()) ::
{:ok, XDR.Type.Float.t()} | {:error, any()}
@spec build_value(XDR.Type.HyperInt.t(), XDR.Type.HyperInt.value()) ::
{:ok, XDR.Type.HyperInt.t()} | {:error, any()}
@spec build_value(XDR.Type.Int.t(), XDR.Type.Int.value()) ::
{:ok, XDR.Type.Int.t()} | {:error, any()}
@spec build_value(XDR.Type.Opaque.t(), binary()) :: {:ok, XDR.Type.Opaque.t()} | {:error, any()}
@spec build_value(XDR.Type.Optional.t(), XDR.Type.Optional.value()) ::
{:ok, XDR.Type.Optional.t()} | {:error, any()}
@spec build_value(XDR.Type.String.t(), binary()) :: {:ok, XDR.Type.String.t()} | {:error, any()}
@spec build_value(XDR.Type.Struct.t(), keyword()) ::
{:ok, XDR.Type.Struct.t()} | {:error, any()}
@spec build_value(XDR.Type.Union.t(), XDR.Type.Union.value()) ::
{:ok, XDR.Type.Union.t()} | {:error, any()}
@spec build_value(XDR.Type.UnsignedHyperInt.t(), XDR.Type.UnsignedHyperInt.value()) ::
{:ok, XDR.Type.UnsignedHyperInt.t()} | {:error, any()}
@spec build_value(XDR.Type.UnsignedInt.t(), XDR.Type.UnsignedInt.value()) ::
{:ok, XDR.Type.UnsignedInt.t()} | {:error, any()}
@spec build_value(XDR.Type.VariableArray.t(), list()) ::
{:ok, XDR.Type.VariableArray.t()} | {:error, any()}
@spec build_value(XDR.Type.VariableOpaque.t(), binary()) ::
{:ok, XDR.Type.VariableOpaque.t()} | {:error, any()}
def build_value(type, value) do
{:ok, build_value!(type, value)}
rescue
error -> {:error, error}
end
@doc """
Just like `XDR.build_value/2`, but returns raw values on success instead of tuples,
and raises on failure.
"""
@spec build_value!(XDR.Type.Array.t(), list()) :: XDR.Type.Array.t()
@spec build_value!(XDR.Type.Bool.t(), XDR.Type.Bool.value()) :: XDR.Type.Bool.t()
@spec build_value!(XDR.Type.Double.t(), XDR.Type.Double.value()) :: XDR.Type.Double.t()
@spec build_value!(XDR.Type.Enum.t(), atom()) :: XDR.Type.Enum.t()
@spec build_value!(XDR.Type.Float.t(), XDR.Type.Float.value()) :: XDR.Type.Float.t()
@spec build_value!(XDR.Type.HyperInt.t(), XDR.Type.HyperInt.value()) :: XDR.Type.HyperInt.t()
@spec build_value!(XDR.Type.Int.t(), XDR.Type.Int.value()) :: XDR.Type.Int.t()
@spec build_value!(XDR.Type.Opaque.t(), binary()) :: XDR.Type.Opaque.t()
@spec build_value!(XDR.Type.Optional.t(), XDR.Type.Optional.value()) :: XDR.Type.Optional.t()
@spec build_value!(XDR.Type.String.t(), binary()) :: XDR.Type.String.t()
@spec build_value!(XDR.Type.Struct.t(), keyword()) :: XDR.Type.Struct.t()
@spec build_value!(XDR.Type.Union.t(), XDR.Type.Union.value()) :: XDR.Type.Union.t()
@spec build_value!(XDR.Type.UnsignedHyperInt.t(), XDR.Type.UnsignedHyperInt.value()) ::
XDR.Type.UnsignedHyperInt.t()
@spec build_value!(XDR.Type.UnsignedInt.t(), XDR.Type.UnsignedInt.value()) ::
XDR.Type.UnsignedInt.t()
@spec build_value!(XDR.Type.VariableArray.t(), list()) :: XDR.Type.VariableArray.t()
@spec build_value!(XDR.Type.VariableOpaque.t(), binary()) :: XDR.Type.VariableOpaque.t()
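# An %XDR.Type.Const{} wrapper is unwrapped first, so a named constant can be
# supplied anywhere its underlying raw value is expected.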
def build_value!(type, %Const{value: value}) do
XDR.Type.build_value!(type, value)
end
def build_value!(type, value) do
XDR.Type.build_value!(type, value)
end
@doc """
Encode an XDR value (created with e.g. `XDR.build_value/2`) into its binary representation.
iex> {:ok, value} = XDR.build_value(XDR.build_type(XDR.Type.Opaque, 6), "abcdef")
...> XDR.encode(value)
{:ok, "abcdef" <> <<0, 0>>}
Each type's binary representation is determined by its own rules, as defined in the
XDR spec. In the case of Opaque, the binary contents are passed through, with padding
added to achieve an even multiple of 4 bytes. Variable-length types will be preceded by
a four-byte integer describing the length of the contained value.
iex> {:ok, value} = XDR.build_value(XDR.build_type(XDR.Type.VariableOpaque), "abcdef")
...> XDR.encode(value)
{:ok, <<0, 0, 0, 6>> <> "abcdef" <> <<0, 0>>}
Note that type info is not contained in the binary representation, and is therefore
required to decode the binary.
"""
@spec encode(XDR.Type.t()) :: {:ok, binary()} | {:error, any()}
def encode(type_with_value) do
{:ok, encode!(type_with_value)}
rescue
error -> {:error, error}
end
@doc """
Just like `XDR.encode/1`, but returns raw binaries on success instead of tuples,
and raises on failure.
"""
@spec encode!(XDR.Type.t()) :: binary()
def encode!(type_with_value) do
XDR.Type.encode!(type_with_value)
end
@doc """
Decode a binary representation into an XDR type with value. Since the binary
representation does not contain type info itself, the type must be supplied as
the first parameter.
iex> encoding = <<0, 0, 0, 6>> <> "abcdef" <> <<0, 0>>
...> {:ok, type_with_value} = XDR.decode(XDR.build_type(XDR.Type.VariableOpaque), encoding)
...> {type_with_value.length, type_with_value.value}
{6, "abcdef"}
iex> encoding = "abcdef" <> <<0, 0>>
...> {:ok, type_with_value} = XDR.decode(XDR.build_type(XDR.Type.Opaque, 6), encoding)
...> {type_with_value.length, type_with_value.value}
{6, "abcdef"}
As with `XDR.build_value/2` above, we're accessing the values directly inside
the type structs. A more practical way to access inner values is to use `XDR.extract_value/1`.
"""
@spec decode(XDR.Type.t(), binary()) :: {:ok, XDR.Type.t()} | {:error, any()}
def decode(type, encoding) do
{:ok, decode!(type, encoding)}
rescue
error -> {:error, error}
end
@doc """
Just like `XDR.decode/2`, but returns raw values on success instead of tuples,
and raises on failure.
"""
@spec decode!(XDR.Type.t(), binary()) :: XDR.Type.t()
def decode!(type, encoding) do
case XDR.Type.decode!(type, encoding) do
{type_with_data, ""} ->
type_with_data
{_type_with_data, extra} ->
raise XDR.Error, message: "Unexpected trailing bytes", data: extra
end
end
@doc """
XDR data structures created from `XDR.build_value/2` and `XDR.decode/2` include
lots of type metadata, and the different types don't always store their inner
state in the same way. `XDR.extract_value/1` acts as a uniform way to pull
out the underlying values as native elixir types.
iex> us_address = XDR.build_type(XDR.Type.Struct,
...> street: XDR.build_type(XDR.Type.VariableOpaque),
...> city: XDR.build_type(XDR.Type.VariableOpaque),
...> state: XDR.build_type(XDR.Type.Opaque, 2),
...> zip: XDR.build_type(XDR.Type.Opaque, 5)
...> )
...> user_type = XDR.build_type(XDR.Type.Struct,
...> name: XDR.build_type(XDR.Type.VariableOpaque),
...> email: XDR.build_type(XDR.Type.VariableOpaque),
...> address: us_address
...> )
...> {:ok, user} = XDR.build_value(user_type,
...> name: "Marvin",
...> email: "<EMAIL>",
...> address: [
...> street: "123 Shakedown St",
...> city: "New Orleans",
...> state: "LA",
...> zip: "70119",
...> ]
...> )
...> {:ok, user_info} = XDR.extract_value(user)
...> user_info
[
name: "Marvin",
email: "<EMAIL>",
address: [
street: "123 Shakedown St",
city: "New Orleans",
state: "LA",
zip: "70119",
]
]
"""
@spec extract_value(XDR.Type.t()) :: {:ok, any()} | {:error, any()}
def extract_value(type_with_value) do
{:ok, extract_value!(type_with_value)}
rescue
error -> {:error, error}
end
@doc """
Just like `XDR.extract_value/1`, but returns raw values on success instead of tuples,
and raises on failure.
"""
@spec extract_value!(XDR.Type.t()) :: any()
def extract_value!(type_with_value) do
XDR.Type.extract_value!(type_with_value)
end
end
|
lib/xdr.ex
| 0.920128
| 0.546678
|
xdr.ex
|
starcoder
|
defmodule SyncPrimitives.CountDownLatch do
@moduledoc """
A CountDownLatch expects `count` calls to `count_down/2` before calls to
`await/2` can continue.
A CountDownLatch is initialized with a `count`.
`await/2` blocks until the current count reaches 0 due to invocations of the
`count_down/2` method, after which all blocked processes are unblocked.
Any subsequent invocations of `await/2` return immediately. This is a
one-shot phenomenon -- the count cannot be reset. If you need a version that
resets the count, consider using a `SyncPrimitives.CyclicBarrier`.
## Example
iex> latch = SyncPrimitives.CountDownLatch.start(2, fn -> IO.puts("latch done") end)
{SyncPrimitives.CountDownLatch, #PID<0.227.0>}
iex> spawn_link(fn ->
...> IO.puts("before wait")
...> SyncPrimitives.CountDownLatch.await(latch)
...> IO.puts("after wait")
...> end)
before wait
#PID<0.233.0>
iex> # nothing happens for a while
nil
iex> SyncPrimitives.CountDownLatch.count_down(latch)
:ok
iex> SyncPrimitives.CountDownLatch.count_down(latch)
latch done
after wait
:ok
iex> SyncPrimitives.CountDownLatch.stop(latch)
:ok
"""
require Record
Record.defrecordp(:latch, __MODULE__, pid: nil)
@type latch :: record(:latch, pid: pid)
@server_module Module.concat(__MODULE__, Server)
@spec start(pos_integer, nil | (() -> any)) :: latch
def start(count, action \\ nil)
when is_integer(count) and count > 0 and (action === nil or is_function(action, 0)) do
{:ok, server_pid} = GenServer.start_link(@server_module, count: count, action: action)
latch(pid: server_pid)
end
@spec stop(latch) :: :ok
def stop(latch(pid: pid)) do
GenServer.stop(pid)
end
@spec alive?(latch) :: boolean
def alive?(latch(pid: pid)) do
Process.alive?(pid)
end
@spec count(latch) :: false | integer
def count(latch(pid: pid)) do
GenServer.call(pid, :count)
end
@spec count_down(latch, pos_integer) :: :ok
def count_down(latch(pid: pid), i \\ 1) when is_integer(i) and i > 0 do
GenServer.call(pid, {:count_down, i})
end
@spec await(latch, :infinity | integer) :: :ok | :timeout
def await(latch(pid: pid), timeout \\ :infinity)
when timeout === :infinity or is_integer(timeout) do
case call(pid, :await, timeout) do
:ok ->
:ok
:timeout ->
:timeout
end
end
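# Wraps GenServer.call/3 so that a call timeout surfaces as :timeout instead of
# exiting the calling process.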
defp call(pid, request, timeout) do
try do
GenServer.call(pid, request, timeout)
catch
:exit, {:timeout, _} ->
:timeout
end
end
end
|
lib/sync_primitives/count_down_latch.ex
| 0.811003
| 0.434221
|
count_down_latch.ex
|
starcoder
|
defmodule ExDoc.Formatter.HTML.Autolink do
@moduledoc """
Conveniences for autolinking locals, types and more.
"""
import ExDoc.Formatter.HTML.Templates, only: [h: 1, enc_h: 1]
@elixir_docs "https://hexdocs.pm/"
@erlang_docs "http://www.erlang.org/doc/man/"
@basic_types_page "typespecs.html#basic-types"
@built_in_types_page "typespecs.html#built-in-types"
@basic_types [
any: 0,
none: 0,
atom: 0,
map: 0,
pid: 0,
port: 0,
reference: 0,
struct: 0,
tuple: 0,
integer: 0,
float: 0,
neg_integer: 0,
non_neg_integer: 0,
pos_integer: 0,
list: 1,
nonempty_list: 1,
improper_list: 2,
maybe_improper_list: 2,
]
@built_in_types [
term: 0,
arity: 0,
as_boolean: 1,
binary: 0,
bitstring: 0,
boolean: 0,
byte: 0,
char: 0,
charlist: 0,
nonempty_charlist: 0,
fun: 0,
function: 0,
identifier: 0,
iodata: 0,
iolist: 0,
keyword: 0,
keyword: 1,
list: 0,
nonempty_list: 0,
maybe_improper_list: 0,
nonempty_maybe_improper_list: 0,
mfa: 0,
module: 0,
no_return: 0,
node: 0,
number: 0,
struct: 0,
timeout: 0
]
kernel_exports = Kernel.__info__(:functions) ++ Kernel.__info__(:macros)
special_form_exports = Kernel.SpecialForms.__info__(:macros)
@basic_type_strings (for {f, a} <- @basic_types, do: "t:#{f}/#{a}")
@built_in_type_strings (for {f, a} <- @built_in_types, do: "t:#{f}/#{a}")
@kernel_function_strings (for {f, a} <- kernel_exports, do: "#{f}/#{a}")
@special_form_strings (for {f, a} <- special_form_exports, do: "#{f}/#{a}")
@doc """
Receives a list of module nodes and autolinks all docs and typespecs.
"""
def all(modules, extension, extra_lib_dirs) do
aliases = Enum.map modules, &(&1.module)
lib_dirs = extra_lib_dirs ++ elixir_lib_dirs() ++ erlang_lib_dirs()
modules
|> Enum.map(&Task.async(fn -> process_module(&1, modules, aliases, extension, lib_dirs) end))
|> Enum.map(&Task.await(&1, :infinity))
end
defp process_module(module, modules, aliases, extension, lib_dirs) do
module
|> all_docs(modules, aliases, extension, lib_dirs)
|> all_typespecs(aliases, lib_dirs)
end
defp module_to_string(module) do
inspect module.module
end
defp all_docs(module, modules, aliases, extension, lib_dirs) do
locals =
for doc <- module.docs,
prefix = doc_prefix(doc),
entry <- [doc.id | doc.defaults],
do: prefix <> entry,
into: Enum.map(module.typespecs, &("t:" <> &1.id))
moduledoc =
if module.doc do
module.doc
|> local_doc(locals, aliases, extension, lib_dirs)
|> project_doc(modules, module.id, extension, lib_dirs)
end
docs = for module_node <- module.docs do
doc =
if module_node.doc do
module_node.doc
|> local_doc(locals, aliases, extension, lib_dirs)
|> project_doc(modules, module.id, extension, lib_dirs)
end
%{module_node | doc: doc}
end
typedocs = for module_node <- module.typespecs do
doc =
if module_node.doc do
module_node.doc
|> local_doc(locals, aliases, extension, lib_dirs)
|> project_doc(modules, module.id, extension, lib_dirs)
end
%{module_node | doc: doc}
end
%{module | doc: moduledoc, docs: docs, typespecs: typedocs}
end
defp all_typespecs(module, aliases, lib_dirs) do
locals = Enum.map module.typespecs, fn
%ExDoc.TypeNode{name: name, arity: arity} -> {name, arity}
end
typespecs = for typespec <- module.typespecs do
%{typespec | spec: typespec(typespec.spec, locals, aliases, lib_dirs)}
end
docs = for module_node <- module.docs do
%{module_node | specs: Enum.map(module_node.specs, &typespec(&1, locals, aliases, lib_dirs))}
end
%{module | typespecs: typespecs, docs: docs}
end
@doc """
Converts the given `ast` to string while linking the locals
given by `typespecs` as HTML.
"""
def typespec(ast, typespecs, aliases, lib_dirs \\ elixir_lib_dirs() ++ erlang_lib_dirs()) do
if formatter_available?() do
format_typespec(ast, typespecs, aliases, lib_dirs)
else
typespec_to_string(ast, typespecs, aliases, lib_dirs)
end
end
defp typespec_to_string({:when, _, [{:::, _, [left, {:|, _, _} = center]}, right]} = ast, typespecs, aliases, lib_dirs) do
if short_typespec?(ast) do
normalize_left(ast, typespecs, aliases, lib_dirs)
else
normalize_left(left, typespecs, aliases, lib_dirs) <>
" ::\n " <> typespec_with_new_line(center, typespecs, aliases, lib_dirs) <>
" when " <> String.slice(format_typespec(right, typespecs, aliases, lib_dirs), 1..-2)
end
end
defp typespec_to_string({:::, _, [left, {:|, _, _} = center]} = ast, typespecs, aliases, lib_dirs) do
if short_typespec?(ast) do
normalize_left(ast, typespecs, aliases, lib_dirs)
else
normalize_left(left, typespecs, aliases, lib_dirs) <>
" ::\n " <> typespec_with_new_line(center, typespecs, aliases, lib_dirs)
end
end
defp typespec_to_string(other, typespecs, aliases, lib_dirs) do
normalize_left(other, typespecs, aliases, lib_dirs)
end
defp short_typespec?(ast) do
byte_size(Macro.to_string(ast)) <= 70
end
defp typespec_with_new_line({:|, _, [left, right]}, typespecs, aliases, lib_dirs) do
format_typespec(left, typespecs, aliases, lib_dirs) <>
" |\n " <> typespec_with_new_line(right, typespecs, aliases, lib_dirs)
end
defp typespec_with_new_line(other, typespecs, aliases, lib_dirs) do
format_typespec(other, typespecs, aliases, lib_dirs)
end
defp normalize_left({:::, _, [{name, meta, args}, right]}, typespecs, aliases, lib_dirs) do
new_args =
Enum.map(args, &[self(), format_typespec(&1, typespecs, aliases, lib_dirs)])
new_left =
Macro.to_string {name, meta, new_args}, fn
[pid, string], _ when pid == self() -> string
_, string -> string
end
new_left <> " :: " <> format_typespec(right, typespecs, aliases, lib_dirs)
end
defp normalize_left({:when, _, [{:::, _, _} = left, right]}, typespecs, aliases, lib_dirs) do
normalize_left(left, typespecs, aliases, lib_dirs) <>
" when " <> String.slice(format_typespec(right, typespecs, aliases, lib_dirs), 1..-2)
end
defp normalize_left(ast, typespecs, aliases, lib_dirs) do
format_typespec(ast, typespecs, aliases, lib_dirs)
end
defp format_typespec(ast, typespecs, aliases, lib_dirs) do
{formatted, placeholders} = format_and_extract_typespec_placeholders(ast, typespecs, aliases, lib_dirs)
replace_placeholders(formatted, placeholders)
end
@doc false
def format_and_extract_typespec_placeholders(ast, typespecs, aliases, lib_dirs) do
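# Autolinked forms are swapped for placeholder atoms of roughly the same
# width (e.g. :_pp1_) so the formatter keeps its layout decisions; the HTML
# links are substituted back in by replace_placeholders/2.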
ref = make_ref()
elixir_source = get_source(Kernel, aliases, lib_dirs)
{formatted_ast, placeholders} =
Macro.prewalk(ast, %{}, fn
{:::, _, [{name, meta, args}, right]}, placeholders when is_atom(name) and is_list(args) ->
{{:::, [], [{{ref, name}, meta, args}, right]}, placeholders}
# Consume this form so that we don't autolink `foo` in `foo :: bar`
{{^ref, name}, _, args}, placeholders when is_atom(name) and is_list(args) ->
{{name, [], args}, placeholders}
{name, _, args} = form, placeholders when is_atom(name) and is_list(args) ->
arity = length(args)
cond do
{name, arity} in @basic_types ->
url = elixir_source <> @basic_types_page
put_placeholder(form, url, placeholders)
{name, arity} in @built_in_types ->
url = elixir_source <> @built_in_types_page
put_placeholder(form, url, placeholders)
{name, arity} in typespecs ->
n = enc_h("#{name}")
url = "#t:#{n}/#{arity}"
put_placeholder(form, url, placeholders)
true ->
{form, placeholders}
end
{{:., _, [alias, name]}, _, args} = form, placeholders when is_atom(name) and is_list(args) ->
alias = expand_alias(alias)
if source = get_source(alias, aliases, lib_dirs) do
url = type_remote_url(source, alias, name, args)
put_placeholder(form, url, placeholders)
else
{form, placeholders}
end
form, placeholders ->
{form, placeholders}
end)
{format_ast(formatted_ast), placeholders}
end
defp type_remote_url(@erlang_docs = source, module, name, _args) do
module = enc_h("#{module}")
name = enc_h("#{name}")
"#{source}#{module}.html#type-#{name}"
end
defp type_remote_url(source, alias, name, args) do
name = enc_h("#{name}")
"#{source}#{enc_h(inspect alias)}.html#t:#{name}/#{length(args)}"
end
defp typespec_string_to_link(string, url) do
{string_to_link, _string_with_parens} = split_string_to_link(string)
~s[<a href="#{url}">#{h(string_to_link)}</a>]
end
defp put_placeholder(form, url, placeholders) do
string = Macro.to_string(form)
link = typespec_string_to_link(string, url)
case Enum.find(placeholders, fn {_key, value} -> value == link end) do
{placeholder, _} ->
form = put_elem(form, 0, placeholder)
{form, placeholders}
nil ->
count = map_size(placeholders) + 1
placeholder = placeholder(string, count)
form = put_elem(form, 0, placeholder)
{form, Map.put(placeholders, placeholder, link)}
end
end
defp placeholder(string, count) do
[name | _] = String.split(string, "(", trim: true)
name_size = String.length(name)
int_size = count |> Integer.digits() |> length()
underscores_size = 2
pad = String.duplicate("p", max(name_size - int_size - underscores_size, 1))
:"_#{pad}#{count}_"
end
defp replace_placeholders(string, placeholders) do
Regex.replace(~r"_p+\d+_", string, &Map.fetch!(placeholders, String.to_atom(&1)))
end
defp format_ast(ast) do
string = Macro.to_string(ast)
if formatter_available?() do
string
|> Code.format_string!(line_length: 80)
|> IO.iodata_to_binary()
else
string
end
end
@doc """
Create links to locally defined functions, specified in `locals`
as a list of `fun/arity` strings.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[test/1](url)`. If the function doesn't touch the leading
or trailing `]`, e.g. `[my link link/1 is here](url)`, the fun/arity
will get translated to the new href of the function.
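For instance, with a hypothetical input and `locals = ["run/1"]`:

    local_doc("Calls `run/1` internally.", ["run/1"])
    #=> "Calls [`run/1`](#run/1) internally."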
"""
def local_doc(bin, locals, aliases \\ [], extension \\ ".html", lib_dirs \\ elixir_lib_dirs()) when is_binary(bin) do
fun_re = Regex.source(~r{(([ct]:)?([a-z_]+[A-Za-z_\d]*[\\?\\!]?|[\{\}=&\\|\\.<>~*^@\\+\\%\\!-]+)/\d+)})
regex = ~r{(?<!\[)`\s*(#{fun_re})\s*`(?!\])}
elixir_doc = get_source(Kernel, aliases, lib_dirs)
Regex.replace(regex, bin, fn all, match ->
{prefix, _, function, arity} = split_function(match)
text = "`#{function}/#{arity}`"
cond do
match in locals ->
"[#{text}](##{prefix}#{enc_h function}/#{arity})"
match in @basic_type_strings ->
"[#{text}](#{elixir_doc}#{@basic_types_page})"
match in @built_in_type_strings ->
"[#{text}](#{elixir_doc}#{@built_in_types_page})"
match in @kernel_function_strings ->
"[#{text}](#{elixir_doc}Kernel#{extension}##{prefix}#{enc_h function}/#{arity})"
match in @special_form_strings ->
"[#{text}](#{elixir_doc}Kernel.SpecialForms#{extension}##{prefix}#{enc_h function}/#{arity})"
true ->
all
end
end)
end
@doc """
Creates links to modules and functions defined in the project.
"""
def project_doc(bin, modules, module_id \\ nil,
extension \\ ".html", lib_dirs \\ elixir_lib_dirs()) when is_binary(bin) do
project_types =
for module <- modules,
type <- module.typespecs,
do: "t:" <> module.id <> "." <> type.id
project_docs =
for module <- modules,
doc <- module.docs,
prefix = doc_prefix(doc),
entry <- [doc.id | doc.defaults],
do: prefix <> module.id <> "." <> entry,
into: project_types
project_modules =
modules
|> Enum.map(&module_to_string/1)
|> Enum.uniq()
bin
|> local_doc(project_docs, [], extension, lib_dirs)
|> elixir_functions(project_docs, extension, lib_dirs)
|> elixir_modules(project_modules, module_id, extension, lib_dirs)
|> erlang_functions()
end
defp doc_prefix(%{type: c}) when c in [:callback, :macrocallback], do: "c:"
defp doc_prefix(%{type: _}), do: ""
@doc """
Create links to elixir functions defined in the project and Elixir itself.
Project functions are specified in `project_funs` as a list of
`Module.fun/arity` tuples.
Functions wrapped in markdown url syntax can link to other docs if
the url is wrapped in backticks, otherwise the url is used as is.
If the function doesn't touch the leading or trailing `]`, e.g.
`[my link Module.link/1 is here](url)`, the Module.fun/arity
will get translated to the new href of the function.
"""
def elixir_functions(bin, project_funs, extension \\ ".html", lib_dirs \\ elixir_lib_dirs()) when is_binary(bin) do
bin
|> replace_custom_links(project_funs, extension, lib_dirs)
|> replace_normal_links(project_funs, extension, lib_dirs)
end
module_re = Regex.source(~r{(([A-Z][A-Za-z_\d]+)\.)+})
fun_re = Regex.source(~r{([ct]:)?(#{module_re}([a-z_]+[A-Za-z_\d]*[\\?\\!]?|[\{\}=&\\|\\.<>~*^@\\+\\%\\!-]+)/\d+)})
@custom_re ~r{\[(.*?)\]\(`(#{fun_re})`\)}
@normal_re ~r{(?<!\[)`\s*(#{fun_re})\s*`(?!\])}
defp replace_custom_links(bin, project_funs, extension, lib_dirs) do
Regex.replace(@custom_re, bin, fn all, text, match ->
replacement(all, match, project_funs, extension, lib_dirs, text)
end)
end
defp replace_normal_links(bin, project_funs, extension, lib_dirs) do
Regex.replace(@normal_re, bin, fn all, match ->
replacement(all, match, project_funs, extension, lib_dirs)
end)
end
defp replacement(all, match, project_funs, extension, lib_dirs, text \\ nil) do
{prefix, module, function, arity} = split_function(match)
text = text || "`#{module}.#{function}/#{arity}`"
cond do
match in project_funs ->
"[#{text}](#{module}#{extension}##{prefix}#{enc_h function}/#{arity})"
doc = lib_dirs_to_doc("Elixir." <> module, lib_dirs) ->
"[#{text}](#{doc}#{module}.html##{prefix}#{enc_h function}/#{arity})"
true ->
all
end
end
@doc """
Create links to elixir modules defined in the project and
in Elixir itself.
Ignores modules which are already wrapped in markdown url syntax,
e.g. `[Module](url)`. If the module name doesn't touch the leading
or trailing `]`, e.g. `[my link Module is here](url)`, the Module
will get translated to the new href of the module.
"""
def elixir_modules(bin, modules, module_id \\ nil,
extension \\ ".html", lib_dirs \\ elixir_lib_dirs()) when is_binary(bin) do
regex = ~r{(?<!\[)`\s*(([A-Z][A-Za-z_\d]+\.?)+)\s*`(?!\])}
Regex.replace(regex, bin, fn all, match ->
cond do
match == module_id ->
"[`#{match}`](#{match}#{extension}#content)"
match in modules ->
"[`#{match}`](#{match}#{extension})"
doc = lib_dirs_to_doc("Elixir." <> match, lib_dirs) ->
"[`#{match}`](#{doc}#{match}.html)"
true ->
all
end
end)
end
defp split_function("c:" <> bin) do
{_, mod, fun, arity} = split_function(bin)
{"c:", mod, fun, arity}
end
defp split_function("t:" <> bin) do
{_, mod, fun, arity} = split_function(bin)
{"t:", mod, fun, arity}
end
defp split_function(bin) do
[modules, arity] = String.split(bin, "/")
{mod, name} =
modules
|> String.replace(~r{([^\.])\.}, "\\1 ") # this handles the case of the ".." function
|> String.split(" ")
|> Enum.split(-1)
{"", Enum.join(mod, "."), hd(name), arity}
end
@doc """
Create links to Erlang functions in code blocks.
Only links modules that are in the Erlang distribution `lib_dir`
and only link functions in those modules that export a function of the
same name and arity.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[:module.test/1](url)`. If the function doesn't touch the leading
or trailing `]`, e.g. `[my link :module.link/1 is here](url)`, the :module.fun/arity
will get translated to the new href of the function.
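For instance, assuming `:lists` resolves to the Erlang distribution docs:

    erlang_functions("Uses `:lists.keyfind/3`.")
    #=> "Uses [`:lists.keyfind/3`](http://www.erlang.org/doc/man/lists.html#keyfind-3)"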
"""
def erlang_functions(bin) when is_binary(bin) do
lib_dirs = erlang_lib_dirs()
regex = ~r{(?<!\[)`\s*:([a-z_]+\.[0-9a-zA-Z_!\\?]+/\d+)\s*`(?!\])}
Regex.replace(regex, bin, fn all, match ->
{_, module, function, arity} = split_function(match)
if doc = lib_dirs_to_doc(module, lib_dirs) do
"[`:#{match}`](#{doc}#{module}.html##{function}-#{arity})"
else
all
end
end)
end
## Helpers
defp lib_dirs_to_doc(module, lib_dirs) do
case :code.where_is_file('#{module}.beam') do
:non_existing ->
nil
path ->
path = List.to_string(path)
Enum.find_value(lib_dirs, fn {lib_dir, doc} ->
String.starts_with?(path, lib_dir) and doc
end)
end
end
defp elixir_lib_dirs do
case Application.fetch_env(:ex_doc, :elixir_lib_dirs) do
{:ok, lib_dirs} ->
lib_dirs
:error ->
lib_dir =
case :code.where_is_file('Elixir.Kernel.beam') do
:non_existing ->
[0]
path ->
path
|> Path.dirname()
|> Path.dirname()
|> Path.dirname()
end
lib_dirs =
for app <- ~w(elixir eex iex logger mix ex_unit) do
{lib_dir <> "/" <> app <> "/ebin", @elixir_docs <> app <> "/"}
end
Application.put_env(:ex_doc, :elixir_lib_dirs, lib_dirs)
lib_dirs
end
end
defp erlang_lib_dirs do
case Application.fetch_env(:ex_doc, :erlang_lib_dirs) do
{:ok, lib_dirs} ->
lib_dirs
:error ->
lib_dirs = [{Path.expand(:code.lib_dir), @erlang_docs}]
Application.put_env(:ex_doc, :erlang_lib_dirs, lib_dirs)
lib_dirs
end
end
defp split_string_to_link(string) do
case :binary.split(string, "(") do
[head, tail] -> {head, "(" <> tail}
[head] -> {head, ""}
end
end
defp expand_alias({:__aliases__, _, [h|t]}) when is_atom(h), do: Module.concat([h|t])
defp expand_alias(atom) when is_atom(atom), do: atom
defp expand_alias(_), do: nil
defp get_source(alias, aliases, lib_dirs) do
cond do
is_nil(alias) -> nil
alias in aliases -> ""
doc = lib_dirs_to_doc(alias, lib_dirs) -> doc
true -> nil
end
end
# TODO: remove when we require Elixir v1.6+
defp formatter_available? do
function_exported?(Code, :format_string!, 2)
end
end
|
spec/fixtures/mix/deps/ex_doc/lib/ex_doc/formatter/html/autolink.ex
| 0.625324
| 0.431464
|
autolink.ex
|
starcoder
|
defmodule QueryElf.Plugins.Preloader do
@moduledoc """
A module for preloading associations using joins.
Based on https://hexdocs.pm/ecto_preloader (licensed under WTFPL)
By default, Ecto preloads associations using a separate query for each association, which can degrade performance.
You could make it run faster by using a combination of join/preload, but that requires a bit of boilerplate (see example below).
With `Ecto.Preloader`, you can accomplish this with just one line of code.
## Example using just Ecto
It requires calling `Query.join/4`, `Query.assoc/3` and `Query.preload/2`
```
import Ecto.Query
Invoice
|> join(:left, [i], assoc(i, :customer), as: :customer)
|> join(:left, [i, c], assoc(c, :account), as: :account)
|> join(:left, [i], assoc(i, :lines), as: :lines)
|> preload([lines: v, customer: c, account: a], lines: v, customer: {c, [account: a]})
|> Repo.all()
```
## Example using Ecto.Preloader
Just one method call:
```
import Ecto.Query
import Ecto.Preloader
Invoice
|> join_preload([:customer, :account])
|> join_preload([:lines])
|> Repo.all()
```
"""
import Ecto, only: [assoc: 2]
require QueryElf.Plugins.ReusableJoin
# alias QueryElf.Plugins.Preloader
# alias Ecto.Query.Builder.{Join, Preload}
@doc "Join + Preload (up to three nested levels of) associations"
defmacro join_preload(query, associations) when is_list(associations) do
quote do: preload_join(unquote(query), unquote_splicing(associations))
end
defmacro join_preload(query, association) when is_atom(association) do
quote do: preload_join(unquote(query), unquote(association))
end
defmacro join_preload(query, associations) do
IO.inspect(join_preload_failed: associations)
query
end
# defp do_preload_join(query, association, bindings, expr, preload_bindings, preload_expr, caller) do
# IO.inspect(query: query)
# #IO.inspect(queryable: Ecto.Queryable.to_query(query))
# #IO.inspect(binding: bindings)
# #IO.inspect(expr: expr)
# #IO.inspect(association: association)
# #IO.inspect(preload_bindings: preload_bindings)
# #IO.inspect(preload_expr: preload_expr)
# query
# |> Join.build(:left, bindings, expr, nil, nil, association, nil, nil, caller)
# # |> reusable_join(:left, (bindings), (expr), as: association)
# |> elem(0)
# # |> IO.inspect
# |> Preload.build(preload_bindings, preload_expr, caller)
# end
defmacro do_preload_join(query, association, bindings, expr, preload_bindings, preload_expr ) do
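# Builds a deduplicated left join (via ReusableJoin, aliased to the
# association name) plus the matching preload, all at compile time.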
#IO.inspect(query: query)
#IO.inspect(queryable: Ecto.Queryable.to_query(query))
#IO.inspect(bindings: bindings)
#IO.inspect(expr: expr)
#IO.inspect(association: association)
# on = quote do: [{as, unquote(association)}] ++ unquote(opts) # FIXME if we need to pass on
opts = quote do: [as: unquote(association)]
#IO.inspect(on: on)
#IO.inspect(preload_bindings: preload_bindings)
#IO.inspect(preload_expr: preload_expr)
quote do
unquote(query)
|> QueryElf.Plugins.ReusableJoin.do_reusable_join_as(:left, unquote(bindings), unquote(expr), unquote(opts), unquote(association))
|> preload(unquote(preload_bindings), unquote(preload_expr))
# |> IO.inspect
end
end
#doc "Join + Preload an association"
defmacro preload_join(query, association) when is_atom(association) do
# association = quote do: unquote(association)
bindings = quote do: [root]
expr = quote do: assoc(root, unquote(association))
preload_bindings = quote do: [{unquote(association), ass}]
preload_expr = quote do: [{unquote(association), ass}]
quote do: do_preload_join(unquote(query), unquote(association), unquote(bindings), unquote(expr), unquote(preload_bindings), unquote(preload_expr) )
end
#doc "Join + Preload 2 nested associations"
defmacro preload_join(query, via_association, association ) when is_atom(via_association) and is_atom(association) do
query = quote do: preload_join(unquote(query), unquote(via_association))
# association = quote do: unquote(association)
# via_association_pos = quote do: named_binding_position(unquote(query), unquote(via_association))
#IO.inspect(via_association_pos: via_association_pos)
bindings = quote do: [root, {unquote(via_association), via}]
expr = quote do: assoc(via, unquote(association))
preload_bindings = quote do: [root,
{unquote(association), ass},
{unquote(via_association), via}
]
# preload_expr = quote do: [{unquote(via_association), unquote(association)}]
preload_expr = quote do: [
{
unquote(via_association), {via,
[{unquote(association), ass}]
}
}
]
quote do: do_preload_join(unquote(query), unquote(association), unquote(bindings), unquote(expr), unquote(preload_bindings), unquote(preload_expr) )
end
#doc "Join + Preload an assoc within 3 levels of nested associations"
defmacro preload_join(query, via_association_1, via_association_2, association) when is_atom(via_association_1) and is_atom(via_association_2) and is_atom(association) do
query = quote do: preload_join(unquote(query), unquote(via_association_1), unquote(via_association_2))
# |> IO.inspect(label: "pre level 3")
# association = quote do: unquote(association)
# via_association_1_pos = named_binding_position(query, via_association_1)
#IO.inspect(via_association_1_pos: via_association_1_pos)
# bindings = quote do: [root, {via_2, unquote(via_association_2)}] # bad
bindings = quote do: [root, {unquote(via_association_2), via_2}] # good
expr = quote do: assoc(via_2, unquote(association))
# preload_bindings = quote do: [root, a, b, x]
# preload_expr = quote do: [{unquote(via_association_1), [{unquote(via_association_2), [unquote(association)]}]}]
preload_bindings = quote do: [root,
{unquote(association), ass},
{unquote(via_association_1), via_1},
{unquote(via_association_2), via_2}
]
preload_expr = quote do: [
{
unquote(via_association_1), {via_1,
[
{unquote(via_association_2), {via_2,
[{unquote(association), ass}]
}
}
]
}
}
]
quote do: do_preload_join(unquote(query), unquote(association), unquote(bindings), unquote(expr), unquote(preload_bindings), unquote(preload_expr))
# |> IO.inspect(label: "post level 3")
end
#doc "Join + Preload an assoc within 4 levels of nested associations"
defmacro preload_join(query, via_association_1, via_association_2, via_association_3, association) when is_atom(via_association_1) and is_atom(via_association_2) and is_atom(via_association_3) and is_atom(association) do
query = quote do: preload_join(unquote(query), unquote(via_association_1), unquote(via_association_2), unquote(via_association_3))
# |> IO.inspect(label: "pre level 4")
bindings = quote do: [root, {unquote(via_association_3), via_3}]
expr = quote do: assoc(via_3, unquote(association))
# preload_bindings = quote do: [root, a, b, x]
# preload_expr = quote do: [{unquote(via_association_1), [{unquote(via_association_2), [unquote(association)]}]}]
preload_bindings = quote do: [root,
{unquote(association), ass},
{unquote(via_association_1), via_1},
{unquote(via_association_2), via_2},
{unquote(via_association_3), via_3}
]
preload_expr = quote do: [
{
unquote(via_association_1), {via_1,
[
{unquote(via_association_2), {via_2,
[
{unquote(via_association_3), {via_3,
[{unquote(association), ass}]
}
}
]
}
}
]
}
}
]
quote do: do_preload_join(unquote(query), unquote(association), unquote(bindings), unquote(expr), unquote(preload_bindings), unquote(preload_expr))
# |> IO.inspect(label: "post level 4")
end
#doc "Join + Preload an assoc within 5 levels of nested associations"
defmacro preload_join(query, via_association_1, via_association_2, via_association_3, via_association_4, association) when is_atom(via_association_1) and is_atom(via_association_2) and is_atom(via_association_3) and is_atom(via_association_4) and is_atom(association) do
query = quote do: preload_join(unquote(query), unquote(via_association_1), unquote(via_association_2), unquote(via_association_3), unquote(via_association_4))
# |> IO.inspect(label: "pre level 5")
bindings = quote do: [root, {unquote(via_association_4), via_4}]
expr = quote do: assoc(via_4, unquote(association))
# preload_bindings = quote do: [root, a, b, x]
# preload_expr = quote do: [{unquote(via_association_1), [{unquote(via_association_2), [unquote(association)]}]}]
preload_bindings = quote do: [root,
{unquote(association), ass},
{unquote(via_association_1), via_1},
{unquote(via_association_2), via_2},
{unquote(via_association_3), via_3},
{unquote(via_association_4), via_4}
]
preload_expr = quote do: [
{
unquote(via_association_1), {via_1,
[
{unquote(via_association_2), {via_2,
[
{unquote(via_association_3), {via_3,
[
{unquote(via_association_4), {via_4,
[
{unquote(association), ass}
]
}
}
]
}
}
]
}
}
]
}
}
]
quote do: do_preload_join(unquote(query), unquote(association), unquote(bindings), unquote(expr), unquote(preload_bindings), unquote(preload_expr))
# |> IO.inspect(label: "post level 5")
end
# defp named_binding_position(query, binding) do
# Map.get(query.aliases, binding)
# end
end
|
lib/query_elf/plugins/preloader.ex
| 0.792223
| 0.884788
|
preloader.ex
|
starcoder
|
defmodule Cldr.Calendar.Preference do
alias Cldr.LanguageTag
@territory_preferences Cldr.Config.calendar_preferences()
def territory_preferences do
@territory_preferences
end
def preferences_for_territory(territory) do
with {:ok, territory} <- Cldr.validate_territory(territory) do
territory_preferences = territory_preferences()
default_territory = Cldr.default_territory()
the_world = Cldr.the_world()
preferences =
Map.get(territory_preferences, territory) ||
Map.get(territory_preferences, default_territory) ||
Map.get(territory_preferences, the_world)
{:ok, preferences}
end
end
@doc """
Returns the calendar module preferred for
a territory.
## Arguments
* `territory` is any valid ISO3166-2 code as
an `String.t` or upcased `atom()`
## Returns
* `{:ok, calendar_module}` or
* `{:error, {exception, reason}}`
## Examples
iex> Cldr.Calendar.Preference.calendar_for_territory :US
{:ok, Cldr.Calendar.Gregorian}
iex> Cldr.Calendar.Preference.calendar_for_territory :XX
{:error, {Cldr.UnknownTerritoryError, "The territory :XX is unknown"}}
## Notes
The overwhelming majority of territories have
`:gregorian` as their first preferred calendar
and therefore `Cldr.Calendar.Gregorian`
will be returned for most territories.
Returning any other calendar module would require:
1. That another calendar is preferred over `:gregorian`
for a territory
2. That a calendar module is available to support
that calendar.
As an example, Iran (territory `:IR`) prefers the
`:persian` calendar. If the optional library
[ex_cldr_calendars_persian](https://hex.pm/packages/ex_cldr_calendars_persian)
is installed, the calendar module `Cldr.Calendar.Persian` will
be returned. If it is not installed, `Cldr.Calendar.Gregorian`
will be returned as `:gregorian` is the second preference
for `:IR`.
"""
def calendar_for_territory(territory) do
with {:ok, preferences} <- preferences_for_territory(territory),
{:ok, calendar_module} <- find_calendar(preferences) do
if calendar_module == Cldr.Calendar.default_calendar() do
Cldr.Calendar.calendar_for_territory(territory)
else
{:ok, calendar_module}
end
end
end
defp find_calendar(preferences) do
error = {:error, Cldr.unknown_calendar_error(preferences)}
Enum.reduce_while(preferences, error, fn calendar, acc ->
module = calendar_module(calendar)
if Code.ensure_loaded?(module) do
{:halt, {:ok, module}}
else
{:cont, acc}
end
end)
end
@doc """
Return the calendar for a locale.
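A hypothetical sketch, assuming a default ex_cldr backend is configured:

    {:ok, locale} = Cldr.validate_locale("en-US")
    Cldr.Calendar.Preference.calendar_for_locale(locale)
    #=> {:ok, Cldr.Calendar.Gregorian}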
"""
def calendar_for_locale(%LanguageTag{locale: %{calendar: nil}} = locale) do
locale
|> Cldr.Locale.territory_from_locale
|> calendar_for_territory
end
def calendar_for_locale(%LanguageTag{locale: %{calendar: calendar}} = locale) do
if calendar_module = calendar_from_name(calendar) do
calendar_module
else
locale
|> Cldr.Locale.territory_from_locale
|> calendar_for_territory
end
end
def calendar_for_locale(%LanguageTag{} = locale) do
locale
|> Cldr.Locale.territory_from_locale
|> calendar_for_territory
end
def calendar_for_locale(other) do
{:error, Cldr.Locale.locale_error(other)}
end
@base_calendar Cldr.Calendar
@known_calendars Cldr.known_calendars()
@calendar_modules @known_calendars
|> Enum.map(fn c ->
{c, Module.concat(@base_calendar, c |> Atom.to_string |> Macro.camelize)}
end)
|> Map.new
def calendar_modules do
@calendar_modules
end
def calendar_module(calendar) when calendar in @known_calendars do
Map.fetch!(calendar_modules(), calendar)
end
def calendar_module(other) do
{:error, Cldr.unknown_calendar_error(other)}
end
def calendar_from_name(name) do
# calendar_module/1 returns an error tuple for unknown names, so guard on
# the atom before calling Code.ensure_loaded?/1.
with module when is_atom(module) <- calendar_module(name),
true <- Code.ensure_loaded?(module) do
module
else
_ -> nil
end
end
end
|
lib/cldr/calendar/preference.ex
| 0.884539
| 0.572842
|
preference.ex
|
starcoder
|
defmodule Artemis.Drivers.IBMCloudant.Request do
use HTTPoison.Base
require Logger
alias Artemis.Drivers.IBMCloudant
defmodule CloudantError do
defexception message: "IBM Cloudant Error"
end
@doc """
Wraps the HTTPoison.request/1 function.
# Params
Takes two additional params in addition to the default `HTTPoison.Request` keys:
- `host`
- `path`
These can be used as an alternative to `url`. The driver can be used to connect to
multiple Cloudant databases, each using different hostnames and
authentication strategies.
These per-host values are defined in the application config. The `host` value
should be an atom, corresponding to the key in the config. When passed, the driver
will look up the information in the config and create the correct URL and headers.
All other params pass through to `HTTPoison.Request`.
# Response
Params are passed to `HTTPoison.request` and returns either:
{:ok, body}
{:error, message}
Where on success `body` is the decoded response body, and on failure `message` is either
the HTTPoison error message or the response body when a 400/500 status code is received.
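A hypothetical call (`:shared` stands in for a host name defined in your config):

    {:ok, body} =
      Artemis.Drivers.IBMCloudant.Request.call(%{
        method: :get,
        host: :shared,
        path: "my-db/_all_docs"
      })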
"""
def call(params) do
request_params = get_request_params(params)
struct(HTTPoison.Request, request_params)
|> request()
|> simplified_response()
end
def call!(params) do
case call(params) do
{:error, message} -> raise(CloudantError, message)
{:ok, result} -> result
end
end
# Callbacks
def process_request_headers(headers) do
[
Accept: "application/json",
"Content-Type": "application/json"
] ++ headers
end
def process_request_url(url) do
case includes_protocol?(url) do
true -> url
false -> "https://#{url}"
end
end
def process_response_body(body) do
Jason.decode!(body)
rescue
_ -> body
end
# Helpers
defp get_request_params(%{host: host, path: path} = params) do
host_config = IBMCloudant.Config.get_host_config_by!(name: host)
headers = add_authorization_header(host_config, params)
url = "#{host_config[:protocol]}://#{host_config[:hostname]}:#{host_config[:port]}/#{path}"
params
|> Map.delete(:host)
|> Map.delete(:path)
|> Map.put(:headers, headers)
|> Map.put(:url, url)
end
defp get_request_params(%{path: _}), do: raise("Must specify `host` when using `path` param")
defp get_request_params(params), do: params
defp add_authorization_header(host_config, params) do
headers = Map.get(params, :headers, [])
case host_config[:auth_type] do
"ibm_cloud_iam" -> add_ibm_cloud_iam_authorization_header(host_config, headers)
"basic" -> add_basic_authorization_header(host_config, headers)
_ -> headers
end
end
defp add_ibm_cloud_iam_authorization_header(host_config, headers) do
key = Keyword.fetch!(host_config, :ibm_cloud_iam_api_key)
token = Artemis.Worker.IBMCloudIAMAccessToken.get_token!(key)
[Authorization: "Bearer #{token}"] ++ headers
end
defp add_basic_authorization_header(config, headers) do
username = Keyword.fetch!(config, :username)
password = Keyword.fetch!(config, :password)
data = "#{username}:#{password}"
encoded = Base.encode64(data)
[Authorization: "Basic #{encoded}"] ++ headers
end
defp includes_protocol?(url) when is_bitstring(url), do: String.contains?(url, "://")
defp includes_protocol?(_), do: false
defp simplified_response({:ok, %HTTPoison.AsyncResponse{} = async_response}) do
{:ok, async_response}
end
defp simplified_response({:ok, %{body: body, status_code: status_code}}) when status_code in 200..399 do
{:ok, body}
end
defp simplified_response({:ok, %{body: body, status_code: status_code} = request}) when status_code in 400..599 do
Logger.debug("Error response for Cloudant HTTP request: " <> inspect(request))
{:error, body}
end
defp simplified_response(error) do
Logger.info("Error response for Cloudant HTTP request: " <> inspect(error))
error
end
end
|
apps/artemis/lib/artemis/drivers/ibm_cloudant/request.ex
| 0.742235
| 0.445831
|
request.ex
|
starcoder
|
defmodule JQ do
@moduledoc """
Provides capability to run jq queries on elixir structures.
[jq docs](https://stedolan.github.io/jq/)
## Examples
iex> JQ.query(%{key: "value"}, ".key")
{:ok, "value"}
iex> JQ.query!(%{key: "value"}, ".key")
"value"
"""
alias JQ.{MaxByteSizeExceededException, NoResultException, SystemCmdException, UnknownException}
require Logger
@default_options %{max_byte_size: nil}
@doc ~S"""
Execute a jq query on an elixir structure.
Internally invokes `JQ.query!/3` and rescues from all exceptions.
If a `JQ.NoResultException` is raised, `{:ok, nil}` is returned
"""
def query(payload, query, options \\ [])
@spec query(any(), String.t(), list()) ::
{:ok, any()} | {:error, :cmd | :unknown | :max_byte_size_exceeded}
def query(payload, query, options) do
{:ok, query!(payload, query, options)}
rescue
_ in NoResultException ->
{:ok, nil}
e in [SystemCmdException, UnknownException] ->
Logger.warn(e.message)
{:error, :cmd}
e in MaxByteSizeExceededException ->
Logger.warn(e.message)
{:error, :max_byte_size_exceeded}
error ->
Logger.warn("unknown error. error: #{inspect(error)}")
{:error, :unknown}
end
@doc ~S"""
Execute a jq query on an elixir structure.
* `payload` is any elixir structure
* `query` a jq query as a string
Internally this function encodes the `payload` into JSON, writes the JSON to
a temporary file, invokes the jq executable on the temporary file with the supplied
jq `query`.
The result is then decoded from JSON back into an elixir structure.
The temporary file is removed, regardless of the outcome. `System.cmd/3` is called
with the `:stderr_to_stdout` option.
## Options
* `:max_byte_size` - integer representing the maximum number of bytes allowed for the payload, defaults to `nil`.
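For example, a hypothetical guard against oversized payloads:

    JQ.query!(payload, ".key", max_byte_size: 1_000_000)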
## Error reasons
* `JQ.MaxByteSizeExceededException` - when the byte_size of the encoded elixir structure is greater than the `:max_byte_size` value
* `JQ.SystemCmdException` - when System.cmd/3 returns a non zero exit code
* `JQ.NoResultException` - when no result was returned
* `JQ.UnknownException` - when System.cmd/3 returns any other error besides those already handled
* `Poison.EncodeError` - when there is an error encoding `payload`
* `Poison.DecodeError` - when there is an error decoding the jq query result
* `Temp.Error` - when there is an error creating a temporary file
* `File.Error` - when there is an error removing a temporary file
"""
def query!(payload, query, options \\ [])
@spec query!(any(), String.t(), list()) :: any()
def query!(payload, query, options) do
%{max_byte_size: max_byte_size} = Enum.into(options, @default_options)
json = payload |> Poison.encode!() |> validate_max_byte_size(max_byte_size)
{fd, file_path} = Temp.open!()
IO.write(fd, json)
File.close(fd)
try do
case System.cmd("jq", [query, file_path], stderr_to_stdout: true) do
{_, code} = error when is_integer(code) and code != 0 ->
raise(SystemCmdException, result: error, command: "jq", args: [query, file_path])
{value, code} when is_integer(code) and code == 0 ->
result = Poison.decode!(value)
unless result, do: raise(NoResultException)
result
error ->
raise(UnknownException, error)
end
after
File.rm!(file_path)
end
end
defp validate_max_byte_size(json, max_byte_size)
when is_integer(max_byte_size) and byte_size(json) > max_byte_size do
raise(MaxByteSizeExceededException, size: byte_size(json), max_byte_size: max_byte_size)
end
defp validate_max_byte_size(json, _), do: json
end
|
lib/jq.ex
| 0.892848
| 0.47457
|
jq.ex
|
starcoder
|
alias Spear.{Connection.Response, Grpc, Rpc}
defmodule Grpc.Response do
@moduledoc false
# a structure and functions for turning a `Spear.Connection.Response` into
# a more friendly and usable data structure
defstruct [:status, :status_code, :message, :data]
# status code information and usages from
# https://grpc.github.io/grpc/core/md_doc_statuscodes.html
@status_code_mapping %{
0 => :ok,
1 => :cancelled,
2 => :unknown,
3 => :invalid_argument,
4 => :deadline_exceeded,
5 => :not_found,
6 => :already_exists,
7 => :permission_denied,
8 => :resource_exhausted,
9 => :failed_precondition,
10 => :aborted,
11 => :out_of_range,
12 => :unimplemented,
13 => :internal,
14 => :unavailable,
15 => :data_loss,
16 => :unauthenticated
}
@reverse_capitalized_status_code_mapping Map.new(@status_code_mapping, fn {status_code, status} ->
{status
|> Atom.to_string()
|> String.upcase()
|> String.to_atom(), status_code}
end)
def from_connection_response(%Response{status: status}, _request, _raw?) when status != 200 do
%__MODULE__{
status_code: 2,
status: :unknown,
message: "Bad HTTP status code: #{inspect(status)}, should be 200"
}
end
def from_connection_response(%Response{headers: headers, data: data, status: 200}, rpc, raw?) do
with {"grpc-status", "0"} <- List.keyfind(headers, "grpc-status", 0),
{:ok, parsed_data} <- parse_data(data, rpc, raw?) do
%__MODULE__{
status_code: 0,
status: :ok,
message: "",
data: parsed_data
}
else
{"grpc-status", other_status} ->
status_code = String.to_integer(other_status)
%__MODULE__{
status_code: status_code,
status: map_status(status_code),
message:
Enum.find_value(headers, fn {k, v} ->
k == "grpc-message" && v
end),
data: data
}
_ ->
%__MODULE__{
status_code: 13,
status: :internal,
message: "Error parsing response proto"
}
end
end
defp parse_data(data, rpc, raw?)
defp parse_data(data, _rpc, true), do: {:ok, data}
defp parse_data(data, %Rpc{response_stream?: false} = rpc, _raw?) do
case Grpc.decode_next_message(data, {rpc.service_module, rpc.response_type}) do
{parsed, <<>>} -> {:ok, parsed}
_ -> :error
end
end
defp parse_data(data, %Rpc{response_stream?: true} = rpc, _raw?) do
import Spear.Records.Streams, only: [read_resp: 1, read_resp_stream_not_found: 0]
parse_chunk = &Spear.Grpc.decode_next_message(&1, {rpc.service_module, rpc.response_type})
case parse_chunk.(data) do
{read_resp(content: {:stream_not_found, read_resp_stream_not_found()}), _rest} ->
{:ok, []}
{_message, _rest} ->
{:ok, Stream.unfold(data, parse_chunk)}
_ ->
:error
end
end
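# Unroll the status mappings into function heads at compile time, so each
# lookup is a pattern match rather than a runtime Map.get/2.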
for {code, status} <- @status_code_mapping do
def map_status(unquote(code)), do: unquote(status)
end
def map_status(_), do: :unknown
for {status, code} <- @reverse_capitalized_status_code_mapping do
def status_code(unquote(status)), do: unquote(code)
end
def status_code(_), do: 2
end
|
lib/spear/grpc/response.ex
| 0.810366
| 0.406833
|
response.ex
|
starcoder
|
defmodule Mix.Tasks.Ggity.Visual.Scale.Color.Viridis do
@shortdoc "Launch a browser and draw sample plots using the Viridis color scale."
@moduledoc @shortdoc
use Mix.Task
import GGity.Element.{Line, Rect}
alias GGity.{Examples, Labels, Plot}
@default_browser "firefox"
@doc false
@spec run(list(any)) :: any
def run([]), do: run([@default_browser])
def run(argv) do
plots =
Enum.join(
[
default(),
plasma(),
inferno(),
magma(),
cividis()
],
"\n"
)
test_file = "test/visual/visual_test.html"
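# "--wsl" launches the Windows Firefox binary from inside WSL; any other
# single argument is treated as the browser executable to invoke.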
browser =
case argv do
["--wsl"] ->
"/mnt/c/Program Files/Mozilla Firefox/firefox.exe"
[browser] ->
browser
end
File.write!(test_file, "<html><body #{grid_style()}>\n#{plots}\n</body></html>")
open_html_file(browser, test_file)
Process.sleep(1000)
File.rm(test_file)
end
defp open_html_file(browser, file) do
System.cmd(browser, [file])
end
defp grid_style do
"style='display: grid;grid-template-columns: repeat(3, 1fr)'"
end
defp default do
Examples.tx_housing()
|> Enum.filter(fn record ->
record["city"] in ["Houston", "Fort Worth", "San Antonio", "Dallas", "Austin"]
end)
|> Plot.new(%{x: "sales", y: "median"})
|> Plot.labs(title: "Default - Viridis")
|> Plot.geom_point(%{color: "city"})
|> Plot.plot()
end
defp plasma do
Examples.tx_housing()
|> Enum.filter(fn record ->
record["city"] in ["Houston", "Fort Worth", "San Antonio", "Dallas", "Austin"]
end)
|> Plot.new(%{x: "sales", y: "median"})
|> Plot.labs(title: "Plasma")
|> Plot.geom_point(%{color: "city"})
|> Plot.scale_color_viridis(option: :plasma)
|> Plot.plot()
end
defp inferno do
Examples.tx_housing()
|> Enum.filter(fn record ->
record["city"] in ["Houston", "Fort Worth", "San Antonio", "Dallas", "Austin"]
end)
|> Plot.new(%{x: "sales", y: "median"})
|> Plot.labs(title: "Inferno")
|> Plot.geom_point(%{color: "city"})
|> Plot.scale_color_viridis(option: :inferno)
|> Plot.plot()
end
defp magma do
Examples.tx_housing()
|> Enum.filter(fn record ->
record["city"] in ["Houston", "Fort Worth", "San Antonio", "Dallas", "Austin"]
end)
|> Plot.new(%{x: "sales", y: "median"})
|> Plot.labs(title: "Custom labels, fixed alpha")
|> Plot.geom_point(%{color: "city"}, alpha: 0.4)
|> Plot.scale_x_continuous(labels: :commas)
|> Plot.scale_y_continuous(labels: fn value -> "$#{Labels.commas(round(value / 1000))}K" end)
|> Plot.scale_color_viridis(option: :magma, labels: fn value -> "#{value}!!!" end)
|> Plot.plot()
end
defp cividis do
Examples.tx_housing()
|> Enum.filter(fn record ->
record["city"] in ["Houston", "Fort Worth", "San Antonio", "Dallas", "Austin"]
end)
|> Plot.new(%{x: "sales", y: "median"})
|> Plot.labs(title: "Cividis, size: 2")
|> Plot.geom_point(%{color: "city"}, size: 2)
|> Plot.scale_color_viridis(option: :cividis)
|> Plot.theme(
axis_ticks: nil,
legend_key: element_rect(fill: "white", size: 1),
panel_background: element_rect(fill: "white"),
panel_border: element_line(color: "lightgray", size: 0.5),
panel_grid: element_line(color: "lightgray"),
panel_grid_major: element_line(size: 0.5)
)
|> Plot.plot()
end
end
|
lib/mix/tasks/ggity_visual_scale_color_viridis.ex
| 0.836388
| 0.433981
|
ggity_visual_scale_color_viridis.ex
|
starcoder
|
defmodule Benchee.Benchmark.Scenario do
@moduledoc """
A Scenario in Benchee is a particular case of a whole benchmarking suite. That
is the combination of a particular function to benchmark (`job_name` and
`function`) in combination with a specific input (`input_name` and `input`).
It then gathers all data measured for this particular combination during
`Benchee.Benchmark.measure/3` (`run_times` and `memory_usages`),
which are then used later in the process by `Benchee.Statistics` to compute
the relevant statistics (`run_time_statistics` and `memory_usage_statistics`).
`name` is the name that should be used by formatters to display scenarios as
it potentially includes the `tag` present when loading scenarios that were
saved before. See `display_name/1`.
"""
defstruct [
:name,
:job_name,
:function,
:input_name,
:input,
:run_time_statistics,
:memory_usage_statistics,
run_times: [],
memory_usages: [],
before_each: nil,
after_each: nil,
before_scenario: nil,
after_scenario: nil,
tag: nil
]
@type t :: %__MODULE__{
name: String.t(),
job_name: String.t(),
function: fun,
input_name: String.t() | nil,
input: any | nil,
run_times: [float],
run_time_statistics: Benchee.Statistics.t() | nil,
memory_usages: [non_neg_integer],
memory_usage_statistics: Benchee.Statistics.t() | nil,
before_each: fun | nil,
after_each: fun | nil,
before_scenario: fun | nil,
after_scenario: fun | nil,
tag: String.t() | nil
}
@doc """
Returns the correct name to display of the given scenario data.
In the normal case this is `job_name`, however when scenarios are loaded they
are tagged and these tags should be shown for disambiguation.
## Examples
iex> alias Benchee.Benchmark.Scenario
iex> Scenario.display_name(%Scenario{job_name: "flat_map"})
"flat_map"
iex> Scenario.display_name(%Scenario{job_name: "flat_map", tag: "master"})
"flat_map (master)"
iex> Scenario.display_name(%{job_name: "flat_map"})
"flat_map"
"""
@spec display_name(t) :: String.t()
def display_name(%{job_name: job_name, tag: nil}), do: job_name
def display_name(%{job_name: job_name, tag: tag}), do: "#{job_name} (#{tag})"
def display_name(%{job_name: job_name}), do: job_name
end
|
lib/benchee/benchmark/scenario.ex
| 0.851104
| 0.816662
|
scenario.ex
|
starcoder
|
defmodule Portal do
@moduledoc """
Create a Portal with two colored doors for transferring data.
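A typical session (door colors and data are illustrative):

    {:ok, orange} = Portal.create(:orange)
    {:ok, blue} = Portal.create(:blue)
    portal = Portal.setup(orange, blue, [1, 2, 3])
    Portal.push_data(portal, :right)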
"""
alias Portal.Door
@doc """
The `left` and `right` doors of the Portal.
"""
defstruct [:left, :right]
@doc """
Create a new Portal door with the given `color` under the supervisor.
Returns `{:ok, pid}` where `pid` is the PID of the created process.
"""
@spec create(atom) :: {:ok, pid}
def create(color) do
Supervisor.start_child(Portal.Supervisor, [color])
end
@doc """
Setup transfer of `data` between `left` and `right` doors.
Returns a Portal struct: `%Portal{left: left, right: right}`
"""
@spec setup(pid, pid, list) :: struct
def setup(left, right, data) do
# Add all data to the portal on the left.
for item <- data do
Door.push(left, item)
end
%Portal{left: left, right: right}
end
@doc """
Pushes data in one direction (to the `:left` or `:right`) for the given `portal`.
Returns a pretty-printed Portal struct showing the current status of the Portals.
E.g.
:orange <=> :blue
[1, 2, 3] <=> []
Raises an ArgumentError if an invalid `direction` is used.
"""
def push_data(portal, direction) do
case direction do
:left -> push(portal.right, portal.left)
:right -> push(portal.left, portal.right)
_ -> raise ArgumentError, message: "Invalid direction: #{direction}"
end
# Return the portal so our protocol displays it properly.
portal
end
@doc false
@spec push(pid, pid) :: :ok
defp push(from, to) do
# If possible, pushes data from the `from` door to the `to` door.
# Otherwise, do nothing.
case Door.pop(from) do
:error -> :ok
{:ok, value} -> Door.push(to, value)
end
end
end
defimpl Inspect, for: Portal do
@doc """
Display the Portal data in a more readable way.
E.g.
:orange <=> :blue
[1, 2, 3] <=> []
"""
def inspect(%Portal{left: left, right: right}, _) do
left_door = inspect(left)
right_door = inspect(right)
left_data = inspect(Enum.reverse(Portal.Door.get(left)))
right_data = inspect(Portal.Door.get(right))
max = max(String.length(left_door), String.length(left_data))
# Use String.rjust() to make sure the columns line up.
"""
#{String.rjust(left_door, max)} <=> #{right_door}
#{String.rjust(left_data, max)} <=> #{right_data}
"""
end
end
|
lib/portal.ex
| 0.921939
| 0.881155
|
portal.ex
|
starcoder
|
defmodule GenWorker do
@moduledoc ~S"""
Generic Worker behavior that helps run a task at a specific time with a specified frequency.
## Usage
Define you worker module
```elixir
defmodule MyWorker do
use GenWorker, run_at: [hour: 13, minute: 59], run_each: [days: 1]
def run(_prev_args) do
IO.puts "MyWorker run every day at 13:59"
end
end
```
### Supported options
*`run_at`* – keyword list with integers values. Supported keys:
`:year`, `:month`, `:day`, `:hour`, `:minute`, `:second`, `:microsecond`.
Or you can use map for multiple runs:
```elixir
use GenWorker, run_at: %{"some_key" => [hour: 13, minute: 59], "other_key" => [hour: 14, minute: 00]}, run_each: [days: 1]
```
*`run_each`* - keyword list with integers values. Supported keys: `:years`, `:months`, `:weeks`, `:days`, `:hours`, `:minutes`, `:seconds`, `:milliseconds`. Default is `[days: 1]`
*`timezone`* - valid timezone, `:utc` by default. For the full list of timezones, call `Timex.timezones/0`
You need to implement the callback function
`c:run/1`, which defines the worker's business logic.
### Add worker to the application supervision tree:
```elixir
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [
worker(MyWorker, [])
# ...
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
```
"""
@doc """
Callback that implements the worker's business logic. It receives the arguments from the previous run and returns the arguments for the next run.
"""
@callback run(worker_args :: term()) :: worker_args :: term()
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts], location: :keep do
@behaviour GenWorker
@options opts
alias GenWorker.State
@doc """
Start GenServer
"""
def start_link(params \\ nil) do
state =
@options
|> Keyword.put(:caller, __MODULE__)
|> Keyword.put(:worker_args, params)
|> State.init!()
GenServer.start_link(GenWorker.Server, state, name: __MODULE__)
end
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :worker,
restart: :permanent,
shutdown: 500
}
end
@doc false
def run(_params) do
raise "Behaviour function #{__MODULE__}.run/1 is not implemented!"
end
defoverridable run: 1, child_spec: 1
end
end
@doc """
Allows setting the config options.
GenWorker.configure(fn(config) ->
config.before fn()->
IO.puts "runs before task hook"
end
config.finally fn()->
IO.puts "runs after task hook"
end
end)
"""
def configure(func), do: GenWorker.Configuration.configure(func)
end
|
lib/gen_worker.ex
| 0.848078
| 0.839405
|
gen_worker.ex
|
starcoder
|
defmodule ExHmac do
@moduledoc ~S"""
HMAC Authentication
## Example
This example project demonstrates the basics of ExHmac and will help you use it well.
Download via [Gitee](https://gitee.com/lizhaochao/exhmac_example) or [Github](https://github.com/lizhaochao/exhmac_example).
Once downloaded, two things to do:
```bash
mix deps.get
mix test
```
```bash
# to confirm the gc configs are as expected, run the following commands.
# use test.exs to run
MIX_ENV=test iex -S mix
# use dev.exs to run
iex -S mix
```
## Usage
### Quick Start
Here’s a commented example.
```elixir
# Use ExHmac like this in Your Project.
iex> defmodule YourProject do
...> use ExHmac, precision: :millisecond
...> # gen_timestamp/0 and gen_nonce/0 are imported by ExHmac.
...> # sign/3 is imported by ExHmac.
...> # check_hmac/3 is imported by ExHmac.
...>
...> @access_key "exhmac_key"
...> @secret_key "exhmac_secret"
...>
...> def make_params(name) do
...> [name: name, timestamp: gen_timestamp(), nonce: gen_nonce()]
...> end
...>
...> # make signature with access_key & secret_key using sign/3.
...> def make_signature(params) do
...> sign(params, @access_key, @secret_key)
...> end
...>
...> # use sign/3 & check_hmac/3 to make resp with hmac
...> def start_request(name) do
...> # simulate request, prepare params
...> params = make_params(name)
...> signature = make_signature(params)
...> _req_params = [signature: signature] ++ params
...>
...> # simulate response data
...> resp_params = [result: 0, timestamp: gen_timestamp(), nonce: gen_nonce()]
...> signature = sign(resp_params, @access_key, @secret_key)
...> resp_params = [signature: signature] ++ resp_params
...> check_hmac(resp_params, @access_key, @secret_key)
...> end
...> end
...>
iex> # start request & get check response result
...> YourProject.start_request("ljy")
:ok
```
### Check via decorator (Recommended)
`Doc is cheap`, ` Show you the Code.` [Download Example](#example).
### Check via defhmac macro (Recommended)
`Doc is cheap`, ` Show you the Code.` [Download Example](#example)
## Customize Hmac
### Support Hash Algs
- `:sha` & `:hmac_sha`
- `:sha512` & `:hmac_sha512`
- `:sha384` & `:hmac_sha384`
- `:sha256` & `:hmac_sha256`
- `:sha224` & `:hmac_sha224`
- `:sha3_512` & `:hmac_sha3_512`
- `:sha3_384` & `:hmac_sha3_384`
- `:sha3_256` & `:hmac_sha3_256`
- `:sha3_224` & `:hmac_sha3_224`
- `:blake2b` & `:hmac_blake2b`
- `:blake2s` & `:hmac_blake2s`
- `:md5` & `:hmac_md5`
Implements:
```elixir
# :sha256
:crypto.hash(:sha256, "sign string")
# :hmac_sha256
:crypto.mac(:hmac, :sha256, "secret_key", "sign string")
```
### Hooks
- `pre_hook/1`, runs before the hmac check; gives you the original args as a keyword list.
- `post_hook/1`, runs after the hmac check; its output is the final result.
These hooks only take effect in the decorator & defhmac situations.
### Callbacks
- `get_access_key/1`, gets/evaluates the access key from the input args.
- `get_secret_key/1`, required; you must provide the secret.
- `check_nonce/4`, if you want to use the Redis getset command to check nonces, implement this.
- `make_sign_string/3`, changes the sign string rule.
- `encode_hash_result/1`, defaults to encoding as a hex string.
- `fmt_resp/1`, formats the resp into your own format, like: `%{result: 0, error_msg: "some error"}`.
- `gc_log_callback/1`, invoked by the in-memory cache's gc once the collected count reaches the configured max.
For more detail, please [Download Example](#example).
### Available Configs
as ExHmac opts
```elixir
use ExHmac,
# once in-memory cache crash, will lose 2 following configs.
# you should set them again in config.exs.
precision: :millisecond,
nonce_freezing_secs: 60,
# normal configs
hash_alg: :hmac_sha512,
warn: false,
nonce_len: 20,
timestamp_offset_secs: 900
```
the following configs in config.exs
```elixir
# set them again for exactly gc running.
config :exhmac, :precision, :millisecond
config :exhmac, :nonce_freezing_secs, 60
# normal configs
config :exhmac, :disable_noncer, false # disable local in-memory cache
config :exhmac, :gc_interval_milli, 20_000
config :exhmac, :gc_warn_count, 10
config :exhmac, :gc_log_callback, &MyHmac.gc_log/1
```
`NOTICE`: set `precision` & `nonce_freezing_secs` in both places if you don't want to use the default values.
## Noncer
**What Is Noncer?**
Actually, Noncer is an in-memory cache. It stores all nonces and their arrival timestamps.
More than just a cache: using the nonce info it holds, Noncer can ensure a nonce stays safe for the duration you set.
**Don't worry about garbage. Noncer has GC.**
You can also disable Noncer via config, as follows:
```elixir
config :exhmac, :disable_noncer, true
```
Implement it on your API gateway instead,
or just implement the `check_nonce/4` callback, as follows:
```elixir
def check_nonce(nonce, curr_ts, nonce_freezing_secs, precision) do
# precision: :second or :millisecond
arrived_at = RedisCache.getset(nonce, curr_ts)
# your check logic
end
```
## Benchmark
```bash
mix bench
## ExHmacBench
benchmark name iterations average time
get_access_key/2 10000000 0.20 µs/op
make_arg_names/1 1000000 1.46 µs/op
sign/4 100000 16.49 µs/op
```
"""
defmacro __using__(opts) do
quote do
use Decorator.Define, check_hmac: 0
use ExHmac.Use.Decorator, unquote(opts)
use ExHmac.Use, unquote(opts)
end
end
end
defmodule ExHmac.Defhmac do
@moduledoc false
defmacro __using__(opts) do
quote do
use ExHmac.Use.Defhmac, unquote(opts)
end
end
end
|
lib/exhmac.ex
| 0.851753
| 0.859723
|
exhmac.ex
|
starcoder
|
defmodule AWS.Batch do
@moduledoc """
AWS Batch enables you to run batch computing workloads on the AWS Cloud.
Batch computing is a common way for developers, scientists, and engineers to
access large amounts of compute resources, and AWS Batch removes the
undifferentiated heavy lifting of configuring and managing the required
infrastructure. AWS Batch will be familiar to users of traditional batch
computing software. This service can efficiently provision resources in response
to jobs submitted in order to eliminate capacity constraints, reduce compute
costs, and deliver results quickly.
As a fully managed service, AWS Batch enables developers, scientists, and
engineers to run batch computing workloads of any scale. AWS Batch automatically
provisions compute resources and optimizes the workload distribution based on
the quantity and scale of the workloads. With AWS Batch, there is no need to
install or manage batch computing software, which allows you to focus on
analyzing results and solving problems. AWS Batch reduces operational
complexities, saves time, and reduces costs, which makes it easy for developers,
scientists, and engineers to run their batch jobs in the AWS Cloud.
"""
@doc """
Cancels a job in an AWS Batch job queue.
Jobs that are in the `SUBMITTED`, `PENDING`, or `RUNNABLE` state are cancelled.
Jobs that have progressed to `STARTING` or `RUNNING` are not cancelled (but the
API operation still succeeds, even if no job is cancelled); these jobs must be
terminated with the `TerminateJob` operation.
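A hypothetical call (the job id and reason are illustrative):

    AWS.Batch.cancel_job(client, %{
      "jobId" => "my-job-id",
      "reason" => "No longer needed"
    })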
"""
def cancel_job(client, input, options \\ []) do
path_ = "/v1/canceljob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates an AWS Batch compute environment.
You can create `MANAGED` or `UNMANAGED` compute environments.
In a managed compute environment, AWS Batch manages the capacity and instance
types of the compute resources within the environment. This is based on the
compute resource specification that you define or the [launch template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
that you specify when you create the compute environment. You can choose to use
Amazon EC2 On-Demand Instances or Spot Instances in your managed compute
environment. You can optionally set a maximum price so that Spot Instances only
launch when the Spot Instance price is below a specified percentage of the
On-Demand price.
Multi-node parallel jobs are not supported on Spot Instances.
In an unmanaged compute environment, you can manage your own compute resources.
This provides more compute resource configuration options, such as using a
custom AMI, but you must ensure that your AMI meets the Amazon ECS container
instance AMI specification. For more information, see [Container Instance AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html)
in the *Amazon Elastic Container Service Developer Guide*. After you have
created your unmanaged compute environment, you can use the
`DescribeComputeEnvironments` operation to find the Amazon ECS cluster that is
associated with it. Then, manually launch your container instances into that
Amazon ECS cluster. For more information, see [Launching an Amazon ECS Container Instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html)
in the *Amazon Elastic Container Service Developer Guide*.
AWS Batch does not upgrade the AMIs in a compute environment after it is created
(for example, when a newer version of the Amazon ECS-optimized AMI is
available). You are responsible for the management of the guest operating system
(including updates and security patches) and any additional application software
or utilities that you install on the compute resources. To use a new AMI for
your AWS Batch jobs:
1. Create a new compute environment with the new AMI.
2. Add the compute environment to an existing job queue.
3. Remove the old compute environment from your job queue.
4. Delete the old compute environment.
"""
def create_compute_environment(client, input, options \\ []) do
path_ = "/v1/createcomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates an AWS Batch job queue.
When you create a job queue, you associate one or more compute environments to
the queue and assign an order of preference for the compute environments.
You also set a priority to the job queue that determines the order in which the
AWS Batch scheduler places jobs onto its associated compute environments. For
example, if a compute environment is associated with more than one job queue,
the job queue with a higher priority is given preference for scheduling jobs to
that compute environment.
"""
def create_job_queue(client, input, options \\ []) do
path_ = "/v1/createjobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes an AWS Batch compute environment.
Before you can delete a compute environment, you must set its state to
`DISABLED` with the `UpdateComputeEnvironment` API operation and disassociate it
from any job queues with the `UpdateJobQueue` API operation.
"""
def delete_compute_environment(client, input, options \\ []) do
path_ = "/v1/deletecomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes the specified job queue.
You must first disable submissions for a queue with the `UpdateJobQueue`
operation. All jobs in the queue are terminated when you delete a job queue.
It is not necessary to disassociate compute environments from a queue before
submitting a `DeleteJobQueue` request.
"""
def delete_job_queue(client, input, options \\ []) do
path_ = "/v1/deletejobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deregisters an AWS Batch job definition.
Job definitions will be permanently deleted after 180 days.
"""
def deregister_job_definition(client, input, options \\ []) do
path_ = "/v1/deregisterjobdefinition"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes one or more of your compute environments.
If you are using an unmanaged compute environment, you can use the
`DescribeComputeEnvironments` operation to determine the `ecsClusterArn` that you
should launch your Amazon ECS container instances into.
"""
def describe_compute_environments(client, input, options \\ []) do
path_ = "/v1/describecomputeenvironments"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes a list of job definitions.
You can specify a `status` (such as `ACTIVE`) to only return job definitions
that match that status.
"""
def describe_job_definitions(client, input, options \\ []) do
path_ = "/v1/describejobdefinitions"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes one or more of your job queues.
"""
def describe_job_queues(client, input, options \\ []) do
path_ = "/v1/describejobqueues"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes a list of AWS Batch jobs.
"""
def describe_jobs(client, input, options \\ []) do
path_ = "/v1/describejobs"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns a list of AWS Batch jobs.
You must specify only one of the following:
* a job queue ID to return a list of jobs in that job queue
* a multi-node parallel job ID to return a list of that job's nodes
* an array job ID to return a list of that job's children
You can filter the results by job status with the `jobStatus` parameter. If you
do not specify a status, only `RUNNING` jobs are returned.
"""
def list_jobs(client, input, options \\ []) do
path_ = "/v1/listjobs"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
List the tags for an AWS Batch resource.
AWS Batch resources that support tags are compute environments, jobs, job
definitions, and job queues. ARNs for child jobs of array and multi-node
parallel (MNP) jobs are not supported.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Registers an AWS Batch job definition.
"""
def register_job_definition(client, input, options \\ []) do
path_ = "/v1/registerjobdefinition"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Submits an AWS Batch job from a job definition.
Parameters specified during `SubmitJob` override parameters defined in the job
definition.
"""
def submit_job(client, input, options \\ []) do
path_ = "/v1/submitjob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Associates the specified tags to a resource with the specified `resourceArn`.
If existing tags on a resource are not specified in the request parameters, they
are not changed. When a resource is deleted, the tags associated with that
resource are deleted as well. AWS Batch resources that support tags are compute
environments, jobs, job definitions, and job queues. ARNs for child jobs of
array and multi-node parallel (MNP) jobs are not supported.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Terminates a job in a job queue.
Jobs that are in the `STARTING` or `RUNNING` state are terminated, which causes
them to transition to `FAILED`. Jobs that have not progressed to the `STARTING`
state are cancelled.
"""
def terminate_job(client, input, options \\ []) do
path_ = "/v1/terminatejob"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deletes specified tags from an AWS Batch resource.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/v1/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"tagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Updates an AWS Batch compute environment.
"""
def update_compute_environment(client, input, options \\ []) do
path_ = "/v1/updatecomputeenvironment"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates a job queue.
"""
def update_job_queue(client, input, options \\ []) do
path_ = "/v1/updatejobqueue"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "batch"}
host = build_host("batch", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/batch.ex
| 0.893852
| 0.792002
|
batch.ex
|
starcoder
|
defmodule ElixirRigidPhysics.Dynamics do
@moduledoc """
Dynamics module responsible for handling physics stepping and substepping of a world.
"""
alias ElixirRigidPhysics.World
alias ElixirRigidPhysics.Collision.Broadphase
alias ElixirRigidPhysics.Collision.Narrowphase
alias Graphmath.Vec3
alias Graphmath.Quatern
@spec step(World.t(), number) :: World.t()
def step(
%World{
timestep: timestep,
current_time: current_time,
bodies: bodies,
broadphase_acceleration_structure: old_acc_struct
} = world,
dt
) do
import ElixirRigidPhysics.Dynamics.Body
{acc_struct_pop_usecs, acc_struct} =
:timer.tc(Broadphase, :populate_acceleration_structure_from_bodies, [old_acc_struct, bodies])
{broadphase_usecs, maybe_colliding_pairs} =
:timer.tc(Broadphase, :generate_potential_colliding_pairs, [acc_struct])
# IO.inspect(maybe_colliding_pairs, label: "MAYBE COLLIDING PAIRS")
{narrowphase_usecs, collisions} =
:timer.tc(Enum, :reduce, [
maybe_colliding_pairs,
[],
fn {{_a_ref, a_body, _a_aabb}, {_b_ref, b_body, _b_aabb}}, acc ->
case Narrowphase.test_intersection(a_body, b_body) do
:coincident -> acc
:no_intersection -> acc
{:error, _} -> acc
manifold -> [manifold | acc]
end
end
])
# IO.inspect(collisions, label: "COLLIDING PAIRS")
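# Integrate each body forward by dt: explicit Euler for position and
# quaternion integration for orientation. Collisions detected above are not
# resolved here; they are only stored on the world for downstream use.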
new_bodies =
for {r,
body(
position: position,
orientation: orientation,
linear_velocity: linear_velocity,
angular_velocity: angular_velocity
) = b} <- bodies,
into: %{} do
# integrate linear velocity
new_position = linear_velocity |> Vec3.scale(dt) |> Vec3.add(position)
# integrate angular velocity
new_orientation = Quatern.integrate(orientation, angular_velocity, dt)
new_body = body(b, position: new_position, orientation: new_orientation)
{r, new_body}
end
%World{
world
| timestep: timestep + 1,
current_time: current_time + dt,
bodies: new_bodies,
broadphase_acceleration_structure: acc_struct,
collisions: collisions,
narrowphase_usecs: narrowphase_usecs,
broadphase_usecs: broadphase_usecs,
acc_struct_pop_usecs: acc_struct_pop_usecs
}
end
end
|
lib/dynamics/dynamics.ex
| 0.885928
| 0.472623
|
dynamics.ex
|
starcoder
|
defmodule ExUnit do
@moduledoc """
Unit testing framework for Elixir.
## Example
A basic setup for ExUnit is shown below:
# File: assertion_test.exs
# 1) Start ExUnit.
ExUnit.start()
# 2) Create a new test module (test case) and use "ExUnit.Case".
defmodule AssertionTest do
# 3) Notice we pass "async: true", this runs the test case
# concurrently with other test cases. The individual tests
# within each test case are still run serially.
use ExUnit.Case, async: true
# 4) Use the "test" macro instead of "def" for clarity.
test "the truth" do
assert true
end
end
To run the tests above, run the file using `elixir` from the
command line. Assuming you named the file `assertion_test.exs`,
you can run it as:
elixir assertion_test.exs
## Case, Callbacks and Assertions
See `ExUnit.Case` and `ExUnit.Callbacks` for more information
about defining test cases and setting up callbacks.
The `ExUnit.Assertions` module contains a set of macros to
generate assertions with appropriate error messages.
## Integration with Mix
Mix is the project management and build tool for Elixir. Invoking `mix test`
from the command line will run the tests in each file matching the pattern
`*_test.exs` found in the `test` directory of your project.
You must create a `test_helper.exs` file inside the
`test` directory and put the code common to all tests there.
The minimum example of a `test_helper.exs` file would be:
# test/test_helper.exs
ExUnit.start()
Mix will load the `test_helper.exs` file before executing the tests.
It is not necessary to `require` the `test_helper.exs` file in your test
files. See `Mix.Tasks.Test` for more information.
"""
@typedoc """
All tests start with a state of `nil`.
A finished test can be in one of five states:
1. Passed (also represented by `nil`)
2. Failed
3. Skipped (via @tag :skip)
4. Excluded (via :exclude filters)
5. Invalid (when setup_all fails)
"""
@type state ::
nil | {:failed, failed} | {:skipped, binary} | {:excluded, binary} | {:invalid, module}
@typedoc "The error state returned by `ExUnit.Test` and `ExUnit.TestModule`"
@type failed :: [{Exception.kind(), reason :: term, Exception.stacktrace()}]
@typedoc "A map representing the results of running a test suite"
@type suite_result :: %{
excluded: non_neg_integer,
failures: non_neg_integer,
skipped: non_neg_integer,
total: non_neg_integer
}
defmodule Test do
@moduledoc """
A struct that keeps information about the test.
It is received by formatters and contains the following fields:
* `:name` - the test name
* `:module` - the test module
* `:state` - the finished test state (see `t:ExUnit.state/0`)
* `:time` - the time to run the test
* `:tags` - the test tags
* `:logs` - the captured logs
"""
defstruct [:name, :case, :module, :state, time: 0, tags: %{}, logs: ""]
# TODO: Remove the `:case` field on v2.0
@type t :: %__MODULE__{
name: atom,
case: module,
module: module,
state: ExUnit.state(),
time: non_neg_integer,
tags: map,
logs: String.t()
}
end
defmodule TestModule do
@moduledoc """
A struct that keeps information about the test case.
It is received by formatters and contains the following fields:
* `:name` - the test case name
* `:state` - the test error state (see `t:ExUnit.state/0`)
* `:tests` - all tests for this case
"""
defstruct [:name, :state, tests: []]
@type t :: %__MODULE__{name: module, state: ExUnit.state(), tests: [ExUnit.Test.t()]}
end
defmodule TestCase do
# TODO: Remove this module on v2.0 (it has been replaced by TestModule)
@moduledoc false
defstruct [:name, :state, tests: []]
@type t :: %__MODULE__{name: module, state: ExUnit.state(), tests: [ExUnit.Test.t()]}
end
defmodule TimeoutError do
defexception [:timeout, :type]
@impl true
def message(%{timeout: timeout, type: type}) do
"""
#{type} timed out after #{timeout}ms. You can change the timeout:
1. per test by setting "@tag timeout: x"
2. per case by setting "@moduletag timeout: x"
3. globally via "ExUnit.start(timeout: x)" configuration
4. or set it to infinity per run by calling "mix test --trace"
(useful when using IEx.pry/0)
Timeouts are given as integers in milliseconds.
"""
end
end
use Application
@doc false
def start(_type, []) do
children = [
ExUnit.Server,
ExUnit.CaptureServer,
ExUnit.OnExitHandler
]
opts = [strategy: :one_for_one, name: ExUnit.Supervisor]
Supervisor.start_link(children, opts)
end
@doc """
Starts ExUnit and automatically runs tests right before the
VM terminates.
It accepts a set of `options` to configure `ExUnit`
(the same ones accepted by `configure/1`).
If you want to run tests manually, you can set the `:autorun` option
to `false` and use `run/0` to run tests.
"""
@spec start(Keyword.t()) :: :ok
def start(options \\ []) do
{:ok, _} = Application.ensure_all_started(:ex_unit)
configure(options)
if Application.fetch_env!(:ex_unit, :autorun) do
Application.put_env(:ex_unit, :autorun, false)
System.at_exit(fn
0 ->
time = ExUnit.Server.modules_loaded()
options = persist_defaults(configuration())
%{failures: failures} = ExUnit.Runner.run(options, time)
System.at_exit(fn _ ->
if failures > 0, do: exit({:shutdown, 1})
end)
_ ->
:ok
end)
else
:ok
end
end
@doc """
Configures ExUnit.
## Options
ExUnit supports the following options:
* `:assert_receive_timeout` - the timeout to be used on `assert_receive`
calls, defaults to `100` milliseconds;
* `:autorun` - if ExUnit should run by default on exit. Defaults to `true`;
* `:capture_log` - if ExUnit should default to keeping track of log messages
and print them on test failure. Can be overridden for individual tests via
`@tag capture_log: false`. Defaults to `false`;
* `:colors` - a keyword list of colors to be used by some formatters.
The only option so far is `[enabled: boolean]` which defaults to `IO.ANSI.enabled?/0`;
* `:exclude` - specifies which tests are run by skipping tests that match the
filter;
* `:failures_manifest_file` - specifies a path to the file used to store failures
between runs;
* `:formatters` - the formatters that will print results,
defaults to `[ExUnit.CLIFormatter]`;
* `:include` - specifies which tests are run by skipping tests that do not
match the filter. Keep in mind that all tests are included by default, so unless they are
excluded first, the `:include` option has no effect. To only run the tests
that match the `:include` filter, exclude the `:test` tag first (see the
documentation for `ExUnit.Case` for more information on tags);
* `:max_cases` - maximum number of tests to run in parallel. Only tests from
different modules run in parallel. It defaults to `System.schedulers_online * 2`
to optimize both CPU-bound and IO-bound tests;
* `:max_failures` - the suite stops evaluating tests when this number of test failures
is reached. All tests within a module that fail when using the `setup_all/1,2` callbacks
are counted as failures. Defaults to `:infinity`;
* `:module_load_timeout` - the timeout to be used when loading a test module,
defaults to `60_000` milliseconds;
* `:only_test_ids` - a list of `{module_name, test_name}` tuples that limits
what tests get run;
* `:refute_receive_timeout` - the timeout to be used on `refute_receive`
calls, defaults to `100` milliseconds;
* `:seed` - an integer seed value to randomize the test suite. This seed
is also mixed with the test module and name to create a new unique seed
on every test, which is automatically fed into the `:rand` module. This
provides randomness between tests, but predictable and reproducible results;
* `:slowest` - prints timing information for the N slowest tests. Running
ExUnit with slow test reporting automatically runs in `trace` mode. It
is disabled by default;
* `:stacktrace_depth` - configures the stacktrace depth to be used
on formatting and reporters, defaults to `20`;
* `:timeout` - sets the timeout for the tests, defaults to `60_000` milliseconds;
* `:trace` - sets ExUnit into trace mode, this sets `:max_cases` to `1` and
prints each test case and test while running. Note that in trace mode test timeouts
will be ignored.
Any arbitrary configuration can also be passed to `configure/1` or `start/1`,
and these options can then be used in places such as custom formatters. These
other options will be ignored by ExUnit itself.
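For example:
    ExUnit.configure(exclude: [:pending], seed: 0)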
"""
@spec configure(Keyword.t()) :: :ok
def configure(options) do
Enum.each(options, fn {k, v} ->
Application.put_env(:ex_unit, k, v)
end)
end
@doc """
Returns ExUnit configuration.
"""
@spec configuration() :: Keyword.t()
def configuration do
Application.get_all_env(:ex_unit)
|> put_seed()
|> put_slowest()
|> put_max_cases()
end
@doc """
Returns the pluralization for `word`.
If one is not registered, returns the word appended with an "s".
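For example (assuming no custom rule was registered via `plural_rule/2`):
    ExUnit.plural_rule("failure")
    #=> "failures"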
"""
@spec plural_rule(binary) :: binary
def plural_rule(word) when is_binary(word) do
Application.get_env(:ex_unit, :plural_rules, %{})
|> Map.get(word, "#{word}s")
end
@doc """
Registers a `pluralization` for `word`.
If one is already registered, it is replaced.
"""
@spec plural_rule(binary, binary) :: :ok
def plural_rule(word, pluralization) when is_binary(word) and is_binary(pluralization) do
plural_rules =
Application.get_env(:ex_unit, :plural_rules, %{})
|> Map.put(word, pluralization)
configure(plural_rules: plural_rules)
end
@doc """
Runs the tests. It is invoked automatically
if ExUnit is started via `start/1`.
Returns a map containing the total number of tests, the number
of failures, the number of excluded tests and the number of skipped tests.
"""
@spec run() :: suite_result()
def run do
options = persist_defaults(configuration())
ExUnit.Runner.run(options, nil)
end
@doc """
Sets a callback to be executed after the completion of a test suite.
Callbacks set with `after_suite/1` must accept a single argument, which is a
map containing the results of the test suite's execution.
If `after_suite/1` is called multiple times, the callbacks will be called in
reverse order. In other words, the last callback set will be the first to be
called.
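For example, a callback that prints the failure count (a sketch):
    ExUnit.after_suite(fn %{failures: failures} ->
      IO.puts("Suite finished with " <> Integer.to_string(failures) <> " failure(s)")
    end)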
"""
@doc since: "1.8.0"
@spec after_suite((suite_result() -> any)) :: :ok
def after_suite(function) when is_function(function) do
current_callbacks = Application.fetch_env!(:ex_unit, :after_suite)
configure(after_suite: [function | current_callbacks])
end
# Persists default values in application
# environment before the test suite starts.
defp persist_defaults(config) do
config |> Keyword.take([:max_cases, :seed, :trace]) |> configure()
config
end
defp put_seed(opts) do
Keyword.put_new_lazy(opts, :seed, fn ->
# We're using `rem System.system_time()` here
# instead of directly using :os.timestamp or using the
# :microsecond argument because the VM on Windows has odd
# precision. Calling with :microsecond will give us a multiple
# of 1000. Calling without it gives actual microsecond precision.
System.system_time()
|> System.convert_time_unit(:native, :microsecond)
|> rem(1_000_000)
end)
end
defp put_max_cases(opts) do
Keyword.put(opts, :max_cases, max_cases(opts))
end
defp put_slowest(opts) do
if opts[:slowest] > 0 do
Keyword.put(opts, :trace, true)
else
opts
end
end
defp max_cases(opts) do
cond do
opts[:trace] -> 1
max = opts[:max_cases] -> max
true -> System.schedulers_online() * 2
end
end
end
|
lib/ex_unit/lib/ex_unit.ex
| 0.84489
| 0.83346
|
ex_unit.ex
|
starcoder
|
defmodule Adapter do
@moduledoc """
Documentation for `Adapter`.
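A minimal usage sketch (the `MySession` module and its callback are
hypothetical; see `behaviour/1` and `t:option/0` for the real options):
    defmodule MySession do
      use Adapter, app: :my_app
      behaviour do
        @callback get(token :: binary) :: {:ok, term} | {:error, atom}
      end
    end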
"""
@default_app :adapter
@default_mode :compile
@default_log :debug
@default_random true
@default_validate true
@typedoc ~S"""
`:app` and `:key`.
"""
@type option ::
{:app, atom}
| {:key, atom}
| {:default, module}
| {:error, :raise | atom}
| {:log, false | :debug | :info | :notice}
| {:mode, :compile | :get_compiled | :get_env}
| {:random, boolean}
| {:validate, boolean}
@doc false
@spec __using__(opts :: [option]) :: term
defmacro __using__(opts \\ []) do
Module.put_attribute(__CALLER__.module, :adapter_opts, parse_config(__CALLER__.module, opts))
quote do
require unquote(__MODULE__)
import unquote(__MODULE__), only: [behavior: 1, behaviour: 1]
end
end
@doc ~S"See `Adapter.behaviour/1`."
@spec behavior(callbacks :: [{:do, term}]) :: term
defmacro behavior(do: block), do: setup(__CALLER__.module, block)
@doc ~S"""
Define the adapter behaviour through callbacks.
A `@doc` tag can be set for each `@callback`.
Each callback is then usable as a function on the adapter module itself.
## Example
```elixir
behaviour do
@doc ~S\"""
Get session from storage.
\"""
@callback get(token :: binary) :: {:ok, Session.t | nil} | {:error, atom}
end
```
"""
@spec behaviour(callbacks :: [{:do, term}]) :: term
defmacro behaviour(do: block), do: setup(__CALLER__.module, block)
@spec setup(module, term) :: term
defp setup(module, block) do
{type, opts} = Module.get_attribute(module, :adapter_opts)
{code, callbacks} = __MODULE__.Utility.analyze(block)
type.generate(code, callbacks, opts)
end
@spec parse_config(module, Keyword.t()) :: {module, term}
defp parse_config(module, opts) do
adapter = module |> to_string |> String.split(".") |> Enum.map(&Macro.underscore/1)
{default, opts} = Keyword.pop(opts, :default)
{app, opts} = Keyword.pop(opts, :app, @default_app)
{key, opts} = Keyword.pop(opts, :key, :"#{Enum.join(adapter, "_")}")
{error, opts} = Keyword.pop(opts, :error, :"#{List.last(adapter)}_not_configured")
{log, opts} = Keyword.pop(opts, :log, @default_log)
{random, opts} = Keyword.pop(opts, :random, @default_random)
{validate, opts} = Keyword.pop(opts, :validate, @default_validate)
config = %{
adapter: module,
app: app,
key: key,
default: default,
error: error,
log: log,
random: random,
validate: validate
}
case Keyword.get(opts, :mode, @default_mode) do
:compile ->
{__MODULE__.Compile, config}
:get_env ->
{__MODULE__.GetEnv, config}
:get_compiled ->
{__MODULE__.GetCompiled, config}
m ->
raise CompileError,
description:
"Invalid Adapter Mode: #{inspect(m)}. Only `:compiled`, `:get_compiled`, and `:get_env` are supported."
end
end
end
|
lib/adapter.ex
| 0.867471
| 0.564459
|
adapter.ex
|
starcoder
|
defmodule PaperTrail.VersionQueries do
import Ecto.Query
alias PaperTrail.Version
@repo PaperTrail.RepoClient.repo()
@doc """
Gets all the versions of a record.
"""
@spec get_versions(record :: Ecto.Schema.t()) :: Ecto.Query.t()
def get_versions(record), do: get_versions(record, [])
@doc """
Gets all the versions of a record given a module and its id
"""
@spec get_versions(model :: module, id :: pos_integer) :: Ecto.Query.t()
def get_versions(model, id) when is_atom(model) and is_integer(id),
do: get_versions(model, id, [])
@doc """
Gets all the versions of a record.
A list of options is optional, so you can set, for example, the :prefix of the query,
which allows you to switch between different tenants.
# Usage example:
iex(1)> PaperTrail.VersionQueries.get_versions(record, [prefix: "tenant_id"])
"""
@spec get_versions(record :: Ecto.Schema.t(), options :: []) :: Ecto.Query.t()
def get_versions(record, options) when is_map(record) do
item_type = record.__struct__ |> Module.split() |> List.last()
version_query(item_type, PaperTrail.get_model_id(record), options) |> @repo.all
end
@doc """
Gets all the versions of a record given a module and its id.
A list of options is optional, so you can set, for example, the :prefix of the query,
which allows you to switch between different tenants.
# Usage example:
iex(1)> PaperTrail.VersionQueries.get_versions(ModelName, id, [prefix: "tenant_id"])
"""
@spec get_versions(model :: module, id :: pos_integer, options :: []) :: Ecto.Query.t()
def get_versions(model, id, options) do
item_type = model |> Module.split() |> List.last()
version_query(item_type, id, options) |> @repo.all
end
@doc """
Gets the last version of a record.
"""
@spec get_version(record :: Ecto.Schema.t()) :: Ecto.Query.t()
def get_version(record), do: get_version(record, [])
@doc """
Gets the last version of a record given its module reference and its id.
"""
@spec get_version(model :: module, id :: pos_integer) :: Ecto.Query.t()
def get_version(model, id) when is_atom(model) and is_integer(id),
do: get_version(model, id, [])
@doc """
Gets the last version of a record.
A list of options is optional, so you can set, for example, the :prefix of the query,
which allows you to switch between different tenants.
# Usage example:
iex(1)> PaperTrail.VersionQueries.get_version(record, [prefix: "tenant_id"])
"""
@spec get_version(record :: Ecto.Schema.t(), options :: []) :: Ecto.Query.t()
def get_version(record, options) when is_map(record) do
item_type = record.__struct__ |> Module.split() |> List.last()
last(version_query(item_type, PaperTrail.get_model_id(record), options)) |> @repo.one
end
@doc """
Gets the last version of a record given its module reference and its id.
A list of options is optional, so you can set, for example, the :prefix of the query,
which allows you to switch between different tenants.
# Usage example:
iex(1)> PaperTrail.VersionQueries.get_version(ModelName, id, [prefix: "tenant_id"])
"""
@spec get_version(model :: module, id :: pos_integer, options :: []) :: Ecto.Query.t()
def get_version(model, id, options) do
item_type = model |> Module.split() |> List.last()
last(version_query(item_type, id, options)) |> @repo.one
end
@doc """
Gets the current model record/struct of a version.
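# Usage example:
iex(1)> PaperTrail.VersionQueries.get_current_model(version)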
"""
def get_current_model(version) do
@repo.get(("Elixir." <> version.item_type) |> String.to_existing_atom(), version.item_id)
end
defp version_query(item_type, id) do
from(v in Version, where: v.item_type == ^item_type and v.item_id == ^id)
end
defp version_query(item_type, id, options) do
with opts <- Enum.into(options, %{}) do
version_query(item_type, id)
|> Ecto.Queryable.to_query()
|> Map.merge(opts)
end
end
end
|
lib/paper_trail/version_queries.ex
| 0.785679
| 0.53127
|
version_queries.ex
|
starcoder
|
defmodule MazesWeb.MazeHelper do
alias Mazes.MazeColors
# doesn't matter that much because the svg is responsive,
# but it affects how the stroke looks
def max_svg_width, do: 1000
def svg_padding, do: 16
def show_solution?(settings, solution) do
settings.show_solution && is_list(solution) && Enum.count(solution) > 1
end
def solution(maze, solution, center_fun) do
[h | t] = solution
{x, y} = center_fun.(maze, h)
d =
t
|> Enum.map(fn vertex ->
{x, y} = center_fun.(maze, vertex)
"L #{x} #{y}"
end)
|> Enum.join(" ")
[
Phoenix.HTML.Tag.content_tag(:path, "",
d: "M #{x} #{y} #{d}",
style:
"stroke: white; opacity: 0.3; stroke-width: #{stroke_width(maze) * 3}; stroke-linecap: round",
fill: "transparent"
),
Phoenix.HTML.Tag.content_tag(:path, "",
d: "M #{x} #{y} #{d}",
style:
"stroke: black; stroke-width: #{stroke_width(maze)}; stroke-dasharray: #{
stroke_width(maze) * 4
} #{stroke_width(maze) * 4}; stroke-linecap: round",
fill: "transparent"
)
]
end
def from_to(maze, center_fun) do
{from_cx, from_cy} = center_fun.(maze, maze.from)
{to_cx, to_cy} = center_fun.(maze, maze.to)
[
Phoenix.HTML.Tag.content_tag(:circle, "",
cx: from_cx,
cy: from_cy,
r: 2 * (stroke_width(maze) + 1),
style: "opacity: 0.3;",
fill: "white"
),
Phoenix.HTML.Tag.content_tag(:circle, "",
cx: from_cx,
cy: from_cy,
r: stroke_width(maze) + 1,
style: "",
fill: "black"
),
Phoenix.HTML.Tag.content_tag(:circle, "",
cx: to_cx,
cy: to_cy,
r: 2 * (stroke_width(maze) + 1),
style: "opacity: 0.3;",
fill: "white"
),
Phoenix.HTML.Tag.content_tag(:circle, "",
cx: to_cx,
cy: to_cy,
r: stroke_width(maze) + 1,
style: "",
fill: "black"
)
]
end
def vertex_color(_maze, vertex, colors, show_colors, hue, saturation) do
cond do
show_colors && colors ->
MazeColors.color(colors.distances[vertex], colors.max_distance, hue, saturation)
true ->
"white"
end
end
def line_style(maze) do
"stroke: black; #{do_line_style(maze)}"
end
def do_line_style(maze) do
"stroke-width: #{stroke_width(maze)}; stroke-linecap: round;"
end
defp stroke_width(maze) do
case Enum.max(Enum.filter([maze[:width], maze[:height], maze[:radius]], & &1)) do
n when n >= 100 -> 1
_ -> 2
end
end
def move_coordinate_by_radius_and_angle({cx, cy}, radius, alpha) do
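# Normalize alpha into [0, 2*pi], then pick a quadrant to determine the signs
# of the x/y offsets from the center point {cx, cy}.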
cond do
alpha > 2 * :math.pi() ->
move_coordinate_by_radius_and_angle({cx, cy}, radius, alpha - 2 * :math.pi())
alpha < 0 ->
move_coordinate_by_radius_and_angle({cx, cy}, radius, alpha + 2 * :math.pi())
true ->
ratio = alpha / :math.pi()
{theta, x_delta_sign, y_delta_sign} =
case ratio do
ratio when ratio >= 0.0 and ratio < 0.5 ->
{alpha, 1, -1}
ratio when ratio >= 0.5 and ratio < 1.0 ->
{:math.pi() - alpha, 1, 1}
ratio when ratio >= 1.0 and ratio < 1.5 ->
{alpha - :math.pi(), -1, 1}
ratio when ratio >= 1.5 and ratio <= 2.0 ->
{:math.pi() * 2 - alpha, -1, -1}
end
x = x_delta_sign * radius * :math.sin(theta) + cx
y = y_delta_sign * radius * :math.cos(theta) + cy
{x, y}
end
end
def format_number(x) when is_integer(x) do
"#{x}"
end
def format_number(x) when is_float(x) do
to_string(:io_lib.format("~.4f", [x]))
end
end
|
lib/mazes_web/views/maze_helper.ex
| 0.628521
| 0.433981
|
maze_helper.ex
|
starcoder
|
defmodule Credo.Check.Readability.PreferUnquotedAtoms do
use Credo.Check,
run_on_all: true,
base_priority: :high,
elixir_version: "< 1.7.0-dev",
explanations: [
check: """
Prefer unquoted atoms unless quotes are necessary.
This is helpful because a quoted atom can be easily mistaken for a string.
# preferred
:x
[x: 1]
%{x: 1}
# NOT preferred
:"x"
["x": 1]
%{"x": 1}
The primary case where this can become an issue is when using atoms or
strings for keys in a Map or Keyword list.
For example, this:
%{"x": 1}
Can easily be mistaken for this:
%{"x" => 1}
Because a string key cannot be used to access a value with the equivalent
atom key, this can lead to subtle bugs which are hard to discover.
Like all `Readability` issues, this one is not a technical concern.
The code will behave identically either way.
"""
]
@token_types [:atom_unsafe, :kw_identifier_unsafe]
@doc false
@impl true
# TODO: consider for experimental check front-loader (tokens)
def run(%SourceFile{} = source_file, params) do
issue_meta = IssueMeta.for(source_file, params)
source_file
|> Credo.Code.to_tokens()
|> Enum.reduce([], &find_issues(&1, &2, issue_meta))
|> Enum.reverse()
end
for type <- @token_types do
defp find_issues(
{unquote(type), {line_no, column, _}, token},
issues,
issue_meta
) do
case safe_atom_name(token) do
nil ->
issues
atom ->
[issue_for(issue_meta, atom, line_no, column) | issues]
end
end
end
defp find_issues(_token, issues, _issue_meta) do
issues
end
# "safe atom" here refers to a quoted atom not containing an interpolation
defp safe_atom_name(token) when is_list(token) do
if Enum.all?(token, &is_binary/1) do
token
|> Enum.join()
|> safe_atom_name()
end
end
defp safe_atom_name(token) when is_binary(token) do
':#{token}'
|> :elixir_tokenizer.tokenize(1, [])
|> safe_atom_name(token)
end
defp safe_atom_name(_), do: nil
# Elixir >= 1.6.0
defp safe_atom_name({:ok, [{:atom, {_, _, _}, atom} | _]}, token) do
if token == Atom.to_string(atom) do
atom
end
end
# Elixir <= 1.5.x
defp safe_atom_name({:ok, _, _, [{:atom, _, atom} | _]}, token) do
if token == Atom.to_string(atom) do
atom
end
end
defp issue_for(issue_meta, atom, line_no, column) do
trigger = ~s[:"#{atom}"]
format_issue(
issue_meta,
message: "Use unquoted atom `#{inspect(atom)}` rather than quoted atom `#{trigger}`.",
trigger: trigger,
line_no: line_no,
column: column
)
end
end
|
lib/credo/check/readability/prefer_unquoted_atoms.ex
| 0.520984
| 0.421403
|
prefer_unquoted_atoms.ex
|
starcoder
|
defmodule OpenTelemetry.Span do
@moduledoc """
This module contains macros for Span operations that update the active current Span in the current process.
An example of creating an Event and adding it to the current Span:
require OpenTelemetry.Span
...
event = "ecto.query"
ecto_attributes = OpenTelemetry.event([{"query", query}, {"total_time", total_time}])
OpenTelemetry.Span.add_event(event, ecto_attributes)
...
A Span represents a single operation within a trace. Spans can be nested to form a trace tree.
Each trace contains a root span, which typically describes the end-to-end latency and, optionally,
one or more sub-spans for its sub-operations.
Spans encapsulate:
- The span name
- An immutable SpanContext (`t:OpenTelemetry.span_ctx/0`) that uniquely identifies the Span
- A parent Span in the form of a Span (`t:OpenTelemetry.span/0`), SpanContext (`t:OpenTelemetry.span_ctx/0`), or `undefined`
- A start timestamp
- An end timestamp
- An ordered mapping of Attributes (`t:OpenTelemetry.attributes/0`)
- A list of Links to other Spans (`t:OpenTelemetry.link/0`)
- A list of timestamped Events (`t:OpenTelemetry.event/0`)
- A Status (`t:OpenTelemetry.status/0`)
"""
@doc """
Get the SpanId of a Span.
"""
@spec span_id(OpenTelemetry.span_ctx()) :: OpenTelemetry.span_id()
defdelegate span_id(span), to: :otel_span
@doc """
Get the TraceId of a Span.
"""
@spec trace_id(OpenTelemetry.span_ctx()) :: OpenTelemetry.trace_id()
defdelegate trace_id(span), to: :otel_span
@doc """
Get the Tracestate of a Span.
"""
@spec tracestate(OpenTelemetry.span_ctx()) :: OpenTelemetry.tracestate()
defdelegate tracestate(span), to: :otel_span
@doc """
End the Span. Sets the end timestamp for the currently active Span. This has no effect on any
child Spans that may exist of this Span.
The Span Context is returned with `is_recording` set to `false`.
"""
defdelegate end_span(span_ctx), to: :otel_span
@doc """
Set an attribute with key and value on the currently active Span.
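For example:
    OpenTelemetry.Span.set_attribute(span_ctx, "http.status_code", 200)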
"""
@spec set_attribute(OpenTelemetry.span_ctx(), OpenTelemetry.attribute_key(), OpenTelemetry.attribute_value()) :: boolean()
defdelegate set_attribute(span_ctx, key, value), to: :otel_span
@doc """
Add a list of attributes to the currently active Span.
"""
@spec set_attributes(OpenTelemetry.span_ctx(), OpenTelemetry.attributes()) :: boolean()
defdelegate set_attributes(span_ctx, attributes), to: :otel_span
@doc """
Add an event to the currently active Span.
"""
@spec add_event(OpenTelemetry.span_ctx(), OpenTelemetry.event_name(), OpenTelemetry.attributes()) :: boolean()
defdelegate add_event(span_ctx, event, attributes), to: :otel_span
@doc """
Add a list of events to the currently active Span.
"""
@spec add_events(OpenTelemetry.span_ctx(), [OpenTelemetry.event()]) :: boolean()
defdelegate add_events(span_ctx, events), to: :otel_span
@doc """
Sets the Status of the currently active Span.
If used, this will override the default Span Status, which is `Ok`.
"""
@spec set_status(OpenTelemetry.span_ctx(), OpenTelemetry.status()) :: boolean()
defdelegate set_status(span_ctx, status), to: :otel_span
@doc """
Updates the Span name.
It is highly discouraged to update the name of a Span after its creation. Span name is
often used to group, filter and identify the logical groups of spans. And often, filtering
logic will be implemented before the Span creation for performance reasons. Thus the name
update may interfere with this logic.
The function name is called UpdateName to differentiate this function from the regular
property setter. It emphasizes that this operation signifies a major change for a Span
and may lead to re-calculation of sampling or filtering decisions made previously
depending on the implementation.
"""
@spec update_name(OpenTelemetry.span_ctx(), String.t()) :: boolean()
defdelegate update_name(span_ctx, name), to: :otel_span
end
|
apps/opentelemetry_api/lib/open_telemetry/span.ex
| 0.849815
| 0.458591
|
span.ex
|
starcoder
|
defmodule PhoenixLiveReact do
@moduledoc """
Render React.js components in Phoenix LiveView views.
"""
import Phoenix.HTML
import Phoenix.HTML.Tag
@doc """
Render a react component in a live view.
```
<%= PhoenixLiveReact.live_react_component("Components.MyComponent", %{name: "Bob"}, id: "my-component-1") %>
```
## Events
To push events back to the liveview the `pushEvent` and `pushEventTo` functions from
Phoenix LiveView are passed as props to the component.
* pushEvent(event, payload) - push an event from the client to the LiveView
* pushEventTo(selector, event, payload) - push an event from the client to a specific LiveView component
* handleEvent(event, handler) - (phoenix_live_view >= 0.14) receive data directly through liveview `push_event`
```
const { pushEvent, pushEventTo, handleEvent } = this.props;
pushEvent("button_click");
pushEvent("myevent", {"var": "value"});
pushEventTo("#component-1", "do_something")
handleEvent("some-event", (payload) => console.log(payload))
```
## Parameters
- name: String with the module name of the component
- props: Map or keyword list with the props for the react component
- options: Keyword list with render options
It is possible to override both the receiver and the container div's attributes by passing
a keyword list as `:container` and `:receiver` options.
You can also override the tag type with the `:container_tag` and `:receiver_tag` options
By default, LiveView uses `phx-` as the binding prefix. You can override this with the
`:binding_prefix` option.
```
<%=
PhoenixLiveReact.live_react_component("Components.MyComponent", %{},
id: "my-component-1",
container: [class: "my-component"],
container_tag: :p
)
%>
```
"""
def live_react_component(name, props \\ %{}, options \\ [])
def live_react_component(name, props_list, options) when is_list(props_list) do
live_react_component(name, Map.new(props_list), options)
end
def live_react_component(name, props, options) do
html_escape([
receiver_element(name, props, options),
container_element(options)
])
end
defp receiver_element(name, props, options) do
attr = Keyword.get(options, :receiver, [])
tag = Keyword.get(options, :receiver_tag, :div)
binding_prefix = Keyword.get(options, :binding_prefix, "phx-")
default_attr = [
style: "display: none;",
id: Keyword.get(options, :id),
data: [
live_react_class: name,
live_react_props: Jason.encode!(props),
live_react_merge: options[:merge_props] == true
],
"#{binding_prefix}hook": "LiveReact"
]
content_tag(tag, "", Keyword.merge(default_attr, attr))
end
defp container_element(options) do
attr = Keyword.get(options, :container, [])
tag = Keyword.get(options, :container_tag, :div)
id = Keyword.get(options, :id)
binding_prefix = Keyword.get(options, :binding_prefix, "phx-")
default_attr = ["#{binding_prefix}update": "ignore", id: id]
content_tag(tag, "", Keyword.merge(default_attr, attr))
end
end
|
lib/phoenix_live_react.ex
| 0.784979
| 0.716144
|
phoenix_live_react.ex
|
starcoder
|
defmodule Framer do
@moduledoc ~S"""
Module contains helper functions to resize iodata streams and lists.
The two main functions are:
- `resize_stream/2` for resizing an iodata stream
- `resize/2` for resizing iodata
## Examples
Resizing a iodata stream:
iex> stream = ["The brown", " fox", ["that ", "jumped"], " up."]
iex> Framer.resize_stream(stream, 5) |> Enum.to_list()
[["The b"], ["rown", " "], ["fox", "th"], ["at ", "ju"], ["mped", " "], ["up."]]
Resizing iodata:
iex> enum = ["Hello ", "World"]
iex> Framer.resize(enum, 4)
[["Hell"], ["o ", "Wo"], ["rld"]]
"""
@moduledoc since: "0.1.0"
@doc ~S"""
Resizes an `iodata` stream into a stream of equally sized frames.
The last frame might be smaller.
Returns an iodata stream.
## Example
iex> stream = ["The brown", " fox", ["that ", "jumped"], " up."]
iex> Framer.resize_stream(stream, 5) |> Enum.to_list()
[["The b"], ["rown", " "], ["fox", "th"], ["at ", "ju"], ["mped", " "], ["up."]]
"""
@spec resize_stream(Enumerable.t(), pos_integer) :: Enumerable.t()
def resize_stream(iodata, frame_size) do
iodata
|> Stream.concat([:finito])
|> Stream.transform(
fn -> [] end,
fn
:finito, acc -> {[acc], []}
el, acc -> next_frames([acc, el], frame_size)
end,
fn _ -> :ok end
)
end
@doc ~S"""
Resizes `iodata` into a new `iolist` with elements of size `frame_size`.
Returns a new iolist.
## Example
iex> iodata = [?h, "el", ["l", [?o]], "world"]
iex> Framer.resize(iodata, 3)
[[?h, "el"], ["l", ?o, "w"], ["orl"], ["d"]]
"""
@spec resize(iodata, pos_integer) :: iolist
def resize(iodata, frame_size) do
case iodata |> next_frames(frame_size) do
{[], rem} -> [rem]
{frames, []} -> frames
{frames, rem} -> frames ++ [rem]
end
end
@doc ~S"""
Similar to `resize/2`, except that it returns a tuple with the frames and the
remainder.
## Example
iex> iodata = [?h, "el", ["l", [?o]], "world"]
iex> Framer.next_frames(iodata, 3)
{[[?h, "el"], ["l", ?o, "w"], ["orl"]], ["d"]}
"""
@spec next_frames(iodata, pos_integer) :: {iolist, iodata}
def next_frames(iodata, frame_size) do
next_frames(iodata, [], frame_size)
end
defp next_frames(iodata, acc, frame_size) do
case next_frame(iodata, frame_size) do
{[], rem} -> {acc |> Enum.reverse(), rem}
{frame, rem} -> next_frames(rem, [frame | acc], frame_size)
end
end
@doc ~S"""
Returns a tuple containing the first frame of `frame_size` taken off
iodata and the remainder of the list.
## Example
iex> iodata = [?h, "el", ["l", [?o]], "world"]
iex> Framer.next_frame(iodata, 3)
{[?h, "el"], [["l", [?o]], "world"]}
When the whole iodata fits within a single frame, no complete frame is cut:
an empty frame list is returned and the entire input becomes the remainder.
iex> iodata = ["h", "ello"]
iex> Framer.next_frame(iodata, 10)
{[], ["h", "ello"]}
"""
@spec next_frame(iodata, pos_integer) :: {[any], iodata}
def next_frame(iodata, frame_size) when is_binary(iodata), do: next_frame([iodata], frame_size)
def next_frame(iodata, frame_size) do
{frame, rem} = next_frame(iodata, [], frame_size)
{frame |> Enum.reverse(), rem}
end
defp next_frame(iodata, leftover, frame_size)
defp next_frame(iodata, leftover, frame_size) when is_binary(iodata),
do: next_frame([iodata], leftover, frame_size)
defp next_frame([], leftover, _), do: {[], leftover |> Enum.reverse()}
defp next_frame([element | rest], leftover, frame_size) when is_list(element) do
case next_frame(element, leftover, frame_size) do
{[], leftover} -> next_frame(rest, leftover |> Enum.reverse(), frame_size)
{frame, sub_rest} -> {frame, [sub_rest, rest]}
end
end
defp next_frame([element | rest], leftover, frame_size) do
leftover_size = IO.iodata_length(leftover)
total_size = leftover_size + IO.iodata_length([element])
cond do
total_size == frame_size ->
{[element | leftover], rest}
total_size < frame_size ->
next_frame(rest, [element | leftover], frame_size)
total_size > frame_size ->
chunk_size = frame_size - leftover_size
<<chunk::binary-size(chunk_size), rem::binary>> = IO.iodata_to_binary([element])
{[chunk | leftover], [rem | rest]}
end
end
end
|
lib/framer.ex
| 0.892615
| 0.706811
|
framer.ex
|
starcoder
|