code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule Terp.Evaluate.Function do
  @moduledoc """
  Function/anonymous function definition and application.
  Functions in `terp` are defined with the `lambda` keyword.
  The list of arguments must be quoted; multiple arguments can be specified.
  Terp functions are curried out-of-the-box.
  ## Examples
  iex> "((lambda '(:x) (* :x :x)) 5)"
  ...> |> Terp.eval()
  25
  iex> "((lambda '(:x :y) (* :x :y)) 5 9)"
  ...> |> Terp.eval()
  45
  """
  alias Terp.Evaluate

  @doc """
  Defines an anonymous function.

  Expects a two-element list: a `RoseTree` whose children are the quoted
  argument names, and the body expression. `env` is the enclosing
  environment, represented as a lookup function (name -> value).
  Returns a (curried) chain of one-argument Elixir closures.
  """
  def lambda([%RoseTree{children: arguments} | [body | []]], env) do
    # Unwrap the bare argument names from their RoseTree nodes.
    xs = Enum.map(arguments, fn x -> x.node end)
    lambda_helper(xs, body, env)
  end

  # Base case: the last argument; evaluate the body in an environment
  # where `argument` resolves to the applied value and everything else
  # falls through to the enclosing `env`.
  defp lambda_helper([argument | []], body, env) do
    fn arg ->
      Evaluate.eval_expr(body, fn y -> if argument == y, do: arg, else: env.(y) end)
    end
  end
  # Recursive case (currying): each argument yields a one-argument
  # closure whose environment shadows `argument` with the applied value.
  defp lambda_helper([argument | arguments], body, env) do
    fn arg ->
      lambda_helper(arguments, body, fn y -> if argument == y, do: arg, else: env.(y) end)
    end
  end

  @doc """
  Apply a list of arguments to a lambda function one at a time.
  """
  def apply_lambda(func, [], _env), do: func.()
  def apply_lambda(func, [arg | []], _env), do: func.(arg)
  # Curried application: apply the head argument, recurse with the rest.
  def apply_lambda(func, [arg | args], env) do
    apply_lambda(func.(arg), args, env)
  end

  @doc """
  Y = λf.(λx.f (x x))(λx.f (x x))
  ## Examples
  iex> Terp.Evaluate.Function.y(fn f -> fn 0 -> 1; x -> x * f.(x - 1) end end).(5)
  120
  """
  # Y combinator: ties the recursive knot for letrec-bound functions.
  def y(f) do
    f.(fn x ->
      y(f).(x)
    end)
  end

  # Evaluates a recursive binding `[name, bound]`. The recursive call
  # inside `bound` is rewritten to the fresh variable :z, wrapped in a
  # lambda, and fed through the Y combinator to produce a recursive fn.
  def letrec([name | [bound | []]], env) do
    # Make a new function wrapping bound, replacing the recursive call with a bound variable, :z
    recursive_fn = :__apply
    |> RoseTree.new([
      RoseTree.new(:__lambda),
      RoseTree.new(:__quote, [:z]),
      RoseTree.update_node(bound, name.node, :z)
    ])
    |> Evaluate.eval_expr(env)
    |> y()
    # Evaluate `name` in an environment where it resolves to the
    # recursive function; all other lookups fall through to `env`.
    Evaluate.eval_expr(name,
      fn y ->
        fn arg ->
          if arg == y, do: recursive_fn, else: env.(arg)
        end
      end
    )
  end
end
|
lib/evaluate/function.ex
| 0.779238
| 0.591664
|
function.ex
|
starcoder
|
defmodule PowAssent.Phoenix.ViewHelpers do
  @moduledoc """
  View helpers to render authorization links.
  """
  alias PowAssent.Plug
  alias Phoenix.{HTML, HTML.Link}
  alias PowAssent.Phoenix.AuthorizationController

  @doc """
  Generates list of authorization links for all configured providers.
  The list of providers will be fetched from the configuration, and
  `authorization_link/2` will be called on each.
  If a user is assigned to the conn, the authorized providers for a user will
  be looked up with `PowAssent.Plug.providers_for_current_user/1`.
  `deauthorization_link/2` will be used for any already authorized providers.
  """
  @spec provider_links(Conn.t()) :: [HTML.safe()]
  def provider_links(conn) do
    authorized = Plug.providers_for_current_user(conn)

    for provider <- Plug.available_providers(conn) do
      if provider in authorized,
        do: deauthorization_link(conn, provider),
        else: authorization_link(conn, provider)
    end
  end

  @doc """
  Generates an authorization link for a provider.
  The link is used to sign up or register a user using a provider. If
  `:invited_user` is assigned to the conn, the invitation token will be passed
  on through the URL query params.
  """
  @spec authorization_link(Conn.t(), atom()) :: HTML.safe()
  def authorization_link(conn, provider) do
    # The provider name is injected into the conn params so the message
    # callback can interpolate it.
    provider_conn = %{conn | params: %{"provider" => provider}}
    msg = AuthorizationController.extension_messages(conn).login_with_provider(provider_conn)

    path =
      AuthorizationController.routes(conn).path_for(
        conn,
        AuthorizationController,
        :new,
        [provider],
        authorization_link_query_params(conn)
      )

    Link.link(msg, to: path)
  end

  # Carry the invitation token through the URL when an invited user is assigned.
  defp authorization_link_query_params(conn) do
    case conn do
      %{assigns: %{invited_user: %{invitation_token: token}}} -> [invitation_token: token]
      _conn -> []
    end
  end

  @doc """
  Generates a provider deauthorization link.
  The link is used to remove authorization with the provider.
  """
  @spec deauthorization_link(Conn.t(), atom()) :: HTML.safe()
  def deauthorization_link(conn, provider) do
    provider_conn = %{conn | params: %{"provider" => provider}}
    msg = AuthorizationController.extension_messages(conn).remove_provider_authentication(provider_conn)
    path = AuthorizationController.routes(conn).path_for(conn, AuthorizationController, :delete, [provider])

    Link.link(msg, to: path, method: :delete)
  end
end
|
lib/pow_assent/phoenix/views/view_helpers.ex
| 0.808521
| 0.408483
|
view_helpers.ex
|
starcoder
|
defmodule Nixa.NaiveBayes.Gaussian do
  @moduledoc """
  Implements the Gaussian Naive Bayes algorithm for continuous feature domains
  """
  import Nixa.NaiveBayes.Shared
  import Nixa.Stats
  import Nx.Defn

  # class_probs: tensor of class prior probabilities
  # means/stds: per-class lists of per-feature mean / std tensors
  defstruct [
    class_probs: nil,
    means: nil,
    stds: nil
  ]

  @doc """
  Train a model using the provided inputs and targets

  Options:
    * `:class_probability` - either an explicit list of priors, or a
      strategy atom passed to `calc_class_prob/3` (default `:weighted`)
    * `:alpha` - smoothing parameter forwarded to `calc_class_prob/3`
      (default 1.0)
  """
  def fit(inputs, targets, opts \\ []) do
    class_probability = Keyword.get(opts, :class_probability, :weighted)
    alpha = Keyword.get(opts, :alpha, 1.0)
    # Priors may be supplied directly as a list; otherwise estimate them
    # from the targets.
    class_probs = if is_list(class_probability),
      do: class_probability,
      else: calc_class_prob(targets, class_probability, alpha)
    num_classes = class_probs |> Nx.size() |> Nx.to_scalar()
    # Compute per-class feature statistics concurrently, one task per class.
    {means, stds} = 0..(num_classes - 1)
    |> Enum.map(fn c -> Task.async(fn -> calc_feature_probs(c, inputs, targets) end) end)
    |> Task.await_many(:infinity)
    |> Enum.unzip()
    %__MODULE__{
      class_probs: class_probs,
      means: means,
      stds: stds,
    }
  end

  @doc """
  Predict classes using a trained model
  """
  def predict(%__MODULE__{} = model, inputs) do
    inputs
    |> Enum.map(fn input -> predict_one(model, input) end)
  end

  ### Internal functions

  # Scores each class as prior * product of per-feature Gaussian
  # likelihoods, then returns the argmax class index as a rank-1 tensor.
  defp predict_one(model, input) do
    model.class_probs
    |> Nx.to_flat_list()
    |> Enum.zip(model.means)
    |> Enum.zip(model.stds)
    |> Enum.map(fn {{ck, means}, stds} -> calc_input_probs(input, ck, means, stds, :math.pi) end)
    |> Nx.stack()
    |> Nx.argmax()
    |> Nx.new_axis(0)
  end

  # Mean and standard deviation of each feature (axis 0) over the inputs
  # whose target equals class `c`.
  defp calc_feature_probs(c, inputs, targets) do
    t_inputs = inputs
    |> Enum.zip(targets)
    |> Enum.filter(fn {_input, target} -> target |> Nx.squeeze() |> Nx.to_scalar() == c end)
    |> Enum.unzip()
    |> elem(0)
    |> Nx.concatenate()
    means = Nx.mean(t_inputs, axes: [0])
    stds = std(t_inputs, axes: [0])
    {means, stds}
  end

  # Gaussian pdf per feature, multiplied over axis 0 and weighted by the
  # class prior `ck`. NOTE(review): `pi` is presumably passed as a plain
  # argument because `:math.pi/0` is not usable inside a defn body —
  # confirm against the Nx version in use.
  defnp calc_input_probs(x, ck, means, stds, pi) do
    (1.0 / Nx.sqrt(2 * pi * Nx.power(stds, 2)) * Nx.exp(-0.5 * (((x - means) / stds) |> Nx.power(2))) |> Nx.product(axes: [0])) * ck
  end
end
|
lib/nixa/naive_bayes/gaussian.ex
| 0.861989
| 0.626367
|
gaussian.ex
|
starcoder
|
defprotocol SimpleStatEx.Query.Stat do
  @moduledoc """
  Interface to query stats from the configured Repo
  """

  @doc """
  Insert a new stat or new stats to the means of storage
  """
  # Implementations return {:ok, stat} on success.
  def insert(stat)

  @doc """
  Retrieve a stat or set of stats from the means of storage
  """
  # `stat_query` carries paging information (offset/limit).
  def retrieve(stat, stat_query)

  @doc """
  Retrieve all stats in a categorized set
  """
  def all(stat)
end
defimpl SimpleStatEx.Query.Stat, for: SimpleStatEx.SimpleStat do
  require Ecto.Query
  alias Ecto.Query
  alias SimpleStatEx.{SimpleStat, SimpleStatQuery}
  alias SimpleStatEx.Util.DataAccess

  # Upsert: on a (category, time) conflict, increment the stored count
  # instead of inserting a duplicate row.
  def insert(%SimpleStat{category: category, time: time, count: count} = simple_stat) do
    simple_stat
    |> DataAccess.repo().insert(
      conflict_target: [:category, :time],
      on_conflict:
        SimpleStat
        |> Query.where(category: ^category, time: ^time)
        |> Query.update(inc: [count: ^count])
    )
  end

  # Page through stats for a category/period, newest first.
  def retrieve(%SimpleStat{category: category, period: period}, %SimpleStatQuery{offset: offset, limit: limit}) do
    stat_query()
    |> Query.where(category: ^category, period: ^period)
    |> Query.limit(^limit)
    |> Query.offset(^offset)
    |> run()
  end

  # Fetch every stat in a category, newest first.
  def all(%SimpleStat{category: category}) do
    stat_query()
    |> Query.where(category: ^category)
    |> run()
  end

  # Shared projection of the stat fields exposed to callers.
  # (Previously duplicated verbatim in retrieve/2 and all/1.)
  defp stat_query do
    Query.select(SimpleStat, [s], %{category: s.category, period: s.period, count: s.count, time: s.time, updated_at: s.updated_at})
  end

  # Order newest-first, execute, and normalize the repo result into
  # {:ok, rows} | {:error, reason}.
  defp run(query) do
    case query |> Query.order_by([s], desc: s.time) |> DataAccess.repo().all() do
      {:error, reason} ->
        {:error, reason}
      result ->
        {:ok, result}
    end
  end
end
defimpl SimpleStatEx.Query.Stat, for: SimpleStatEx.SimpleStatHolder do
  alias SimpleStatEx.{SimpleStat, SimpleStatHolder, SimpleStatQuery}
  alias SimpleStatEx.Server.SimpleStatSet

  # Hand the stat to the stat-set server owning this category bucket;
  # the server's own return value is intentionally discarded.
  def insert(%SimpleStatHolder{simple_stat: %SimpleStat{} = stat, category_bucket_pid: bucket}) do
    _ = SimpleStatSet.add_stat(bucket, stat)
    {:ok, stat}
  end

  # Delegate paged retrieval to the bucket's stat-set server.
  def retrieve(%SimpleStatHolder{simple_stat: %SimpleStat{} = stat, category_bucket_pid: bucket}, %SimpleStatQuery{} = query) do
    SimpleStatSet.query_stats(bucket, stat, query)
  end

  # Delegate full retrieval to the bucket's stat-set server.
  def all(%SimpleStatHolder{category_bucket_pid: bucket}) do
    SimpleStatSet.get_stats(bucket)
  end
end
|
lib/query/protocol_stat.ex
| 0.824603
| 0.568835
|
protocol_stat.ex
|
starcoder
|
defmodule Mix.Shell.Process do
  @moduledoc """
  This is a Mix shell that uses the current process mailbox
  for communication instead of IO.
  When a developer calls `info("hello")`, the following
  message will be sent to the current process:
  { :mix_shell, :info, ["hello"] }
  This is mainly useful in tests, allowing us to assert
  if given messages were received or not. Since we need
  to guarantee a clean slate between tests, there
  is also a `flush/1` function responsible for flushing all
  `:mix_shell` related messages from the process inbox.
  """
  @behaviour Mix.Shell

  @doc """
  Flush all `:mix_shell` and `:mix_shell_input` messages from the current process.
  If a callback is given, it is invoked for each received message.
  ## Examples
  flush &IO.inspect(&1)
  """
  # Drains the mailbox recursively; the 0-timeout `after` clause makes
  # this return :done as soon as no matching message is queued.
  def flush(callback \\ fn(x) -> x end) do
    receive do
      { :mix_shell, _, _ } = message ->
        callback.(message)
        flush(callback)
      { :mix_shell_input, _, _ } = message ->
        callback.(message)
        flush(callback)
    after
      0 -> :done
    end
  end

  @doc """
  Executes the given command and fowards its messages to
  the current process.
  """
  # Each chunk of command output arrives as a {:mix_shell, :run, [data]}
  # message.
  def cmd(command) do
    put_app
    Mix.Shell.cmd(command, fn(data) ->
      send self, { :mix_shell, :run, [data] }
    end)
  end

  @doc """
  Forwards the message to the current process.
  """
  def info(message) do
    put_app
    send self, { :mix_shell, :info, [IO.ANSI.escape(message, false)] }
  end

  @doc """
  Forwards the message to the current process.
  """
  def error(message) do
    put_app
    send self, { :mix_shell, :error, [IO.ANSI.escape(message, false)] }
  end

  @doc """
  Forwards the message to the current process.
  It also checks the inbox for an input message matching:
  { :mix_shell_input, :yes?, value }
  If one does not exist, it will abort since there no shell
  process inputs given. Value must be `true` or `false`.
  """
  # The 0-timeout receive means the input message must already be in the
  # mailbox (sent by the test beforehand); otherwise a Mix.Error is raised.
  def yes?(message) do
    put_app
    send self, { :mix_shell, :yes?, [IO.ANSI.escape(message, false)] }
    receive do
      { :mix_shell_input, :yes?, response } -> response
    after
      0 -> raise Mix.Error, message: "No shell process input given for yes?/1"
    end
  end

  # Emits a one-off "==> app" banner message when Mix says the current
  # app name should be printed.
  defp put_app do
    if Mix.Shell.output_app? do
      send self, { :mix_shell, :info, ["==> #{Mix.project[:app]}"] }
    end
  end
end
|
lib/mix/lib/mix/shell/process.ex
| 0.770335
| 0.480662
|
process.ex
|
starcoder
|
defmodule Timex.Interval do
  @moduledoc """
  This module is used for creating and manipulating date/time intervals.
  ## Examples
  iex> use Timex
  ...> Interval.new(from: ~D[2016-03-03], until: [days: 3])
  %#{__MODULE__}{from: ~N[2016-03-03 00:00:00], left_open: false, right_open: true, step: [days: 1], until: ~N[2016-03-06 00:00:00]}
  iex> use Timex
  ...> Interval.new(from: ~D[2016-03-03], until: ~N[2016-03-10 01:23:45])
  %Timex.Interval{from: ~N[2016-03-03 00:00:00], left_open: false, right_open: true, step: [days: 1], until: ~N[2016-03-10 01:23:45]}
  iex> use Timex
  ...> ~N[2016-03-04 12:34:56] in Interval.new(from: ~D[2016-03-03], until: [days: 3])
  true
  iex> use Timex
  ...> ~D[2016-03-01] in Interval.new(from: ~D[2016-03-03], until: [days: 3])
  false
  iex> use Timex
  ...> Interval.overlaps?(Interval.new(from: ~D[2016-03-01], until: [days: 5]), Interval.new(from: ~D[2016-03-03], until: [days: 3]))
  true
  iex> use Timex
  ...> Interval.overlaps?(Interval.new(from: ~D[2016-03-01], until: [days: 1]), Interval.new(from: ~D[2016-03-03], until: [days: 3]))
  false
  """
  alias Timex.Duration

  defmodule FormatError do
    @moduledoc """
    Thrown when an error occurs with formatting an Interval
    """
    defexception message: "Unable to format interval!"
    def exception(message: message) do
      %FormatError{message: message}
    end
  end

  @type t :: %__MODULE__{}
  @type valid_step_unit ::
          :microseconds
          | :milliseconds
          | :seconds
          | :minutes
          | :hours
          | :days
          | :weeks
          | :months
          | :years
  @type valid_interval_step :: {valid_step_unit, integer}
  @type valid_interval_steps :: [valid_interval_step]

  @enforce_keys [:from, :until]
  defstruct from: nil,
            until: nil,
            left_open: false,
            right_open: true,
            step: [days: 1]

  # Step units accepted by new/1 and with_step/2; mirrors valid_step_unit.
  @valid_step_units [
    :microseconds,
    :milliseconds,
    :seconds,
    :minutes,
    :hours,
    :days,
    :weeks,
    :months,
    :years
  ]

  @doc """
  Create a new Interval struct.
  **Note:** By default intervals are left closed, i.e. they include the `from` date/time,
  and exclude the `until` date/time. Put another way, `from <= x < until`. This behavior
  matches that of other popular date/time libraries, such as Joda Time, as well as the SQL
  behavior of the `overlaps` keyword.
  Options:
  - `from`: The date the interval starts at. Should be a `(Naive)DateTime`.
  - `until`: Either a `(Naive)DateTime`, or a time shift that will be applied to the `from` date.
  This value _must_ be greater than `from`, otherwise an error will be returned.
  - `left_open`: Whether the interval is left open. See explanation below.
  - `right_open`: Whether the interval is right open. See explanation below.
  - `step`: The step to use when iterating the interval, defaults to `[days: 1]`
  The terms `left_open` and `right_open` come from the mathematical concept of intervals. You
  can see more detail on the theory [on Wikipedia](https://en.wikipedia.org/wiki/Interval_(mathematics)),
  but it can be more intuitively thought of like so:
  - An "open" bound is exclusive, and a "closed" bound is inclusive
  - So a left-closed interval includes the `from` value, and a left-open interval does not.
  - Likewise, a right-closed interval includes the `until` value, and a right-open interval does not.
  - An open interval is both left and right open, conversely, a closed interval is both left and right closed.
  **Note:** `until` shifts delegate to `Timex.shift`, so the options provided should match its valid options.
  ## Examples
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: ~D[2014-09-29])
  ...> |> Interval.format!("%Y-%m-%d", :strftime)
  "[2014-09-22, 2014-09-29)"
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 7])
  ...> |> Interval.format!("%Y-%m-%d", :strftime)
  "[2014-09-22, 2014-09-29)"
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 7], left_open: true, right_open: false)
  ...> |> Interval.format!("%Y-%m-%d", :strftime)
  "(2014-09-22, 2014-09-29]"
  iex> use Timex
  ...> Interval.new(from: ~N[2014-09-22T15:30:00], until: [minutes: 20], right_open: false)
  ...> |> Interval.format!("%H:%M", :strftime)
  "[15:30, 15:50]"
  """
  @spec new(Keyword.t()) ::
          t
          | {:error, :invalid_until}
          | {:error, :invalid_step}
  def new(options \\ []) do
    # Normalize `from` to a NaiveDateTime; defaults to "now".
    from =
      case Keyword.get(options, :from) do
        nil ->
          Timex.Protocol.NaiveDateTime.now()
        %NaiveDateTime{} = d ->
          d
        d ->
          Timex.to_naive_datetime(d)
      end
    left_open = Keyword.get(options, :left_open, false)
    right_open = Keyword.get(options, :right_open, true)
    step = Keyword.get(options, :step, days: 1)
    # `until` may be a shift keyword list (applied to `from`) or a
    # date/time; defaults to one day after `from`. Timex.shift errors
    # propagate as {:error, _} and are caught by invalid_until?/1 below.
    until =
      case Keyword.get(options, :until, days: 1) do
        {:error, _} = err ->
          err
        x when is_list(x) ->
          Timex.shift(from, x)
        %NaiveDateTime{} = d ->
          d
        d ->
          Timex.to_naive_datetime(d)
      end
    # Validate step and until, and require from < until.
    cond do
      invalid_step?(step) ->
        {:error, :invalid_step}
      invalid_until?(until) ->
        {:error, :invalid_until}
      Timex.compare(until, from) <= 0 ->
        {:error, :invalid_until}
      :else ->
        %__MODULE__{
          from: from,
          until: until,
          step: step,
          left_open: left_open,
          right_open: right_open
        }
    end
  end

  # An {:error, _} result from resolving `until` marks it invalid.
  defp invalid_until?({:error, _}), do: true
  defp invalid_until?(_), do: false

  # A step is valid when every entry is {known_unit, integer}.
  defp invalid_step?([]), do: false
  defp invalid_step?([{unit, n} | rest]) when unit in @valid_step_units and is_integer(n) do
    invalid_step?(rest)
  end
  defp invalid_step?(_), do: true

  @doc """
  Return the interval duration, given a unit.
  When the unit is one of `:seconds`, `:minutes`, `:hours`, `:days`, `:weeks`, `:months`, `:years`, the result is an `integer`.
  When the unit is `:duration`, the result is a `Duration` struct.
  ## Example
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [months: 5])
  ...> |> Interval.duration(:months)
  5
  iex> use Timex
  ...> Interval.new(from: ~N[2014-09-22T15:30:00], until: [minutes: 20])
  ...> |> Interval.duration(:duration)
  Duration.from_minutes(20)
  """
  def duration(%__MODULE__{until: until, from: from}, :duration) do
    Timex.diff(until, from, :microseconds) |> Duration.from_microseconds()
  end
  def duration(%__MODULE__{until: until, from: from}, unit) do
    Timex.diff(until, from, unit)
  end

  @doc """
  Change the step value for the provided interval.
  The step should be a keyword list valid for use with `Timex.Date.shift`.
  ## Examples
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3], right_open: true)
  ...> |> Interval.with_step([days: 1]) |> Enum.map(&Timex.format!(&1, "%Y-%m-%d", :strftime))
  ["2014-09-22", "2014-09-23", "2014-09-24"]
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3], right_open: false)
  ...> |> Interval.with_step([days: 1]) |> Enum.map(&Timex.format!(&1, "%Y-%m-%d", :strftime))
  ["2014-09-22", "2014-09-23", "2014-09-24", "2014-09-25"]
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3], right_open: false)
  ...> |> Interval.with_step([days: 2]) |> Enum.map(&Timex.format!(&1, "%Y-%m-%d", :strftime))
  ["2014-09-22", "2014-09-24"]
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3], right_open: false)
  ...> |> Interval.with_step([days: 3]) |> Enum.map(&Timex.format!(&1, "%Y-%m-%d", :strftime))
  ["2014-09-22", "2014-09-25"]
  """
  @spec with_step(t, valid_interval_step) :: t | {:error, :invalid_step}
  def with_step(%__MODULE__{} = interval, step) do
    if invalid_step?(step) do
      {:error, :invalid_step}
    else
      %__MODULE__{interval | step: step}
    end
  end

  @doc """
  Formats the interval as a human readable string.
  ## Examples
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3])
  ...> |> Interval.format!("%Y-%m-%d %H:%M", :strftime)
  "[2014-09-22 00:00, 2014-09-25 00:00)"
  iex> use Timex
  ...> Interval.new(from: ~D[2014-09-22], until: [days: 3])
  ...> |> Interval.format!("%Y-%m-%d", :strftime)
  "[2014-09-22, 2014-09-25)"
  """
  # Open bounds render with parentheses, closed bounds with brackets,
  # following mathematical interval notation.
  def format(%__MODULE__{} = interval, format, formatter \\ nil) do
    case Timex.format(interval.from, format, formatter) do
      {:error, _} = err ->
        err
      {:ok, from} ->
        case Timex.format(interval.until, format, formatter) do
          {:error, _} = err ->
            err
          {:ok, until} ->
            lopen = if interval.left_open, do: "(", else: "["
            ropen = if interval.right_open, do: ")", else: "]"
            {:ok, "#{lopen}#{from}, #{until}#{ropen}"}
        end
    end
  end

  @doc """
  Same as `format/3`, but raises a `Timex.Interval.FormatError` on failure.
  """
  def format!(%__MODULE__{} = interval, format, formatter \\ nil) do
    case format(interval, format, formatter) do
      {:ok, str} ->
        str
      {:error, e} ->
        raise FormatError, message: "#{inspect(e)}"
    end
  end

  @doc """
  Returns true if the first interval includes every point in time the second includes.
  ## Examples
  iex> #{__MODULE__}.contains?(#{__MODULE__}.new(from: ~D[2018-01-01], until: ~D[2018-01-31]), #{
    __MODULE__
  }.new(from: ~D[2018-01-01], until: ~D[2018-01-30]))
  true
  iex> #{__MODULE__}.contains?(#{__MODULE__}.new(from: ~D[2018-01-01], until: ~D[2018-01-30]), #{
    __MODULE__
  }.new(from: ~D[2018-01-01], until: ~D[2018-01-31]))
  false
  iex> #{__MODULE__}.contains?(#{__MODULE__}.new(from: ~D[2018-01-01], until: ~D[2018-01-10]), #{
    __MODULE__
  }.new(from: ~D[2018-01-05], until: ~D[2018-01-15]))
  false
  """
  @spec contains?(__MODULE__.t(), __MODULE__.t()) :: boolean()
  def contains?(%__MODULE__{} = a, %__MODULE__{} = b) do
    # Containment in terms of the effective (bound-adjusted) endpoints.
    Timex.compare(min(a), min(b)) <= 0 && Timex.compare(max(a), max(b)) >= 0
  end

  @doc """
  Returns true if the first interval shares any point(s) in time with the second.
  ## Examples
  iex> #{__MODULE__}.overlaps?(#{__MODULE__}.new(from: ~D[2016-03-04], until: [days: 1]), #{
    __MODULE__
  }.new(from: ~D[2016-03-03], until: [days: 3]))
  true
  iex> #{__MODULE__}.overlaps?(#{__MODULE__}.new(from: ~D[2016-03-07], until: [days: 1]), #{
    __MODULE__
  }.new(from: ~D[2016-03-03], until: [days: 3]))
  false
  """
  @spec overlaps?(__MODULE__.t(), __MODULE__.t()) :: boolean()
  def overlaps?(%__MODULE__{} = a, %__MODULE__{} = b) do
    cond do
      Timex.compare(max(a), min(b)) < 0 ->
        # a is completely before b
        false
      Timex.compare(max(b), min(a)) < 0 ->
        # b is completely before a
        false
      :else ->
        # a and b have overlapping elements
        true
    end
  end

  @doc false
  # Effective lower endpoint: `from` when left-closed; otherwise `from`
  # shifted forward by one step (raises FormatError on a bad step unit).
  def min(interval)
  def min(%__MODULE__{from: from, left_open: false}), do: from
  def min(%__MODULE__{from: from, step: step}) do
    case Timex.shift(from, step) do
      {:error, {:unknown_shift_unit, unit}} ->
        raise FormatError, message: "Invalid step unit for interval: #{inspect(unit)}"
      d ->
        d
    end
  end

  @doc false
  # Effective upper endpoint: `until` when right-closed; otherwise the
  # last representable instant before `until` (one microsecond earlier).
  def max(interval)
  def max(%__MODULE__{until: until, right_open: false}), do: until
  def max(%__MODULE__{until: until}), do: Timex.shift(until, microseconds: -1)

  defimpl Enumerable do
    alias Timex.Interval

    # Iteration state is {current, end, right_open?, step}; each element
    # is the current date, advanced by `step` until the end is reached.
    def reduce(%Interval{until: until, right_open: open?, step: step} = i, acc, fun) do
      do_reduce({Interval.min(i), until, open?, step}, acc, fun)
    end
    defp do_reduce(_state, {:halt, acc}, _fun),
      do: {:halted, acc}
    defp do_reduce(state, {:suspend, acc}, fun),
      do: {:suspended, acc, &do_reduce(state, &1, fun)}
    defp do_reduce({current_date, end_date, right_open, step}, {:cont, acc}, fun) do
      if has_interval_ended?(current_date, end_date, right_open) do
        {:done, acc}
      else
        case Timex.shift(current_date, step) do
          {:error, {:unknown_shift_unit, unit}} ->
            raise FormatError, message: "Invalid step unit for interval: #{inspect(unit)}"
          {:error, err} ->
            raise FormatError,
              message: "Failed to shift to next element in interval: #{inspect(err)}"
          next_date ->
            do_reduce({next_date, end_date, right_open, step}, fun.(current_date, acc), fun)
        end
      end
    end

    # A right-open interval excludes the end date; right-closed includes it.
    defp has_interval_ended?(current_date, end_date, _right_open = true),
      do: Timex.compare(current_date, end_date) >= 0
    defp has_interval_ended?(current_date, end_date, _right_open = false),
      do: Timex.compare(current_date, end_date) > 0

    # Membership honors the open/closed flags directly (no stepping).
    def member?(%Interval{} = interval, value) do
      result =
        cond do
          before?(interval, value) ->
            false
          after?(interval, value) ->
            false
          :else ->
            true
        end
      {:ok, result}
    end
    defp before?(%Interval{from: from, left_open: true}, value),
      do: Timex.compare(value, from) <= 0
    defp before?(%Interval{from: from, left_open: false}, value),
      do: Timex.compare(value, from) < 0
    defp after?(%Interval{until: until, right_open: true}, value),
      do: Timex.compare(value, until) >= 0
    defp after?(%Interval{until: until, right_open: false}, value),
      do: Timex.compare(value, until) > 0

    # Counting/slicing fall back to the default reduce-based algorithms.
    def count(_interval) do
      {:error, __MODULE__}
    end
    def slice(_interval) do
      {:error, __MODULE__}
    end
  end
end
|
lib/interval/interval.ex
| 0.912076
| 0.674091
|
interval.ex
|
starcoder
|
use Croma
defmodule RaftFleet.RecentlyRemovedGroups do
  # Tracks consensus groups that were recently removed, indexed by a
  # monotonically increasing integer, so that each node can be told which
  # groups disappeared since it last asked. Per-node progress is recorded
  # in `active_nodes` and old entries are pruned once every node has
  # caught up past them.
  alias RaftFleet.NodesPerZone

  defmodule NodesMap do
    # node -> {last_touched_time, last_acknowledged_index | nil}
    defmodule Pair do
      use Croma.SubtypeOfTuple, elem_modules: [Croma.PosInteger, Croma.TypeGen.nilable(Croma.PosInteger)]
    end
    use Croma.SubtypeOfMap, key_module: Croma.Atom, value_module: Pair
  end
  defmodule IndexToGroupName do
    use Croma.SubtypeOfMap, key_module: Croma.PosInteger, value_module: Croma.Atom
  end
  defmodule GroupNameToIndices do
    use Croma.SubtypeOfMap, key_module: Croma.Atom, value_module: Croma.TypeGen.list_of(Croma.PosInteger)
  end

  use Croma.Struct, fields: [
    active_nodes: NodesMap, # This field contains not only "currently active nodes" but also "nodes that have been active until recently"
    min_index: Croma.TypeGen.nilable(Croma.PosInteger),
    max_index: Croma.TypeGen.nilable(Croma.PosInteger),
    index_to_group: IndexToGroupName,
    group_to_indices: GroupNameToIndices,
  ]

  defun empty() :: t do
    %__MODULE__{active_nodes: %{}, min_index: nil, max_index: nil, index_to_group: %{}, group_to_indices: %{}}
  end

  # Records a newly removed group under the next index. A group may
  # appear under several indices if removed more than once.
  defun add(%__MODULE__{min_index: min, max_index: max, index_to_group: i2g, group_to_indices: g2is} = t, group :: atom) :: t do
    case {min, max} do
      {nil, nil} -> %__MODULE__{t | min_index: 1, max_index: 1, index_to_group: %{1 => group}, group_to_indices: %{group => [1]}}
      _ ->
        i = max + 1
        new_i2g = Map.put(i2g, i, group)
        new_g2is = Map.update(g2is, group, [i], &[i | &1])
        %__MODULE__{t | max_index: i, index_to_group: new_i2g, group_to_indices: new_g2is}
    end
  end

  # True while some node has not yet acknowledged an index at which this
  # group was removed (i.e. its cleanup is still in progress).
  defun cleanup_ongoing?(%__MODULE__{min_index: min, group_to_indices: g2is}, group :: atom) :: boolean do
    Map.get(g2is, group, [])
    |> Enum.any?(fn i -> min < i end)
  end

  # `cancel/2` is not used anymore; just kept here for backward compatibility (i.e. for hot code upgrade).
  # Should be removed in a future release.
  defun cancel(%__MODULE__{index_to_group: i2g, group_to_indices: g2is} = t, group :: atom) :: t do
    {is, new_g2is} = Map.pop(g2is, group, [])
    new_i2g = Enum.reduce(is, i2g, fn(i, m) -> Map.delete(m, i) end)
    %__MODULE__{t | index_to_group: new_i2g, group_to_indices: new_g2is}
  end

  # Returns the group names removed since `node_from` last acknowledged,
  # together with the index the node should acknowledge next time
  # ({[], nil} when there is nothing new).
  defun names_for_node(%__MODULE__{active_nodes: nodes, min_index: min, max_index: max, index_to_group: i2g}, node_from :: node) :: {[atom], nil | pos_integer} do
    case min do
      nil -> {[], nil}
      _ ->
        # Unknown nodes (or ones without an ack) start from the oldest
        # retained index.
        index_from =
          case Map.get(nodes, node_from) do
            nil -> min
            {_t, nil } -> min
            {_t, index} -> index + 1
          end
        if index_from <= max do
          # Indices removed by the deprecated cancel/2 yield nil; drop them.
          names = Enum.map(index_from .. max, fn i -> Map.get(i2g, i) end) |> Enum.reject(&is_nil/1)
          {names, max}
        else
          {[], nil}
        end
    end
  end

  # Periodic maintenance: refresh active-node timestamps, drop nodes not
  # seen since `now - wait_time`, record the caller's acknowledged index,
  # and advance `min_index` past fully-acknowledged entries.
  defun update(t :: t,
               npz :: NodesPerZone.t,
               node_from :: node,
               index_or_nil :: nil | pos_integer,
               now :: pos_integer,
               wait_time :: pos_integer) :: t do
    t
    |> touch_currently_active_nodes(npz, now)
    |> forget_about_nodes_that_had_been_deactivated(now - wait_time)
    |> set_node_index(node_from, index_or_nil)
    |> proceed_min_index()
  end

  # Stamps every currently active node with `now`, preserving any
  # previously acknowledged index.
  defp touch_currently_active_nodes(%__MODULE__{active_nodes: nodes} = t, npz, now) do
    currently_active_nodes = Enum.flat_map(npz, fn {_z, ns} -> ns end)
    new_nodes =
      Enum.reduce(currently_active_nodes, nodes, fn(n, ns) ->
        index_or_nil =
          case Map.get(ns, n) do
            nil -> nil
            {_time, index} -> index
          end
        Map.put(ns, n, {now, index_or_nil})
      end)
    %__MODULE__{t | active_nodes: new_nodes}
  end

  # Drops nodes whose last touch is older than `threshold` so they no
  # longer hold back min_index.
  defp forget_about_nodes_that_had_been_deactivated(%__MODULE__{active_nodes: nodes} = t, threshold) do
    new_nodes = Enum.reject(nodes, fn {_n, {t, _i}} -> t < threshold end) |> Map.new()
    %__MODULE__{t | active_nodes: new_nodes}
  end

  # Records the index acknowledged by `node_from` (only if the node is
  # still tracked; a nil ack leaves state unchanged).
  defp set_node_index(%__MODULE__{active_nodes: nodes} = t, node_from, index_or_nil) do
    new_nodes =
      case index_or_nil do
        nil -> nodes
        index when is_integer(index) -> update_node_index(nodes, node_from, index)
      end
    %__MODULE__{t | active_nodes: new_nodes}
  end
  defp update_node_index(nodes, node_from, index) do
    case Map.get(nodes, node_from) do
      nil -> nodes
      {time, _index} -> Map.put(nodes, node_from, {time, index})
    end
  end

  # Advances min_index to the smallest index any node has yet to
  # acknowledge, pruning all fully-acknowledged index entries.
  defp proceed_min_index(%__MODULE__{active_nodes: nodes, min_index: min, index_to_group: i2g0, group_to_indices: g2is0} = t) do
    # Nodes that never acknowledged count as stuck at `min`.
    smallest_node_index =
      Enum.map(nodes, fn {_n, {_t, index}} -> index || min end)
      |> Enum.min(fn -> min end)
    if min < smallest_node_index do
      {new_i2g, new_g2is} =
        Enum.reduce(min .. (smallest_node_index - 1), {i2g0, g2is0}, fn(i, {i2g, g2is}) ->
          case Map.pop(i2g, i) do
            {nil, _ } -> {i2g, g2is}
            {g , i2g2} ->
              g2is2 =
                case Map.fetch!(g2is, g) |> List.delete(i) do
                  [] -> Map.delete(g2is, g)
                  is -> Map.put(g2is, g, is)
                end
              {i2g2, g2is2}
          end
        end)
      %__MODULE__{t | min_index: smallest_node_index, index_to_group: new_i2g, group_to_indices: new_g2is}
    else
      t
    end
  end
end
|
lib/raft_fleet/recently_removed_groups.ex
| 0.66356
| 0.479808
|
recently_removed_groups.ex
|
starcoder
|
defprotocol ExMatch.Match do
  @moduledoc false
  @fallback_to_any true

  # Compares `left` (a parsed match pattern) against `right` (an actual
  # value). Returns a list of new bindings when they match, or a
  # {left_diff_ast, right_diff} tuple describing the mismatch.
  @spec diff(t, any, any) :: [any] | {any, any}
  def diff(left, right, opts)

  # Quotes `self` back into an AST for diff rendering.
  @spec escape(t) :: any
  def escape(self)

  # The concrete runtime value this pattern represents (raises for
  # patterns that are bindings only).
  @spec value(t) :: any
  def value(self)
end
defimpl ExMatch.Match, for: Any do
  @moduledoc false

  # Fallback implementation: treat `left` as a plain value and compare.
  def diff(left, right, opts) do
    diff_values(left, right, opts)
  end

  def escape(self),
    do: Macro.escape(self)

  def value(self),
    do: self

  # Shared value-vs-value comparison used by other impls as well.
  # `on_diff` lets callers post-process a mismatch before it is returned.
  def diff_values(left, right, opts, on_diff \\ nil) do
    # Option lookup callback; an option whose value/1 raises
    # ArgumentError is treated as absent (nil).
    get_opts = fn atom ->
      try do
        opts
        |> Map.get(atom)
        |> ExMatch.Match.value()
      rescue
        ArgumentError ->
          nil
      end
    end
    try do
      left_value = ExMatch.Match.value(left)
      ExMatch.Diff.diff(left_value, right, get_opts)
    catch
      # If evaluating/diffing the left side blows up, surface the error
      # as part of the diff instead of crashing the match.
      kind, error ->
        left_ast = ExMatch.Match.escape(left)
        ex = ExMatch.Exception.new(kind, error, __STACKTRACE__)
        {{:=~, [], [left_ast, ex]}, right}
    else
      nil ->
        # nil from ExMatch.Diff.diff means "no difference": no bindings.
        []
      {left_diff, right_diff} when on_diff == nil ->
        {Macro.escape(left_diff), right_diff}
      diff ->
        on_diff.(diff)
    end
  end
end
defmodule ExMatch.Expr do
  @moduledoc false
  # Wraps an expression appearing in a match pattern, keeping both its
  # quoted form (:ast, for diff rendering) and its runtime result (:value).
  defstruct [:ast, :value]

  # pin variable
  def parse({:^, _, [{var_name, _, module} = var_item]} = ast)
      when is_atom(var_name) and is_atom(module),
      do: parse(ast, var_item)

  # remote function/macro call
  def parse({{:., _, [{:__aliases__, _, [module_alias | _]}, fn_name]}, _, args} = ast)
      when is_atom(module_alias) and is_atom(fn_name) and is_list(args),
      do: parse(ast, ast)

  # local/imported function/macro call
  def parse({fn_name, _, args} = ast) when is_atom(fn_name) and is_list(args) do
    if Macro.special_form?(fn_name, length(args)) do
      raise "Special form #{fn_name}/#{length(args)} is not yet supported in ExMatch"
    end
    parse(ast, ast)
  end

  # Builds the quoted struct: `value` is left unevaluated here and runs
  # in the caller's context when the generated code executes.
  defp parse(ast, value) do
    self =
      quote do
        %ExMatch.Expr{
          ast: unquote(Macro.escape(ast)),
          value: unquote(value)
        }
      end
    # No bindings are introduced by a plain expression.
    {[], self}
  end

  defimpl ExMatch.Match do
    @moduledoc false

    # Delegate to the generic value diff; on mismatch render the left
    # side using the original source AST rather than the computed value.
    def diff(left, right, opts) do
      %ExMatch.Expr{ast: ast, value: value} = left
      ExMatch.Match.Any.diff_values(value, right, opts, fn
        {^value, right_diff} ->
          {escape(left), right_diff}
        {left_diff, right_diff} ->
          left_diff = {:=~, [], [ast, Macro.escape(left_diff)]}
          {left_diff, right_diff}
      end)
    end

    # Render as just the AST when it already prints like the value,
    # otherwise as `ast = value` to show both.
    def escape(%ExMatch.Expr{ast: ast, value: value}) do
      code = Macro.to_string(ast)
      if code == inspect(value) do
        ast
      else
        {:=, [], [ast, value]}
      end
    end

    def value(%ExMatch.Expr{value: value}),
      do: value
  end
end
defmodule ExMatch.Var do
  @moduledoc false
  # A variable binding in a match pattern, optionally constrained by a
  # `when` guard expression (:expr is the quoted guard, :expr_fun its
  # compiled one-argument form).
  defstruct [:binding, :expr, :expr_fun]

  # Bare variable: binds the matched value. `_` introduces no binding.
  def parse({var, _, context} = binding) when is_atom(var) and is_atom(context) do
    self =
      quote do
        %ExMatch.Var{
          binding: unquote(Macro.escape(binding))
        }
      end
    case var do
      :_ -> {[], self}
      _ -> {[binding], self}
    end
  end

  # Guarded variable: `var when expr`. The guard is compiled into a fn
  # evaluated against the matched value at diff time.
  def parse({:when, _, [{var, meta, context} = binding, expr]})
      when is_atom(var) and is_atom(context) do
    self =
      quote do
        %ExMatch.Var{
          binding: unquote(Macro.escape(binding)),
          expr: unquote(Macro.escape(expr)),
          expr_fun: fn unquote(binding) -> unquote(expr) end
        }
      end
    {[{var, [generated: true] ++ meta, context}], self}
  end

  defimpl ExMatch.Match do
    @moduledoc false

    # Unguarded variable always matches; bind `right` unless it is `_`.
    def diff(%ExMatch.Var{binding: binding, expr: nil, expr_fun: nil}, right, _opts) do
      case binding do
        {:_, _, nil} -> []
        _ -> [right]
      end
    end
    # Guarded variable: run the guard against `right` (body-level try —
    # catch/else apply to `expr_fun.(right)`).
    def diff(%ExMatch.Var{binding: binding, expr: expr, expr_fun: expr_fun}, right, _opts) do
      expr_fun.(right)
    catch
      # A raising guard is reported as a diff showing the exception.
      class, error ->
        ast =
          quote do
            unquote(binding) = unquote(Macro.escape(right))
            when unquote(expr) = unquote(class)(unquote(error))
          end
        {ast, right}
    else
      # A falsy guard result is a mismatch; truthy binds `right`.
      falsy when falsy in [nil, false] ->
        ast =
          quote do
            unquote(binding) = unquote(Macro.escape(right))
            when unquote(expr) = unquote(Macro.escape(falsy))
          end
        {ast, right}
      _truthy ->
        [right]
    end

    def escape(%ExMatch.Var{binding: binding, expr: nil}),
      do: binding
    def escape(%ExMatch.Var{binding: binding, expr: expr}) do
      quote do
        unquote(binding) when unquote(expr)
      end
    end

    def value(_self),
      do: raise(ArgumentError, "Bindings don't represent values")
  end
end
defmodule ExMatch.List do
  @moduledoc false
  # A list pattern: each element is itself a parsed ExMatch pattern.
  defstruct [:items]

  # Parses a literal list AST into a quoted %ExMatch.List{}, collecting
  # the bindings introduced by its elements.
  def parse(list, parse_ast, opts) do
    {bindings, parsed} = parse_items(list, [], [], parse_ast, opts)
    self =
      quote do
        %ExMatch.List{items: unquote(parsed)}
      end
    {bindings, self}
  end

  # Accumulates bindings and parsed items (reversed, fixed at the end).
  def parse_items([item | list], bindings, parsed, parse_ast, opts) do
    {item_bindings, item_parsed} = parse_ast.(item, opts)
    bindings = item_bindings ++ bindings
    parsed = [item_parsed | parsed]
    parse_items(list, bindings, parsed, parse_ast, opts)
  end
  def parse_items([], bindings, parsed, _parse_ast, _opts) do
    {bindings, Enum.reverse(parsed)}
  end

  # Element-wise diff of pattern items against a concrete list.
  def diff(items, right, opts) do
    diff(items, 0, [], [], [], right, opts)
  end
  # `skipped` counts consecutive matching elements so they can be
  # summarized (via ExMatch.Skipped.list/1) around the first mismatch.
  defp diff([item | items], skipped, bindings, left_diffs, right_diffs, right, opts) do
    case right do
      [right_item | right] ->
        case ExMatch.Match.diff(item, right_item, opts) do
          new_bindings when is_list(new_bindings) ->
            bindings = new_bindings ++ bindings
            diff(items, skipped + 1, bindings, left_diffs, right_diffs, right, opts)
          {left_diff, right_diff} ->
            skipped = ExMatch.Skipped.list(skipped)
            left_diffs = [left_diff | skipped ++ left_diffs]
            right_diffs = [right_diff | skipped ++ right_diffs]
            diff(items, 0, bindings, left_diffs, right_diffs, right, opts)
        end
      [] ->
        # Right side ran out: remaining pattern items are all "missing".
        items = escape_items([item | items])
        {Enum.reverse(left_diffs, items), Enum.reverse(right_diffs)}
    end
  end
  # All items consumed with no diffs and no leftover values: a match.
  defp diff([], _skipped, bindings, [], [], [], _opts), do: bindings
  # Otherwise report the accumulated diffs plus any surplus right values.
  defp diff([], skipped, _bindings, left_diffs, right_diffs, right, _opts) do
    skipped = ExMatch.Skipped.list(skipped)
    left_diffs = skipped ++ left_diffs
    right_diffs = skipped ++ right_diffs
    {Enum.reverse(left_diffs), Enum.reverse(right_diffs, right)}
  end

  def escape_items(items) do
    Enum.map(items, &ExMatch.Match.escape/1)
  end
  def value(items) do
    Enum.map(items, &ExMatch.Match.value/1)
  end

  defimpl ExMatch.Match do
    @moduledoc false
    def diff(left, right, opts) when is_list(right) do
      %ExMatch.List{items: items} = left
      ExMatch.List.diff(items, right, opts)
    end
    # A non-list right side can never match a list pattern.
    def diff(left, right, _) do
      {escape(left), right}
    end
    def escape(%ExMatch.List{items: items}),
      do: ExMatch.List.escape_items(items)
    def value(%ExMatch.List{items: items}),
      do: ExMatch.List.value(items)
  end
end
defmodule ExMatch.Tuple do
@moduledoc false
# Parsed form of a tuple pattern; :items holds one parsed matcher per element.
defstruct [:items]
# 3+ element (or 0/1 element) tuples arrive as {:{}, _, items} AST.
def parse({:{}, _, items}, parse_ast, opts), do: parse_items(items, parse_ast, opts)
# 2-tuples are literal in the AST, so they need their own clause.
def parse({item1, item2}, parse_ast, opts), do: parse_items([item1, item2], parse_ast, opts)
defp parse_items(items, parse_ast, opts) do
# Element parsing is identical to lists, so reuse ExMatch.List.parse_items/5.
{bindings, parsed} = ExMatch.List.parse_items(items, [], [], parse_ast, opts)
self =
quote do
%ExMatch.Tuple{items: unquote(parsed)}
end
{bindings, self}
end
defimpl ExMatch.Match do
@moduledoc false
def diff(left, right, opts) when is_tuple(right) do
%ExMatch.Tuple{items: items} = left
# Delegate to the list diff, then convert the diff shapes back to tuples.
case ExMatch.List.diff(items, Tuple.to_list(right), opts) do
{left_diffs, right_diffs} ->
right_diffs = List.to_tuple(right_diffs)
{{:{}, [], left_diffs}, right_diffs}
bindings ->
bindings
end
end
# Right side is not a tuple: whole pattern is the diff.
def diff(left, right, _opts) do
{escape(left), right}
end
# 2-tuples escape to a literal pair (matching their AST representation) ...
def escape(%ExMatch.Tuple{items: [i1, i2]}),
do: {ExMatch.Match.escape(i1), ExMatch.Match.escape(i2)}
# ... all other sizes escape to the {:{}, [], items} AST form.
def escape(%ExMatch.Tuple{items: items}),
do: {:{}, [], ExMatch.List.escape_items(items)}
def value(%ExMatch.Tuple{items: items}),
do:
items
|> ExMatch.List.value()
|> List.to_tuple()
end
end
defmodule ExMatch.Map do
@moduledoc false
# Parsed form of a map pattern.
#   :partial - true when the pattern contained `...`, meaning extra keys on
#              the right side are allowed
#   :fields  - [{key, parsed_value}] in source order
@enforce_keys [:partial, :fields]
defstruct @enforce_keys
# Parses a %{...} AST into a quoted %ExMatch.Map{}; returns {bindings, quoted}.
def parse({:%{}, _, fields}, parse_ast, opts) do
{partial, bindings, parsed} = parse_fields(fields, parse_ast, opts)
self =
quote do
%ExMatch.Map{
partial: unquote(partial),
fields: unquote(parsed)
}
end
{bindings, self}
end
# Also used by ExMatch.Struct for the fields of a struct pattern.
def parse_fields(fields, parse_ast, opts) do
{partial, bindings, parsed} =
Enum.reduce(fields, {false, [], []}, fn
item, {partial, binding, parsed} ->
parse_field(item, partial, binding, parsed, parse_ast, opts)
end)
{partial, bindings, Enum.reverse(parsed)}
end
# A bare `...` entry marks the map as partial; it contributes no field.
defp parse_field({:..., _, nil}, _partial, bindings, parsed, _parse_ast, _opts) do
{true, bindings, parsed}
end
defp parse_field({key, value}, partial, bindings, parsed, parse_ast, opts) do
{value_bindings, value_parsed} = parse_ast.(value, opts)
parsed = [{key, value_parsed} | parsed]
bindings = value_bindings ++ bindings
{partial, bindings, parsed}
end
# Diffs each pattern field against the right-hand map. Returns
# {bindings, left_diffs, right_diffs, leftover_right} where leftover_right
# holds the right-hand keys the pattern never mentioned.
def diff_items(fields, right, opts) do
{bindings, left_diffs, right_diffs, right, _opts} =
Enum.reduce(fields, {[], [], %{}, right, opts}, &diff_item/2)
{bindings, Enum.reverse(left_diffs), right_diffs, right}
end
defp diff_item({key, field}, {bindings, left_diffs, right_diffs, right, opts}) do
case right do
%{^key => right_value} ->
# Consume the key so whatever remains in `right` is "extra" keys.
right = Map.delete(right, key)
case ExMatch.Match.diff(field, right_value, opts) do
{left_diff, right_diff} ->
left_diffs = [{ExMatch.Match.escape(key), left_diff} | left_diffs]
right_diffs = Map.put(right_diffs, key, right_diff)
{bindings, left_diffs, right_diffs, right, opts}
new_bindings ->
bindings = new_bindings ++ bindings
{bindings, left_diffs, right_diffs, right, opts}
end
_ ->
# Key missing on the right: the whole field is a left-side diff.
left_diff = {
ExMatch.Match.escape(key),
ExMatch.Match.escape(field)
}
left_diffs = [left_diff | left_diffs]
{bindings, left_diffs, right_diffs, right, opts}
end
end
# Materializes parsed fields into {key_value, value_value} pairs; raises (via
# ExMatch.Match.value/1) if any field cannot be evaluated to a value.
def field_values(fields),
do:
Enum.map(fields, fn {key, value} ->
{
ExMatch.Match.value(key),
ExMatch.Match.value(value)
}
end)
defimpl ExMatch.Match do
@moduledoc false
def diff(left, right, opts) when is_map(right) do
%ExMatch.Map{partial: partial, fields: fields} = left
case ExMatch.Map.diff_items(fields, right, opts) do
# Full match: no diffs on either side, and either the pattern is
# partial or the right map had no extra keys.
{bindings, left_diffs, right_diffs, right}
when left_diffs == [] and
right_diffs == %{} and
(partial or right == %{}) ->
bindings
{_bindings, left_diffs, right_diffs, right} ->
# For exact (non-partial) maps, unmatched right-hand keys are part
# of the reported difference.
right_diffs =
if partial do
right_diffs
else
Map.merge(right_diffs, right)
end
left_diffs = {:%{}, [], left_diffs}
{left_diffs, right_diffs}
end
end
# Right side is not a map: whole pattern is the diff.
def diff(left, right, _opts) do
{escape(left), right}
end
def escape(%ExMatch.Map{fields: fields}) do
fields =
Enum.map(fields, fn {key, value} ->
{
ExMatch.Match.escape(key),
ExMatch.Match.escape(value)
}
end)
{:%{}, [], fields}
end
# A partial map has unknown keys, so it cannot be turned into a value.
def value(%ExMatch.Map{partial: true}),
do: raise(ArgumentError, "partial map doesn't represent a value")
def value(%ExMatch.Map{fields: fields}),
do: fields |> ExMatch.Map.field_values() |> Map.new()
end
end
defmodule ExMatch.Struct do
@moduledoc false
# A struct pattern whose fields could all be evaluated, so a concrete struct
# `value` exists and can be compared directly.
defmodule WithValue do
defstruct [:module, :fields, :value]
defimpl ExMatch.Match do
@moduledoc false
def escape(%WithValue{module: module, fields: fields}),
do: ExMatch.Struct.escape(module, fields, false)
def value(%WithValue{value: value}),
do: value
def diff(left, right, opts) do
%WithValue{module: module, fields: fields, value: value} = left
# Fast path: compare the concrete struct values; on inequality fall
# back to the field-by-field struct diff for a readable report.
ExMatch.Match.Any.diff_values(value, right, opts, fn _ ->
ExMatch.Struct.diff(module, fields, false, right, opts)
end)
end
end
end
# A struct pattern that cannot be materialized (partial, or struct!/2
# rejected the fields); always diffed field-by-field.
defmodule NoValue do
defstruct [:module, :fields, :partial]
defimpl ExMatch.Match do
@moduledoc false
def escape(%NoValue{module: module, fields: fields, partial: partial}),
do: ExMatch.Struct.escape(module, fields, partial)
def value(%NoValue{}),
do: raise(ArgumentError, "This struct doesn't have value")
def diff(left, right, opts) do
%NoValue{module: module, fields: fields, partial: partial} = left
ExMatch.Struct.diff(module, fields, partial, right, opts)
end
end
end
# Parses a `%Module{...}` AST. The struct is built at runtime via
# ExMatch.Struct.new/4 so per-module options can be applied.
def parse(
{:%, _, [module, {:%{}, _, fields}]},
parse_ast,
opts
)
when is_list(fields) do
{partial, bindings, parsed} = ExMatch.Map.parse_fields(fields, parse_ast, opts)
self =
quote do
ExMatch.Struct.new(
unquote(module),
unquote(parsed),
unquote(partial),
unquote(opts)
)
end
{bindings, self}
end
# Builds the parsed struct, first merging any per-module defaults registered
# in `opts` (keyed by the struct module, value is an %ExMatch.Map{}).
def new(module, fields, partial, opts) do
case Map.get(opts, module) do
nil ->
new(module, fields, partial)
%ExMatch.Map{} = opts ->
partial = opts.partial || partial
# Explicit pattern fields win over the registered defaults.
fields = Keyword.merge(opts.fields, fields)
new(module, fields, partial)
end
end
# Tries to build a WithValue; any ArgumentError (the explicit raise for
# partial patterns, struct!/2 rejecting fields, or a non-value field) is
# deliberately used as control flow to fall back to NoValue.
defp new(module, fields, partial) do
if partial do
raise ArgumentError
end
value = struct!(module, ExMatch.Map.field_values(fields))
# Re-derive the full field list from the built struct so defaults are
# included; explicitly-given fields keep their parsed (diffable) form.
fields =
value
|> Map.from_struct()
|> Enum.map(fn {key, value} ->
{key, Macro.escape(value)}
end)
|> Keyword.merge(fields, fn _, _, field -> field end)
%WithValue{
module: module,
fields: fields,
value: value
}
rescue
ArgumentError ->
%NoValue{
module: module,
fields: fields,
partial: partial
}
end
# Field-by-field diff against another struct. Note: this calls the map
# protocol implementation module (ExMatch.Match.ExMatch.Map) directly.
def diff(module, fields, partial, %rstruct{} = right, opts) do
map = %ExMatch.Map{fields: fields, partial: partial}
right_map = Map.from_struct(right)
case ExMatch.Match.ExMatch.Map.diff(map, right_map, opts) do
{left_diff, right_diff} ->
make_diff(module, fields, partial, right, left_diff, right_diff)
_ when module != rstruct ->
# Fields matched but the struct modules differ: still a mismatch,
# reported with empty field diffs so the module names stand out.
left_diff = quote(do: %{})
right_diff = %{}
make_diff(module, fields, partial, right, left_diff, right_diff)
bindings ->
bindings
end
end
# Right side is not a struct: whole pattern is the diff.
def diff(module, fields, partial, right, _opts) do
{escape(module, fields, partial), right}
end
defp make_diff(module, fields, partial, %rstruct{} = right, left_diff, right_diff) do
right_diff = Map.put(right_diff, :__struct__, rstruct)
try do
# A partial field map may violate the struct's Inspect implementation;
# probe with safe: false and fall back to the full escaped pattern.
_ = inspect(right_diff, safe: false)
{{:%, [], [module, left_diff]}, right_diff}
rescue
_ ->
{escape(module, fields, partial), right}
end
end
def escape(module, fields, partial) do
map = %ExMatch.Map{
partial: partial,
fields: fields
}
map = ExMatch.Match.ExMatch.Map.escape(map)
{:%, [], [module, map]}
end
end
|
lib/exmatch/match.ex
| 0.773473
| 0.57678
|
match.ex
|
starcoder
|
defmodule Plug.Adapters.Cowboy do
@moduledoc """
Adapter interface to the Cowboy webserver.
## Options
* `:ip` - the ip to bind the server to.
Must be a tuple in the format `{x, y, z, w}`.
* `:port` - the port to run the server.
Defaults to 4000 (http) and 4040 (https).
* `:acceptors` - the number of acceptors for the listener.
Defaults to 100.
* `:max_connections` - max number of connections supported.
Defaults to `16384`.
* `:dispatch` - manually configure Cowboy's dispatch.
If this option is used, the given plug won't be initialized
nor dispatched to (and doing so becomes the user's responsibility).
* `:ref` - the reference name to be used.
Defaults to `plug.HTTP` (http) and `plug.HTTPS` (https).
This is the value that needs to be given on shutdown.
* `:compress` - Cowboy will attempt to compress the response body.
Defaults to false.
* `:timeout` - Time in ms with no requests before Cowboy closes the connection.
Defaults to 5000ms.
* `:protocol_options` - Specifies remaining protocol options,
see [Cowboy protocol docs](http://ninenines.eu/docs/en/cowboy/1.0/manual/cowboy_protocol/).
All other options are given to the underlying transport.
"""
# Made public with @doc false for testing.
@doc false
def args(scheme, plug, opts, cowboy_options) do
# Builds the 4-element argument list expected by :cowboy.start_http/4 and
# :cowboy.start_https/4: [ref, acceptors, transport_opts, protocol_opts].
cowboy_options
|> Keyword.put_new(:max_connections, 16384)
|> Keyword.put_new(:ref, build_ref(plug, scheme))
|> Keyword.put_new(:dispatch, cowboy_options[:dispatch] || dispatch_for(plug, opts))
|> normalize_cowboy_options(scheme)
|> to_args()
end
@doc """
Run cowboy under http.
## Example
# Starts a new interface
Plug.Adapters.Cowboy.http MyPlug, [], port: 80
# The interface above can be shutdown with
Plug.Adapters.Cowboy.shutdown MyPlug.HTTP
"""
@spec http(module(), Keyword.t, Keyword.t) ::
{:ok, pid} | {:error, :eaddrinuse} | {:error, term}
def http(plug, opts, cowboy_options \\ []) do
run(:http, plug, opts, cowboy_options)
end
@doc """
Run cowboy under https.
Besides the options described in the module documentation,
this module also accepts all options defined in [the `ssl`
erlang module] (http://www.erlang.org/doc/man/ssl.html),
like keyfile, certfile, cacertfile, dhfile and others.
The certificate files can be given as a relative path.
For such, the `:otp_app` option must also be given and
certificates will be looked from the priv directory of
the given application.
## Example
# Starts a new interface
Plug.Adapters.Cowboy.https MyPlug, [],
port: 443,
password: "<PASSWORD>",
otp_app: :my_app,
keyfile: "priv/ssl/key.pem",
certfile: "priv/ssl/cert.pem",
dhfile: "priv/ssl/dhparam.pem"
# The interface above can be shutdown with
Plug.Adapters.Cowboy.shutdown MyPlug.HTTPS
"""
@spec https(module(), Keyword.t, Keyword.t) ::
{:ok, pid} | {:error, :eaddrinuse} | {:error, term}
def https(plug, opts, cowboy_options \\ []) do
# :ssl must be running before the TLS listener starts.
Application.ensure_all_started(:ssl)
run(:https, plug, opts, cowboy_options)
end
@doc """
Shutdowns the given reference.
"""
def shutdown(ref) do
:cowboy.stop_listener(ref)
end
@doc """
Returns a child spec to be supervised by your application.
## Example
Presuming your Plug module is named `MyRouter` you can add it to your
supervision tree like so using this function:
defmodule MyApp do
use Application
def start(_type, _args) do
import Supervisor.Spec
children = [
Plug.Adapters.Cowboy.child_spec(:http, MyRouter, [], [port: 4001])
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
end
"""
def child_spec(scheme, plug, opts, cowboy_options \\ []) do
[ref, nb_acceptors, trans_opts, proto_opts] = args(scheme, plug, opts, cowboy_options)
ranch_module = case scheme do
:http -> :ranch_tcp
:https -> :ranch_ssl
end
:ranch.child_spec(ref, nb_acceptors, ranch_module, trans_opts, :cowboy_protocol, proto_opts)
end
## Helpers
@http_cowboy_options [port: 4000]
@https_cowboy_options [port: 4040]
# Options that belong to Cowboy's protocol layer rather than the transport.
@protocol_options [:timeout, :compress]
defp run(scheme, plug, opts, cowboy_options) do
case Application.ensure_all_started(:cowboy) do
{:ok, _} ->
:ok
{:error, {:cowboy, _}} ->
raise "could not start the cowboy application. Please ensure it is listed " <>
"as a dependency both in deps and application in your mix.exs"
end
# Dispatches to :cowboy.start_http/4 or :cowboy.start_https/4.
apply(:cowboy, :"start_#{scheme}", args(scheme, plug, opts, cowboy_options))
end
defp normalize_cowboy_options(cowboy_options, :http) do
Keyword.merge @http_cowboy_options, cowboy_options
end
defp normalize_cowboy_options(cowboy_options, :https) do
assert_ssl_options(cowboy_options)
cowboy_options = Keyword.merge @https_cowboy_options, cowboy_options
# Certificate paths are resolved (possibly relative to :otp_app's priv dir)
# and converted to charlists, as Erlang's :ssl expects.
cowboy_options = Enum.reduce [:keyfile, :certfile, :cacertfile, :dhfile], cowboy_options, &normalize_ssl_file(&1, &2)
cowboy_options = Enum.reduce [:password], cowboy_options, &to_char_list(&2, &1)
cowboy_options
end
defp to_args(all_opts) do
# Anything that isn't a 2-tuple keyword entry (e.g. raw inet options like
# :inet6) goes straight to the transport options.
{opts, initial_transport_options} = Enum.partition(all_opts, &is_tuple(&1) and tuple_size(&1) == 2)
opts = Keyword.delete(opts, :otp_app)
{ref, opts} = Keyword.pop(opts, :ref)
{dispatch, opts} = Keyword.pop(opts, :dispatch)
{acceptors, opts} = Keyword.pop(opts, :acceptors, 100)
{protocol_options, opts} = Keyword.pop(opts, :protocol_options, [])
dispatch = :cowboy_router.compile(dispatch)
{extra_options, transport_options} = Keyword.split(opts, @protocol_options)
protocol_options = [env: [dispatch: dispatch]] ++ protocol_options ++ extra_options
[ref, acceptors, initial_transport_options ++ transport_options, protocol_options]
end
# E.g. build_ref(MyPlug, :http) => MyPlug.HTTP
defp build_ref(plug, scheme) do
Module.concat(plug, scheme |> to_string |> String.upcase)
end
# Default dispatch: every host/path goes to the Plug handler, with the
# plug initialized once here at setup time.
defp dispatch_for(plug, opts) do
opts = plug.init(opts)
[{:_, [ {:_, Plug.Adapters.Cowboy.Handler, {plug, opts}} ]}]
end
defp normalize_ssl_file(key, cowboy_options) do
value = cowboy_options[key]
cond do
is_nil(value) ->
cowboy_options
Path.type(value) == :absolute ->
put_ssl_file cowboy_options, key, value
true ->
# Relative paths are resolved against the :otp_app's priv directory.
put_ssl_file cowboy_options, key, Path.expand(value, otp_app(cowboy_options))
end
end
defp assert_ssl_options(cowboy_options) do
unless Keyword.has_key?(cowboy_options, :key) or
Keyword.has_key?(cowboy_options, :keyfile) do
fail "missing option :key/:keyfile"
end
unless Keyword.has_key?(cowboy_options, :cert) or
Keyword.has_key?(cowboy_options, :certfile) do
fail "missing option :cert/:certfile"
end
end
defp put_ssl_file(cowboy_options, key, value) do
value = to_char_list(value)
# Fail fast at setup rather than with an obscure :ssl error at runtime.
unless File.exists?(value) do
fail "the file #{value} required by SSL's #{inspect key} either does not exist, or the application does not have permission to access it"
end
Keyword.put(cowboy_options, key, value)
end
defp otp_app(cowboy_options) do
if app = cowboy_options[:otp_app] do
Application.app_dir(app)
else
fail "to use a relative certificate with https, the :otp_app " <>
"option needs to be given to the adapter"
end
end
# Converts the value under `key` (if any) to a charlist for Erlang consumers.
defp to_char_list(cowboy_options, key) do
if value = cowboy_options[key] do
Keyword.put cowboy_options, key, to_char_list(value)
else
cowboy_options
end
end
defp fail(message) do
raise ArgumentError, message: "could not start Cowboy adapter, " <> message
end
end
|
lib/plug/adapters/cowboy.ex
| 0.875081
| 0.444685
|
cowboy.ex
|
starcoder
|
defmodule Dpos.Tx do
import Dpos.Utils, only: [hexdigest: 1]
alias Salty.Sign.Ed25519
# Struct fields; entries with values are defaults (1-char address suffix,
# zero amount/fee, empty asset map).
@keys [
:id,
:recipientId,
:senderPublicKey,
:signature,
:signSignature,
:timestamp,
:type,
address_suffix_length: 1,
amount: 0,
asset: %{},
fee: 0
]
# Fields serialized to JSON (address_suffix_length is internal only).
@json_keys [
:id,
:type,
:fee,
:amount,
:recipientId,
:senderPublicKey,
:signature,
:signSignature,
:timestamp,
:asset
]
@derive {Jason.Encoder, only: @json_keys}
defstruct @keys
@doc """
Validates timestamp value.
Check if timestamp is present and not negative,
otherwise it will be set to `Dpos.Time.now/0`.
"""
@spec validate_timestamp(Map.t()) :: Map.t()
def validate_timestamp(attrs) when is_map(attrs) do
ts = attrs[:timestamp]
if ts && is_integer(ts) && ts >= 0 do
attrs
else
Map.put(attrs, :timestamp, Dpos.Time.now())
end
end
# Injected into per-transaction-type modules (e.g. `use Dpos.Tx, type: 0`).
# `keys` become defaults merged into every built transaction;
# get_child_bytes/1 and normalize_asset/1 are overridable hooks for types
# carrying extra asset payload.
defmacro __using__(keys) do
unless keys[:type], do: raise("option 'type' is required")
quote do
@type wallet_or_secret() :: Dpos.Wallet.t() | {String.t(), String.t()}
@doc """
Builds a new transaction.
"""
@spec build(Map.t()) :: Dpos.Tx.t()
def build(attrs) do
keys = Enum.into(unquote(keys), %{})
attrs =
attrs
|> Map.merge(keys)
|> Dpos.Tx.validate_timestamp()
struct!(Dpos.Tx, attrs)
end
@doc """
Signs the transaction with the sender private key.
It accepts either a `Dpos.Wallet` or a `{"secret", "L"}` tuple as second argument
where the first element is the secret and the second element is the address suffix
(i.e. `"L"` for Lisk).
A secondary private_key can also be provided as third argument.
"""
@spec sign(Dpos.Tx.t(), wallet_or_secret, binary()) :: Dpos.Tx.t()
def sign(tx, wallet_or_secret, second_priv_key \\ nil)
def sign(%Dpos.Tx{} = tx, %Dpos.Wallet{} = wallet, second_priv_key) do
# Order matters: the second signature covers the first, and the id
# is derived from the fully signed transaction.
tx
|> Map.put(:senderPublicKey, wallet.pub_key)
|> Map.put(:address_suffix_length, wallet.suffix_length)
|> create_signature(wallet.priv_key)
|> create_signature(second_priv_key, :signSignature)
|> determine_id()
end
def sign(%Dpos.Tx{} = tx, {secret, suffix}, second_priv_key)
when is_binary(secret) and is_binary(suffix) do
wallet = Dpos.Wallet.generate(secret, suffix)
sign(tx, wallet, second_priv_key)
end
@doc """
Normalizes the transaction in a format that it could be broadcasted through a relay node.
"""
@spec normalize(Dpos.Tx.t()) :: Dpos.Tx.t()
def normalize(%Dpos.Tx{} = tx) do
# Raw binaries are hex-encoded for JSON transport.
tx
|> Map.put(:senderPublicKey, hexdigest(tx.senderPublicKey))
|> Map.put(:signature, hexdigest(tx.signature))
|> Map.put(:signSignature, hexdigest(tx.signSignature))
|> normalize_asset()
end
defp create_signature(tx, priv_key, field \\ :signature)
# No key given (e.g. no secondary passphrase): leave the field unset.
defp create_signature(%Dpos.Tx{} = tx, nil, _field), do: tx
defp create_signature(%Dpos.Tx{} = tx, priv_key, field) do
{:ok, signature} =
tx
|> compute_hash()
|> Ed25519.sign_detached(priv_key)
Map.put(tx, field, signature)
end
# The transaction id is the reversed first 8 bytes of its hash, as a
# decimal string.
defp determine_id(%Dpos.Tx{} = tx) do
<<head::bytes-size(8), _rest::bytes>> = compute_hash(tx)
id = head |> Dpos.Utils.reverse_binary() |> to_string()
Map.put(tx, :id, id)
end
# SHA-256 over the wire format: type byte, LE32 timestamp, 32-byte sender
# public key, recipient address, LE64 amount, type-specific asset bytes,
# then any signatures already present.
defp compute_hash(%Dpos.Tx{} = tx) do
bytes =
:erlang.list_to_binary([
<<tx.type>>,
<<tx.timestamp::little-integer-size(32)>>,
<<tx.senderPublicKey::bytes-size(32)>>,
Dpos.Utils.address_to_binary(tx.recipientId, tx.address_suffix_length),
<<tx.amount::little-integer-size(64)>>,
get_child_bytes(tx),
Dpos.Utils.signature_to_binary(tx.signature),
Dpos.Utils.signature_to_binary(tx.signSignature)
])
:crypto.hash(:sha256, bytes)
end
# Default hooks; transaction types with asset payloads override these.
defp get_child_bytes(%Dpos.Tx{}), do: ""
defp normalize_asset(%Dpos.Tx{} = tx), do: tx
defoverridable get_child_bytes: 1, normalize_asset: 1
end
end
end
|
lib/tx/tx.ex
| 0.806396
| 0.616878
|
tx.ex
|
starcoder
|
defmodule AWS.DataSync do
@moduledoc """
AWS DataSync
AWS DataSync is a managed data transfer service that makes it simpler for you to
automate moving data between on-premises storage and Amazon Simple Storage
Service (Amazon S3) or Amazon Elastic File System (Amazon EFS).
This API interface reference for AWS DataSync contains documentation for a
programming interface that you can use to manage AWS DataSync.
"""
alias AWS.Client
alias AWS.Request
# Service descriptor used by AWS.Request to build endpoints, sign requests
# (SigV4) and set the X-Amz-Target prefix. Every operation below is a JSON
# POST (application/x-amz-json-1.1) delegated to Request.request_post/5.
def metadata do
%AWS.ServiceMetadata{
abbreviation: "DataSync",
api_version: "2018-11-09",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "datasync",
global?: false,
protocol: "json",
service_id: "DataSync",
signature_version: "v4",
signing_name: "datasync",
target_prefix: "FmrsService"
}
end
@doc """
Cancels execution of a task.
When you cancel a task execution, the transfer of some files is abruptly
interrupted. The contents of files that are transferred to the destination might
be incomplete or inconsistent with the source files. However, if you start a new
task execution on the same task and you allow the task execution to complete,
file content on the destination is complete and consistent. This applies to
other unexpected failures that interrupt a task execution. In all of these
cases, AWS DataSync successfully complete the transfer when you start the next
task execution.
"""
def cancel_task_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelTaskExecution", input, options)
end
@doc """
Activates an AWS DataSync agent that you have deployed on your host.
The activation process associates your agent with your account. In the
activation process, you specify information such as the AWS Region that you want
to activate the agent in. You activate the agent in the AWS Region where your
target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in
this AWS Region.
You can activate the agent in a VPC (virtual private cloud) or provide the agent
access to a VPC endpoint so you can run tasks without going over the public
internet.
You can use an agent for more than one location. If a task uses multiple agents,
all of them need to have status AVAILABLE for the task to run. If you use
multiple agents for a source location, the status of all the agents must be
AVAILABLE for the task to run.
Agents are automatically updated by AWS on a regular basis, using a mechanism
that ensures minimal interruption to your tasks.
"""
def create_agent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAgent", input, options)
end
@doc """
Creates an endpoint for an Amazon EFS file system.
"""
def create_location_efs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationEfs", input, options)
end
@doc """
Creates an endpoint for an Amazon FSx for Windows file system.
"""
def create_location_fsx_windows(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationFsxWindows", input, options)
end
@doc """
Defines a file system on a Network File System (NFS) server that can be read
from or written to.
"""
def create_location_nfs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationNfs", input, options)
end
@doc """
Creates an endpoint for a self-managed object storage bucket.
For more information about self-managed object storage locations, see
`create-object-location`.
"""
def create_location_object_storage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationObjectStorage", input, options)
end
@doc """
Creates an endpoint for an Amazon S3 bucket.
For more information, see
https://docs.aws.amazon.com/datasync/latest/userguide/create-locations-cli.html#create-location-s3-cli
in the *AWS DataSync User Guide*.
"""
def create_location_s3(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationS3", input, options)
end
@doc """
Defines a file system on a Server Message Block (SMB) server that can be read
from or written to.
"""
def create_location_smb(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLocationSmb", input, options)
end
@doc """
Creates a task.
A task is a set of two locations (source and destination) and a set of Options
that you use to control the behavior of a task. If you don't specify Options
when you create a task, AWS DataSync populates them with service defaults.
When you create a task, it first enters the CREATING state. During CREATING AWS
DataSync attempts to mount the on-premises Network File System (NFS) location.
The task transitions to the AVAILABLE state without waiting for the AWS location
to become mounted. If required, AWS DataSync mounts the AWS location before each
task execution.
If an agent that is associated with a source (NFS) location goes offline, the
task transitions to the UNAVAILABLE status. If the status of the task remains in
the CREATING status for more than a few minutes, it means that your agent might
be having trouble mounting the source NFS file system. Check the task's
ErrorCode and ErrorDetail. Mount issues are often caused by either a
misconfigured firewall or a mistyped NFS server hostname.
"""
def create_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTask", input, options)
end
@doc """
Deletes an agent.
To specify which agent to delete, use the Amazon Resource Name (ARN) of the
agent in your request. The operation disassociates the agent from your AWS
account. However, it doesn't delete the agent virtual machine (VM) from your
on-premises environment.
"""
def delete_agent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteAgent", input, options)
end
@doc """
Deletes the configuration of a location used by AWS DataSync.
"""
def delete_location(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLocation", input, options)
end
@doc """
Deletes a task.
"""
def delete_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTask", input, options)
end
@doc """
Returns metadata such as the name, the network interfaces, and the status (that
is, whether the agent is running or not) for an agent.
To specify which agent to describe, use the Amazon Resource Name (ARN) of the
agent in your request.
"""
def describe_agent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAgent", input, options)
end
@doc """
Returns metadata, such as the path information about an Amazon EFS location.
"""
def describe_location_efs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationEfs", input, options)
end
@doc """
Returns metadata, such as the path information about an Amazon FSx for Windows
location.
"""
def describe_location_fsx_windows(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationFsxWindows", input, options)
end
@doc """
Returns metadata, such as the path information, about an NFS location.
"""
def describe_location_nfs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationNfs", input, options)
end
@doc """
Returns metadata about a self-managed object storage server location.
For more information about self-managed object storage locations, see
`create-object-location`.
"""
def describe_location_object_storage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationObjectStorage", input, options)
end
@doc """
Returns metadata, such as bucket name, about an Amazon S3 bucket location.
"""
def describe_location_s3(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationS3", input, options)
end
@doc """
Returns metadata, such as the path and user information about an SMB location.
"""
def describe_location_smb(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLocationSmb", input, options)
end
@doc """
Returns metadata about a task.
"""
def describe_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTask", input, options)
end
@doc """
Returns detailed metadata about a task that is being executed.
"""
def describe_task_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTaskExecution", input, options)
end
@doc """
Returns a list of agents owned by an AWS account in the AWS Region specified in
the request.
The returned list is ordered by agent Amazon Resource Name (ARN).
By default, this operation returns a maximum of 100 agents. This operation
supports pagination that enables you to optionally reduce the number of agents
returned in a response.
If you have more agents than are returned in a response (that is, the response
returns only a truncated list of your agents), the response contains a marker
that you can specify in your next request to fetch the next page of agents.
"""
def list_agents(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAgents", input, options)
end
@doc """
Returns a list of source and destination locations.
If you have more locations than are returned in a response (that is, the
response returns only a truncated list of your agents), the response contains a
token that you can specify in your next request to fetch the next page of
locations.
"""
def list_locations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListLocations", input, options)
end
@doc """
Returns all the tags associated with a specified resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Returns a list of executed tasks.
"""
def list_task_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTaskExecutions", input, options)
end
@doc """
Returns a list of all the tasks.
"""
def list_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTasks", input, options)
end
@doc """
Starts a specific invocation of a task.
A `TaskExecution` value represents an individual run of a task. Each task can
have at most one `TaskExecution` at a time.
`TaskExecution` has the following transition phases: INITIALIZING | PREPARING |
TRANSFERRING | VERIFYING | SUCCESS/FAILURE.
For detailed information, see the Task Execution section in the Components and
Terminology topic in the *AWS DataSync User Guide*.
"""
def start_task_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartTaskExecution", input, options)
end
@doc """
Applies a key-value pair to an AWS resource.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes a tag from an AWS resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates the name of an agent.
"""
def update_agent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateAgent", input, options)
end
@doc """
Updates the metadata associated with a task.
"""
def update_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTask", input, options)
end
@doc """
Updates execution of a task.
You can modify bandwidth throttling for a task execution that is running or
queued. For more information, see [Adjusting Bandwidth Throttling for a Task Execution](https://docs.aws.amazon.com/datasync/latest/working-with-task-executions.html#adjust-bandwidth-throttling).
The only `Option` that can be modified by `UpdateTaskExecution` is `
[BytesPerSecond](https://docs.aws.amazon.com/datasync/latest/userguide/API_Options.html#DataSync-Type-Options-BytesPerSecond)
`.
"""
def update_task_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTaskExecution", input, options)
end
end
|
lib/aws/generated/data_sync.ex
| 0.81283
| 0.520557
|
data_sync.ex
|
starcoder
|
defmodule Apoc.RSA.PrivateKey do
  @moduledoc """
  A struct and set of functions to represent an RSA private key based
  on the underlying erlang representation.

  For information on key formats in PKI see [PKI PEM overview](https://gist.github.com/awood/9338235)
  or [RFC5912](https://tools.ietf.org/html/rfc5912)

  See also [Erlang Public Key Records](http://erlang.org/doc/apps/public_key/public_key_records.html#rsa)
  """

  defstruct [
    :version,
    :modulus,
    :public_exponent,
    :private_exponent,
    :prime1,
    :prime2,
    :exponent1,
    :exponent2,
    :coefficient,
    :other_prime_info
  ]

  @type t :: %__MODULE__{
          version: :"two-prime",
          modulus: integer(),
          public_exponent: integer(),
          private_exponent: integer(),
          prime1: integer(),
          prime2: integer(),
          exponent1: integer(),
          exponent2: integer(),
          coefficient: integer(),
          other_prime_info: any()
        }

  @doc """
  Encrypts a message with the given *private* key
  (uses PKCS1 standard padding).

  Returns `{:ok, encoded_ciphertext}`, or `:error` if the underlying
  `:crypto` call raises (for example when the message is too long for
  the key's modulus).

  See `Apoc.RSA.encrypt/2`
  """
  @spec encrypt(__MODULE__.t(), binary()) :: {:ok, binary()} | :error
  def encrypt(%__MODULE__{} = skey, message) do
    try do
      ciphertext =
        :rsa
        |> :crypto.private_encrypt(message, to_erlang_type(skey), :rsa_pkcs1_padding)
        |> Apoc.encode()

      {:ok, ciphertext}
    rescue
      # :crypto raises on bad keys / oversized input; collapse to :error
      _ -> :error
    end
  end

  @doc """
  Decrypts a message with the given *private* key
  (uses PKCS1-OAEP padding).

  Returns `{:ok, plaintext}` on success; `:error` (or `Apoc.decode/1`'s
  error value) otherwise.

  See `Apoc.RSA.decrypt/2`
  """
  @spec decrypt(__MODULE__.t(), binary()) :: {:ok, binary()} | :error
  def decrypt(%__MODULE__{} = skey, ciphertext) do
    try do
      with {:ok, ctb} <- Apoc.decode(ciphertext) do
        {:ok,
         :crypto.private_decrypt(:rsa, ctb, to_erlang_type(skey), :rsa_pkcs1_oaep_padding)}
      end
    rescue
      _ -> :error
    end
  end

  @doc """
  Returns a list of the key's parameters inline with
  the erlang [data type](http://erlang.org/doc/man/crypto.html#data-types-)

  The order follows `:crypto`'s `rsa_private()` long form
  `[E, N, D, P1, P2, E1, E2, C]`, which differs from the record layout
  used by `:public_key`.
  """
  def to_erlang_type(%__MODULE__{} = skey) do
    [
      skey.public_exponent,
      skey.modulus,
      skey.private_exponent,
      skey.prime1,
      skey.prime2,
      skey.exponent1,
      skey.exponent2,
      skey.coefficient
    ]
  end

  @doc """
  Loads a PEM encoded private key string.

  Returns `{:ok, %Apoc.RSA.PrivateKey{}}` when the PEM contains exactly one
  `RSAPrivateKey` entry, `{:error, "Not a private key"}` otherwise.
  """
  @spec load_pem(String.t()) :: {:ok, __MODULE__.t()} | {:error, String.t()}
  def load_pem(pem_str) do
    with [enc_pkey] <- :public_key.pem_decode(pem_str),
         {
           :RSAPrivateKey,
           version,
           modulus,
           public_exponent,
           private_exponent,
           prime1,
           prime2,
           exponent1,
           exponent2,
           coefficient,
           other_prime_info
         } <- :public_key.pem_entry_decode(enc_pkey) do
      {:ok,
       %__MODULE__{
         version: version,
         modulus: modulus,
         public_exponent: public_exponent,
         private_exponent: private_exponent,
         prime1: prime1,
         prime2: prime2,
         exponent1: exponent1,
         exponent2: exponent2,
         coefficient: coefficient,
         other_prime_info: other_prime_info
       }}
    else
      _ ->
        {:error, "Not a private key"}
    end
  end

  @doc """
  Dumps a key into PEM format.

  Note: the version is fixed to `:"two-prime"` and `other_prime_info`
  is dropped (`:asn1_NOVALUE`), matching the two-prime RSA encoding.
  """
  @spec dump_pem(__MODULE__.t()) :: String.t()
  def dump_pem(%__MODULE__{} = key) do
    target = {
      :RSAPrivateKey,
      :"two-prime",
      key.modulus,
      key.public_exponent,
      key.private_exponent,
      key.prime1,
      key.prime2,
      key.exponent1,
      key.exponent2,
      key.coefficient,
      :asn1_NOVALUE
    }

    :RSAPrivateKey
    |> :public_key.pem_entry_encode(target)
    |> List.wrap()
    |> :public_key.pem_encode()
  end

  defimpl Inspect do
    import Inspect.Algebra

    # Redact all key material from inspect output so secrets never leak
    # into logs or IEx sessions.
    def inspect(_key, _opts) do
      concat(["#Apoc.RSA.PrivateKey<XXXXX>"])
    end
  end
end
|
lib/apoc/rsa/private_key.ex
| 0.796015
| 0.63968
|
private_key.ex
|
starcoder
|
defmodule Watchman.Query do
  # Shape of a watchman query/subscription payload: a generator (e.g. "suffix"),
  # an expression map, and the file fields to report back (default: just "name").
  # Serialized with Map.from_struct/1 + Jason in Watchman.init/1.
  defstruct [:generator, expression: %{}, fields: ["name"]]
end
defmodule Watchman do
  @moduledoc """
  A file watching service.
  Watchman exists to watch files and send messages to your Erlang processes when they change.
  This module uses [`watchman`](https://facebook.github.io/watchman/) via a Port.
  See https://facebook.github.io/watchman/docs/cmd/query.html and https://facebook.github.io/watchman/docs/cmd/subscribe.html for more details about syntax.
  """
  use GenServer

  # Starts the watcher GenServer. `pid` will receive
  # `{:modified, subscription_id, files}` messages; any trailing `options`
  # are forwarded to GenServer.start_link/3.
  def start_link([pid, subscription_id, root, query | options]) do
    GenServer.start_link(__MODULE__, [pid, subscription_id, root, query], options)
  end

  # Returns true when a `watchman` executable is callable.
  # System.cmd/2 raises (e.g. ErlangError :enoent) when the binary is
  # missing, which the rescue converts to `false`.
  def installed? do
    try do
      System.cmd("watchman", ["version"])
      true
    rescue
      _ -> false
    end
  end

  @impl true
  def init([pid, subscription_id, root, query]) do
    # Random sentinel line: when the shell wrapper reads it from stdin it
    # stops forwarding input to watchman (see shell_command/1).
    stopword = :crypto.strong_rand_bytes(16) |> Base.encode64()

    port =
      Port.open({:spawn_executable, "/bin/sh"}, [
        :exit_status,
        :hide,
        :stream,
        {:args, ["-c", shell_command(stopword)]}
      ])

    # Issue the subscribe command as a single JSON line, then the stopword
    # so the wrapper stops reading further stdin for watchman.
    json =
      Jason.encode!([
        "subscribe",
        root,
        subscription_id,
        Map.from_struct(query)
      ])

    nl = List.to_string(:io_lib.nl())
    true = Port.command(port, json <> nl)
    true = Port.command(port, stopword <> nl)
    {:ok, {pid, subscription_id, port}}
  end

  @impl true
  def handle_info({port, {:data, raw_chunk}}, state) do
    {pid, subscription_id, ^port} = state

    # split chunk if we got several JSON objects in one chunk
    for chunk <- List.to_string(raw_chunk) |> split_json() do
      case Jason.decode!(chunk) do
        # Only file-change notifications for our subscription are forwarded;
        # acks and other subscriptions' payloads are dropped.
        %{"subscription" => ^subscription_id, "files" => files} ->
          send(pid, {:modified, subscription_id, files})

        _ ->
          nil
      end
    end

    {:noreply, state}
  end

  # Exit code 88 is produced by shell_command/1 when `watchman` is absent.
  # NOTE(review): other exit statuses have no matching clause and will crash
  # this process — presumably intentional (let-it-crash); confirm.
  def handle_info({_port, {:exit_status, 88}}, _state) do
    raise "Cannot find executable `watchman`"
  end

  # Wrapper script: forwards stdin lines to watchman until the stopword is
  # read, then keeps draining stdin in the background and kills the whole
  # process group once stdin closes (i.e. when this port is closed).
  defp shell_command(stopword) do
    String.trim("""
    set -e
    command -v watchman >/dev/null 2>&1 || exit 88
    exec 8<&0
    (
    while read x <&8; do
    if [ "$x" = "#{stopword}" ]; then
    break
    fi
    echo $x
    done
    ) | watchman --persistent --json-command --server-encoding=json & PID=$!
    (
    while read foo <&8; do
    :
    done
    kill -- -$$
    ) >/dev/null 2>&1 &
    wait $PID
    """)
  end

  # Splits a chunk that may contain several concatenated JSON objects.
  @spec split_json(binary()) :: [binary()]
  def split_json(s) do
    split_json(s, [])
  end

  # NOTE(review): the boundary regex tolerates at most two whitespace
  # characters between `}` and `{` — confirm watchman never emits more.
  @spec split_json(binary(), [binary()]) :: [binary()]
  def split_json(s, acc) do
    case Regex.run(~r/\}\s?\s?\{/, s, return: :index) do
      nil ->
        Enum.reverse([s | acc])

      [{idx, _}] ->
        # Split just after the closing `}`; the leading whitespace stays on
        # the next fragment, which Jason.decode!/1 tolerates.
        {pre, post} = String.split_at(s, idx + 1)
        split_json(post, [pre | acc])
    end
  end
end
|
lib/watchman.ex
| 0.676299
| 0.463262
|
watchman.ex
|
starcoder
|
defmodule Typesense.Collections do
  @moduledoc """
  The `Typesense.Collections` module is the service implementation for Typesense' `Collections` API Resource.
  """

  @doc """
  Create a Collection.

  ## Examples
  ```
  schema = %{
    name: "companies",
    fields: [
      %{name: "company_name", type: "string"},
      %{name: "num_employees", type: "int32"},
      %{name: "country", type: "string", facet: true},
    ],
    default_sorting_field: "num_employees"
  }
  Typesense.Collections.create(schema)
  ```
  """
  def create(schema) do
    "/collections"
    |> Typesense.post(schema)
    |> handle()
  end

  @doc """
  Retrieve a collection.

  ## Examples
  ```elixir
  iex> Typesense.Collections.retrieve("companies")
  {:ok, company}
  ```
  """
  def retrieve(collection) do
    "/collections/#{collection}"
    |> Typesense.get()
    |> handle()
  end

  @doc """
  List all collections.

  ## Examples
  ```elixir
  iex> Typesense.Collections.list()
  {:ok, collections}
  ```
  """
  def list() do
    "/collections"
    |> Typesense.get()
    |> handle()
  end

  @doc """
  Delete a collection.

  ## Examples
  ```elixir
  iex> Typesense.Collections.delete(collection_id)
  {:ok, _collection}
  ```
  """
  def delete(id) do
    "/collections/#{id}"
    |> Typesense.delete()
    |> handle()
  end

  @doc """
  Search a collection.

  ## Examples
  ```elixir
  iex> Typesense.Collections.search(collection, query_params)
  [%{}, ...]
  ```
  """
  def search(collection, query_params) do
    "/collections/#{collection}/documents/search"
    |> Typesense.get(query: query_params)
    |> handle()
  end

  # Shared response handling: unwrap transport envelopes into
  # `Typesense.Http.handle_response/1` results, pass errors through.
  # (Extracted from five identical case expressions.)
  defp handle({:ok, env}), do: Typesense.Http.handle_response(env)
  defp handle({:error, reason}), do: {:error, reason}
end
|
lib/typesense/collections/collections.ex
| 0.89605
| 0.85567
|
collections.ex
|
starcoder
|
defmodule Ameritrade.Quote.Index do
  @moduledoc false
  # Quote payload for an index symbol; field names mirror the TD Ameritrade
  # API JSON (hence camelCase keys). Numeric fields default to 0.
  @derive Jason.Encoder
  defstruct symbol: nil,
            description: nil,
            lastPrice: 0,
            openPrice: 0,
            highPrice: 0,
            lowPrice: 0,
            closePrice: 0,
            netChange: 0,
            totalVolume: 0,
            tradeTimeInLong: 0,
            exchange: nil,
            exchangeName: nil,
            digits: 0,
            fiftyTwoWkHigh: 0,
            fiftyTwoWkLow: 0,
            securityStatus: nil
end
defmodule Ameritrade.Quote.MutualFund do
  @moduledoc false
  # Quote payload for a mutual fund; field names mirror the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            description: nil,
            closePrice: 0,
            netChange: 0,
            totalVolume: 0,
            tradeTimeInLong: 0,
            exchange: nil,
            exchangeName: nil,
            digits: 0,
            fiftyTwoWkHigh: 0,
            fiftyTwoWkLow: 0,
            nAV: 0,
            peRatio: 0,
            divAmount: 0,
            divYield: 0,
            divDate: nil,
            securityStatus: nil
end
defmodule Ameritrade.Quote.Future do
  @moduledoc false
  # Quote payload for a futures contract; field names mirror the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            bidPriceInDouble: 0,
            askPriceInDouble: 0,
            lastPriceInDouble: 0,
            bidId: nil,
            askId: nil,
            highPriceInDouble: 0,
            lowPriceInDouble: 0,
            closePriceInDouble: 0,
            exchange: nil,
            description: nil,
            lastId: nil,
            openPriceInDouble: 0,
            changeInDouble: 0,
            futurePercentChange: 0,
            exchangeName: nil,
            securityStatus: nil,
            openInterest: 0,
            mark: 0,
            tick: 0,
            tickAmount: 0,
            product: nil,
            futurePriceFormat: nil,
            futureTradingHours: nil,
            futureIsTradable: false,
            futureMultiplier: 0,
            futureIsActive: false,
            futureSettlementPrice: 0,
            futureActiveSymbol: nil,
            futureExpirationDate: nil
end
defmodule Ameritrade.Quote.FutureOptions do
  @moduledoc false
  # Quote payload for an option on a future; includes the greeks
  # (delta/gamma/theta/vega/rho) in *InDouble fields. Mirrors the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            bidPriceInDouble: 0,
            askPriceInDouble: 0,
            lastPriceInDouble: 0,
            highPriceInDouble: 0,
            lowPriceInDouble: 0,
            closePriceInDouble: 0,
            description: nil,
            openPriceInDouble: 0,
            netChangeInDouble: 0,
            openInterest: 0,
            exchangeName: nil,
            securityStatus: nil,
            volatility: 0,
            moneyIntrinsicValueInDouble: 0,
            multiplierInDouble: 0,
            digits: 0,
            strikePriceInDouble: 0,
            contractType: nil,
            underlying: nil,
            timeValueInDouble: 0,
            deltaInDouble: 0,
            gammaInDouble: 0,
            thetaInDouble: 0,
            vegaInDouble: 0,
            rhoInDouble: 0,
            mark: 0,
            tick: 0,
            tickAmount: 0,
            futureIsTradable: false,
            futureTradingHours: nil,
            futurePercentChange: 0,
            futureIsActive: false,
            futureExpirationDate: 0,
            expirationType: nil,
            exerciseType: nil,
            inTheMoney: false
end
defmodule Ameritrade.Quote.Option do
  @moduledoc false
  # Quote payload for an equity option, including greeks and theoretical
  # value. Field names mirror the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            description: nil,
            bidPrice: 0,
            bidSize: 0,
            askPrice: 0,
            askSize: 0,
            lastPrice: 0,
            lastSize: 0,
            openPrice: 0,
            highPrice: 0,
            lowPrice: 0,
            closePrice: 0,
            netChange: 0,
            totalVolume: 0,
            quoteTimeInLong: 0,
            tradeTimeInLong: 0,
            mark: 0,
            openInterest: 0,
            volatility: 0,
            moneyIntrinsicValue: 0,
            multiplier: 0,
            strikePrice: 0,
            contractType: nil,
            underlying: nil,
            timeValue: 0,
            deliverables: nil,
            delta: 0,
            gamma: 0,
            theta: 0,
            vega: 0,
            rho: 0,
            securityStatus: nil,
            theoreticalOptionValue: 0,
            underlyingPrice: 0,
            uvExpirationType: nil,
            exchange: nil,
            exchangeName: nil,
            settlementType: nil
end
defmodule Ameritrade.Quote.Forex do
  @moduledoc false
  # Quote payload for a forex pair; field names mirror the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            bidPriceInDouble: 0,
            askPriceInDouble: 0,
            lastPriceInDouble: 0,
            highPriceInDouble: 0,
            lowPriceInDouble: 0,
            closePriceInDouble: 0,
            exchange: nil,
            description: nil,
            openPriceInDouble: 0,
            changeInDouble: 0,
            percentChange: 0,
            exchangeName: nil,
            digits: 0,
            securityStatus: nil,
            tick: 0,
            tickAmount: 0,
            product: nil,
            tradingHours: nil,
            isTradable: false,
            marketMaker: nil,
            fiftyTwoWkHighInDouble: 0,
            fiftyTwoWkLowInDouble: 0,
            mark: 0
end
defmodule Ameritrade.Quote.ETF do
  @moduledoc false
  # Quote payload for an ETF. Note: field set is identical to
  # Ameritrade.Quote.Equity; kept separate to match the API's asset types.
  @derive Jason.Encoder
  defstruct symbol: nil,
            description: nil,
            bidPrice: 0,
            bidSize: 0,
            bidId: nil,
            askPrice: 0,
            askSize: 0,
            askId: nil,
            lastPrice: 0,
            lastSize: 0,
            lastId: nil,
            openPrice: 0,
            highPrice: 0,
            lowPrice: 0,
            closePrice: 0,
            netChange: 0,
            totalVolume: 0,
            quoteTimeInLong: 0,
            tradeTimeInLong: 0,
            mark: 0,
            exchange: nil,
            exchangeName: nil,
            marginable: false,
            shortable: false,
            volatility: 0,
            digits: 0,
            fiftyTwoWkHigh: 0,
            fiftyTwoWkLow: 0,
            peRatio: 0,
            divAmount: 0,
            divYield: 0,
            divDate: nil,
            securityStatus: nil,
            regularMarketLastPrice: 0,
            regularMarketLastSize: 0,
            regularMarketNetChange: 0,
            regularMarketTradeTimeInLong: 0
end
defmodule Ameritrade.Quote.Equity do
  @moduledoc false
  # Quote payload for an equity; field names mirror the TD Ameritrade API.
  @derive Jason.Encoder
  defstruct symbol: nil,
            description: nil,
            bidPrice: 0,
            bidSize: 0,
            bidId: nil,
            askPrice: 0,
            askSize: 0,
            askId: nil,
            lastPrice: 0,
            lastSize: 0,
            lastId: nil,
            openPrice: 0,
            highPrice: 0,
            lowPrice: 0,
            closePrice: 0,
            netChange: 0,
            totalVolume: 0,
            quoteTimeInLong: 0,
            tradeTimeInLong: 0,
            mark: 0,
            exchange: nil,
            exchangeName: nil,
            marginable: false,
            shortable: false,
            volatility: 0,
            digits: 0,
            fiftyTwoWkHigh: 0,
            fiftyTwoWkLow: 0,
            peRatio: 0,
            divAmount: 0,
            divYield: 0,
            divDate: nil,
            securityStatus: nil,
            regularMarketLastPrice: 0,
            regularMarketLastSize: 0,
            regularMarketNetChange: 0,
            regularMarketTradeTimeInLong: 0
end
|
lib/schema/quote.ex
| 0.644449
| 0.606994
|
quote.ex
|
starcoder
|
defmodule Calcy do
  @moduledoc """
  Documentation for Calcy.
  """

  @doc """
  Runs over input and tokenizes it
  """
  # NOTE(review): returns the empty tuple `{}` when no tokens are produced
  # but returns `env` otherwise — two different result shapes; confirm
  # callers expect this.
  def run(input, env) do
    tokens = Calcy.Lexer.lex(input)

    case length(tokens) do
      0 -> {}
      _ -> continue(tokens, env)
    end
  end

  @doc """
  Gets a list of tokens and parses it
  """
  # Pipeline: tokens -> AST -> IR -> compiled output. Evaluation via
  # eval/2 is currently disabled (commented out); returns env unchanged.
  def continue(tokens, env) do
    tree = Calcy.Parser.parse(tokens)
    ir = Calcy.IR.ir(tree)
    Calcy.Compiler.compile(ir, env)
    env
    # eval(tree, env)
  end

  @doc """
  Runs over a list of ASTs and evaluates them
  """
  # Seeds the environment with the constants :pi and :e before evaluating.
  def eval(tree, env) when is_list(tree) do
    tree = List.flatten(tree)
    eval_lines(Map.merge(%{:pi => 3.1415926535897932, :e => 2.718281828459045}, env), tree, [])
  end

  @doc """
  Runs over an AST and evaluates it
  """
  # Single-expression variant: prints the result (or exits on :exit) and
  # returns the (unmodified) environment.
  def eval(tree, env) do
    result = Calcy.Evaluator.eval(Map.merge(%{:pi => 3.1415926535897932, :e => 2.718281828459045}, env), tree)

    case result do
      :exit -> exit_program()
      _ -> IO.puts(result)
    end

    env
  end

  @doc """
  Evaluates multiple lines
  """
  # NOTE(review): `results ++ [result]` is O(n²) over the number of lines;
  # prepend-and-reverse would be linear, but output order semantics for
  # caller-supplied accumulators would need checking before changing.
  def eval_lines(env, [line | lines], results) do
    result = Calcy.Evaluator.eval(env, line)
    {result, env} = check_result(result, env)
    eval_lines(env, lines, results ++ [result])
  end

  @doc """
  Prints evaulated results
  """
  # Base case: all lines consumed — print accumulated results in order.
  def eval_lines(env, [], results) do
    Enum.map(results, fn result -> print(result) end)
    env
  end

  @doc """
  Checks the result and updates the environment if needed
  """
  # A tuple result carries {new_env, message}: print the message now and
  # adopt the new environment; nothing is added to the results list (nil).
  def check_result(result, _env) when is_tuple(result) do
    IO.puts(result |> elem(1))
    {nil, result |> elem(0)}
  end

  @doc """
  Checks the result and updates the environment if needed
  """
  def check_result(result, env) do
    {result, env}
  end

  @doc """
  Checks if the user requested to exit
  """
  def print(val) when val == :exit do
    exit_program()
  end

  @doc """
  Prints the value if it is not nil
  """
  def print(val) do
    case val do
      nil -> nil
      _ -> IO.puts(val)
    end
  end

  @doc """
  Greets the user with a goodbye and exits
  """
  def exit_program() do
    IO.puts("Bye bye :)")
    exit(:normal)
  end
end
|
lib/calcy.ex
| 0.578448
| 0.491395
|
calcy.ex
|
starcoder
|
defmodule MerkleTree do
  @moduledoc """
  A hash tree or Merkle tree is a tree in which every non-leaf node is labelled
  with the hash of the labels or values (in case of leaves) of its child nodes.
  Hash trees are useful because they allow efficient and secure verification of
  the contents of large data structures.

  ## Usage Example

      iex> MerkleTree.new ["a", "b", "c", "d"]
      %MerkleTree{
        blocks: ["a", "b", "c", "d"],
        hash_function: &MerkleTree.Crypto.sha256/1,
        root: %MerkleTree.Node{
          children: [
            %MerkleTree.Node{
              children: [
                %MerkleTree.Node{
                  children: [],
                  height: 0,
                  value: "022a6979e6dab7aa5ae4c3e5e45f7e977112a7e63593820dbec1ec738a24f93c"
                },
                %MerkleTree.Node{
                  children: [],
                  height: 0,
                  value: "57eb35615d47f34ec714cacdf5fd74608a5e8e102724e80b24b287c0c27b6a31"
                }
              ],
              height: 1,
              value: "4c64254e6636add7f281ff49278beceb26378bd0021d1809974994e6e233ec35"
            },
            %MerkleTree.Node{
              children: [
                %MerkleTree.Node{
                  children: [],
                  height: 0,
                  value: "597fcb31282d34654c200d3418fca5705c648ebf326ec73d8ddef11841f876d8"
                },
                %MerkleTree.Node{
                  children: [],
                  height: 0,
                  value: "d070dc5b8da9aea7dc0f5ad4c29d89965200059c9a0ceca3abd5da2492dcb71d"
                }
              ],
              height: 1,
              value: "40e2511a6323177e537acb2e90886e0da1f84656fd6334b89f60d742a3967f09"
            }
          ],
          height: 2,
          value: "9dc1674ae1ee61c90ba50b6261e8f9a47f7ea07d92612158edfe3c2a37c6d74c"
        }
      }
  """
  defstruct [:blocks, :root, :hash_function]

  # Number of children per node. Configurable.
  @number_of_children 2

  # values prepended to a leaf and node to differentiate between them when calculating values
  # of parent in Merkle Tree, added to prevent a second preimage attack
  # where a proof for node can be validated as a proof for leaf
  @leaf_salt <<0>>
  @node_salt <<1>>

  @type blocks :: [String.t(), ...]
  @type hash_function :: (String.t() -> String.t())
  @type root :: MerkleTree.Node.t()
  @type t :: %MerkleTree{
          blocks: blocks,
          root: root,
          hash_function: hash_function
        }

  @doc """
  Creates a new merkle tree, given a blocks and hash function or opts.
  available options:
  :hash_function - hash used in the merkle tree, default :sha256 from :crypto
  :height - allows to construct tree of provided height,
  empty leaves data will be taken from `:default_data_block` parameter
  :default_data_block - this data will be used to supply empty
  leaves in case where there isn't enough blocks provided
  Check out `MerkleTree.Crypto` for other available cryptographic hashes.
  Alternatively, you can supply your own hash function that has the spec
  ``(String.t -> String.t)``.
  """
  @spec new(blocks, hash_function | Keyword.t()) :: t
  def new(blocks, hash_function_or_opts \\ [])

  def new(blocks, hash_function) when is_function(hash_function),
    do: new(blocks, hash_function: hash_function)

  def new(blocks, opts) when is_list(opts) do
    # Fill in the data blocks. Without an explicit :default_data_block we do
    # not silently pad with a default like "" (fill_blocks/3 raises instead).
    filled_blocks = fill_blocks(blocks, Keyword.get(opts, :default_data_block), Keyword.get(opts, :height))
    # calculate the root node, which does all the hashing etc.
    # (build/2 pads internally from the same opts, so it takes the raw blocks)
    root = build(blocks, opts)
    hash_function = Keyword.get(opts, :hash_function, &MerkleTree.Crypto.sha256/1)
    %MerkleTree{blocks: filled_blocks, hash_function: hash_function, root: root}
  end

  @doc """
  Calculates the root of the merkle tree without building the entire tree explicitly,
  See `new/2` for a rundown of options
  """
  @spec fast_root(blocks, Keyword.t()) :: MerkleTree.Node.hash()
  def fast_root(blocks, opts \\ []) do
    {hash_function, height, default_data_block} = get_from_options(opts, blocks)
    default_leaf_value = hash_function.(@leaf_salt <> default_data_block)
    leaf_values = Enum.map(blocks, fn block -> hash_function.(@leaf_salt <> block) end)
    _fast_root(leaf_values, hash_function, 0, default_leaf_value, height)
  end

  # Empty level: seed with a single default leaf so the recursion terminates.
  defp _fast_root([], hash_function, height, default_leaf, final_height),
    do: _fast_root([default_leaf], hash_function, height, default_leaf, final_height)

  # Single value at the target height — that's the root.
  defp _fast_root([root], _, final_height, _, final_height), do: root

  defp _fast_root(nodes, hash_function, height, default_node, final_height) do
    count = step = @number_of_children
    # Pad the last partition with default nodes so every parent has `count` children.
    leftover = List.duplicate(default_node, count - 1)
    children_partitions = Enum.chunk_every(nodes, count, step, leftover)
    new_height = height + 1

    parents =
      Enum.map(children_partitions, fn partition ->
        concatenated_values = [@node_salt | partition] |> Enum.join()
        hash_function.(concatenated_values)
      end)

    new_default_node = hash_function.(@node_salt <> default_node <> default_node)
    _fast_root(parents, hash_function, new_height, new_default_node, final_height)
  end

  # takes care of the defaults etc
  defp get_from_options(opts, blocks) do
    {
      Keyword.get(opts, :hash_function, &MerkleTree.Crypto.sha256/1),
      Keyword.get(opts, :height, guess_height(Enum.count(blocks))),
      Keyword.get(opts, :default_data_block, "")
    }
  end

  @doc """
  Builds a root MerkleTree.Node structure of a merkle tree
  See `new/2` for a rundown of options
  """
  @spec build(blocks, hash_function | Keyword.t()) :: root
  def build(blocks, hash_function_or_opts \\ [])

  def build(blocks, hash_function) when is_function(hash_function),
    do: build(blocks, hash_function: hash_function)

  def build(blocks, opts) do
    {hash_function, height, default_data_block} = get_from_options(opts, blocks)
    default_leaf_value = hash_function.(@leaf_salt <> default_data_block)
    leaf_values = Enum.map(blocks, fn block -> hash_function.(@leaf_salt <> block) end)
    default_leaf = %MerkleTree.Node{value: default_leaf_value, children: [], height: 0}
    leaves = Enum.map(leaf_values, &%MerkleTree.Node{value: &1, children: [], height: 0})
    _build(leaves, hash_function, 0, default_leaf, height)
  end

  defp _build([], hash_function, height, default_leaf, final_height),
    do: _build([default_leaf], hash_function, height, default_leaf, final_height)

  # Base case
  defp _build([root], _, final_height, _, final_height), do: root

  # Recursive case
  defp _build(nodes, hash_function, height, default_leaf, final_height) do
    count = step = @number_of_children
    leftover = List.duplicate(default_leaf, count - 1)
    children_partitions = Enum.chunk_every(nodes, count, step, leftover)
    new_height = height + 1

    parents =
      Enum.map(children_partitions, fn partition ->
        concatenated_values = partition |> Enum.map(& &1.value) |> Enum.join()
        concatenated_values = @node_salt <> concatenated_values

        %MerkleTree.Node{
          value: hash_function.(concatenated_values),
          children: partition,
          height: new_height
        }
      end)

    new_default_leaf_value =
      hash_function.(@node_salt <> default_leaf.value <> default_leaf.value)

    new_default_leaf = %MerkleTree.Node{
      value: new_default_leaf_value,
      children: List.duplicate(default_leaf, @number_of_children),
      height: new_height
    }

    _build(parents, hash_function, new_height, new_default_leaf, final_height)
  end

  # Integer ceiling (avoids relying on Kernel.ceil/Float.ceil availability).
  defp _ceil(a), do: if(a > trunc(a), do: trunc(a) + 1, else: trunc(a))

  # Height not given: pad up to the next power-of-two leaf count.
  defp fill_blocks(blocks, default, nil) when default != nil do
    blocks_count = Enum.count(blocks)
    leaves_count = guess_leaves_count(blocks_count)
    blocks ++ List.duplicate(default, trunc(leaves_count - blocks_count))
  end

  # Height given: pad to exactly 2^height leaves; raise if blocks exceed that.
  defp fill_blocks(blocks, default, height) when default != nil do
    blocks_count = Enum.count(blocks)
    leaves_count = :math.pow(2, height)
    fill_elements = leaves_count - blocks_count
    if fill_elements < 0, do: raise(MerkleTree.ArgumentError)
    blocks ++ List.duplicate(default, trunc(fill_elements))
  end

  # No default available: the block count must already be a power of two.
  defp fill_blocks(blocks, _, _) when blocks != [] do
    blocks_count = Enum.count(blocks)
    required_leaves_count = :math.pow(2, _ceil(:math.log2(blocks_count)))

    if required_leaves_count != blocks_count,
      do: raise(MerkleTree.ArgumentError),
      else: blocks
  end

  defp guess_leaves_count(blocks_count), do: :math.pow(2, guess_height(blocks_count))

  defp guess_height(0), do: 0
  defp guess_height(blocks_count), do: _ceil(:math.log2(blocks_count))
end
|
lib/merkle_tree.ex
| 0.863435
| 0.528898
|
merkle_tree.ex
|
starcoder
|
defmodule Exbee do
  @moduledoc """
  Communicate with [XBee](http://en.wikipedia.org/wiki/XBee) wireless radios in Elixir.
  This assumes that XBee modules are in API mode. In API mode, XBee modules send and receive
  commands via encoded frames. Possible frames include:
  * `Exbee.ATCommandFrame`
  * `Exbee.ATCommandQueueFrame`
  * `Exbee.ATCommandResultFrame`
  * `Exbee.RemoteATCommandFrame`
  * `Exbee.RemoteATCommandResultFrame`
  * `Exbee.RxFrame`
  * `Exbee.RxSampleReadFrame`
  * `Exbee.RxSensorReadFrame`
  * `Exbee.TxFrame`
  * `Exbee.TxResultFrame`
  * `Exbee.ExplicitTxFrame`
  * `Exbee.ExplicitRxFrame`
  * `Exbee.DeviceStatusFrame`
  Frames are sent via the `Exbee.send_frame/2` function. Frames received on the serial port are
  reported as messages to the current process. The messages have the following form:
      {:exbee, frame}
  This example starts an Exbee process and sends an `Exbee.ATCommandFrame` to change the value of
  the `NJ` parameter. Upon receiving the command, the XBee module will return an
  `Exbee.ATCommandStatusFrame` indicating the status of the request.
      iex> {:ok, pid} = Exbee.start_link(serial_port: "COM1")
      iex> Exbee.send_frame(pid, %Exbee.ATCommandFrame{command: "NJ", value: 1})
      :ok
      iex> flush()
      {:exbee, %Exbee.ATCommandResultFrame{command: "NJ", status: :ok, value: <0x01>}}
  """
  use GenServer

  alias Exbee.{Message}

  # NOTE(review): config is captured at COMPILE time via Application.get_all_env;
  # runtime config changes will not be seen. Consider Application.compile_env/3
  # (if compile-time capture is intended) or reading env at runtime.
  @config Application.get_all_env(:exbee)
  # Option keys forwarded to the serial adapter (everything else is Exbee's own).
  @adapter_options [:speed, :data_bits, :stop_bits, :parity, :flow_control]

  @doc """
  Return a map of available serial devices with information about each.
      iex> Exbee.serial_ports()
      %{
        "COM1" => %{description: "USB Serial", manufacturer: "FTDI", product_id: 1, vendor_id: 2},
        "COM2" => %{...},
        "COM3" => %{...}
      }
  Depending on the device and the operating system, not all fields may be returned.
  fields are:
  * `:vendor_id` - The 16-bit USB vendor ID of the device providing the port. Vendor ID to name
  lists are managed through usb.org
  * `:product_id` - The 16-bit vendor supplied product ID
  * `:manufacturer` - The manufacturer of the port
  * `:description` - A description or product name
  * `:serial_number` - The device's serial number if it has one
  """
  @type device_option ::
          {:serial_port, String.t()}
          | {:speed, non_neg_integer}
          | {:data_bits, 5..8}
          | {:stop_bits, 1..2}
          | {:parity, :none | :even | :odd | :space | :mark}
          | {:flow_control, :none | :hardware | :software}

  @spec serial_ports :: map
  def serial_ports do
    @config[:adapter].enumerate()
  end

  @doc """
  Start a new Exbee process.
      iex> {:ok, pid} = Exbee.start_link(serial_port: "COM1", speed: 9600)
  Options can either be passed directly, or they'll be read from `:exbee` config values. The
  following options are available:
  * `:serial_port` - The serial interface connected to the Xbee device.
  * `:speed` - (number) set the initial baudrate (e.g., 115200)
  * `:data_bits` - (5, 6, 7, 8) set the number of data bits (usually 8)
  * `:stop_bits` - (1, 2) set the number of stop bits (usually 1)
  * `:parity` - (`:none`, `:even`, `:odd`, `:space`, or `:mark`) set the parity. Usually this is
  `:none`. Other values:
  * `:space` means that the parity bit is always 0
  * `:mark` means that the parity bit is always 1
  * `:flow_control` - (`:none`, `:hardware`, or `:software`) set the flow control strategy.
  The following are some reasons for which the device may fail to start:
  * `:enoent` - the specified port couldn't be found
  * `:eagain` - the port is already open
  * `:eacces` - permission was denied when opening the port
  """
  @spec start_link([device_option]) :: {:ok, pid} | {:error, term}
  def start_link(options \\ []) do
    serial_port = Keyword.get(options, :serial_port, @config[:serial_port])
    adapter = Keyword.get(options, :adapter, @config[:adapter])
    # Explicit options override app config; only adapter-relevant keys pass through.
    adapter_options = Keyword.merge(@config, options) |> Keyword.take(@adapter_options)
    GenServer.start_link(__MODULE__, [self(), serial_port, adapter, adapter_options])
  end

  @doc """
  Send a frame to a given device.
  A frame must implement the `Exbee.EncodableFrame` protocol, making it possible to define custom
  frames.
  """
  @spec send_frame(pid, Exbee.EncodableFrame.t()) :: :ok | {:error, term}
  def send_frame(pid, frame) do
    GenServer.call(pid, {:send_frame, frame})
  end

  @doc """
  Shuts down the device process.
  """
  @spec stop(pid) :: :ok
  def stop(pid) do
    GenServer.call(pid, :stop)
  end

  defmodule State do
    @moduledoc false
    # buffer accumulates raw serial bytes until complete frames can be parsed.
    defstruct [:caller_pid, :adapter_pid, :adapter, buffer: <<>>]
  end

  def init([caller_pid, serial_port, adapter, adapter_options]) do
    {:ok, adapter_pid} = adapter.start_link()
    # Assertive match: crash on open failure so the supervisor sees the reason.
    :ok = adapter.open(adapter_pid, serial_port, adapter_options)
    {:ok, %State{caller_pid: caller_pid, adapter_pid: adapter_pid, adapter: adapter}}
  end

  def handle_call({:send_frame, frame}, _, %{adapter: adapter, adapter_pid: adapter_pid} = state) do
    {:reply, adapter.write(adapter_pid, Message.build(frame)), state}
  end

  def handle_call(:stop, _, %{adapter: adapter, adapter_pid: adapter_pid} = state) do
    {:reply, adapter.stop(adapter_pid), state}
  end

  # Incoming serial data (tagged :nerves_uart by the adapter). Partial frames
  # are kept in the buffer; complete frames are forwarded to the caller.
  def handle_info({:nerves_uart, _port, data}, %{caller_pid: caller_pid, buffer: buffer} = state) do
    {new_buffer, frames} = Message.parse(buffer <> data)

    for frame <- frames do
      send(caller_pid, {:exbee, frame})
    end

    {:noreply, %{state | buffer: new_buffer}}
  end
end
|
lib/exbee.ex
| 0.902757
| 0.552359
|
exbee.ex
|
starcoder
|
defmodule MatrexNumerix.Fft do
  @moduledoc """
  Computes the discrete Fourier transform (DFT) of the given complex vector.
  """
  import Matrex.Guards

  alias Matrex.Vector

  # Given sample positions `x` and real samples `y` (equal length), returns
  # [frequency: ..., amplitude: ...] for the first half of the spectrum.
  # Assumes `x` is uniformly spaced (interval taken from the first two samples).
  def dft_freq_and_amplitude(
        vector_data(len1, _body1) = x,
        vector_data(len2, _body2) = y
      ) when len1 == len2 do
    sampling_interval = x[2] - x[1] # calculate sampling interval
    Matrex.concat(y, Vector.zeros(len1), :rows)
    |> dft_complex()
    |> to_freq_and_amplitude(sampling_interval)
  end

  @doc """
  Computes the discrete Fourier transform (DFT) of the given real vector.
  - `y` real and imaginary input
  Returns:
  - a matrix of real and imaginary fourier transform output
  """
  # Pads a zero imaginary row onto the real input and delegates to dft_complex/1.
  def dft_real(vector_data(len, _body1) = y) do
    Matrex.concat(y, Vector.zeros(len), :rows)
    |> dft_complex()
  end

  @doc """
  Computes the discrete Fourier transform (DFT) of the given complex vector.
  - `xx` real and imaginary input
  Returns:
  - a matrix of real and imaginary fourier transform output
  """
  # Naive O(n²) DFT. Row 1 = real part, row 2 = imaginary part.
  # NOTE(review): both loops are 1-based (t, k in 1..nn) rather than the
  # textbook 0..nn-1, which rotates/realigns the output bins relative to a
  # standard DFT (magnitudes are affected only by bin placement). Confirm
  # this indexing matches to_freq_and_amplitude/2's frequency axis.
  def dft_complex(xx) do
    {2, nn} = xx |> Matrex.size()
    # IO.inspect(xx, label: :xx)
    rx = xx[1]
    ry = xx[2]

    output! =
      for k <- 1..nn, reduce: Matrex.zeros(2, nn) do
        output ->
          {sumreal, sumimag} =
            for t <- 1..nn, reduce: {0.0, 0.0} do # For each input element
              {sumreal, sumimag} ->
                angle = 2 * :math.pi * t * k / nn
                # (rx + i*ry) * (cos(angle) - i*sin(angle)), accumulated
                sumreal! = sumreal + rx[t] * :math.cos(angle) + ry[t] * :math.sin(angle)
                sumimag! = sumimag + -rx[t] * :math.sin(angle) + ry[t] * :math.cos(angle)
                {sumreal!, sumimag!}
            end

          output
          |> Matrex.set(1, k, sumreal)
          |> Matrex.set(2, k, sumimag)
      end

    output!
  end

  # Converts a 2xN real/imaginary DFT matrix into frequency and amplitude
  # vectors for the first N/2 bins (the spectrum is symmetric for real input).
  def to_freq_and_amplitude(
        matrex_data(2, nn, _data1, _first) = fft,
        sampling_interval) do
    tt = sampling_interval
    # 1/T = frequency
    ff =
      (Enum.to_list(1..nn)
       |> Matrex.from_list()
       |> Matrex.apply(fn x -> (x-1) * 1/(nn*tt) end))[1..div(nn,2)]
    # ff = np.linspace(0, 1 / tt, nn)

    # Magnitude of each complex bin, normalised by nn.
    fft_amp =
      (Enum.map(1..nn, fn i -> fft |> Matrex.column_to_list(i) end)
       |> Enum.map(fn [r,i] -> :math.sqrt( :math.pow(r,2) + :math.pow(i,2)) end)
       |> Vector.new()
       |> Matrex.divide(1.0*nn))[1..div(nn,2)]

    [frequency: ff, amplitude: fft_amp]
  end
end
|
lib/fft.ex
| 0.919584
| 0.707809
|
fft.ex
|
starcoder
|
defmodule Vector do
  @moduledoc false
  # A line segment on an integer grid with its orientation, slope/intercept,
  # and the list of integer grid points it covers.
  #
  # NOTE(review): compute_orientation/4 labels x1 == x2 as :horizontal and
  # y1 == y2 as :vertical — the names are swapped relative to geometric
  # convention, but covered_points/6 is written against these labels, so the
  # behavior is internally consistent. Renaming would change the observable
  # `orientation` field for existing callers; left as-is.

  defstruct x1: 0,
            y1: 0,
            x2: 0,
            y2: 0,
            orientation: :point,
            slope: 0.0,
            covered_points: [],
            b: 0.0

  # Typespec fix: the original listed `slope` twice (as parameter and map key)
  # and omitted `b`; the duplicate silently shadowed the intercept field.
  @type t(x1, y1, x2, y2, orientation, slope, covered_points, b) :: %Vector{
          x1: x1,
          y1: y1,
          x2: x2,
          y2: y2,
          orientation: orientation,
          slope: slope,
          covered_points: covered_points,
          b: b
        }

  # Non-degenerate segment (x2 != x1): slope and intercept are well defined.
  def build(x1, y1, x2, y2) when x1 >= 0 and y1 >= 0 and x2 >= 0 and y2 >= 0 and x2 != x1 do
    orientation = compute_orientation(x1, y1, x2, y2)
    slope = (y2 - y1) / (x2 - x1)

    %Vector{
      x1: x1,
      y1: y1,
      x2: x2,
      y2: y2,
      orientation: orientation,
      slope: slope,
      b: y1 - slope * x1,
      covered_points: covered_points(orientation, x1, y1, x2, y2, slope)
    }
  end

  # Vertical-line / fallback clause: slope left at 0 and `b` at its default,
  # so on_vector/3 is only meaningful for segments built by the first clause.
  def build(x1, y1, x2, y2) do
    orientation = compute_orientation(x1, y1, x2, y2)

    %Vector{
      x1: x1,
      y1: y1,
      x2: x2,
      y2: y2,
      orientation: orientation,
      slope: 0,
      covered_points: covered_points(orientation, x1, y1, x2, y2, 0)
    }
  end

  # True when (x1, y1) lies on the infinite line through v (uses slope/intercept).
  def on_vector(v, x1, y1) do
    v.slope * x1 + v.b == y1
  end

  def intersect(v1, v2) when v1 == v2, do: true

  # NOTE(review): returns true for ANY two vectors with non-empty
  # covered_points, and has no falsy clause (other inputs raise
  # FunctionClauseError) — confirm callers rely on intersections/2 instead.
  def intersect(%Vector{covered_points: c1}, %Vector{covered_points: c2})
      when length(c1) > 0 and length(c2) > 0 do
    true
  end

  # Count of grid points shared by both segments (multiset intersection size).
  def intersections(%Vector{covered_points: c1}, %Vector{covered_points: c2})
      when length(c1) > 0 and length(c2) > 0 do
    (c1 -- (c2 ++ (c2 -- c1))) |> length()
  end

  def intersections(_v1, _v2), do: 0

  # :horizontal here means x1 == x2 (see module note): one x, range of y.
  defp covered_points(:horizontal, x1, y1, x2, y2, _m) do
    [x] = get_list(x1, x2)

    get_list(y1, y2)
    |> Enum.map(fn y -> {x, y} end)
  end

  # :vertical here means y1 == y2 (see module note): one y, range of x.
  defp covered_points(:vertical, x1, y1, x2, y2, _m) do
    [y] = get_list(y1, y2)

    get_list(x1, x2)
    |> Enum.map(fn x -> {x, y} end)
  end

  # Diagonal: walk integer x from x1 to x2 and evaluate y = slope*x + b.
  # Only yields integer grid points when slope produces whole y values
  # (e.g. ±1 diagonals); y2 is unused here.
  defp covered_points(:angle, x1, y1, x2, _y2, slope) do
    b = y1 - slope * x1

    Enum.to_list(x1..x2)
    |> Enum.map(fn x ->
      {x, normalize_f(slope * x + b)}
    end)
  end

  # Collapse float results that are whole numbers back to integers so the
  # points compare equal across clauses ({2, 2} vs {2, 2.0}).
  defp normalize_f(v) do
    if v == round(v) do
      round(v)
    else
      v
    end
  end

  defp get_list(p1, p2), do: Enum.to_list(p1..p2)

  defp compute_orientation(x1, y1, x2, y2) when x1 != x2 and y1 != y2, do: :angle
  defp compute_orientation(x1, y1, x2, y2) when x1 == x2 and y1 == y2, do: :point
  defp compute_orientation(x1, y1, x2, y2) when x1 == x2 and y1 != y2, do: :horizontal
  defp compute_orientation(x1, y1, x2, y2) when x1 != x2 and y1 == y2, do: :vertical
end
defmodule AdventOfCode.Y2021.Day5 do
  @moduledoc """
  --- Day 5: Hydrothermal Venture ---
  You come across a field of hydrothermal vents on the ocean floor! These vents constantly produce large, opaque clouds, so it would be best to avoid them if possible.
  They tend to form in lines; the submarine helpfully produces a list of nearby lines of vents (your puzzle input) for you to review. For example:
  0,9 -> 5,9
  8,0 -> 0,8
  9,4 -> 3,4
  2,2 -> 2,1
  7,0 -> 7,4
  6,4 -> 2,0
  0,9 -> 2,9
  3,4 -> 1,4
  0,0 -> 8,8
  5,5 -> 8,2
  Each line of vents is given as a line segment in the format x1,y1 -> x2,y2 where x1,y1 are the coordinates of one end the line segment and x2,y2 are the coordinates of the other end. These line segments include the points at both ends. In other words:
  An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
  An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
  For now, only consider horizontal and vertical lines: lines where either x1 = x2 or y1 = y2.
  To avoid the most dangerous areas, you need to determine the number of points where at least two lines overlap.
  Consider only horizontal and vertical lines. At how many points do at least two lines overlap?
  """

  @doc """
  Day 5 - Part 1

  Counts grid points covered by at least two horizontal/vertical vent lines
  (diagonal segments are excluded).

  ## Examples

      iex> AdventOfCode.Y2021.Day5.part1()
      5442

  """
  def part1() do
    setup()
    |> Enum.reject(fn v -> v.orientation == :angle end)
    |> count_overlaps()
  end

  @doc """
  Day 5 - Part 2

  Same as part 1, but diagonal segments participate too.

  ## Examples

      iex> AdventOfCode.Y2021.Day5.part2()
      19571

  """
  def part2() do
    setup()
    |> count_overlaps()
  end

  @doc """
  Parses the puzzle input file into a list of `Vector` structs.
  """
  def setup() do
    AdventOfCode.etl_file(
      "lib/y_2021/d5/input.txt",
      &compute_lines/1
    )
  end

  # Shared tail of both parts: flatten every vector's covered grid points,
  # tally them, and count the points hit by two or more vectors.
  defp count_overlaps(vectors) do
    vectors
    |> Enum.flat_map(fn v -> v.covered_points end)
    |> Enum.frequencies()
    |> Enum.count(fn {_point, hits} -> hits >= 2 end)
  end

  # "x1,y1 -> x2,y2" -> %Vector{}
  defp compute_lines(row) do
    [start_pos, end_pos] = String.split(row, " -> ")
    build_coord(start_pos, end_pos)
  end

  defp build_coord(start_pos, end_pos) do
    [x1, y1] = to_xy(start_pos)
    [x2, y2] = to_xy(end_pos)
    Vector.build(x1, y1, x2, y2)
  end

  # "3,4" -> [3, 4]; the {n, ""} match asserts each component is a bare integer.
  defp to_xy(xy) do
    xy
    |> String.split(",")
    |> Enum.map(fn st ->
      {n, ""} = Integer.parse(st)
      n
    end)
  end
end
|
lib/y_2021/d5/day5.ex
| 0.86012
| 0.870377
|
day5.ex
|
starcoder
|
defmodule Type.Function do
@moduledoc """
represents a function type.
There are two fields for the struct defined by this module.
- `params` a list of types for the function arguments. Note that the arity
of the function is the length of this list. May also be the atom `:any`
which corresponds to "a function of any arity".
- `return` the type of the returned value.
### Examples:
- `(... -> integer())` would be represented as `%Type.Function{params: :any, return: %Type{name: :integer}}`
- `(integer() -> integer())` would be represented as `%Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}}`
## Inference
By default, Mavis will not attempt to perform inference on function
types.
```elixir
iex> inspect Type.of(&(&1 + 1))
"(any() -> any())"
```
If you would like to perform inference on the function to obtain
more details on the acceptable function types, set the inference
environment variable. For example, if you're using the `:mavis_inference` hex package, do:
```
Application.put_env(:mavis, :inference, Type.Inference)
```
The default module for this is `Type.NoInference`
### Key functions:
#### comparison
Functions are ordered first by the type order on their return type,
followed by type order on their parameters.
```elixir
iex> Type.compare(%Type.Function{params: [], return: %Type{name: :atom}},
...> %Type.Function{params: [], return: %Type{name: :integer}})
:gt
iex> Type.compare(%Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}},
...> %Type.Function{params: [%Type{name: :atom}], return: %Type{name: :integer}})
:lt
```
#### intersection
Functions with distinct parameter types are nonoverlapping, even if their parameter
types overlap. If they have the same parameters, then their return values are intersected.
```elixir
iex> Type.intersection(%Type.Function{params: [], return: 1..10},
...> %Type.Function{params: [], return: %Type{name: :integer}})
%Type.Function{params: [], return: 1..10}
iex> Type.intersection(%Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}},
...> %Type.Function{params: [1..10], return: %Type{name: :integer}})
%Type{name: :none}
```
functions with `:any` parameters intersected with a function with specified parameters
will adopt the parameters of the intersected function.
```elixir
iex> Type.intersection(%Type.Function{params: :any, return: %Type{name: :integer}},
...> %Type.Function{params: [1..10], return: %Type{name: :integer}})
%Type.Function{params: [1..10], return: %Type{name: :integer}}
```
#### union
Functions are generally not merged in union operations, but if their parameters are
identical then their return types will be merged.
```elixir
iex> Type.union(%Type.Function{params: [], return: 1..10},
...> %Type.Function{params: [], return: 11..20})
%Type.Function{params: [], return: 1..20}
```
#### subtype?
A function type is the subtype of another if it has the same parameters and its return
value type is the subtype of the other's
```elixir
iex> Type.subtype?(%Type.Function{params: [%Type{name: :integer}], return: 1..10},
...> %Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}})
true
```
#### usable_as
The `usable_as` relationship for functions may not necessarily be obvious. An
easy way to think about it, is: if I passed a function with this type to a
function that demanded the other type how confident would I be that it would
not crash.
A function is `usable_as` another function if all of its parameters are
supertypes of the targeted function; and if its return type is subtypes of the
return type of the targeted function.
```elixir
iex> Type.usable_as(%Type.Function{params: [%Type{name: :integer}], return: 1..10},
...> %Type.Function{params: [1..10], return: %Type{name: :integer}})
:ok
iex> Type.usable_as(%Type.Function{params: [1..10], return: 1..10},
...> %Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}})
{:maybe, [%Type.Message{type: %Type.Function{params: [1..10], return: 1..10},
target: %Type.Function{params: [%Type{name: :integer}], return: %Type{name: :integer}}}]}
iex> Type.usable_as(%Type.Function{params: [], return: %Type{name: :atom}},
...> %Type.Function{params: [], return: %Type{name: :integer}})
{:error, %Type.Message{type: %Type.Function{params: [], return: %Type{name: :atom}},
target: %Type.Function{params: [], return: %Type{name: :integer}}}}
"""
# :return is the only mandatory field; :params defaults to :any (a function
# of any arity). The meaning of :inferred is not shown in this file —
# presumably it marks types produced by the inference module; confirm there.
@enforce_keys [:return]
defstruct @enforce_keys ++ [params: :any, inferred: false]
@type t :: %__MODULE__{
params: [Type.t] | :any,
return: Type.t,
inferred: boolean
}
import Type, only: [builtin: 1]
@doc """
Delegates type inference for `fun` to the module configured under the
`:mavis`/`:inference` application env key (default: `Type.NoInference`).
"""
def infer(fun) do
module = Application.get_env(:mavis, :inference, Type.NoInference)
module.infer(fun)
end
defimpl Type.Properties do
import Type, only: :macros
use Type.Helpers
# Ordering within the function "group" (group_compare/usable_as/intersection
# blocks are macros supplied by Type.Helpers).
group_compare do
def group_compare(%{params: :any, return: r1}, %{params: :any, return: r2}) do
Type.compare(r1, r2)
end
# :any-arity functions sort above all fixed-arity functions.
def group_compare(%{params: :any}, _), do: :gt
def group_compare(_, %{params: :any}), do: :lt
# Lower arity sorts higher (note the inverted :gt/:lt vs. length order).
def group_compare(%{params: p1}, %{params: p2})
when length(p1) < length(p2), do: :gt
def group_compare(%{params: p1}, %{params: p2})
when length(p1) > length(p2), do: :lt
def group_compare(f1, f2) do
# Lexicographic comparison over [return | params]; throw is used as an
# early exit from Enum.each on the first non-:eq element and is caught
# by the function-level `catch` below.
[f1.return | f1.params]
|> Enum.zip([f2.return | f2.params])
|> Enum.each(fn {t1, t2} ->
compare = Type.compare(t1, t2)
unless compare == :eq do
throw compare
end
end)
:eq
catch
compare when compare in [:gt, :lt] -> compare
end
end
alias Type.{Function, Message}
usable_as do
# If either side has :any params, usability is decided by returns alone.
def usable_as(challenge = %{params: cparam}, target = %Function{params: tparam}, meta)
when cparam == :any or tparam == :any do
case Type.usable_as(challenge.return, target.return, meta) do
:ok -> :ok
# TODO: add meta-information here.
{:maybe, _} -> {:maybe, [Message.make(challenge, target, meta)]}
{:error, _} -> {:error, Message.make(challenge, target, meta)}
end
end
def usable_as(challenge = %{params: cparam}, target = %Function{params: tparam}, meta)
when length(cparam) == length(tparam) do
# Parameters are contravariant: the challenge/target params are swapped
# relative to the returns in the zip below.
[challenge.return | tparam] # note that the target parameters and the challenge
|> Enum.zip([target.return | cparam]) # parameters are swapped here. this is important!
|> Enum.map(fn {c, t} -> Type.usable_as(c, t, meta) end)
|> Enum.reduce(&Type.ternary_and/2)
|> case do
:ok -> :ok
# TODO: add meta-information here.
{:maybe, _} -> {:maybe, [Message.make(challenge, target, meta)]}
{:error, _} -> {:error, Message.make(challenge, target, meta)}
end
end
end
intersection do
# An :any-params function adopts the concrete params of the other side;
# the intersection collapses to none() if the returns don't overlap.
def intersection(%{params: :any, return: ret}, target = %Function{}) do
new_ret = Type.intersection(ret, target.return)
if new_ret == builtin(:none) do
builtin(:none)
else
%Function{params: target.params, return: new_ret}
end
end
def intersection(a, b = %Function{params: :any}) do
intersection(b, a)
end
# Identical params (pattern-bound `p` on both sides): intersect returns.
def intersection(%{params: p, return: lr}, %Function{params: p, return: rr}) do
return = Type.intersection(lr, rr)
if return == builtin(:none) do
builtin(:none)
else
%Function{params: p, return: return}
end
end
end
subtype do
def subtype?(challenge, target = %Function{params: :any}) do
Type.subtype?(challenge.return, target.return)
end
def subtype?(challenge = %{params: p_c}, target = %Function{params: p_t})
when p_c == p_t do
Type.subtype?(challenge.return, target.return)
end
end
end
# Renders function types in typespec syntax, e.g. "(integer() -> atom())".
defimpl Inspect do
import Inspect.Algebra
def inspect(%{params: :any, return: %Type{module: nil, name: :any}}, _), do: "function()"
def inspect(%{params: :any, return: return}, opts) do
concat(basic_inspect(:any, return, opts) ++ [")"])
end
def inspect(%{params: params, return: return}, opts) do
# check if any of the params or the returns have *when* statements
# TODO: nested variables
[return | params]
|> Enum.filter(fn
%Type.Function.Var{} -> true
_ -> false
end)
|> case do
[] -> basic_inspect(params, return, opts)
free_vars ->
when_list = free_vars
|> Enum.uniq
|> Enum.map(&Inspect.inspect(&1, opts))
|> Enum.intersperse(", ")
basic_inspect(params, return, opts) ++ [" when " | when_list]
end
|> Kernel.++([")"])
|> concat
end
defp basic_inspect(params, return, opts) do
["(", render_params(params, opts), " -> ", to_doc(return, opts)]
end
# :any params render as the "..." wildcard arity.
defp render_params(:any, _), do: "..."
defp render_params(lst, opts) do
lst
|> Enum.map(&to_doc(&1, opts))
|> Enum.intersperse(", ")
|> concat
end
end
end
|
lib/type/function.ex
| 0.898182
| 0.970632
|
function.ex
|
starcoder
|
defmodule Flex.EngineAdapter.TakagiSugeno do
@moduledoc """
Takagi-Sugeno-Kang fuzzy inference uses singleton output membership functions that are either constant or a linear function of the input values.
The defuzzification process for a Sugeno system is more computationally efficient compared to that of a Mamdani system,
since it uses a weighted average or weighted sum of a few data points rather than compute a centroid of a two-dimensional area.
"""
alias Flex.{EngineAdapter, EngineAdapter.State, Variable}
@behaviour EngineAdapter
import Flex.Rule, only: [statement: 2, get_rule_parameters: 3]
# No validation is performed for this adapter; the state passes through.
@impl EngineAdapter
def validation(engine_state, _antecedent, _rules, _consequent),
do: engine_state
@impl EngineAdapter
def fuzzification(%State{input_vector: input_vector} = engine_state, antecedent) do
fuzzy_antecedent = EngineAdapter.default_fuzzification(input_vector, antecedent, %{})
%{engine_state | fuzzy_antecedent: fuzzy_antecedent}
end
@impl EngineAdapter
def inference(
%State{fuzzy_antecedent: fuzzy_antecedent, input_vector: input_vector} = engine_state,
rules,
consequent
) do
fuzzy_consequent =
fuzzy_antecedent
|> inference_engine(rules, consequent)
|> compute_output_level(input_vector)
%{engine_state | fuzzy_consequent: fuzzy_consequent}
end
@impl EngineAdapter
def defuzzification(%State{fuzzy_consequent: fuzzy_consequent} = engine_state) do
%{engine_state | crisp_output: weighted_average_method(fuzzy_consequent)}
end
# Folds every rule over the consequent variable: each rule either runs a
# user-supplied function or evaluates a declarative statement, producing an
# updated consequent that is threaded into the next rule.
def inference_engine(_fuzzy_antecedent, [], consequent), do: consequent
def inference_engine(fuzzy_antecedent, [rule | tail], consequent) do
rule_parameters = get_rule_parameters(rule.antecedent, fuzzy_antecedent, []) ++ [consequent]
consequent =
if is_function(rule.statement) do
rule.statement.(rule_parameters)
else
args = Map.merge(fuzzy_antecedent, %{consequent.tag => consequent})
statement(rule.statement, args)
end
inference_engine(fuzzy_antecedent, tail, consequent)
end
# Evaluates each output fuzzy set's membership function on the raw input
# vector, once per firing strength recorded under that set's tag, and stores
# the flat list in :rule_output (order matches the mf_values ordering).
defp compute_output_level(cons_var, input_vector) do
rules_output =
Enum.reduce(cons_var.fuzzy_sets, [], fn output_fuzzy_set, acc ->
output_value =
for _ <- cons_var.mf_values[output_fuzzy_set.tag], into: [] do
output_fuzzy_set.mf.(input_vector)
end
acc ++ output_value
end)
%{cons_var | rule_output: rules_output}
end
@doc """
Turns an consequent fuzzy variable (output) from a fuzzy value to a crisp value (weighted average method).
"""
@spec weighted_average_method(Flex.Variable.t()) :: float
def weighted_average_method(%Variable{type: type} = fuzzy_var) when type == :consequent do
fuzzy_var
|> build_fuzzy_sets_strength_list()
|> fuzzy_to_crisp(fuzzy_var.rule_output, 0, 0)
end
# Flattens the per-set firing strengths into one list, in fuzzy_sets order
# (the same order compute_output_level/2 used for :rule_output).
defp build_fuzzy_sets_strength_list(%Variable{fuzzy_sets: fuzzy_sets, mf_values: mf_values}) do
Enum.reduce(fuzzy_sets, [], fn fuzzy_set, acc -> acc ++ mf_values[fuzzy_set.tag] end)
end
# Weighted average: sum(strength_i * output_i) / sum(strength_i).
# NOTE(review): if every firing strength is zero, `den` stays 0 and the
# base clause raises ArithmeticError — confirm whether upstream guarantees
# at least one nonzero strength.
defp fuzzy_to_crisp([], _input, nom, den), do: nom / den
defp fuzzy_to_crisp([fs_strength | f_tail], [input | i_tail], nom, den) do
nom = nom + fs_strength * input
den = den + fs_strength
fuzzy_to_crisp(f_tail, i_tail, nom, den)
end
end
|
lib/engine_adapters/takagi_sugeno.ex
| 0.840029
| 0.583352
|
takagi_sugeno.ex
|
starcoder
|
defmodule Sqlitex.Server do
@moduledoc """
Sqlitex.Server provides a GenServer to wrap a sqlitedb.
This makes it easy to share a sqlite database between multiple processes without worrying about concurrency issues.
You can also register the process with a name so you can query by name later.
## Unsupervised Example
```
iex> {:ok, pid} = Sqlitex.Server.start_link(":memory:", [name: :example])
iex> Sqlitex.Server.exec(pid, "CREATE TABLE t (a INTEGER, b INTEGER)")
:ok
iex> Sqlitex.Server.exec(pid, "INSERT INTO t (a, b) VALUES (1, 1), (2, 2), (3, 3)")
:ok
iex> Sqlitex.Server.query(pid, "SELECT * FROM t WHERE b = 2")
{:ok, [[a: 2, b: 2]]}
iex> Sqlitex.Server.query(:example, "SELECT * FROM t ORDER BY a LIMIT 1", into: %{})
{:ok, [%{a: 1, b: 1}]}
iex> Sqlitex.Server.query_rows(:example, "SELECT * FROM t ORDER BY a LIMIT 2")
{:ok, %{rows: [[1, 1], [2, 2]], columns: [:a, :b], types: [:INTEGER, :INTEGER]}}
iex> Sqlitex.Server.prepare(:example, "SELECT * FROM t")
{:ok, %{columns: [:a, :b], types: [:INTEGER, :INTEGER]}}
# Subsequent queries using this exact statement will now operate more efficiently
# because this statement has been cached.
iex> Sqlitex.Server.prepare(:example, "INVALID SQL")
{:error, {:sqlite_error, 'near "INVALID": syntax error'}}
iex> Sqlitex.Server.stop(:example)
:ok
iex> :timer.sleep(10) # wait for the process to exit asynchronously
iex> Process.alive?(pid)
false
```
## Supervised Example
```
import Supervisor.Spec
children = [
worker(Sqlitex.Server, ["priv/my_db.sqlite3", [name: :my_db])
]
Supervisor.start_link(children, strategy: :one_for_one)
```
"""
use GenServer
alias Sqlitex.Statement
alias Sqlitex.Server.StatementCache, as: Cache
@doc """
Starts a SQLite Server (GenServer) instance.
In addition to the options that are typically provided to `GenServer.start_link/3`,
you can also specify `stmt_cache_size: (positive_integer)` to override the default
limit (20) of statements that are cached when calling `prepare/3`.
"""
def start_link(db_path, opts \\ []) do
stmt_cache_size = Keyword.get(opts, :stmt_cache_size, 20)
GenServer.start_link(__MODULE__, {db_path, stmt_cache_size}, opts)
end
## GenServer callbacks
# Server state throughout is the 2-tuple {db, stmt_cache}.
def init({db_path, stmt_cache_size})
when is_integer(stmt_cache_size) and stmt_cache_size > 0
do
case Sqlitex.open(db_path) do
{:ok, db} -> {:ok, {db, __MODULE__.StatementCache.new(db, stmt_cache_size)}}
{:error, reason} -> {:stop, reason}
end
end
def handle_call({:exec, sql}, _from, {db, stmt_cache}) do
result = Sqlitex.exec(db, sql)
{:reply, result, {db, stmt_cache}}
end
# For query/query_rows/prepare the statement cache is threaded through:
# success replies keep the updated cache, errors keep the old one.
def handle_call({:query, sql, opts}, _from, {db, stmt_cache}) do
case query_impl(sql, opts, stmt_cache) do
{:ok, result, new_cache} -> {:reply, {:ok, result}, {db, new_cache}}
err -> {:reply, err, {db, stmt_cache}}
end
end
def handle_call({:query_rows, sql, opts}, _from, {db, stmt_cache}) do
case query_rows_impl(sql, opts, stmt_cache) do
{:ok, result, new_cache} -> {:reply, {:ok, result}, {db, new_cache}}
err -> {:reply, err, {db, stmt_cache}}
end
end
def handle_call({:prepare, sql}, _from, {db, stmt_cache}) do
case prepare_impl(sql, stmt_cache) do
{:ok, result, new_cache} -> {:reply, {:ok, result}, {db, new_cache}}
err -> {:reply, err, {db, stmt_cache}}
end
end
def handle_call({:create_table, name, table_opts, cols}, _from, {db, stmt_cache}) do
result = Sqlitex.create_table(db, name, table_opts, cols)
{:reply, result, {db, stmt_cache}}
end
def handle_cast(:stop, {db, stmt_cache}) do
{:stop, :normal, {db, stmt_cache}}
end
# Closes the underlying database handle on shutdown.
def terminate(_reason, {db, _stmt_cache}) do
Sqlitex.close(db)
:ok
end
## Public API
@doc """
Executes a SQL statement without returning rows. Accepts `timeout:` in `opts`.
"""
def exec(pid, sql, opts \\ []) do
GenServer.call(pid, {:exec, sql}, timeout(opts))
end
@doc """
Runs a query and returns rows as keyword lists (or into the collectable
given via `into:`). Supports `bind:` for parameter binding and `timeout:`.
"""
def query(pid, sql, opts \\ []) do
GenServer.call(pid, {:query, sql, opts}, timeout(opts))
end
@doc """
Runs a query and returns `%{rows: ..., columns: ..., types: ...}` with raw
row lists. Supports `bind:` and `timeout:` in `opts`.
"""
def query_rows(pid, sql, opts \\ []) do
GenServer.call(pid, {:query_rows, sql, opts}, timeout(opts))
end
@doc """
Prepares a SQL statement for future use.
This causes a call to [`sqlite3_prepare_v2`](https://sqlite.org/c3ref/prepare.html)
to be executed in the Server process. To protect the reference to the corresponding
[`sqlite3_stmt` struct](https://sqlite.org/c3ref/stmt.html) from misuse in other
processes, that reference is not passed back. Instead, prepared statements are
cached in the Server process. If a subsequent call to `query/3` or `query_rows/3`
is made with a matching SQL statement, the prepared statement is reused.
Prepared statements are purged from the cache when the cache exceeds a pre-set
limit (20 statements by default).
Returns summary information about the prepared statement
`{:ok, %{columns: [:column1_name, :column2_name,... ], types: [:column1_type, ...]}}`
on success or `{:error, {:reason_code, 'SQLite message'}}` if the statement
could not be prepared.
"""
def prepare(pid, sql, opts \\ []) do
GenServer.call(pid, {:prepare, sql}, timeout(opts))
end
@doc """
Creates a table on the wrapped database. No timeout option (default 5s call).
"""
def create_table(pid, name, table_opts \\ [], cols) do
GenServer.call(pid, {:create_table, name, table_opts, cols})
end
@doc """
Asynchronously stops the server (fire-and-forget cast; the process may
still be alive briefly after this returns).
"""
def stop(pid) do
GenServer.cast(pid, :stop)
end
## Helpers
defp query_impl(sql, opts, stmt_cache) do
with {%Cache{} = new_cache, stmt} <- Cache.prepare(stmt_cache, sql),
{:ok, stmt} <- Statement.bind_values(stmt, Keyword.get(opts, :bind, [])),
{:ok, rows} <- Statement.fetch_all(stmt, Keyword.get(opts, :into, [])),
do: {:ok, rows, new_cache}
end
defp query_rows_impl(sql, opts, stmt_cache) do
with {%Cache{} = new_cache, stmt} <- Cache.prepare(stmt_cache, sql),
{:ok, stmt} <- Statement.bind_values(stmt, Keyword.get(opts, :bind, [])),
{:ok, rows} <- Statement.fetch_all(stmt, :raw_list),
do: {:ok,
%{rows: rows, columns: stmt.column_names, types: stmt.column_types},
new_cache}
end
defp prepare_impl(sql, stmt_cache) do
with {%Cache{} = new_cache, stmt} <- Cache.prepare(stmt_cache, sql),
do: {:ok, %{columns: stmt.column_names, types: stmt.column_types}, new_cache}
end
defp timeout(kwopts), do: Keyword.get(kwopts, :timeout, 5000)
end
|
deps/sqlitex/lib/sqlitex/server.ex
| 0.839537
| 0.788604
|
server.ex
|
starcoder
|
defmodule Xqlite.PragmaUtil do
@moduledoc ~S"""
A module with zero dependencies on the rest of the modules in this library.
Used to reduce boilerplate and slice and dice the pragmas collection (also used in tests).
"""
@type name :: atom()
@type spec :: keyword()
@type arg_type :: :blob | :bool | :int | :list | :nothing | :real | :text
@type pragma :: {name(), spec()}
@type pragmas :: %{required(name()) => spec()}
@type filter :: (pragma() -> boolean())
# Guard-safe shape checks for the types above; usable in other guards.
defguard is_name(x) when is_atom(x)
defguard is_spec(x) when is_list(x)
defguard is_arg_type(x) when x in [:blob, :bool, :int, :list, :nothing, :real, :text]
defguard is_pragma(x) when is_tuple(x) and is_name(elem(x, 0)) and is_spec(elem(x, 1))
defguard is_pragmas(x) when is_map(x)
defguard is_filter(x) when is_function(x, 1)
# A pragma is readable if its spec keyword list has any :r entry.
@spec readable?(pragma()) :: boolean()
def readable?({_n, s} = p) when is_pragma(p), do: Keyword.has_key?(s, :r)
# NOTE(review): the match patterns below imply :r values are tuples whose
# first element is the read arity ({0, _, _} zero-arg, {1, _, _, _} one-arg)
# — inferred from usage here; confirm against the pragma spec table.
@spec readable_with_zero_args?(pragma()) :: boolean()
def readable_with_zero_args?({_n, s} = p) when is_pragma(p) do
s
|> Keyword.get_values(:r)
|> Enum.any?(fn x -> match?({0, _, _}, x) end)
end
@spec readable_with_one_arg?(pragma()) :: boolean()
def readable_with_one_arg?({_n, s} = p) when is_pragma(p) do
s
|> Keyword.get_values(:r)
|> Enum.any?(fn x -> match?({1, _, _, _}, x) end)
end
@spec writable?(pragma()) :: boolean()
def writable?({_n, s} = p) when is_pragma(p), do: Keyword.has_key?(s, :w)
@spec one_write_variant?(pragma()) :: boolean()
def one_write_variant?({_n, s} = p) when is_pragma(p),
do: length(Keyword.get_values(s, :w)) == 1
@spec many_write_variants?(pragma()) :: boolean()
def many_write_variants?({_n, s} = p) when is_pragma(p),
do: length(Keyword.get_values(s, :w)) > 1
# True if any read or write variant of the pragma yields type `t`
# (the last tuple element is the result type in all three shapes).
@spec returns_type?(pragma(), arg_type()) :: boolean()
def returns_type?({_n, s} = p, t) when is_pragma(p) and is_arg_type(t) do
Enum.any?(s, fn
{:r, {0, _, ^t}} -> true
{:r, {1, _, _, ^t}} -> true
{:w, {_, _, ^t}} -> true
_ -> false
end)
end
# Convenience wrappers over returns_type?/2 for each concrete arg_type.
def returns_bool?(p) when is_pragma(p), do: returns_type?(p, :bool)
def returns_int?(p) when is_pragma(p), do: returns_type?(p, :int)
def returns_list?(p) when is_pragma(p), do: returns_type?(p, :list)
def returns_text?(p) when is_pragma(p), do: returns_type?(p, :text)
def returns_nothing?(p) when is_pragma(p), do: returns_type?(p, :nothing)
@spec of_type(pragmas(), arg_type()) :: [name()]
def of_type(m, t) when is_pragmas(m) and is_arg_type(t) do
filter(m, fn p -> returns_type?(p, t) end)
end
# Returns the sorted names of all pragmas matching the filter(s).
@spec filter(pragmas(), filter()) :: [name()]
def filter(m, f1) when is_pragmas(m) and is_filter(f1) do
m
|> Stream.filter(fn p -> f1.(p) end)
|> Stream.map(fn {n, _s} -> n end)
|> Enum.sort()
end
@spec filter(pragmas(), filter(), filter()) :: [name()]
def filter(m, f1, f2) when is_pragmas(m) and is_filter(f1) and is_filter(f2) do
filter(m, fn p -> f1.(p) && f2.(p) end)
end
end
|
lib/xqlite/pragma_util.ex
| 0.753648
| 0.563678
|
pragma_util.ex
|
starcoder
|
defmodule AdaptableCostsEvaluator.Formulas do
  @moduledoc """
  The Formulas context.
  """

  import Ecto.Query, warn: false
  alias AdaptableCostsEvaluator.Repo

  alias AdaptableCostsEvaluator.Outputs
  alias AdaptableCostsEvaluator.Formulas.Formula
  alias AdaptableCostsEvaluator.Computations.Computation

  @doc """
  Returns the list of formulas in the computation.

  ## Examples

      iex> list_formulas(computation)
      [%Formula{}, ...]

  """
  def list_formulas(%Computation{} = computation) do
    Repo.preload(computation, :formulas).formulas
  end

  @doc """
  Gets a single formula from the computation. If the computation is omitted, it
  gets the formula only by the ID.

  Raises `Ecto.NoResultsError` if the Formula does not exist.

  ## Examples

      iex> get_formula!(123, computation)
      %Formula{}

      iex> get_formula!(456, computation)
      ** (Ecto.NoResultsError)

  """
  def get_formula!(id, computation \\ nil) do
    if computation == nil do
      Repo.get!(Formula, id)
    else
      # Scoped lookup: the formula must belong to the given computation.
      Repo.get_by!(Formula, id: id, computation_id: computation.id)
    end
  end

  @doc """
  Creates a formula.

  ## Examples

      iex> create_formula(%{field: value})
      {:ok, %Formula{}}

      iex> create_formula(%{field: bad_value})
      {:error, %Ecto.Changeset{}}

  """
  def create_formula(attrs \\ %{}) do
    %Formula{}
    |> change_formula(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a formula.

  ## Examples

      iex> update_formula(formula, %{field: new_value})
      {:ok, %Formula{}}

      iex> update_formula(formula, %{field: bad_value})
      {:error, %Ecto.Changeset{}}

  """
  def update_formula(%Formula{} = formula, attrs) do
    formula
    |> change_formula(attrs)
    |> Repo.update()
  end

  @doc """
  Deletes a formula.

  ## Examples

      iex> delete_formula(formula)
      {:ok, %Formula{}}

      iex> delete_formula(formula)
      {:error, %Ecto.Changeset{}}

  """
  def delete_formula(%Formula{} = formula) do
    Repo.delete(formula)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking formula changes.

  ## Examples

      iex> change_formula(formula)
      %Ecto.Changeset{data: %Formula{}}

  """
  def change_formula(%Formula{} = formula, attrs \\ %{}) do
    Formula.changeset(formula, attrs)
  end

  @doc """
  Runs the evaluation of the formula.
  It evaluates the formula using the linked evaluator. Returns a map with the
  result and affected outputs where the `last_value` attribute has been updated.
  """
  # Spec widened: the nil-evaluator clause returns {:error, String.t()},
  # which the previous spec did not admit.
  @spec evaluate_formula(%AdaptableCostsEvaluator.Formulas.Formula{}) ::
          {:error, String.t()}
          | {:error, {:unprocessable_entity, [...]}}
          | {:ok, %{outputs: list, result: any}}
  def evaluate_formula(%Formula{evaluator_id: nil}) do
    {:error, "evaluator not specified"}
  end

  def evaluate_formula(%Formula{} = formula) do
    evaluator = Repo.preload(formula, :evaluator).evaluator
    # to_existing_atom: the evaluator module must already be loaded; this
    # avoids minting atoms from database-supplied strings.
    result = apply(String.to_existing_atom("Elixir.#{evaluator.module}"), :evaluate, [formula])

    case result do
      {:ok, value} ->
        attrs = %{
          outputs: apply_result_to_outputs(formula, value),
          result: value
        }

        {:ok, attrs}

      {:error, error} ->
        {:error, {:unprocessable_entity, [error]}}
    end
  end

  # Writes `result` into every output linked to the formula; outputs whose
  # update fails are silently dropped from the returned list.
  defp apply_result_to_outputs(%Formula{} = formula, result) do
    Repo.preload(formula, :outputs).outputs
    |> Enum.map(fn o ->
      case Outputs.update_output(o, %{last_value: result}) do
        {:ok, output} -> output
        {:error, _} -> nil
      end
    end)
    |> Enum.reject(&is_nil/1)
  end
end
|
lib/adaptable_costs_evaluator/formulas.ex
| 0.906234
| 0.514644
|
formulas.ex
|
starcoder
|
defmodule Monad.Writer do
@moduledoc """
ML / Ocaml style functor
"""
# Generates a Writer-monad module specialized to a monoid:
# :module - name of the module to define
# :mempty - quoted expression for the monoid's identity element
# :mappend - quoted call whose args are replaced when logs are combined
# :debug - pass `debug: :functor` to print the generated code
defmacro functor(opts) do
module_name = Keyword.fetch!(opts, :module)
mempty = Keyword.fetch!(opts, :mempty)
mappend = Keyword.fetch!(opts, :mappend)
quote location: :keep do
defmodule unquote(module_name) do
@enforce_keys [:run_writer]
defstruct [:run_writer]
# Splices x and y in as the argument list (element 2) of the escaped
# mappend call AST, so the configured combiner is inlined at each use.
defmacro mappend(x, y) do
put_elem(unquote(Macro.escape(mappend)), 2, [x, y])
#|> case do x -> IO.puts(Macro.to_string(x)) ; x end
end
# (w, a) -> Writer log a
def new({_, _} = t), do: %__MODULE__{run_writer: t}
# a -> Writer log a
def pure(x), do: %__MODULE__{run_writer: {unquote(mempty), x}}
# log -> Writer log ()
def tell(x), do: %__MODULE__{run_writer: {x, {}}}
# Writer log a -> (a -> b) -> Writer log b
def map(ma, f) do
{logs, x} = ma.run_writer
new({logs, f.(x)})
end
# Applicative apply: combines both logs, applies the wrapped function.
def ap(mf, ma) do
{log1, f} = mf.run_writer
{log2, x} = ma.run_writer
new({mappend(log1, log2), f.(x)})
end
def lift_a(mf, ma) do
{log1, f} = mf.run_writer
{log2, x} = ma.run_writer
new({mappend(log1, log2), f.(x)})
end
# lift_a2/lift_a3 fold logs right-to-left and apply the 2/3-ary function.
def lift_a2(mf, ma, mb) do
{log1, f} = mf.run_writer
{log2, x} = ma.run_writer
{log3, y} = mb.run_writer
new({mappend(log1, mappend(log2, log3)), f.(x, y)})
end
def lift_a3(mf, ma, mb, mc) do
{log1, f} = mf.run_writer
{log2, x} = ma.run_writer
{log3, y} = mb.run_writer
{log4, z} = mc.run_writer
new({mappend(log1, mappend(log2, mappend(log3, log4))), f.(x, y, z)})
end
# Writer log (Writer log a) -> Writer log a; outer log comes first.
def join(mma) do
{log_out, ma} = mma.run_writer
{log_in, x} = ma.run_writer
new({mappend(log_out, log_in), x})
end
def bind(ma, f) do
{log_out, x} = ma.run_writer
{log_in, y} = f.(x).run_writer
new({mappend(log_out, log_in), y})
end
defimpl Inspect, for: unquote(module_name) do
def inspect(%unquote(module_name){run_writer: {logs, x}}, opts), do: Inspect.Algebra.concat(["##{unquote(module_name)}(", Inspect.Algebra.to_doc(logs, opts), ", ", Inspect.Algebra.to_doc(x, opts), ")"])
end
# Haskell-style monadic do-notation; expands <- bindings via rec_mdo/1.
# NOTE(review): only multi-statement blocks are matched; a single-line
# `m do: expr` does not produce a :__block__ and would fail — confirm.
defmacro m(do: {:__block__, _context, body}) do
rec_mdo(body)
end
# A trailing <- has nothing to bind into: reject with a compile error.
def rec_mdo([{:<-, context, _}]) do
raise "Error line #{Keyword.get(context, :line, :unknown)}: end of monadic do should be a monadic value"
end
def rec_mdo([line]) do
line
end
# `binding <- expression`: destructure the writer, recurse on the tail,
# and mappend the two logs.
def rec_mdo([{:<-, _context, [binding, expression]} | tail]) do
module_name = unquote(module_name)
quote location: :keep do
case unquote(expression) do
%unquote(module_name){run_writer: {log7, unquote(binding)}} ->
case unquote(rec_mdo(tail)) do
%unquote(module_name){run_writer: {log8, a8}} ->
new({mappend(log7, log8), a8})
end
end
end
end
# Plain `=` bindings pass through unchanged.
def rec_mdo([{:=, _context, [_binding, _expression]} = line | tail]) do
quote location: :keep do
unquote(line)
unquote(rec_mdo(tail))
end
end
# Bare monadic expression: value is discarded, log is still appended.
def rec_mdo([expression | tail]) do
module_name = unquote(module_name)
quote location: :keep do
case unquote(expression) do
%unquote(module_name){run_writer: {log7, a7}} ->
case unquote(rec_mdo(tail)) do
%unquote(module_name){run_writer: {log8, a8}} ->
new({mappend(log7, log8), a8})
end
end
end
#|> case do x -> IO.puts(Macro.to_string(x)) ; x end
end
end
end
# Optionally dump the generated module source when debug: :functor is set.
|> case do x ->
case Keyword.get(opts, :debug, :none) do
:none -> :ok
:functor ->
IO.puts(Macro.to_string(x))
end
x
end
end
end
|
writer_monad_from_first_principle/lib/monad.writer.ex
| 0.628749
| 0.472744
|
monad.writer.ex
|
starcoder
|
defmodule Phoenix.Tracker do
@moduledoc ~S"""
Provides distributed Presence tracking to processes.
Tracker servers use a heartbeat protocol and CRDT to replicate presence
information across a cluster in an eventually consistent, conflict-free
manner. Under this design, there is no single source of truth or global
process. Instead, each node runs one or more `Phoenix.Tracker` servers and
node-local changes are replicated across the cluster and handled locally as
a diff of changes.
* `tracker` - The name of the tracker handler module implementing the
`Phoenix.Tracker` behaviour
* `tracker_opts` - The list of options to pass to the tracker handler
* `server_opts` - The list of options to pass to the tracker server
## Required `server_opts`:
* `:name` - The name of the server, such as: `MyApp.Tracker`
* `:pubsub_server` - The name of the PubSub server, such as: `MyApp.PubSub`
## Optional `server_opts`:
* `broadcast_period` - The interval in milliseconds to send delta broadcats
across the cluster. Default `1500`
* `max_silent_periods` - The max integer of broadcast periods for which no
delta broadcasts have been sent. Defaults `10` (15s heartbeat)
* `down_period` - The interval in milliseconds to flag a replica
as down temporarily down. Default `broadcast_period * max_silent_periods * 2`
(30s down detection). Note: This must be at least 2x the `broadcast_period`.
* `permdown_period` - The interval in milliseconds to flag a replica
as permanently down, and discard its state.
Note: This must be at least greater than the `down_period`.
Default `1_200_000` (20 minutes)
* `clock_sample_periods` - The numbers of heartbeat windows to sample
remote clocks before collapsing and requesting transfer. Default `2`
* `max_delta_sizes` - The list of delta generation sizes to keep before
falling back to sending entire state. Defaults `[100, 1000, 10_000]`.
* log_level - The log level to log events, defaults `:debug` and can be
disabled with `false`
## Implementing a Tracker
To start a tracker, first add the tracker to your supervision tree:
worker(MyTracker, [[name: MyTracker, pubsub_server: MyPubSub]])
Next, implement `MyTracker` with support for the `Phoenix.Tracker`
behaviour callbacks. An example of a minimal tracker could include:
defmodule MyTracker do
@behaviour Phoenix.Tracker
def start_link(opts) do
opts = Keyword.merge([name: __MODULE__], opts)
GenServer.start_link(Phoenix.Tracker, [__MODULE__, opts, opts], name: __MODULE__)
end
def init(opts) do
server = Keyword.fetch!(opts, :pubsub_server)
{:ok, %{pubsub_server: server, node_name: Phoenix.PubSub.node_name(server)}}
end
def handle_diff(diff, state) do
for {topic, {joins, leaves}} <- diff do
for {key, meta} <- joins do
IO.puts "presence join: key \"#{key}\" with meta #{inspect meta}"
msg = {:join, key, meta}
Phoenix.PubSub.direct_broadcast!(state.node_name, state.pubsub_server, topic, msg)
end
for {key, meta} <- leaves do
IO.puts "presence leave: key \"#{key}\" with meta #{inspect meta}"
msg = {:leave, key, meta}
Phoenix.PubSub.direct_broadcast!(state.node_name, state.pubsub_server, topic, msg)
end
end
{:ok, state}
end
end
Trackers must implement `start_link/1`, `init/1`, and `handle_diff/2`.
The `init/1` callback allows the tracker to manage its own state when
running within the `Phoenix.Tracker` server. The `handle_diff` callback
is invoked with a diff of presence join and leave events, grouped by
topic. As replicas heartbeat and replicate data, the local tracker state is
merged with the remote data, and the diff is sent to the callback. The
handler can use this information to notify subscribers of events, as
done above.
## Special Considerations
Operations within `handle_diff/2` happen *in the tracker server's context*.
Therefore, blocking operations should be avoided when possible, and offloaded
to a supervised task when required. Also, a crash in the `handle_diff/2` will
crash the tracker server, so operations that may crash the server should be
offloaded with a `Task.Supervisor` spawned process.
"""
use GenServer
alias Phoenix.Tracker.{Clock, State, Replica, DeltaGeneration}
require Logger
@type presence :: {key :: String.t, meta :: Map.t}
@type topic :: String.t
@callback init(Keyword.t) :: {:ok, pid} | {:error, reason :: term}
@callback handle_diff(%{topic => {joins :: [presence], leaves :: [presence]}}, state :: term) :: {:ok, state :: term}
## Client
@doc """
Tracks a presence.
* `server_name` - The registered name of the tracker server
* `pid` - The Pid to track
* `topic` - The `Phoenix.PubSub` topic for this presence
* `key` - The key identifying this presence
* `meta` - The map of metadata to attach to this presence
## Examples
iex> Phoenix.Tracker.track(MyTracker, self, "lobby", u.id, %{stat: "away"})
{:ok, "1WpAofWYIAA="}
"""
@spec track(atom, pid, topic, term, Map.t) :: {:ok, ref :: binary} | {:error, reason :: term}
def track(server_name, pid, topic, key, meta) when is_pid(pid) and is_map(meta) do
GenServer.call(server_name, {:track, pid, topic, key, meta})
end
@doc """
Untracks a presence.
* `server_name` - The registered name of the tracker server
* `pid` - The Pid to untrack
* `topic` - The `Phoenix.PubSub` topic to untrack for this presence
* `key` - The key identifying this presence
All presences for a given Pid can be untracked by calling the
`Phoenix.Tracker.track/2` signature of this function.
## Examples
iex> Phoenix.Tracker.untrack(MyTracker, self, "lobby", u.id)
:ok
iex> Phoenix.Tracker.untrack(MyTracker, self)
:ok
"""
@spec untrack(atom, pid, topic, term) :: :ok
def untrack(server_name, pid, topic, key) when is_pid(pid) do
GenServer.call(server_name, {:untrack, pid, topic, key})
end
def untrack(server_name, pid) when is_pid(pid) do
GenServer.call(server_name, {:untrack, pid})
end
@doc """
Updates a presence's metadata.
* `server_name` - The registered name of the tracker server
* `pid` - The Pid being tracked
* `topic` - The `Phoenix.PubSub` topic to update for this presence
* `key` - The key identifying this presence
All presences for a given Pid can be untracked by calling the
`Phoenix.Tracker.track/2` signature of this function.
## Examples
iex> Phoenix.Tracker.update(MyTracker, self, "lobby", u.id, %{stat: "zzz"})
{:ok, "1WpAofWYIAA="}
"""
@spec update(atom, pid, topic, term, Map.t) :: {:ok, ref :: binary} | {:error, reason :: term}
def update(server_name, pid, topic, key, meta) when is_pid(pid) and is_map(meta) do
GenServer.call(server_name, {:update, pid, topic, key, meta})
end
@doc """
Lists all presences tracked under a given topic.
* `server_name` - The registered name of the tracker server
* `topic` - The `Phoenix.PubSub` topic to update for this presence
Returns a lists of presences in key/metadata tuple pairs.
## Examples
iex> Phoenix.Tracker.list(MyTracker, "lobby")
[{123, %{name: "user 123"}}, {456, %{name: "user 456"}}]
"""
@spec list(atom, topic) :: [presence]
def list(server_name, topic) do
# TODO avoid extra map (ideally crdt does an ets select only returning {key, meta})
server_name
|> GenServer.call({:list, topic})
|> State.get_by_topic(topic)
|> Enum.map(fn {{_topic, _pid, key}, meta, _tag} -> {key, meta} end)
end
@doc """
Gracefully shuts down by broadcasting permdown to all replicas.
## Examples
iex> Phoenix.Tracker.graceful_permdown(MyTracker)
:ok
"""
@spec graceful_permdown(atom) :: :ok
def graceful_permdown(server_name) do
GenServer.call(server_name, :graceful_permdown)
end
## Server
# Starts the tracker server, registered under the required :name option.
# `tracker` is the user's callback module; `tracker_opts` are passed to its
# init/1; `server_opts` configure this GenServer (see moduledoc).
def start_link(tracker, tracker_opts, server_opts) do
  name = Keyword.fetch!(server_opts, :name)
  GenServer.start_link(__MODULE__, [tracker, tracker_opts, server_opts], name: name)
end
# Initializes the tracker server: reads and validates timing options, invokes
# the user tracker module's init/1, subscribes to the namespaced replica
# topic, and schedules the first (stuttered) heartbeat.
#
# Traps exits so tracked pids that die are cleaned up via handle_info/:EXIT.
def init([tracker, tracker_opts, opts]) do
  Process.flag(:trap_exit, true)
  pubsub_server = Keyword.fetch!(opts, :pubsub_server)
  server_name = Keyword.fetch!(opts, :name)
  broadcast_period = opts[:broadcast_period] || 1500
  max_silent_periods = opts[:max_silent_periods] || 10
  down_period = opts[:down_period] || (broadcast_period * max_silent_periods * 2)
  permdown_period = opts[:permdown_period] || 1_200_000
  clock_sample_periods = opts[:clock_sample_periods] || 2
  # Fix: honor the documented :max_delta_sizes option. It was previously
  # ignored and the default list hard-coded into the state map below.
  max_delta_sizes = opts[:max_delta_sizes] || [100, 1000, 10_000]
  log_level = Keyword.get(opts, :log_level, false)
  with :ok <- validate_down_period(down_period, broadcast_period),
       :ok <- validate_permdown_period(permdown_period, down_period),
       {:ok, tracker_state} <- tracker.init(tracker_opts) do
    node_name = Phoenix.PubSub.node_name(pubsub_server)
    namespaced_topic = namespaced_topic(server_name)
    replica = Replica.new(node_name)
    subscribe(pubsub_server, namespaced_topic)
    send_stuttered_heartbeat(self(), broadcast_period)
    {:ok, %{server_name: server_name,
            pubsub_server: pubsub_server,
            tracker: tracker,
            tracker_state: tracker_state,
            replica: replica,
            namespaced_topic: namespaced_topic,
            log_level: log_level,
            replicas: %{},
            pending_clockset: [],
            presences: State.new(Replica.ref(replica)),
            broadcast_period: broadcast_period,
            max_silent_periods: max_silent_periods,
            # Start at the silent threshold so the first heartbeat broadcasts.
            silent_periods: max_silent_periods,
            down_period: down_period,
            permdown_period: permdown_period,
            clock_sample_periods: clock_sample_periods,
            deltas: [],
            max_delta_sizes: max_delta_sizes,
            current_sample_count: clock_sample_periods}}
  end
end
# A replica is only flagged down after missing heartbeats, so the down
# window must span at least two broadcast windows to avoid false positives.
def validate_down_period(d_period, b_period) when d_period >= 2 * b_period, do: :ok
def validate_down_period(_d_period, _b_period) do
  {:error, "down_period must be at least twice as large as the broadcast_period"}
end
# Permanent-down detection must be strictly longer than temporary-down
# detection, otherwise state would be discarded before a replica is even
# flagged as temporarily down.
def validate_permdown_period(p_period, d_period) when p_period > d_period, do: :ok
def validate_permdown_period(_p_period, _d_period) do
  {:error, "permdown_period must be at least larger than the down_period"}
end
# Schedules the first :heartbeat with a random delay (0..25% of the interval)
# so replicas started simultaneously don't all broadcast in lockstep.
defp send_stuttered_heartbeat(pid, interval) do
  Process.send_after(pid, :heartbeat, Enum.random(0..trunc(interval * 0.25)))
end
# Local heartbeat tick: broadcast our delta, request transfers from replicas
# whose clocks are ahead, detect down/permdown replicas, re-arm the timer.
def handle_info(:heartbeat, state) do
  {:noreply, state
             |> broadcast_delta_heartbeat()
             |> request_transfer_from_replicas_needing_synced()
             |> detect_downs()
             |> schedule_next_heartbeat()}
end
# Remote heartbeat carrying no delta: just record clocks and liveness.
def handle_info({:pub, :heartbeat, {name, vsn}, :empty, clocks}, state) do
  {:noreply, state
             |> put_pending_clock(clocks)
             |> handle_heartbeat({name, vsn})}
end
# Remote heartbeat carrying a delta: merge it into our CRDT state, report the
# resulting join/leave diff to the tracker callback, then record liveness.
def handle_info({:pub, :heartbeat, {name, vsn}, delta, clocks}, state) do
  {presences, joined, left} = State.merge(state.presences, delta)
  {:noreply, state
             |> report_diff(joined, left)
             |> put_presences(presences)
             |> put_pending_clock(clocks)
             |> push_delta_generation(delta)
             |> handle_heartbeat({name, vsn})}
end
# A replica asked us for state it is missing: extract the smallest delta
# covering its clocks and reply directly with a transfer_ack.
def handle_info({:pub, :transfer_req, ref, {name, _vsn}, {_, clocks}}, state) do
  log state, fn -> "#{state.replica.name}: transfer_req from #{inspect name}" end
  delta = DeltaGeneration.extract(state.presences, state.deltas, clocks)
  msg = {:pub, :transfer_ack, ref, Replica.ref(state.replica), delta}
  direct_broadcast(state, name, msg)
  {:noreply, state}
end
# Reply to a transfer we requested: merge the remote state and report diffs.
def handle_info({:pub, :transfer_ack, _ref, {name, _vsn}, remote_presences}, state) do
  log(state, fn -> "#{state.replica.name}: transfer_ack from #{inspect name}" end)
  {presences, joined, left} = State.merge(state.presences, remote_presences)
  {:noreply, state
             |> report_diff(joined, left)
             |> push_delta_generation(remote_presences)
             |> put_presences(presences)}
end
# A replica announced a graceful shutdown: mark it down and permdown now
# instead of waiting out the detection windows.
def handle_info({:pub, :graceful_permdown, {_name, _vsn} = ref}, state) do
  case Replica.fetch_by_ref(state.replicas, ref) do
    {:ok, replica} -> {:noreply, state |> down(replica) |> permdown(replica)}
    :error -> {:noreply, state}
  end
end
# A linked (tracked) pid exited: drop all of its presences.
def handle_info({:EXIT, pid, _reason}, state) do
  {:noreply, drop_presence(state, pid)}
end
# Tracks a new presence, replying with its generated phx_ref.
def handle_call({:track, pid, topic, key, meta}, _from, state) do
  {state, ref} = put_presence(state, pid, topic, key, meta)
  {:reply, {:ok, ref}, state}
end
# Untracks a single topic/key presence; unlinks the pid once it has no
# remaining presences (linking is how pid death is observed).
def handle_call({:untrack, pid, topic, key}, _from, state) do
  new_state = drop_presence(state, pid, topic, key)
  if State.get_by_pid(new_state.presences, pid) == [] do
    Process.unlink(pid)
  end
  {:reply, :ok, new_state}
end
# Untracks every presence for the pid.
def handle_call({:untrack, pid}, _from, state) do
  Process.unlink(pid)
  {:reply, :ok, drop_presence(state, pid)}
end
# Replaces a presence's metadata, or errors if it isn't tracked.
def handle_call({:update, pid, topic, key, new_meta}, _from, state) do
  case State.get_by_pid(state.presences, pid, topic, key) do
    nil ->
      {:reply, {:error, :nopresence}, state}
    {{_topic, _pid, ^key}, prev_meta, {_replica, _}} ->
      {state, ref} = put_update(state, pid, topic, key, new_meta, prev_meta)
      {:reply, {:ok, ref}, state}
  end
end
# Announces our permanent departure to the cluster, then stops normally.
def handle_call(:graceful_permdown, _from, state) do
  broadcast_from(state, self(), {:pub, :graceful_permdown, Replica.ref(state.replica)})
  {:stop, :normal, :ok, state}
end
# Replies with the full CRDT state; the client-side list/2 filters by topic.
def handle_call({:list, _topic}, _from, state) do
  {:reply, state.presences, state}
end
def handle_call(:replicas, _from, state) do
  {:reply, state.replicas, state}
end
# unsubscribe/resubscribe exist to simulate netsplits (e.g. in tests).
def handle_call(:unsubscribe, _from, state) do
  Phoenix.PubSub.unsubscribe(state.pubsub_server, state.namespaced_topic)
  {:reply, :ok, state}
end
def handle_call(:resubscribe, _from, state) do
  subscribe(state.pubsub_server, state.namespaced_topic)
  {:reply, :ok, state}
end
# Subscribes (linked) to the tracker's internal replica topic, over which all
# heartbeat/transfer/permdown messages flow.
defp subscribe(pubsub_server, namespaced_topic) do
  Phoenix.PubSub.subscribe(pubsub_server, namespaced_topic, link: true)
end
# Replaces a presence's meta as a leave+join, chaining the old phx_ref into
# :phx_ref_prev so observers can correlate the update with the prior entry.
defp put_update(state, pid, topic, key, meta, %{phx_ref: ref} = prev_meta) do
  state
  |> put_presences(State.leave(state.presences, pid, topic, key))
  |> put_presence(pid, topic, key, Map.put(meta, :phx_ref_prev, ref), prev_meta)
end
# Adds a presence to local state: links the pid (for :EXIT cleanup), stamps a
# fresh :phx_ref into the meta, reports the local join diff, then records the
# join in the CRDT. Returns {new_state, ref}.
defp put_presence(state, pid, topic, key, meta, prev_meta \\ nil) do
  Process.link(pid)
  ref = random_ref()
  meta = Map.put(meta, :phx_ref, ref)
  new_state =
    state
    |> report_diff_join(topic, key, meta, prev_meta)
    |> put_presences(State.join(state.presences, pid, topic, key, meta))
  {new_state, ref}
end
# Stores a new CRDT state struct; the match asserts it is a %State{}.
defp put_presences(state, %State{} = presences), do: %{state | presences: presences}
# Drops a single topic/key presence for a pid, reporting it as a leave.
# No-op if the presence is not tracked.
defp drop_presence(state, pid, topic, key) do
  if leave = State.get_by_pid(state.presences, pid, topic, key) do
    state
    |> report_diff([], [leave])
    |> put_presences(State.leave(state.presences, pid, topic, key))
  else
    state
  end
end
# Drops every presence for a pid (used on untrack-all and pid exit).
defp drop_presence(state, pid) do
  leaves = State.get_by_pid(state.presences, pid)
  state
  |> report_diff([], leaves)
  |> put_presences(State.leave(state.presences, pid))
end
# Records a heartbeat from a remote replica, handling every transition of
# {previous replica record, updated replica record}:
#   - unknown replica coming up
#   - steady-state up (same vsn)
#   - down replica recovering (same vsn)
#   - restarted replica (new vsn): the old incarnation is (perm)downed before
#     the new one is brought up, so its stale state is discarded.
defp handle_heartbeat(state, {name, vsn}) do
  case Replica.put_heartbeat(state.replicas, {name, vsn}) do
    {replicas, nil, %Replica{status: :up} = upped} ->
      up(%{state | replicas: replicas}, upped)
    {replicas, %Replica{vsn: ^vsn, status: :up}, %Replica{vsn: ^vsn, status: :up}} ->
      %{state | replicas: replicas}
    {replicas, %Replica{vsn: ^vsn, status: :down}, %Replica{vsn: ^vsn, status: :up} = upped} ->
      up(%{state | replicas: replicas}, upped)
    {replicas, %Replica{vsn: old, status: :up} = downed, %Replica{vsn: ^vsn, status: :up} = upped} when old != vsn ->
      %{state | replicas: replicas} |> down(downed) |> permdown(downed) |> up(upped)
    {replicas, %Replica{vsn: old, status: :down} = downed, %Replica{vsn: ^vsn, status: :up} = upped} when old != vsn ->
      %{state | replicas: replicas} |> permdown(downed) |> up(upped)
  end
end
# On the final sampling window, pick the replicas whose clocks dominate ours
# and request state transfers from them, then reset the sampling cycle.
defp request_transfer_from_replicas_needing_synced(%{current_sample_count: 1} = state) do
  needs_synced = clockset_to_sync(state)
  for replica <- needs_synced, do: request_transfer(state, replica)
  %{state | pending_clockset: [], current_sample_count: state.clock_sample_periods}
end
# Otherwise keep accumulating clock samples for another heartbeat window.
defp request_transfer_from_replicas_needing_synced(state) do
  %{state | current_sample_count: state.current_sample_count - 1}
end
# Sends a :transfer_req directly to the named replica, including our clocks so
# it can reply (via :transfer_ack) with only the state we are missing.
defp request_transfer(state, {name, _vsn}) do
  # Fix: this side *sends* the request. The old message reused the receiving
  # side's "transfer_req from" wording (see the :transfer_req handler),
  # making the two log lines indistinguishable.
  log state, fn -> "#{state.replica.name}: requesting transfer from #{name}" end
  ref = make_ref()
  msg = {:pub, :transfer_req, ref, Replica.ref(state.replica), clock(state)}
  direct_broadcast(state, name, msg)
end
# Scans all known replicas, applying down/permdown transitions for any whose
# last heartbeat is older than the temporary/permanent detection windows.
defp detect_downs(%{permdown_period: perm_int, down_period: temp_int} = state) do
  Enum.reduce(state.replicas, state, fn {_name, replica}, acc ->
    case Replica.detect_down(acc.replicas, replica, temp_int, perm_int) do
      {replicas, %Replica{status: :up}, %Replica{status: :permdown} = down_rep} ->
        # Jumped straight past :down, so fire both transitions.
        %{acc | replicas: replicas} |> down(down_rep) |> permdown(down_rep)
      {replicas, %Replica{status: :down}, %Replica{status: :permdown} = down_rep} ->
        permdown(%{acc | replicas: replicas}, down_rep)
      {replicas, %Replica{status: :up}, %Replica{status: :down} = down_rep} ->
        down(%{acc | replicas: replicas}, down_rep)
      {replicas, %Replica{status: unchanged}, %Replica{status: unchanged}} ->
        %{acc | replicas: replicas}
    end
  end)
end
# Arms the timer for the next :heartbeat tick; returns state unchanged.
defp schedule_next_heartbeat(state) do
  Process.send_after(self(), :heartbeat, state.broadcast_period)
  state
end
# Our replica's current CRDT vector clocks.
defp clock(state), do: State.clocks(state.presences)
@spec clockset_to_sync(%{pending_clockset: [State.replica_context]}) :: [State.replica_name]
# Collapses the sampled remote clocks (plus our own) into the minimal set of
# replicas whose state we lack, excluding ourselves.
defp clockset_to_sync(state) do
  my_ref = Replica.ref(state.replica)
  state.pending_clockset
  |> Clock.append_clock(clock(state))
  |> Clock.clockset_replicas()
  |> Enum.filter(fn ref -> ref != my_ref end)
end
# Accumulates clocks seen in remote heartbeats for the current sample window.
defp put_pending_clock(state, clocks) do
  %{state | pending_clockset: Clock.append_clock(state.pending_clockset, clocks)}
end
# Marks a remote replica up: its previously-suppressed presences rejoin
# (replica_up never produces leaves, hence the [] assertion).
defp up(state, remote_replica) do
  log state, fn -> "#{state.replica.name}: replica up from #{inspect remote_replica.name}" end
  {presences, joined, []} = State.replica_up(state.presences, Replica.ref(remote_replica))
  state
  |> report_diff(joined, [])
  |> put_presences(presences)
end
# Marks a remote replica temporarily down: its presences are reported as
# leaves (replica_down never produces joins, hence the [] assertion).
defp down(state, remote_replica) do
  log state, fn -> "#{state.replica.name}: replica down from #{inspect remote_replica.name}" end
  {presences, [], left} = State.replica_down(state.presences, Replica.ref(remote_replica))
  state
  |> report_diff([], left)
  |> put_presences(presences)
end
# Permanently removes a replica: discards its CRDT state and, if it is still
# in our replica map, forgets it entirely. No diff is reported here - the
# leaves were already reported by the preceding down/2 transition.
defp permdown(state, %Replica{name: name} = remote_replica) do
  log state, fn -> "#{state.replica.name}: permanent replica down detected #{name}" end
  presences = State.remove_down_replicas(state.presences, Replica.ref(remote_replica))
  case Replica.fetch_by_ref(state.replicas, Replica.ref(remote_replica)) do
    {:ok, _replica} ->
      %{state | presences: presences, replicas: Map.delete(state.replicas, name)}
    _ ->
      %{state | presences: presences}
  end
end
# Internal PubSub topic for replica traffic, namespaced by server name so it
# cannot collide with application topics.
defp namespaced_topic(server_name), do: "phx_presence:" <> to_string(server_name)
# Fans a message out to every other replica on the internal topic.
defp broadcast_from(state, from, msg) do
  Phoenix.PubSub.broadcast_from!(state.pubsub_server, from, state.namespaced_topic, msg)
end
# Sends a message to a single named replica (used for transfer req/ack).
defp direct_broadcast(state, target_node, msg) do
  Phoenix.PubSub.direct_broadcast!(target_node, state.pubsub_server, state.namespaced_topic, msg)
end
# Heartbeat broadcast policy:
#   - pending local delta: broadcast it, reset/compact the delta, reset the
#     silent-period counter, and archive the delta into the generations.
#   - silent too long: broadcast an :empty heartbeat so replicas still see us
#     as alive.
#   - otherwise: stay silent and bump the counter.
defp broadcast_delta_heartbeat(%{presences: presences} = state) do
  cond do
    State.has_delta?(presences) ->
      delta = presences.delta
      new_presences = presences |> State.reset_delta() |> State.compact()
      broadcast_from(state, self(), {:pub, :heartbeat, Replica.ref(state.replica), delta, clock(state)})
      %{state | presences: new_presences, silent_periods: 0}
      |> push_delta_generation(delta)
    state.silent_periods >= state.max_silent_periods ->
      broadcast_from(state, self(), {:pub, :heartbeat, Replica.ref(state.replica), :empty, clock(state)})
      %{state | silent_periods: 0}
    true -> update_in(state.silent_periods, &(&1 + 1))
  end
end
# Nothing joined or left: skip invoking the tracker callback entirely.
defp report_diff(state, [], []), do: state
# Groups joins and leaves into %{topic => {joins, leaves}} and hands the diff
# to the user's tracker module, threading its returned state back in.
defp report_diff(state, joined, left) do
  join_diff = Enum.reduce(joined, %{}, fn {{topic, _pid, key}, meta, _}, acc ->
    Map.update(acc, topic, {[{key, meta}], []}, fn {joins, leaves} ->
      {[{key, meta} | joins], leaves}
    end)
  end)
  full_diff = Enum.reduce(left, join_diff, fn {{topic, _pid, key}, meta, _}, acc ->
    Map.update(acc, topic, {[], [{key, meta}]}, fn {joins, leaves} ->
      {joins, [{key, meta} | leaves]}
    end)
  end)
  full_diff
  |> state.tracker.handle_diff(state.tracker_state)
  |> handle_tracker_result(state)
end
# Reports a single local join to the tracker callback. A fresh track has no
# previous meta; an update additionally reports the old meta as a leave.
defp report_diff_join(state, topic, key, meta, nil = _prev_meta) do
  %{topic => {[{key, meta}], []}}
  |> state.tracker.handle_diff(state.tracker_state)
  |> handle_tracker_result(state)
end
defp report_diff_join(state, topic, key, meta, prev_meta) do
  %{topic => {[{key, meta}], [{key, prev_meta}]}}
  |> state.tracker.handle_diff(state.tracker_state)
  |> handle_tracker_result(state)
end
# Accepts the tracker callback's {:ok, new_state} contract, storing the new
# callback state; any other return crashes the server with a clear message.
defp handle_tracker_result({:ok, tracker_state}, state) do
  %{state | tracker_state: tracker_state}
end
defp handle_tracker_result(other, state) do
  raise ArgumentError, """
  expected #{state.tracker} to return {:ok, state}, but got:
  #{inspect other}
  """
end
# A full-state (mode: :normal) transfer invalidates accumulated delta
# generations, so drop them.
defp push_delta_generation(state, {%State{mode: :normal}, _}) do
  %{state | deltas: []}
end
# Folds an incoming delta into the retained generations, bounded by the
# configured max_delta_sizes.
defp push_delta_generation(%{deltas: deltas} = state, %State{mode: :delta} = delta) do
  new_deltas = DeltaGeneration.push(state.presences, deltas, delta, state.max_delta_sizes)
  %{state | deltas: new_deltas}
end
# 8 cryptographically-random bytes, Base64-encoded; used as the unique
# phx_ref stamped onto each tracked presence.
defp random_ref do
  Base.encode64(:crypto.strong_rand_bytes(8))
end
# No-op when logging is disabled (log_level: false); otherwise logs the
# lazily-built message at the configured level.
defp log(%{log_level: false}, _msg_func), do: :ok
defp log(%{log_level: level}, msg), do: Logger.log(level, msg)
end
|
deps/phoenix_pubsub/lib/phoenix/tracker.ex
| 0.920994
| 0.65062
|
tracker.ex
|
starcoder
|
defmodule Bubblit.BubbleRooms do
  @moduledoc """
  The BubbleRooms context.
  """
  import Ecto.Query, warn: false
  alias Bubblit.Repo
  alias Bubblit.Accounts.User
  alias Bubblit.BubbleRooms.BubbleLog

  @doc """
  Returns the list of bubble_logs.
  ## Examples
      iex> list_bubble_logs()
      [%BubbleLog{}, ...]
  """
  def list_bubble_logs do
    Repo.all(BubbleLog)
  end

  # Returns {%BubbleLog{}, %User{}} tuples for the given room.
  # NOTE(review): the join on User duplicates the :user preload - one of the
  # two is likely redundant; confirm against callers before removing either.
  def list_bubble_logs(room_id) do
    query =
      from l in BubbleLog,
        preload: [:user],
        join: u in User,
        on: u.id == l.user_id,
        where: l.room_id == ^room_id,
        select: {l, u}
    Repo.all(query)
  end

  @doc """
  Gets a single bubble_log.
  Raises `Ecto.NoResultsError` if the Bubble log does not exist.
  ## Examples
      iex> get_bubble_log!(123)
      %BubbleLog{}
      iex> get_bubble_log!(456)
      ** (Ecto.NoResultsError)
  """
  def get_bubble_log!(id), do: Repo.get!(BubbleLog, id)

  @doc """
  Creates a bubble_log.
  ## Examples
      iex> create_bubble_log(%{field: value})
      {:ok, %BubbleLog{}}
      iex> create_bubble_log(%{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def create_bubble_log(attrs \\ %{}) do
    %BubbleLog{}
    |> BubbleLog.changeset(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a bubble_log.
  ## Examples
      iex> update_bubble_log(bubble_log, %{field: new_value})
      {:ok, %BubbleLog{}}
      iex> update_bubble_log(bubble_log, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def update_bubble_log(%BubbleLog{} = bubble_log, attrs) do
    bubble_log
    |> BubbleLog.changeset(attrs)
    |> Repo.update()
  end

  @doc """
  Deletes a bubble_log.
  ## Examples
      iex> delete_bubble_log(bubble_log)
      {:ok, %BubbleLog{}}
      iex> delete_bubble_log(bubble_log)
      {:error, %Ecto.Changeset{}}
  """
  def delete_bubble_log(%BubbleLog{} = bubble_log) do
    Repo.delete(bubble_log)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking bubble_log changes.
  ## Examples
      iex> change_bubble_log(bubble_log)
      %Ecto.Changeset{source: %BubbleLog{}}
  """
  def change_bubble_log(%BubbleLog{} = bubble_log) do
    BubbleLog.changeset(bubble_log, %{})
  end

  alias Bubblit.BubbleRooms.Room

  @doc """
  Returns the list of rooms.
  ## Examples
      iex> list_rooms()
      [%Room{}, ...]
  """
  def list_rooms do
    Repo.all(Room)
  end

  @doc """
  Gets a single room.
  Raises `Ecto.NoResultsError` if the Room does not exist.
  ## Examples
      iex> get_room!(123)
      %Room{}
      iex> get_room!(456)
      ** (Ecto.NoResultsError)
  """
  def get_room!(id), do: Repo.get!(Room, id)

  # Non-raising variant: returns the Room or nil.
  def get_room(id), do: Repo.get(Room, id)

  @doc """
  Creates a room.
  ## Examples
      iex> create_room(%{field: value})
      {:ok, %Room{}}
      iex> create_room(%{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def create_room(attrs \\ %{}) do
    %Room{}
    |> Room.changeset(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a room.
  ## Examples
      iex> update_room(room, %{field: new_value})
      {:ok, %Room{}}
      iex> update_room(room, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def update_room(%Room{} = room, attrs) do
    room
    |> Room.changeset(attrs)
    |> Repo.update()
  end

  @doc """
  Deletes a room.
  ## Examples
      iex> delete_room(room)
      {:ok, %Room{}}
      iex> delete_room(room)
      {:error, %Ecto.Changeset{}}
  """
  def delete_room(%Room{} = room) do
    Repo.delete(room)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking room changes.
  ## Examples
      iex> change_room(room)
      %Ecto.Changeset{source: %Room{}}
  """
  # Fix: attrs now defaults to %{} so the documented change_room(room) call
  # works (it previously raised UndefinedFunctionError), matching the
  # change_bubble_log/1 and change_room_action/1 conventions. Backward
  # compatible: change_room/2 callers are unaffected.
  def change_room(%Room{} = room, attrs \\ %{}) do
    Room.changeset(room, attrs)
  end

  alias Bubblit.BubbleRooms.RoomAction

  @doc """
  Returns the list of room_actions.
  ## Examples
      iex> list_room_actions()
      [%RoomAction{}, ...]
  """
  def list_room_actions do
    Repo.all(RoomAction)
  end

  # Returns all RoomActions belonging to the given room.
  def list_room_actions(room_id) do
    query =
      from l in RoomAction,
        # preload: [:user],
        # join: u in User,
        # on: u.id == l.user_id,
        where: l.room_id == ^room_id,
        select: l
    Repo.all(query)
  end

  @doc """
  Gets a single room_action.
  Raises `Ecto.NoResultsError` if the Room action does not exist.
  ## Examples
      iex> get_room_action!(123)
      %RoomAction{}
      iex> get_room_action!(456)
      ** (Ecto.NoResultsError)
  """
  def get_room_action!(id), do: Repo.get!(RoomAction, id)

  @doc """
  Creates a room_action.
  ## Examples
      iex> create_room_action(%{field: value})
      {:ok, %RoomAction{}}
      iex> create_room_action(%{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def create_room_action(attrs \\ %{}) do
    %RoomAction{}
    |> RoomAction.changeset(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a room_action.
  ## Examples
      iex> update_room_action(room_action, %{field: new_value})
      {:ok, %RoomAction{}}
      iex> update_room_action(room_action, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def update_room_action(%RoomAction{} = room_action, attrs) do
    room_action
    |> RoomAction.changeset(attrs)
    |> Repo.update()
  end

  @doc """
  Deletes a room_action.
  ## Examples
      iex> delete_room_action(room_action)
      {:ok, %RoomAction{}}
      iex> delete_room_action(room_action)
      {:error, %Ecto.Changeset{}}
  """
  def delete_room_action(%RoomAction{} = room_action) do
    Repo.delete(room_action)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking room_action changes.
  ## Examples
      iex> change_room_action(room_action)
      %Ecto.Changeset{source: %RoomAction{}}
  """
  def change_room_action(%RoomAction{} = room_action) do
    RoomAction.changeset(room_action, %{})
  end
end
|
bubblit/lib/bubblit/bubble_rooms.ex
| 0.859826
| 0.482612
|
bubble_rooms.ex
|
starcoder
|
defmodule NimbleTOTP do
  @moduledoc ~S"""
  NimbleTOTP is a tiny library for Two-factor authentication (2FA) that
  allows developers to implement Time-Based One-Time Passwords (TOTP)
  for their applications.
  ## Two-factor authentication (2FA)
  The concept of 2FA is quite simple. It's an extra layer of security
  that demands a user to provide two pieces of evidence (factors) to
  the authentication system before access can be granted.
  One way to implement 2FA is to generate a random secret for the user
  and whenever the system needs to perform a critical action it will
  ask the user to enter a validation code. This validation code is a
  Time-Based One-Time Password (TOTP) based on the user's secret and can be
  provided by an authentication app like Google Authenticator or Authy, which
  should be previously installed and configured on a compatible device, e.g.
  a smartphone.
  > **Note:** A critical action can mean different things depending on
  the application. For instance, while in a banking system the login itself
  is already considered a critical action, in other systems a user may
  be allowed to log in using just the password and only when trying to
  update critical data (e.g. its profile) 2FA will be required.
  ## Using NimbleTOTP
  In order to allow developers to implement 2FA, NimbleTOTP provides functions to:
  * Generate secrets composed of random bytes.
  * Generate URIs to be encoded in a QR Code.
  * Generate Time-Based One-Time Passwords based on a secret.
  ### Generating the secret
  The first step to set up 2FA for a user is to generate (and later persist) its random
  secret. You can achieve that using `NimbleTOTP.secret/1`.
  Example:
      secret = NimbleTOTP.secret()
      #=> <<63, 24, 42, 30, 95, 116, 80, 121, 106, 102>>
  By default, a binary with 10 random bytes is generated.
  ### Generating URIs for QR Code
  Before persisting the secret, you need to make sure the user has already
  configured the authentication app in a compatible device. The most common
  way to do that is to generate a QR Code that can be read by the app.
  You can use `NimbleTOTP.otpauth_uri/3` along with
  [eqrcode](https://github.com/SiliconJungles/eqrcode) to generate the QR
  code as **SVG**.
  Example:
      uri = NimbleTOTP.otpauth_uri("Acme:alice", secret, issuer: "Acme")
      #=> "otpauth://totp/Acme:alice?secret=MFRGGZA&issuer=Acme"
      uri |> EQRCode.encode() |> EQRCode.svg()
      #=> "<?xml version=\\"1.0\\" standalone=\\"yes\\"?>\\n<svg version=\\"1.1\\" ...
  ### Generating a Time-Based One-Time Password
  After successfully reading the QR Code, the app will start generating a
  different 6 digit code every `30s`. You can compute the verification code
  with:
      NimbleTOTP.verification_code(secret)
      #=> "569777"
  The code can be validated using the `valid?/3` function. Example:
      NimbleTOTP.valid?(secret, "569777")
      #=> true
      NimbleTOTP.valid?(secret, "012345")
      #=> false
  After validating the code, you can finally persist the user's secret so you use
  it later whenever you need to authorize any critical action using 2FA.
  """
  import Bitwise

  @totp_size 6
  @default_totp_period 30

  @doc """
  Generate the uri to be encoded in the QR code.
  ## Examples
      iex> NimbleTOTP.otpauth_uri("Acme:alice", "abcd", issuer: "Acme")
      "otpauth://totp/Acme:alice?secret=MFRGGZA&issuer=Acme"
  """
  def otpauth_uri(label, secret, uri_params \\ []) do
    # The secret is transported as unpadded Base32, per the otpauth scheme.
    key = Base.encode32(secret, padding: false)
    params = [{:secret, key} | uri_params]
    query = URI.encode_query(params)
    "otpauth://totp/#{URI.encode(label)}?#{query}"
  end

  @doc """
  Generate a binary composed of random bytes.
  The number of bytes is defined by the `size` argument. Default is `10`.
  ## Examples
      NimbleTOTP.secret()
      #=> <<63, 24, 42, 30, 95, 116, 80, 121, 106, 102>>
  """
  def secret(size \\ 10) do
    :crypto.strong_rand_bytes(size)
  end

  @doc """
  Generate Time-Based One-Time Password.
  ## Options
  * :time - The time in unix format to be used. Default is `System.os_time(:second)`
  * :period - The period (in seconds) in which the code is valid. Default is `30`.
  ## Examples
      NimbleTOTP.verification_code(secret)
      #=> "569777"
  """
  def verification_code(secret, opts \\ []) do
    time = Keyword.get(opts, :time, System.os_time(:second))
    period = Keyword.get(opts, :period, @default_totp_period)
    secret
    |> hmac(time, period)
    |> hmac_truncate()
    |> rem(1_000_000)
    |> to_string()
    |> String.pad_leading(@totp_size, "0")
  end

  # HMAC-SHA1 over the 8-byte big-endian time-step counter (RFC 6238).
  defp hmac(secret, time, period) do
    moving_factor = <<Integer.floor_div(time, period)::64>>
    hmac_sha(secret, moving_factor)
  end

  # TODO: Remove me when we require OTP 22.1
  if Code.ensure_loaded?(:crypto) and function_exported?(:crypto, :mac, 4) do
    defp hmac_sha(key, data), do: :crypto.mac(:hmac, :sha, key, data)
  else
    defp hmac_sha(key, data), do: :crypto.hmac(:sha, key, data)
  end

  # RFC 4226 dynamic truncation: the low nibble of the last byte selects a
  # 4-byte window, whose top bit is masked off to avoid sign issues.
  defp hmac_truncate(hmac) do
    <<_::19-binary, _::4, offset::4>> = hmac
    <<_::size(offset)-binary, p::4-binary, _::binary>> = hmac
    <<_::1, bits::31>> = p
    bits
  end

  @doc """
  Checks if the given `otp` code matches the secret.
  It accepts the same options as `verification_code/2`.
  """
  def valid?(secret, otp, opts \\ [])

  def valid?(secret, <<a1, a2, a3, a4, a5, a6>>, opts) do
    <<e1, e2, e3, e4, e5, e6>> = verification_code(secret, opts)
    # Fix: the deprecated Bitwise operators ^^^/||| are replaced with the
    # bxor/2 and bor/2 functions. The OR-of-XORs comparison is kept so the
    # check remains constant-time (no early exit on the first mismatch).
    bxor(e1, a1)
    |> bor(bxor(e2, a2))
    |> bor(bxor(e3, a3))
    |> bor(bxor(e4, a4))
    |> bor(bxor(e5, a5))
    |> bor(bxor(e6, a6))
    |> Kernel.===(0)
  end

  # Any otp that is not exactly 6 bytes cannot match.
  def valid?(_secret, _otp, _opts), do: false
end
|
lib/nimble_totp.ex
| 0.81283
| 0.687643
|
nimble_totp.ex
|
starcoder
|
defmodule Cepex.CEP do
  @moduledoc """
  This module provides functions related to the Brazilian postal code (CEP) string
  representation.
  A valid CEP has eight digits, e.g. `80010180` (check `t:Cepex.CEP.t/0`).
  """

  @typedoc """
  The Brazilian postal code (CEP) string representation without formatting, e.g. `80210130`.
  """
  @type t :: <<_::64>>

  @spec format(t) :: {:ok, <<_::72>>} | {:error, :invalid}
  @doc """
  Formats a valid CEP string. If the string may be invalid, call `Cepex.CEP.parse/1` first.
  ## Examples
      iex> Cepex.CEP.format("80210130")
      {:ok, "80210-130"}
      iex> Cepex.CEP.format(8344010)
      {:error, :invalid}
  """
  def format(cep)

  # Split the 8-byte binary into the conventional 5-3 groups.
  def format(<<prefix::binary-size(5), suffix::binary-size(3)>>) do
    {:ok, "#{prefix}-#{suffix}"}
  end

  def format(_cep), do: {:error, :invalid}

  # Inputs rejected outright, before any normalization is attempted.
  @invalid_ceps ["", 0, "00000000", "00000-000"]

  @spec parse(any()) :: {:ok, t()} | {:error, :invalid}
  @doc """
  Parses a CEP string or integer. If the string is not a valid CEP (check `t:Cepex.CEP.t/0`),
  it tries building it by removing non-numeric characters and padding with zeros.
  ## Examples
      iex> Cepex.CEP.parse("80210130")
      {:ok, "80210130"}
      iex> Cepex.CEP.parse(80210130)
      {:ok, "80210130"}
      iex> Cepex.CEP.parse("80210-130")
      {:ok, "80210130"}
      iex> Cepex.CEP.parse(8344010)
      {:ok, "08344010"}
      iex> Cepex.CEP.parse("8344010")
      {:ok, "08344010"}
      iex> Cepex.CEP.parse("00000-000")
      {:error, :invalid}
      iex> Cepex.CEP.parse("80210130130130")
      {:error, :invalid}
  """
  def parse(cep)

  def parse(cep) when cep in @invalid_ceps, do: {:error, :invalid}

  def parse(cep) when is_binary(cep) do
    # Strip non-digits, left-pad to 8 digits, then check the final length.
    digits = String.replace(cep, ~r/[^\d]/, "")
    digits
    |> String.pad_leading(8, "0")
    |> ensure_valid_cep()
  end

  def parse(cep) when is_integer(cep) do
    # Negative integers are treated by magnitude, then parsed as a string.
    cep |> Kernel.abs() |> Integer.to_string() |> parse()
  end

  def parse(_cep), do: {:error, :invalid}

  # Exactly eight bytes after normalization means a well-formed CEP.
  defp ensure_valid_cep(<<_::binary-size(8)>> = cep), do: {:ok, cep}
  defp ensure_valid_cep(_cep), do: {:error, :invalid}
end
|
lib/cepex/cep.ex
| 0.889915
| 0.587381
|
cep.ex
|
starcoder
|
defmodule PokerHands.Hand.Straight do
  alias PokerHands.{Hand.HighCard, Utils}

  # Reference sequences of card values; a straight is any 5-card run that
  # appears as a contiguous substring of one of these.
  @ace_high %PokerHands.Definitions{}.values_ace_high
  @ace_low %PokerHands.Definitions{}.values_ace_low

  @doc """
  ## Examples
      iex> PokerHands.Hand.Straight.valid?(
      iex>   PokerHands.DealtHand.init("2H 3H 4H 5D 6D")
      iex> )
      true
      iex> PokerHands.Hand.Straight.valid?(
      iex>   PokerHands.DealtHand.init("AH 2H 3H 4D 5D")
      iex> )
      true
      iex> PokerHands.Hand.Straight.valid?(
      iex>   PokerHands.DealtHand.init("AH KH QH JD TD")
      iex> )
      true
      iex> PokerHands.Hand.Straight.valid?(
      iex>   PokerHands.DealtHand.init("2H 3C 4H 5D 7D")
      iex> )
      false
  """
  def valid?(dealt_hand) do
    ace_high?(dealt_hand) || ace_low?(dealt_hand)
  end

  defp ace_high?(dealt_hand) do
    substring_of?(joined_values(dealt_hand), @ace_high)
  end

  # For the wheel (A-2-3-4-5) the ace sorts first, so rotate it to the end
  # before checking against the ace-low sequence.
  defp ace_low?(dealt_hand) do
    substring_of?(rotated_values(dealt_hand), @ace_low)
  end

  # True when the hand's value string occurs contiguously in the reference
  # sequence (equivalent to :binary.match/2 not returning :nomatch).
  defp substring_of?(hand_values, all_values) do
    String.contains?(all_values, hand_values)
  end

  defp joined_values(dealt_hand) do
    dealt_hand |> Utils.values() |> Enum.join()
  end

  defp rotated_values(dealt_hand) do
    [first | rest] = Utils.values(dealt_hand)
    Enum.join(rest ++ [first])
  end

  @doc """
  ## Examples
      iex> PokerHands.Hand.Straight.high_card_values(
      iex>   PokerHands.DealtHand.init("2H 3H 4H 5D 6D")
      iex> )
      [6]
      iex> PokerHands.Hand.Straight.high_card_values(
      iex>   PokerHands.DealtHand.init("AH 2H 3H 4H 5D")
      iex> )
      [5]
      iex> PokerHands.Hand.Straight.high_card_values(
      iex>   PokerHands.DealtHand.init("AH KH QH JD TD")
      iex> )
      [14]
  """
  def high_card_values(dealt_hand) do
    # In an ace-low straight the ace does not count as high card, so the
    # second-highest card (index 1) decides instead of index 0.
    offset = if ace_low?(dealt_hand), do: 1, else: 0
    dealt_hand |> HighCard.high_card_values() |> Enum.slice(offset, 1)
  end
end
|
lib/poker_hands/hand/straight.ex
| 0.644673
| 0.401688
|
straight.ex
|
starcoder
|
defmodule ExAeonsEnd.Card do
  @moduledoc "
  This is a simple structure that represents a card
  At first pass, this is just a turn order card. This may eventually be refactored
  to either support other types of cards, or be renamed to indicate that it is just
  for turn order.
  "
  defstruct [:id, :name]

  @type t :: %__MODULE__{
          id: integer(),
          name: String.t()
        }

  @doc "Builds a card struct from its id and display name."
  def new(id, name), do: %__MODULE__{id: id, name: name}

  @doc """
  This function takes a list of cards and tries to find the first instance of a
  specific card (or the card at a given zero-based index).
  If it finds it, it will return a tuple with that card and the list without the card.
  If it doesn't find it (unknown card, or an index outside the list), then it
  will return a tuple with an error.
  ## Examples -- by card
  iex> [] |> ExAeonsEnd.Card.take_card(%ExAeonsEnd.Card{id: 1, name: "test"})
  {:error, :not_found}
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}, %ExAeonsEnd.Card{id: 2, name: "b"}] |> ExAeonsEnd.Card.take_card(%ExAeonsEnd.Card{id: 1, name: "a"})
  {%ExAeonsEnd.Card{id: 1, name: "a"}, [%ExAeonsEnd.Card{id: 2, name: "b"}]}
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}, %ExAeonsEnd.Card{id: 1, name: "a"}] |> ExAeonsEnd.Card.take_card(%ExAeonsEnd.Card{id: 1, name: "a"})
  {%ExAeonsEnd.Card{id: 1, name: "a"}, [%ExAeonsEnd.Card{id: 1, name: "a"}]}
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}, %ExAeonsEnd.Card{id: 2, name: "b"}] |> ExAeonsEnd.Card.take_card(%ExAeonsEnd.Card{id: 2, name: "b"})
  {%ExAeonsEnd.Card{id: 2, name: "b"}, [%ExAeonsEnd.Card{id: 1, name: "a"}]}
  ## Examples -- by index
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}, %ExAeonsEnd.Card{id: 2, name: "b"}] |> ExAeonsEnd.Card.take_card(0)
  {%ExAeonsEnd.Card{id: 1, name: "a"}, [%ExAeonsEnd.Card{id: 2, name: "b"}]}
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}, %ExAeonsEnd.Card{id: 2, name: "b"}] |> ExAeonsEnd.Card.take_card(1)
  {%ExAeonsEnd.Card{id: 2, name: "b"}, [%ExAeonsEnd.Card{id: 1, name: "a"}]}
  iex> [%ExAeonsEnd.Card{id: 1, name: "a"}] |> ExAeonsEnd.Card.take_card(5)
  {:error, :not_found}
  """
  @spec take_card([__MODULE__.t()], __MODULE__.t() | integer()) ::
          {__MODULE__.t(), [__MODULE__.t()]} | {:error, :not_found}
  def take_card(cards, expected_card)

  def take_card(cards, card_index) when is_integer(card_index),
    do: take_card_by_index(card_index, cards)

  def take_card(cards, expected_card) do
    cards
    |> Enum.find_index(fn x -> x == expected_card end)
    |> take_card_by_index(cards)
  end

  # `Enum.find_index/2` returned nil: the requested card is not in the list.
  defp take_card_by_index(nil, _cards), do: {:error, :not_found}

  defp take_card_by_index(index, cards)
       when is_integer(index) and index >= 0 and index < length(cards) do
    {front, [card | back]} = cards |> Enum.split(index)
    remainder = Enum.concat(front, back)
    {card, remainder}
  end

  # Fix: an out-of-range integer index (negative or >= length) previously fell
  # through all clauses and raised FunctionClauseError; per the @spec the
  # function should report {:error, :not_found} instead.
  defp take_card_by_index(index, _cards) when is_integer(index),
    do: {:error, :not_found}
end
|
lib/ExAeonsEnd/card.ex
| 0.793586
| 0.481454
|
card.ex
|
starcoder
|
defmodule Solarex.Moon do
  @moduledoc """
  Solarex.Moon is a module for calculating the moon phase for a particular date.
  This module implements a naive approach by calculating the number of days since a known new moon.
  See the [Wikipedia](https://en.wikipedia.org/wiki/Lunar_phase#Calculating_phase) page for more info.
  """

  # Mean length of a lunation (new moon to new moon), in days.
  @synodic_month 29.530588853

  @doc """
  Returns the moon phase for the current date.
  """
  @spec phase() :: atom()
  def phase() do
    Timex.today()
    |> phase()
  end

  @doc """
  Calculates the moon phase for the passed Date.
  Returns one of `:new_moon`, `:waxing_crescent`, `:first_quarter`, `:waxing_gibbous`,
  `:full_moon`, `:waning_gibbous`, `:third_quarter`, or `:waning_crescent`.
  iex> Solarex.Moon.phase(~D[2019-05-05])
  :new_moon
  """
  @spec phase(Date.t()) :: atom()
  def phase(%Date{} = date) do
    # Bucket the lunar age (0..~29.53 days) into the eight named phases.
    # days_to_new_moon/1 always yields a value >= 0, so the clauses are total.
    case days_to_new_moon(date) do
      d when d >= 0 and d <= 1 -> :new_moon
      d when d > 1 and d < 6 -> :waxing_crescent
      d when d >= 6 and d <= 8 -> :first_quarter
      d when d > 8 and d < 14 -> :waxing_gibbous
      d when d >= 14 and d <= 16 -> :full_moon
      d when d > 16 and d < 21 -> :waning_gibbous
      d when d >= 21 and d <= 23 -> :third_quarter
      d when d > 23 and d < 29 -> :waning_crescent
      d when d >= 29 -> :new_moon
    end
  end

  @doc """
  Returns the number of days elapsed since the most recent new moon for the
  passed date (the age of the current lunar cycle), computed from a known
  new-moon date and the synodic month length.

  Note: despite the function name, this is the time *since* the last new moon
  (0.0 up to ~29.53), not the days remaining until the next one — the
  original doc stated the latter, which contradicts both the doctest below
  and the phase mapping in `phase/1`. The name is kept for compatibility.
  [https://en.wikipedia.org/wiki/Lunar_phase#Calculating_phase](https://en.wikipedia.org/wiki/Lunar_phase#Calculating_phase)
  iex> Solarex.Moon.days_to_new_moon(~D[2019-05-05])
  0.8776445879999955
  """
  @spec days_to_new_moon(Date.t()) :: number()
  def days_to_new_moon(%Date{} = date) do
    days = julian_days(date) - julian_days(known_new_moon())
    # Drop the whole lunations elapsed; the remainder is the cycle age in days.
    cycles = Float.floor(days / @synodic_month)
    days - cycles * @synodic_month
  end

  # Reference new-moon date, read from app config (:solarex, :known_new_moon)
  # as a "YYYY-MM-DD" string and parsed into a Date.
  @spec known_new_moon() :: Date.t()
  defp known_new_moon() do
    Application.get_env(:solarex, :known_new_moon)
    |> Timex.parse!("%Y-%m-%d", :strftime)
    |> Timex.to_date()
  end

  # Julian day number for the given calendar date, via Timex.
  defp julian_days(%Date{year: year, month: month, day: day}) do
    Timex.Calendar.Julian.julian_date(year, month, day)
  end
end
|
lib/solarex/moon.ex
| 0.912207
| 0.647652
|
moon.ex
|
starcoder
|
defmodule ForthVM.Words.Stack do
  @moduledoc """
  Data-stack manipulation words.

  Each word receives the interpreter state (tokens, data stack, return
  stack, dictionary, meta), rewrites the data stack, and hands control back
  through `Process.next/5`. Stack lists are head-first: the list head is the
  top of the stack.
  """
  alias ForthVM.Process
  import ForthVM.Utils

  # ---------------------------------------------
  # Stack operations
  # ---------------------------------------------

  @doc """
  depth: ( -- x ) push the current number of elements on the data stack
  """
  def depth(tokens, stack, return_stack, dictionary, meta) do
    Process.next(tokens, [length(stack) | stack], return_stack, dictionary, meta)
  end

  @doc """
  drop: ( x -- ) discard the top element of the stack
  """
  def drop(tokens, [_top | rest], return_stack, dictionary, meta) do
    Process.next(tokens, rest, return_stack, dictionary, meta)
  end

  @doc """
  2drop: ( x y -- ) discard the top two elements of the stack
  """
  def drop2(tokens, [_top, _under | rest], return_stack, dictionary, meta) do
    Process.next(tokens, rest, return_stack, dictionary, meta)
  end

  @doc """
  dup: ( x -- x x ) duplicate the top element of the stack
  """
  def dup(tokens, [top | _] = stack, return_stack, dictionary, meta) do
    Process.next(tokens, [top | stack], return_stack, dictionary, meta)
  end

  @doc """
  2dup: ( x y -- x y x y ) duplicate the top two elements of the stack
  """
  def dup2(tokens, [a, b | _] = stack, return_stack, dictionary, meta) do
    Process.next(tokens, [a, b | stack], return_stack, dictionary, meta)
  end

  @doc """
  ?dup: ( x -- x x ) duplicate the top element only when it is truthy
  """
  def dup?(tokens, [top | _] = stack, return_stack, dictionary, meta) when is_falsely(top) do
    Process.next(tokens, stack, return_stack, dictionary, meta)
  end

  def dup?(tokens, [top | _] = stack, return_stack, dictionary, meta) do
    Process.next(tokens, [top | stack], return_stack, dictionary, meta)
  end

  @doc """
  swap: ( x y -- y x ) exchange the top two elements of the stack
  """
  def swap(tokens, [a, b | rest], return_stack, dictionary, meta) do
    Process.next(tokens, [b, a | rest], return_stack, dictionary, meta)
  end

  @doc """
  2swap: ( y2 x2 y1 x1 -- y1 x1 y2 x2 ) exchange the top two couples of the stack
  """
  def swap2(tokens, [a, b, c, d | rest], return_stack, dictionary, meta) do
    Process.next(tokens, [c, d, a, b | rest], return_stack, dictionary, meta)
  end

  @doc """
  over: ( y x -- y x y ) copy the second element onto the top of the stack
  """
  def over(tokens, [_top, under | _] = stack, return_stack, dictionary, meta) do
    Process.next(tokens, [under | stack], return_stack, dictionary, meta)
  end

  @doc """
  2over: ( y2 x2 y1 x1 -- y2 x2 y1 x1 y2 x2 ) copy the lower couple onto the top of the stack
  """
  def over2(tokens, [_a, _b, c, d | _] = stack, return_stack, dictionary, meta) do
    Process.next(tokens, [c, d | stack], return_stack, dictionary, meta)
  end

  @doc """
  rot: ( x y z -- y z x ) rotate the top three stack entries, bottom goes on top
  """
  def rot(tokens, [top, mid, bottom | rest], return_stack, dictionary, meta) do
    Process.next(tokens, [bottom, top, mid | rest], return_stack, dictionary, meta)
  end

  @doc """
  -rot: ( x y z -- z x y ) rotate the top three stack entries, top goes on bottom
  """
  def rot_neg(tokens, [top, mid, bottom | rest], return_stack, dictionary, meta) do
    Process.next(tokens, [mid, bottom, top | rest], return_stack, dictionary, meta)
  end
end
|
lib/forthvm/words/stack.ex
| 0.798619
| 0.850965
|
stack.ex
|
starcoder
|
defmodule AWS.SWF do
@moduledoc """
Amazon Simple Workflow Service
The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build
applications that use Amazon's cloud to coordinate work across distributed
components. In Amazon SWF, a *task* represents a logical unit of work that
is performed by a component of your workflow. Coordinating tasks in a
workflow involves managing intertask dependencies, scheduling, and
concurrency in accordance with the logical flow of the application.
Amazon SWF gives you full control over implementing tasks and coordinating
them without worrying about underlying complexities such as tracking their
progress and maintaining their state.
This documentation serves as reference only. For a broader overview of the
Amazon SWF programming model, see the * [Amazon SWF Developer
Guide](http://docs.aws.amazon.com/amazonswf/latest/developerguide/) *.
"""
@doc """
Returns the number of closed workflow executions within the given domain
that meet the specified filtering criteria.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
</li> <li> `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
</li> <li> `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "CountClosedWorkflowExecutions" action via
# this module's `request/4` helper.
def count_closed_workflow_executions(client, input, options \\ []),
  do: request(client, "CountClosedWorkflowExecutions", input, options)
@doc """
Returns the number of open workflow executions within the given domain that
meet the specified filtering criteria.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
</li> <li> `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
</li> <li> `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "CountOpenWorkflowExecutions" action via
# this module's `request/4` helper.
def count_open_workflow_executions(client, input, options \\ []),
  do: request(client, "CountOpenWorkflowExecutions", input, options)
@doc """
Returns the estimated number of activity tasks in the specified task list.
The count returned is an approximation and isn't guaranteed to be exact. If
you specify a task list that no activity task was ever scheduled in then
`0` is returned.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "CountPendingActivityTasks" action via
# this module's `request/4` helper.
def count_pending_activity_tasks(client, input, options \\ []),
  do: request(client, "CountPendingActivityTasks", input, options)
@doc """
Returns the estimated number of decision tasks in the specified task list.
The count returned is an approximation and isn't guaranteed to be exact. If
you specify a task list that no decision task was ever scheduled in then
`0` is returned.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "CountPendingDecisionTasks" action via
# this module's `request/4` helper.
def count_pending_decision_tasks(client, input, options \\ []),
  do: request(client, "CountPendingDecisionTasks", input, options)
@doc """
Deprecates the specified *activity type*. After an activity type has been
deprecated, you cannot create new tasks of that activity type. Tasks of
this type that were scheduled before the type was deprecated continue to
run.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `activityType.name`: String constraint. The key is
`swf:activityType.name`.
</li> <li> `activityType.version`: String constraint. The key is
`swf:activityType.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DeprecateActivityType" action via
# this module's `request/4` helper.
def deprecate_activity_type(client, input, options \\ []),
  do: request(client, "DeprecateActivityType", input, options)
@doc """
Deprecates the specified domain. After a domain has been deprecated it
cannot be used to create new workflow executions or register new types.
However, you can still use visibility actions on this domain. Deprecating a
domain also deprecates all activity and workflow types registered in the
domain. Executions that were started before the domain was deprecated
continues to run.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DeprecateDomain" action via
# this module's `request/4` helper.
def deprecate_domain(client, input, options \\ []),
  do: request(client, "DeprecateDomain", input, options)
@doc """
Deprecates the specified *workflow type*. After a workflow type has been
deprecated, you cannot create new executions of that type. Executions that
were started before the type was deprecated continues to run. A deprecated
workflow type may still be used when calling visibility actions.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
</li> <li> `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DeprecateWorkflowType" action via
# this module's `request/4` helper.
def deprecate_workflow_type(client, input, options \\ []),
  do: request(client, "DeprecateWorkflowType", input, options)
@doc """
Returns information about the specified activity type. This includes
configuration settings provided when the type was registered and other
general information about the type.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `activityType.name`: String constraint. The key is
`swf:activityType.name`.
</li> <li> `activityType.version`: String constraint. The key is
`swf:activityType.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DescribeActivityType" action via
# this module's `request/4` helper.
def describe_activity_type(client, input, options \\ []),
  do: request(client, "DescribeActivityType", input, options)
@doc """
Returns information about the specified domain, including description and
status.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DescribeDomain" action via
# this module's `request/4` helper.
def describe_domain(client, input, options \\ []),
  do: request(client, "DescribeDomain", input, options)
@doc """
Returns information about the specified workflow execution including its
type and some statistics.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DescribeWorkflowExecution" action via
# this module's `request/4` helper.
def describe_workflow_execution(client, input, options \\ []),
  do: request(client, "DescribeWorkflowExecution", input, options)
@doc """
Returns information about the specified *workflow type*. This includes
configuration settings specified when the type was registered and other
information such as creation date, current status, etc.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
</li> <li> `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "DescribeWorkflowType" action via
# this module's `request/4` helper.
def describe_workflow_type(client, input, options \\ []),
  do: request(client, "DescribeWorkflowType", input, options)
@doc """
Returns the history of the specified workflow execution. The results may be
split into multiple pages. To retrieve subsequent pages, make the call
again using the `nextPageToken` returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "GetWorkflowExecutionHistory" action via
# this module's `request/4` helper.
def get_workflow_execution_history(client, input, options \\ []),
  do: request(client, "GetWorkflowExecutionHistory", input, options)
@doc """
Returns information about all activities registered in the specified domain
that match the specified name and registration status. The result includes
information like creation date, current status of the activity, etc. The
results may be split into multiple pages. To retrieve subsequent pages,
make the call again using the `nextPageToken` returned by the initial call.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "ListActivityTypes" action via
# this module's `request/4` helper.
def list_activity_types(client, input, options \\ []),
  do: request(client, "ListActivityTypes", input, options)
@doc """
Returns a list of closed workflow executions in the specified domain that
meet the filtering criteria. The results may be split into multiple pages.
To retrieve subsequent pages, make the call again using the nextPageToken
returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
</li> <li> `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
</li> <li> `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "ListClosedWorkflowExecutions" action via
# this module's `request/4` helper.
def list_closed_workflow_executions(client, input, options \\ []),
  do: request(client, "ListClosedWorkflowExecutions", input, options)
@doc """
Returns the list of domains registered in the account. The results may be
split into multiple pages. To retrieve subsequent pages, make the call
again using the nextPageToken returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains. The element must be set to
`arn:aws:swf::AccountID:domain/*`, where *AccountID* is the account ID,
with no dashes.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "ListDomains" action via
# this module's `request/4` helper.
def list_domains(client, input, options \\ []),
  do: request(client, "ListDomains", input, options)
@doc """
Returns a list of open workflow executions in the specified domain that
meet the filtering criteria. The results may be split into multiple pages.
To retrieve subsequent pages, make the call again using the nextPageToken
returned by the initial call.
<note> This operation is eventually consistent. The results are best effort
and may not exactly reflect recent updates and changes.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
</li> <li> `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
</li> <li> `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
# Thin wrapper: dispatches the "ListOpenWorkflowExecutions" action via
# this module's `request/4` helper.
def list_open_workflow_executions(client, input, options \\ []),
  do: request(client, "ListOpenWorkflowExecutions", input, options)
@doc """
Returns information about workflow types in the specified domain. The
results may be split into multiple pages that can be retrieved by making
the call repeatedly.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_workflow_types(client, input, options \\ []),
  do: request(client, "ListWorkflowTypes", input, options)
@doc """
Used by workers to get an `ActivityTask` from the specified activity
`taskList`. This initiates a long poll, where the service holds the HTTP
connection open and responds as soon as a task becomes available. The
maximum time the service holds on to the request before responding is 60
seconds. If no task is available within 60 seconds, the poll returns an
empty result. An empty result, in this context, means that an ActivityTask
is returned, but that the value of taskToken is an empty string. If a task
is returned, the worker should use its type to identify and process it
correctly.
<important> Workers should set their client side socket timeout to at least
70 seconds (10 seconds higher than the maximum time service may hold the
poll request).
</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def poll_for_activity_task(client, input, options \\ []),
  do: request(client, "PollForActivityTask", input, options)
@doc """
Used by deciders to get a `DecisionTask` from the specified decision
`taskList`. A decision task may be returned for any open workflow execution
that is using the specified task list. The task includes a paginated view
of the history of the workflow execution. The decider should use the
workflow type and the history to determine how to properly handle the task.
This action initiates a long poll, where the service holds the HTTP
connection open and responds as soon a task becomes available. If no
decision task is available in the specified task list before the timeout of
60 seconds expires, an empty result is returned. An empty result, in this
context, means that a DecisionTask is returned, but that the value of
taskToken is an empty string.
<important> Deciders should set their client side socket timeout to at
least 70 seconds (10 seconds higher than the timeout).
</important> <important> Because the number of workflow history events for
a single workflow execution might be very large, the result returned might
be split up across a number of pages. To retrieve subsequent pages, make
additional calls to `PollForDecisionTask` using the `nextPageToken`
returned by the initial call. Note that you do *not* call
`GetWorkflowExecutionHistory` with this `nextPageToken`. Instead, call
`PollForDecisionTask` again.
</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def poll_for_decision_task(client, input, options \\ []),
  do: request(client, "PollForDecisionTask", input, options)
@doc """
Used by activity workers to report to the service that the `ActivityTask`
represented by the specified `taskToken` is still making progress. The
worker can also specify details of the progress, for example percent
complete, using the `details` parameter. This action can also be used by
the worker as a mechanism to check if cancellation is being requested for
the activity task. If a cancellation is being attempted for the specified
task, then the boolean `cancelRequested` flag returned by the service is
set to `true`.
This action resets the `taskHeartbeatTimeout` clock. The
`taskHeartbeatTimeout` is specified in `RegisterActivityType`.
This action doesn't in itself create an event in the workflow execution
history. However, if the task times out, the workflow execution history
contains a `ActivityTaskTimedOut` event that contains the information from
the last heartbeat generated by the activity worker.
<note> The `taskStartToCloseTimeout` of an activity type is the maximum
duration of an activity task, regardless of the number of
`RecordActivityTaskHeartbeat` requests received. The
`taskStartToCloseTimeout` is also specified in `RegisterActivityType`.
</note> <note> This operation is only useful for long-lived activities to
report liveliness of the task and to determine if a cancellation is being
attempted.
</note> <important> If the `cancelRequested` flag returns `true`, a
cancellation is being attempted. If the worker can cancel the activity, it
should respond with `RespondActivityTaskCanceled`. Otherwise, it should
ignore the cancellation request.
</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def record_activity_task_heartbeat(client, input, options \\ []),
  do: request(client, "RecordActivityTaskHeartbeat", input, options)
@doc """
Registers a new *activity type* along with its configuration settings in
the specified domain.
<important> A `TypeAlreadyExists` fault is returned if the type already
exists in the domain. You cannot change any configuration settings of the
type after its registration, and it must be registered as a new version.
</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.
</li> <li> `name`: String constraint. The key is `swf:name`.
</li> <li> `version`: String constraint. The key is `swf:version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def register_activity_type(client, input, options \\ []),
  do: request(client, "RegisterActivityType", input, options)
@doc """
Registers a new domain.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> You cannot use an IAM policy to control domain access for this
action. The name of the domain being registered is available as the
resource of this action.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def register_domain(client, input, options \\ []), do: request(client, "RegisterDomain", input, options)
@doc """
Registers a new *workflow type* and its configuration settings in the
specified domain.
The retention period for the workflow history is set by the
`RegisterDomain` action.
<important> If the type already exists, then a `TypeAlreadyExists` fault is
returned. You cannot change the configuration settings of a workflow type
once it is registered and it must be registered as a new version.
</important> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.
</li> <li> `name`: String constraint. The key is `swf:name`.
</li> <li> `version`: String constraint. The key is `swf:version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def register_workflow_type(client, input, options \\ []),
  do: request(client, "RegisterWorkflowType", input, options)
@doc """
Records a `WorkflowExecutionCancelRequested` event in the currently running
workflow execution identified by the given domain, workflowId, and runId.
This logically requests the cancellation of the workflow execution as a
whole. It is up to the decider to take appropriate actions when it receives
an execution history with this event.
<note> If the runId isn't specified, the `WorkflowExecutionCancelRequested`
event is recorded in the history of the current open workflow execution
with the specified workflowId in the domain.
</note> <note> Because this action allows the workflow to properly clean up
and gracefully close, it should be used instead of
`TerminateWorkflowExecution` when possible.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def request_cancel_workflow_execution(client, input, options \\ []),
  do: request(client, "RequestCancelWorkflowExecution", input, options)
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` was successfully canceled. Additional `details` can be
provided using the `details` argument.
These `details` (if provided) appear in the `ActivityTaskCanceled` event
added to the workflow history.
<important> Only use this operation if the `canceled` flag of a
`RecordActivityTaskHeartbeat` request returns `true` and if the activity
can be safely undone or abandoned.
</important> A task is considered open from the time that it is scheduled
until it is closed. Therefore a task is reported as open while a worker is
processing it. A task is closed after it has been specified in a call to
`RespondActivityTaskCompleted`, RespondActivityTaskCanceled,
`RespondActivityTaskFailed`, or the task has [timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_activity_task_canceled(client, input, options \\ []),
  do: request(client, "RespondActivityTaskCanceled", input, options)
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` completed successfully with a `result` (if provided). The
`result` appears in the `ActivityTaskCompleted` event in the workflow
history.
<important> If the requested task doesn't complete successfully, use
`RespondActivityTaskFailed` instead. If the worker finds that the task is
canceled through the `canceled` flag returned by
`RecordActivityTaskHeartbeat`, it should cancel the task, clean up and then
call `RespondActivityTaskCanceled`.
</important> A task is considered open from the time that it is scheduled
until it is closed. Therefore a task is reported as open while a worker is
processing it. A task is closed after it has been specified in a call to
RespondActivityTaskCompleted, `RespondActivityTaskCanceled`,
`RespondActivityTaskFailed`, or the task has [timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_activity_task_completed(client, input, options \\ []),
  do: request(client, "RespondActivityTaskCompleted", input, options)
@doc """
Used by workers to tell the service that the `ActivityTask` identified by
the `taskToken` has failed with `reason` (if specified). The `reason` and
`details` appear in the `ActivityTaskFailed` event added to the workflow
history.
A task is considered open from the time that it is scheduled until it is
closed. Therefore a task is reported as open while a worker is processing
it. A task is closed after it has been specified in a call to
`RespondActivityTaskCompleted`, `RespondActivityTaskCanceled`,
RespondActivityTaskFailed, or the task has [timed
out](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_activity_task_failed(client, input, options \\ []),
  do: request(client, "RespondActivityTaskFailed", input, options)
@doc """
Used by deciders to tell the service that the `DecisionTask` identified by
the `taskToken` has successfully completed. The `decisions` argument
specifies the list of decisions made while processing the task.
A `DecisionTaskCompleted` event is added to the workflow history. The
`executionContext` specified is attached to the event in the workflow
execution history.
**Access Control**
If an IAM policy grants permission to use `RespondDecisionTaskCompleted`,
it can express permissions for the list of decisions in the `decisions`
parameter. Each of the decisions has one or more parameters, much like a
regular API call. To allow for policies to be as readable as possible, you
can express permissions on decisions as if they were actual API calls,
including applying conditions to some parameters. For more information, see
[Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_decision_task_completed(client, input, options \\ []),
  do: request(client, "RespondDecisionTaskCompleted", input, options)
@doc """
Records a `WorkflowExecutionSignaled` event in the workflow execution
history and creates a decision task for the workflow execution identified
by the given domain, workflowId and runId. The event is recorded with the
specified user defined signalName and input (if provided).
<note> If a runId isn't specified, then the `WorkflowExecutionSignaled`
event is recorded in the history of the current open workflow with the
matching workflowId in the domain.
</note> <note> If the specified workflow execution isn't open, this method
fails with `UnknownResource`.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def signal_workflow_execution(client, input, options \\ []),
  do: request(client, "SignalWorkflowExecution", input, options)
@doc """
Starts an execution of the workflow type in the specified domain using the
provided `workflowId` and input data.
This action returns the newly started workflow execution.
**Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> Constrain the following parameters by using a `Condition`
element with the appropriate keys.
<ul> <li> `tagList.member.0`: The key is `swf:tagList.member.0`.
</li> <li> `tagList.member.1`: The key is `swf:tagList.member.1`.
</li> <li> `tagList.member.2`: The key is `swf:tagList.member.2`.
</li> <li> `tagList.member.3`: The key is `swf:tagList.member.3`.
</li> <li> `tagList.member.4`: The key is `swf:tagList.member.4`.
</li> <li> `taskList`: String constraint. The key is `swf:taskList.name`.
</li> <li> `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
</li> <li> `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
</li> </ul> </li> </ul> If the caller doesn't have sufficient permissions
to invoke the action, or the parameter values fall outside the specified
constraints, the action fails. The associated event attribute's `cause`
parameter is set to `OPERATION_NOT_PERMITTED`. For details and example IAM
policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def start_workflow_execution(client, input, options \\ []),
  do: request(client, "StartWorkflowExecution", input, options)
@doc """
Records a `WorkflowExecutionTerminated` event and forces closure of the
workflow execution identified by the given domain, runId, and workflowId.
The child policy, registered with the workflow type or specified when
starting this execution, is applied to any open child workflow executions
of this workflow execution.
<important> If the identified workflow execution was in progress, it is
terminated immediately.
</important> <note> If a runId isn't specified, then the
`WorkflowExecutionTerminated` event is recorded in the history of the
current open workflow with the matching workflowId in the domain.
</note> <note> You should consider using `RequestCancelWorkflowExecution`
action instead because it allows the workflow to gracefully close while
`TerminateWorkflowExecution` doesn't.
</note> **Access Control**
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
<ul> <li> Use a `Resource` element with the domain name to limit the action
to only specified domains.
</li> <li> Use an `Action` element to allow or deny permission to call this
action.
</li> <li> You cannot use an IAM policy to constrain this action's
parameters.
</li> </ul> If the caller doesn't have sufficient permissions to invoke the
action, or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using
IAM to Manage Access to Amazon SWF
Workflows](http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def terminate_workflow_execution(client, input, options \\ []),
  do: request(client, "TerminateWorkflowExecution", input, options)
@spec request(map(), binary(), map(), list()) ::
  {:ok, Poison.Parser.t | nil, Poison.Response.t} |
  {:error, Poison.Parser.t} |
  {:error, HTTPoison.Error.t}
# Signs and dispatches a JSON request to the SWF endpoint.
#
# `action` is the SimpleWorkflowService operation name (sent via the
# `X-Amz-Target` header); `input` is encoded as the JSON payload; `options`
# are passed straight through to `HTTPoison.post/4`.
#
# Returns:
#   * `{:ok, nil, response}` for a 200 with an empty body
#   * `{:ok, parsed_json, response}` for a 200 with a body
#   * `{:error, {exception, message}}` for non-200 responses, where both
#     fields are pulled from the AWS error JSON (`__type` / `message`)
#   * `{:error, %HTTPoison.Error{}}` on transport failure
defp request(client, action, input, options) do
  # Force the service name so SigV4 signing scopes the credential correctly.
  client = %{client | service: "swf"}
  host = get_host("swf", client)
  url = get_url(host, client)
  headers = [{"Host", host},
             {"Content-Type", "application/x-amz-json-1.0"},
             {"X-Amz-Target", "SimpleWorkflowService.#{action}"}]
  payload = Poison.Encoder.encode(input, [])
  # Signing must happen after the payload is final, since SigV4 hashes it.
  headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
  case HTTPoison.post(url, payload, headers, options) do
    {:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
      {:ok, nil, response}
    {:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
      {:ok, Poison.Parser.parse!(body), response}
    {:ok, _response=%HTTPoison.Response{body: body}} ->
      # Any non-200 status: surface the AWS-provided error type and message.
      error = Poison.Parser.parse!(body)
      exception = error["__type"]
      message = error["message"]
      {:error, {exception, message}}
    {:error, %HTTPoison.Error{reason: reason}} ->
      {:error, %HTTPoison.Error{reason: reason}}
  end
end
# Resolves the endpoint hostname; the special "local" region targets a
# local development server instead of AWS.
defp get_host(endpoint_prefix, client) do
  case client.region do
    "local" -> "localhost"
    region -> "#{endpoint_prefix}.#{region}.#{client.endpoint}"
  end
end
# Builds the full request URL from the resolved host plus the client's
# protocol and port.
defp get_url(host, %{proto: proto, port: port}) do
  to_string(proto) <> "://" <> host <> ":" <> to_string(port) <> "/"
end
end
|
lib/aws/swf.ex
| 0.9298
| 0.567697
|
swf.ex
|
starcoder
|
defmodule Vault.Auth.Generic do
  @moduledoc """
  A Generic Auth Adapter. An alternative to writing your own adapter.
  """

  @type request :: %{
          path: String.t(),
          method: :post,
          body: map()
        }

  @type response :: %{
          token: list(String.t()),
          ttl: list(String.t())
        }

  @type params :: %{
          request: request(),
          response: response()
        }

  @behaviour Vault.Auth.Adapter

  @doc """
  Authenticate with a custom auth method. Provide options for the request, and how
  to parse the response.

  ## Examples

  `request` defines parameters for the request to vault

  - `path`- the path for authentication, after "auth" If you want to authenticate against `https://myvault.com/v1/auth/jwt/login`, then the path would be `jwt/login`
  - `method`- one of `:get`, `:post`, `:put`, `:patch`, `:delete`, defaults to `:post`
  - `body`- any params needed to login. Defaults to `%{}`

  `response` defines parameters for parsing the response.

  - `token` - a list of properties that describe the JSON path to a token. Defaults to `["auth", "client_token"]`
  - `ttl` - a list of properties that describe the JSON path to the ttl, or lease duration. Defaults to `["auth", "lease_duration"]`

  The following would provide a minimal adapter for the JWT backend:

  ```
  {:ok, token, ttl} = Vault.Auth.Generic.login(vault, %{
    request: %{
      path: "/jwt/login",
      body: %{role: "my-role", jwt: "my-jwt"},
    }
  })
  ```

  Here's the above example as part of the full Vault client flow. On success,
  it returns an authenticated vault client.

  ```
  vault =
    Vault.new([
      auth: Vault.Auth.Generic,
      http: Vault.HTTP.Tesla,
      engine: Vault.KVV2
    ])

  {:ok, vault} = Vault.auth(vault, %{
    request: %{
      path: "/jwt/login",
      body: %{role: "my-role", jwt: "my-jwt"},
    }
  })
  ```

  Here's a more explicit example, with every option configured.

  ```
  vault =
    Vault.new([
      auth: Vault.Auth.Generic,
      http: Vault.HTTP.Tesla,
      engine: Vault.KVV2
    ])

  {:ok, vault} = Vault.auth(vault, %{
    request: %{
      path: "/jwt/login",
      method: :post,
      body: %{role: "my-role", jwt: "my-jwt"}
    },
    response: %{
      token: ["auth", "client_token"],
      ttl: ["auth", "lease_duration"]
    }
  })
  ```
  """

  # Default JSON paths into Vault's auth response.
  @default_response %{token: ["auth", "client_token"], ttl: ["auth", "lease_duration"]}

  @impl true
  @spec login(Vault.t(), params) :: Vault.Auth.Adapter.response()
  def login(vault, params)

  def login(%Vault{} = vault, %{request: request} = params) do
    # Fill in defaults; caller-supplied keys win the merge.
    request = Map.merge(%{method: :post, body: %{}}, request)
    response = Map.merge(@default_response, Map.get(params, :response, %{}))

    headers = [
      {"Content-Type", "application/json"}
    ]

    url = "auth/#{request.path}"

    with {:ok, http_response} <-
           Vault.HTTP.request(vault, request.method, url, body: request.body, headers: headers) do
      case http_response do
        %{"errors" => []} ->
          {:error, ["Key not found"]}

        %{"errors" => messages} ->
          {:error, messages}

        otherwise ->
          # Walk the configured JSON paths to extract the token and ttl.
          token = get_in(otherwise, response.token)
          ttl = get_in(otherwise, response.ttl)

          if token && ttl do
            {:ok, token, ttl}
          else
            {:error, ["Unexpected response from vault.", otherwise]}
          end
      end
    else
      {:error, reason} ->
        {:error, ["Http adapter error", inspect(reason)]}
    end
  end
end
|
lib/vault/auth/generic.ex
| 0.858006
| 0.719999
|
generic.ex
|
starcoder
|
defmodule Bingo do
  @moduledoc """
  This is the Bingo Game. The game is a struct, with a value map, a row count map, and a column count map
  The value map maps the value to its coordinates. For this game, the coordinates are 1..5
  The row count map maps a row number to the count of marked numbers in the row.
  The column count map is similar.
  When a number is called, use the value map to see if that number is on the board. If it is, then use
  its coordinates to identify the row and column where the number is on the board.
  Increment the correct row and column counts.
  To determine if it is a winner, search through the row and column counts to find a value of 5. If there is one,
  then the board is a winner.
  The functions are new, call_number, is_bingo?
  """
  defstruct values: %{}, row_counts: %{}, column_counts: %{}

  @doc """
  new.
  Given a list of 5 lists, where each sub-list has 5 numbers, create the values map of the game.
  Also set the row and column count maps to zero counts.
  Raises a MatchError if the board does not contain 25 distinct numbers.
  """
  def new(list_of_lists) do
    r_counts = Map.new(1..5, fn ndx -> {ndx, 0} end)
    c_counts = Map.new(1..5, fn ndx -> {ndx, 0} end)

    values =
      Enum.reduce(1..5, %{}, fn row_number, v_map ->
        new_row(v_map, list_of_lists, row_number)
      end)

    # Assert the board holds 25 distinct numbers (duplicates would collapse keys).
    25 = map_size(values)
    %__MODULE__{values: values, row_counts: r_counts, column_counts: c_counts}
  end

  # Adds one row of the board to the values map. Each number maps to
  # {marked?, row, column}, initially unmarked.
  # NOTE: the guard uses `and` — the original used two `when` clauses, which
  # Elixir combines with `or`, so a non-list or wrong-length board could slip
  # through one of the checks.
  def new_row(%{} = v_map, row_values, row_index)
      when is_list(row_values) and length(row_values) == 5 do
    row_values = Enum.at(row_values, row_index - 1)

    Enum.reduce(1..5, v_map, fn column_index, v ->
      Map.put(v, Enum.at(row_values, column_index - 1), {false, row_index, column_index})
    end)
  end

  @doc """
  Marks the called number on the board, if present, and updates the row and
  column counts. Numbers not on the board — or already marked — leave the
  game unchanged.
  """
  def call_number(%__MODULE__{} = g, number) when is_integer(number) do
    call_number_aux(g, number, Map.get(g.values, number))
  end

  # Number not on the board: no-op.
  def call_number_aux(%__MODULE__{} = g, _number, nil), do: g
  # Already marked: no-op (the original raised FunctionClauseError if the
  # same number was called twice).
  def call_number_aux(%__MODULE__{} = g, _number, {true, _row, _col}), do: g

  def call_number_aux(%__MODULE__{} = g, number, {false, row_index, column_index}) do
    rcs_new = Map.update!(g.row_counts, row_index, &(&1 + 1))
    ccs_new = Map.update!(g.column_counts, column_index, &(&1 + 1))
    values_new = Map.put(g.values, number, {true, row_index, column_index})
    %{g | values: values_new, row_counts: rcs_new, column_counts: ccs_new}
  end

  @doc """
  Returns true when any row or column has all 5 numbers marked.
  """
  def is_bingo?(%__MODULE__{row_counts: row_counts, column_counts: column_counts}) do
    Enum.any?(1..5, fn index ->
      Map.get(row_counts, index) == 5 or Map.get(column_counts, index) == 5
    end)
  end
end
|
apps/bingo/lib/bingo.ex
| 0.800419
| 0.898053
|
bingo.ex
|
starcoder
|
defmodule Mastery.Core.Quiz do
  @moduledoc """
  A quiz: question templates grouped by category, plus the state needed to
  track a learner's progress (current question, last response, per-template
  score record, and the templates already used or mastered).

  A template is mastered once it has been answered correctly `mastery` times
  in a row — an incorrect answer resets that template's record.
  """

  alias Mastery.Core.{Template, Question, Response}

  defstruct title: nil,
            mastery: 3,
            templates: %{},
            used: [],
            current_question: nil,
            last_response: nil,
            record: %{},
            mastered: []

  @doc """
  Builds a quiz from the given fields. Raises (via `struct!/2`) if `fields`
  contains keys that are not quiz struct keys.
  """
  def new(fields) do
    struct!(__MODULE__, fields)
  end

  @doc """
  Builds a template from `fields` and prepends it to the list stored under
  the template's category.
  """
  def add_template(quiz, fields) do
    template = Template.new(fields)

    templates =
      update_in(
        quiz.templates,
        [template.category],
        &add_to_list_or_nil(&1, template)
      )

    %{quiz | templates: templates}
  end

  # First template in a category starts a fresh list.
  defp add_to_list_or_nil(nil, template), do: [template]
  defp add_to_list_or_nil(templates, template), do: [template | templates]

  @doc """
  Picks a random question for the quiz, or returns nil when there are no
  templates left. The chosen question's template is moved to `:used`; when
  that empties the template map, the used templates are cycled back in.
  """
  def select_question(%__MODULE__{templates: t}) when map_size(t) == 0, do: nil

  def select_question(quiz) do
    quiz
    |> pick_current_question()
    |> move_template(:used)
    |> reset_template_cycle()
  end

  @doc """
  Records a response to the current question.

  A correct response increments the current template's record and, once the
  record reaches `quiz.mastery`, moves the template to `:mastered`. An
  incorrect response resets the template's record. Either way the response
  is stored as `last_response`.
  """
  def answer_question(quiz, %Response{correct: true} = response) do
    new_quiz =
      quiz
      |> inc_record()
      |> save_response(response)

    maybe_advance(new_quiz, mastered?(new_quiz))
  end

  def answer_question(quiz, %Response{correct: false} = response) do
    quiz
    |> reset_record()
    |> save_response(response)
  end

  defp save_response(quiz, response) do
    Map.put(quiz, :last_response, response)
  end

  # Mastered when the consecutive-correct count for the current template
  # equals the quiz's mastery threshold.
  defp mastered?(quiz) do
    score = Map.get(quiz.record, template(quiz).name, 0)
    score == quiz.mastery
  end

  # Bumps (or initializes to 1) the record for the current template.
  defp inc_record(%{current_question: question} = quiz) do
    new_record = Map.update(quiz.record, question.template.name, 1, &(&1 + 1))
    Map.put(quiz, :record, new_record)
  end

  defp maybe_advance(quiz, false = _mastered), do: quiz
  defp maybe_advance(quiz, true = _mastered), do: advance(quiz)

  # Moves the freshly mastered template out of rotation and clears its
  # record and its entry in the used list.
  defp advance(quiz) do
    quiz
    |> move_template(:mastered)
    |> reset_record()
    |> reset_used()
  end

  defp reset_record(%{current_question: question} = quiz) do
    Map.put(quiz, :record, Map.delete(quiz.record, question.template.name))
  end

  defp pick_current_question(quiz) do
    Map.put(
      quiz,
      :current_question,
      select_a_random_question(quiz)
    )
  end

  defp reset_used(%{current_question: question} = quiz) do
    Map.put(
      quiz,
      :used,
      List.delete(quiz.used, question.template)
    )
  end

  # Random category first, then a random template within that category.
  defp select_a_random_question(quiz) do
    quiz.templates
    |> Enum.random()
    |> elem(1)
    |> Enum.random()
    |> Question.new()
  end

  # Removes the current question's template from the category map and
  # prepends it to the given list field (:used or :mastered).
  defp move_template(quiz, field) do
    quiz
    |> remove_template_from_category
    |> add_template_to_field(field)
  end

  defp remove_template_from_category(quiz) do
    template = template(quiz)

    new_category_templates =
      quiz.templates
      |> Map.fetch!(template.category)
      |> List.delete(template)

    # Drop the category entirely once its last template is removed, so
    # map_size reflects "no templates remaining".
    new_templates =
      if new_category_templates == [] do
        Map.delete(quiz.templates, template.category)
      else
        Map.put(quiz.templates, template.category, new_category_templates)
      end

    Map.put(quiz, :templates, new_templates)
  end

  defp template(quiz), do: quiz.current_question.template

  # NOTE(review): public while all sibling helpers are private —
  # presumably for tests; confirm before tightening to defp.
  def add_template_to_field(quiz, field) do
    template = template(quiz)
    list = Map.get(quiz, field)
    Map.put(quiz, field, [template | list])
  end

  # When every template has been used, recycle the used list back into the
  # category map and start a new cycle.
  defp reset_template_cycle(%{templates: templates, used: used} = quiz)
       when map_size(templates) == 0 do
    %__MODULE__{
      quiz
      | templates: Enum.group_by(used, fn template -> template.category end),
        used: []
    }
  end

  defp reset_template_cycle(quiz), do: quiz
end
|
lib/mastery/core/quiz.ex
| 0.506591
| 0.460835
|
quiz.ex
|
starcoder
|
defmodule Nightcrawler.Parser do
  @moduledoc """
  Parses multiple things to deliver neat representations of entities.
  """

  @doc """
  Grabs the title and start + end values.

  ## Example

      iex> Nightcrawler.Parser.title("Nightcrawler (2011 - 2018)")
      %{title: "Nightcrawler", start: 2011, end: 2018}

      iex> Nightcrawler.Parser.title("Super McBadass Comic Man (1989)")
      %{title: "Super McBadass Comic Man", start: 1989, end: nil}

      iex> Nightcrawler.Parser.title("Some Title (2018 - Present)")
      %{title: "Some Title", start: 2018, end: :present}
  """
  def title(title) do
    # Extended-mode regex: a title, then "(YYYY)" or "(YYYY - YYYY)" or
    # "(YYYY - Present)" at the end of the string.
    regex = ~R/^(?<title>.+)
    \s\(
    (?<start>\d{4})
    (?:
    (?:\s|-)+
    (?<end>\d{4}|Present)
    )?
    \)$/x

    regex
    |> Regex.named_captures(title)
    |> Map.new(fn {k, v} ->
      # Keys are only our own capture names ("title"/"start"/"end"),
      # so String.to_atom/1 is bounded here.
      key = String.to_atom(k)

      cond do
        # Optional capture that did not match (e.g. no end year).
        v == "" ->
          {key, nil}

        key == :end and v == "Present" ->
          {key, v |> String.downcase() |> String.to_atom()}

        key in ~w(start end)a ->
          {key, String.to_integer(v)}

        true ->
          {key, v}
      end
    end)
  end

  @doc """
  Parses the entity urls that are returned by the marvel api.

  ## Examples

      iex> Nightcrawler.Parser.api_url("http://gateway.marvel.com/v1/public/series/18454/characters")
      %{entity: :series, id: 18454, sub_entity: :characters}

      iex> Nightcrawler.Parser.api_url("http://gateway.marvel.com/v1/public/series/18454")
      %{entity: :series, id: 18454, sub_entity: nil}
  """
  def api_url(url) do
    regex = ~R"^http://gateway.marvel.com/v1/public/
    (?<entity>\w+)/(?<id>\d+)(?:/(?<sub_entity>\w+))?$"x

    regex
    |> Regex.named_captures(url)
    |> Map.new(fn {k, v} ->
      key = String.to_atom(k)

      cond do
        key == :id ->
          {key, String.to_integer(v)}

        key == :sub_entity and v == "" ->
          {key, nil}

        # NOTE: creates atoms from URL segments — only acceptable because
        # these URLs come from the Marvel API, not arbitrary user input.
        key in ~w(entity sub_entity)a ->
          {key, String.to_atom(v)}

        # Unreachable with the current capture names; fixed to return the
        # atom key so every branch produces the same key type.
        true ->
          {key, v}
      end
    end)
  end

  @doc """
  Takes a raw `api_result` and an entity definition and transforms it into a
  compatible map of values using the functions defined in the `transform_definition`.

  Keys of `api_result` with no matching transform are dropped.
  """
  def transform_entity(api_result, transform_definition) do
    api_result
    |> Enum.map(fn {key, _val} = row ->
      # NOTE: String.to_atom/1 on external keys can grow the atom table;
      # assumed safe because the key set comes from a fixed API schema.
      key_atom = String.to_atom(key)

      case Map.fetch(transform_definition, key_atom) do
        {:ok, func} -> func.(row)
        :error -> nil
      end
    end)
    |> Enum.reject(&is_nil/1)
    |> Map.new()
  end

  # parser functions

  @doc """
  Converts a key value pair with a camelCased key to a snake_cased atom key.
  """
  @spec underscore_key({String.t(), String.t() | integer}) :: {atom, String.t() | integer}
  def underscore_key({key, val}),
    do: {key |> Macro.underscore() |> String.to_existing_atom(), val}

  @doc """
  Passes through the key value pair while converting the key to an atom.
  """
  @spec integer_or_string({String.t(), String.t() | integer}) :: {atom, String.t() | integer}
  def integer_or_string({key, val}), do: {String.to_existing_atom(key), val}

  @doc """
  Converts the key value pair with a datetime string to a compatible datetime,
  or nil if the value is not a valid ISO 8601 datetime.
  """
  @spec maybe_datetime({String.t(), String.t()}) :: nil | {atom, DateTime.t()}
  def maybe_datetime({key, val}) do
    case DateTime.from_iso8601(val) do
      {:ok, datetime, _offset} ->
        {String.to_existing_atom(key), datetime}

      {:error, _reason} ->
        nil
    end
  end

  @doc """
  Converts the thumbnail key value pair to a compatible key value pair,
  keeping only the "extension" and "path" fields.
  """
  @spec thumbnail({String.t(), map}) :: {atom, map}
  def thumbnail({key, val}) do
    {String.to_existing_atom(key), %{extension: val["extension"], path: val["path"]}}
  end
end
|
apps/nightcrawler/lib/nightcrawler/parser.ex
| 0.82347
| 0.435841
|
parser.ex
|
starcoder
|
defmodule Pigeon.Dispatcher do
  @moduledoc """
  Dispatcher worker for push notifications.

  If your push workers are relatively static, it is encouraged to follow the adapter
  guides. For other use cases, such as supporting dynamic configurations, dispatchers
  can be started and stopped as needed.

  ## Using Dynamic Dispatchers

  ```
  # FCM as an example, but use the relevant options for your push type.
  opts = [
    adapter: Pigeon.FCM,
    project_id: "example-project-123",
    service_account_json: File.read!("service-account.json")
  ]
  {:ok, pid} = Pigeon.Dispatcher.start_link(opts)
  notification = Pigeon.FCM.Notification.new({:token, "regid"})
  Pigeon.push(pid, notification)
  ```

  ## Loading Configurations from a Database

  ```
  defmodule YourApp.Application do
    @moduledoc false

    use Application

    @doc false
    def start(_type, _args) do
      children = [
        YourApp.Repo,
        {Registry, keys: :unique, name: Registry.YourApp}
      ] ++ push_workers()

      opts = [strategy: :one_for_one, name: YourApp.Supervisor]
      Supervisor.start_link(children, opts)
    end

    defp push_workers do
      YourApp.Repo.PushApplication
      |> YourApp.Repo.all()
      |> Enum.map(&push_spec/1)
    end

    defp push_spec(%{type: "apns"} = config) do
      {Pigeon.Dispatcher, [
        adapter: Pigeon.APNS,
        key: config.key,
        key_identifier: config.key_identifier,
        team_id: config.team_id,
        mode: config.mode,
        name: {:via, Registry, {Registry.YourApp, config.name}}
      ]}
    end

    defp push_spec(%{type: "fcm"} = config) do
      {Pigeon.Dispatcher, [
        adapter: Pigeon.FCM,
        name: {:via, Registry, {Registry.YourApp, config.name}},
        project_id: config.project_id,
        service_account_json: config.service_account_json
      ]}
    end
  end
  ```

  Once running, you can send to any of these workers by name.

  ```
  Pigeon.push({:via, Registry, {Registry.YourApp, "app1"}}, notification)
  ```
  """

  use Supervisor

  @doc false
  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      @otp_app opts[:otp_app]

      # Config resolution order: defaults < app env < call-site opts.
      def child_spec(opts \\ []) do
        config_opts = Application.get_env(@otp_app, __MODULE__, [])

        opts =
          [name: __MODULE__, pool_size: Pigeon.default_pool_size()]
          |> Keyword.merge(config_opts)
          |> Keyword.merge(opts)

        %{
          id: __MODULE__,
          start: {Pigeon.Dispatcher, :start_link, [opts]},
          type: :worker
        }
      end

      @doc """
      Sends a push notification with given options.
      """
      def push(notification, opts \\ []) do
        Pigeon.push(__MODULE__, notification, opts)
      end
    end
  end

  @doc """
  Starts the dispatcher supervisor. Requires an `:adapter` option and
  raises when it is missing.
  """
  def start_link(opts) do
    opts[:adapter] || raise "adapter is not specified"
    Supervisor.start_link(__MODULE__, opts, name: opts[:name])
  end

  @doc false
  def init(opts) do
    opts =
      opts
      |> Keyword.put(:supervisor, opts[:name] || self())
      |> Keyword.delete(:name)

    # One worker per pool slot, distinguished by integer child ids.
    children =
      for index <- 1..(opts[:pool_size] || Pigeon.default_pool_size()) do
        Supervisor.child_spec({Pigeon.DispatcherWorker, opts}, id: index)
      end

    Supervisor.init(children, strategy: :one_for_one)
  end
end
|
lib/pigeon/dispatcher.ex
| 0.714429
| 0.571049
|
dispatcher.ex
|
starcoder
|
defmodule ReviewAnalysis do
  @moduledoc """
  Useful functions to analyse reviews contents.
  """

  # Words considered "overly positive" when tallying a review body.
  @overly_positive_words [
    "awesome",
    "best",
    "friendly",
    "amazing",
    "excellent",
    "better",
    "wonderful",
    "dream",
    "excellence",
    "highest",
    "knowledgeable",
    "fan",
    "great"
  ]

  @doc """
  Matches just the highest rating reviews in all items.

  Returns `true` only when the overall dealership rating and every individual
  rating equal 50.

  ## Examples

      iex> ReviewAnalysis.max_rating_match(%Model.Review{dealership_rating: 50, ratings: %Model.Ratings{customer_service: 50, experience: 50, friendliness: 50, pricing: 50, quality_of_work: 50}})
      true

      iex> ReviewAnalysis.max_rating_match(%Model.Review{dealership_rating: 40, ratings: %Model.Ratings{customer_service: 50, experience: 50, friendliness: 50, pricing: 50, quality_of_work: 50}})
      false

      iex> ReviewAnalysis.max_rating_match(%Model.Review{dealership_rating: 50, ratings: %Model.Ratings{customer_service: 0, experience: 50, friendliness: 50, pricing: 50, quality_of_work: 50}})
      false
  """
  def max_rating_match(%Model.Review{
        dealership_rating: 50,
        ratings: %Model.Ratings{
          customer_service: 50,
          experience: 50,
          friendliness: 50,
          pricing: 50,
          quality_of_work: 50
        }
      }),
      do: true

  def max_rating_match(%Model.Review{}), do: false

  @doc """
  Counts overly positive words occurrences in review body and stores the tally
  under `review.analysis.overly_positive_words_count`.

  Punctuation (`, . ! ?`) is stripped and matching is case-insensitive.

  ## Examples

      iex> ReviewAnalysis.overly_positive_words_count(%Model.Review{body: "You are the best! Awesome! The best experience ever!"})
      %Model.Review{analysis: %Model.Analysis{overly_positive_words_count: %{"awesome" => 1, "best" => 2}}, body: "You are the best! Awesome! The best experience ever!", date: nil, dealership_rating: nil, ratings: %Model.Ratings{ customer_service: nil, experience: nil, friendliness: nil, pricing: nil, quality_of_work: nil}, reason_for_visit: nil, title: nil, user: nil}

      iex> ReviewAnalysis.overly_positive_words_count(%Model.Review{body: "Terrible experience!"})
      %Model.Review{analysis: %Model.Analysis{overly_positive_words_count: %{}}, body: "Terrible experience!", date: nil, dealership_rating: nil, ratings: %Model.Ratings{ customer_service: nil, experience: nil, friendliness: nil, pricing: nil, quality_of_work: nil}, reason_for_visit: nil, title: nil, user: nil}

      iex> ReviewAnalysis.overly_positive_words_count(%Model.Review{body: "You are the best! Awesome!"})
      %Model.Review{analysis: %Model.Analysis{overly_positive_words_count: %{"awesome" => 1, "best" => 1}}, body: "You are the best! Awesome!", date: nil, dealership_rating: nil, ratings: %Model.Ratings{ customer_service: nil, experience: nil, friendliness: nil, pricing: nil, quality_of_work: nil}, reason_for_visit: nil, title: nil, user: nil}
  """
  def overly_positive_words_count(%Model.Review{} = review) do
    word_count =
      review.body
      |> String.replace(~r/[,\.\!\?]/, "")
      |> String.downcase()
      |> String.split()
      |> Enum.reduce(%{}, &overly_positive_words_count_reduce/2)

    put_in(review.analysis.overly_positive_words_count, word_count)
  end

  # Increments the tally for `word` when it is in the overly-positive list;
  # Map.update/4 replaces the original nested case expressions.
  defp overly_positive_words_count_reduce(word, acc) do
    if word in @overly_positive_words do
      Map.update(acc, word, 1, &(&1 + 1))
    else
      acc
    end
  end
end
|
lib/review_analysis.ex
| 0.69285
| 0.501404
|
review_analysis.ex
|
starcoder
|
defmodule Timber.Exceptions.Translator do
  @moduledoc """
  This module implements a Logger translator to take advantage of
  the richer metadata available from Logger in OTP 21 and Elixir 1.7+.

  Including the translator allows for crash reasons and stacktraces to be
  included as structured metadata within Timber.

  The translator depends on using Elixir's internal Logger.Translator, and
  is not compatible with other translators as a Logger event can only be
  translated once.

  To install, add the translator in your application's start function:

  ```
  # ...
  :ok = Logger.add_translator({Timber.Exceptions.Translator, :translate})

  opts = [strategy: :one_for_one, name: MyApp.Supervisor]
  Supervisor.start_link(children, opts)
  ```
  """

  # Maximum number of frames kept in a reported backtrace.
  @max_backtrace_size 20

  @doc """
  Logger translator callback. Delegates to `Logger.Translator.translate/4`
  and, when the translated event carries metadata, enriches it with a
  structured `:event` error map.
  """
  def translate(min_level, level, kind, message) do
    case Logger.Translator.translate(min_level, level, kind, message) do
      {:ok, char, metadata} ->
        {:ok, char, transform_metadata(metadata)}

      {:ok, char} ->
        {:ok, char}

      :skip ->
        :skip

      :none ->
        :none
    end
  end

  @doc """
  Adds an `:event` entry describing the crash when the metadata carries a
  usable `:crash_reason`; otherwise returns the metadata unchanged.
  """
  def transform_metadata(nil), do: []

  def transform_metadata(metadata) do
    with {:ok, crash_reason} <- Keyword.fetch(metadata, :crash_reason),
         {:ok, error} <- get_error(crash_reason) do
      Keyword.merge([event: %{error: error}], metadata)
    else
      _ ->
        metadata
    end
  end

  # The crash reason may arrive in several shapes; each clause unwraps one:
  # {{exception, stacktrace}, outer_stack} — e.g. from nested exits.
  defp get_error({{%{__exception__: true} = error, stacktrace}, _stack})
       when is_list(stacktrace) do
    {:ok, build_error(error, stacktrace)}
  end

  # {exception, stacktrace}
  defp get_error({%{__exception__: true} = error, stacktrace}) when is_list(stacktrace) do
    {:ok, build_error(error, stacktrace)}
  end

  # {{type, reason}, stacktrace} — keep only the reason term.
  defp get_error({{_type, reason}, stacktrace}) when is_list(stacktrace) do
    {:ok, build_error(reason, stacktrace)}
  end

  # {bare_erlang_error, stacktrace}
  defp get_error({error, stacktrace}) when is_list(stacktrace) do
    {:ok, build_error(error, stacktrace)}
  end

  defp get_error(_) do
    {:error, :no_info}
  end

  defp build_error(%{__exception__: true, __struct__: module} = error, stacktrace) do
    message = Exception.message(error)
    module_name = Timber.Utils.Module.name(module)

    %{
      message: message,
      name: module_name,
      backtrace: build_backtrace(stacktrace)
    }
  end

  # Non-exception Erlang errors are normalized into Elixir exceptions first.
  defp build_error(error, stacktrace) do
    ErlangError.normalize(error, stacktrace)
    |> build_error(stacktrace)
  end

  # Already-formatted backtrace (list of maps): cap its length.
  defp build_backtrace([trace | _] = backtrace) when is_map(trace) do
    Enum.take(backtrace, @max_backtrace_size)
  end

  # Raw Erlang stacktrace (list of MFA tuples): format and cap it.
  # Fix: this path previously skipped the @max_backtrace_size truncation
  # that the map-shaped path applied.
  defp build_backtrace([stack | _rest] = stacktrace) when is_tuple(stack) do
    stacktrace
    |> Enum.take(@max_backtrace_size)
    |> stacktrace_to_backtrace()
  end

  defp build_backtrace(_) do
    []
  end

  defp stacktrace_to_backtrace(stacktrace) do
    # arity is an integer or list of arguments
    Enum.map(stacktrace, fn {module, function, arity, location} ->
      arity =
        case arity do
          arity when is_list(arity) -> length(arity)
          _ -> arity
        end

      # Missing :file yields "" (to_string(nil) == "").
      file =
        Keyword.get(location, :file)
        |> Kernel.to_string()

      line = Keyword.get(location, :line)

      %{
        function: Exception.format_mfa(module, function, arity),
        file: file,
        line: line
      }
    end)
  end
end
|
lib/timber_exceptions/translator.ex
| 0.773772
| 0.810554
|
translator.ex
|
starcoder
|
defmodule Yum.Migration do
  @moduledoc """
  A struct that contains the migration info.

  Migration items can optionally contain metadata associated with that
  individual transaction.
  """
  defstruct [
    timestamp: -1,
    move: [],
    delete: [],
    add: [],
    update: [],
  ]

  @type meta :: any
  @type file :: String.t
  @type item(item) :: item | { meta, item }
  @type transaction(op, item) :: { op, item(item) }
  @type delete :: transaction(:delete, file)
  @type add :: transaction(:add, file)
  @type update :: transaction(:update, file)
  @type move :: transaction(:move, { file, file })
  @type t :: %Yum.Migration{ timestamp: integer, move: [item({ file, file })], delete: [item(file)], add: [item(file)], update: [item(file)] }

  @doc """
  Convert to a migration struct.
  """
  @spec new(Yum.Data.migration) :: t
  def new(data) do
    # "timestamp" is expected to be a string-encoded integer.
    %Yum.Migration{ timestamp: String.to_integer(data["timestamp"]) }
    |> new_moved(data)
    |> new_deleted(data)
    |> new_added(data)
    |> new_updated(data)
  end

  # Each field is copied only when the corresponding key is present in the
  # source data; otherwise the struct default ([]) is kept.
  defp new_moved(migration, %{ "move" => moved }), do: %{ migration | move: moved }
  defp new_moved(migration, _), do: migration

  defp new_deleted(migration, %{ "delete" => deleted }), do: %{ migration | delete: deleted }
  defp new_deleted(migration, _), do: migration

  defp new_added(migration, %{ "add" => added }), do: %{ migration | add: added }
  defp new_added(migration, _), do: migration

  defp new_updated(migration, %{ "update" => updated }), do: %{ migration | update: updated }
  defp new_updated(migration, _), do: migration

  @doc """
  Get the list of transactions that the migration represents.

  These transactions are ordered in the order they should be applied:
  moves, then deletes, then adds, then updates.
  """
  @spec transactions(t) :: [move | delete | add | update]
  def transactions(migration) do
    Enum.map(migration.move, &({ :move, &1 }))
    ++ Enum.map(migration.delete, &({ :delete, &1 }))
    ++ Enum.map(migration.add, &({ :add, &1 }))
    ++ Enum.map(migration.update, &({ :update, &1 }))
  end

  @doc """
  Merge two migrations into one.
  """
  @spec merge(t, t) :: t
  # Normalize argument order so migration_a is always the older migration.
  def merge(migration_a = %{ timestamp: a }, migration_b = %{ timestamp: b }) when a > b, do: merge(migration_b, migration_a)
  def merge(migration_a, migration_b) do
    # Rewrite a's file references through b's moves, tracking which of b's
    # move transactions were consumed by the rewrite.
    { added, moved_removals } = merge_move(migration_a.add, migration_b.move)
    { updated, _ } = merge_move(migration_a.update, migration_b.move)
    { moved, moved_removals } = merge_move(migration_a.move, migration_b.move, moved_removals)
    # Drop a's adds/updates whose files b subsequently deletes.
    { added, deleted_removals } = merge_delete(added, migration_b.delete)
    { updated, _ } = merge_delete(updated, migration_b.delete)
    # merge_move accumulates by prepending, so restore the original order.
    moved = Enum.reverse(moved)

    %Yum.Migration{
      timestamp: migration_b.timestamp,
      add: added ++ migration_b.add,
      update: updated ++ Enum.filter(migration_b.update, &(!changes?(&1, added) && !changes?(&1, updated))),
      move: moved ++ Enum.filter(migration_b.move, &(!changes?(&1, moved_removals))),
      delete: migration_a.delete ++ Enum.filter(migration_b.delete, &(!changes?(&1, deleted_removals)))
    }
  end

  # Applies b's move transactions to each of a's transactions, returning the
  # rewritten transactions (reversed) plus the consumed move transactions.
  defp merge_move(transactions, move_transactions, removals \\ []) do
    Enum.reduce(transactions, { [], removals }, fn transaction, { merged_transactions, removals } ->
      case move(transaction, move_transactions) do
        { transaction, nil } -> { [transaction|merged_transactions], removals }
        { transaction, move_transaction } -> { [transaction|merged_transactions], [move_transaction|removals] }
      end
    end)
  end

  # A move pair: only the destination side is subject to further renames.
  defp move({ old_file, file }, move_transactions) do
    { new_file, transaction } = move(file, move_transactions)
    { { old_file, new_file }, transaction }
  end
  # Returns { possibly-renamed file, the move transaction applied (or nil) }.
  # Moves may carry metadata: { meta, { from, to } }.
  defp move(file, move_transactions) do
    Enum.find_value(move_transactions, { file, nil }, fn
      transaction = { ^file, new_file } -> { new_file, transaction }
      { _, transaction = { ^file, new_file } } -> { new_file, transaction }
      _ -> false
    end)
  end

  # Splits transactions into { kept, files removed by delete transactions }.
  defp merge_delete(transactions, delete_transactions, removals \\ []) do
    Enum.reduce(transactions, { [], removals }, fn transaction, { merged_transactions, removals } ->
      if changes?(transaction, delete_transactions) do
        case transaction do
          { _, file } -> { merged_transactions, [file|removals] }
          file -> { merged_transactions, [file|removals] }
        end
      else
        { [transaction|merged_transactions], removals }
      end
    end)
  end

  # True when `file` is the target of any transaction in the list, with or
  # without attached metadata.
  defp changes?({ _, file }, transactions), do: changes?(file, transactions)
  defp changes?(file, transactions) do
    Enum.find_value(transactions, false, fn
      { _, ^file } -> true
      ^file -> true
      _ -> false
    end)
  end
end
|
lib/yum/migration.ex
| 0.831383
| 0.531696
|
migration.ex
|
starcoder
|
defmodule Membrane.RemoteControlled.Pipeline do
@moduledoc """
`Membrane.RemoteControlled.Pipeline` is a basic `Membrane.Pipeline` implementation that can be
controlled by a controlling process.
The controlling process can request the execution of arbitrary
valid `Membrane.Pipeline.Action`:
```
children = ...
links = ...
actions = [{:spec, %ParentSpec{children: children, links: links}}]
Pipeline.exec_actions(pipeline, actions)
```
The controlling process can also subscribe to the messages
sent by the pipeline and later on synchronously await these messages:
```
# subscribes to message which is sent when the pipeline enters any playback state
Pipeline.subscribe(pipeline, %Message.PlaybackState{state: _})
...
# awaits for the message sent when the pipeline enters :playing playback state
Pipeline.await_playback_state(pipeline, :playing)
...
# awaits for the message sent when the pipeline enters :stopped playback state
Pipeline.await_playback_state(pipeline, :stopped)
```
`Membrane.RemoteControlled.Pipeline` can be used when there is no need for introducing a custom
logic in the `Membrane.Pipeline` callbacks implementation. An example of usage could be running a
pipeline from the elixir script. `Membrane.RemoteControlled.Pipeline` sends the following messages:
* `Membrane.RemoteControlled.Message.PlaybackState.t()` sent when pipeline enters a given playback state,
* `Membrane.RemoteControlled.Message.StartOfStream.t()` sent
when one of direct pipeline children informs the pipeline about start of a stream,
* `Membrane.RemoteControlled.Message.EndOfStream.t()` sent
when one of direct pipeline children informs the pipeline about end of a stream,
* `Membrane.RemoteControlled.Message.Notification.t()` sent when pipeline
receives notification from one of its children,
* `Membrane.RemoteControlled.Message.Terminated.t()` sent when the pipeline gracefully terminates.
"""
use Membrane.Pipeline
alias Membrane.Pipeline
alias Membrane.RemoteControlled.Message
alias Membrane.RemoteControlled.Message.{
EndOfStream,
Notification,
PlaybackState,
StartOfStream,
Terminated
}
defmodule State do
  @moduledoc false
  # Internal pipeline state: the controlling process' pid plus the match
  # functions registered via subscribe/2.
  @enforce_keys [:controller_pid]
  defstruct @enforce_keys ++ [matching_functions: []]
end
@doc """
Starts the `Membrane.RemoteControlled.Pipeline` and links it to the current process. The process
that makes the call to the `start_link/1` automatically becomes the controller process.
"""
@spec start_link(GenServer.options()) :: GenServer.on_start()
def start_link(process_options \\ []) do
  Pipeline.start_link(__MODULE__, %{controller_pid: self()}, process_options)
end

@doc """
Does the same as the `start_link/1` but starts the process outside of the supervision tree.
"""
@spec start(GenServer.options()) :: GenServer.on_start()
def start(process_options \\ []) do
  Pipeline.start(__MODULE__, %{controller_pid: self()}, process_options)
end
# Walks the quoted keyword AST and pins (^) every bare variable (a leaf
# node with nil args), so values already bound at the call site are matched
# against rather than rebound inside the receive pattern.
defmacrop pin_leaf_nodes(ast) do
  quote do
    Macro.postwalk(unquote(ast), fn node ->
      if not Macro.quoted_literal?(node) and match?({_name, _ctx, _args}, node) do
        {_name, ctx, args} = node

        case args do
          nil -> {:^, ctx, [node]}
          _not_nil -> node
        end
      else
        node
      end
    end)
  end
end

# Blocks in `receive` until a message struct of `message_type` arrives from
# the given pipeline, optionally constrained by the pinned `keywords`
# fields, and returns that message.
defmacrop do_await(pipeline, message_type, keywords \\ []) do
  keywords = pin_leaf_nodes(keywords)

  quote do
    receive do
      %unquote(message_type){
        unquote_splicing(Macro.expand(keywords, __ENV__)),
        from: ^unquote(pipeline)
      } = msg ->
        msg
    end
  end
end
@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.PlaybackState()`
message with no further constraints, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for any playback state change occurring in the pipeline:
```
Pipeline.await_playback_state(pipeline)
```
"""
@spec await_playback_state(pid()) :: Membrane.RemoteControlled.Message.PlaybackState.t()
def await_playback_state(pipeline) do
  do_await(pipeline, PlaybackState)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.PlaybackState()`
message with the given `state`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the pipeline's playback state to change into `:playing`:
```
Pipeline.await_playback_state(pipeline, :playing)
```
"""
@spec await_playback_state(pid, Membrane.PlaybackState.t()) ::
        Membrane.RemoteControlled.Message.PlaybackState.t()
def await_playback_state(pipeline, playback_state) do
  do_await(pipeline, PlaybackState, state: playback_state)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.StartOfStream()` message
with no further constraints, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `start_of_stream` occurring on any pad of any element in the pipeline:
```
Pipeline.await_start_of_stream(pipeline)
```
"""
@spec await_start_of_stream(pid) :: Membrane.RemoteControlled.Message.StartOfStream.t()
def await_start_of_stream(pipeline) do
  do_await(pipeline, StartOfStream)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.StartOfStream()` message
concerning the given `element`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `start_of_stream` occurring on any pad of the `:element_id` element in the pipeline:
```
Pipeline.await_start_of_stream(pipeline, :element_id)
```
"""
@spec await_start_of_stream(pid(), Membrane.Element.name_t()) ::
        Membrane.RemoteControlled.Message.StartOfStream.t()
def await_start_of_stream(pipeline, element) do
  do_await(pipeline, StartOfStream, element: element)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.StartOfStream()` message
concerning the given `element` and the `pad`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `start_of_stream` occurring on the `:pad_id` pad of the `:element_id` element in the pipeline:
```
Pipeline.await_start_of_stream(pipeline, :element_id, :pad_id)
```
"""
@spec await_start_of_stream(pid(), Membrane.Element.name_t(), Membrane.Pad.name_t()) ::
        Membrane.RemoteControlled.Message.StartOfStream.t()
def await_start_of_stream(pipeline, element, pad) do
  do_await(pipeline, StartOfStream, element: element, pad: pad)
end
@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.EndOfStream()` message
with no further constraints, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `end_of_stream` occurring on any pad of any element in the pipeline:
```
Pipeline.await_end_of_stream(pipeline)
```
"""
@spec await_end_of_stream(pid()) :: Membrane.RemoteControlled.Message.EndOfStream.t()
def await_end_of_stream(pipeline) do
  do_await(pipeline, EndOfStream)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.EndOfStream()` message
concerning the given `element`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `end_of_stream` occurring on any pad of the `:element_id` element in the pipeline:
```
Pipeline.await_end_of_stream(pipeline, :element_id)
```
"""
@spec await_end_of_stream(pid(), Membrane.Element.name_t()) ::
        Membrane.RemoteControlled.Message.EndOfStream.t()
def await_end_of_stream(pipeline, element) do
  do_await(pipeline, EndOfStream, element: element)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.EndOfStream()` message
concerning the given `element` and the `pad`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first `end_of_stream` occurring on the `:pad_id` of the `:element_id` element in the pipeline:
```
Pipeline.await_end_of_stream(pipeline, :element_id, :pad_id)
```
"""
@spec await_end_of_stream(pid(), Membrane.Element.name_t(), Membrane.Pad.name_t()) ::
        Membrane.RemoteControlled.Message.EndOfStream.t()
def await_end_of_stream(pipeline, element, pad) do
  do_await(pipeline, EndOfStream, element: element, pad: pad)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.Notification()`
message with no further constraints, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first notification sent to any element in the pipeline:
```
Pipeline.await_notification(pipeline)
```
"""
@spec await_notification(pid()) :: Membrane.RemoteControlled.Message.Notification.t()
def await_notification(pipeline) do
  do_await(pipeline, Notification)
end

@doc """
Awaits for the first `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.Notification()` message
concerning the given `element`, sent by the process with `pipeline` pid.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the first notification sent to the `:element_id` element in the pipeline:
```
Pipeline.await_notification(pipeline, :element_id)
```
"""
@spec await_notification(pid(), Membrane.Notification.t()) ::
        Membrane.RemoteControlled.Message.Notification.t()
def await_notification(pipeline, element) do
  do_await(pipeline, Notification, element: element)
end

@doc """
Awaits for the `Membrane.RemoteControlled.Message()` wrapping the `Membrane.RemoteControlled.Message.Terminated` message,
which is sent when the pipeline gracefully terminates.

It is required to firstly use the `subscribe/2` to subscribe to a given message before awaiting
for that message.

Usage example:
1) awaiting for the pipeline termination:
```
Pipeline.await_termination(pipeline)
```
"""
@spec await_termination(pid()) :: Membrane.RemoteControlled.Message.Terminated.t()
def await_termination(pipeline) do
  do_await(pipeline, Terminated)
end
@doc """
Subscribes to a given `subscription_pattern`. The `subscription_pattern` should describe some subset
of elements of the `Membrane.RemoteControlled.Pipeline.message_t()` type. The `subscription_pattern`
must be a match pattern.
Usage examples:
1) making the `Membrane.RemoteControlled.Pipeline` send the `Message.StartOfStream` message to the
controlling process when any pad of the `:element_id` element receives `:start_of_stream`:
```
subscribe(pipeline, %Message.StartOfStream{element: :element_id, pad: _})
```
2) making the `Membrane.RemoteControlled.Pipeline` send the `Message.PlaybackState` message to the
controlling process when the pipeline playback state changes to any state
(that is - for all the :stopped, :prepared and :playing playback states).
```
subscribe(pipeline, %Message.PlaybackState{state: _})
```
"""
# Must be a macro: the pattern is compiled into a `match?/2` predicate that is
# registered inside the pipeline process.
defmacro subscribe(pipeline, subscription_pattern) do
quote do
send(
unquote(pipeline),
{:subscription, fn message -> match?(unquote(subscription_pattern), message) end}
)
end
end
@doc """
Sends a list of `Pipeline.Action.t()` to the given `Membrane.RemoteControlled.Pipeline` for execution.
Usage example:
1) making the `Membrane.RemoteControlled.Pipeline` start the `Membrane.ParentSpec`
specified in the action.
```
children = ...
links = ...
actions = [{:spec, %ParentSpec{children: children, links: links}}]
Pipeline.exec_actions(pipeline, actions)
```
"""
@spec exec_actions(pid(), [Pipeline.Action.t()]) :: :ok
def exec_actions(pipeline, actions) do
  # Fire-and-forget: the pipeline process picks this up in `handle_info/3`.
  pipeline |> send({:exec_actions, actions})
  :ok
end
@impl true
def handle_init(opts) do
  # Raises MatchError when the option is missing, same as before.
  %{controller_pid: controller} = opts
  {:ok, %State{controller_pid: controller}}
end
# Each playback-state transition emits a `Message.PlaybackState` to the
# controller (when a subscription matches) and leaves the state untouched.
@impl true
def handle_playing_to_prepared(_ctx, state) do
  %Message.PlaybackState{from: self(), state: :prepared}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_prepared_to_playing(_ctx, state) do
  %Message.PlaybackState{from: self(), state: :playing}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_prepared_to_stopped(_ctx, state) do
  %Message.PlaybackState{from: self(), state: :stopped}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_stopped_to_prepared(_ctx, state) do
  %Message.PlaybackState{from: self(), state: :prepared}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_stopped_to_terminating(_ctx, state) do
  %Message.PlaybackState{from: self(), state: :terminating}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end
# Stream boundary events and element notifications are forwarded to the
# controller as wrapped messages, subject to the registered subscriptions.
@impl true
def handle_element_end_of_stream(element_name, pad_ref, _ctx, state) do
  %Message.EndOfStream{from: self(), element: element_name, pad: pad_ref}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_element_start_of_stream(element_name, pad_ref, _ctx, state) do
  %Message.StartOfStream{from: self(), element: element_name, pad: pad_ref}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end

@impl true
def handle_notification(notification, element, _ctx, state) do
  %Message.Notification{from: self(), data: notification, element: element}
  |> send_event_to_controller_if_subscribed(state)

  {:ok, state}
end
# Actions posted via `exec_actions/2` are handed straight back to Membrane.
@impl true
def handle_info({:exec_actions, actions}, _ctx, state), do: {{:ok, actions}, state}

# A new subscription predicate (from the `subscribe/2` macro) is prepended to
# the list of matchers consulted on every outgoing event.
@impl true
def handle_info({:subscription, pattern}, _ctx, state) do
  updated = %{state | matching_functions: [pattern | state.matching_functions]}
  {:ok, updated}
end

@impl true
def handle_shutdown(reason, state) do
  %Message.Terminated{from: self(), reason: reason}
  |> send_event_to_controller_if_subscribed(state)

  :ok
end
# Forwards `message` to the controller only when at least one subscription
# predicate registered via `subscribe/2` matches it.
defp send_event_to_controller_if_subscribed(message, state) do
  subscribed? = Enum.any?(state.matching_functions, fn matches? -> matches?.(message) end)

  if subscribed?, do: send(state.controller_pid, message)
end
end
|
lib/membrane/remote_controlled/pipeline.ex
| 0.941129
| 0.896523
|
pipeline.ex
|
starcoder
|
defmodule LoggerJSON.Ecto do
@moduledoc """
Logs `Ecto.LogEntry` queries as strings via `Logger`, attaching the
following metadata (all timings in milliseconds, 0.1 ms resolution):
* query_time - the time spent executing the query;
* decode_time - the time spent decoding the result (`0.0` when nil);
* queue_time - the time spent checking the connection out (`0.0` when nil);
* duration - the sum of the three timings above;
* connection_pid - the connection process that executed the query;
* ansi_color - the color that should be used when logging the entry.
For more information see [LogEntry](https://github.com/elixir-ecto/ecto/blob/master/lib/ecto/log_entry.ex)
source code.
"""
require Logger
@doc """
Logs the query string with metadata from `Ecto.LogEntry` at debug level.
"""
@spec log(entry :: Ecto.LogEntry.t()) :: Ecto.LogEntry.t()
def log(entry) do
{query, metadata} = query_and_metadata(entry)
# The logger call will be removed at compile time if
# `compile_time_purge_level` is set to higher than debug.
Logger.debug(query, metadata)
entry
end
@doc """
Overwritten to use JSON.
Logs the given entry at the given level.
"""
@spec log(entry :: Ecto.LogEntry.t(), level :: Logger.level()) :: Ecto.LogEntry.t()
def log(entry, level) do
{query, metadata} = query_and_metadata(entry)
# The logger call will not be removed at compile time,
# because we use level as a variable
Logger.log(level, query, metadata)
entry
end
# Extracts the query string and builds the Logger metadata keyword list
# from an `Ecto.LogEntry`-shaped map.
defp query_and_metadata(entry) do
%{
query: query,
query_time: query_time,
decode_time: decode_time,
queue_time: queue_time,
connection_pid: connection_pid,
ansi_color: ansi_color
} = entry
query_time = format_time(query_time)
decode_time = format_time(decode_time)
queue_time = format_time(queue_time)
metadata = [
query_time: query_time,
decode_time: decode_time,
queue_time: queue_time,
duration: Float.round(query_time + decode_time + queue_time, 3),
connection_pid: connection_pid,
ansi_color: ansi_color
]
{query, metadata}
end
# Converts a `:native` time to milliseconds with 0.1 ms resolution
# (native -> microseconds, truncate to 100 us units, divide by 10).
# NOTE(review): `:micro_seconds` is the legacy Erlang spelling of
# `:microsecond`; still accepted, but deprecated.
defp format_time(nil), do: 0.0
defp format_time(time), do: div(System.convert_time_unit(time, :native, :micro_seconds), 100) / 10
end
|
lib/logger_json/ecto.ex
| 0.90485
| 0.585931
|
ecto.ex
|
starcoder
|
defmodule Dlex.Node do
@moduledoc """
Simple high level API for accessing graphs
## Usage
defmodule Shared do
use Dlex.Node
shared do
field :id, :string, index: ["term"]
field :name, :string, index: ["term"]
end
end
defmodule User do
use Dlex.Node, depends_on: Shared
schema "user" do
field :id, :auto
field :name, :auto
end
end
defmodule User do
use Dlex.Node
schema "user" do
field :id, :auto, depends_on: Shared
field :name, :string, index: ["term"]
field :age, :integer
field :cache, :any, virtual: true
field :owns, :uid
end
end
Dgraph types:
* `:integer`
* `:float`
* `:string`
* `:geo`
* `:datetime`
* `:uid`
* `:auto` - special type, which can be used for `depends_on`
## Reflection
Any schema module will generate the `__schema__` function that can be
used for runtime introspection of the schema:
* `__schema__(:source)` - Returns the source as given to `schema/2`;
* `__schema__(:fields)` - Returns a list of all non-virtual field names;
* `__schema__(:alter)` - Returns a generated alter schema
* `__schema__(:field, field)` - Returns the name of field in database for field in a struct and
vice versa;
* `__schema__(:type, field)` - Returns the type of the given non-virtual field;
Additionally it generates `Ecto` compatible `__changeset__` for using with `Ecto.Changeset`.
"""
alias Dlex.Field
# Records the optional `depends_on` module and imports the schema DSL macros.
defmacro __using__(opts) do
depends_on = Keyword.get(opts, :depends_on, nil)
quote do
@depends_on unquote(depends_on)
import Dlex.Node, only: [shared: 1, shared: 2, schema: 2]
end
end
# Defines a named schema: field accumulation followed by reflection codegen.
defmacro schema(name, block) do
prepare = prepare_block(name, block, :schema)
postprocess = postprocess()
quote do
unquote(prepare)
unquote(postprocess)
end
end
# A shared (nameless) schema; the module itself becomes its own `depends_on`.
defmacro shared(block) do
prepare = prepare_block(nil, block, :shared)
postprocess = postprocess()
quote do
@depends_on __MODULE__
unquote(prepare)
unquote(postprocess)
end
end
# Shared schema with an explicit source name.
defmacro shared(name, block) do
prepare = prepare_block(name, block, :shared)
postprocess = postprocess()
quote do
@depends_on __MODULE__
unquote(prepare)
unquote(postprocess)
end
end
# Registers the accumulating module attributes used by `field`/`relation`,
# then expands the user-supplied block.
defp prepare_block(name, block, schema_type) do
quote do
@name unquote(name)
Module.put_attribute(__MODULE__, :schema_type, unquote(schema_type))
Module.register_attribute(__MODULE__, :fields, accumulate: true)
Module.register_attribute(__MODULE__, :fields_struct, accumulate: true)
Module.register_attribute(__MODULE__, :fields_data, accumulate: true)
Module.register_attribute(__MODULE__, :depends_on_modules, accumulate: true)
import Dlex.Node
unquote(block)
end
end
# Generates the struct, the `__schema__/1,2` reflection clauses and the
# Ecto-compatible `__changeset__/0` from the accumulated attributes.
defp postprocess() do
quote unquote: false do
defstruct [:uid | @fields_struct]
fields = Enum.reverse(@fields)
source = @name
alter = Dlex.Node.__schema_alter___(__MODULE__, source)
def __schema__(:source), do: unquote(source)
def __schema__(:fields), do: unquote(fields)
def __schema__(:fields_data), do: @fields_data
def __schema__(:alter), do: unquote(Macro.escape(alter))
def __schema__(:depends_on), do: unquote(Dlex.Node.__depends_on_modules__(__MODULE__))
for %Dlex.Field{name: name, type: type} <- @fields_data do
def __schema__(:type, unquote(name)), do: unquote(type)
end
def __schema__(:field_types),
do: @fields_data |> Enum.map(fn field -> {field.name, field.db_name, field.type} end)
for %Dlex.Field{name: name, db_name: db_name, type: type, opts: opts} <- @fields_data do
def __schema__(:field, unquote(name)), do: unquote(db_name)
def __schema__(:field, unquote(db_name)), do: {unquote(name), unquote(type)}
def __schema__(:models, unquote(name)),
do: Keyword.get(unquote(opts), :models, [])
def __schema__(:models, unquote(db_name)),
do: Keyword.get(unquote(opts), :models, [])
end
def __schema__(:field, _), do: nil
def __schema__(:models, _), do: []
changeset = Dlex.Node.__gen_changeset__(@fields_data)
def __changeset__(), do: unquote(Macro.escape(changeset))
end
end
@doc false
# Builds the Dgraph alter payload: predicate definitions plus (for `:schema`
# modules only) a type definition listing all fields.
def __schema_alter___(module, source) do
preds =
module
|> Module.get_attribute(:fields_data)
|> Enum.flat_map(&List.wrap(&1.alter))
|> Enum.reverse()
type =
if module |> Module.get_attribute(:schema_type) == :schema do
type_fields =
module
|> Module.get_attribute(:fields_data)
|> Enum.map(&into_type_field/1)
%{"name" => source, "fields" => type_fields}
else
[]
end
%{
"types" => List.wrap(type),
"schema" => preds
}
end
defp into_type_field(%{db_name: name, type: type}) do
%{
"name" => name,
"type" => atom_to_string(type)
}
end
# Maps internal type atoms to Dgraph type strings.
defp atom_to_string(:relation), do: "uid"
defp atom_to_string(:relations), do: "[uid]"
defp atom_to_string([:uid]), do: "[uid]"
defp atom_to_string(:lang), do: "string"
defp atom_to_string(atom), do: atom |> Atom.to_string()
@doc false
# All modules this schema depends on: the `use`-level one plus any
# per-field `depends_on`, deduplicated and sorted.
def __depends_on_modules__(module) do
depends_on_module = module |> Module.get_attribute(:depends_on) |> List.wrap()
:lists.usort(depends_on_module ++ Module.get_attribute(module, :depends_on_modules))
end
@doc false
def __gen_changeset__(fields) do
for %Dlex.Field{name: name, type: type} <- fields, into: %{}, do: {name, ecto_type(type)}
end
# Maps internal type atoms to Ecto changeset types.
defp ecto_type(:datetime), do: :utc_datetime
defp ecto_type(:relation), do: :map
defp ecto_type(:relations), do: {:array, :any}
defp ecto_type(:lang),
do:
{:embed,
%Ecto.Embedded{
cardinality: :many,
on_replace: :update,
on_cast: &Dlex.Lang.changeset/2,
related: Dlex.Lang
}}
defp ecto_type(type), do: type
defmacro field(name, type, opts \\ []) do
quote do
Dlex.Node.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts), @depends_on)
end
end
# `relation :x, :one | :many | :reverse` sugar over `__field__/5`; supplies
# the appropriate default (nil vs []) for the cardinality.
defmacro relation(name, type, opts \\ []) do
quote do
Dlex.Node.__field__(
__MODULE__,
unquote(name),
case unquote(type) do
:one -> :relation
:many -> :relations
:reverse -> :reverse_relation
end,
unquote(opts)
|> Enum.concat(if(unquote(type) == :one, do: [default: nil], else: [default: []])),
@depends_on
)
end
end
@doc false
# Accumulates one field's struct entry, name and `%Field{}` metadata on the
# calling module; virtual fields only get a struct entry.
def __field__(module, name, type, opts, depends_on) do
schema_name = Module.get_attribute(module, :name)
if Keyword.get(opts, :lang, false) do
Module.put_attribute(module, :fields_struct, {name, []})
else
Module.put_attribute(module, :fields_struct, {name, opts[:default]})
end
unless opts[:virtual] do
Module.put_attribute(module, :fields, name)
{db_name, type, alter} = db_field(name, type, opts, schema_name, module, depends_on)
db_name =
if type == :reverse_relation do
"~#{db_name}"
else
db_name
end
field =
if Keyword.get(opts, :lang, false) do
%Field{name: name, type: :lang, db_name: db_name, alter: alter, opts: opts}
else
%Field{name: name, type: type, db_name: db_name, alter: alter, opts: opts}
end
Module.put_attribute(module, :fields_data, field)
end
end
# Resolves the database predicate name, effective type and alter payload for
# a field, delegating to the `depends_on` module's reflection when the field
# is inherited rather than owned.
defp db_field(name, type, opts, schema_name, module, depends_on) do
if depends_on = opts[:depends_on] || depends_on do
put_attribute_if_not_exists(module, :depends_on_modules, depends_on)
with {:error, error} <- Code.ensure_compiled(depends_on),
do: raise("Module `#{depends_on}` not available, error: #{error}")
field_name = [schema_name, Atom.to_string(name)] |> Enum.reject(&is_nil/1) |> Enum.join(".")
if module == depends_on do
{field_name, type, alter_field(field_name, type, opts)}
else
{depends_on.__schema__(:field, name), depends_on.__schema__(:type, name), nil}
end
else
if type == :reverse_relation do
field_name =
cond do
Keyword.has_key?(opts, :model) && Keyword.has_key?(opts, :name) ->
opts[:model].__schema__(:field, string_to_atom(Keyword.get(opts, :name))) ||
"#{opts[:model].__schema__(:source)}.#{Keyword.get(opts, :name)}"
Keyword.has_key?(opts, :name) ->
Keyword.get(opts, :name)
true ->
name
end
{field_name, type, nil}
else
field_name = "#{schema_name}.#{name}"
{field_name, type, alter_field(field_name, type, opts)}
end
end
end
defp string_to_atom(atom) when is_atom(atom), do: atom
# Uses `to_existing_atom` (never creates atoms from input); nil on failure.
defp string_to_atom(string) do
string
|> String.to_existing_atom()
rescue
_ -> nil
end
defp put_attribute_if_not_exists(module, key, value) do
unless module |> Module.get_attribute(key) |> Enum.member?(value),
do: Module.put_attribute(module, key, value)
end
# One predicate's alter map: base predicate/type plus option-derived keys.
defp alter_field(field_name, type, opts) do
basic_alter = %{
"predicate" => field_name,
"type" => db_type(type)
}
opts |> Enum.flat_map(&gen_opt(&1, type)) |> Enum.into(basic_alter)
end
@types_mapping [
integer: "int",
float: "float",
string: "string",
geo: "geo",
datetime: "datetime",
uid: "uid",
boolean: "bool",
lang: "string",
relation: "uid",
relations: "[uid]",
reverse_relation: "[uid]"
]
for {type, dgraph_type} <- @types_mapping do
defp db_type(unquote(type)), do: unquote(dgraph_type)
end
defp db_type([:uid]), do: "[uid]"
# Options consumed elsewhere (not forwarded into the alter payload).
@ignore_keys [:default, :depends_on, :model, :models]
defp gen_opt({key, _value}, _type) when key in @ignore_keys, do: []
defp gen_opt({:index, true}, type), do: [{"index", true}, {"tokenizer", [db_type(type)]}]
defp gen_opt({:index, tokenizers}, :string) when is_list(tokenizers),
do: [{"index", true}, {"tokenizer", tokenizers}]
defp gen_opt({key, value}, _type), do: [{Atom.to_string(key), value}]
end
|
lib/dlex/node.ex
| 0.771972
| 0.409044
|
node.ex
|
starcoder
|
defmodule Kryptiles do
  @moduledoc """
  Cryptographically strong random generators and crypto helpers:
  random strings/digits/bytes, fixed-time comparison, and PBKDF2 (RFC 2898).
  """
  # `use Bitwise` and the `|||`/`^^^` operators are deprecated/removed in
  # recent Elixir; `import Bitwise` provides the `bor/2`/`bxor/2` equivalents.
  import Bitwise

  @doc """
  Returns a cryptographically strong pseudo-random data string.
  Takes a size argument for the length of the string.

  `random_string(10)` returns a random 10-character string such as
  `"do77RukqJo"` (output varies per call).

  ## Examples

      iex> Kryptiles.random_string(0)
      ""
  """
  @spec random_string(integer()) :: binary() | {:error, binary()}
  def random_string(size) when is_integer(size) do
    # (size + 1) * 6 random bits yield enough Base64 characters (6 bits each)
    # to slice `size` characters off the front.
    size
    |> Kernel.+(1)
    |> Kernel.*(6)
    |> random_bits()
    |> case do
      {:error, reason} ->
        {:error, reason}

      bytes ->
        bytes
        |> Base.url_encode64()
        |> String.slice(0, size)
    end
  end

  @doc """
  Returns a cryptographically strong pseudo-random data string consisting of
  only numerical digits (0-9). Takes a size argument for the length of the
  string. `random_digits(10)` returns e.g. `"3149464061"` (varies per call).
  """
  @spec random_digits(integer()) :: binary() | {:error, binary()}
  def random_digits(size) do
    size
    |> Kernel.*(2)
    |> random()
    |> digits(size)
  end

  # Extracts decimal digits from random bytes. Bytes >= 250 are rejected to
  # avoid modulo bias (250 = 25 * 10, so `rem(byte, 10)` is uniform below it).
  defp digits(buffer, size, digits \\ [], pos \\ 0)
  defp digits({:error, reason}, _, _, _), do: {:error, reason}

  defp digits(_buffer, size, digits, _pos) when length(digits) == size do
    digits
    |> Enum.reverse()
    |> Enum.join()
  end

  defp digits(buffer, size, digits, pos) when length(digits) < size and pos >= byte_size(buffer) do
    # Ran out of bytes before collecting enough digits - draw fresh randomness.
    (size * 2)
    |> random()
    |> digits(size, digits)
  end

  defp digits(buffer, size, digits, pos) when length(digits) < size do
    case :erlang.binary_part(buffer, pos, 1) do
      <<part::integer()>> when part < 250 ->
        digits(buffer, size, ["#{Integer.mod(part, 10)}" | digits], pos + 1)

      _ ->
        digits(buffer, size, digits, pos + 1)
    end
  end

  @doc """
  Returns `ceil(bits / 8)` cryptographically strong pseudo-random bytes,
  i.e. at least the requested number of random bits.
  Returns `{:error, reason}` when `bits` is not positive.
  """
  @spec random_bits(integer()) :: binary() | {:error, binary()}
  def random_bits(bits) when bits <= 0, do: {:error, "invalid random bits count"}

  def random_bits(bits) do
    bits
    |> Kernel./(8)
    |> Float.ceil()
    |> Kernel.round()
    |> random()
  end

  @doc false
  @spec random(integer()) :: binary() | {:error, binary()}
  def random(bytes) do
    :crypto.strong_rand_bytes(bytes)
  rescue
    # Raised when the entropy source cannot provide strong randomness.
    ArgumentError -> {:error, "failed generating random bits"}
  end

  # https://github.com/elixir-lang/plug/blob/master/lib/plug/crypto.ex#L94-L114
  # http://codahale.com/a-lesson-in-timing-attacks/
  @doc """
  Compare two strings using fixed time algorithm (to prevent time-based analysis of MAC digest match).
  Returns `true` if the strings match, `false` if they differ.

  ## Examples

      iex> Kryptiles.fixed_time_comparison(<<>>, <<>>)
      true

      iex> Kryptiles.fixed_time_comparison(<<>>, "b0i9XAiBxP")
      false
  """
  @spec fixed_time_comparison(binary(), binary()) :: true | false
  def fixed_time_comparison(left, right) when byte_size(left) == byte_size(right) do
    __fixed_time_comparison__(left, right) == 0
  end

  def fixed_time_comparison(_left, _right), do: false

  defp __fixed_time_comparison__(left, right, acc \\ 0)
  defp __fixed_time_comparison__(<<>>, <<>>, acc), do: acc

  defp __fixed_time_comparison__(<<x, left::binary()>>, <<y, right::binary()>>, acc) do
    # Accumulate XOR differences with OR so every byte is always inspected.
    __fixed_time_comparison__(left, right, bor(acc, bxor(x, y)))
  end

  # bsl(1, 32) - 1 === 4294967295
  @doc """
  Computes a pbkdf2 bitstring [RFC 2898](https://tools.ietf.org/html/rfc2898)
  * `digest` is any of `:md5 | :sha | :sha224 | :sha256 | :sha384 | :sha512`, defaults to `:sha`
  * `iterations` is a `non_neg_integer()`, defaults to `1`

  ## Examples

      iex> keylen = 20
      iex> Kryptiles.pbkdf2("password", "salt", keylen)
      <<12, 96, 200, 15, 150, 31, 14, 113, 243, 169, 181, 36, 175, 96, 18, 6, 47, 224,
        55, 166>>
  """
  @spec pbkdf2(binary(), binary(), pos_integer(), pos_integer, atom()) :: binary() | {:error, binary()}
  def pbkdf2(password, salt, keylen, iterations \\ 1, digest \\ :sha)

  def pbkdf2(_password, _salt, keylen, _iterations, _digest) when not is_integer(keylen) do
    {:error, "invalid keylen: #{inspect keylen}"}
  end

  def pbkdf2(_password, _salt, keylen, _iterations, _digest) when keylen > 4294967295 do
    {:error, "keylen is #{inspect keylen} the maximum keylen is 4294967295"}
  end

  def pbkdf2(_password, _salt, _keylen, iterations, _digest) when not is_integer(iterations) do
    {:error, "invalid iterations: #{inspect iterations}"}
  end

  # One clause per supported digest, generated at compile time.
  for digest <- ~w(md5 sha sha224 sha256 sha384 sha512)a do
    def pbkdf2(password, salt, keylen, iterations, unquote(digest)),
      do: __pbkdf2__(mac_fun(unquote(digest), password), salt, keylen, iterations)
  end

  def pbkdf2(_password, _salt, _keylen, _iterations, digest), do: {:error, "unknown digest: #{inspect digest}"}

  defp __pbkdf2__(fun, salt, keylen, iterations, block_index \\ 1, length \\ 0, acc \\ [])

  defp __pbkdf2__(_fun, _salt, keylen, _iterations, _block_index, length, acc) when length >= keylen do
    acc
    |> :erlang.iolist_to_binary()
    |> :erlang.binary_part(0, keylen)
  end

  defp __pbkdf2__(fun, salt, keylen, iterations, block_index, length, acc) do
    # U1 = PRF(password, salt || INT_32_BE(block_index)); see RFC 2898 5.2.
    initial = fun.(<<salt::binary, block_index::integer-size(32)>>)
    block = iterate(fun, iterations - 1, initial, initial)
    __pbkdf2__(fun, salt, keylen, iterations, block_index + 1, byte_size(block) + length, [acc | block])
  end

  defp iterate(_fun, 0, _prev, acc), do: acc

  defp iterate(fun, iteration, prev, acc) do
    next = fun.(prev)
    iterate(fun, iteration - 1, next, :crypto.exor(next, acc))
  end

  # `:crypto.hmac/3` was removed in OTP 24; `:crypto.mac/4` is the
  # documented replacement.
  defp mac_fun(digest, secret) do
    &:crypto.mac(:hmac, digest, secret, &1)
  end
end
|
lib/kryptiles.ex
| 0.929664
| 0.490602
|
kryptiles.ex
|
starcoder
|
defmodule CouchGears.Database do
@moduledoc """
This module provides CRUD functions for managing either databases or documents.
The main important thing is a `database` module designed to be a 'instance' for certain DB (see examples below).
## Examples:
db = CouchGears.Database.open("db")
db.find("x")
db.close()
Is an equivalent to:
CouchGears.Database.find("db", "x")
also you can use a `database` module from pure Erlang environment:
Db = 'Elixir-CouchGears-Database':open(<<"db">>),
Db:find(<<"x">>),
DB:close().
'Elixir-CouchGears-Database':find(<<"db">>, <<"x">>).
"""
# NOTE(review): this module uses pre-1.0 Elixir syntax (`defrecord`,
# `defrecordp`, `//` default arguments, `List.member?/2`) and will not
# compile on modern Elixir - confirm the targeted Elixir version.
alias CouchGears.Database.Helpers, as: Helpers
alias CouchGears.Records, as: Records
Code.prepend_path("include")
# `Db` mirrors CouchDB's native #db{} record from couch_db.hrl.
defrecord Db, Record.extract(:db, from: "couch_db.hrl")
# Internal instance state: the raw couch handle plus its record wrapper.
defrecordp :database, [:raw_db, :db]
@doc """
Gets associated `db` record. Check a `include/couch_db.hrl` for details.
"""
def db(database(db: db)), do: db
@doc """
Gets associated raw `db` record which has been returned from `couch_db:open_int/2`.
"""
def raw_db(database(raw_db: raw_db)), do: raw_db
@doc """
Opens a `db` and returns a `database` instance or `:no_db_file` atom in case it doesn't exist.
"""
def open(db_name) do
db = do_open(db_name)
unless db == :no_db_file do
db = database(raw_db: db, db: Db.new(db))
end
db
end
@doc """
Opens a `db` and returns a `database` instance or exception in case it doesn't exist.
"""
def open!(db_name) do
db = open(db_name)
if db == :no_db_file do
raise "No db file"
end
db
end
@doc """
Closes associated `db`.
"""
def close(database(raw_db: raw_db)) do
:couch_db.close(raw_db)
database()
end
@doc """
Creates a `db`.
"""
def create_db(db_name, opts // []) do
:couch_server.create(db_name, opts)
end
@doc """
Deletes a `db`.
"""
def delete_db(db_name, opts // []) do
:couch_server.delete(db_name, opts)
end
@doc """
Returns a `document` as a raw `list` or either `:no_db_file`/`:missing` atom.
"""
def find(_doc_id, :no_db_file), do: :no_db_file
def find(doc_id, db) when is_record(db, CouchGears.Database) do
find(doc_id, [], db)
end
def find(db_name, doc_id), do: find(doc_id, [], open(db_name))
@doc """
Returns a `document` as a raw `list` or either `:no_db_file`/`:missing` atom.
An `opts` is a convenience for filtering
## Options
* `except:` - The list of fields which should be cut from a document body
* `only:` - The strict list of fields which should have a returned document
## Examples
Database.find("db", "doc_id", [only: ["_id"]])
# => [{"_id", "doc_id"}]
Database.find("db", "doc_id", [except: ["_id"]])
# => [{"_rev", "1-41f7a51b6f7002e9a41ad4fc466838e4"}]
"""
def find(_doc_id, _opts, :no_db_file), do: :no_db_file
def find(doc_id, opts, db) when is_record(db, CouchGears.Database) do
{ _, doc } = do_find(doc_id, db)
do_filter(doc, opts)
end
def find(db_name, doc_id, opts), do: find(doc_id, opts, open(db_name))
@doc """
Returns a `document` as a raw `list` or either `:no_db_file`/`:missing` atom.
"""
def find_with_rev(_doc_id, _rev, :no_db_file), do: :no_db_file
def find_with_rev(doc_id, rev, database(raw_db: raw_db)) do
case :couch_db.open_doc_revs(raw_db, doc_id, [make_rev(rev)], []) do
{:ok, [{:ok, doc}]} ->
{body} = :couch_doc.to_json_obj(doc, [])
body
_ ->
:missing
end
end
def find_with_rev(db_name, doc_id, rev), do: find_with_rev(doc_id, rev, open(db_name))
@doc """
Creates a `document` and return the `rev` as string or either `:conflict`/`:no_db_file` atom.
"""
def create_doc(_doc, :no_db_file), do: :no_db_file
def create_doc(db_name, doc) when is_list(doc) do
create_doc(doc, open(db_name))
end
def create_doc(doc, db) when is_list(doc) and is_record(db, CouchGears.Database) do
update(doc, db)
end
@doc """
Updates a particular `document` and return a `rev` string or either `:conflict`/`:no_db_file` atom.
"""
def update(_doc, :no_db_file), do: :no_db_file
def update(doc, database(raw_db: raw_db)) when is_list(doc) do
json_doc = :couch_doc.from_json_obj({doc})
{:ok, rev} = :couch_db.update_doc(raw_db, json_doc, [])
:couch_doc.rev_to_str(rev)
end
def update(db_name, doc) when is_list(doc), do: update(doc, open(db_name))
@doc """
Enumerates through particular `db` and pass arguments such a `FullDocInfo` record,
something like `reds` and execution accumulator as a second argument to `callback` function.
Check a `couch_db:enum_docs/4` function usage example for more information.
"""
def enum_docs(db, callback, opts) when is_record(db, CouchGears.Database) and is_function(callback, 3) do
function = fn(raw_full_doc_info, reds, acc) ->
callback.(Records.FullDocInfo.new(raw_full_doc_info), reds, acc)
end
:couch_db.enum_docs(db.raw_db, function, [], opts || [])
end
def enum_docs(db_name, callback, opts), do: enum_docs(open(db_name), callback, opts)
def enum_docs(db_name, callback), do: enum_docs(db_name, callback, [])
# Internal stuff
# Wraps couch_db:open_int/2, normalizing the not-found tuple to an atom.
defp do_open(name) do
case :couch_db.open_int(to_binary(name), []) do
{ :not_found, :no_db_file } -> :no_db_file
{ _, db } -> db
end
end
defp do_find(ddoc, database(raw_db: raw_db)) do
case :couch_db.open_doc(raw_db, ddoc) do
{:ok, doc} ->
{body} = :couch_doc.to_json_obj(doc, [])
{:ok, body }
_ ->
{:not_found, :missing}
end
end
# Applies the `only:`/`except:` field filters to a document body; atoms
# (`:missing` etc.) pass straight through.
defp do_filter(missing, _opts) when is_atom(missing), do: missing
defp do_filter(doc, []), do: doc
defp do_filter(doc, opts) do
fun = case opts do
[except: fields] ->
fn({k,_}) -> !List.member?(fields, k) end
[only: fields] ->
fn({k,_}) -> List.member?(fields, k) end
end
Enum.filter(doc, fun)
end
defp make_rev(rev), do: :couch_doc.parse_rev(rev)
end
|
lib/couch_gears/database.ex
| 0.809012
| 0.418816
|
database.ex
|
starcoder
|
defprotocol Digger do
@moduledoc """
Documentation for Digger Protocol
"""
# Unimplemented data types fall back to the `Any` implementation.
@fallback_to_any true
alias Digger.Types
# NOTE(review): default arguments (`\\`) in `defprotocol` function
# signatures are not supported by modern Elixir - confirm the targeted
# Elixir version, or move the defaults into the implementations.
@doc """
'Atomize' a valid Types.data_type according to the protocol implementation
"""
@spec atomize(Types.data_type(), keyword()) :: Types.valid_return_type()
def atomize(
data_type,
opts \\ [type: :key, key_transform: :atomize, value_transform: :none]
)
@doc """
Camel case a valid Types.data_type according to the protocol implementation
"""
@spec camel_case(Types.data_type(), keyword()) :: Types.valid_return_type()
def camel_case(
data_type,
opts \\ [type: :key, key_transform: :upper, value_transform: :none]
)
@doc """
'Dasherize' a valid Types.data_type according to the protocol implementation
"""
@spec dasherize(Types.data_type(), keyword()) :: Types.valid_return_type()
def dasherize(
data_type,
opts \\ [type: :key, key_transform: :dasherize, value_transform: :none]
)
@doc """
Lower case first letter of a valid Types.data_type according to the protocol implementation
"""
@spec lowercase_first(Types.data_type(), keyword()) :: Types.valid_return_type()
def lowercase_first(
data_type,
opts \\ [type: :key, key_transform: :lower, value_transform: :none]
)
@doc """
snake_case a valid Types.data_type according to the protocol implementation
"""
@spec snake_case(Types.data_type(), keyword()) :: Types.valid_return_type()
def snake_case(data_type, opts \\ [type: :key, key_transform: :snake, value_transform: :none])
@doc """
'Stringify' a valid Types.data_type according to the protocol implementation
"""
@spec stringify(Types.data_type(), keyword()) :: Types.valid_return_type()
def stringify(
data_type,
opts \\ [type: :key, key_transform: :stringify, value_transform: :none]
)
@doc """
Upper case the first letter of a valid Types.data_type according to
the protocol implementation
"""
@spec upcase_first(Types.data_type(), keyword()) :: Types.valid_return_type()
def upcase_first(
data_type,
opts \\ [type: :key, key_transform: :upper, value_transform: :none]
)
end
|
lib/protocols/digger_protocol.ex
| 0.872266
| 0.512815
|
digger_protocol.ex
|
starcoder
|
defmodule Dingo.CoreUtils do
  @moduledoc """
  Utility functions for core gameplay
  """

  # Empty 5x5 grid (rows 0..4 of columns 0..4, all nil), built programmatically.
  @def_board (for row <- 0..4, into: %{} do
                {row, for(col <- 0..4, into: %{}, do: {col, nil})}
              end)

  # Top-left -> bottom-right diagonal cells: [0,0] .. [4,4].
  @right_diagonals for i <- 0..4, do: [i, i]
  # Top-right -> bottom-left diagonal cells: [0,4] .. [4,0].
  @left_diagonals for i <- 0..4, do: [i, 4 - i]

  @doc """
  Returns a tuple of bingo 5 * 5 board as 2-D map and board_index (for o(n) searching) as 1-D map
  """
  @spec generate_random_bingo_board :: {map(), map()}
  def generate_random_bingo_board do
    1..25
    |> Enum.shuffle()
    |> generate_board(@def_board)
  end

  @doc """
  Prints the bingo board on console.
  * board - bingo board (map)
  """
  @spec print_board(map()) :: :ok
  def print_board(board) do
    Enum.each(board, fn {row_key, _row} ->
      board[row_key]
      |> Enum.each(fn {_col_key, cell} -> IO.write("[#{inspect cell}] ") end)

      IO.puts("\n")
    end)
  end

  @doc """
  Return no:of lines cleared by the given move
  * board - bingo board (map)
  * cell - [row, column]
  """
  @spec count_cleared_lines(map(), list()) :: number()
  def count_cleared_lines(board, cell) do
    [&count_horizontal_line/2, &count_vertical_line/2, &count_diagonal_line/2]
    |> Enum.map(fn counter -> counter.(board, cell) end)
    |> Enum.sum()
  end

  # Walks the shuffled numbers, filling the board row-major and recording each
  # number's [row, col] in the index map.
  defp generate_board(numbers, board, index \\ 0, board_index \\ %{})
  defp generate_board([], board, _index, board_index), do: {board, board_index}

  defp generate_board([num | rest], board, index, board_index) do
    row = div(index, 5)
    col = rem(index, 5)

    generate_board(
      rest,
      put_in(board[row][col], %{"num" => num, "check" => false}),
      index + 1,
      put_in(board_index[num], [row, col])
    )
  end

  defp count_horizontal_line(board, [row, _col]) do
    if Enum.all?(board[row], fn {_col, cell} -> cell["check"] === true end), do: 1, else: 0
  end

  defp count_vertical_line(board, [_row, col]) do
    if Enum.all?(0..4, fn row -> board[row][col]["check"] === true end), do: 1, else: 0
  end

  defp count_diagonal_line(board, [row, col]) do
    case is_diagonal?(row, col) do
      # The center cell sits on both diagonals.
      {true, "middle"} -> count_left_diagonal(board) + count_right_diagonal(board)
      {true, "right"} -> count_right_diagonal(board)
      {true, "left"} -> count_left_diagonal(board)
      _ -> 0
    end
  end

  defp is_diagonal?(2, 2), do: {true, "middle"}
  defp is_diagonal?(n, n), do: {true, "right"}
  defp is_diagonal?(row, col) when row + col == 4, do: {true, "left"}
  defp is_diagonal?(_, _), do: {false, "invalid"}

  defp count_left_diagonal(board), do: full_diagonal(board, @left_diagonals)
  defp count_right_diagonal(board), do: full_diagonal(board, @right_diagonals)

  # 1 when every cell along `cells` is checked, 0 otherwise.
  defp full_diagonal(board, cells) do
    if Enum.all?(cells, fn [row, col] -> board[row][col]["check"] === true end), do: 1, else: 0
  end
end
|
lib/dingo/core_utils.ex
| 0.749637
| 0.528047
|
core_utils.ex
|
starcoder
|
defmodule EpicenterWeb.Test.Pages.CaseInvestigationStartInterview do
import ExUnit.Assertions
import Phoenix.LiveViewTest
alias Epicenter.Cases.CaseInvestigation
alias Epicenter.Test
alias EpicenterWeb.Test.Pages
alias Phoenix.LiveViewTest.View
@form_id "case-investigation-interview-start-form"
# Navigates to the start-interview page of the given case investigation.
def visit(%Plug.Conn{} = conn, %CaseInvestigation{id: id}) do
  Pages.visit(conn, "/case-investigations/#{id}/start-interview")
end
def assert_date_started(%View{} = view, :today) do
[actual_date] =
view
|> Pages.parse()
|> Test.Html.find("input##{@form_id}_date_started")
|> Test.Html.attr("value")
assert actual_date =~ ~r"\d\d\/\d\d\/\d\d\d\d"
view
end
def assert_date_started(%View{} = view, date_string) do
[actual_date] =
view
|> Pages.parse()
|> Test.Html.find("input##{@form_id}_date_started")
|> Test.Html.attr("value")
assert actual_date == date_string
view
end
def assert_here(view_or_conn_or_html) do
view_or_conn_or_html |> Pages.assert_on_page("case-investigation-start-interview")
view_or_conn_or_html
end
def assert_person_interviewed_selections(%View{} = view, expected_selections) do
assert Pages.actual_selections(view, "start-interview-form-person-interviewed", "radio") == expected_selections
view
end
def assert_person_interviewed_sentinel_value(%View{} = view, expected_value) do
[actual] =
view
|> Pages.parse()
|> Test.Html.find("input##{@form_id}_person_interviewed___self__[type=radio]")
|> Test.Html.attr("value")
assert actual == expected_value
view
end
def assert_proxy_selected(%View{} = view, expected_proxy_name) do
assert %{"Proxy" => true} = Pages.actual_selections(view, "start-interview-form-person-interviewed", "radio")
[actual_name] =
view
|> Pages.parse()
|> Test.Html.find("input##{@form_id}_person_interviewed[type=text]")
|> Test.Html.attr("value")
assert actual_name == expected_proxy_name
view
end
def assert_time_started(%View{} = view, :now) do
{actual_time, actual_am_pm} = actual_time_started(view)
assert actual_time =~ ~r"\d\d:\d\d"
assert actual_am_pm in ~w{AM PM}
view
end
def assert_time_started(%View{} = view, expected_time_string, expected_am_pm) do
{actual_time, actual_am_pm} = actual_time_started(view)
assert actual_time == expected_time_string
assert actual_am_pm == expected_am_pm
view
end
defp actual_time_started(view) do
parsed = view |> Pages.parse()
[actual_time] =
parsed
|> Test.Html.find("input##{@form_id}_time_started")
|> Test.Html.attr("value")
[actual_am_pm] =
parsed
|> Test.Html.find("select##{@form_id}_time_started_am_pm option[selected]")
|> Enum.map(&Test.Html.text(&1))
{actual_time, actual_am_pm}
end
def change_form(%View{} = view, attrs) do
view |> element("#" <> @form_id) |> render_change(attrs)
view
end
def datetime_started(%View{} = view) do
state = view |> Pages.form_state()
datestring = state["#{@form_id}[date_started]"]
timestring = state["#{@form_id}[time_started]"]
ampmstring = state["#{@form_id}[time_started_am_pm]"]
Timex.parse!("#{datestring} #{timestring} #{ampmstring}", "{0M}/{0D}/{YYYY} {h12}:{m} {AM}")
end
end
|
test/support/pages/case_investigation_start_interview.ex
| 0.589598
| 0.48987
|
case_investigation_start_interview.ex
|
starcoder
|
defmodule Himamo.BaumWelch do
  @moduledoc ~S"""
  Defines the Baum-Welch algorithm.

  See `Himamo.BaumWelch.StepE` and `Himamo.BaumWelch.StepM` for details on its
  respective expectation and maximization steps.
  """

  defmodule Stats do
    @moduledoc ~S"""
    Defines the statistical properties of an HMM.

    See functions in `Himamo.BaumWelch.StepE` for their definitions.
    """
    defstruct [:alpha, :beta, :gamma, :xi, :alpha_times_beta]
    @type t :: %__MODULE__{
      alpha: Himamo.Matrix.t,
      beta: Himamo.Matrix.t,
      gamma: Himamo.Matrix.t,
      xi: Himamo.Matrix.t,
      alpha_times_beta: Himamo.Matrix.t,
    }
  end

  @type stats_list :: [{Himamo.ObsSeq.t, Himamo.Model.probability, Stats.t}]

  alias Himamo.BaumWelch.{StepE, StepM}

  @doc ~S"""
  Computes the Baum-Welch E-step statistics for `obs_seq` under `model`.
  """
  @spec compute_stats(Himamo.Model.t, Himamo.ObsSeq.t) :: Stats.t
  def compute_stats(model, obs_seq) do
    # Qualified StepE calls (instead of `import StepE`) keep each
    # computation's origin obvious; xi needs alpha/beta, gamma needs xi.
    alpha = StepE.compute_alpha(model, obs_seq)
    beta = StepE.compute_beta(model, obs_seq)
    xi = StepE.compute_xi(model, obs_seq, alpha: alpha, beta: beta)

    %Stats{
      alpha: alpha,
      beta: beta,
      gamma: StepE.compute_gamma(model, obs_seq, xi: xi),
      xi: xi,
      alpha_times_beta: StepE.compute_alpha_times_beta(alpha, beta),
    }
  end

  @doc ~S"""
  Computes stats for every observation sequence in `obs_seq_list`, pairing
  each sequence with its probability and its stats.
  """
  @spec compute_stats_list(Himamo.Model.t, list(Himamo.ObsSeq.t)) :: stats_list
  def compute_stats_list(model, obs_seq_list) do
    Enum.map(obs_seq_list, fn obs_seq ->
      stats = compute_stats(model, obs_seq)
      {obs_seq, Himamo.ForwardBackward.compute(stats.alpha), stats}
    end)
  end

  @doc ~S"""
  Returns a new HMM with re-estimated parameters `A`, `B`, and `π`.
  """
  @spec reestimate_model(Himamo.Model.t, Himamo.BaumWelch.stats_list) :: Himamo.Model.t
  def reestimate_model(model, stats_list) do
    %{model |
      a: StepM.reestimate_a(model, stats_list),
      b: StepM.reestimate_b(model, stats_list),
      pi: StepM.reestimate_pi(model, stats_list),
    }
  end
end
|
lib/himamo/baum_welch.ex
| 0.793146
| 0.678999
|
baum_welch.ex
|
starcoder
|
defmodule P0 do
  @moduledoc """
  Solutions to "Ninety-Nine Problems" 01-09, implemented over plain lists.
  """

  # Problem 01 - Return the last element of a list (nil for an empty list).
  def last([]), do: nil
  def last([a]), do: a
  def last([_ | tail]), do: last(tail)

  # Problem 02 - Return the last two elements (nil when fewer than two).
  def last_two([]), do: nil
  def last_two([_]), do: nil
  def last_two([a, b]), do: [a, b]
  def last_two([_ | tail]), do: last_two(tail)

  # Problem 03 - Return the i-th (1-based) element, or nil when out of range.
  def at(_, []), do: nil
  def at(1, [head | _]), do: head
  def at(i, [_ | tail]), do: at(i - 1, tail)

  # Problem 04 - Count the elements of a list (tail-recursive accumulator).
  def length(list), do: _length(list, 0)
  defp _length([], res), do: res
  defp _length([_ | tail], res), do: _length(tail, res + 1)

  # Problem 05 - Reverse a list by prepending into an accumulator (O(n)).
  # Uses `[head | res]` instead of the original `[head] ++ res` (same result,
  # idiomatic cons).
  def rev(list), do: _rev(list, [])
  defp _rev([], res), do: res
  defp _rev([head | tail], res), do: _rev(tail, [head | res])

  # Problem 06 - A list is a palindrome when it equals its own reverse.
  # (The `if ... do true else false end` wrapper was redundant.)
  def is_palindrome(list), do: list == rev(list)

  # Problem 07 - Flatten arbitrarily nested lists into one flat list.
  def flatten(list), do: list |> _flatten([]) |> rev()
  defp _flatten([], res), do: res
  defp _flatten([head | tail], res) when is_list(head), do: _flatten(tail, _flatten(head, res))
  defp _flatten([head | tail], res), do: _flatten(tail, [head | res])

  # Problem 08 - Collapse runs of consecutive equal elements to one element.
  def compress(list), do: list |> _compress([]) |> rev()
  defp _compress([], res), do: res
  defp _compress([head | tail], []), do: _compress(tail, [head])
  defp _compress([head | tail], [res_head | _] = res) do
    # `!=` (value equality) matches the original semantics, so e.g. 1 and 1.0
    # compress into a single run.
    if head != res_head, do: _compress(tail, [head | res]), else: _compress(tail, res)
  end

  # Problem 09 - Pack consecutive duplicates of list elements into sublists.
  # Fix: packing the empty list now yields [] (it previously yielded [[]],
  # because the terminating clause always appended the - possibly empty -
  # current group).
  def pack([]), do: []
  def pack(list), do: _pack(list, [], [])
  defp _pack([], acc, final), do: final ++ [acc]
  defp _pack([head | tail], [], final), do: _pack(tail, [head], final)
  defp _pack([head | tail], [acc_head | _] = acc, final) do
    if acc_head == head do
      _pack(tail, [head | acc], final)
    else
      _pack(tail, [head], final ++ [acc])
    end
  end
end
|
solutions/1-10.ex
| 0.607197
| 0.575588
|
1-10.ex
|
starcoder
|
defmodule Cielo.Transaction do
  @moduledoc """
  This module makes transaction calls for credit, debit, bankslips and recurrent payments.
  Cielo API reference:
  - [Credit](https://developercielo.github.io/manual/cielo-ecommerce#cart%C3%A3o-de-cr%C3%A9dito)
  - [Debit](https://developercielo.github.io/manual/cielo-ecommerce#cart%C3%A3o-de-d%C3%A9bito)
  - [BankSlip](https://developercielo.github.io/manual/cielo-ecommerce#boleto)
  - [Recurrent](https://developercielo.github.io/manual/cielo-ecommerce#recorr%C3%AAncia)
  """

  # Endpoint templates; `:payment_id` / `:amount` placeholders are filled in
  # via `HTTP.build_path/3`.
  @endpoint "sales/"
  @capture_endpoint "sales/:payment_id/capture"
  @cancel_endpoint "sales/:payment_id/void"
  @cancel_partial_endpoint "sales/:payment_id/void?amount=:amount"
  @deactivate_recurrent_payment_endpoint "RecurrentPayment/:payment_id/Deactivate"

  alias Cielo.{Utils, HTTP}

  alias Cielo.Entities.{
    BankSlipTransactionRequest,
    CreditTransactionRequest,
    DebitTransactionRequest,
    RecurrentTransactionRequest
  }

  @doc """
  Create a credit transaction if passed attributes satisfy a validation criteria
  ## Successful transaction
      iex(1)> attrs = %{
        customer: %{name: "<NAME>"},
        merchant_order_id: "2014111703",
        payment: %{
          amount: 15700,
          credit_card: %{
            brand: "Visa",
            card_number: "1234123412341231",
            card_on_file: %{reason: "Unscheduled", usage: "Used"},
            expiration_date: "12/2030",
            holder: "<NAME>",
            security_code: "123"
          },
          installments: 1,
          is_crypto_currency_negotiation: true,
          soft_descriptor: "123456789ABCD",
          type: "CreditCard"
        }
      }
      iex(2)> Cielo.Transaction.credit(attrs)
      {:ok,
       %{
         customer: %{name: "<NAME>"},
         merchant_order_id: "2014111703",
         payment: %{
           amount: 15700,
           authenticate: false,
           authorization_code: "437560",
           capture: false,
           country: "BRA",
           credit_card: ...
           links: [
             %{
               href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/...",
               method: "GET",
               rel: "self"
             },
             %{
               href: "https://apisandbox.cieloecommerce.cielo.com.br/1/sales/.../capture",
               method: "PUT",
               rel: "capture"
             },
             %{
               href: "https://apisandbox.cieloecommerce.cielo.com.br/1/sales/.../void",
               method: "PUT",
               rel: "void"
             }
           ],
           payment_id: "26e5da86-d975-4e2f-aa25-862b5a43e9f4",
           ...
           type: "CreditCard"
         }
       }}

  ## Failed transaction
      iex(1)> attrs = %{
        customer: %{name: "<NAME>"},
        merchant_order_id: "2014111703",
        payment: %{
          amount: 15700,
          credit_card: %{
            brand: "Visa",
            card_number: "1234123412341231",
            card_on_file: %{reason: "Unscheduled", usage: "Used"},
            expiration_date: "12/2030",
            holder: "Teste Holder"
          },
          installments: 1,
          is_crypto_currency_negotiation: true,
          soft_descriptor: "123456789ABCD",
          type: "CreditCard"
        }
      }
      iex(2)> Cielo.Transaction.credit(attrs)
      {:error,
       %{
         errors: [
           payment: %{
             errors: [credit_card: %{errors: [security_code: "can't be blank"]}]
           }
         ]
       }}
  """
  @spec credit(map) :: {:ok, map()} | {:error, map(), list()} | {:error, any}
  def credit(params) do
    make_post_transaction(CreditTransactionRequest, params)
  end

  @doc """
  Create a debit transaction if passed attributes satisfy a validation criteria
  ## Successful transaction
      iex(1)> attrs = %{
        customer: %{name: "<NAME>"},
        merchant_order_id: "2014121201",
        payment: %{
          amount: 15700,
          authenticate: true,
          debit_card: %{
            brand: "Visa",
            card_number: "4551870000000183",
            expiration_date: "12/2030",
            holder: "Teste Holder",
            security_code: "123"
          },
          is_crypto_currency_negotiation: true,
          return_url: "http://www.cielo.com.br",
          type: "DebitCard"
        }
      }
      iex(2)> Cielo.Transaction.debit(attrs)
      {:ok,
       %{
         customer: %{name: "<NAME>"},
         merchant_order_id: "2014121201",
         payment: %{
           amount: 15700,
           authenticate: true,
           authentication_url: "https://authenticationmocksandbox.cieloecommerce.cielo.com.br/CardAuthenticator/Receive/...",
           country: "BRA",
           currency: "BRL",
           debit_card: %{
             brand: "Visa",
             card_number: "455187******0183",
             expiration_date: "12/2030",
             holder: "Teste Holder",
             save_card: false
           },
           is_crypto_currency_negotiation: true,
           is_splitted: false,
           links: [
             %{
               href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/...",
               method: "GET",
               rel: "self"
             }
           ],
           payment_id: "dde3931d-4dd4-4ab9-8d87-73cbfb1c513a",
           proof_of_sale: "430002",
           provider: "Simulado",
           received_date: "2020-10-18 17:53:42",
           recurrent: false,
           return_code: "1",
           return_url: "http://www.cielo.com.br",
           status: 0,
           tid: "1018055342725",
           type: "DebitCard"
         }
       }}

  ## Failed transaction
      iex(1)> attrs = %{
        customer: %{name: "<NAME>"},
        merchant_order_id: "2014111703",
        payment: %{
          amount: 15700,
          credit_card: %{
            brand: "Visa",
            card_number: "1234123412341231",
            card_on_file: %{reason: "Unscheduled", usage: "Used"},
            holder: "Test<NAME>"
          },
          installments: 1,
          is_crypto_currency_negotiation: true,
          soft_descriptor: "123456789ABCD",
          type: "CreditCard"
        }
      }
      iex(2)> Cielo.Transaction.credit(attrs)
      {:error,
       %{
         errors: [
           payment: %{
             errors: [debit_card: %{errors: [expiration_date: "can't be blank"]}]
           }
         ]
       }}
  """
  @spec debit(map) :: {:ok, map()} | {:error, map(), list()} | {:error, any}
  def debit(params) do
    make_post_transaction(DebitTransactionRequest, params)
  end

  @doc """
  Create a bankslip transaction if passed attributes satisfy a validation criteria
  ## Successful transaction
      iex(1)> attrs = %{
        customer: %{
          address: %{
            city: "Rio de Janeiro",
            complement: "Sala 934",
            country: "BRA",
            district: "Centro",
            number: "160",
            state: "RJ",
            street: "Avenida Marechal Câmara",
            zip_code: "22750012"
          },
          identity: "1234567890",
          name: "<NAME>"
        },
        merchant_order_id: "2014111706",
        payment: %{
          address: "Rua Teste",
          amount: 15700,
          assignor: "Empresa Teste",
          boleto_number: "123",
          demonstrative: "Desmonstrative Teste",
          expiration_date: "2020-12-31",
          identification: "11884926754",
          instructions: "Aceitar somente até a data de vencimento, após essa data juros de 1% dia.",
          provider: "INCLUIR PROVIDER",
          type: "Boleto"
        }
      }
      iex(2)> Cielo.Transaction.bankslip(attrs)
      {:ok,
       %{
         customer: %{
           address: %{
             city: "Rio de Janeiro",
             complement: "Sala 934",
             country: "BRA",
             district: "Centro",
             number: "160",
             state: "RJ",
             street: "Avenida Marechal Câmara",
             zip_code: "22750012"
           },
           identity: "1234567890",
           name: "<NAME>"
         },
         merchant_order_id: "2014111706",
         payment: %{
           address: "Rua Teste",
           amount: 15700,
           assignor: "Empresa Teste",
           bank: 0,
           bar_code_number: "00092848600000157009999250000000012399999990",
           boleto_number: "123-2",
           country: "BRA",
           currency: "BRL",
           demonstrative: "Desmonstrative Teste",
           digitable_line: "00099.99921 50000.000013 23999.999909 2 84860000015700",
           expiration_date: "2020-12-31",
           identification: "11884926754",
           instructions: "Aceitar somente até a data de vencimento, após essa data juros de 1% dia.",
           is_splitted: false,
           links: [
             %{
               href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/...",
               method: "GET",
               rel: "self"
             }
           ],
           payment_id: "8a946b9a-a9ab-4c16-bebb-0565d11b88f3",
           provider: "Simulado",
           received_date: "2020-10-18 18:18:29",
           status: 1,
           type: "Boleto",
           url: "https://transactionsandbox.pagador.com.br/post/pagador/reenvia.asp/..."
         }
       }}

  ## Warning
  Consult the [provider list](https://developercielo.github.io/manual/cielo-ecommerce#transa%C3%A7%C3%A3o-de-boletos) to check if your bank are integrated by cielo in this API
  """
  @spec bankslip(map) :: {:ok, map()} | {:error, map(), list()} | {:error, any}
  def bankslip(params) do
    make_post_transaction(BankSlipTransactionRequest, params)
  end

  @doc """
  **(DEPRECATED)** This function was moved to [`Cielo.Recurrency.create_payment/1`](Cielo.Recurrency.html#create_payment/1)
  Also, you can use main module [`Cielo.recurrent_transaction/1`](Cielo.html#recurrent_transaction/1)
  """
  @spec recurrent(map) :: {:ok, map()} | {:error, map(), list()} | {:error, any}
  def recurrent(params) do
    make_post_transaction(RecurrentTransactionRequest, params)
  end

  @doc """
  Capture an uncaptured credit card transaction with partial capture
  valid option `amount`
  ## Successful transaction
      iex(1)> Cielo.Transaction.capture("26e5da86-d975-4e2f-aa25-862b5a43e9f4", %{amount: 5000})
      {:ok,
       %{
         authorization_code: "214383",
         links: [
           %{
             href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/...",
             method: "GET",
             rel: "self"
           },
           %{
             href: "https://apisandbox.cieloecommerce.cielo.com.br/1/sales/.../void",
             method: "PUT",
             rel: "void"
           }
         ],
         proof_of_sale: "793143",
         provider_return_code: "6",
         provider_return_message: "Operation Successful",
         reason_code: 0,
         reason_message: "Successful",
         return_code: "6",
         return_message: "Operation Successful",
         status: 2,
         tid: "1020082643193"
       }}
      iex(2)> Cielo.Transaction.capture("26e5da86-d975-4e2f-aa25-862b5a43e9f4")
      {:error, :bad_request, [%{code: 308, message: "Transaction not available to capture"}]}
  """
  @spec capture(binary(), map()) :: {:error, any} | {:error, any, any} | {:ok, map}
  def capture(payment_id, params) when is_binary(payment_id) and is_map(params) do
    with_valid_guid(payment_id, fn ->
      @capture_endpoint
      |> HTTP.build_path(":payment_id", "#{payment_id}")
      |> HTTP.encode_url_args(params)
      |> HTTP.put()
    end)
  end

  def capture(_, _), do: {:error, "Not Valid arguments"}

  @doc """
  Capture an uncaptured credit card transaction
  ## Successful transaction
      iex(1)> Cielo.Transaction.capture("26e5da86-d975-4e2f-aa25-862b5a43e9f4")
      {:ok,
       %{
         authorization_code: "214383",
         links: [
           %{
             href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/...",
             method: "GET",
             rel: "self"
           },
           %{
             href: "https://apisandbox.cieloecommerce.cielo.com.br/1/sales/.../void",
             method: "PUT",
             rel: "void"
           }
         ],
         proof_of_sale: "793143",
         provider_return_code: "6",
         provider_return_message: "Operation Successful",
         reason_code: 0,
         reason_message: "Successful",
         return_code: "6",
         return_message: "Operation Successful",
         status: 2,
         tid: "1020082643193"
       }}
      iex(2)> Cielo.Transaction.capture("26e5da86-d975-4e2f-aa25-862b5a43e9f4")
      {:error, :bad_request, [%{code: 308, message: "Transaction not available to capture"}]}
  """
  @spec capture(binary()) :: {:error, any} | {:error, any, any} | {:ok, map}
  def capture(payment_id) when is_binary(payment_id) do
    with_valid_guid(payment_id, fn ->
      @capture_endpoint
      |> HTTP.build_path(":payment_id", "#{payment_id}")
      |> HTTP.put()
    end)
  end

  def capture(_), do: {:error, "Not Binary Payment Id"}

  @doc """
  Deactivate a recurrent payment transaction
  **(DEPRECATED)** This function was moved to [`Cielo.Recurrency.deactivate/1`](Cielo.Recurrency.html#deactivate/1)
  or main module [`Cielo.deactivate_recurrent/1`](Cielo.html#deactivate_recurrent/1)
  ## Successful transaction
      iex(1)> Cielo.Transaction.deactivate_recurrent_payment("26e5da86-d975-4e2f-aa25-862b5a43e9f4")
      {:ok, ""}
  """
  @spec deactivate_recurrent_payment(binary()) :: {:error, any} | {:error, any, any} | {:ok, any}
  def deactivate_recurrent_payment(recurrent_payment_id) do
    with_valid_guid(recurrent_payment_id, fn ->
      @deactivate_recurrent_payment_endpoint
      |> HTTP.build_path(":payment_id", "#{recurrent_payment_id}")
      |> HTTP.put()
    end)
  end

  @doc """
  Cancel a card payment partially, voiding only `amount`
  ## Successful transaction
      iex(1)> Cielo.Transaction.cancel_payment("26e5da86-d975-4e2f-aa25-862b5a43e9f4", 1000)
      {:ok,
       %{
         authorization_code: "693066",
         links: [
           %{
             href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/{PaymentId}",
             method: "GET",
             rel: "self"
           }
         ],
         proof_of_sale: "4510712",
         return_code: "0",
         return_message: "Operation Successful",
         status: 2,
         tid: "0719094510712"
       }}
  """
  @spec cancel_payment(binary(), non_neg_integer()) :: {:error, any} | {:ok, map}
  def cancel_payment(payment_id, amount) do
    with_valid_guid(payment_id, fn ->
      @cancel_partial_endpoint
      |> HTTP.build_path(":payment_id", "#{payment_id}")
      |> HTTP.build_path(":amount", "#{amount}")
      |> HTTP.put()
    end)
  end

  @doc """
  Cancel a card payment
  ## Successful transaction
      iex(1)> Cielo.Transaction.cancel_payment("26e5da86-d975-4e2f-aa25-862b5a43e9f4")
      {:ok,
       %{
         authorization_code: "693066",
         links: [
           %{
             href: "https://apiquerysandbox.cieloecommerce.cielo.com.br/1/sales/{PaymentId}",
             method: "GET",
             rel: "self"
           }
         ],
         proof_of_sale: "4510712",
         return_code: "9",
         return_message: "Operation Successful",
         status: 10,
         tid: "0719094510712"
       }}
  """
  @spec cancel_payment(binary()) :: {:error, any} | {:ok, map}
  def cancel_payment(payment_id) do
    with_valid_guid(payment_id, fn ->
      @cancel_endpoint
      |> HTTP.build_path(":payment_id", "#{payment_id}")
      |> HTTP.put()
    end)
  end

  # Validates a changeset for `module` against `params` and, when valid,
  # posts the original `params` to `endpoint`.
  # NOTE(review): the raw `params` are posted, not the changeset's applied
  # data — presumably intentional (validation only); confirm before changing.
  @doc false
  def make_post_transaction(module, params, endpoint \\ @endpoint) do
    module
    |> struct()
    |> module.changeset(params)
    |> case do
      %Ecto.Changeset{valid?: true} ->
        HTTP.post(endpoint, params)

      error ->
        {:error, Utils.changeset_errors(error)}
    end
  end

  # Single source of the GUID guard + error tuple (previously repeated in five
  # functions with `:true ->`/`:false ->` case branches). The capture/2 clause
  # formerly returned {:error, "Not valid GUID"}; it is unified here with the
  # "Invalid GUID" message used by every other clause.
  defp with_valid_guid(guid, request_fun) do
    if Utils.valid_guid?(guid) do
      request_fun.()
    else
      {:error, "Invalid GUID"}
    end
  end
end
|
lib/cielo/transaction.ex
| 0.732113
| 0.413181
|
transaction.ex
|
starcoder
|
defmodule Day3 do
  @moduledoc """
  Advent of Code 2018, day 3 driver: parses the claim list, builds the fabric
  occupancy map, and prints/times the answers for both parts.
  """

  # Supported time unit for System.monotonic_time/1. The original wrapped the
  # deprecated `:milli_seconds` alias in a spurious `unquote/1`.
  @time_unit :millisecond

  # Reads the input, runs both parts, and prints answers plus wall-clock
  # timings in milliseconds.
  def solve do
    config =
      prepare_input()
      |> Day3.prepare_config()

    start = System.monotonic_time(@time_unit)
    IO.puts("Part one answer:")

    matrices =
      config
      |> Day3.build_matrices()

    matrices
    |> Day3.Part1.solve()
    |> IO.puts()

    time_part_one = System.monotonic_time(@time_unit) - start
    IO.puts("Part one took #{time_part_one} milliseconds")

    start = System.monotonic_time(@time_unit)
    IO.puts("Part two answer:")

    config
    |> Day3.Part2.solve(matrices)
    |> IO.puts()

    time_part_two = System.monotonic_time(@time_unit) - start
    IO.puts("Part two took #{time_part_two} milliseconds")
    IO.puts("Total run time #{time_part_one + time_part_two} milliseconds")
  end

  # Reads the puzzle input file and strips trailing whitespace per line.
  defp prepare_input do
    "../../inputFiles/day3/input.txt"
    |> File.stream!()
    |> Stream.map(&String.trim_trailing/1)
    |> Enum.to_list()
  end

  @doc """
  Parses each raw claim line (e.g. `"#1 @ 1,3: 4x4"`) into a map with
  `:id`, `:top`, `:left`, `:width` and `:height`.
  """
  def prepare_config(input) do
    input
    |> Enum.map(&matrices_config/1)
  end

  defp matrices_config(configuration) do
    split =
      configuration
      |> String.split("@")

    # "#1 " -> 1
    id =
      split
      |> hd
      |> String.trim()
      |> String.split("#")
      |> tl
      |> Enum.join()
      |> String.to_integer()

    # " 1,3: 4x4" -> position/size specs
    split
    |> tl
    |> Enum.join()
    |> String.split(":")
    |> Enum.map(&String.trim/1)
    |> matrice_specs(id)
  end

  defp matrice_specs([pos, size], id) do
    [x, y] =
      String.split(pos, ",")
      |> Enum.map(&String.to_integer/1)

    [width, height] =
      String.split(size, "x")
      |> Enum.map(&String.to_integer/1)

    %{id: id, top: y, left: x, width: width, height: height}
  end

  @doc """
  Builds a map from `{x, y}` coordinates to the claiming id; cells claimed
  by more than one claim are marked with `-1`.
  """
  def build_matrices(input) do
    Enum.reduce(input, %{}, fn config, acc ->
      Enum.reduce(config.left..(config.left + config.width - 1), acc, fn x, acc ->
        Enum.reduce(config.top..(config.top + config.height - 1), acc, fn y, acc ->
          key = {x, y}
          if acc[key], do: Map.put(acc, key, -1), else: Map.put(acc, key, config.id)
        end)
      end)
    end)
  end
end
defmodule Day3.Part1 do
  @moduledoc false

  # Counts the cells claimed by more than one claim (marked -1 in the
  # coordinate map built by Day3.build_matrices/1).
  def solve(input) do
    Enum.count(input, fn {_coordinate, claim} -> claim == -1 end)
  end
end
defmodule Day3.Part2 do
  @moduledoc false

  # Finds the single claim whose entire area survived intact: count how many
  # cells each id still owns (overlaps were overwritten with -1), then keep
  # the claims whose cell count equals width * height. Returns that claim's
  # id, or :error unless exactly one such claim exists.
  def solve(input, matrices) do
    owned_cell_counts =
      matrices
      |> Map.values()
      |> Enum.filter(&(&1 > 0))
      |> Enum.frequencies()

    intact =
      Enum.filter(input, fn config ->
        Map.get(owned_cell_counts, config.id) == config.width * config.height
      end)

    case intact do
      [only] -> only.id
      _ -> :error
    end
  end
end
|
elixir/day3/lib/day3.ex
| 0.548674
| 0.421343
|
day3.ex
|
starcoder
|
defmodule Mbcs do
  @moduledoc """
  Wrapper for erlang-mbcs.
  This module provides functions for character encoding conversion.
  See `https://code.google.com/p/erlang-mbcs/` for detail.
  ## Usage
      # Start mbcs server
      iex> Mbcs.start
      :ok
      # Convert UTF-8 to Shift_JIS
      iex> Mbcs.encode!("九条カレン", :cp932)
      <<139, 227, 143, 240, 131, 74, 131, 140, 131, 147>>
      # Convert Shift_JIS to UTF-8, and return as a list
      iex> Mbcs.decode!([139, 227, 143, 240, 131, 74, 131, 140, 131, 147], :cp932, return: :list)
      [20061, 26465, 12459, 12524, 12531]
  ## Support encodings
  * `:cp037`
  * `:cp437`
  * `:cp500`
  * `:cp737`, `:cp775`
  * `:cp850`, `:cp852`, `:cp855`, `:cp857`, `:cp860`, `:cp861`, `:cp862`, `:cp863`, `:cp864`, `:cp865`, `:cp866`, `:cp869`, `:cp874`, `:cp875`
  * `:cp932`, `:cp936`, `:gbk`, `:cp949`, `:cp950`, `:big5`
  * `:cp1026`, `:cp1250`, `:cp1251`, `:cp1252`, `:cp1253`, `:cp1254`, `:cp1255`, `:cp1256`, `:cp1257`, `:cp1258`
  * `:cp10000`, `:cp10006`, `:cp10007`, `:cp10029`, `:cp10079`, `:cp10081`
  * `:utf8`, `:utf16`, `:utf16le`, `:utf16be`, `:utf32`, `:utf32le`, `:utf32be`
  ## Options
  * return: `:list`, `:binary`
  * error: `:strict`, `:ignore`, `:replace`
  * replace: `non_neg_integer`
  * bom: `true`, `false`
  ## License
  Copyright (c) 2014 woxtu
  Licensed under the Boost Software License, Version 1.0.
  """
  # Starts the underlying :mbcs server (the Usage example above starts it
  # before any conversion call).
  def start do
    :mbcs.start
  end
  # Stops the underlying :mbcs server.
  def stop do
    :mbcs.stop
  end
  # Encodes `string` into encoding `to`; returns {:ok, result} or
  # {:error, reason}. Bodiless head declares the default for `options`.
  def encode(string, to, options \\ [])
  def encode(string, to, options) when is_bitstring(string) do
    # Valid UTF-8 strings are converted to a codepoint charlist; any other
    # bitstring is treated as raw bytes.
    to_list = if String.valid?(string), do: &to_charlist/1, else: &:erlang.bitstring_to_list/1
    encode(to_list.(string), to, options)
  end
  def encode(string, to, options) when is_list(string) do
    # :mbcs.encode returns the encoded value directly or an {:error, _} tuple;
    # wrap the success case in {:ok, _}.
    case :mbcs.encode(string, to, options) do
      {:error, reason} -> {:error, reason}
      result -> {:ok, result}
    end
  end
  # Same as encode/3 but raises a specific exception (see to_error/1) on failure.
  def encode!(string, to, options \\ [])
  def encode!(string, to, options) when is_bitstring(string) do
    to_list = if String.valid?(string), do: &to_charlist/1, else: &:erlang.bitstring_to_list/1
    encode!(to_list.(string), to, options)
  end
  def encode!(string, to, options) when is_list(string) do
    case :mbcs.encode(string, to, options) do
      {:error, reason} -> raise to_error(reason)
      result -> result
    end
  end
  # Decodes `string` from encoding `from`. Returns a charlist when
  # `return: :list` is given, otherwise a binary.
  def decode(string, from, options \\ []) do
    case :mbcs.decode(string, from, options) do
      {:error, reason} -> {:error, reason}
      result -> if options[:return] == :list, do: {:ok, result}, else: {:ok, List.to_string(result)}
    end
  end
  # Same as decode/3 but raises on failure.
  def decode!(string, from, options \\ []) do
    case :mbcs.decode(string, from, options) do
      {:error, reason} -> raise to_error(reason)
      result -> if options[:return] == :list, do: result, else: List.to_string(result)
    end
  end
  # Converts `string` directly from encoding `from` to encoding `to`.
  def from_to(string, from, to, options \\ []) do
    case :mbcs.from_to(string, from, to, options) do
      {:error, reason} -> {:error, reason}
      result -> {:ok, result}
    end
  end
  # Same as from_to/4 but raises on failure.
  def from_to!(string, from, to, options \\ []) do
    case :mbcs.from_to(string, from, to, options) do
      {:error, reason} -> raise to_error(reason)
      result -> result
    end
  end
  # One exception type per :mbcs error reason (mapped in to_error/1 below).
  defmodule UnknownOptionError, do: defexception [:message]
  defmodule UnknownEncodingError, do: defexception [:message]
  defmodule UnmappingUnicodeError, do: defexception [:message]
  defmodule IllegalListError, do: defexception [:message]
  defmodule UndefinedCharacterError, do: defexception [:message]
  defmodule UnmappingCharacterError, do: defexception [:message]
  defmodule IncompleteMultibyteSequenceError, do: defexception [:message]
  defmodule UnmappingMultibyteCharacterError, do: defexception [:message]
  defmodule UnknownError, do: defexception [:message]
  # Maps an {:error, reason} payload from :mbcs onto the exception structs
  # above; anything unrecognized falls through to UnknownError.
  defp to_error({:unknown_option, [option: option]}) do
    UnknownOptionError.exception(message: "option #{inspect option}")
  end
  # NOTE(review): ":unkonwn_encoding" is misspelled, presumably mirroring the
  # exact atom emitted by erlang-mbcs — confirm against the library source
  # before "fixing" it, or this clause will stop matching.
  defp to_error({:unkonwn_encoding, [encoding: encoding]}) do
    UnknownEncodingError.exception(message: "encoding #{inspect encoding}")
  end
  defp to_error({:unmapping_unicode, [unicode: code, pos: pos]}) do
    UnmappingUnicodeError.exception(message: "code #{code} in position #{pos}")
  end
  defp to_error({:illegal_list, [list: list, line: line]}) do
    IllegalListError.exception(message: "list #{inspect list} at line #{line}")
  end
  defp to_error({:undefined_character, [character: character, pos: pos]}) do
    UndefinedCharacterError.exception(message: "character #{character} in position #{pos}")
  end
  defp to_error({:unmapping_character, [character: character, pos: pos]}) do
    UnmappingCharacterError.exception(message: "character #{character} in position #{pos}")
  end
  defp to_error({:incomplete_multibyte_sequence, [leadbyte: leadbyte, pos: pos]}) do
    IncompleteMultibyteSequenceError.exception(message: "leadbyte #{leadbyte} in position #{pos}")
  end
  defp to_error({:unmapping_multibyte_character, [multibyte_character: character, pos: pos]}) do
    UnmappingMultibyteCharacterError.exception(message: "character #{character} in position #{pos}")
  end
  defp to_error(reason) do
    UnknownError.exception(message: inspect reason)
  end
end
|
lib/mbcs.ex
| 0.78609
| 0.430746
|
mbcs.ex
|
starcoder
|
defmodule InductiveGraph.Internal do
@moduledoc """
Functions to manage internal representation of inductive graph.
"""
alias InductiveGraph.Utilities
@type value :: InductiveGraph.value
@type edge_value :: InductiveGraph.edge_value
@type vertex_value :: InductiveGraph.vertex_value
@type vertex :: InductiveGraph.vertex
@type neighbor :: InductiveGraph.neighbor
@type edge :: InductiveGraph.edge
@type tagged_vertex :: InductiveGraph.tagged_vertex
@type tagged_edge :: InductiveGraph.tagged_edge
@type adjacents :: %{required(neighbor) => [edge_value]}
@type predecessors :: adjacents
@type successors :: adjacents
@type context :: {predecessors, vertex_value, successors}
@type t :: %{required(vertex) => context}
@doc """
Creates an empty graph (a map with no vertices).
"""
@doc construction: true
@spec empty_graph() :: t
def empty_graph(), do: Map.new()
@doc """
Determines if `graph` contains no vertices.
"""
@doc inspection: true
@spec empty?(t) :: boolean
def empty?(graph)
def empty?(graph) when is_map(graph), do: map_size(graph) == 0
@doc """
Updates `context` by applying `function` to the part selected by
`update_type`: the whole context, its vertex value, or one of its
adjacency maps.
"""
@doc update: true
@spec update_context(context, :context | :predecessors | :vertex_value | :successors, (context -> context) | (adjacents -> adjacents) | (value -> value)) :: context
def update_context(context = {predecessors, vertex_value, successors}, update_type, function) do
  case update_type do
    :context -> function.(context)
    :predecessors -> {function.(predecessors), vertex_value, successors}
    :successors -> {predecessors, vertex_value, function.(successors)}
    :vertex_value -> {predecessors, function.(vertex_value), successors}
  end
end
@doc """
Adds `edge_values` under `neighbor` in `adjacents`, appending to any
edge values already recorded for that neighbor.
"""
@doc construction: true
@spec add_edges_to_adjacents(adjacents, neighbor, [edge_value]) :: adjacents
def add_edges_to_adjacents(adjacents, neighbor, edge_values) do
  Map.update(adjacents, neighbor, edge_values, fn existing -> existing ++ edge_values end)
end
@doc """
Adds `edge_values` under `neighbor` to the predecessor or successor
adjacency map (`position`) of `context`.
"""
@doc construction: true
@spec add_edges_to_context(context, neighbor, [edge_value], :predecessors | :successors) :: context
def add_edges_to_context(context, neighbor, edge_values, position) do
  updater = fn adjacents -> add_edges_to_adjacents(adjacents, neighbor, edge_values) end
  update_context(context, position, updater)
end
@doc """
Inserts `tagged_edge` (`{from_vertex, to_vertex, edge_value}`) into `graph`.

Returns `{:ok, graph}`, or `:error` when either endpoint is absent.
"""
@doc construction: true
@spec insert_tagged_edge(t, tagged_edge) :: {:ok, t} | :error
def insert_tagged_edge(graph, tagged_edge)
def insert_tagged_edge(graph, {from_vertex, to_vertex, edge_value}) do
  # Both endpoints must already exist; the edge is then recorded twice:
  # once as a successor of `from_vertex` and once as a predecessor of
  # `to_vertex`. Any failed step falls through to :error.
  with true <- Map.has_key?(graph, from_vertex),
       true <- Map.has_key?(graph, to_vertex),
       {:ok, graph} <- Utilities.map_update(graph, from_vertex, &add_edges_to_context(&1, to_vertex, [edge_value], :successors)),
       {:ok, graph} <- Utilities.map_update(graph, to_vertex, &add_edges_to_context(&1, from_vertex, [edge_value], :predecessors)) do
    {:ok, graph}
  else
    _error -> :error
  end
end
@doc """
Inserts each of `tagged_edges` into `graph`, returning `{:ok, graph}` or
`:error` as soon as any single insertion fails.
"""
@doc construction: true
@spec insert_tagged_edges(t, [tagged_edge]) :: {:ok, t} | :error
def insert_tagged_edges(graph, tagged_edges) do
  Enum.reduce_while(tagged_edges, {:ok, graph}, fn tagged_edge, {:ok, acc} ->
    case insert_tagged_edge(acc, tagged_edge) do
      {:ok, updated} -> {:cont, {:ok, updated}}
      :error -> {:halt, :error}
    end
  end)
end
@doc """
Creates a fresh context around `vertex_value`, with no predecessors or
successors.
"""
@doc construction: true
@spec new_context(vertex_value) :: context
def new_context(vertex_value), do: {Map.new(), vertex_value, Map.new()}
@doc """
Creates a graph from `tagged_vertices` and `tagged_edges`: each vertex gets
an empty context, then all edges are inserted.
"""
@doc construction: true
@spec make_graph([tagged_vertex], [tagged_edge]) :: {:ok, t} | :error
def make_graph(tagged_vertices, tagged_edges) do
  tagged_vertices
  |> Map.new(fn {vertex, vertex_value} -> {vertex, new_context(vertex_value)} end)
  |> insert_tagged_edges(tagged_edges)
end
@doc """
Removes all edges keyed by `neighbor` from the predecessor or successor
adjacency map (`position`) of `context`.
"""
@doc destruction: true
@spec remove_edges_from_context(context, neighbor, :predecessors | :successors) :: context
def remove_edges_from_context(context, neighbor, position) do
  update_context(context, position, &Map.delete(&1, neighbor))
end
@doc """
Removes edges keyed by `neighbor` from the `position` adjacency map in the
contexts of each of `vertices`, returning `{:ok, graph}` or `:error`.
"""
@doc destruction: true
@spec prune_adjacents(t, [vertex], neighbor, :predecessors | :successors) :: {:ok, t} | :error
def prune_adjacents(graph, vertices, neighbor, position) do
  step = fn
    vertex, {:ok, acc} ->
      Utilities.map_update(acc, vertex, &remove_edges_from_context(&1, neighbor, position))

    _vertex, :error ->
      :error
  end

  Enum.reduce(vertices, {:ok, graph}, step)
end
@doc """
Converts an internal `adjacents` map to the public `[{edge_value, neighbor}]`
list form, emitting one pair per stored edge value.
"""
@doc conversion: true
@spec from_adjacents(adjacents) :: InductiveGraph.adjacents
def from_adjacents(adjacents) do
  for {neighbor, edge_values} <- adjacents, edge_value <- edge_values do
    {edge_value, neighbor}
  end
end
@doc """
Converts the public `[{edge_value, neighbor}]` list form into the internal
`adjacents` map, grouping edge values per neighbor.
"""
@doc conversion: true
@spec to_adjacents(InductiveGraph.adjacents) :: adjacents
def to_adjacents(adjacents) do
  Enum.reduce(adjacents, %{}, fn {edge_value, neighbor}, acc ->
    add_edges_to_adjacents(acc, neighbor, [edge_value])
  end)
end
@doc """
Converts `{[{edge_value, neighbor}], vertex, vertex_value, [{edge_value, neighbor}]}`
to context.

The vertex itself is dropped; both adjacency lists are converted to maps.
"""
@doc conversion: true
@spec to_context(InductiveGraph.context) :: context
def to_context(context)

def to_context({predecessors, _vertex, vertex_value, successors}) do
  {
    to_adjacents(predecessors),
    vertex_value,
    to_adjacents(successors)
  }
end
@doc """
Converts `context` to `{[{edge_value, neighbor}], vertex, vertex_value, [{edge_value, neighbor}]}`.

The supplied `vertex` is inserted into the second tuple position.
"""
@doc conversion: true
@spec from_context(context, vertex) :: InductiveGraph.context
def from_context(context, vertex)

def from_context({predecessors, vertex_value, successors}, vertex) do
  {
    from_adjacents(predecessors),
    vertex,
    vertex_value,
    from_adjacents(successors)
  }
end
@doc """
Decomposes `graph` into the context containing `vertex` and the remaining
graph.

Returns `{:ok, context, rest}` where `context` is in the
`InductiveGraph.context` (list) form, or `:error` if `vertex` is not in the
graph or pruning a neighbor fails.
"""
@doc destruction: true
@spec decompose(t, vertex) :: {:ok, InductiveGraph.context, t} | :error
def decompose(graph, vertex) do
  # Fetch the vertex's context, remove the vertex from the graph, then strip
  # every edge that still references it from its neighbors' contexts.
  with {:ok, {predecessors, vertex_value, successors}} <- Map.fetch(graph, vertex),
       graph = Map.delete(graph, vertex),
       # Self-loops are dropped before pruning so we never prune the removed
       # vertex itself.
       vertex_removed_predecessors = Map.delete(predecessors, vertex),
       {:ok, graph} <- prune_adjacents(graph, Map.keys(vertex_removed_predecessors), vertex, :successors),
       vertex_removed_successors = Map.delete(successors, vertex),
       {:ok, graph} <- prune_adjacents(graph, Map.keys(vertex_removed_successors), vertex, :predecessors) do
    predecessors = from_adjacents(vertex_removed_predecessors)
    # NOTE(review): the returned context converts the ORIGINAL `successors`
    # (self-loop included) while predecessors use the self-loop-free map —
    # presumably deliberate so a self-loop appears exactly once, on the
    # successor side; confirm against the intended inductive-graph semantics.
    successors = from_adjacents(successors)
    {:ok, {predecessors, vertex, vertex_value, successors}, graph}
  else
    _error -> :error
  end
end
@doc """
Lists all vertices in `graph`.

Each entry is a `{vertex, vertex_value}` pair taken from the stored contexts.
"""
@doc inspection: true
@spec list_tagged_vertices(t) :: [tagged_vertex]
def list_tagged_vertices(graph) do
  for {vertex, {_predecessors, vertex_value, _successors}} <- graph do
    {vertex, vertex_value}
  end
end
@doc """
Counts number of vertices in `graph`.

Constant time: delegates to `map_size/1` on the underlying map.
"""
@doc inspection: true
@spec count_vertices(t) :: non_neg_integer
def count_vertices(graph), do: map_size(graph)
@doc """
Gets range of vertex in `graph`.
Returns `{:ok, minimum, maximum}` for graphs with at least one vertex. Returns
`:error` for empty graph.
"""
@doc inspection: true
@spec vertex_range(t) :: {:ok, minimum :: integer, maximum :: integer} | :error
def vertex_range(graph) do
  case Map.keys(graph) do
    [] ->
      :error

    vertices ->
      # One pass over the keys yields both extremes.
      {minimum, maximum} = Enum.min_max(vertices)
      {:ok, minimum, maximum}
  end
end
@doc """
Inserts `tagged_vertices` into `graph`.

Stops with `:error` on the first vertex that already exists.
"""
@doc construction: true
@spec insert_tagged_vertices(t, [tagged_vertex]) :: {:ok, t} | :error
def insert_tagged_vertices(graph, tagged_vertices) do
  Enum.reduce(tagged_vertices, {:ok, graph}, fn
    tagged_vertex, {:ok, acc} -> insert_tagged_vertex(acc, tagged_vertex)
    _tagged_vertex, :error -> :error
  end)
end
@doc """
Inserts `tagged_vertex` into `graph`.

Returns `:error` when the vertex is already present; otherwise stores a fresh
empty context under it.
"""
@doc construction: true
@spec insert_tagged_vertex(t, tagged_vertex) :: {:ok, t} | :error
def insert_tagged_vertex(graph, tagged_vertex)

def insert_tagged_vertex(graph, {vertex, vertex_value}) do
  if Map.has_key?(graph, vertex) do
    :error
  else
    {:ok, Map.put(graph, vertex, new_context(vertex_value))}
  end
end
@doc """
Lists all tagged edges in `graph`.

Walks every context's successor adjacents, emitting one
`{from_vertex, to_vertex, edge_value}` triple per stored edge value.
"""
@doc inspection: true
@spec list_tagged_edges(t) :: [tagged_edge]
def list_tagged_edges(graph) do
  Enum.flat_map(graph, fn {from_vertex, {_predecessors, _vertex_value, successors}} ->
    for {to_vertex, edge_values} <- successors, edge_value <- edge_values do
      {from_vertex, to_vertex, edge_value}
    end
  end)
end
@doc """
Pretty prints inductive representation of `graph`.
If `count` is provided, then up to `count` number of contexts will be shown.
"""
@doc inspection: true
@spec pretty_print(t, integer) :: String.t
def pretty_print(graph, count \\ -1) do
  # Contexts are printed highest vertex first.
  descending = graph |> Map.keys() |> Enum.sort(&>=/2)
  pretty_print(graph, descending, count, "| ")
end
# Pretty prints inductive representation of `graph`.
#
# Decomposes one context per vertex in the given order, appending its
# `inspect` output; stops early with "InductiveGraph" once `count` hits 0,
# and with "Empty" once no vertices remain.
@spec pretty_print(t, [vertex], integer, String.t) :: String.t
defp pretty_print(graph, vertices, count, result)
defp pretty_print(_graph, [], _count, result), do: result <> "Empty"
defp pretty_print(_graph, _vertices, 0, result), do: result <> "InductiveGraph"
defp pretty_print(graph, [vertex | vertices], count, result) do
  # Assertive match: `vertex` came from this graph, so decomposition
  # is expected to succeed.
  {:ok, context, graph} = decompose(graph, vertex)
  result = result <> inspect(context) <> "\n& "
  pretty_print(graph, vertices, count - 1, result)
end
@doc """
Merges `context` into `graph`.

The context is in `InductiveGraph.context` (list) form. Returns `:error` if
the vertex already exists or any referenced neighbor is missing.
"""
@doc construction: true
@spec merge(t, InductiveGraph.context) :: {:ok, t} | :error
def merge(graph, context)
def merge(graph, {predecessors, vertex, vertex_value, successors}) do
  # Folds one `{edge_value, neighbor}` pair into both the neighbor's context
  # in the graph (on the flipped side) and the new vertex's context (on the
  # given side); short-circuits once an :error appears.
  process =
    fn
      # NOTE(review): this first binding is an *edge* value despite being
      # named `vertex_value` (it shadows the outer `vertex_value` binding).
      {vertex_value, neighbor}, {:ok, graph, context}, position ->
        with {:ok, graph} <- Utilities.map_update(graph, neighbor, &add_edges_to_context(&1, vertex, [vertex_value], flip_adjacents_type(position))) do
          context = add_edges_to_context(context, neighbor, [vertex_value], position)
          {:ok, graph, context}
        else
          _error -> :error
        end
      _adjacent, :error, _position ->
        :error
    end
  # Refuse to overwrite an existing vertex; otherwise wire both adjacency
  # directions before storing the assembled context.
  with false <- Map.has_key?(graph, vertex),
       context = new_context(vertex_value),
       {:ok, graph, context} <- List.foldl(predecessors, {:ok, graph, context}, &process.(&1, &2, :predecessors)),
       {:ok, graph, context} <- List.foldl(successors, {:ok, graph, context}, &process.(&1, &2, :successors)) do
    {:ok, Map.put(graph, vertex, context)}
  else
    _error -> :error
  end
end
# Flips the adjacents type (predecessors <-> successors).
@spec flip_adjacents_type(:predecessors) :: :successors
@spec flip_adjacents_type(:successors) :: :predecessors
defp flip_adjacents_type(position) do
  case position do
    :predecessors -> :successors
    :successors -> :predecessors
  end
end
@doc """
Applies `function` to `adjacents`.

Every edge value in every neighbor's list is transformed; neighbors are kept
as-is.
"""
@doc update: true
@spec map_adjacents(adjacents, function) :: adjacents
def map_adjacents(adjacents, function) do
  Map.new(adjacents, fn {neighbor, edge_values} ->
    {neighbor, Enum.map(edge_values, function)}
  end)
end
@doc """
Applies `function` to every context in `graph`.

Each stored context is converted to the `InductiveGraph.context` (list) form,
transformed, and converted back.
"""
@doc update: true
@spec map_graph(t, (InductiveGraph.context -> InductiveGraph.context)) :: t
def map_graph(graph, function) do
  Map.new(graph, fn {vertex, context} ->
    updated =
      context
      |> from_context(vertex)
      |> function.()
      |> to_context()

    {vertex, updated}
  end)
end
@doc """
Applies `function` to every vertex value in `graph`.

Adjacents are left untouched.
"""
@doc update: true
@spec map_vertices(t, (vertex_value -> vertex_value)) :: t
def map_vertices(graph, function) do
  Map.new(graph, fn {vertex, context} ->
    {vertex, update_context(context, :vertex_value, function)}
  end)
end
@doc """
Applies `function` to every edge value in `graph`.

Both sides of every context are transformed; vertex values are untouched.
"""
@doc update: true
@spec map_edges(t, (edge_value -> edge_value)) :: t
def map_edges(graph, function) do
  Map.new(graph, fn {vertex, context} ->
    updated =
      context
      |> update_context(:predecessors, &map_adjacents(&1, function))
      |> update_context(:successors, &map_adjacents(&1, function))

    {vertex, updated}
  end)
end
@doc """
Applies `vertex_function` to every vertex value and `edge_function` to every
edge value in `graph`.
"""
@doc update: true
@spec map_vertices_and_edges(t, (vertex_value -> vertex_value), (edge_value -> edge_value)) :: t
def map_vertices_and_edges(graph, vertex_function, edge_function) do
  Map.new(graph, fn {vertex, context} ->
    updated =
      context
      |> update_context(:predecessors, &map_adjacents(&1, edge_function))
      |> update_context(:successors, &map_adjacents(&1, edge_function))
      |> update_context(:vertex_value, vertex_function)

    {vertex, updated}
  end)
end
@doc """
Determines if `vertex` is in `graph`.
"""
@doc inspection: true
@spec has_vertex?(t, vertex) :: boolean
def has_vertex?(graph, vertex) do
  match?({:ok, _context}, Map.fetch(graph, vertex))
end
@doc """
Determines if `edge` is in `graph`.

An edge `{from, to}` exists when `from` is a vertex and `to` appears in its
successor adjacents.
"""
@doc inspection: true
@spec has_edge?(t, edge) :: boolean
def has_edge?(graph, edge)

def has_edge?(graph, {from_vertex, to_vertex}) do
  case Map.fetch(graph, from_vertex) do
    {:ok, {_predecessors, _vertex_value, successors}} ->
      Map.has_key?(successors, to_vertex)

    :error ->
      false
  end
end
end
|
lib/inductive_graph/internal.ex
| 0.916959
| 0.696433
|
internal.ex
|
starcoder
|
defmodule JaSerializer.Serializer do
  @moduledoc """
  Define a serialization schema.
  Provides `has_many/2`, `has_one/2`, `attributes/1` and `location/1` macros
  to define how your data (struct or map) will be rendered in the
  JSONAPI.org 1.0 format.
  Defines `format/1`, `format/2` and `format/3` used to convert data for
  encoding in your JSON library of choice.
  ## Example
      defmodule PostSerializer do
        use JaSerializer
        location "/posts/:id"
        attributes [:title, :body, :excerpt, :tags]
        has_many :comments, links: [related: "/posts/:id/comments"]
        has_one :author, serializer: PersonSerializer, include: true
        def excerpt(post, _conn) do
          [first | _ ] = String.split(post.body, ".")
          first
        end
      end
      post = %Post{
        id: 1,
        title: "jsonapi.org + Elixir = Awesome APIs",
        body: "so. much. awesome.",
        author: %Person{name: "Alan"}
      }
      post
      |> PostSerializer.format
      |> Poison.encode!
  """
  use Behaviour

  @type id :: String.t | Integer
  @type data :: Map

  @doc """
  The id to be used in the resource object.
  http://jsonapi.org/format/#document-resource-objects
  Default implementation attempts to get the :id field from the struct.
  To override simply define the id function:
      def id(struct, _conn), do: struct.slug
  """
  defcallback id(data, Plug.Conn.t) :: id

  @doc """
  The type to be used in the resource object.
  http://jsonapi.org/format/#document-resource-objects
  Default implementation attempts to infer the type from the serializer
  module's name. For example:
      MyApp.UserView becomes "user"
      MyApp.V1.Serializers.Post becomes "post"
      MyApp.V1.CommentsSerializer becomes "comments"
  To override simply define the type function:
      def type, do: "category"
  You may also specify a dynamic type which receives the data
  and connection as parameters:
      def type, do: fn(model, _conn) -> model.type end
  """
  defcallback type() :: String.t | fun()

  @doc """
  Returns a map of attributes to be mapped.
  The default implementation relies on the `attributes/1` macro to define
  which fields to be included in the map.
      defmodule UserSerializer do
        attributes [:email, :name, :is_admin]
      end
      UserSerializer.attributes(user, conn)
      # %{email: "...", name: "...", is_admin: "..."}
  You may override this method and use `super` to filter attributes:
      defmodule UserSerializer do
        attributes [:email, :name, :is_admin]
        def attributes(user, conn) do
          attrs = super(user, conn)
          if conn.assigns[:current_user].is_admin do
            attrs
          else
            Map.take(attrs, [:email, :name])
          end
        end
      end
      UserSerializer.attributes(user, conn)
      # %{email: "...", name: "..."}
  You may also skip using the `attributes/1` macro altogether in favor of
  just defining `attributes/2`.
      defmodule UserSerializer do
        def attributes(user, conn) do
          Map.take(user, [:email, :name])
        end
      end
      UserSerializer.attributes(user, conn)
      # %{email: "...", name: "..."}
  """
  defcallback attributes(map, Plug.Conn.t) :: map

  @doc """
  Adds meta data to the individual resource being serialized.
  NOTE: To add meta data to the top level object pass the `meta:` option into
  YourSerializer.format/3.
  A nil return value results in no meta key being added to the serializer.
  A map return value will be formated with JaSerializer.Formatter.format/1.
  The default implementation returns nil.
  """
  defcallback meta(map, Plug.Conn.t) :: map | nil

  @doc false
  defmacro __using__(_) do
    quote do
      @behaviour JaSerializer.Serializer
      @links []
      @attributes []
      @relations []
      import JaSerializer.Serializer, only: [
        serialize: 2, attributes: 1, location: 1, links: 1,
        has_many: 2, has_one: 2, has_many: 1, has_one: 1
      ]
      unquote(define_default_type(__CALLER__.module))
      # Explicit parentheses on zero-arity calls: calling them without parens
      # is deprecated in recent Elixir versions.
      unquote(define_default_id())
      unquote(define_default_attributes())
      unquote(define_default_meta())
      @before_compile JaSerializer.Serializer
    end
  end

  # Builds the default `type/0` implementation by deriving a type name from
  # the serializer module's name (stripping "Serializer"/"View" suffixes).
  defp define_default_type(module) do
    type = module
            |> Atom.to_string
            |> String.split(".")
            |> List.last
            |> String.replace("Serializer", "")
            |> String.replace("View", "")
            |> JaSerializer.Formatter.Utils.format_type
    quote do
      def type, do: unquote(type)
      defoverridable [type: 0]
    end
  end

  # Builds the default `id/1` and `id/2` implementations (fetch `:id`).
  defp define_default_id do
    quote do
      def id(m), do: Map.get(m, :id)
      def id(m, _c), do: apply(__MODULE__, :id, [m])
      defoverridable [{:id, 2}, {:id, 1}]
    end
  end

  # Builds the default `attributes/2` implementation, which delegates to
  # `default_attributes/3` below.
  defp define_default_attributes do
    quote do
      def attributes(struct, conn) do
        JaSerializer.Serializer.default_attributes(__MODULE__, struct, conn)
      end
      defoverridable [attributes: 2]
    end
  end

  @doc false
  def default_attributes(serializer, struct, conn) do
    serializer.__attributes
    |> Enum.map(&({&1, apply(serializer, &1, [struct, conn])}))
    |> Enum.into(%{})
  end

  # Builds the default `meta/2` implementation (always nil).
  defp define_default_meta do
    quote do
      def meta(_struct, _conn), do: nil
      defoverridable [meta: 2]
    end
  end

  @doc false
  defmacro serialize(type, do: block) do
    IO.write :stderr, IO.ANSI.format([:red, :bright,
      "warning: serialize/2 is deprecated, please use type/0 instead\n" <>
      Exception.format_stacktrace(Macro.Env.stacktrace(__CALLER__))
    ])
    quote do
      unquote(block)
      def type, do: unquote(type)
    end
  end

  @doc """
  Defines the canonical path for retrieving this resource.
  ## String Examples
  String may be either a relative or absolute path. Path segments beginning
  with a colon are called as functions on the serializer with the struct and
  conn passed in.
      defmodule PostSerializer do
        use JaSerializer
        location "/posts/:id"
      end
      defmodule CommentSerializer do
        use JaSerializer
        location "http://api.example.com/posts/:post_id/comments/:id"
        def post_id(comment, _conn), do: comment.post_id
      end
  ## Atom Example
  When an atom is passed in it is assumed it is a function that will return
  a relative or absolute path.
      defmodule PostSerializer do
        use JaSerializer
        import MyPhoenixApp.Router.Helpers
        location :post_url
        def post_url(post, conn) do
          #TODO: Verify conn can be used here instead of Endpoint
          post_path(conn, :show, post.id)
        end
      end
  """
  defmacro location(uri) do
    quote bind_quoted: [uri: uri] do
      @links [{:self, uri} | @links]
    end
  end

  defmacro links(links) do
    quote bind_quoted: [links: links] do
      @links (links ++ @links)
    end
  end

  @doc """
  Defines a list of attributes to be included in the payload.
  An overridable function for each attribute is generated with the same name
  as the attribute. The function's default behavior is to retrieve a field with
  the same name from the struct.
  For example, if you have `attributes [:body]` a function `body/2` is defined
  on the serializer with a default behavior of `Map.get(struct, :body)`.
  """
  defmacro attributes(atts) do
    quote bind_quoted: [atts: atts] do
      # Save attributes
      @attributes @attributes ++ atts
      # Define default attribute function, make overridable
      for att <- atts do
        def unquote(att)(m), do: Map.get(m, unquote(att))
        # `_c` (the conn) is intentionally unused by the default clause;
        # underscore it to match id/2 above and silence the warning.
        def unquote(att)(m, _c), do: apply(__MODULE__, unquote(att), [m])
        defoverridable [{att, 2}, {att, 1}]
      end
    end
  end

  @doc """
  Add a has_many relationship to be serialized.
  Relationships may be included in any of three composable ways:
  * Links
  * Resource Identifiers
  * Includes
  ## Relationship Source
  When you define a relationship, a function is defined of the same name in the
  serializer module. This function is overridable but by default attempts to
  access a field of the same name as the relationship on the map/struct passed
  in. The field may be changed using the `field` option.
  For example if you `has_many :comments` a function `comments/2` is defined
  which calls `Dict.get(struct, :comments)` by default.
  ## Link based relationships
  Specify a uri which responds with the related resources.
  See <a href='#location/1'>location/1</a> for defining uris.
  The relationship source is disregarded when linking.
      defmodule PostSerializer do
        use JaSerializer
        has_many :comments, links: [related: "/posts/:id/comments"]
      end
  ## Resource Identifier Relationships
  Adds a list of `id` and `type` pairs to the response with the assumption the
  API consumer can use them to retrieve the related resources as needed.
  The relationship source should return either a list of ids or maps/structs
  that have an `id` field.
      defmodule PostSerializer do
        use JaSerializer
        has_many :comments, type: "comments"
        def comments(post, _conn) do
          post |> Post.get_comments |> Enum.map(&(&1.id))
        end
      end
  ## Included Relationships
  Adds a list of `id` and `type` pairs, just like Resource Identifier
  relationships, but also adds the full serialized resource to the response to
  be "sideloaded" as well.
  The relationship source should return a list of maps/structs.
      defmodule PostSerializer do
        use JaSerializer
        has_many :comments, serializer: CommentSerializer, include: true
        def comments(post, _conn) do
          post |> Post.get_comments
        end
      end
      defmodule CommentSerializer do
        use JaSerializer
        has_one :post, field: :post_id, type: "posts"
        attributes [:body]
      end
  """
  defmacro has_many(name, opts \\ []) do
    normalized_opts = normalize_relation_opts(opts, __CALLER__)
    quote do
      @relations [{:has_many, unquote(name), unquote(normalized_opts)} | @relations]
      unquote(JaSerializer.Relationship.default_function(name, normalized_opts))
    end
  end

  @doc """
  See documentation for <a href='#has_many/2'>has_many/2</a>.
  API is the exact same.
  """
  defmacro has_one(name, opts \\ []) do
    normalized_opts = normalize_relation_opts(opts, __CALLER__)
    quote do
      @relations [{:has_one, unquote(name), unquote(normalized_opts)} | @relations]
      unquote(JaSerializer.Relationship.default_function(name, normalized_opts))
    end
  end

  # Validates relationship options, warning on deprecated/invalid usage, and
  # normalizes the deprecated `include: Serializer` form.
  defp normalize_relation_opts(opts, caller) do
    include = opts[:include]
    if opts[:field] && !opts[:type] do
      IO.write :stderr, IO.ANSI.format([:red, :bright,
        "warning: The `field` option must be used with a `type` option\n" <>
        Exception.format_stacktrace(Macro.Env.stacktrace(caller))
      ])
    end
    opts = add_related(opts)
    case is_boolean(include) or is_nil(include) do
      true -> opts
      false ->
        IO.write :stderr, IO.ANSI.format([:red, :bright,
          "warning: Specifying a non-boolean as the `include` option is " <>
          "deprecated. If you are specifying the serializer for this " <>
          "relation, use the `serializer` option instead. To always " <>
          "side-load the relationship, use `include: true` in addition to " <>
          "the `serializer` option\n" <>
          Exception.format_stacktrace(Macro.Env.stacktrace(caller))
        ])
        [serializer: include, include: true] ++ opts
    end
  end

  # Promotes the deprecated `link:` option into `links: [related: ...]`.
  defp add_related(opts) do
    if opts[:link] do
      updated =
        Keyword.get(opts, :links, [])
        |> Keyword.put_new(:related, opts[:link])
      Keyword.put(opts, :links, updated)
    else
      opts
    end
  end

  @doc false
  defmacro __before_compile__(_env) do
    quote do
      def __links, do: @links
      def __relations, do: @relations
      def __attributes, do: @attributes
      def format(data) do
        format(data, %{})
      end
      def format(data, conn) do
        format(data, conn, [])
      end
      def format(data, conn, opts) do
        %{data: data, conn: conn, serializer: __MODULE__, opts: opts}
        |> JaSerializer.Builder.build
        |> JaSerializer.Formatter.format
      end
    end
  end
end
|
lib/ja_serializer/serializer.ex
| 0.820362
| 0.486027
|
serializer.ex
|
starcoder
|
defmodule FDB.Directory do
  @moduledoc """
  Directory is one of the ways to [manage
  namespaces](https://apple.github.io/foundationdb/developer-guide.html#directories).
      root = FDB.Directory.new()
      dir = FDB.Database.transact(db, fn tr ->
        FDB.Directory.create_or_open(root, tr, ["users", "inactive"])
      end)
      inactive_subspace = FDB.Coder.Subspace.new(dir)
  """
  alias FDB.Directory.Protocol
  alias FDB.Directory.Layer
  alias FDB.Transaction

  @type t :: Protocol.t()
  @type path :: [String.t()]

  @doc """
  Creates root directory
  ## Options
  * node_subspace - (`t:FDB.Coder.t/0`) where the directory metadata should be stored. Defaults to `Subspace.new(<<0xFE>>)`
  * content_subspace - (`t:FDB.Coder.t/0`) where contents are stored. Defaults to `Subspace.new("")`
  * allow_manual_prefixes - (boolean) whether manual prefixes should be allowed for directories. Defaults to `false`
  """
  @spec new(map) :: t
  defdelegate new(options \\ %{}), to: Layer

  @doc """
  Gets the directory layer
  """
  @spec layer(t) :: String.t()
  defdelegate layer(directory), to: Protocol

  @doc """
  Gets the directory path
  """
  @spec path(t) :: path
  defdelegate path(directory), to: Protocol

  @doc """
  Gets the directory prefix
  """
  @spec prefix(t) :: binary
  defdelegate prefix(directory), to: Protocol

  @doc """
  Opens the directory with the given `path`. If the directory does not
  exist, it is created (creating parent directories if necessary).
  ## Options
  layer - (binary) if the layer is specified and the directory is new,
  it is recorded as the layer; if layer is specified and the directory
  already exists, it is compared against the layer specified when the
  directory was created, and the method will raise an exception if
  they differ.
  """
  @spec create_or_open(t, Transaction.t(), path, map) :: t
  defdelegate create_or_open(directory, tr, path, options \\ %{}), to: Protocol

  @doc """
  Opens the directory with given `path`. The function will raise an
  exception if the directory does not exist.
  ## Options
  layer - (binary) if the layer is specified, it is compared against
  the layer specified when the directory was created, and the function
  will raise an exception if they differ.
  """
  @spec open(t, Transaction.t(), path, map) :: t
  defdelegate open(directory, tr, path, options \\ %{}), to: Protocol

  @doc """
  Creates a directory with given `path`. Parent directories are
  created if necessary. The method will raise an exception if the
  given directory already exists.
  ## Options
  layer - (binary) if the layer is specified, it is recorded with the
  directory and will be checked by future calls to open.
  prefix - (binary) if prefix is specified, the directory is created
  with the given prefix; otherwise a prefix is allocated
  automatically.
  """
  @spec create(t, Transaction.t(), path, map) :: t
  defdelegate create(directory, tr, path, options \\ %{}), to: Protocol

  @doc """
  Moves this directory to new_path, interpreting new_path
  absolutely. There is no effect on the prefix of the given directory
  or on clients that already have the directory open. The function
  will raise an exception if a directory already exists at new_path or
  the parent directory of new_path does not exist.
  Returns the directory at its new location.
  """
  @spec move_to(t, Transaction.t(), path) :: t
  defdelegate move_to(directory, tr, new_absolute_path), to: Protocol

  @doc """
  Moves the directory at old_path to new_path. There is no effect on
  the prefix of the given directory or on clients that already have
  the directory open. The function will raise an exception if a
  directory does not exist at old_path, a directory already exists at
  new_path, or the parent directory of new_path does not exist.
  Returns the directory at its new location.
  """
  @spec move(t, Transaction.t(), path, path) :: t
  defdelegate move(directory, tr, old_path, new_path), to: Protocol

  @doc """
  Removes the directory at path, its contents, and all
  subdirectories. The function will raise an exception if the
  directory does not exist.
  > Clients that have already opened the directory might still insert
  data into its contents after removal.
  """
  @spec remove(t, Transaction.t(), path) :: t
  defdelegate remove(directory, tr, path \\ []), to: Protocol

  @doc """
  Checks if the directory at path exists and, if so, removes the
  directory, its contents, and all subdirectories. Returns `true` if
  the directory existed and `false` otherwise.
  > Clients that have already opened the directory might still insert
  data into its contents after removal.
  """
  # Spec fixed: the docs state this returns a boolean, not a directory.
  @spec remove_if_exists(t, Transaction.t(), path) :: boolean
  defdelegate remove_if_exists(directory, tr, path \\ []), to: Protocol

  @doc """
  Returns `true` if the directory at path exists and `false` otherwise.
  """
  # Spec fixed: the docs state this returns a boolean, not a directory.
  @spec exists?(t, Transaction.t(), path) :: boolean
  defdelegate exists?(directory, tr, path \\ []), to: Protocol

  @doc """
  Returns an list of names of the immediate subdirectories of the
  directory at path. Each name represents the last component of a
  subdirectory’s path.
  """
  # Spec fixed: the docs state this returns a list of names.
  @spec list(t, Transaction.t(), path) :: [String.t()]
  defdelegate list(directory, tr, path \\ []), to: Protocol

  @doc false
  @spec get_layer_for_path(t, path) :: t
  defdelegate get_layer_for_path(directory, path), to: Protocol
end
|
lib/fdb/directory.ex
| 0.881456
| 0.4184
|
directory.ex
|
starcoder
|
defmodule Brains do
  @moduledoc """
  `Brains` is a GraphQL client for Elixir on top of `Tesla`.
  ## Usage
  ```elixir
  connection = Brains.Connection.new("https://example.com/graph")
  Brains.query(connection, \"""
    {
      films {
        title
      }
    }
  \"""
  )
  {:ok,
   %Tesla.Env{
     body: "{\\"data\\":{\\"films\\":[{\\"title\\":\\"A New Hope\\"}]}}",
     status: 200,
     headers: []
   }}
  ```
  You can also run mutations:
  ```elixir
  Brains.query(connection, \"""
    mutation createUser($name: String!) {
      createUser(name: $name) {
        id
        name
      }
    }
  \""",
    variables: %{name: "uesteibar"}
  )
  ```
  """
  alias Brains.{Connection, Request}

  @type connection :: Tesla.Client.t()
  @type query_string :: String.t()
  @type url :: String.t()
  @type headers :: [header]
  @type header :: {String.t(), String.t()}
  @type options :: [option]
  @type option ::
          {:operation_name, String.t()}
          | {:variables, map}
          | {:headers, headers}
          | {:url, url}

  @doc """
  Runs a query request to your graphql endpoint.
  ## Example
  ```elixir
  Brains.query(connection, \"""
    {
      films {
        count
      }
    }
  \""")
  ```
  You can also pass variables for your query:
  ```elixir
  Brains.query(connection, \"""
    mutation createUser($name: String!) {
      createUser(name: $name) {
        id
        name
      }
    }
  \""",
    variables: %{name: "uesteibar"}
  )
  ```
  """
  @spec query(
          connection,
          query_string :: map() | String.t(),
          options :: keyword()
        ) ::
          {:ok, map}
          | {:error, reason :: term}
  def query(connection, query_string, options \\ [])
      when is_binary(query_string) and is_list(options) do
    body =
      %{"query" => query_string}
      |> put_optional("operationName", Keyword.get(options, :operation_name))
      |> put_optional("variables", Keyword.get(options, :variables))

    request =
      Request.new()
      |> Request.method(:post)
      |> Request.add_param(:header, "content-type", "application/json")
      |> Request.add_param(:body, :body, Poison.encode!(body))
      |> add_headers(Keyword.get(options, :headers))
      |> add_url(Keyword.get(options, :url))

    Connection.execute(connection, request)
  end

  # Puts `key` into `map` unless the value is nil.
  defp put_optional(map, _key, nil), do: map
  defp put_optional(map, key, value), do: Map.put(map, key, value)

  # Adds each `{key, value}` header to the request; nil means no headers given.
  defp add_headers(request, nil), do: request

  defp add_headers(request, headers) do
    Enum.reduce(headers, request, fn {key, value}, req ->
      Request.add_param(req, :headers, key, value)
    end)
  end

  # Overrides the request url when one is given.
  defp add_url(request, nil), do: request
  defp add_url(request, url), do: Request.url(request, url)
end
|
lib/brains.ex
| 0.876509
| 0.701662
|
brains.ex
|
starcoder
|
defmodule MediaServer.Watches do
  @moduledoc """
  The WatchStatuses context.
  """
  import Ecto.Query, warn: false
  alias MediaServer.Repo
  alias MediaServer.Watches.Movie

  # Progress (percent) at or beyond which an item counts as fully watched and
  # its watch record is discarded instead of kept.
  @watched_threshold 90

  @doc """
  Returns the list of movie_watches.
  ## Examples
      iex> list_movie_watches()
      [%Movie{}, ...]
  """
  def list_movie_watches do
    Repo.all(Movie)
  end

  @doc """
  Gets a single movie.
  Raises `Ecto.NoResultsError` if the Movie does not exist.
  ## Examples
      iex> get_movie!(123)
      %Movie{}
      iex> get_movie!(456)
      ** (Ecto.NoResultsError)
  """
  def get_movie!(id), do: Repo.get!(Movie, id)

  @doc """
  Creates a movie.
  ## Examples
      iex> create_movie(%{field: value})
      {:ok, %Movie{}}
      iex> create_movie(%{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def create_movie(attrs \\ %{}) do
    %Movie{}
    |> Movie.changeset(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a movie.
  ## Examples
      iex> update_movie(movie, %{field: new_value})
      {:ok, %Movie{}}
      iex> update_movie(movie, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def update_movie(%Movie{} = movie, attrs) do
    movie
    |> Movie.changeset(attrs)
    |> Repo.update()
  end

  @doc """
  Upserts the watch record for a movie, or deletes it once the movie counts
  as fully watched. Returns the create/update result, or nil when nothing is
  kept.
  """
  def update_or_create_movie(attrs) do
    movie = Repo.get_by(Movie, movie_id: attrs.movie_id, user_id: attrs.user_id)

    cond do
      is_nil(movie) and not watched?(attrs) -> create_movie(attrs)
      is_nil(movie) -> nil
      not watched?(attrs) -> update_movie(movie, attrs)
      true ->
        delete_movie(movie)
        nil
    end
  end

  @doc """
  Deletes a movie.
  ## Examples
      iex> delete_movie(movie)
      {:ok, %Movie{}}
      iex> delete_movie(movie)
      {:error, %Ecto.Changeset{}}
  """
  def delete_movie(%Movie{} = movie) do
    Repo.delete(movie)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking movie changes.
  ## Examples
      iex> change_movie(movie)
      %Ecto.Changeset{data: %Movie{}}
  """
  def change_movie(%Movie{} = movie, attrs \\ %{}) do
    Movie.changeset(movie, attrs)
  end

  alias MediaServer.Watches.Episode

  @doc """
  Returns the list of episode_watches.
  ## Examples
      iex> list_episode_watches()
      [%Episode{}, ...]
  """
  def list_episode_watches do
    Repo.all(Episode)
  end

  @doc """
  Gets a single episode.
  Raises `Ecto.NoResultsError` if the Episode does not exist.
  ## Examples
      iex> get_episode!(123)
      %Episode{}
      iex> get_episode!(456)
      ** (Ecto.NoResultsError)
  """
  def get_episode!(id), do: Repo.get!(Episode, id)

  @doc """
  Creates a episode.
  ## Examples
      iex> create_episode(%{field: value})
      {:ok, %Episode{}}
      iex> create_episode(%{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def create_episode(attrs \\ %{}) do
    %Episode{}
    |> Episode.changeset(attrs)
    |> Repo.insert()
  end

  @doc """
  Updates a episode.
  ## Examples
      iex> update_episode(episode, %{field: new_value})
      {:ok, %Episode{}}
      iex> update_episode(episode, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
  """
  def update_episode(%Episode{} = episode, attrs) do
    episode
    |> Episode.changeset(attrs)
    |> Repo.update()
  end

  @doc """
  Upserts the watch record for an episode, or deletes it once the episode
  counts as fully watched. Returns the create/update result, or nil when
  nothing is kept.
  """
  def update_or_create_episode(attrs) do
    episode =
      Repo.get_by(Episode,
        episode_id: attrs.episode_id,
        serie_id: attrs.serie_id,
        user_id: attrs.user_id
      )

    cond do
      is_nil(episode) and not watched?(attrs) -> create_episode(attrs)
      is_nil(episode) -> nil
      not watched?(attrs) -> update_episode(episode, attrs)
      true ->
        delete_episode(episode)
        nil
    end
  end

  @doc """
  Deletes a episode.
  ## Examples
      iex> delete_episode(episode)
      {:ok, %Episode{}}
      iex> delete_episode(episode)
      {:error, %Ecto.Changeset{}}
  """
  def delete_episode(%Episode{} = episode) do
    Repo.delete(episode)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking episode changes.
  ## Examples
      iex> change_episode(episode)
      %Ecto.Changeset{data: %Episode{}}
  """
  def change_episode(%Episode{} = episode, attrs \\ %{}) do
    Episode.changeset(episode, attrs)
  end

  # Whether the given progress attrs count as fully watched.
  # NOTE(review): assumes `duration` is never 0 (same as the original code) —
  # a zero duration raises ArithmeticError; confirm against the callers.
  defp watched?(%{current_time: current_time, duration: duration}) do
    current_time / duration * 100 >= @watched_threshold
  end
end
|
lib/media_server/watches.ex
| 0.866979
| 0.416292
|
watches.ex
|
starcoder
|
defmodule Ambry.People do
@moduledoc """
Functions for dealing with People.
"""
import Ambry.FileUtils
import Ecto.Query
alias Ambry.People.{Person, PersonFlat}
alias Ambry.{PubSub, Repo}
@person_direct_assoc_preloads [:authors, :narrators]
@doc """
Returns a limited list of people and whether or not there are more.
By default, it will limit to the first 10 results. Supply `offset` and `limit`
to change this. You can also optionally filter by giving a map with these
supported keys:
* `:search` - String: full-text search on names and aliases.
* `:is_author` - Boolean.
* `:is_narrator` - Boolean.
`order` should be a valid atom key, or a tuple like `{:name, :desc}`.
## Examples
    iex> list_people()
    {[%PersonFlat{}, ...], true}
"""
def list_people(offset \\ 0, limit \\ 10, filters \\ %{}, order \\ :name) do
  # Fetch one extra row so we can tell whether another page exists.
  results =
    offset
    |> PersonFlat.paginate(limit + 1)
    |> PersonFlat.filter(filters)
    |> PersonFlat.order(order)
    |> Repo.all()

  {page, rest} = Enum.split(results, limit)
  {page, rest != []}
end
@doc """
Returns the number of people (authors & narrators).
Note that `total` will not always be `authors` + `narrators`, because people
are sometimes both.
## Examples
    iex> count_people()
    %{authors: 3, narrators: 2, total: 4}
"""
@spec count_people :: %{total: integer(), authors: integer(), narrators: integer()}
def count_people do
  Repo.one(
    from p in PersonFlat,
      select: %{
        total: count(p.id),
        # COUNT over a CASE expression counts only rows where the flag is
        # true (CASE yields NULL otherwise, and COUNT skips NULLs).
        authors: count(fragment("CASE WHEN ? THEN 1 END", p.is_author)),
        narrators: count(fragment("CASE WHEN ? THEN 1 END", p.is_narrator))
      }
  )
end
@doc """
Gets a single person.
Raises `Ecto.NoResultsError` if the Person does not exist.
## Examples
iex> get_person!(123)
%Person{}
iex> get_person!(456)
** (Ecto.NoResultsError)
"""
def get_person!(id), do: Person |> preload(^@person_direct_assoc_preloads) |> Repo.get!(id)
@doc """
Creates a person.
## Examples
iex> create_person(%{field: value})
{:ok, %Person{}}
iex> create_person(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_person(attrs \\ %{}) do
%Person{}
|> Person.changeset(attrs)
|> Repo.insert()
|> tap(&PubSub.broadcast_create/1)
end
@doc """
Updates a person.
## Examples
iex> update_person(person, %{field: new_value})
{:ok, %Person{}}
iex> update_person(person, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_person(%Person{} = person, attrs) do
person
|> Repo.preload(@person_direct_assoc_preloads)
|> Person.changeset(attrs)
|> Repo.update()
|> tap(&PubSub.broadcast_update/1)
end
@doc """
Deletes a person.
## Examples
iex> delete_person(person)
:ok
iex> delete_person(person)
{:error, {:has_authored_books, books}}
iex> delete_person(person)
{:error, {:has_narrated_books, books}}
iex> delete_person(person)
{:error, %Ecto.Changeset{}}
"""
def delete_person(%Person{} = person) do
case Repo.delete(change_person(person)) do
{:ok, person} ->
maybe_delete_image(person.image_path)
PubSub.broadcast_delete(person)
:ok
{:error, changeset} ->
cond do
Keyword.has_key?(changeset.errors, :author) ->
{:error, {:has_authored_books, get_authored_books_list(person)}}
Keyword.has_key?(changeset.errors, :narrator) ->
{:error, {:has_narrated_books, get_narrated_books_list(person)}}
end
end
end
defp get_authored_books_list(person) do
%{authors: authors} = Repo.preload(person, authors: [:books])
Enum.flat_map(authors, fn author -> Enum.map(author.books, & &1.title) end)
end
defp get_narrated_books_list(person) do
%{narrators: narrators} = Repo.preload(person, narrators: [:books])
Enum.flat_map(narrators, fn narrator -> Enum.map(narrator.books, & &1.title) end)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking person changes.
## Examples
iex> change_person(person)
%Ecto.Changeset{data: %Person{}}
"""
def change_person(%Person{} = person, attrs \\ %{}) do
Person.changeset(person, attrs)
end
@doc """
Gets a person and all of their books (either authored or narrated).
Books are listed in descending order based on publish date.
"""
def get_person_with_books!(person_id) do
query =
from person in Person,
left_join: narrators in assoc(person, :narrators),
left_join: narrated_books in assoc(narrators, :books),
left_join: narrated_book_authors in assoc(narrated_books, :authors),
left_join: narrated_book_series_books in assoc(narrated_books, :series_books),
left_join: narrated_book_series_book_series in assoc(narrated_book_series_books, :series),
left_join: authors in assoc(person, :authors),
left_join: authored_books in assoc(authors, :books),
left_join: authored_book_authors in assoc(authored_books, :authors),
left_join: authored_book_series_books in assoc(authored_books, :series_books),
left_join: authored_book_series_book_series in assoc(authored_book_series_books, :series),
preload: [
narrators:
{narrators,
books:
{narrated_books,
[
authors: narrated_book_authors,
series_books:
{narrated_book_series_books, series: narrated_book_series_book_series}
]}},
authors:
{authors,
books:
{authored_books,
[
authors: authored_book_authors,
series_books:
{authored_book_series_books, series: authored_book_series_book_series}
]}}
],
order_by: [desc: narrated_books.published, desc: authored_books.published]
Repo.get!(query, person_id)
end
end
|
lib/ambry/people.ex
| 0.845289
| 0.45042
|
people.ex
|
starcoder
|
defmodule QRCode.FormatVersion do
  @moduledoc """
  A QR code uses error correction encoding and mask patterns. The QR code's
  size is represented by a number, called a version number. To ensure that
  a QR code scanner accurately decodes what it scans, the QR code specification
  requires that each code include a format information string, which tells the
  QR code scanner which error correction level and mask pattern the QR code
  is using. In addition, for version 7 and larger, the QR code specification
  requires that each code include a version information string, which tells
  the QR code scanner which version the code is.
  """

  alias MatrixReloaded.{Matrix, Vector}
  alias QRCode.QR

  import QRCode.QR, only: [masking: 1, version: 1]

  # Precomputed 15-bit format information strings, indexed first by error
  # correction level and then by mask pattern number (0..7).
  # NOTE(review): `Result` used below appears to come from an external helper
  # library (it is not aliased here) — confirm against the project's deps.
  @format_information %{
    low: %{
      0 => [1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0],
      1 => [1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],
      2 => [1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
      3 => [1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1],
      4 => [1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1],
      5 => [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0],
      6 => [1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
      7 => [1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0]
    },
    medium: %{
      0 => [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
      1 => [1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1],
      2 => [1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0],
      3 => [1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1],
      4 => [1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1],
      5 => [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0],
      6 => [1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1],
      7 => [1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0]
    },
    quartile: %{
      0 => [0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1],
      1 => [0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
      2 => [0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1],
      3 => [0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
      4 => [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
      5 => [0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1],
      6 => [0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0],
      7 => [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1]
    },
    high: %{
      0 => [0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
      1 => [0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
      2 => [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1],
      3 => [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
      4 => [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0],
      5 => [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1],
      6 => [0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0],
      7 => [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1]
    }
  }

  # Precomputed 18-bit version information, stored as a 6x3 bit matrix per
  # version; only versions 7..40 carry version information in a QR code.
  @version_information %{
    7 => [[0, 0, 1], [0, 1, 0], [0, 1, 0], [0, 1, 1], [1, 1, 1], [0, 0, 0]],
    8 => [[0, 0, 1], [1, 1, 1], [0, 1, 1], [0, 1, 0], [0, 0, 0], [1, 0, 0]],
    9 => [[1, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]],
    10 => [[1, 1, 0], [0, 1, 0], [1, 1, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0]],
    11 => [[0, 1, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1], [1, 1, 0], [1, 0, 0]],
    12 => [[0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]],
    13 => [[1, 1, 1], [0, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
    14 => [[1, 0, 1], [1, 0, 0], [0, 0, 0], [1, 1, 0], [0, 1, 1], [1, 0, 0]],
    15 => [[0, 0, 0], [1, 0, 1], [0, 0, 1], [0, 0, 1], [1, 1, 1], [1, 0, 0]],
    16 => [[0, 0, 0], [1, 1, 1], [1, 0, 1], [1, 0, 1], [0, 0, 0], [0, 1, 0]],
    17 => [[1, 0, 1], [1, 1, 0], [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 0]],
    18 => [[1, 1, 1], [0, 1, 0], [0, 0, 0], [1, 0, 1], [0, 1, 0], [0, 1, 0]],
    19 => [[0, 1, 0], [0, 1, 1], [0, 0, 1], [0, 1, 0], [1, 1, 0], [0, 1, 0]],
    20 => [[0, 1, 1], [0, 0, 1], [0, 1, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0]],
    21 => [[1, 1, 0], [0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 1], [0, 1, 0]],
    22 => [[1, 0, 0], [1, 0, 0], [1, 1, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]],
    23 => [[0, 0, 1], [1, 0, 1], [1, 1, 1], [1, 1, 0], [1, 1, 1], [0, 1, 0]],
    24 => [[0, 0, 1], [0, 0, 0], [1, 1, 0], [1, 1, 1], [0, 0, 0], [1, 1, 0]],
    25 => [[1, 0, 0], [0, 0, 1], [1, 1, 1], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
    26 => [[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [0, 1, 0], [1, 1, 0]],
    27 => [[0, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 0], [1, 1, 0], [1, 1, 0]],
    28 => [[0, 1, 0], [1, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 1], [1, 1, 0]],
    29 => [[1, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0]],
    30 => [[1, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 1], [0, 1, 1], [1, 1, 0]],
    31 => [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0], [1, 1, 1], [1, 1, 0]],
    32 => [[1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 1], [0, 0, 0], [0, 0, 1]],
    33 => [[0, 0, 0], [0, 1, 1], [1, 1, 0], [1, 1, 0], [1, 0, 0], [0, 0, 1]],
    34 => [[0, 1, 0], [1, 1, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]],
    35 => [[1, 1, 1], [1, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 0], [0, 0, 1]],
    36 => [[1, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1], [0, 0, 1], [0, 0, 1]],
    37 => [[0, 1, 1], [1, 0, 1], [0, 0, 0], [0, 1, 0], [1, 0, 1], [0, 0, 1]],
    38 => [[0, 0, 1], [0, 0, 1], [1, 0, 0], [1, 0, 1], [0, 1, 1], [0, 0, 1]],
    39 => [[1, 0, 0], [0, 0, 0], [1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 1]],
    40 => [[1, 0, 0], [1, 0, 1], [1, 0, 0], [0, 1, 1], [0, 0, 0], [1, 0, 1]]
  }

  # Writes both the format information and (for version >= 7) the version
  # information into the QR matrix, returning the updated `%QR{}` in a Result.
  @spec put_information(QR.t()) :: Result.t(String.t(), QR.t())
  def put_information(
        %QR{matrix: matrix, version: version, ecc_level: ecc_level, mask_num: mask_num} = qr
      )
      when masking(mask_num) and version(version) do
    matrix
    |> set_format_info(ecc_level, mask_num, version)
    |> Result.and_then(&set_version_info(&1, version))
    |> Result.map(fn matrix -> %{qr | matrix: matrix} end)
  end

  # Places the four copies of the format information string around the finder
  # patterns. The matrix for a given version is (4 * version + 17) square,
  # which is where the `4 * version + 9` / `4 * version + 10` offsets come from.
  @spec set_format_info(Matrix.t(), QR.level(), QR.mask_num(), QR.version()) ::
          Result.t(String.t(), Matrix.t())
  def set_format_info(matrix, table_level, mask_num, version) do
    {row_left, row_right, col_top, col_bottom} = information_string(table_level, mask_num)

    matrix
    |> Matrix.update_row(row_left, {8, 0})
    |> Result.and_then(&Matrix.update_row(&1, row_right, {8, 4 * version + 9}))
    |> Result.and_then(&Matrix.update_col(&1, col_top, {0, 8}))
    |> Result.and_then(&Matrix.update_col(&1, col_bottom, {4 * version + 10, 8}))
  end

  # Versions below 7 carry no version information block; pass through unchanged.
  @spec set_version_info(Matrix.t(), QR.version()) :: Result.t(String.t(), Matrix.t())
  def set_version_info(matrix, version) when version < 7 do
    Result.ok(matrix)
  end

  # Writes the 6x3 version block into the top-right corner and its transpose
  # into the bottom-left corner of the matrix.
  def set_version_info(matrix, version) do
    version_info = @version_information[version]

    matrix
    |> Matrix.update(version_info, {0, 4 * version + 6})
    |> Result.and_then(&Matrix.update(&1, Matrix.transpose(version_info), {4 * version + 6, 0}))
  end

  # Splits the 15-bit format string into the four segments placed on the
  # matrix; the inserted literal 0s account for positions skipped by fixed
  # modules (timing pattern / dark module) along the row/column.
  defp information_string(level, mask_num) do
    row = @format_information[level][mask_num]

    # First 7 bits with a 0 spliced in before the last bit (timing column gap).
    row_left =
      row
      |> Enum.take(7)
      |> List.pop_at(-1)
      |> (fn {last, first} -> first ++ [0, last] end).()

    row_right = row |> Enum.drop(7)

    # Last 8 bits with a 0 spliced in after two bits, reversed into a column.
    col_top =
      row_right
      |> Enum.split(2)
      |> (fn {first, rest} -> first ++ [0 | rest] end).()
      |> Enum.reverse()
      |> Vector.transpose()

    col_bottom =
      row
      |> Enum.take(7)
      |> Enum.reverse()
      |> Vector.transpose()

    {row_left, row_right, col_top, col_bottom}
  end
end
|
lib/qr_code/format_version.ex
| 0.530723
| 0.890485
|
format_version.ex
|
starcoder
|
defmodule Filtrex.Type.Config do
  @moduledoc """
  This configuration struct is for passing options at the top-level (e.g. `Filtrex.parse/2`) in a list. See `defconfig/1` for a more specific example.

  Struct keys:

  * `type`: the corresponding condition module with this type (e.g. :text = Filtrex.Condition.Text)
  * `keys`: the allowed keys for this configuration
  * `options`: the configuration options to be passed to the condition
  """

  # Improved: the previous `@type t :: Filtrex.Type.Config` was just the module
  # atom, not the struct shape.
  @type t :: %__MODULE__{type: atom | nil, keys: [String.t()], options: map}

  defstruct type: nil, keys: [], options: %{}

  @doc "Returns whether the passed key is listed in any of the configurations"
  def allowed?(configs, key) do
    Enum.any?(configs, &(key in &1.keys))
  end

  @doc "Returns the configuration for the specified key"
  def config(configs, key) do
    # First configuration whose `keys` contains the key, or nil.
    List.first(for c <- configs, key in c.keys, do: c)
  end

  @doc "Narrows the list of configurations to only the specified type"
  def configs_for_type(configs, type) do
    for c <- configs, c.type == type, do: c
  end

  @doc "Returns the specific options of a configuration based on the key"
  def options(configs, key) do
    # Falls back to an empty default struct so an unknown key yields %{}.
    (config(configs, key) || struct(__MODULE__)).options
  end

  @doc """
  Allows easy creation of a configuration list:

      iex> import Filtrex.Type.Config
      iex>
      iex> defconfig do
      iex>   # Single key can be passed
      iex>   number :rating, allow_decimal: true
      iex>
      iex>   # Multiple keys
      iex>   text [:title, :description]
      iex>
      iex>   # String key can be passed
      iex>   date "posted", format: "{ISOz}"
      iex> end
      [
        %Filtrex.Type.Config{keys: ["rating"], options: %{allow_decimal: true}, type: :number},
        %Filtrex.Type.Config{keys: ["title", "description"], options: %{}, type: :text},
        %Filtrex.Type.Config{keys: ["posted"], options: %{format: "{ISOz}"}, type: :date}
      ]
  """
  defmacro defconfig(block) do
    # The injected `configs` variable accumulates one struct per type macro
    # call inside the block; its final value is the macro's result.
    quote do
      var!(configs) = []
      unquote(block)
      var!(configs)
    end
  end

  # Generate one macro per registered condition module (e.g. `text/2`,
  # `number/2`, `date/2`), each appending a config struct to `configs`.
  for module <- Filtrex.Condition.condition_modules do
    @doc "Generate a config struct for `#{to_string(module) |> String.slice(7..-1)}`"
    defmacro unquote(module.type)(key_or_keys, opts \\ [])

    defmacro unquote(module.type)(keys, opts) when is_list(keys) do
      type = unquote(module.type)

      quote do
        var!(configs) = var!(configs) ++
          [%Filtrex.Type.Config{type: unquote(type),
            keys: Filtrex.Type.Config.to_strings(unquote(keys)),
            options: Enum.into(unquote(opts), %{})}]
      end
    end

    # Single (atom or string) key: normalize to a one-element list and re-dispatch.
    defmacro unquote(module.type)(key, opts) do
      type = unquote(module.type)

      quote do
        unquote(type)([to_string(unquote(key))], unquote(opts))
      end
    end
  end

  @doc "Convert a list of mixed atoms and/or strings to a list of strings"
  def to_strings(keys, strings \\ []) do
    # Single pass instead of the previous O(n²) per-element `strings ++ [x]`
    # appends; raises FunctionClauseError on non-atom/non-binary elements,
    # matching the original guards.
    strings ++
      Enum.map(keys, fn
        key when is_atom(key) -> to_string(key)
        key when is_binary(key) -> key
      end)
  end
end
|
lib/filtrex/config.ex
| 0.834036
| 0.458046
|
config.ex
|
starcoder
|
defmodule Vox.Model do
  @moduledoc """
  A voxel model.
  """

  @type bounds_error :: :bounds
  @type unknown_error :: :unknown
  @type error(type) :: { :error, { :model, type } }
  @type id :: non_neg_integer
  @type axis :: non_neg_integer
  @type point :: { axis, axis, axis }
  @type t :: %__MODULE__{ size: point, voxels: %{ point => Vox.Voxel.t } }

  defstruct [size: { 0, 0, 0 }, voxels: %{}]

  @doc """
  Get the width of the model.
  """
  @spec width(t) :: axis
  def width(%{ size: { w, _, _ } }), do: w

  @doc """
  Get the height of the model.
  """
  # Fixed: the spec was a copy-pasted `@spec width(t)` for this function.
  @spec height(t) :: axis
  def height(%{ size: { _, h, _ } }), do: h

  @doc """
  Get the depth of the model.
  """
  # Fixed: the spec was a copy-pasted `@spec width(t)` for this function.
  @spec depth(t) :: axis
  def depth(%{ size: { _, _, d } }), do: d

  @doc """
  Get a voxel in the model.

  Returns `{ :ok, nil }` for an in-bounds point with no voxel, and a bounds
  error tuple for a point outside the model's size.
  """
  @spec voxel(t, point) :: { :ok, Vox.Voxel.t | nil } | error(bounds_error)
  def voxel(%{ size: { w, h, d } }, { x, y, z }) when (x < 0) or (x >= w) or (y < 0) or (y >= h) or (z < 0) or (z >= d), do: { :error, { :model, :bounds } }
  def voxel(%{ voxels: voxels }, point), do: { :ok, voxels[point] }

  @doc """
  Get a voxel in the model.
  """
  @spec voxel(t, axis, axis, axis) :: { :ok, Vox.Voxel.t | nil } | error(bounds_error)
  def voxel(model, x, y, z), do: voxel(model, { x, y, z })

  defmodule BoundsError do
    @moduledoc """
    Raised when a point lies outside a model's bounds.
    """
    defexception [:point, :model]

    @impl Exception
    def exception({ model, point }) do
      %BoundsError{
        point: point,
        model: model
      }
    end

    @impl Exception
    def message(%{ model: %{ size: size }, point: point }), do: "(#{inspect point}) outside of model's bounds: #{inspect size}"
  end

  @doc """
  Get a voxel in the model.

  Raises `Vox.Model.BoundsError` if the point is outside the model's bounds.
  """
  @spec voxel!(t, point) :: Vox.Voxel.t | nil | no_return
  def voxel!(model, point) do
    case voxel(model, point) do
      { :ok, result } -> result
      # Fixed: `type` was bound but unused, causing a compiler warning.
      { :error, { :model, _type } } -> raise BoundsError, { model, point }
    end
  end

  @doc """
  Get a voxel in the model.
  """
  @spec voxel!(t, axis, axis, axis) :: Vox.Voxel.t | nil | no_return
  def voxel!(model, x, y, z), do: voxel!(model, { x, y, z })
end
|
lib/vox/model.ex
| 0.939679
| 0.574395
|
model.ex
|
starcoder
|
defprotocol RDF.Data do
  @moduledoc """
  An abstraction over the different data structures for collections of RDF statements.
  """

  @doc """
  Adds statements to an RDF data structure.

  As opposed to the specific `add` functions on the RDF data structures, which
  always return the same structure type as the first argument, `merge` might
  result in another RDF data structure, e.g. merging two `RDF.Description`s with
  different subjects results in an `RDF.Graph`, or adding a quad to an `RDF.Graph`
  with a different name than the graph context of the quad results in an
  `RDF.Dataset`. But it is always guaranteed that the resulting structure has
  an `RDF.Data` implementation.
  """
  def merge(data, statements)

  @doc """
  Deletes statements from an RDF data structure.

  As opposed to the `delete` functions on RDF data structures directly, this
  function only deletes exactly matching structures.

  TODO: rename this function to make the different semantics explicit
  """
  def delete(data, statements)

  @doc """
  Deletes one statement from an RDF data structure and returns a tuple with the deleted statement and the changed data structure.
  """
  def pop(data)

  @doc """
  Checks if the given statement exists within an RDF data structure.
  """
  def include?(data, statements)

  @doc """
  Checks if an RDF data structure contains statements about the given resource.
  """
  def describes?(data, subject)

  @doc """
  Returns an `RDF.Description` of the given subject.

  Note: On an `RDF.Dataset` this will return an aggregated `RDF.Description` with
  the statements about this subject from all graphs.
  """
  def description(data, subject)

  @doc """
  Returns all `RDF.Description`s within an RDF data structure.

  Note: On an `RDF.Dataset` this will return aggregated `RDF.Description`s about
  the same subject from all graphs.
  """
  def descriptions(data)

  @doc """
  Returns the list of all statements of an RDF data structure.
  """
  def statements(data)

  @doc """
  Returns the set of all resources which are subject of the statements of an RDF data structure.
  """
  def subjects(data)

  @doc """
  Returns the set of all properties used within the statements of an RDF data structure.
  """
  def predicates(data)

  @doc """
  Returns the set of all resources used in the objects within the statements of an RDF data structure.
  """
  def objects(data)

  @doc """
  Returns the set of all resources used within the statements of an RDF data structure.
  """
  def resources(data)

  @doc """
  Returns the count of all resources which are subject of the statements of an RDF data structure.
  """
  def subject_count(data)

  @doc """
  Returns the count of all statements of an RDF data structure.
  """
  def statement_count(data)

  @doc """
  Returns a nested map of the native Elixir values of an RDF data structure.
  """
  def values(data)

  @doc """
  Returns a nested map of the native Elixir values of an RDF data structure with values mapped with the given function.
  """
  def values(data, mapping)

  @doc """
  Checks if two RDF data structures are equal.

  Two RDF data structures are considered to be equal if they contain the same triples.

  - comparing two `RDF.Description`s it's just the same as `RDF.Description.equal?/2`
  - comparing two `RDF.Graph`s differs in `RDF.Graph.equal?/2` in that the graph
    name is ignored
  - comparing two `RDF.Dataset`s differs in `RDF.Dataset.equal?/2` in that the
    dataset name is ignored
  - a `RDF.Description` is equal to a `RDF.Graph`, if the graph has just one
    description which equals the given description
  - a `RDF.Description` is equal to a `RDF.Dataset`, if the dataset has just one
    graph which contains only the given description
  - a `RDF.Graph` is equal to a `RDF.Dataset`, if the dataset has just one
    graph which equals the given graph; note that in this case the graph names
    must match
  """
  def equal?(data1, data2)
end
defimpl RDF.Data, for: RDF.Description do
  # Merging a triple: stays a description while the subject matches, otherwise
  # the result is promoted to a graph. Clause order is significant throughout
  # this impl — more specific patterns must stay before the catch-alls.
  def merge(%RDF.Description{subject: subject} = description, {s, _, _} = triple) do
    with ^subject <- RDF.Statement.coerce_subject(s) do
      RDF.Description.add(description, triple)
    else
      _ ->
        RDF.Graph.new(description)
        |> RDF.Graph.add(triple)
    end
  end

  # A quad always promotes the result to a dataset.
  def merge(description, {_, _, _, _} = quad),
    do: RDF.Dataset.new(description) |> RDF.Dataset.add(quad)

  # Same-subject descriptions merge in place; different subjects promote to a graph.
  def merge(%RDF.Description{subject: subject} = description,
            %RDF.Description{subject: other_subject} = other_description)
      when other_subject == subject,
      do: RDF.Description.add(description, other_description)

  def merge(description, %RDF.Description{} = other_description),
    do: RDF.Graph.new(description) |> RDF.Graph.add(other_description)

  # Merging with a larger structure delegates to that structure's impl.
  def merge(description, %RDF.Graph{} = graph),
    do: RDF.Data.merge(graph, description)

  def merge(description, %RDF.Dataset{} = dataset),
    do: RDF.Data.merge(dataset, description)

  # Deleting a description with a different subject is a no-op.
  def delete(%RDF.Description{subject: subject} = description,
             %RDF.Description{subject: other_subject})
      when subject != other_subject, do: description

  def delete(description, statements), do: RDF.Description.delete(description, statements)

  def pop(description), do: RDF.Description.pop(description)

  def include?(description, statements),
    do: RDF.Description.include?(description, statements)

  def describes?(description, subject),
    do: RDF.Description.describes?(description, subject)

  # Returns this description when the (coerced) subject matches, otherwise an
  # empty description of the requested subject.
  def description(%RDF.Description{subject: subject} = description, s) do
    with ^subject <- RDF.Statement.coerce_subject(s) do
      description
    else
      _ -> RDF.Description.new(s)
    end
  end

  # A single description is its own one-element description list.
  def descriptions(description), do: [description]

  def statements(description), do: RDF.Description.statements(description)

  def subjects(%RDF.Description{subject: subject}), do: MapSet.new([subject])

  def predicates(description), do: RDF.Description.predicates(description)

  def objects(description), do: RDF.Description.objects(description)

  # The description's own subject counts as a resource too.
  def resources(%RDF.Description{subject: subject} = description),
    do: RDF.Description.resources(description) |> MapSet.put(subject)

  # A description is by definition about exactly one subject.
  def subject_count(_), do: 1

  def statement_count(description), do: RDF.Description.count(description)

  def values(description), do: RDF.Description.values(description)

  def values(description, mapping), do: RDF.Description.values(description, mapping)

  def equal?(description, %RDF.Description{} = other_description) do
    RDF.Description.equal?(description, other_description)
  end

  # Equal to a graph only when the graph consists of exactly one matching description.
  def equal?(description, %RDF.Graph{} = graph) do
    with [single_description] <- RDF.Graph.descriptions(graph) do
      RDF.Description.equal?(description, single_description)
    else
      _ -> false
    end
  end

  # Dataset comparison is delegated to the dataset impl (symmetric definition).
  def equal?(description, %RDF.Dataset{} = dataset) do
    RDF.Data.equal?(dataset, description)
  end

  def equal?(_, _), do: false
end
defimpl RDF.Data, for: RDF.Graph do
  # Merging a quad: if the quad's graph context matches this graph's name it
  # is added directly, otherwise the result is promoted to a dataset.
  def merge(%RDF.Graph{name: name} = graph, {_, _, _, graph_context} = quad) do
    with ^name <- RDF.Statement.coerce_graph_name(graph_context) do
      RDF.Graph.add(graph, quad)
    else
      _ ->
        RDF.Dataset.new(graph)
        |> RDF.Dataset.add(quad)
    end
  end

  def merge(graph, {_, _, _} = triple),
    do: RDF.Graph.add(graph, triple)

  # Fix: removed the unreachable `merge(description, {_, _, _, _} = quad)`
  # clause that followed here (apparently copy-pasted from the Description
  # impl) — the quad clause above already matches every four-tuple, so the
  # compiler flagged it as a clause that can never match.

  def merge(graph, %RDF.Description{} = description),
    do: RDF.Graph.add(graph, description)

  # Same-named graphs merge in place; differently named graphs promote to a dataset.
  def merge(%RDF.Graph{name: name} = graph,
            %RDF.Graph{name: other_name} = other_graph)
      when other_name == name,
      do: RDF.Graph.add(graph, other_graph)

  def merge(graph, %RDF.Graph{} = other_graph),
    do: RDF.Dataset.new(graph) |> RDF.Dataset.add(other_graph)

  # Merging with a dataset delegates to the dataset impl.
  def merge(graph, %RDF.Dataset{} = dataset),
    do: RDF.Data.merge(dataset, graph)

  # Deleting a graph with a different name is a no-op.
  def delete(%RDF.Graph{name: name} = graph, %RDF.Graph{name: other_name})
      when name != other_name, do: graph

  def delete(graph, statements), do: RDF.Graph.delete(graph, statements)

  def pop(graph), do: RDF.Graph.pop(graph)

  def include?(graph, statements), do: RDF.Graph.include?(graph, statements)

  def describes?(graph, subject),
    do: RDF.Graph.describes?(graph, subject)

  # Falls back to an empty description for subjects not present in the graph.
  def description(graph, subject),
    do: RDF.Graph.description(graph, subject) || RDF.Description.new(subject)

  def descriptions(graph), do: RDF.Graph.descriptions(graph)

  def statements(graph), do: RDF.Graph.statements(graph)

  def subjects(graph), do: RDF.Graph.subjects(graph)

  def predicates(graph), do: RDF.Graph.predicates(graph)

  def objects(graph), do: RDF.Graph.objects(graph)

  def resources(graph), do: RDF.Graph.resources(graph)

  def subject_count(graph), do: RDF.Graph.subject_count(graph)

  def statement_count(graph), do: RDF.Graph.triple_count(graph)

  def values(graph), do: RDF.Graph.values(graph)

  def values(graph, mapping), do: RDF.Graph.values(graph, mapping)

  def equal?(graph, %RDF.Description{} = description),
    do: RDF.Data.equal?(description, graph)

  # Graph names are deliberately ignored when comparing two graphs here.
  def equal?(graph, %RDF.Graph{} = other_graph),
    do: RDF.Graph.equal?(%RDF.Graph{graph | name: nil},
                         %RDF.Graph{other_graph | name: nil})

  def equal?(graph, %RDF.Dataset{} = dataset),
    do: RDF.Data.equal?(dataset, graph)

  def equal?(_, _), do: false
end
defimpl RDF.Data, for: RDF.Dataset do
  # A dataset can absorb any statement or structure directly.
  def merge(dataset, {_, _, _} = triple),
    do: RDF.Dataset.add(dataset, triple)

  def merge(dataset, {_, _, _, _} = quad),
    do: RDF.Dataset.add(dataset, quad)

  def merge(dataset, %RDF.Description{} = description),
    do: RDF.Dataset.add(dataset, description)

  def merge(dataset, %RDF.Graph{} = graph),
    do: RDF.Dataset.add(dataset, graph)

  def merge(dataset, %RDF.Dataset{} = other_dataset),
    do: RDF.Dataset.add(dataset, other_dataset)

  # Deleting a dataset with a different name is a no-op.
  def delete(%RDF.Dataset{name: name} = dataset, %RDF.Dataset{name: other_name})
      when name != other_name, do: dataset

  def delete(dataset, statements), do: RDF.Dataset.delete(dataset, statements)

  def pop(dataset), do: RDF.Dataset.pop(dataset)

  def include?(dataset, statements), do: RDF.Dataset.include?(dataset, statements)

  def describes?(dataset, subject),
    do: RDF.Dataset.who_describes(dataset, subject) != []

  # Aggregates the statements about the subject from ALL graphs of the dataset
  # into one description; the pinned `^subject` match picks only graphs that
  # actually describe it.
  def description(dataset, subject) do
    with subject = RDF.Statement.coerce_subject(subject) do
      Enum.reduce RDF.Dataset.graphs(dataset), RDF.Description.new(subject), fn
        %RDF.Graph{descriptions: %{^subject => graph_description}}, description ->
          RDF.Description.add(description, graph_description)
        _, description ->
          description
      end
    end
  end

  # One aggregated description per distinct subject across all graphs.
  def descriptions(dataset) do
    dataset
    |> subjects
    |> Enum.map(&(description(dataset, &1)))
  end

  def statements(dataset), do: RDF.Dataset.statements(dataset)

  def subjects(dataset), do: RDF.Dataset.subjects(dataset)

  def predicates(dataset), do: RDF.Dataset.predicates(dataset)

  def objects(dataset), do: RDF.Dataset.objects(dataset)

  def resources(dataset), do: RDF.Dataset.resources(dataset)

  def subject_count(dataset), do: dataset |> subjects |> Enum.count

  def statement_count(dataset), do: RDF.Dataset.statement_count(dataset)

  def values(dataset), do: RDF.Dataset.values(dataset)

  def values(dataset, mapping), do: RDF.Dataset.values(dataset, mapping)

  # Equal to a description/graph only when the dataset holds exactly one graph.
  def equal?(dataset, %RDF.Description{} = description) do
    with [graph] <- RDF.Dataset.graphs(dataset) do
      RDF.Data.equal?(description, graph)
    else
      _ -> false
    end
  end

  # Note: uses RDF.Graph.equal?/2 here, so graph names DO matter in this case.
  def equal?(dataset, %RDF.Graph{} = graph) do
    with [single_graph] <- RDF.Dataset.graphs(dataset) do
      RDF.Graph.equal?(graph, single_graph)
    else
      _ -> false
    end
  end

  # Dataset names are deliberately ignored when comparing two datasets.
  def equal?(dataset, %RDF.Dataset{} = other_dataset) do
    RDF.Dataset.equal?(%RDF.Dataset{dataset | name: nil},
                       %RDF.Dataset{other_dataset | name: nil})
  end

  def equal?(_, _), do: false
end
|
lib/rdf/data.ex
| 0.862308
| 0.961858
|
data.ex
|
starcoder
|
# Generated protobuf (proto3) message: a binary code blob plus its checksum.
defmodule ForgeAbi.CodeInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          checksum: binary,
          binary: binary
        }
  defstruct [:checksum, :binary]

  field :checksum, 1, type: :bytes
  field :binary, 2, type: :bytes
end
# Generated protobuf (proto3) message: maps a type URL to a module name string.
defmodule ForgeAbi.TypeUrls do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          url: String.t(),
          module: String.t()
        }
  defstruct [:url, :module]

  field :url, 1, type: :string
  field :module, 2, type: :string
end
# Generated protobuf (proto3) message for a protocol-deployment transaction.
defmodule ForgeAbi.DeployProtocolTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          name: String.t(),
          version: non_neg_integer,
          namespace: String.t(),
          description: String.t(),
          type_urls: [ForgeAbi.TypeUrls.t()],
          proto: String.t(),
          pipeline: String.t(),
          sources: [String.t()],
          code: [ForgeAbi.CodeInfo.t()],
          tags: [String.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [
    :address,
    :name,
    :version,
    :namespace,
    :description,
    :type_urls,
    :proto,
    :pipeline,
    :sources,
    :code,
    :tags,
    :data
  ]

  field :address, 1, type: :string
  field :name, 2, type: :string
  field :version, 3, type: :uint32
  field :namespace, 4, type: :string
  field :description, 5, type: :string
  field :type_urls, 6, repeated: true, type: ForgeAbi.TypeUrls
  field :proto, 7, type: :string
  field :pipeline, 8, type: :string
  field :sources, 9, repeated: true, type: :string
  field :code, 10, repeated: true, type: ForgeAbi.CodeInfo
  field :tags, 11, repeated: true, type: :string
  # Field 15 is an open extension slot (Any), as in the other tx messages.
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message for an account-migration transaction.
defmodule ForgeAbi.AccountMigrateTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          pk: binary,
          type: ForgeAbi.WalletType.t() | nil,
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:pk, :type, :address, :data]

  field :pk, 1, type: :bytes
  # Marked deprecated in the proto definition; kept for wire compatibility.
  field :type, 2, type: ForgeAbi.WalletType, deprecated: true
  field :address, 3, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message for an account-declaration transaction.
defmodule ForgeAbi.DeclareTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          moniker: String.t(),
          issuer: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:moniker, :issuer, :data]

  field :moniker, 1, type: :string
  field :issuer, 2, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message for a delegation transaction.
defmodule ForgeAbi.DelegateTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          to: String.t(),
          ops: [ForgeAbi.DelegateOp.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :to, :ops, :data]

  field :address, 1, type: :string
  field :to, 2, type: :string
  field :ops, 3, repeated: true, type: ForgeAbi.DelegateOp
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message: a delegated type URL plus its rule strings.
defmodule ForgeAbi.DelegateOp do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          type_url: String.t(),
          rules: [String.t()]
        }
  defstruct [:type_url, :rules]

  field :type_url, 1, type: :string
  field :rules, 2, repeated: true, type: :string
end
# Generated protobuf (proto3) message for revoking a delegation.
defmodule ForgeAbi.RevokeDelegateTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          to: String.t(),
          type_urls: [String.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :to, :type_urls, :data]

  field :address, 1, type: :string
  field :to, 2, type: :string
  field :type_urls, 3, repeated: true, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message: an asset address with its spec data string.
defmodule ForgeAbi.AssetSpec do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          data: String.t()
        }
  defstruct [:address, :data]

  field :address, 1, type: :string
  field :data, 2, type: :string
end
# Generated protobuf (proto3) message for acquiring assets from a factory.
defmodule ForgeAbi.AcquireAssetTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          to: String.t(),
          specs: [ForgeAbi.AssetSpec.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:to, :specs, :data]

  field :to, 1, type: :string
  field :specs, 2, repeated: true, type: ForgeAbi.AssetSpec
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message for consuming an asset.
defmodule ForgeAbi.ConsumeAssetTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          issuer: String.t(),
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:issuer, :address, :data]

  field :issuer, 1, type: :string
  field :address, 2, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# Generated protobuf (proto3) message for creating an asset.
defmodule ForgeAbi.CreateAssetTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          moniker: String.t(),
          data: Google.Protobuf.Any.t() | nil,
          readonly: boolean,
          transferrable: boolean,
          ttl: non_neg_integer,
          parent: String.t(),
          address: String.t()
        }
  defstruct [:moniker, :data, :readonly, :transferrable, :ttl, :parent, :address]

  field :moniker, 1, type: :string
  # Unlike most tx messages here, `data` sits at field 2 in this message.
  field :data, 2, type: Google.Protobuf.Any
  field :readonly, 3, type: :bool
  field :transferrable, 4, type: :bool
  field :ttl, 5, type: :uint32
  field :parent, 6, type: :string
  field :address, 7, type: :string
end
# Generated protobuf (proto3) message: transferability flag and TTL of an asset.
defmodule ForgeAbi.AssetAttributes do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          transferrable: boolean,
          ttl: non_neg_integer
        }
  defstruct [:transferrable, :ttl]

  field :transferrable, 1, type: :bool
  field :ttl, 2, type: :uint32
end
# Generated protobuf (proto3) message describing an asset factory (template,
# price, issuance limit and attributes for assets minted from it).
defmodule ForgeAbi.AssetFactory do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          description: String.t(),
          limit: non_neg_integer,
          price: ForgeAbi.BigUint.t() | nil,
          template: String.t(),
          allowed_spec_args: [String.t()],
          asset_name: String.t(),
          attributes: ForgeAbi.AssetAttributes.t() | nil,
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [
    :description,
    :limit,
    :price,
    :template,
    :allowed_spec_args,
    :asset_name,
    :attributes,
    :data
  ]

  field :description, 1, type: :string
  field :limit, 2, type: :uint32
  field :price, 3, type: ForgeAbi.BigUint
  field :template, 4, type: :string
  field :allowed_spec_args, 5, repeated: true, type: :string
  field :asset_name, 6, type: :string
  field :attributes, 7, type: ForgeAbi.AssetAttributes
  field :data, 15, type: Google.Protobuf.Any
end
# NOTE(review): protobuf-generated modules (see note above the first group);
# edits here will be lost on regeneration.
defmodule ForgeAbi.UpdateAssetTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          moniker: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :moniker, :data]

  field :address, 1, type: :string
  field :moniker, 2, type: :string
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.UpdateConsensusParamsTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          delegate_config: ForgeAbi.DelegateConfig.t() | nil,
          declare_config: ForgeAbi.DeclareConfig.t() | nil,
          token_swap_config: ForgeAbi.TokenSwapConfig.t() | nil,
          moderator_config: ForgeAbi.AccountConfig.t() | nil
        }
  defstruct [:delegate_config, :declare_config, :token_swap_config, :moderator_config]

  field :delegate_config, 1, type: ForgeAbi.DelegateConfig
  field :declare_config, 2, type: ForgeAbi.DeclareConfig
  field :token_swap_config, 3, type: ForgeAbi.TokenSwapConfig
  field :moderator_config, 4, type: ForgeAbi.AccountConfig
end

defmodule ForgeAbi.UpdateValidatorTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          candidates: [ForgeAbi.Validator.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:candidates, :data]

  field :candidates, 1, repeated: true, type: ForgeAbi.Validator
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.UpgradeNodeTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          height: non_neg_integer,
          version: String.t(),
          override: boolean
        }
  defstruct [:height, :version, :override]

  field :height, 1, type: :uint64
  field :version, 2, type: :string
  field :override, 3, type: :bool
end

defmodule ForgeAbi.ActivateProtocolTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :data]

  field :address, 1, type: :string
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.DeactivateProtocolTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :data]

  field :address, 1, type: :string
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.PokeTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          date: String.t(),
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:date, :address, :data]

  field :date, 1, type: :string
  field :address, 2, type: :string
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.RefuelTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          date: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:date, :data]

  field :date, 1, type: :string
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.RetrieveSwapTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          hashkey: binary,
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :hashkey, :data]

  field :address, 1, type: :string
  field :hashkey, 2, type: :bytes
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.RevokeSwapTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          address: String.t(),
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:address, :data]

  field :address, 1, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
# NOTE(review): protobuf-generated modules (see note above the first group);
# edits here will be lost on regeneration.
defmodule ForgeAbi.SetupSwapTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: ForgeAbi.BigUint.t() | nil,
          assets: [String.t()],
          receiver: String.t(),
          hashlock: binary,
          locktime: non_neg_integer,
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:value, :assets, :receiver, :hashlock, :locktime, :data]

  field :value, 1, type: ForgeAbi.BigUint
  field :assets, 2, repeated: true, type: :string
  field :receiver, 3, type: :string
  field :hashlock, 4, type: :bytes
  field :locktime, 5, type: :uint32
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.ApproveWithdrawTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          withdraw_tx_hash: String.t(),
          evidence: ForgeAbi.Evidence.t() | nil
        }
  defstruct [:withdraw_tx_hash, :evidence]

  field :withdraw_tx_hash, 1, type: :string
  field :evidence, 2, type: ForgeAbi.Evidence
end

defmodule ForgeAbi.DepositTokenTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: ForgeAbi.BigUint.t() | nil,
          address: String.t(),
          evidence: ForgeAbi.Evidence.t() | nil
        }
  defstruct [:value, :address, :evidence]

  field :value, 1, type: ForgeAbi.BigUint
  field :address, 2, type: :string
  field :evidence, 3, type: ForgeAbi.Evidence
end

defmodule ForgeAbi.RevokeWithdrawTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          withdraw_tx_hash: String.t()
        }
  defstruct [:withdraw_tx_hash]

  field :withdraw_tx_hash, 1, type: :string
end

defmodule ForgeAbi.WithdrawTokenTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: ForgeAbi.BigUint.t() | nil,
          to: String.t(),
          chain_type: String.t(),
          chain_id: String.t()
        }
  defstruct [:value, :to, :chain_type, :chain_id]

  field :value, 1, type: ForgeAbi.BigUint
  field :to, 2, type: :string
  field :chain_type, 3, type: :string
  field :chain_id, 4, type: :string
end

defmodule ForgeAbi.ExchangeInfo do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          value: ForgeAbi.BigUint.t() | nil,
          assets: [String.t()]
        }
  defstruct [:value, :assets]

  field :value, 1, type: ForgeAbi.BigUint
  field :assets, 2, repeated: true, type: :string
end

defmodule ForgeAbi.ExchangeTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          to: String.t(),
          sender: ForgeAbi.ExchangeInfo.t() | nil,
          receiver: ForgeAbi.ExchangeInfo.t() | nil,
          expired_at: Google.Protobuf.Timestamp.t() | nil,
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:to, :sender, :receiver, :expired_at, :data]

  field :to, 1, type: :string
  field :sender, 2, type: ForgeAbi.ExchangeInfo
  field :receiver, 3, type: ForgeAbi.ExchangeInfo
  field :expired_at, 4, type: Google.Protobuf.Timestamp
  field :data, 15, type: Google.Protobuf.Any
end

defmodule ForgeAbi.TransferTx do
  @moduledoc false
  use Protobuf, syntax: :proto3

  @type t :: %__MODULE__{
          to: String.t(),
          value: ForgeAbi.BigUint.t() | nil,
          assets: [String.t()],
          data: Google.Protobuf.Any.t() | nil
        }
  defstruct [:to, :value, :assets, :data]

  field :to, 1, type: :string
  field :value, 2, type: ForgeAbi.BigUint
  field :assets, 3, repeated: true, type: :string
  field :data, 15, type: Google.Protobuf.Any
end
|
lib/protobuf/gen/tx.pb.ex
| 0.788705
| 0.608536
|
tx.pb.ex
|
starcoder
|
defmodule DecemberTwo do
  @moduledoc """
  Second Advent of Code task: a minimal "Intcode" interpreter.

  The program is kept as a map from position (zero-based index) to integer
  value so arbitrary positions can be read and written.
  """

  # Positions 1 and 2 of the loaded program hold the "noun" and "verb" inputs.
  @noun_position 1
  @verb_position 2
  # Fixed inputs required by part one.
  @part_one_noun 12
  @part_one_verb 2
  # Output part two searches for, and the inclusive upper bound of the
  # noun/verb brute-force search.
  @part_two_target 19_690_720
  @part_two_max_val 99

  @doc """
  Read `file` and convert the comma separated list to a map with the index
  as key.

  Raises `File.Error` when the file cannot be read. (The previous version
  matched `File.read/1` with `{_, contents}`, so an `{:error, reason}`
  result slipped through and crashed later with a confusing error.)
  """
  def init_map(file) do
    file
    |> File.read!()
    |> String.trim()
    |> String.split(",", trim: true)
    |> Enum.map(&String.to_integer/1)
    |> Enum.with_index()
    |> Map.new(fn {val, idx} -> {idx, val} end)
  end

  @doc """
  Part one of the task: run the program with the fixed noun/verb inputs and
  return the value left at position 0.
  """
  def part_one(file) do
    file
    |> init_map()
    |> update_noun_and_verb(@part_one_noun, @part_one_verb)
    |> check_code(0)
    |> Map.get(0)
  end

  @doc """
  Part two of the task: brute-force every noun/verb pair in range until the
  program output equals the target.
  """
  def part_two(file) do
    to_check = for noun <- 0..@part_two_max_val, verb <- 0..@part_two_max_val, do: {noun, verb}

    file
    |> init_map()
    |> check_noun_and_verb_to(@part_two_target, to_check)
  end

  @doc """
  Try each `{noun, verb}` candidate in turn; return `100 * noun + verb` for
  the first pair whose program output (position 0) equals `look_for`, or
  `nil` when all candidates are exhausted.
  """
  def check_noun_and_verb_to(_sequence, _look_for, []), do: nil

  def check_noun_and_verb_to(sequence, look_for, [{noun, verb} | rest]) do
    result =
      sequence
      |> update_noun_and_verb(noun, verb)
      |> check_code(0)
      |> Map.get(0)

    case result do
      ^look_for -> 100 * noun + verb
      _ -> check_noun_and_verb_to(sequence, look_for, rest)
    end
  end

  @doc """
  Update noun and verb by setting position 1 and 2 of the map.
  """
  def update_noun_and_verb(sequence, noun, verb) do
    sequence
    |> Map.put(@noun_position, noun)
    |> Map.put(@verb_position, verb)
  end

  @doc """
  Interpret the opcode at `current_position`: 1 adds, 2 multiplies (both
  store at the target position and continue 4 cells further), 99 halts and
  returns the final program map.

  Any other opcode crashes with a `CaseClauseError` ("let it crash").
  """
  def check_code(sequence, current_position) do
    op_cde = Map.get(sequence, current_position)
    # The operand cells hold *positions*, which are dereferenced below.
    n1_pos = Map.get(sequence, current_position + 1)
    n2_pos = Map.get(sequence, current_position + 2)
    sr_pos = Map.get(sequence, current_position + 3)
    n1_val = Map.get(sequence, n1_pos)
    n2_val = Map.get(sequence, n2_pos)

    case op_cde do
      1 ->
        sequence
        |> Map.put(sr_pos, n1_val + n2_val)
        |> check_code(current_position + 4)

      2 ->
        sequence
        |> Map.put(sr_pos, n1_val * n2_val)
        |> check_code(current_position + 4)

      99 ->
        sequence
    end
  end
end
|
02/elixir/lib/december_two.ex
| 0.748168
| 0.410431
|
december_two.ex
|
starcoder
|
defmodule Cards do
  @moduledoc """
  Provides functions for creating, shuffling, dealing and persisting a deck
  of cards.
  """

  @doc """
  Create a deck of 20 cards (5 values x 4 suits), ordered value-major.

  ## Examples

      iex> deck = Cards.create_deck
      iex> deck
      ["Ace of Spades", "Ace of Clubs", "Ace of Hearts", "Ace of Diamonds",
      "Two of Spades", "Two of Clubs", "Two of Hearts", "Two of Diamonds",
      "Three of Spades", "Three of Clubs", "Three of Hearts", "Three of Diamonds",
      "Four of Spades", "Four of Clubs", "Four of Hearts", "Four of Diamonds",
      "Five of Spades", "Five of Clubs", "Five of Hearts", "Five of Diamonds"]
  """
  def create_deck do
    suits = ["Spades", "Clubs", "Hearts", "Diamonds"]
    values = ["Ace", "Two", "Three", "Four", "Five"]

    # The comprehension already yields a flat list of strings, so the
    # previous `List.flatten/1` call was a no-op and has been removed.
    for value <- values, suit <- suits do
      "#{value} of #{suit}"
    end
  end

  @doc """
  Return the given `deck` with its cards in random order.

  The previous example here was not a runnable doctest (broken `iex >`
  prompt, unbound `deck`, nondeterministic output); this one is
  deterministic.

  ## Examples

      iex> deck = Cards.create_deck
      iex> shuffled = Cards.shuffle(deck)
      iex> Enum.sort(shuffled) == Enum.sort(deck)
      true
  """
  def shuffle(deck) do
    Enum.shuffle(deck)
  end

  @doc """
  Ask if the `deck` contains the `card`.

  ## Examples

      iex> deck = Cards.create_deck
      iex> Cards.contains?(deck, "King of Hearts")
      false
      iex> Cards.contains?(deck, "Two of Spades")
      true
  """
  def contains?(deck, card) do
    Enum.member?(deck, card)
  end

  @doc """
  Deal a hand of `number_of_cards` cards taken from the top of `deck`.
  The remainder of the deck is discarded.
  """
  def deal(deck, number_of_cards) do
    {hand, _rest_of_deck} = Enum.split(deck, number_of_cards)
    hand
  end

  @doc """
  Save the `deck` to `filename` in Erlang external term format.
  Returns `:ok` or `{:error, reason}` from `File.write/2`.
  """
  def save(deck, filename) do
    binary = :erlang.term_to_binary(deck)
    File.write(filename, binary)
  end

  @doc """
  Read a deck previously written by `save/2`.
  Returns an error message string when the file cannot be read.
  """
  def read(filename) do
    case File.read(filename) do
      # NOTE: :erlang.binary_to_term/1 is unsafe on untrusted files; only
      # load files produced by `save/2`.
      {:ok, deck} -> :erlang.binary_to_term(deck)
      {:error, _reason} -> "File does not exist!"
    end
  end

  @doc """
  Create a deck, shuffle it and deal `number_of_cards` cards.
  """
  def create_hand(number_of_cards) do
    create_deck()
    |> shuffle()
    |> deal(number_of_cards)
  end
end
|
lib/cards.ex
| 0.841923
| 0.76176
|
cards.ex
|
starcoder
|
defmodule FarmbotCeleryScript.Scheduler do
  @moduledoc """
  Handles execution of CeleryScript.
  CeleryScript can be `execute`d or `schedule`d. Both have the same API but
  slightly different behaviour.
  A message will arrive in the callers inbox after either shaped like
  {FarmbotCeleryScript.Scheduler, result}
  where result will be
  :ok | {:error, "some string error"}
  The Scheduler makes no effort to rescue bad syscall implementations. See
  the docs for SysCalls for more details.
  """
  use GenServer
  require Logger
  alias FarmbotCeleryScript.{AST, Compiler, Scheduler, StepRunner}
  alias Scheduler, as: State

  # 15 minutes
  @grace_period_ms 900_000

  # Calendar entry broadcast to registered listeners: the payload `data`
  # plus when it is scheduled to run.
  defmodule Dispatch do
    defstruct [
      :scheduled_at,
      :data
    ]
  end

  defstruct next: nil,
            checkup_timer: nil,
            scheduled_pid: nil,
            compiled: [],
            monitors: [],
            registry_name: nil

  @type compiled_ast() :: [(() -> any)]
  @type state :: %State{
          next: nil | {compiled_ast(), DateTime.t(), data :: map(), pid},
          checkup_timer: nil | reference(),
          scheduled_pid: nil | pid(),
          compiled: [{compiled_ast(), DateTime.t(), data :: map(), pid}],
          monitors: [GenServer.from()],
          registry_name: GenServer.server()
        }

  @doc "Start an instance of a CeleryScript Scheduler"
  def start_link(args, opts \\ [name: __MODULE__]) do
    GenServer.start_link(__MODULE__, args, opts)
  end

  # Register the calling process to receive calendar dispatches, then push
  # the current calendar to it immediately via `dispatch/1`.
  def register(sch \\ __MODULE__) do
    state = :sys.get_state(sch)
    {:ok, _} = Registry.register(state.registry_name, :dispatch, self())
    dispatch(state)
    :ok
  end

  @doc """
  Schedule CeleryScript to execute whenever there is time for it.
  Calls are executed in a first in first out buffer, with things being added
  by `execute/2` taking priority.
  """
  @spec schedule(
          GenServer.server(),
          AST.t() | [Compiler.compiled()],
          DateTime.t(),
          map()
        ) ::
          {:ok, reference()}
  def schedule(scheduler_pid \\ __MODULE__, celery_script, at, data)

  # A raw AST is compiled first, then falls through to the list clause.
  def schedule(sch, %AST{} = ast, %DateTime{} = at, %{} = data) do
    schedule(sch, Compiler.compile(ast), at, data)
  end

  def schedule(sch, compiled, at, %{} = data) when is_list(compiled) do
    GenServer.call(sch, {:schedule, compiled, at, data}, 60_000)
  end

  def get_next(sch \\ __MODULE__) do
    GenServer.call(sch, :get_next)
  end

  # Relative time (via Timex) of the next scheduled execution, or nil.
  def get_next_from_now(sch \\ __MODULE__) do
    case get_next_at(sch) do
      nil -> nil
      at -> Timex.from_now(at)
    end
  end

  def get_next_at(sch \\ __MODULE__) do
    case get_next(sch) do
      nil ->
        nil

      {_compiled, at, _data, _pid} ->
        at
    end
  end

  @impl true
  def init(args) do
    registry_name = Keyword.get(args, :registry_name, Scheduler.Registry)
    {:ok, _} = Registry.start_link(keys: :duplicate, name: registry_name)
    # Kick off the periodic checkup loop right away.
    send(self(), :checkup)
    {:ok, %State{registry_name: registry_name}}
  end

  @impl true
  def handle_call({:schedule, compiled, at, data}, {pid, ref} = from, state) do
    state =
      state
      |> monitor(pid)
      |> add(compiled, at, data, pid)

    # Reply explicitly here; the `{:noreply, _}` return below must then not
    # reply again.
    :ok = GenServer.reply(from, {:ok, ref})
    {:noreply, state}
  end

  def handle_call(:get_next, _from, state) do
    {:reply, state.next, state}
  end

  # A monitored scheduling process exited: stop monitoring it and drop all
  # of its pending work.
  @impl true
  def handle_info({:DOWN, ref, :process, pid, _reason}, state) do
    Logger.debug("Scheduler monitor down: #{inspect(pid)}")

    state =
      state
      |> demonitor({pid, ref})
      |> delete(pid)

    {:noreply, state}
  end

  # Nothing scheduled; just arm the next checkup timer.
  def handle_info(:checkup, %{next: nil} = state) do
    # Logger.debug("Scheduling next checkup with no next")
    state
    |> schedule_next_checkup()
    |> dispatch()
  end

  def handle_info(:checkup, %{next: {_compiled, at, _data, _pid}} = state) do
    case DateTime.diff(DateTime.utc_now(), at, :millisecond) do
      # now is before the next date
      diff_ms when diff_ms < 0 ->
        # from_now =
        #   DateTime.utc_now()
        #   |> DateTime.add(abs(diff_ms), :millisecond)
        #   |> Timex.from_now()
        # msg = "Next execution is still #{diff_ms}ms too early (#{from_now})"
        # Logger.info(msg)
        state
        |> schedule_next_checkup(abs(diff_ms))
        |> dispatch()

      # now is more than the grace period past schedule time
      # -> too stale to run; the entry is dropped without executing.
      diff_ms when diff_ms > @grace_period_ms ->
        # from_now = Timex.from_now(at)
        # Logger.info("Next execution is #{diff_ms}ms too late (#{from_now})")
        state
        |> pop_next()
        |> index_next()
        |> schedule_next_checkup()
        |> dispatch()

      # now is late, but less than the grace period late
      diff_ms when diff_ms >= 0 when diff_ms <= @grace_period_ms ->
        Logger.info(
          "Next execution is ready for execution: #{Timex.from_now(at)}"
        )

        state
        |> execute_next()
        |> dispatch()
    end
  end

  # StepRunner finished the scheduled work: forward the result to the
  # process that scheduled it and advance the calendar.
  def handle_info(
        {:step_complete, {scheduled_at, executed_at, pid}, result},
        state
      ) do
    send(
      pid,
      {FarmbotCeleryScript,
       {:scheduled_execution, scheduled_at, executed_at, result}}
    )

    state
    |> pop_next()
    |> index_next()
    |> schedule_next_checkup()
    |> dispatch()
  end

  # Run `state.next` in a fresh process; completion comes back to this
  # server as a `:step_complete` message (handled above).
  @spec execute_next(state()) :: state()
  defp execute_next(%{next: {compiled, at, _data, pid}} = state) do
    scheduler_pid = self()

    scheduled_pid =
      spawn(fn ->
        StepRunner.step(scheduler_pid, {at, DateTime.utc_now(), pid}, compiled)
      end)

    %{state | scheduled_pid: scheduled_pid}
  end

  @spec schedule_next_checkup(state(), :default | integer) :: state()
  defp schedule_next_checkup(state, offset_ms \\ :default)

  # A checkup timer is already armed: cancel it before arming a new one so
  # only a single timer is ever outstanding.
  defp schedule_next_checkup(%{checkup_timer: timer} = state, offset_ms)
       when is_reference(timer) do
    # Logger.debug("canceling checkup timer")
    Process.cancel_timer(timer)
    schedule_next_checkup(%{state | checkup_timer: nil}, offset_ms)
  end

  defp schedule_next_checkup(state, :default) do
    # Logger.debug("Scheduling next checkup in 15 seconds")
    checkup_timer = Process.send_after(self(), :checkup, 15_000)
    %{state | checkup_timer: checkup_timer}
  end

  # If the offset is less than a minute, there will be so little skew that
  # it won't be noticed. This speeds up execution and gets it to pretty
  # close to millisecond accuracy
  defp schedule_next_checkup(state, offset_ms) when offset_ms <= 60000 do
    _ = inspect(offset_ms)
    # Logger.debug("Scheduling next checkup in #{offset_ms} seconds")
    checkup_timer = Process.send_after(self(), :checkup, offset_ms)
    %{state | checkup_timer: checkup_timer}
  end

  # Offsets beyond a minute fall back to the default 15-second poll.
  defp schedule_next_checkup(state, offset_ms) do
    _ = inspect(offset_ms)
    # Logger.debug("Scheduling next checkup in 15 seconds (#{offset_ms})")
    checkup_timer = Process.send_after(self(), :checkup, 15_000)
    %{state | checkup_timer: checkup_timer}
  end

  # Re-sort pending work by scheduled time and cache the earliest entry in
  # `state.next`.
  @spec index_next(state()) :: state()
  defp index_next(%{compiled: []} = state), do: %{state | next: nil}

  defp index_next(state) do
    [next | _] =
      compiled =
      Enum.sort(state.compiled, fn
        # Equal timestamps (same `at` bound twice) compare as "ordered".
        {_, at, _, _}, {_, at, _, _} ->
          true

        {_, left, _, _}, {_, right, _, _} ->
          DateTime.compare(left, right) == :lt
      end)

    %{state | next: next, compiled: compiled}
  end

  @spec pop_next(state()) :: state()
  defp pop_next(%{compiled: [_ | compiled]} = state) do
    %{state | compiled: compiled, scheduled_pid: nil}
  end

  defp pop_next(%{compiled: []} = state) do
    %{state | compiled: [], scheduled_pid: nil}
  end

  # Monitor `pid` at most once so its pending work can be cleaned up when
  # it exits (see the :DOWN clause above).
  @spec monitor(state(), pid()) :: state()
  defp monitor(state, pid) do
    already_monitored? =
      Enum.find(state.monitors, fn
        {^pid, _ref} ->
          true

        _ ->
          false
      end)

    if already_monitored? do
      state
    else
      ref = Process.monitor(pid)
      %{state | monitors: [{pid, ref} | state.monitors]}
    end
  end

  @spec demonitor(state(), GenServer.from()) :: state()
  defp demonitor(state, {pid, ref}) do
    monitors =
      Enum.reject(state.monitors, fn
        {^pid, ^ref} ->
          true

        {_pid, _ref} ->
          false
      end)

    %{state | monitors: monitors}
  end

  @spec add(state(), compiled_ast(), DateTime.t(), data :: map(), pid()) ::
          state()
  defp add(state, compiled, at, data, pid) do
    %{state | compiled: [{compiled, at, data, pid} | state.compiled]}
    |> index_next()
  end

  @spec delete(state(), pid()) :: state()
  defp delete(state, pid) do
    compiled =
      Enum.reject(state.compiled, fn
        {_compiled, _at, _data, ^pid} -> true
        {_compiled, _at, _data, _pid} -> false
      end)

    %{state | compiled: compiled}
    |> index_next()
  end

  # Broadcast the current calendar to every registered listener. Returns
  # `{:noreply, state}` so handle_info clauses can end with `|> dispatch()`.
  defp dispatch(%{registry_name: name, compiled: compiled} = state) do
    calendar =
      Enum.map(compiled, fn
        {_compiled, scheduled_at, data, _pid} ->
          %Dispatch{data: data, scheduled_at: scheduled_at}
      end)

    Registry.dispatch(name, :dispatch, fn entries ->
      for {pid, _} <- entries do
        do_dispatch(name, pid, calendar)
      end
    end)

    {:noreply, state}
  end

  # Only send when the calendar changed since the last dispatch to `pid`
  # (tracked via registry metadata keyed by the listener pid).
  defp do_dispatch(name, pid, calendar) do
    case Registry.meta(name, {:last_calendar, pid}) do
      {:ok, ^calendar} ->
        Logger.debug("calendar for #{inspect(pid)} hasn't changed")
        {FarmbotCeleryScript, {:calendar, calendar}}

      _old_calendar ->
        Registry.put_meta(name, {:last_calendar, pid}, calendar)
        send(pid, {FarmbotCeleryScript, {:calendar, calendar}})
    end
  end
end
|
farmbot_celery_script/lib/farmbot_celery_script/scheduler.ex
| 0.816223
| 0.408277
|
scheduler.ex
|
starcoder
|
defmodule ESpec.Expect do
  @moduledoc """
  Defines `expect` and `is_expected` helper functions.
  These functions wrap arguments for ESpec.ExpectTo module.
  """
  alias ESpec.ExpectTo

  @doc false
  defmacro __using__(_arg) do
    quote do
      @doc "The same as `expect(subject)`"
      def is_expected do
        {ESpec.ExpectTo, apply(__MODULE__, :subject, []), pruned_stacktrace()}
      end
    end
  end

  @doc "Wrapper for `ESpec.ExpectTo`."
  def expect(do: value) do
    {ExpectTo, value, pruned_stacktrace()}
  end

  def expect(value) do
    {ExpectTo, value, pruned_stacktrace()}
  end

  @doc "Current stacktrace with ESpec's own assertion frames removed."
  def pruned_stacktrace do
    {:current_stacktrace, stack} = Process.info(self(), :current_stacktrace)
    prune_stacktrace(stack)
  end

  # Walk the stacktrace, dropping frames that belong to the expectation
  # machinery, and stop entirely once the example runner frame is reached.

  # stop at the example runner
  defp prune_stacktrace([{ESpec.ExampleRunner, _, _, _} | _tail]), do: []

  # these frames are noise introduced by the helpers themselves
  defp prune_stacktrace([{Process, :info, _, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{_, :is_expected, 0, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{_, :should, 1, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{_, :should_not, 1, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.Should, :should, 2, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.Should, :should_not, 2, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.To, :to, 2, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.To, :not_to, 2, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.To, :to_not, 2, _} | tail]), do: prune_stacktrace(tail)
  defp prune_stacktrace([{ESpec.Expect, :expect, _, _} | tail]), do: prune_stacktrace(tail)

  defp prune_stacktrace([{ESpec.Expect, :pruned_stacktrace, 0, _} | tail]),
    do: prune_stacktrace(tail)

  defp prune_stacktrace([{ESpec.Expect, :prune_stacktrace, 2, _} | tail]),
    do: prune_stacktrace(tail)

  # any other frame is kept verbatim
  defp prune_stacktrace([frame | tail]), do: [frame | prune_stacktrace(tail)]
  defp prune_stacktrace([]), do: []
end
|
lib/espec/expect.ex
| 0.805096
| 0.826116
|
expect.ex
|
starcoder
|
defmodule Zstream.Unzip.Extra do
  @moduledoc false
  use Bitwise

  # Container for extra-field blocks with an unrecognized signature; the
  # raw signature (as a hex string), declared size and payload are kept.
  defmodule Unknown do
    @type t :: %__MODULE__{
            signature: String.t(),
            tsize: integer(),
            data: iodata()
          }
    defstruct [:signature, :tsize, :data]
  end

  # Parsed 0x5455 ("UT") extended-timestamp block; each timestamp is nil
  # when its flag bit was unset.
  defmodule ExtendedTimestamp do
    @type t :: %__MODULE__{
            mtime: DateTime.t() | nil,
            atime: DateTime.t() | nil,
            ctime: DateTime.t() | nil
          }
    defstruct [:mtime, :atime, :ctime]
  end

  # Parsed 0x0001 Zip64 extended-information block (64-bit sizes).
  defmodule Zip64ExtendedInformation do
    @type t :: %__MODULE__{
            size: integer(),
            compressed_size: integer()
          }
    defstruct [:size, :compressed_size]
  end

  # -Extended Timestamp Extra Field:
  #  ==============================
  #  The following is the layout of the extended-timestamp extra block.
  #  (Last Revision 19970118)
  #  Local-header version:
  #  Value         Size        Description
  #  -----         ----        -----------
  #  (time)        0x5455      Short tag for this extra block type ("UT")
  #  TSize         Short       total data size for this block
  #  Flags         Byte        info bits
  #  (ModTime)     Long        time of last modification (UTC/GMT)
  #  (AcTime)      Long        time of last access (UTC/GMT)
  #  (CrTime)      Long        time of original creation (UTC/GMT)
  #  The central-header extra field contains the modification time only,
  #  or no timestamp at all. TSize is used to flag its presence or
  #  absence. But note:
  #  If "Flags" indicates that Modtime is present in the local header
  #  field, it MUST be present in the central header field, too!
  #  This correspondence is required because the modification time
  #  value may be used to support trans-timezone freshening and
  #  updating operations with zip archives.
  #  The time values are in standard Unix signed-long format, indicating
  #  the number of seconds since 1 January 1970 00:00:00. The times
  #  are relative to Coordinated Universal Time (UTC), also sometimes
  #  referred to as Greenwich Mean Time (GMT). To convert to local time,
  #  the software must know the local timezone offset from UTC/GMT.
  #  The lower three bits of Flags in both headers indicate which time-
  #  stamps are present in the LOCAL extra field:
  #  bit 0           if set, modification time is present
  #  bit 1           if set, access time is present
  #  bit 2           if set, creation time is present
  #  bits 3-7        reserved for additional timestamps; not set
  #  Those times that are present will appear in the order indicated, but
  #  any combination of times may be omitted. (Creation time may be
  #  present without access time, for example.) TSize should equal
  #  (1 + 4*(number of set bits in Flags)), as the block is currently
  #  defined. Other timestamps may be added in the future.
  def parse(<<0x5455::little-size(16), _tsize::little-size(16), rest::binary>>, acc) do
    <<flag::little-size(8), rest::binary>> = rest
    timestamp = %ExtendedTimestamp{}

    # Flag bits 0/1/2 announce which of mtime/atime/ctime follow, in that
    # fixed order; each present time is a 32-bit little-endian Unix value.
    {timestamp, rest} =
      if bit_set?(flag, 0) do
        <<mtime::little-size(32), rest::binary>> = rest
        {%{timestamp | mtime: DateTime.from_unix!(mtime)}, rest}
      else
        {timestamp, rest}
      end

    {timestamp, rest} =
      if bit_set?(flag, 1) do
        <<atime::little-size(32), rest::binary>> = rest
        {%{timestamp | atime: DateTime.from_unix!(atime)}, rest}
      else
        {timestamp, rest}
      end

    {timestamp, rest} =
      if bit_set?(flag, 2) do
        <<ctime::little-size(32), rest::binary>> = rest
        {%{timestamp | ctime: DateTime.from_unix!(ctime)}, rest}
      else
        {timestamp, rest}
      end

    parse(rest, [timestamp | acc])
  end

  # -Zip64 Extended Information Extra Field (0x0001):
  #  The following is the layout of the zip64 extended
  #  information "extra" block. If one of the size or
  #  offset fields in the Local or Central directory
  #  record is too small to hold the required data,
  #  a Zip64 extended information record is created.
  #  The order of the fields in the zip64 extended
  #  information record is fixed, but the fields MUST
  #  only appear if the corresponding Local or Central
  #  directory record field is set to 0xFFFF or 0xFFFFFFFF.
  #  Note: all fields stored in Intel low-byte/high-byte order.
  #  Value      Size       Description
  #  -----      ----       -----------
  #  (ZIP64) 0x0001     2 bytes    Tag for this "extra" block type
  #  Size       2 bytes    Size of this "extra" block
  #  Original
  #  Size       8 bytes    Original uncompressed file size
  #  Compressed
  #  Size       8 bytes    Size of compressed data
  #  Relative Header
  #  Offset     8 bytes    Offset of local header record
  #  Disk Start
  #  Number     4 bytes    Number of the disk on which
  #  this file starts
  #  This entry in the Local header MUST include BOTH original
  #  and compressed file size fields. If encrypting the
  #  central directory and bit 13 of the general purpose bit
  #  flag is set indicating masking, the value stored in the
  #  Local Header for the original file size will be zero.
  def parse(
        <<0x0001::little-size(16), tsize::little-size(16), size::little-size(64),
          compressed_size::little-size(64), rest::binary>>,
        acc
      ) do
    # The two 8-byte sizes were already consumed by the head match; skip
    # whatever else the block declares (relative offset, disk number).
    tsize = tsize - 16
    <<_data::binary-size(tsize), rest::binary>> = rest

    zip64_extended_information = %Zip64ExtendedInformation{
      size: size,
      compressed_size: compressed_size
    }

    parse(rest, [zip64_extended_information | acc])
  end

  # Any other signature: keep the raw block so callers can inspect it.
  def parse(<<signature::little-size(16), tsize::little-size(16), rest::binary>>, acc) do
    <<data::binary-size(tsize), rest::binary>> = rest

    parse(rest, [
      %Unknown{signature: Integer.to_string(signature, 16), tsize: tsize, data: data} | acc
    ])
  end

  # Input exhausted: restore original block order.
  def parse(<<>>, acc), do: Enum.reverse(acc)

  # True when bit `n` (0-based) of `bits` is set.
  defp bit_set?(bits, n) do
    (bits &&& 1 <<< n) > 0
  end
end
|
lib/zstream/unzip/extra.ex
| 0.681409
| 0.430147
|
extra.ex
|
starcoder
|
defmodule Floki do
alias Floki.{Finder, FilterOut, HTMLTree}
@moduledoc """
Floki is a simple HTML parser that enables search for nodes using CSS selectors.
## Example
Assuming that you have the following HTML:
```html
<!doctype html>
<html>
<body>
<section id="content">
<p class="headline">Floki</p>
<a href="http://github.com/philss/floki">Github page</a>
<span data-model="user">philss</span>
</section>
</body>
</html>
```
To parse this, you can use the function `Floki.parse_document/1`:
```elixir
{:ok, html} = Floki.parse_document(doc)
# =>
# [{"html", [],
# [
# {"body", [],
# [
# {"section", [{"id", "content"}],
# [
# {"p", [{"class", "headline"}], ["Floki"]},
# {"a", [{"href", "http://github.com/philss/floki"}], ["Github page"]},
# {"span", [{"data-model", "user"}], ["philss"]}
# ]}
# ]}
# ]}]
```
With this document you can perform queries such as:
* `Floki.find(html, "#content")`
* `Floki.find(html, ".headline")`
* `Floki.find(html, "a")`
* `Floki.find(html, "[data-model=user]")`
* `Floki.find(html, "#content a")`
* `Floki.find(html, ".headline, a")`
Each HTML node is represented by a tuple like:
{tag_name, attributes, children_nodes}
Example of node:
{"p", [{"class", "headline"}], ["Floki"]}
So even if the only child node is the element text, it is represented
inside a list.
"""
@type html_declaration :: {:pi, String.t(), [html_attribute()]}
@type html_comment :: {:comment, String.t()}
@type html_doctype :: {:doctype, String.t(), String.t(), String.t()}
@type html_attribute :: {String.t(), String.t()}
@type html_tag :: {String.t(), [html_attribute()], [html_tag() | String.t() | html_comment()]}
@type html_node :: html_comment() | html_doctype() | html_tag() | html_declaration()
@type html_tree :: [html_node()]
@type css_selector :: String.t() | Floki.Selector.t() | [Floki.Selector.t()]
@doc """
Parses a HTML Document from a String.
The expected string is valid HTML, but the parser will try
to parse it even when it contains errors.
"""
@spec parse(binary()) :: html_tag() | html_tree() | String.t()
@deprecated "Please use parse_document/1 or parse_fragment/1"
def parse(html) do
with {:ok, document} <- Floki.HTMLParser.parse_document(html) do
if length(document) == 1 do
hd(document)
else
document
end
end
end
@doc """
Parses a HTML Document from a string.
It will use the available parser from application env or the one from the
`:html_parser` option.
Check https://github.com/philss/floki#alternative-html-parsers for more details.
## Examples
iex> Floki.parse_document("<html><head></head><body>hello</body></html>")
{:ok, [{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}]}
iex> Floki.parse_document("<html><head></head><body>hello</body></html>", html_parser: Floki.HTMLParser.Mochiweb)
{:ok, [{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}]}
"""
  @spec parse_document(binary(), Keyword.t()) :: {:ok, html_tree()} | {:error, String.t()}
  # Thin delegation: the actual parsing lives in `Floki.HTMLParser`.
  defdelegate parse_document(document, opts \\ []), to: Floki.HTMLParser
@doc """
Parses a HTML Document from a string.
Similar to `Floki.parse_document/1`, but raises `Floki.ParseError` if there was an
error parsing the document.
## Example
iex> Floki.parse_document!("<html><head></head><body>hello</body></html>")
[{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}]
"""
@spec parse_document!(binary(), Keyword.t()) :: html_tree()
def parse_document!(document, opts \\ []) do
case parse_document(document, opts) do
{:ok, parsed_document} -> parsed_document
{:error, message} -> raise Floki.ParseError, message: message
end
end
@doc """
Parses a HTML fragment from a string.
It will use the available parser from application env or the one from the
`:html_parser` option.
Check https://github.com/philss/floki#alternative-html-parsers for more details.
"""
  @spec parse_fragment(binary(), Keyword.t()) :: {:ok, html_tree()} | {:error, String.t()}
  # Thin delegation: the actual parsing lives in `Floki.HTMLParser`.
  defdelegate parse_fragment(fragment, opts \\ []), to: Floki.HTMLParser
@doc """
Parses a HTML fragment from a string.
Similar to `Floki.parse_fragment/1`, but raises `Floki.ParseError` if there was an
error parsing the fragment.
"""
@spec parse_fragment!(binary(), Keyword.t()) :: html_tree()
def parse_fragment!(fragment, opts \\ []) do
case parse_fragment(fragment, opts) do
{:ok, parsed_fragment} -> parsed_fragment
{:error, message} -> raise Floki.ParseError, message: message
end
end
@doc """
Converts an HTML tree to raw HTML.

Note that the resultant HTML may be different from the original one.
Spaces after tags and doctypes are ignored.

## Options

- `:encode`: accepts `true` or `false`. Will encode html special characters
  to html entities.
  You can also control the encoding behaviour at the application level via
  `config :floki, :encode_raw_html, true | false`
- `:pretty`: accepts `true` or `false`. Will format the output, ignoring
  breaklines and spaces from the input and putting new ones in order to pretty format
  the html.

## Examples

    iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["my content"]})
    ~s(<div class="wrapper">my content</div>)

    iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["10 > 5"]}, encode: true)
    ~s(<div class="wrapper">10 &gt; 5</div>)

    iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["10 > 5"]}, encode: false)
    ~s(<div class="wrapper">10 > 5</div>)

    iex> Floki.raw_html({"div", [], ["\\n ", {"span", [], "Fully indented"}, " \\n"]}, pretty: true)
    \"\"\"
    <div>
      <span>
        Fully indented
      </span>
    </div>
    \"\"\"

"""
@spec raw_html(html_tree | binary, keyword) :: binary
defdelegate raw_html(html_tree, options \\ []), to: Floki.RawHTML
@doc """
Finds elements inside an HTML tree or string.

## Examples

    iex> {:ok, html} = Floki.parse_fragment("<p><span class=hint>hello</span></p>")
    iex> Floki.find(html, ".hint")
    [{"span", [{"class", "hint"}], ["hello"]}]

    iex> {:ok, html} = Floki.parse_fragment("<div id=important><div>Content</div></div>")
    iex> Floki.find(html, "#important")
    [{"div", [{"id", "important"}], [{"div", [], ["Content"]}]}]

    iex> {:ok, html} = Floki.parse_fragment("<p><a href='https://google.com'>Google</a></p>")
    iex> Floki.find(html, "a")
    [{"a", [{"href", "https://google.com"}], ["Google"]}]

    iex> Floki.find([{ "div", [], [{"a", [{"href", "https://google.com"}], ["Google"]}]}], "div a")
    [{"a", [{"href", "https://google.com"}], ["Google"]}]

"""
@spec find(binary() | html_tree() | html_node(), css_selector()) :: html_tree
def find(html, selector) when is_binary(html) do
  IO.warn(
    "deprecation: parse the HTML with parse_document or parse_fragment before using find/2"
  )

  # On parse failure the `{:error, _}` tuple is passed through unchanged,
  # mirroring the original `with` without an `else` clause.
  case Floki.parse_document(html) do
    {:ok, document} -> find_in_tree(document, selector)
    other -> other
  end
end

def find(html_tree_as_tuple, selector), do: find_in_tree(html_tree_as_tuple, selector)

# Runs the selector and converts each internal match back to a tuple node.
defp find_in_tree(tree_input, selector) do
  {tree, results} = Finder.find(tree_input, selector)
  Enum.map(results, &HTMLTree.to_tuple(tree, &1))
end
@doc """
Changes the attribute values of the elements matched by `selector`
with the function `mutation` and returns the whole element tree.

If the attribute is absent on a matched element, `mutation` is called
with `nil` and the result is prepended as a new attribute.

## Examples

    iex> Floki.attr([{"div", [{"id", "a"}], []}], "#a", "id", fn(id) -> String.replace(id, "a", "b") end)
    [{"div", [{"id", "b"}], []}]

    iex> Floki.attr([{"div", [{"class", "name"}], []}], "div", "id", fn _ -> "b" end)
    [{"div", [{"id", "b"}, {"class", "name"}], []}]

"""
@spec attr(binary | html_tree | html_node, css_selector(), binary, (binary -> binary)) ::
        html_tree
def attr(html_elem_tuple, selector, attribute_name, mutation) when is_tuple(html_elem_tuple) do
  # A single node tuple is wrapped into a list and handled below.
  attr([html_elem_tuple], selector, attribute_name, mutation)
end

def attr(html, selector, attribute_name, mutation) when is_binary(html) do
  IO.warn(
    "deprecation: parse the HTML with parse_document or parse_fragment before using attr/4"
  )

  case Floki.parse_document(html) do
    {:ok, document} -> attr(document, selector, attribute_name, mutation)
    other -> other
  end
end

def attr(html_tree_list, selector, attribute_name, mutation) when is_list(html_tree_list) do
  find_and_update(html_tree_list, selector, fn
    {tag, attrs} ->
      modified_attrs =
        if Enum.any?(attrs, &match?({^attribute_name, _}, &1)) do
          # Attribute present: mutate every occurrence of that name,
          # leaving all other attributes untouched.
          Enum.map(attrs, fn
            {^attribute_name, attribute_value} ->
              {attribute_name, mutation.(attribute_value)}

            other_attribute ->
              other_attribute
          end)
        else
          # Attribute absent: synthesize it from `mutation.(nil)`.
          [{attribute_name, mutation.(nil)} | attrs]
        end

      {tag, modified_attrs}

    other ->
      other
  end)
end
@deprecated """
Use `find_and_update/3` or `Enum.map/2` instead.
"""
# Bodiless head groups the clauses below under one deprecated definition.
def map(_html_tree_or_list, _fun)
# A list of root nodes: map over each one.
def map(html_tree_list, fun) when is_list(html_tree_list) do
Enum.map(html_tree_list, &Finder.map(&1, fun))
end
# A single node tuple.
def map(html_tree, fun), do: Finder.map(html_tree, fun)
@doc """
Searches for elements inside the HTML tree and updates those that match the selector.

It will return the updated HTML tree.

This function works in a way similar to `traverse_and_update`, but instead of updating
the children nodes, it will only update the `tag` and `attributes` of the matching nodes.

If `fun` returns `:delete`, the HTML node will be removed from the tree.

## Examples

    iex> Floki.find_and_update([{"a", [{"href", "http://elixir-lang.com"}], ["Elixir"]}], "a", fn
    iex> {"a", [{"href", href}]} ->
    iex> {"a", [{"href", String.replace(href, "http://", "https://")}]}
    iex> other ->
    iex> other
    iex> end)
    [{"a", [{"href", "https://elixir-lang.com"}], ["Elixir"]}]

"""
@spec find_and_update(
        html_tree(),
        css_selector(),
        ({String.t(), [html_attribute()]} -> {String.t(), [html_attribute()]} | :delete)
      ) :: html_tree()
def find_and_update(html_tree, selector, fun) do
  {tree, results} = Finder.find(html_tree, selector)

  # Translate each matched node into a patch operation for the internal tree.
  operations_with_nodes =
    for result <- results do
      case result do
        %Floki.HTMLTree.HTMLNode{} = html_node ->
          case fun.({html_node.type, html_node.attributes}) do
            :delete ->
              {:delete, html_node}

            {new_tag, new_attrs} ->
              {:update, %{html_node | type: new_tag, attributes: new_attrs}}
          end

        other ->
          {:no_op, other}
      end
    end

  tree
  |> HTMLTree.patch_nodes(operations_with_nodes)
  |> HTMLTree.to_tuple_list()
end
@doc """
Traverses and updates an HTML tree structure.

This function returns a new tree structure that is the result of applying the
given `fun` on all nodes. The tree is traversed in a post-walk fashion, where
the children are traversed before the parent.

When the function `fun` encounters an HTML tag, it receives a tuple with
`{name, attributes, children}`, and should either return a similar tuple or
`nil` to delete the current node.

The function `fun` can also encounter an HTML doctype, comment or declaration
and will receive, and should return, a different tuple for these types. See the
documentation for `t:html_comment/0`, `t:html_doctype/0` and
`t:html_declaration/0` for details.

## Examples

    iex> html = [{"div", [], ["hello"]}]
    iex> Floki.traverse_and_update(html, fn
    ...> {"div", attrs, children} -> {"p", attrs, children}
    ...> other -> other
    ...> end)
    [{"p", [], ["hello"]}]

    iex> html = [{"div", [], [{:comment, "I am comment"}, {"span", [], ["hello"]}]}]
    iex> Floki.traverse_and_update(html, fn
    ...> {"span", _attrs, _children} -> nil
    ...> {:comment, text} -> {"span", [], text}
    ...> other -> other
    ...> end)
    [{"div", [], [{"span", [], "I am comment"}]}]

"""
@spec traverse_and_update(html_tree(), (html_node() -> html_node() | nil)) :: html_tree()
defdelegate traverse_and_update(html_tree, fun), to: Floki.Traversal
@doc """
Traverses and updates an HTML tree structure with an accumulator.

This function returns a new tree structure and the final value of the
accumulator, which are the result of applying the given `fun` on all nodes.
The tree is traversed in a post-walk fashion, where the children are traversed
before the parent.

When the function `fun` encounters an HTML tag, it receives a tuple with
`{name, attributes, children}` and an accumulator. It should return a
2-tuple like `{new_node, new_acc}`, where `new_node` is either a similar tuple
or `nil` to delete the current node, and `new_acc` is an updated value for the
accumulator.

The function `fun` can also encounter an HTML doctype, comment or declaration
and will receive, and should return, a different tuple for these types. See the
documentation for `t:html_comment/0`, `t:html_doctype/0` and
`t:html_declaration/0` for details.

## Examples

    iex> html = [{"div", [], [{:comment, "I am a comment"}, "hello"]}, {"div", [], ["world"]}]
    iex> Floki.traverse_and_update(html, 0, fn
    ...> {"div", attrs, children}, acc ->
    ...> {{"p", [{"data-count", to_string(acc)} | attrs], children}, acc + 1}
    ...> other, acc -> {other, acc}
    ...> end)
    {[
    {"p", [{"data-count", "0"}], [{:comment, "I am a comment"}, "hello"]},
    {"p", [{"data-count", "1"}], ["world"]}
    ], 2}

    iex> html = {"div", [], [{"span", [], ["hello"]}]}
    iex> Floki.traverse_and_update(html, [deleted: 0], fn
    ...> {"span", _attrs, _children}, acc ->
    ...> {nil, Keyword.put(acc, :deleted, acc[:deleted] + 1)}
    ...> tag, acc ->
    ...> {tag, acc}
    ...> end)
    {{"div", [], []}, [deleted: 1]}

"""
@spec traverse_and_update(
        html_tree(),
        traverse_acc,
        (html_node(), traverse_acc -> {html_node() | nil, traverse_acc})
      ) :: {html_node(), traverse_acc}
      when traverse_acc: any()
defdelegate traverse_and_update(html_tree, acc, fun), to: Floki.Traversal
@doc """
Returns the text nodes from an HTML tree.

By default, it will perform a deep search through the HTML tree.
You can disable deep search with the option `deep` assigned to false.
You can include content of script tags with the option `js` assigned to true.
You can specify a separator between nodes content.

Options not given fall back to their defaults
(`deep: true, js: false, style: true, sep: ""`), so passing a partial option
list keeps the remaining defaults.

## Examples

    iex> Floki.text({"div", [], [{"span", [], ["hello"]}, " world"]})
    "hello world"

    iex> Floki.text({"div", [], [{"span", [], ["hello"]}, " world"]}, deep: false)
    " world"

    iex> Floki.text({"div", [], [{"script", [], ["hello"]}, " world"]})
    " world"

    iex> Floki.text({"div", [], [{"script", [], ["hello"]}, " world"]}, js: true)
    "hello world"

    iex> Floki.text({"ul", [], [{"li", [], ["hello"]}, {"li", [], ["world"]}]}, sep: "-")
    "hello-world"

    iex> Floki.text([{"div", [], ["hello world"]}])
    "hello world"

    iex> Floki.text([{"p", [], ["1"]},{"p", [], ["2"]}])
    "12"

    iex> Floki.text({"div", [], [{"style", [], ["hello"]}, " world"]}, style: false)
    " world"

    iex> Floki.text({"div", [], [{"style", [], ["hello"]}, " world"]}, style: true)
    "hello world"

"""
@spec text(html_tree | binary, Keyword.t()) :: binary
def text(html, opts \\ []) do
  # Merge user options over the defaults. The previous signature used the
  # full default list as the parameter default, so a partial option list
  # (e.g. only `deep: false`) silently dropped the other defaults and,
  # for instance, started stripping <style> content.
  opts = Keyword.merge([deep: true, js: false, style: true, sep: ""], opts)

  cleaned_html_tree =
    html
    |> parse_it()
    |> clean_html_tree(:js, opts[:js])
    |> clean_html_tree(:style, opts[:style])

  search_strategy =
    case opts[:deep] do
      false -> Floki.FlatText
      _ -> Floki.DeepText
    end

  case opts[:sep] do
    nil -> search_strategy.get(cleaned_html_tree)
    sep -> search_strategy.get(cleaned_html_tree, sep)
  end
end
@doc """
Returns the direct child nodes of an HTML node.

By default, it will also include all texts. You can disable
this behaviour by using the option `include_text` to `false`.

If the given node is not an HTML tag, then it returns nil.

## Examples

    iex> Floki.children({"div", [], ["text", {"span", [], []}]})
    ["text", {"span", [], []}]

    iex> Floki.children({"div", [], ["text", {"span", [], []}]}, include_text: false)
    [{"span", [], []}]

    iex> Floki.children({:comment, "comment"})
    nil

"""
@spec children(html_node(), Keyword.t()) :: html_tree() | nil
def children(html_node, opts \\ [include_text: true]) do
  # Only tag nodes carry a subtree; comments, doctypes and text have no children.
  with {_tag, _attrs, subtree} <- html_node do
    filter_children(subtree, opts[:include_text])
  else
    _non_tag -> nil
  end
end

# Drops text nodes (plain binaries) when `include_text` is `false`.
defp filter_children(children, false), do: Enum.filter(children, &is_tuple/1)
defp filter_children(children, _include_text), do: children
@doc """
Returns a list with attribute values for a given selector.

## Examples

    iex> Floki.attribute([{"a", [{"href", "https://google.com"}], ["Google"]}], "a", "href")
    ["https://google.com"]

    iex> Floki.attribute([{"a", [{"class", "foo"}, {"href", "https://google.com"}], ["Google"]}], "a", "class")
    ["foo"]

"""
@spec attribute(binary | html_tree | html_node, binary, binary) :: list
def attribute(html, selector, attribute_name) do
  matching_elements = find(html, selector)
  attribute_values(matching_elements, attribute_name)
end
@doc """
Returns a list with attribute values from elements.

## Examples

    iex> Floki.attribute([{"a", [{"href", "https://google.com"}], ["Google"]}], "href")
    ["https://google.com"]

"""
@spec attribute(binary | html_tree | html_node, binary) :: list
def attribute(html, attribute_name) when is_binary(html) do
  IO.warn(
    "deprecation: parse the HTML with parse_document or parse_fragment before using attribute/2"
  )

  # On parse failure the `{:error, _}` tuple is passed through unchanged.
  case Floki.parse_document(html) do
    {:ok, document} -> attribute_values(document, attribute_name)
    other -> other
  end
end

def attribute(elements, attribute_name), do: attribute_values(elements, attribute_name)
# A single node tuple is wrapped into a list and handled below.
defp attribute_values(element, attr_name) when is_tuple(element) do
  attribute_values([element], attr_name)
end

# Collects, in document order, the value of the first attribute named
# `attr_name` on each tag element; non-tag entries contribute nothing.
defp attribute_values(elements, attr_name) do
  Enum.flat_map(elements, fn
    {_tag, attributes, _children} ->
      case attribute_match?(attributes, attr_name) do
        {_attr_name, value} -> [value]
        _ -> []
      end

    _other ->
      []
  end)
end

# Returns the first `{name, value}` pair matching `attribute_name`, or nil.
defp attribute_match?(attributes, attribute_name) do
  Enum.find(attributes, fn {attr_name, _value} ->
    attr_name == attribute_name
  end)
end
# Deprecated path: parses a raw binary on the fly for `text/2`. Callers
# should parse the document themselves beforehand.
defp parse_it(html) when is_binary(html) do
IO.warn(
"deprecation: parse the HTML with parse_document or parse_fragment before using text/2"
)
{:ok, document} = Floki.parse_document(html)
document
end
# Already-parsed trees pass through untouched.
defp parse_it(html), do: html
# Third argument `true` means "keep the tag's content"; any other value
# filters the corresponding tag out of the tree.
defp clean_html_tree(html_tree, :js, true), do: html_tree
defp clean_html_tree(html_tree, :js, _), do: filter_out(html_tree, "script")
defp clean_html_tree(html_tree, :style, true), do: html_tree
defp clean_html_tree(html_tree, :style, _), do: filter_out(html_tree, "style")
@doc """
Returns the nodes from an HTML tree that don't match the filter selector.

## Examples

    iex> Floki.filter_out({"div", [], [{"script", [], ["hello"]}, " world"]}, "script")
    {"div", [], [" world"]}

    iex> Floki.filter_out([{"body", [], [{"script", [], []}, {"div", [], []}]}], "script")
    [{"body", [], [{"div", [], []}]}]

    iex> Floki.filter_out({"div", [], [{:comment, "comment"}, " text"]}, :comment)
    {"div", [], [" text"]}

    iex> Floki.filter_out({"div", [], ["text"]}, :text)
    {"div", [], []}

"""
@spec filter_out(html_node() | html_tree() | binary(), FilterOut.selector()) ::
        html_node() | html_tree()
def filter_out(html, selector) when is_binary(html) do
  IO.warn(
    "deprecation: parse the HTML with parse_document or parse_fragment before using filter_out/2"
  )

  # On parse failure the `{:error, _}` tuple is passed through unchanged.
  case Floki.parse_document(html) do
    {:ok, document} -> FilterOut.filter_out(document, selector)
    other -> other
  end
end

def filter_out(elements, selector), do: FilterOut.filter_out(elements, selector)
end
|
lib/floki.ex
| 0.794385
| 0.696042
|
floki.ex
|
starcoder
|
defmodule Sippet.Transactions do
  @moduledoc """
  The `Sippet.Transactions` is responsible to dispatch messages from
  `Sippet.Transports` and `Sippet.Core` modules to transactions, creating them
  when necessary.
  """

  import Supervisor.Spec

  alias Sippet.Message, as: Message
  alias Sippet.Message.RequestLine, as: RequestLine
  alias Sippet.Message.StatusLine, as: StatusLine
  alias Sippet.Transactions, as: Transactions
  alias Sippet.Core, as: Core

  require Logger

  @typedoc "A SIP message request"
  @type request :: Message.request

  @typedoc "A SIP message response"
  @type response :: Message.response

  @typedoc "A network error that occurred while sending a message"
  @type reason :: term

  @typedoc "A client transaction identifier"
  @type client_key :: Transactions.Client.Key.t

  @typedoc "A server transaction identifier"
  @type server_key :: Transactions.Server.Key.t

  @doc """
  Starts the transaction process hierarchy.
  """
  def start_link() do
    children = [
      supervisor(Sippet.Transactions.Registry, []),
      supervisor(Sippet.Transactions.Supervisor, [])
    ]

    options = [strategy: :one_for_one, name: __MODULE__]

    Supervisor.start_link(children, options)
  end

  defdelegate start_client(transaction, outgoing_request),
    to: Sippet.Transactions.Supervisor

  defdelegate start_server(transaction, incoming_request),
    to: Sippet.Transactions.Supervisor

  @doc """
  Receives a message from the transport.

  If the message is a request, then it will look if a server transaction
  already exists for it and redirect to it. Otherwise, if the request method
  is `:ack`, it will redirect the request directly to `Sippet.Core`; if not
  `:ack`, then a new `Sippet.Transactions.Server` will be created.

  If the message is a response, it looks if a client transaction already exists
  in order to handle it, and if so, redirects to it. Otherwise the response is
  redirected directly to the `Sippet.Core`. The latter is done so because of
  the usual SIP behavior of handling the 200 OK response retransmissions for
  requests with `:invite` method directly.

  When receiving a burst of equivalent requests, it is possible that another
  entity has already created the server transaction, and then the function
  will return a `{:error, reason}` tuple.

  In case of success, returns `:ok`.
  """
  @spec receive_message(request | response) :: :ok | {:error, reason}
  def receive_message(
      %Message{start_line: %RequestLine{}} = incoming_request) do
    transaction = Transactions.Server.Key.new(incoming_request)

    case Sippet.Transactions.Registry.lookup(transaction) do
      nil ->
        if incoming_request.start_line.method == :ack do
          # Redirect to the core directly. ACKs sent out of transactions
          # pertain to the core.
          Core.receive_request(incoming_request, nil)
        else
          # Start a new server transaction now. The transaction will redirect
          # to the core once it starts. It will return errors only if there was
          # some kind of race condition when receiving the request.
          case start_server(transaction, incoming_request) do
            {:ok, _} -> :ok
            {:ok, _, _} -> :ok
            _errors -> {:error, :already_started}
          end
        end

      pid ->
        # Redirect the request to the existing transaction. These are typically
        # retransmissions or ACKs for 200 OK responses.
        Transactions.Server.receive_request(pid, incoming_request)
    end
  end

  def receive_message(
      %Message{start_line: %StatusLine{}} = incoming_response) do
    transaction = Transactions.Client.Key.new(incoming_response)

    case Sippet.Transactions.Registry.lookup(transaction) do
      nil ->
        # Redirect the response to core. These are typically retransmissions of
        # 200 OK for sent INVITE requests, and they have to be handled directly
        # by the core in order to catch the correct media handling.
        Core.receive_response(incoming_response, nil)

      pid ->
        # Redirect the response to the existing client transaction. If needed,
        # the client transaction will redirect to the core from there.
        Transactions.Client.receive_response(pid, incoming_response)
    end
  end

  @doc """
  Sends a request using client transactions.

  Requests of method `:ack` shall be sent directly to `Sippet.Transports`. If
  an `:ack` request is detected, it returns `{:error, :not_allowed}`.

  A `Sippet.Transactions.Client` is created to handle retransmissions, when the
  transport presumes it, and match response retransmissions, so the
  `Sippet.Core` doesn't get retransmissions other than 200 OK for `:invite`
  requests.

  In case of success, returns `{:ok, client_key}` where `client_key` is the
  identifier of the transaction created for the request.
  """
  @spec send_request(request) :: {:ok, client_key} | {:error, reason}
  def send_request(%Message{start_line: %RequestLine{method: :ack}}) do
    # ACKs should be sent directly to transport.
    Logger.error("ACKs are not allowed to use transactions")
    {:error, :not_allowed}
  end

  def send_request(%Message{start_line: %RequestLine{}} = outgoing_request) do
    transaction = Transactions.Client.Key.new(outgoing_request)

    # Create a new client transaction now. The request is passed to the
    # transport once it starts.
    case start_client(transaction, outgoing_request) do
      {:ok, _} -> {:ok, transaction}
      {:ok, _, _} -> {:ok, transaction}
      _errors ->
        Logger.warn fn ->
          "client transaction #{inspect transaction} already exists"
        end

        {:error, :already_started}
    end
  end

  @doc """
  Sends a response to a server transaction.

  The server transaction identifier is obtained from the message attributes.

  See `send_response/2`.
  """
  @spec send_response(response) :: :ok | {:error, reason}
  def send_response(%Message{start_line: %StatusLine{}} = outgoing_response) do
    server_key = Transactions.Server.Key.new(outgoing_response)
    send_response(outgoing_response, server_key)
  end

  @doc """
  Sends a response to a server transaction.

  Server transactions are created when the incoming request is received, see
  `receive_message/1`. The first parameter `server_key` indicates the reference
  passed to `Sippet.Core` when the request is received.

  If there is no such server transaction, returns `{:error, :no_transaction}`.

  In case of success, returns `:ok`.
  """
  @spec send_response(response, server_key) :: :ok | {:error, reason}
  def send_response(%Message{start_line: %StatusLine{}} = outgoing_response,
      %Transactions.Server.Key{} = server_key) do
    case Sippet.Transactions.Registry.lookup(server_key) do
      nil ->
        {:error, :no_transaction}

      pid ->
        # Send the response through the existing server transaction.
        Transactions.Server.send_response(pid, outgoing_response)
    end
  end

  @doc """
  Receives a transport error.

  The client and server identifiers are passed to the transport by the
  transactions. If the transport faces an error, it has to inform the
  transaction using this function.

  If a transaction with such a key does not exist, it will be silently ignored.
  """
  @spec receive_error(client_key | server_key, reason) :: :ok
  def receive_error(key, reason) do
    case Sippet.Transactions.Registry.lookup(key) do
      nil ->
        case key do
          %Transactions.Client.Key{} ->
            Logger.warn fn ->
              "client key #{inspect key} not found"
            end

          %Transactions.Server.Key{} ->
            Logger.warn fn ->
              "server key #{inspect key} not found"
            end
        end

        :ok

      pid ->
        # Inform the error to the existing transaction.
        case key do
          %Transactions.Client.Key{} ->
            Transactions.Client.receive_error(pid, reason)

          %Transactions.Server.Key{} ->
            Transactions.Server.receive_error(pid, reason)
        end
    end
  end

  @doc """
  Terminates a client or server transaction forcefully.

  This function is not generally executed by entities; there is a single case
  where it is fundamental, which is when a client transaction is in proceeding
  state for a long time, and the transaction has to be finished forcibly, or it
  will never finish by itself.

  If a transaction with such a key does not exist, it will be silently ignored.
  """
  @spec terminate(client_key | server_key) :: :ok
  def terminate(key) do
    case Sippet.Transactions.Registry.lookup(key) do
      nil ->
        :ok

      pid ->
        # Terminate the existing transaction.
        case key do
          %Transactions.Client.Key{} ->
            Transactions.Client.terminate(pid)

          %Transactions.Server.Key{} ->
            Transactions.Server.terminate(pid)
        end
    end
  end

  @doc """
  Handles the sigil `~K`.

  It returns a client or server transaction key depending on the number of
  parameters passed.

  ## Examples

      iex> import Sippet.Transactions, only: [sigil_K: 2]

      iex> Sippet.Transactions.Client.Key.new("z9hG4bK230f2.1", :invite)
      ~K[z9hG4bK230f2.1|:invite]

      iex> ~K[z9hG4bK230f2.1|INVITE]
      ~K[z9hG4bK230f2.1|:invite]

      iex> Sippet.Transactions.Server.Key.new("z9hG4bK74b21", :invite, {"client.biloxi.example.com", 5060})
      ~K[z9hG4bK74b21|:invite|client.biloxi.example.com:5060]

      iex> ~K[z9hG4bK74b21|INVITE|client.biloxi.example.com:5060]
      ~K[z9hG4bK74b21|:invite|client.biloxi.example.com:5060]

  """
  def sigil_K(string, _) do
    case String.split(string, "|") do
      [branch, method] ->
        Transactions.Client.Key.new(branch, sigil_to_method(method))

      [branch, method, sentby] ->
        [host, port] = String.split(sentby, ":")

        Transactions.Server.Key.new(branch, sigil_to_method(method),
          {host, String.to_integer(port)})
    end
  end

  # Accepts both atom-style methods (":invite") and bare names ("INVITE").
  defp sigil_to_method(method) do
    case method do
      ":" <> rest -> Message.to_method(rest)
      other -> Message.to_method(other)
    end
  end
end
|
lib/sippet/transactions.ex
| 0.898508
| 0.4133
|
transactions.ex
|
starcoder
|
defmodule GenEvent do
@moduledoc """
A behaviour module for implementing event handling functionality.
The event handling model consists of a generic event manager
process with an arbitrary number of event handlers which are
added and deleted dynamically.
An event manager implemented using this module will have a standard
set of interface functions and include functionality for tracing and
error reporting. It will also fit into a supervision tree.
## Example
There are many use cases for event handlers. For example, a logging
system can be built using event handlers where each log message is
an event and different event handlers can be plugged to handle the
log messages. One handler may print error messages on the terminal,
another can write it to a file, while a third one can keep the
messages in memory (like a buffer) until they are read.
As an example, let's have a GenEvent that accumulates messages until
they are collected by an explicit call.
defmodule LoggerHandler do
use GenEvent
# Callbacks
def handle_event({:log, x}, messages) do
{:ok, [x|messages]}
end
def handle_call(:messages, messages) do
{:ok, Enum.reverse(messages), []}
end
end
{:ok, pid} = GenEvent.start_link()
GenEvent.add_handler(pid, LoggerHandler, [])
#=> :ok
GenEvent.notify(pid, {:log, 1})
#=> :ok
GenEvent.notify(pid, {:log, 2})
#=> :ok
GenEvent.call(pid, LoggerHandler, :messages)
#=> [1, 2]
GenEvent.call(pid, LoggerHandler, :messages)
#=> []
We start a new event manager by calling `GenEvent.start_link/0`.
Notifications can be sent to the event manager which will then
invoke `handle_event/2` for each registered handler.
We can add new handlers with `add_handler/3` and `add_mon_handler/3`.
Calls can also be made to specific handlers by using `call/3`.
## Callbacks
There are 6 callbacks required to be implemented in a `GenEvent`. By
adding `use GenEvent` to your module, Elixir will automatically define
all 6 callbacks for you, leaving it up to you to implement the ones
you want to customize. The callbacks are:
* `init(args)` - invoked when the event handler is added.
It must return:
- `{:ok, state}`
- `{:ok, state, :hibernate}`
- `{:error, reason}`
* `handle_event(msg, state)` - invoked whenever an event is sent via
`notify/2`, `ack_notify/2` or `sync_notify/2`.
It must return:
- `{:ok, new_state}`
- `{:ok, new_state, :hibernate}`
- `:remove_handler`
* `handle_call(msg, state)` - invoked when a `call/3` is done to a specific
handler.
It must return:
- `{:ok, reply, new_state}`
- `{:ok, reply, new_state, :hibernate}`
- `{:remove_handler, reply}`
* `handle_info(msg, state)` - invoked to handle all other messages which
are received by the process. Must return the same values as
`handle_event/2`.
* `terminate(reason, state)` - called when the event handler is removed or
the event manager is terminating. It can return any term.
The reason is one of:
- `:stop` - manager is terminating
- `{:stop, reason}` - monitored process terminated (for monitored handlers)
- `:remove_handler` - handler is being removed
- `{:error, term}` - handler crashed or returned a bad value
- `term` - any term passed to functions like `GenEvent.remove_handler/2`
* `code_change(old_vsn, state, extra)` - called when the application
code is being upgraded live (hot code swapping).
It must return:
- `{:ok, new_state}`
## Name Registration
A GenEvent is bound to the same name registration rules as a `GenServer`.
Read more about it in the `GenServer` docs.
## Modes
GenEvent stream supports three different notifications.
On `GenEvent.ack_notify/2`, the manager acknowledges each event,
providing back pressure, but processing of the message happens
asynchronously.
On `GenEvent.sync_notify/2`, the manager acknowledges an event
just after it was processed by all event handlers.
On `GenEvent.notify/2`, all events are processed asynchronously and
there is no ack (which means there is no backpressure).
## Streaming
`GenEvent` messages can be streamed with the help of `stream/2`.
Here are some examples:
stream = GenEvent.stream(pid)
# Discard the next 10 events
_ = Enum.drop(stream, 10)
# Print all remaining events
for event <- stream do
IO.inspect event
end
## Learn more and compatibility
If you wish to find out more about gen events, Elixir getting started
guides provide a tutorial-like introduction. The documentation and links
in Erlang can also provide extra insight.
* http://elixir-lang.org/getting_started/mix_otp/1.html
* http://www.erlang.org/doc/man/gen_event.html
* http://learnyousomeerlang.com/event-handlers
Keep in mind though Elixir and Erlang gen events are not 100% compatible.
The `:gen_event.add_sup_handler/3` is not supported by Elixir's GenEvent,
which in turn supports `GenEvent.add_mon_handler/3`.
The benefits of the monitoring approach are described in the "Don't drink
too much kool aid" section of the "Learn you some Erlang" link above. Due
to those changes, Elixir's GenEvent does not trap exits by default.
Furthermore, Elixir also normalizes the `{:error, _}` tuples returned
by many functions, in order to be more consistent with themselves and
the `GenServer` module.
"""
@typedoc "Return values of `start*` functions"
@type on_start :: {:ok, pid} | {:error, {:already_started, pid}}
@typedoc "The GenEvent manager name"
@type name :: atom | {:global, term} | {:via, module, term}
@typedoc "Options used by the `start*` functions"
@type options :: [name: name]
@typedoc "The event manager reference"
@type manager :: pid | name | {atom, node}
@typedoc "Supported values for new handlers"
@type handler :: atom | {atom, term} | {pid, reference}
@doc false
defmacro __using__(_) do
quote location: :keep do
@behaviour :gen_event
# Default implementations for the full :gen_event behaviour; all of them
# are overridable, so users only redefine the callbacks they care about.
@doc false
def init(args) do
{:ok, args}
end
@doc false
def handle_event(_event, state) do
{:ok, state}
end
@doc false
def handle_call(msg, state) do
# We do this to trick dialyzer to not complain about non-local returns.
case :random.uniform(1) do
1 -> exit({:bad_call, msg})
2 -> {:remove_handler, :ok}
end
end
@doc false
def handle_info(_msg, state) do
{:ok, state}
end
@doc false
def terminate(_reason, _state) do
:ok
end
@doc false
def code_change(_old, state, _extra) do
{:ok, state}
end
defoverridable [init: 1, handle_event: 2, handle_call: 2,
handle_info: 2, terminate: 2, code_change: 3]
end
end
@doc """
Starts an event manager linked to the current process.

This is often used to start the `GenEvent` as part of a supervision tree.
It accepts the `:name` option which is described under the `Name Registration`
section in the `GenServer` module docs.

If the event manager is successfully created and initialized, the function
returns `{:ok, pid}`, where pid is the pid of the server. If there already
exists a process with the specified server name, the function returns
`{:error, {:already_started, pid}}` with the pid of that process.

Note that a `GenEvent` started with `start_link/1` is linked to the
parent process and will exit not only on crashes but also if the parent
process exits with `:normal` reason.
"""
@spec start_link(options) :: on_start
def start_link(options \\ []) when is_list(options), do: do_start(:link, options)
@doc """
Starts an event manager process without links (outside of a supervision tree).

See `start_link/1` for more information.
"""
@spec start(options) :: on_start
def start(options \\ []) when is_list(options), do: do_start(:nolink, options)
# Sentinel passed to `:gen.start` in place of a callback module.
@no_callback :"no callback module"
# Starts the manager either linked (`:link`) or detached (`:nolink`),
# honoring the optional `:name` registration (local atom, or a
# `{:global, _}` / `{:via, _, _}` tuple).
defp do_start(mode, options) do
case Keyword.get(options, :name) do
nil ->
:gen.start(GenEvent, mode, @no_callback, [], [])
atom when is_atom(atom) ->
:gen.start(GenEvent, mode, {:local, atom}, @no_callback, [], [])
other when is_tuple(other) ->
:gen.start(GenEvent, mode, other, @no_callback, [], [])
end
end
@doc """
Returns a stream that consumes events from the `manager`.

The stream is a `GenEvent` struct that implements the `Enumerable`
protocol. Consumption of events only begins when enumeration starts.

Note streaming is specific to Elixir's GenEvent and does not work
with Erlang ones.

## Options

  * `:timeout` - raises if no event arrives in X milliseconds
    (defaults to `:infinity`)

"""
@spec stream(manager, Keyword.t) :: GenEvent.Stream.t
def stream(manager, options \\ []) do
  timeout = Keyword.get(options, :timeout, :infinity)
  %GenEvent.Stream{manager: manager, timeout: timeout}
end
@doc """
Adds a new event handler to the event `manager`.

The event manager will call the `init/1` callback with `args` to
initiate the event handler and its internal state.

If `init/1` returns a correct value indicating successful completion,
the event manager adds the event handler and this function returns
`:ok`. If the callback fails with `reason` or returns `{:error, reason}`,
the event handler is ignored and this function returns `{:error, reason}`.

If the given handler was previously installed at the manager, this
function returns `{:error, :already_present}`.
"""
@spec add_handler(manager, handler, term) :: :ok | {:error, term}
def add_handler(manager, handler, args), do: rpc(manager, {:add_handler, handler, args})
@doc """
Adds a monitored event handler to the event `manager`.
Expects the same input and returns the same values as `add_handler/3`.
## Monitored handlers
A monitored handler implies the calling process will now be monitored
by the GenEvent manager.
If the calling process later terminates with `reason`, the event manager
will delete the event handler by calling the `terminate/2` callback with
`{:stop, reason}` as argument. If the event handler later is deleted,
the event manager sends a message `{:gen_event_EXIT, handler, reason}`
to the calling process. Reason is one of the following:
* `:normal` - if the event handler has been removed due to a call to
`remove_handler/3`, or `:remove_handler` has been returned by a callback
function
* `:shutdown` - if the event handler has been removed because the event
manager is terminating
* `{:swapped, new_handler, pid}` - if the process pid has replaced the
event handler by another
* a term - if the event handler is removed due to an error. Which term
depends on the error
Keep in mind that the `{:gen_event_EXIT, handler, reason}` message is not
guaranteed to be delivered in case the manager crashes. If you want to
guarantee the message is delivered, you have two options:
* monitor the event manager
* link to the event manager and then set `Process.flag(:trap_exit, true)`
in your handler callback
Finally, this functionality only works with GenEvent started via this
module (it is not backwards compatible with Erlang's `:gen_event`).
"""
@spec add_mon_handler(manager, handler, term) :: :ok | {:error, term}
def add_mon_handler(manager, handler, args) do
rpc(manager, {:add_mon_handler, handler, args, self()})
end
@doc """
Sends an event notification to the event `manager`.
The event manager will call `handle_event/2` for each
installed event handler.
`notify` is asynchronous and will return immediately after the
notification is sent. `notify` will not fail even if the specified
event manager does not exist, unless it is specified as an atom.
"""
@spec notify(manager, term) :: :ok
def notify(manager, event)
def notify({:global, name}, msg) do
# Fire-and-forget: swallow any send failure so notify never raises for
# globally registered managers.
try do
:global.send(name, {:notify, msg})
:ok
catch
_, _ -> :ok
end
end
def notify({:via, mod, name}, msg) do
# Same best-effort semantics for :via-registered managers.
try do
mod.send(name, {:notify, msg})
:ok
catch
_, _ -> :ok
end
end
def notify(other, msg) do
# Plain send/2: raises ArgumentError when `other` is an atom naming a
# non-existent process -- the "unless it is specified as an atom" case
# documented above.
send(other, {:notify, msg})
:ok
end
@doc """
Sends a sync event notification to the event `manager`.
In other words, this function only returns `:ok` after the event manager
invokes the `handle_event/2` on each installed event handler.
See `notify/2` for more info.
"""
@spec sync_notify(manager, term) :: :ok
def sync_notify(manager, event) do
rpc(manager, {:sync_notify, event})
end
@doc """
Sends a ack event notification to the event `manager`.
In other words, this function only returns `:ok` as soon as the
event manager starts processing this event, but it does not wait
for event handlers to process the sent event.
See `notify/2` for more info. Note this function is specific
to Elixir's GenEvent and does not work with Erlang ones.
"""
@spec ack_notify(manager, term) :: :ok
def ack_notify(manager, event) do
rpc(manager, {:ack_notify, event})
end
@doc """
Makes a synchronous call to the event `handler` installed in `manager`.
The given `request` is sent and the caller waits until a reply arrives or
a timeout occurs. The event manager will call `handle_call/2` to handle
the request.
The return value `reply` is defined in the return value of `handle_call/2`.
If the specified event handler is not installed, the function returns
`{:error, :not_found}`.
"""
@spec call(manager, handler, term, timeout) :: term | {:error, term}
def call(manager, handler, request, timeout \\ 5000) do
try do
:gen.call(manager, self(), {:call, handler, request}, timeout)
catch
# :gen.call exits on timeout or when the manager is down; re-exit with
# the MFA attached for a more informative crash report.
:exit, reason ->
exit({reason, {__MODULE__, :call, [manager, handler, request, timeout]}})
else
{:ok, res} -> res
end
end
@doc """
Removes an event handler from the event `manager`.
The event manager will call `terminate/2` to terminate the event handler
and return the callback value. If the specified event handler is not
installed, the function returns `{:error, :not_found}`.
"""
@spec remove_handler(manager, handler, term) :: term | {:error, term}
def remove_handler(manager, handler, args) do
rpc(manager, {:delete_handler, handler, args})
end
@doc """
Replaces an old event handler with a new one in the event `manager`.
First, the old event handler is deleted by calling `terminate/2` with
the given `args1` and collects the return value. Then the new event handler
is added and initiated by calling `init({args2, term})`, where `term` is the
return value of calling `terminate/2` in the old handler. This makes it
possible to transfer information from one handler to another.
The new handler will be added even if the specified old event handler
is not installed or if the handler fails to terminate with a given reason
in which case `state = {:error, term}`.
If `init/1` in the second handler returns a correct value, this
function returns `:ok`.
"""
@spec swap_handler(manager, handler, term, handler, term) :: :ok | {:error, term}
def swap_handler(manager, handler1, args1, handler2, args2) do
rpc(manager, {:swap_handler, handler1, args1, handler2, args2})
end
@doc """
Replaces an old event handler with a new monitored one in the event `manager`.
Read the docs for `add_mon_handler/3` and `swap_handler/5` for more information.
"""
@spec swap_mon_handler(manager, handler, term, handler, term) :: :ok | {:error, term}
def swap_mon_handler(manager, handler1, args1, handler2, args2) do
rpc(manager, {:swap_mon_handler, handler1, args1, handler2, args2, self()})
end
@doc """
Returns a list of all event handlers installed in the `manager`.
"""
@spec which_handlers(manager) :: [handler]
def which_handlers(manager) do
rpc(manager, :which_handlers)
end
@doc """
Terminates the event `manager`.
Before terminating, the event manager will call `terminate(:stop, ...)`
for each installed event handler.
"""
@spec stop(manager) :: :ok
def stop(manager) do
rpc(manager, :stop)
end
defp rpc(module, cmd) do
# TODO: Change the tag once patch is accepted by OTP
# Synchronous round-trip to the manager; the {:ok, reply} match asserts
# the manager answered (the caller crashes otherwise).
{:ok, reply} = :gen.call(module, self(), cmd, :infinity)
reply
end
## Init callbacks
require Record
# Internal bookkeeping record for an installed handler:
# module - callback module; id - module | {module, id} | {pid, ref} (streams);
# state - handler state; pid/ref - owner and monitor ref for mon handlers.
Record.defrecordp :handler, [:module, :id, :state, :pid, :ref]
@doc false
def init_it(starter, :self, name, mod, args, options) do
init_it(starter, self(), name, mod, args, options)
end
def init_it(starter, parent, name, _, _, options) do
# Entry point invoked by :proc_lib: ack the starter, then enter the main
# loop with no handlers installed and hibernation disabled.
Process.put(:"$initial_call", {__MODULE__, :init_it, 6})
debug = :gen.debug_options(options)
:proc_lib.init_ack(starter, {:ok, self()})
loop(parent, name(name), [], debug, false)
end
@doc false
def init_hib(parent, name, handlers, debug) do
# Re-entry point after :proc_lib.hibernate/3.
fetch_msg(parent, name, handlers, debug, true)
end
# Extracts the registered name out of a :gen registration tuple, or passes
# the pid through when the manager is unregistered.
defp name(pid) when is_pid(pid) do
pid
end
defp name(registration) when is_tuple(registration) do
case registration do
{:local, registered} -> registered
{:global, registered} -> registered
{:via, _module, registered} -> registered
end
end
## Loop
# When hib is true the process hibernates between messages to reclaim
# memory; init_hib/4 is the wake-up continuation.
defp loop(parent, name, handlers, debug, true) do
:proc_lib.hibernate(__MODULE__, :init_hib, [parent, name, handlers, debug])
end
defp loop(parent, name, handlers, debug, false) do
fetch_msg(parent, name, handlers, debug, false)
end
defp fetch_msg(parent, name, handlers, debug, hib) do
receive do
# :sys system messages (suspend/resume/get_state/...) are delegated so
# the manager behaves as a proper special process.
{:system, from, req} ->
:sys.handle_system_msg(req, from, parent, __MODULE__,
debug, [name, handlers, hib], hib)
# An exit from the parent terminates the manager with the same reason.
{:EXIT, ^parent, reason} ->
server_terminate(reason, parent, handlers, name)
msg when debug == [] ->
handle_msg(msg, parent, name, handlers, [])
msg ->
debug = :sys.handle_debug(debug, &print_event/3, name, {:in, msg})
handle_msg(msg, parent, name, handlers, debug)
end
end
# Central dispatch for every non-system message received by the manager.
defp handle_msg(msg, parent, name, handlers, debug) do
case msg do
{:notify, event} ->
{hib, handlers} = server_event(:async, event, handlers, name)
loop(parent, name, handlers, debug, hib)
{_from, _tag, {:notify, event}} ->
{hib, handlers} = server_event(:async, event, handlers, name)
loop(parent, name, handlers, debug, hib)
# ack: reply as soon as processing starts, before handlers run.
{_from, tag, {:ack_notify, event}} ->
reply(tag, :ok)
{hib, handlers} = server_event(:ack, event, handlers, name)
loop(parent, name, handlers, debug, hib)
# sync: reply only after every handler has processed the event.
{_from, tag, {:sync_notify, event}} ->
{hib, handlers} = server_event(:sync, event, handlers, name)
reply(tag, :ok)
loop(parent, name, handlers, debug, hib)
# A monitored owner died: try removing its handler first; otherwise
# deliver the DOWN message to handlers as plain info.
{:DOWN, ref, :process, _pid, reason} = other ->
case handle_down(ref, reason, handlers, name) do
{:ok, handlers} ->
loop(parent, name, handlers, debug, false)
:error ->
{hib, handlers} = server_info(other, handlers, name)
loop(parent, name, handlers, debug, hib)
end
{_from, tag, {:call, handler, query}} ->
{hib, reply, handlers} = server_call(handler, query, handlers, name)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
{_from, tag, {:add_handler, handler, args}} ->
{hib, reply, handlers} = server_add_handler(handler, args, handlers)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
{_from, tag, {:add_mon_handler, handler, args, notify}} ->
{hib, reply, handlers} = server_add_mon_handler(handler, args, handlers, notify)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
{_from, tag, {:add_process_handler, pid, notify}} ->
{hib, reply, handlers} = server_add_process_handler(pid, handlers, notify)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
{_from, tag, {:delete_handler, handler, args}} ->
{reply, handlers} = server_remove_handler(handler, args, handlers, name)
reply(tag, reply)
loop(parent, name, handlers, debug, false)
{_from, tag, {:swap_handler, handler1, args1, handler2, args2}} ->
{hib, reply, handlers} = server_swap_handler(handler1, args1, handler2, args2, handlers, nil, name)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
{_from, tag, {:swap_mon_handler, handler1, args1, handler2, args2, mon}} ->
{hib, reply, handlers} = server_swap_handler(handler1, args1, handler2, args2, handlers, mon, name)
reply(tag, reply)
loop(parent, name, handlers, debug, hib)
# stop: server_terminate/4 exits; the :normal exit is caught so the
# caller can still be acked, after which the loop is simply not
# re-entered and the process finishes normally.
{_from, tag, :stop} ->
try do
server_terminate(:normal, parent, handlers, name)
catch
:exit, :normal -> :ok
end
reply(tag, :ok)
{_from, tag, :which_handlers} ->
reply(tag, server_which_handlers(handlers))
loop(parent, name, handlers, debug, false)
{_from, tag, :get_modules} ->
reply(tag, server_get_modules(handlers))
loop(parent, name, handlers, debug, false)
# Anything else is delivered to every handler via handle_info/2.
other ->
{hib, handlers} = server_info(other, handlers, name)
loop(parent, name, handlers, debug, hib)
end
end
## System callbacks
@doc false
def system_continue(parent, debug, [name, handlers, hib]) do
loop(parent, name, handlers, debug, hib)
end
@doc false
def system_terminate(reason, parent, _debug, [name, handlers, _hib]) do
server_terminate(reason, parent, handlers, name)
end
@doc false
# Hot code upgrade: only handlers implemented by `module` are migrated.
def system_code_change([name, handlers, hib], module , old_vsn, extra) do
handlers =
for handler <- handlers do
if handler(handler, :module) == module do
{:ok, state} = module.code_change(old_vsn, handler(handler, :state), extra)
handler(handler, state: state)
else
handler
end
end
{:ok, [name, handlers, hib]}
end
@doc false
def system_get_state([_name, handlers, _hib]) do
tuples = for handler(module: mod, id: id, state: state) <- handlers do
{mod, id, state}
end
{:ok, tuples}
end
@doc false
# Applies `fun` to each handler's {mod, id, state}; when `fun` raises or
# changes the mod/id, that handler is left untouched.
def system_replace_state(fun, [name, handlers, hib]) do
{handlers, states} =
:lists.unzip(for handler <- handlers do
handler(module: mod, id: id, state: state) = handler
cur = {mod, id, state}
try do
new = {^mod, ^id, new_state} = fun.(cur)
{handler(handler, state: new_state), new}
catch
_, _ ->
{handler, cur}
end
end)
{:ok, states, [name, handlers, hib]}
end
@doc false
# :sys.get_status/1 support; lets each handler scrub its state through an
# optional format_status/2 before it is exposed.
def format_status(opt, status_data) do
[pdict, sys_state, parent, _debug, [name, handlers, _hib]] = status_data
header = :gen.format_status_header('Status for event handler', name)
formatted = for handler <- handlers do
handler(module: module, state: state) = handler
if function_exported?(module, :format_status, 2) do
try do
state = module.format_status(opt, [pdict, state])
handler(handler, state: state)
catch
_, _ -> handler
end
else
handler
end
end
[header: header,
data: [{'Status', sys_state}, {'Parent', parent}],
items: {'Installed handlers', formatted}]
end
## Loop helpers
# :sys debug tracing output for incoming messages.
defp print_event(dev, {:in, msg}, name) do
case msg do
{:notify, event} ->
IO.puts dev, "*DBG* #{inspect name} got event #{inspect event}"
{_, _, {:call, handler, query}} ->
IO.puts dev, "*DBG* #{inspect name} (handler #{inspect handler}) got call #{inspect query}"
_ ->
IO.puts dev, "*DBG* #{inspect name} got #{inspect msg}"
end
end
defp print_event(dev, dbg, name) do
IO.puts dev, "*DBG* #{inspect name}: #{inspect dbg}"
end
# Handlers may be given as `module` or `{module, id}`; the record id is the
# module itself or the {module, id} pair respectively.
defp server_add_handler({module, id}, args, handlers) do
handler = handler(module: module, id: {module, id})
do_add_handler(module, handler, args, handlers, :ok)
end
defp server_add_handler(module, args, handlers) do
handler = handler(module: module, id: module)
do_add_handler(module, handler, args, handlers, :ok)
end
# Monitored variants additionally watch the owning process (`notify`) so
# the handler is removed (and the owner messaged) when either side dies.
defp server_add_mon_handler({module, id}, args, handlers, notify) do
ref = Process.monitor(notify)
handler = handler(module: module, id: {module, id}, pid: notify, ref: ref)
do_add_handler(module, handler, args, handlers, :ok)
end
defp server_add_mon_handler(module, args, handlers, notify) do
ref = Process.monitor(notify)
handler = handler(module: module, id: module, pid: notify, ref: ref)
do_add_handler(module, handler, args, handlers, :ok)
end
# Used by GenEvent.Stream: the handler id is {manager_pid, ref} and the
# success reply carries that id back to the subscriber.
defp server_add_process_handler(pid, handlers, notify) do
ref = Process.monitor(pid)
handler = handler(module: GenEvent.Stream, id: {self(), ref},
pid: notify, ref: ref)
do_add_handler(GenEvent.Stream, handler, {pid, ref}, handlers, {self(), ref})
end
defp server_remove_handler(module, args, handlers, name) do
do_take_handler(module, args, handlers, name, :remove, :normal)
end
# Swap: terminate the old handler, then install the new one seeded with
# {args2, state-from-old}. `sup` selects the monitored variant.
defp server_swap_handler(module1, args1, module2, args2, handlers, sup, name) do
{state, handlers} =
do_take_handler(module1, args1, handlers, name, :swapped, {:swapped, module2, sup})
if sup do
server_add_mon_handler(module2, {args2, state}, handlers, sup)
else
server_add_handler(module2, {args2, state}, handlers)
end
end
defp server_info(event, handlers, name) do
# Reverse so handlers observe the message in installation order.
handlers = :lists.reverse(handlers)
server_notify(event, :handle_info, handlers, name, handlers, [], false)
end
# Events go to process (stream) handlers first, then regular handlers,
# then stream acknowledgements are collected according to `mode`.
defp server_event(mode, event, handlers, name) do
{handlers, streams} = server_split_process_handlers(mode, event, handlers, [], [])
{hib, handlers} = server_notify(event, :handle_event, handlers, name, handlers, [], false)
{hib, server_collect_process_handlers(mode, event, streams, handlers, name)}
end
# Partitions handlers into regular vs stream handlers (id = {pid, ref}),
# forwarding the event to each stream as it is found.
defp server_split_process_handlers(mode, event, [handler|t], handlers, streams) do
case handler(handler, :id) do
{pid, _ref} when is_pid(pid) ->
server_process_notify(mode, event, handler)
server_split_process_handlers(mode, event, t, handlers, [handler|streams])
_ ->
server_split_process_handlers(mode, event, t, [handler|handlers], streams)
end
end
defp server_split_process_handlers(_mode, _event, [], handlers, streams) do
{handlers, streams}
end
defp server_process_notify(mode, event, handler(state: {pid, ref})) do
send pid, {self(), {self(), ref}, {mode_to_tag(mode), event}}
end
# Maps the internal dispatch mode onto the notify tag understood by the
# manager's message loop.
defp mode_to_tag(mode) do
case mode do
:ack -> :ack_notify
:sync -> :sync_notify
:async -> :notify
end
end
# Folds an event/info message over every handler, accumulating survivors
# and whether any of them requested hibernation.
defp server_notify(event, fun, [handler|t], name, handlers, acc, hib) do
case server_update(handler, fun, event, name, handlers) do
{new_hib, handler} ->
server_notify(event, fun, t, name, handlers, [handler|acc], hib or new_hib)
:error ->
server_notify(event, fun, t, name, handlers, acc, hib)
end
end
defp server_notify(_, _, [], _, _, acc, hib) do
{hib, acc}
end
# Runs a single handler callback; :error means the handler was removed
# (voluntarily via :remove_handler, or due to a crash / bad return value).
defp server_update(handler, fun, event, name, _handlers) do
handler(module: module, state: state) = handler
case do_handler(module, fun, [event, state]) do
{:ok, res} ->
case res do
{:ok, state} ->
{false, handler(handler, state: state)}
{:ok, state, :hibernate} ->
{true, handler(handler, state: state)}
:remove_handler ->
do_terminate(handler, :remove_handler, event, name, :normal)
:error
other ->
reason = {:bad_return_value, other}
do_terminate(handler, {:error, reason}, event, name, reason)
:error
end
{:error, reason} ->
do_terminate(handler, {:error, reason}, event, name, reason)
:error
end
end
# Async events need no acknowledgement from stream handlers.
defp server_collect_process_handlers(:async, event, [handler|t], handlers, name) do
server_collect_process_handlers(:async, event, t, [handler|handlers], name)
end
# sync/ack: block for each stream's {ref, :ok}; while blocked, also service
# removal requests for that stream and its DOWN.
defp server_collect_process_handlers(mode, event, [handler|t], handlers, name) when mode in [:sync, :ack] do
handler(ref: ref, id: id) = handler
receive do
{^ref, :ok} ->
server_collect_process_handlers(mode, event, t, [handler|handlers], name)
{_from, tag, {:delete_handler, ^id, args}} ->
do_terminate(handler, args, :remove, name, :normal)
reply(tag, :ok)
server_collect_process_handlers(mode, event, t, handlers, name)
{:DOWN, ^ref, _, _, reason} ->
do_terminate(handler, {:stop, reason}, :DOWN, name, :shutdown)
server_collect_process_handlers(mode, event, t, handlers, name)
end
end
defp server_collect_process_handlers(_mode, _event, [], handlers, _name) do
handlers
end
# handler(:id) is the zero-based field index in the record tuple; +1 turns
# it into the 1-based element position expected by the :lists key* functions.
defp server_call(module, query, handlers, name) do
case :lists.keyfind(module, handler(:id) + 1, handlers) do
false ->
{false, {:error, :not_found}, handlers}
handler ->
case server_call_update(handler, query, name, handlers) do
{{hib, handler}, reply} ->
{hib, reply, :lists.keyreplace(module, handler(:id) + 1, handlers, handler)}
{:error, reply} ->
{false, reply, :lists.keydelete(module, handler(:id) + 1, handlers)}
end
end
end
# Runs handle_call/2 on a single handler; {:error, reply} signals the
# handler must be deleted by the caller.
defp server_call_update(handler, query, name, _handlers) do
handler(module: module, state: state) = handler
case do_handler(module, :handle_call, [query, state]) do
{:ok, res} ->
case res do
{:ok, reply, state} ->
{{false, handler(handler, state: state)}, reply}
{:ok, reply, state, :hibernate} ->
{{true, handler(handler, state: state)}, reply}
{:remove_handler, reply} ->
do_terminate(handler, :remove_handler, query, name, :normal)
{:error, reply}
other ->
reason = {:bad_return_value, other}
do_terminate(handler, {:error, reason}, query, name, reason)
{:error, {:error, reason}}
end
{:error, reason} ->
do_terminate(handler, {:error, reason}, query, name, reason)
{:error, {:error, reason}}
end
end
# Sorted, de-duplicated list of handler modules (:ordsets gives us both).
defp server_get_modules(handlers) do
(for handler(module: module) <- handlers, do: module)
|> :ordsets.from_list
|> :ordsets.to_list
end
defp server_which_handlers(handlers) do
for handler(id: id) <- handlers, do: id
end
# Terminates every handler, then exits the manager with `reason`.
defp server_terminate(reason, _parent, handlers, name) do
_ =
for handler <- handlers do
do_terminate(handler, :stop, :stop, name, :shutdown)
end
exit(reason)
end
# Replies to a :gen.call-style {from, ref} tag.
defp reply({from, ref}, msg) do
send from, {ref, msg}
end
# Removes the handler monitored by `ref`, if any; :error when the DOWN
# message does not belong to a monitored handler.
defp handle_down(ref, reason, handlers, name) do
case :lists.keyfind(ref, handler(:ref) + 1, handlers) do
false -> :error
handler ->
do_terminate(handler, {:stop, reason}, :DOWN, name, :shutdown)
{:ok, :lists.keydelete(ref, handler(:ref) + 1, handlers)}
end
end
# Runs `module.init(arg)` and prepends the handler on success; duplicate
# ids are rejected with {:error, :already_present}.
defp do_add_handler(module, handler, arg, handlers, succ) do
case :lists.keyfind(handler(handler, :id), handler(:id) + 1, handlers) do
false ->
case do_handler(module, :init, [arg]) do
{:ok, res} ->
case res do
{:ok, state} ->
{false, succ, [handler(handler, state: state)|handlers]}
{:ok, state, :hibernate} ->
{true, succ, [handler(handler, state: state)|handlers]}
{:error, _} = error ->
{false, error, handlers}
other ->
{false, {:error, {:bad_return_value, other}}, handlers}
end
{:error, _} = error ->
{false, error, handlers}
end
_ ->
{false, {:error, :already_present}, handlers}
end
end
# Removes the handler with the given id and terminates it, returning the
# terminate result paired with the remaining handlers.
defp do_take_handler(module, args, handlers, name, last_in, reason) do
case :lists.keytake(module, handler(:id) + 1, handlers) do
{:value, handler, handlers} ->
{do_terminate(handler, args, last_in, name, reason), handlers}
false ->
{{:error, :not_found}, handlers}
end
end
defp do_terminate(handler, arg, last_in, name, reason) do
handler(module: module, state: state) = handler
res =
case do_handler(module, :terminate, [arg, state]) do
{:ok, res} -> res
{:error, _} = error -> error
end
report_terminate(handler, reason, state, last_in, name)
res
end
# Wraps a handler callback, converting throws into values and errors/exits
# into {:error, reason} so one handler cannot crash the manager.
defp do_handler(mod, fun, args) do
try do
apply(mod, fun, args)
catch
:throw, val -> {:ok, val}
# NOTE(review): System.stacktrace/0 is only valid inside catch/rescue
# (as here); modern Elixir would use __STACKTRACE__ instead.
:error, val -> {:error, {val, System.stacktrace}}
:exit, val -> {:error, val}
else
res -> {:ok, res}
end
end
# Logs abnormal terminations, demonitors the owner (when monitored), and
# notifies it with {:gen_event_EXIT, id, reason} when present.
defp report_terminate(handler, reason, state, last_in, name) do
report_error(handler, reason, state, last_in, name)
if ref = handler(handler, :ref) do
Process.demonitor(ref, [:flush])
end
if pid = handler(handler, :pid) do
send pid, {:gen_event_EXIT, handler(handler, :id), reason}
end
end
# Logs a crash report for abnormally terminated handlers. Expected
# terminations (:normal, :shutdown, swaps) are silent.
defp report_error(_handler, :normal, _, _, _), do: :ok
defp report_error(_handler, :shutdown, _, _, _), do: :ok
defp report_error(_handler, {:swapped, _, _}, _, _, _), do: :ok
defp report_error(handler, reason, state, last_in, name) do
# Normalize :undef crashes into a more precise reason before logging.
reason =
case reason do
{:undef, [{m,f,a,_}|_]=mfas} ->
cond do
# :code.is_loaded/1 returns false when the module is absent.
# Bug fix: the previous check used the truthy `{:file, _}` result,
# reporting loaded modules as "module could not be loaded" and
# letting genuinely missing modules fall through.
:code.is_loaded(m) === false ->
{:"module could not be loaded", mfas}
function_exported?(m, f, length(a)) ->
reason
true ->
{:"function not exported", mfas}
end
_ ->
reason
end
formatted = report_status(handler, state)
:error_logger.error_msg(
'** gen_event handler ~p crashed.~n' ++
'** Was installed in ~p~n' ++
'** Last event was: ~p~n' ++
'** When handler state == ~p~n' ++
'** Reason == ~p~n', [handler(handler, :id), name, last_in, formatted, reason])
end
# Lets the handler scrub its state (via optional format_status/2) before a
# crash report is logged; falls back to the raw state on any failure.
defp report_status(handler(module: module), state) do
if function_exported?(module, :format_status, 2) do
try do
module.format_status(:terminate, [Process.get(), state])
catch
_, _ -> state
end
else
state
end
end
end
|
lib/elixir/lib/gen_event.ex
| 0.896092
| 0.614423
|
gen_event.ex
|
starcoder
|
defmodule Mix.Tasks.Rustler.New do
use Mix.Task
import Mix.Generator
@shortdoc "Creates a new Rustler project."
@moduledoc """
Generates boilerplate for a new Rustler project.
Usage:
```
mix rustler.new [--module <Module>] [--name <Name>] [--otp-app <OTP App>]
```
"""
# Template manifest: {format, source under priv/templates, target path}.
# :eex templates are evaluated with the generator binding; :text files are
# copied verbatim.
@basic [
{:eex, "basic/.cargo/config", ".cargo/config"},
{:eex, "basic/README.md", "README.md"},
{:eex, "basic/Cargo.toml.eex", "Cargo.toml"},
{:eex, "basic/src/lib.rs", "src/lib.rs"},
{:text, "basic/.gitignore", ".gitignore"}
]
# Embed each template into the module at compile time as a render/1 clause,
# so generation has no runtime dependency on the priv layout.
# @external_resource triggers recompilation when a template file changes.
root = Path.join(:code.priv_dir(:rustler), "templates/")
for {format, source, _} <- @basic do
unless format == :keep do
@external_resource Path.join(root, source)
defp render(unquote(source)), do: unquote(File.read!(Path.join(root, source)))
end
end
# Bug fix: OptionParser's :switches option requires a keyword list mapping
# each switch name to its type. The previous bare atom list
# ([:module, :name, :otp_app]) is not a keyword list and made option
# parsing crash as soon as any of the switches was actually passed.
@switches [module: :string, name: :string, otp_app: :string]
@doc """
Runs the generator.
Prompts interactively for `--module`/`--name` when they are not given on
the command line; `--otp-app` defaults to the current Mix project's app.
"""
def run(argv) do
{opts, _argv, _} = OptionParser.parse(argv, switches: @switches)
module =
case opts[:module] do
nil ->
prompt(
"This is the name of the Elixir module the NIF module will be registered to.\n" <>
"Module name"
)
module ->
module
end
name =
case opts[:name] do
nil ->
prompt_default(
"This is the name used for the generated Rust crate. The default is most likely fine.\n" <>
"Library name",
format_module_name_as_name(module)
)
name ->
name
end
# NOTE(review): when passed via --otp-app this is a string, while the
# Mix project default is an atom; confirm downstream templates accept
# both representations.
otp_app =
case opts[:otp_app] do
nil -> Mix.Project.config() |> Keyword.get(:app)
otp_app -> otp_app
end
check_module_name_validity!(module)
path = Path.join([File.cwd!(), "native/", name])
new(otp_app, path, module, name, opts)
end
# Renders every template into `path` with the collected binding.
defp new(otp_app, path, module, name, _opts) do
module_elixir = "Elixir." <> module
binding = [
otp_app: otp_app,
project_name: module_elixir,
native_module: module_elixir,
module: module,
library_name: name,
rustler_version: Rustler.rustler_version()
]
copy_from(path, binding, @basic)
Mix.Shell.IO.info([:green, "Ready to go! See #{path}/README.md for further instructions."])
end
# Raises with a friendly message unless `name` is a valid Elixir alias.
defp check_module_name_validity!(name) do
unless name =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ do
Mix.raise(
"Module name must be a valid Elixir alias (for example: Foo.Bar), got: #{inspect(name)}"
)
end
end
# "Foo.Bar" -> "foo_bar": derives the default crate name from the module.
defp format_module_name_as_name(module_name) do
String.replace(String.downcase(module_name), ".", "_")
end
# Materializes the template mapping under `target_dir`.
defp copy_from(target_dir, binding, mapping) when is_list(mapping) do
for {format, source, target_path} <- mapping do
target = Path.join(target_dir, target_path)
case format do
:keep ->
File.mkdir_p!(target)
:text ->
create_file(target, render(source))
:eex ->
contents = EEx.eval_string(render(source), binding, file: source)
create_file(target, contents)
end
end
end
# Prompts, falling back to `default` when the user enters nothing.
defp prompt_default(message, default) do
response = prompt([message, :white, " (", default, ")"])
case response do
"" -> default
_ -> response
end
end
# Reads one line from stdin and strips the trailing newline.
# NOTE(review): IO.gets/1 returns :eof on closed stdin, which would crash
# the :binary.last/1 match -- acceptable for an interactive task.
defp prompt(message) do
Mix.Shell.IO.print_app()
resp = IO.gets(IO.ANSI.format([message, :white, " > "]))
?\n = :binary.last(resp)
:binary.part(resp, {0, byte_size(resp) - 1})
end
end
|
rustler_mix/lib/mix/tasks/rustler.new.ex
| 0.712732
| 0.543106
|
rustler.new.ex
|
starcoder
|
defmodule ZenMonitor.Proxy.Batcher do
@moduledoc """
`ZenMonitor.Proxy.Batcher` is responsible for collecting death_certificates from
`ZenMonitor.Proxy` destined for the Batcher's subscriber (normally the subscriber is a
`ZenMonitor.Local.Connector`).
Periodically it will sweep and send all of the death_certificates it has collected since the
last sweep to the subscriber for processing.
"""
use GenServer
use Instruments.CustomFunctions, prefix: "zen_monitor.proxy.batcher"
alias ZenMonitor.Proxy.Tables
# Defaults; both are overridable at runtime through the Application
# environment (see chunk_size/0,1 and sweep_interval/0,1 below).
@chunk_size 5000
@sweep_interval 100
defmodule State do
@moduledoc """
Maintains the internal state for the Batcher
- `subscriber` is the process that death_certificates should be delivered to
- `batch` is the queue of death_certificates pending until the next sweep.
- `length` is the current length of the batch queue (calculating queue length is an O(n)
operation, so it is simpler to track it as elements are added / removed)
"""
@type t :: %__MODULE__{
subscriber: pid,
batch: :queue.queue(),
length: integer
}
defstruct [
:subscriber,
batch: :queue.new(),
length: 0
]
end
## Client
def start_link(subscriber) do
GenServer.start_link(__MODULE__, subscriber)
end
@doc """
Get a batcher for a given subscriber
"""
@spec get(subscriber :: pid) :: pid
def get(subscriber) do
# Fast path: plain registry lookup; only fall back to the
# lookup_or_start path when no batcher exists yet.
case GenRegistry.lookup(__MODULE__, subscriber) do
{:ok, batcher} ->
batcher
{:error, :not_found} ->
{:ok, batcher} = GenRegistry.lookup_or_start(__MODULE__, subscriber, [subscriber])
batcher
end
end
@doc """
Enqueues a new death certificate into the batcher
"""
@spec enqueue(batcher :: pid, pid, reason :: any) :: :ok
def enqueue(batcher, pid, reason) do
GenServer.cast(batcher, {:enqueue, pid, reason})
end
@doc """
Gets the sweep interval from the Application Environment
The sweep interval is the number of milliseconds to wait between sweeps, see
ZenMonitor.Proxy.Batcher's @sweep_interval for the default value
This can be controlled at boot and runtime with the {:zen_monitor, :batcher_sweep_interval}
setting, see `ZenMonitor.Proxy.Batcher.sweep_interval/1` for runtime convenience functionality.
"""
@spec sweep_interval() :: integer
def sweep_interval do
Application.get_env(:zen_monitor, :batcher_sweep_interval, @sweep_interval)
end
@doc """
Puts the sweep interval into the Application Environment
This is a simple convenience function for overwriting the {:zen_monitor, :batcher_sweep_interval}
setting at runtime
"""
@spec sweep_interval(value :: integer) :: :ok
def sweep_interval(value) do
Application.put_env(:zen_monitor, :batcher_sweep_interval, value)
end
@doc """
Gets the chunk size from the Application Environment
The chunk size is the maximum number of death certificates that will be sent during each sweep,
see ZenMonitor.Proxy.Batcher's @chunk_size for the default value
This can be controlled at boot and runtime with the {:zen_monitor, :batcher_chunk_size}
setting, see ZenMonitor.Proxy.Batcher.chunk_size/1 for runtime convenience functionality.
"""
@spec chunk_size() :: integer
def chunk_size do
Application.get_env(:zen_monitor, :batcher_chunk_size, @chunk_size)
end
@doc """
Puts the chunk size into the Application Environment
This is a simple convenience function for overwriting the {:zen_monitor, :batcher_chunk_size}
setting at runtime.
"""
@spec chunk_size(value :: integer) :: :ok
def chunk_size(value) do
Application.put_env(:zen_monitor, :batcher_chunk_size, value)
end
## Server
def init(subscriber) do
# Monitor the subscriber so this batcher can stop (and clean up ETS)
# when it dies; then arm the periodic sweep timer.
Process.monitor(subscriber)
schedule_sweep()
{:ok, %State{subscriber: subscriber}}
end
@doc """
Handle enqueuing a new death_certificate
Simply puts it in the batch queue.
"""
def handle_cast({:enqueue, pid, reason}, %State{batch: batch, length: length} = state) do
increment("enqueue")
{:noreply, %State{state | batch: :queue.in({pid, reason}, batch), length: length + 1}}
end
@doc """
Handle the subscriber crashing
When the subscriber crashes there is no point in continuing to run, so the Batcher stops.
"""
def handle_info(
{:DOWN, _, :process, subscriber, reason},
%State{subscriber: subscriber} = state
) do
# The subscriber process has crashed, clean up the subscribers table
:ets.match_delete(Tables.subscribers(), {{:_, subscriber}})
{:stop, {:shutdown, {:subscriber_down, reason}}, state}
end
@doc """
Handle sweep
Every sweep the batcher will send the death_certificates batched up since the last sweep to the
subscriber. After that it will schedule another sweep.
"""
def handle_info(:sweep, %State{} = state) do
new_state = do_sweep(state)
schedule_sweep()
{:noreply, new_state}
end
## Private
# Delivers up to chunk_size() certificates to the subscriber; a no-op when
# the batch is empty. :noconnect avoids blocking on a disconnected node.
@spec do_sweep(state :: State.t()) :: State.t()
defp do_sweep(%State{length: 0} = state), do: state
defp do_sweep(%State{subscriber: subscriber, batch: batch, length: length} = state) do
{summary, overflow, new_length} = chunk(batch, length)
increment("sweep", length - new_length)
Process.send(subscriber, {:dead, node(), :queue.to_list(summary)}, [:noconnect])
%State{state | batch: overflow, length: new_length}
end
# Splits the batch into {to-send, overflow, remaining-length} honoring the
# configured chunk size.
@spec chunk(batch :: :queue.queue(), length :: integer) ::
{:queue.queue(), :queue.queue(), integer}
defp chunk(batch, length) do
size = chunk_size()
if length <= size do
{batch, :queue.new(), 0}
else
{summary, overflow} = :queue.split(size, batch)
{summary, overflow, length - size}
end
end
@spec schedule_sweep() :: reference
defp schedule_sweep do
Process.send_after(self(), :sweep, sweep_interval())
end
end
|
lib/zen_monitor/proxy/batcher.ex
| 0.865878
| 0.44071
|
batcher.ex
|
starcoder
|
defmodule NewRelic.Sampler.Beam do
use GenServer
@kb 1024
@mb 1024 * 1024
# Takes samples of the state of the BEAM at an interval
@moduledoc false
def start_link(_) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
def init(:ok) do
# throw away first value
:cpu_sup.util()
NewRelic.sample_process()
if NewRelic.Config.enabled?(),
do: Process.send_after(self(), :report, NewRelic.Sampler.Reporter.random_sample_offset())
{:ok, %{previous: take_sample()}}
end
def handle_info(:report, state) do
current_sample = record_sample(state)
Process.send_after(self(), :report, NewRelic.Sampler.Reporter.sample_cycle())
{:noreply, %{state | previous: current_sample}}
end
def handle_call(:report, _from, state) do
current_sample = record_sample(state)
{:reply, :ok, %{state | previous: current_sample}}
end
def record_sample(state) do
{current_sample, stats} = collect(state.previous)
NewRelic.report_sample(:BeamStat, stats)
NewRelic.report_metric(:memory, mb: stats[:memory_total_mb])
NewRelic.report_metric(:cpu, utilization: stats[:cpu_utilization])
current_sample
end
defp collect(previous) do
current_sample = take_sample()
stats = Map.merge(current_sample, delta(previous, current_sample))
{current_sample, stats}
end
defp take_sample do
{gcs, _, _} = :erlang.statistics(:garbage_collection)
{reductions, _} = :erlang.statistics(:reductions)
{{:input, bytes_in}, {:output, bytes_out}} = :erlang.statistics(:io)
memory = :erlang.memory()
%{
garbage_collections: gcs,
input_kb: bytes_in / @kb,
output_kb: bytes_out / @kb,
reductions: reductions,
run_queue: :erlang.statistics(:total_run_queue_lengths),
memory_total_mb: memory[:total] / @mb,
memory_procs_mb: memory[:processes_used] / @mb,
memory_ets_mb: memory[:ets] / @mb,
memory_atom_mb: memory[:atom_used] / @mb,
memory_binary_mb: memory[:binary] / @mb,
memory_code_mb: memory[:code] / @mb,
atom_count: :erlang.system_info(:atom_count),
ets_count: safe_check(:erlang, :system_info, [:ets_count]),
port_count: :erlang.system_info(:port_count),
process_count: :erlang.system_info(:process_count),
atom_limit: :erlang.system_info(:atom_limit),
ets_limit: :erlang.system_info(:ets_limit),
port_limit: :erlang.system_info(:port_limit),
process_limit: :erlang.system_info(:process_limit),
schedulers: :erlang.system_info(:schedulers),
scheduler_utilization: safe_check(:scheduler, :sample, []),
cpu_count: :erlang.system_info(:logical_processors),
cpu_utilization: :cpu_sup.util()
}
end
# Invokes `apply(m, f, a)` and returns nil instead of raising —
# some checks are only available in OTP 21+.
def safe_check(m, f, a) do
  try do
    apply(m, f, a)
  rescue
    _ -> nil
  end
end
# Counter statistics are only meaningful as the change since the
# previous sample; gauge values are left to take_sample/0.
defp delta(previous, current) do
  %{
    garbage_collections: current.garbage_collections - previous.garbage_collections,
    input_kb: current.input_kb - previous.input_kb,
    output_kb: current.output_kb - previous.output_kb,
    reductions: current.reductions - previous.reductions,
    scheduler_utilization:
      delta(:util, previous.scheduler_utilization, current.scheduler_utilization)
  }
end
# Scheduler samples are nil when :scheduler.sample/0 is unavailable
# (pre-OTP-21); otherwise derive total utilization between the samples.
defp delta(:util, nil, nil), do: nil

defp delta(:util, previous, current) do
  [{:total, total_utilization, _} | _] = :scheduler.utilization(previous, current)
  total_utilization
end
end
|
lib/new_relic/sampler/beam.ex
| 0.771155
| 0.404302
|
beam.ex
|
starcoder
|
defmodule Membrane.MP4.MovieBox do
  @moduledoc """
  Assembles an MPEG-4 movie box.

  The movie box (`moov`) is a top-level box describing the presentation as
  a whole. It consists of:
  * exactly one movie header (`mvhd` atom) carrying media-independent data
    such as track count, volume, duration and the presentation timescale
  * one or more track boxes (`trak` atom)
  * zero or one movie extends box (`mvex` atom)

  For details on the movie box and its contents, refer to the documentation
  of `#{inspect(__MODULE__)}` submodules or to
  [ISO/IEC 14496-12](https://www.iso.org/standard/74428.html).
  """
  alias __MODULE__.TrackBox
  alias Membrane.MP4.{Container, Track}

  @movie_timescale 1000

  @spec assemble([Track.t()], Container.t()) :: Container.t()
  def assemble(tracks, extensions \\ []) do
    finalized = for track <- tracks, do: Track.finalize(track, @movie_timescale)

    children =
      movie_header(finalized) ++ Enum.flat_map(finalized, &TrackBox.assemble/1) ++ extensions

    [moov: %{children: children, fields: %{}}]
  end

  # Builds the `mvhd` header. The presentation duration is the longest
  # track's movie duration; next_track_id is one greater than the track count.
  defp movie_header(tracks) do
    movie_duration =
      tracks
      |> Enum.map(& &1.movie_duration)
      |> Enum.max()

    [
      mvhd: %{
        children: [],
        fields: %{
          version: 0,
          flags: 0,
          creation_time: 0,
          modification_time: 0,
          timescale: @movie_timescale,
          duration: movie_duration,
          rate: {1, 0},
          volume: {1, 0},
          matrix_value_A: {1, 0},
          matrix_value_B: {0, 0},
          matrix_value_C: {0, 0},
          matrix_value_D: {1, 0},
          matrix_value_U: {0, 0},
          matrix_value_V: {0, 0},
          matrix_value_W: {1, 0},
          matrix_value_X: {0, 0},
          matrix_value_Y: {0, 0},
          next_track_id: length(tracks) + 1,
          quicktime_current_time: 0,
          quicktime_poster_time: 0,
          quicktime_preview_duration: 0,
          quicktime_preview_time: 0,
          quicktime_selection_duration: 0,
          quicktime_selection_time: 0
        }
      }
    ]
  end
end
|
lib/membrane_mp4/movie_box.ex
| 0.869424
| 0.773687
|
movie_box.ex
|
starcoder
|
defmodule Mmo.Player do
  @moduledoc """
  Player struct plus pure functions for combat, movement, experience gain,
  levelling and (re)spawning in the MMO world.
  """

  alias Mmo.Player
  alias Mmo.Player.Controller
  alias Mmo.World

  @derive Jason.Encoder
  defstruct id: "",
            name: "",
            x: 64,
            y: 64,
            attack: 10,
            max_health: 100,
            health: 100,
            exp: 0,
            required_exp: 25,
            level: 0,
            type: ""

  @doc "Applies `damage` points to `defender` and returns the updated player."
  def hit(damage, %Player{health: health} = defender) do
    %{defender | health: health - damage}
  end

  @doc """
  Moves `player` one step towards the target coordinates.

  Accepts either a `%{x: _, y: _}` map, `:error` (an unparseable command,
  which leaves the player in place), or raw input that is first translated
  by `Controller.convert/1`.
  """
  # Fix: the previous spec also allowed `nil` for the player, but no clause
  # ever handled nil — the spec now matches the implementation.
  @spec move(%Player{}, :up | :down | :left | :right | :error | map) :: %Player{}
  def move(%Player{x: current_x, y: current_y} = player, %{x: new_x, y: new_y}) do
    x = update_cord(current_x, new_x)
    y = update_cord(current_y, new_y)
    %{player | x: x, y: y}
  end

  def move(%Player{} = player, :error) do
    player
  end

  def move(player, data) do
    move(player, Controller.convert(data))
  end

  @doc """
  Adds `increase_amount` experience; levels the player up when the
  requirement is reached, carrying left-over experience into the new level.
  """
  def increase_exp(
        %Player{exp: exp, required_exp: required_exp} = player,
        increase_amount
      ) do
    new_exp = exp + increase_amount

    if new_exp >= required_exp do
      left_over_exp = new_exp - required_exp
      level(player, left_over_exp)
    else
      %{player | exp: new_exp}
    end
  end

  @doc """
  Levels the player up: +1 level, +1 attack, doubled exp requirement, and
  max health raised by 50 with health fully restored.
  """
  def level(
        %Player{
          attack: attack,
          required_exp: required_exp,
          level: level,
          max_health: max_health
        } = player,
        exp
      ) do
    new_health = max_health + 50

    %{
      player
      | level: level + 1,
        attack: attack + 1,
        exp: exp,
        required_exp: required_exp * 2,
        health: new_health,
        max_health: new_health
    }
  end

  @doc "Creates a new player at random world coordinates with a fresh unique id."
  def new(%World{player_ids: player_ids} = world) do
    {x, y} = World.get_random_cords(world)
    %Player{id: generate_uuid(player_ids), x: x, y: y}
  end

  @doc "Creates a player from existing id/coordinate data."
  def new(%{id: player_id, x: x, y: y}) do
    %Player{id: player_id, x: x, y: y}
  end

  @doc "Re-creates `player` (keeping its id) at random world coordinates."
  def new(%Player{id: player_id}, %World{} = world) do
    {x, y} = World.get_random_cords(world)
    %Player{id: player_id, x: x, y: y}
  end

  # Generates a UUID, retrying until it does not collide with an id in use.
  defp generate_uuid(player_ids) do
    uuid = Ecto.UUID.generate()
    generate_uuid(Enum.member?(player_ids, uuid), uuid, player_ids)
  end

  defp generate_uuid(true, _uuid, player_ids) do
    generate_uuid(player_ids)
  end

  defp generate_uuid(false, uuid, _player_ids) do
    uuid
  end

  # Steps a single coordinate one unit towards the target value.
  defp update_cord(current, new) when current > new do
    current - 1
  end

  defp update_cord(current, new) when current < new do
    current + 1
  end

  defp update_cord(current, _new) do
    current
  end

  @doc "Subtracts `damage_amount` from the player's health."
  def damage(%Player{health: health} = player, damage_amount) do
    %{player | health: health - damage_amount}
  end

  @doc "Damages the player by `percent` of their maximum health."
  def damage_percent(%Player{max_health: max_health} = player, percent) do
    damage(player, max_health * percent)
  end

  @doc false
  # Deprecated camelCase alias kept for backwards compatibility;
  # prefer `damage_percent/2`.
  def damagePercent(%Player{} = player, percent) do
    damage_percent(player, percent)
  end

  @doc "Returns the player unchanged while alive; otherwise respawns them."
  def respawn(%Player{health: health} = player, %World{} = _world) when health > 0 do
    player
  end

  def respawn(%Player{} = player, %World{} = world) do
    Player.new(player, world)
  end
end
|
lib/mmo/player/player.ex
| 0.594669
| 0.411879
|
player.ex
|
starcoder
|
defmodule Niex.State do
  @moduledoc """
  The internal state representing a running `Niex.Notebook`.
  """

  defstruct(
    notebook: %Niex.Notebook{},
    selected_cell: nil,
    worksheet: 0,
    env: [],
    bindings: [],
    path: nil,
    dirty: false
  )

  @doc "Returns a clean state holding a single-worksheet starter notebook."
  def new() do
    starter_cell = %{
      prompt_number: 0,
      id: UUID.uuid4(),
      cell_type: "code",
      content: ["IO.inspect(\"hello, world\")"],
      outputs: [%{text: "", type: "code"}]
    }

    %Niex.State{
      dirty: false,
      notebook: %Niex.Notebook{
        metadata: %{name: "Untitled Notebook"},
        worksheets: [%{cells: [starter_cell]}]
      }
    }
  end

  @doc "Loads a notebook state from the file at `path`, remembering the path."
  def from_file(path) do
    %{from_string(File.read!(path)) | path: path}
  end

  @doc "Decodes a JSON string into a notebook state."
  def from_string(str) do
    %Niex.State{notebook: Poison.decode!(str, keys: :atoms, as: %Niex.Notebook{})}
  end

  @doc "Saves the notebook to `path` and clears the dirty flag."
  def save(state, path), do: save(%{state | path: path})

  def save(state = %Niex.State{path: path}) when not is_nil(path) do
    :ok = File.write(path, Poison.encode!(state.notebook))
    %{state | dirty: false}
  end

  # No path is known yet — nothing to write.
  def save(_), do: nil

  @doc "Replaces the notebook metadata, marking the state dirty."
  def update_metadata(state, metadata),
    do: %{state | dirty: true, notebook: %{state.notebook | metadata: metadata}}

  @doc "Inserts a cell of `cell_type` at `idx` in the active worksheet."
  def add_cell(state, idx, cell_type) do
    notebook = Niex.Notebook.add_cell(state.notebook, state.worksheet, idx, cell_type)
    %{state | notebook: notebook, dirty: true}
  end

  @doc "Removes the cell with the given `id`."
  def remove_cell(state, id),
    do: %{state | notebook: Niex.Notebook.remove_cell(state.notebook, id), dirty: true}

  @doc "Applies `update` to the cell with the given `id`."
  def update_cell(state, id, update),
    do: %{state | notebook: Niex.Notebook.update_cell(state.notebook, id, update), dirty: true}

  @doc "Stores evaluation bindings (does not mark the state dirty)."
  def update_bindings(state, bindings), do: %{state | bindings: bindings}

  @doc "Stores the evaluation environment (does not mark the state dirty)."
  def update_env(state, env), do: %{state | env: env}

  @doc "Executes cell `id`, streaming output to `output_pid`."
  def execute_cell(state, id, output_pid) do
    notebook =
      Niex.Notebook.execute_cell(state.notebook, id, output_pid, state.bindings, state.env)

    %{state | notebook: notebook, dirty: true}
  end

  @doc "Marks cell `n` as the selected cell."
  def set_selected_cell(state, n), do: %{state | selected_cell: n}

  @doc "Returns the currently active worksheet."
  def active_worksheet(state), do: Enum.at(state.notebook.worksheets, state.worksheet)
end
|
niex/lib/niex/state.ex
| 0.628407
| 0.558417
|
state.ex
|
starcoder
|
defmodule Scidata.FashionMNIST do
  @moduledoc """
  Module for downloading the [FashionMNIST dataset](https://github.com/zalandoresearch/fashion-mnist#readme).
  """
  require Scidata.Utils
  alias Scidata.Utils

  @base_url "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
  @train_image_file "train-images-idx3-ubyte.gz"
  @train_label_file "train-labels-idx1-ubyte.gz"
  @test_image_file "t10k-images-idx3-ubyte.gz"
  @test_label_file "t10k-labels-idx1-ubyte.gz"

  @doc """
  Downloads the FashionMNIST training dataset or fetches it locally.
  ## Options
  * `:transform_images` - A function that transforms images, defaults to
  `& &1`.
  It accepts a tuple like `{binary_data, tensor_type, data_shape}` which
  can be used for converting the `binary_data` to a tensor with a function
  like:
  fn {labels_binary, type, _shape} ->
  labels_binary
  |> Nx.from_binary(type)
  |> Nx.new_axis(-1)
  |> Nx.equal(Nx.tensor(Enum.to_list(0..9)))
  |> Nx.to_batched_list(32)
  end
  * `:transform_labels` - similar to `:transform_images` but applied to
  dataset labels
  """
  def download(opts \\ []), do: fetch_dataset(@train_image_file, @train_label_file, opts)

  @doc """
  Downloads the FashionMNIST test dataset or fetches it locally.
  Accepts the same options as `download/1`.
  """
  def download_test(opts \\ []), do: fetch_dataset(@test_image_file, @test_label_file, opts)

  # Shared implementation for the train/test variants: resolves the optional
  # transforms (defaulting to identity) and returns the {images, labels} pair.
  defp fetch_dataset(image_file, label_file, opts) do
    transform_images = opts[:transform_images] || (& &1)
    transform_labels = opts[:transform_labels] || (& &1)

    {download_images(image_file, transform_images),
     download_labels(label_file, transform_labels)}
  end

  # Fetches an image file and splits its header: the first 32-bit field is
  # skipped, then image count, rows and cols precede the raw u8 pixel data.
  defp download_images(image_file, transform) do
    data = Utils.get!(@base_url <> image_file).body
    <<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = data
    transform.({images, {:u, 8}, {n_images, n_rows, n_cols}})
  end

  # Fetches a label file: a skipped 32-bit field and the label count
  # precede the raw u8 labels.
  defp download_labels(label_file, transform) do
    data = Utils.get!(@base_url <> label_file).body
    <<_::32, n_labels::32, labels::binary>> = data
    transform.({labels, {:u, 8}, {n_labels}})
  end
end
|
lib/scidata/fashionmnist.ex
| 0.824144
| 0.733667
|
fashionmnist.ex
|
starcoder
|
defmodule Nostrum.Cache.UserCache do
  @default_cache_implementation Nostrum.Cache.UserCache.ETS
  @moduledoc """
  Cache behaviour & dispatcher for users.
  You can call the functions provided by this module independent of which cache
  is configured, and it will dispatch to the configured cache implementation.
  By default, #{@default_cache_implementation} will be used for caching users.
  You can override this in the `:caches` option of the `:nostrum` application
  by setting the `:users` field to a different module implementing the behaviour
  defined by this module.
  See the documentation for the `Nostrum.Cache.GuildCache` module for more details.
  """
  alias Nostrum.Struct.User
  alias Nostrum.Util
  import Nostrum.Snowflake, only: [is_snowflake: 1]
  # Implementation module resolved once at compile time from app config;
  # all public functions below delegate to it.
  @configured_cache :nostrum
                    |> Application.compile_env([:caches, :users], @default_cache_implementation)
  ## Supervisor callbacks
  @doc false
  defdelegate init(init_arg), to: @configured_cache
  @doc false
  defdelegate start_link(init_arg), to: @configured_cache
  @doc false
  defdelegate child_spec(opts), to: @configured_cache
  ## Behaviour specification
  @doc ~s"""
  Retrieves a user from the cache by id.
  If successful, returns `{:ok, user}`. Otherwise, returns `{:error, reason}`.
  ## Example
  ```elixir
  case Nostrum.Cache.UserCache.get(1111222233334444) do
  {:ok, user} ->
  "We found " <> user.username
  {:error, _reason} ->
  "No es bueno"
  end
  ```
  """
  @callback get(id :: User.id()) :: {:ok, User.t()} | {:error, atom}
  @doc ~S"""
  Add a new user to the cache based on the Discord Gateway payload.
  Returns a `t:Nostrum.Struct.User.t/0` struct representing the created user.
  """
  @callback create(payload :: map()) :: User.t()
  @doc ~S"""
  Bulk add multiple users to the cache at once.
  Returns `:ok`.
  """
  @callback bulk_create(user_payloads :: Enum.t()) :: :ok
  @doc ~S"""
  Update a user in the cache based on payload sent via the Gateway.
  Returns `:noop` if the user has not been updated in the cache, or
  `{old_user, new_user}` is the user has been written to the cache.
  """
  @callback update(payload :: map()) :: :noop | {User.t(), User.t()}
  @doc ~S"""
  Delete a user by ID.
  Returns the deleted user if present in the cache, or
  `:noop` if the user was not cached.
  """
  @callback delete(snowflake :: User.id()) :: :noop | User.t()
  ## Dispatching
  # get/1 intentionally keeps the behaviour callback's docs (no @doc false).
  defdelegate get(id), to: @configured_cache
  @doc false
  defdelegate create(payload), to: @configured_cache
  @doc false
  defdelegate bulk_create(users), to: @configured_cache
  @doc false
  defdelegate update(payload), to: @configured_cache
  @doc false
  defdelegate delete(snowflake), to: @configured_cache
  @doc """
  Same as `c:get/1`, but raises `Nostrum.Error.CacheError` in case of a failure.
  """
  @spec get!(User.id()) :: no_return | User.t()
  def get!(id) when is_snowflake(id), do: id |> get |> Util.bangify_find(id, __MODULE__)
end
|
lib/nostrum/cache/user_cache.ex
| 0.866387
| 0.563408
|
user_cache.ex
|
starcoder
|
defmodule Neoscan.Explanations do
  @moduledoc false

  # Static glossary mapping UI topic keys to human-readable explanations.
  @explanations %{
    # blocks
    "block_hash" =>
      "Hash of all of the info stored in a block, including, but not limited to, the index, time, merkle root, etc...",
    "block_index" =>
      "Position of this block in the overall chain of blocks forming the 'blockchain'",
    "block_transactions" => "List of transactions included in this block",
    "block_time" => "Time the block was formed",
    "block_version" => "The current version of the block system used by NEO",
    "block_merkle_root" =>
      "Each transaction in a block is hashed. all of the individual transaction hashes together are then hashed to form a new hash; this combined hash is known as the merkle root.",
    "block_validator" =>
      "Public address of the consensus node that first announced the transaction",
    "block_size" => "Total size of the block in kilobytes",
    "block_previous_block" => "Block hash of the previous block",
    "block_next_block" => "Block hash of the next block",
    "block_confirmations" => "Number of blocks that have been created after this block",
    # scripts
    "bytecode_invocation_script" =>
      "Hex string of the Elliptic Curve Digital Signature Algorithm signature generated from transaction data and a user's private key. The verification script uses this to check against the public key",
    "bytecode_verification_script" =>
      "Hex string for checking the public key against the Elliptic Curve Digital Signature Algorithm signature",
    "opcode_invocation_script" => "Human readable format of the bytecode invocation script",
    "opcode_verification_script" => "Human readable format of the bytecode verification script",
    # transactions
    "transaction_type" =>
      "Types of transactions can be claim, for claiming gas; contract, for sending NEO and GAS; invocation, for calling a smart contract; and miner, for validation of the block by a consensus node",
    "transaction_hash" => "Hash of all the information in the transaction",
    "transaction_time" => "Time the transaction was included in the blockchain",
    "transaction_size" => "Size of the transaction in bytes",
    "transaction_confirmations" =>
      "Number of blocks that have been created after the block containing this transaction",
    "transaction_network_fees" =>
      "Gas charged by the consensus nodes for confiming a transaction and including it in the blockchain.",
    "transaction_system_fees" =>
      "Cost in gas charged under NEO system fees for confiming a transaction and including it in the blockchain. The system fees are distributed to NEO holders",
    "transaction_spent" =>
      "After the transaction has been completed, coins that have been sent to another address",
    "transaction_unspent" =>
      "After the transaction has been completed, coins that remain in the same address",
    # addresses
    "address_hash" => "hash of a public address",
    "address_balance" =>
      "NEO and GAS held by an address. Note: other tokens are stored in smart contracts",
    "address_unclaimed" =>
      "Gas generated by NEO needs to be claimed by an address owner before it can be spent. Gas is generated by NEO holders through system fees or blocks generated over approximately the first 22 years of the NEO blockchain",
    "address_created" => "time the address was created",
    "address_transactions" => "history of the transactions for the address",
    "address_first_transaction" => "first transaction ever made by the address"
  }

  # Returns the explanation for `topic`, or a fallback message when the
  # topic is unknown.
  def get(topic) do
    Map.get(@explanations, topic, "failed to find this explanation")
  end
end
|
apps/neoscan/lib/neoscan/helpers/explanations.ex
| 0.613005
| 0.529689
|
explanations.ex
|
starcoder
|
defmodule Jsonpatch do
  @moduledoc """
  An implementation of [RFC 6902](https://tools.ietf.org/html/rfc6902) in pure Elixir.
  The patch can be a single change or a list of things that shall be changed. Therefore
  a list or a single JSON patch can be provided. Every patch belongs to a certain operation
  which influences the usage.
  According to [RFC 6901](https://tools.ietf.org/html/rfc6901) escaping of `/` and `~` is done
  by using `~1` for `/` and `~0` for `~`.
  """
  alias Jsonpatch.Operation
  alias Jsonpatch.Operation.Add
  alias Jsonpatch.Operation.Copy
  alias Jsonpatch.Operation.Move
  alias Jsonpatch.Operation.Remove
  alias Jsonpatch.Operation.Replace
  alias Jsonpatch.Operation.Test
  @typedoc """
  A valid Jsonpatch operation by RFC 6902
  """
  @type t :: Add.t() | Remove.t() | Replace.t() | Copy.t() | Move.t() | Test.t()
  @typedoc """
  Describes an error that occurred while patching.
  """
  @type error :: {:error, :invalid_path | :invalid_index | :test_failed, bitstring()}
  @doc """
  Apply a Jsonpatch or a list of Jsonpatches to a map or struct. The whole patch will not be applied
  when any path is invalid or any other error occured.
  ## Examples
  iex> patch = [
  ...> %Jsonpatch.Operation.Add{path: "/age", value: 33},
  ...> %Jsonpatch.Operation.Replace{path: "/hobbies/0", value: "Elixir!"},
  ...> %Jsonpatch.Operation.Replace{path: "/married", value: true},
  ...> %Jsonpatch.Operation.Remove{path: "/hobbies/1"},
  ...> %Jsonpatch.Operation.Remove{path: "/hobbies/2"},
  ...> %Jsonpatch.Operation.Copy{from: "/name", path: "/surname"},
  ...> %Jsonpatch.Operation.Move{from: "/home", path: "/work"},
  ...> %Jsonpatch.Operation.Test{path: "/name", value: "Bob"}
  ...> ]
  iex> target = %{"name" => "Bob", "married" => false, "hobbies" => ["Sport", "Elixir", "Football"], "home" => "Berlin"}
  iex> Jsonpatch.apply_patch(patch, target)
  {:ok, %{"name" => "Bob", "married" => true, "hobbies" => ["Elixir!"], "age" => 33, "surname" => "Bob", "work" => "Berlin"}}
  iex> # Patch will not be applied if test fails. The target will not be changed.
  iex> patch = [
  ...> %Jsonpatch.Operation.Add{path: "/age", value: 33},
  ...> %Jsonpatch.Operation.Test{path: "/name", value: "Alice"}
  ...> ]
  iex> target = %{"name" => "Bob", "married" => false, "hobbies" => ["Sport", "Elixir", "Football"], "home" => "Berlin"}
  iex> Jsonpatch.apply_patch(patch, target)
  {:error, :test_failed, "Expected value 'Alice' at '/name'"}
  """
  @spec apply_patch(Jsonpatch.t() | list(Jsonpatch.t()), map()) ::
          {:ok, map()} | Jsonpatch.error()
  def apply_patch(json_patch, target)
  def apply_patch(json_patch, %{} = target) when is_list(json_patch) do
    # Operations MUST be sorted before applying because a remove operation for path "/foo/2" must be done
    # before the remove operation for path "/foo/1". Without order it could be possible that the wrong
    # value will be removed or only one value instead of two.
    result =
      json_patch
      |> Enum.map(&create_sort_value/1)
      |> Enum.sort(fn {sort_value_1, _}, {sort_value_2, _} -> sort_value_1 >= sort_value_2 end)
      |> Enum.map(fn {_, patch} -> patch end)
      |> Enum.reduce(target, &Jsonpatch.Operation.apply_op/2)
    case result do
      {:error, _, _} = error -> error
      ok_result -> {:ok, ok_result}
    end
  end
  def apply_patch(json_patch, %{} = target) do
    # Single-operation variant: delegate straight to the operation.
    result = Operation.apply_op(json_patch, target)
    case result do
      {:error, _, _} = error -> error
      ok_result -> {:ok, ok_result}
    end
  end
  @doc """
  Apply a Jsonpatch or a list of Jsonpatches to a map or struct. In case of an error
  it will raise an exception.
  (See Jsonpatch.apply_patch/2 for more details)
  """
  @spec apply_patch!(Jsonpatch.t() | list(Jsonpatch.t()), map()) :: map()
  def apply_patch!(json_patch, target)
  def apply_patch!(json_patch, target) do
    case apply_patch(json_patch, target) do
      {:ok, patched} -> patched
      {:error, _, _} = error -> raise JsonpatchException, error
    end
  end
  @doc """
  Creates a patch from the difference of a source map to a destination map or list.
  ## Examples
  iex> source = %{"name" => "Bob", "married" => false, "hobbies" => ["Elixir", "Sport", "Football"]}
  iex> destination = %{"name" => "Bob", "married" => true, "hobbies" => ["Elixir!"], "age" => 33}
  iex> Jsonpatch.diff(source, destination)
  [
  %Jsonpatch.Operation.Replace{path: "/married", value: true},
  %Jsonpatch.Operation.Remove{path: "/hobbies/2"},
  %Jsonpatch.Operation.Remove{path: "/hobbies/1"},
  %Jsonpatch.Operation.Replace{path: "/hobbies/0", value: "Elixir!"},
  %Jsonpatch.Operation.Add{path: "/age", value: 33}
  ]
  """
  @spec diff(maybe_improper_list | map, maybe_improper_list | map) :: list(Jsonpatch.t())
  def diff(source, destination)
  def diff(%{} = source, %{} = destination) do
    Map.to_list(destination)
    |> do_diff(source, "")
  end
  def diff(source, destination) when is_list(source) and is_list(destination) do
    # Lists are diffed as {index, value} pairs so the same walk handles both.
    Enum.with_index(destination)
    |> Enum.map(fn {v, k} -> {k, v} end)
    |> do_diff(source, "")
  end
  def diff(_, _) do
    []
  end
  # ===== ===== PRIVATE ===== =====
  # Helper for better readability
  defguardp are_unequal_maps(val1, val2)
            when val1 != val2 and is_map(val2) and is_map(val1)
  # Helper for better readability
  defguardp are_unequal_lists(val1, val2)
            when val1 != val2 and is_list(val2) and is_list(val1)
  # Diff reduce loop
  defp do_diff(destination, source, ancestor_path, acc \\ [], checked_keys \\ [])
  defp do_diff([], source, ancestor_path, acc, checked_keys) do
    # The complete destination was checked. Every key that is not in the list of
    # checked keys must be removed.
    acc =
      source
      |> flat()
      |> Stream.map(fn {k, _} -> k end)
      |> Stream.filter(fn k -> k not in checked_keys end)
      |> Stream.map(fn k -> %Remove{path: "#{ancestor_path}/#{k}"} end)
      |> Enum.reduce(acc, fn r, acc -> [r | acc] end)
    acc
  end
  defp do_diff([{key, val} | tail], source, ancestor_path, acc, checked_keys)
       when is_list(source) or is_map(source) do
    current_path = "#{ancestor_path}/#{escape(key)}"
    acc =
      case get(source, key) do
        # Key is not present in source
        nil ->
          [%Add{path: current_path, value: val} | acc]
        # Source has a different value but both (destination and source) value are lists or a maps
        source_val
        when are_unequal_lists(source_val, val) or are_unequal_maps(source_val, val) ->
          # Enter next level - set check_keys to empty list because it is a different level
          do_diff(flat(val), source_val, current_path, acc, [])
        # Scalar source val that is not equal
        source_val when source_val != val ->
          [%Replace{path: current_path, value: val} | acc]
        _ ->
          acc
      end
    # Diff next value of same level
    do_diff(tail, source, ancestor_path, acc, [escape(key) | checked_keys])
  end
  # Transforms a map into a tuple list and a list also into a tuple list with indices
  defp flat(val) when is_list(val) do
    Stream.with_index(val) |> Enum.map(fn {v, k} -> {k, v} end)
  end
  defp flat(val) when is_map(val) do
    Map.to_list(val)
  end
  # Unified access to lists or maps
  defp get(source, key) when is_list(source) do
    Enum.at(source, key)
  end
  defp get(source, key) when is_map(source) do
    Map.get(source, key)
  end
  # RFC 6901 escaping: `~` becomes `~0` and `/` becomes `~1`
  # (the `~` replacement must run first to avoid double-escaping).
  defp escape(subpath) when is_bitstring(subpath) do
    subpath
    |> String.replace("~", "~0")
    |> String.replace("/", "~1")
  end
  defp escape(subpath) do
    subpath
  end
  # Create once an easily sortable value for an operation
  defp create_sort_value(%{path: path} = operation) do
    fragments = String.split(path, "/")
    x = Jsonpatch.PathUtil.operation_sort_value?(operation) * 1_000_000 * 100_000_000
    y = length(fragments) * 100_000_000
    z =
      case List.last(fragments) |> Integer.parse() do
        :error -> 0
        {int, _} -> int
      end
    # Structure of the record sort value
    # x = Kind of PathUtil
    # y = Amount of fragments (how deep goes the path?)
    # z = At which position in a list?
    # xxxxyyyyyyzzzzzzzz
    {x + y + z, operation}
  end
end
|
lib/jsonpatch.ex
| 0.89783
| 0.520131
|
jsonpatch.ex
|
starcoder
|
defmodule Binance.FuturesCoin do
alias Binance.Futures.Coin.Rest.HTTPClient
@type error ::
{:binance_error, %{code: integer(), message: String.t()}}
| {:http_error, any()}
| {:poison_decode_error, any()}
| {:config_missing, String.t()}
# Server
@doc """
Pings Binance API. Returns `{:ok, %{}}` if successful, `{:error, reason}` otherwise
"""
@spec ping() :: {:ok, %{}} | {:error, error()}
def ping() do
HTTPClient.get_binance("/dapi/v1/ping")
end
@doc """
Get binance server time in unix epoch.
## Example
```
{:ok, 1515390701097}
```
"""
@spec get_server_time() :: {:ok, integer()} | {:error, error()}
def get_server_time() do
case HTTPClient.get_binance("/dapi/v1/time") do
{:ok, %{"serverTime" => time}} -> {:ok, time}
err -> err
end
end
@doc """
Fetches the premium index for `instrument`.
Returns the HTTP client's `{:ok, map}` / `{:error, ...}` result directly.
"""
@spec get_index_price(String.t()) :: {:ok, map()} | {:error, error()}
def get_index_price(instrument) do
  # The client already returns {:ok, data} | {:error, reason}; the former
  # identity `case` re-wrapping was redundant.
  HTTPClient.get_binance("/dapi/v1/premiumIndex?symbol=#{instrument}")
end
@doc """
Fetches the best bid/ask book ticker for `instrument`.
Returns the HTTP client's `{:ok, map}` / `{:error, ...}` result directly.
"""
@spec get_best_ticker(String.t()) :: {:ok, map()} | {:error, error()}
def get_best_ticker(instrument) do
  # The client already returns {:ok, data} | {:error, reason}; the former
  # identity `case` re-wrapping was redundant.
  HTTPClient.get_binance("/dapi/v1/ticker/bookTicker?symbol=#{instrument}")
end
@doc """
Fetches the exchange information, wrapped in a `Binance.ExchangeInfo` struct.
"""
@spec get_exchange_info() :: {:ok, %Binance.ExchangeInfo{}} | {:error, error()}
def get_exchange_info() do
  # Errors fall through the `with` unchanged.
  with {:ok, data} <- HTTPClient.get_binance("/dapi/v1/exchangeInfo") do
    {:ok, Binance.ExchangeInfo.new(data)}
  end
end
@doc """
Creates a user-data-stream listen key.
Optional `params`: `:timestamp` (defaults to the current time in
milliseconds) and `:recv_window`.
"""
@spec create_listen_key(map(), map() | nil) :: {:ok, map()} | {:error, error()}
def create_listen_key(params, config \\ nil) do
  # Only send recvWindow when the caller supplied it. (`unless` is
  # soft-deprecated; use plain `if`.)
  arguments =
    %{timestamp: params[:timestamp] || :os.system_time(:millisecond)}
    |> Map.merge(
      if(is_nil(params[:recv_window]), do: %{}, else: %{recvWindow: params[:recv_window]})
    )

  case HTTPClient.post_binance("/dapi/v1/listenKey", arguments, config) do
    # Binance signals request-level failures inside a success payload.
    {:ok, %{"code" => code, "msg" => msg}} ->
      {:error, {:binance_error, %{code: code, msg: msg}}}

    data ->
      data
  end
end
@doc """
Keeps a user-data-stream listen key alive.
Optional `params`: `:timestamp` (defaults to the current time in
milliseconds) and `:recv_window`.
"""
@spec keep_alive_listen_key(map(), map() | nil) ::
        {:ok, %{}} | {:error, error()}
def keep_alive_listen_key(params, config \\ nil) do
  # Only send recvWindow when the caller supplied it. (`unless` is
  # soft-deprecated; use plain `if`.)
  arguments =
    %{timestamp: params[:timestamp] || :os.system_time(:millisecond)}
    |> Map.merge(
      if(is_nil(params[:recv_window]), do: %{}, else: %{recvWindow: params[:recv_window]})
    )

  case HTTPClient.put_binance("/dapi/v1/listenKey", arguments, config) do
    # Binance signals request-level failures inside a success payload.
    {:ok, %{"code" => code, "msg" => msg}} ->
      {:error, {:binance_error, %{code: code, msg: msg}}}

    data ->
      data
  end
end
@doc """
Retrieves the bids & asks of the order book up to the depth for the given symbol
## Example
```
{:ok,
%Binance.OrderBook{
asks: [
["8400.00000000", "2.04078100", []],
["8405.35000000", "0.50354700", []],
["8406.00000000", "0.32769800", []],
["8406.33000000", "0.00239000", []],
["8406.51000000", "0.03241000", []]
],
bids: [
["8393.00000000", "0.20453200", []],
["8392.57000000", "0.02639000", []],
["8392.00000000", "1.40893300", []],
["8390.09000000", "0.07047100", []],
["8388.72000000", "0.04577400", []]
],
last_update_id: 113634395
}
}
```
"""
@spec get_depth(String.t(), integer) :: {:ok, %Binance.OrderBook{}} | {:error, error()}
def get_depth(symbol, limit) do
case HTTPClient.get_binance("/dapi/v1/depth?symbol=#{symbol}&limit=#{limit}") do
{:ok, data} -> {:ok, Binance.OrderBook.new(data)}
err -> err
end
end
# Account
@doc """
Fetches user account from binance
In the case of a error on binance, for example with invalid parameters, `{:error, {:binance_error, %{code: code, msg: msg}}}` will be returned.
Please read https://binance-docs.github.io/apidocs/delivery/en/#futures-account-balance-user_data
"""
@spec get_account(map() | nil) :: {:ok, map()} | {:error, error()}
def get_account(config \\ nil) do
case HTTPClient.get_binance("/dapi/v1/account", %{}, config) do
{:ok, data} ->
{:ok, data}
error ->
error
end
end
@doc """
Fetches the current position risk information.
"""
@spec get_position(map() | nil) :: {:ok, list(%Binance.Futures.Position{})} | {:error, error()}
def get_position(config \\ nil) do
  # The client already returns {:ok, data} | {:error, reason} unchanged.
  HTTPClient.get_binance("/dapi/v1/positionRisk", %{}, config)
end
# Order
@doc """
Creates a new order on Binance Coin Futures
In the case of a error on Binance, for example with invalid parameters, `{:error, {:binance_error, %{code: code, msg: msg}}}` will be returned.
Please read https://binance-docs.github.io/apidocs/delivery/en/#new-order-trade
"""
@spec create_order(map(), map() | nil) :: {:ok, map()} | {:error, error()}
def create_order(
%{symbol: symbol, side: side, type: type, quantity: quantity} = params,
config \\ nil
) do
arguments = %{
symbol: symbol,
side: side,
type: type,
quantity: quantity,
timestamp: params[:timestamp] || :os.system_time(:millisecond)
}
arguments =
arguments
|> Map.merge(
unless(
is_nil(params[:new_client_order_id]),
do: %{newClientOrderId: params[:new_client_order_id]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params[:stop_price]), do: %{stopPrice: params[:stop_price]}, else: %{})
)
|> Map.merge(
unless(
is_nil(params[:time_in_force]),
do: %{timeInForce: params[:time_in_force]},
else: %{}
)
)
|> Map.merge(unless(is_nil(params[:price]), do: %{price: params[:price]}, else: %{}))
|> Map.merge(
unless(is_nil(params[:recv_window]), do: %{recvWindow: params[:recv_window]}, else: %{})
)
case HTTPClient.post_binance("/dapi/v1/order", arguments, config) do
{:ok, data} ->
{:ok, data}
error ->
error
end
end
@doc """
Builds (but does not send) a signed new-order request, returning a
`%{method:, url:, headers:, body:}` map suitable for custom transport.
Accepts the same params as `create_order/2`.
"""
def prepare_create_order(
      %{symbol: symbol, side: side, type: type, quantity: quantity} = params,
      config \\ nil
    ) do
  required = %{
    symbol: symbol,
    side: side,
    type: type,
    quantity: quantity,
    timestamp: params[:timestamp] || :os.system_time(:millisecond)
  }

  # Optional API fields are only sent when the caller supplied them
  # (replaces the soft-deprecated `unless`-merge chain).
  optional =
    [
      newClientOrderId: params[:new_client_order_id],
      stopPrice: params[:stop_price],
      timeInForce: params[:time_in_force],
      price: params[:price],
      recvWindow: params[:recv_window]
    ]
    |> Enum.reject(fn {_key, value} -> is_nil(value) end)
    |> Map.new()

  {:ok, url, headers, argument_string} =
    HTTPClient.prepare_request(
      :post,
      "https://dapi.binance.com/dapi/v1/order",
      Map.merge(required, optional),
      config,
      true
    )

  %{
    method: "POST",
    url: url,
    headers: headers,
    body: argument_string
  }
end
@doc """
Get all open orders, alternatively open orders by symbol (params[:symbol])
Weight: 1 for a single symbol; 40 when the symbol parameter is omitted
## Example
```
{:ok,
[%Binance.Futures.Order{price: "0.1", orig_qty: "1.0", executed_qty: "0.0", ...},
%Binance.Futures.Order{...},
%Binance.Futures.Order{...},
%Binance.Futures.Order{...},
%Binance.Futures.Order{...},
%Binance.Futures.Order{...},
...]}
```
Read more: https://binanceapitest.github.io/Binance-Futures-API-doc/trade_and_account/#current-open-orders-user_data
"""
@spec get_open_orders(map(), map() | nil) ::
{:ok, list() | {:error, error()}}
def get_open_orders(params \\ %{}, config \\ nil) do
case HTTPClient.get_binance("/dapi/v1/openOrders", params, config) do
{:ok, data} -> {:ok, data}
err -> err
end
end
@doc """
Get order by symbol and either orderId or origClientOrderId are mandatory
Weight: 1
## Example
```
{:ok, %Binance.Futures.Order{price: "0.1", origQty: "1.0", executedQty: "0.0", ...}}
```
Info: https://binance-docs.github.io/apidocs/delivery/en/#place-multiple-orders-trade
"""
@spec get_order(map(), map() | nil) :: {:ok, list()} | {:error, error()}
def get_order(params, config \\ nil) do
arguments =
%{
symbol: params[:symbol]
}
|> Map.merge(
unless(is_nil(params[:order_id]), do: %{orderId: params[:order_id]}, else: %{})
)
|> Map.merge(
unless(
is_nil(params[:orig_client_order_id]),
do: %{origClientOrderId: params[:orig_client_order_id]},
else: %{}
)
)
case HTTPClient.get_binance("/dapi/v1/order", arguments, config) do
{:ok, data} -> {:ok, data}
err -> err
end
end
@doc """
Cancel an active order.

Symbol and either orderId or origClientOrderId must be sent.

Returns `{:ok, map()}` or `{:error, reason}`.

Weight: 1

Info: https://binance-docs.github.io/apidocs/delivery/en/#cancel-order-trade
"""
@spec cancel_order(map(), map() | nil) :: {:ok, %Binance.Futures.Order{}} | {:error, error()}
def cancel_order(params, config \\ nil) do
  # Build the request payload: :symbol is always sent, the two order
  # identifiers only when the caller supplied them.
  identifiers =
    %{orderId: params[:order_id], origClientOrderId: params[:orig_client_order_id]}
    |> Enum.reject(fn {_field, value} -> is_nil(value) end)
    |> Map.new()

  payload = Map.put(identifiers, :symbol, params[:symbol])

  case HTTPClient.delete_binance("/dapi/v1/order", payload, config) do
    # Binance reports a rejected cancel inside a successful response body.
    {:ok, %{"rejectReason" => _} = rejection} -> {:error, rejection}
    {:ok, data} -> {:ok, data}
    err -> err
  end
end
@doc """
Build (but do not send) a signed DELETE request for cancelling an order.

Returns a map with `:method`, `:url` and `:headers` that the caller can
dispatch itself.
"""
def prepare_cancel_order(params, config \\ nil) do
  # Include the order identifiers only when the caller supplied them.
  identifiers =
    %{orderId: params[:order_id], origClientOrderId: params[:orig_client_order_id]}
    |> Enum.reject(fn {_field, value} -> is_nil(value) end)
    |> Map.new()

  payload = Map.put(identifiers, :symbol, params[:symbol])

  {:ok, url, headers} =
    HTTPClient.prepare_request(
      :delete,
      "https://dapi.binance.com/dapi/v1/order",
      payload,
      config,
      true
    )

  %{method: "DELETE", url: url, headers: headers}
end
@spec cancel_batch_order(map(), map() | nil) :: {:ok, list} | {:error, error()}
def cancel_batch_order(params, config \\ nil) do
  # The two list parameters are forwarded only when truthy (same check the
  # previous `if(!!value, ...)` form performed).
  list_arguments =
    [
      orderIdList: params[:order_id_list],
      origClientOrderIdList: params[:orig_client_order_id_list]
    ]
    |> Enum.filter(fn {_field, value} -> value end)
    |> Map.new()

  payload = Map.put(list_arguments, :symbol, params[:symbol])

  case HTTPClient.delete_binance("/dapi/v1/batchOrders", payload, config) do
    # A body containing "rejectReason" is still a failed cancel.
    {:ok, %{"rejectReason" => _} = rejection} -> {:error, rejection}
    {:ok, data} -> {:ok, data}
    err -> err
  end
end
@doc """
Cancel all orders for a symbol (params[:symbol])

Weight: 1

## Example

```
Binance.Coin.cancel_all_orders(%{symbol: "BTCUSDT"}, config)
```

Read more: https://binance-docs.github.io/apidocs/delivery/en/#cancel-all-open-orders-trade
"""
@spec cancel_all_orders(map(), map() | nil) :: {:ok, any()} | {:error, any()}
def cancel_all_orders(params, config \\ nil) do
  case HTTPClient.delete_binance("/dapi/v1/allOpenOrders", params, config) do
    # A 200 body containing "rejectReason" is still a failed cancel.
    {:ok, %{"rejectReason" => _} = err} -> {:error, err}
    {:ok, data} -> {:ok, data}
    err -> err
  end
end
end
|
lib/binance/futures_coin.ex
| 0.882301
| 0.573021
|
futures_coin.ex
|
starcoder
|
defmodule Exred.Node.Suppress do
  @moduledoc """
  Suppresses or filters incoming messages for configurable time periods.

  ###Inputs
  `topic :: string`
  if message suppression starts when triggered then a message with '_START' topic triggers the suppression
  otherwise topic is ignored
  `payload :: term | number`
  if suppressing a band then payload needs to be number (it'll get compared to the less\\_then and greater\\_then limits)
  """

  @name "Suppress"
  @category "function"
  @info @moduledoc

  @config [
    name: %{
      info: "Visible node name",
      value: @name,
      type: "string",
      attrs: %{max: 20}
    },
    per_topic: %{
      info: "Suppress per topic or regardless of topic",
      type: "select",
      value: true,
      attrs: %{options: [true, false]}
    },
    start_when: %{
      info:
        "Start suppressing when the flow is deployed or when a message with topic: '_START' arrives",
      type: "select",
      value: "triggered",
      attrs: %{options: ["deployed", "triggered"]}
    },
    how_long: %{
      info: "How long should messages be suppressed for?",
      type: "select",
      value: "time_period",
      attrs: %{options: ["forever", "time_period"]}
    },
    time_period: %{
      info: "Time period in milliseconds",
      type: "number",
      value: 100,
      attrs: %{min: 1, max: 3_600_000}
    },
    what: %{
      info: "What to suppress",
      type: "select",
      value: "all",
      attrs: %{options: ["all", "band"]}
    },
    band_less_then: %{
      info: "Suppress message if payload is less then",
      type: "number",
      value: 0
    },
    band_greater_then: %{
      info: "Suppress message if payload is greater then",
      type: "number",
      value: 1000
    }
  ]

  @ui_attributes [
    fire_button: false,
    right_icon: "gavel"
  ]

  use Exred.NodePrototype
  require Logger

  @impl true
  def node_init(state) do
    # Diagnostic dump of the resolved config; logged at debug level so normal
    # deployments no longer emit it as a warning (was `Logger.warn`).
    Logger.debug("CONFIG #{inspect(state.config)}")

    # When configured to start at deploy time, arm the timer (or mark the
    # suppression permanent) immediately; otherwise wait for a trigger.
    timer_ref =
      if state.config.start_when.value == "deployed" do
        start_timer(state.config)
      else
        nil
      end

    state |> Map.put(:timer_ref, timer_ref)
  end

  # (Re)start suppression when a trigger message arrives.
  # NOTE(review): the moduledoc says a message with *topic* '_START' triggers
  # suppression, but this clause matches on the *payload* — confirm intent.
  @impl true
  def handle_msg(%{payload: "_START"}, %{config: %{start_when: %{value: "triggered"}}} = state) do
    # Cancel any previously armed timer so repeated triggers don't leave a
    # stale timer that would end the new suppression window early.
    cancel_timer(state.timer_ref)
    {nil, %{state | timer_ref: start_timer(state.config)}}
  end

  # Suppression window elapsed: clear timer_ref so messages flow again.
  def handle_msg(:_TIMEOUT, state) do
    {nil, %{state | timer_ref: nil}}
  end

  def handle_msg(msg, %{timer_ref: timer_ref} = state) do
    case timer_ref do
      # no timer, forward all messages
      nil ->
        {msg, state}

      # there's a timer running (or it's set to :forever)
      # this means that we need to suppress messages
      _ ->
        case state.config.what.value do
          # suppress all messages
          "all" ->
            {nil, state}

          # suppress if payload is in a certain band
          "band" ->
            if msg.payload < state.config.band_less_then.value or
                 msg.payload > state.config.band_greater_then.value do
              {nil, state}
            else
              {msg, state}
            end
        end
    end
  end

  # Arms the suppression timer according to config: :forever suppresses until
  # explicitly cleared, otherwise a :_TIMEOUT message is scheduled.
  defp start_timer(config) do
    case config.how_long.value do
      "forever" ->
        :forever

      "time_period" ->
        {:ok, ref} = :timer.send_after(config.time_period.value, :_TIMEOUT)
        ref
    end
  end

  # Cancels a previously armed timer; nil / :forever have nothing to cancel.
  defp cancel_timer(nil), do: :ok
  defp cancel_timer(:forever), do: :ok
  defp cancel_timer(ref), do: :timer.cancel(ref)
end
|
lib/exred_node_suppress.ex
| 0.61682
| 0.403156
|
exred_node_suppress.ex
|
starcoder
|
defmodule Mix.Tasks.AzureFunctions.Release do
  @moduledoc """
  Create files to publish Azure Functions.

  Run this task inside Docker image `elixir:1.10.4-slim`.

  ## How to build
  ```
  $ docker run -d -it --rm --name elx erintheblack/elixir-azure-functions-builder:1.10.4
  $ docker cp mix.exs elx:/tmp
  $ docker cp lib elx:/tmp
  $ docker exec elx /bin/bash -c "mix deps.get; MIX_ENV=prod mix azure_functions.release ${handle_moduler} ${method_name} 'post put'"
  $ docker cp elx:/tmp/_build_az_func .
  $ docker stop elx
  ```
  """
  use Mix.Task

  @build_dir "_build_az_func"

  @doc """
  Create files to publish Azure Functions.
  """
  @impl Mix.Task
  def run([handler_module]) do
    app_name = app_name()
    bootstrap = bootstrap(app_name)
    host_json = host_json(app_name)
    local_setting_json = local_setting_json(handler_module)
    env = Mix.env()

    # Rebuild the release from scratch.
    Mix.Shell.cmd("rm -f -R ./_build/#{env}/*", &IO.puts/1)
    Mix.Shell.cmd("MIX_ENV=#{env} mix release --quiet", &IO.puts/1)

    # File.write!/2 raises on failure; the previously used File.write/2
    # returned {:error, reason} which was silently discarded.
    File.write!("./_build/#{env}/rel/#{app_name}/bootstrap", bootstrap)

    # The custom handler and the BEAM launchers must be executable.
    Mix.Shell.cmd("chmod +x ./_build/#{env}/rel/#{app_name}/bin/#{app_name}", &IO.puts/1)
    Mix.Shell.cmd("chmod +x ./_build/#{env}/rel/#{app_name}/releases/*/elixir", &IO.puts/1)
    Mix.Shell.cmd("chmod +x ./_build/#{env}/rel/#{app_name}/erts-*/bin/erl", &IO.puts/1)
    Mix.Shell.cmd("chmod +x ./_build/#{env}/rel/#{app_name}/bootstrap", &IO.puts/1)

    # Assemble the publish directory.
    Mix.Shell.cmd("rm -f -R ./#{@build_dir}/*", &IO.puts/1)
    Mix.Shell.cmd("mkdir -p ./#{@build_dir}", &IO.puts/1)
    Mix.Shell.cmd("cp -a ./_build/#{env}/rel/#{app_name} ./#{@build_dir}/", &IO.puts/1)
    File.write!("./#{@build_dir}/host.json", host_json)
    File.write!("./#{@build_dir}/local.settings.json", local_setting_json)
  end

  def run([handler_module, method_name, method_types]) do
    # method_types is a space-separated string, e.g. "post put".
    function_json = function_json(~w/#{method_types}/)
    run([handler_module])
    Mix.Shell.cmd("mkdir -p ./#{@build_dir}/#{method_name}", &IO.puts/1)
    File.write!("./#{@build_dir}/#{method_name}/function.json", function_json)
  end

  # Application name from mix.exs, as a string.
  defp app_name do
    Mix.Project.config() |> Keyword.get(:app) |> to_string()
  end

  # Shell script Azure's custom handler runtime executes to boot the release.
  defp bootstrap(app_name) do
    """
    #!/bin/sh
    set -e
    bin/#{app_name} start
    """
  end

  # host.json pointing the custom handler at the release's bootstrap script.
  defp host_json(app_name) do
    """
    {
      "version": "2.0",
      "logging": {
        "applicationInsights": {
          "samplingSettings": {
            "isEnabled": true,
            "excludedTypes": "Request"
          }
        }
      },
      "extensionBundle": {
        "id": "Microsoft.Azure.Functions.ExtensionBundle",
        "version": "[1.*, 2.0.0)"
      },
      "customHandler": {
        "description": {
          "defaultExecutablePath": "#{app_name}/bootstrap",
          "workingDirectory": "#{app_name}",
          "arguments": []
        }
      }
    }
    """
  end

  # local.settings.json wiring the handler module into the custom runtime.
  defp local_setting_json(handler_module) do
    """
    {
      "IsEncrypted": false,
      "Values": {
        "FUNCTIONS_WORKER_RUNTIME": "custom",
        "_HANDLER": "#{handler_module}",
        "LOG_LEVEL": "info"
      }
    }
    """
  end

  # function.json binding an HTTP trigger for the given method list.
  defp function_json(method_types) do
    """
    {
      "bindings": [
        {
          "authLevel": "function",
          "type": "httpTrigger",
          "direction": "in",
          "name": "req",
          "methods": #{method_types |> inspect}
        },
        {
          "type": "http",
          "direction": "out",
          "name": "res"
        }
      ]
    }
    """
  end
end
|
lib/mix/tasks/azure_functions/release.ex
| 0.673084
| 0.402627
|
release.ex
|
starcoder
|
defmodule Game.Effect do
  @moduledoc """
  Calculate and apply effects from skills/items
  """

  alias Data.Effect
  alias Data.Stats
  alias Game.DamageTypes

  @random_effect_range Application.get_env(:ex_venture, :game)[:random_effect_range]

  # A continuous effect is tagged with the character it originated from.
  # (Typo fixed: was `Effec.t()`.)
  @type continuous_effect :: {Character.t(), Effect.t()}

  @doc """
  Calculate effects based on the user

  Filters out stat boosting effects, then deals with damage & recovery
  """
  @spec calculate(Stats.t(), [Effect.t()]) :: [map()]
  def calculate(stats, effects) do
    # Fold plain "stats" effects into the stats first; the remaining effect
    # kinds are then computed against the boosted stats.
    {stats, effects} = stats |> calculate_stats(effects)

    {stats_boost, effects} = effects |> Enum.split_with(&(&1.kind == "stats/boost"))

    {damage_effects, effects} = effects |> Enum.split_with(&(&1.kind == "damage"))
    damage = damage_effects |> Enum.map(&calculate_damage(&1, stats))

    {damage_over_time_effects, effects} =
      effects |> Enum.split_with(&(&1.kind == "damage/over-time"))

    damage_over_time = damage_over_time_effects |> Enum.map(&calculate_damage(&1, stats))

    {recover_effects, effects} = effects |> Enum.split_with(&(&1.kind == "recover"))
    recover = recover_effects |> Enum.map(&calculate_recover(&1, stats))

    # "damage/type" effects halve any direct damage whose type doesn't match.
    {damage_type_effects, _effects} = effects |> Enum.split_with(&(&1.kind == "damage/type"))
    damage = damage_type_effects |> Enum.reduce(damage, &calculate_damage_type/2)

    stats_boost ++ damage ++ damage_over_time ++ recover
  end

  @doc """
  Calculate stats and return any effects that were not processed

      iex> stats = %{strength: 10}
      iex> effects = [%{kind: "stats", mode: "add", field: :strength, amount: 10}, %{kind: "damage"}]
      iex> Game.Effect.calculate_stats(stats, effects)
      {%{strength: 20}, [%{kind: "damage"}]}
  """
  # Spec fixed: the function returns {stats, remaining_effects}, as the
  # doctest above shows, not a bare Stats.t().
  @spec calculate_stats(Stats.t(), [Effect.t()]) :: {Stats.t(), [Effect.t()]}
  def calculate_stats(stats, effects) do
    {stat_effects, effects} = effects |> Enum.split_with(&(&1.kind == "stats"))
    stats = Enum.reduce(stat_effects, stats, &process_stats/2)
    {stats, effects}
  end

  @doc """
  Calculate a character's stats based on the current continuous effects on them
  """
  @spec calculate_stats_from_continuous_effects(Stats.t(), map()) :: [Effect.t()]
  def calculate_stats_from_continuous_effects(stats, state) do
    state.continuous_effects
    |> Enum.map(&elem(&1, 1))
    |> Enum.filter(&(&1.kind == "stats/boost"))
    |> Enum.reduce(stats, &process_stats/2)
  end

  @doc """
  Process stats effects

      iex> Game.Effect.process_stats(%{field: :strength, mode: "add", amount: 10}, %{strength: 10})
      %{strength: 20}
  """
  @spec process_stats(Effect.t(), Stats.t()) :: Stats.t()
  def process_stats(effect, stats)

  def process_stats(effect, stats) do
    case effect.mode do
      "add" ->
        stats |> Map.put(effect.field, stats[effect.field] + effect.amount)

      "subtract" ->
        stats |> Map.put(effect.field, stats[effect.field] - effect.amount)

      "multiply" ->
        stats |> Map.put(effect.field, stats[effect.field] * effect.amount)

      "division" ->
        stats |> Map.put(effect.field, round(stats[effect.field] / effect.amount))
    end
  end

  @doc """
  Calculate damage

  Scales the base amount by the modifier stat of the effect's damage type
  plus a random swing; unknown damage types pass through unchanged.
  """
  @spec calculate_damage(Effect.t(), Stats.t()) :: map()
  def calculate_damage(effect, stats) do
    case DamageTypes.get(effect.type) do
      {:ok, damage_type} ->
        stat = Map.get(stats, damage_type.stat_modifier)
        random_swing = Enum.random(@random_effect_range)
        modifier = 1 + stat / damage_type.boost_ratio + random_swing / 100
        modified_amount = max(round(Float.ceil(effect.amount * modifier)), 0)
        effect |> Map.put(:amount, modified_amount)

      _ ->
        effect
    end
  end

  @doc """
  Calculate recovery

      iex> effect = %{kind: "recover", type: "health", amount: 10}
      iex> Game.Effect.calculate_recover(effect, %{})
      %{kind: "recover", type: "health", amount: 10}
  """
  @spec calculate_recover(Effect.t(), Stats.t()) :: map()
  def calculate_recover(effect, _stats) do
    random_swing = Enum.random(@random_effect_range)
    modifier = 1 + random_swing / 100
    modified_amount = round(Float.ceil(effect.amount * modifier))
    effect |> Map.put(:amount, modified_amount)
  end

  @doc """
  Calculate damage type effects

  Damage:

      iex> effect = %{kind: "damage/type", types: ["slashing"]}
      iex> damage = %{kind: "damage", amount: 10, type: "bludgeoning"}
      iex> Game.Effect.calculate_damage_type(effect, [damage])
      [%{kind: "damage", amount: 5, type: "bludgeoning"}]
  """
  # Spec fixed: the second argument and return are lists of damage maps,
  # not Stats.t()/map().
  @spec calculate_damage_type(Effect.t(), [map()]) :: [map()]
  def calculate_damage_type(effect, damages) do
    damages
    |> Enum.map(fn damage ->
      case damage.type in effect.types do
        true ->
          damage

        false ->
          # Mismatched damage types are halved (rounded up).
          amount = round(Float.ceil(damage.amount / 2.0))
          %{damage | amount: amount}
      end
    end)
  end

  @doc """
  Adjust effects before applying them to a character
  """
  @spec adjust_effects([Effect.t()], Stats.t()) :: [Effect.t()]
  def adjust_effects(effects, stats) do
    effects |> Enum.map(&adjust_effect(&1, stats))
  end

  @doc """
  Adjust a single effect

  Incoming damage is divided by a defensive modifier derived from the
  receiver's reverse stat for the damage type, plus a random swing.
  """
  @spec adjust_effect(Effect.t(), Stats.t()) :: Effect.t()
  def adjust_effect(effect, stats)

  def adjust_effect(effect = %{kind: "damage"}, stats) do
    case DamageTypes.get(effect.type) do
      {:ok, damage_type} ->
        stat = Map.get(stats, damage_type.reverse_stat)
        random_swing = Enum.random(@random_effect_range)
        modifier = 1 + stat / damage_type.reverse_boost + random_swing / 100
        modified_amount = round(Float.ceil(effect.amount / modifier))
        effect |> Map.put(:amount, modified_amount)

      _ ->
        effect
    end
  end

  def adjust_effect(effect = %{kind: "damage/over-time"}, stats) do
    case DamageTypes.get(effect.type) do
      {:ok, damage_type} ->
        stat = Map.get(stats, damage_type.reverse_stat)
        random_swing = Enum.random(@random_effect_range)
        modifier = 1 + stat / damage_type.reverse_boost + random_swing / 100
        modified_amount = round(Float.ceil(effect.amount / modifier))
        effect |> Map.put(:amount, modified_amount)

      _ ->
        effect
    end
  end

  def adjust_effect(effect, _stats), do: effect

  @doc """
  Apply effects to stats.
  """
  @spec apply([Effect.t()], Stats.t()) :: Stats.t()
  def apply(effects, stats) do
    effects |> Enum.reduce(stats, &apply_effect/2)
  end

  @doc """
  Apply an effect to stats
  """
  @spec apply_effect(Effect.t(), Stats.t()) :: Stats.t()
  def apply_effect(effect, stats)

  def apply_effect(effect = %{kind: "damage"}, stats) do
    %{health_points: health_points} = stats
    Map.put(stats, :health_points, health_points - effect.amount)
  end

  def apply_effect(effect = %{kind: "damage/over-time"}, stats) do
    %{health_points: health_points} = stats
    Map.put(stats, :health_points, health_points - effect.amount)
  end

  def apply_effect(effect = %{kind: "recover", type: "health"}, stats) do
    %{health_points: health_points, max_health_points: max_health_points} = stats
    health_points = max_recover(health_points, effect.amount, max_health_points)
    %{stats | health_points: health_points}
  end

  def apply_effect(effect = %{kind: "recover", type: "skill"}, stats) do
    %{skill_points: skill_points, max_skill_points: max_skill_points} = stats
    skill_points = max_recover(skill_points, effect.amount, max_skill_points)
    %{stats | skill_points: skill_points}
  end

  def apply_effect(effect = %{kind: "recover", type: "endurance"}, stats) do
    %{endurance_points: endurance_points, max_endurance_points: max_endurance_points} = stats
    endurance_points = max_recover(endurance_points, effect.amount, max_endurance_points)
    %{stats | endurance_points: endurance_points}
  end

  def apply_effect(_effect, stats), do: stats

  @doc """
  Limit recovery to the max points

      iex> Game.Effect.max_recover(10, 1, 15)
      11

      iex> Game.Effect.max_recover(10, 6, 15)
      15
  """
  @spec max_recover(integer(), integer(), integer()) :: integer()
  def max_recover(current_points, amount, max_points) do
    case current_points + amount do
      current_points when current_points > max_points -> max_points
      current_points -> current_points
    end
  end

  @doc """
  Filters to continuous effects only

  - `damage/over-time`
  """
  @spec continuous_effects([Effect.t()], Character.t()) :: [Effect.t()]
  def continuous_effects(effects, from) do
    effects
    |> Enum.filter(&Effect.continuous?/1)
    |> Enum.map(&Effect.instantiate/1)
    |> Enum.map(fn effect ->
      {from, effect}
    end)
  end

  @doc """
  Start the continuous effect tick cycle if required

  Effect must have an "every" field, such as damage over time
  """
  @spec maybe_tick_effect(Effect.t(), pid()) :: :ok
  def maybe_tick_effect(effect, pid) do
    cond do
      Map.has_key?(effect, :every) ->
        :erlang.send_after(effect.every, pid, {:continuous_effect, effect.id})

      Map.has_key?(effect, :duration) ->
        :erlang.send_after(effect.duration, pid, {:continuous_effect, :clear, effect.id})

      true ->
        :ok
    end
  end

  @doc """
  Find a continuous effect in the state by its id.
  """
  @spec find_effect(map(), String.t()) :: {:ok, Effect.t()} | {:error, :not_found}
  def find_effect(state, effect_id) do
    effect =
      Enum.find(state.continuous_effects, fn {_from, effect} ->
        effect.id == effect_id
      end)

    case effect do
      nil ->
        {:error, :not_found}

      effect ->
        {:ok, effect}
    end
  end
end
|
lib/game/effect.ex
| 0.895625
| 0.611759
|
effect.ex
|
starcoder
|
defmodule Descisionex.Helper do
  @moduledoc """
  Utility functions
  """

  @doc """
  Normalizes matrix.

  ## Examples

      iex> matrix = [[1, 2], [3, 4], [0, 1]]
      iex> size = 3 # matrix rows
      iex> Descisionex.Helper.normalize(matrix, size)
      [[0.25, 0.286], [0.75, 0.571], [0.0, 0.143]]

  """
  @spec normalize([[number]], integer) :: any
  def normalize(data, size) do
    # Column sums are the normalization denominators.
    column_sums =
      data
      |> Matrix.transpose()
      |> Enum.map(&Enum.sum/1)

    # Divide every element by its column's sum. Built with Enum.map instead of
    # the previous `Enum.reduce(..., acc ++ [column])`, which appended to the
    # accumulator in a loop (O(n²)); order of rows is unchanged.
    Enum.map(0..(size - 1), fn row_index ->
      data
      |> Enum.at(row_index)
      |> Enum.with_index()
      |> Enum.map(fn {value, column} ->
        Float.round(value / Enum.at(column_sums, column), 3)
      end)
    end)
  end

  @doc """
  Calculate weights for matrix rows.

  ## Examples

      iex> matrix = [[1, 2], [3, 4], [0, 1]]
      iex> size = 2 # matrix row elements count
      iex> Descisionex.Helper.calculate_weights(matrix, size)
      [[1.5], [3.5], [0.5]]

  """
  @spec calculate_weights(any, any) :: list
  def calculate_weights(data, size) do
    Enum.map(data, fn row ->
      [Float.round(Enum.sum(row) / size, 3)]
    end)
  end

  @doc """
  Average number for matrix row.

  ## Examples

      iex> matrix = [[1, 2], [3, 4], [0, 1]]
      iex> Enum.map(matrix, fn row -> Descisionex.Helper.avg(row, 2) end)
      [1.5, 3.5, 0.5]

  """
  @spec avg(any, number) :: float
  def avg(row, size) do
    Float.round(Enum.sum(row) / size, 3)
  end

  @doc """
  Find maximal criteria (row) in matrix (with index).

  ## Examples

      iex> matrix = [[1, 2], [3, 4], [0, 1]]
      iex> Descisionex.Helper.find_max_criteria(matrix)
      {[3, 4], 1}

  """
  @spec find_max_criteria(any) :: any
  def find_max_criteria(criteria) do
    # Rows are compared element-wise (Erlang term order on lists).
    criteria |> Enum.with_index() |> Enum.max()
  end

  @doc """
  Find minimal criteria (row) in matrix (with index).

  ## Examples

      iex> matrix = [[1, 2], [3, 4], [0, 1]]
      iex> Descisionex.Helper.find_min_criteria(matrix)
      {[0, 1], 2}

  """
  @spec find_min_criteria(any) :: any
  def find_min_criteria(criteria) do
    criteria |> Enum.with_index() |> Enum.min()
  end

  @doc """
  Matrix rounding to 3 numbers.

  ## Examples

      iex> matrix = [[1/3, 2/3], [4/5, 5/6]]
      iex> Descisionex.Helper.round_matrix(matrix)
      [[0.333, 0.667], [0.8, 0.833]]

  """
  @spec round_matrix([[number]]) :: list
  def round_matrix(matrix) do
    Enum.map(matrix, fn row ->
      Enum.map(row, fn element ->
        # Integers are left untouched; only floats are rounded.
        if is_float(element), do: Float.round(element, 3), else: element
      end)
    end)
  end
end
|
lib/helper/helper.ex
| 0.871639
| 0.73041
|
helper.ex
|
starcoder
|
defmodule QRCodeEx.Mask do
  @moduledoc false

  @doc """
  Get the total score for the masked matrix.

  The score is the sum of the four penalty rules below; lower is better.
  """
  @spec score(QRCodeEx.Matrix.matrix()) :: integer
  def score(matrix) do
    rule1(matrix) + rule2(matrix) + rule3(matrix) + rule4(matrix)
  end

  @doc """
  Check for consecutive blocks.

  Penalizes each horizontal or vertical run of 5+ identical modules:
  3 points for the first five, plus 1 point per additional module.
  """
  @spec rule1(QRCodeEx.Matrix.matrix()) :: integer
  def rule1(matrix) do
    # Convert the tuple-of-tuples matrix into a list of row lists, then scan
    # all rows and (via transform/1) all columns.
    matrix = for e <- Tuple.to_list(matrix), do: Tuple.to_list(e)

    Stream.concat(matrix, transform(matrix))
    |> Enum.reduce(0, &(do_rule1(&1, {nil, 0}, 0) + &2))
  end

  # Walks one line keeping {current_module, run_length} and the running score.
  # Clause order is significant: the {h, 4} clause fires on the 5th identical
  # module (+3), {h, 5} on every module after that (+1, run capped at 5).
  defp do_rule1([], _, acc), do: acc
  defp do_rule1([h | t], {_, 0}, acc), do: do_rule1(t, {h, 1}, acc)
  defp do_rule1([h | t], {h, 4}, acc), do: do_rule1(t, {h, 5}, acc + 3)
  defp do_rule1([h | t], {h, 5}, acc), do: do_rule1(t, {h, 5}, acc + 1)
  defp do_rule1([h | t], {h, n}, acc), do: do_rule1(t, {h, n + 1}, acc)
  defp do_rule1([h | t], {_, _}, acc), do: do_rule1(t, {h, 1}, acc)

  # Transposes a list-of-lists matrix so columns can be scanned as rows.
  defp transform(matrix) do
    for e <- Enum.zip(matrix), do: Tuple.to_list(e)
  end

  @doc """
  Check for 2x2 blocks.

  Adds 3 points for every 2x2 area whose four modules are all dark or all
  light; overlapping areas each count.
  """
  @spec rule2(QRCodeEx.Matrix.matrix()) :: integer
  def rule2(matrix) do
    z = tuple_size(matrix) - 2

    for i <- 0..z,
        j <- 0..z do
      QRCodeEx.Matrix.shape({i, j}, {2, 2})
      |> Enum.map(&get(matrix, &1))
    end
    |> Enum.reduce(0, &do_rule2/2)
  end

  defp do_rule2([1, 1, 1, 1], acc), do: acc + 3
  defp do_rule2([0, 0, 0, 0], acc), do: acc + 3
  defp do_rule2([_, _, _, _], acc), do: acc

  @doc """
  Check for special blocks.

  Adds 40 points for every 11-module horizontal or vertical window matching
  the dark-light-dark-dark-dark-light-dark sequence flanked by four light
  modules on one side.
  """
  @spec rule3(QRCodeEx.Matrix.matrix()) :: integer
  def rule3(matrix) do
    z = tuple_size(matrix)

    # Check both orientations: {i, j}/{11, 1} is a horizontal window,
    # {j, i}/{1, 11} the corresponding vertical one.
    for i <- 0..(z - 1),
        j <- 0..(z - 11) do
      [{{i, j}, {11, 1}}, {{j, i}, {1, 11}}]
      |> Stream.map(fn {a, b} ->
        QRCodeEx.Matrix.shape(a, b)
        |> Enum.map(&get(matrix, &1))
      end)
      |> Enum.map(&do_rule3/1)
    end
    |> List.flatten()
    |> Enum.sum()
  end

  defp do_rule3([1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0]), do: 40
  defp do_rule3([0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1]), do: 40
  defp do_rule3([_, _, _, _, _, _, _, _, _, _, _]), do: 0

  @doc """
  Check for module's proportion.

  Penalizes deviation of the dark-module share from 50%: 10 points for every
  full 5% step away from an even split.
  """
  @spec rule4(QRCodeEx.Matrix.matrix()) :: integer
  def rule4(matrix) do
    m = tuple_size(matrix)

    # Count dark (1) modules across the whole matrix.
    black =
      Tuple.to_list(matrix)
      |> Enum.reduce(0, fn e, acc ->
        Tuple.to_list(e)
        |> Enum.reduce(acc, &do_rule4/2)
      end)

    div(abs(div(black * 100, m * m) - 50), 5) * 10
  end

  defp do_rule4(1, acc), do: acc + 1
  defp do_rule4(_, acc), do: acc

  # Fetches the module at {x, y}; the matrix is a tuple of row tuples.
  defp get(matrix, {x, y}) do
    get_in(matrix, [Access.elem(x), Access.elem(y)])
  end

  @doc """
  The mask algorithm.

  Given a 3-bit mask pattern id and a coordinate, returns 1 when the module
  at `{x, y}` satisfies that pattern's condition, otherwise 0.
  """
  @spec mask(integer, QRCodeEx.Matrix.coordinate()) :: 0 | 1
  def mask(0b000, {x, y}) when rem(x + y, 2) == 0, do: 1
  def mask(0b000, {_, _}), do: 0
  def mask(0b001, {x, _}) when rem(x, 2) == 0, do: 1
  def mask(0b001, {_, _}), do: 0
  def mask(0b010, {_, y}) when rem(y, 3) == 0, do: 1
  def mask(0b010, {_, _}), do: 0
  def mask(0b011, {x, y}) when rem(x + y, 3) == 0, do: 1
  def mask(0b011, {_, _}), do: 0
  def mask(0b100, {x, y}) when rem(div(x, 2) + div(y, 3), 2) == 0, do: 1
  def mask(0b100, {_, _}), do: 0
  def mask(0b101, {x, y}) when rem(x * y, 2) + rem(x * y, 3) == 0, do: 1
  def mask(0b101, {_, _}), do: 0
  def mask(0b110, {x, y}) when rem(rem(x * y, 2) + rem(x * y, 3), 2) == 0, do: 1
  def mask(0b110, {_, _}), do: 0
  def mask(0b111, {x, y}) when rem(rem(x + y, 2) + rem(x * y, 3), 2) == 0, do: 1
  def mask(0b111, {_, _}), do: 0
end
|
lib/eqrcode/mask.ex
| 0.781956
| 0.681661
|
mask.ex
|
starcoder
|
defmodule AlchemyVM.HostFunction.API do
  alias AlchemyVM.Helpers

  use Agent

  @moduledoc """
  Provides an API for interacting with the VM from within a host function
  """

  @doc false
  # Delegate to the child_spec/1 injected by `use Agent` (it is overridable).
  # The previous body was `child_spec(arg)` — a self-call that recursed
  # forever whenever a supervisor asked for this module's child spec.
  def child_spec(arg), do: super(arg)

  @doc false
  def start_link(vm), do: Agent.start_link(fn -> vm end)

  @doc false
  def stop(pid), do: Agent.stop(pid)

  @doc false
  # NOTE(review): spec should likely be `AlchemyVM.t()` — as written it is the
  # literal atom type. Confirm the AlchemyVM struct exposes t/0.
  @spec state(pid) :: AlchemyVM
  def state(pid), do: Agent.get(pid, & &1)

  @doc """
  Returns x number of bytes from a given exported memory, starting at the specified
  address

  ## Usage

  When within a host function body defined by `defhost`, if called from a WebAssembly
  module that has an exported memory called "memory1", and the memory is laid
  out as such: `{0, 0, 0, 0, 0, 0, 0, 0, 243, 80, 45, 92, ...}`, it can be accessed
  by doing:

      defhost get_from_memory do
        <<243, 80, 45, 92>> = AlchemyVM.HostFunction.API.get_memory(ctx, "memory1", 8, 4)
      end

  Note that `ctx` here is a variable defined by the `defhost` macro in order
  to serve as a pointer to VM state.
  """
  @spec get_memory(pid, String.t(), integer, integer) :: binary | {:error, :no_exported_mem, String.t()}
  def get_memory(pid, mem_name, address, bytes \\ 1) do
    vm = state(pid)

    case Helpers.get_export_by_name(vm, mem_name, :mem) do
      :not_found -> {:error, :no_exported_mem, mem_name}
      addr ->
        vm.store.mems
        |> Enum.at(addr)
        |> AlchemyVM.Memory.get_at(address, bytes)
    end
  end

  @doc """
  Updates a given exported memory with the given bytes, at the specified address

  ## Usage

  When within a host function body defined by `defhost`, if called from a WebAssembly
  module that has an exported memory called "memory1", it can be updated
  by doing:

      defhost update_memory do
        AlchemyVM.HostFunction.API.update_memory(ctx, "memory1", 0, <<"hello world">>)
      end

  This will set the value of "memory1" to `{"h", "e", "l", "l", "o", " ", "w",
  "o", "r", "l", "d", ...}`

  Note that `ctx` here is a variable defined by the `defhost` macro in order
  to serve as a pointer to VM state.
  """
  @spec update_memory(pid, String.t(), integer, binary) :: :ok | {:error, :no_exported_mem, String.t()}
  def update_memory(pid, mem_name, address, bytes) do
    vm_state = state(pid)

    case Helpers.get_export_by_name(vm_state, mem_name, :mem) do
      :not_found -> {:error, :no_exported_mem, mem_name}
      addr ->
        # Mutate the Agent state in place: replace the targeted memory and
        # write the new store back into the VM struct.
        Agent.update(pid, fn vm ->
          mem =
            vm.store.mems
            |> Enum.at(addr)
            |> AlchemyVM.Memory.put_at(address, bytes)

          mems = List.replace_at(vm.store.mems, addr, mem)
          store = Map.put(vm.store, :mems, mems)
          Map.put(vm, :store, store)
        end)

        :ok
    end
  end
end
|
lib/execution/host_function_api.ex
| 0.894993
| 0.404684
|
host_function_api.ex
|
starcoder
|
defmodule Unsafe do
  @moduledoc """
  Generate unsafe bindings for Elixir functions.

  This library aims to simplify the generation of unsafe function
  definitions (which here means "functions which can crash"). This
  is done by code generation at compile time to lessen the bloat in
  the main source tree, and to remove cognitive load from developers.

  Generation will create function signatures ending with a `!` per
  Elixir convention, and forward the results of the safe function
  through to a chosen handler to deal with crashes.

  ### Generation

  Registering functions for unsafe generation is as easy as using
  the `@unsafe` module attribute.

      defmodule MyModule do
        use Unsafe.Generator,
          docs: false

        @unsafe [
          { :test, 1, :unwrap }
        ]

        def test(true),
          do: { :ok, true }
        def test(false),
          do: { :ok, false }

        defp unwrap({ _, bool }),
          do: bool
      end

  The code above will generate a compile time signature which looks
  like the following function definition (conceptually):

      def test!(arg0) do
        unwrap(test(arg0))
      end

  Thus making all of the following true in practice:

      # clearly we keep the main definition
      MyModule.test(true) == { :ok, true }
      MyModule.test(false) == { :ok, false }

      # and the unsafe versions
      MyModule.test!(true) == true
      MyModule.test!(false) == false

  ### @unsafe

  The `@unsafe` attribute is used to define which functions should
  have their signatures wrapped. Values set against this attribute
  *must* define the function name and arity, with an optional handler.

  The following are all valid definitions of the `@unsafe` attribute:

      # single function binding
      @unsafe { :test, 1 }

      # many function bindings
      @unsafe [ { :test, 1 } ]

      # many function arities
      @unsafe [ { :test, [ 1, 2 ] } ]

      # explicit private (defp) handler definitions
      @unsafe [ { :test, 1, :my_handler } ]

      # explicit public (def) handler definitions
      @unsafe [ { :test, 1, { MyModule, :my_handler } } ]

      # explicit argument names (for documentation)
      @unsafe [ { :test, [ :value ] } ]

  It should also be noted that all of the above will accumulate, which
  means that you can use `@unsafe` as many times and in as many places
  as you wish inside a module. In addition, you can use `@unsafe_binding`
  in place of `@unsafe` if preferred (due to historical reasons).

  ### Options

  The use hook accepts options as a way to pass global options to all
  `@unsafe` attribute hooks inside the current module. These options
  will modify the way code is generated.

  The existing option set is limited, and is as follows:

    * `docs` - whether or not to enable documentation for the generated
      functions. By default docs are disabled, so the unsafe functions
      are hidden from your documentation. If enabled, you should name
      your arguments instead of providing just an arity.

    * `handler` - a default handler to apply to all `@unsafe` bindings
      which do not have an explicit handler set. This is useful if
      all of your definitions should use the same handler.
  """
end
|
deps/unsafe/lib/unsafe.ex
| 0.814717
| 0.67985
|
unsafe.ex
|
starcoder
|
defmodule OMG.Watcher.ExitProcessor.StandardExit do
@moduledoc """
Part of Core to handle SE challenges & invalid exit detection.
Treat as private helper submodule of `OMG.Watcher.ExitProcessor.Core`, test and call via that
"""
defmodule Challenge do
  @moduledoc """
  Represents a challenge to a standard exit as returned by the `ExitProcessor`
  """

  defstruct [:exit_id, :txbytes, :input_index, :sig]

  alias OMG.Crypto
  alias OMG.State.Transaction

  # exit_id     - identifier of the standard exit being challenged
  # txbytes     - raw bytes of the transaction that spends the exiting utxo
  # input_index - which input of that transaction spends the exiting utxo
  # sig         - the exit owner's signature over the challenging transaction
  @type t() :: %__MODULE__{
          exit_id: pos_integer(),
          txbytes: Transaction.tx_bytes(),
          input_index: non_neg_integer(),
          sig: Crypto.sig_t()
        }
end
alias OMG.Block
alias OMG.State.Transaction
alias OMG.Utxo
alias OMG.Watcher.ExitProcessor
alias OMG.Watcher.ExitProcessor.Core
alias OMG.Watcher.ExitProcessor.DoubleSpend
alias OMG.Watcher.ExitProcessor.ExitInfo
alias OMG.Watcher.ExitProcessor.KnownTx
alias OMG.Watcher.ExitProcessor.TxAppendix
import OMG.Watcher.ExitProcessor.Tools
require Utxo
@doc """
Gets all utxo positions exiting via active standard exits
"""
@spec exiting_positions(Core.t()) :: list(Utxo.Position.t())
def exiting_positions(%Core{exits: exits}) do
  # Yield the position of every exit whose ExitInfo is flagged active.
  for {utxo_pos, %ExitInfo{is_active: active}} <- exits, active, do: utxo_pos
end
@doc """
Gets all standard exits that are invalid, all and late ones separately
"""
@spec get_invalid(Core.t(), %{Utxo.Position.t() => boolean}, pos_integer()) ::
        {%{Utxo.Position.t() => ExitInfo.t()}, %{Utxo.Position.t() => ExitInfo.t()}}
def get_invalid(%Core{exits: exits, sla_margin: sla_margin} = state, utxo_exists?, eth_height_now) do
  # Active exits whose exiting utxo is reported missing from the ledger.
  invalid_exit_positions =
    exits
    |> Enum.filter(fn {_key, %ExitInfo{is_active: is_active}} -> is_active end)
    |> Enum.map(fn {utxo_pos, _value} -> utxo_pos end)
    |> only_utxos_checked_and_missing(utxo_exists?)

  # Exits can also be invalidated by in-flight exits spending the same utxo.
  exits_invalid_by_ife = get_invalid_exits_based_on_ifes(state)

  invalid_exits = exits |> Map.take(invalid_exit_positions) |> Enum.concat(exits_invalid_by_ife) |> Enum.uniq()

  # get exits which are still invalid and after the SLA margin
  late_invalid_exits =
    invalid_exits
    |> Enum.filter(fn {_, %ExitInfo{eth_height: eth_height}} -> eth_height + sla_margin <= eth_height_now end)

  {Map.new(invalid_exits), Map.new(late_invalid_exits)}
end
@doc """
Determines the utxo-creating and utxo-spending blocks to get from `OMG.DB`

`se_spending_blocks_to_get` are requested by the UTXO position they spend
`se_creating_blocks_to_get` are requested by blknum
"""
@spec determine_standard_challenge_queries(ExitProcessor.Request.t(), Core.t()) ::
        {:ok, ExitProcessor.Request.t()} | {:error, :exit_not_found}
def determine_standard_challenge_queries(
      %ExitProcessor.Request{se_exiting_pos: Utxo.position(creating_blknum, _, _) = exiting_pos} = request,
      %Core{exits: exits} = state
    ) do
  # A missing exit makes get_exit/2 return the error that falls through here.
  with {:ok, _exit_info} <- get_exit(exits, exiting_pos) do
    # If an IFE already spends the exiting utxo, no spending block is needed;
    # deposits have no creating block to fetch.
    spending_blocks_to_get = if get_ife_based_on_utxo(exiting_pos, state), do: [], else: [exiting_pos]
    creating_blocks_to_get = if Utxo.Position.is_deposit?(exiting_pos), do: [], else: [creating_blknum]

    {:ok,
     %ExitProcessor.Request{
       request
       | se_spending_blocks_to_get: spending_blocks_to_get,
         se_creating_blocks_to_get: creating_blocks_to_get
     }}
  end
end
@doc """
Determines the txbytes of the particular transaction related to the SE - aka "output tx" - which creates the exiting
utxo
"""
@spec determine_exit_txbytes(ExitProcessor.Request.t(), Core.t()) ::
        ExitProcessor.Request.t()
def determine_exit_txbytes(
      %ExitProcessor.Request{se_exiting_pos: exiting_pos, se_creating_blocks_result: creating_blocks_result} =
        request,
      %Core{exits: exits}
    )
    when not is_nil(exiting_pos) do
  exit_id_to_get_by_txbytes =
    if Utxo.Position.is_deposit?(exiting_pos) do
      # Deposits have no creating block; reconstruct the implicit deposit
      # transaction from the stored exit data instead.
      %ExitInfo{owner: owner, currency: currency, amount: amount} = exits[exiting_pos]
      Transaction.Payment.new([], [{owner, currency, amount}])
    else
      # Non-deposits: pull the signed tx out of its creating block by txindex.
      [%Block{transactions: transactions}] = creating_blocks_result
      Utxo.position(_, txindex, _) = exiting_pos
      {:ok, signed_bytes} = Enum.fetch(transactions, txindex)
      Transaction.Signed.decode!(signed_bytes)
    end
    |> Transaction.raw_txbytes()

  %ExitProcessor.Request{request | se_exit_id_to_get: exit_id_to_get_by_txbytes}
end
@doc """
Creates the final challenge response, if possible
"""
@spec create_challenge(ExitProcessor.Request.t(), Core.t()) ::
{:ok, Challenge.t()} | {:error, :utxo_not_spent}
def create_challenge(
%ExitProcessor.Request{
se_exiting_pos: exiting_pos,
se_spending_blocks_result: spending_blocks_result,
se_exit_id_result: exit_id
},
%Core{exits: exits} = state
)
when not is_nil(exiting_pos) and not is_nil(exit_id) do
%ExitInfo{owner: owner} = exits[exiting_pos]
# an IFE spending the same utxo can serve as the challenge if no spending
# block was found in OMG.DB
ife_result = get_ife_based_on_utxo(exiting_pos, state)
with {:ok, spending_tx_or_block} <- ensure_challengeable(spending_blocks_result, ife_result) do
# locate the double-spending tx and the input index at which it spends
# the exiting utxo - both are needed for the on-chain challenge
%DoubleSpend{known_spent_index: input_index, known_tx: %KnownTx{signed_tx: challenging_signed}} =
get_double_spend_for_standard_exit(spending_tx_or_block, exiting_pos)
{:ok,
%Challenge{
exit_id: exit_id,
input_index: input_index,
txbytes: challenging_signed |> Transaction.raw_txbytes(),
sig: find_sig!(challenging_signed, owner)
}}
end
end
# Picks the proof that the exiting utxo was spent: a spending block found in
# `OMG.DB` wins, otherwise a spending IFE transaction; with neither, the
# challenge cannot be built.
defp ensure_challengeable(spending_blknum_response, ife_response) do
  cond do
    match?([%Block{}], spending_blknum_response) -> {:ok, hd(spending_blknum_response)}
    not is_nil(ife_response) -> {:ok, ife_response}
    true -> {:error, :utxo_not_spent}
  end
end
# Returns the first known in-flight-exit transaction spending `utxo_pos`,
# or `nil` when no IFE spends it.
@spec get_ife_based_on_utxo(Utxo.Position.t(), Core.t()) :: KnownTx.t() | nil
defp get_ife_based_on_utxo(Utxo.position(_, _, _) = utxo_pos, %Core{} = state) do
  case Map.get(get_ife_txs_by_spent_input(state), utxo_pos) do
    nil -> nil
    known_txs -> List.first(known_txs)
  end
end
# Finds the transaction (and the input index) spending the given utxo,
# searching either a whole block or a single known transaction.
@spec get_double_spend_for_standard_exit(Block.t() | KnownTx.t(), Utxo.Position.t()) :: DoubleSpend.t() | nil
defp get_double_spend_for_standard_exit(%Block{transactions: txs}, utxo_pos) do
  # decode every tx up front (any malformed tx raises), then scan for a spend
  decoded = Enum.map(txs, &Transaction.Signed.decode!/1)

  Enum.find_value(decoded, fn signed_tx ->
    get_double_spend_for_standard_exit(%KnownTx{signed_tx: signed_tx}, utxo_pos)
  end)
end

defp get_double_spend_for_standard_exit(%KnownTx{} = known_tx, utxo_pos) do
  utxo_pos
  |> get_double_spends_by_utxo_pos(known_tx)
  |> List.first()
end
# Gets all standard exits invalidated by IFEs exiting their utxo positions.
# NOTE: `Enum.filter/2` over a map yields `{key, value}` tuples, so the result
# is a list of `{position, exit_info}` pairs - the previous spec claimed
# `list(%{...})`, a shape this function can never return.
@spec get_invalid_exits_based_on_ifes(Core.t()) :: list({Utxo.Position.t(), ExitInfo.t()})
defp get_invalid_exits_based_on_ifes(%Core{exits: exits} = state) do
  known_txs_by_input = get_ife_txs_by_spent_input(state)

  Enum.filter(exits, fn {utxo_pos, _exit_info} -> Map.has_key?(known_txs_by_input, utxo_pos) end)
end
# All double-spends of `utxo_pos` performed by `known_tx`.
@spec get_double_spends_by_utxo_pos(Utxo.Position.t(), KnownTx.t()) :: list(DoubleSpend.t())
defp get_double_spends_by_utxo_pos(Utxo.position(_, _, oindex) = utxo_pos, known_tx) do
  # the function used expects positions with an index (either input index or oindex)
  double_spends_from_known_tx([{utxo_pos, oindex}], known_tx)
end
# Groups every known in-flight-exit transaction by the inputs it spends.
defp get_ife_txs_by_spent_input(%Core{} = state) do
  state
  |> TxAppendix.get_all()
  |> Enum.map(&%KnownTx{signed_tx: &1})
  |> KnownTx.group_txs_by_input()
end
# Looks up `exiting_pos` among the known exits, tagging the result so it can
# feed a `with` chain.
defp get_exit(exits, exiting_pos) do
  case Map.fetch(exits, exiting_pos) do
    {:ok, exit_info} -> {:ok, exit_info}
    :error -> {:error, :exit_not_found}
  end
end
end
|
apps/omg_watcher/lib/omg_watcher/exit_processor/standard_exit.ex
| 0.676406
| 0.415136
|
standard_exit.ex
|
starcoder
|
defmodule AntlUtilsEcto.Query do
@moduledoc """
Set of utils for Ecto.Query
"""
@type status :: :ended | :ongoing | :scheduled
import Ecto.Query, only: [dynamic: 2, from: 2]
# Dispatch on the filter value: `nil` -> IS NULL, non-empty list -> IN,
# anything else -> equality. NOTE: a guard that raises (here `length/1` on a
# non-list) silently fails the clause, so non-list values fall through to the
# equality clause. An empty list also falls through (length 0), reaching
# `field == ^[]` - which Ecto rejects at runtime; callers are expected not to
# pass `[]`.
@spec where(any, atom, nil | binary | [any] | integer | boolean) :: Ecto.Query.t()
def where(queryable, key, nil) when is_atom(key) do
from(q in queryable, where: is_nil(field(q, ^key)))
end
def where(queryable, key, value) when is_atom(key) and length(value) > 0 do
from(q in queryable, where: field(q, ^key) in ^value)
end
def where(queryable, key, value) when is_atom(key) do
from(q in queryable, where: field(q, ^key) == ^value)
end
# Negated counterpart of `where/3`: IS NOT NULL / NOT IN / inequality.
# Same guard fall-through caveats as `where/3`.
@spec where_not(any, atom, nil | binary | [any] | integer | boolean) :: Ecto.Query.t()
def where_not(queryable, key, nil) when is_atom(key) do
from(q in queryable, where: not is_nil(field(q, ^key)))
end
def where_not(queryable, key, value) when is_atom(key) and length(value) > 0 do
from(q in queryable, where: field(q, ^key) not in ^value)
end
def where_not(queryable, key, value) when is_atom(key) do
from(q in queryable, where: field(q, ^key) != ^value)
end
# Case-sensitive substring match; `%` in the needle is escaped so user input
# cannot widen the pattern.
@spec where_like(any, atom, binary) :: Ecto.Query.t()
def where_like(queryable, key, value) when is_atom(key) and is_binary(value) do
like_value = "%#{String.replace(value, "%", "\\%")}%"
from(q in queryable, where: like(field(q, ^key), ^like_value))
end
# Filters rows whose [start_at, end_at) period is in any of the given
# statuses relative to `datetime`. Statuses are OR-ed together, starting from
# a `dynamic(false)` seed so an empty status list yields an always-false
# condition.
@spec where_period_status(Ecto.Queryable.t(), status | list(status), atom, atom, DateTime.t()) ::
Ecto.Query.t()
def where_period_status(
queryable,
status,
start_at_key,
end_at_key,
%DateTime{} = datetime
) do
conditions =
status
|> List.wrap()
|> Enum.reduce(
Ecto.Query.dynamic(false),
&status_dynamic_expression(&2, &1, start_at_key, end_at_key, datetime)
)
from(q in queryable, where: ^conditions)
end
# OR-ed variants of `where/3` (same value dispatch and caveats).
@spec or_where(any, atom, nil | binary | [any]) :: Ecto.Query.t()
def or_where(queryable, key, nil) when is_atom(key) do
from(q in queryable, or_where: is_nil(field(q, ^key)))
end
def or_where(queryable, key, value) when is_atom(key) and length(value) > 0 do
from(q in queryable, or_where: field(q, ^key) in ^value)
end
def or_where(queryable, key, value) when is_atom(key) do
from(q in queryable, or_where: field(q, ^key) == ^value)
end
# OR-ed variants of `where_not/3`.
@spec or_where_not(any, atom, nil | binary | [any]) :: Ecto.Query.t()
def or_where_not(queryable, key, nil) when is_atom(key) do
from(q in queryable, or_where: not is_nil(field(q, ^key)))
end
def or_where_not(queryable, key, value) when is_atom(key) and length(value) > 0 do
from(q in queryable, or_where: field(q, ^key) not in ^value)
end
def or_where_not(queryable, key, value) when is_atom(key) do
from(q in queryable, or_where: field(q, ^key) != ^value)
end
# OR-ed variant of `where_like/3`.
@spec or_where_like(any, atom, binary) :: Ecto.Query.t()
def or_where_like(queryable, key, value) when is_atom(key) and is_binary(value) do
like_value = "%#{String.replace(value, "%", "\\%")}%"
from(q in queryable, or_where: like(field(q, ^key), ^like_value))
end
# Shorthand for `where_period_status/5` with the single `:ongoing` status.
@spec where_in_period(any, atom, atom, DateTime.t()) :: Ecto.Query.t()
def where_in_period(queryable, start_at_key, end_at_key, %DateTime{} = datetime)
when is_atom(start_at_key) and is_atom(end_at_key) do
from(q in queryable,
where:
^status_dynamic_expression(
Ecto.Query.dynamic(false),
:ongoing,
start_at_key,
end_at_key,
datetime
)
)
end
# :ongoing - already started and not yet ended (a nil end_at means open-ended).
defp status_dynamic_expression(
dynamic,
:ongoing,
start_at_key,
end_at_key,
%DateTime{} = datetime
) do
dynamic(
[q],
^dynamic or
(field(q, ^start_at_key) <= ^datetime and
(field(q, ^end_at_key) > ^datetime or is_nil(field(q, ^end_at_key))))
)
end
# :ended - both the start and the end lie at or before `datetime`.
defp status_dynamic_expression(
dynamic,
:ended,
start_at_key,
end_at_key,
%DateTime{} = datetime
) do
dynamic(
[q],
^dynamic or (field(q, ^start_at_key) <= ^datetime and field(q, ^end_at_key) <= ^datetime)
)
end
# :scheduled - starts strictly in the future (open-ended end allowed).
defp status_dynamic_expression(
dynamic,
:scheduled,
start_at_key,
end_at_key,
%DateTime{} = datetime
) do
dynamic(
[q],
^dynamic or
(field(q, ^start_at_key) > ^datetime and
(field(q, ^end_at_key) > ^datetime or is_nil(field(q, ^end_at_key))))
)
end
end
|
lib/query.ex
| 0.797951
| 0.492005
|
query.ex
|
starcoder
|
defmodule Memento.Capture.Feed do
@moduledoc """
The `Memento.Capture.Feed` module implements a state machine capable
of periodically fetching and saving new data from a specific source,
optionally with authentication.
A `Memento.Capture.Feed` instance is started with a configuration map (see `t:config/0`
for more details about its structure).
The details of how to connect and authenticate with the external source
are captured in the `Memento.Capture.Handler` behaviour.
In case of authentication failure, the worker will terminate.
A `Memento.Capture.Feed` state machine can be inserted in a supervision tree with:
children = [
{Memento.Capture.Feed, config}
]
For more details about supervision, see `child_spec/1`.
"""
@behaviour :gen_statem
alias Memento.{Capture.Handler, Repo, Schema.Entry}
alias Phoenix.PubSub
require Logger
@typedoc """
The configuration for a `Memento.Capture.Feed` worker is a map with the
following properties:
- **handler**: the name of a module which implements the `Memento.Capture.Handler` behaviour.
- **name**: a process name for the state machine process. This will also be used by default as id for the worker
inside a supervision tree.
- **data**: the starting data necessary for the worker to function. More often than not its value
is the handler's return value of `c:Memento.Capture.Handler.initial_data/0`.
- **refresh_interval**: the interval, in milliseconds, between data refreshes.
- **retry_interval**: the interval, in milliseconds, between subsequent attempts to refresh the data in case of failure.
"""
@type config :: %{
handler: module(),
name: atom(),
data: Handler.data(),
refresh_interval: pos_integer(),
retry_interval: pos_integer()
}
@doc """
Given the starting config for the state machine, it returns a valid
child specification. Note that by default this implementation assumes one single worker
per handler per supervision tree (as the handler name is used as id for the child).
"""
@spec child_spec(config) :: Supervisor.child_spec()
def child_spec(config) do
%{
id: {__MODULE__, config.handler},
start: {__MODULE__, :start_link, [config]},
type: :worker,
restart: :permanent,
shutdown: 500
}
end
@doc false
# One callback function per state: `idle/3` and `authorized/3` below.
def callback_mode, do: :state_functions
@doc """
Starts a new state machine with the specified configuration.
"""
@spec start_link(config) :: :gen_statem.start_ret()
def start_link(config) do
:gen_statem.start_link({:local, config.name}, __MODULE__, config, [])
end
@doc """
Forces a refresh for the specified state machine.
Returns either `{:ok, new_entries_count}` or `{:error, reason}`.
"""
@spec refresh(pid() | atom()) :: {:ok, non_neg_integer} | {:error, term}
def refresh(worker) do
:gen_statem.call(worker, :refresh)
end
@doc false
# Starts in :idle and immediately tries to authorize via an internal event,
# so no external trigger is needed.
def init(config) do
action = {:next_event, :internal, :authorize}
{:ok, :idle, config, action}
end
@doc false
# :idle state - successful authorization moves to :authorized and triggers an
# immediate refresh; failure logs and stops the machine (the supervisor will
# restart it, per `restart: :permanent`).
def idle(:internal, :authorize, state) do
case state.handler.authorize(state.data) do
{:ok, new_data} ->
action = {:next_event, :internal, :refresh}
new_state = %{state | data: new_data}
{:next_state, :authorized, new_state, action}
{:error, reason} ->
track(state.handler, :auth_failure)
Logger.error(fn ->
"""
Error authorizing #{inspect(state.handler)}.
Reason: #{inspect(reason)}
"""
end)
{:stop, reason}
end
end
@doc false
# Periodic refresh: fired once internally right after authorization and then
# via state timeouts. Success reschedules after `refresh_interval`; failure
# keeps the old data and retries after `retry_interval`.
def authorized(event_type, :refresh, state)
when event_type in [:internal, :timeout] do
case refresh_and_save(state.handler, state.data) do
{:ok, new_count, new_data} ->
track(state.handler, :success, new_count)
Logger.info(fn ->
"""
Refreshed #{inspect(state.handler)}, added #{new_count} new entries.
"""
end)
action = {:timeout, state.refresh_interval, :refresh}
new_state = %{state | data: new_data}
{:keep_state, new_state, action}
{:error, reason} ->
track(state.handler, :refresh_failure)
Logger.error(fn ->
"""
Error refreshing #{inspect(state.handler)}.
Reason: #{inspect(reason)}
"""
end)
action = {:timeout, state.retry_interval, :refresh}
{:keep_state_and_data, action}
end
end
@doc false
# On-demand refresh via `refresh/1`: same work as the timed refresh, but the
# outcome is also replied to the caller, and the periodic timer is reset.
def authorized({:call, from}, :refresh, state) do
case refresh_and_save(state.handler, state.data) do
{:ok, new_count, new_data} ->
track(state.handler, :success, new_count)
actions = [
{:reply, from, {:ok, new_count}},
{:timeout, state.refresh_interval, :refresh}
]
new_state = %{state | data: new_data}
{:keep_state, new_state, actions}
{:error, reason} ->
track(state.handler, :refresh_failure)
actions = [
{:reply, from, {:error, reason}},
{:timeout, state.refresh_interval, :refresh}
]
{:keep_state_and_data, actions}
end
end
# Fetches fresh entries from the handler and persists them, returning the
# count of rows actually inserted plus the handler's updated data.
defp refresh_and_save(handler, data) do
case handler.refresh(data) do
{:ok, new_entries_data, new_data} ->
{new_count, _} = insert_all(new_entries_data, handler)
{:ok, new_count, new_data}
error ->
error
end
end
# Bulk-inserts the fetched entries; `on_conflict: :nothing` makes re-fetched
# duplicates a no-op so repeated refreshes are idempotent.
defp insert_all(new_entries_data, handler) do
now =
DateTime.utc_now()
|> DateTime.truncate(:second)
inserts =
Enum.map(new_entries_data, fn new_entry_data ->
[
type: handler.entry_type(),
content: new_entry_data,
saved_at: handler.get_saved_at(new_entry_data),
inserted_at: now,
updated_at: now
]
end)
Repo.insert_all(Entry, inserts, on_conflict: :nothing)
end
# Publishes the outcome of an authorization/refresh attempt both on the
# "capture" PubSub topic and as a telemetry event.
defp track(handler, status, entries_count \\ 0) do
current_time = DateTime.utc_now()
event_params = %{
type: handler.entry_type(),
last_update: current_time,
status: status,
entries_count: entries_count
}
PubSub.broadcast(Memento.PubSub, "capture", event_params)
:telemetry.execute(
[:memento, :capture],
%{entries_count: entries_count},
%{type: handler.entry_type(), recorded_at: current_time, status: status}
)
end
end
|
lib/memento/capture/feed.ex
| 0.85811
| 0.568026
|
feed.ex
|
starcoder
|
defmodule Tds.Tokens do
import Tds.BinaryUtils
import Bitwise
require Logger
alias Tds.Types
alias Tds.UCS2
# Size (in bytes) of a return value for the given TDS data-type token.
# 0x26 (38, SYBINTN) is the only type currently supported; anything else is
# an unsupported datatype and raises.
def retval_typ_size(38), do: 8

def retval_typ_size(dec) do
  raise Tds.Error,
        "Unknown datatype parsed when decoding return value: #{dec}"
end
@type token ::
:colmetadata
| :done
| :doneinproc
| :doneproc
| :envchange
| :error
| :info
| :loginack
| :order
| :parameters
| :returnstatus
| :row
# | :eof # end of message marker
## Decode Token Stream
# Recursively decodes a TDS token stream into a list of `{token, data}` pairs.
# Column metadata decoded from a COLMETADATA token is threaded through so
# subsequent ROW/NBCROW tokens can decode their columns.
@spec decode_tokens(any, any) :: [{token, any}]
def decode_tokens(binary, colmetadata \\ nil)
def decode_tokens(tail, _) when tail == "" or tail == nil do
[]
end
# The first byte selects the decoder; each decoder returns
# {decoded_token, remaining_binary, collmetadata}. Commented-out codes are
# token types not (yet) supported; any other code raises.
def decode_tokens(<<token::unsigned-size(8), tail::binary>>, collmetadata) do
{token_data, tail, collmetadata} =
case token do
0x81 -> decode_colmetadata(tail, collmetadata)
# 0xA5 -> decode_colinfo(tail, collmetadata)
0xFD -> decode_done(tail, collmetadata)
0xFE -> decode_doneproc(tail, collmetadata)
0xFF -> decode_doneinproc(tail, collmetadata)
0xE3 -> decode_envchange(tail, collmetadata)
0xAA -> decode_error(tail, collmetadata)
# 0xAE -> decode_featureextack(tail, collmetadata)
# 0xEE -> decode_fedauthinfo(tail, collmetadata)
0xAB -> decode_info(tail, collmetadata)
0xAD -> decode_loginack(tail, collmetadata)
0xD2 -> decode_nbcrow(tail, collmetadata)
# 0x78 -> decode_offset(tail, collmetadata)
0xA9 -> decode_order(tail, collmetadata)
0x79 -> decode_returnstatus(tail, collmetadata)
0xAC -> decode_returnvalue(tail, collmetadata)
0xD1 -> decode_row(tail, collmetadata)
# 0xE4 -> decode_sessionstate(tail, collmetadata)
# 0xED -> decode_sspi(tail, collmetadata)
# 0xA4 -> decode_tablename(tail, collmetadata)
t -> raise_unsupported_token(t, collmetadata)
end
[token_data | decode_tokens(tail, collmetadata)]
end
# Terminates token-stream decoding when an unknown token code is met.
defp raise_unsupported_token(token, _collmetadata) do
  raise RuntimeError,
        "Unsupported Token code #{inspect(token, base: :hex)} in Token Stream"
end
# RETURNVALUE (0xAC) - an output parameter of an RPC call: parameter ordinal,
# UCS-2 name, then TYPE_INFO followed by the typed value, both decoded by
# Tds.Types.
defp decode_returnvalue(bin, collmetadata) do
<<
_ord::little-unsigned-16,
length::size(8),
name::binary-size(length)-unit(16),
_status::size(8),
_usertype::size(32),
_flags::size(16),
data::binary
>> = bin
name = UCS2.to_string(name)
{type_info, tail} = Tds.Types.decode_info(data)
{value, tail} = Tds.Types.decode_data(type_info, tail)
param = %Tds.Parameter{name: name, value: value, direction: :output}
{{:returnvalue, param}, tail, collmetadata}
end
# RETURNSTATUS (0x79) - the 32-bit little-endian status of an RPC call.
defp decode_returnstatus(<<status::little-size(32), rest::binary>>, collmetadata) do
  {{:returnstatus, status}, rest, collmetadata}
end
# COLMETADATA (0x81) - column metadata for the result set. The decoded column
# list replaces the current collmetadata, so it is returned in the third
# position for subsequent ROW/NBCROW tokens.
defp decode_colmetadata(<<column_count::little-size(2)-unit(8), rest::binary>>, _) do
  {columns, rest} = decode_columns(rest, column_count)
  {{:colmetadata, columns}, rest, columns}
end
# ORDER (0xA9) - ids of the columns the result set is ordered by. The token
# length is in bytes; each column id takes two, hence the halving.
defp decode_order(<<byte_length::little-unsigned-16, rest::binary>>, collmetadata) do
  column_count = div(byte_length, 2)
  {columns, rest} = decode_column_order(rest, column_count)
  {{:order, columns}, rest, collmetadata}
end
# ERROR
# ERROR token (0xAA) - a server error message. `l` is the token length; the
# payload carries the error number, state, severity class, three length-
# prefixed UCS-2 strings (message, server name, proc name) and the offending
# line number.
defp decode_error(
<<l::little-size(16), data::binary-size(l), tail::binary>>,
collmetadata
) do
<<
number::little-size(32),
state,
class,
msg_len::little-size(16),
msg::binary-size(msg_len)-unit(16),
sn_len,
server_name::binary-size(sn_len)-unit(16),
pn_len,
proc_name::binary-size(pn_len)-unit(16),
line_number::little-size(32)
>> = data
# `:binary.copy/1` detaches these sub-binaries from the larger packet
# binary so the packet can be garbage-collected while the error is kept.
e = %{
number: number,
state: state,
class: class,
msg_text: UCS2.to_string(:binary.copy(msg)),
server_name: UCS2.to_string(:binary.copy(server_name)),
proc_name: UCS2.to_string(:binary.copy(proc_name)),
line_number: line_number
}
# TODO Need to concat errors for delivery
# Logger.debug "SQL Error: #{inspect e}"
{{:error, e}, tail, collmetadata}
end
# INFO token (0xAB) - an informational server message; the wire layout is
# identical to ERROR (0xAA). The message is logged at debug level and also
# returned in the token list.
# NOTE(review): unlike decode_error/2 the strings here are not `:binary.copy`ed,
# so they may keep a reference to the whole packet binary - confirm intended.
defp decode_info(
<<l::little-size(16), data::binary-size(l), tail::binary>>,
collmetadata
) do
<<
number::little-size(32),
state,
class,
msg_len::little-size(16),
msg::binary-size(msg_len)-unit(16),
sn_len,
server_name::binary-size(sn_len)-unit(16),
pn_len,
proc_name::binary-size(pn_len)-unit(16),
line_number::little-size(32)
>> = data
info = %{
number: number,
state: state,
class: class,
msg_text: UCS2.to_string(msg),
server_name: UCS2.to_string(server_name),
proc_name: UCS2.to_string(proc_name),
line_number: line_number
}
Logger.debug(fn ->
[
"(Tds.Info)",
"Line",
to_string(info.line_number),
"(Class #{info.class})",
info.msg_text
]
|> Enum.intersperse(" ")
|> IO.iodata_to_binary()
end)
# tokens = Keyword.update(tokens, :info, [i], &[i | &1])
{{:info, info}, tail, collmetadata}
end
## ROW (0xD1) - one data row, decoded column by column against the current
## column metadata.
defp decode_row(<<data::binary>>, collmetadata) do
  {columns, rest} = decode_row_columns(data, collmetadata)
  {{:row, columns}, rest, collmetadata}
end
## NBC ROW (0xD2) - null-bitmap-compressed row: a leading bitmap (one bit per
## column, padded up to whole bytes) marks NULL columns, which then carry no
## data bytes at all.
defp decode_nbcrow(<<data::binary>>, collmetadata) do
  column_count = Enum.count(collmetadata)
  bitmap_bytes = div(column_count + 7, 8)
  {bitmap, rest} = bitmap_list(data, bitmap_bytes)
  {row, rest} = decode_nbcrow_columns(rest, collmetadata, bitmap)
  {{:row, row}, rest, collmetadata}
end
# ENVCHANGE token (0xE3) - the server notifies the client of an environment
# change. The inner case dispatches on the change type; most carry a
# length-prefixed new and old value, while the transaction-related types
# carry a transaction descriptor on one side and a 0x00 placeholder on the
# other.
defp decode_envchange(
<<
_length::little-unsigned-16,
env_type::unsigned-8,
tail::binary
>>,
colmetadata
) do
{token, tail} =
case env_type do
# 0x01 - database changed (new/old name, UCS-2)
0x01 ->
<<
new_value_size::unsigned-8,
new_value::binary(new_value_size, 16),
old_value_size::unsigned-8,
old_value::binary(old_value_size, 16),
rest::binary
>> = tail
new_database = UCS2.to_string(new_value)
old_database = UCS2.to_string(old_value)
{{:database, new_database, old_database}, rest}
# 0x02 - language changed
0x02 ->
<<
new_value_size::unsigned-8,
new_value::binary(new_value_size, 16),
old_value_size::unsigned-8,
old_value::binary(old_value_size, 16),
rest::binary
>> = tail
new_language = UCS2.to_string(new_value)
old_language = UCS2.to_string(old_value)
{{:language, new_language, old_language}, rest}
# 0x03 - character set changed
0x03 ->
<<
new_value_size::unsigned-8,
new_value::binary(new_value_size, 16),
old_value_size::unsigned-8,
old_value::binary(old_value_size, 16),
rest::binary
>> = tail
new_charset = UCS2.to_string(new_value)
old_charset = UCS2.to_string(old_value)
{{:charset, new_charset, old_charset}, rest}
# 0x04 - packet size changed; values come as decimal strings, falling
# back to 4096 when unparseable
0x04 ->
<<
new_value_size::unsigned-8,
new_value::binary(new_value_size, 16),
old_value_size::unsigned-8,
old_value::binary(old_value_size, 16),
rest::binary
>> = tail
new_packetsize =
new_value
|> UCS2.to_string()
|> Integer.parse()
|> case do
:error -> 4096
{value, ""} -> value
{value, _maybe_unit} -> value
end
old_packetsize =
old_value
|> UCS2.to_string()
|> Integer.parse()
|> case do
:error -> 4096
{value, ""} -> value
{value, _maybe_unit} -> value
end
{{:packetsize, new_packetsize, old_packetsize}, rest}
# 0x05
# @tds_envtype_unicode_data_storing_local_id ->
# 0x06
# @tds_envtype_uncode_data_string_comparison_flag ->
# 0x07 - SQL collation changed; the old value is skipped
0x07 ->
<<
new_value_size::unsigned-8,
collation::binary(new_value_size, 8),
old_value_size::unsigned-8,
_old_value::binary(old_value_size, 8),
rest::binary
>> = tail
{:ok, collation} = Tds.Protocol.Collation.decode(collation)
{{:collation, collation, nil}, rest}
# 0x08 - transaction begun; new transaction descriptor, empty old value
0x08 ->
<<
value_size::unsigned-8,
new_value::binary-little-size(value_size)-unit(8),
0x00,
rest::binary
>> = tail
new_trans = :binary.copy(new_value)
{{:transaction_begin, new_trans, <<0x00>>}, rest}
# 0x09 - transaction committed; old descriptor only
0x09 ->
<<
0x00,
value_size::unsigned-8,
old_value::binary-little-size(value_size)-unit(8),
rest::binary
>> = tail
old_trans = :binary.copy(old_value)
{{:transaction_commit, <<0x00>>, old_trans}, rest}
# 0x0A - transaction rolled back; old descriptor only
0x0A ->
<<
0x00,
value_size::unsigned-8,
old_value::binary-little-size(value_size)-unit(8),
rest::binary
>> = tail
trans = :binary.copy(old_value)
{{:transaction_rollback, <<0x00>>, trans}, rest}
# 0x0B
# @tds_envtype_enlist_dtc_transaction ->
# 0x0C - transaction defected; new descriptor only
0x0C ->
<<
value_size::unsigned-8,
new_value::binary-little-size(value_size)-unit(8),
0x00,
rest::binary
>> = tail
tran = :binary.copy(new_value)
{{:transaction_defect, tran, <<0x00>>}, rest}
# 0x0D - database mirroring partner; value intentionally ignored
0x0D ->
<<
new_value_size::unsigned-8,
_new_value::binary(new_value_size, 16),
0x00,
rest::binary
>> = tail
{{:mirroring_partner, :ignore_me, :ignore_me}, rest}
# 0x11 - transaction ended; old descriptor only
0x11 ->
<<
0x00,
value_size::unsigned-8,
old_value::binary-little-size(value_size)-unit(8),
rest::binary
>> = tail
old = :binary.copy(old_value)
{{:transaction_ended, <<0x00>>, old}, rest}
# 0x12 - acknowledgement of a connection reset
0x12 ->
<<0x00, 0x00, rest::binary>> = tail
{{:resetconnection_ack, 0x00, 0x00}, rest}
# 0x13 - user instance name
0x13 ->
<<
size::little-uint16,
value::binary(size, 16),
0x00,
rest::binary
>> = tail
{{:userinfo, UCS2.to_string(value), nil}, rest}
# 0x14 - routing: the server redirects the client to another host/port
0x14 ->
<<
_routing_data_len::little-uint16,
# Protocol MUST be 0, specifying TCP-IP protocol
0x00,
port::little-uint16,
alt_host_len::little-uint16,
alt_host::binary(alt_host_len, 16),
0x00,
0x00,
rest::binary
>> = tail
routing = %{
hostname: UCS2.to_string(alt_host),
port: port
}
{{:routing, routing, nil}, rest}
end
{{:envchange, token}, tail, colmetadata}
end
## DONE
# DONE token (0xFD) - marks the end of a SQL statement: a 2-byte status
# bitmask, the 2-byte current command token and an 8-byte affected-row count.
defp decode_done(
<<status::little-unsigned-size(2)-unit(8), cur_cmd::little-unsigned-size(2)-unit(8),
row_count::little-size(8)-unit(8), tail::binary>>,
collmetadata
) do
# Flag decoding of the status bitmask. Note that final?/more? both test bit
# 0x0001 (DONE_MORE) - final? is simply its negation.
status = %{
final?: band(status, 0x0001) == 0x0,
more?: band(status, 0x0001) == 0x1,
error?: band(status, 0x0002) == 0x2,
inxact?: band(status, 0x0004) == 0x4,
count?: band(status, 0x0010) == 0x10,
atnn?: band(status, 0x0020) == 0x20,
rpc_in_batch?: band(status, 0x0080) == 0x80,
srverror?: band(status, 0x0100) == 0x100
}
done = %{
status: status,
cmd: cur_cmd,
rows: row_count
}
{{:done, done}, tail, collmetadata}
end
## DONEPROC (0xFE) - end of a stored procedure; same wire layout as DONE, so
## the payload is decoded by decode_done/2 and only retagged here.
defp decode_doneproc(<<data::binary>>, collmetadata) do
  {{_tag, done}, rest, _} = decode_done(data, collmetadata)
  {{:doneproc, done}, rest, collmetadata}
end
## DONEINPROC (0xFF) - end of a statement within a stored procedure; same
## wire layout as DONE, decoded by decode_done/2 and retagged.
defp decode_doneinproc(<<data::binary>>, collmetadata) do
  {{_tag, done}, rest, _} = decode_done(data, collmetadata)
  {{:doneinproc, done}, rest, collmetadata}
end
# LOGINACK token (0xAD) - the server accepts the login: interface flag
# (1 = T-SQL only), negotiated TDS version, UCS-2 server program name and the
# four-part server version.
defp decode_loginack(
<<
_length::little-uint16,
interface::size(8),
tds_version::unsigned-32,
prog_name_len::size(8),
prog_name::binary(prog_name_len, 16),
major_ver::size(8),
minor_ver::size(8),
build_hi::size(8),
build_low::size(8),
tail::binary
>>,
collmetadata
) do
token = %{
t_sql_only: interface == 1,
tds_version: tds_version,
program: UCS2.to_string(prog_name),
version: "#{major_ver}.#{minor_ver}.#{build_hi}.#{build_low}"
}
{{:loginack, token}, tail, collmetadata}
end
# Reads `remaining` little-endian 16-bit column ids, preserving their order.
defp decode_column_order(binary, remaining, acc \\ [])

defp decode_column_order(rest, remaining, acc) when remaining < 1 do
  {Enum.reverse(acc), rest}
end

defp decode_column_order(<<col_id::little-unsigned-16, rest::binary>>, remaining, acc) do
  decode_column_order(rest, remaining - 1, [col_id | acc])
end
## Row and Column Decoders
# Decodes the NBCROW null bitmap: `n` bytes, one flag bit per column.
# Columns map to bits least-significant first, so the byte's bits are matched
# MSB-first (b8..b1) and emitted b1..b8 to line up with column order.
# NOTE: the original pattern here was destroyed by an automated
# anonymization pass (it contained eight IPv6-looking placeholders); this
# restores the eight 1-bit fields that the body's `[b1, ..., b8 | bits]`
# consumes.
defp bitmap_list(tail, n) when n <= 0 do
  {[], tail}
end

defp bitmap_list(
       <<b8::size(1), b7::size(1), b6::size(1), b5::size(1), b4::size(1), b3::size(1),
         b2::size(1), b1::size(1), tail::binary>>,
       n
     ) do
  {bits, tail} = bitmap_list(tail, n - 1)
  {[b1, b2, b3, b4, b5, b6, b7, b8 | bits], tail}
end
# Decodes `remaining` column-metadata entries, preserving column order.
defp decode_columns(binary, remaining, acc \\ [])

defp decode_columns(rest, remaining, acc) when remaining < 1 do
  {Enum.reverse(acc), rest}
end

defp decode_columns(binary, remaining, acc) do
  {column, rest} = decode_column(binary)
  decode_columns(rest, remaining - 1, [column | acc])
end
# Decodes a single column's metadata: skips usertype/flags, then reads the
# TYPE_INFO and the column name, storing the name in the type-info map.
defp decode_column(<<_usertype::int32, _flags::int16, rest::binary>>) do
  {type_info, rest} = Types.decode_info(rest)
  {name, rest} = decode_column_name(rest)
  {Map.put(type_info, :name, name), rest}
end
# Reads a length-prefixed UCS-2 column name (length counts 16-bit units).
defp decode_column_name(<<len::int8, name::binary-size(len)-unit(16), rest::binary>>) do
  {UCS2.to_string(name), rest}
end
# Decodes one value per column-metadata entry, preserving column order.
defp decode_row_columns(binary, colmetadata, acc \\ [])

defp decode_row_columns(<<rest::binary>>, [], acc) do
  {Enum.reverse(acc), rest}
end

defp decode_row_columns(<<data::binary>>, [column_meta | remaining_meta], acc) do
  {value, rest} = decode_row_column(data, column_meta)
  decode_row_columns(rest, remaining_meta, [value | acc])
end
# Like decode_row_columns/3, but consults the null bitmap: a set bit means
# the column is NULL and occupies no bytes in the row data.
defp decode_nbcrow_columns(binary, colmetadata, bitmap, acc \\ [])

defp decode_nbcrow_columns(<<rest::binary>>, [], _bitmap, acc) do
  {Enum.reverse(acc), rest}
end

defp decode_nbcrow_columns(<<data::binary>>, [column_meta | meta_rest], [bit | bitmap_rest], acc) do
  {value, rest} =
    if bit == 0 do
      decode_row_column(data, column_meta)
    else
      {nil, data}
    end

  decode_nbcrow_columns(rest, meta_rest, bitmap_rest, [value | acc])
end
# Decodes a single column value according to its TYPE_INFO metadata.
defp decode_row_column(<<data::binary>>, column_meta), do: Types.decode_data(column_meta, data)
end
|
lib/tds/tokens.ex
| 0.566139
| 0.40987
|
tokens.ex
|
starcoder
|
defmodule Courier.Scheduler do
use GenServer
@timeout 5_000
@pool_size 10
@interval 1_000
@moduledoc """
Scheduler adapter
The Scheduler will allow you to schedule when messages can be sent out.
It accomplishes this with four parts
1. The adapter - needs to be set in the opts
2. The poller - defaults to an `interval` of `1_000`ms
3. The store - defaults to `Courier.Stores.Simple`
4. The pool - max concurrent deliveries through the adapter at any given time
## The Adapter
All Mailers in Courier run through `Courier.Scheduler`. However, an adapter that scheduled
messages are delivering through must be configured. Do this in your environment's config:
# lib/my_app/mailer.ex
defmodule MyApp.Mailer do
use Courier, otp_app: :my_app
end
# config/dev.exs
config :my_app, MyApp.Mailer,
adapter: Courier.Adapters.Logger
To send mail you just use `MyApp.Mailer.deliver/2`
Mail.build()
|> MyApp.ScheduledMail.deliver()
Courier will default to sending the email almost instantly. The assumed timestamp
when sending is `:calendar.universal_time/0`. You can tell Courier when the message should be sent
by passing the `at` option
tomorrow =
:calendar.universal_time()
|> :calendar.datetime_to_gregorian_seconds()
|> Kernel.+(1 * 60 * 60 * 24) # 24 hours in seconds
|> :calendar.gregorian_seconds_to_datetime()
Mail.build()
|> MyApp.ScheduledMail.deliver(at: tomorrow)
The scheduler will delegate the message sending to the `mailer` declared in your Mailer's opts.
## The Poller
The default polling interval is `1_000`. This is likely far too aggressive. To change the interval for how frequently
the poller awakes to check for new messages to send simply set `interval` in the opts:
# config/config.exs
config :my_app, MyApp.Mailer,
adapter: Courier.Adapters.Logger,
interval: 1_000 * 60 * 60 # awakes once an hour
The value for the interval is in milliseconds in accordance with the value that
[`Process.send_after/3`](http://elixir-lang.org/docs/stable/elixir/Process.html#send_after/3) expects.
## Store
The store is where messages are kept until they are ready to be sent. The default
store is `Courier.Stores.Simple` and is just an Agent, storing messages in-memory.
This may not be ideal for your use-case. You can override the store in the opts:
# config/config.exs
config :my_app, MyApp.Mailer,
adapter: MyApp.DefaultMailer,
store: MyApp.OtherStore
The custom store must respond to a certain API. Please see the documentation for `Courier.Store`
for details or look at the source code for `Courier.Stores.Agent`.
## Pool
The number of concurrent messages being delivered by the adapter is limited with the pooler. By default this
number is limited to 10. You can modify this in your environment's config:
config :my_app, MyApp.Mailer,
adapter: Courier.Adapters.Logger,
pool_size: 20
If you are sending messages through an external service you should consult the documentation for that service
to determine what the max concurrent connections allowed is.
## Special Options
These are options that you may want to use in different environment
* `delivery_timeout` milliseconds to keep the GenServer alive. This should be set to a much higher value
in development and/or test environment.
"""
# Pool worker: holds {store, adapter, opts} and performs one delivery per
# synchronous call, so the pool size caps concurrent adapter deliveries.
defmodule Worker do
use GenServer
def start_link(state) do
GenServer.start_link(__MODULE__, state, [])
end
def init(opts) do
{:ok, opts}
end
def handle_call(
{:deliver, message, message_opts},
_from,
[store: _store, adapter: adapter, opts: _opts] = state
) do
adapter.deliver(message, message_opts)
{:reply, :ok, state}
end
end
@doc false
# Delivery only records the message in the store with its scheduled
# timestamp; actual sending happens when the poller picks it up.
def deliver(%Mail.Message{} = message, opts) do
store(opts).put({message, timestamp(opts), opts})
end
# Child specs for everything the scheduler needs: a task supervisor for
# async deliveries, the adapter, the worker pool, the store and the poller
# (this module) itself.
def children(opts) do
mailer = opts[:mailer]
adapter = opts[:adapter]
store = store(opts)
task_sup = Module.concat(mailer, TaskSupervisor)
opts = Keyword.put(opts, :task_sup, task_sup)
pool_name = Module.concat(mailer, Pool)
opts = Keyword.put(opts, :pool_name, pool_name)
[
Supervisor.Spec.supervisor(Task.Supervisor, [[name: opts[:task_sup]]]),
Supervisor.Spec.supervisor(adapter, [opts]),
:poolboy.child_spec(opts[:pool_name], pool_opts(pool_name, opts),
store: store,
adapter: adapter,
opts: opts
),
Supervisor.Spec.worker(store, []),
Supervisor.Spec.worker(__MODULE__, [opts])
]
end
# max_overflow: 0 keeps concurrency hard-capped at the configured pool size.
defp pool_opts(name, opts) do
[
name: {:local, name},
worker_module: Worker,
size: opts[:pool_size] || @pool_size,
max_overflow: 0
]
end
@doc false
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@doc false
# Schedules the first poll; subsequent polls reschedule themselves.
def init(opts) do
state = %{opts: opts, messages: %{}}
Process.send_after(self(), :poll, interval(opts))
{:ok, state}
end
@doc false
# Poll tick: pop all due messages from the store and deliver each through
# the pool in its own supervised task, tracking in-flight deliveries by the
# task's monitor ref.
def handle_info(:poll, %{opts: opts} = state) do
timeout = opts[:delivery_timeout] || @timeout
state =
store(opts).pop(past: true)
|> Enum.reduce(state, fn {message, _timestamp, message_opts}, state ->
%{ref: ref} =
Task.Supervisor.async_nolink(opts[:task_sup], fn ->
:poolboy.transaction(opts[:pool_name], fn worker_pid ->
GenServer.call(worker_pid, {:deliver, message, message_opts}, timeout)
end)
end)
add_message(state, message, ref)
end)
Process.send_after(self(), :poll, interval(state.opts))
{:noreply, state}
end
@doc false
# A delivery task finished: drop it from the in-flight map.
def handle_info({ref, :ok}, state) do
{:noreply, delete_message(state, ref)}
end
@doc false
# NOTE(review): a crashed delivery task lands here, but its message is left
# in `state.messages` (never redelivered, never cleaned up) - confirm whether
# failed deliveries should be re-queued or at least dropped from tracking.
def handle_info({:DOWN, _ref, :process, _pid, _}, state) do
{:noreply, state}
end
defp add_message(%{messages: messages} = state, message, ref) do
%{state | messages: Map.put(messages, ref, message)}
end
defp delete_message(%{messages: messages} = state, ref) do
%{state | messages: Map.delete(messages, ref)}
end
defp timestamp(opts),
do: opts[:at] || :calendar.universal_time()
defp store(opts),
do: opts[:store] || Courier.Stores.Simple
defp interval(opts),
do: opts[:interval] || @interval
end
|
lib/courier/scheduler.ex
| 0.792504
| 0.432123
|
scheduler.ex
|
starcoder
|
defmodule Normalizer do
  @moduledoc """
  # Normalizer

  Normalizes string-keyed maps to atom-keyed maps while converting values
  according to a given schema. Particularly useful when working with param maps.

  ### Usage

      schema = %{
        user_id: {:number, required: true},
        name: :string,
        admin: {:boolean, default: false},
        languages: [:string]
      }

      params = %{
        "user_id" => "42",
        "name" => "Neo",
        "languages" => ["en"],
        "age" => "55"
      }

      > Normalizer.normalize(params, schema)
      {:ok,
       %{
         user_id: 42,
         name: "Neo",
         admin: false,
         languages: ["en"]
       }}

  ### Properties

  * **Converts** types whenever possible and reasonable;
  * **Ensures** required values are given;
  * **Filters** keys and values not in the schema;
  * **Supports** basic types, lists, maps, and nested lists/maps.

  See `Normalizer.normalize/2` for more details.
  """

  alias Normalizer.MissingValue

  @type value_type :: atom() | [atom()] | map()
  @type value_options :: [
          required: boolean(),
          default: any(),
          with_offset: boolean()
        ]
  @type value_schema :: value_type() | {value_type(), value_options()}
  @type schema :: %{
          required(atom()) => value_schema()
        }

  # Guard-safe check: a type is an atom (:string, …), a one-element list
  # ([:string]) or a nested schema map.
  defmacrop is_type(type) do
    quote do
      is_atom(unquote(type)) or is_list(unquote(type)) or is_map(unquote(type))
    end
  end

  # Guard-safe check: a schema entry is a type or a {type, options} tuple.
  defmacrop is_schema(schema) do
    quote do
      is_type(unquote(schema)) or is_tuple(unquote(schema))
    end
  end

  @doc """
  Normalizes a string-map using the given schema.

  The schema is expected to be a map where each key is an atom representing an
  expected string key in `params`, pointing to the type to which the respective
  value in the params should be normalized.

  The return is a normalized map, in case of success, or a map with each
  erroring key and a description, in case of failure.

  ## Types

  Types can be one of:

  * **Primitives**: `:string`, `:number`, `:boolean`.
  * **Parseable values**: `:datetime`, `:date`.
  * **Maps**: a nested schema for a nested map.
  * **Lists**: one-element lists that contain any of the other types.
  * **Tuples**: two-element tuples where the first element is one of the other types, and the second element a keyword list of options.

  ### Primitives

  **Strings** are somewhat of a catch all that ensures anything but lists and
  maps are converted to strings:

      iex> Normalizer.normalize(%{"key" => 42}, %{key: :string})
      {:ok, %{key: "42"}}

  **Numbers** are kept as is, if possible, or parsed:

      iex> Normalizer.normalize(%{"key" => 42}, %{key: :number})
      {:ok, %{key: 42}}

      iex> Normalizer.normalize(%{"key" => "42.5"}, %{key: :number})
      {:ok, %{key: 42.5}}

  **Booleans** accept native values, "true" and "false" strings, and "1" and "0"
  strings:

      iex> Normalizer.normalize(%{"key" => true}, %{key: :boolean})
      {:ok, %{key: true}}

      iex> Normalizer.normalize(%{"key" => "false"}, %{key: :boolean})
      {:ok, %{key: false}}

  ### Parseable Values

  Only `:datetime` and `:date` are supported right now.

      iex> Normalizer.normalize(%{"key" => "2020-02-11T00:00:00+0100"}, %{key: :datetime})
      {:ok, %{key: ~U[2020-02-10T23:00:00Z]}}

      iex> Normalizer.normalize(%{"key" => "2020-02-11"}, %{key: :date})
      {:ok, %{key: ~D[2020-02-11]}}

  The offset can be extracted as well by passing the `with_offset` option:

      iex> Normalizer.normalize(
      ...>   %{"key" => "2020-02-11T00:00:00+0100"},
      ...>   %{key: {:datetime, with_offset: true}}
      ...> )
      {:ok, %{key: {~U[2020-02-10T23:00:00Z], 3600}}}

  ### Maps

  Nested schemas are supported:

      iex> Normalizer.normalize(
      ...>   %{"key" => %{"age" => "42"}},
      ...>   %{key: %{age: :number}}
      ...> )
      {:ok, %{key: %{age: 42}}}

  ### Lists

  Lists are represented in the schema by a single-element list:

      iex> Normalizer.normalize(
      ...>   %{"key" => ["42", 52]},
      ...>   %{key: [:number]}
      ...> )
      {:ok, %{key: [42, 52]}}

  We can normalize lists of any one of the other types.

  ## Options

  Per-value options can be specified by passing a two-element tuple in the type
  specification. The three available options are `:required`, `:default`, and
  `:with_offset`.

  `:required` fails the validation process if the key is missing or nil:

      iex> Normalizer.normalize(%{"key" => nil}, %{key: :number})
      {:ok, %{key: nil}}

      iex> Normalizer.normalize(%{"key" => nil}, %{key: {:number, required: true}})
      {:error, %{key: "required number, got nil"}}

      iex> Normalizer.normalize(%{}, %{key: {:number, required: true}})
      {:error, %{key: "required number"}}

  `:default` ensures a value in a given key, if nil or missing:

      iex> Normalizer.normalize(%{}, %{key: {:number, default: 42}})
      {:ok, %{key: 42}}

      iex> Normalizer.normalize(%{"key" => 24}, %{key: {:number, default: 42}})
      {:ok, %{key: 24}}

  `:with_offset` is explained in the `:datetime` type above.
  """
  # Fixed spec: failures are reported as a map of key => description (the
  # description itself may be a nested error map), never a bare string.
  @spec normalize(params :: %{String.t() => any()}, schema :: schema()) ::
          {:ok, %{required(atom()) => any()}} | {:error, %{required(atom()) => any()}}
  def normalize(params, schema) do
    case apply_schema(params, schema) do
      %{errors: errors, normalized: normalized} when map_size(errors) == 0 -> {:ok, normalized}
      %{errors: errors} -> {:error, errors}
    end
  end

  # Walks the schema, normalizing each key's value from `params`.
  # Accumulates %{normalized: map, errors: map}; missing optional keys are
  # silently dropped from the output.
  defp apply_schema(params, schema) do
    for {key, value_schema} <- schema, reduce: %{normalized: %{}, errors: %{}} do
      %{normalized: normalized, errors: errors} ->
        # %MissingValue{} marks "key absent" distinctly from an explicit nil.
        value = Map.get(params, Atom.to_string(key), %MissingValue{})

        case normalize_value(value, value_schema) do
          {:ok, %MissingValue{}} ->
            %{normalized: normalized, errors: errors}

          {:ok, normalized_value} ->
            %{normalized: Map.put(normalized, key, normalized_value), errors: errors}

          {:error, value_error} ->
            %{normalized: normalized, errors: Map.put(errors, key, value_error)}
        end
    end
  end

  # Normalizes one value: convert the type first, then apply per-value options.
  defp normalize_value(value, {type, options}) when is_type(type) and is_list(options) do
    with {:ok, value} <- convert_type(value, type),
         {:ok, value} <- apply_options(value, type, options),
         do: {:ok, value}
  end

  defp normalize_value(value, type) when is_type(type),
    do: normalize_value(value, {type, []})

  # Return nil for all types if value is nil:
  defp convert_type(nil, _type),
    do: {:ok, nil}

  # Pass-through for a missing value:
  defp convert_type(%MissingValue{}, _type),
    do: {:ok, %MissingValue{}}

  # Convert strings:
  defp convert_type(value, :string) when is_binary(value),
    do: {:ok, value}

  defp convert_type(value, :string) when not is_map(value) and not is_list(value),
    do: {:ok, to_string(value)}

  defp convert_type(_value, :string),
    do: {:error, "expected string"}

  # Convert numbers:
  defp convert_type(value, :number) when is_number(value),
    do: {:ok, value}

  defp convert_type(value, :number) when is_binary(value) do
    # A "." selects float parsing; anything else must parse as an integer.
    ret =
      if String.contains?(value, "."),
        do: Float.parse(value),
        else: Integer.parse(value)

    # Reject trailing garbage such as "42abc".
    case ret do
      {value, ""} -> {:ok, value}
      _ -> {:error, "expected number"}
    end
  end

  # Convert booleans:
  defp convert_type(value, :boolean) when is_boolean(value),
    do: {:ok, value}

  defp convert_type(value, :boolean) when is_binary(value) do
    case value do
      truthy when truthy in ["1", "true"] -> {:ok, true}
      falsy when falsy in ["0", "false"] -> {:ok, false}
      _ -> {:error, "expected boolean"}
    end
  end

  # Convert datetimes (kept as {datetime, offset}; apply_options strips the
  # offset unless with_offset: true was requested):
  defp convert_type(value, :datetime) when is_binary(value) do
    case DateTime.from_iso8601(value) do
      {:ok, datetime, offset} -> {:ok, {datetime, offset}}
      {:error, error} -> {:error, "expected datetime (#{error})"}
    end
  end

  # Convert dates:
  defp convert_type(value, :date) when is_binary(value) do
    case Date.from_iso8601(value) do
      {:ok, date} -> {:ok, date}
      {:error, error} -> {:error, "expected date (#{error})"}
    end
  end

  # Convert lists:
  defp convert_type(values, [schema]) when is_list(values) and is_schema(schema) do
    values
    |> Enum.reduce_while([], fn value, out ->
      case normalize_value(value, schema) do
        {:ok, normalized} ->
          {:cont, [normalized | out]}

        # Nested map schemas report a map of errors, not a string; appending
        # " list" to a map would raise ArgumentError, so only decorate
        # string errors. (Previously this crashed for lists of maps.)
        {:error, error} when is_binary(error) ->
          {:halt, {:error, error <> " list"}}

        {:error, error} ->
          {:halt, {:error, error}}
      end
    end)
    |> case do
      out when is_list(out) -> {:ok, Enum.reverse(out)}
      {:error, _} = error -> error
    end
  end

  # Convert maps (just recurse):
  defp convert_type(map, map_schema) when is_map(map) and is_map(map_schema),
    do: normalize(map, map_schema)

  defp convert_type(_value, type),
    do: {:error, "expected #{type_string(type)}"}

  # Applies the option list left-to-right, stopping at the first error.
  defp apply_options(value, type, options) when is_list(options) and is_type(type) do
    options
    |> full_type_options(type)
    |> Enum.reduce_while({:ok, value}, fn {opt_key, opt_value}, {:ok, value} ->
      case apply_option(value, type, opt_key, opt_value) do
        {:ok, value} -> {:cont, {:ok, value}}
        {:error, error} -> {:halt, {:error, error}}
      end
    end)
  end

  defp apply_option(nil, type, :required, true),
    do: {:error, "required #{type_string(type)}, got nil"}

  defp apply_option(%MissingValue{}, type, :required, true),
    do: {:error, "required #{type_string(type)}"}

  defp apply_option(nil, _type, :default, value),
    do: {:ok, value}

  defp apply_option(%MissingValue{}, _type, :default, value),
    do: {:ok, value}

  defp apply_option(datetime_with_offset, :datetime, :with_offset, true),
    do: {:ok, datetime_with_offset}

  # Default for :datetime — drop the offset half of the parsed tuple.
  defp apply_option({datetime, _offset}, :datetime, :with_offset, false),
    do: {:ok, datetime}

  defp apply_option(value, _type, _option, _option_value),
    do: {:ok, value}

  # Ensure :datetime always carries a with_offset option so the offset is
  # stripped by default.
  defp full_type_options(options, :datetime), do: Keyword.put_new(options, :with_offset, false)
  defp full_type_options(options, _type), do: options

  # Human-readable type names for error messages.
  defp type_string(type) when is_atom(type),
    do: "#{type}"

  defp type_string([type]) when is_atom(type),
    do: "#{type} list"

  defp type_string([{type, _options}]) when is_atom(type),
    do: "#{type} list"

  defp type_string(type) when is_map(type),
    do: "map"
end
|
lib/normalizer.ex
| 0.909567
| 0.622517
|
normalizer.ex
|
starcoder
|
defmodule AdventOfCode.Day12 do
  @moduledoc "Day 12"

  # Part 1: the ship itself has a position and a compass facing.
  defmodule PartOneShip, do: defstruct(x: 0, y: 0, face: :E)
  # Part 2: the ship moves toward a waypoint held relative to it.
  defmodule PartTwoShip, do: defstruct(x: 0, y: 0, waypoint_x: 10, waypoint_y: -1)

  defprotocol Ship do
    @doc "Rotates the ship (or its waypoint) 90 degrees clockwise; returns a partial update map."
    def turn(ship)

    @doc "Moves the ship forward by `value` units; returns a partial update map."
    def forward(ship, value)
  end

  defimpl Ship, for: PartOneShip do
    @turns [:N, :E, :S, :W]

    # One clockwise quarter turn: advance to the next compass point,
    # wrapping from :W back to :N via Enum.at/3's default.
    def turn(ship),
      do: %{face: Enum.at(@turns, Enum.find_index(@turns, &(&1 == ship.face)) + 1, :N)}

    # North is negative y (screen-style coordinates).
    def forward(ship, value) do
      case ship.face do
        :N -> %{y: ship.y - value}
        :S -> %{y: ship.y + value}
        :E -> %{x: ship.x + value}
        :W -> %{x: ship.x - value}
      end
    end
  end

  defimpl Ship, for: PartTwoShip do
    # With north as negative y, a clockwise quarter turn maps (x, y) -> (-y, x).
    # The previous five-branch case computed exactly this in every branch
    # (abs(y) == -y whenever y < 0, and {0, 0} is a fixed point), so it
    # collapses to a single expression.
    def turn(ship),
      do: %{waypoint_x: -ship.waypoint_y, waypoint_y: ship.waypoint_x}

    def forward(ship, value),
      do: %{x: ship.x + ship.waypoint_x * value, y: ship.y + ship.waypoint_y * value}
  end

  # Parses one instruction line (e.g. "F10", "L90") into a list of steps.
  # Left turns are rewritten as 1-3 clockwise quarter turns {:T, nil}.
  # String.to_atom/1 is safe here: the regex limits headings to NSEWLRF.
  defp parse(line) do
    [_, heading, value] = Regex.run(~r/^([NSEWLRF])(\d+)$/, line)
    turn = {:T, nil}

    case {String.to_atom(heading), String.to_integer(value)} do
      n when n == {:R, 90} or n == {:L, 270} -> [turn]
      n when n == {:R, 180} or n == {:L, 180} -> [turn, turn]
      n when n == {:R, 270} or n == {:L, 90} -> [turn, turn, turn]
      result -> [result]
    end
  end

  # Runs every instruction through `processor`, merging each returned partial
  # map into the ship struct, and answers with the final Manhattan distance.
  defp process(list, ship, processor) do
    final =
      list
      |> Enum.flat_map(&parse/1)
      |> Enum.reduce(ship, &Map.merge(&2, processor.(&1, &2)))

    abs(final.x) + abs(final.y)
  end

  def part1(list) do
    process(list, %PartOneShip{}, fn
      {:T, _}, ship -> Ship.turn(ship)
      {:F, value}, ship -> Ship.forward(ship, value)
      # N/S/E/W move the ship without changing its facing: forward/2 runs on a
      # temporarily re-faced copy, and only the x/y delta is merged back.
      {direction, value}, ship -> Ship.forward(%{ship | face: direction}, value)
    end)
  end

  def part2(list) do
    process(list, %PartTwoShip{}, fn
      {:N, value}, ship -> %{waypoint_y: ship.waypoint_y - value}
      {:S, value}, ship -> %{waypoint_y: ship.waypoint_y + value}
      {:E, value}, ship -> %{waypoint_x: ship.waypoint_x + value}
      {:W, value}, ship -> %{waypoint_x: ship.waypoint_x - value}
      {:T, _}, ship -> Ship.turn(ship)
      {:F, value}, ship -> Ship.forward(ship, value)
    end)
  end
end
|
lib/advent_of_code/day12.ex
| 0.7181
| 0.724481
|
day12.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.