defmodule Nostrum.Struct.User do
@moduledoc ~S"""
Struct representing a Discord user.
## Mentioning Users in Messages
A `Nostrum.Struct.User` can be mentioned in message content using the `String.Chars`
protocol or `mention/1`.
```Elixir
user = %Nostrum.Struct.User{id: 120571255635181568}
Nostrum.Api.create_message!(184046599834435585, "#{user}")
%Nostrum.Struct.Message{content: "<@120571255635181568>"}
user = %Nostrum.Struct.User{id: 89918932789497856}
Nostrum.Api.create_message!(280085880452939778, "#{Nostrum.Struct.User.mention(user)}")
%Nostrum.Struct.Message{content: "<@89918932789497856>"}
```
## User vs. Member
A `user` contains only general information about that user such as a `username` and an `avatar`.
A `member` has everything that a `user` has, but also additional information on a per guild basis. This includes things like a `nickname` and a list of `roles`.
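For example (a sketch; the `Nostrum.Struct.Guild.Member` struct and its `:nick` and `:roles` fields are assumed here for illustration):
```Elixir
user = %Nostrum.Struct.User{id: 120571255635181568, username: "b1nzy"}
# The same person in a particular guild carries extra per-guild data:
member = %Nostrum.Struct.Guild.Member{user: user, nick: "cool nick", roles: []}
```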
"""
alias Nostrum.Struct.Snowflake
alias Nostrum.{Util, Constants}
defstruct [
:id,
:username,
:discriminator,
:avatar,
:bot,
:mfa_enabled,
:verified,
:email
]
defimpl String.Chars do
def to_string(user), do: @for.mention(user)
end
@typedoc "The user's id"
@type id :: Snowflake.t()
@typedoc "The user's username"
@type username :: String.t()
@typedoc "The user's 4--digit discord-tag"
@type discriminator :: String.t()
@typedoc "User's avatar hash"
@type avatar :: String.t() | nil
@typedoc "Whether the user is a bot"
@type bot :: boolean | nil
@typedoc "Whether the user has two factor enabled"
@type mfa_enabled :: boolean | nil
@typedoc "Whether the email on the account has been verified"
@type verified :: boolean | nil
@typedoc "The user's email"
@type email :: String.t() | nil
@type t :: %__MODULE__{
id: id,
username: username,
discriminator: discriminator,
avatar: avatar,
bot: bot,
mfa_enabled: mfa_enabled,
verified: verified,
email: email
}
@doc ~S"""
Formats a `Nostrum.Struct.User` into a mention.
## Examples
```Elixir
iex> user = %Nostrum.Struct.User{id: 177888205536886784}
...> Nostrum.Struct.User.mention(user)
"<@1<PASSWORD>>"
```
"""
@spec mention(t) :: String.t()
def mention(%__MODULE__{id: id}), do: "<@#{id}>"
@doc """
Returns the URL of a user's display avatar.
If `:avatar` is `nil`, the default avatar url is returned.
Supported image formats are PNG, JPEG, WebP, and GIF.
## Examples
```Elixir
iex> user = %Nostrum.Struct.User{avatar: "8342729096ea3675442027381ff50dfe",
...> id: 80351110224678912}
iex> Nostrum.Struct.User.avatar_url(user)
"https://cdn.discordapp.com/avatars/80351110224678912/8342729096ea3675442027381ff50dfe.webp"
iex> Nostrum.Struct.User.avatar_url(user, "png")
"https://cdn.discordapp.com/avatars/80351110224678912/8342729096ea3675442027381ff50dfe.png"
iex> user = %Nostrum.Struct.User{avatar: nil,
...> discriminator: "1337"}
iex> Nostrum.Struct.User.avatar_url(user)
"https://cdn.discordapp.com/embed/avatars/2.png"
```
"""
@spec avatar_url(t, String.t()) :: String.t()
def avatar_url(user, image_format \\ "webp")
def avatar_url(%__MODULE__{avatar: nil, discriminator: disc}, _) do
image_name =
disc
|> String.to_integer()
|> rem(5)
URI.encode(Constants.cdn_url() <> Constants.cdn_embed_avatar(image_name))
end
def avatar_url(%__MODULE__{id: id, avatar: avatar}, image_format),
do: URI.encode(Constants.cdn_url() <> Constants.cdn_avatar(id, avatar, image_format))
@doc """
Returns a user's `:username` and `:discriminator` separated by a hashtag.
## Examples
```Elixir
iex> user = %Nostrum.Struct.User{username: "b1nzy",
...> discriminator: "0852"}
iex> Nostrum.Struct.User.full_name(user)
"b1nzy#0852"
```
"""
@spec full_name(t) :: String.t()
def full_name(%__MODULE__{username: username, discriminator: disc}),
do: "#{username}##{disc}"
@doc false
def p_encode do
%__MODULE__{}
end
@doc false
def to_struct(map) do
new =
map
|> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end)
|> Map.update(:id, nil, &Util.cast(&1, Snowflake))
struct(__MODULE__, new)
end
end
# -- end of lib/nostrum/struct/user.ex --
defmodule LastCrusader.Webmentions.Sender do
@moduledoc """
Schedules webmentions to be sent.
It first checks that the origin exists before sending webmentions, retrying the check every minute until it reaches the configured number of tries.
"""
alias LastCrusader.Micropub
alias Webmentions
require Logger
require Tesla
alias Jason, as: Json
@type url() :: String.t()
@one_minute 60_000
@doc """
Schedules webmentions to be sent, waiting one minute between tries (default: 15 tries)
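## Example
    # Illustrative URLs; returns {:ok, nb_max_tries} immediately while the
    # origin checking and sending happen asynchronously.
    LastCrusader.Webmentions.Sender.schedule_webmentions(
      ["https://example.com/a-mentioned-post"],
      "https://my.blog/note",
      5
    )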
"""
@spec schedule_webmentions(list(url()), url(), pos_integer()) :: {:ok, non_neg_integer()}
def schedule_webmentions(links, origin, nb_max_tries \\ 15)
def schedule_webmentions([], origin, _nb_max_tries) do
Logger.info("No webmentions to send from #{inspect(origin)}")
{:ok, 0}
end
def schedule_webmentions(links, origin, nb_max_tries) do
do_schedule_webmentions(links, origin, nb_max_tries, 0)
{:ok, nb_max_tries}
end
defp do_schedule_webmentions(links, origin, all_tried, all_tried) do
Logger.warning(
"Sending webmentions from #{inspect(origin)} to #{inspect(links)}: aborted (too many tries)"
)
{:ok, self(), []}
end
defp do_schedule_webmentions(links, origin, nb_max_tries, nb_tried) do
Logger.info(
"Sending webmentions from #{inspect(origin)} to #{inspect(links)}: scheduled. try #{inspect(nb_tried)}/#{inspect(nb_max_tries)}"
)
Task.async(fn -> start_task(origin, links, nb_max_tries, nb_tried) end)
end
defp start_task(origin, links, nb_max_tries, nb_tried) do
:timer.sleep(@one_minute)
case Tesla.head(origin) do
{:ok, %Tesla.Env{status: 200}} ->
send_webmentions(origin, links, nb_max_tries, nb_tried)
_ ->
do_schedule_webmentions(links, origin, nb_max_tries, nb_tried + 1)
end
end
@doc """
Sends Webmentions to every link
"""
def send_webmentions(origin, links, nb_max_tries \\ 1, nb_tried \\ 0) do
Logger.info(
"Sending webmentions from #{inspect(origin)} to #{inspect(links)}: try #{inspect(nb_tried)}/#{inspect(nb_max_tries)}"
)
{:ok, webmention_response} = Webmentions.send_webmentions_for_links(origin, links)
Logger.info("Result: webmentions: #{inspect(webmention_response)}")
Task.async(fn -> update_content_with_syndication(origin, webmention_response) end)
{:ok, self(), webmention_response}
end
defp update_content_with_syndication(origin, webmention_responses) do
find_syndication_links(webmention_responses)
|> update_content(origin)
end
@doc """
Finds syndication links (from Bridgy to Twitter) in a list of Webmentions responses
"""
def find_syndication_links(webmention_response, syndication_links \\ [])
def find_syndication_links([], syndication_links) do
syndication_links
end
def find_syndication_links([head | tail], syndication_links) do
case head do
%Webmentions.Response{
status: :ok,
target: "https://brid.gy/publish/twitter",
endpoint: "https://brid.gy/publish/webmention",
message: "sent",
body: body
} ->
find_syndication_links(tail, syndication_links ++ find_syndication_link(body))
_ ->
find_syndication_links(tail, syndication_links)
end
end
defp find_syndication_link(body) do
case Json.decode(body) do
{:ok, %{"url" => url}} -> [url]
_ -> []
end
end
@spec update_content(list(url()), url()) :: nil
defp update_content(syndication_links, origin)
defp update_content([], origin) do
Logger.info("No more syndication links found for from #{inspect(origin)}")
end
defp update_content([link | _], origin) do
{:ok, origin} = Micropub.add_keyword_to_post(origin, {"copy", link})
Logger.info("Syndication link found for from #{inspect(origin)}. Content is up to date")
end
end
# -- end of lib/last_crusader/webmentions/webmentions_sender.ex --
defmodule AntlPhonenumber.Range do
@moduledoc """
Defines a range of phone numbers.
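A range implements `Enumerable` (see below), so the usual `Enum` functions work on it. For example (a sketch; the phone numbers are illustrative):
    AntlPhonenumber.Range.new("0548451840", "0548451845", "IL") |> Enum.count()
    #=> 6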
"""
defstruct first: nil, last: nil, iso_country_code: nil
@type t :: %__MODULE__{
first: String.t(),
last: String.t(),
iso_country_code: String.t()
}
@doc """
Creates a new range.
## Examples
iex> AntlPhonenumber.Range.new "0548451840", "0548451845", "IL"
%AntlPhonenumber.Range{
first: "972548451840",
last: "972548451845",
iso_country_code: "IL"
}
"""
@spec new(String.t(), String.t(), String.t()) :: t
def new(first, last, iso_country_code) when is_binary(first) and is_binary(last) do
first = AntlPhonenumber.to_plus_e164!(first, iso_country_code)
last = AntlPhonenumber.to_plus_e164!(last, iso_country_code)
if AntlPhonenumber.get_iso_country_code!(first) !=
AntlPhonenumber.get_iso_country_code!(last) do
raise ArgumentError,
"ranges (first..last) expect both sides to have the same contry_code, " <>
"got: #{inspect(first)}..#{inspect(last)}"
end
%__MODULE__{first: first, last: last, iso_country_code: iso_country_code}
end
defimpl Enumerable, for: AntlPhonenumber.Range do
import AntlPhonenumber, only: [to_integer: 1, next: 1, previous: 1, move: 2]
def reduce(%AntlPhonenumber.Range{first: first, last: last} = range, acc, fun),
do: reduce(first, last, acc, fun, _asc? = asc?(range))
defp reduce(_first, _last, {:halt, acc}, _fun, _asc?), do: {:halted, acc}
defp reduce(first, last, {:suspend, acc}, fun, asc?),
do: {:suspended, acc, &reduce(first, last, &1, fun, asc?)}
defp reduce(first, last, {:cont, acc}, fun, _asc? = true) when first <= last,
do: reduce(next(first), last, fun.(first, acc), fun, _asc? = true)
defp reduce(first, last, {:cont, acc}, fun, _asc? = false) when first >= last,
do: reduce(previous(first), last, fun.(first, acc), fun, _asc? = false)
defp reduce(_, _, {:cont, acc}, _fun, _asc?), do: {:done, acc}
def member?(%AntlPhonenumber.Range{} = range, number) do
case AntlPhonenumber.to_plus_e164(number, range.iso_country_code) do
{:ok, plus_e164} -> {:ok, member_as_plus_e164?(range, plus_e164)}
_ -> {:ok, false}
end
end
defp member_as_plus_e164?(%AntlPhonenumber.Range{} = range, plus_e164) do
if asc?(range),
do: range.first <= plus_e164 and plus_e164 <= range.last,
else: range.last <= plus_e164 and plus_e164 <= range.first
end
def count(%AntlPhonenumber.Range{} = range),
do: {:ok, abs(to_integer(range.last) - to_integer(range.first)) + 1}
def slice(%AntlPhonenumber.Range{} = range) do
if asc?(range),
do: {:ok, Enum.count(range), &slice_asc(move(range.first, &1), &2)},
else: {:ok, Enum.count(range), &slice_desc(move(range.first, -&1), &2)}
end
defp slice_asc(current, 1), do: [current]
defp slice_asc(current, remaining) do
[current | slice_asc(next(current), remaining - 1)]
end
defp slice_desc(current, 1), do: [current]
defp slice_desc(current, remaining) do
[current | slice_desc(previous(current), remaining - 1)]
end
defp asc?(%AntlPhonenumber.Range{} = range), do: range.first <= range.last
end
defimpl Inspect do
def inspect(%AntlPhonenumber.Range{first: first, last: last}, _), do: "#{first}..#{last}"
end
end
# -- end of lib/antl_phonenumber/range.ex --
defmodule Neotomex.Grammar do
@moduledoc """
# Neotomex.Grammar
A Neotomex PEG grammar specifies a directed graph of definitions.
It consists of:
- A set of definitions, where each definition consists of an
`identifier` and an `expression`.
e.g. `Definition <- Identifier '<-' Expression`
- The root definition's identifier. This root definition will be
used as the entry point for matching.
- A transform function can be associated with a Neotomex expression.
Transforms are applied to the parse tree after matching.
The definition data types (see the definition/0 typespec below) are
verbose and machine readable as opposed to human readable. Typical
usage will be to compile a grammar from a more human readable
format, bootstrapping with Neotomex grammars as necessary.
Parsing consists of first using the grammar to `match` a valid
parse tree, and then applying a `transform` node by node to
this parse tree.
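For example, a grammar that matches digits and transforms them into an integer (mirroring the `parse/2` doctest below):
    grammar = Neotomex.Grammar.new(:root,
      %{root: {{:terminal, ~r/^[0-9]+/},
               {:transform, &String.to_integer/1}}})
    Neotomex.Grammar.parse(grammar, "42")
    #=> {:ok, 42, ""}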
## Match
A grammar consists of definitions which are labeled by an identifier
and consist of an expression. An expression can be a
- `terminal`: references a char, string, or regex. It successfully
matches when its reference appears at the beginning of the input.
- `nonterminal`: the identifier of a definition. It matches if the
identified definition matches.
- `sequence`: an ordered list of expressions. It successfully matches
by folding the expressions against the input.
- `priority`: an ordered list of expressions. It matches using the
first successful match of the list.
- `zero_or_more`: matches when its subexpression greedily matches zero or
more times.
- `one_or_more`: matches when its subexpression greedily matches one or
more times.
- `zero_or_one`: matches when its subexpression greedily matches zero or
one time.
- `and`: matches when its subexpression matches. Consumes no input.
- `not`: matches when its subexpression does not match. Consumes no input.
- `prune`: matches when its subexpression matches; the match, however, is
pruned from the result.
A grammar's definitions come together to form a conditional directed graph.
Matching starts at the root node and performs a depth first search for the
first successful route through the tree. This represents the valid parse
tree for the grammar and input.
## Transform
A transform is a function that modifies the match for an expression. This
can be used to evaluate the parse tree. When applying transforms to
the matched tree, they are applied depth first.
The default transform is the identity function.
Transforms must return `transformed` when successful, where
`transformed` is the transformed match.
"""
# Neotomex parses PEG expressions into this internal representation
@type terminal :: binary | char | Regex.t | nil
@type nonterminal :: atom
@type expression :: :empty
| {:terminal, terminal}
| {:nonterminal, nonterminal}
| {:sequence, [expression]}
| {:priority, [expression]}
| {:zero_or_more, expression}
| {:one_or_more, expression}
| {:zero_or_one, expression}
| {:and, expression}
| {:not, expression}
| {:prune, expression}
| {:insensitive, expression}
@type definition :: {nonterminal, expression}
@type transform :: {:transform, ((term) -> {:ok, term})}
| {:transform, {atom(), atom()}}
| nil
@type expr_trans :: {expression, transform}
# A match is the result of the Neotomex PEG grammar matching an expression
@typep match :: {expr_trans, [match] | match | String.t}
# A grammar contains a root label, and a map of rules keyed by nonterminal label
@type grammar :: %{root: nonterminal | false,
definitions: %{nonterminal => expression},
cache: pid() | false}
defstruct root: nil, definitions: nil, breadcrumbs: [], insensitive: false
defmodule ParseError do
@moduledoc """
Exception raised on parse errors.
"""
defexception [error: nil, description: "parse error"]
def message(exception) do
exception.description
end
end
defmodule ValidationError do
@moduledoc """
Exception raised on validation errors.
"""
defexception [error: nil, description: "validation error"]
def message(exception) do
exception.description <> ": " <> error_to_string(exception.error)
end
@doc false
defp error_to_string(error) do
# TODO handle cases
:io_lib.format("~p", [error]) |> IO.iodata_to_binary()
end
end
@doc """
Returns an empty grammar.
"""
@spec new(nonterminal | nil, map) :: grammar
def new(root \\ nil, definitions \\ %{}) do
%Neotomex.Grammar{root: root, definitions: definitions}
end
@doc """
Parse the `input` using the `grammar` by matching a parse tree and
then applying all transforms.
## Examples
iex> grammar = new(:root, %{root: {{:terminal, ~r/^[0-9]+/},
...> {:transform, &String.to_integer/1}}})
iex> parse(grammar, "1")
{:ok, 1, ""}
iex> parse(grammar, "100")
{:ok, 100, ""}
"""
@spec parse(%Neotomex.Grammar{}, binary) :: {:ok, any, binary}
| :mismatch | {:error, term}
def parse(grammar, input) when is_binary(input) do
case match(grammar, input) do
{:ok, match, rest} ->
{:ok, transform_match(match), rest}
otherwise ->
otherwise
end
end
@doc """
Match the `input` using the grammar.
NB: Not tail call optimized. Possible?
"""
@spec match(%Neotomex.Grammar{}, binary) :: {:ok, {expression, transform}, binary}
| :mismatch | {:error, term}
def match(%Neotomex.Grammar{:root => root,
:definitions => definitions} = grammar,
input)
when is_binary(input) do
match(definitions[root], grammar, input)
end
@doc """
Transform the parse tree returned by match by applying the
expressions' transform functions via depth first recursion.
NB: Not tail call optimized. Possible? Pack rat?
## Examples
iex> transform_match({{nil, {:transform, fn x -> String.to_integer(x) end}},
...> {{nil, nil}, "1"}})
1
iex> transform_match({{nil, {:transform, fn [x, y] -> x + y end}},
...> [{{nil, nil}, 1}, {{nil, nil}, 1}]})
2
"""
@spec transform_match(match) :: any
def transform_match(nil), do: nil
def transform_match({_, {:prune, _}}), do: nil
def transform_match({{_, nil}, terminal})
when is_binary(terminal) or is_integer(terminal) do
terminal
end
def transform_match({{_, nil}, matches}) when is_list(matches) do
transform_prune(matches)
end
def transform_match({{_, nil}, match}) do
transform_match(match)
end
def transform_match({{_, transform}, matches}) when is_list(matches) do
apply_transform(transform, transform_prune(matches))
end
def transform_match({{_, transform}, match}) when is_binary(match) do
apply_transform(transform, match)
end
def transform_match({{_, transform}, match}) do
apply_transform(transform, transform_match(match))
end
@doc """
Validate the grammar. This is especially useful for
debugging a grammar since it is exhaustive and provides richer error
reporting than a failed match.
Notes:
- Dialyzer is your friend -- `validate/1` augments it
- Grammars are not validated by default due to the performance overhead.
- The validation will return the result of the first failure. There may
be more issues with the grammar.
Validation checks that:
- The grammar has a `:root` field.
- The grammar has a `:definitions` field.
- The root references a definition.
- All nonterminals reference a definition.
- There are no unreferenced definitions [TODO]
## Examples
More complex examples can be found in `test/neotomex/grammar_test.exs` [todo]
iex> validate(%Neotomex.Grammar{})
{:error, {:missing, :root}}
iex> validate(%Neotomex.Grammar{:root => :root})
{:error, {:missing, :definitions}}
iex> validate(%Neotomex.Grammar{:root => :root, :definitions => %{}})
{:error, {:missing, :root_definition}}
iex> validate(%Neotomex.Grammar{root: :root,
...> definitions: %{root: {:bad_type, :nonsense}}})
{:error, {:bad_definition, {:root, {:bad_expr_type, :bad_type}}}}
iex> validate(%Neotomex.Grammar{:root => :root,
...> :definitions => %{:root => :the_bad_expr}})
{:error, {:bad_definition, {:root, {:bad_expr, :the_bad_expr}}}}
iex> validate(%Neotomex.Grammar{:root => :root,
...> :definitions => %{:root => {:terminal, ?a}}})
:ok
"""
@spec validate(%Neotomex.Grammar{}) :: :ok |
{:error, {:missing, :root
| :definitions
| :root_definition
| {:definition, atom}}}
def validate(%Neotomex.Grammar{:root => root,
:definitions => definitions} = grammar)
when root != nil and definitions != nil do
case Map.has_key?(definitions, root) do
true ->
validate(grammar, Map.to_list(definitions))
false ->
{:error, {:missing, :root_definition}}
end
end
def validate(%Neotomex.Grammar{root: nil}) do
{:error, {:missing, :root}}
end
def validate(%Neotomex.Grammar{definitions: nil}) do
{:error, {:missing, :definitions}}
end
## Private Functions
@doc false
defp match({identifier, _} = expr, grammar, input) when is_atom(identifier) do
# If no transform is provided, default it to `nil`
match({expr, nil}, grammar, input)
end
defp match(nil, _, _), do: {:error, :no_node_ref}
defp match(:empty, _, input), do: {:ok, nil, input}
# Terminal nodes can be a char, string, or regex
defp match({{:terminal, char}, _} = expr_trans, _, <<char, rest :: binary>>)
when is_integer(char) do
{:ok, {expr_trans, char}, rest}
end
defp match({{:terminal, char}, _}, _, _) when is_integer(char) do
:mismatch
end
defp match({{:terminal, terminal}, _} = expr_trans, g, input)
when is_binary(terminal) do
case String.split_at(input, String.length(terminal)) do
{^terminal, rest} ->
{:ok, {expr_trans, terminal}, rest}
{inexact, rest} ->
if g.insensitive and (String.downcase(inexact) == String.downcase(terminal)) do
{:ok, {expr_trans, inexact}, rest}
else
:mismatch
end
end
end
defp match({{:terminal, terminal}, _} = expr_trans, _, input) do
case Regex.run(terminal, input) do
[""] ->
# Make sure it's not just an empty match
case Regex.match?(terminal, input) do
true ->
{:ok, {expr_trans, ""}, input}
false ->
:mismatch
end
nil ->
:mismatch
["\r"] ->
# String.split_at can't split "\r\n" into {"\r","\n"}
# Using Regex.split to replace it.
["", rest] = Regex.split(~r/^\r/, input)
{:ok, {expr_trans, "\r"}, rest}
[match] ->
# Two parts are necessary since the first is being trimmed away
{^match, rest} = String.split_at(input, String.length(match))
{:ok, {expr_trans, match}, rest}
end
end
defp match({{:nonterminal, nonterminal}, _} = expr_trans,
%Neotomex.Grammar{:definitions => definitions} = grammar, input) do
case match(definitions[nonterminal], grammar, input) do
{:ok, match, rest} ->
{:ok, {expr_trans, match}, rest}
otherwise ->
otherwise
end
end
defp match({{:insensitive, inner}, _},
%Neotomex.Grammar{} = grammar, input) do
match(inner, %{grammar | insensitive: true}, input)
end
defp match({{:sequence, _}, _} = expr_trans, grammar, input) do
match_sequence(expr_trans, grammar, input)
end
defp match({{:priority, _}, _} = expr_trans, grammar, input) do
match_priorities(expr_trans, grammar, input)
end
defp match({{:zero_or_more, _}, _} = expr_trans, grammar, input) do
match_zero_or_more(expr_trans, grammar, input)
end
defp match({{:one_or_more, expression}, _} = expr_trans, grammar, input) do
case match(expression, grammar, input) do
{:ok, match, input} ->
match_zero_or_more(expr_trans, grammar, input, [match])
otherwise ->
otherwise
end
end
defp match({{:zero_or_one, expression}, _} = expr_trans, grammar, input) do
case match(expression, grammar, input) do
:mismatch ->
{:ok, {expr_trans, nil}, input}
otherwise ->
otherwise
end
end
defp match({{:and, expression}, _} = expr_trans, grammar, input) do
case match(expression, grammar, input) do
{:ok, _, _} ->
{:ok, {expr_trans, nil}, input}
otherwise ->
otherwise
end
end
defp match({{:not, expression}, _} = expr_trans, grammar, input) do
case match(expression, grammar, input) do
{:ok, _, _} ->
:mismatch
:mismatch ->
{:ok, {expr_trans, nil}, input}
{:error, reason} ->
{:error, reason}
end
end
defp match({{:prune, expression}, _} = expr_trans, grammar, input) do
case match(expression, grammar, input) do
{:ok, match, input} ->
{:ok, {expr_trans, {:prune, match}}, input}
otherwise ->
otherwise
end
end
# Helper for parsing a sequence of expressions
defp match_sequence({{:sequence, expressions}, _} = expr_trans, grammar, input) do
match_sequence(expr_trans, grammar, input, expressions, [])
end
defp match_sequence({{:sequence, _}, _} = expr_trans, _, input, [], acc) do
{:ok, {expr_trans, Enum.reverse(acc)}, input}
end
defp match_sequence(expr_trans, grammar, input, [expression | expressions], acc) do
case match(expression, grammar, input) do
{:ok, match, input} ->
match_sequence(expr_trans, grammar, input, expressions, [match | acc])
otherwise ->
otherwise
end
end
# Helper for parsing a priority list of expressions
@doc false
defp match_priorities({{:priority, expressions}, _} = expr_trans, grammar, input) do
match_priorities(expr_trans, grammar, input, expressions)
end
@doc false
defp match_priorities(_, _, _, []), do: :mismatch
defp match_priorities(expr_trans, grammar, input, [expression | expressions]) do
case match(expression, grammar, input) do
{:ok, match, input} ->
{:ok, {expr_trans, match}, input}
:mismatch ->
match_priorities(expr_trans, grammar, input, expressions)
{:error, reason} ->
{:error, reason}
end
end
# Helper for zero or more (* suffix). Also used for one or more.
@doc false
defp match_zero_or_more(expr_trans, grammar, input, acc \\ [])
defp match_zero_or_more({{_, expression}, _} = expr_trans, grammar, input, acc) do
case match(expression, grammar, input) do
{:ok, match, input} ->
match_zero_or_more(expr_trans, grammar, input, [match | acc])
:mismatch ->
{:ok, {expr_trans, Enum.reverse(acc)}, input}
{:error, reason} ->
{:error, reason}
end
end
# Apply the transform to the provided arg
@doc false
defp apply_transform({:transform, {module, method}}, arg)
when is_atom(module) and is_atom(method) do
apply(module, method, [arg])
end
defp apply_transform({:transform, transform_fn}, arg)
when is_function(transform_fn) do
transform_fn.(arg)
end
# Helper for applying transform match while pruning
@doc false
defp transform_prune(matches) do
for match <- matches,
(case match do
{_, {:prune, _}} -> false
_ -> true
end),
do: transform_match(match)
end
@doc false
defp validate(_grammar, []), do: :ok
# Strip out transforms, which should be either nil or {:transform, _}
defp validate(grammar, [{id, {expr, {:transform, _}}} | rest]) do
validate(grammar, [{id, expr} | rest])
end
defp validate(grammar, [{id, {expr, nil}} | rest]) do
validate(grammar, [{id, expr} | rest])
end
defp validate(grammar, [{id, expr} | rest]) do
case validate_expr(grammar, expr) do
:ok ->
validate(grammar, rest)
{:error, reason} ->
{:error, {:bad_definition, {id, reason}}}
end
end
@doc false
defp validate_expr(%{:definitions => definitions}, {:nonterminal, id}) do
# check that the referenced terminal exists in the grammar
case Map.has_key?(definitions, id) do
true ->
:ok
false ->
{:error, {:missing, {:definition, id}}}
end
end
defp validate_expr(grammar, {list_expr_type, exprs})
when list_expr_type == :sequence or list_expr_type == :priority do
# handle a rule which wraps a list of rules
if is_list(exprs) do
validate_expr(grammar, exprs)
else
{:error, {:bad_rule, {list_expr_type, exprs}}}
end
end
defp validate_expr(grammar, {wrap_expr_type, expr})
when wrap_expr_type == :zero_or_more
or wrap_expr_type == :zero_or_one
or wrap_expr_type == :one_or_more
or wrap_expr_type == :not
or wrap_expr_type == :and
or wrap_expr_type == :prune
or wrap_expr_type == :insensitive do
# handle a expr which is wraps a single expression
if is_tuple(expr) do
validate_expr(grammar, expr)
else
{:error, {:bad_expr, {wrap_expr_type, expr}}}
end
end
defp validate_expr(_grammar, []), do: :ok
defp validate_expr(grammar, [expr | rest]) do
# handle lists of exprs
case validate_expr(grammar, expr) do
:ok ->
validate_expr(grammar, rest)
{:error, reason} ->
{:error, reason}
end
end
defp validate_expr(_grammar, {:terminal, terminal}) do
if Regex.regex?(terminal) or is_binary(terminal) or is_integer(terminal) do
:ok
else
{:error, {:bad_terminal, terminal}}
end
end
defp validate_expr(_grammar, {expr_type, _}) do
{:error, {:bad_expr_type, expr_type}}
end
defp validate_expr(_grammar, expr), do: {:error, {:bad_expr, expr}}
end
# -- end of lib/neotomex/grammar.ex --
defmodule Concentrate.Encoder.GTFSRealtimeHelpers do
@moduledoc """
Helper functions for encoding GTFS-Realtime files.
"""
alias Concentrate.{StopTimeUpdate, TripDescriptor, VehiclePosition}
import Calendar.ISO, only: [date_to_string: 4]
@type trip_group :: {TripDescriptor.t() | nil, [VehiclePosition.t()], [StopTimeUpdate.t()]}
@doc """
Given a list of parsed data, returns a list of tuples:
{TripDescriptor.t() | nil, [VehiclePosition.t()], [StopTimeUpdate.t]}
The VehiclePositions/StopTimeUpdates will share the same trip ID.
"""
@spec group([TripDescriptor.t() | VehiclePosition.t() | StopTimeUpdate.t()]) :: [trip_group]
def group(parsed) do
# group all updates by trip ID (a vehicle position may carry a nil trip ID)
parsed
|> Enum.reduce(%{}, &group_by_trip_id/2)
|> Map.values()
|> Enum.flat_map(fn
{%TripDescriptor{} = td, [], []} ->
if TripDescriptor.schedule_relationship(td) == :CANCELED do
[{td, [], []}]
else
[]
end
{td, vps, stus} ->
stus = Enum.sort_by(stus, &StopTimeUpdate.stop_sequence/1)
[{td, vps, stus}]
end)
end
@doc """
Encodes a Date into the GTFS-Realtime format YYYYMMDD.
## Examples
iex> encode_date(nil)
nil
iex> encode_date({1970, 1, 3})
"19700103"
"""
def encode_date(nil) do
nil
end
def encode_date({year, month, day}) do
date_to_string(year, month, day, :basic)
end
@doc """
Removes nil values from a map.
## Examples
iex> drop_nil_values(%{a: 1, b: nil})
%{a: 1}
iex> drop_nil_values(%{})
nil
"""
def drop_nil_values(empty) when empty == %{} do
nil
end
def drop_nil_values(map) do
:maps.fold(
fn
_k, nil, acc -> acc
k, v, acc -> Map.put(acc, k, v)
end,
%{},
map
)
end
@doc """
Header values for a GTFS-RT feed.
"""
def feed_header do
timestamp = :erlang.system_time(:seconds)
%{
gtfs_realtime_version: "2.0",
timestamp: timestamp,
incrementality: :FULL_DATASET
}
end
@doc """
Builds a list of TripDescriptor FeedEntities.
Takes a function to turn a StopTimeUpdate struct into the GTFS-RT version.
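## Example
    # A sketch: render each StopTimeUpdate as a minimal one-field map;
    # the rendering function may also return :skip to drop an update.
    trip_update_feed_entity(groups, fn stu ->
      %{stop_sequence: Concentrate.StopTimeUpdate.stop_sequence(stu)}
    end)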
"""
def trip_update_feed_entity(groups, stop_time_update_fn, enhanced_data_fn \\ fn _ -> %{} end) do
Enum.flat_map(groups, &build_trip_update_entity(&1, stop_time_update_fn, enhanced_data_fn))
end
@doc """
Convert a Unix timestamp into a GTFS-RT StopTimeEvent.
## Examples
iex> stop_time_event(123)
%{time: 123}
iex> stop_time_event(nil)
nil
iex> stop_time_event(123, 300)
%{time: 123, uncertainty: 300}
"""
def stop_time_event(time, uncertainty \\ nil)
def stop_time_event(nil, _) do
nil
end
def stop_time_event(unix_timestamp, uncertainty)
when is_integer(uncertainty) and uncertainty > 0 do
%{
time: unix_timestamp,
uncertainty: uncertainty
}
end
def stop_time_event(unix_timestamp, _) do
%{
time: unix_timestamp
}
end
@doc """
Renders the schedule relationship field.
SCHEDULED is the default and is rendered as `nil`. Other relationships are
rendered as-is.
"""
def schedule_relationship(:SCHEDULED), do: nil
def schedule_relationship(relationship), do: relationship
defp group_by_trip_id(%TripDescriptor{} = td, map) do
if trip_id = TripDescriptor.trip_id(td) do
Map.update(map, trip_id, {td, [], []}, &add_trip_descriptor(&1, td))
else
map
end
end
defp group_by_trip_id(%VehiclePosition{} = vp, map) do
trip_id = VehiclePosition.trip_id(vp)
Map.update(map, trip_id, {nil, [vp], []}, &add_vehicle_position(&1, vp))
end
defp group_by_trip_id(%StopTimeUpdate{} = stu, map) do
trip_id = StopTimeUpdate.trip_id(stu)
Map.update(map, trip_id, {nil, [], [stu]}, &add_stop_time_update(&1, stu))
end
defp add_trip_descriptor({_td, vps, stus}, td) do
{td, vps, stus}
end
defp add_vehicle_position({td, vps, stus}, vp) do
{td, [vp | vps], stus}
end
defp add_stop_time_update({td, vps, stus}, stu) do
{td, vps, [stu | stus]}
end
defp build_trip_update_entity(
{%TripDescriptor{} = td, vps, stus},
stop_time_update_fn,
enhanced_data_fn
) do
trip_id = TripDescriptor.trip_id(td)
id = trip_id || "#{:erlang.phash2(td)}"
trip_data = %{
trip_id: trip_id,
route_id: TripDescriptor.route_id(td),
direction_id: TripDescriptor.direction_id(td),
start_time: TripDescriptor.start_time(td),
start_date: encode_date(TripDescriptor.start_date(td)),
schedule_relationship: schedule_relationship(TripDescriptor.schedule_relationship(td))
}
timestamp = TripDescriptor.timestamp(td)
trip =
trip_data
|> Map.merge(enhanced_data_fn.(td))
|> drop_nil_values()
vehicle = trip_update_vehicle(td, vps)
stop_time_update =
case stus do
[_ | _] -> render_stop_time_updates(stus, stop_time_update_fn)
[] -> nil
end
cond do
match?([_ | _], stop_time_update) ->
[
%{
id: id,
trip_update:
drop_nil_values(%{
trip: trip,
stop_time_update: stop_time_update,
vehicle: vehicle,
timestamp: timestamp
})
}
]
TripDescriptor.schedule_relationship(td) == :CANCELED ->
[
%{
id: id,
trip_update: drop_nil_values(%{trip: trip, vehicle: vehicle, timestamp: timestamp})
}
]
true ->
[]
end
end
defp build_trip_update_entity(_, _, _) do
[]
end
defp render_stop_time_updates(stus, stop_time_update_fn) do
Enum.flat_map(stus, fn stu ->
case stop_time_update_fn.(stu) do
:skip ->
[]
update ->
[update]
end
end)
end
defp trip_update_vehicle(_update, [vp | _]) do
drop_nil_values(%{
id: VehiclePosition.id(vp),
label: VehiclePosition.label(vp),
license_plate: VehiclePosition.license_plate(vp)
})
end
defp trip_update_vehicle(update, []) do
if vehicle_id = TripDescriptor.vehicle_id(update) do
%{id: vehicle_id}
else
nil
end
end
end
# -- end of lib/concentrate/encoder/gtfs_realtime_helpers.ex --
defmodule PlayfabEx.Server.PlayerDataManagement do
use Interface
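@moduledoc """
Interface to the PlayFab Server player-data-management API.
Each function delegates to the module configured under the
`PlayfabEx.Server.PlayerDataManagement` key, falling back to
`PlayfabEx.Server.Default.PlayerDataManagement`. A configuration sketch
(key taken from the `Application.get_env/2` calls below):
    config :playfab_ex, PlayfabEx.Server.PlayerDataManagement,
      PlayfabEx.Server.Default.PlayerDataManagement
"""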
@doc """
Deletes the users for the provided game. Deletes custom data, all account linkages, and statistics.
[online docs](https://api.playfab.com/documentation/server/method/DeleteUsers)
"""
@spec delete_users(map()) :: {:ok, map} | {:error, String.t}
definterface delete_users(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves a list of ranked friends of the given player for the given statistic, starting from the indicated point in the leaderboard
[online docs](https://api.playfab.com/documentation/server/method/GetFriendLeaderboard)
"""
@spec get_friend_leaderboard(map()) :: {:ok, map} | {:error, String.t}
definterface get_friend_leaderboard(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves a list of ranked users for the given statistic, starting from the indicated point in the leaderboard
[online docs](https://api.playfab.com/documentation/server/method/GetLeaderboard)
"""
@spec get_leaderboard(map()) :: {:ok, map} | {:error, String.t}
definterface get_leaderboard(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves a list of ranked users for the given statistic, centered on the currently signed-in user
[online docs](https://api.playfab.com/documentation/server/method/GetLeaderboardAroundUser)
"""
@spec get_leaderboard_around_user(map()) :: {:ok, map} | {:error, String.t}
definterface get_leaderboard_around_user(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Returns whatever info is requested in the response for the user. Note that PII (like email address, facebook id) may be returned. All parameters default to false.
[online docs](https://api.playfab.com/documentation/server/method/GetPlayerCombinedInfo)
"""
@spec get_player_combined_info(map()) :: {:ok, map} | {:error, String.t}
definterface get_player_combined_info(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the current version and values for the indicated statistics, for the local player.
[online docs](https://api.playfab.com/documentation/server/method/GetPlayerStatistics)
"""
@spec get_player_statistics(map()) :: {:ok, map} | {:error, String.t}
definterface get_player_statistics(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the information on the available versions of the specified statistic.
[online docs](https://api.playfab.com/documentation/server/method/GetPlayerStatisticVersions)
"""
@spec get_player_statistic_versions(map()) :: {:ok, map} | {:error, String.t}
definterface get_player_statistic_versions(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the title-specific custom data for the user which is readable and writable by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserData)
"""
@spec get_user_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the title-specific custom data for the user which cannot be accessed by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserInternalData)
"""
@spec get_user_internal_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_internal_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the publisher-specific custom data for the user which is readable and writable by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserPublisherData)
"""
@spec get_user_publisher_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_publisher_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the publisher-specific custom data for the user which cannot be accessed by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserPublisherInternalData)
"""
@spec get_user_publisher_internal_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_publisher_internal_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the publisher-specific custom data for the user which can only be read by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserPublisherReadOnlyData)
"""
@spec get_user_publisher_read_only_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_publisher_read_only_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Retrieves the title-specific custom data for the user which can only be read by the client
[online docs](https://api.playfab.com/documentation/server/method/GetUserReadOnlyData)
"""
@spec get_user_read_only_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_user_read_only_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the values of the specified title-specific statistics for the user
[online docs](https://api.playfab.com/documentation/server/method/UpdatePlayerStatistics)
"""
@spec update_player_statistics(map()) :: {:ok, map} | {:error, String.t}
definterface update_player_statistics(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the title-specific custom data for the user which is readable and writable by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserData)
"""
@spec update_user_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the title-specific custom data for the user which cannot be accessed by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserInternalData)
"""
@spec update_user_internal_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_internal_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the publisher-specific custom data for the user which is readable and writable by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserPublisherData)
"""
@spec update_user_publisher_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_publisher_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the publisher-specific custom data for the user which cannot be accessed by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserPublisherInternalData)
"""
@spec update_user_publisher_internal_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_publisher_internal_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the publisher-specific custom data for the user which can only be read by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserPublisherReadOnlyData)
"""
@spec update_user_publisher_read_only_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_publisher_read_only_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
@doc """
Updates the title-specific custom data for the user which can only be read by the client
[online docs](https://api.playfab.com/documentation/server/method/UpdateUserReadOnlyData)
"""
@spec update_user_read_only_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_user_read_only_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.PlayerDataManagement) || PlayfabEx.Server.Default.PlayerDataManagement)
end
# -- end of lib/server/player_data_management.ex --
defmodule Kernel.Typespec do
@moduledoc """
This is the module that converts Elixir typespecs
to Erlang typespec syntax. Every time @spec, @type
and @typep are used they proxy to the functions
in this module.
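For example, writing an attribute such as
    @type t :: atom | integer
in a module ends up, at compile time, calling into the functions here to
build the Erlang form of the type (an illustrative sketch, not the literal
expansion).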
"""
defmacro deftype(name, options // []) do
_deftype(name, true, __CALLER__, options)
end
defmacro deftypep(name) do
_deftype(name, false, __CALLER__, [])
end
defmacro defspec(spec, block) do
_defspec(:spec, __CALLER__, spec, block)
end
defmacro defcallback(spec, block) do
_defspec(:callback, __CALLER__, spec, block)
end
@doc """
Get the types defined for the given module. This function
is only available for modules being compiled. If the module
was already compiled, you need to loop its attributes
to get such information.
"""
def get_types(module) do
Module.read_attribute(module, :type) ++ Module.read_attribute(module, :opaque)
end
@doc """
Get the specs defined for the given module. This function
is only available for modules being compiled. If the module
was already compiled, you need to loop its attributes
to get such information.
"""
def get_specs(module) do
specs = :ets.tab2list(spec_table_for(module))
keys = :lists.ukeysort(1, specs)
lc { k, _ } inlist keys, do: { k, :proplists.append_values(k, specs) }
end
## Typespec conversion
# Handle unions
defp typespec({ :|, line, [_,_] } = exprs, vars, caller) do
exprs = List.reverse(collect_union(exprs))
union = lc e inlist exprs, do: typespec(e, vars, caller)
{ :type, line, :union, union }
end
# Handle binaries
defp typespec({:<<>>, line, []}, _,_) do
{:type, line, :binary, [{:integer, line, 0}, {:integer, line, 0}]}
end
defp typespec({:<<>>, line, [{:|, _, [{:_, line1, atom}, {:*, _, [{:_, line2, atom}, unit]}]}]}, _, _) when is_atom(atom) do
{:type, line, :binary, [{:integer, line1, 0}, {:integer, line2, unit}]}
end
defp typespec({:<<>>, line, [{:|, line1, [{:_, line2, atom}, base]}]}, _, _) when is_atom(atom) do
{:type, line, :binary, [{:integer, line1, base}, {:integer, line2, 0}]}
end
# Handle ranges
defp typespec({:"..", line, args}, vars, caller) do
typespec({:range, line, args}, vars, caller)
end
# Handle aliases
defp typespec({:__aliases__, _, _} = alias, vars, caller) do
atom = Macro.expand alias, caller
typespec(atom, vars, caller)
end
# Handle type operator
defp typespec({:"::", line, [var, expr] }, vars, caller) do
left = typespec(var, [elem(var,1)|vars], caller)
right = typespec(expr, vars, caller)
{ :ann_type, line, [left, right] }
end
# Handle unary ops
defp typespec({op, line, [integer]}, _, _) when op in [:+, :-] and is_integer(integer) do
{ :op, line, op, {:integer, line, integer} }
end
# Handle remote calls
defp typespec({{:., line, [remote, name]}, _, args}, vars, caller) do
remote = Macro.expand remote, caller
unless is_atom(remote), do: raise(ArgumentError, message: "Invalid remote in typespec")
remote_type({typespec(remote, vars, caller), line, typespec(name, vars, caller), args}, vars, caller)
end
# Handle tuples
defp typespec({:tuple, line, atom}, vars, caller) when is_atom(atom) do
typespec({:{}, line, []}, vars, caller)
end
defp typespec({:{}, line, []}, _, _) do
{ :type, line, :tuple, :any }
end
defp typespec({:{}, line, t}, vars, caller) when is_list(t) do
args = lc e inlist t, do: typespec(e, vars, caller)
{ :type, line, :tuple, args }
end
# Handle funs
defp typespec({:fun, line, arguments}, vars, caller) when is_list(arguments) do
args =
case List.reverse(arguments) do
[[{:do,h}]|t] -> fn_args(line, List.reverse(t), h, vars, caller)
[] -> []
_ -> [fn_args(line, arguments, vars, caller)]
end
{ :type, line, :fun, args }
end
# Handle variables or local calls
defp typespec({name, line, atom}, vars, caller) when is_atom(atom) do
if List.member?(vars, name) do
{ :var, line, name }
else
typespec({name, line, []}, vars, caller)
end
end
# Handle local calls
defp typespec({name, line, arguments}, vars, caller) do
arguments = lc arg inlist arguments, do: typespec(arg, vars, caller)
{ :type, line, name, arguments }
end
# Handle literals
defp typespec(atom, _, _) when is_atom(atom) do
{ :atom, 0, atom }
end
defp typespec(integer, _, _) when is_integer(integer) do
{ :integer, 0, integer }
end
defp typespec([], vars, caller) do
typespec({ nil, 0, [] }, vars, caller)
end
defp typespec([spec], vars, caller) do
typespec({ :list, 0, [spec] }, vars, caller)
end
defp typespec(l, _, _) when is_list(l) do
raise(ArgumentError, message: "Unexpected list #{inspect l}")
end
defp typespec(t, vars, caller) when is_tuple(t) do
args = lc e inlist tuple_to_list(t), do: typespec(e, vars, caller)
{ :type, 0, :tuple, args }
end
## Helpers
defp remote_type({remote, line, name, arguments}, vars, caller) do
arguments = lc arg inlist arguments, do: typespec(arg, vars, caller)
{ :remote_type, line, [ remote, name, arguments ] }
end
defp collect_union({ :|, _, [a, b] }), do: [b|collect_union(a)]
defp collect_union(v), do: [v]
defp fn_args(line, args, return, vars, caller) do
[fn_args(line, args, vars, caller), typespec(return, vars, caller)]
end
defp fn_args(line, [{:"...", _, _}], _vars, _caller) do
{ :type, line, :any }
end
defp fn_args(line, args, vars, caller) do
args = lc arg inlist args, do: typespec(arg, vars, caller)
{ :type, line, :product, args }
end
def _deftype({:"::", _, [name, definition]}, export, caller, opts) do
_deftype(name, definition, export, caller, opts)
end
def _deftype(name, export, caller, opts) do
_deftype(name, { :term, caller.line, nil }, export, caller, opts)
end
defp _deftype({name, _, args}, definition, export, caller, options) do
args = if is_atom(args), do: [], else: lc(arg inlist args, do: variable(arg))
vars = lc {:var, _, var} inlist args, do: var
spec = typespec(definition, vars, caller)
vars = lc ({:var, _, _} = var) inlist args, do: var
attr = if options[:opaque], do: :opaque, else: :type
export = if export do
quote do: Module.compile_type(__MODULE__, :export_type, [{name, length(vars)}])
else
nil
end
quote do
name = unquote(name)
spec = unquote(Macro.escape(spec))
vars = unquote(Macro.escape(vars))
type = { name, spec, vars }
Module.compile_type __MODULE__, unquote(attr), type
unquote(export)
{ unquote(attr), type }
end
end
defp _defspec(type, caller, {name, line, args},[{:do,return}]) do
spec = { :type, line, :fun, fn_args(line, args, return, [], caller) }
code = Macro.escape { {type, { name, length(args) }}, [spec] }
table = spec_table_for(caller.module)
quote do
code = unquote(code)
:ets.insert(unquote(table), code)
code
end
end
defp spec_table_for(module) do
table = list_to_atom Erlang.lists.concat([:s, module])
unless table == :ets.info(table, :name), do:
raise(ArgumentError, message: "cannot manage specs for #{inspect module} because it was already compiled")
table
end
defp variable({name, line, _}) do
{:var, line, name}
end
end
# -- end of lib/elixir/lib/kernel/typespec.ex --
defmodule CpuUtil do
@moduledoc """
CPU utility functions.
Functions to read and calculate CPU utilization for a given process.
NOTE: Only *nix systems supported.
"""
require Logger
@type proc_pid_stat :: %{
# process id
pid: integer(),
# filename of the executable
tcomm: binary(),
# state (R is running, S is sleeping, D is sleeping in an
# uninterruptible wait, Z is zombie, T is traced or stopped)
state: binary(),
# process id of the parent process
ppid: integer(),
# pgrp of the process
pgrp: integer(),
# session id
sid: integer(),
# tty the process uses
tty_nr: integer(),
# pgrp of the tty
tty_pgrp: integer(),
# task flags
flags: integer(),
# number of minor faults
min_flt: integer(),
# number of minor faults with child's
cmin_flt: integer(),
# number of major faults
maj_flt: integer(),
# number of major faults with child's
cmaj_flt: integer(),
# user mode jiffies
utime: integer(),
# kernel mode jiffies
stime: integer(),
# user mode jiffies with child's
cutime: integer(),
# kernel mode jiffies with child's
cstime: integer()
}
@type util_stat :: %{
stats: proc_pid_stat(),
total: integer()
}
@type util :: %{
sys: float(),
total: float(),
user: float()
}
# list of fields returned when /proc/<PID>/stat is read.
@proc_pid_stat_fields ~w[
pid tcomm state ppid pgrp sid tty_nr tty_pgrp flags min_flt
cmin_flt maj_flt cmaj_flt utime stime cutime cstime
]a
@doc """
Get the current OS PID.
## Examples
iex> CpuUtil.getpid() |> is_integer()
true
"""
@spec getpid() :: integer()
def getpid, do: List.to_integer(:os.getpid())
@doc """
Get the number of CPU Cores.
Deprecated! Use `CpuUtil.core_count/0` instead.
"""
@spec num_cores() :: {:ok, integer()} | :error
def num_cores do
Logger.warn("deprecated. use core_count/0 instead")
core_count()
end
@doc """
Get the number of CPU Cores.
## Examples
iex> {:ok, cores} = CpuUtil.core_count()
iex> is_integer(cores)
true
"""
@spec core_count() :: {:ok, integer()} | :error
def core_count do
with topology <- :erlang.system_info(:cpu_topology),
processors when is_list(processors) <- topology[:processor] do
{:ok, length(processors)}
else
_ -> :error
end
end
@doc """
Read the CPU's average load.
## Examples
iex> {float, ""} = CpuUtil.loadavg() |> Float.parse()
iex> is_float(float)
true
"""
@spec loadavg(integer()) :: binary()
def loadavg(num \\ 1) do
with {:ok, file} <- File.read("/proc/loadavg"),
list <- String.split(file, ~r/\s/, trim: true) do
list |> Enum.take(num) |> Enum.join(" ")
else
_ -> ""
end
end
@doc """
Read the OS stat data.
* Reads `/proc/stat`
* Parses the first line ('cpu')
* Converts the numbers (string) to integers
## Examples
iex> ["cpu" | numbers] = CpuUtil.stat()
iex> length(numbers) == 9 and Enum.all?(numbers, &is_integer/1)
true
"""
@spec stat() :: list() | {:error, any()}
def stat do
with {:ok, file} <- File.read("/proc/stat"), do: stat(file)
end
@doc """
Parse the data read from /proc/stat.
Extract the first line ("cpu") and convert the numbers to integers.
## Examples
iex> CpuUtil.stat("cpu 12010882 75 3879349 1731141995 200300 225 154316 115184 0")
["cpu", 12010882, 75, 3879349, 1731141995, 200300, 225, 154316, 115184, 0]
"""
@spec stat(binary) :: list() | {:error, any()}
def stat(contents) do
with list <- String.split(contents, "\n", trim: true),
[cpu | _] <- list,
[label | value] <- String.split(cpu, ~r/\s/, trim: true),
do: [label | Enum.map(value, &String.to_integer/1)]
end
@doc """
Get the total_time from the given list.
Takes the output of CpuUtil.stat/0 and returns the total time.
## Examples
iex> data = ["cpu", 12010882, 75, 3879349, 1731141995, 200300, 225, 154316, 115184, 0]
iex> CpuUtil.stat_total_time(data)
1747502326
"""
@spec stat_total_time(list()) :: integer()
def stat_total_time([_label | list]), do: Enum.sum(list)
@doc """
Get the total_time.
Return the total time (from `/proc/stat`) as an integer.
## Examples
iex> CpuUtil.total_time() |> is_integer()
true
"""
@spec total_time() :: integer()
def total_time do
with [_ | _] = list <- stat(), do: stat_total_time(list)
end
@doc """
Get the total time given the contents of "/proc/stat"
## Examples
iex> CpuUtil.total_time("cpu 12010882 75 3879349 1731141995 200300 225 154316 115184 0")
1747502326
"""
@spec total_time(binary()) :: integer() | float()
def total_time(stat) when is_binary(stat) do
stat
|> stat()
|> stat_total_time()
end
@doc """
Get the current OS <PID> stat.
* Read `/proc/<PID>/stat` (single line of space separated fields)
* Parse the fields and convert any numbers (string) to integers
Returns a map of of the fields (atom keys) per the following definition:
* pid process id
* tcomm filename of the executable
* state state (R is running, S is sleeping, D is sleeping in an
* uninterruptible wait, Z is zombie, T is traced or stopped)
* ppid process id of the parent process
* pgrp pgrp of the process
* sid session id
* tty_nr tty the process uses
* tty_pgrp pgrp of the tty
* flags task flags
* min_flt number of minor faults
* cmin_flt number of minor faults with child's
* maj_flt number of major faults
* cmaj_flt number of major faults with child's
* utime user mode jiffies
* stime kernel mode jiffies
* cutime user mode jiffies with child's
* cstime kernel mode jiffies with child's
## Examples
iex> CpuUtil.stat_pid(CpuUtil.getpid()) |> Map.keys()
~w(cmaj_flt cmin_flt cstime cutime flags maj_flt min_flt pgrp pid ppid sid state stime tcomm tty_nr tty_pgrp utime)a
iex> content = "9731 (beam.smp) S 9730 9730 9730 0 -1 4202496 13784 3143 0 0 93 11 0 0 " <>
...> "20 0 291467565 2993774592 15101 18446744073709551615 4194304 7475860 140732224047216 " <>
...> "140732224047216 140732224045552 256526653091 0 0 4224 16902 18446744073709551615 0 " <>
...> "0 17 3 0 0 0 0 0"
iex> CpuUtil.stat_pid(content)
%{
cmaj_flt: 0,
cmin_flt: 3143,
cstime: 0,
cutime: 0,
flags: 4202496,
maj_flt: 0,
min_flt: 13784,
pgrp: 9730,
pid: 9731,
ppid: 9730,
sid: 9730,
state: "S",
stime: 11,
tcomm: "(beam.smp)",
tty_nr: 0,
tty_pgrp: "-1",
utime: 93
}
"""
@spec stat_pid(integer() | binary()) :: proc_pid_stat() | {:error, any()}
def stat_pid(pid) when is_integer(pid) do
with {:ok, file} <- File.read("/proc/#{pid}/stat"), do: stat_pid(file)
end
def stat_pid(contents) when is_binary(contents) do
with list <- String.split(contents, ~r/\s/, trim: true),
list <-
Enum.map(list, fn item ->
if item =~ ~r/^\d+$/, do: String.to_integer(item), else: item
end),
do: @proc_pid_stat_fields |> Enum.zip(list) |> Enum.into(%{})
end
@doc """
Get the current OS stat.
* Read the total time from `/proc/stat`
* Read the PID stats from `/proc/<PID>/stat`
Return a map:
%{
total: integer()
stats: proc_pid_stat()
}
## Examples
iex> fields = ~w(cmaj_flt cmin_flt cstime cutime flags maj_flt min_flt pgrp pid ppid sid
...> state stime tcomm tty_nr tty_pgrp utime)a
iex> util = CpuUtil.pid_util(CpuUtil.getpid())
iex> Map.keys(util) == ~w(stats total)a and is_integer(util.total) and
...> Map.keys(util.stats) == fields
true
"""
@spec pid_util(integer) :: util_stat()
def pid_util(pid),
do: %{
total: total_time(),
stats: stat_pid(pid)
}
@doc """
Calculate CPU utilization given 2 readings.
## Algorithm
user_util = 100 * (utime_after - utime_before) / (time_total_after - time_total_before);
sys_util = 100 * (stime_after - stime_before) / (time_total_after - time_total_before);
## Usage
> pid = CpuUtil.getpid()
> {:ok, cores} = CpuUtil.core_count()
> u1 = CpuUtil.pid_util(pid)
> Process.sleep(1000)
> u2 = CpuUtil.pid_util(pid)
> CpuUtil.calc_pid_util(u1, u2, cores)
%{
user: 2.0,
sys: 0.5,
total: 2.5
}
## References
* https://stackoverflow.com/questions/1420426/how-to-calculate-the-cpu-usage-of-a-process-by-pid-in-linux-from-c/1424556
## Examples
iex> prev = %{total: 99, stats: %{utime: 20, stime: 10}}
iex> curr = %{total: 248, stats: %{utime: 29, stime: 18}}
iex> CpuUtil.calc_pid_util(prev, curr)
%{sys: 5.4, total: 11.4, user: 6.0}
iex> prev = %{total: 99, stats: %{utime: 20, stime: 10}}
iex> curr = %{total: 248, stats: %{utime: 29, stime: 18}}
iex> CpuUtil.calc_pid_util(prev, curr, 2, 2)
%{sys: 10.74, total: 22.82, user: 12.08}
"""
def calc_pid_util(prev, curr, cores \\ 1, precision \\ 1) do
try do
t_diff = curr.total - prev.total
{user_util, sys_util} = calc_user_sys_util(t_diff, curr, prev, cores)
%{
sys: Float.round(sys_util, precision),
total: Float.round(user_util + sys_util, precision),
user: Float.round(user_util, precision)
}
rescue
_e -> %{sys: 0.0, total: 0.0, user: 0.0}
end
end
defp calc_user_sys_util(0, _, _, _),
do: {0, 0}
defp calc_user_sys_util(t_diff, curr, prev, cores),
do:
{100 * (curr.stats.utime - prev.stats.utime) / t_diff * cores,
100 * (curr.stats.stime - prev.stats.stime) / t_diff * cores}
@doc """
Calculate the OS process CPU Utilization.
Similar to `CpuUtil.calc_pid_util/4` except that it takes the raw binary data
read from `{"/proc/stat", "/proc/<os_pid>/stat"}`.
## Examples
iex> prev = {"cpu 11380053 51 3665881 1638097578 194367 213 149713 110770 0",
...> "9930 (beam.smp) S 24113 9930 24113 34817 9930 4202496 189946 5826 0 0 12025 1926 " <>
...> "0 0 20 0 28 0 275236728 3164401664 42600 18446744073709551615 4194304 7475860 " <>
...> "140732561929584 140732561927920 256526653091 0 0 4224 134365702 18446744073709551615 " <>
...> "0 0 17 3 0 0 0 0 0"}
iex> curr = {"cpu 11380060 51 3665883 1638099001 194367 213 149713 110770 0",
...> "9930 (beam.smp) S 24113 9930 24113 34817 9930 4202496 189950 5826 0 0 12027 1927 " <>
...> "0 0 20 0 28 0 275236728 3164401664 42600 18446744073709551615 4194304 7475860 " <>
...> "140732561929584 140732561927920 256526653091 0 0 4224 134365702 18446744073709551615 " <>
...> "0 0 17 3 0 0 0 0 0"}
iex> CpuUtil.process_util(prev, curr)
%{sys: 0.1, total: 0.2, user: 0.1}
"""
@spec process_util({binary(), binary()}, {binary(), binary()}, keyword()) :: util()
def process_util(prev, curr, opts \\ []) when is_tuple(prev) and is_tuple(curr) do
util1 = %{total: prev |> elem(0) |> total_time(), stats: prev |> elem(1) |> stat_pid()}
util2 = %{total: curr |> elem(0) |> total_time(), stats: curr |> elem(1) |> stat_pid()}
calc_pid_util(util1, util2, opts[:cores] || 1, opts[:precision] || 1)
end
@doc """
Get the CPU utilization for the given OS pid and number of cores.
Blocks the calling process for `:time` seconds (default 1) to collect the
before and after samples.
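## Example
A minimal usage sketch (assumes a Linux host exposing `/proc`; the output
values are illustrative):
    os_pid = System.pid() |> String.to_integer()
    CpuUtil.get_cpu_util(os_pid, time: 2, cores: 2, precision: 2)
    #=> %{user: 1.25, sys: 0.5, total: 1.75}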
"""
@spec get_cpu_util(integer(), keyword()) :: util()
def get_cpu_util(pid, opts \\ []) do
util1 = pid_util(pid)
Process.sleep(Keyword.get(opts, :time, 1) * 1000)
util2 = pid_util(pid)
calc_pid_util(util1, util2, Keyword.get(opts, :cores, 1), Keyword.get(opts, :precision, 1))
end
end
|
lib/cpu_util.ex
| 0.829216
| 0.483831
|
cpu_util.ex
|
starcoder
|
defmodule LastfmArchive.Utils do
@moduledoc false
@data_dir Application.get_env(:lastfm_archive, :data_dir, "./archive_data/")
@metadata_file ".archive"
@file_io Application.get_env(:lastfm_archive, :file_io)
@doc """
Generate {from, to} daily time ranges for querying Last.fm API based on
the first and last scrobble unix timestamps.
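## Example
    # 2007-01-01T00:00:00Z and 2007-01-02T00:00:00Z, one day apart, yield two
    # full-day {from, to} ranges (computed with iso8601_to_unix/2 below):
    build_time_range({1167609600, 1167696000})
    #=> [{1167609600, 1167695999}, {1167696000, 1167782399}]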
"""
def build_time_range({from, to}) do
from = DateTime.from_unix!(from) |> DateTime.to_date()
to = DateTime.from_unix!(to) |> DateTime.to_date()
Enum.map(Date.range(from, to), &iso8601_to_unix("#{&1}T00:00:00Z", "#{&1}T23:59:59Z"))
end
def build_time_range(year, %Lastfm.Archive{} = archive) when is_integer(year) do
{from, to} = iso8601_to_unix("#{year}-01-01T00:00:00Z", "#{year}-12-31T23:59:59Z")
{registered_time, last_scrobble_time} = archive.temporal
from = if from <= registered_time, do: registered_time, else: from
to = if to >= last_scrobble_time, do: last_scrobble_time, else: to
{from, to}
end
defp iso8601_to_unix(from, to) do
{:ok, from, _} = DateTime.from_iso8601(from)
{:ok, to, _} = DateTime.from_iso8601(to)
{DateTime.to_unix(from), DateTime.to_unix(to)}
end
def year_range({from, to}) do
DateTime.from_unix!(from).year..DateTime.from_unix!(to).year
end
def data_dir(options \\ []), do: Keyword.get(options, :data_dir, @data_dir)
def user_dir(user, options \\ []), do: Path.join([data_dir(options), user])
def metadata(user, options), do: Path.join([data_dir(options), user, @metadata_file])
def display_progress(archive) do
IO.puts("Archiving #{archive.extent} scrobbles for #{archive.creator}")
end
def display_progress({from, _to}, playcount, pages) do
from_date = DateTime.from_unix!(from) |> DateTime.to_date()
IO.puts("\n")
IO.puts("#{from_date}")
IO.puts("#{playcount} scrobble(s)")
IO.puts("#{pages} page(s)")
end
def display_skip_message({from, _to}, playcount) do
from_date = DateTime.from_unix!(from) |> DateTime.to_date()
IO.puts("\n")
IO.puts("Skipping #{from_date}, previously synced: #{playcount} scrobble(s)")
end
def display_api_error_message({from, _to}, reason) do
from_date = DateTime.from_unix!(from) |> DateTime.to_date()
IO.puts("\n")
IO.puts("Last.fm API error while syncing #{from_date}: #{reason}")
end
@doc """
Read and unzip a file from the archive of a Lastfm user.
### Example
```
LastfmArchive.Utils.read "a_lastfm_user", "tsv/2007.tsv.gz"
```
"""
def read(user, filename) do
file_path = Path.join(user_dir(user, []), filename)
case @file_io.read(file_path) do
{:ok, gzip_data} ->
{:ok, gzip_data |> :zlib.gunzip()}
error ->
error
end
end
def create_tsv_dir(user) do
dir = Path.join(user_dir(user, []), "tsv")
unless @file_io.exists?(dir), do: @file_io.mkdir_p(dir)
:ok
end
end
|
lib/utils.ex
| 0.727007
| 0.590573
|
utils.ex
|
starcoder
|
defmodule Still.Utils do
@moduledoc """
Collection of utility functions.
"""
alias Still.SourceFile
@doc """
Returns the modified time of a given file. Errors if the file does not exist.
"""
def get_modified_time!(path) do
path
|> File.stat!()
|> Map.get(:mtime)
|> Timex.to_datetime()
end
@doc """
Returns the modified time of a given file as a `DateTime` struct or the
`:error` atom if the file doesn't exist.
"""
def get_modified_time(path) do
path
|> File.stat()
|> case do
{:ok, stat} ->
{:ok,
stat
|> Map.get(:mtime)
|> Timex.to_datetime()}
_ ->
:error
end
end
@doc """
Delegates the call to the current `Still.Preprocessor.Image.Adapter`.
"""
def get_image_info(file) do
Still.Preprocessor.Image.adapter().get_image_info(file)
end
@doc """
Returns the current input path for a given file, prepending it with the
site's `input` directory. See `get_input_path/0`.
"""
def get_input_path(%SourceFile{input_file: file}),
do: Path.join(get_input_path(), file)
def get_input_path(file), do: Path.join(get_input_path(), file)
@doc """
Returns the absolute path configured as the site's entrypoint.
This is the value set by
config :still, input: "path/to/site"
"""
def get_input_path() do
config!(:input)
|> Path.expand()
end
@doc """
Returns the current output path for a given file, prepending it with the
site's `output` directory. See `get_output_path/0`.
"""
def get_output_path(%SourceFile{output_file: file}), do: Path.join(get_output_path(), file)
def get_output_path(file), do: Path.join(get_output_path(), file)
@doc """
Returns the absolute path configured as the site's output destination.
This is the value set by
config :still, output: "path/to/site"
"""
def get_output_path() do
config!(:output)
|> Path.expand()
end
@doc """
Receives an absolute path and converts it to relative by trimming the site's
entrypoint directory.
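## Example
    # Sketch, assuming `config :still, input: "/home/me/site"`:
    get_relative_input_path("/home/me/site/posts/hello.md")
    #=> "posts/hello.md"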
"""
def get_relative_input_path(full_path) do
full_path
|> String.replace(config!(:input), "")
|> String.trim_leading("/")
end
@doc """
Returns the site's base URL.
"""
def get_base_url() do
config!(:base_url)
end
@doc """
Recursively cleans the site's output directory.
"""
def rm_output_dir() do
get_output_path()
|> File.rm_rf()
end
@doc """
Creates the output directory.
"""
def mk_output_dir() do
get_output_path()
|> File.mkdir_p!()
end
@doc """
Creates the directory by the given path, relative to the output directory.
"""
def mk_output_dir(path) do
get_output_path(path)
|> File.mkdir_p!()
end
@doc """
Recursively removes all files from the site's output directory.
"""
def clean_output_dir() do
  # `File.rm_rf/1` does not expand wildcards, so expand them first.
  get_output_path()
  |> Path.join("*")
  |> Path.wildcard()
  |> Enum.each(&File.rm_rf/1)
end
@doc """
Recursively removes all files from the given path, relative to the output
directory.
"""
def clean_output_dir(path) do
  path
  |> get_output_path()
  |> Path.join("*")
  |> Path.wildcard()
  |> Enum.each(&File.rm_rf/1)
end
@doc """
Returns the value configured for `:still` by the given key. Errors if it
doesn't exist.
"""
def config!(key), do: Application.fetch_env!(:still, key)
@doc """
Returns the value configured for `:still` by the given key. Returns the
provided default if it doesn't exist.
"""
def config(key, default), do: Application.get_env(:still, key, default)
end
|
lib/still/utils.ex
| 0.877424
| 0.432003
|
utils.ex
|
starcoder
|
defmodule Playwright.Helpers.Serialization do
@moduledoc false
import Playwright.Extra.Map
require Logger
def deserialize(value) when is_map(value) do
case value do
%{a: array} ->
Enum.map(array, fn item ->
deserialize(item)
end)
%{b: boolean} ->
boolean
%{n: number} ->
number
%{o: object} ->
Enum.map(object, fn item ->
{item.k, deserialize(item.v)}
end)
|> Enum.into(%{})
|> deep_atomize_keys()
%{s: string} ->
string
%{v: "null"} ->
nil
%{v: "undefined"} ->
nil
end
end
def deserialize(value) when is_list(value) do
Enum.map(value, &deserialize(&1))
end
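# Shape of the result produced by `serialize/1` (illustrative sketch):
#
#   serialize(%{count: 3})
#   #=> %{value: %{o: [%{k: :count, v: %{n: 3}}]}, handles: []}
#
# `ElementHandle`/`JSHandle` values are replaced by `%{h: index}` markers and
# collected into the `handles` list.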
def serialize(arg) do
{value, handles} = serialize(arg, [], 0)
%{value: deep_atomize_keys(value), handles: handles}
end
def serialize(_value, _handles, depth) when depth > 100 do
raise ArgumentError, message: "Maximum argument depth exceeded."
end
# NOTE: we may want to send `undefined` instead of `null` here
# (or, in certain cases)
def serialize(nil, handles, _depth) do
{%{v: "null"}, handles}
end
def serialize(%Playwright.ElementHandle{} = value, handles, _depth) do
index = length(handles)
{%{h: index}, handles ++ [%{guid: value.guid}]}
end
def serialize(%Playwright.JSHandle{} = value, handles, _depth) do
index = length(handles)
{%{h: index}, handles ++ [%{guid: value.guid}]}
end
def serialize(value, handles, _depth) when is_float(value) do
  Logger.error("not implemented: `serialize` for float: #{inspect(value)}")
  # Fall back to `undefined` so callers still receive a well-formed result.
  {%{v: "undefined"}, handles}
end
def serialize(value, handles, _depth) when is_integer(value) do
{%{n: value}, handles}
end
def serialize(%DateTime{} = value, handles, _depth) do
  Logger.error("not implemented: `serialize` for datetime: #{inspect(value)}")
  # Fall back to `undefined` so callers still receive a well-formed result.
  {%{v: "undefined"}, handles}
end
def serialize(value, handles, _depth) when is_boolean(value) do
{%{b: value}, handles}
end
def serialize(value, handles, _depth) when is_binary(value) do
{%{s: value}, handles}
end
def serialize(value, handles, depth) when is_list(value) do
{_, result} =
Enum.map_reduce(value, %{handles: handles, items: []}, fn e, acc ->
{value, handles} = serialize(e, acc.handles, depth + 1)
{
{value, handles},
%{handles: handles, items: acc.items ++ [value]}
}
end)
{%{a: result.items}, result.handles}
end
def serialize(value, handles, depth) when is_map(value) do
{_, result} =
Enum.map_reduce(value, %{handles: handles, objects: []}, fn {k, v}, acc ->
{value, handles} = serialize(v, acc.handles, depth + 1)
{
{%{k: k, v: value}, handles},
%{handles: handles, objects: acc.objects ++ [%{k: k, v: value}]}
}
end)
{%{o: result.objects}, result.handles}
end
def serialize(_other, handles, _depth) do
{%{v: "undefined"}, handles}
end
end
|
lib/playwright/helpers/serialization.ex
| 0.808823
| 0.536981
|
serialization.ex
|
starcoder
|
defmodule Openflow.Action.Output do
@moduledoc """
Action for sends packets out `port_number`.
"""
defstruct port_number: 0,
max_len: :no_buffer
alias __MODULE__
@type port_no ::
0..0xFFFFFFFF
| :max
| :in_port
| :table
| :normal
| :flood
| :all
| :controller
| :local
| :any
@type max_len :: 0..0xFFFFFFFF | :no_buffer | :max
@type t :: %Output{port_number: port_no(), max_len: max_len()}
@spec ofpat() :: 0
def ofpat, do: 0
@doc """
Create a new output action structure
## Options:
- `port_number`: Output port
- `max_len`: Max length to send to controller
## Example
```elixir
iex> %Output{port_number: 1, max_len: :no_buffer} = Output.new(1)
```
"""
@spec new([port_number: port_no(), max_len: max_len()] | port_no()) :: Output.t()
def new(options \\ [])
@spec new(port :: port_no()) :: Output.t()
def new(port) when is_atom(port) or is_integer(port),
do: new(port_number: port)
@spec new(options :: [port_number: port_no(), max_len: max_len()]) :: Output.t()
def new(options) when is_list(options) do
port_no = options[:port_number] || raise "port_number must be specified"
max_len = options[:max_len] || :no_buffer
%Output{port_number: port_no, max_len: max_len}
end
@spec to_binary(Output.t()) :: <<_::16, _::_*8>>
def to_binary(%Output{port_number: port_no, max_len: max_len}) do
port_no_int = Openflow.Utils.get_enum(port_no, :openflow13_port_no)
max_len = Openflow.Utils.get_enum(max_len, :controller_max_len)
<<0::16, 16::16, port_no_int::32, max_len::16, 0::size(6)-unit(8)>>
end
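# Wire-format sketch, assuming the standard OpenFlow 1.3 action header where
# the second 16-bit field is the total action length (16 bytes for output):
#
#   <<type::16, length::16, port::32, max_len::16, pad::size(6)-unit(8)>>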
@spec read(<<_::16, _::_*8>>) :: Output.t()
def read(<<0::16, 16::16, port_no_int::32, max_len::16, _pad::size(6)-unit(8)>>) do
port_no = Openflow.Utils.get_enum(port_no_int, :openflow13_port_no)
max_len = Openflow.Utils.get_enum(max_len, :controller_max_len)
%Output{port_number: port_no, max_len: max_len}
end
end
|
lib/openflow/actions/output.ex
| 0.86031
| 0.572215
|
output.ex
|
starcoder
|
defmodule Request.Validator.Plug do
alias Plug.Conn
alias Request.Validator
alias Request.Validator.Rules
import Plug.Conn
@doc ~S"""
Init the Request.Validation.Plug with an optional error callback
and handlers with their corresponding request validator module.
```elixir
plug Request.Validator.Plug,
register: App.Requests.RegisterRequest,
on_error: fn conn, errors -> json_resp(conn, "Handle your errors: #{inspect errors}") end
```
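The configured validator is expected to export `rules/1` returning the rules
for the request params. A minimal sketch (the rule names are illustrative and
must exist in `Request.Validator.Rules`):
```elixir
defmodule App.Requests.RegisterRequest do
  def rules(_conn) do
    [email: [:required], name: [:required]]
  end
end
```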
"""
def init(opts) when is_list(opts) do
  opts
  |> Keyword.put_new(:on_error, &Validator.Plug.on_error/2)
end
def init(%{} = opts) do
opts
|> Map.put_new(:on_error, &Validator.Plug.on_error/2)
end
@doc ~S"""
The default callback to be invoked when there is a param that fails validation.
"""
def on_error(conn, errors) do
json_resp(conn, 422, %{message: "Unprocessable entity", errors: errors})
end
@doc ~S"""
Performs validations on `conn.params`
If all validations are successful returns the connection struct
Otherwise returns an error map in the following structure: `%{param: ["some error", ...]}`
Will call the given `on_error` callback in case some validation failed
"""
def call(conn, opts) do
  action = Map.get(conn.private, :phoenix_action)
  case get_validator(opts, action) do
    nil -> conn
    validator -> validate(Conn.fetch_query_params(conn), validator, opts[:on_error])
  end
end
defp get_validator(opt, key) when is_map(opt), do: Map.get(opt, key)
defp get_validator(opt, key) when is_list(opt), do: Keyword.get(opt, key)
defp validate(conn, module, on_error) do
rules = if function_exported?(module, :rules, 1), do: module.rules(conn), else: module
errors = collect_errors(conn, rules)
if Enum.empty?(errors) do
conn
else
on_error.(conn, errors) |> halt
end
end
defp collect_errors(conn, validations) do
Enum.reduce(validations, %{}, errors_collector(conn))
end
defp errors_collector(conn) do
fn {field, vf}, acc ->
value = Map.get(conn.params, Atom.to_string(field))
case run_rules(field, vf, value) do
{:error, rules} -> Map.put(acc, field, rules)
_ -> acc
end
end
end
defp format_rule({method, opts}), do: {method, opts}
defp format_rule(rule), do: {rule, nil}
defp run_rules(field, rules, value) do
  results =
    rules
    |> Enum.map(fn rule ->
      {method, opts} = format_rule(rule)
      if function_exported?(Rules, method, 3) do
        result = apply(Rules, method, [value, opts, field])
        if is_binary(result), do: result, else: nil
      else
        raise ArgumentError, message: "invalid validation rule [#{method}] provided"
      end
    end)
    |> Enum.reject(&is_nil/1)
  if Enum.empty?(results), do: nil, else: {:error, results}
end
defp json_resp(conn, status, body) do
conn
|> put_resp_header("content-type", "application/json")
|> send_resp(status, json_library().encode_to_iodata!(body))
end
defp json_library do
Application.get_env(:request_validator, :json_library, Jason)
end
end
|
lib/plug.ex
| 0.743447
| 0.506408
|
plug.ex
|
starcoder
|
defmodule Phoenix.View do
@moduledoc """
Defines the view layer of a Phoenix application.
The view layer contains conveniences for rendering templates,
including support for layouts and encoders per format.
## Examples
Phoenix defines the view template at `lib/your_app_web.ex`:
defmodule YourAppWeb do
# ...
def view do
quote do
use Phoenix.View, root: "lib/your_app_web/templates", namespace: YourAppWeb
# Import convenience functions from controllers
import Phoenix.Controller,
only: [get_flash: 1, get_flash: 2, view_module: 1, view_template: 1]
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
import YourAppWeb.ErrorHelpers
import YourAppWeb.Gettext
# Alias the Helpers module as Routes
alias YourAppWeb.Router.Helpers, as: Routes
end
end
# ...
end
You can use the definition above to define any view in your application:
defmodule YourApp.UserView do
use YourAppWeb, :view
end
Because we have defined the template root to be "lib/your_app_web/templates",
`Phoenix.View` will automatically load all templates at "your_app_web/templates/user"
and include them in the `YourApp.UserView`. For example, imagine we have the
template:
# your_app_web/templates/user/index.html.heex
Hello <%= @name %>
The `.heex` extension maps to a template engine which tells Phoenix how
to compile the code in the file into Elixir source code. After it is
compiled, the template can be rendered as:
Phoenix.View.render_to_string(YourApp.UserView, "index.html", name: "<NAME>")
#=> "Hello <NAME>"
## Differences to `Phoenix.LiveView`
Traditional web applications, that rely on a request/response life cycle,
have been typically organized under the Model-View-Controller pattern.
In this case, the Controller is responsible for organizing interacting
with the model and passing all relevant information to the View for
rendering. `Phoenix.Controller` and `Phoenix.View` play those roles
respectively.
`Phoenix.LiveView` introduces a declarative model where the controller
and the view are kept side by side. This empowers `Phoenix.LiveView`
to provide realtime and interactive features under a stateful connection.
In other words, you may consider that `Phoenix.LiveView` abridges both
`Phoenix.Controller` and `Phoenix.View` responsibilities. Developers
do not generally use `Phoenix.View` from their live views, but LiveView
does use `Phoenix.View` and its features behind the scenes.
## Rendering and formats
The main responsibility of a view is to render a template.
A template has a name, which also contains a format. For example,
in the previous section we have rendered the "index.html" template:
Phoenix.View.render_to_string(YourApp.UserView, "index.html", name: "<NAME>")
#=> "Hello <NAME>"
While we got a string at the end, that's not actually what our templates
render. Let's take a deeper look:
Phoenix.View.render(YourApp.UserView, "index.html", name: "<NAME>")
#=> ...
This inner representation allows us to separate how templates render and
how they are encoded. For example, if you want to render JSON data, we
could do so by adding a "show.json" entry to `render/2` in our view:
defmodule YourApp.UserView do
use YourApp.View
def render("show.json", %{user: user}) do
%{name: user.name, address: user.address}
end
end
Notice that in order to render JSON data, we don't need to explicitly
return a JSON string! Instead, we just return data that is encodable to
JSON. Now, when we call:
Phoenix.View.render_to_string(YourApp.UserView, "user.json", user: %User{...})
Because the template has the `.json` extension, Phoenix knows how to
encode the map returned for the "user.json" template into an actual
JSON payload to be sent over the wire.
Phoenix ships with some template engines and format encoders, which
can be further configured in the Phoenix application. You can read
more about format encoders in `Phoenix.Template` documentation.
"""
alias Phoenix.Template
@doc """
When used, defines the current module as a main view module.
## Options
* `:root` - the template root to find templates
* `:path` - the optional path to search for templates within the `:root`.
Defaults to the underscored view module name. A blank string may
be provided to use the `:root` path directly as the template lookup path
* `:namespace` - the namespace to consider when calculating view paths
* `:pattern` - the wildcard pattern to apply to the root
when finding templates. Default `"*"`
The `:root` option is required while the `:namespace` defaults to the
first nesting in the module name. For instance, both `MyApp.UserView`
and `MyApp.Admin.UserView` have namespace `MyApp`.
The `:namespace` and `:path` options are used to calculate template
lookup paths. For example, if you are in `MyApp.UserView` and the
namespace is `MyApp`, templates are expected at `Path.join(root, "user")`.
On the other hand, if the view is `MyApp.Admin.UserView`,
the path will be `Path.join(root, "admin/user")` and so on. For
explicit root path locations, the `:path` option can be provided instead.
The `:root` and `:path` are joined to form the final lookup path.
A blank string may be provided to use the `:root` path directly as the
template lookup path.
Setting the namespace to `MyApp.Admin` in the second example will force
the template to also be looked up at `Path.join(root, "user")`.
"""
defmacro __using__(opts) do
opts =
if Macro.quoted_literal?(opts) do
Macro.prewalk(opts, &expand_alias(&1, __CALLER__))
else
opts
end
quote do
import Phoenix.View
use Phoenix.Template, Phoenix.View.__template_options__(__MODULE__, unquote(opts))
@before_compile Phoenix.View
@doc """
Renders the given template locally.
"""
def render(template, assigns \\ %{})
def render(module, template) when is_atom(module) do
Phoenix.View.render(module, template, %{})
end
def render(template, _assigns) when not is_binary(template) do
raise ArgumentError, "render/2 expects template to be a string, got: #{inspect(template)}"
end
def render(template, assigns) when not is_map(assigns) do
render(template, Enum.into(assigns, %{}))
end
@doc "The resource name, as an atom, for this view"
def __resource__, do: @view_resource
end
end
defp expand_alias({:__aliases__, _, _} = alias, env),
do: Macro.expand(alias, %{env | function: {:init, 1}})
defp expand_alias(other, _env), do: other
@doc false
defmacro __before_compile__(_env) do
# We are using @anno because we don't want warnings coming from
# render/2 to be reported in case the user has defined a catch-all
# render/2 clause.
quote generated: true do
# Catch-all clause for rendering.
def render(template, assigns) do
render_template(template, assigns)
end
end
end
@doc """
Renders the given layout passing the given `do/end` block
as `@inner_content`.
This can be useful to implement nested layouts. For example,
imagine you have an application layout like this:
# layout/app.html.heex
<html>
<head>
<title>Title</title>
</head>
<body>
<div class="menu">...</div>
<%= @inner_content %>
</body>
This layout is used by many parts of your application. However,
there is a subsection of your application that wants to also add
a sidebar. Let's call it "blog.html". You can build on top of the
existing layout in two steps. First, define the blog layout:
# layout/blog.html.heex
<%= render_layout LayoutView, "app.html", assigns do %>
<div class="sidebar">...</div>
<%= @inner_content %>
<% end %>
And now you can simply use it from your controller:
plug :put_layout, "blog.html"
"""
def render_layout(module, template, assigns, do: block) do
assigns =
assigns
|> Map.new()
|> Map.put(:inner_content, block)
module.render(template, assigns)
end
@doc """
Renders a template.
It expects the view module, the template as a string, and a
set of assigns.
Notice that this function returns the inner representation of a
template. If you want the encoded template as a result, use
`render_to_iodata/3` instead.
## Examples
Phoenix.View.render(YourApp.UserView, "index.html", name: "<NAME>")
#=> {:safe, "Hello <NAME>"}
## Assigns
Assigns are meant to be user data that will be available in templates.
However, there are keys under assigns that are specially handled by
Phoenix, they are:
* `:layout` - tells Phoenix to wrap the rendered result in the
given layout. See next section
## Layouts
Templates can be rendered within other templates using the `:layout`
option. `:layout` accepts a tuple of the form
`{LayoutModule, "template.extension"}`.
The template that goes inside the layout will be placed in the `@inner_content`
assign:
<%= @inner_content %>
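For example (module and assign names follow the examples above):
    Phoenix.View.render(YourApp.UserView, "index.html",
      layout: {YourApp.LayoutView, "app.html"},
      name: "friend"
    )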
"""
def render(module, template, assigns)
def render(module, template, assigns) do
assigns
|> Map.new()
|> Map.pop(:layout, false)
|> render_within(module, template)
end
defp render_within({false, assigns}, module, template) do
module.render(template, assigns)
end
defp render_within({{layout_mod, layout_tpl}, assigns}, module, template)
when is_atom(layout_mod) and is_binary(layout_tpl) do
content = module.render(template, assigns)
assigns = Map.put(assigns, :inner_content, content)
layout_mod.render(layout_tpl, assigns)
end
defp render_within({layout, _assigns}, _module, _template) do
raise ArgumentError, """
invalid value for reserved key :layout in View.render/3 assigns
:layout accepts a tuple of the form {LayoutModule, "template.extension"}
got: #{inspect(layout)}
"""
end
@doc ~S'''
Renders a template only if it exists.
> Note: Using this functionality has been discouraged in
> recent Phoenix versions, see the "Alternatives" section
> below.
This function works the same as `render/3`, but returns
`nil` instead of raising. This is often used with
`Phoenix.Controller.view_module/1` and `Phoenix.Controller.view_template/1`,
which must be imported into your views. See the "Examples"
section below.
## Alternatives
This function is discouraged. If you need to render something
conditionally, the simplest way is to check for an optional
function in your views.
Consider the case where the application has a sidebar in its
layout and it wants certain views to render additional buttons
in the sidebar. Inside your sidebar, you could do:
<div class="sidebar">
<%= if function_exported?(view_module(@conn), :sidebar_additions, 1) do %>
<%= view_module(@conn).sidebar_additions(assigns) %>
<% end %>
</div>
If you are using Phoenix.LiveView, you could do similar by
accessing the view under `@socket`:
<div class="sidebar">
<%= if function_exported?(@socket.view, :sidebar_additions, 1) do %>
<%= @socket.view.sidebar_additions(assigns) %>
<% end %>
</div>
Then, in your view or live view, you do:
def sidebar_additions(assigns) do
  ~H\"""
  ...my additional buttons...
  \"""
end
## Using render_existing
Consider the case where the application wants to allow entries
to be added to a sidebar. This feature could be achieved with:
<%= render_existing view_module(@conn), "sidebar_additions.html", assigns %>
Then the module under `view_module(@conn)` can decide to provide
the sidebar additions with either a precompiled template, or by implementing
the function directly, i.e.:
def render("sidebar_additions.html", _assigns) do
~H"""
...my additional buttons...
"""
end
To use a precompiled template, create a `sidebar_additions.html.eex` file in
the `templates` directory for the corresponding view you want it to
render for. For example, for the `UserView`, create the
`sidebar_additions.html.eex` file at `your_app_web/templates/user/`.
'''
@deprecated "Use function_exported?/3 instead"
def render_existing(module, template, assigns \\ []) do
assigns = assigns |> Map.new() |> Map.put(:__phx_render_existing__, {module, template})
render(module, template, assigns)
end
@doc """
Renders a collection.
A collection is any enumerable of structs. This function
returns the rendered collection in a list:
render_many users, UserView, "show.html"
is roughly equivalent to:
Enum.map(users, fn user ->
render(UserView, "show.html", user: user)
end)
The underlying user is passed to the view and template as `:user`,
which is inferred from the view name. The name of the key
in assigns can be customized with the `:as` option:
render_many users, UserView, "show.html", as: :data
is roughly equivalent to:
Enum.map(users, fn user ->
render(UserView, "show.html", data: user)
end)
"""
def render_many(collection, view, template, assigns \\ %{}) do
assigns = Map.new(assigns)
resource_name = get_resource_name(assigns, view)
Enum.map(collection, fn resource ->
render(view, template, Map.put(assigns, resource_name, resource))
end)
end
@doc """
Renders a single item if not nil.
The following:
render_one user, UserView, "show.html"
is roughly equivalent to:
if user != nil do
render(UserView, "show.html", user: user)
end
The underlying user is passed to the view and template as
`:user`, which is inflected from the view name. The name
of the key in assigns can be customized with the `:as` option:
render_one user, UserView, "show.html", as: :data
is roughly equivalent to:
if user != nil do
render(UserView, "show.html", data: user)
end
"""
def render_one(resource, view, template, assigns \\ %{})
def render_one(nil, _view, _template, _assigns), do: nil
def render_one(resource, view, template, assigns) do
assigns = Map.new(assigns)
render(view, template, assign_resource(assigns, view, resource))
end
@compile {:inline, [get_resource_name: 2]}
defp get_resource_name(assigns, view) do
case assigns do
%{as: as} -> as
_ -> view.__resource__
end
end
defp assign_resource(assigns, view, resource) do
Map.put(assigns, get_resource_name(assigns, view), resource)
end
@doc """
Renders the template and returns iodata.
"""
def render_to_iodata(module, template, assign) do
render(module, template, assign) |> encode(template)
end
@doc """
Renders the template and returns a string.
"""
def render_to_string(module, template, assign) do
render_to_iodata(module, template, assign) |> IO.iodata_to_binary()
end
defp encode(content, template) do
if encoder = Template.format_encoder(template) do
encoder.encode_to_iodata!(content)
else
content
end
end
@doc false
def __template_options__(module, opts) do
if Module.get_attribute(module, :view_resource) do
raise ArgumentError,
"use Phoenix.View is being called twice in the module #{module}. " <>
"Make sure to call it only once per module"
else
view_resource = String.to_atom(Phoenix.Template.resource_name(module, "View"))
Module.put_attribute(module, :view_resource, view_resource)
end
root = opts[:root] || raise(ArgumentError, "expected :root to be given as an option")
path = opts[:path]
namespace =
if given = opts[:namespace] do
given
else
module
|> Module.split()
|> Enum.take(1)
|> Module.concat()
end
root_path =
Path.join(root, path || Template.module_to_template_root(module, namespace, "View"))
[root: root_path] ++ Keyword.take(opts, [:pattern, :template_engines])
end
end
|
lib/phoenix/view.ex
| 0.915257
| 0.516839
|
view.ex
|
starcoder
|
defmodule Ueberauth.Strategy.EVESSO.OAuth do
@moduledoc """
Implements OAuth2 for EVE SSO v2 with JWT.
Include your `client_id` and `secret_key` in your config:
```elixir
config :ueberauth, Ueberauth.Strategy.EVESSO.OAuth,
client_id: System.get_env("EVE_SSO_CLIENT_ID"),
client_secret: System.get_env("EVE_SSO_CLIENT_SECRET")
```
See the [EVE Developer Page](https://developers.eveonline.com) for more details on obtaining a client id.
"""
use OAuth2.Strategy
@defaults [
strategy: __MODULE__,
site: "https://esi.evetech.net",
authorize_url: "https://login.eveonline.com/v2/oauth/authorize",
token_url: "https://login.eveonline.com/v2/oauth/token"
]
@doc """
Construct a client for requests to ESI
Optionally include any OAuth2 options here to be merged with the defaults.
Ueberauth.Strategy.EVESSO.Oauth.client(redirect_uri: "http://localhost:4000/auth/sso/callback")
This will be set up automatically for you in `Ueberauth.Strategy.EVESSO`.
These options are only useful for usage outside the normal callback phase of Ueberauth.
"""
def client(opts \\ []) do
config =
:ueberauth
|> Application.fetch_env!(Ueberauth.Strategy.EVESSO.OAuth)
|> check_config_key_exists(:client_id)
|> check_config_key_exists(:client_secret)
client_opts =
@defaults
|> Keyword.merge(config)
|> Keyword.merge(opts)
OAuth2.Client.new(client_opts)
end
@doc """
Provides the authorize url for the request phase of Ueberauth.
This will usually not have to be called directly.
"""
def authorize_url!(params \\ [], opts \\ []) do
opts
|> client
|> OAuth2.Client.authorize_url!(params)
end
@doc """
Perform an authorized GET request to `url` using the `token`.
Url can be either relative to the `site` or absolute.
"""
def get(token, url, headers \\ [], opts \\ []) do
[token: token]
|> client()
|> put_param("client_secret", client().client_secret)
|> OAuth2.Client.get(url, headers, opts)
end
@doc """
Verify a token with ESI and prime the Auth cache. Will return token owner details
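## Example
    # Sketch, assuming `token` is a valid access token string:
    {:ok, owner} = verify(token)
    # `owner` holds the token owner's character details returned by ESI.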
"""
def verify(token) do
result = __MODULE__.get(token, "/verify")
case result do
{:ok, %OAuth2.Response{body: body, headers: _headers, status_code: 200}} -> {:ok, body}
_ -> {:error, {:verification_error, result}}
end
end
@doc """
Retrieve character details and portrait urls for the owner of a token
"""
def subject(token, id) do
with {:ok, %OAuth2.Response{body: char_body, headers: _headers, status_code: 200}} <-
__MODULE__.get(token, "/v4/characters/#{id}"),
{:ok, %OAuth2.Response{body: pict_body, headers: _headers, status_code: 200}} <-
__MODULE__.get(token, "/v2/characters/#{id}/portrait/") do
{:ok, Map.merge(char_body, %{"portrait" => pict_body, "character_id" => id})}
else
err -> {:error, err}
end
end
def get_token!(params \\ [], options \\ []) do
headers = Keyword.get(options, :headers, [])
options = Keyword.get(options, :options, [])
client_options = Keyword.get(options, :client_options, [])
client = OAuth2.Client.get_token!(client(client_options), params, headers, options)
client.token
end
@doc """
Get a new access token using a `refresh_token`. Will raise an error if the refresh fails
"""
def refresh_token!(refresh_token) do
  client(strategy: OAuth2.Strategy.Refresh)
  |> put_param("refresh_token", refresh_token)
  |> put_header("Accept", "application/json")
  |> put_header("Host", "login.eveonline.com")
  |> OAuth2.Client.get_token!()
end
@doc """
Get a new access token using a `refresh_token`
"""
def refresh_token(refresh_token) do
  client(strategy: OAuth2.Strategy.Refresh)
  |> put_param("refresh_token", refresh_token)
  |> put_header("Accept", "application/json")
  |> put_header("Host", "login.eveonline.com")
  |> OAuth2.Client.get_token()
end
# Strategy callbacks
def authorize_url(client, params) do
OAuth2.Strategy.AuthCode.authorize_url(client, params)
end
def get_token(client, params, headers) do
client
|> put_param("client_secret", client().client_secret)
|> put_header("Accept", "application/json")
|> OAuth2.Strategy.AuthCode.get_token(params, headers)
end
defp check_config_key_exists(config, key) when is_list(config) do
unless Keyword.has_key?(config, key) do
raise "#{inspect(key)} missing from config :ueberauth, Ueberauth.Strategy.EVESSO"
end
config
end
defp check_config_key_exists(_, _) do
raise "Config :ueberauth, Ueberauth.Strategy.EVESSO is not a keyword list, as expected"
end
end
|
lib/ueberauth/strategy/evesso/oauth.ex
| 0.769773
| 0.67173
|
oauth.ex
|
starcoder
|
defmodule Coinbase.Pro.REST.Request do
@moduledoc """
Module responsible for wrapping the logic of the HTTP requests
issued to the Coinbase Pro API.
## Example
```elixir
alias Coinbase.Pro.REST.{Context,Request}
# Obtain these values from Coinbase
context = %Context{key: "...", secret: "...", passphrase: "..."}
{:ok, response} = Request.get(context, "/orders?limit=10")
```
"""
alias Coinbase.Pro.REST.{Context, Response, Signature}
@doc """
Issues a signed GET request to the Coinbase Pro REST API.
On success it returns `{:ok, response}` where response is
a `Response` struct.
On error it returns either `{:error, {:http, reason}}` when
underlying HTTP call has failed or `{:error, {:code, code, body}}`
when the HTTP call succeeded but it returned an unexpected status
code.
"""
@spec get(Context.t(), Tesla.Env.url(), [Tesla.option()]) ::
{:ok, Response.t()}
| {:error, {:http, any}}
| {:error, {:code, pos_integer, any}}
def get(context, path, opts \\ []) do
client(context)
|> Tesla.get(path, opts)
|> make_response()
end
@doc """
Works similarly to `get/3` but issues multiple requests
to handle pagination and fetch the whole collection.
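## Example
    # Sketch: follows the `cb-after` pagination cursor until the last page.
    {:ok, response} = Request.get_all(context, "/orders?limit=100")
    # response.body now holds all pages concatenated.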
"""
@spec get_all(Context.t(), Tesla.Env.url(), [Tesla.option()]) ::
{:ok, Response.t()}
| {:error, {:http, any}}
| {:error, {:code, pos_integer, any}}
def get_all(context, path, opts \\ []) do
do_get_all(context, path, opts, nil, [])
end
defp do_get_all(context, path, opts, pagination_after, acc) do
case get(context, overwrite_query(path, "after", pagination_after), opts) do
{:ok, %Response{body: body, after: nil} = response} ->
{:ok, %Response{response | body: acc ++ body}}
{:ok, %Response{body: body, after: pagination_after}} ->
do_get_all(context, path, opts, pagination_after, acc ++ body)
{:error, reason} ->
{:error, reason}
end
end
defp overwrite_query(path, param, value) do
uri = URI.parse(path)
new_query =
case uri.query do
nil ->
  case value do
    nil ->
      # No existing query and nothing to set: leave the query empty.
      nil
    value ->
      %{param => value}
      |> URI.encode_query()
  end
query ->
case value do
nil ->
query
|> URI.decode_query()
|> Map.delete(param)
|> URI.encode_query()
value ->
query
|> URI.decode_query()
|> Map.put(param, value)
|> URI.encode_query()
end
end
uri
|> Map.put(:query, new_query)
|> URI.to_string()
end
@doc """
Issues a signed POST request to the Coinbase Pro REST API.
On success it returns `{:ok, response}` where response is
a `Response` struct.
On error it returns either `{:error, {:http, reason}}` when
underlying HTTP call has failed or `{:error, {:code, code, body}}`
when the HTTP call succeeded but it returned an unexpected status
code.
"""
@spec post(Context.t(), Tesla.Env.url(), Tesla.Env.body(), [Tesla.option()]) ::
{:ok, Response.t()}
| {:error, {:http, any}}
| {:error, {:code, pos_integer, any}}
def post(context, path, body, opts \\ []) do
client(context)
|> Tesla.post(path, body, opts)
|> make_response()
end
defp make_response(response) do
case response do
{:ok, %Tesla.Env{status: 200, body: body, headers: headers}} ->
pagination_after =
case headers |> List.keyfind("cb-after", 0) do
{"cb-after", value} ->
value
nil ->
nil
end
pagination_before =
case headers |> List.keyfind("cb-before", 0) do
{"cb-before", value} ->
value
nil ->
nil
end
{:ok, %Response{body: body, after: pagination_after, before: pagination_before}}
{:ok, %Tesla.Env{status: status, body: body}} ->
{:error, {:code, status, body}}
{:error, reason} ->
{:error, {:http, reason}}
end
end
defp client(context) do
Tesla.client([
{Tesla.Middleware.BaseUrl,
Application.get_env(:coinbasepro_rest, :base_url, "https://api.pro.coinbase.com")},
{Tesla.Middleware.Headers,
[
{"user-agent",
Application.get_env(:coinbasepro_rest, :user_agent, default_user_agent!())}
]},
Tesla.Middleware.JSON,
{Signature, context}
])
end
defp default_user_agent! do
lib_version =
case :application.get_key(:coinbasepro_rest, :vsn) do
{:ok, vsn} ->
List.to_string(vsn)
:undefined ->
"dev"
end
"coinbasepro #{lib_version} (Elixir #{System.version()})"
end
end
|
lib/coinbasepro_rest/request.ex
| 0.864896
| 0.534491
|
request.ex
|
starcoder
|
defmodule CbLocomotion.Locomotion do
@moduledoc """
The main interface for moving the robot. Interacts with the two stepper motors
to move the robot forward, back, left, right, stop, and set the rate at which
both motors turn.
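## Example
    # Sketch, assuming the :left_stepper and :right_stepper processes are running:
    CbLocomotion.Locomotion.set_step_rate(5)
    CbLocomotion.Locomotion.forward()
    CbLocomotion.Locomotion.stop()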
"""
@name __MODULE__
use GenServer
alias CbLocomotion.StepperMotor
def start_link do
GenServer.start_link(__MODULE__, [], name: @name)
end
### API
def set_step_rate(step_rate) do
GenServer.call(@name, {:set_step_rate, step_rate})
end
def get_step_rate do
GenServer.call(@name, :get_step_rate)
end
def forward do
GenServer.call(@name, :forward)
end
def reverse do
GenServer.call(@name, :reverse)
end
def stop do
GenServer.call(@name, :stop)
end
def turn_right do
GenServer.call(@name, :turn_right)
end
def turn_left do
GenServer.call(@name, :turn_left)
end
## Callbacks
def init(_) do
{:ok, []}
end
def handle_call({:set_step_rate, step_rate}, _from, state) do
:left_stepper |> StepperMotor.set_step_rate(step_rate)
:right_stepper |> StepperMotor.set_step_rate(step_rate)
{:reply, :ok, state}
end
def handle_call(:get_step_rate, _from, state) do
rate = StepperMotor.state(:right_stepper).step_millis
{:reply, rate, state}
end
def handle_call(:forward, _from, state) do
set_direction(:back, :forward)
{:reply, :ok, state}
end
def handle_call(:reverse, _from, state) do
set_direction(:forward, :back)
{:reply, :ok, state}
end
def handle_call(:stop, _from, state) do
set_direction(:neutral, :neutral)
{:reply, :ok, state}
end
def handle_call(:turn_left, _from, state) do
set_direction(:forward, :forward)
{:reply, :ok, state}
end
def handle_call(:turn_right, _from, state) do
set_direction(:back, :back)
{:reply, :ok, state}
end
defp set_direction(left, right) do
:left_stepper |> StepperMotor.set_direction(left)
:right_stepper |> StepperMotor.set_direction(right)
end
end
|
apps/cb_locomotion/lib/cb_locomotion/locomotion.ex
| 0.67822
| 0.504394
|
locomotion.ex
|
starcoder
|
defmodule Affine.Generator do
@moduledoc """
Module enclosing the high level transform creating capabilities.
An easy API for creating and using Affine transforms uses the flexibility
provided in Elixir to more elegantly define the transforms. This requires a bit more processing but generally would not be a burden to the application unless
many transforms are being created. Here's an example:
t_translate = Affine.create [ type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0]
point = Affine.transform t_translate, [1.0, 2.0, 3.0]
assert point == [4.0, 6.0, 8.0]
So the create function takes a parameter list and generates the correct
transform. The create function can also take a list of parameter lists and
generate a single transform from those parameter lists. For example, to create,
t_translate_then_scale with a single call to create, the following can be done:
point =
[ [type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0],
[type: :scale, dimensions: 3, x: 2.0, y: 2.0, z: 2.0] ]
|> Affine.create
|> Affine.transform [1.0, 2.0, 3.0]
assert point == [ 8.0, 12.0, 16.0 ]
Note the composition order: the first transform in the parameter list is the last one multiplied into the final matrix, so logically it is the first one applied to a point when using the final transform.
Of course, the above is only useful for a one time point transformation since
the generate transform is not saved. So the following is likely to be more
useful:
t_translate_then_scale =
[ [type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0],
[type: :scale, dimensions: 3, x: 2.0, y: 2.0, z: 2.0] ]
|> Affine.create
point = t_translate_then_scale
|> Affine.transform [1.0, 2.0, 3.0]
assert point == [ 8.0, 12.0, 16.0 ]
Following are all the types of transforms that can be created:
[type: :translate, dimensions: 1, x: value]
[type: :translate, dimensions: 2, x: value, y: value]
[type: :translate, dimensions: 3, x: value, y: value, z: value]
Where value is the translation value for the x, y and z axis respectively.
[type: :scale, dimensions: 1, x: value]
[type: :scale, dimensions: 2, x: value, y: value]
[type: :scale, dimensions: 3, x: value, y: value, z: value]
Where value is the scale value for the x, y and z axis respectively.
[type: :rotate_x, angle: value, units: units]
[type: :rotate_y, angle: value, units: units]
[type: :rotate_z, angle: value, units: units]
These rotations are all for 3 dimensions and rotate around the respectively
axis in the counter-clockwise direction. Angle is in radians unless units is set to :degrees.
[type: :rotate_xy, angle: value, units: units]
This is a 2 dimensional rotate in the xy plane. Angle is in radians unless units is set to :degrees.
[type: :linear_map, x1_in: value1in, x1_out: value1out, x2_in: value2in, x2_out: value2out]
See the documentation on the LinearMap module. This defines a convienient 1 dimensional linear map useful for various linear mapping applications.
"""
@type matrix :: [[number]]
@type list_of_specs :: [[]]
@type spec :: []
@doc """
Creates all the transformations specified in the format described above and
returns a final composite transform.
It can also create a single trasform using the specification format above.
This is really a helper function for the create function above but it can be
called directly. For example, the following will return a 3d scale transform:
Affine.create [type: :scale, dimensions: 3, x: 1.0, y: 2.0, z: 3.0]
"""
@spec create(list_of_specs) :: matrix
def create([ head | tail ]) when is_list(head) do
list = [head] ++ tail
size = head[:dimensions] + 1
identity = Matrix.ident(size)
list
|> Enum.reverse
|> Enum.map(&(create &1))
|> Enum.reduce(identity,&(Matrix.mult(&2,&1)))
end
@spec create(spec) :: matrix
def create parms do
cond do
parms[:type] == :translate && parms[:dimensions] == 3 ->
Affine.Transforms.translate(parms[:x],parms[:y],parms[:z])
parms[:type] == :translate && parms[:dimensions] == 2 ->
Affine.Transforms.translate(parms[:x],parms[:y])
parms[:type] == :translate && parms[:dimensions] == 1 ->
Affine.Transforms.translate(parms[:x])
parms[:type] == :scale && parms[:dimensions] == 3 ->
Affine.Transforms.scale(parms[:x],parms[:y],parms[:z])
parms[:type] == :scale && parms[:dimensions] == 2 ->
Affine.Transforms.scale(parms[:x],parms[:y])
parms[:type] == :scale && parms[:dimensions] == 1 ->
Affine.Transforms.scale(parms[:x])
parms[:type] == :rotate_x ->
Affine.Transforms.rotate_x(radians(parms))
parms[:type] == :rotate_y ->
Affine.Transforms.rotate_y(radians(parms))
parms[:type] == :rotate_z ->
Affine.Transforms.rotate_z(radians(parms))
parms[:type] == :rotate_xy ->
Affine.Transforms.rotate_xy(radians(parms))
parms[:type] == :linear_map ->
Affine.LinearMap.linear_map(parms)
true ->
nil
end
end
# Returns radians given the parameters in a transform specification list. If
# units: :degrees is used in the spec, then it is converted to radians and
# returned.
defp radians parms do
units = parms[:units]
angle = parms[:angle]
case units do
:degrees -> angle / 180.0 * :math.pi()
_ -> angle
end
end
end
|
lib/affine/generator.ex
| 0.932607
| 0.974459
|
generator.ex
|
starcoder
|
defmodule Benchmark.Stats.HistogramOpts do
defstruct num_buckets: 32, growth_factor: 0.0, base_bucket_size: 1.0, min_value: 0
end
defmodule Benchmark.Stats.HistogramBucket do
defstruct low_bound: 0, count: 0
end
defmodule Benchmark.Stats.Histogram do
defstruct buckets: [],
min: nil,
max: 0,
opts: nil,
count: 0,
sum: 0,
sum_of_squares: 0,
cached_base_bucket_size: nil,
cached_growth_factor: nil
alias Benchmark.Stats.HistogramBucket
def new(opts) do
m = 1.0 + opts.growth_factor
buckets = [%HistogramBucket{low_bound: opts.min_value}]
{buckets, _} =
Enum.reduce(1..(opts.num_buckets - 1), {buckets, opts.base_bucket_size}, fn _,
{bs, delta} ->
b = %HistogramBucket{low_bound: opts.min_value + delta}
{[b | bs], delta * m}
end)
buckets = Enum.reverse(buckets)
%__MODULE__{
buckets: buckets,
opts: opts,
cached_base_bucket_size: :math.log(opts.base_bucket_size),
cached_growth_factor: 1 / :math.log(1 + opts.growth_factor)
}
end
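# Bucket layout sketch: with num_buckets: 3, growth_factor: 1.0,
# base_bucket_size: 1.0 and min_value: 0, the low bounds are [0, 1.0, 2.0]
# (for bucket k >= 1: min_value + base_bucket_size * (1 + growth_factor)^(k - 1)),
# and `find_bucket/2` maps a value v >= 1.0 to trunc(log(v) / log(2) + 1).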
def add(h, val) do
bucket_idx = find_bucket(h, val)
if bucket_idx >= h.opts.num_buckets do
raise ArgumentError, "found bucket for #{val} is out of range: #{bucket_idx}"
end
buckets = List.update_at(h.buckets, bucket_idx, fn b -> Map.update!(b, :count, &(&1 + 1)) end)
h
|> Map.put(:buckets, buckets)
|> Map.update!(:count, &(&1 + 1))
|> Map.update!(:sum, &(&1 + val))
|> Map.update!(:sum_of_squares, &(&1 + val * val))
|> Map.update(:min, val, &min(&1, val))
|> Map.update(:max, val, &max(&1, val))
end
def find_bucket(h, val) do
delta = val - h.opts.min_value
if delta >= h.opts.base_bucket_size do
trunc((:math.log(delta) - h.cached_base_bucket_size) * h.cached_growth_factor + 1)
else
0
end
end
def merge(h1, h2) do
if h1.opts != h2.opts do
raise ArgumentError, "failed to merge histograms, created by inequivalent options"
end
min = min(h1.min, h2.min)
max = max(h1.max, h2.max)
buckets =
Enum.zip(h1.buckets, h2.buckets)
|> Enum.map(fn {b1, b2} -> %{b1 | count: b1.count + b2.count} end)
Map.merge(h1, %{
count: h1.count + h2.count,
sum: h1.sum + h2.sum,
sum_of_squares: h1.sum_of_squares + h2.sum_of_squares,
buckets: buckets,
min: min,
max: max
})
end
end
|
benchmark/lib/benchmark/stats/histogram.ex
| 0.786869
| 0.528412
|
histogram.ex
|
starcoder
|
defmodule Terp.Evaluate do
@moduledoc """
This module contains the core evaluation logic.
"""
alias Terp.Error
alias Terp.ModuleSystem
alias Terp.Value
alias Terp.Evaluate.Arithmetic
alias Terp.Evaluate.Boolean
alias Terp.Evaluate.Environment
alias Terp.Evaluate.Function
alias Terp.Evaluate.List, as: TerpList
alias Terp.Evaluate.Match
@debug false
# Given a list of trees and an environment, evaluates the trees in
# the context of the environment.
def eval_trees(x, env) when is_bitstring(x), do: {x, env}
def eval_trees(exprs, env) do
res = exprs
|> Enum.reduce({:ok, {:environment, env}}, fn
(expr, {:ok, {:environment, environment}}) ->
eval_tree(expr, environment)
(expr, {:ok, {:evaluated, _result, environment}}) ->
eval_tree(expr, environment)
(_expr, {:error, error, environment}) ->
{{:error, error}, environment}
end)
case res do
{:ok, {:environment, environment}} ->
{nil, environment}
{:ok, {:evaluated, result, env}} ->
{result, env}
{:error, e, _environment} ->
{{:error, e}, env}
%Error{} = e ->
{e, env}
x ->
{x, env}
end
end
@doc """
Evaluate a single AST.
"""
def eval_tree(expr, env), do: eval_tree(expr, env, verbose: false)
def eval_tree(expr, env, verbose: verbose) do
case eval_expr(expr, env) do
x when is_function(x) ->
{:ok, {:environment, x}}
{{:ok, msg}, env} ->
{:ok, {:evaluated, msg, env}}
{:error, error} ->
{:error, error, env}
%Error{} = error ->
{:error, error, env}
result ->
if verbose do
{:ok, {:evaluated, result, env, expr}}
else
{:ok, {:evaluated, result, env}}
end
end
end
@doc """
Evaluate an expression represented by an AST.
## Example
# (+ 5 3)
iex> "(+ 5 3)"
...> |> Terp.Parser.parse()
...> |> Enum.flat_map(&Terp.AST.to_tree/1)
...> |> Enum.map(fn tree -> Terp.Evaluate.eval_expr(tree, fn (z) -> {:error, {:unbound, z}} end) end)
[8]
# (* 2 4 5)
iex> "(* 2 4 5)"
...> |> Terp.Parser.parse()
...> |> Enum.flat_map(&Terp.AST.to_tree/1)
...> |> Enum.map(fn tree -> Terp.Evaluate.eval_expr(tree, fn (z) -> {:error, {:unbound, z}} end) end)
[40]
# (* 2 4 (+ 4 1))
iex> "(* 2 4 (+ 4 1))"
...> |> Terp.Parser.parse()
...> |> Enum.flat_map(&Terp.AST.to_tree/1)
...> |> Enum.map(fn tree -> Terp.Evaluate.eval_expr(tree, fn (z) -> {:error, {:unbound, z}} end) end)
[40]
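# (- 10 4); subtraction takes the same path as the + and * examples above
iex> "(- 10 4)"
...> |> Terp.Parser.parse()
...> |> Enum.flat_map(&Terp.AST.to_tree/1)
...> |> Enum.map(fn tree -> Terp.Evaluate.eval_expr(tree, fn (z) -> {:error, {:unbound, z}} end) end)
[6]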
"""
def eval_expr(%RoseTree{node: node, children: children} = tree, env \\ fn (y) -> {:error, {:unbound, y}} end) do
if @debug do
IO.inspect(tree, label: "EVAL TREE :")
end
case node do
:__data ->
[_type_constructor, value_constructors] = children
constructors = value_constructors.node
env_prime = Enum.reduce(constructors, env, fn c, env -> Value.constructor_fn(c, env) end)
env_prime
:__string ->
str = List.first(children)
str.node
:__quote ->
Environment.quote(children)
:__cond ->
Boolean.cond(children, env)
:__match ->
case Match.match(children, env) do
%Error{} = e ->
%{e | in_expression: tree}
res ->
res
end
:"__#t" ->
Boolean.t()
:"__#f" ->
Boolean.f()
:__let_values ->
[%RoseTree{node: bindings} | [expr | []]] = children
local_env = Enum.reduce(bindings, env, fn (binding, env) ->
Environment.let(binding, env)
end)
eval_expr(expr, local_env)
:__beam ->
[%RoseTree{node: module} | [%RoseTree{node: function} | []]] = children
module_first_char = String.first(module)
is_capitalized? = String.upcase(module_first_char) == module_first_char
fully_qualified_module = (if is_capitalized?, do: ("Elixir." <> module), else: module)
|> String.to_existing_atom
{:__beam, fn (args) -> apply(fully_qualified_module, function, args) end}
:__apply ->
[operator | operands] = children
operator = eval_expr(operator, env)
case operator do
:__if ->
Boolean.conditional(operands, env)
:__letrec ->
Function.letrec(operands, env)
:__let ->
Environment.let(operands, env)
:__lambda ->
Function.lambda(operands, env)
:__require ->
operands
|> Enum.map(&(&1.node))
|> ModuleSystem.require_modules(env)
:__provide ->
:noop
:"__+" ->
Arithmetic.add(Enum.map(operands, &eval_expr(&1, env)))
:"__*" ->
Arithmetic.multiply(Enum.map(operands, &eval_expr(&1, env)))
:"__-" ->
Arithmetic.subtract(Enum.map(operands, &eval_expr(&1, env)))
:__div ->
Arithmetic.divide(Enum.map(operands, &eval_expr(&1, env)))
:__equal? ->
Boolean.equal?(operands, env)
:__cons ->
TerpList.cons(operands, env)
:__car ->
TerpList.car(operands, env)
:__cdr ->
TerpList.cdr(operands, env)
:__empty? ->
TerpList.empty?(operands, env)
x when is_function(x) ->
Function.apply_lambda(operator, Enum.map(operands, &eval_expr(&1, env)), env)
{:error, reason} ->
{:error, reason}
{:__beam, function} ->
evald_operands = Enum.map(operands, &eval_expr(&1, env))
function.(evald_operands)
x when is_boolean(x) ->
{x, env}
_ ->
{:error, {:not_a_procedure, operator}}
end
x when is_number(x) -> x
x when is_function(x) -> x
x when is_boolean(x) -> x
x ->
with true <- is_atom(x),
s <- Atom.to_string(x),
true <- String.starts_with?(s, "__") do
x
else
_ ->
env.(x)
end
end
end
end
|
lib/evaluate/evaluate.ex
| 0.699973
| 0.436262
|
evaluate.ex
|
starcoder
|
defmodule Weather.Weather do
@moduledoc """
Handle fetching of weather forecast data from the Forecast.io web service.
"""
alias HTTPotion.Response
alias Weather.Weather
@user_agent Application.get_env :weather, :user_agent
@forecast_api_url Application.get_env :weather, :forecast_io_web_service_url
@forecast_io_api_key Application.get_env :weather, :forecast_io_api_key
defmodule Current do
defstruct summary: "", temperature: nil, humidity: nil
end
defmodule Forecast do
defstruct summary: "", date: nil,
max_temperature: nil, min_temperature: nil, humidity: nil
end
@doc """
Expects the latitude and longitude of the location that we want to use to
query the web service.
Returns `{:ok, {%Weather.Current{}, [%Weather.Forecast{}]}}` or `{:error, error_message}`
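## Example
    # Sketch; requires valid Forecast.io credentials in the app config and
    # uses illustrative coordinates:
    {:ok, {current, forecast}} = Weather.Weather.fetch(37.8267, -122.4233)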
"""
def fetch(latitude, longitude) do
fetch_from_web_service(latitude, longitude)
|> parse_weather
end
defp fetch_from_web_service(latitude, longitude) do
response = location_url(latitude, longitude)
|> URI.encode
|> HTTPotion.get @user_agent
return_code = if Response.success?(response), do: :ok, else: :error
{return_code, response.body}
end
defp location_url(latitude, longitude) do
"#{@forecast_api_url}#{@forecast_io_api_key}/" <>
"#{latitude},#{longitude}/?units=si&lang=en"
end
defp parse_weather({:ok, body}) do
discard_key = fn {_, list} -> list end
data = Jsonex.decode body
current_summary = data
|> List.keyfind("currently", 0, nil)
|> discard_key.()
|> List.keyfind("summary", 0, nil)
|> discard_key.()
current_data = data
|> List.keyfind("currently", 0, nil)
|> discard_key.()
current_temperature = current_data
|> List.keyfind("temperature", 0, nil)
|> discard_key.()
current_humidity = current_data
|> List.keyfind("humidity", 0, nil)
|> discard_key.()
current_weather = %Weather.Current{
summary: current_summary,
temperature: current_temperature,
humidity: current_humidity
}
forecast_data = data
|> List.keyfind("daily", 0, nil)
|> discard_key.()
forecast_summary = forecast_data
|> List.keyfind("summary", 0, nil)
|> discard_key.()
parse_forecast = fn day_data ->
day_summary = day_data
|> List.keyfind("summary", 0, nil)
|> discard_key.()
|> String.rstrip(?.)
day_timestamp = day_data
|> List.keyfind("time", 0, nil)
|> discard_key.()
base_date = :calendar.datetime_to_gregorian_seconds(
{{1970, 1, 1}, {0, 0, 0}})
{day_date, _time} = :calendar.gregorian_seconds_to_datetime(
base_date + day_timestamp)
day_max_temperature = day_data
|> List.keyfind("temperatureMax", 0, nil)
|> discard_key.()
day_min_temperature = day_data
|> List.keyfind("temperatureMin", 0, nil)
|> discard_key.()
day_humidity = day_data
|> List.keyfind("humidity", 0, nil)
|> discard_key.()
%Weather.Forecast{
summary: day_summary,
date: day_date,
max_temperature: day_max_temperature,
min_temperature: day_min_temperature,
humidity: day_humidity
}
end
forecast = forecast_data
|> List.keyfind("data", 0, nil)
|> discard_key.()
|> Enum.drop(1)
|> Enum.map(&(parse_forecast.(&1)))
{:ok, {current_weather, forecast}}
end
defp parse_weather({:error, _}) do
  {:error, "Unable to fetch weather forecast"}
end
end
|
lib/weather/weather.ex
| 0.876291
| 0.48121
|
weather.ex
|
starcoder
|
[1, x, 4, y] = [1, 2, 4, 8]
IO.puts x
IO.puts y
insert = [2, 4, 8]
full = [1, insert, 16, 32]
IO.inspect full
neat = List.flatten(full)
IO.inspect neat
a = [1, 2, 4]
b = [8, 16, 32]
IO.inspect [a, b] # [[1, 2, 4], [8, 16, 32]]
IO.inspect Enum.concat(a, b) # [1, 2, 4, 8, 16, 32]
IO.inspect a ++ b # [1, 2, 4, 8, 16, 32]
c = [64, 128, 256]
IO.inspect Enum.concat([c,b,a]) # [64, 128, 256, 8, 16, 32, 1, 2, 4]
# Elixir takes a different approach, letting you process the first item in a list, the head, while extracting the rest of the list, the tail, so that you can pass it to another call recursively.
list = [1, 2, 4]
[h1 | t1] = list
IO.inspect h1 # 1
IO.inspect t1 # [2, 4]
[h2 | t2] = t1
IO.inspect h2 # 2
IO.inspect t2 # [4]
[h3 | t3] = t2
IO.inspect h3 # 4
IO.inspect t3 # []
# [h4 | t4] = t3 # ** (MatchError) no match of right hand side value: []
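# A minimal sketch of the head/tail recursion described above (illustrative
# module name): process the head, recurse on the tail, stop at the empty list.
defmodule HeadTail do
def sum([]), do: 0
def sum([head | tail]), do: head + sum(tail)
end
IO.inspect HeadTail.sum([1, 2, 4]) # 7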
IO.inspect Enum.reverse c # [256, 128, 64]
IO.inspect [1 | [2, 3]] # [1, 2, 3]
IO.inspect [1, 2 | [3]] # [1, 2, 3]
IO.inspect [1, 2 | 3] # [1, 2 | 3] # an improper list, not a normal list
atomic_info = [{:hydrogen, :H, 1.008}, {:carbon, :C, 12.011}, {:sodium, :Na, 22.99}]
IO.inspect List.keyfind(atomic_info, :H, 1) # {:hydrogen, :H, 1.008}
IO.inspect List.keyfind(atomic_info, :carbon, 0) # {:carbon, :C, 12.011}
IO.inspect List.keyfind(atomic_info, :F, 1) # nil
IO.inspect List.keyfind(atomic_info, :fluorine, 0, {}) # {}
IO.inspect List.keymember?(atomic_info, :Na, 1) # true
IO.inspect List.keymember?(atomic_info, :boron, 0) # false
atomic_info2 = List.keystore(atomic_info, :boron, 0, {:boron, :B, 10.081})
IO.inspect atomic_info3 = List.keyreplace(atomic_info2, :B, 1, {:boron, :B, 10.81}) # [{:hydrogen, :H, 1.008}, {:carbon, :C, 12.011}, {:sodium, :Na, 22.99}, {:boron, :B, 10.81}]
IO.inspect atomic_info4 = List.keydelete(atomic_info3, :fluorine, 0) # [{:hydrogen, :H, 1.008}, {:carbon, :C, 12.011}, {:sodium, :Na, 22.99}, {:boron, :B, 10.81}]
IO.inspect atomic_info5 = List.keydelete(atomic_info3, :carbon, 0) # [{:hydrogen, :H, 1.008}, {:sodium, :Na, 22.99}, {:boron, :B, 10.81}]
atomic_info3 = List.keyreplace(atomic_info2, :B, 1, {:zinc, :Zn, 65.38})
planemo_hash = Enum.into([earth: 9.8, moon: 1.6, mars: 3.71], HashDict.new())
# return: #HashDict<[earth: 9.8, mars: 3.71, moon: 1.6]>
IO.inspect HashDict.has_key?(planemo_hash, :moon) # true
IO.inspect HashDict.has_key?(planemo_hash, :jupiter) # false
IO.inspect HashDict.get(planemo_hash, :jupiter) # nil
IO.inspect HashDict.get(planemo_hash, :jupiter, 0) # 0
planemo_hash2 = HashDict.put_new(planemo_hash, :jupiter, 99.9)
#HashDict<[moon: 1.6, mars: 3.71, jupiter: 99.9, earth: 9.8]>
planemo_hash3 = HashDict.put_new(planemo_hash2, :jupiter, 23.1)
#HashDict<[moon: 1.6, mars: 3.71, jupiter: 99.9, earth: 9.8]>
planemo_hash4 = HashDict.put(planemo_hash3, :jupiter, 23.1)
#HashDict<[moon: 1.6, mars: 3.71, jupiter: 23.1, earth: 9.8]>
planemo_hash5 = HashDict.delete(planemo_hash4,:saturn)
#HashDict<[moon: 1.6, mars: 3.71, jupiter: 23.1, earth: 9.8]>
planemo_hash6 = HashDict.delete(planemo_hash4, :jupiter)
#HashDict<[moon: 1.6, mars: 3.71, earth: 9.8]>
defmodule Overall do
# Overall.product('funny')
# Overall.product([1,2,3,4])
# Overall.product([],2)
def product([]) do
0
end
def product(list) do
product(list, 1)
end
def product([], accumulated_product) do
accumulated_product
end
def product([head | tail], accumulated_product) do
product(tail, head * accumulated_product)
end
end
defmodule ListDrop do
@moduledoc """
ListDrop.falls([{:earth, 20}, {:moon, 20}, {:mars, 20}])
"""
def falls(list) do
falls(list, [])
end
defp falls([], results) do
# results
Enum.reverse(results)
end
defp falls([head|tail], results) do
falls(tail, [Drop.fall_velocity(head) | results])
end
end
|
other/lists.ex
| 0.538012
| 0.668352
|
lists.ex
|
starcoder
|
defmodule Membrane.Core.Element.DemandController do
@moduledoc false
# Module handling demands incoming through output pads.
use Bunch
alias Membrane.Core.CallbackHandler
alias Membrane.Core.Child.PadModel
alias Membrane.Core.Element.{ActionHandler, State}
alias Membrane.Element.CallbackContext
alias Membrane.Pad
require Membrane.Core.Child.PadModel
require Membrane.Logger
@doc """
Handles demand coming in on an output pad. Updates the demand value and executes the `handle_demand` callback.
"""
@spec handle_demand(Pad.ref_t(), non_neg_integer, State.t()) ::
State.stateful_try_t()
def handle_demand(pad_ref, size, state) do
if ignore?(pad_ref, state) do
{:ok, state}
else
do_handle_demand(pad_ref, size, state)
end
end
@spec ignore?(Pad.ref_t(), State.t()) :: boolean()
defp ignore?(pad_ref, state), do: state.pads.data[pad_ref].mode == :push
@spec do_handle_demand(Pad.ref_t(), non_neg_integer, State.t()) ::
State.stateful_try_t()
defp do_handle_demand(pad_ref, size, state) do
PadModel.assert_data(state, pad_ref, %{direction: :output})
{total_size, state} =
state
|> PadModel.get_and_update_data!(pad_ref, :demand, fn demand ->
(demand + size) ~> {&1, &1}
end)
if exec_handle_demand?(pad_ref, state) do
%{other_demand_unit: unit} = PadModel.get_data!(state, pad_ref)
require CallbackContext.Demand
context = &CallbackContext.Demand.from_state(&1, incoming_demand: size)
CallbackHandler.exec_and_handle_callback(
:handle_demand,
ActionHandler,
%{split_continuation_arbiter: &exec_handle_demand?(pad_ref, &1), context: context},
[pad_ref, total_size, unit],
state
)
else
{:ok, state}
end
end
@spec exec_handle_demand?(Pad.ref_t(), State.t()) :: boolean
defp exec_handle_demand?(pad_ref, state) do
case PadModel.get_data!(state, pad_ref) do
%{end_of_stream?: true} ->
Membrane.Logger.debug_verbose("""
Demand controller: not executing handle_demand as :end_of_stream action has already been returned
""")
false
%{demand: demand} when demand <= 0 ->
Membrane.Logger.debug_verbose("""
Demand controller: not executing handle_demand as demand is not greater than 0,
demand: #{inspect(demand)}
""")
false
_pad_data ->
true
end
end
end
|
lib/membrane/core/element/demand_controller.ex
| 0.777046
| 0.438905
|
demand_controller.ex
|
starcoder
|
defmodule Membrane.ICE.Handshake do
@moduledoc """
Behaviour that specifies functions that have to be implemented in order to perform handshake
after establishing ICE connection.
One instance of this module is responsible for performing handshake only for one component.
"""
@type t :: module
@typedoc """
Any type that the user wants to have passed to the other functions of this behaviour.
"""
@type state :: term()
@typedoc """
Notification sent to pipeline after executing `init/1` function on handshake module
"""
@type init_notification ::
{:handshake_init_data, component_id :: non_neg_integer(), init_data :: any()}
@doc """
Called only once at Sink/Source preparation.
`opts` - options specified in `handshake_opts` option in Sink/Source
`init_data` - any data that will be fired as a notification to pipeline. Notification
will be of type `t:init_notification/0`
`state` - state that will be passed to other functions
A peer returning `:finished` marks the handshake as finished, and none of the remaining
functions will be invoked for this peer.
"""
@callback init(opts :: list()) ::
{:ok, init_data :: any(), state()}
| {:finished, init_data :: any()}
@doc """
Called only once, when the component changes state to READY, i.e. it is able to receive and send data.
It is a good place to start your handshake. If a host doesn't need to do anything
and only waits for initialization from its peer, it can return `:ok`.
The meaning of the remaining return values is the same as in `recv_from_peer/2`.
"""
@callback connection_ready(state :: state()) ::
:ok
| {:ok, packets :: binary()}
| {:finished, handshake_data :: term(), packets :: binary()}
| {:finished, handshake_data :: term()}
@doc """
Called each time remote data arrives.
`:ok` should be returned when the peer processed incoming data without generating new data.
`{:ok, packets}` should be returned when the peer processed incoming data and generated
new packets.
If packets cannot be sent immediately (because ICE is not ready yet), they will be cached and
sent as soon as possible (i.e. when ICE is ready).
`{:finished, handshake_data, packets}` should be returned by a peer that ends
its handshake first but also generates some final packets, so that the second peer can end its
handshake too.
Packets returned in `{:ok, packets}` and `{:finished, handshake_data, packets}` messages
will be sent to the peer automatically over the ICE connection.
`handshake_data` is any data the user wants to return after finishing the handshake.
"""
@callback recv_from_peer(state :: state(), data :: binary()) ::
:ok
| {:ok, packets :: binary()}
| {:finished, handshake_data :: term(), packets :: binary()}
| {:finished, handshake_data :: term()}
end
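# A minimal sketch (an illustrative assumption, not part of the library) of a
# no-op implementation of this behaviour: it finishes immediately on init, so
# the remaining callbacks are never invoked for this peer.
defmodule Membrane.ICE.Handshake.NoOp do
@behaviour Membrane.ICE.Handshake
@impl true
def init(_opts), do: {:finished, nil}
@impl true
def connection_ready(_state), do: :ok
@impl true
def recv_from_peer(_state, _data), do: :ok
end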
|
lib/membrane_ice_plugin/handshake/handshake.ex
| 0.871844
| 0.439086
|
handshake.ex
|
starcoder
|
defmodule Zippy.ZBinTree do
@moduledoc """
This module implements Zipper binary trees, that allow you to traverse them in two directions.
This module is a port of <NAME>'s [“Zippers”](https://github.com/ferd/zippers) library, under the MIT licence.
"""
alias __MODULE__
## Type declarations
@typep node(a) :: nil
| {:fork, a, left::node(a), right::node(a)}
@typep choice(a) :: {:left, a, node(a)}
| {:right, a, node(a)}
@typep thread(a) :: [choice(a)]
@typedoc "A Zipper binary tree"
@type t() :: {thread(any()), node(any())}
## Functions
@doc "Create a new basic binary tree. It should be declared first when declaring the data structure."
@spec root(term()) :: ZBinTree.t
def root(a) do
{[], {:fork, a, nil, nil}}
end
@doc "Checks if a node is a leaf, that is to say if it has no child."
@spec leaf?(ZBinTree.t) :: boolean()
def leaf?({_thread, {:fork, _, nil, nil}}), do: true
def leaf?({_thread, {:fork, _, _, _}}), do: false
@doc "Returns the current element in the binary tree in a tuple."
@spec current(ZBinTree.t) :: {:ok, term()} | {:error, nil}
def current({_thread, {:fork, value, _left, _right}}), do: {:ok, value}
def current({_thread, nil}), do: {:error, nil}
@doc "Replaces the current element in the tree (if it exists) or create a new node (if it doesn't)."
@spec replace(term(), ZBinTree.t) :: ZBinTree.t
def replace(value, {thread, nil}), do: {thread, {:fork, value, nil, nil}}
def replace(value, {thread, {:fork, _oldvalue, left, right}}), do: {thread, {:fork, value, left, right}}
@doc "Goes down the tree, with the `current` element being the `right` child."
@spec right(ZBinTree.t) :: ZBinTree.t | nil
def right({thread, {:fork, value, left, right}}), do: {[{:right, value, left}|thread], right}
def right({_thread, nil}), do: nil
@doc "Goes down the tree, with the `current` element being the `left` child, or returns `nil` if there is no child"
@spec left(ZBinTree.t) :: ZBinTree.t | nil
def left({thread, {:fork, value, left, right}}), do: {[{:left, value, right}|thread], left}
def left({_thread, nil}), do: nil
@doc "Goes up the tree, or returns `nil` if we're already at the top of the tree."
@spec up(ZBinTree.t) :: ZBinTree.t | nil
def up({[{:left, value, right}|thread], left}), do: {thread, {:fork, value, left, right}}
def up({[{:right, value, left} |thread], right}), do: {thread, {:fork, value, left, right}}
def up({[], _tree}), do: nil
@doc "Adds a left child to the tree"
@spec add_left(ZBinTree.t, term()) :: ZBinTree.t
def add_left(new_branch, zipper) do
new_branch
|> replace(left(zipper))
|> up
end
@doc "Adds a right child to the tree."
@spec add_right(term(), ZBinTree.t) :: ZBinTree.t
def add_right(new_branch, zipper) do
new_branch
|> replace(right(zipper))
|> up
end
end
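# A quick illustrative sketch of traversal:
#
#   tree = Zippy.ZBinTree.root(:a)
#   Zippy.ZBinTree.current(tree)                         # {:ok, :a}
#   tree |> Zippy.ZBinTree.left()                        # {[{:left, :a, nil}], nil} -- a nil leaf
#   tree |> Zippy.ZBinTree.left() |> Zippy.ZBinTree.up() # back to the root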
|
lib/zippy/ZBinTree.ex
| 0.889325
| 0.578865
|
ZBinTree.ex
|
starcoder
|
defmodule Calcinator.Resources do
@moduledoc """
A module that exposes Ecto schema structs
"""
alias Alembic.{Document, Error, Source}
alias Calcinator.Resources.{Page, Sorts}
# Types
@typedoc """
ID that uniquely identifies the `struct`
"""
@type id :: term
@typedoc """
Pagination information returned from the backing store.
"""
@type pagination :: map
@type params :: map
@typedoc """
* `:associations` - associations to load in the `struct`
* `:filters` - filters on the result
* `:meta` - meta data that is traded back and forth between clients and servers that don't fit into JSONAPI, such as
`Calcinator.Meta.Beam`.
* `:page` - the page used for pagination. `nil` implies no pagination, not default pagination.
* `:sorts` - the directions to sort fields on the primary resource or its associations
"""
@type query_options :: %{
optional(:associations) => atom | [atom],
optional(:filters) => %{
String.t() => String.t()
},
optional(:meta) => %{
String.t() => Alembic.json()
},
optional(:page) => Page.t() | nil,
optional(:sorts) => Sorts.t()
}
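# For example (illustrative values):
#
#   %{
#     associations: [:author],
#     filters: %{"id" => "1,2"},
#     page: nil
#   }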
@type sandbox_access_token :: %{required(:owner) => term, optional(atom) => any}
@typedoc """
A module that implements the `Resources` behaviour
"""
@type t :: module
# Callbacks
@doc """
Allows access to sandbox for testing
"""
@callback allow_sandbox_access(sandbox_access_token) :: :ok | {:error, :sandbox_access_disallowed}
@doc """
Changeset for creating a struct from the `params`
## Returns
* `{:ok, Ecto.Changeset.t}` - a changeset was created with no error while trying to access the backing store
* `{:error, :ownership}` - connection to backing store was not owned by the calling process when associations were
preloaded into the data for the changeset. `many_to_many` associations require the associations to be preloaded
into the `Ecto.Changeset.t` `data` before calling `Ecto.Changeset.put_assoc/3`.
"""
@callback changeset(params) :: {:ok, Ecto.Changeset.t()} | {:error, :ownership}
@doc """
* Changeset for updating `resource` with `params`.
* Changeset for deleting `resource` (`params` will be an empty map)
## Returns
* `{:ok, Ecto.Changeset.t}` - a changeset was created with no error while trying to access the backing store
* `{:error, :ownership}` - connection to backing store was not owned by the calling process when associations were
preloaded into the data for the changeset. `many_to_many` associations require the associations to be preloaded
into the `Ecto.Changeset.t` `data` before calling `Ecto.Changeset.put_assoc/3`.
"""
@callback changeset(resource :: Ecto.Schema.t(), params) :: {:ok, Ecto.Changeset.t()} | {:error, :ownership}
@doc """
Deletes a single struct in a `changeset`
## Returns
* `{:ok, struct}` - the delete succeeded and the returned struct is the state before delete
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while deleting `struct` from backing store.
* `{:error, Ecto.Changeset.t}` - errors while deleting the `changeset`. `Ecto.Changeset.t` `errors` contains
errors. These will normally be constraint errors or only those validations that can occur in `prepare_changes`
callbacks that require `Ecto.Changeset.t` `action` and or `repo` to be set.
"""
@callback delete(changeset :: Ecto.Changeset.t(), query_options) ::
{:ok, struct} | {:error, :ownership} | {:error, :timeout} | {:error, Ecto.Changeset.t()}
@doc """
Gets a single `struct`
## Returns
* `{:ok, struct}` - `id` was found.
* `{:error, :not_found}` - `id` was not found.
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while getting `id` from backing store.
* `{:error, reason}` - an error occurred with the backing store for `reason` that is backing store specific.
"""
@callback get(id, query_options) ::
{:ok, struct}
| {:error, :not_found}
| {:error, :ownership}
| {:error, :timeout}
| {:error, reason :: term}
@doc """
## `insert(changeset, query_options)`
Inserts `changeset` into a single new `struct`.
### Returns
* `{:ok, struct}` - `changeset` was inserted into `struct`
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while inserting `changeset` into backing store.
* `{:error, Ecto.Changeset.t}` - insert failed. `Ecto.Changeset.t` `errors` contain errors.
## `insert(params, query_options)`
Inserts `params` into a single new `struct`.
### Returns
* `{:ok, struct}` - `params` were inserted into `struct`
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while inserting `params` into backing store.
* `{:error, Ecto.Changeset.t}` - insert failed. `Ecto.Changeset.t` `errors` contain errors.
"""
@callback insert(changeset :: Ecto.Changeset.t() | params, query_options) ::
{:ok, struct} | {:error, :ownership} | {:error, :timeout} | {:error, Ecto.Changeset.t()}
@doc """
Gets a list of `struct`s.
## Returns
* `{:ok, [resource], nil}` - all resources matching query
* `{:ok, [resource], pagination}` - page of resources matching query
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while getting resources from backing store.
* `{:error, reason}` - an error occurred with the backing store for `reason` that is backing store specific.
"""
@callback list(query_options) ::
{:ok, [struct], pagination | nil}
| {:error, :ownership}
| {:error, :timeout}
| {:error, reason :: term}
@doc """
# Returns
* `true` - if `allow_sandbox_access/1` should be called before any of the query methods are called
* `false` - otherwise
"""
@callback sandboxed?() :: boolean
@doc """
Applies updates in `changeset`
# Returns
* `{:ok, struct}` - the update succeeded and the returned `struct` contains the updates
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while updating `changeset` with `params` in backing store.
* `{:error, Ecto.Changeset.t}` - errors while updating `struct` with `params`. `Ecto.Changeset.t` `errors` contains
errors.
"""
@callback update(changeset :: Ecto.Changeset.t(), query_options) ::
{:ok, struct} | {:error, :ownership} | {:error, :timeout} | {:error, Ecto.Changeset.t()}
@doc """
Updates `struct`
# Returns
* `{:ok, struct}` - the update succeeded and the returned `struct` contains the updates
* `{:error, Ecto.Changeset.t}` - errors while updating `struct` with `params`. `Ecto.Changeset.t` `errors` contains
errors.
* `{:error, :bad_gateway}` - error in backing store that cannot be represented as another type of error
* `{:error, :not_found}` - the resource in the changeset was not found and so cannot be updated. This may mean that
the resource was deleted with `delete/1` after the `get/2` or `list/1` returned.
* `{:error, :ownership}` - connection to backing store was not owned by the calling process
* `{:error, :timeout}` - timeout occurred while updating `resource` with `params` in backing store.
"""
@callback update(resource :: Ecto.Schema.t(), params, query_options) ::
{:ok, struct}
| {:error, Ecto.Changeset.t()}
| {:error, :bad_gateway}
| {:error, :not_found}
| {:error, :ownership}
| {:error, :timeout}
# Functions
@doc """
Converts the attribute to a field if a corresponding field exists in `ecto_schema_module`.
If a field exists, then it is returned. This includes fields whose `_` appears as `-` in the
attribute name, as well as virtual fields.
iex> Calcinator.Resources.attribute_to_field("name", Calcinator.Resources.TestAuthor)
{:ok, :name}
iex> Calcinator.Resources.attribute_to_field("password-confirmation", Calcinator.Resources.TestAuthor)
{:ok, :password_confirmation}
Invalid field names will return an error
iex> Calcinator.Resources.attribute_to_field("password-hash", Calcinator.Resources.TestAuthor)
{:error, "password-hash"}
Associations are not fields, so they will return an error
iex> Calcinator.Resources.attribute_to_field("author", Calcinator.Resources.TestPost)
{:error, "author"}
## Returns
* `{:ok, field}` - `attribute` with `-` has the corresponding `field` with `_` in `ecto_schema_module`
* `{:error, attribute}` - `attribute` does not have corresponding field in `ecto_schema_module`
"""
def attribute_to_field(attribute, ecto_schema_module) when is_binary(attribute) and is_atom(ecto_schema_module) do
field_string = String.replace(attribute, "-", "_")
for(
potential_field <- fields(ecto_schema_module),
potential_field_string = to_string(potential_field),
potential_field_string == field_string,
do: potential_field
)
|> case do
[field] ->
{:ok, field}
[] ->
{:error, attribute}
end
end
@doc """
JSONAPI filter values that allow multiple values are comma-separated without spaces, similar to includes. Instead of
having to do `String.split(comma_separated_values, ",")` in every filter that accepts multiple values, this function
can be used.
iex> Calcinator.Resources.split_filter_value("1,2,3")
["1", "2", "3"]
"""
@spec split_filter_value(String.t()) :: [String.t()]
def split_filter_value(comma_separated), do: String.split(comma_separated, ",")
@doc """
Error when a filter `name` is not supported by the callback module.
iex> Calcinator.Resources.unknown_filter("name")
%Alembic.Document{
errors: [
%Alembic.Error{
detail: "Unknown name filter",
source: %Alembic.Source{
pointer: "/filter/name"
},
status: "422",
title: "Unknown Filter"
}
]
}
"""
@spec unknown_filter(name :: String.t()) :: Document.t()
def unknown_filter(name) do
%Document{
errors: [
%Error{
detail: "Unknown #{name} filter",
title: "Unknown Filter",
source: %Source{
pointer: "/filter/#{name}"
},
status: "422"
}
]
}
end
## Private Functions
# Returns both fields and virtual fields
defp fields(ecto_schema_module) do
associations = ecto_schema_module.__schema__(:associations)
# ecto_schema_module.__schema__(:fields) does not include virtual fields, so
# deduce real and virtual fields from struct keys
keys =
ecto_schema_module.__struct__()
|> Map.keys()
keys -- [:__meta__, :__struct__ | associations]
end
end
|
lib/calcinator/resources.ex
| 0.921065
| 0.449091
|
resources.ex
|
starcoder
|
defmodule Sanbase.Signal.SignalAdapter do
@behaviour Sanbase.Signal.Behaviour
import Sanbase.Signal.SqlQuery
import Sanbase.Utils.Transform, only: [maybe_unwrap_ok_value: 1, maybe_apply_function: 2]
alias Sanbase.Signal.FileHandler
alias Sanbase.ClickhouseRepo
@aggregations FileHandler.aggregations()
@aggregation_map FileHandler.aggregation_map()
@signals_mapset FileHandler.signals_mapset()
@min_interval_map FileHandler.min_interval_map()
@signals @signals_mapset |> Enum.to_list()
@data_type_map FileHandler.data_type_map()
@selectors_map FileHandler.selectors_map()
@human_readable_name_map FileHandler.human_readable_name_map()
@signal_to_name_map FileHandler.signal_to_name_map()
@access_map FileHandler.access_map()
@min_plan_map FileHandler.min_plan_map()
@free_signals FileHandler.signals_with_access(:free)
@restricted_signals FileHandler.signals_with_access(:restricted)
@timeseries_signals FileHandler.signals_with_data_type(:timeseries)
def has_signal?(signal) do
case signal in @signals_mapset do
true -> true
false -> signal_not_available_error(signal)
end
end
def available_aggregations(), do: @aggregations
def human_readable_name(signal) do
case Map.get(@human_readable_name_map, signal) do
nil ->
signal_not_available_error(signal)
human_readable_name ->
human_readable_name
end
end
def free_signals(), do: @free_signals
def restricted_signals(), do: @restricted_signals
def available_timeseries_signals(), do: @timeseries_signals
def access_map() do
@access_map
|> Enum.into(%{}, fn {signal, restrictions} ->
{signal, resolve_restrictions(restrictions)}
end)
end
def min_plan_map(), do: @min_plan_map
@impl Sanbase.Signal.Behaviour
def available_signals(), do: @signals
@impl Sanbase.Signal.Behaviour
def available_signals(%{slug: slug}) when is_binary(slug) do
{query, args} = available_signals_query(slug)
ClickhouseRepo.query_transform(query, args, fn [signal] ->
Map.get(@signal_to_name_map, signal)
end)
|> maybe_apply_function(fn list -> Enum.reject(list, &is_nil/1) end)
end
@impl Sanbase.Signal.Behaviour
def available_slugs(signal) do
{query, args} = available_slugs_query(signal)
ClickhouseRepo.query_transform(query, args, fn [slug] -> slug end)
end
@impl Sanbase.Signal.Behaviour
def metadata(signal) do
{:ok,
%{
signal: signal,
min_interval: Map.get(@min_interval_map, signal),
default_aggregation: Map.get(@aggregation_map, signal),
available_aggregations: @aggregations,
available_selectors: Map.get(@selectors_map, signal),
data_type: Map.get(@data_type_map, signal),
complexity_weight: 0.3
}}
end
@impl Sanbase.Signal.Behaviour
def first_datetime(signal, %{slug: slug}) when is_binary(slug) do
{query, args} = first_datetime_query(signal, slug)
ClickhouseRepo.query_transform(query, args, fn [datetime] ->
DateTime.from_unix!(datetime)
end)
|> maybe_unwrap_ok_value()
end
@impl Sanbase.Signal.Behaviour
def raw_data(signals, selector, from, to) do
{query, args} = raw_data_query(signals, from, to)
ClickhouseRepo.query_transform(query, args, fn [unix, signal, slug, value, metadata] ->
metadata =
case Jason.decode(metadata) do
{:ok, value} -> value
_ -> %{}
end
%{
datetime: DateTime.from_unix!(unix),
signal: Map.get(@signal_to_name_map, signal),
slug: slug,
value: value,
metadata: metadata
}
end)
|> maybe_apply_function(fn list -> Enum.filter(list, & &1.signal) end)
|> maybe_apply_function(fn list -> filter_slugs_by_selector(list, selector) end)
end
@impl Sanbase.Signal.Behaviour
def timeseries_data(_signal, %{slug: []}, _from, _to, _interval, _opts), do: {:ok, []}
def timeseries_data(signal, %{slug: slug_or_slugs}, from, to, interval, opts)
when is_binary(slug_or_slugs) or is_list(slug_or_slugs) do
aggregation = Keyword.get(opts, :aggregation, nil) || Map.get(@aggregation_map, signal)
slugs = slug_or_slugs |> List.wrap()
{query, args} = timeseries_data_query(signal, slugs, from, to, interval, aggregation)
ClickhouseRepo.query_transform(query, args, fn [unix, value, metadata] ->
metadata =
metadata
|> List.wrap()
|> Enum.map(&Jason.decode/1)
|> Enum.reduce_while([], fn
{:ok, value}, acc -> {:cont, [value | acc]}
_, _ -> {:halt, []}
end)
|> Enum.reverse()
%{
datetime: DateTime.from_unix!(unix),
value: value,
metadata: metadata
}
end)
end
@impl Sanbase.Signal.Behaviour
def aggregated_timeseries_data(signal, selector, from, to, opts)
def aggregated_timeseries_data(_signal, nil, _from, _to, _opts), do: {:ok, []}
def aggregated_timeseries_data(_signal, [], _from, _to, _opts), do: {:ok, []}
def aggregated_timeseries_data(signal, %{slug: slug_or_slugs}, from, to, opts)
when is_binary(slug_or_slugs) or is_list(slug_or_slugs) do
aggregation = Keyword.get(opts, :aggregation, nil) || Map.get(@aggregation_map, signal)
slugs = slug_or_slugs |> List.wrap()
{query, args} = aggregated_timeseries_data_query(signal, slugs, from, to, aggregation)
ClickhouseRepo.query_reduce(query, args, %{}, fn [slug, value], acc ->
Map.put(acc, slug, value)
end)
end
# Private functions
defp signal_not_available_error(signal) do
%{close: close, error_msg: error_msg} = signal_not_available_error_details(signal)
case close do
nil -> {:error, error_msg}
close -> {:error, error_msg <> " Did you mean '#{close}'?"}
end
end
defp signal_not_available_error_details(signal) do
%{
close: Enum.find(@signals_mapset, &(String.jaro_distance(signal, &1) > 0.8)),
error_msg: "The signal '#{signal}' is not supported or is mistyped."
}
end
defp filter_slugs_by_selector(list, :all), do: list
defp filter_slugs_by_selector(list, %{slug: slug_or_slugs}) do
slugs = List.wrap(slug_or_slugs)
Enum.filter(list, &(&1.slug in slugs))
end
# In case more signal adapters are added, functions such as this
# should be packed in a Helper module to be used on all the signal data.
# The same way the metric modules are structured
defp resolve_restrictions(restrictions) when is_map(restrictions) do
Enum.into(restrictions, %{}, fn {k, v} -> {k, String.to_existing_atom(v)} end)
end
defp resolve_restrictions(restriction) when restriction in [:restricted, :free] do
%{"historical" => restriction, "realtime" => restriction}
end
end
|
lib/sanbase/signal/signal_adapter.ex
| 0.662251
| 0.472562
|
signal_adapter.ex
|
starcoder
|
defmodule PhoenixPresenceList do
@moduledoc """
Documentation for PhoenixPresenceList.
"""
@doc """
Sync the given presence list using a diff of presence join and leave events.
Returns the updated presence list. In case information on leaves and joins
is needed, have a look at `apply_diff/2`.
## Examples
iex> state = %{}
iex> diff = %{joins: %{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}, leaves: %{}}
iex> state = PhoenixPresenceList.sync_diff(state, diff)
%{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}
iex> diff = %{joins: %{"foo" => %{metas: [%{phx_ref: "U9NnWWscQRU="}]}}, leaves: %{}}
iex> state = PhoenixPresenceList.sync_diff(state, diff)
%{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}, %{phx_ref: "U9NnWWscQRU="}]}}
iex> diff = %{joins: %{}, leaves: %{"foo" => %{metas: [%{phx_ref: "U9NnWWscQRU="}]}}}
iex> PhoenixPresenceList.sync_diff(state, diff)
%{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}
"""
def sync_diff(state, diff) do
state
|> apply_diff(diff)
|> elem(0)
end
@doc """
Sync the given presence list using a new presence list.
Returns the new presence list. In case information on leaves and joins is
needed, have a look at `apply_state/2`.
"""
def sync_state(_state, new_state) do
new_state
end
@doc """
Apply a presence list to another presence list.
Returns a tuple containing the new presence list along with two lists
containing information about the joins and leaves that took place.
## Examples
iex> state = %{}
iex> new_state = %{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}
iex> PhoenixPresenceList.apply_state(state, new_state)
{%{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}},
[{"foo", nil, %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}], []}
"""
def apply_state(state, new_state) do
leaves =
Enum.reduce(state, %{}, fn {key, presence}, leaves ->
case new_state[key] do
nil -> Map.put(leaves, key, presence)
_ -> leaves
end
end)
{joins, leaves} =
Enum.reduce(new_state, {%{}, leaves}, fn {key, new_presence}, {joins, leaves} ->
case state[key] do
nil ->
joins = Map.put(joins, key, new_presence)
{joins, leaves}
current_presence ->
new_refs = Enum.map(new_presence.metas, & &1.phx_ref)
cur_refs = Enum.map(current_presence.metas, & &1.phx_ref)
joined_metas = Enum.filter(new_presence.metas, &(&1.phx_ref not in cur_refs))
left_metas = Enum.filter(current_presence.metas, &(&1.phx_ref not in new_refs))
joins =
case joined_metas do
[] -> joins
joined_metas -> Map.put(joins, key, %{new_presence | metas: joined_metas})
end
leaves =
case left_metas do
[] -> leaves
left_metas -> Map.put(leaves, key, %{current_presence | metas: left_metas})
end
{joins, leaves}
end
end)
apply_diff(state, %{joins: joins, leaves: leaves})
end
@doc """
Apply the given joins and leaves diff to the given presence list.
Returns a tuple containing the updated presence list as well as two lists
with information about the joins and leaves that took place.
## Examples
iex> state = %{}
iex> diff = %{joins: %{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}, leaves: %{}}
iex> PhoenixPresenceList.apply_diff(state, diff)
{%{"foo" => %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}},
[{"foo", nil, %{metas: [%{phx_ref: "dWIi5WZTuJg="}]}}], []}
"""
def apply_diff(state, %{joins: joins, leaves: leaves}) do
{state, joined} = apply_diff_joins(state, joins)
{state, left} = apply_diff_leaves(state, leaves)
{state, joined, left}
end
defp apply_diff_joins(state, joins) do
{state, joined} =
Enum.reduce(joins, {state, []}, fn {key, new_presence}, {state, joined} ->
current_presence = state[key]
state =
Map.update(state, key, new_presence, fn current_presence ->
joined_refs = Enum.map(new_presence.metas, & &1.phx_ref)
current_metas = Enum.filter(current_presence.metas, &(&1.phx_ref not in joined_refs))
%{new_presence | metas: current_metas ++ new_presence.metas}
end)
updated_presence = state[key]
join_info = {key, current_presence, updated_presence}
{state, [join_info | joined]}
end)
{state, Enum.reverse(joined)}
end
defp apply_diff_leaves(state, leaves) do
{state, left} =
Enum.reduce(leaves, {state, []}, fn {key, left_presence}, {state, left} ->
case state[key] do
nil ->
{state, left}
current_presence ->
refs_to_remove = Enum.map(left_presence.metas, & &1.phx_ref)
filtered_metas =
Enum.filter(current_presence.metas, &(&1.phx_ref not in refs_to_remove))
updated_presence = %{current_presence | metas: filtered_metas}
leave_info = {key, updated_presence, left_presence}
state =
case filtered_metas do
[] -> Map.delete(state, key)
_ -> Map.put(state, key, updated_presence)
end
{state, [leave_info | left]}
end
end)
{state, Enum.reverse(left)}
end
end
|
lib/phoenix_presence_list.ex
| 0.85555
| 0.454593
|
phoenix_presence_list.ex
|
starcoder
|
defmodule Day7 do
def run_part1() do
AOCHelper.read_input()
|> Solution.count_bags_containing(:shiny_gold)
end
def run_part2() do
AOCHelper.read_input()
|> Solution.count_bags_inside_bag(:shiny_gold)
end
def debug_sample() do
[
"light red bags contain 1 bright white bag, 2 muted yellow bags.",
"dark orange bags contain 3 bright white bags, 4 muted yellow bags.",
"bright white bags contain 1 shiny gold bag.",
"muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.",
"shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.",
"dark olive bags contain 3 faded blue bags, 4 dotted black bags.",
"vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.",
"faded blue bags contain no other bags.",
"dotted black bags contain no other bags.",
]
|> Solution.compute()
end
end
defmodule Solution do
def count_bags_inside_bag(lines, root_bag) do
lines
|> RuleParser.build_rules()
|> count(root_bag)
end
# Each contained bag counts itself plus, recursively, everything inside it.
def count(rules, root_bag) do
Map.fetch!(rules, root_bag)
|> Enum.map(fn {bag, count} ->
count + count * count(rules, bag)
end)
|> Enum.sum
end
def count_bags_containing(lines, bag_identifier) do
lines
|> RuleParser.build_rules()
|> find_possibilities([bag_identifier])
|> Enum.count
end
def find_possibilities(rules, identifiers), do: find_possibilities(rules, identifiers, MapSet.new())
def find_possibilities(_rules, [], acc), do: acc
def find_possibilities(rules, [head | tail], acc) do
# Collect every bag whose rule directly contains `head`, then recurse on
# those bags and on the remaining identifiers, accumulating the union.
found =
rules
|> Map.keys
|> Enum.filter(fn k -> rules |> Map.fetch!(k) |> Map.has_key?(head) end)
|> MapSet.new()
|> MapSet.union(acc)
found
|> MapSet.union(find_possibilities(rules, MapSet.to_list(found), acc))
|> MapSet.union(find_possibilities(rules, tail, acc))
end
end
defmodule RuleParser do
def build_rules(raw_rules), do: build_rules(raw_rules, %{})
def build_rules([], rules), do: rules
def build_rules([head | tail], rules) do
build_rules(tail, Map.merge(rules, build_rule(head)))
end
def build_rule(line) do
[bag, content] = line |> String.split(" contain ")
bag_identifier =
bag
|> String.trim_trailing(" bags")
|> String.replace(" ", "_")
|> String.to_atom()
content_map =
content
|> String.split(", ")
|> Enum.map(fn bag_rule ->
case bag_rule do
"no other bags." -> %{}
rule ->
{quantity, bag_name} =
rule
|> String.trim_trailing(".")
|> String.trim_trailing(" bags")
|> String.trim_trailing(" bag")
|> Integer.parse()
bag_identifier =
bag_name
|> String.trim_leading(" ")
|> String.replace(" ", "_")
|> String.to_atom()
%{bag_identifier => quantity}
end
end)
|> Enum.reduce(&Map.merge/2)
%{bag_identifier => content_map}
end
end
defmodule AOCHelper do
def read_input() do
"input.txt"
|> File.read!()
|> String.split("\n")
|> Enum.map(&(String.replace(&1, "\r", "")))
end
end
|
aoc-2020/day7/lib/day7.ex
| 0.63114
| 0.503052
|
day7.ex
|
starcoder
|
defmodule Mix.Task do
@moduledoc """
A simple module that provides conveniences for creating,
loading and manipulating tasks.
A Mix task can be defined by simply using `Mix.Task`
in a module starting with `Mix.Tasks.` and defining
the `run/1` function:
defmodule Mix.Tasks.Hello do
use Mix.Task
def run(_args) do
Mix.shell.info "hello"
end
end
The `run/1` function will receive all arguments passed
to the command line.
## Attributes
There are a couple of attributes available in Mix tasks to
configure them in Mix:
* `@shortdoc` - makes the task public with a short description that appears
on `mix help`
* `@recursive` - run the task recursively in umbrella projects
## Documentation
Users can read the documentation for public Mix tasks by doing `mix help
my_task`. The documentation that will be shown is the `@moduledoc` of the
task's module.
"""
@type task_name :: String.t | atom
@type task_module :: atom
@doc """
A task needs to implement `run` which receives
a list of command line args.
"""
@callback run([binary]) :: any
@doc false
defmacro __using__(_opts) do
quote do
Enum.each [:shortdoc, :recursive],
&Module.register_attribute(__MODULE__, &1, persist: true)
@behaviour Mix.Task
end
end
@doc """
Loads all tasks in all code paths.
"""
@spec load_all() :: [task_module]
def load_all, do: load_tasks(:code.get_path)
@doc """
Loads all tasks in the given `paths`.
"""
@spec load_tasks([List.Chars.t]) :: [task_module]
def load_tasks(dirs) do
# We may get duplicate modules because we look through the
# entire load path so make sure we only return unique modules.
for(dir <- dirs,
{:ok, files} = :erl_prim_loader.list_dir(to_char_list(dir)),
file <- files,
mod = task_from_path(file),
do: mod)
|> Enum.uniq
end
@prefix_size byte_size("Elixir.Mix.Tasks.")
@suffix_size byte_size(".beam")
defp task_from_path(filename) do
base = Path.basename(filename)
part = byte_size(base) - @prefix_size - @suffix_size
case base do
<<"Elixir.Mix.Tasks.", rest :: binary-size(part), ".beam">> ->
mod = :"Elixir.Mix.Tasks.#{rest}"
ensure_task?(mod) && mod
_ ->
nil
end
end
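# For example, "Elixir.Mix.Tasks.Compile.beam" yields :"Elixir.Mix.Tasks.Compile".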
@doc """
Returns all loaded task modules.
Modules that are not yet loaded won't show up.
Check `load_all/0` if you want to preload all tasks.
"""
@spec all_modules() :: [task_module]
def all_modules do
for {module, _} <- :code.all_loaded,
task?(module),
do: module
end
@doc """
Gets the moduledoc for the given task `module`.
Returns the moduledoc or `nil`.
"""
@spec moduledoc(task_module) :: String.t | nil
def moduledoc(module) when is_atom(module) do
case Code.get_docs(module, :moduledoc) do
{_line, moduledoc} -> moduledoc
nil -> nil
end
end
@doc """
Gets the shortdoc for the given task `module`.
Returns the shortdoc or `nil`.
"""
@spec shortdoc(task_module) :: String.t | nil
def shortdoc(module) when is_atom(module) do
case List.keyfind module.__info__(:attributes), :shortdoc, 0 do
{:shortdoc, [shortdoc]} -> shortdoc
_ -> nil
end
end
@doc """
Checks if the task should be run recursively for all sub-apps in
umbrella projects.
Returns `true` or `false`.
"""
@spec recursive(task_module) :: boolean
def recursive(module) when is_atom(module) do
case List.keyfind module.__info__(:attributes), :recursive, 0 do
{:recursive, [setting]} -> setting
_ -> false
end
end
@doc """
Returns the task name for the given `module`.
"""
@spec task_name(task_module) :: task_name
def task_name(module) when is_atom(module) do
Mix.Utils.module_name_to_command(module, 2)
end
@doc """
Checks if an alias called `task` exists.
For more information about task aliasing, take a look at the "Aliasing"
section in the docs for `Mix`.
"""
@spec alias?(task_name) :: boolean
def alias?(task) when is_binary(task) do
alias?(String.to_atom(task))
end
def alias?(task) when is_atom(task) do
Mix.Project.config[:aliases][task] != nil
end
@doc """
Receives a task name and returns the task module if found.
Otherwise returns `nil` in case the module
exists but it isn't a task or cannot be found.
"""
@spec get(task_name) :: task_module | nil
def get(task) do
case fetch(task) do
{:ok, module} -> module
{:error, _} -> nil
end
end
@doc """
Receives a task name and retrieves the task module.
## Exceptions
* `Mix.NoTaskError` - raised if the task could not be found
* `Mix.InvalidTaskError` - raised if the task is not a valid `Mix.Task`
"""
@spec get!(task_name) :: task_module | no_return
def get!(task) do
case fetch(task) do
{:ok, module} ->
module
{:error, :invalid} ->
Mix.raise Mix.InvalidTaskError, task: task
{:error, :not_found} ->
Mix.raise Mix.NoTaskError, task: task
end
end
defp fetch(task) when is_binary(task) or is_atom(task) do
case Mix.Utils.command_to_module(to_string(task), Mix.Tasks) do
{:module, module} ->
if task?(module), do: {:ok, module}, else: {:error, :invalid}
{:error, _} ->
{:error, :not_found}
end
end
@doc """
Runs a `task` with the given `args`.
If the task was not yet invoked, it runs the task and
returns the result.
If there is an alias with the same name, the alias
will be invoked instead of a task.
If the task or alias were already invoked, it does not
run them again and simply aborts with `:noop`.
It may raise an exception if an alias or a task can't
be found or the task is invalid. Check `get!/1` for more
information.
"""
@spec run(task_name, [any]) :: any
def run(task, args \\ [])
def run(task, args) when is_atom(task) do
run(Atom.to_string(task), args)
end
def run(task, args) when is_binary(task) do
proj = Mix.Project.get
alias = Mix.Project.config[:aliases][String.to_atom(task)]
cond do
alias && Mix.TasksServer.run({:alias, task, proj}) ->
res = run_alias(List.wrap(alias), args, :ok)
Mix.TasksServer.put({:task, task, proj})
res
Mix.TasksServer.run({:task, task, proj}) ->
run_task(proj, task, args)
true ->
:noop
end
end
defp run_task(proj, task, args) do
# 1. If the task is available, we run it.
# 2. Otherwise we look for it in dependencies.
# 3. Finally, we compile the current project in hope it is available.
module =
get_task_or_run(proj, task, fn -> deps_loadpaths end) ||
get_task_or_run(proj, task, fn -> Mix.Project.compile([]) end) ||
get!(task)
recursive = recursive(module)
cond do
recursive == true and Mix.Project.umbrella? ->
Mix.ProjectStack.recur fn ->
recur(fn _ -> run(task, args) end)
end
recursive == false and Mix.ProjectStack.recursing? ->
Mix.ProjectStack.root(fn -> run(task, args) end)
true ->
Mix.TasksServer.put({:task, task, proj})
module.run(args)
end
end
defp deps_loadpaths do
Mix.Task.run "deps.check"
Mix.Task.run "deps.loadpaths"
end
defp get_task_or_run(proj, task, fun) do
cond do
module = get(task) ->
module
proj ->
fun.()
nil
true ->
nil
end
end
defp run_alias([h|t], alias_args, _res) when is_binary(h) do
[task|args] = OptionParser.split(h)
res = Mix.Task.run task, join_args(args, alias_args, t)
run_alias(t, alias_args, res)
end
defp run_alias([h|t], alias_args, _res) when is_function(h, 1) do
res = h.(join_args([], alias_args, t))
run_alias(t, alias_args, res)
end
defp run_alias([], _alias_task, res) do
res
end
defp join_args(args, alias_args, []), do: args ++ alias_args
defp join_args(args, _alias_args, _), do: args
@doc """
Clears all invoked tasks, allowing them to be reinvoked.
This operation is not recursive.
"""
@spec clear :: :ok
def clear do
Mix.TasksServer.clear
end
@doc """
Reenables a given task so it can be executed again down the stack.
Both alias and the regular stack are reenabled when this function
is called.
If an umbrella project reenables a task, it is reenabled for all
children projects.
"""
@spec reenable(task_name) :: :ok
def reenable(task) when is_binary(task) or is_atom(task) do
task = to_string(task)
proj = Mix.Project.get
Mix.TasksServer.delete_many([{:task, task, proj},
{:alias, task, proj}])
_ = if (module = get(task)) && recursive(module) && Mix.Project.umbrella? do
recur fn proj ->
Mix.TasksServer.delete_many([{:task, task, proj},
{:alias, task, proj}])
end
end
:ok
end
defp recur(fun) do
# Get all dependency configuration but not the deps path
# as we leave the control of the deps path still to the
# umbrella child.
config = Mix.Project.deps_config |> Keyword.delete(:deps_path)
for %Mix.Dep{app: app, opts: opts} <- Mix.Dep.Umbrella.loaded do
Mix.Project.in_project(app, opts[:path], config, fun)
end
end
@doc """
Returns `true` if given module is a task.
"""
@spec task?(task_module) :: boolean()
def task?(module) when is_atom(module) do
match?('Elixir.Mix.Tasks.' ++ _, Atom.to_char_list(module)) and ensure_task?(module)
end
defp ensure_task?(module) do
Code.ensure_loaded?(module) and function_exported?(module, :run, 1)
end
end
|
lib/mix/lib/mix/task.ex
| 0.840259
| 0.649273
|
task.ex
|
starcoder
|
defmodule Forth.Builtins do
alias Forth.Exceptions
defmodule Internals do
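# Evaluates a compiled block term by term against a stack:
# `{:capture, names}` pops values into a closure map,
# `{:closure, name}` pushes a captured value back onto the stack, and
# `{:def, fun_or_body}` invokes a builtin function or evaluates a
# user-defined word with a fresh closure. Any other term is pushed as data.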
def block_eval(block_tuple, stack, closure \\ %{})
def block_eval([], stack, _),
do: stack
def block_eval([{:capture, capture_list}|rest], stack, closure) do
{next_stack, next_closure} = create_closure(capture_list, stack, closure)
block_eval(rest, next_stack, next_closure)
end
def block_eval([{:closure, name}|rest], stack, closure),
do: block_eval(rest, [closure[name]|stack], closure)
def block_eval([{:def, fun}|rest], stack, closure) when is_function(fun),
do: block_eval(rest, fun.(stack, closure), closure)
def block_eval([{:def, body}|rest], stack, closure) do
next_stack = block_eval(body, stack, %{})
block_eval(rest, next_stack, closure)
end
def block_eval([term|rest], stack, closure),
do: block_eval(rest, [term|stack], closure)
def block_eval({:compiled, block}, stack, closure),
do: block_eval(block, stack, closure)
def create_closure([], stack, closure), do: {stack, closure}
def create_closure([name|rest], [value|stack], closure),
do: create_closure(rest, stack, Map.put(closure, name, value))
def create_closure([_|_], [], _),
do: raise Exceptions.StackUnderflow, []
end
defmodule Arithmetic do
def sum([b, a | rest], _), do: [a + b | rest]
def sum(stack, _), do: raise Exceptions.StackUnderflow, stack
def sub([b, a | rest], _), do: [a - b | rest]
def sub(stack, _), do: raise Exceptions.StackUnderflow, stack
def mul([b, a | rest], _), do: [a * b | rest]
def mul(stack, _), do: raise Exceptions.StackUnderflow, stack
def div([0, _ | _any], _), do: raise Exceptions.DivisionByZero
def div([b, a | rest], _), do: [:erlang.div(a, b) | rest]
def div(stack, _), do: raise Exceptions.StackUnderflow, stack
def eq([b, a | rest], _), do: [to_int(a == b) | rest]
def eq(stack, _), do: raise Exceptions.StackUnderflow, stack
def lt([b, a | rest], _), do: [to_int(a < b) | rest]
def lt(stack, _), do: raise Exceptions.StackUnderflow, stack
def gt([b, a | rest], _), do: [to_int(a > b) | rest]
def gt(stack, _), do: raise Exceptions.StackUnderflow, stack
defp to_int(falsey) when falsey in [false, nil], do: 0
defp to_int(_), do: 1
end
defmodule Logic do
import Kernel, except: [if: 2]
import Forth.Builtins.Internals
def if([then, value | rest], _) do
Kernel.if value != 0, do: block_eval(then, rest), else: rest
end
def if(stack, _), do: raise Exceptions.StackUnderflow, stack
def ifnot([then, value | rest], _) do
Kernel.if value == 0, do: block_eval(then, rest), else: rest
end
def ifnot(stack, _), do: raise Exceptions.StackUnderflow, stack
end
defmodule StackManipulation do
import Forth.Builtins.Internals
def dup([head | _] = stack, _), do: [head | stack]
def dup(stack, _), do: raise Exceptions.StackUnderflow, stack
def swap([b, a | rest], _), do: [a, b | rest]
def swap(stack, _), do: raise Exceptions.StackUnderflow, stack
def over([_b, a | _rest] = stack, _), do: [a | stack]
def over(stack, _), do: raise Exceptions.StackUnderflow, stack
def drop([_ | rest], _), do: rest
def drop(stack, _), do: raise Exceptions.StackUnderflow, stack
def call([{:compiled, block} | rest], closure),
do: block_eval(block, rest, closure)
def call(stack, _), do: raise Exceptions.StackUnderflow, stack
def print([a | rest], _), do: (IO.puts(a); rest)
def print(stack, _), do: raise Exceptions.StackUnderflow, stack
end
end
|
lib/forth/builtins.ex
| 0.655887
| 0.414721
|
builtins.ex
|
starcoder
|
defmodule Ash.SatSolver do
@moduledoc false
alias Ash.Filter
alias Ash.Filter.{Expression, Not, Predicate}
def strict_filter_subset(filter, candidate) do
case {filter, candidate} do
{%{expression: nil}, %{expression: nil}} ->
true
{%{expression: nil}, _candidate_expr} ->
true
{_filter_expr, %{expression: nil}} ->
false
{filter, candidate} ->
do_strict_filter_subset(filter, candidate)
end
end
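# If `filter AND candidate` is unsatisfiable, the candidate can never match
# anything, so it is not a subset. If `NOT filter AND candidate` is
# unsatisfiable, everything matching the candidate also matches the filter,
# i.e. a strict subset. Otherwise we cannot tell statically, hence `:maybe`.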
defp do_strict_filter_subset(filter, candidate) do
case transform_and_solve(
filter.resource,
Expression.new(:and, filter.expression, candidate.expression)
) do
{:error, :unsatisfiable} ->
false
{:ok, _} ->
case transform_and_solve(
filter.resource,
Expression.new(:and, Not.new(filter.expression), candidate.expression)
) do
{:error, :unsatisfiable} ->
true
_ ->
:maybe
end
end
end
defp filter_to_expr(nil), do: nil
defp filter_to_expr(false), do: false
defp filter_to_expr(true), do: true
defp filter_to_expr(%Filter{expression: expression}), do: filter_to_expr(expression)
defp filter_to_expr(%Predicate{} = predicate), do: predicate
defp filter_to_expr(%Not{expression: expression}), do: {:not, filter_to_expr(expression)}
defp filter_to_expr(%Expression{op: op, left: left, right: right}) do
{op, filter_to_expr(left), filter_to_expr(right)}
end
def transform_and_solve(resource, expression) do
expression
|> consolidate_relationships(resource)
|> upgrade_related_filters_to_join_keys(resource)
|> build_expr_with_predicate_information()
|> solve_expression()
end
defp upgrade_related_filters_to_join_keys(expression, resource) do
Filter.map(expression, &upgrade_predicate(&1, resource))
end
defp upgrade_predicate(%Predicate{relationship_path: path} = predicate, resource)
when path != [] do
with relationship when not is_nil(relationship) <- Ash.Resource.relationship(resource, path),
true <- predicate.attribute.name == relationship.destination_field,
new_attribute when not is_nil(new_attribute) <-
Ash.Resource.attribute(relationship.source, relationship.source_field),
{:ok, new_predicate} <-
Predicate.new(
resource,
new_attribute,
predicate.predicate.__struct__,
predicate.value,
:lists.droplast(path)
) do
upgrade_predicate(new_predicate, resource)
else
_ ->
predicate
end
end
defp upgrade_predicate(other, _), do: other
defp consolidate_relationships(expression, resource) do
{replacements, _all_relationship_paths} =
expression
|> Filter.relationship_paths()
|> Enum.reduce({%{}, []}, fn path, {replacements, kept_paths} ->
case find_synonymous_relationship_path(resource, kept_paths, path) do
nil ->
{replacements, [path | kept_paths]}
synonymous_path ->
Map.put(replacements, path, synonymous_path)
end
end)
Filter.map(expression, fn
%Predicate{relationship_path: path} = predicate when path != [] ->
case Map.fetch(replacements, path) do
:error -> predicate
{:ok, replacement} -> %{predicate | relationship_path: replacement}
end
other ->
other
end)
end
defp find_synonymous_relationship_path(resource, paths, path) do
Enum.find_value(paths, fn candidate_path ->
if synonymous_relationship_paths?(resource, candidate_path, path) do
candidate_path
else
false
end
end)
end
def synonymous_relationship_paths?(_, [], []), do: true
def synonymous_relationship_paths?(_resource, candidate_path, path)
when length(candidate_path) != length(path),
do: false
def synonymous_relationship_paths?(resource, [candidate_first | candidate_rest], [first | rest])
when first == candidate_first do
synonymous_relationship_paths?(
Ash.Resource.relationship(resource, candidate_first).destination,
candidate_rest,
rest
)
end
def synonymous_relationship_paths?(
left_resource,
candidate,
search,
right_resource \\ nil
)
def synonymous_relationship_paths?(_, [], [], _), do: true
def synonymous_relationship_paths?(_, [], _, _), do: false
def synonymous_relationship_paths?(_, _, [], _), do: false
def synonymous_relationship_paths?(
left_resource,
[candidate_first | candidate_rest] = candidate,
[first | rest] = search,
right_resource
) do
right_resource = right_resource || left_resource
relationship = Ash.Resource.relationship(left_resource, first)
candidate_relationship = Ash.Resource.relationship(right_resource, candidate_first)
cond do
!relationship || !candidate_relationship ->
false
relationship.type == :many_to_many && candidate_relationship.type == :has_many ->
synonymous_relationship_paths?(
left_resource,
[relationship.join_relationship | candidate],
search,
right_resource
)
relationship.type == :has_many && candidate_relationship.type == :many_to_many ->
synonymous_relationship_paths?(
left_resource,
candidate,
[candidate_relationship.join_relationship | search],
right_resource
)
true ->
comparison_keys = [
:source_field,
:destination_field,
:source_field_on_join_table,
:destination_field_on_join_table,
:destination_field,
:destination
]
Map.take(relationship, comparison_keys) ==
Map.take(candidate_relationship, comparison_keys) and
synonymous_relationship_paths?(relationship.destination, candidate_rest, rest)
end
end
defp build_expr_with_predicate_information(expression) do
all_predicates =
Filter.reduce(expression, [], fn
%Predicate{} = predicate, predicates ->
[predicate | predicates]
_, predicates ->
predicates
end)
simplified =
Filter.map(expression, fn
%Predicate{} = predicate ->
predicate
|> find_simplification(all_predicates)
|> case do
nil ->
predicate
{:simplify, simplification} ->
simplification
end
other ->
other
end)
if simplified == expression do
all_predicates =
Filter.reduce(expression, [], fn
%Predicate{} = predicate, predicates ->
[predicate | predicates]
_, predicates ->
predicates
end)
|> Enum.uniq()
comparison_expressions =
all_predicates
|> Enum.reduce([], fn predicate, new_expressions ->
all_predicates
|> Enum.filter(fn other_predicate ->
other_predicate != predicate &&
other_predicate.relationship_path == predicate.relationship_path &&
other_predicate.attribute.name == predicate.attribute.name
end)
|> Enum.reduce(new_expressions, fn other_predicate, new_expressions ->
case Predicate.compare(predicate, other_predicate) do
inclusive when inclusive in [:right_includes_left, :mutually_inclusive] ->
[{:not, {:and, {:not, other_predicate}, predicate}} | new_expressions]
exclusive when exclusive in [:right_excludes_left, :mutually_exclusive] ->
[{:not, {:and, other_predicate, predicate}} | new_expressions]
{:simplify, _} ->
# Filter should be fully simplified here
raise "What"
_other ->
# If we can't tell, we assume they are exclusive statements
[{:not, {:and, other_predicate, predicate}} | new_expressions]
end
end)
end)
|> Enum.uniq()
expression = filter_to_expr(expression)
Enum.reduce(comparison_expressions, expression, fn comparison_expression, expression ->
{:and, comparison_expression, expression}
end)
else
build_expr_with_predicate_information(simplified)
end
end
defp find_simplification(predicate, predicates) do
predicates
|> Enum.find_value(fn other_predicate ->
case Predicate.compare(predicate, other_predicate) do
{:simplify, simplification} -> {:simplify, simplification}
_ -> false
end
end)
end
def solve_expression(expression) do
expression_with_constants = {:and, true, {:and, {:not, false}, expression}}
{bindings, expression} = extract_bindings(expression_with_constants)
expression
|> to_conjunctive_normal_form()
|> lift_clauses()
|> negations_to_negative_numbers()
|> Picosat.solve()
|> solutions_to_predicate_values(bindings)
end
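# For example, once bindings map predicates to integers, an expression like
# {:or, 1, {:and, 2, {:not, 1}}} becomes CNF
# {:and, {:or, 1, 2}, {:or, 1, {:not, 1}}}, is lifted into clause lists
# [[1, 2], [1, {:not, 1}]], negations become negative integers
# [[1, 2], [1, -1]], and those clauses are handed to Picosat.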
defp solutions_to_predicate_values({:ok, solution}, bindings) do
scenario =
Enum.reduce(solution, %{true: [], false: []}, fn var, state ->
fact = Map.get(bindings, abs(var))
Map.put(state, fact, var > 0)
end)
{:ok, scenario}
end
defp solutions_to_predicate_values({:error, error}, _), do: {:error, error}
defp extract_bindings(expr, bindings \\ %{current: 1})
defp extract_bindings({operator, left, right}, bindings) do
{bindings, left_extracted} = extract_bindings(left, bindings)
{bindings, right_extracted} = extract_bindings(right, bindings)
{bindings, {operator, left_extracted, right_extracted}}
end
defp extract_bindings({:not, value}, bindings) do
{bindings, extracted} = extract_bindings(value, bindings)
{bindings, {:not, extracted}}
end
defp extract_bindings(value, %{current: current} = bindings) do
current_binding =
Enum.find(bindings, fn {key, binding_value} ->
key != :current && binding_value == value
end)
case current_binding do
nil ->
new_bindings =
bindings
|> Map.put(:current, current + 1)
|> Map.put(current, value)
{new_bindings, current}
{binding, _} ->
{bindings, binding}
end
end
# A helper function for formatting to the same output we'd give to picosat
@doc false
def to_picosat(clauses, variable_count) do
clause_count = Enum.count(clauses)
formatted_input =
Enum.map_join(clauses, "\n", fn clause ->
format_clause(clause) <> " 0"
end)
"p cnf #{variable_count} #{clause_count}\n" <> formatted_input
end
defp negations_to_negative_numbers(clauses) do
Enum.map(
clauses,
fn
{:not, var} when is_integer(var) ->
[negate_var(var)]
var when is_integer(var) ->
[var]
clause ->
Enum.map(clause, fn
{:not, var} -> negate_var(var)
var -> var
end)
end
)
end
defp negate_var(var, multiplier \\ -1)
defp negate_var({:not, value}, multiplier) do
negate_var(value, multiplier * -1)
end
defp negate_var(value, multiplier), do: value * multiplier
defp format_clause(clause) do
Enum.map_join(clause, " ", fn
{:not, var} -> "-#{var}"
var -> "#{var}"
end)
end
defp lift_clauses({:and, left, right}) do
lift_clauses(left) ++ lift_clauses(right)
end
defp lift_clauses({:or, left, right}) do
[lift_or_clauses(left) ++ lift_or_clauses(right)]
end
defp lift_clauses(value), do: [[value]]
defp lift_or_clauses({:or, left, right}) do
lift_or_clauses(left) ++ lift_or_clauses(right)
end
defp lift_or_clauses(value), do: [value]
defp to_conjunctive_normal_form(expression) do
expression
|> demorgans_law()
|> distributive_law()
end
defp distributive_law(expression) do
distributive_law_applied = apply_distributive_law(expression)
if expression == distributive_law_applied do
expression
else
distributive_law(distributive_law_applied)
end
end
defp apply_distributive_law({:or, left, {:and, right1, right2}}) do
left_distributed = apply_distributive_law(left)
{:and, {:or, left_distributed, apply_distributive_law(right1)},
{:or, left_distributed, apply_distributive_law(right2)}}
end
defp apply_distributive_law({:or, {:and, left1, left2}, right}) do
right_distributed = apply_distributive_law(right)
{:and, {:or, apply_distributive_law(left1), right_distributed},
{:or, apply_distributive_law(left2), right_distributed}}
end
defp apply_distributive_law({:not, expression}) do
{:not, apply_distributive_law(expression)}
end
defp apply_distributive_law({operator, left, right}) when operator in [:and, :or] do
{operator, apply_distributive_law(left), apply_distributive_law(right)}
end
defp apply_distributive_law(var) when is_integer(var) do
var
end
defp demorgans_law(expression) do
demorgans_law_applied = apply_demorgans_law(expression)
if expression == demorgans_law_applied do
expression
else
demorgans_law(demorgans_law_applied)
end
end
defp apply_demorgans_law({:not, {:and, left, right}}) do
{:or, {:not, apply_demorgans_law(left)}, {:not, apply_demorgans_law(right)}}
end
defp apply_demorgans_law({:not, {:or, left, right}}) do
{:and, {:not, apply_demorgans_law(left)}, {:not, apply_demorgans_law(right)}}
end
defp apply_demorgans_law({operator, left, right}) when operator in [:or, :and] do
{operator, apply_demorgans_law(left), apply_demorgans_law(right)}
end
defp apply_demorgans_law({:not, expression}) do
{:not, apply_demorgans_law(expression)}
end
defp apply_demorgans_law(var) when is_integer(var) do
var
end
end
|
lib/sat_solver.ex
| 0.710829
| 0.442938
|
sat_solver.ex
|
starcoder
|
defmodule Mint.HTTP do
@moduledoc """
Processless HTTP connection data structure and functions.
Single interface for `Mint.HTTP1` and `Mint.HTTP2` with support for version
negotiation and proxies.
## Usage
To establish a connection with a given server, use `connect/4`. This will
return an opaque data structure that represents the connection
to the server. To send a request, you can use `request/5`. Sending a request
does not take care of the response to that request; instead, we use `Mint.HTTP.stream/2`
to process the response, which we will look at in just a bit. The connection is a
wrapper around a TCP (`:gen_tcp` module) or SSL (`:ssl` module) socket that is
set in **active mode** (with `active: :once`). This means that TCP/SSL messages
will be delivered to the process that started the connection.
The process that owns the connection is responsible for receiving the messages
(for example, a GenServer is responsible for defining `handle_info/2`). However,
`Mint.HTTP` makes it easy to identify TCP/SSL messages that are coming from the
connection with the server with the `stream/2` function. This function takes the
connection and a term and returns `:unknown` if the term is not a TCP/SSL message
belonging to the connection. If the term *is* a message for the connection, then
a response and a new connection are returned. It's important to store the new
returned connection data structure over the old one since the connection is an
immutable data structure.
Let's see an example of a common workflow of connecting to a server, sending a
request, and processing the response. We start by using `connect/3` to connect
to a server.
{:ok, conn} = Mint.HTTP.connect(:http, "httpbin.org", 80)
`conn` is a data structure that represents the connection.
To send a request, we use `request/5`.
{:ok, conn, request_ref} = Mint.HTTP.request(conn, "GET", "/", [], nil)
As you can see, sending a request returns a new updated `conn` struct and a
`request_ref`. The updated connection struct is returned because the connection
is an immutable structure keeping the connection state, so every action we do on it must return a new,
possibly updated, connection that we're responsible for storing over the old
one. `request_ref` is a unique reference that can be used to identify which
request a given response belongs to.
Now that we sent our request, we're responsible for receiving the messages that
the TCP/SSL socket will send to our process. For example, in a GenServer
we would do that with a `handle_info/2` callback. In our case, we're going to
use a simple `receive`. `Mint.HTTP` provides a way to tell if a message comes
from the socket wrapped by our connection or not: the `stream/2` function. If
the message we pass to it is not destined for our connection, this function returns
`:unknown`. Otherwise, it returns an updated connection and one or more responses.
receive do
message ->
case Mint.HTTP.stream(conn, message) do
:unknown -> handle_normal_message(message)
{:ok, conn, responses} -> handle_responses(conn, responses)
end
end
`responses` is a list of possible responses. The most common responses are:
* `{:status, request_ref, status_code}` for the status code
* `{:headers, request_ref, headers}` for the response headers
* `{:data, request_ref, binary}` for pieces of the response body
* `{:done, request_ref}` for the end of the response
As you can see, all responses have the unique request reference as the second
element of the tuple, so that we know which request the response belongs to.
See `t:Mint.Types.response/0` for the full list of responses returned by `Mint.HTTP.stream/2`.
## Architecture
A processless architecture like the one here requires a few modifications to how
we use this HTTP client. Usually, you will want to create this data structure
in a process that acts as *connection manager*. Sometimes, you might want to
have a single process responsible for multiple connections, either to just one
host or multiple hosts. For more discussion on architectures based off of this
HTTP client, see the [*Architecture*](architecture.html) page in the docs.
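As a minimal sketch of that pattern (the module name, supervision, and error
handling are illustrative, not part of Mint):

    defmodule ConnectionManager do
      use GenServer

      def start_link({scheme, host, port}) do
        GenServer.start_link(__MODULE__, {scheme, host, port})
      end

      @impl true
      def init({scheme, host, port}) do
        # Connect once and keep the immutable conn struct as our state.
        {:ok, conn} = Mint.HTTP.connect(scheme, host, port)
        {:ok, %{conn: conn}}
      end

      @impl true
      def handle_info(message, %{conn: conn} = state) do
        case Mint.HTTP.stream(conn, message) do
          :unknown ->
            # Not a message for this connection's socket.
            {:noreply, state}

          {:ok, conn, _responses} ->
            # Process the responses here, then store the updated conn.
            {:noreply, %{state | conn: conn}}

          {:error, conn, _reason, _responses} ->
            # Handle the error; the conn may now be closed.
            {:noreply, %{state | conn: conn}}
        end
      end
    end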
## SSL certificates
When using SSL, you can pass in your own CA certificate store or use one provided by Mint. Mint
doesn't ship with the certificate store itself, but it has an optional dependency on
[CAStore](https://github.com/elixir-mint/castore), which provides an up-to-date certificate store. If
you don't want to use your own certificate store, just add `:castore` to your dependencies.
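For example, in your `mix.exs` (version requirements are illustrative):

    defp deps do
      [
        {:castore, "~> 0.1"},
        {:mint, "~> 1.0"}
      ]
    end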
## Mode
By default Mint operates in **active mode** meaning that the process that started the
connection receives socket messages. Mint also supports **passive mode**, where no messages
are sent to the process and the process needs to fetch data out of the socket manually.
The mode can be controlled at connection time through the `:mode` option in `connect/4`
or changed dynamically through `set_mode/2`. Passive mode is generally only recommended
for special use cases.
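For example, a passive mode workflow (host and timeout are illustrative) could
look like this:

    {:ok, conn} = Mint.HTTP.connect(:http, "httpbin.org", 80, mode: :passive)
    {:ok, conn, request_ref} = Mint.HTTP.request(conn, "GET", "/", [], nil)
    {:ok, conn, responses} = Mint.HTTP.recv(conn, 0, 5_000)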
"""
import Mint.Core.Util
alias Mint.{Types, TunnelProxy, UnsafeProxy}
alias Mint.Core.Transport
@behaviour Mint.Core.Conn
@opaque t() :: Mint.HTTP1.t() | Mint.HTTP2.t()
@doc """
Macro to check that a given received `message` is intended for the given connection `conn`.
This guard is useful in `receive` loops or in callbacks that handle generic messages (such as a
`c:GenServer.handle_info/2` callback) so that you don't have to hand the `message` to
`Mint.HTTP.stream/2` and check for the `:unknown` return value.
This macro can be used in guards.
**Note**: this macro is only available if you compile Mint with Elixir 1.10.0 or greater (and
OTP 21+, which is required by Elixir 1.10.0 and on).
## Examples
require Mint.HTTP
{:ok, conn, request_ref} = Mint.HTTP.request(conn, "POST", "/", headers, "")
receive do
message when Mint.HTTP.is_connection_message(conn, message) ->
Mint.HTTP.stream(conn, message)
other ->
# This message is related to something else or to some other connection
end
"""
define_is_connection_message_guard()
@doc """
Creates a new connection to a given server.
Creates a new connection struct and establishes the connection to the given server,
identified by the given `host` and `port` combination. Both HTTP and HTTPS are supported
by passing respectively `:http` and `:https` as the `scheme`.
The connection struct wraps a socket, which is created once the connection
is established inside this function. If HTTP is used, then the created socket is a TCP
socket and the `:gen_tcp` module is used to create that socket. If HTTPS is used, then
the created socket is an SSL socket and the `:ssl` module is used to create that socket.
The socket is created in active mode (with `active: :once`), which is why it is important
to know the type of the socket: messages from the socket will be delivered directly to the
process that creates the connection and tagged appropriately by the socket module (see the
`:gen_tcp` and `:ssl` modules). See `stream/2` for more information on the messages and
how to process them and on the socket mode.
## Options
* `:hostname` - (string) explicitly provide the hostname used for the `Host` header,
hostname verification, SNI, and so on. **Required when `address` is not a string.**
* `:transport_opts` - (keyword) options to be given to the transport being used.
These options will be merged with some default options that cannot be overridden.
For more details, refer to the "Transport options" section below.
* `:mode` - (`:active` or `:passive`) whether to set the socket to active or
passive mode. See the "Mode" section in the module documentation and `set_mode/2`.
* `:protocols` - (list of atoms) a list of protocols to try when connecting to the
server. The possible values in the list are `:http1` for HTTP/1 and HTTP/1.1 and
`:http2` for HTTP/2. If only one protocol is present in the list, then the connection
will be forced to use that protocol. If both `:http1` and `:http2` are present in the
list, then Mint will negotiate the protocol. See the section "Protocol negotiation"
below for more information. Defaults to `[:http1, :http2]`.
* `:proxy_headers` - a list of headers (`t:Mint.Types.headers/0`) to pass when using
a proxy. They will be used for the `CONNECT` request in tunnel proxies or merged
with every request for forward proxies.
The following options are HTTP/1-specific and will force the connection
to be an HTTP/1 connection.
* `:proxy` - a `{scheme, address, port, opts}` tuple that identifies a proxy to
connect to. See the "Proxying" section below for more information.
The following options are HTTP/2-specific and will only be used on HTTP/2 connections.
* `:client_settings` - (keyword) a list of client HTTP/2 settings to send to the
server. See `Mint.HTTP2.put_settings/2` for more information. This is only used
in HTTP/2 connections.
## Protocol negotiation
If both `:http1` and `:http2` are present in the list passed in the `:protocols` option,
the protocol negotiation happens in the following way:
* If the scheme used to connect to the server is `:http`, then HTTP/1 or HTTP/1.1 is used.
* If the scheme is `:https`, then ALPN negotiation is used to determine the right
protocol. This means that the server will decide whether to use HTTP/1 or
HTTP/2. If the server doesn't support protocol negotiation, we will fall back to
HTTP/1. If the server negotiates a protocol that we don't know how to handle,
`{:error, {:bad_alpn_protocol, protocol}}` is returned.
## Proxying
You can set up proxying through the `:proxy` option, which is a tuple
`{scheme, address, port, opts}` that identifies the proxy to connect to.
Once a proxied connection is returned, the proxy is transparent to you and you
can use the connection like a normal HTTP/1 connection.
If the `scheme` is `:http`, we will connect to the host in the most compatible
way, supporting older proxy servers. Data will be sent in clear text.
If the connection scheme is `:https`, we will connect to the host with a tunnel
through the proxy. Using `:https` for both the proxy and the connection scheme
is not supported, it is recommended to use `:https` for the end host connection
instead of the proxy.
## Transport options
The options specified in `:transport_opts` are passed to the module that
implements the socket interface: `:gen_tcp` when the scheme is `:http`, and
`:ssl` when the scheme is `:https`. Please refer to the documentation for those
modules, as well as for `:inet.setopts/2`, for a detailed description of all
available options.
The behaviour of some options is modified by Mint, as described below.
A special case is the `:timeout` option, which is passed to the transport
module's `connect` function to limit the amount of time to wait for the
network connection to be established.
Common options for `:http` and `:https`:
* `:active` - controlled by the `:mode` option. Cannot be overridden.
* `:mode` - set to `:binary`. Cannot be overridden.
* `:packet` - set to `:raw`. Cannot be overridden.
* `:timeout` - connect timeout in milliseconds. Defaults to `30_000` (30
seconds), and may be overridden by the caller. Set to `:infinity` to
disable the connect timeout.
Options for `:https` only:
* `:alpn_advertised_protocols` - managed by Mint. Cannot be overridden.
* `:cacertfile` - if `:verify` is set to `:verify_peer` (the default) and
no CA trust store is specified using the `:cacertfile` or `:cacerts`
option, Mint will attempt to use the trust store from the
[CAStore](https://github.com/elixir-mint/castore) package or raise an
exception if this package is not available. Due to caching the
`:cacertfile` option is more efficient than `:cacerts`.
* `:ciphers` - defaults to the lists returned by
`:ssl.filter_cipher_suites(:ssl.cipher_suites(:all, version), [])`
where `version` is each value in the `:versions` setting. This list is
then filtered according to the blocklist in
[RFC7540 appendix A](https://tools.ietf.org/html/rfc7540#appendix-A);
May be overridden by the caller. See the "Supporting older cipher suites"
section below for some examples.
* `:depth` - defaults to `4`. May be overridden by the caller.
* `:partial_chain` - unless a custom `:partial_chain` function is specified,
Mint will enable its own partial chain handler, which accepts server
certificate chains containing a certificate that was issued by a
CA certificate in the CA trust store, even if that certificate is not
last in the chain. This improves interoperability with some servers
(for example, with a cross-signed intermediate CA or some misconfigured servers),
but is a less strict interpretation of the TLS specification than the
Erlang/OTP default behaviour.
* `:reuse_sessions` - defaults to `true`. May be overridden by the caller. If
`:"tlsv1.3"` is the only TLS version specified, `:reuse_sessions` will be
removed from the options.
* `:secure_renegotiate` - defaults to `true`. May be overridden by the
caller. If `:"tlsv1.3"` is the only TLS version specified, `:secure_renegotiate`
will be removed from the options.
* `:server_name_indication` - defaults to specified destination hostname.
May be overridden by the caller.
* `:verify` - defaults to `:verify_peer`. May be overridden by the caller.
* `:verify_fun` - unless a custom `:verify_fun` is specified, or `:verify`
is set to `:verify_none`, Mint will enable hostname verification with
support for wildcards in the server's 'SubjectAltName' extension, similar
to the behaviour implemented in
`:public_key.pkix_verify_hostname_match_fun(:https)` in recent Erlang/OTP
releases. This improves compatibility with recently issued wildcard
certificates also on older Erlang/OTP releases.
* `:versions` - defaults to `[:"tlsv1.2"]` (TLS v1.2 only). May be
overridden by the caller.
### Supporting older cipher suites
By default only a small list of modern cipher suites is enabled, in compliance
with the HTTP/2 specification. Some servers, in particular HTTP/1 servers, may
not support any of these cipher suites, resulting in TLS handshake failures or
closed connections.
To select the default cipher suites of Erlang/OTP (including for example
AES-CBC), use the following `:transport_opts`:
# Erlang/OTP 20.3 or later:
transport_opts: [ciphers: :ssl.cipher_suites(:default, :"tlsv1.2")]
# Older versions:
transport_opts: [ciphers: :ssl.cipher_suites()]
Recent Erlang/OTP releases do not enable RSA key exchange by default, due to
known weaknesses. If necessary, you can build a cipher list with RSA exchange
and use it in `:transport_opts`:
ciphers =
:ssl.cipher_suites(:all, :"tlsv1.2")
|> :ssl.filter_cipher_suites(
key_exchange: &(&1 == :rsa),
cipher: &(&1 in [:aes_256_gcm, :aes_128_gcm, :aes_256_cbc, :aes_128_cbc])
)
|> :ssl.append_cipher_suites(:ssl.cipher_suites(:default, :"tlsv1.2"))
## Examples
{:ok, conn} = Mint.HTTP.connect(:http, "httpbin.org", 80)
Using a proxy:
proxy = {:http, "myproxy.example.com", 80, []}
{:ok, conn} = Mint.HTTP.connect(:https, "httpbin.org", 443, proxy: proxy)
Forcing the connection to be an HTTP/2 connection:
{:ok, conn} = Mint.HTTP.connect(:https, "http2.golang.org", 443, protocols: [:http2])
Enable all default cipher suites of Erlang/OTP (release 20.3 or later):
opts = [transport_opts: [ciphers: :ssl.cipher_suites(:default, :"tlsv1.2")]]
{:ok, conn} = Mint.HTTP.connect(:https, "httpbin.org", 443, opts)
"""
@spec connect(Types.scheme(), Types.address(), :inet.port_number(), keyword()) ::
{:ok, t()} | {:error, Types.error()}
def connect(scheme, address, port, opts \\ []) do
case Keyword.fetch(opts, :proxy) do
{:ok, {proxy_scheme, proxy_address, proxy_port, proxy_opts}} ->
case scheme_to_transport(scheme) do
Transport.TCP ->
proxy = {proxy_scheme, proxy_address, proxy_port}
host = {scheme, address, port}
opts = Keyword.merge(opts, proxy_opts)
UnsafeProxy.connect(proxy, host, opts)
Transport.SSL ->
proxy = {proxy_scheme, proxy_address, proxy_port, proxy_opts}
host = {scheme, address, port, opts}
TunnelProxy.connect(proxy, host)
end
:error ->
Mint.Negotiate.connect(scheme, address, port, opts)
end
end
@doc false
@spec upgrade(
module(),
Mint.Types.socket(),
Types.scheme(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def upgrade(old_transport, transport_state, scheme, hostname, port, opts),
do: Mint.Negotiate.upgrade(old_transport, transport_state, scheme, hostname, port, opts)
@doc """
Returns the protocol used by the current connection.
## Examples
iex> Mint.HTTP.protocol(%Mint.HTTP1{})
:http1
iex> Mint.HTTP.protocol(%Mint.HTTP2{})
:http2
"""
if Version.compare(System.version(), "1.7.0") in [:eq, :gt] do
@doc since: "1.4.0"
end
@spec protocol(t()) :: :http1 | :http2
def protocol(%Mint.HTTP1{}), do: :http1
def protocol(%Mint.HTTP2{}), do: :http2
def protocol(%Mint.UnsafeProxy{state: internal_conn}), do: protocol(internal_conn)
@doc false
@impl true
@spec initiate(
module(),
Types.socket(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def initiate(transport, transport_state, hostname, port, opts),
do: Mint.Negotiate.initiate(transport, transport_state, hostname, port, opts)
@doc """
Closes the given connection.
This function closes the socket wrapped by the given connection. Once the socket
is closed, the connection goes into the "closed" state and `open?/1` returns `false`.
You can throw away a closed connection.
Closing a connection does not guarantee that data that is in flight gets delivered
to the server.
Always returns `{:ok, conn}` where `conn` is the updated connection.
## Examples
{:ok, conn} = Mint.HTTP.close(conn)
"""
@impl true
@spec close(t()) :: {:ok, t()}
def close(conn), do: conn_module(conn).close(conn)
@doc """
Checks whether the connection is open.
This function returns `true` if the connection is open, `false` otherwise. It should
be used to check that a connection is open before sending requests or performing
operations that involve talking to the server.
The `type` argument can be used to tell whether the connection is closed only for reading,
only for writing, or for both. In HTTP/1, a closed connection is always closed for
both reading and writing. In HTTP/2, the connection can be closed only for writing but
not for reading, meaning that you cannot send any more data to the server but you can
still receive data from the server. See the "Closed connection" section in the module
documentation of `Mint.HTTP2`.
If a connection is not open for reading and writing, it has become useless and you should
get rid of it. If you still need a connection to the server, start a new connection
with `connect/4`.
## Examples
{:ok, conn} = Mint.HTTP.connect(:http, "httpbin.org", 80)
Mint.HTTP.open?(conn)
#=> true
"""
@impl true
@spec open?(t(), :read | :write | :read_write) :: boolean()
def open?(conn, type \\ :read_write), do: conn_module(conn).open?(conn, type)
@doc """
Sends a request to the connected server.
This function sends a new request to the server that `conn` is connected to.
`method` is a string representing the method for the request, such as `"GET"`
or `"POST"`. `path` is the path on the host to send the request to. `headers`
is a list of request headers in the form `{header_name, header_value}` with
`header_name` and `header_value` being strings. `body` can have one of three
values:
* `nil` - no body is sent with the request.
* iodata - the body to send for the request.
* `:stream` - when the value of the body is `:stream` the request
body can be streamed on the connection. See `stream_request_body/3`.
In HTTP/1, you can't open a request if the body of another request is
streaming.
If the request is sent correctly, this function returns `{:ok, conn, request_ref}`.
`conn` is an updated connection that should be stored over the old connection.
`request_ref` is a unique reference that can be used to match on responses for this
request that are returned by `stream/2`. See `stream/2` for more information.
If there's an error with sending the request, `{:error, conn, reason}` is returned.
`reason` is the cause of the error. `conn` is an updated connection. It's important
to store the returned connection over the old connection in case of errors too, because
the state of the connection might change when there are errors as well. An error when
sending a request **does not** necessarily mean that the connection is closed. Use
`open?/1` to verify that the connection is open.
Requests can be pipelined so the full response does not have to be received
before the next request can be sent. It is up to users to verify that the
server supports pipelining and that the request is safe to pipeline.
In HTTP/1, you can't open a request if the body of another request is streaming.
See `Mint.HTTP1.request/5` for more information.
For a quick discussion on HTTP/2 streams and requests, see the `Mint.HTTP2` module and
`Mint.HTTP2.request/5`.
## The `content-length` header
If you don't set the `content-length` header and you send a body with the request (that
is, not `nil` and not `:stream`), then Mint will add a default `content-length` header
to your request. If you're using HTTP/2 and streaming the request, you may provide the
`content-length` header yourself. If you're using HTTP/1, Mint will do chunked
transfer-encoding when a content-length is not provided (see `Mint.HTTP1.request/5`).
## Examples
Mint.HTTP.request(conn, "GET", "/", _headers = [], _body = nil)
Mint.HTTP.request(conn, "POST", "/path", [{"content-type", "application/json"}], "{}")
"""
@impl true
@spec request(
t(),
method :: String.t(),
path :: String.t(),
Types.headers(),
body :: iodata() | nil | :stream
) ::
{:ok, t(), Types.request_ref()}
| {:error, t(), Types.error()}
def request(conn, method, path, headers, body),
do: conn_module(conn).request(conn, method, path, headers, body)
@doc """
Streams a chunk of the request body on the connection or signals the end of the body.
If a request is opened (through `request/5`) with the body as `:stream`, then the
body can be streamed through this function. The function takes a `conn`, a
`request_ref` returned by `request/5` to identify the request to stream the body for,
and a chunk of body to stream. The value of chunk can be:
* iodata - a chunk of iodata is transmitted to the server as part of the body
of the request. If the chunk is empty, in HTTP/1 it's a no-op, while in HTTP/2
a `DATA` frame will be sent.
* `:eof` - signals the end of the streaming of the request body for the given
request. Usually the server won't send any reply until this is sent.
* `{:eof, trailing_headers}` - sends **trailing headers** and signals the end
of the streaming of the request body for the given request. This behaves the
same way as `:eof` but first sends the trailing headers. See the "Trailing headers"
section below.
This function always returns an updated connection to be stored over the old connection.
For information about transfer encoding and content length in HTTP/1, see
`Mint.HTTP1.stream_request_body/3`.
## Trailing headers
HTTP trailing headers can be sent after the body of a request. The behaviour is slightly
different for HTTP/1 and HTTP/2.
In HTTP/1, trailing headers are only supported if the transfer encoding is set to
`chunked`. See `Mint.HTTP1.stream_request_body/3` for more information on chunked
transfer encoding.
In HTTP/2, trailing headers behave like normal headers. You don't need to care
about the transfer encoding.
### The `trailer` header
As specified in [section 4.4 of RFC 7230](https://tools.ietf.org/html/rfc7230#section-4.4),
in HTTP/1 you need to specify which headers you're going to send as trailing
headers using the `trailer` header. The `trailer` header applies to both HTTP/1
and HTTP/2. See the examples below for more information.
### The `te` header
As specified in [section 4.3 of RFC 7230](https://tools.ietf.org/html/rfc7230#section-4.3),
the `te` (or `TE`) header is used to specify which transfer-encodings the client
is willing to accept (besides `chunked`). Mint supports decoding of trailing headers,
but if you want to notify the server that you are accepting trailing headers,
use the `trailers` value in the `te` header. For example:
Mint.HTTP.request(conn, "GET", "/", [{"te", "trailers"}], "some body")
Note that the `te` header can also be used to communicate which encodings you
support to the server.
## Examples
Let's see an example of streaming an empty JSON object (`{}`) by streaming one curly
brace at a time.
headers = [{"content-type", "application/json"}, {"content-length", "2"}]
{:ok, conn, request_ref} = Mint.HTTP.request(conn, "POST", "/", headers, :stream)
{:ok, conn} = Mint.HTTP.stream_request_body(conn, request_ref, "{")
{:ok, conn} = Mint.HTTP.stream_request_body(conn, request_ref, "}")
{:ok, conn} = Mint.HTTP.stream_request_body(conn, request_ref, :eof)
Here's an example of sending trailing headers:
headers = [{"content-type", "application/json"}, {"trailer", "my-trailer, x-expires"}]
{:ok, conn, request_ref} = Mint.HTTP.request(conn, "POST", "/", headers, :stream)
{:ok, conn} = Mint.HTTP.stream_request_body(conn, request_ref, "{}")
trailing_headers = [{"my-trailer", "xxx"}, {"x-expires", "10 days"}]
{:ok, conn} = Mint.HTTP.stream_request_body(conn, request_ref, {:eof, trailing_headers})
"""
@impl true
@spec stream_request_body(
t(),
Types.request_ref(),
iodata() | :eof | {:eof, trailing_headers :: Types.headers()}
) ::
{:ok, t()} | {:error, t(), Types.error()}
def stream_request_body(conn, ref, body),
do: conn_module(conn).stream_request_body(conn, ref, body)
@doc """
Streams the next batch of responses from the given message.
This function processes a "message" which can be any term, but should be
a message received by the process that owns the connection. **Processing**
a message means that this function will parse it and check if it's a message
that is directed to this connection, that is, a TCP/SSL message received on the
connection's socket. If it is, then this function will parse the message,
turn it into a list of responses, and possibly take action given the responses.
As an example of an action that this function could perform, if the server sends
a ping request this function will transparently take care of pinging the server back.
If there's no error, this function returns `{:ok, conn, responses}` where `conn` is
the updated connection and `responses` is a list of responses. See the "Responses"
section below. If there's an error, `{:error, conn, reason, responses}` is returned,
where `conn` is the updated connection, `reason` is the error reason, and `responses`
is a list of responses that were correctly parsed before the error.
If the given `message` is not from the connection's socket,
this function returns `:unknown`.
## Socket mode
Mint sets the socket in `active: :once` mode. This means that a single socket
message at a time is delivered to the process that owns the connection. After
a message is delivered, then no other messages are delivered (we say the socket
goes in *passive* mode). When `stream/2` is called to process the message that
was received, Mint sets the socket back to `active: :once`. This is good to know
in order to understand how the socket is handled by Mint, but in normal usage
it just means that you will process one message at a time with `stream/2` and not
pay too much attention to the socket mode.
Mint also supports passive mode to avoid receiving messages. See the "Mode" section
in the module documentation.
## Responses
Each possible response returned by this function is a tuple with two or more elements.
The first element is always an atom that identifies the kind of response. The second
element is a unique reference `t:Mint.Types.request_ref/0` that identifies the request
that the response belongs to. This is the term returned by `request/5`. After these
two elements, there can be response-specific terms as well, documented below.
These are the possible responses that can be returned.
* `{:status, request_ref, status_code}` - returned when the server replied
with a response status code. The status code is a non-negative integer.
* `{:headers, request_ref, headers}` - returned when the server replied
with a list of headers. Headers are in the form `{header_name, header_value}`
with `header_name` and `header_value` being strings. A single `:headers` response
will come after the `:status` response. A single `:headers` response may come
after all the `:data` responses if **trailing headers** are present.
* `{:data, request_ref, binary}` - returned when the server replied with
a chunk of response body (as a binary). The request shouldn't be considered done
when a piece of body is received because multiple chunks could be received. The
request is done when the `:done` response is returned.
* `{:done, request_ref}` - returned when the server signaled the request
as done. When this is received, the response body and headers can be considered
complete and it can be assumed that no more responses will be received for this
request. This means that for example, you can stop holding on to the request ref
for this request.
* `{:error, request_ref, reason}` - returned when there is an error that
only affects the request and not the whole connection. For example, if the
server sends bad data on a given request, that request will be closed and an error
for that request will be returned among the responses, but the connection will
remain alive and well.
* `{:pong, request_ref}` - returned when a server replies to a ping
request sent by the client. This response type is HTTP/2-specific
and will never be returned by an HTTP/1 connection. See `Mint.HTTP2.ping/2`
for more information.
* `{:push_promise, request_ref, promised_request_ref, headers}` - returned when
the server sends a server push to the client. This response type is HTTP/2 specific
and will never be returned by an HTTP/1 connection. See `Mint.HTTP2` for more
information on server pushes.
## Examples
Let's assume we have a function called `receive_next_and_stream/1` that takes
a connection and then receives the next message, calls `stream/2` with that message
as an argument, and then returns the result of `stream/2`:
defp receive_next_and_stream(conn) do
receive do
message -> Mint.HTTP.stream(conn, message)
end
end
Now, we can see an example of a workflow involving `stream/2`.
{:ok, conn, request_ref} = Mint.HTTP.request(conn, "GET", "/", _headers = [])
{:ok, conn, responses} = receive_next_and_stream(conn)
responses
#=> [{:status, ^request_ref, 200}]
{:ok, conn, responses} = receive_next_and_stream(conn)
responses
#=> [{:headers, ^request_ref, [{"Content-Type", "application/json"}]},
#=> {:data, ^request_ref, "{"}]
{:ok, conn, responses} = receive_next_and_stream(conn)
responses
#=> [{:data, ^request_ref, "}"}, {:done, ^request_ref}]
"""
@impl true
@spec stream(t(), term()) ::
{:ok, t(), [Types.response()]}
| {:error, t(), Types.error(), [Types.response()]}
| :unknown
def stream(conn, message), do: conn_module(conn).stream(conn, message)
@doc """
Returns the number of open requests.
Open requests are requests that have not yet received a `:done` response.
This function returns the number of open requests for both HTTP/1 and HTTP/2,
but for HTTP/2 only client-initiated requests are considered as open requests.
See `Mint.HTTP2.open_request_count/1` for more information.
## Examples
{:ok, conn, _ref} = Mint.HTTP.request(conn, "GET", "/", [])
Mint.HTTP.open_request_count(conn)
#=> 1
"""
@impl true
@spec open_request_count(t()) :: non_neg_integer()
def open_request_count(conn), do: conn_module(conn).open_request_count(conn)
@doc """
Receives data from the socket in a blocking way.
By default Mint operates in active mode, meaning that messages are delivered
to the process that started the connection. However, Mint also supports passive
mode (see the "Mode" section in the module documentation).
In passive mode, you'll need to manually get bytes out of the socket. You can
do that with this function.
`byte_count` is the number of bytes you want out of the socket. If `byte_count`
is `0`, all available bytes will be returned.
`timeout` is the maximum time to wait before returning an error.
This function will raise an error if the socket is in active mode.
## Examples
{:ok, conn, responses} = Mint.HTTP.recv(conn, 0, 5000)
"""
@impl true
@spec recv(t(), non_neg_integer(), timeout()) ::
{:ok, t(), [Types.response()]}
| {:error, t(), Types.error(), [Types.response()]}
def recv(conn, byte_count, timeout), do: conn_module(conn).recv(conn, byte_count, timeout)
@doc """
Changes the mode of the underlying socket.
To use the connection in *active mode*, where the process that started the
connection receives socket messages, set the mode to `:active` (see also `stream/2`).
To use the connection in *passive mode*, where you need to manually receive data
from the socket, set the mode to `:passive` (see also `recv/3`).
The mode can also be controlled at connection time by the `:mode` option passed
to `connect/4`.
Note that if you're switching from active to passive mode, you still might have
socket messages in the process mailbox that you need to consume before doing
any other operation on the connection.
See the "Mode" section in the module documentation for more information on modes.
## Examples
{:ok, conn} = Mint.HTTP.set_mode(conn, :passive)
"""
@impl true
@spec set_mode(t(), :active | :passive) :: {:ok, t()} | {:error, Types.error()}
def set_mode(conn, mode), do: conn_module(conn).set_mode(conn, mode)
@doc """
Changes the *controlling process* of the given connection to `new_pid`.
The **controlling process** is a concept that comes from the Erlang TCP and
SSL implementations. The controlling process of a connection is the process
that started the connection and that receives the messages for that connection.
You can change the controlling process of a connection through this function.
This function also takes care of "transferring" all the connection messages
that are in the mailbox of the current controlling process to the new
controlling process.
Remember that the connection is a data structure, so if you
change the controlling process it doesn't mean you "transferred" the
connection data structure itself to the other process, which you have
to do manually (for example by sending the connection data structure to the
new controlling process). If you do that, be careful of race conditions
and be sure to retrieve the connection in the new controlling process
before accepting connection messages in the new controlling process.
In fact, this function is guaranteed to return the connection unchanged,
so you are free to ignore the connection returned in `{:ok, conn}`.
## Examples
send(new_pid, {:conn, conn})
{:ok, conn} = Mint.HTTP.controlling_process(conn, new_pid)
# In the "new_pid" process
receive do
{:conn, conn} ->
# Will receive connection messages.
end
"""
@impl true
@spec controlling_process(t(), pid()) :: {:ok, t()} | {:error, Types.error()}
def controlling_process(conn, new_pid), do: conn_module(conn).controlling_process(conn, new_pid)
@doc """
Assigns a new private key and value in the connection.
This storage is meant to be used to associate metadata with the connection and
it can be useful when handling multiple connections.
The given `key` must be an atom, while the given `value` can be an arbitrary
term. The return value of this function is an updated connection.
See also `get_private/3` and `delete_private/2`.
## Examples
Let's see an example of putting a value and then getting it:
conn = Mint.HTTP.put_private(conn, :client_name, "Mint")
Mint.HTTP.get_private(conn, :client_name)
#=> "Mint"
"""
@impl true
@spec put_private(t(), atom(), term()) :: t()
def put_private(conn, key, value), do: conn_module(conn).put_private(conn, key, value)
@doc """
Gets a private value from the connection.
Retrieves a private value previously set with `put_private/3` from the connection.
`key` is the key under which the value to retrieve is stored. `default` is a default
value returned in case there's no value under the given key.
See also `put_private/3` and `delete_private/2`.
## Examples
conn = Mint.HTTP.put_private(conn, :client_name, "Mint")
Mint.HTTP.get_private(conn, :client_name)
#=> "Mint"
Mint.HTTP.get_private(conn, :non_existent)
#=> nil
"""
@impl true
@spec get_private(t(), atom(), term()) :: term()
def get_private(conn, key, default \\ nil),
do: conn_module(conn).get_private(conn, key, default)
@doc """
Deletes a value in the private store.
Deletes the private value stored under `key` in the connection. Returns the
updated connection.
See also `put_private/3` and `get_private/3`.
## Examples
conn = Mint.HTTP.put_private(conn, :client_name, "Mint")
Mint.HTTP.get_private(conn, :client_name)
#=> "Mint"
conn = Mint.HTTP.delete_private(conn, :client_name)
Mint.HTTP.get_private(conn, :client_name)
#=> nil
"""
@impl true
@spec delete_private(t(), atom()) :: t()
def delete_private(conn, key), do: conn_module(conn).delete_private(conn, key)
@doc """
Gets the socket associated with the connection.
Do not use the returned socket to change its internal state. Only read information from the socket.
For instance, use `:ssl.connection_information/2` to retrieve TLS-specific information from the
socket.
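## Examples
Assuming an HTTPS connection (so the socket is an `:ssl` socket):
    socket = Mint.HTTP.get_socket(conn)
    {:ok, info} = :ssl.connection_information(socket, [:protocol, :selected_cipher_suite])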
"""
@impl true
@spec get_socket(t()) :: Mint.Types.socket()
def get_socket(conn), do: conn_module(conn).get_socket(conn)
@doc """
Gets the proxy headers associated with the connection in the `CONNECT` method.
When using a tunnel proxy and HTTPS, the only way to exchange data with
the proxy is through headers in the `CONNECT` method.
"""
if Version.compare(System.version(), "1.7.0") in [:eq, :gt] do
@doc since: "1.4.0"
end
@impl true
@spec get_proxy_headers(t()) :: Mint.Types.headers()
def get_proxy_headers(conn), do: conn_module(conn).get_proxy_headers(conn)
## Helpers
defp conn_module(%UnsafeProxy{}), do: UnsafeProxy
defp conn_module(%Mint.HTTP1{}), do: Mint.HTTP1
defp conn_module(%Mint.HTTP2{}), do: Mint.HTTP2
end
|
lib/mint/http.ex
| 0.925672
| 0.753784
|
http.ex
|
starcoder
|
defmodule ExploringElixir.Benchmark.Map do
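  # Compares three pattern-matching strategies with Benchee: matching map keys
  # in function heads, matching maps inline in the function body, and matching
  # {:ok, _} / {:error, _} result tuples across recursive calls.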
def match do
%{dates: dates, atoms: atoms} = init_maps()
count = 1000
Benchee.run %{
"Function header matching" =>
fn -> function_headers(dates, atoms, count, count) end,
"Inline matching" =>
fn -> inline_match(dates, atoms, count, count) end,
"Record matching" =>
fn -> record_matching(perhaps(), count) end
}, formatters: [&Benchee.Formatters.HTML.output/1],
formatter_options: [html: [file: "benchmarks/map_match.html"]]
end
def function_headers(_dates, _atoms, garbage, 0) do
garbage
end
def function_headers(dates, atoms, _garbage, count) do
date = match(dates)
uuid = match(atoms)
function_headers(dates, atoms, {date, uuid}, count - 1)
end
def match(%{"2018-07-02 00:00:00Z": date}), do: date
def match(%{:to_uniq_entries => a, :comprehension_filter => b, :"Australia/Hobart" => c}), do: {a, b, c}
def match(%{:"MACRO-unquote" => uuid}), do: uuid
def match(%{imports_from_env: uuid}), do: uuid
def match(%{ctime: uuid}), do: uuid
def match(%{"2017-07-02 00:00:00Z": date}), do: date
def match(_), do: :not_found
def inline_match(_dates, _atoms, garbage, 0) do
garbage
end
def inline_match(dates, atoms, _garbage, count) do
%{today: date} = dates
%{:to_uniq_entries => a, :comprehension_filter => b, :"Australia/Hobart" => c} = atoms
inline_match(dates, atoms, {date, a, b, c}, count - 1)
end
def record_matching(_, 0), do: :ok
def record_matching({:ok, _x}, count), do: record_matching(perhaps(), count - 1)
def record_matching({:error, _x}, count), do: record_matching(perhaps(), count - 1)
def perhaps, do: perhaps(:rand.uniform(100))
def perhaps(x) when x > 50, do: {:ok, x}
def perhaps(x), do: {:error, x}
def init_maps() do
now = Timex.today
date_map =
Enum.reduce(1..(365*2), %{today: now},
fn(days, date_map) ->
then = Timex.shift(now, days: days)
Map.put(date_map, DateTime.to_string(Timex.to_datetime(then)), then)
end)
atom_map =
Enum.reduce(:all_atoms.all_atoms, %{},
fn(atom, atom_map) ->
Map.put(atom_map, atom, UUID.uuid4)
end)
%{
dates: date_map,
atoms: atom_map
}
end
end
|
lib/exploring_elixir/e002/benchmark_map.ex
| 0.598312
| 0.62385
|
benchmark_map.ex
|
starcoder
|
defmodule Exq.Manager.Server do
@moduledoc """
The Manager module is the main orchestrator for the system.
It is also the entry point Pid process used by the client to interact
with the Exq system.
It's responsibilities include:
* Handle interaction with client and delegate to responsible sub-system
* Initial Setup of Redis Connection (to be moved to supervisor?).
* Setup and tracking of in-progress workers / jobs.
* Poll Redis for new jobs for any queues that have available workers.
* Handling of queue state and subscriptions (addition and removal)
* Initial re-hydration of backup queue on system restart to handle any
orphan jobs from last system stop.
The Manager is a GenServer with a timed process loop.
## Options
* `:concurrency` - Default max number of workers to use if not passed in for each queue.
* `:genserver_timeout` - Timeout to use for GenServer calls.
* `:max_retries` - Maximum number of times to retry a failed job
* `:name` - Name of target registered process
* `:namespace` - Redis namespace to store all data under. Defaults to "exq".
* `:queues` - List of queues to monitor. Can be an array of queues names such as ["q1", "q2"], or
array of tuples with queue and max number of concurrent workers: [{"q1", 1}, {"q2", 20}].
If only an array is passed in, the system will use the default `concurrency` value for each queue.
* `:redis_timeout` - Timeout to use for Redis commands.
* `:poll_timeout` - How often to poll Redis for jobs.
* `:scheduler_enable` - Whether scheduler / retry process should be enabled. This defaults
to true. Note that if you turn this off, job retries will not be enqueued.
* `:scheduler_poll_timeout` - How often to poll Redis for scheduled / retry jobs.
* `:shutdown_timeout` - The number of milliseconds to wait for workers to finish processing jobs
when the application is shutting down
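For example, a minimal configuration sketch using these options (values are
illustrative):

    config :exq,
      name: Exq,
      namespace: "exq",
      concurrency: 100,
      queues: [{"q1", 1}, {"q2", 20}],
      poll_timeout: 50,
      scheduler_enable: true,
      scheduler_poll_timeout: 200,
      max_retries: 25,
      shutdown_timeout: 5000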
## Redis Options (TODO - move to supervisor after refactor):
* `:host` - Host name for Redis server (defaults to '127.0.0.1')
* `:port` - Redis port (defaults to 6379)
* `:database` - Redis Database number (used for isolation. Defaults to 0).
* `:password` - Redis authentication password (optional, off by default).
* `:redis_options` - Additional options provided to Redix
* TODO: What about max_reconnection_attempts
## Job lifecycle
The job lifecycle starts with an enqueue of a job. This can be done either
via Exq or another system like Sidekiq / Resque.
Note that the JobQueue encapsulates much of this logic.
Client (Exq) -> Manager -> Enqueuer
Assuming Exq is used to Enqueue an immediate job, the following is the flow:
1. Client calls Exq.enqueue(Exq, "queue_name", Worker, ["arg1", "arg2"])
2. Manager delegates to Enqueuer
3. Enqueuer does the following:
* Adds the queue to the "queues" list if not already there.
* Prepare a job struct with a generated UUID and convert to JSON.
* Push the job into the correct queue
* Respond to client with the generated job UUID.
At this point the job is in the correct queue ready to be dequeued.
Manager deq Redis -> Worker (decode & execute job) --> Manager (record)
|
--> Stats (record stats)
The dequeueing of the job is as follows:
1. The Manager is on a polling cycle, and the :timeout message fires.
2. Manager tabulates a list of active queues with available workers.
3. Uses the JobQueue module to fetch jobs. The JobQueue module does this through
a single MULTI RPOPLPUSH command issued to Redis with the targeted queue.
This command atomically pops an item off the queue and stores the item in a backup queue.
The backup queue is keyed off the queue and node id, so each node would
have their own backup queue.
Note that we cannot use a blocking pop since BRPOPLPUSH (unlike BRPOP) is more
limited and can only handle a single queue target (see filed issues in Redis / Sidekiq).
4. Once the jobs are returned to the manager, the manager goes through each job
and creates and kicks off an ephemeral Worker process that will handle the job.
The manager also does some tabulation to reduce the worker count for those queues.
5. The worker parses the JSON object, and figures out the worker to call.
It also tells Stats to record itself as in progress.
6. The worker then calls "apply" on the correct target module, and tracks the failure
or success of the job. Once the job is finished, it tells the Manager and Stats.
7. If the job is successful, Manager and Stats simply mark the success of the job.
If the job fails, the Worker module uses the JobQueue module to retry the job if necessary.
The retry is done by adding the job to a "retry" queue which is a Sorted Set in Redis.
The job is marked with the retry count and scheduled date (using exponential backoff).
The job is then removed from the backup queue.
8. If any jobs were fetched from Redis, the Manager will poll again immediately, otherwise
it will use the poll_timeout for the next polling.
## Retry / Schedule queue
The retry / schedule queue provides functionality for scheduled jobs. It is used
both by the `enqueue_in` method, which schedules a job to run in the future, and
by the retry mechanism, which re-enqueues failed jobs.
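For example, to schedule a job to run in five minutes (the worker module and
arguments are illustrative):

    {:ok, jid} = Exq.enqueue_in(Exq, "default", 5 * 60, MyWorker, ["arg1"])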
"""
require Logger
use GenServer
alias Exq.Support.Config
alias Exq.Support.Opts
alias Exq.Redis.JobQueue
alias Exq.Support.Redis
@backoff_mult 10
defmodule State do
defstruct redis: nil,
stats: nil,
enqueuer: nil,
pid: nil,
node_id: nil,
namespace: nil,
dequeuers: nil,
queues: nil,
poll_timeout: nil,
scheduler_poll_timeout: nil,
workers_sup: nil,
middleware: nil,
metadata: nil,
shutdown_timeout: nil
end
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
end
def job_terminated(exq, queue, success) do
GenServer.cast(exq, {:job_terminated, queue, success})
:ok
end
def server_name(nil), do: Config.get(:name)
def server_name(name), do: name
## ===========================================================
## gen server callbacks
## ===========================================================
def init(opts) do
# Cleanup stale stats
GenServer.cast(self(), :cleanup_host_stats)
# Setup dequeues
dequeuers = add_dequeuers(%{}, opts[:concurrency])
state = %State{
dequeuers: dequeuers,
redis: opts[:redis],
stats: opts[:stats],
workers_sup: opts[:workers_sup],
enqueuer: opts[:enqueuer],
middleware: opts[:middleware],
metadata: opts[:metadata],
node_id: Config.node_identifier().node_id(),
namespace: opts[:namespace],
queues: opts[:queues],
pid: self(),
poll_timeout: opts[:poll_timeout],
scheduler_poll_timeout: opts[:scheduler_poll_timeout],
shutdown_timeout: opts[:shutdown_timeout]
}
check_redis_connection(opts)
{:ok, state, 0}
end
def handle_call(:redis, _from, state) do
{:reply, {state.redis, state.namespace}, state, 10}
end
def handle_call(:subscriptions, _from, state) do
{:reply, {:ok, state.queues}, state, 0}
end
def handle_call({:subscribe, queue}, _from, state) do
updated_state = add_queue(state, queue)
{:reply, :ok, updated_state, 0}
end
def handle_call({:subscribe, queue, concurrency}, _from, state) do
updated_state = add_queue(state, queue, concurrency)
{:reply, :ok, updated_state, 0}
end
def handle_call({:unsubscribe, queue}, _from, state) do
updated_state = remove_queue(state, queue)
{:reply, :ok, updated_state, 0}
end
def handle_call(:unsubscribe_all, _from, state) do
updated_state = remove_all_queues(state)
{:reply, :ok, updated_state, 0}
end
def handle_cast({:re_enqueue_backup, queue}, state) do
Redis.rescue_timeout(fn ->
JobQueue.re_enqueue_backup(state.redis, state.namespace, state.node_id, queue)
end)
{:noreply, state, 0}
end
@doc """
Cleanup host stats on boot
"""
def handle_cast(:cleanup_host_stats, state) do
Redis.rescue_timeout(fn ->
Exq.Stats.Server.cleanup_host_stats(state.stats, state.namespace, state.node_id)
end)
{:noreply, state, 0}
end
def handle_cast({:job_terminated, queue, success}, state) do
dequeuers =
if success do
maybe_call_dequeuer(state.dequeuers, queue, :processed)
else
maybe_call_dequeuer(state.dequeuers, queue, :failed)
end
{:noreply, %{state | dequeuers: dequeuers}, 0}
end
def handle_info(:timeout, state) do
{updated_state, timeout} = dequeue_and_dispatch(state)
{:noreply, updated_state, timeout}
end
def handle_info(_info, state) do
{:noreply, state, state.poll_timeout}
end
def terminate(_reason, _state) do
:ok
end
## ===========================================================
## Internal Functions
## ===========================================================
@doc """
Dequeue jobs and dispatch to workers
"""
def dequeue_and_dispatch(state) do
case available_queues(state) do
{[], state} ->
{state, state.poll_timeout}
{queues, state} ->
result =
Redis.rescue_timeout(
fn ->
Exq.Redis.JobQueue.dequeue(state.redis, state.namespace, state.node_id, queues)
end,
timeout_return_value: :timeout
)
case result do
:timeout ->
{state, state.poll_timeout}
jobs ->
{state, job_results} =
Enum.reduce(jobs, {state, []}, fn potential_job, {state, results} ->
{state, result} = dispatch_job(state, potential_job)
{state, [result | results]}
end)
cond do
Enum.any?(job_results, fn status -> elem(status, 1) == :dispatch end) ->
{state, 0}
Enum.any?(job_results, fn status -> elem(status, 0) == :error end) ->
Logger.error("Redis Error #{Kernel.inspect(job_results)}}. Backing off...")
{state, state.poll_timeout * @backoff_mult}
true ->
{state, state.poll_timeout}
end
end
end
end
@doc """
Returns list of active queues with free workers
"""
def available_queues(state) do
Enum.reduce(state.queues, {[], state}, fn q, {queues, state} ->
{available, dequeuers} =
Map.get_and_update!(state.dequeuers, q, fn {module, state} ->
{:ok, available, state} = module.available?(state)
{available, {module, state}}
end)
state = %{state | dequeuers: dequeuers}
if available do
{[q | queues], state}
else
{queues, state}
end
end)
end
@doc """
Dispatch job to worker if it is not empty
Also update worker count for dispatched job
"""
def dispatch_job(state, potential_job) do
case potential_job do
{:ok, {:none, _queue}} ->
{state, {:ok, :none}}
{:ok, {job, queue}} ->
state = dispatch_job(state, job, queue)
{state, {:ok, :dispatch}}
{status, reason} ->
{state, {:error, {status, reason}}}
end
end
def dispatch_job(state, job, queue) do
{:ok, worker} =
Exq.Worker.Supervisor.start_child(
state.workers_sup,
[
job,
state.pid,
queue,
state.stats,
state.namespace,
state.node_id,
state.redis,
state.middleware,
state.metadata
],
shutdown_timeout: state.shutdown_timeout
)
Exq.Worker.Server.work(worker)
%{state | dequeuers: maybe_call_dequeuer(state.dequeuers, queue, :dispatched)}
end
# Setup dequeuers from options / configs.
# The following is done:
# * Sets up queues data structure with proper concurrency settings
# * Sets up :ets table for tracking workers
# * Re-enqueues any in progress jobs that were not finished the queues
# * Returns list of queues and work table
# TODO: Refactor the way queues are setup
defp add_dequeuers(dequeuers, specs) do
Enum.into(specs, dequeuers, fn {queue, {module, opts}} ->
GenServer.cast(self(), {:re_enqueue_backup, queue})
{:ok, state} = module.init(%{queue: queue}, opts)
{queue, {module, state}}
end)
end
defp remove_dequeuers(dequeuers, queues) do
Enum.reduce(queues, dequeuers, fn queue, dequeuers ->
maybe_call_dequeuer(dequeuers, queue, :stop)
|> Map.delete(queue)
end)
end
defp maybe_call_dequeuer(dequeuers, queue, method) do
if Map.has_key?(dequeuers, queue) do
Map.update!(dequeuers, queue, fn {module, state} ->
case apply(module, method, [state]) do
{:ok, state} -> {module, state}
:ok -> {module, nil}
end
end)
else
dequeuers
end
end
defp add_queue(state, queue, concurrency \\ Config.get(:concurrency)) do
queue_concurrency = {queue, Opts.cast_concurrency(concurrency)}
%{
state
| queues: [queue | state.queues],
dequeuers: add_dequeuers(state.dequeuers, [queue_concurrency])
}
end
defp remove_queue(state, queue) do
updated_queues = List.delete(state.queues, queue)
%{state | queues: updated_queues, dequeuers: remove_dequeuers(state.dequeuers, [queue])}
end
defp remove_all_queues(state) do
%{state | queues: [], dequeuers: remove_dequeuers(state.dequeuers, state.queues)}
end
# Check Redis connection using PING and raise exception with
# user friendly error message if Redis is down.
defp check_redis_connection(opts) do
try do
{:ok, _} = Exq.Redis.Connection.q(opts[:redis], ~w(PING))
if Keyword.get(opts, :heartbeat_enable, false) do
:ok =
Exq.Redis.Heartbeat.register(
opts[:redis],
opts[:namespace],
Config.node_identifier().node_id()
)
else
:ok
end
catch
err, reason ->
opts = Exq.Support.Opts.redis_inspect_opts(opts)
raise """
\n\n\n#{String.duplicate("=", 100)}
ERROR! Could not connect to Redis!
Configuration passed in: #{opts}
Error: #{inspect(err)}
Reason: #{inspect(reason)}
Make sure Redis is running, and your configuration matches Redis settings.
#{String.duplicate("=", 100)}
"""
end
end
end
|
lib/exq/manager/server.ex
| 0.690142
| 0.694516
|
server.ex
|
starcoder
|
defmodule Mix.Tasks.Hex.Docs do
use Mix.Task
@shortdoc "Fetches or opens documentation of a package"
@moduledoc """
Fetches or opens documentation of a package.
If no version is specified, defaults to the version used in the current mix project.
If called outside of a mix project or the dependency is not used in the
current mix project, defaults to the latest version.
## Fetch documentation for all dependencies in the current mix project
mix hex.docs fetch
## Fetch documentation for offline use
Fetches documentation for the specified package that you can later open with
`mix hex.docs offline`.
mix hex.docs fetch PACKAGE [VERSION]
## Open a browser window with offline documentation
mix hex.docs offline PACKAGE [VERSION]
## Open a browser window with online documentation
mix hex.docs online PACKAGE [VERSION]
## Command line options
* `--module Some.Module` - Open the documentation page of the specified module inside the desired package
* `--organization ORGANIZATION` - Set this for private packages belonging to an organization
* `--latest` - Looks for the latest release of a package
* `--format epub` - When opening documentation offline, use this flag to open the epub formatted version
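For example (package and module names are illustrative):

    mix hex.docs online ecto --module Ecto.Changeset
    mix hex.docs offline ecto --format epub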
"""
@behaviour Hex.Mix.TaskDescription
@elixir_apps ~w(eex elixir ex_unit iex logger mix)
@switches [module: :string, organization: :string, latest: :boolean, format: :string]
@impl true
def run(args) do
Hex.start()
{opts, args} = Hex.OptionParser.parse!(args, strict: @switches)
opts = Keyword.put(opts, :mix_project, !!Mix.Project.get())
case args do
["fetch" | remaining] ->
fetch_docs(remaining, opts)
["online" | remaining] ->
open_docs(remaining, opts)
["offline" | remaining] ->
open_docs_offline(remaining, opts)
_ ->
Mix.raise("""
Invalid arguments, expected one of:
mix hex.docs fetch
mix hex.docs fetch PACKAGE [VERSION]
mix hex.docs offline PACKAGE [VERSION]
mix hex.docs online PACKAGE [VERSION]
""")
end
end
@impl true
def tasks() do
[
{"fetch PACKAGE [VERSION]", "Fetch documentation for offline use"},
{"offline PACKAGE [VERSION]", "Open a browser window with offline documentation"},
{"online PACKAGE [VERSION]", "Open a browser window with online documentation"}
]
end
defp fetch_docs([] = _args, opts) do
if !opts[:mix_project] do
Mix.raise(
"Specify a package name or run inside a Mix project " <>
"to fetch docs for all dependencies"
)
end
Enum.each(deps_in_lock(), fn package ->
fetch_docs([package.name, package.version], organization: package.repo)
end)
end
defp fetch_docs([name], opts) when name in @elixir_apps do
fetch_docs([name, System.version()], opts)
end
defp fetch_docs([name], opts) do
locked_or_latest_version = find_package_locked_or_latest_version(name, opts)
fetch_docs([name, locked_or_latest_version], opts)
end
defp fetch_docs([name, version], opts) do
target_dir = Path.join([docs_dir(), org_to_path(opts[:organization]), name, version])
fallback_dir = Path.join([docs_dir(), name, version])
cond do
File.exists?(target_dir) ->
Hex.Shell.info("Docs already fetched: #{target_dir}")
File.exists?(fallback_dir) ->
Hex.Shell.info("Docs already fetched: #{fallback_dir}")
true ->
target = Path.join(target_dir, "#{name}-#{version}.tar.gz")
success? = download_docs(opts[:organization], name, version, target)
if success? do
extract_docs(target, target_dir)
end
end
end
defp find_package_locked_or_latest_version(name, opts) do
package_in_lock = package_in_lock(name)
if opts[:mix_project] && !opts[:latest] && package_in_lock do
package_in_lock.version
else
find_package_latest_version(opts[:organization], name)
end
end
defp find_package_latest_version(organization, name) do
%{"releases" => releases} = retrieve_package_info(organization, name)
sorted_versions =
Enum.sort(releases, &(Hex.Version.compare(&1["version"], &2["version"]) == :gt))
if Enum.all?(sorted_versions, &pre_release?/1) do
sorted_versions
|> List.first()
|> Map.get("version")
else
sorted_versions
|> Enum.reject(&pre_release?/1)
|> List.first()
|> Map.get("version")
end
end
defp retrieve_package_info(organization, name) do
auth = if organization, do: Mix.Tasks.Hex.auth_info(:read)
case Hex.API.Package.get(organization, name, auth) do
{:ok, {code, body, _}} when code in 200..299 ->
body
{:ok, {404, _, _}} ->
Mix.raise("No package with name #{name}")
other ->
Hex.Shell.error("Failed to retrieve package information")
Hex.Utils.print_error_result(other)
end
end
defp open_docs([] = _args, _opts) do
Mix.raise("You must specify the name of a package")
end
defp open_docs([name], opts) do
package_in_lock = package_in_lock(name)
if opts[:mix_project] && !opts[:latest] && package_in_lock do
version = package_in_lock.version
open_docs([name, version], opts)
else
open_latest_docs([name], opts)
end
end
defp open_docs([name, version], opts) do
get_docs_url([name, version], opts)
|> browser_open()
end
defp open_latest_docs(args, opts) do
args
|> get_docs_url(opts)
|> browser_open()
end
defp open_docs_offline([] = _args, _opts) do
Mix.raise("You must specify the name of a package")
end
defp open_docs_offline([name], opts) do
package_in_lock = package_in_lock(name)
if opts[:mix_project] && !opts[:latest] && package_in_lock do
latest_version = package_in_lock.version
open_docs_offline([name, latest_version], opts)
else
open_latest_docs_offline(name, opts)
end
end
defp open_docs_offline([name, version], opts) do
docs_location = docs_location(opts[:organization], name, version, opts)
if docs_location do
open_file(docs_location)
else
fetch_docs([name, version], opts)
docs_location = docs_location(opts[:organization], name, version, opts)
if docs_location do
open_file(docs_location)
end
end
end
defp open_latest_docs_offline(name, opts) do
latest_version = find_package_version(opts[:organization], name)
if latest_version do
open_docs_offline([name, latest_version], opts)
else
fetch_docs([name], opts)
latest_version = find_package_version(opts[:organization], name)
if latest_version do
open_docs_offline([name, latest_version], opts)
end
end
end
defp docs_location(organization, name, version, opts) do
format = Keyword.get(opts, :format, "html")
module = Keyword.get(opts, :module, "index")
default_path = Path.join([docs_dir(), org_to_path(organization), name, version])
fallback_path = Path.join([docs_dir(), name, version])
case format do
"epub" -> epub_file_location(default_path, fallback_path, organization)
"html" -> html_file_location(default_path, fallback_path, module, organization)
end
end
defp html_file_location(default_path, fallback_path, module, organization) do
default_path = Path.join([default_path, module <> ".html"])
fallback_path = Path.join([fallback_path, module <> ".html"])
cond do
File.exists?(default_path) -> default_path
!organization && File.exists?(fallback_path) -> fallback_path
true -> nil
end
end
defp epub_file_location(default_path, fallback_path, organization) do
default_path = Path.wildcard(Path.join([default_path, "*.epub"]))
fallback_path = Path.wildcard(Path.join([fallback_path, "*.epub"]))
cond do
length(default_path) == 1 -> Enum.at(default_path, 0)
!organization && length(fallback_path) == 1 -> Enum.at(fallback_path, 0)
true -> Mix.raise("No documentation found in epub format.")
end
end
defp find_package_version(organization, name) do
default_path = Path.join([docs_dir(), org_to_path(organization), name])
fallback_path = Path.join([docs_dir(), name])
cond do
File.exists?(default_path) -> find_latest_version(default_path)
!organization && File.exists?(fallback_path) -> find_latest_version(fallback_path)
true -> nil
end
end
defp get_docs_url([name], opts) do
if module = opts[:module] do
Hex.Utils.hexdocs_module_url(opts[:organization], name, module)
else
Hex.Utils.hexdocs_url(opts[:organization], name)
end
end
defp get_docs_url([name, version], opts) do
if module = opts[:module] do
Hex.Utils.hexdocs_module_url(opts[:organization], name, version, module)
else
Hex.Utils.hexdocs_url(opts[:organization], name, version)
end
end
defp browser_open(path) do
path
|> open_cmd()
|> system_cmd()
end
defp open_cmd(path) do
case :os.type() do
{:win32, _} -> {"cmd", ["/c", "start", path]}
{:unix, :darwin} -> {"open", [path]}
{:unix, _} -> {"xdg-open", [path]}
end
end
if Mix.env() == :test do
defp system_cmd({cmd, args}) do
send(self(), {:hex_system_cmd, cmd, args})
end
else
defp system_cmd({cmd, args}) do
System.cmd(cmd, args)
end
end
defp open_file(path) do
unless path do
Mix.raise("Documentation not found")
end
unless File.exists?(path) do
Mix.raise("Documentation file not found: #{path}")
end
browser_open(path)
end
defp find_latest_version(path) do
sorted_versions =
path
|> File.ls!()
|> Enum.sort(&(Hex.Version.compare(&1, &2) == :gt))
if Enum.all?(sorted_versions, &pre_release?/1) do
List.first(sorted_versions)
else
sorted_versions
|> Enum.reject(&pre_release?/1)
|> List.first()
end
end
defp download_docs(organization, package, version, target) do
repo = org_to_repo(organization)
case Hex.Repo.get_docs(repo, package, version) do
{:ok, {200, body, _}} ->
File.mkdir_p!(Path.dirname(target))
File.write!(target, body)
true
_ ->
message = "Couldn't find docs for package with name #{package} or version #{version}"
Hex.Shell.error(message)
false
end
end
defp extract_docs(target, target_dir) do
File.mkdir_p!(target_dir)
fd = File.open!(target, [:read, :compressed])
:ok = :mix_hex_erl_tar.extract({:file, fd}, [:compressed, cwd: Path.dirname(target)])
Hex.Shell.info("Docs fetched: #{target_dir}")
end
defp docs_dir() do
Path.join(Hex.State.fetch!(:home), "docs")
end
defp package_in_lock(name) do
Enum.find(deps_in_lock(), &(&1.name == name))
end
defp deps_in_lock() do
Mix.Dep.Lock.read()
|> Enum.map(fn {_app, info} -> Hex.Utils.lock(info) end)
|> Enum.reject(&is_nil/1)
end
defp org_to_repo(organization) when organization in [nil, "hexpm"], do: "hexpm"
defp org_to_repo(organization), do: "hexpm:#{organization}"
defp org_to_path(organization) do
organization
|> org_to_repo()
|> Hex.Utils.windows_repo_path_fix()
end
defp pre_release?(%{"version" => version}), do: do_pre_release?(version)
defp pre_release?(version), do: do_pre_release?(version)
defp do_pre_release?(version) do
case Hex.Version.parse(version) do
{:ok, %Version{pre: []}} -> false
{:ok, %Version{}} -> true
_ -> false
end
end
end
|
lib/mix/tasks/hex.docs.ex
| 0.77552
| 0.408129
|
hex.docs.ex
|
starcoder
|
defmodule Phoenix.Tracker.Clock do
@moduledoc false
alias Phoenix.Tracker.State
@type context :: State.context()
@type clock :: {State.name(), context}
@doc """
Returns a list of replicas from a list of contexts.
"""
@spec clockset_replicas([clock]) :: [State.name()]
def clockset_replicas(clockset) do
for {replica, _} <- clockset, do: replica
end
@doc """
Adds a replicas context to a clockset, keeping only dominate contexts.
"""
@spec append_clock([clock], clock) :: [clock]
def append_clock(clockset, {_, clock}) when map_size(clock) == 0, do: clockset
def append_clock(clockset, {node, clock}) do
big_clock = combine_clocks(clockset)
cond do
dominates?(clock, big_clock) -> [{node, clock}]
dominates?(big_clock, clock) -> clockset
true -> filter_clocks(clockset, {node, clock})
end
end
@doc """
Checks if one clock causally dominates the other for all replicas.
"""
@spec dominates?(context, context) :: boolean
def dominates?(c1, c2) when map_size(c1) < map_size(c2), do: false
def dominates?(c1, c2) do
Enum.reduce_while(c2, true, fn {replica, clock}, true ->
if Map.get(c1, replica, 0) >= clock do
{:cont, true}
else
{:halt, false}
end
end)
end
@doc """
Checks if one clock causally dominates the other for their shared replicas.
"""
def dominates_or_equal?(c1, c2) when c1 == %{} and c2 == %{}, do: true
def dominates_or_equal?(c1, _c2) when c1 == %{}, do: false
def dominates_or_equal?(c1, c2) do
Enum.reduce_while(c1, true, fn {replica, clock}, true ->
if clock >= Map.get(c2, replica, 0) do
{:cont, true}
else
{:halt, false}
end
end)
end
@doc """
Returns the upper bound causal context of two clocks.
"""
def upperbound(c1, c2) do
Map.merge(c1, c2, fn _, v1, v2 -> max(v1, v2) end)
end
@doc """
Returns the lower bound causal context of two clocks.
"""
def lowerbound(c1, c2) do
Map.merge(c1, c2, fn _, v1, v2 -> min(v1, v2) end)
end
@doc """
Returns the clock with just provided replicas.
"""
def filter_replicas(c, replicas), do: Map.take(c, replicas)
@doc """
Returns replicas from the given clock.
"""
def replicas(c), do: Map.keys(c)
defp filter_clocks(clockset, {node, clock}) do
clockset
|> Enum.reduce({[], false}, fn {node2, clock2}, {set, insert} ->
if dominates?(clock, clock2) do
{set, true}
else
{[{node2, clock2} | set], insert || !dominates?(clock2, clock)}
end
end)
|> case do
{new_clockset, true} -> [{node, clock} | new_clockset]
{new_clockset, false} -> new_clockset
end
end
defp combine_clocks(clockset) do
clockset
|> Enum.map(fn {_, clocks} -> clocks end)
|> Enum.reduce(%{}, &upperbound(&1, &2))
end
end
|
lib/phoenix/tracker/clock.ex
| 0.886506
| 0.424531
|
clock.ex
|
starcoder
|
defmodule Mastermind.Strategy do
@moduledoc """
Break the code using Knuth's Algorithm
"""
alias Mastermind.Game
alias Mastermind.Core
@colours [:red, :blue, :green, :black, :orange, :yellow]
@possible_scores %{
{0,0} => 0,
{0,1} => 0,
{0,2} => 0,
{0,3} => 0,
{0,4} => 0,
{1,0} => 0,
{1,1} => 0,
{1,2} => 0,
{1,3} => 0,
{2,0} => 0,
{2,1} => 0,
{2,2} => 0,
{3,0} => 0,
{4,0} => 0,
}
@max_moves 12
@doc """
Solve new game
"""
def break_code(%{guesses: []} = game) do
g = Game.guess(game, [:red, :red, :green, :green])
loop_while(g, guess_combinations(), Game.won?(g), @max_moves)
end
@doc """
Solve partially completed game
"""
def break_code(game) do
loop_while(game, guess_combinations(), Game.won?(game), @max_moves)
end
def guess_combinations() do
for w <- @colours,
x <- @colours,
y <- @colours,
z <- @colours, do: [w,x,y,z]
end
defp loop_while(game, _matches, true = _won, _) do
{:won, game}
end
defp loop_while(game, _matches, false = _won, 1) do
{:lost, game}
end
defp loop_while(game, matches, false = _won, moves_left) do
matches = guess_matches(game, matches)
next_guess = next_guess(matches)
game = Game.guess(game, next_guess)
loop_while(game, matches, Game.won?(game), moves_left - 1)
end
defp guess_matches(game, matches) do
Enum.filter(matches, &(Core.score(Game.current_guess?(game), &1) == Game.score?(game)))
end
def next_guess(match) do
next_guesses = min_max(match)
next_guess = MapSet.intersection(MapSet.new(match), MapSet.new(next_guesses)) |> MapSet.to_list()
if Enum.empty?(next_guess) do
next_guesses
else
next_guess
end
|> List.first()
end
def min_max(matches) do
scores = guess_combinations() |> Enum.map(fn combination ->
{_max_score, max_count} = max_score(combination, matches)
{combination, max_count}
end)
{_, min} = Enum.min_by(scores, fn {_combination, max_count} -> max_count end)
scores
|> Enum.filter(fn {_combination, count} -> count == min end)
|> Enum.map(fn {combination, _count} -> combination end)
end
def max_score(combination, matches) do
Enum.reduce(matches, @possible_scores, fn x, acc ->
%{red: red, white: white} = Core.score(combination, x)
count = Map.get(acc, {red, white})
%{acc | {red, white} => count + 1}
end)
|> Enum.max_by(fn {_score, count} -> count end)
end
end
|
lib/strategy.ex
| 0.727975
| 0.571946
|
strategy.ex
|
starcoder
|
defmodule Tuple do
@moduledoc """
Functions for working with tuples.
Tuples are ordered collection of elements; tuples can contain elements of any
type, and a tuple can contain elements of different types. Curly braces can be
used to create tuples:
iex> {}
{}
iex> {1, :two, "three"}
{1, :two, "three"}
Tuples store elements contiguously in memory; this means that accessing a
tuple element by index (which can be done through the `Kernel.elem/2`
function) is a constant-time operation:
iex> tuple = {1, :two, "three"}
iex> elem(tuple, 0)
1
iex> elem(tuple, 2)
"three"
Same goes for getting the tuple size (via `Kernel.tuple_size/1`):
iex> tuple_size({})
0
iex> tuple_size({1, 2, 3})
3
Tuples being stored contiguously in memory also means that updating a tuple
(for example replacing an element with `Kernel.put_elem/3`) will make a copy
of the whole tuple.
Tuples are not meant to be used as a "collection" type (which is also
suggested by the absence of an implementation of the `Enumerable` protocol for
tuples): they're mostly meant to be used as a fixed-size container for
multiple elements. For example, tuples are often used to have functions return
"enriched" values: a common pattern is for functions to return `{:ok, value}`
for successful cases and `{:error, reason}` for unsuccessful cases. For
example, this is exactly what `File.read/1` does: it returns `{:ok, contents}`
if reading the given file is successful, or `{:error, reason}` otherwise
(e.g., `{:error, :enoent}` if the file doesn't exist).
This module provides functions to work with tuples; some more functions to
work with tuples can be found in `Kernel` (`Kernel.tuple_size/1`,
`Kernel.elem/2`, `Kernel.put_elem/3`, and others).
"""
@doc """
Creates a new tuple.
Creates a tuple of `size` containing the
given `data` at every position.
Inlined by the compiler.
## Examples
iex> Tuple.duplicate(:hello, 3)
{:hello, :hello, :hello}
"""
@spec duplicate(term, non_neg_integer) :: tuple
def duplicate(data, size) do
:erlang.make_tuple(size, data)
end
@doc """
Inserts an element into a tuple.
Inserts `value` into `tuple` at the given `index`.
Raises an `ArgumentError` if `index` is negative or greater than the
length of `tuple`. Index is zero-based.
Inlined by the compiler.
## Examples
iex> tuple = {:bar, :baz}
iex> Tuple.insert_at(tuple, 0, :foo)
{:foo, :bar, :baz}
iex> Tuple.insert_at(tuple, 2, :bong)
{:bar, :baz, :bong}
"""
@spec insert_at(tuple, non_neg_integer, term) :: tuple
def insert_at(tuple, index, value) do
:erlang.insert_element(index + 1, tuple, value)
end
@doc """
Inserts an element at the end of a tuple.
Returns a new tuple with the element appended at the end, and contains
the elements in `tuple` followed by `value` as the last element.
Inlined by the compiler.
## Examples
iex> tuple = {:foo, :bar}
iex> Tuple.append(tuple, :baz)
{:foo, :bar, :baz}
"""
@spec append(tuple, term) :: tuple
def append(tuple, value) do
:erlang.append_element(tuple, value)
end
@doc """
Removes an element from a tuple.
Deletes the element at the given `index` from `tuple`.
Raises an `ArgumentError` if `index` is negative or greater than
or equal to the length of `tuple`. Index is zero-based.
Inlined by the compiler.
## Examples
iex> tuple = {:foo, :bar, :baz}
iex> Tuple.delete_at(tuple, 0)
{:bar, :baz}
"""
@spec delete_at(tuple, non_neg_integer) :: tuple
def delete_at(tuple, index) do
:erlang.delete_element(index + 1, tuple)
end
@doc """
Converts a tuple to a list.
Returns a new list with all the tuple elements.
Inlined by the compiler.
## Examples
iex> tuple = {:foo, :bar, :baz}
iex> Tuple.to_list(tuple)
[:foo, :bar, :baz]
"""
@spec to_list(tuple) :: list
def to_list(tuple) do
:erlang.tuple_to_list(tuple)
end
end
|
lib/elixir/lib/tuple.ex
| 0.878978
| 0.73077
|
tuple.ex
|
starcoder
|
defmodule Vessel.Reducer do
@moduledoc """
This module contains the implementation of the Reducer behaviour for Vessel.
We implement a Pipe and simply group keys to their values on the fly and pass
them through in batches to the `reduce/3` implementation. We keep the order of
values received just to make sure we're consistent with the Hadoop Streaming
interface (so we don't have to document any differences).
You can store state by using `Vessel.put_private/3` and returning the Vessel
context at any point in the lifecycle. You can use `Vessel.get_private/3` or
matching in order to retrieve values - but do not modify any other root fields
inside the Vessel context as this is where job state is tracked. If you do not
return a Vessel context, it will ignore the return value and remain unchanged.
"""
alias Vessel.IO, as: Vio
@doc """
Invoked prior to any values being read from the stream.
This allows for setup and initialization within your Reducer. This is where you
should start any dependencies, or construct any variables. If you need to store
your variables for later, you should make use of `Vessel.put_private/3` and
make sure that you return the modified context.
If you don't return a valid context, the reducer phase will execute with the
default context (so always ensure you're explicitly returning it just to be
safe).
"""
@callback setup(ctx :: Vessel.t) :: Vessel.t | any
@doc """
Invoked once for every set of values against a key.
The first argument is the key, and the second value is a list of values. Both
types here will be Strings due to the nature of Hadoop Streaming, which means
you may have to parse these values appropriately. If you write a 5 from your
Mapper, it will be received as a "5" in your Reducer and need to be converted.
This is due to Hadoop Streaming passing everything via stdio. It may be that
this changes in a future version of Vessel, if possible.
The final argument is the Vessel context. This is passed through when calling
functions like `Vessel.write/3` in order to write values to the Job context.
This context is purely an application-level construct for Vessel to work with,
it does not represent the Hadoop Job Context (as there's no way to do so in
Hadoop Streaming).
If you wish to write any values, you must do so by calling `Vessel.write/3`,
which writes your value to the intermediate stream. You can write as many as
you wish within one call to `reduce/3`, in case your logic needs to generate
many records.
The return value of this function is ignored unless it is a Vessel context
which has been modified using `Vessel.put_private/3`, in which case it is kept
to be used as the context going forward.
"""
@callback reduce(key :: binary, value :: [ binary ], ctx :: Vessel.t) :: Vessel.t | any
@doc """
Invoked after all values have been read from the stream.
Basically the counterpart to the `setup/1` callback, in order to allow you to
clean up any temporary files you may have written, or close any connections,
etc.
The returned context here will be the final context, but it's highly unlikely
you'll need to modify the context at this point.
"""
@callback cleanup(ctx :: Vessel.t) :: Vessel.t | any
@doc false
defmacro __using__(_) do
quote location: :keep do
# inherit piping
use Vessel.Pipe
# inherit Reducer behaviour
@behaviour Vessel.Reducer
@doc false
def reduce(key, value, ctx) do
Vessel.write(ctx, { key, value })
end
@doc false
def handle_start(ctx) do
input = Vessel.get_conf(ctx, "stream.reduce.input.field.separator", "\t")
output = Vessel.get_conf(ctx, "stream.reduce.output.field.separator", "\t")
ctx
|> Vessel.put_meta(:separators, { input, output })
|> Vessel.put_meta(:group, nil)
|> super
end
@doc false
def handle_line(line, %{ meta: %{ separators: { input, _ } } } = ctx) do
new_ctx =
line
|> Vio.split(input, 1)
|> group_pair(ctx)
super(line, new_ctx)
end
@doc false
def handle_end(ctx) do
ctx
|> reduce_detect
|> Vessel.put_meta(:group, nil)
|> super
end
# This function handles the converted key/value pair coming from the prior
# call to `Vessel.IO.split/3`. We match on the previous key and the new key
# to see if they belong in the same group; if they do, then we just add the
# new value to the buffer of values. If it's a new key, we fire a `reduce/3`
# call with the previous key and values and begin storing the new state.
defp group_pair({ key, val }, %{ meta: %{ group: { key, vals } } } = ctx),
do: update_group(ctx, key, [ val | vals ])
defp group_pair({ new_key, val }, %{ } = ctx) do
ctx
|> reduce_detect
|> update_group(new_key, [ val ])
end
# When we fire a reduction, we need to make sure that we have a valid group
# of values and a key before calling reduce. This is because the very first
# call to `reduce_detect/1` will not have a valid key/values pair due to no
# previous input being provided (a.k.a. the initial state). We return the
# Vessel context here just to make it more convenient to piepline our calls.
defp reduce_detect(%{ meta: %{ group: { key, values } } } = ctx) do
reversed = Enum.reverse(values)
key
|> reduce(reversed, ctx)
|> handle_return(ctx)
end
defp reduce_detect(ctx),
do: ctx
# Updates the stored key grouping inside our Vessel context by placing the
# provided key and values inside a Tuple and updating the struct's group.
defp update_group(ctx, key, values),
do: Vessel.put_meta(ctx, :group, { key, values })
# We allow overriding reduce (obviously)
defoverridable [ reduce: 3 ]
end
end
end
|
lib/vessel/reducer.ex
| 0.875807
| 0.600335
|
reducer.ex
|
starcoder
|
defmodule Plaid.Institutions do
@moduledoc """
Functions for Plaid `institutions` endpoint.
"""
import Plaid, only: [make_request_with_cred: 4, validate_cred: 1, validate_public_key: 1]
alias Plaid.Utils
@derive Jason.Encoder
defstruct institutions: [], request_id: nil, total: nil
@type t :: %__MODULE__{
institutions: [Plaid.Institutions.Institution.t()],
request_id: String.t(),
total: integer
}
@type params :: %{required(atom) => integer | String.t() | list | map}
@type config :: %{required(atom) => String.t()}
@endpoint :institutions
defmodule Institution do
@moduledoc """
Plaid Institution data structure.
"""
@derive Jason.Encoder
defstruct brand_name: nil,
brand_subheading: nil,
colors: nil,
credentials: [],
has_mfa: nil,
health_status: nil,
institution_id: nil,
legacy_institution_code: nil,
legacy_institution_type: nil,
link_health_status: nil,
logo: nil,
mfa: [],
mfa_code_type: nil,
name: nil,
name_break: nil,
portal: nil,
products: [],
request_id: nil,
url: nil,
url_account_locked: nil,
url_account_setup: nil,
url_forgotten_password: nil
@type t :: %__MODULE__{
brand_name: String.t(),
brand_subheading: String.t(),
colors: Plaid.Institutions.Institution.Colors.t(),
credentials: [Plaid.Institutions.Institution.Credentials.t()],
has_mfa: false | true,
health_status: String.t(),
institution_id: String.t(),
legacy_institution_code: String.t(),
legacy_institution_type: String.t(),
link_health_status: String.t(),
logo: String.t(),
mfa: [String.t()],
mfa_code_type: String.t(),
name: String.t(),
name_break: String.t(),
portal: String.t(),
products: [String.t()],
request_id: String.t(),
url: String.t(),
url_account_locked: String.t(),
url_account_setup: String.t(),
url_forgotten_password: String.t()
}
defmodule Colors do
@moduledoc """
Plaid Institution Colors data structure.
"""
@derive Jason.Encoder
defstruct dark: nil, darker: nil, light: nil, primary: nil
@type t :: %__MODULE__{
dark: String.t(),
darker: String.t(),
light: String.t(),
primary: String.t()
}
end
defmodule Credentials do
@moduledoc """
Plaid Institution Credentials data structure.
"""
@derive Jason.Encoder
defstruct label: nil, name: nil, type: nil
@type t :: %__MODULE__{label: String.t(), name: String.t(), type: String.t()}
end
end
@doc """
Gets all institutions. Results paginated.
Parameters
```
%{count: 50, offset: 0}
```
"""
@spec get(params, config | nil) :: {:ok, Plaid.Institutions.t()} | {:error, Plaid.Error.t()}
def get(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/get"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
@doc """
Gets an institution by id.
Parameters
```
"ins_109512"
OR
%{institution_id: "ins_109512", options: %{include_optional_metadata: true, include_status: false}}
```
"""
@spec get_by_id(String.t() | params, config | nil) ::
{:ok, Plaid.Institutions.Institution.t()} | {:error, Plaid.Error.t()}
def get_by_id(params, config \\ %{}) do
config = validate_public_key(config)
params = if is_binary(params), do: %{institution_id: params}, else: params
endpoint = "#{@endpoint}/get_by_id"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(:institution)
end
@doc """
Searches institutions by name and product.
Parameters
```
%{query: "Wells", products: ["transactions"], options: %{limit: 40, include_display_data: true}}
```
"""
@spec search(params, config | nil) :: {:ok, Plaid.Institutions.t()} | {:error, Plaid.Error.t()}
def search(params, config \\ %{}) do
config = validate_public_key(config)
endpoint = "#{@endpoint}/search"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
end
|
lib/plaid/institutions.ex
| 0.790166
| 0.630301
|
institutions.ex
|
starcoder
|
defmodule Waffle.Processor do
@moduledoc ~S"""
Apply transformation to files.
Waffle can be used to facilitate transformations of uploaded files
via any system executable. Some common operations you may want to
take on uploaded files include resizing an uploaded avatar with
ImageMagick or extracting a still image from a video with FFmpeg.
To transform an image, the definition module must define a
`transform/2` function which accepts a version atom and a tuple
consisting of the uploaded file and corresponding scope.
This transform handler accepts the version atom, as well as the
file/scope argument, and is responsible for returning one of the
following:
* `:noaction` - The original file will be stored as-is.
* `:skip` - Nothing will be stored for the provided version.
* `{executable, args}` - The `executable` will be called with
`System.cmd` with the format
`#{original_file_path} #{args} #{transformed_file_path}`.
* `{executable, fn(input, output) -> args end}` If your executable
expects arguments in a format other than the above, you may
supply a function to the conversion tuple which will be invoked
to generate the arguments. The arguments can be returned as a
string (e.g. – `" #{input} -strip -thumbnail 10x10 #{output}"`)
or a list (e.g. – `[input, "-strip", "-thumbnail", "10x10",
output]`) for even more control.
* `{executable, args, output_extension}` - If your transformation
changes the file extension (eg, converting to `png`), then the
new file extension must be explicit.
## ImageMagick transformations
As images are one of the most commonly uploaded filetypes, Waffle
has a recommended integration with ImageMagick's `convert` tool for
manipulation of images. Each definition module may specify as many
versions as desired, along with the corresponding transformation for
each version.
The expected return value of a `transform` function call must either
be `:noaction`, in which case the original file will be stored
as-is, `:skip`, in which case nothing will be stored, or `{:convert,
transformation}` in which the original file will be processed via
ImageMagick's `convert` tool with the corresponding transformation
parameters.
The following example stores the original file, as well as a squared
100x100 thumbnail version which is stripped of comments (eg, GPS
coordinates):
defmodule Avatar do
use Waffle.Definition
@versions [:original, :thumb]
def transform(:thumb, _) do
{:convert, "-strip -thumbnail 100x100^ -gravity center -extent 100x100"}
end
end
Other examples:
# Change the file extension through ImageMagick's `format` parameter:
{:convert, "-strip -thumbnail 100x100^ -gravity center -extent 100x100 -format png", :png}
# Take the first frame of a gif and process it into a square jpg:
{:convert, fn(input, output) -> "#{input}[0] -strip -thumbnail 100x100^ -gravity center -extent 100x100 -format jpg #{output}", :jpg}
For more information on defining your transformation, please consult
[ImageMagick's convert
documentation](http://www.imagemagick.org/script/convert.php).
> **Note**: Keep this transformation function simple and deterministic based on the version, file name, and scope object. The `transform` function is subsequently called during URL generation, and the transformation is scanned for the output file format. As such, if you conditionally format the image as a `png` or `jpg` depending on the time of day, you will be displeased with the result of Waffle's URL generation.
> **System Resources**: If you are accepting arbitrary uploads on a public site, it may be prudent to add system resource limits to prevent overloading your system resources from malicious or nefarious files. Since all processing is done directly in ImageMagick, you may pass in system resource restrictions through the [-limit](http://www.imagemagick.org/script/command-line-options.php#limit) flag. One such example might be: `-limit area 10MB -limit disk 100MB`.
## FFmpeg transformations
Common transformations of uploaded videos can be also defined
through your definition module:
# To take a thumbnail from a video:
{:ffmpeg, fn(input, output) -> "-i #{input} -f jpg #{output}" end, :jpg}
# To convert a video to an animated gif
{:ffmpeg, fn(input, output) -> "-i #{input} -f gif #{output}" end, :gif}
## Complex Transformations
`Waffle` requires the output of your transformation to be located at
a predetermined path. However, the transformation may be done
completely outside of `Waffle`. For fine-grained transformations,
you should create an executable wrapper in your $PATH (eg. bash
script) which takes these proper arguments, runs your
transformation, and then moves the file into the correct location.
For example, to use `soffice` to convert a doc to an html file, you
should place the following bash script in your $PATH:
#!/usr/bin/env sh
# `soffice` doesn't allow for output file path option, and waffle can't find the
# temporary file to process and copy. This script has a similar argument list as
# what waffle expects. See https://github.com/stavro/arc/issues/77.
set -e
set -o pipefail
function convert {
soffice \
--headless \
--convert-to html \
--outdir $TMPDIR \
"$1"
}
function filter_new_file_name {
awk -F$TMPDIR '{print $2}' \
| awk -F" " '{print $1}' \
| awk -F/ '{print $2}'
}
converted_file_name=$(convert "$1" | filter_new_file_name)
cp $TMPDIR/$converted_file_name "$2"
rm $TMPDIR/$converted_file_name
And perform the transformation as such:
def transform(:html, _) do
{:soffice_wrapper, fn(input, output) -> [input, output] end, :html}
end
"""
alias Waffle.Transformations.{Convert, CustomConvert}
def process(definition, version, {file, scope}) do
transform = definition.transform(version, {file, scope})
apply_transformation(file, transform)
end
defp apply_transformation(_, :skip), do: {:ok, nil}
defp apply_transformation(file, :noaction), do: {:ok, file}
# Deprecated
defp apply_transformation(file, {:noaction}), do: {:ok, file}
defp apply_transformation(file, {cmd, conversion}) do
Convert.apply(cmd, file, conversion)
end
defp apply_transformation(file, {:custom, executor, extension}) when is_function(executor) do
CustomConvert.apply(file, executor, extension)
end
defp apply_transformation(file, {cmd, conversion, extension}) do
Convert.apply(cmd, file, conversion, extension)
end
end
|
lib/waffle/processor.ex
| 0.87464
| 0.700972
|
processor.ex
|
starcoder
|
defmodule Sugar.Controller.Helpers do
@moduledoc """
All controller actions should have an arrity of 2, with the
first argument being a `Plug.Conn` representing the current
connection and the second argument being a `Keyword` list
of any parameters captured in the route path.
Sugar bundles these response helpers to assist in sending a
response:
* `render/4` - `conn`, `template_key`, `assigns`, `opts` - sends a normal
response.
* `halt!/2` - `conn`, `opts` - ends the response.
* `not_found/1` - `conn`, `message` - sends a 404 (Not found) response.
* `json/2` - `conn`, `data` - sends a normal response with
`data` encoded as JSON.
* `raw/1` - `conn` - sends response as-is. It is expected
that status codes, headers, body, etc have been set by
the controller action.
* `static/2` - `conn`, `file` - reads and renders a single static file.
#### Example
defmodule Hello do
use Sugar.Controller
def index(conn, []) do
render(conn, "showing index controller")
end
def show(conn, args) do
render(conn, "showing page \#{args[:id]}")
end
def create(conn, []) do
render(conn, "page created")
end
def get_json(conn, []) do
json(conn, [message: "foobar"])
end
end
"""
@type status_code :: 100..999
@type headers :: [{binary, binary}]
import Plug.Conn
@doc """
sets connection status
## Arguments
* `conn` - `Plug.Conn`
* `status_code` - `Integer`
## Returns
`Plug.Conn`
"""
@spec status(Plug.Conn.t, status_code) :: Plug.Conn.t
def status(conn, status_code) do
%Plug.Conn{conn | status: status_code, state: :set}
end
@doc """
sets response headers
## Arguments
* `conn` - `Plug.Conn`
* `status_code` - `List`
## Returns
`Plug.Conn`
"""
@spec headers(Plug.Conn.t, headers) :: Plug.Conn.t
def headers(conn, headers) do
%Plug.Conn{conn | resp_headers: headers, state: :set}
end
@doc """
reads and renders a single static file.
## Arguments
* `conn` - `Plug.Conn`
* `file` - `String`
## Returns
`Plug.Conn`
"""
@spec static(Plug.Conn.t, binary) :: Plug.Conn.t
def static(conn, file) do
filename = Path.join(["priv/static", file])
if File.exists?(filename) do
body = filename |> File.read!
conn
|> maybe_put_resp_content_type("text/html")
|> maybe_send_resp(200, body)
else
conn
|> not_found
end
end
@doc """
Sends a normal response with `data` encoded as JSON.
## Arguments
* `conn` - `Plug.Conn`
* `data` - `Keyword|List`
## Returns
`Plug.Conn`
"""
@spec json(Plug.Conn.t, Keyword.t | list, Keyword.t) :: Plug.Conn.t
def json(conn, data, opts \\ []) do
opts = [status: conn.status || 200] |> Keyword.merge(opts)
header = get_resp_header(conn, "content-type")
conn = if header == [] or not (header |> hd =~ "json") do
maybe_put_resp_content_type(conn, "application/json")
else
conn
end
conn
|> maybe_send_resp(opts[:status], Poison.encode! data)
end
@doc """
Sends response as-is. It is expected that status codes,
headers, body, etc have been set by the controller
action.
## Arguments
* `conn` - `Plug.Conn`
## Returns
`Plug.Conn`
"""
@spec raw(Plug.Conn.t) :: Plug.Conn.t
def raw(conn) do
conn |> send_resp
end
@doc """
Sends a normal response.
Automatically renders a template based on the
current controller and action names when no
template is passed.
## Arguments
* `conn` - `Plug.Conn`
* `template_key` - `String`
* `assigns` - `Keyword`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec render(Plug.Conn.t, binary | Keyword.t | nil, Keyword.t, Keyword.t) :: Plug.Conn.t
def render(conn, template \\ nil, assigns \\ [], opts \\ [])
def render(conn, template, assigns, opts) when is_atom(template)
or is_binary(template) do
template = build_template_key(conn, template)
render_view(conn, template, assigns, opts)
end
def render(conn, assigns, opts, _) when is_list(assigns) do
template = build_template_key(conn)
render_view(conn, template, assigns, opts)
end
@doc """
Ends the response.
## Arguments
* `conn` - `Plug.Conn`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec halt!(Plug.Conn.t, Keyword.t) :: Plug.Conn.t
def halt!(conn, opts \\ []) do
opts = [status: 401, message: ""] |> Keyword.merge(opts)
conn
|> maybe_send_resp(opts[:status], opts[:message])
end
@doc """
Sends a 404 (Not found) response.
## Arguments
* `conn` - `Plug.Conn`
## Returns
`Plug.Conn`
"""
@spec not_found(Plug.Conn.t, binary) :: Plug.Conn.t
def not_found(conn, message \\ "Not Found") do
conn
|> maybe_send_resp(404, message)
end
@doc """
Forwards the response to another controller action.
## Arguments
* `conn` - `Plug.Conn`
* `controller` - `Atom`
* `action` - `Atom`
* `args` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec forward(Plug.Conn.t, atom, atom, Keyword.t) :: Plug.Conn.t
def forward(conn, controller, action, args \\ []) do
apply(controller, action, [conn, args])
end
@doc """
Redirects the response.
## Arguments
* `conn` - `Plug.Conn`
* `location` - `String`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec redirect(Plug.Conn.t, binary, Keyword.t) :: Plug.Conn.t
def redirect(conn, location, opts \\ []) do
opts = [status: 302] |> Keyword.merge(opts)
conn
|> maybe_put_resp_header("location", location)
|> maybe_send_resp(opts[:status], "")
end
defp build_template_key(conn, template \\ nil) do
default = Map.get(conn.private, :action) || :index
template = template || default
controller = "#{Map.get(conn.private, :controller, "")}"
|> String.split(".")
|> List.last
|> String.downcase
"#{controller}/#{template}"
end
defp render_view(conn, template_key, assigns, opts) do
opts = [status: 200] |> Keyword.merge(opts)
header = get_resp_header(conn, "content-type")
conn = if header == [] or not (header |> hd =~ "json") do
maybe_put_resp_content_type(conn, opts[:content_type] || "text/html")
else
conn
end
html = Sugar.Config.get(:sugar, :views_dir, "lib/#{Mix.Project.config[:app]}/views")
|> Sugar.Views.Finder.one(template_key)
|> Sugar.Templates.render(assigns)
conn
|> maybe_send_resp(opts[:status], html)
end
defp maybe_put_resp_header(%Plug.Conn{state: :sent} = conn, _, _) do
conn
end
defp maybe_put_resp_header(conn, key, value) do
conn |> put_resp_header(key, value)
end
defp maybe_put_resp_content_type(%Plug.Conn{state: :sent} = conn, _) do
conn
end
defp maybe_put_resp_content_type(conn, resp_content_type) do
conn |> put_resp_content_type(resp_content_type)
end
defp maybe_send_resp(%Plug.Conn{state: :sent} = conn, _, _) do
conn
end
defp maybe_send_resp(conn, status, body) do
conn |> send_resp(status, body)
end
end
|
lib/sugar/controller/helpers.ex
| 0.890473
| 0.669806
|
helpers.ex
|
starcoder
|
defmodule AWS.SecurityHub do
@moduledoc """
Security Hub provides you with a comprehensive view of the security state of
your AWS environment and resources.
It also provides you with the readiness status of your environment based on
controls from supported security standards. Security Hub collects security data
from AWS accounts, services, and integrated third-party products and helps you
analyze security trends in your environment to identify the highest priority
security issues. For more information about Security Hub, see the * [AWS Security Hub User
Guide](https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html)
*.
When you use operations in the Security Hub API, the requests are executed only
in the AWS Region that is currently active or in the specific AWS Region that
you specify in your request. Any configuration or settings change that results
from the operation is applied only to that Region. To make the same change in
other Regions, execute the same command for each Region to apply the change to.
For example, if your Region is set to `us-west-2`, when you use `
`CreateMembers` ` to add a member account to Security Hub, the association of
the member account with the administrator account is created only in the
`us-west-2` Region. Security Hub must be enabled for the member account in the
same Region that the invitation was sent from.
The following throttling limits apply to using Security Hub API operations.
* ` `BatchEnableStandards` ` - `RateLimit` of 1 request per second,
`BurstLimit` of 1 request per second.
* ` `GetFindings` ` - `RateLimit` of 3 requests per second.
`BurstLimit` of 6 requests per second.
* ` `UpdateFindings` ` - `RateLimit` of 1 request per second.
`BurstLimit` of 5 requests per second.
* ` `UpdateStandardsControl` ` - `RateLimit` of 1 request per
second, `BurstLimit` of 5 requests per second.
* All other operations - `RateLimit` of 10 requests per second.
`BurstLimit` of 30 requests per second.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2018-10-26",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "securityhub",
global?: false,
protocol: "rest-json",
service_id: "SecurityHub",
signature_version: "v4",
signing_name: "securityhub",
target_prefix: nil
}
end
@doc """
Accepts the invitation to be a member account and be monitored by the Security
Hub administrator account that the invitation was sent from.
This operation is only used by member accounts that are not added through
Organizations.
When the member account accepts the invitation, permission is granted to the
administrator account to view findings generated in the member account.
"""
def accept_administrator_invitation(%Client{} = client, input, options \\ []) do
url_path = "/administrator"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
This method is deprecated.
Instead, use `AcceptAdministratorInvitation`.
The Security Hub console continues to use `AcceptInvitation`. It will eventually
change to use `AcceptAdministratorInvitation`. Any IAM policies that
specifically control access to this function must continue to use
`AcceptInvitation`. You should also add `AcceptAdministratorInvitation` to your
policies to ensure that the correct permissions are in place after the console
begins to use `AcceptAdministratorInvitation`.
Accepts the invitation to be a member account and be monitored by the Security
Hub administrator account that the invitation was sent from.
This operation is only used by member accounts that are not added through
Organizations.
When the member account accepts the invitation, permission is granted to the
administrator account to view findings generated in the member account.
"""
def accept_invitation(%Client{} = client, input, options \\ []) do
url_path = "/master"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Disables the standards specified by the provided `StandardsSubscriptionArns`.
For more information, see [Security Standards](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html)
section of the *AWS Security Hub User Guide*.
"""
def batch_disable_standards(%Client{} = client, input, options \\ []) do
url_path = "/standards/deregister"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enables the standards specified by the provided `StandardsArn`.
To obtain the ARN for a standard, use the ` `DescribeStandards` ` operation.
For more information, see the [Security Standards](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html)
section of the *AWS Security Hub User Guide*.
"""
def batch_enable_standards(%Client{} = client, input, options \\ []) do
url_path = "/standards/register"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Imports security findings generated from an integrated product into Security
Hub.
This action is requested by the integrated product to import its findings into
Security Hub.
The maximum allowed size for a finding is 240 Kb. An error is returned for any
finding larger than 240 Kb.
After a finding is created, `BatchImportFindings` cannot be used to update the
following finding fields and objects, which Security Hub customers use to manage
their investigation workflow.
* `Note`
* `UserDefinedFields`
* `VerificationState`
* `Workflow`
Finding providers also should not use `BatchImportFindings` to update the
following attributes.
* `Confidence`
* `Criticality`
* `RelatedFindings`
* `Severity`
* `Types`
Instead, finding providers use `FindingProviderFields` to provide values for
these attributes.
"""
def batch_import_findings(%Client{} = client, input, options \\ []) do
url_path = "/findings/import"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used by Security Hub customers to update information about their investigation
into a finding.
Requested by administrator accounts or member accounts. Administrator accounts
can update findings for their account and their member accounts. Member accounts
can update findings for their account.
Updates from `BatchUpdateFindings` do not affect the value of `UpdatedAt` for a
finding.
Administrator and member accounts can use `BatchUpdateFindings` to update the
following finding fields and objects.
* `Confidence`
* `Criticality`
* `Note`
* `RelatedFindings`
* `Severity`
* `Types`
* `UserDefinedFields`
* `VerificationState`
* `Workflow`
You can configure IAM policies to restrict access to fields and field values.
For example, you might not want member accounts to be able to suppress findings
or change the finding severity. See [Configuring access to BatchUpdateFindings](https://docs.aws.amazon.com/securityhub/latest/userguide/finding-update-batchupdatefindings.html#batchupdatefindings-configure-access)
in the *AWS Security Hub User Guide*.
"""
def batch_update_findings(%Client{} = client, input, options \\ []) do
url_path = "/findings/batchupdate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a custom action target in Security Hub.
You can use custom actions on findings and insights in Security Hub to trigger
target actions in Amazon CloudWatch Events.
"""
def create_action_target(%Client{} = client, input, options \\ []) do
url_path = "/actionTargets"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a custom insight in Security Hub.
An insight is a consolidation of findings that relate to a security issue that
requires attention or remediation.
To group the related findings in the insight, use the `GroupByAttribute`.
"""
def create_insight(%Client{} = client, input, options \\ []) do
url_path = "/insights"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a member association in Security Hub between the specified accounts and
the account used to make the request, which is the administrator account.
If you are integrated with Organizations, then the administrator account is
designated by the organization management account.
`CreateMembers` is always used to add accounts that are not organization
members.
For accounts that are part of an organization, `CreateMembers` is only used in
the following cases:
* Security Hub is not configured to automatically add new accounts
in an organization.
* The account was disassociated or deleted in Security Hub.
This action can only be used by an account that has Security Hub enabled. To
enable Security Hub, you can use the ` `EnableSecurityHub` ` operation.
For accounts that are not organization members, you create the account
association and then send an invitation to the member account. To send the
invitation, you use the ` `InviteMembers` ` operation. If the account owner
accepts the invitation, the account becomes a member account in Security Hub.
Accounts that are part of an organization do not receive an invitation. They
automatically become a member account in Security Hub.
A permissions policy is added that permits the administrator account to view the
findings generated in the member account. When Security Hub is enabled in a
member account, the member account findings are also visible to the
administrator account.
To remove the association between the administrator and member accounts, use the
` `DisassociateFromMasterAccount` ` or ` `DisassociateMembers` ` operation.
"""
def create_members(%Client{} = client, input, options \\ []) do
url_path = "/members"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Declines invitations to become a member account.
This operation is only used by accounts that are not part of an organization.
Organization accounts do not receive invitations.
"""
def decline_invitations(%Client{} = client, input, options \\ []) do
url_path = "/invitations/decline"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a custom action target from Security Hub.
Deleting a custom action target does not affect any findings or insights that
were already sent to Amazon CloudWatch Events using the custom action.
"""
def delete_action_target(%Client{} = client, action_target_arn, input, options \\ []) do
url_path = "/actionTargets/#{AWS.Util.encode_multi_segment_uri(action_target_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the insight specified by the `InsightArn`.
"""
def delete_insight(%Client{} = client, insight_arn, input, options \\ []) do
url_path = "/insights/#{AWS.Util.encode_multi_segment_uri(insight_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes invitations received by the AWS account to become a member account.
This operation is only used by accounts that are not part of an organization.
Organization accounts do not receive invitations.
"""
def delete_invitations(%Client{} = client, input, options \\ []) do
url_path = "/invitations/delete"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the specified member accounts from Security Hub.
Can be used to delete member accounts that belong to an organization as well as
member accounts that were invited manually.
"""
def delete_members(%Client{} = client, input, options \\ []) do
url_path = "/members/delete"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of the custom action targets in Security Hub in your account.
"""
def describe_action_targets(%Client{} = client, input, options \\ []) do
url_path = "/actionTargets/get"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns details about the Hub resource in your account, including the `HubArn`
and the time when you enabled Security Hub.
"""
def describe_hub(%Client{} = client, hub_arn \\ nil, options \\ []) do
url_path = "/accounts"
headers = []
query_params = []
query_params =
if !is_nil(hub_arn) do
[{"HubArn", hub_arn} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns information about the Organizations configuration for Security Hub.
Can only be called from a Security Hub administrator account.
"""
def describe_organization_configuration(%Client{} = client, options \\ []) do
url_path = "/organization/configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns information about product integrations in Security Hub.
You can optionally provide an integration ARN. If you provide an integration
ARN, then the results only include that integration.
If you do not provide an integration ARN, then the results include all of the
available product integrations.
"""
def describe_products(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
product_arn \\ nil,
options \\ []
) do
url_path = "/products"
headers = []
query_params = []
query_params =
if !is_nil(product_arn) do
[{"ProductArn", product_arn} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of the available standards in Security Hub.
For each standard, the results include the standard ARN, the name, and a
description.
"""
def describe_standards(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/standards"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of security standards controls.
For each control, the results include information about whether it is currently
enabled, the severity, and a link to remediation information.
"""
def describe_standards_controls(
%Client{} = client,
standards_subscription_arn,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path =
"/standards/controls/#{AWS.Util.encode_multi_segment_uri(standards_subscription_arn)}"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Disables the integration of the specified product with Security Hub.
After the integration is disabled, findings from that product are no longer sent
to Security Hub.
"""
def disable_import_findings_for_product(
%Client{} = client,
product_subscription_arn,
input,
options \\ []
) do
url_path =
"/productSubscriptions/#{AWS.Util.encode_multi_segment_uri(product_subscription_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Disables a Security Hub administrator account.
Can only be called by the organization management account.
"""
def disable_organization_admin_account(%Client{} = client, input, options \\ []) do
url_path = "/organization/admin/disable"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Disables Security Hub in your account only in the current Region.
To disable Security Hub in all Regions, you must submit one request per Region
where you have enabled Security Hub.
When you disable Security Hub for an administrator account, it doesn't disable
Security Hub for any associated member accounts.
When you disable Security Hub, your existing findings and insights and any
Security Hub configuration settings are deleted after 90 days and cannot be
recovered. Any standards that were enabled are disabled, and your administrator
and member account associations are removed.
If you want to save your existing findings, you must export them before you
disable Security Hub.
"""
def disable_security_hub(%Client{} = client, input, options \\ []) do
url_path = "/accounts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Disassociates the current Security Hub member account from the associated
administrator account.
This operation is only used by accounts that are not part of an organization.
For organization accounts, only the administrator account can disassociate a
member account.
"""
def disassociate_from_administrator_account(%Client{} = client, input, options \\ []) do
url_path = "/administrator/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
This method is deprecated.
Instead, use `DisassociateFromAdministratorAccount`.
The Security Hub console continues to use `DisassociateFromMasterAccount`. It
will eventually change to use `DisassociateFromAdministratorAccount`. Any IAM
policies that specifically control access to this function must continue to use
`DisassociateFromMasterAccount`. You should also add
`DisassociateFromAdministratorAccount` to your policies to ensure that the
correct permissions are in place after the console begins to use
`DisassociateFromAdministratorAccount`.
Disassociates the current Security Hub member account from the associated
administrator account.
This operation is only used by accounts that are not part of an organization.
For organization accounts, only the administrator account can disassociate a
member account.
"""
def disassociate_from_master_account(%Client{} = client, input, options \\ []) do
url_path = "/master/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Disassociates the specified member accounts from the associated administrator
account.
Can be used to disassociate both accounts that are managed using Organizations
and accounts that were invited manually.
"""
def disassociate_members(%Client{} = client, input, options \\ []) do
url_path = "/members/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enables the integration of a partner product with Security Hub.
Integrated products send findings to Security Hub.
When you enable a product integration, a permissions policy that grants
permission for the product to send findings to Security Hub is applied.
"""
def enable_import_findings_for_product(%Client{} = client, input, options \\ []) do
url_path = "/productSubscriptions"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Designates the Security Hub administrator account for an organization.
Can only be called by the organization management account.
"""
def enable_organization_admin_account(%Client{} = client, input, options \\ []) do
url_path = "/organization/admin/enable"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Enables Security Hub for your account in the current Region or the Region you
specify in the request.
When you enable Security Hub, you grant to Security Hub the permissions
necessary to gather findings from other services that are integrated with
Security Hub.
When you use the `EnableSecurityHub` operation to enable Security Hub, you also
automatically enable the following standards.
* CIS AWS Foundations
* AWS Foundational Security Best Practices
The Payment Card Industry Data Security Standard (PCI DSS) is not enabled
automatically.
To opt out of the automatically enabled standards, set `EnableDefaultStandards`
to `false`.
After you enable Security Hub, to enable a standard, use the
`BatchEnableStandards` operation. To disable a standard, use the
`BatchDisableStandards` operation.
To learn more, see [Setting Up AWS Security Hub](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-settingup.html)
in the *AWS Security Hub User Guide*.
"""
def enable_security_hub(%Client{} = client, input, options \\ []) do
url_path = "/accounts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
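# Hedged usage sketch: opt out of the default standards at enablement time by
# passing `EnableDefaultStandards` in the input map (key name per the
# EnableSecurityHub API):
#
#     {:ok, _body, _resp} =
#       enable_security_hub(client, %{"EnableDefaultStandards" => false})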
@doc """
Provides the details for the Security Hub administrator account for the current
member account.
Can be used by both member accounts that are managed using Organizations and
accounts that were invited manually.
"""
def get_administrator_account(%Client{} = client, options \\ []) do
url_path = "/administrator"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of the standards that are currently enabled.
"""
def get_enabled_standards(%Client{} = client, input, options \\ []) do
url_path = "/standards/get"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of findings that match the specified criteria.
"""
def get_findings(%Client{} = client, input, options \\ []) do
url_path = "/findings"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Lists the results of the Security Hub insight specified by the insight ARN.
"""
def get_insight_results(%Client{} = client, insight_arn, options \\ []) do
url_path = "/insights/results/#{AWS.Util.encode_multi_segment_uri(insight_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists and describes insights for the specified insight ARNs.
"""
def get_insights(%Client{} = client, input, options \\ []) do
url_path = "/insights/get"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns the count of all Security Hub membership invitations that were sent to
the current member account, not including the currently accepted invitation.
"""
def get_invitations_count(%Client{} = client, options \\ []) do
url_path = "/invitations/count"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
This method is deprecated.
Instead, use `GetAdministratorAccount`.
The Security Hub console continues to use `GetMasterAccount`. It will eventually
change to use `GetAdministratorAccount`. Any IAM policies that specifically
control access to this function must continue to use `GetMasterAccount`. You
should also add `GetAdministratorAccount` to your policies to ensure that the
correct permissions are in place after the console begins to use
`GetAdministratorAccount`.
Provides the details for the Security Hub administrator account for the current
member account.
Can be used by both member accounts that are managed using Organizations and
accounts that were invited manually.
"""
def get_master_account(%Client{} = client, options \\ []) do
url_path = "/master"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns the details for the Security Hub member accounts for the specified
account IDs.
An administrator account can be either the delegated Security Hub administrator
account for an organization or an administrator account that enabled Security
Hub manually.
The results include both member accounts that are managed using Organizations
and accounts that were invited manually.
"""
def get_members(%Client{} = client, input, options \\ []) do
url_path = "/members/get"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Invites other AWS accounts to become member accounts for the Security Hub
administrator account that the invitation is sent from.
This operation is only used to invite accounts that do not belong to an
organization. Organization accounts do not receive invitations.
Before you can use this action to invite a member, you must first use the
`CreateMembers` action to create the member account in Security Hub.
When the account owner enables Security Hub and accepts the invitation to become
a member account, the administrator account can view the findings generated from
the member account.
"""
def invite_members(%Client{} = client, input, options \\ []) do
url_path = "/members/invite"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Lists all findings-generating solutions (products) that you are subscribed to
receive findings from in Security Hub.
"""
def list_enabled_products_for_import(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/productSubscriptions"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists all Security Hub membership invitations that were sent to the current AWS
account.
This operation is only used by accounts that are managed by invitation. Accounts
that are managed using the integration with AWS Organizations do not receive
invitations.
"""
def list_invitations(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/invitations"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists details about all member accounts for the current Security Hub
administrator account.
The results include both member accounts that belong to an organization and
member accounts that were invited manually.
"""
def list_members(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
only_associated \\ nil,
options \\ []
) do
url_path = "/members"
headers = []
query_params = []
query_params =
if !is_nil(only_associated) do
[{"OnlyAssociated", only_associated} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Lists the Security Hub administrator accounts.
Can only be called by the organization management account.
"""
def list_organization_admin_accounts(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/organization/admin"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"NextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"MaxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of tags associated with a resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Adds one or more tags to a resource.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes one or more tags from a resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
{query_params, input} =
[
{"TagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
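# Note that `Request.build_params/2` above lifts the listed keys out of the
# input map into the query string (hence the `{query_params, input}` rebind),
# so `TagKeys` is sent as `?tagKeys=...` rather than in the request body.
# Hedged usage sketch:
#
#     untag_resource(client, resource_arn, %{"TagKeys" => ["Department"]})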
@doc """
Updates the name and description of a custom action target in Security Hub.
"""
def update_action_target(%Client{} = client, action_target_arn, input, options \\ []) do
url_path = "/actionTargets/#{AWS.Util.encode_multi_segment_uri(action_target_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
`UpdateFindings` is deprecated.
Instead of `UpdateFindings`, use `BatchUpdateFindings`.
Updates the `Note` and `RecordState` of the Security Hub-aggregated findings
that the filter attributes specify. Any member account that can view the finding
also sees the update to the finding.
"""
def update_findings(%Client{} = client, input, options \\ []) do
url_path = "/findings"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates the Security Hub insight identified by the specified insight ARN.
"""
def update_insight(%Client{} = client, insight_arn, input, options \\ []) do
url_path = "/insights/#{AWS.Util.encode_multi_segment_uri(insight_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to update the configuration related to Organizations.
Can only be called from a Security Hub administrator account.
"""
def update_organization_configuration(%Client{} = client, input, options \\ []) do
url_path = "/organization/configuration"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates configuration options for Security Hub.
"""
def update_security_hub_configuration(%Client{} = client, input, options \\ []) do
url_path = "/accounts"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Used to control whether an individual security standard control is enabled or
disabled.
"""
def update_standards_control(%Client{} = client, standards_control_arn, input, options \\ []) do
url_path = "/standards/control/#{AWS.Util.encode_multi_segment_uri(standards_control_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
lib/aws/generated/security_hub.ex
defmodule Membrane.Pad.Data do
@moduledoc """
Struct describing current pad state.
The public fields are:
- `:accepted_caps` - specification of possible caps that are accepted on the pad.
See `Membrane.Caps.Matcher` for more information. This field only applies to elements' pads.
- `:availability` - see `Membrane.Pad.availability_t`
- `:caps` - the most recent `Membrane.Caps` that have been sent (output) or received (input)
on the pad. May be `nil` if not yet set. This field only applies to elements' pads.
- `:demand` - current demand requested on the pad working in pull mode. This field only applies to elements' pads.
- `:direction` - see `Membrane.Pad.direction_t`
- `:end_of_stream?` - flag determining whether the stream processing via the pad has been finished
- `:mode` - see `Membrane.Pad.mode_t`. This field only applies to elements' pads.
- `:name` - see `Membrane.Pad.name_t`. Not to be confused with `:ref`
- `:options` - options passed in `Membrane.ParentSpec` when linking pad
- `:ref` - see `Membrane.Pad.ref_t`
- `:start_of_stream?` - flag determining whether the stream processing via the pad has been started
Other fields in the struct ARE NOT PART OF THE PUBLIC API and should not be
accessed or relied on.
"""
use Bunch.Access
alias Membrane.Buffer.Metric
alias Membrane.Core.InputBuffer
alias Membrane.{Caps, Event, Pad}
@type t :: %__MODULE__{
accepted_caps: Caps.Matcher.caps_specs_t(),
availability: Pad.availability_t(),
direction: Pad.direction_t(),
mode: Pad.mode_t(),
name: Pad.name_t(),
ref: Pad.ref_t(),
demand_unit: Metric.unit_t() | nil,
other_demand_unit: Metric.unit_t() | nil,
pid: pid,
other_ref: Pad.ref_t(),
caps: Caps.t() | nil,
start_of_stream?: boolean(),
end_of_stream?: boolean(),
sticky_messages: [Event.t()],
input_buf: InputBuffer.t() | nil,
demand: integer() | nil,
options: %{optional(atom) => any}
}
defstruct accepted_caps: nil,
availability: nil,
direction: nil,
mode: nil,
name: nil,
ref: nil,
demand_unit: nil,
other_demand_unit: nil,
pid: nil,
other_ref: nil,
caps: nil,
start_of_stream?: nil,
end_of_stream?: nil,
sticky_messages: nil,
input_buf: nil,
demand: nil,
options: %{}
end
lib/membrane/pad_data.ex
defmodule Adventofcode.Day04GiantSquid do
use Adventofcode
alias __MODULE__.{Parser, Solver, State}
def part_1(input) do
input
|> Parser.parse()
|> State.new()
|> Solver.solve(:part_1)
end
def part_2(input) do
input
|> Parser.parse()
|> State.new()
|> Solver.solve(:part_2)
end
defmodule Board do
@unmarked {nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil}
@enforce_keys [:grid]
defstruct bingo?: false, grid: nil, marked: @unmarked
def new(grid), do: %__MODULE__{grid: grid}
def mark(board, number) when is_number(number), do: mark(board, [number])
def mark(%Board{} = board, numbers) when is_list(numbers) do
Enum.reduce(numbers, board, fn num, acc ->
acc = %{acc | marked: do_mark(acc, num)}
%{acc | bingo?: bingo?(acc)}
end)
end
defp do_mark(%Board{grid: grid, marked: marked}, num) do
Enum.reduce(0..24, marked, fn
i, acc when elem(grid, i) == num -> put_elem(acc, i, true)
_, acc -> acc
end)
end
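# `bingo?/1` below enumerates the ten winning lines of the 5x5 board as flat
# indexes into `marked`: the factor pair `{5, 1}` generates the five rows
# (`x * 5 + y`) and `{1, 5}` the five columns (`x + y * 5`); `chunk_every(5)`
# groups each line, and a line wins once all of its indexes are marked.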
defp bingo?(%Board{marked: marked}) do
for({xf, yf} <- [{5, 1}, {1, 5}], x <- 0..4, y <- 0..4, do: x * xf + y * yf)
|> Enum.chunk_every(5)
|> Enum.any?(fn row -> Enum.all?(row, &elem(marked, &1)) end)
end
end
defmodule State do
@enforce_keys [:boards, :random_order]
defstruct boards: [], random_order: []
def new({random_order, boards}) do
%__MODULE__{random_order: random_order, boards: Enum.map(boards, &Board.new/1)}
end
def unmarked_numbers(%Board{grid: grid, marked: marked}, %State{}) do
Enum.map(0..24, fn
index when elem(marked, index) == true -> 0
index -> elem(grid, index)
end)
end
end
defmodule Solver do
def solve(state, part) do
state.random_order
|> Enum.reduce_while(state, &do_solve(&1, &2, part))
|> score
end
defp do_solve(num, acc, :part_1) do
boards = acc.boards |> Enum.map(&Board.mark(&1, [num]))
if Enum.any?(boards, & &1.bingo?) do
{:halt, %{acc | boards: boards}}
else
{:cont, %{acc | boards: boards, random_order: tl(acc.random_order)}}
end
end
defp do_solve(num, acc, :part_2) do
boards = acc.boards |> Enum.reject(& &1.bingo?) |> Enum.map(&Board.mark(&1, [num]))
if Enum.all?(boards, & &1.bingo?) do
{:halt, %{acc | boards: boards}}
else
{:cont, %{acc | boards: boards, random_order: tl(acc.random_order)}}
end
end
defp score(state) do
hd(state.random_order) * do_score(state)
end
defp do_score(state) do
state.boards
|> Enum.find(& &1.bingo?)
|> State.unmarked_numbers(state)
|> Enum.sum()
end
end
defmodule Parser do
def parse(input) do
input
|> String.trim()
|> String.split("\n\n")
|> do_parse
end
defp do_parse([random_order | boards]) do
{parse_random_order(random_order), parse_boards(boards)}
end
defp parse_random_order(line) do
line
|> String.trim()
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
defp parse_boards(boards) do
~r/-?\d+/
|> Regex.scan(Enum.join(boards, " "))
|> List.flatten()
|> Enum.map(&String.to_integer/1)
|> Enum.chunk_every(25)
|> Enum.map(&List.to_tuple/1)
end
end
end
lib/day_04_giant_squid.ex
defmodule Mix.Tasks.Validation.Gen do
@moduledoc false
use Mix.Task
@shortdoc "Generates the ExUnit tests to cover the validation suite"
@skip ~w{05-033 05-034 05-037 05-038 05-042 06-001 06-002 06-003 06-004}
@groups %{
"TestRelatePP" => %{file_name: "point_point_a", module_name: "PointPointA"},
"TestRelatePL" => %{file_name: "point_line_a", module_name: "PointLineA"},
"TestRelatePA" => %{file_name: "point_polygon_a", module_name: "PointPolygonA"},
"TestRelateLL" => %{file_name: "line_line_a", module_name: "LineLineA"},
"TestRelateLA" => %{file_name: "line_polygon_a", module_name: "LinePolygonA"},
"TestRelateLC" => %{file_name: "line_complex_a", module_name: "LineComplexA"},
"TestRelateAA" => %{file_name: "polygon_polygon_a", module_name: "PolygonPolygonA"},
"TestRelateAC" => %{file_name: "polygon_complex_a", module_name: "PolygonComplexA"},
"TestFunctionPP" => %{file_name: "point_point_b", module_name: "PointPointB"},
"TestFunctionPL" => %{file_name: "point_line_b", module_name: "PointLineB"},
"TestFunctionPA" => %{file_name: "point_polygon_b", module_name: "PointPolygonB"},
"TestFunctionLL" => %{file_name: "line_line_b", module_name: "LineLineB"},
"TestFunctionLA" => %{file_name: "line_polygon_b", module_name: "LinePolygonB"},
"TestFunctionAA" => %{file_name: "polygon_polygon_b", module_name: "PolygonPolygonB"},
"TestFunctionPLPrec" => %{
file_name: "point_line_precision",
module_name: "PointLinePrecision"
},
"TestFunctionLLPrec" => %{file_name: "line_line_precision", module_name: "LineLinePrecision"},
"TestFunctionLAPrec" => %{
file_name: "line_polygon_precision",
module_name: "LinePolygonPrecision"
},
"TestFunctionAAPrec" => %{
file_name: "polygon_polygon_precision",
module_name: "PolygonPolygonPrecision"
}
}
@spec run(any) :: atom
def run(_) do
cases = test_cases()
Enum.each(Map.keys(@groups), fn group ->
group_cases = Enum.filter(cases, &(&1["group"] == group))
write_test_file(
group_cases,
@groups[group].module_name <> "Test",
"test/validation2/#{@groups[group].file_name}_test.exs"
)
end)
:ok
end
defp test_cases() do
["lib", "mix", "tasks", "validation_cases.json"]
|> Path.join()
|> File.read!()
|> Poison.decode!()
|> Map.get("cases")
|> Enum.filter(& &1["b"])
end
defp write_test_file(cases, module_name, filename) do
s = ~s"""
defmodule Intersect.Validation.#{module_name} do
use ExUnit.Case
#{cases |> Enum.map(&generate_test/1)}end
"""
File.write!(filename, s)
end
defp generate_test(test_case) do
title = test_case["title"] |> String.replace(~r{\[.*\]}, "") |> String.trim()
tags =
if test_case["id"] in @skip do
"@tag :validation\n @tag :skip"
else
"@tag :validation"
end
build_test(test_case, title, tags)
end
defp build_test(test_case, title, tag_str) do
~s"""
#{tag_str}
test "#{test_case["id"]} - #{title}" do
a = "#{test_case["a"]["wkt"]}" |> Geo.WKT.decode
b = "#{test_case["b"]["wkt"]}" |> Geo.WKT.decode
#{build_assertions(test_case)}
end
"""
end
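# For illustration, each generated test roughly takes this shape (values
# hypothetical; see the validation_cases.json fixtures for real ones):
#
#     @tag :validation
#     test "01-001 - point on point" do
#       a = "POINT (0 0)" |> Geo.WKT.decode
#       b = "POINT (0 0)" |> Geo.WKT.decode
#       assert Topo.intersects?(a, b) == true
#       assert Topo.intersects?(b, a) == true
#       # ... followed by disjoint/contains/within/equals assertions
#     end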
defp build_assertions(test_case) do
build_symetric_assertion(test_case, "intersects") <>
build_symetric_assertion(test_case, "disjoint") <>
build_asymetric_assertion(test_case, "contains") <>
build_asymetric_assertion(test_case, "within") <>
build_symetric_assertion(test_case, "equals")
end
defp build_asymetric_assertion(test_case, relationship) do
~s"""
assert Topo.#{relationship}?(a, b) == #{test_case["relationship"][relationship]}
"""
end
defp build_symetric_assertion(test_case, relationship) do
~s"""
assert Topo.#{relationship}?(a, b) == #{test_case["relationship"][relationship]}
assert Topo.#{relationship}?(b, a) == #{test_case["relationship"][relationship]}
"""
end
end
lib/mix/tasks/generate_validation_suite.ex
defmodule GBTree.Shim do
alias :gb_trees, as: GB
@opaque t :: GB.tree()
@opaque t(key, value) :: GB.tree(key, value)
@type item :: any
@typedoc """
Anything you can compare with the standard `<`, `>=` etc.
"""
@type comparable :: any()
@typedoc """
All keys in the tree must be comparable, and should probably be of the same type.
"""
@type key :: comparable
@type kv() :: {key, item}
@type kv(k, v) :: {k, v}
@spec new() :: t
@doc ~S"""
Create new, empty `GBTree.Shim`.
"""
def new(), do: GB.empty()
@doc ~S"""
Create new `GBTree.Shim` with `key` set to `value`
"""
@spec new(k, v) :: t(k, v) when k: key, v: item
def new(key, value), do: GB.from_orddict([{key, value}])
@doc """
Creates a new `GBTree.Shim` from list `list`. `list` should contain items of type `t:kv/0`, eg.
`[a: 10, b: 20]` or `[{12, {:a, 10}}]`
`list` does not have to be sorted.
iex> GBTree.Shim.new_from_list([q: 1, a: 7, c: 666])
...> |> GBTree.Shim.get_closest_key(:b)
{:c, 666}
"""
@spec new_from_list([kv(k, v)]) :: t(k, v) when k: key, v: item
def new_from_list(list), do: GB.from_orddict(Enum.sort(list))
@spec enter(t, key, item) :: t
def enter(tree, key, value), do: GB.enter(key, value, tree)
@type acc :: any
@type walker() :: (t, iter | :halt, acc -> {t, iter | :halt, acc})
defp walk_closest_keys_(tree, count, iter, fun, acc)
defp walk_closest_keys_(tree, _count, :halt, _fun, acc), do: {tree, acc}
defp walk_closest_keys_(tree, 0, _iter, fun, acc) do
{new_tree, _, new_acc} = fun.(tree, :halt, acc)
{new_tree, new_acc}
end
defp walk_closest_keys_(tree, count, iter, fun, acc) do
next_iter =
case GB.next(iter) do
:none ->
{min_key, _} = GB.smallest(tree)
{_key_after_min, _val, iter_after_min} = GB.next(iterator_from(tree, min_key))
iter_after_min
{_, _, next_iter} ->
next_iter
end
{fun_tree, fun_iter, fun_acc} = fun.(tree, next_iter, acc)
walk_closest_keys_(fun_tree, count - 1, fun_iter, fun, fun_acc)
end
@spec walk_closest_keys(t, non_neg_integer, key, walker(), acc) :: any
def walk_closest_keys(tree, count, key, fun, acc) do
max_count = min(size(tree), count)
case max_count do
0 ->
nil
_ ->
iter = iterator_from(tree, key)
{fun_tree, fun_iter, fun_acc} = fun.(tree, iter, acc)
walk_closest_keys_(fun_tree, max_count - 1, fun_iter, fun, fun_acc)
end
end
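# Walker contract (see `walker/0` above): `fun` receives the tree, an iterator
# positioned at the next closest entry (or `:halt` on the final call once
# `count` entries have been visited), and the accumulator; it returns an
# updated `{tree, iter, acc}` triple and may itself return `:halt` in the
# iterator position to stop the walk early.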
@spec get_closest_keys(t, non_neg_integer, iter, [kv]) :: [kv]
defp get_closest_keys(tree, count, iter, acc)
defp get_closest_keys(_tree, 0, _iter, acc), do: acc
defp get_closest_keys(_tree, _count, :none, acc) do
acc
end
defp get_closest_keys(tree, count, iter, acc) do
{kv, next_iter} =
case GB.next(iter) do
:none ->
kv = {min_key, _} = GB.smallest(tree)
{_key_after_min, _val, iter2} = GB.next(iterator_from(tree, min_key))
{kv, iter2}
{key, item, iter2} ->
{{key, item}, iter2}
end
tree |> get_closest_keys(count - 1, next_iter, [kv | acc])
end
@doc ~S"""
Gets the `count` closest keys to `key`, wrapping around at the bottom if necessary. The closest
key is the head of the returned list.
Will return at most `GBTree.Shim.size(tree)` (`GBTree.Shim.size/1`) items
"""
@spec get_closest_keys(t, key, non_neg_integer()) :: [kv]
def get_closest_keys(tree, key, count) do
max_count = min(size(tree), count)
:lists.reverse(tree |> get_closest_keys(max_count, iterator_from(tree, key), []))
end
@doc ~S"""
`get_closest_key` returns the first item that is greater than or equal to `key` (rolling around
to the bottom if none is found). Returns `:none` if the `GBTree.Shim` is empty.
# exact match
iex> GBTree.Shim.new_from_list([q: 1, a: 7, c: 666])
...> |> GBTree.Shim.get_closest_key(:a)
{:a, 7}
# match above wanted key
iex> GBTree.Shim.new_from_list([q: 1, a: 7, c: 666])
...> |> GBTree.Shim.get_closest_key(:d)
{:q, 1}
# no bigger keys
iex> GBTree.Shim.new_from_list([q: 1, a: 7, c: 666])
...> |> GBTree.Shim.get_closest_key(:r)
{:a, 7}
# empty tree
iex> GBTree.Shim.new()
...> |> GBTree.Shim.get_closest_key(:r)
:none
"""
@spec get_closest_key(t, key) :: kv | :none
def get_closest_key(tree, key)
def get_closest_key({0, _}, _key), do: :none
def get_closest_key(tree, key) do
case GB.next(GB.iterator_from(key, tree)) do
:none ->
GB.smallest(tree)
{key, item, _iter2} ->
{key, item}
end
end
@spec take_exact(t, key) :: {kv | :none, t}
def take_exact(tree, key) do
case GB.take_any(key, tree) do
:error -> {:none, tree}
{value, tree2} -> {{key, value}, tree2}
end
end
# Stubs, not yet implemented; underscored arguments and explicit bodies keep
# the module compiling cleanly.
def take_nearest(_tree, _key), do: nil
def delete_exact(tree, key) do
:gb_trees.delete_any(key, tree)
end
def delete_nearest, do: nil
defdelegate size(tree), to: GB
def empty?({0, _}), do: true
def empty?(_), do: false
@spec balance(t) :: t
defdelegate balance(tree), to: GB
@opaque iter(key, item) :: :gb_trees.iter(key, item)
@opaque iter() :: iter(any, any)
@spec iterator(t) :: iter
defdelegate iterator(tree), to: GB
@spec iterator_next(iter(k, v)) :: :none | {k, v, iter(k, v)} when k: key, v: item
defdelegate iterator_next(iter), to: GB, as: :next
@spec iterator_from(t, k) :: iter(k, item) when k: key
def iterator_from(tree, key), do: GB.iterator_from(key, tree)
end
defmodule GBTree do
import Algae
alias GBTree.Shim, as: TShim
defdata do
tree_data :: TShim.t() \\ TShim.new()
num_deletes_in_row :: non_neg_integer()
end
def enter(tree, key, item)
def enter(tree = %GBTree{tree_data: tree_data}, key, item) do
%GBTree{tree | tree_data: tree_data |> TShim.enter(key, item), num_deletes_in_row: 0}
end
@spec get_closest(t, TShim.key()) :: TShim.kv() | :none
def get_closest(tree, key)
def get_closest(%GBTree{tree_data: tree_data}, key) do
tree_data |> TShim.get_closest_key(key)
end
@doc ~S"""
Deletes `key` from `tree` if there is an exact match, and rebalances the tree if there have been
`Default.max_gbtree_deletes()` deletes in a row.
iex> {kv, _tree} = GBTree.new(GBTree.Shim.new(10, :a))
...> |> GBTree.take_exact(10)
...> kv
{10, :a}
"""
@spec take_exact(t(), k) :: {TShim.kv(k, TShim.item()) | :none, t()}
when k: TShim.key()
def take_exact(tree = %GBTree{num_deletes_in_row: orig_dels, tree_data: tree_data}, key) do
case tree_data |> TShim.take_exact(key) do
nope = {:none, _} ->
nope
{kv, tree_data2} ->
dels = orig_dels + 1
cond do
dels > Default.max_gbtree_deletes() ->
{kv, %GBTree{tree | num_deletes_in_row: 0, tree_data: tree_data2 |> TShim.balance()}}
true ->
{kv, %GBTree{tree | num_deletes_in_row: dels, tree_data: tree_data2}}
end
end
end
end
lib/mulix/data_types/gb_tree.ex
defmodule EQC.Component.Callouts do
@copyright "Quviq AB, 2016"
@moduledoc """
This module contains functions to be used with [Quviq
QuickCheck](http://www.quviq.com). It defines an Elixir version of the callout
language found in `eqc/include/eqc_component.hrl`. For detailed documentation
of the macros, please refer to the QuickCheck documentation.
`Copyright (C) Quviq AB, 2014-2016.`
"""
@doc """
Call a command from a callout.
In Erlang: `?APPLY(Mod, Fun, Args)`.
"""
def call(mod, fun, args), do: {:self_callout, mod, fun, args}
@doc """
Call a local command from a callout.
In Erlang: `?APPLY(Fun, Args)`.
"""
defmacro call(fun, args) do
quote do
call(__MODULE__, unquote(fun), unquote(args))
end
end
@doc """
Convenient syntax for `call`.
call m.f(e1, .., en)
call f(e1, .., en)
is equivalent to
call(m, f, [e1, .., en])
call(f, [e1, .., en])
"""
defmacro call({{:., _, [mod, fun]}, _, args}) do
quote do call(unquote(mod), unquote(fun), unquote(args)) end
end
defmacro call({fun, _, args}) when is_atom(fun) do
quote do call(__MODULE__, unquote(fun), unquote(args)) end
end
defmacro call(c) do
_ = c
syntax_error "call F(E1, .., En)"
end
@doc """
Specify a callout.
In Erlang: `?CALLOUT(Mod, Fun, Args, Res)`.
"""
def callout(mod, fun, args, res), do: :eqc_component.callout(mod, fun, args, res)
@doc """
Convenient syntax for `callout`.
callout m.f(e1, .., en), return: res
is equivalent to
callout(m, f, [e1, .., en], res)
"""
defmacro callout({{:., _, [mod, fun]}, _, args}, [return: res]) do
quote do callout(unquote(mod), unquote(fun), unquote(args), unquote(res)) end
end
defmacro callout(call, opts) do
_ = {call, opts}
syntax_error "callout MOD.FUN(ARG1, .., ARGN), return: RES"
end
defp do_match(e) do
quote do {:"$eqc_callout_match", unquote(e)} end
end
defp do_match_gen(e) do
quote do {:"$eqc_callout_match_gen", unquote(e)} end
end
@doc """
Bind the result of a callout or generator.
Usage:
match pat = exp
match pat <- gen
In Erlang: `?MATCH(Pat, Exp)` or `?MATCH_GEN(Pat, Gen)`.
"""
defmacro match(e={:=, _, [_, _]}), do: do_match(e)
defmacro match({:<-, c, [pat, gen]}), do: do_match_gen({:=, c, [pat, gen]})
defmacro match(e) do
_ = e
syntax_error "match PAT = EXP, or match PAT <- GEN"
end
# Hacky. Lets you write (for instance) match pat = case exp do ... end.
@doc false
defmacro match({:=, cxt1, [pat, {fun, cxt2, args}]}, opts) do
do_match({:=, cxt1, [pat, {fun, cxt2, args ++ [opts]}]})
end
defmacro match({:<-, cxt1, [pat, {fun, cxt2, args}]}, opts) do
do_match({:<-, cxt1, [pat, {fun, cxt2, args ++ [opts]}]})
end
defmacro match(_, _), do: syntax_error "match PAT = EXP, or match PAT <- GEN"
@doc """
Model failure.
In Erlang: `?FAIL(E)`.
"""
def fail(e), do: {:fail, e}
@doc """
Exception return value. Can be used as the return value for a callout to make it throw an exception.
In Erlang: `?EXCEPTION(e)`.
"""
defmacro exception(e) do
quote do {:"$eqc_exception", unquote(e)} end
end
@doc """
Model sending a message.
In Erlang: `?SEND(Pid, Msg)`
"""
def send(pid, msg), do: callout(:erlang, :send, [pid, msg], msg)
@doc """
Specify the result of an operation.
In Erlang: `?RET(X)`
"""
def ret(x), do: {:return, x}
@doc """
Run-time assertion.
In Erlang: `?ASSERT(Mod, Fun, Args)`
"""
defmacro assert(mod, fun, args) do
loc = {__CALLER__.file, __CALLER__.line}
quote do
{:assert, unquote(mod), unquote(fun), unquote(args),
{:assertion_failed, unquote(mod), unquote(fun), unquote(args), unquote(loc)}}
end
end
@doc """
Convenient syntax for assert.
Usage:
assert mod.fun(e1, .., en)
"""
defmacro assert({{:., _, [mod, fun]}, _, args}) do
quote do assert(unquote(mod), unquote(fun), unquote(args)) end
end
defmacro assert(call) do
_ = call
syntax_error "assert MOD.FUN(ARG1, .., ARGN)"
end
@doc """
Get access to (part of) an argument to a callout. For instance,
match {val, :ok} = callout :mock.foo(some_arg, __VAR__), return: :ok
...
Argument values are returned in a tuple with the return value.
Use `:_` to ignore a callout argument.
In Erlang: `?VAR`
"""
defmacro __VAR__, do: :"$var"
@doc """
Access the pid of the process executing an operation.
In Erlang: `?SELF`
"""
defmacro __SELF__, do: :"$self"
@doc """
A list of callout specifications in sequence.
In Erlang: `?SEQ`
"""
def seq(list), do: {:seq, list}
@doc """
A list of callout specifications arbitrarily interleaved.
In Erlang: `?PAR`
"""
def par(list), do: {:par, list}
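# For example (sketch; mock module and returns hypothetical), two callouts
# that may interleave, followed by one that must come last:
#
#     seq([
#       par([callout(:mock, :a, [], :ok), callout(:mock, :b, [], :ok)]),
#       callout(:mock, :done, [], :ok)
#     ])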
@doc """
A choice between two different callout specifications.
In Erlang: `?EITHER(Tag, C1, C2)`
"""
def either(c1, c2), do: {:xalt, c1, c2}
@doc """
A choice between two different callout specifications where every choice with
the same tag has to go the same way (left or right).
In Erlang: `?EITHER(Tag, C1, C2)`
"""
def either(tag, c1, c2), do: {:xalt, tag, c1, c2}
@doc """
An optional callout specification. Equivalent to `either(c, :empty)`.
In Erlang: `?OPTIONAL(C)`
"""
def optional(c), do: either(c, :empty)
@doc """
Specify a blocking operation.
In Erlang: `?BLOCK(Tag)`
"""
def block(tag), do: {:"$eqc_block", tag}
@doc """
Equivalent to block(__SELF__).
In Erlang: `?BLOCK`
"""
def block(), do: block(__SELF__())
@doc """
Unblocking a blocked operation.
In Erlang: `?UNBLOCK(Tag, Res)`
"""
def unblock(tag, res), do: {:unblock, tag, res}
@doc """
Conditional callout specification.
Usage:
guard g, do: c
Equivalent to:
case g do
true -> c
false -> :empty
end
In Erlang: `?WHEN(G, C)`
"""
defmacro guard(g, do: c) do
quote do
case unquote(g) do
true -> unquote(c)
false -> :empty
end
end
end
defmacro guard(g, c) do
_ = {g, c}
syntax_error "guard GUARD, do: CALLOUTS"
end
@doc """
Indicate that the following code is using the callout specification language.
This is default for the `_callouts` callback, but this information is lost in
some constructions like list comprehensions or `par/1` calls.
Usage:
callouts do
...
end
In Erlang: `?CALLOUTS(C1, .., CN)`
"""
defmacro callouts(do: {:__block__, cxt, args}), do: {:"$eqc_callout_quote", cxt, args}
defmacro callouts(do: c), do: {:"$eqc_callout_quote", [], [c]}
defmacro callouts(c) do
_ = c
syntax_error "callouts do CALLOUTS end"
end
defp syntax_error(err), do: raise(ArgumentError, "Usage: " <> err)
end
lib/eqc/component/callouts.ex
defmodule Sagax.Next.Test.Assertions do
alias Sagax.Test.Log
import ExUnit.Assertions
defmacro assert_saga(saga, pattern) do
quote do
if match?({%ExUnit.AssertionError{}, _}, unquote(saga).last_result) do
{error, stacktrace} = unquote(saga).last_result
reraise error, stacktrace
end
assert unquote(pattern) = unquote(saga)
end
end
defmacro assert_log(log, entries) do
quote bind_quoted: [log: log, entries: entries] do
logs = Log.all(log)
assert Log.size(logs) == Log.size(entries),
message: "Expected #{Log.size(entries)} log entries but got #{Log.size(logs)}",
left: logs,
right: entries
try do
compare_log(logs, entries, 0, 0, [], [])
rescue
e in ExUnit.AssertionError ->
reraise(%{e | left: logs, right: entries}, __STACKTRACE__)
end
end
end
defmacro assert_saga_results(saga, results) do
quote bind_quoted: [saga: saga, results: results] do
left = if match?(%Sagax{}, saga), do: Sagax.all(saga), else: saga
assert length(left) == length(results),
message: "Expected #{length(results)} results but got #{length(left)}",
left: left,
right: results
Enum.each(results, fn right ->
assert Enum.any?(left, &(&1 === right)),
message: "Expected results to contain the result #{inspect(right)}",
left: left,
right: results
end)
end
end
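# In the `assert_log/2` patterns compared below, a list means "these entries
# in this order" while a tuple means "these entries in any interleaving".
# A hedged sketch:
#
#     assert_log(log, [:first, {:par_a, :par_b}, :last])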
@spec compare_log(any, maybe_improper_list, any, any, any, any) :: :ok | true
def compare_log([], [], _, _, _, _), do: :ok
def compare_log(left, [], li, _, lp, _) when length(left) > 0 do
raise ExUnit.AssertionError,
message: "Expected no more log entries at left path #{path(li, lp)}"
end
def compare_log([], right, li, ri, lp, rp) when length(right) > 0 do
raise ExUnit.AssertionError,
message:
"Expected log to continue at left path #{path(li, lp)} " <>
"with right path #{path(ri, rp)}"
end
# List indicate that the logs should be sequential
def compare_log([left | _], [right | _], li, ri, lp, rp) when is_list(right),
do: compare_log(left, right, 0, 0, lp ++ [li], rp ++ [ri])
# Tuples indicate that the logs should be parallel
def compare_log(left, [right | rt], li, ri, lp, rp) when is_tuple(right) do
values = Log.flatten(right)
breaks = if length(rt) > 0, do: peek(hd(rt), false), else: []
remaining_values =
Enum.reduce_while(left, values, fn val, acc ->
cond do
Enum.member?(breaks, val) -> {:halt, acc}
Enum.member?(values, val) -> {:cont, List.delete(acc, val)}
true -> {:cont, acc}
end
end)
if length(remaining_values) > 0 do
raise ExUnit.AssertionError,
message:
"Expected log at left path #{path(li, lp)}..#{li + length(values)} " <>
"to contain entries #{inspect(remaining_values)}"
end
compare_log(Enum.slice(left, length(values)..-1), rt, li + length(values), ri + 1, lp, rp)
end
# When its neither a list nor a tuple we should simply compare the value
def compare_log([left | lt], [right | rt], li, ri, lp, rp) do
assert left === right,
message:
"Expected value at left path #{path(li, lp)} to match value at right path #{path(ri, rp)}"
compare_log(lt, rt, li + 1, ri + 1, lp, rp)
end
def compare_log(left, [right | _], li, ri, lp, rp) do
assert left === right,
message:
"Expected value at left path #{path(li, lp)} to match value at right path #{path(ri, rp)}"
end
defp path(idx, path), do: "/" <> ([Enum.join(path, "/"), idx] |> Enum.join("/"))
defp peek(val, false) when is_list(val), do: val |> hd() |> peek(true)
defp peek(val, _) when is_tuple(val) or is_list(val),
do: Log.flatten(val) |> Enum.reduce([], &(&2 ++ peek(&1, true)))
defp peek(val, _), do: [val]
end
test/support/next/assertions.ex
defmodule LocalTimestamps do
@moduledoc """
Timestamps in local time for Ecto `timestamps()` columns.
Generate Ecto timestamps in local time.
Use this library if:
- you're using MS SQL Server
- you want timestamp columns (`updated_at`, `inserted_at`) to be stored in local time in a `datetimeoffset(n)` column.
Why would you do such a thing?
- So that customers who are interacting with the database directly see the timestamps in the local time.
It will look like this...
| thing | inserted_at | updated_at |
|-------|----------------------------------|----------------------------------|
| arbie | 2020-01-02 03:04:05.123456+10:00 | 2020-01-02 13:14:15.999999+10:00 |
Note: when retrieving the datetimes from the database using Ecto, the data will be returned in UTC time.
## Installation
def deps do
[
{:local_timestamps, git: "https://github.com/simonmcconnell/local_timestamps.git"}
]
end
## Usage
Using `@timestamps_opts`:
@timestamps_opts [
type: TdsTypes.DateTimeOffset,
autogenerate: {LocalTimestamps, :autogenerate, []}
]
Or, supplying args to `timestamps()` directly:
schema "things" do
field :thing, :string
timestamps(
type: TdsTypes.DateTimeOffset,
autogenerate: {LocalTimestamps, :autogenerate, []}
)
end
## Choices
By default, it takes the later time when the [DateTime is ambiguous](https://hexdocs.pm/timex/Timex.AmbiguousDateTime.html). This can be changed to take the earlier time by passing `:before` to `autogenerate()`.
@timestamps_opts [
type: TdsTypes.DateTimeOffset,
autogenerate: {LocalTimestamps, :autogenerate, [:before]}
]
"""
@spec autogenerate :: :error | DateTime.t()
def autogenerate do
autogenerate(:after)
end
@spec autogenerate(:after | :before) :: :error | DateTime.t()
def autogenerate(ambiguity) when ambiguity in [:before, :after] do
case Timex.local() do
{:error, _} ->
:error
%DateTime{} = dt ->
dt
%Timex.AmbiguousDateTime{before: b, after: a} ->
case ambiguity do
:before -> b
:after -> a
end
end
end
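# During a DST fold the local wall-clock time occurs twice, Timex returns an
# `AmbiguousDateTime`, and the argument picks a side. Hedged sketch:
#
#     LocalTimestamps.autogenerate(:before) # earlier of the two instants
#     LocalTimestamps.autogenerate()        # defaults to :after (the later)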
end
lib/local_timestamps.ex
defmodule Gradient.TestHelpers do
alias Gradient.Types, as: T
@examples_path "test/examples"
@examples_build_path "test/examples/_build"
@spec load(String.t()) :: T.forms()
def load(beam_file) do
beam_file = String.to_charlist(Path.join(@examples_build_path, beam_file))
{:ok, {_, [abstract_code: {:raw_abstract_v1, ast}]}} =
:beam_lib.chunks(beam_file, [:abstract_code])
ast
end
@spec load(String.t(), String.t()) :: {T.tokens(), T.forms()}
def load(beam_file, ex_file) do
beam_file = String.to_charlist(Path.join(@examples_build_path, beam_file))
ex_file = Path.join(@examples_path, ex_file)
{:ok, {_, [abstract_code: {:raw_abstract_v1, ast}]}} =
:beam_lib.chunks(beam_file, [:abstract_code])
ast = replace_file_path(ast, ex_file)
[_ | _] = tokens = Gradient.ElixirFileUtils.load_tokens(ast)
{tokens, ast}
end
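# Hedged usage sketch (paths are resolved relative to test/examples and its
# _build directory, as above):
#
#     ast = load("Elixir.SimpleApp.beam")
#     {tokens, ast} = load("Elixir.SimpleApp.beam", "simple_app.ex")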
def load_tokens(path) do
with {:ok, code} <- File.read(path),
{:ok, tokens} <- :elixir.string_to_tokens(String.to_charlist(code), 1, 1, path, []) do
tokens
end
end
@spec example_data() :: {T.tokens(), T.forms()}
def example_data() do
beam_path = Path.join(@examples_build_path, "Elixir.SimpleApp.beam") |> String.to_charlist()
file_path = Path.join(@examples_path, "simple_app.ex")
code =
File.read!(file_path)
|> String.to_charlist()
{:ok, tokens} =
code
|> :elixir.string_to_tokens(1, 1, file_path, [])
{:ok, {SimpleApp, [abstract_code: {:raw_abstract_v1, ast}]}} =
:beam_lib.chunks(beam_path, [:abstract_code])
ast = replace_file_path(ast, file_path)
{tokens, ast}
end
@spec example_tokens() :: T.tokens()
def example_tokens() do
file_path = Path.join(@examples_path, "conditional/cond.ex")
code =
File.read!(file_path)
|> String.to_charlist()
{:ok, tokens} =
code
|> :elixir.string_to_tokens(1, 1, file_path, [])
tokens
end
@spec example_string_tokens() :: T.tokens()
def example_string_tokens() do
file_path = Path.join(@examples_path, "string_example.ex")
code =
File.read!(file_path)
|> String.to_charlist()
{:ok, tokens} =
code
|> :elixir.string_to_tokens(1, 1, file_path, [])
tokens
end
defp replace_file_path([_ | forms], path) do
path = String.to_charlist(path)
[{:attribute, 1, :file, {path, 1}} | forms]
end
end
test/support/helpers.ex
defmodule HELF.Flow do
cond do
System.get_env("HELF_FLOW_FORCE_SYNC") ->
@driver HELF.Flow.Driver.Sync
System.get_env("HELF_FORCE_SYNC") ->
@driver HELF.Flow.Driver.Sync
true ->
case Application.get_env(:helf, :driver, :async) do
:sync ->
@driver HELF.Flow.Driver.Sync
:async ->
@driver HELF.Flow.Driver.Async
end
end
def __driver__,
do: @driver
defmacro flowing(do: {:with, meta, args}) do
args = Enum.map(args, fn
blocks = [_|_] ->
blocks = Enum.map(blocks, fn
# If the `with` succeeds, execute the success routine
{:do, code} ->
inject = quote do: (unquote(__MODULE__).__execute_success__(); unquote(code))
{:do, inject}
# If the `with` fails, execute the failure routine, no matter which
# error clauses it matches
{:else, clauses} ->
clauses = Enum.map(clauses, fn {:->, meta, [pattern, code]} ->
inject = quote do: (unquote(__MODULE__).__execute_fail__(); unquote(code))
{:->, meta, [pattern, inject]}
end)
{:else, clauses}
etc ->
etc
end)
# If the `with` didn't include any `else` clause, we'll inject a default
# one that has the very same behaviour as the lack of any `else` clause
# (ie: return the value) but with the addition of executing the failure
# routine
on_fail = quote do: unquote(__MODULE__).__execute_fail__()
# AST to bind on any potential value, execute on_fail and return the bound value
inject = [{:->, [], [[{:error, [], Elixir}], {:__block__, [], [on_fail, {:error, [], Elixir}]}]}]
Keyword.put_new(blocks, :else, inject)
etc ->
etc
end)
command = {:with, meta, args}
quote do
try do
unquote(__MODULE__).__start__()
return = unquote(command)
unquote(__MODULE__).__finish__()
return
rescue
exception ->
unquote(__MODULE__).__execute_fail__()
reraise exception, __STACKTRACE__
end
end
end
@spec on_success((() -> any)) :: :ok
@doc """
Stores a callback to be executed if the `with` succeeds
Eg:
```
flowing do
with \
on_fail(fn -> IO.puts "Operation failed" end),
{:ok, value} <- Map.fetch(%{a: 1}, :a),
on_success(fn -> IO.puts "Succeeded and got \#{inspect value}" end)
do
:gotcha
end
end
```
"""
def on_success(callback),
do: callback(callback, :success)
@doc """
Stores a callback to be executed if the `with` fails
Eg:
```
flowing do
with \
on_fail(fn -> IO.puts "Operation failed" end),
{:ok, value} <- Map.fetch(%{a: 1}, :b),
on_success(fn -> IO.puts "Succeeded and got \#{inspect value}" end)
do
:gotcha
end
end
```
"""
@spec on_fail((() -> any)) :: :ok
def on_fail(callback),
do: callback(callback, :fail)
@spec on_done((() -> any)) :: :ok
@doc """
Stores a callback to be executed at the end of the `with`, no matter if it succeeds or fails
Eg:
```
flowing do
with \
on_done(fn -> IO.puts "The flow is completed" end),
{:ok, value} <- Map.fetch(%{a: 1}, :a)
do
flowing do
with \
on_done(fn -> IO.puts "The other flow is completed" end),
{:ok, value} <- Map.fetch(%{a: 1}, :b)
do
:gotcha
end
end
end
end
```
"""
def on_done(callback),
do: callback(callback, :always)
@spec callback((() -> any), :success | :fail | :always) :: :ok
defp callback(callback, kind) when is_function(callback, 0) and kind in [:success, :fail, :always] do
case get_flow() do
nil ->
raise "cannot set callback outside of flow"
{flow, _counter} ->
send flow, {:callback, kind, callback}
end
end
@doc false
def __start__ do
case get_flow() do
nil ->
pid = HELF.Flow.Manager.start()
Process.put(:heflow, {pid, 1})
{flow, counter} ->
Process.put(:heflow, {flow, counter + 1})
end
end
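# Nested `flowing` blocks in one process share a single flow manager: the
# process dictionary holds `{manager_pid, depth}`, and the success/failure
# callbacks only fire when the outermost block (depth 1) completes.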
@doc false
def __finish__ do
case get_flow() do
{_, 1} ->
Process.delete(:heflow)
{flow, n} ->
Process.put(:heflow, {flow, n - 1})
nil ->
:ok
end
end
@doc false
def __execute_success__ do
case get_flow() do
{flow, 1} ->
__driver__().execute_success(flow)
Process.delete(:heflow)
_ ->
:ok
end
end
@doc false
def __execute_fail__ do
case get_flow() do
{flow, 1} ->
__driver__().execute_fail(flow)
Process.delete(:heflow)
_ ->
:ok
end
end
@spec get_flow() :: {pid, pos_integer} | nil
defp get_flow do
Process.get(:heflow)
end
end
lib/helf/flow.ex
defmodule Sippet.Transports.UDP.Plug do
@moduledoc """
A `Sippet.Transports.Plug` implementing a UDP transport.
The UDP transport consists basically in a listening process, this `Plug`
implementation itself, and a pool of senders, defined in
`Sippet.Transports.UDP.Sender`, managed by `poolboy`.
The `start_link/0` function starts them, along a root supervisor that
monitors them all in case of failures.
This `Plug` process creates an UDP socket and keeps listening for datagrams
in active mode. Its job is to forward the datagrams to the processing pool
defined in `Sippet.Transports.Queue`. The sender processes pool keeps waiting
for SIP messages (as defined by `Sippet.Message`), transforms them into
iodata and dispatch them to the same UDP socket created by this `Plug`.
Both pools will block if all worker processes are busy, which may happen only
in high load surges, as both are pure processing pools.
"""
use GenServer
use Sippet.Transports.Plug
alias Sippet.Transports.UDP.Pool, as: Pool
alias Sippet.Transports.Queue, as: Queue
import Supervisor.Spec
require Logger
@doc """
Starts the UDP plug.
"""
def start_link() do
port =
:sippet
|> Application.get_env(__MODULE__)
|> Keyword.fetch!(:port)
if port <= 0 do
raise ArgumentError, "invalid port #{inspect port}"
end
address =
:sippet
|> Application.get_env(__MODULE__)
|> Keyword.get(:address)
opts =
case address do
nil -> []
ip -> [address: ip]
end
children = [
worker(GenServer, [__MODULE__, [port, opts], [name: __MODULE__]]),
Pool.spec()
]
Supervisor.start_link(children, [strategy: :one_for_all])
end
@doc """
Send a message to the UDP senders' pool.
"""
def send_message(message, host, port, key) do
conn = Pool.check_out()
GenServer.cast(conn, {:send_message, message, host, port, key})
end
@doc """
This connection is not reliable.
"""
def reliable?(), do: false
@doc """
This blocking function gets called only during the senders' initialization.
"""
def get_socket(),
do: GenServer.call(__MODULE__, :get_socket, :infinity)
@doc false
def init([port, opts]) do
open_socket(port, opts)
end
defp open_socket(port, opts) do
sock_opts =
[as: :binary, mode: :active] ++
if Keyword.has_key?(opts, :address) do
[local: [address: opts[:address]]]
else
[]
end
case Socket.UDP.open(port, sock_opts) do
{:ok, socket} ->
{:ok, {address, _port}} = :inet.sockname(socket)
Logger.info("#{inspect self()} started plug " <>
"#{:inet.ntoa(address)}:#{port}/udp")
{:ok, {socket, address, port}}
{:error, reason} ->
Logger.error("#{inspect self()} port #{port}/udp " <>
"#{inspect reason}, retrying in 10s...")
Process.sleep(10_000)
open_socket(port, opts)
end
end
@doc false
def handle_info({:udp, _socket, ip, from_port, packet}, state) do
Queue.incoming_datagram(packet, {:udp, ip, from_port})
{:noreply, state}
end
@doc false
def handle_call(:get_socket, _from, {socket, _address, _port} = state),
do: {:reply, socket, state}
@doc false
def terminate(reason, {socket, address, port}) do
Logger.info("#{inspect self()} stopped plug " <>
"#{:inet.ntoa(address)}:#{port}/udp, " <>
"reason: #{inspect reason}")
:ok = :gen_udp.close(socket)
end
end
lib/sippet/transports/udp/plug.ex
defmodule Function do
@moduledoc """
A set of functions for working with functions.
Anonymous functions are typically created by using `fn`:
iex> add = fn a, b -> a + b end
iex> add.(1, 2)
3
Anonymous functions can also have multiple clauses. All clauses
should expect the same number of arguments:
iex> negate = fn
...> true -> false
...> false -> true
...> end
iex> negate.(false)
true
## The capture operator
It is also possible to capture public module functions and pass them
around as if they were anonymous functions by using the capture
operator `Kernel.SpecialForms.&/1`:
iex> add = &Kernel.+/2
iex> add.(1, 2)
3
iex> length = &String.length/1
iex> length.("hello")
5
To capture a definition within the current module, you can skip the
module prefix, such as `&my_fun/2`. In those cases, the captured
function can be public (`def`) or private (`defp`).
The capture operator can also be used to create anonymous functions
that expect at least one argument:
iex> add = &(&1 + &2)
iex> add.(1, 2)
3
In such cases, using the capture operator is no different than using `fn`.
## Internal and external functions
We say that functions that point to definitions residing in modules, such
as `&String.length/1`, are **external** functions. All other functions are
**local** and they are always bound to the file or module that defined them.
Besides the functions in this module to work with functions, `Kernel` also
has an `apply/2` function that invokes a function with a dynamic number of
arguments, as well as `is_function/1` and `is_function/2`, to check
respectively if a given value is a function or a function of a given arity.
"""
@type information ::
:arity
| :env
| :index
| :module
| :name
| :new_index
| :new_uniq
| :pid
| :type
| :uniq
@doc """
Captures the given function.
Inlined by the compiler.
## Examples
iex> Function.capture(String, :length, 1)
&String.length/1
"""
@doc since: "1.7.0"
@spec capture(module, atom, arity) :: fun
def capture(module, function_name, arity) do
:erlang.make_fun(module, function_name, arity)
end
@doc """
Returns a keyword list with information about a function.
The returned keys (with the corresponding possible values) for
all types of functions (local and external) are the following:
* `:type` - `:local` (for anonymous functions) or `:external` (for
named functions).
* `:module` - an atom which is the module where the function is defined when
anonymous or the module which the function refers to when it's a named function.
* `:arity` - (integer) the number of arguments the function is to be called with.
* `:name` - (atom) the name of the function.
* `:env` - a list of the environment or free variables. For named
functions, the returned list is always empty.
When `fun` is an anonymous function (that is, the type is `:local`), the following
additional keys are returned:
* `:pid` - PID of the process that originally created the function.
* `:index` - (integer) an index into the module function table.
* `:new_index` - (integer) an index into the module function table.
* `:new_uniq` - (binary) a unique value for this function. It's
calculated from the compiled code for the entire module.
* `:uniq` - (integer) a unique value for this function. This integer is
calculated from the compiled code for the entire module.
**Note**: this function must be used only for debugging purposes.
Inlined by the compiler.
## Examples
iex> fun = fn x -> x end
iex> info = Function.info(fun)
iex> Keyword.get(info, :arity)
1
iex> Keyword.get(info, :type)
:local
iex> fun = &String.length/1
iex> info = Function.info(fun)
iex> Keyword.get(info, :type)
:external
iex> Keyword.get(info, :name)
:length
"""
@doc since: "1.7.0"
@spec info(fun) :: [{information, term}]
def info(fun), do: :erlang.fun_info(fun)
@doc """
Returns a specific information about the function.
The returned information is a two-element tuple in the shape of
`{info, value}`.
For any function, the information asked for can be any of the atoms
`:module`, `:name`, `:arity`, `:env`, or `:type`.
For anonymous functions, there is also information about any of the
atoms `:index`, `:new_index`, `:new_uniq`, `:uniq`, and `:pid`.
For a named function, the value of any of these items is always the
atom `:undefined`.
For more information on each of the possible returned values, see
`info/1`.
Inlined by the compiler.
## Examples
iex> f = fn x -> x end
iex> Function.info(f, :arity)
{:arity, 1}
iex> Function.info(f, :type)
{:type, :local}
iex> fun = &String.length/1
iex> Function.info(fun, :name)
{:name, :length}
iex> Function.info(fun, :pid)
{:pid, :undefined}
"""
@doc since: "1.7.0"
@spec info(fun, item) :: {item, term} when item: information
def info(fun, item), do: :erlang.fun_info(fun, item)
@doc """
Returns its input `value`. This function can be passed as an anonymous function
to transformation functions.
## Examples
iex> Function.identity("Hello world!")
"Hello world!"
iex> 'abcdaabccc' |> Enum.sort() |> Enum.chunk_by(&Function.identity/1)
['aaa', 'bb', 'cccc', 'd']
iex> Enum.group_by('abracadabra', &Function.identity/1)
%{97 => 'aaaaa', 98 => 'bb', 99 => 'c', 100 => 'd', 114 => 'rr'}
iex> Enum.map([1, 2, 3, 4], &Function.identity/1)
[1, 2, 3, 4]
"""
@doc since: "1.10.0"
@spec identity(value) :: value when value: var
def identity(value), do: value
end
lib/elixir/lib/function.ex
defmodule Irc.Message.Command do
@moduledoc """
A simple type representing the finite set of valid command and reply codes
This type essentially functions as an Enumeration, so while it is technically
just an atom, only a specific set of atoms are considered valid, and
attempting to use invalid values will result in an InvalidIrcMessageError.
"""
alias __MODULE__
@values [
pass: "<PASSWORD>",
nick: "NICK",
user: "USER",
oper: "OPER",
mode: "MODE",
service: "SERVICE",
quit: "QUIT",
squit: "SQUIT",
join: "JOIN",
part: "PART",
topic: "TOPIC",
names: "NAMES",
list: "LIST",
invite: "INVITE",
kick: "KICK",
privmsg: "PRIVMSG",
notice: "NOTICE",
motd: "MOTD",
lusers: "LUSERS",
version: "VERSION",
stats: "STATS",
links: "LINKS",
time: "TIME",
connect: "CONNECT",
trace: "TRACE",
admin: "ADMIN",
info: "INFO",
servlist: "SERVLIST",
squery: "SQUERY",
who: "WHO",
whois: "WHOIS",
whowas: "WHOWAS",
kill: "KILL",
ping: "PING",
pong: "PONG",
error: "ERROR",
away: "AWAY",
rehash: "REHASH",
die: "DIE",
restart: "RESTART",
summon: "SUMMON",
users: "USERS",
wallops: "WALLOPS",
userhost: "USERHOST",
ison: "ISON",
server: "SERVER",
njoin: "NJOIN",
rpl_welcome: "001",
rpl_yourhost: "002",
rpl_created: "003",
rpl_myinfo: "004",
rpl_bounce: "005",
rpl_isupport: "005",
rpl_userhost: "302",
rpl_ison: "303",
rpl_away: "301",
rpl_unaway: "305",
rpl_nowaway: "306",
rpl_whoisuser: "311",
rpl_whoisserver: "312",
rpl_whoisoperator: "313",
rpl_whoisidle: "317",
rpl_endofwhois: "318",
rpl_whoischannels: "319",
rpl_whowasuser: "314",
rpl_endofwhowas: "369",
rpl_liststart: "321",
rpl_list: "322",
rpl_listend: "323",
rpl_uniqopis: "325",
rpl_channelmodeis: "324",
rpl_notopic: "331",
rpl_topic: "332",
rpl_inviting: "341",
rpl_summoning: "342",
rpl_invitelist: "346",
rpl_endofinvitelist: "347",
rpl_exceptlist: "348",
rpl_endofexceptlist: "349",
rpl_version: "351",
rpl_whoreply: "352",
rpl_endofwho: "315",
rpl_namreply: "353",
rpl_endofnames: "366",
rpl_links: "364",
rpl_endoflinks: "365",
rpl_banlist: "367",
rpl_endofbanlist: "368",
rpl_info: "371",
rpl_endofinfo: "374",
rpl_motdstart: "375",
rpl_motd: "372",
rpl_endofmotd: "376",
rpl_youreoper: "381",
rpl_rehashing: "382",
rpl_youreservice: "383",
rpl_time: "391",
rpl_usersstart: "392",
rpl_users: "393",
rpl_endofusers: "394",
rpl_nousers: "395",
rpl_tracelink: "200",
rpl_traceconnecting: "201",
rpl_tracehandshake: "202",
rpl_traceunknown: "203",
rpl_traceoperator: "204",
rpl_traceuser: "205",
rpl_traceserver: "206",
rpl_traceservice: "207",
rpl_tracenewtype: "208",
rpl_traceclass: "209",
rpl_tracereconnect: "210",
rpl_tracelog: "261",
rpl_traceend: "262",
rpl_statslinkinfo: "211",
rpl_statscommands: "212",
rpl_endofstats: "219",
rpl_statsuptime: "242",
rpl_statsoline: "243",
rpl_umodeis: "221",
rpl_servlist: "234",
rpl_servlistend: "235",
rpl_luserclient: "251",
rpl_luserop: "252",
rpl_luserunknown: "253",
rpl_luserchannels: "254",
rpl_luserme: "255",
rpl_adminme: "256",
rpl_adminloc1: "257",
rpl_adminloc2: "258",
rpl_adminemail: "259",
rpl_tryagain: "263",
err_nosuchnick: "401",
err_nosuchserver: "402",
err_nosuchchannel: "403",
err_cannotsendtochan: "404",
err_toomanychannels: "405",
err_wasnosuchnick: "406",
err_toomanytargets: "407",
err_nosuchservice: "408",
err_noorigin: "409",
err_norecipient: "411",
err_notexttosend: "412",
err_notoplevel: "413",
err_wildtoplevel: "414",
err_badmask: "415",
err_unknowncommand: "421",
err_nomotd: "422",
err_noadmininfo: "423",
err_fileerror: "424",
err_nonicknamegiven: "431",
err_erroneusnickname: "432",
err_nicknameinuse: "433",
err_nickcollision: "436",
err_unavailresource: "437",
err_usernotinchannel: "441",
err_notonchannel: "442",
err_useronchannel: "443",
err_nologin: "444",
err_summondisabled: "445",
err_usersdisabled: "446",
err_notregistered: "451",
err_needmoreparams: "461",
err_alreadyregistred: "462",
err_nopermforhost: "463",
err_passwdmismatch: "464",
err_yourebannedcreep: "465",
err_youwillbebanned: "466",
err_keyset: "467",
err_channelisfull: "471",
err_unknownmode: "472",
err_inviteonlychan: "473",
err_bannedfromchan: "474",
err_badchannelkey: "475",
err_badchanmask: "476",
err_nochanmodes: "477",
err_banlistfull: "478",
err_noprivileges: "481",
err_chanoprivsneeded: "482",
err_cantkillserver: "483",
err_restricted: "484",
err_uniqopprivsneeded: "485",
err_nooperhost: "491",
err_umodeunknownflag: "501",
err_usersdontmatch: "502",
cap: "CAP",
cap_ls: "LS",
cap_list: "LIST",
cap_req: "REQ",
cap_ack: "ACK",
cap_nak: "NAK",
cap_clear: "CLEAR",
cap_end: "END",
authenticate: "AUTHENTICATE",
rpl_loggedin: "900",
rpl_loggedout: "901",
rpl_nicklocked: "902",
rpl_saslsuccess: "903",
err_saslfail: "904",
err_sasltoolong: "905",
err_saslaborted: "906",
err_saslalready: "907",
rpl_saslmechs: "908",
rpl_statscline: "213",
rpl_statsnline: "214",
rpl_statsiline: "215",
rpl_statskline: "216",
rpl_statsqline: "217",
rpl_statsyline: "218",
rpl_serviceinfo: "231",
rpl_endofservices: "232",
rpl_service: "233",
rpl_statsvline: "240",
rpl_statslline: "241",
rpl_statshline: "244",
rpl_statssline: "245",
rpl_statsping: "246",
rpl_statsbline: "247",
rpl_statsdline: "250",
rpl_none: "300",
rpl_whoischanop: "316",
rpl_killdone: "361",
rpl_closing: "362",
rpl_closeend: "363",
rpl_infostart: "373",
rpl_myportis: "384",
err_noservicehost: "492"
]
@error_msg "Unrecognized command"
@type t :: atom()
@doc """
Attempts to convert the given string into a Command
"""
@spec decode(String.t) :: {:ok, Command.t} | {:error, String.t}
def decode(str), do: do_decode(str)
@doc """
Converts the Command into its string representation
Note: passing an invalid Command will raise an error
"""
@spec encode!(Command.t) :: String.t
def encode!(command), do: do_encode(command)
@values
|> Enum.each(fn ({key, val}) ->
defp do_decode(unquote(val)), do: {:ok, unquote(key)}
defp do_encode(unquote(key)), do: unquote(val)
end)
defp do_decode(_), do: {:error, @error_msg}
defp do_encode(_), do: raise InvalidIrcMessageError, @error_msg
end
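# Illustrative use of the generated lookup pair (a sketch, not verified
# output; assumes the module compiled as defined above):
#
#   Irc.Message.Command.decode("PRIVMSG")      #=> {:ok, :privmsg}
#   Irc.Message.Command.decode("BOGUS")        #=> {:error, "Unrecognized command"}
#   Irc.Message.Command.encode!(:rpl_welcome)  #=> "001"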
lib/irc/message/command.ex
defmodule Glicko do
@moduledoc """
Provides the implementation of the Glicko rating system.
See the [specification](http://www.glicko.net/glicko/glicko2.pdf) for implementation details.
## Usage
Get a player's new rating after a series of matches in a rating period.
iex> results = [Result.new(Player.new_v1([rating: 1400, rating_deviation: 30]), :win),
...> Result.new(Player.new_v1([rating: 1550, rating_deviation: 100]), :loss),
...> Result.new(Player.new_v1([rating: 1700, rating_deviation: 300]), :loss)]
iex> player = Player.new_v1([rating: 1500, rating_deviation: 200])
iex> Glicko.new_rating(player, results, [system_constant: 0.5])
{1464.0506705393013, 151.51652412385727}
Get a player's new rating when they haven't played within a rating period.
iex> player = Player.new_v1([rating: 1500, rating_deviation: 200])
iex> Glicko.new_rating(player, [], [system_constant: 0.5])
{1.5e3, 200.27141669877065}
Calculate the probability of a player winning against an opponent.
iex> player = Player.new_v1
iex> opponent = Player.new_v1
iex> Glicko.win_probability(player, opponent)
0.5
Calculate the probability of a player drawing against an opponent.
iex> player = Player.new_v1
iex> opponent = Player.new_v1
iex> Glicko.draw_probability(player, opponent)
1.0
"""
alias __MODULE__.{
Player,
Result
}
@default_system_constant 0.8
@default_convergence_tolerance 1.0e-7
@type new_rating_opts :: [system_constant: float, convergence_tolerance: float]
@doc """
Calculates the probability of a player winning against an opponent.
Returns a value between `0.0` and `1.0`.
"""
@spec win_probability(player :: Player.t(), opponent :: Player.t()) :: float
def win_probability(player, opponent) do
win_probability(
player |> Player.rating(:v2),
opponent |> Player.rating(:v2),
opponent |> Player.rating_deviation(:v2)
)
end
@doc """
Calculates the probability of a player winning against an opponent from a player rating, opponent rating and opponent rating deviation.
Values provided for the player rating, opponent rating and opponent rating deviation must be *v2* based.
Returns a value between `0.0` and `1.0`.
"""
@spec win_probability(
player_rating :: Player.rating(),
opponent_rating :: Player.rating(),
opponent_rating_deviation :: Player.rating_deviation()
) :: float
def win_probability(player_rating, opponent_rating, opponent_rating_deviation) do
calc_e(player_rating, opponent_rating, calc_g(opponent_rating_deviation))
end
@doc """
Calculates the probability of a player drawing against an opponent.
Returns a value between `0.0` and `1.0`.
"""
@spec draw_probability(player :: Player.t(), opponent :: Player.t()) :: float
def draw_probability(player, opponent) do
draw_probability(
player |> Player.rating(:v2),
opponent |> Player.rating(:v2),
opponent |> Player.rating_deviation(:v2)
)
end
@doc """
Calculates the probability of a player drawing against an opponent from a player rating, opponent rating and opponent rating deviation.
Values provided for the player rating, opponent rating and opponent rating deviation must be *v2* based.
Returns a value between `0.0` and `1.0`.
"""
@spec draw_probability(
player_rating :: Player.rating(),
opponent_rating :: Player.rating(),
opponent_rating_deviation :: Player.rating_deviation()
) :: float
def draw_probability(player_rating, opponent_rating, opponent_rating_deviation) do
1 -
abs(win_probability(player_rating, opponent_rating, opponent_rating_deviation) - 0.5) / 0.5
end
@doc """
Generate a new rating from an existing rating and a series (or lack) of results.
Returns the updated player with the same version given to the function.
"""
@spec new_rating(player :: Player.t(), results :: list(Result.t()), opts :: new_rating_opts) ::
Player.t()
def new_rating(player, results, opts \\ [])
def new_rating(player, results, opts) when tuple_size(player) == 3 do
do_new_rating(player, results, opts)
end
def new_rating(player, results, opts) when tuple_size(player) == 2 do
player
|> Player.to_v2()
|> do_new_rating(results, opts)
|> Player.to_v1()
end
defp do_new_rating({player_r, player_pre_rd, player_v}, [], _) do
player_post_rd = calc_player_post_base_rd(:math.pow(player_pre_rd, 2), player_v)
{player_r, player_post_rd, player_v}
end
defp do_new_rating({player_pre_r, player_pre_rd, player_pre_v}, results, opts) do
sys_const = Keyword.get(opts, :system_constant, @default_system_constant)
conv_tol = Keyword.get(opts, :convergence_tolerance, @default_convergence_tolerance)
# Initialization (skips steps 1, 2 and 3)
player_pre_rd_sq = :math.pow(player_pre_rd, 2)
{variance_est, results_effect} = result_calculations(results, player_pre_r)
# Step 4
delta = calc_delta(results_effect, variance_est)
# Step 5.1
alpha = calc_alpha(player_pre_v)
# Step 5.2
k = calc_k(alpha, delta, player_pre_rd_sq, variance_est, sys_const, 1)
{initial_a, initial_b} =
iterative_algorithm_initial(
alpha,
delta,
player_pre_rd_sq,
variance_est,
sys_const,
k
)
# Step 5.3
initial_fa = calc_f(alpha, delta, player_pre_rd_sq, variance_est, sys_const, initial_a)
initial_fb = calc_f(alpha, delta, player_pre_rd_sq, variance_est, sys_const, initial_b)
# Step 5.4
a =
iterative_algorithm_body(
alpha,
delta,
player_pre_rd_sq,
variance_est,
sys_const,
conv_tol,
initial_a,
initial_b,
initial_fa,
initial_fb
)
# Step 5.5
player_post_v = calc_new_player_volatility(a)
# Step 6
player_post_base_rd = calc_player_post_base_rd(player_pre_rd_sq, player_post_v)
# Step 7
player_post_rd = calc_new_player_rating_deviation(player_post_base_rd, variance_est)
player_post_r = calc_new_player_rating(results_effect, player_pre_r, player_post_rd)
{player_post_r, player_post_rd, player_post_v}
end
defp result_calculations(results, player_pre_r) do
{variance_estimate_acc, result_effect_acc} =
Enum.reduce(results, {0.0, 0.0}, fn result, {variance_estimate_acc, result_effect_acc} ->
opponent_rd_g =
result
|> Result.opponent_rating_deviation()
|> calc_g
win_probability = calc_e(player_pre_r, Result.opponent_rating(result), opponent_rd_g)
{
variance_estimate_acc +
:math.pow(opponent_rd_g, 2) * win_probability * (1 - win_probability),
result_effect_acc + opponent_rd_g * (Result.score(result) - win_probability)
}
end)
{:math.pow(variance_estimate_acc, -1), result_effect_acc}
end
defp calc_delta(results_effect, variance_est) do
results_effect * variance_est
end
defp calc_f(alpha, delta, player_pre_rd_sq, variance_est, sys_const, x) do
:math.exp(x) *
(:math.pow(delta, 2) - :math.exp(x) - player_pre_rd_sq - variance_est) /
(2 * :math.pow(player_pre_rd_sq + variance_est + :math.exp(x), 2)) -
(x - alpha) / :math.pow(sys_const, 2)
end
defp calc_alpha(player_pre_v) do
:math.log(:math.pow(player_pre_v, 2))
end
defp calc_new_player_volatility(a) do
:math.exp(a / 2)
end
defp calc_new_player_rating(results_effect, player_pre_r, player_post_rd) do
player_pre_r + :math.pow(player_post_rd, 2) * results_effect
end
defp calc_new_player_rating_deviation(player_post_base_rd, variance_est) do
1 / :math.sqrt(1 / :math.pow(player_post_base_rd, 2) + 1 / variance_est)
end
defp calc_player_post_base_rd(player_pre_rd_sq, player_pre_v) do
:math.sqrt(:math.pow(player_pre_v, 2) + player_pre_rd_sq)
end
defp iterative_algorithm_initial(alpha, delta, player_pre_rd_sq, variance_est, sys_const, k) do
initial_a = alpha
initial_b =
if :math.pow(delta, 2) > player_pre_rd_sq + variance_est do
:math.log(:math.pow(delta, 2) - player_pre_rd_sq - variance_est)
else
alpha - k * sys_const
end
{initial_a, initial_b}
end
defp iterative_algorithm_body(
alpha,
delta,
player_pre_rd_sq,
variance_est,
sys_const,
conv_tol,
a,
b,
fa,
fb
) do
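# Step 5.4 of the Glicko-2 spec: regula falsi with the Illinois
# modification. If the candidate point c lands on the same side as b
# (fc * fb >= 0), fa is halved so the bracket keeps shrinking; otherwise
# the old b (with fb) becomes the new a.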
if abs(b - a) > conv_tol do
c = a + (a - b) * fa / (fb - fa)
fc = calc_f(alpha, delta, player_pre_rd_sq, variance_est, sys_const, c)
{a, fa} =
if fc * fb < 0 do
{b, fb}
else
{a, fa / 2}
end
iterative_algorithm_body(
alpha,
delta,
player_pre_rd_sq,
variance_est,
sys_const,
conv_tol,
a,
c,
fa,
fc
)
else
a
end
end
defp calc_k(alpha, delta, player_pre_rd_sq, variance_est, sys_const, k) do
if calc_f(alpha, delta, player_pre_rd_sq, variance_est, sys_const, alpha - k * sys_const) < 0 do
calc_k(alpha, delta, player_pre_rd_sq, variance_est, sys_const, k + 1)
else
k
end
end
# g function
defp calc_g(rd) do
1 / :math.sqrt(1 + 3 * :math.pow(rd, 2) / :math.pow(:math.pi(), 2))
end
# E function
defp calc_e(player_pre_r, opponent_r, opponent_rd_g) do
1 / (1 + :math.exp(-1 * opponent_rd_g * (player_pre_r - opponent_r)))
end
end
lib/glicko.ex
defmodule BigchaindbEx.Condition.ThresholdSha256 do
@moduledoc """
THRESHOLD-SHA-256: Threshold gate condition using SHA-256.
Threshold conditions can be used to create m-of-n multi-signature groups.
Threshold conditions can represent the AND operator by setting the threshold
to equal the number of subconditions (n-of-n) or the OR operator by setting the threshold to one (1-of-n).
Threshold conditions allow each subcondition to carry an integer weight.
Since threshold conditions operate on conditions, they can be nested as well,
which allows the creation of deep threshold trees of public keys.
By using Merkle trees, threshold fulfillments do not need to provide the
structure of unfulfilled subtrees. That means only the public keys that are
actually used in a fulfillment will appear in the fulfillment, saving space.
One way to formally interpret threshold conditions is as a boolean weighted
threshold gate. A tree of threshold conditions forms a boolean weighted
threshold circuit.
THRESHOLD-SHA-256 is assigned the type ID 2. It relies on the SHA-256 and
THRESHOLD feature suites which corresponds to a feature bitmask of 0x09.
The threshold determines the weighted sum that is used to consider this condition
fulfilled. If the added weight of all valid subfulfillments is greater than or
equal to this number, the threshold condition is considered fulfilled.
"""
alias BigchaindbEx.{Fulfillment, Condition}
@type t :: %__MODULE__{
threshold: Integer.t,
subconditions: Enum.t
}
@enforce_keys [:threshold, :subconditions]
defstruct [
:threshold,
:subconditions
]
@type_id 2
@type_name "threshold-sha-256"
@asn1 "thresholdSha256"
@asn1_condition "thresholdSha256Condition"
@asn1_fulfillment "thresholdSha256Fulfillment"
@category "compound"
def type_id, do: @type_id
def type_name, do: @type_name
def type_asn1, do: @asn1
def type_asn1_condition, do: @asn1_condition
def type_asn1_fulfillment, do: @asn1_fulfillment
def type_category, do: @category
@doc """
Adds the given subcondition
to the threshold struct.
"""
@spec add_subcondition(__MODULE__.t, String.t | Condition.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def add_subcondition(%__MODULE__{} = condition, subcondition) when is_binary(subcondition) do
case Condition.from_uri(subcondition) do
{:ok, subcondition} -> add_subcondition(condition, subcondition)
{:error, reason} -> {:error, "Could not parse subcondition uri: #{inspect reason}"}
end
end
def add_subcondition(%__MODULE__{} = condition, subcondition) do
{:ok, Map.merge(condition, %{subconditions: Enum.concat(condition.subconditions, [subcondition])})}
end
@doc """
Derives the given fulfillment's
condition and adds that to the
struct's subconditions.
"""
@spec add_subfulfillment(__MODULE__.t, String.t | Fulfillment.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def add_subfulfillment(%__MODULE__{} = condition, fulfillment) do
with {:ok, subcondition} <- Condition.from_fulfillment(fulfillment),
{:ok, uri} <- Condition.to_uri(subcondition)
do
add_subcondition(condition, uri)
else
{:error, reason} -> {:error, "Could not add subfulfillment: #{inspect reason}"}
end
end
end
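# A hypothetical sketch of assembling a 1-of-2 (OR) threshold condition;
# `subcondition_uri_a` and `subcondition_uri_b` are placeholder names for
# crypto-condition URIs, not values from this codebase:
#
#   alias BigchaindbEx.Condition.ThresholdSha256
#   condition = %ThresholdSha256{threshold: 1, subconditions: []}
#   {:ok, condition} = ThresholdSha256.add_subcondition(condition, subcondition_uri_a)
#   {:ok, _condition} = ThresholdSha256.add_subcondition(condition, subcondition_uri_b)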
lib/bigchaindb_ex/condition/threshold.ex
defmodule PredictedSchedule.Display do
import Phoenix.HTML.Tag, only: [tag: 1, content_tag: 2, content_tag: 3]
import SiteWeb.ViewHelpers, only: [format_schedule_time: 1]
alias Schedules.Schedule
alias Predictions.Prediction
@doc """
Returns the HTML to display a PredictedSchedule's time.
For the commuter rail:
If scheduled and predicted times differ, displays the scheduled time crossed out, with the predicted
time below it. Otherwise just displays the time as below.
Other modes:
Display Prediction time with rss icon if available. Otherwise display scheduled time.
"""
@spec time(PredictedSchedule.t()) :: Phoenix.HTML.safe() | String.t()
def time(%PredictedSchedule{} = ps) do
ps
|> maybe_route
|> time_display_function()
|> apply([ps])
end
@doc """
Returns the HTML to display a PredictedSchedule's time as a difference from
the given time.
Times with a difference under 60 minutes are shown as a difference in minutes;
a difference over 60 minutes will show the time.
If a prediction status is available, that will be shown instead of the time or
time difference.
"""
@spec time_difference(PredictedSchedule.t(), DateTime.t()) :: Phoenix.HTML.Safe.t()
def time_difference(%PredictedSchedule{prediction: %Prediction{status: status}}, _current_time)
when not is_nil(status) do
do_realtime(status)
end
def time_difference(
%PredictedSchedule{schedule: %Schedule{} = schedule, prediction: nil},
current_time
) do
do_time_difference(schedule.time, current_time)
end
def time_difference(%PredictedSchedule{prediction: prediction} = ps, current_time) do
case prediction do
%Prediction{time: time} when not is_nil(time) ->
time
|> do_time_difference(current_time)
|> do_realtime()
_ ->
do_display_time(ps)
end
end
@type time_formatter_t :: (DateTime.t() -> [String.t()] | String.t())
@spec do_time_difference(DateTime.t(), DateTime.t(), time_formatter_t, integer) ::
[String.t()] | String.t()
def do_time_difference(
time,
current_time,
not_soon_formatter_fn \\ &format_schedule_time/1,
estimate_threshold_mins \\ 60
) do
time
|> Timex.diff(current_time, :minutes)
|> format_time_difference(time, not_soon_formatter_fn, estimate_threshold_mins)
end
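# Differences beyond the threshold, or negative ones (times in the past),
# fall back to the absolute time; a zero-minute difference is rendered as
# "1 min", anything else as "<diff> min".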
defp format_time_difference(diff, time, not_soon_formatter_fn, estimate_threshold_mins)
when diff > estimate_threshold_mins or diff < 0,
do: not_soon_formatter_fn.(time)
defp format_time_difference(0, _, _, _), do: ["1", " ", "min"]
defp format_time_difference(diff, _, _, _),
do: [Integer.to_string(diff), " ", "min"]
@doc """
Returns the headsign for the PredictedSchedule. The headsign is generally
the destination of the train: what's displayed on the front of the
bus/train.
"""
@spec headsign(PredictedSchedule.t()) :: String.t()
def headsign(%PredictedSchedule{schedule: nil, prediction: nil}) do
""
end
def headsign(%PredictedSchedule{} = ps) do
case PredictedSchedule.trip(ps) do
nil -> ps |> PredictedSchedule.route() |> do_route_headsign(ps.prediction.direction_id)
trip -> trip.headsign
end
end
defp maybe_route(%PredictedSchedule{schedule: nil, prediction: nil}) do
nil
end
defp maybe_route(ps) do
PredictedSchedule.route(ps)
end
defp time_display_function(%Routes.Route{type: 2}) do
&do_display_commuter_rail_time/1
end
defp time_display_function(_) do
&do_display_time/1
end
defp do_display_commuter_rail_time(
%PredictedSchedule{schedule: schedule, prediction: prediction} = ps
) do
if PredictedSchedule.minute_delay?(ps) do
content_tag(
:span,
do: [
content_tag(:del, format_schedule_time(schedule.time), class: "no-wrap strikethrough"),
tag(:br),
display_prediction(prediction)
]
)
else
# otherwise just show the scheduled or predicted time as appropriate
do_display_time(ps)
end
end
defp do_display_time(%PredictedSchedule{schedule: nil, prediction: nil}), do: ""
defp do_display_time(%PredictedSchedule{schedule: scheduled, prediction: nil}) do
content_tag :span do
format_schedule_time(scheduled.time)
end
end
defp do_display_time(%PredictedSchedule{
schedule: %Schedule{} = schedule,
prediction: %Prediction{time: nil, schedule_relationship: relationship}
})
when relationship in [:cancelled, :skipped] do
content_tag(
:del,
schedule.time |> format_schedule_time |> do_realtime,
class: "no-wrap strikethrough"
)
end
defp do_display_time(%PredictedSchedule{prediction: %Prediction{time: nil, status: nil}}) do
""
end
defp do_display_time(%PredictedSchedule{prediction: %Prediction{time: nil, status: status}}) do
do_realtime(status)
end
defp do_display_time(%PredictedSchedule{prediction: prediction}) do
display_prediction(prediction)
end
defp do_realtime(content) do
content_tag(
:div,
[
content_tag(:div, content, class: "trip-list-realtime-content"),
Site.Components.Icons.Realtime.realtime_icon()
],
class: "trip-list-realtime"
)
end
defp do_route_headsign(%Routes.Route{id: "Green-B"}, 0) do
"Boston College"
end
defp do_route_headsign(%Routes.Route{id: "Green-C"}, 0) do
"Cleveland Circle"
end
defp do_route_headsign(%Routes.Route{id: "Green-D"}, 0) do
"Riverside"
end
defp do_route_headsign(%Routes.Route{id: "Green-E"}, 0) do
"Heath Street"
end
defp do_route_headsign(_, _) do
""
end
defp display_prediction(prediction) do
prediction.time
|> format_schedule_time
|> do_realtime
end
end
apps/site/lib/predicted_schedule/display.ex
defmodule Wand.CLI.Commands.Add.Help do
@moduledoc false
def help(:banner) do
"""
Add elixir packages to wand.json
### Usage
**wand** add [package] [package] ... [flags]
## Examples
```
wand add ex_doc mox --test
wand add poison --git="https://github.com/devinus/poison.git"
wand add poison@3.1 --exact
```
## Options
The available flags depend on if wand is being used to add a single package, or multiple packages. Flags that can only be used in single-package-mode are denoted with (s).
```
--compile-env (s) The environment for the dependency (default: **prod**)
--dev Include the dependency in the dev environment
--download Run mix deps.get after adding (default: **true**)
--env Add the dependency to a specific environment
--exact Set the version to exactly match the version provided
--git (s) The Git URI to download the package from
--hex (s) The name of the package in hex to download
--optional Mark the dependency as optional
--organization Set the hex.pm organization to use
--path (s) The local directory to install the package from
--prod Include the dependency in the prod environment
--read-app-file (s) Read the app file of the dependency (default: **true**)
--repo The hex repo to use (default: **hexpm**)
--runtime Start the application automatically (default: **true**)
--sparse (s) Checkout a given directory inside git
--submodules (s) Initialize submodules for the repo
--test Include the dependency in the test environment
--tilde Stay within the minor version provided
--in-umbrella (s) Sets a path dependency pointing to ../app
```
"""
|> Wand.CLI.Display.print()
end
def help(:verbose) do
Wand.CLI.Display.print(Wand.CLI.Commands.Add.moduledoc())
end
def help({:invalid_flag, flag}) do
"""
#{flag} is invalid.
Enter wand help add --verbose for more information
"""
|> Wand.CLI.Display.print()
end
def help({:invalid_version, package}) do
"""
#{package} contains an invalid version
A version must conform to the SemVer schema.
Some valid example versions are:
<pre>
3.1.0
0.0.1
2.0.0-dev
</pre>
Note that versions are not requirements and don't contain >=, ~> etc.
"""
|> Wand.CLI.Display.print()
end
end
lib/cli/commands/add/help.ex
defmodule Edeliver.Relup.Instructions.SuspendRanchAcceptors do
@moduledoc """
This upgrade instruction suspends the ranch acceptors
so that no new connections are accepted. It will be
inserted right after the "point of no return". When the
upgrade is done, the
`Edeliver.Relup.Instructions.ResumeRanchAcceptors`
instruction reenables the acceptors. To make sure
that the ranch acceptors are found, use this instruction
after the
`Edeliver.Relup.Instructions.CheckRanchAcceptors`
instruction which will abort the upgrade if the acceptors
cannot be found. Because real suspending of ranch acceptors
is not possible because ranch acceptors do not handle sys
messages, they are actually terminated. Unfortunately the ranch
acceptor supervisor cannot be suspended in adition to avoid
starting new acceptors, because supervisors can't be suspended
because the supervision tree is used to find processes which
uses callback modules. Since no acceptors are started dynamically
this can be ignored. Use the
`Edeliver.Relup.Instructions.ResumeRanchAcceptors`
instruction at the end of your instructions list to reenable
accepting TCP connections when the upgrade is done.
"""
use Edeliver.Relup.RunnableInstruction
alias Edeliver.Relup.Instructions.CheckRanchAcceptors
@doc """
Appends this instruction to the instructions after the
"point of no return" but before any instruction which
loads or unloads new code, (re-)starts or stops
any running processes, or (re-)starts or stops any
application or the emulator.
"""
def insert_where, do: &append_after_point_of_no_return/2
@doc """
Returns name of the application. This name is taken as argument
for the `run/1` function and is required to access the acceptor processes
through the supervision tree
"""
def arguments(_instructions = %Instructions{}, _config = %{name: name}) when is_atom(name) do
name
end
def arguments(_instructions = %Instructions{}, _config = %{name: name}) when is_binary(name) do
name |> String.to_atom
end
@doc """
This module requires the `Edeliver.Relup.Instructions.CheckRanchAcceptors` module
which must be loaded before this instruction for upgrades and unload after this
instruction for downgrades.
"""
@spec dependencies() :: [Edeliver.Relup.Instructions.CheckRanchAcceptors]
def dependencies do
[Edeliver.Relup.Instructions.CheckRanchAcceptors]
end
@doc """
Suspends all ranch acceptors to avoid handling new requests / connections
during the upgrade. Because suspending ranch acceptors is not possible,
they are terminated instead. The acceptor supervisor itself keeps running,
but since no acceptors are started dynamically, no new ones will spawn.
"""
@spec run(otp_application_name::atom) :: :ok
def run(otp_application_name) do
info "Suspending ranch socket acceptors..."
ranch_listener_sup = CheckRanchAcceptors.ranch_listener_sup(otp_application_name)
assume true = is_pid(ranch_listener_sup), "Failed to suspend ranch socket acceptors. Ranch listener supervisor not found."
ranch_acceptors_sup = CheckRanchAcceptors.ranch_acceptors_sup(ranch_listener_sup)
assume true = is_pid(ranch_acceptors_sup), "Failed to suspend ranch socket acceptors. Ranch acceptors supervisor not found."
assume [_|_] = acceptors = CheckRanchAcceptors.ranch_acceptors(ranch_acceptors_sup), "Failed to suspend ranch socket acceptors. No acceptor processes found."
acceptors_count = Enum.count(acceptors)
info "Stopping #{inspect acceptors_count} ranch socket acceptors..."
assume true = Enum.all?(acceptors, fn acceptor ->
Supervisor.terminate_child(ranch_acceptors_sup, acceptor) == :ok
end), "Failed to suspend ranch socket acceptors."
info "Suspended #{inspect acceptors_count} ranch acceptors."
end
end
lib/edeliver/relup/instructions/suspend_ranch_acceptors.ex
defmodule EctoNestedChangeset do
@moduledoc """
This module defines functions for manipulating nested changesets.
All functions take a path as the second argument. The path is a list of atoms
(for field names) and integers (for indexes in lists).
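For example, the path `[:pets, 1, :toys]` refers to the `:toys` field of
the second entry in the `:pets` relation.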
"""
import Ecto.Changeset
alias Ecto.Association.NotLoaded
alias Ecto.Changeset
@doc """
Appends a value to the field referenced by the path.
The last path segment must be an atom referencing either a to-many relation
field or an array field.
## Example
iex> %Owner{pets: [%Pet{}, %Pet{toys: [%Toy{name: "stick"}]}]}
...> |> Ecto.Changeset.change()
...> |> append_at([:pets, 1, :toys], %Toy{name: "ball"})
...> |> Ecto.Changeset.apply_changes()
%Owner{
pets: [
%Pet{},
%Pet{toys: [%Toy{name: "stick"}, %Toy{name: "ball"}]}
]
}
"""
@spec append_at(Changeset.t(), [atom | non_neg_integer] | atom, any) ::
Changeset.t()
def append_at(%Changeset{} = changeset, path, value),
do: nested_update(:append, changeset, path, value)
@doc """
Prepends a value to the field referenced by the path.
The last path segment must be an atom referencing either a to-many relation
field or an array field.
## Example
iex> %Owner{pets: [%Pet{}, %Pet{toys: [%Toy{name: "stick"}]}]}
...> |> Ecto.Changeset.change()
...> |> prepend_at([:pets, 1, :toys], %Toy{name: "ball"})
...> |> Ecto.Changeset.apply_changes()
%Owner{
pets: [
%Pet{},
%Pet{toys: [%Toy{name: "ball"}, %Toy{name: "stick"}]}
]
}
"""
@spec prepend_at(Changeset.t(), [atom | non_neg_integer] | atom, any) ::
Changeset.t()
def prepend_at(%Changeset{} = changeset, path, value),
do: nested_update(:prepend, changeset, path, value)
@doc """
Inserts a value into a field at the given position.
The last path segment must be an integer for the position.
## Example
iex> %Owner{
...> pets: [
...> %Pet{},
...> %Pet{toys: [%Toy{name: "stick"}, %Toy{name: "ball"}]}
...> ]
...> }
...> |> Ecto.Changeset.change()
...> |> insert_at([:pets, 1, :toys, 1], %Toy{name: "rope"})
...> |> Ecto.Changeset.apply_changes()
%Owner{
pets: [
%Pet{},
%Pet{
toys: [
%Toy{name: "ball"},
%Toy{name: "rope"},
%Toy{name: "stick"}
]
}
]
}
"""
@spec insert_at(Changeset.t(), [atom | non_neg_integer] | atom, any) ::
Changeset.t()
def insert_at(%Changeset{} = changeset, path, value),
do: nested_update(:insert, changeset, path, value)
@doc """
Updates the value in the changeset at the given position with the given update
function.
The path may lead to any field, including arrays and relation fields. Unlike
`Ecto.Changeset.update_change/3`, the update function is always applied,
either to the change or to existing value.
If the path points to a field with a simple type, the update function will
receive the raw value of the field. However, if the path points to the field
of a *-to-many relation, the list values will not be unwrapped, which means
that the update function has to handle a list of changesets.
## Examples
iex> %Owner{pets: [%Pet{}, %Pet{toys: [%Toy{name: "stick"}, %Toy{name: "ball"}]}]}
...> |> Ecto.Changeset.change()
...> |> update_at([:pets, 1, :toys, 1, :name], &String.upcase/1)
...> |> Ecto.Changeset.apply_changes()
%Owner{
pets: [
%Pet{},
%Pet{
toys: [
%Toy{name: "stick"},
%Toy{name: "BALL"}
]
}
]
}
"""
@spec update_at(
Changeset.t(),
[atom | non_neg_integer] | atom,
(any -> any)
) :: Changeset.t()
def update_at(%Changeset{} = changeset, path, func) when is_function(func, 1),
do: nested_update(:update, changeset, path, func)
@doc """
Deletes the item at the given path.
The last path segment is expected to be an integer index.
Items that are not persisted in the database yet will always be removed from
the list. For structs that are already persisted in the database, there are
three different modes.
- `[mode: {:action, :replace}]` (default) - The item will be wrapped in a
changeset with the `:replace` action. This only works if an appropriate
`:on_replace` option is set for the relation in the schema.
- `[mode: {:action, :delete}]` - The item will be wrapped in a changeset with
the action set to `:delete`.
- `[mode: {:flag, field}]` - Puts `true` as a change for the given field.
The flag option is useful for explicitly marking items for deletion in form
parameters. In this case, you would configure a virtual field on the schema
and set the changeset action to `:delete` in the changeset function in case
the value is set to `true`.
schema "pets" do
field :name, :string
field :delete, :boolean, virtual: true, default: false
end
def changeset(pet, attrs) do
pet
|> cast(attrs, [:name, :delete])
|> validate_required([:name])
|> maybe_mark_for_deletion()
end
def maybe_mark_for_deletion(%Ecto.Changeset{} = changeset) do
if Ecto.Changeset.get_change(changeset, :delete),
do: Map.put(changeset, :action, :delete),
else: changeset
end
## Examples
iex> changeset = Ecto.Changeset.change(
...>   %Owner{pets: [%Pet{name: "George"}, %Pet{name: "Patty"}]}
...> )
iex> delete_at(changeset, [:pets, 1])
%Ecto.Changeset{
changes: [
%Changeset{action: :replace, data: %Pet{name: "Patty"}},
%Changeset{action: :update, data: %Pet{name: "George"}},
]
}
iex> delete_at(changeset, [:pets, 1], mode: {:action, :delete})
%Ecto.Changeset{
changes: [
%Changeset{action: :update, data: %Pet{name: "George"}},
%Changeset{action: :delete, data: %Pet{name: "Patty"}},
]
}
iex> delete_at(changeset, [:pets, 1], mode: {:flag, :delete})
%Ecto.Changeset{
changes: [
%Changeset{action: :update, data: %Pet{name: "George"}},
%Changeset{
action: :update,
changes: %{delete: true},
data: %Pet{name: "Patty"}
},
]
}
"""
@spec delete_at(Changeset.t(), [atom | non_neg_integer] | atom, keyword) ::
Changeset.t()
def delete_at(%Changeset{} = changeset, path, opts \\ []) do
mode = opts[:mode] || {:action, :replace}
nested_update(:delete, changeset, path, mode)
end
@doc """
Returns a value from a changeset referenced by the path.
## Example
iex> %Owner{pets: [%Pet{}, %Pet{toys: [%Toy{name: "stick"}]}]}
...> |> Ecto.Changeset.change()
...> |> get_at([:pets, 1, :toys])
[%Toy{name: "stick"}]
"""
@spec get_at(Changeset.t(), [atom | non_neg_integer] | atom) :: any()
def get_at(%Changeset{} = changeset, path) do
nested_get(:get, changeset, path)
end
defp nested_update(operation, changeset, field, value) when is_atom(field),
do: nested_update(operation, changeset, [field], value)
defp nested_update(:append, %Changeset{} = changeset, [field], value)
when is_atom(field) do
new_value =
case get_change_or_field(changeset, field) do
%NotLoaded{} ->
if Ecto.get_meta(changeset.data, :state) == :built,
do: [value],
else: raise(EctoNestedChangeset.NotLoadedError, field: field)
previous_value ->
previous_value ++ [value]
end
Changeset.put_change(changeset, field, new_value)
end
defp nested_update(:append, %{} = data, [field], value) when is_atom(field) do
data
|> Changeset.change()
|> Changeset.put_change(field, Map.fetch!(data, field) ++ [value])
end
defp nested_update(:prepend, %Changeset{} = changeset, [field], value)
when is_atom(field) do
new_value =
case get_change_or_field(changeset, field) do
%NotLoaded{} ->
if Ecto.get_meta(changeset.data, :state) == :built,
do: [value],
else: raise(EctoNestedChangeset.NotLoadedError, field: field)
previous_value ->
[value | previous_value]
end
Changeset.put_change(changeset, field, new_value)
end
defp nested_update(:prepend, %{} = data, [field], value)
when is_atom(field) do
data
|> Changeset.change()
|> Changeset.put_change(field, [value | Map.fetch!(data, field)])
end
defp nested_update(:insert, items, [index], value)
when is_list(items) and is_integer(index) do
List.insert_at(items, index, value)
end
defp nested_update(:insert, %Changeset{} = changeset, [field, index], value)
when is_atom(field) and is_integer(index) do
new_value =
case get_change_or_field(changeset, field) do
%NotLoaded{} ->
if Ecto.get_meta(changeset.data, :state) == :built,
do: [value],
else: raise(EctoNestedChangeset.NotLoadedError, field: field)
previous_value ->
List.insert_at(previous_value, index, value)
end
Changeset.put_change(changeset, field, new_value)
end
defp nested_update(:update, %Changeset{} = changeset, [field], func)
when is_atom(field) do
value = get_change_or_field(changeset, field)
Changeset.put_change(changeset, field, func.(value))
end
defp nested_update(:update, %{} = data, [field], func)
when is_atom(field) do
data
|> Changeset.change()
|> Changeset.put_change(field, func.(Map.fetch!(data, field)))
end
defp nested_update(:update, items, [index], func)
when is_list(items) and is_integer(index) do
List.update_at(items, index, &func.(&1))
end
defp nested_update(:delete, items, [index], mode)
when is_list(items) and is_integer(index) do
case {Enum.at(items, index), mode} do
{%Changeset{action: :insert}, _} ->
List.delete_at(items, index)
{%{} = item, {:action, :delete}} ->
List.replace_at(
items,
index,
item |> change() |> Map.put(:action, :delete)
)
{%{}, {:action, :replace}} ->
List.delete_at(items, index)
{%{} = item, {:flag, field}} when is_atom(field) ->
List.replace_at(
items,
index,
item |> change() |> put_change(field, true)
)
_item ->
List.delete_at(items, index)
end
end
defp nested_update(operation, %Changeset{} = changeset, [field | rest], value)
when is_atom(field) do
nested_value = get_change_or_field(changeset, field)
Changeset.put_change(
changeset,
field,
nested_update(operation, nested_value, rest, value)
)
end
defp nested_update(operation, %{} = data, [field | rest], value)
when is_atom(field) do
nested_value = Map.get(data, field)
data
|> change()
|> put_change(field, nested_update(operation, nested_value, rest, value))
end
defp nested_update(operation, items, [index | rest], value)
when is_list(items) and is_integer(index) do
List.update_at(items, index, fn changeset_or_value ->
nested_update(operation, changeset_or_value, rest, value)
end)
end
defp nested_get(:get, %Changeset{} = changeset, [field])
when is_atom(field) do
Changeset.get_field(changeset, field)
end
defp nested_get(:get, %{} = data, [field])
when is_atom(field) do
Map.get(data, field)
end
defp nested_get(:get, items, [index])
when is_list(items) and is_integer(index) do
Enum.at(items, index)
end
defp nested_get(operation, %Changeset{} = changeset, [field | rest])
when is_atom(field) do
nested_value = get_change_or_field(changeset, field)
nested_get(operation, nested_value, rest)
end
defp nested_get(operation, %{} = data, [field | rest])
when is_atom(field) do
nested_value = Map.get(data, field)
nested_get(operation, nested_value, rest)
end
defp nested_get(operation, items, [index | rest])
when is_list(items) and is_integer(index) do
nested_value = Enum.at(items, index)
nested_get(operation, nested_value, rest)
end
defp get_change_or_field(%Changeset{} = changeset, field) do
case Map.fetch(changeset.changes, field) do
{:ok, value} -> value
:error -> Map.get(changeset.data, field)
end
end
end
lib/ecto_nested_changeset.ex
defmodule Geometry.Polygon do
@moduledoc """
A polygon struct, representing a 2D polygon.
A none empty line-string requires at least one ring with four points.
"""
alias Geometry.{GeoJson, LineString, Polygon, WKB, WKT}
defstruct rings: []
@type t :: %Polygon{rings: [Geometry.coordinates()]}
@doc """
Creates an empty `Polygon`.
## Examples
iex> Polygon.new()
%Polygon{rings: []}
"""
@spec new :: t()
def new, do: %Polygon{}
@doc """
Creates a `Polygon` from the given `rings`.
## Examples
iex> Polygon.new([
...> LineString.new([
...> Point.new(35, 10),
...> Point.new(45, 45),
...> Point.new(10, 20),
...> Point.new(35, 10)
...> ]),
...> LineString.new([
...> Point.new(20, 30),
...> Point.new(35, 35),
...> Point.new(30, 20),
...> Point.new(20, 30)
...> ])
...> ])
%Polygon{
rings: [
[[35, 10], [45, 45], [10, 20], [35, 10]],
[[20, 30], [35, 35], [30, 20], [20, 30]]
]
}
iex> Polygon.new()
%Polygon{}
"""
@spec new([LineString.t()]) :: t()
def new(rings) when is_list(rings) do
%Polygon{rings: Enum.map(rings, fn line_string -> line_string.points end)}
end
@doc """
Returns `true` if the given `Polygon` is empty.
## Examples
iex> Polygon.empty?(Polygon.new())
true
iex> Polygon.empty?(
...> Polygon.new([
...> LineString.new([
...> Point.new(35, 10),
...> Point.new(45, 45),
...> Point.new(10, 20),
...> Point.new(35, 10)
...> ])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%Polygon{rings: rings}), do: Enum.empty?(rings)
@doc """
Creates a `Polygon` from the given coordinates.
## Examples
iex> Polygon.from_coordinates([
...> [[1, 1], [2, 1], [2, 2], [1, 1]]
...> ])
%Polygon{
rings: [
[[1, 1], [2, 1], [2, 2], [1, 1]]
]
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(rings) when is_list(rings), do: %Polygon{rings: rings}
@doc """
Returns an `:ok` tuple with the `Polygon` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10],
...> [45, 45],
...> [15, 40],
...> [10, 20],
...> [35, 10]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> Polygon.from_geo_json()
{:ok, %Polygon{
rings: [
[
[35, 10],
[45, 45],
[15, 40],
[10, 20],
[35, 10]
]
]
}}
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10],
...> [45, 45],
...> [15, 40],
...> [10, 20],
...> [35, 10]],
...> [[20, 30],
...> [35, 35],
...> [30, 20],
...> [20, 30]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> Polygon.from_geo_json()
{:ok, %Polygon{
rings: [[
[35, 10],
[45, 45],
[15, 40],
[10, 20],
[35, 10]
], [
[20, 30],
[35, 35],
[30, 20],
[20, 30]
]]
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_polygon(json, Polygon)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_polygon(json, Polygon) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `Polygon`.
## Examples
iex> Polygon.to_geo_json(
...> Polygon.new([
...> LineString.new([
...> Point.new(35, 10),
...> Point.new(45, 45),
...> Point.new(10, 20),
...> Point.new(35, 10)
...> ]),
...> LineString.new([
...> Point.new(20, 30),
...> Point.new(35, 35),
...> Point.new(30, 20),
...> Point.new(20, 30)
...> ])
...> ])
...> )
%{
"type" => "Polygon",
"coordinates" => [
[
[35, 10],
[45, 45],
[10, 20],
[35, 10]
], [
[20, 30],
[35, 35],
[30, 20],
[20, 30]
]
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%Polygon{rings: rings}) do
%{
"type" => "Polygon",
"coordinates" => rings
}
end
@doc """
Returns an `:ok` tuple with the `Polygon` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> Polygon.from_wkt("
...> POLYGON (
...> (35 10, 45 45, 15 40, 10 20, 35 10),
...> (20 30, 35 35, 30 20, 20 30)
...> )
...> ")
{:ok,
%Polygon{
rings: [
[
[35, 10],
[45, 45],
[15, 40],
[10, 20],
[35, 10]
], [
[20, 30],
[35, 35],
[30, 20],
[20, 30]
]
]
}}
iex> "
...> SRID=789;
...> POLYGON (
...> (35 10, 45 45, 15 40, 10 20, 35 10),
...> (20 30, 35 35, 30 20, 20 30)
...> )
...> "
iex> |> Polygon.from_wkt()
{:ok, {
%Polygon{
rings: [
[
[35, 10],
[45, 45],
[15, 40],
[10, 20],
[35, 10]
], [
[20, 30],
[35, 35],
[30, 20],
[20, 30]
]
]
},
789
}}
iex> Polygon.from_wkt("Polygon EMPTY")
{:ok, %Polygon{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, Polygon)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, Polygon) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `Polygon`. With option `:srid` an
EWKT representation with the SRID is returned.
## Examples
iex> Polygon.to_wkt(Polygon.new())
"Polygon EMPTY"
iex> Polygon.to_wkt(Polygon.new(), srid: 1123)
"SRID=1123;Polygon EMPTY"
iex> Polygon.to_wkt(
...> Polygon.new([
...> LineString.new([
...> Point.new(35, 10),
...> Point.new(45, 45),
...> Point.new(10, 20),
...> Point.new(35, 10)
...> ]),
...> LineString.new([
...> Point.new(20, 30),
...> Point.new(35, 35),
...> Point.new(30, 20),
...> Point.new(20, 30)
...> ])
...> ])
...> )
"Polygon ((35 10, 45 45, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%Polygon{rings: rings}, opts \\ []) do
WKT.to_ewkt(<<"Polygon ", to_wkt_rings(rings)::binary()>>, opts)
end
@doc """
Returns the WKB representation for a `Polygon`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%Polygon{rings: rings}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(rings, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `Polygon` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
The optional second argument determines if a `:hex`-string or a `:binary`
input is expected. The default is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, Polygon)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, Polygon) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc false
@compile {:inline, to_wkt_rings: 1}
@spec to_wkt_rings(list()) :: String.t()
def to_wkt_rings([]), do: "EMPTY"
def to_wkt_rings([ring | rings]) do
<<
"(",
LineString.to_wkt_points(ring)::binary(),
Enum.reduce(rings, "", fn ring, acc ->
<<acc::binary(), ", ", LineString.to_wkt_points(ring)::binary()>>
end)::binary(),
")"
>>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(coordinates, srid, endian, mode) :: wkb
when coordinates: [Geometry.coordinates()],
srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(rings, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_rings(rings, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_rings: 3}
defp to_wkb_rings(rings, endian, mode) do
Enum.reduce(rings, WKB.length(rings, endian, mode), fn ring, acc ->
<<acc::binary(), LineString.to_wkb_points(ring, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
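# WKB encodes polygons with geometry type 3; EWKB additionally sets the
# 0x20000000 flag when an SRID is embedded. In :hex mode the :ndr codes
# are the byte-swapped (little-endian) hex of the :xdr values.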
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "00000003"
{:ndr, false} -> "03000000"
{:xdr, true} -> "20000003"
{:ndr, true} -> "03000020"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x00000003::big-integer-size(32)>>
{:ndr, false} -> <<0x00000003::little-integer-size(32)>>
{:xdr, true} -> <<0x20000003::big-integer-size(32)>>
{:ndr, true} -> <<0x20000003::little-integer-size(32)>>
end
end
end
lib/geometry/polygon.ex
defmodule Ofex.BankAccount do
alias Ofex.Transaction
import Ofex.Helpers
import SweetXml
@doc """
Parses `BANKMSGSRSV1` message set for bank account data.
* `:account_number`
* `:balance`
* `:balance_date` `Date` representation (e.g. `~D[2017-01-27]`)
* `:currency` 3 letter ISO-4217 currency identifier
* `:generic_type` simplified representation of the type (e.g. the generic type for `MONEYMRKT` is `SAVINGS`)
* `:positive_balance` some cases may strictly require a positive balance amount
* `:request_id`
* `:routing_number`
* `:status_code`
* `:status_severity`
* `:transactions` parsed transactions formatted with `Ofex.Transaction`
* `:type`
Sample `BANKMSGSRSV1` message set:
```xml
<!-- <BANKMSGSRSV1> --> <!-- Top tag parsed out previously -->
<STMTTRNRS>
<TRNUID>0</TRNUID>
<STATUS>
<CODE>0</CODE>
<SEVERITY>INFO</SEVERITY>
</STATUS>
<STMTRS>
<CURDEF>USD</CURDEF>
<BANKACCTFROM>
<BANKID>019283745</BANKID>
<ACCTID>00000000012345678910</ACCTID>
<ACCTTYPE>CHECKING</ACCTTYPE>
</BANKACCTFROM>
<BANKTRANLIST>
<DTSTART>19700101120000</DTSTART>
<DTEND>20170127120000</DTEND>
<STMTTRN>
<TRNTYPE>DEBIT</TRNTYPE>
<DTPOSTED>20170123120000</DTPOSTED>
<DTUSER>20170123120000</DTUSER>
<TRNAMT>-7.0</TRNAMT>
<FITID>0192947576930</FITID>
<NAME>This is where the name is</NAME>
<MEMO>This is where a memo goes</MEMO>
</STMTTRN>
<STMTTRN>
<TRNTYPE>CREDIT</TRNTYPE>
<DTPOSTED>20170120120000</DTPOSTED>
<DTUSER>20170120120000</DTUSER>
<TRNAMT>372.07</TRNAMT>
<FITID>019274659302</FITID>
<NAME>BUYING ALL THE THINGS</NAME>
<MEMO>#YOLO</MEMO>
</STMTTRN>
<STMTTRN>
<TRNTYPE>CHECK</TRNTYPE>
<DTPOSTED>20170113120000</DTPOSTED>
<DTUSER>20170113120000</DTUSER>
<TRNAMT>-40.0</TRNAMT>
<FITID>8373020273630</FITID>
<CHECKNUM>275</CHECKNUM>
<NAME>CHECK 275 8383933737</NAME>
</STMTTRN>
</BANKTRANLIST>
<LEDGERBAL>
<BALAMT>1000001.00</BALAMT>
<DTASOF>20170127120000</DTASOF>
</LEDGERBAL>
</STMTRS>
</STMTTRNRS>
<!-- </BANKMSGSRSV1> -->
```
"""
@spec create(tuple()) :: %{account: map()}
def create(ofx_data) do
bank_account_map =
ofx_data
|> bank_account_attributes_list
|> create_attribute_map
%{account: bank_account_map}
end
defp bank_account_attributes_list(ofx_data) do
[
{:account_number, xpath(ofx_data, ~x"//ACCTID/text()"s)},
{:balance, xpath(ofx_data, ~x"//BALAMT/text()"s)},
{:balance_date, xpath(ofx_data, ~x"//DTASOF/text()"s)},
{:currency, xpath(ofx_data, ~x"//CURDEF/text()"s)},
{:generic_type, ofx_data |> xpath(~x"//ACCTTYPE/text()"s) |> generic_type_from_type},
{:name, xpath(ofx_data, ~x"//DESC/text()"s)},
{:positive_balance, xpath(ofx_data, ~x"//BALAMT/text()"s)},
{:request_id, xpath(ofx_data, ~x"//TRNUID/text()"s)},
{:routing_number, xpath(ofx_data, ~x"//BANKID/text()"s)},
{:status_code, xpath(ofx_data, ~x"//CODE/text()"s)},
{:status_severity, xpath(ofx_data, ~x"//SEVERITY/text()"s)},
{:transactions, ofx_data |> xpath(~x"//BANKTRANLIST/STMTTRN"l) |> parse_transactions},
{:transactions_end_date, xpath(ofx_data, ~x"//BANKTRANLIST/DTEND/text()"s)},
{:transactions_start_date, xpath(ofx_data, ~x"//BANKTRANLIST/DTSTART/text()"s)},
{:type, xpath(ofx_data, ~x"//ACCTTYPE/text()"s)}
]
end
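# Collapse closely related OFX account types into broader buckets:
# money market and CD accounts are reported as SAVINGS, credit lines
# as LINE_OF_CREDIT; anything else passes through unchanged.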
defp generic_type_from_type("MONEYMRKT"), do: "SAVINGS"
defp generic_type_from_type("CREDITLINE"), do: "LINE_OF_CREDIT"
defp generic_type_from_type("CD"), do: "SAVINGS"
defp generic_type_from_type(type), do: type
defp parse_transactions(ofx_transactions) do
Enum.map(ofx_transactions, &Transaction.create(&1))
end
end
lib/bank_account.ex
defmodule EtsDeque.Server do
@moduledoc ~S"""
EtsDeque.Server is a GenServer wrapper around an EtsDeque.
It provides safe access to a deque from multiple processes,
ensuring each operation on the deque is atomic.
## Example
iex> {:ok, pid} = EtsDeque.Server.start_link(size: 3)
iex> :ok = EtsDeque.Server.push_head(pid, :moe)
iex> :ok = EtsDeque.Server.push_tail(pid, :larry)
iex> :ok = EtsDeque.Server.push_tail(pid, :curly)
iex> :error = EtsDeque.Server.push_tail(pid, :shemp) ## deque is full
iex> {:ok, :curly} = EtsDeque.Server.pop_tail(pid)
iex> :ok = EtsDeque.Server.push_tail(pid, :shemp)
iex> EtsDeque.Server.execute(pid, fn deque -> Enum.to_list(deque) end)
[:moe, :larry, :shemp]
"""
use GenServer
@doc false
def start_link(args) do
GenServer.start_link(__MODULE__, args)
end
@impl true
@doc false
def init(args) do
size = args[:size] || :infinity
deque = EtsDeque.new(size)
{:ok, deque}
end
@impl true
@doc false
def handle_call({:execute, fun}, _from, deque) do
{:reply, fun.(deque), deque}
end
def handle_call(:deque, _from, deque) do
{:reply, deque, deque}
end
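# Any other call is delegated to the matching EtsDeque function; because
# every operation runs inside this GenServer, access to the shared deque
# is serialized and therefore atomic.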
def handle_call({cmd, args}, _from, deque) do
case :erlang.apply(EtsDeque, cmd, [deque | args]) do
{:ok, item, %EtsDeque{} = deque} -> {:reply, {:ok, item}, deque}
{:ok, %EtsDeque{} = deque} -> {:reply, :ok, deque}
{:ok, item} -> {:reply, {:ok, item}, deque}
other -> {:reply, other, deque}
end
end
@doc ~S"""
Adds an item onto the head of the queue. Returns `:ok`, or `:error` if
the queue is full.
"""
@spec push_head(pid, any, timeout) :: :ok | :error
def push_head(pid, item, timeout \\ 5000) do
GenServer.call(pid, {:push_head, [item]}, timeout)
end
@doc ~S"""
Adds an item onto the tail of the queue. Returns `:ok`, or `:error` if
the queue is full.
"""
@spec push_tail(pid, any, timeout) :: :ok | :error
def push_tail(pid, item, timeout \\ 5000) do
GenServer.call(pid, {:push_tail, [item]}, timeout)
end
@doc ~S"""
Removes the item at the head of the queue, returning it as `{:ok, item}`.
Returns `:error` if the queue is empty.
"""
@spec pop_head(pid, timeout) :: {:ok, any} | :error
def pop_head(pid, timeout \\ 5000) do
GenServer.call(pid, {:pop_head, []}, timeout)
end
@doc ~S"""
Removes the item at the tail of the queue, returning it as `{:ok, item}`.
Returns `:error` if the queue is empty.
"""
@spec pop_tail(pid, timeout) :: {:ok, any} | :error
def pop_tail(pid, timeout \\ 5000) do
GenServer.call(pid, {:pop_tail, []}, timeout)
end
@doc ~S"""
Returns `{:ok, item}` for the item at the head of the queue, or `:error`
if the queue is empty.
"""
@spec peek_head(pid, timeout) :: {:ok, any} | :error
def peek_head(pid, timeout \\ 5000) do
GenServer.call(pid, {:peek_head, []}, timeout)
end
@doc ~S"""
Returns `{:ok, item}` for the item at the tail of the queue, or `:error`
if the queue is empty.
"""
@spec peek_tail(pid, timeout) :: {:ok, any} | :error
def peek_tail(pid, timeout \\ 5000) do
GenServer.call(pid, {:peek_tail, []}, timeout)
end
@doc ~S"""
Returns `{:ok, item}` for the item at the given index, where index `0` is
the head. Returns `:error` if the index is out of bounds.
"""
@spec at(pid, non_neg_integer, timeout) :: {:ok, any} | :error
def at(pid, index, timeout \\ 5000) do
GenServer.call(pid, {:at, [index]}, timeout)
end
@doc ~S"""
Replaces the item at the given index, returning `:ok`.
Returns `:error` if the index is out of bounds.
"""
@spec replace_at(pid, non_neg_integer, any, timeout) :: :ok | :error
def replace_at(pid, index, item, timeout \\ 5000) do
GenServer.call(pid, {:replace_at, [index, item]}, timeout)
end
@doc ~S"""
Returns the maximum capacity of the given deque.
"""
@spec size(pid, timeout) :: non_neg_integer | :infinity
def size(pid, timeout \\ 5000) do
GenServer.call(pid, {:size, []}, timeout)
end
@doc ~S"""
Returns the number of items in the given deque.
"""
@spec length(pid, timeout) :: non_neg_integer
def length(pid, timeout \\ 5000) do
GenServer.call(pid, {:length, []}, timeout)
end
@doc ~S"""
Executes `fun.(deque)`, ensuring no other process is accessing the
deque at the same time. Returns the result.
"""
@spec execute(pid, (EtsDeque.t() -> any), timeout) :: any
def execute(pid, fun, timeout \\ 5000) do
GenServer.call(pid, {:execute, fun}, timeout)
end
@doc ~S"""
Returns the deque. Ensuring that no other process mutates
the deque after it is returned is the caller's responsibility.
See `execute/3` for a safer alternative.
"""
@spec deque(pid, timeout) :: EtsDeque.t()
def deque(pid, timeout \\ 5000) do
GenServer.call(pid, :deque, timeout)
end
end
lib/ets_deque/server.ex
defmodule Algae.Tree.BinarySearch do
@moduledoc """
Represent a `BinarySearch` tree.
## Examples
iex> alias Algae.Tree.BinarySearch, as: BSTree
...>
...> BSTree.Node.new(
...> 42,
...> BSTree.Node.new(77),
...> BSTree.Node.new(
...> 1234,
...> BSTree.Node.new(98),
...> BSTree.Node.new(32)
...> )
...> )
%Algae.Tree.BinarySearch.Node{
node: 42,
left: %Algae.Tree.BinarySearch.Node{
node: 77,
left: %Algae.Tree.BinarySearch.Empty{},
right: %Algae.Tree.BinarySearch.Empty{}
},
right: %Algae.Tree.BinarySearch.Node{
node: 1234,
left: %Algae.Tree.BinarySearch.Node{
node: 98,
left: %Algae.Tree.BinarySearch.Empty{},
right: %Algae.Tree.BinarySearch.Empty{}
},
right: %Algae.Tree.BinarySearch.Node{
node: 32,
left: %Algae.Tree.BinarySearch.Empty{},
right: %Algae.Tree.BinarySearch.Empty{}
}
}
}
"""
alias __MODULE__
alias BinarySearch.{Empty, Node}
import Algae
use Witchcraft, except: [to_list: 1]
defsum do
defdata(Empty :: none())
defdata Node do
node :: any()
left :: BinarySearch.t() \\ BinarySearch.Empty.new()
right :: BinarySearch.t() \\ BinarySearch.Empty.new()
end
end
@doc """
Create an empty tree.
## Examples
iex> new()
%Algae.Tree.BinarySearch.Empty{}
"""
@spec new() :: Empty.t()
def new, do: %Empty{}
@doc """
Bring a value into an otherwise empty tree.
## Examples
iex> new(42)
%Algae.Tree.BinarySearch.Node{
node: 42,
left: %Algae.Tree.BinarySearch.Empty{},
right: %Algae.Tree.BinarySearch.Empty{}
}
"""
@spec new(any()) :: Node.t()
def new(value), do: %Node{node: value}
@doc """
Insert a new element into a tree.
## Examples
iex> insert(new(42), 43)
%Algae.Tree.BinarySearch.Node{
node: 42,
right: %Algae.Tree.BinarySearch.Node{
node: 43
}
}
"""
@spec insert(t(), any()) :: t()
def insert(%Empty{}, value), do: new(value)
def insert(tree = %Node{node: node, left: left, right: right}, orderable) do
case compare(orderable, node) do
:equal -> tree
:greater -> %{tree | right: insert(right, orderable)}
:lesser -> %{tree | left: insert(left, orderable)}
end
end
@doc """
Remove an element from a tree by value.
## Examples
iex> alias Algae.Tree.BinarySearch, as: BSTree
...>
...> BSTree.Node.new(
...> 42,
...> BSTree.Node.new(77),
...> BSTree.Node.new(
...> 1234,
...> BSTree.Node.new(98),
...> BSTree.Node.new(32)
...> )
...> ) |> delete(98)
%Algae.Tree.BinarySearch.Node{
node: 42,
left: %Algae.Tree.BinarySearch.Node{
node: 77
},
right: %Algae.Tree.BinarySearch.Node{
node: 1234,
right: %Algae.Tree.BinarySearch.Node{
node: 32
}
}
}
"""
@spec delete(t(), any()) :: t()
def delete(%Empty{}, _), do: %Empty{}
def delete(tree = %Node{node: node, left: left, right: right}, orderable) do
case compare(orderable, node) do
:greater ->
%{tree | right: delete(right, orderable)}
:lesser ->
%{tree | left: delete(left, orderable)}
:equal ->
case tree do
%{left: %Empty{}} -> right
%{right: %Empty{}} -> left
%{right: %{node: shift}} -> %{tree | node: shift, right: delete(right, shift)}
end
end
end
@doc """
Flatten a tree into a list.
## Examples
iex> alias Algae.Tree.BinarySearch, as: BSTree
...>
...> BSTree.Node.new(
...> 42,
...> BSTree.Node.new(77),
...> BSTree.Node.new(
...> 1234,
...> BSTree.Node.new(98),
...> BSTree.Node.new(32)
...> )
...> )
...> |> BSTree.to_list()
[42, 77, 1234, 98, 32]
"""
@spec to_list(t()) :: list()
def to_list(tree), do: Witchcraft.Foldable.to_list(tree)
@doc """
Flatten a tree into a list with elements sorted.
## Examples
iex> alias Algae.Tree.BinarySearch, as: BSTree
...>
...> BSTree.Node.new(
...> 42,
...> BSTree.Node.new(77),
...> BSTree.Node.new(
...> 1234,
...> BSTree.Node.new(98),
...> BSTree.Node.new(32)
...> )
...> )
...> |> BSTree.to_ordered_list()
[32, 42, 77, 98, 1234]
"""
@spec to_ordered_list(t()) :: list()
def to_ordered_list(tree), do: tree |> to_list() |> Enum.sort()
@doc """
Build a `BinarySearch` tree from a list.
## Examples
iex> Algae.Tree.BinarySearch.from_list([42, 77, 1234, 98, 32])
%Algae.Tree.BinarySearch.Node{
node: 42,
left: %Algae.Tree.BinarySearch.Node{
node: 32
},
right: %Algae.Tree.BinarySearch.Node{
node: 77,
right: %Algae.Tree.BinarySearch.Node{
node: 1234,
left: %Algae.Tree.BinarySearch.Node{
node: 98
}
}
}
}
"""
@spec from_list(list()) :: t()
def from_list([]), do: %Empty{}
def from_list([head | tail]), do: from_list(tail, new(head))
@doc """
Build a `BinarySearch` tree from a list and attach to an existing tree.
## Examples
iex> Algae.Tree.BinarySearch.from_list([42, 77, 1234, 98, 32], new(-9))
%Algae.Tree.BinarySearch.Node{
node: -9,
right: %Algae.Tree.BinarySearch.Node{
left: %Algae.Tree.BinarySearch.Node{
node: 32
},
node: 42,
right: %Algae.Tree.BinarySearch.Node{
node: 77,
right: %Algae.Tree.BinarySearch.Node{
node: 1234,
left: %Algae.Tree.BinarySearch.Node{
node: 98
},
right: %Algae.Tree.BinarySearch.Empty{}
}
}
}
}
"""
@spec from_list(list(), t()) :: t()
def from_list([], seed), do: seed
def from_list([head | tail], seed), do: from_list(tail, insert(seed, head))
end
# end of lib/algae/tree/binary_search.ex
defmodule StatementsReader.CLI do
@moduledoc """
This is the main module of the app; it parses command-line options
and dispatches the requested processing.
"""
@doc """
The main/1 entry point of the escript.
It receives the raw command-line arguments as a list of strings.
"""
@spec main([String.t()]) :: :ok
def main(params) do
params
|> parse_params()
|> process
end
@doc """
Parses the raw arguments through the `OptionParser` module and maps
the result to either `{:process, opts}` or `:help`.
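
For example (a sketch of the expected shape; the path is hypothetical):

    parse_params(["statement.pdf", "-p", "secret", "--json"])
    # => {:process, opts} where opts includes src: "statement.pdf",
    #    password: "secret", json: true, and a default :output of File.cwd!()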
"""
def parse_params(params) do
opts = [
switches: [
help: :boolean,
password: :string,
json: :boolean,
sql: :boolean,
csv: :boolean,
output: :string
],
aliases: [
h: :help,
o: :output,
p: :password
]
]
update_opts = fn opts, k, v ->
output = opts[:output] || File.cwd!()
opts
|> Keyword.put(:output, output)
|> Keyword.put(k, v)
end
params
|> OptionParser.parse(opts)
|> case do
{[help: true], _, _} -> :help
{opts, [path], _} -> {:process, update_opts.(opts, :src, path)}
_ -> :help
end
end
@doc """
The process/1 clauses dispatch on the value received from the parse_params/1 function.
"""
@spec process(:help | {:process, keyword()} | tuple()) :: :ok
def process({option}) do
IO.puts("""
EXTRACT MPESA STATEMENTS
------
Invalid options, use --help to view usage.
got: #{inspect(option, pretty: true)}
------
""")
end
def process(:help) do
IO.puts("""
EXPORT MPESA STATEMENTS TO JSON or SQL
-------------------------------------------
Syntax
`xpesa_parser /path/to/mpesa/statements --password pdf_password [--json, --sql, --csv, --excel] [--output /path/to/output/dir]`
Run the following commands
to extract statements to json or sql file.
`xpesa_parser /path/to/mpesa/statement -p password --json -o /output/dir`
`xpesa_parser /path/to/mpesa/statement -p password --sql -o /output/dir`
`xpesa_parser /path/to/mpesa/statement -p password --json --sql -o /output/dir` # creates both exports
`xpesa_parser /path/to/mpesa/statement -p password --json` # current dir is implied as output
`xpesa_parser /path/to/mpesa/statement -p password` # json output and current working dir is implied
-------------------------------------------
""")
end
def process({:process, opts}) do
opts[:src]
|> StatementsReader.read_statements(opts)
|> StatementsReader.prepare_statements()
|> (fn statements ->
opts
|> exports()
|> Enum.map(&{StatementsReader.Statements, :export_statements, [statements, &1]})
|> Enum.map(fn {m, f, a} -> Task.async(m, f, a) end)
|> Task.yield_many()
|> Enum.map(fn {task, res} -> res || Task.shutdown(task, :brutal_kill) end)
|> Enum.map(fn {_return, {_, res}} -> res end)
end).()
|> (fn paths ->
"""
EXPORTED MPESA STATEMENTS
-------------------------------------------
Results at
#{Enum.join(paths, "\n\t")}
-------------------------------------------
"""
end).()
|> IO.puts()
end
defp exports(opts) do
to_json? = {:json, opts[:json] || false}
to_sql? = {:sql, opts[:sql] || false}
to_csv? = {:csv, opts[:csv] || false}
to_excel? = {:xlsx, opts[:excel] || false}
[to_csv?, to_excel?, to_sql?, to_json?]
|> check_export_options()
|> Enum.reduce([], fn
{format, true}, acc -> acc ++ [[format: format]]
{_, false}, acc -> acc
end)
|> Enum.map(&Keyword.merge(opts, &1))
end
defp check_export_options(options) do
options
|> Enum.all?(fn {_, state} -> !state end)
|> case do
true -> (options -- [{:json, false}]) ++ [{:json, true}]
false -> options
end
end
end
# end of lib/cli.ex
defmodule StatesLanguage do
@moduledoc """
A macro to parse [StatesLanguage](https://states-language.net/spec.html) JSON and create :gen_statem modules
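
A minimal usage sketch (the module name and JSON path are hypothetical):

    defmodule MyMachine do
      use StatesLanguage, data: "priv/my_machine.json"
    end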
"""
alias StatesLanguage.{AST, Graph, SchemaLoader}
alias Xema.Validator
require Logger
@schema :states_language
|> :code.priv_dir()
|> Path.join("schemas/states_language.json")
|> File.read!()
|> Jason.decode!()
|> JsonXema.new(loader: SchemaLoader)
@typedoc """
All callbacks are expected to return a tuple containing an updated (if necessary) `t:StatesLanguage.t/0`,
and a list of actions to perform after the callback has executed.
A default callback that doesn't need to do anything would simply return `{:ok, data, []}`.
"""
@type callback_result :: {:ok, t(), [:gen_statem.action()] | :gen_statem.action() | []}
@typedoc """
When using the "Parallel" or "Map" types, the children processes must `send/2` a message to the parent process of this type. The `child_process` pid is the `t:pid/0` of the child spawned by the parent state machine, generally the same as calling `self/0` in the child process itself.
"""
@type task_processed :: {:task_processed, result :: any(), child_process :: pid()}
@doc """
Called when a Choice or Task state is transitioned to.
## Arguments
- resource: The value of the `Resource` field for this state
- params: the data after applying any JSONPath selectors to our data attribute
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_resource(
resource :: String.t(),
params :: term(),
state :: String.t(),
data :: t()
) :: callback_result()
@doc """
Called when something has sent an event to our process.
## Arguments
- event: the event that was sent to us
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_info(event :: term(), state :: String.t(), data :: t()) :: callback_result()
@doc """
Called when a transition event has been received, but before we transition to the next state.
## Arguments
- event: The transition event received
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_transition(event :: term(), state :: String.t(), data :: t()) ::
callback_result()
@doc """
Called when we enter a new state, but before any additional actions have occurred.
## Arguments
- old_state: The previous state we were in
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_enter(old_state :: String.t(), state :: String.t(), data :: t()) ::
callback_result()
@doc """
Called when a call event has been received. It is up to your implementation to return `{:reply, from, result}` to send the result back to the caller.
## Arguments
- event: the payload sent with the call
- from: used to reply to the caller
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_call(
event :: term(),
from :: GenServer.from(),
state :: String.t(),
data :: t()
) :: callback_result()
@doc """
Called when a cast event has been received.
## Arguments
- event: the payload sent with the cast
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_cast(event :: term(), state :: String.t(), data :: t()) ::
callback_result()
@doc """
Called when a process is ending. This can be because it was killed or a state indicated it's the end of the state machine. Used for cleanup.
## Arguments
- reason: the reason we are ending, e.g. `:normal`, `:kill`, etc.
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_termination(reason :: term(), state :: String.t(), data :: t()) :: :ok
@doc """
Called when a [Generic Timeout](http://erlang.org/doc/man/gen_statem.html#type-generic_timeout) is triggered.
## Arguments
- event: The event set for the timeout
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_generic_timeout(event :: term(), state :: String.t(), data :: t()) ::
callback_result()
@doc """
Called when a [State Timeout](http://erlang.org/doc/man/gen_statem.html#type-state_timeout) is triggered.
## Arguments
- event: The event set for the timeout
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_state_timeout(event :: term(), state :: String.t(), data :: t()) ::
callback_result()
@doc """
Called when a [Event Timeout](http://erlang.org/doc/man/gen_statem.html#type-event_timeout) is triggered.
## Arguments
- event: The event set for the timeout
- state: the current state
- data: the full data of the `:gen_statem` process
"""
@callback handle_event_timeout(event :: term(), state :: String.t(), data :: t()) ::
callback_result()
@optional_callbacks handle_resource: 4,
handle_call: 4,
handle_cast: 3,
handle_info: 3,
handle_enter: 3,
handle_transition: 3,
handle_termination: 3,
handle_generic_timeout: 3,
handle_state_timeout: 3,
handle_event_timeout: 3
defmodule Edge do
@moduledoc """
Represents a transition from one state to another
"""
defstruct [:source, :target, :event]
@type t :: %__MODULE__{
source: String.t(),
target: String.t(),
event: Macro.input()
}
end
defmodule Choice do
@moduledoc """
Represents a choice option in a Choice type state
"""
defstruct [:string_equals, :next]
@type t :: %__MODULE__{
string_equals: Macro.input(),
next: String.t()
}
end
defmodule Catch do
@moduledoc """
Represents a catch error in a Task type state
"""
defstruct [:error_equals, :next]
@type t :: %__MODULE__{
error_equals: [Macro.input()],
next: String.t()
}
end
defmodule Node do
@moduledoc """
Represents any state in our graph
"""
defstruct [
:type,
:default,
:next,
:iterator,
:items_path,
:catch,
:choices,
:branches,
:seconds,
:timestamp,
:seconds_path,
:timestamp_path,
:resource,
:parameters,
:input_path,
:resource_path,
:output_path,
:event,
:is_end
]
@type t :: %__MODULE__{
type: String.t(),
default: String.t() | nil,
next: String.t() | nil,
iterator: String.t() | nil,
items_path: String.t() | nil,
branches: [String.t()] | nil,
seconds: float() | integer() | nil,
timestamp: DateTime.t() | nil,
seconds_path: String.t() | nil,
timestamp_path: String.t() | nil,
catch: [Catch.t()] | [],
choices: [Choice.t()] | [],
resource: String.t() | nil,
parameters: map(),
input_path: String.t() | nil,
resource_path: String.t() | nil,
output_path: String.t() | nil,
event: [any()] | nil,
is_end: boolean()
}
end
@derive Jason.Encoder
defstruct [:_parent, :_parent_data, :data, _tasks: []]
@typedoc """
Passed to all processes as the data for the [:gen_statem.data](http://erlang.org/doc/man/gen_statem.html#type-data).
- `_parent` is used to reference a parent process within a `Map` or `Parallel` state type
- `_parent_data` is the data from the parent
- `data` is the data passed to this process on startup
- `_tasks` is used to keep track of child processes for `Map` and `Parallel` state types
"""
@type t :: %__MODULE__{_parent: pid(), _parent_data: any(), data: any(), _tasks: list()}
defmacro __using__(data: data) when is_map(data) do
case validate(data) do
{:ok, data} -> do_start(data)
{:error, _} = error -> throw(error)
end
end
defmacro __using__(data: data) when is_binary(data) do
data
|> File.read!()
|> Jason.decode!()
|> validate()
|> case do
{:ok, data} -> do_start(data)
{:error, _} = error -> throw(error)
end
end
@spec do_start(map()) :: [any()]
defp do_start(data, path \\ nil) when is_map(data) do
%Graph{} = graph = Graph.serialize(data)
AST.external_resource(path) ++
AST.default(graph) ++ AST.start(graph) ++ AST.graph(graph) ++ AST.catch_all()
end
@doc """
Validates our graph data against the included JSON Schema. This is run automatically at compilation time.
"""
@spec validate(map()) :: {:ok, map()} | {:error, Validator.result()}
def validate(data) do
case JsonXema.validate(@schema, data) do
:ok ->
{:ok, data}
error ->
{:error, error}
end
end
end
# end of lib/states_language.ex
defmodule Raxx.BasicAuth do
@moduledoc """
Add protection to a Raxx application using Basic Authentication.
*Basic Authentication is specified in RFC 7617 (which obsoletes RFC 2617).*
This module provides helpers for submitting and verifying credentials.
### Submitting credentials
A client (or test case) can add user credentials to a request with `set_credentials/3`
iex> request(:GET, "/")
...> |> set_credentials("<PASSWORD>", "<PASSWORD>")
...> |> get_header("authorization")
"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
### Verifying fixed credentials
A server can authenticate a request against a fixed set of credentials using `authenticate/3`
iex> request(:GET, "/")
...> |> set_header("authorization", "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
...> |> authenticate("Aladdin", "open sesame")
{:ok, "Aladdin"}
iex> request(:GET, "/")
...> |> set_header("authorization", "Basic SmFmYXI6bXkgbGFtcA==")
...> |> authenticate("Aladdin", "open sesame")
{:error, :invalid_credentials}
### Verifying multiple credentials
To authenticate a request when credentials are not fixed, define an application authenticate function.
This function can authenticate a user in any way it wants. e.g. by fetching the user from the database.
Extracting credentials from a received request can be done using `get_credentials/1`.
defmodule MyApp.Admin do
def authenticate(request) do
with {:ok, {user_id, password}} <- get_credentials(request) do
# Find user and verify credentials
{:ok, user}
end
end
end
NOTE: when comparing saved passwords with submitted passwords use `secure_compare/2` to protect against timing attacks.
### Challenging unauthorized requests
If a request is submitted with absent or invalid credentials a server should inform the client that it is accessing a resource protected with basic authentication.
Use `unauthorized/1` to create such a response.
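For example (a sketch; the realm value is arbitrary):

    unauthorized(realm: "Admin Area")
    |> get_header("www-authenticate")
    # => Basic realm="Admin Area", charset="UTF-8"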
## NOTE
- The Basic authentication scheme is not a secure method of user authentication
https://tools.ietf.org/html/rfc7617#section-4
- This module will be extracted to a separate project before the release of raxx 1.0
"""
import Raxx
@authentication_header "authorization"
@default_realm "Site"
@default_charset "UTF-8"
@doc """
Add a users credentials to a request to authenticate it.
NOTE:
1. The user-id and password MUST NOT contain any control characters
2. The user-id must not contain a `:`
"""
def set_credentials(request, user_id, password) do
request
|> set_header(@authentication_header, authentication_header(user_id, password))
end
@doc """
Extract credentials submitted using the Basic authentication scheme.
If credentials were not provided or are malformed, an error is returned.
"""
def get_credentials(request) do
case get_header(request, @authentication_header) do
nil ->
{:error, :not_authorization_header}
authorization ->
case String.split(authorization, " ", parts: 2) do
["Basic", encoded] ->
case Base.decode64(encoded) do
{:ok, user_pass} ->
case String.split(user_pass, ":", parts: 2) do
[user_id, password] ->
{:ok, {user_id, password}}
_ ->
{:error, :invalid_user_pass}
end
:error ->
{:error, :unable_to_decode_user_pass}
end
[_unknown, _] ->
{:error, :unknown_authentication_method}
_ ->
{:error, :invalid_authentication_header}
end
end
end
@doc """
Authenticate a request against fixed credentials.
"""
def authenticate(request, access_user_id, access_password) do
with {:ok, {user_id, password}} <- get_credentials(request) do
if secure_compare(user_id, access_user_id) && secure_compare(password, access_password) do
{:ok, access_user_id}
else
{:error, :invalid_credentials}
end
end
end
@doc """
Generate a response to a request that failed to authenticate.
The response will contain a challenge for the client in the `www-authenticate` header.
Use an unauthorized response to prompt a client into providing basic authentication credentials.
## Options
- **realm:** describe the protected area. default `"Site"`
- **charset:** default `"UTF-8"`
### Notes
- The only valid charset is `UTF-8`; https://tools.ietf.org/html/rfc7617#section-2.1.
A `nil` can be provided to this function to omit the parameter.
- Validation should be added for the parameter values to ensure they only accept valid values.
"""
def unauthorized(options) do
realm = Keyword.get(options, :realm, @default_realm)
charset = Keyword.get(options, :charset, @default_charset)
response(:unauthorized)
|> set_header("www-authenticate", challenge_header(realm, charset))
|> set_body("401 Unauthorized")
end
defp authentication_header(user_id, password) do
"Basic " <> Base.encode64(user_pass(user_id, password))
end
defp challenge_header(realm, nil) do
"Basic realm=\"#{realm}\""
end
defp challenge_header(realm, charset) do
"Basic realm=\"#{realm}\", charset=\"#{charset}\""
end
defp user_pass(user_id, password) do
:ok =
case :binary.match(user_id, [":"]) do
{_, _} ->
raise "a user-id containing a colon character is invalid"
:nomatch ->
:ok
end
user_id <> ":" <> password
end
@doc """
Compares the two binaries in constant-time to avoid timing attacks.
See: http://codahale.com/a-lesson-in-timing-attacks/
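
For example:

    iex> secure_compare("open sesame", "open sesame")
    true

    iex> secure_compare("open sesame", "open says me")
    false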
"""
def secure_compare(left, right) do
if byte_size(left) == byte_size(right) do
secure_compare(left, right, 0) == 0
else
false
end
end
defp secure_compare(<<x, left::binary>>, <<y, right::binary>>, acc) do
import Bitwise
xorred = bxor(x, y)
secure_compare(left, right, bor(acc, xorred))
end
defp secure_compare(<<>>, <<>>, acc) do
acc
end
end
# end of lib/raxx/basic_auth.ex
defmodule Recurly do
@moduledoc """
## Getting Started
This documentation is a work in progress. Please consider contributing.
For now, Read each section on this page.
After reading these sections, these modules may be a good place to start digging.
- `Recurly.Resource` module responsible for creating, updating, deleting resources
- `Recurly.Association` a struct for fetching associations to resources
- `Recurly.Account` a good example of a resource with working examples
## Resources
Resources are structs that represent a server object at a given point in time. Like
all elixir variables, they are immutable values. A resource consists of 3 parts:
- Fields
- Actions
- Associations
```
subscription
# %Recurly.Subscription{
# __meta__: %{
# actions: %{
# cancel: [:put, "https://api.recurly.com/v2/subscriptions/37e3a404f2c9b0edde8bbf4c7aa6a561/cancel"],
# notes: [:put,"https://api.recurly.com/v2/subscriptions/37e3a404f2c9b0edde8bbf4c7aa6a561/notes"],
# postpone: [:put,"https://api.recurly.com/v2/subscriptions/37e3a404f2c9b0edde8bbf4c7aa6a561/postpone"],
# terminate: [:put,"https://api.recurly.com/v2/subscriptions/37e3a404f2c9b0edde8bbf4c7aa6a561/terminate"]
# },
# href: "https://api.recurly.com/v2/subscriptions/37e3a404f2c9b0edde8bbf4c7aa6a561"
# },
# account: %Recurly.Association{href: "https://api.recurly.com/v2/accounts/my_account_code",
# paginate: false, resource_type: Recurly.Account},
# currency: "USD",
# plan: %Recurly.Plan{
# __meta__: %{
# href: "https://api.recurly.com/v2/plans/myplancode"
# },
# accounting_code: nil,
# cancel_url: nil,
# display_quantity: nil,
# name: "A plan",
# plan_code: "myplancode",
# plan_interval_length: nil,
# plan_interval_unit: nil,
# revenue_schedule_type: nil,
# setup_fee_accounting_code: nil,
# setup_fee_in_cents: nil,
# setup_fee_revenue_schedule_type: nil,
# success_url: nil,
# tax_code: nil,
# tax_exempt: nil,
# total_billing_cycles: nil,
# trial_interval_length: nil,
# trial_interval_unit: nil,
# unit_amount_in_cents: nil,
# unit_name: nil
# },
# plan_code: nil,
# quantity: 1,
# state: "active",
# subscription_add_ons: [],
# tax_in_cents: nil,
# tax_rate: nil,
# tax_region: nil,
# tax_type: nil,
# unit_amount_in_cents: 100,
# uuid: "37e3a404f2c9b0edde8bbf4c7aa6a561"
# }
```
The schema for each resource is defined in a module with the resource's name.
As an example: `Recurly.Account`.
Every resource has a reference to its module and thus its schema. But how schemas
work internally can largely be ignored by the programmer.
```
Recurly.XML.Schema.get(Recurly.Account)
# %Recurly.XML.Schema{fields: [%Recurly.XML.Field{name: :accept_language,
# opts: [], type: :string},
# %Recurly.XML.Field{name: :account_code, opts: [], type: :string},
# %Recurly.XML.Field{name: :address, opts: [], type: Recurly.Address},
# %Recurly.XML.Field{name: :billing_info, opts: [], type: Recurly.BillingInfo},
# %Recurly.XML.Field{name: :cc_emails, opts: [], type: :string},
# %Recurly.XML.Field{name: :company_name, opts: [], type: :string},
# %Recurly.XML.Field{name: :email, opts: [], type: :string},
# %Recurly.XML.Field{name: :entity_use_code, opts: [], type: :string},
# %Recurly.XML.Field{name: :first_name, opts: [], type: :string},
# %Recurly.XML.Field{name: :last_name, opts: [], type: :string},
# %Recurly.XML.Field{name: :state, opts: [read_only: true], type: :string},
# %Recurly.XML.Field{name: :tax_exempt, opts: [], type: :boolean},
# %Recurly.XML.Field{name: :transactions, opts: [paginate: true],
# type: Recurly.Transaction},
# %Recurly.XML.Field{name: :username, opts: [], type: :string},
# %Recurly.XML.Field{name: :vat_number, opts: [], type: :string}],
# resource_type: Recurly.Account}
```
## Changesets
At no point should you need to modify the resource structs in memory. In order to create
or modify a resource, you must create a changeset and send that to the server.
A changeset is represented as a nested [`Keyword list`](http://elixir-lang.org/docs/stable/elixir/Keyword.html).
You must use the changeset to create or modify data. Consider the simplest case:
```
{:ok, account} = Recurly.Account.create(account_code: "myaccountcode")
# {:ok,
# %Recurly.Account{__meta__: %{actions: %{},
# href: "https://subdomain.recurly.com/v2/accounts/myaccountcode"},
# account_code: "myaccountcode", billing_info: nil, cc_emails: nil,
# company_name: nil, email: nil, first_name: nil, last_name: nil,
# state: "active", tax_exempt: nil, username: nil, vat_number: nil}}
```
The `Recurly.Account.create/1` function takes only a changeset as an argument and returns the created account.
Only the `account_code` is needed in this case.
Consider a more complicated case with nested data:
```elixir
changeset_data = [
plan_code: "myplancode",
currency: "USD",
account: [
account_code: "myotheraccountcode",
billing_info: [
address1: " 400 Alabama St",
city: " San Francisco",
state: "CA",
zip: "94110",
number: "4111-1111-1111-1111",
verification_value: "123",
first_name: "Benjamin",
last_name: "Person",
month: "05",
year: 2019,
country: "US"
]
],
subscription_add_ons: [
subscription_add_on: [add_on_code: "myaddon", quantity: 1]
]
]
{:ok, subscription} = Recurly.Subscription.create(changeset_data)
# {:ok,
# %Recurly.Subscription{__meta__: %{actions: %{cancel: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/cancel"],
# notes: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/notes"],
# postpone: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/postpone"],
# terminate: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/terminate"]},
# href: "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626"},
# account: %Recurly.Association{href: "https://subdomain.recurly.com/v2/accounts/nsnsnsns",
# paginate: false, resource_type: Recurly.Account}, currency: "USD",
# plan: %Recurly.Plan{__meta__: %{href: "https://subdomain.recurly.com/v2/plans/myplancode"},
# name: "A plan", plan_code: "myplancode", setup_fee_in_cents: nil,
# unit_amount_in_cents: nil}, plan_code: nil, quantity: 1, state: "active",
# subscription_add_ons: [%Recurly.SubscriptionAddOn{__meta__: %{href: nil},
# add_on_code: "myaddon", quantity: 1,
# unit_amount_in_cents: %Recurly.Money{AUD: nil, BRL: nil, CAD: nil, CHF: nil,
# CZK: nil, DKK: nil, EUR: nil, GBP: nil, HUF: nil, ILS: nil, INR: nil,
# MXN: nil, NOK: nil, NZD: nil, PLN: nil, SEK: nil, SGD: nil, USD: nil,
# ZAR: nil, __meta__: %{href: nil}}}],
# tax_in_cents: nil, tax_rate: nil, tax_region: nil,
# tax_type: nil, unit_amount_in_cents: 100,
# uuid: "37e068b0bc916763655db141b194e626"}}
```
As you can see, the `billing_info` is nested in the `account` which is nested in the `subscription`. This is nearly a 1:1 mapping of
the [XML request that is generated](https://dev.recurly.com/docs/create-subscription). Keep in mind that you must use
keyword lists and never maps for changesets as maps do not support duplicate keys.
If any of the keys in your changeset data aren't recognized, you will get an `ArgumentError`.
This will prevent you from misspelling or sending incorrect data.
## Actions
Actions are modifying actions that can be performed on a resource outside of the normal CRUD.
Consider the subscription from above. It has a map of actions that can be performed:
```
subscription
# %Recurly.Subscription{__meta__: %{actions: %{cancel: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/cancel"],
# notes: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/notes"],
# postpone: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/postpone"],
# terminate: [:put,
# "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626/terminate"]},
# href: "https://subdomain.recurly.com/v2/subscriptions/37e068b0bc916763655db141b194e626"},
# ......
```
The `Recurly.Subscription` module allows us to perform these actions with helper functions:
```
{:ok, subscription} = Recurly.Subscription.cancel(subscription)
```
Or we can use `Recurly.Resource.perform_action/2` with an atom and call the action directly:
```
{:ok, subscription} = Recurly.Resource.perform_action(subscription, :cancel)
```
## Associations
See `Recurly.Association` for documentation on how to use associations.
## Resource Streams
Instead of dealing with pagination, you can fetch resources as a stream
(following elixir's `Stream` protocol).
See `Recurly.Resource.stream/3` for documentation on how to use streams.
## Error Handling
All network bound calls which may result in an error follow a similar API:
```
case Recurly.Account.create(account_code: "myaccountcode") do
{:ok, account} -> # *account* is the account created on the server
{:error, error} -> # *error* is an error struct
end
```
The `error` value may be one of the following types:
- `Recurly.NotFoundError`
- `Recurly.ValidationError`
- `Recurly.APIError`
A benefit of this API is that it allows fine grained pattern matching against errors cases.
As an example, consider that you want to detect the case in which the account code is taken:
```
alias Recurly.ValidationError
case Recurly.Account.create(account_code: "myaccountcode") do
{:ok, account} ->
# account code was not taken and the account was created
{:error, %ValidationError{errors: [%{symbol: :taken}]}} ->
# the account code was taken
{:error, error} ->
# a fallback case
end
```
Pattern matching function arguments is also a nice way to exploit this property:
```
defmodule MyModule do
def fetch(account_code) do
account_code
|> Recurly.Account.find
|> handle_response
end
def handle_response({:ok, account}) do
# account code was not used and the account was created
end
def handle_response({:error, %Recurly.NotFoundError{description: description}}) do
# description => Couldn't find Account with account_code = nonexistentcode
end
def handle_response(error) do
# a fallback case
end
end
MyModule.fetch("nonexistentcode")
```
It's a good idea to always replace the value with any updated state from the server so you never have stale state lying around:
```
{:ok, account} = Recurly.Account.create(account_code: "myaccountcode")
{:ok, account} = Recurly.Account.update(account, first_name: "Benjamin")
```
If you need to remove an attribute you can send nils to the server:
```
{:ok, account} = Recurly.Account.update(account, [
username: nil,
first_name: "Benjamin"
])
```
This generates the XML:
```xml
<acccount>
<username nil="nil"/>
<first_name>Benjamin</first_name>
</acccount>
```
"""
@doc false
def api_version, do: "2.4"
@doc false
def client_version do
{:ok, vsn} = :application.get_key(:recurly, :vsn)
List.to_string(vsn)
end
@doc false
def user_agent, do: "Recurly/Elixir/#{client_version()}"
end
# end of lib/recurly.ex
defmodule Juvet.Bot do
@moduledoc """
Bot is a macro interface for working with a bot that is connected to chat services.
## Example
```
defmodule MyBot do
use Juvet.Bot
end
```
"""
defmacro __using__(_) do
quote do
use GenServer
use Juvet.ReceiverTarget
# Client API
@doc """
Starts a `Juvet.Bot` process linked to the current process.
## Options
* `:bot_supervisor` - The `pid` of the supervisor that this bot process belongs to
"""
def start_link(state, options \\ []) do
GenServer.start_link(__MODULE__, state, options)
end
@doc """
Adds a receiver to this bot which adds another source of messages
for this bot to receive messages from.
The receiver is another process and this function returns the pid
of the new receiver.
## Example
```
{:ok, pid} = MyBot.add_receiver(bot, :slack_rtm, %{
token: "MY_TOKEN"
})
```
"""
def add_receiver(pid, type, parameters) do
GenServer.call(pid, {:add_receiver, type, parameters})
end
@doc """
Adds a Slack platform to the bot state
## Example
```
MyBot.connect(bot, :slack, %{token: "MY_TOKEN"})
```
"""
def connect(pid, :slack, parameters = %{team_id: _team_id}) do
GenServer.cast(pid, {:connect, :slack, parameters})
end
@doc """
Returns a List of messages from the bot for all the platforms.
## Example
```
messages = MyBot.get_messages(bot)
```
"""
def get_messages(pid), do: GenServer.call(pid, :get_messages)
@doc """
Returns the current state for the bot.
## Example
```
state = MyBot.get_state(bot)
```
"""
def get_state(pid), do: GenServer.call(pid, :get_state)
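@doc """
Registers a user installation with the bot.
Parses a team and user from the given auth parameters, stores them on the
bot state under the platform, and returns `{:ok, user, team}`.
"""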
def user_install(pid, platform, parameters) do
GenServer.call(pid, {:user_install, platform, parameters})
end
# Server Callbacks
@doc false
def init(state) do
{:ok, struct(Juvet.BotState, state)}
end
@doc false
def handle_call({:add_receiver, type, parameters}, _from, state) do
result =
generate_receiver(type).start(
state.bot_supervisor,
self(),
parameters
)
{:reply, result, state}
end
@doc false
def handle_call(:get_messages, _from, state) do
{:reply, Juvet.BotState.get_messages(state), state}
end
@doc false
def handle_call(:get_state, _from, state) do
{:reply, state, state}
end
@doc false
def handle_call({:user_install, platform, parameters}, _from, state) do
team = Map.from_struct(Juvet.BotState.Team.from_auth(parameters))
user = Map.from_struct(Juvet.BotState.User.from_auth(parameters))
{state, _platform, team, user} =
Juvet.BotState.put_platform(state, platform)
|> Juvet.BotState.put_team(team)
|> Juvet.BotState.put_user(user)
{:reply, {:ok, user, team}, state}
end
@doc false
def handle_cast({:connect, :slack, parameters}, state) do
{state, _platform, _message} =
Juvet.BotState.put_platform(state, :slack)
|> Juvet.BotState.put_message(parameters)
{:noreply, state}
end
@doc false
def handle_info({:connected, platform, message}, state) do
{:noreply, put_message(state, platform, message)}
end
@doc false
def handle_info({:new_message, platform, message}, state) do
{:noreply, put_message(state, platform, message)}
end
@doc false
defp put_message(state, platform_name, message) do
platform = %Juvet.BotState.Platform{name: platform_name}
{state, _platform, _message} =
Juvet.BotState.put_message({state, platform}, message)
state
end
end
end
end
# end of lib/juvet/bot.ex
defmodule Geometry.WKB do
@moduledoc false
alias Geometry.Hex
alias Geometry.WKB.Parser
@compile {:inline, byte_order: 2}
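# Emits the WKB byte-order marker: XDR is big endian, NDR is little endian,
# rendered as a hex digit pair (:hex mode) or a raw byte (:binary mode).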
@spec byte_order(Geometry.endian(), Geometry.mode()) :: <<_::8>>
def byte_order(:xdr, :hex), do: "00"
def byte_order(:ndr, :hex), do: "01"
def byte_order(:xdr, :binary), do: <<0::8>>
def byte_order(:ndr, :binary), do: <<1::8>>
@compile {:inline, srid: 3}
@spec srid(non_neg_integer() | nil, Geometry.endian(), Geometry.mode()) :: binary()
def srid(nil, _endian, _mode), do: <<>>
def srid(int, endian, :hex), do: Hex.to_integer_string(int, endian)
def srid(int, :xdr, :binary), do: <<int::big-integer-size(32)>>
def srid(int, :ndr, :binary), do: <<int::little-integer-size(32)>>
@spec to_geometry(wkb, mode, module()) ::
{:ok, geometry | {geometry, srid}} | {:error, message, rest, offset}
when wkb: Geometry.wkb(),
mode: Geometry.mode(),
geometry: Geometry.t(),
srid: Geometry.srid(),
message: String.t(),
rest: binary(),
offset: non_neg_integer()
def to_geometry(wkb, mode, module) do
case to_geometry(wkb, mode) do
{:ok, {geometry, _srid}} = result ->
with :ok <- check_geometry(geometry, module), do: result
{:ok, geometry} = result ->
with :ok <- check_geometry(geometry, module), do: result
error ->
error
end
end
defdelegate to_geometry(wkb, mode), to: Parser, as: :parse
@compile {:inline, length: 3}
@spec length(list | MapSet.t(), Geometry.endian(), Geometry.mode()) :: binary()
def length(list, endian, :hex) when is_list(list) do
list |> length() |> Hex.to_integer_string(endian)
end
def length(set, endian, :hex) do
set |> MapSet.size() |> Hex.to_integer_string(endian)
end
def length(list, endian, :binary) when is_list(list) do
length = length(list)
case endian do
:ndr -> <<length::little-integer-size(32)>>
:xdr -> <<length::big-integer-size(32)>>
end
end
def length(set, endian, :binary) do
size = MapSet.size(set)
case endian do
:ndr -> <<size::little-integer-size(32)>>
:xdr -> <<size::big-integer-size(32)>>
end
end
defp check_geometry(%geometry{}, geometry), do: :ok
defp check_geometry(%{__struct__: got}, expected),
do: {:error, %{expected: expected, got: got}}
end
# end of lib/geometry/wkb.ex
defmodule Mix.Tasks.Comb.Benchmark do
use Mix.Task
def compare(description, function, args, n \\ 5) do
# these functions return either an enum or integer
[t0, t] = for module <- [Comb.Naive, Comb] do
{time, _} = :timer.tc fn ->
for _ <- 1..n do
case apply(module, function, args) do
n when is_integer(n) ->
nil
enum ->
Stream.run(enum)
end
end
end
time
end
percentage = Float.to_string(100 * t / t0, decimals: 2) |> String.rjust(7)
IO.puts "#{percentage}%: #{function} - #{description}"
end
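# Example ad-hoc call (hypothetical): compare("7 element range", :permutations, [1..7])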
@repeated6 [1, 1, 1, 2, 2, 3]
@repeated10 [1, 1, 1, 2, 2, 3, 5, 6, 7, 8]
@shortdoc "Compare speedup of Comb functions relative to naive impl"
def run(_) do
IO.puts ""
IO.puts "[100% - like naive implementation, smaller is faster]"
IO.puts ""
[
{"2+2 element ranges",
:cartesian_product, [1..2, 1..2]},
{"5+5 element ranges",
:cartesian_product, [1..5, 1..5]},
:space,
{"4-2 elements",
:combinations, [1..6, 2]},
{"6-2 elements",
:combinations, [1..6, 2]},
{"40-2 elements",
:combinations, [1..40, 2]},
{"6-2 with repeated elements",
:combinations, [[1, 1, 1, 2, 2, 3], 2]},
:space,
{"6-2 elements",
:count_combinations, [1..6, 2]},
{"6-2 with repeated elements",
:count_combinations, [@repeated6, 2]},
:space,
{"6-2, n=10",
:nth_combination, [1..6, 2, 10]},
{"10-2 with repeated elements, n=10",
:nth_combination, [@repeated10, 2, 10]},
:space,
{"4 element range",
:partitions, [1..4]},
{"6 element with repetitions",
:partitions, [@repeated6]},
:space,
{"7 element range",
:permutations, [1..7]},
{"6 element with repetitions",
:permutations, [@repeated6]},
:space,
{"6 element range",
:count_permutations, [1..6]},
{"6 element with repetitions",
:count_permutations, [@repeated6]},
:space,
{"6 element range, drop 100",
:drop_permutations, [1..6, 100]},
{"6 element range with repetitions, drop 100",
:drop_permutations, [@repeated6, 100]},
:space,
{"6 element range, n=100",
:nth_permutation, [1..6, 100]},
{"6 element range with repetitions, n=20",
:nth_permutation, [@repeated6, 20]},
:space,
{"6 element range",
:permutation_index, [[1, 2, 3, 6, 4, 5]]},
{"6 element range with repetition",
:permutation_index, [[1, 2, 1, 6, 1, 5]]},
:space,
{"6-3 element range",
:selections, [1..6, 3]},
:space,
{"6 element range",
:subsets, [1..6]},
:space,
{"6 element range",
:count_subsets, [1..6]},
:end
]
|> Enum.each(fn
{d, f, a} ->
compare d, f, a
a when a in [:space, :end] ->
IO.puts ""
end)
end
end
# end of lib/mix/tasks/comb/benchmark.ex
defmodule GenStage do
@moduledoc ~S"""
Stages are data-exchange steps that send and/or receive data
from other stages.
When a stage sends data, it acts as a producer. When it receives
data, it acts as a consumer. Stages may take both producer and
consumer roles at once.
## Stage types
Besides taking both producer and consumer roles, a stage may be
called "source" if it only produces items or called "sink" if it
only consumes items.
For example, imagine the stages below where A sends data to B
that sends data to C:
[A] -> [B] -> [C]
we conclude that:
* A is only a producer (and therefore a source)
* B is both producer and consumer
* C is only a consumer (and therefore a sink)
As we will see in the upcoming Examples section, we must
specify the type of the stage when we implement each of them.
To start the flow of events, we subscribe consumers to
producers. Once the communication channel between them is
established, consumers will ask the producers for events.
We typically say the consumer is sending demand upstream.
Once demand arrives, the producer will emit items, never
emitting more items than the consumer asked for. This provides
a back-pressure mechanism.
A consumer may have multiple producers and a producer may have
multiple consumers. When a consumer asks for data, each producer
is handled separately, with its own demand. When a producer
receives demand and sends data to multiple consumers, the demand
is tracked and the events are sent by a dispatcher. This allows
producers to send data using different "strategies". See
`GenStage.Dispatcher` for more information.
Many developers tend to create layers of stages, such as A, B and
C, for achieving concurrency. If all you want is concurrency, using
processes is enough. They are the primitive for achieving concurrency
in Elixir and the VM does all of the work of multiplexing them.
Instead, layers in GenStage must be created when there is a need for
back-pressure or to route the data in different ways.
For example, if you need the data to go over multiple steps but
without a need for back-pressure or without a need to break the
data apart, do not design it as such:
[Producer] -> [Step 1] -> [Step 2] -> [Step 3]
Instead it is better to design it as:
[Consumer]
/
[Producer]-<-[Consumer]
\
[Consumer]
where "Consumer" are multiple processes that subscribe to the same
"Producer" and run exactly the same code, with all of transformation
steps from above. In such scenarios, you may even find the
`Task.async_stream/2` function that ships as part of Elixir to be
enough or achieve the flexibility you need with the `ConsumerSupervisor`
functionality that is included as part of `GenStage`.
## Example
Let's define the simple pipeline below:
[A] -> [B] -> [C]
where A is a producer that will emit items starting from 0,
B is a producer-consumer that will receive those items and
multiply them by a given number and C will receive those events
and print them to the terminal.
Let's start with A. Since A is a producer, its main
responsibility is to receive demand and generate events.
Those events may be in memory or an external queue system.
For simplicity, let's implement a simple counter starting
from a given value of `counter` received on `init/1`:
defmodule A do
use GenStage
def start_link(number) do
GenStage.start_link(A, number)
end
def init(counter) do
{:producer, counter}
end
def handle_demand(demand, counter) when demand > 0 do
# If the counter is 3 and we ask for 2 items, we will
# emit the items 3 and 4, and set the state to 5.
events = Enum.to_list(counter..counter+demand-1)
{:noreply, events, counter + demand}
end
end
B is a producer-consumer. This means it does not explicitly
handle the demand because the demand is always forwarded to
its producer. Once A receives the demand from B, it will send
events to B which will be transformed by B as desired. In
our case, B will receive events and multiply them by a number
given on initialization and stored as the state:
defmodule B do
use GenStage
def start_link(number) do
GenStage.start_link(B, number)
end
def init(number) do
{:producer_consumer, number}
end
def handle_events(events, _from, number) do
events = Enum.map(events, & &1 * number)
{:noreply, events, number}
end
end
C will finally receive those events and print them every second
to the terminal:
defmodule C do
use GenStage
def start_link() do
GenStage.start_link(C, :ok)
end
def init(:ok) do
{:consumer, :the_state_does_not_matter}
end
def handle_events(events, _from, state) do
# Wait for a second.
Process.sleep(1000)
# Inspect the events.
IO.inspect(events)
# We are a consumer, so we would never emit items.
{:noreply, [], state}
end
end
Now we can start and connect them:
{:ok, a} = A.start_link(0) # starting from zero
{:ok, b} = B.start_link(2) # multiply by 2
{:ok, c} = C.start_link() # state does not matter
GenStage.sync_subscribe(c, to: b)
GenStage.sync_subscribe(b, to: a)
Typically, we subscribe from bottom to top. Since A will
start producing items only when B connects to it, we want this
subscription to happen when the whole pipeline is ready. After
you subscribe all of them, demand will start flowing upstream and
events downstream.
When implementing consumers, we often set the `:max_demand` and
`:min_demand` on subscription. The `:max_demand` specifies the
maximum amount of events that must be in flow while the `:min_demand`
specifies the minimum threshold to trigger for more demand. For
example, if `:max_demand` is 1000 and `:min_demand` is 750,
the consumer will ask for 1000 events initially and ask for more
only after it receives at least 250.
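For instance, the subscription above could bound demand like this (a
sketch using the options just described):

    GenStage.sync_subscribe(c, to: b, max_demand: 1000, min_demand: 750)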
In the example above, B is a `:producer_consumer` and therefore
acts as a buffer. Getting the proper demand values in B is
important: making the buffer too small may make the whole pipeline
slower, making the buffer too big may unnecessarily consume
memory.
When such values are applied to the stages above, it is easy
to see the producer works in batches. The producer A ends up
emitting batches of 50 items which will take approximately
50 seconds to be consumed by C, which will then request another
batch of 50 items.
## `init` and `:subscribe_to`
In the example above, we have started the processes A, B, and C
independently and subscribed them later on. But most often it is
simpler to subscribe a consumer to its producer on its `c:init/1`
callback. This way, if the consumer crashes, restarting the consumer
will automatically re-invoke its `c:init/1` callback and resubscribe
it to the producer.
This approach works as long as the producer can be referenced when
the consumer starts, such as by name (for a named process) or by pid
for a running unnamed process. For example, assuming the processes
`A` and `B` are started as follows:
# Let's call the stage in module A as A
GenStage.start_link(A, 0, name: A)
# Let's call the stage in module B as B
GenStage.start_link(B, 2, name: B)
# No need to name consumers as they won't be subscribed to
GenStage.start_link(C, :ok)
We can now change the `c:init/1` callback for C to the following:
def init(:ok) do
{:consumer, :the_state_does_not_matter, subscribe_to: [B]}
end
or:
def init(:ok) do
{:consumer, :the_state_does_not_matter, subscribe_to: [{B, options}]}
end
And we will no longer need to call `sync_subscribe/2`.
Another advantage of this approach is that it makes it straightforward
to leverage concurrency by simply starting multiple consumers that subscribe
to their producer (or producer-consumer). This can be done in the example above
by simply calling start link multiple times:
# Start 4 consumers
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
In a supervision tree, this is often done by starting multiple workers:
children = [
worker(A, [0]),
worker(B, [2]),
worker(C, []),
worker(C, []),
worker(C, []),
worker(C, [])
]
Supervisor.start_link(children, strategy: :one_for_one)
In fact, having multiple consumers is often the easiest and simplest way to
leverage concurrency in a GenStage pipeline, especially if events can
be processed out of order. For example, imagine a scenario where you
have a stream of incoming events and you need to access a number of
external services per event. Instead of building complex stages that
route events through those services, one simple mechanism to leverage
concurrency is to start a producer and N consumers and invoke the external
services directly for each event in each consumer. N is typically the
number of cores (as returned by `System.schedulers_online/0`) but can
likely be increased if the consumers are mostly waiting on IO.
Another alternative to the scenario above is to use a `ConsumerSupervisor`
for consuming the events instead of N consumers. The `ConsumerSupervisor`
will start a separate supervised process per event where the number of children
is at most `max_demand` and the average amount of children is
`(max_demand - min_demand) / 2`.
## Buffering
In many situations, producers may attempt to emit events while no consumers
have yet subscribed. Similarly, consumers may ask producers for events
that are not yet available. In such cases, it is necessary for producers
to buffer events until a consumer is available or buffer the consumer
demand until events arrive, respectively. As we will see next, buffering
events can be done automatically by `GenStage`, while buffering the demand
is a case that must be explicitly considered by developers implementing
producers.
### Buffering events
Due to the concurrent nature of Elixir software, sometimes a producer
may dispatch events without consumers to send those events to. For example,
imagine a `:consumer` B subscribes to `:producer` A. Next, the consumer B
sends demand to A, which starts producing events to satisfy the demand.
Now, if the consumer B crashes, the producer may attempt to dispatch the
now produced events but it no longer has a consumer to send those events to.
In such cases, the producer will automatically buffer the events until another
consumer subscribes.
The buffer can also be used in cases where external sources only send
events in batches larger than asked for. For example, if you are
receiving events from an external source that only sends events
in batches of 1000 and the internal demand is smaller than
that, the buffer allows you to always emit batches of 1000 events
even when the consumer has asked for less.
In all of those cases when an event cannot be sent immediately by
a producer, the event will be automatically stored and sent the next
time consumers ask for events. The size of the buffer is configured
via the `:buffer_size` option returned by `init/1` and the default
value is `10_000`. If the `buffer_size` is exceeded, an error is logged.
See the documentation for `c:init/1` for more detailed information about
the `:buffer_size` option.
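For instance, a producer could raise the limit when returning from
`c:init/1` (a sketch):

    def init(state) do
      {:producer, state, buffer_size: 50_000}
    end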
### Buffering demand
In case consumers send demand and the producer is not yet ready to
fill in the demand, producers must buffer the demand until data arrives.
As an example, let's implement a producer that broadcasts messages
to consumers. For producers, we need to consider two scenarios:
1. what if events arrive and there are no consumers?
2. what if consumers send demand and there are not enough events?
One way to implement such a broadcaster is to simply rely on the internal
buffer available in `GenStage`, dispatching events as they arrive, as explained
in the previous section:
defmodule Broadcaster do
use GenStage
@doc "Starts the broadcaster."
def start_link() do
GenStage.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc "Sends an event and returns only after the event is dispatched."
def sync_notify(event, timeout \\ 5000) do
GenStage.call(__MODULE__, {:notify, event}, timeout)
end
def init(:ok) do
{:producer, :ok, dispatcher: GenStage.BroadcastDispatcher}
end
def handle_call({:notify, event}, _from, state) do
{:reply, :ok, [event], state} # Dispatch immediately
end
def handle_demand(_demand, state) do
{:noreply, [], state} # We don't care about the demand
end
end
By always sending events as soon as they arrive, if there is any demand,
we will serve the existing demand, otherwise the event will be queued in
`GenStage`'s internal buffer. In case events are being queued and not being
consumed, a log message will be emitted when we exceed the `:buffer_size`
configuration.
While the implementation above is enough to solve the constraints above,
a more robust implementation would have tighter control over the events
and demand by tracking this data locally, leaving the `GenStage` internal
buffer only for cases where consumers crash without consuming all data.
To handle such cases, we will use a two-element tuple as the broadcaster state
where the first element is a queue and the second element is the pending
demand. When events arrive and there are no consumers, we will store the
event in the queue alongside information about the process that broadcasted
the event. When consumers send demand and there are not enough events, we will
increase the pending demand. Once we have both data and demand, we
acknowledge the process that has sent the event to the broadcaster and finally
broadcast the event downstream.
defmodule QueueBroadcaster do
use GenStage
@doc "Starts the broadcaster."
def start_link() do
GenStage.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc "Sends an event and returns only after the event is dispatched."
def sync_notify(event, timeout \\ 5000) do
GenStage.call(__MODULE__, {:notify, event}, timeout)
end
## Callbacks
def init(:ok) do
{:producer, {:queue.new, 0}, dispatcher: GenStage.BroadcastDispatcher}
end
def handle_call({:notify, event}, from, {queue, pending_demand}) do
queue = :queue.in({from, event}, queue)
dispatch_events(queue, pending_demand, [])
end
def handle_demand(incoming_demand, {queue, pending_demand}) do
dispatch_events(queue, incoming_demand + pending_demand, [])
end
defp dispatch_events(queue, 0, events) do
{:noreply, Enum.reverse(events), {queue, 0}}
end
defp dispatch_events(queue, demand, events) do
case :queue.out(queue) do
{{:value, {from, event}}, queue} ->
GenStage.reply(from, :ok)
dispatch_events(queue, demand - 1, [event | events])
{:empty, queue} ->
{:noreply, Enum.reverse(events), {queue, demand}}
end
end
end
Let's also implement a consumer that automatically subscribes to the
broadcaster on `c:init/1`. The advantage of doing so on initialization
is that, if the consumer crashes while it is supervised, the subscription
is automatically re-established when the supervisor restarts it.
defmodule Printer do
use GenStage
@doc "Starts the consumer."
def start_link() do
GenStage.start_link(__MODULE__, :ok)
end
def init(:ok) do
# Starts a permanent subscription to the broadcaster
# which will automatically start requesting items.
{:consumer, :ok, subscribe_to: [QueueBroadcaster]}
end
def handle_events(events, _from, state) do
for event <- events do
IO.inspect {self(), event}
end
{:noreply, [], state}
end
end
With the broadcaster in hand, now let's start the producer as well
as multiple consumers:
# Start the producer
QueueBroadcaster.start_link()
# Start multiple consumers
Printer.start_link()
Printer.start_link()
Printer.start_link()
Printer.start_link()
At this point, all consumers must have sent their demand which we were not
able to fulfill. Now by calling `QueueBroadcaster.sync_notify/1`, the event
shall be broadcasted to all consumers at once as we have buffered the demand
in the producer:
QueueBroadcaster.sync_notify(:hello_world)
If we had called `QueueBroadcaster.sync_notify(:hello_world)` before any
consumer was available, the event would also have been buffered in our own
queue and served only when demand had been received.
By having control over the demand and queue, the broadcaster has
full control on how to behave when there are no consumers, when the
queue grows too large, and so forth.
## Asynchronous work and `handle_subscribe`
Both `:producer_consumer` and `:consumer` stages have been designed to do
their work in the `c:handle_events/3` callback. This means that, after
`c:handle_events/3` is invoked, both `:producer_consumer` and `:consumer`
stages will immediately send demand upstream and ask for more items, as the
stage that produced the events assumes events have been fully processed by
`c:handle_events/3`.
Such default behaviour makes `:producer_consumer` and `:consumer` stages
unfeasible for doing asynchronous work. However, given `GenStage` was designed
to run with multiple consumers, it is not a problem to perform synchronous or
blocking actions inside `handle_events/3` as you can then start multiple
consumers in order to max both CPU and IO usage as necessary.
On the other hand, if you must perform some work asynchronously,
`GenStage` comes with an option that manually controls how demand
is sent upstream, avoiding the default behaviour where demand is
sent after `c:handle_events/3`. Such can be done by implementing
the `c:handle_subscribe/4` callback and returning `{:manual, state}`
instead of the default `{:automatic, state}`. Once the producer mode
is set to `:manual`, developers must use `GenStage.ask/3` to send
demand upstream when necessary.
Note that when `:max_demand` and `:min_demand` must be manually respected when
manually asking for demand through `GenStage.ask/3`.
For example, the `ConsumerSupervisor` module processes events
asynchronously by starting a process for each event and this is achieved by
manually sending demand to producers. `ConsumerSupervisor`
can be used to distribute work to a limited amount of
processes, behaving similar to a pool where a new process is
started for each event. See the `ConsumerSupervisor` docs for more
information.
Setting the demand to `:manual` in `c:handle_subscribe/4` is not
only useful for asynchronous work but also for setting up other
mechanisms for back-pressure. As an example, let's implement a
consumer that is allowed to process a limited number of events
per time interval. Those are often called rate limiters:
defmodule RateLimiter do
use GenStage
def init(_) do
# Our state will keep all producers and their pending demand
{:consumer, %{}}
end
def handle_subscribe(:producer, opts, from, producers) do
# We will only allow max_demand events every 5000 milliseconds
pending = opts[:max_demand] || 1000
interval = opts[:interval] || 5000
# Register the producer in the state
producers = Map.put(producers, from, {pending, interval})
# Ask for the pending events and schedule the next time around
producers = ask_and_schedule(producers, from)
# Returns manual as we want control over the demand
{:manual, producers}
end
def handle_cancel(_, from, producers) do
# Remove the producers from the map on unsubscribe
{:noreply, [], Map.delete(producers, from)}
end
def handle_events(events, from, producers) do
# Bump the amount of pending events for the given producer
producers = Map.update!(producers, from, fn {pending, interval} ->
{pending + length(events), interval}
end)
# Consume the events by printing them.
IO.inspect(events)
# A producer_consumer would return the processed events here.
{:noreply, [], producers}
end
def handle_info({:ask, from}, producers) do
# This callback is invoked by the Process.send_after/3 message below.
{:noreply, [], ask_and_schedule(producers, from)}
end
defp ask_and_schedule(producers, from) do
case producers do
%{^from => {pending, interval}} ->
# Ask for any pending events
GenStage.ask(from, pending)
# And let's check again after interval
Process.send_after(self(), {:ask, from}, interval)
# Finally, reset pending events to 0
Map.put(producers, from, {0, interval})
%{} ->
producers
end
end
end
Let's subscribe the `RateLimiter` above to the
producer we have implemented at the beginning of the module
documentation:
{:ok, a} = GenStage.start_link(A, 0)
{:ok, b} = GenStage.start_link(RateLimiter, :ok)
# Ask for 10 items every 2 seconds
GenStage.sync_subscribe(b, to: a, max_demand: 10, interval: 2000)
Although the rate limiter above is a consumer, it could be made a
producer-consumer by changing `c:init/1` to return a `:producer_consumer`
and then forwarding the events in `c:handle_events/3`.
## Notifications
`GenStage` also supports the ability to send notifications to all
consumers. Those notifications are sent as regular messages outside
of the demand-driven protocol but respecting the event ordering.
See `sync_notify/3` and `async_notify/2`.
Notifications are useful for out-of-band information, for example,
to notify consumers the producer has sent all events it had to
process or that a new batch of events is starting.
Note the notification system should not be used for broadcasting
events; for such, consider using `GenStage.BroadcastDispatcher`.
## Callbacks
`GenStage` is implemented on top of a `GenServer` with a few additions.
Besides exposing all of the `GenServer` callbacks, it also provides
`handle_demand/2` to be implemented by producers and `handle_events/3` to be
implemented by consumers, as shown above, as well as subscription-related
callbacks. Furthermore, all the callback responses have been modified to
potentially emit events. See the callbacks documentation for more
information.
By adding `use GenStage` to your module, Elixir will automatically
define all callbacks for you except for the following ones:
* `init/1` - must be implemented to choose between `:producer`, `:consumer`, or `:producer_consumer` stages
* `handle_demand/2` - must be implemented by `:producer` stages
* `handle_events/3` - must be implemented by `:producer_consumer` and `:consumer` stages
Although this module exposes functions similar to the ones found in
the `GenServer` API, like `call/3` and `cast/2`, developers can also
rely directly on GenServer functions such as `GenServer.multi_call/4`
and `GenServer.abcast/3` if they wish to.
### Name registration
`GenStage` is bound to the same name registration rules as a `GenServer`.
Read more about it in the `GenServer` docs.
## Message protocol overview
This section will describe the message protocol implemented
by stages. By documenting these messages, we will allow
developers to provide their own stage implementations.
### Back-pressure
When data is sent between stages, it is done by a message
protocol that provides back-pressure. The first step is
for the consumer to subscribe to the producer. Each
subscription has a unique reference.
Once subscribed, the consumer may ask the producer for messages
for the given subscription. The consumer may demand more items
whenever it wants to. A consumer must never receive more data
than it has asked for from any given producer stage.
A consumer may have multiple producers, where each demand is managed
individually (on a per-subscription basis). A producer may have multiple
consumers, where the demand and events are managed and delivered according to
a `GenStage.Dispatcher` implementation.
### Producer messages
The producer is responsible for sending events to consumers
based on demand. These are the messages that consumers can
send to producers:
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:subscribe, current, options}}` -
sent by the consumer to the producer to start a new subscription.
Before sending, the consumer MUST monitor the producer for clean-up
purposes in case of crashes. The `subscription_tag` is unique to
identify the subscription. It is typically the subscriber monitoring
reference although it may be any term.
Once sent, the consumer MAY immediately send demand to the producer.
The `current` field, when not `nil`, is a two-item tuple containing a
subscription that must be cancelled with the given reason before the
current one is accepted.
Once received, the producer MUST monitor the consumer. However, if
the subscription reference is known, it MUST send a `:cancel` message
to the consumer instead of monitoring and accepting the subscription.
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:cancel, reason}}` -
sent by the consumer to cancel a given subscription.
Once received, the producer MUST send a `:cancel` reply to the
registered consumer (which may not necessarily be the one received
in the tuple above). Keep in mind, however, there is no guarantee
such messages can be delivered in case the producer crashes before.
If the pair is unknown, the producer MUST send an appropriate cancel
reply.
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:ask, demand}}` -
sent by consumers to ask demand for a given subscription (identified
by `subscription_tag`).
Once received, the producer MUST send data up to the demand. If the
pair is unknown, the producer MUST send an appropriate cancel reply.
### Consumer messages
The consumer is responsible for starting the subscription
and sending demand to producers. These are the messages that
producers can send to consumers:
* `{:"$gen_consumer", from :: {producer_pid, subscription_tag}, {:notification, message}}` -
notifications sent by producers.
* `{:"$gen_consumer", from :: {producer_pid, subscription_tag}, {:cancel, reason}}` -
sent by producers to cancel a given subscription.
It is used as a confirmation for client cancellations OR
whenever the producer wants to cancel some upstream demand.
* `{:"$gen_consumer", from :: {producer_pid, subscription_tag}, events :: [event, ...]}` -
events sent by producers to consumers.
`subscription_tag` identifies the subscription. The third argument
is a non-empty list of events. If the subscription is unknown, the
events must be ignored and a cancel message must be sent to the producer.
"""
defstruct [:mod, :state, :type, :dispatcher_mod, :dispatcher_state, :buffer,
:buffer_config, events: :forward, monitors: %{}, producers: %{}, consumers: %{}]
@typedoc "The supported stage types."
@type type :: :producer | :consumer | :producer_consumer
@typedoc "The supported init options."
@type options :: keyword()
@typedoc "The stage."
@type stage :: pid | atom | {:global, term} | {:via, module, term} | {atom, node}
@typedoc "The term that identifies a subscription."
@opaque subscription_tag :: reference
@typedoc "The term that identifies a subscription associated with the corresponding producer/consumer."
@type from :: {pid, subscription_tag}
@doc """
Invoked when the server is started.
`start_link/3` (or `start/3`) will block until this callback returns.
`args` is the argument term (second argument) passed to `start_link/3`
(or `start/3`).
In case of successful start, this callback must return a tuple
where the first element is the stage type, which is one of:
* `:producer`
* `:consumer`
* `:producer_consumer` (if the stage is acting as both)
For example:
def init(args) do
{:producer, some_state}
end
The returned tuple may also contain 3 or 4 elements. The third
element may be the `:hibernate` atom or a set of options defined
below.
Returning `:ignore` will cause `start_link/3` to return `:ignore`
and the process will exit normally without entering the loop or
calling `terminate/2`.
Returning `{:stop, reason}` will cause `start_link/3` to return
`{:error, reason}` and the process to exit with reason `reason`
without entering the loop or calling `terminate/2`.
## Options
This callback may return options. Some options are specific to
the chosen stage type while others are shared across all types.
### `:producer` options
* `:demand` - when `:forward`, the demand is always forwarded to
the `c:handle_demand/2` callback. When `:accumulate`, demand is
accumulated until its mode is set to `:forward` via `demand/2`.
This is useful as a synchronization mechanism, where the demand
is accumulated until all consumers are subscribed. Defaults to
`:forward`.
### `:producer` and `:producer_consumer` options
* `:buffer_size` - the size of the buffer to store events without
demand. Can be `:infinity` to signal no limit on the buffer size. Check
the "Buffer events" section of the module documentation. Defaults to
`10_000` for `:producer`, `:infinity` for `:producer_consumer`.
* `:buffer_keep` - returns whether the `:first` or `:last` entries
should be kept on the buffer in case the buffer size is exceeded.
Defaults to `:last`.
* `:dispatcher` - the dispatcher responsible for handling demands.
Defaults to `GenStage.DemandDispatch`. May be either an atom
representing a dispatcher module or a two-element tuple with
the dispatcher module and the dispatcher options.
### `:consumer` and `:producer_consumer` options
* `:subscribe_to` - a list of producers to subscribe to. Each element
represents either the producer module or a tuple with the producer module
and the subscription options (as defined in `sync_subscribe/2`).
"""
@callback init(args :: term) ::
{type, state} |
{type, state, options} |
:ignore |
{:stop, reason :: any} when state: any
@doc """
Invoked on `:producer` stages.
This callback is invoked on `:producer` stages with the demand from
consumers/dispatcher. The producer that implements this callback must either
store the demand, or return the amount of requested events.
Must always be explicitly implemented by `:producer` stages.
## Examples
def handle_demand(demand, state) do
# We check if we're able to satisfy the demand and fetch
# events if we aren't.
events =
if length(state.events) >= demand do
events
else
fetch_events()
end
# We dispatch only the requested number of events.
{to_dispatch, remaining} = Enum.split(events, demand)
{:noreply, to_dispatch, %{state | events: remaining}}
end
"""
@callback handle_demand(demand :: pos_integer, state :: term) ::
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason, new_state} when new_state: term, reason: term, event: term
@doc """
Invoked when a consumer subscribes to a producer.
This callback is invoked in both producers and consumers.
`producer_or_consumer` will be `:producer` when this callback is
invoked on a consumer that subscribed to a producer, and `:consumer`
if when this callback is invoked on producers a consumer subscribed to.
For consumers, successful subscriptions must return one of:
* `{:automatic, new_state}` - means the stage implementation will take care
of automatically sending demand to producers. This is the default.
* `{:manual, state}` - means that demand must be sent to producers
explicitly via `ask/3`. `:manual` subscriptions must be cancelled when
`c:handle_cancel/3` is called. `:manual` can be used when a special
behaviour is desired (for example, `ConsumerSupervisor` uses `:manual`
demand in its implementation).
For producers, successful subscriptions must always return
`{:automatic, new_state}`. `:manual` mode is not supported.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:automatic, state}`.
## Examples
Let's see an example where we define this callback in a consumer that will use
`:manual` mode. In this case, we'll store the subscription (`from`) in the
state in order to be able to use it later on when asking demand via `ask/3`.
def handle_subscribe(:producer, _options, from, state) do
new_state = %{state | subscription: from}
{:manual, new_state
end
"""
@callback handle_subscribe(producer_or_consumer :: :producer | :consumer, options, from, state :: term) ::
{:automatic | :manual, new_state} |
{:stop, reason, new_state} when new_state: term, reason: term
@doc """
Invoked when a consumer is no longer subscribed to a producer.
It receives the cancellation reason, the `from` tuple representing the
cancelled subscription and the state. The `cancel_reason` will be a
`{:cancel, _}` tuple if the reason for cancellation was a `GenStage.cancel/2`
call. Any other value means the cancellation reason was due to an EXIT.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:noreply, [], state}`.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_cancel(cancellation_reason :: {:cancel | :down, reason :: term}, from, state :: term) ::
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason, new_state} when event: term, new_state: term, reason: term
@doc """
Invoked on `:producer_consumer` and `:consumer` stages to handle events.
Must always be explicitly implemented by such types.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_events(events :: [event], from, state :: term) ::
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason, new_state} when new_state: term, reason: term, event: term
@doc """
Invoked to handle synchronous `call/3` messages.
`call/3` will block until a reply is received (unless the call times out or
nodes are disconnected).
`request` is the request message sent by a `call/3`, `from` is a two-element tuple
containing the caller's PID and a term that uniquely identifies the call, and
`state` is the current state of the `GenStage`.
Returning `{:reply, reply, [events], new_state}` sends the response `reply`
to the caller after events are dispatched (or buffered) and continues the
loop with new state `new_state`. In case you want to deliver the reply before
processing events, use `reply/2` and return `{:noreply, [event],
state}`.
Returning `{:noreply, [event], new_state}` does not send a response to the
caller and processes the given events before continuing the loop with new
state `new_state`. The response must be sent with `reply/2`.
Hibernating is also supported as an atom to be returned from either
`:reply` and `:noreply` tuples.
Returning `{:stop, reason, reply, new_state}` stops the loop and `terminate/2`
is called with reason `reason` and state `new_state`. Then the `reply` is sent
as the response to the call and the process exits with reason `reason`.
Returning `{:stop, reason, new_state}` is similar to
`{:stop, reason, reply, new_state}` except that no reply is sent to the caller.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:stop, {:bad_call, request}, state}`.
"""
@callback handle_call(request :: term, from :: GenServer.from, state :: term) ::
{:reply, reply, [event], new_state} |
{:reply, reply, [event], new_state, :hibernate} |
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason, reply, new_state} |
{:stop, reason, new_state} when reply: term, new_state: term, reason: term, event: term
@doc """
Invoked to handle asynchronous `cast/2` messages.
`request` is the request message sent by a `cast/2` and `state` is the current
state of the `GenStage`.
Returning `{:noreply, [event], new_state}` dispatches the events and continues
the loop with new state `new_state`.
Returning `{:noreply, [event], new_state, :hibernate}` is similar to
`{:noreply, new_state}` except the process is hibernated before continuing the
loop. See the return values for `c:GenServer.handle_call/3` for more information
on hibernation.
Returning `{:stop, reason, new_state}` stops the loop and `terminate/2` is
called with the reason `reason` and state `new_state`. The process exits with
reason `reason`.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:stop, {:bad_cast, request}, state}`.
"""
@callback handle_cast(request :: term, state :: term) ::
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason :: term, new_state} when new_state: term, event: term
@doc """
Invoked to handle all other messages.
`message` is the message and `state` is the current state of the `GenStage`. When
a timeout occurs the message is `:timeout`.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:noreply, [], state}`.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_info(message :: term, state :: term) ::
{:noreply, [event], new_state} |
{:noreply, [event], new_state, :hibernate} |
{:stop, reason :: term, new_state} when new_state: term, event: term
@doc """
The same as `c:GenServer.terminate/2`.
"""
@callback terminate(reason, state :: term) ::
term when reason: :normal | :shutdown | {:shutdown, term} | term
@doc """
The same as `c:GenServer.code_change/3`.
"""
@callback code_change(old_vsn, state :: term, extra :: term) ::
{:ok, new_state :: term} |
|
large_file.ex
| 0.878939
| 0.830834
|
large_file.ex
|
starcoder
|
defmodule Cased do
@moduledoc """
Documentation for Cased.
"""
import Norm
defmodule ConfigurationError do
@moduledoc false
defexception message: "invalid configuration", details: nil
@type t :: %__MODULE__{
message: String.t(),
details: nil | any()
}
def message(exc) do
"#{exc.message}\ndetails #{inspect(exc.details)}"
end
end
defmodule RequestError do
@moduledoc false
defexception message: "invalid request configuration", details: nil
@type t :: %__MODULE__{
message: String.t(),
details: nil | any()
}
def message(exc) do
"#{exc.message}\ndetails #{inspect(exc.details)}"
end
end
defmodule ResponseError do
@moduledoc false
defexception message: "invalid response", details: nil, response: nil
@type t :: %__MODULE__{
message: String.t(),
details: nil | any(),
response: nil | Mojito.response()
}
def message(%{response: nil} = exc) do
"#{exc.message}\ndetails #{inspect(exc.details)}\nstatus code: (none)"
end
def message(exc) do
"#{exc.message}\ndetails #{inspect(exc.details)}\nstatus code: #{exc.status_code}"
end
end
@default_publish_opts [
publishers: [Cased.Publisher.Datadog],
handlers: []
]
@typedoc """
Options when publishing.
- `:publishers`, the list of publisher pids (defaults to `#{
inspect(@default_publish_opts[:publishers])
}`).
- `:handlers`, the list of sensitive data handlers (defaults to `#{
inspect(@default_publish_opts[:handlers])
}`);
see `Cased.Sensitive.Handler`.
"""
@type publish_opts :: [publish_opt()]
@type publish_opt ::
{:publishers, [GenServer.server()]}
| {:handlers, [Cased.Sensitive.Handler.t() | Cased.Sensitive.Handler.spec()]}
@doc """
Publish an audit event to Cased.
Note: Uses `GenServer.call/3` to send events to publisher processes.
```
%{
action: "credit_card.charge",
amount: 2000,
currency: "usd",
source: "tok_amex",
description: "My First Test Charge (created for API docs)",
credit_card_id: "<KEY>"
}
|> Cased.publish()
```
"""
@spec publish(audit_event :: map(), opts :: publish_opts()) ::
:ok | {:error, Jason.EncodeError.t() | Exception.t()}
def publish(audit_event, opts \\ []) do
opts =
@default_publish_opts
|> Keyword.merge(opts)
audit_event =
audit_event
|> Map.merge(Cased.Context.to_map())
case validate_publish_opts(opts) do
{:ok, %{publishers: publishers, handlers: handlers}} ->
Cased.Sensitive.Processor.process(audit_event, handlers: handlers)
|> do_publish(publishers)
{:error, details} ->
{:error, %ConfigurationError{details: details}}
end
end
@spec do_publish(data :: term(), publishers :: [GenServer.server()]) ::
:ok | {:error, Jason.EncodeError.t() | Exception.t()}
defp do_publish(data, publishers) do
case Jason.encode(data) do
{:ok, json} ->
for publisher <- publishers do
GenServer.call(publisher, {:publish, json})
end
:ok
other ->
other
end
end
@doc """
Publish an audit event to Cased, raising an exception in the event of failure.
"""
@spec publish!(data :: term(), opts :: publish_opts()) :: :ok | no_return()
def publish!(data, opts \\ []) do
case publish(data, opts) do
:ok ->
:ok
{:error, err} ->
raise err
end
end
defp validate_publish_opts(opts) do
opts
|> Map.new()
|> conform(publish_opts_schema())
end
defp publish_opts_schema() do
schema(%{
# Effectively [GenServer.server()]
publishers: coll_of(spec(is_pid() or is_atom() or is_tuple())),
# TODO: Use is_struct() vs is_map(), post-Elixir v1.10
handlers: coll_of(spec(is_tuple() or is_map()))
})
|> selection()
end
end
|
lib/cased.ex
| 0.804367
| 0.495972
|
cased.ex
|
starcoder
|
if match?({:module, AMQP.Channel}, Code.ensure_compiled(AMQP.Channel)) do
defmodule Mix.Tasks.Rambla.Rabbit.Exchange do
@shortdoc "Operations with exchanges in RabbitMQ"
@moduledoc since: "0.6.0"
@moduledoc """
Mix task to deal with exchanges in the target RabbitMQ.
This is helpful to orchestrate target RabbitMQ when deploying
to docker. Allows to create and delete the exchange.
Loads the setting from `config :rambla, :amqp` if no connection
is provided in parameters.
## Command line options
* -c - the connection string
* -o - the list of options without spaces, separated by comma
## Options
### Options for `create`
* **`type`** - One of four possible values below. Defaults to `:direct`.
* `direct`
* `fanout`
* `topic`
* `headers`
* `durable` - If set, keeps the Exchange between restarts of the broker;
* `auto_delete` - If set, deletes the Exchange once all queues
unbind from it;
* `passive` - If set, returns an error if the Exchange does not
already exist;
* `internal` - If set, the exchange may not be used directly by
publishers, but only when bound to other exchanges. Internal exchanges are used to construct wiring that is not visible to applications.
* `no_wait` - If set, the declare operation is asynchronous.
Defaults to false.
* `arguments` - A list of arguments to pass when declaring
(of type AMQP.arguments/0). See the README for more information. Defaults to [].
### Options for `delete`
* `if_unused` - If set, the server will only delete the exchange
if it has no queue bindings.
* `no_wait` - If set, the delete operation is asynchronous.
"""
@commands ~w|declare create delete|
@type command :: :declare | :create | :delete
use Mix.Task
use Rambla.Tasks.Utils
@spec do_command(
chan :: AMQP.Channel.t(),
command :: command(),
name :: binary(),
opts :: keyword()
) :: {:ok, {:created | :deleted, binary()}} | {:error, any()}
defp do_command(chan, :create, name, opts),
do: do_command(chan, :declare, name, opts)
defp do_command(chan, :declare, name, opts) do
{type, opts} = Keyword.pop(opts, :type, :direct)
case AMQP.Exchange.declare(chan, name, type, opts) do
:ok -> {:ok, {:created, name}}
other -> other
end
end
defp do_command(chan, :delete, name, opts) do
{_type, opts} = Keyword.pop(opts, :type, :direct)
case AMQP.Exchange.delete(chan, name, opts) do
:ok -> {:ok, {:deleted, name}}
other -> other
end
end
end
end
|
lib/mix/tasks/rabbit_exchange.ex
| 0.764364
| 0.457743
|
rabbit_exchange.ex
|
starcoder
|
defmodule Kong.API do
@moduledoc """
Provides access to the Kong endpoints for API management.
Consult the Kong Admin API documentation for more information about the API object properties
"""
@endpoint "/apis"
import Kong, only: [get: 1, get: 3, post: 2, patch: 2, put: 2, delete: 1]
@doc """
Adds a new API to Kong with the given map `attributes`.
Returns either `{:ok, result}` where result is a map with the newly created API or `{:error, reasons}` where reasons is a map containing information.
"""
def add(attributes), do: post(@endpoint, attributes)
@doc """
Retrieve an API from Kong with the given `name_or_id` string.
Returns either `{:ok, result}` where result is a map with the API or `{:error, reasons}` where reasons is a map containing information.
"""
def retrieve(name_or_id), do: get(@endpoint <> "/#{name_or_id}")
@doc """
List APIs from Kong based on the given `opts` Keyword List.
Keywords defined in opts are mapped to the Querystring parameters of the request to Kong.
Returns either `{:ok, result}` where result is the paginated result returned by Kong, or `{:error, reasons}` where reasons is a map containing information.
"""
def list(opts \\ []), do: get(@endpoint, [], params: opts)
@doc """
Update API in Kong with the given string `name_or_id` with the given map `attributes`.
Returns either `{:ok, result}` where result is a map with the updated API or `{:error, reasons}` where reasons is a map containing information.
"""
def update(name_or_id, attributes), do: patch(@endpoint <> "/#{name_or_id}", attributes)
@doc """
Update Or Create API in Kong with the given map `attributes`.
Returns either `{:ok, result}` where result is a map with the created / updated API or `{:error, reasons}` where reasons is a map containing information.
"""
def update_or_create(attributes), do: put(@endpoint, attributes)
@doc """
Remove API from Kong with the given string `name_or_id`.
Returns either `{:ok, result}` where result is a map with the newly created API or `{:error, reasons}` where reasons is a map containing information.
"""
def remove(name_or_id), do: delete(@endpoint <> "/#{name_or_id}")
@doc """
Add Plugin to API
"""
def add_plugin(name_or_id, attributes), do: post(@endpoint <> "/#{name_or_id}/plugins", attributes)
@doc """
List Plugins per API
"""
def list_plugins(name_or_id, opts \\ []), do: get(@endpoint <> "/#{name_or_id}/plugins", [], params: opts)
@doc """
Update Plugin
"""
def update_plugin(name_or_id, plugin_name_or_id, attributes), do: patch(@endpoint <> "/#{name_or_id}/plugins/#{plugin_name_or_id}", attributes)
@doc """
Update or Add Plugin
"""
def update_or_add_plugin(name_or_id, attributes), do: put(@endpoint <> "/#{name_or_id}/plugins", attributes)
@doc """
Remove Plugin
"""
def remove_plugin(name_or_id, plugin_name_or_id), do: delete(@endpoint <> "/#{name_or_id}/#{plugin_name_or_id}")
end
|
lib/api.ex
| 0.771972
| 0.563918
|
api.ex
|
starcoder
|
defmodule Day03.Pathfinder do
@moduledoc """
Functions for pathing routes.
"""
def list_wire_paths([wire_path_1, wire_path_2]) do
path_1 = get_path_coordinates(wire_path_1)
path_2 = get_path_coordinates(wire_path_2)
{path_1, path_2}
end
def map_wire_paths({path_1, path_2}) do
{map_points(path_1), map_points(path_2)}
end
@doc """
Finds the intersections between two wires.
"""
def find_intersections({path_1, path_2}) do
map_1 = MapSet.new(path_1)
map_2 = MapSet.new(path_2)
MapSet.intersection(map_1, map_2)
|> MapSet.to_list()
end
def find_closest_intersection(intersections) do
intersections
|> Enum.map(fn {x, y} ->
abs(x) + abs(y)
end)
|> Enum.min()
end
def get_steps_count_to_intersection(intersection, {path_1_map, path_2_map}) do
steps_1 = Map.get(path_1_map, intersection)
steps_2 = Map.get(path_2_map, intersection)
steps_1 + steps_2
end
def get_path_coordinates(instructions) do
{coordinates, _, _} =
instructions
|> Enum.reduce({[], 0, 0}, fn instruction, acc ->
split_instruction(instruction)
|> add_steps(acc)
end)
coordinates
|> Enum.reverse()
end
def map_points(path) do
{map, _} =
path
|> Enum.reduce({%{}, 1}, fn point, {points_map, next_step} ->
case Map.has_key?(points_map, point) do
true ->
# This point is already mapped so don't add it but increase the
# step count as if we had.
{points_map, next_step + 1}
false ->
{Map.put(points_map, point, next_step), next_step + 1}
end
end)
map
end
def add_steps({amount_x, 0, multiplier}, {existing_steps, start_x, start_y}) do
new_steps =
for x <- (1 * multiplier)..(amount_x * multiplier) do
{start_x + x, start_y}
end
|> Enum.reverse()
{new_steps ++ existing_steps, start_x + amount_x * multiplier, start_y}
end
def add_steps({0, amount_y, multiplier}, {existing_steps, start_x, start_y}) do
new_steps =
for y <- (1 * multiplier)..(amount_y * multiplier) do
{start_x, start_y + y}
end
|> Enum.reverse()
{new_steps ++ existing_steps, start_x, start_y + amount_y * multiplier}
end
def add_steps(_, original_steps_info), do: original_steps_info
@doc """
Split instructions into direction and steps.
## Examples
iex> Day03.Pathfinder.split_instruction("R12")
{12, 0, 1}
iex> Day03.Pathfinder.split_instruction("L4")
{4, 0, -1}
iex> Day03.Pathfinder.split_instruction("U9")
{0, 9, 1}
iex> Day03.Pathfinder.split_instruction("D3")
{0, 3, -1}
"""
def split_instruction(instruction) do
String.split_at(instruction, 1)
|> parse_instruction()
end
defp parse_instruction({"L", steps_string}) do
{steps_string |> String.to_integer(), 0, -1}
end
defp parse_instruction({"R", steps_string}) do
{steps_string |> String.to_integer(), 0, 1}
end
defp parse_instruction({"U", steps_string}) do
{0, steps_string |> String.to_integer(), 1}
end
defp parse_instruction({"D", steps_string}) do
{0, steps_string |> String.to_integer(), -1}
end
end
|
day_03/lib/day03/pathfinder.ex
| 0.756717
| 0.5867
|
pathfinder.ex
|
starcoder
|
defmodule River.Frame.Headers do
alias River.Frame
defstruct padding: 0,
headers: [],
header_block_fragment: <<>>,
exclusive: false,
stream_dependency: nil,
weight: nil
defmodule Flags do
defstruct [:end_stream, :end_headers, :padded, :priority]
def parse(flags) do
%__MODULE__{
end_stream: River.Flags.has_flag?(flags, 0x1),
end_headers: River.Flags.has_flag?(flags, 0x4),
padded: River.Flags.has_flag?(flags, 0x8),
priority: River.Flags.has_flag?(flags, 0x20)
}
end
end
def decode(
%Frame{length: len, flags: %{padded: true, priority: true}} = frame,
<<pl::8, ex::1, dep::31, weight::8, payload::binary>>,
ctx
) do
data_len = len - pl - 6
case payload do
<<data::binary-size(data_len), _pad::binary-size(pl)>> ->
%{
frame
| payload: %__MODULE__{
headers: HPack.decode(data, ctx),
padding: pl,
exclusive: ex == 1,
weight: weight + 1,
stream_dependency: dep
}
}
_ ->
{:error, :invalid_frame}
end
end
def decode(%Frame{length: len, flags: %{padded: true}} = frame, <<pl::8, payload::binary>>, ctx) do
data_len = len - pl - 1
case payload do
<<data::binary-size(data_len), _pad::binary-size(pl)>> ->
%{
frame
| payload: %__MODULE__{
headers: HPack.decode(data, ctx),
padding: pl
}
}
_ ->
{:error, :invalid_frame}
end
end
def decode(
%Frame{length: len, flags: %{priority: true}} = frame,
<<ex::1, dep::31, weight::8, payload::binary>>,
ctx
) do
data_len = len - 5
case payload do
<<data::binary-size(data_len)>> ->
%{
frame
| payload: %__MODULE__{
headers: HPack.decode(data, ctx),
stream_dependency: dep,
weight: weight + 1,
exclusive: ex == 1
}
}
_ ->
{:error, :invalid_frame}
end
end
def decode(%Frame{length: len} = frame, payload, ctx) do
case payload do
<<data::binary-size(len)>> ->
%{
frame
| payload: %__MODULE__{
headers: HPack.decode(data, ctx)
}
}
_ ->
{:error, :invalid_frame}
end
end
end
|
lib/river/frame/headers.ex
| 0.509032
| 0.41837
|
headers.ex
|
starcoder
|
defmodule Airbax.Client do
@moduledoc false
# This GenServer keeps a pre-built bare-bones version of an exception (a
# "draft") to be reported to Airbrake, which is then filled with the data
# related to each specific exception when such exception is being
# reported. This GenServer is also responsible for actually sending data to
# the Airbrake API and receiving responses from said API.
use GenServer
require Logger
alias Airbax.Item
@default_url "https://airbrake.io"
@headers [{"content-type", "application/json"}]
## GenServer state
defstruct [:draft, :url, :enabled, hackney_opts: [], hackney_responses: %{}]
## Public API
def start_link(project_key, project_id, environment, enabled, url, hackney_opts) do
state = new(project_key, project_id, environment, enabled, url, hackney_opts)
GenServer.start_link(__MODULE__, state, [name: __MODULE__])
end
def emit(level, body, params, session) do
if pid = Process.whereis(__MODULE__) do
event = {Atom.to_string(level), body, params, session}
GenServer.cast(pid, {:emit, event})
else
Logger.warn("(Airbax) Trying to report an exception but the :airbax application has not been started")
end
end
def default_url do
@default_url
end
## GenServer callbacks
def init(state) do
Logger.metadata(airbax: false)
:ok = :hackney_pool.start_pool(__MODULE__, [max_connections: 20])
{:ok, state}
end
def terminate(_reason, _state) do
:ok = :hackney_pool.stop_pool(__MODULE__)
end
def handle_cast({:emit, _event}, %{enabled: false} = state) do
{:noreply, state}
end
def handle_cast({:emit, event}, %{enabled: :log} = state) do
{level, body, params, session} = event
Logger.info [
"(Airbax) registered report:", ?\n, inspect(body),
"\n Level: ", level,
"\n Custom params: ", inspect(params),
"\n Session data: ", inspect(session),
]
{:noreply, state}
end
def handle_cast({:emit, event}, %{enabled: true, hackney_opts: hackney_opts} = state) do
payload = compose_json(state.draft, event)
opts = [:async, {:pool, __MODULE__} | hackney_opts]
case :hackney.post(state.url, @headers, payload, opts) do
{:ok, _ref} -> :ok
{:error, reason} ->
Logger.error("(Airbax) connection error: #{inspect(reason)}")
end
{:noreply, state}
end
def handle_info({:hackney_response, ref, response}, state) do
new_state = handle_hackney_response(ref, response, state)
{:noreply, new_state}
end
def handle_info(message, state) do
Logger.info("(Airbax) unexpected message: #{inspect(message)}")
{:noreply, state}
end
## Helper functions
defp new(project_key, project_id, environment, enabled, url, hackney_opts) do
draft = Item.draft(environment)
url = build_url(project_key, project_id, url)
%__MODULE__{draft: draft, url: url, hackney_opts: hackney_opts, enabled: enabled}
end
defp build_url(project_key, project_id, url) do
"#{url}/api/v3/projects/#{project_id}/notices?key=#{project_key}"
end
defp compose_json(draft, event) do
Item.compose(draft, event)
|> Poison.encode!(iodata: true)
end
defp handle_hackney_response(ref, :done, %{hackney_responses: responses} = state) do
body = responses |> Map.fetch!(ref) |> IO.iodata_to_binary()
case Poison.decode(body) do
{:ok, %{"err" => 1, "message" => message}} when is_binary(message) ->
Logger.error("(Airbax) API returned an error: #{inspect message}")
{:ok, response} ->
Logger.debug("(Airbax) API response: #{inspect response}")
{:error, _} ->
Logger.error("(Airbax) API returned malformed JSON: #{inspect body}")
end
%{state | hackney_responses: Map.delete(responses, ref)}
end
defp handle_hackney_response(ref, {:status, code, description}, %{hackney_responses: responses} = state) do
if code != 201 do
Logger.error("(Airbax) unexpected API status: #{code}/#{description}")
end
%{state | hackney_responses: Map.put(responses, ref, [])}
end
defp handle_hackney_response(_ref, {:headers, headers}, state) do
Logger.debug("(Airbax) API headers: #{inspect(headers)}")
state
end
defp handle_hackney_response(ref, body_chunk, %{hackney_responses: responses} = state)
when is_binary(body_chunk) do
%{state | hackney_responses: Map.update!(responses, ref, &[&1 | body_chunk])}
end
defp handle_hackney_response(ref, {:error, reason}, %{hackney_responses: responses} = state) do
Logger.error("(Airbax) connection error: #{inspect(reason)}")
%{state | hackney_responses: Map.delete(responses, ref)}
end
end
|
lib/airbax/client.ex
| 0.609175
| 0.424531
|
client.ex
|
starcoder
|
defmodule Coingecko.Coin do
@doc """
List all supported coins id, name and symbol.
Example:
iex> Coingecko.Coins.list
{:ok, {[...]}}
"""
def list do
Request.one("coins/list")
end
@doc """
List all supported coins price, market cap, volume, and market related data
Example:
iex> Coingecko.Coin.markets("usd")
{:ok, {[...]}}
"""
def markets(vs_currency) do
query_string = URI.encode_query(vs_currency: vs_currency)
Request.one("coins/markets", query_string)
end
@doc """
Get current data(name, price, market, ... including exchange tickers) for a coin
Example:
iex> Coingecko.Coin.coins("bitcoin")
{:ok, {[...]}}
"""
def coins(id) do
Request.one("coins/#{id}")
end
@doc """
Get coin tickers (paginated to 100 items)
Example:
iex> Coingecko.Coin.tickers("bitcoin")
{:ok, {[...]}}
"""
def tickers(id) do
Request.one("coins/#{id}/tickers")
end
@doc """
Gets coin history by id and date
Date in the format of dd-mm-yyyy
Example:
iex> Coingecko.Coin.history("bitcoin", "30-12-2017")
"""
def history(id, date) do
query_string = URI.encode_query(date: date)
Request.one("coins/#{id}/history", query_string)
end
@doc """
Get historical market data include price, market cap, and 24h volume
Example:
iex> Coingecko.Coin.market_chart("bitcoin", "usd", 10)
"""
def market_chart(id, vs_currency, days) do
query_string = URI.encode_query(vs_currency: vs_currency, days: days)
Request.one("coins/#{id}/market_chart", query_string)
end
@doc """
Get historical market data include price, market cap, and 24h volume within a range of timestamp
Date uses UNIX Timestamp
Example:
iex> Coingecko.Coin.market_chart_range("bitcoin", "usd", 1392577232, 1422577232)
"""
def market_chart_range(id, vs_currency, from, to) do
query_string = URI.encode_query(vs_currency: vs_currency, from: from, to: to)
Request.one("coins/#{id}/market_chart/range", query_string)
end
@doc """
Get historical market data include price, market cap, and 24h volume within a range of timestamp
Date uses UNIX Timestamp
Example:
iex> Coingecko.Coin.status_updates("bitcoin", "usd", 1392577232, 1422577232)
"""
def status_updates(id) do
Request.one("coins/#{id}/status_updates")
end
@doc """
Get coin's OHLC
Example:
iex> Coingecko.Coin.ohlc("bitcoin", "usd", 1392577232, 1422577232)
"""
def ohlc(id, vs_currency, days) do
query_string = URI.encode_query(vs_currency: vs_currency, days: days)
Request.one("coins/#{id}/ohlc", query_string)
end
@doc """
List all categorties with market data
Example:
iex> Coingecko.Coin.categories
"""
def categories do
Request.one("coins/categories")
end
@doc """
List all categorties
Example:
iex> Coingecko.Coin.categories_list
"""
def categories_list do
Request.one("coins/categories/list")
end
end
|
lib/coingecko/coin.ex
| 0.731059
| 0.534552
|
coin.ex
|
starcoder
|
defmodule Theta.CMS do
@moduledoc """
The CMS context.
"""
import Ecto.Query, warn: false
alias Theta.{Account, PV, Repo, Upload}
alias Theta.CMS.{Taxonomy, Term, Article, Qa, ArticleTag}
@doc """
Returns the list of taxonomy.
## Examples
iex> list_taxonomy()
[%Taxonomy{}, ...]
"""
def list_taxonomy do
Repo.all(Taxonomy)
end
@doc """
Gets a single taxonomy.
Raises `Ecto.NoResultsError` if the Taxonomy does not exist.
## Examples
iex> get_taxonomy!(123)
%Taxonomy{}
iex> get_taxonomy!(456)
** (Ecto.NoResultsError)
"""
def get_taxonomy!(id), do: Repo.get!(Taxonomy, id)
@doc """
Creates a taxonomy.
## Examples
iex> create_taxonomy(%{field: value})
{:ok, %Taxonomy{}}
iex> create_taxonomy(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_taxonomy(attrs \\ %{}) do
%Taxonomy{}
|> Taxonomy.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a taxonomy.
## Examples
iex> update_taxonomy(taxonomy, %{field: new_value})
{:ok, %Taxonomy{}}
iex> update_taxonomy(taxonomy, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_taxonomy(%Taxonomy{} = taxonomy, attrs) do
taxonomy
|> Taxonomy.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a taxonomy.
## Examples
iex> delete_taxonomy(taxonomy)
{:ok, %Taxonomy{}}
iex> delete_taxonomy(taxonomy)
{:error, %Ecto.Changeset{}}
"""
def delete_taxonomy(%Taxonomy{} = taxonomy) do
Repo.delete(taxonomy)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking taxonomy changes.
## Examples
iex> change_taxonomy(taxonomy)
%Ecto.Changeset{source: %Taxonomy{}}
"""
def change_taxonomy(%Taxonomy{} = taxonomy) do
Taxonomy.changeset(taxonomy, %{})
end
@doc """
Returns the list of term.
## Examples
iex> list_term()
[%Term{}, ...]
"""
def list_term do
Term
|> Repo.all()
|> Repo.preload(:taxonomy)
end
def list_tag do
Term
|> where([t], t.taxonomy_id == "tag")
|> Repo.all()
end
@doc """
Returns the list of term for main menu
## Examples
iex> list_term_menu()
[%Term{}, ...]
"""
def list_term_menu do
taxonomy_id = "main-menu"
Term
|> order_by([t], asc: t.inserted_at)
|> where([t], t.taxonomy_id == ^taxonomy_id)
|> Repo.all()
end
@doc """
Gets a single term.
Raises `Ecto.NoResultsError` if the Term does not exist.
## Examples
iex> get_term!(123)
%Term{}
iex> get_term!(456)
** (Ecto.NoResultsError)
"""
def get_term!(id), do: Repo.get!(Term, id)
@doc """
Creates a term.
## Examples
iex> create_term(%{field: value})
{:ok, %Term{}}
iex> create_term(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_term(attrs \\ %{}) do
%Term{}
|> Term.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a term.
## Examples
iex> update_term(term, %{field: new_value})
{:ok, %Term{}}
iex> update_term(term, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_term(%Term{} = term, attrs) do
attrs = Map.put_new(attrs, "action", "update")
term
|> Term.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a term.
## Examples
iex> delete_term(term)
{:ok, %Term{}}
iex> delete_term(term)
{:error, %Ecto.Changeset{}}
"""
def delete_term(%Term{} = term) do
Repo.delete(term)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking term changes.
## Examples
iex> change_term(term)|> order_by([a], desc: a.inserted_at)
%Ecto.Changeset{source: %Term{}}
"""
def change_term(%Term{} = term) do
Term.changeset(term, %{})
end
@doc """
Returns the list of article.
## Examples
iex> list_article()
[%Article{}, ...]
"""
def list_article do
Article
|> Repo.all()
|> Repo.preload(:user)
end
def list_article_menu(slug) do
Article
|> where([a], a.menu_id == ^slug)
|> order_by([a], desc: a.inserted_at)
|> Repo.all()
|> Repo.preload(:user)
end
def list_article_serial_menu(slug) do
Article
|> where([a], a.menu_id == ^slug and a.is_serial)
|> order_by([a], desc: a.inserted_at)
|> Repo.all()
|> Repo.preload(:user)
end
def list_article_index do
Article
|> order_by([c], desc: c.inserted_at)
|> Repo.all()
|> Repo.preload(:user)
end
def list_serial do
Article
|> where([s], s.is_serial == true)
|> Repo.all()
end
@doc """
Gets a single article.
Raises `Ecto.NoResultsError` if the Article does not exist.
## Examples
iex> get_article!(123)
%Article{}
iex> get_article!(456)
** (Ecto.NoResultsError)
"""
def get_article!(id) do
Article
|> Repo.get!(id)
|> Repo.preload(:tag)
end
def get_article_by_slug!(slug) do
Article
|> Repo.get_by!(slug: slug)
|> Repo.preload(
user: [],
tag: [],
menu: []
)
end
def get_article_serial!(id) do
article_main = get_article!(id)
article_sub =
Article
|> where([a], a.serial_id == ^id)
|> order_by([a], asc: a.id)
|> Repo.all()
[article_main | article_sub]
end
@doc """
Creates a article.
## Examples
iex> create_article(%{field: value})
{:ok, %Article{}}
iex> create_article(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_article(attrs \\ %{}) do
%Article{}
|> Article.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a article.
## Examples
iex> update_article(article, %{field: new_value})
{:ok, %Article{}}
iex> update_article(article, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_article(%Article{} = article, attrs) do
article
|> Article.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a article.
## Examples
iex> delete_article(article)
{:ok, %Article{}}
iex> delete_article(article)
{:error, %Ecto.Changeset{}}
"""
def delete_article(%Article{} = article) do
Repo.delete(article)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking article changes.
## Examples
iex> change_article(article)
%Ecto.Changeset{source: %Article{}}
"""
def change_article(%Article{} = article) do
Article.changeset(article, %{})
end
def get_serial(id) do
article = get_article!(id)
serial =
Repo.preload(
article,
path_alias: [],
section: [:path_alias]
)
fist = %{id: serial.id, slug: serial.path_alias.slug, title: serial.title}
list =
for section <- serial.section do
%{id: section.id, slug: section.path_alias.slug, title: section.title}
end
list = Enum.sort_by(list, & &1.id)
[fist | list]
end
@doc """
Returns the list of qa.
## Examples
iex> list_qa()
[%Qa{}, ...]
"""
def list_qa do
Repo.all(Qa)
end
def list_qa_by_tag(tag) do
Qa
|> where([q], q.tag == ^tag)
|> Repo.all()
end
def list_tag_have_qa() do
query =
from(
q in Qa,
distinct: [q.tag],
select: q.tag
)
Repo.all(query)
end
@doc """
Gets a single qa.
Raises `Ecto.NoResultsError` if the Qa does not exist.
## Examples
iex> get_qa!(123)
%Qa{}
iex> get_qa!(456)
** (Ecto.NoResultsError)
"""
def get_qa!(id), do: Repo.get!(Qa, id)
@doc """
Creates a qa.
## Examples
iex> create_qa(%{field: value})
{:ok, %Qa{}}
iex> create_qa(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_qa(attrs \\ %{}) do
%Qa{}
|> Qa.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a qa.
## Examples
iex> update_qa(qa, %{field: new_value})
{:ok, %Qa{}}
iex> update_qa(qa, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_qa(%Qa{} = qa, attrs) do
qa
|> Qa.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a qa.
## Examples
iex> delete_qa(qa)
{:ok, %Qa{}}
iex> delete_qa(qa)
{:error, %Ecto.Changeset{}}
"""
def delete_qa(%Qa{} = qa) do
Repo.delete(qa)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking qa changes.
## Examples
iex> change_qa(qa)
%Ecto.Changeset{data: %Qa{}}
"""
def change_qa(%Qa{} = qa, attrs \\ %{}) do
Qa.changeset(qa, attrs)
end
def list_article_by_tag(tag) do
ArticleTag
|> order_by([a], desc: a.article_id)
|> where([a], a.term_id == ^tag)
|> Repo.all()
|> Repo.preload(
article: [
user: []
]
)
end
end
|
apps/theta/lib/theta/cms.ex
| 0.837753
| 0.433562
|
cms.ex
|
starcoder
|
defmodule Base85.Encode do
@moduledoc """
Implements encoding functionality for Base85 encoding.
"""
# Encoding basically can't fail, so non-!-versions are trivial.
import Base85.{Charsets, Padding}
@typedoc "available character sets"
@type charset() :: Base85.Charsets.charset()
@typedoc "available padding techniques"
@type padding() :: Base85.Padding.padding()
@typedoc "options for encoding"
@type encoding_opts() :: [charset: charset(), padding: padding()]
@typedoc "encoding errors"
@type encoding_error() ::
:unrecognized_character_set
| :unrecognized_padding
| :invalid_unencoded_length
| :internal_error
@doc """
Encodes binary data into a Base85-encoded string.
This version returns the value or raises an error.
## Examples
iex> Base85.Encode.encode!("some binary data", charset: :safe85, padding: :pkcs7)
"N.Xx21Kf++HD3`AI>AZp$Aer7"
iex> Base85.Encode.encode!("123412341234", charset: :zeromq, padding: :none)
"f!$Kwf!$Kwf!$Kw"
iex> Base85.Encode.encode!("123", charset: :safe85, padding: :none)
** (Base85.InvalidUnencodedLength) raw data had invalid length for padding method none, expected multiple of 4 bytes
## Options
* `binary` - the binary data to encode, must be a multiple of 32-bits long
if no padding is used;
* `:charset` - an atom indicating the character set to use for encoding;
* `:padding` - an atom indicating which padding technique to use;
Padding methods and encodings may use additional options.
"""
@spec encode!(binary(), encoding_opts()) :: binary()
def encode!(bin, opts \\ []) when is_binary(bin) and is_list(opts) do
enc_fun = get_enc_fun(opts)
pad_fun = get_pad_fun(opts)
if bin == <<>> do
# special case for all encodings (for now)
<<>>
else
bin
|> pad_fun.(opts)
|> encode_chunks(enc_fun, opts)
|> IO.iodata_to_binary()
end
end
@doc """
Encodes binary data into a Base85-encoded string.
This version returns an `:ok`-tuple or `:error`-tuple.
## Examples
iex> Base85.Encode.encode("some binary data", charset: :safe85, padding: :pkcs7)
{:ok, "N.Xx21Kf++HD3`AI>AZp$Aer7"}
iex> Base85.Encode.encode("123412341234", charset: :zeromq, padding: :none)
{:ok, "f!$Kwf!$Kwf!$Kw"}
iex> Base85.Encode.encode("123", charset: :safe85, padding: :none)
{:error, :invalid_unencoded_length}
## Options
* `binary` - the binary data to encode, must be a multiple of 32-bits long
if no padding is used;
* `:charset` - an atom indicating the character set to use for encoding;
* `:padding` - an atom indicating which padding technique to use;
Padding methods and encodings may use additional options.
"""
@spec encode(binary, encoding_opts()) :: {:ok, binary} | {:error, encoding_error()}
def encode(bin, opts \\ []) when is_binary(bin) and is_list(opts) do
{:ok, encode!(bin, opts)}
rescue
Base85.UnrecognizedCharacterSet ->
{:error, :unrecognized_character_set}
Base85.UnrecognizedPadding ->
{:error, :unrecognized_padding}
Base85.InvalidUnencodedLength ->
{:error, :invalid_unencoded_length}
Base85.InternalError ->
{:error, :internal_error}
end
# private functions
defp get_enc_fun(opts) do
charset = Keyword.get(opts, :charset, :safe85)
if not Map.has_key?(charsets(), charset) do
raise Base85.UnrecognizedCharacterSet, charset: charset, operation: :encoding
end
enc_tup = List.to_tuple(charsets()[charset])
&elem(enc_tup, &1)
end
defp encode_chunks(bin, enc_fun, opts, encoded \\ [])
defp encode_chunks(<<>>, _enc_fun, _opts, encoded) do
Enum.reverse(encoded)
end
defp encode_chunks(<<base::integer-big-unsigned-32, rest::binary>>, enc_fun, opts, encoded) do
{e5, left} = {rem(base, 85), div(base, 85)}
{e4, left} = {rem(left, 85), div(left, 85)}
{e3, left} = {rem(left, 85), div(left, 85)}
{e2, left} = {rem(left, 85), div(left, 85)}
{e1, 0} = {rem(left, 85), div(left, 85)}
encode_chunks(rest, enc_fun, opts, [
enc_fun.(e5),
enc_fun.(e4),
enc_fun.(e3),
enc_fun.(e2),
enc_fun.(e1) | encoded
])
end
end
|
lib/base85/encode.ex
| 0.927651
| 0.451689
|
encode.ex
|
starcoder
|
defmodule Riak.Ecto do
@moduledoc """
Adapter module for Riak, using a map bucket_type to store models.
It uses `riakc` for communicating with the database and manages
a connection pool using `poolboy`.
## Features
* WIP
"""
@behaviour Ecto.Adapter
alias Riak.Ecto.NormalizedQuery
alias Riak.Ecto.NormalizedQuery.SearchQuery
alias Riak.Ecto.NormalizedQuery.FetchQuery
alias Riak.Ecto.NormalizedQuery.CountQuery
alias Riak.Ecto.NormalizedQuery.WriteQuery
alias Riak.Ecto.Decoder
alias Riak.Ecto.Connection
## Adapter
@doc false
defmacro __before_compile__(env) do
module = env.module
config = Module.get_attribute(module, :config)
adapter = Keyword.get(config, :pool, Riak.Pool.Poolboy)
quote do
defmodule Pool do
use Riak.Pool, name: __MODULE__, adapter: unquote(adapter)
def log(return, queue_time, query_time, fun, args) do
Riak.Ecto.log(unquote(module), return, queue_time, query_time, fun, args)
end
end
def __riak_pool__, do: unquote(module).Pool
end
end
@doc false
def start_link(repo, opts) do
{:ok, _} = Application.ensure_all_started(:riak_ecto)
repo.__riak_pool__.start_link(opts)
end
@doc false
def stop(repo, _pid, _timeout \\ 5_000) do
repo.__riak_pool.stop
end
@doc false
def load(:binary_id, data),
do: Ecto.Type.load(:string, data, &load/2)
def load(Ecto.DateTime, data) when is_binary(data) do
case Ecto.DateTime.cast(data) do
{:ok, datetime} ->
Ecto.Type.load(Ecto.DateTime, Ecto.DateTime.to_erl(datetime), &load/2)
:error ->
:error
end
end
def load(Ecto.Date, data) when is_binary(data) do
case Ecto.Date.cast(data) do
{:ok, date} ->
Ecto.Type.load(Ecto.Date, Ecto.Date.to_erl(date), &load/2)
:error ->
:error
end
end
def load(Riak.Ecto.Counter, data) do
Ecto.Type.load(Riak.Ecto.Counter, data, &load/2)
end
def load(Riak.Ecto.Set, data) do
Ecto.Type.load(Riak.Ecto.Set, data, &load/2)
end
def load(:float, data) when is_binary(data),
do: Ecto.Type.load(:float, String.to_float(data), &load/2)
def load(:integer, data) when is_binary(data),
do: Ecto.Type.load(:integer, String.to_integer(data), &load/2)
def load({:embed, %Ecto.Embedded{cardinality: :many}} = type, nil),
do: Ecto.Type.load(type, nil, &load/2)
def load({:embed, %Ecto.Embedded{cardinality: :many}} = type, data) do
data = Enum.reduce(data, [], fn {k, v}, acc ->
[Map.put(v, "id", k) | acc]
end)
Ecto.Type.load(type, data, &load/2)
end
def load({:array, _} = type, nil),
do: Ecto.Type.load(type, nil, &load/2)
def load({:array, _} = type, data) do
data = data
|> Enum.into([])
|> Enum.map(&elem(&1, 1))
Ecto.Type.load(type, data, &load/2)
end
def load(type, data) do
Ecto.Type.load(type, data, &load/2)
end
@doc false
def dump(:binary_id, data),
do: Ecto.Type.dump(:string, data, &dump/2)
def dump(:float, data) when is_float(data),
do: Ecto.Type.dump(:string, String.Chars.Float.to_string(data), &dump/2)
def dump(:integer, data) when is_integer(data),
do: Ecto.Type.dump(:string, String.Chars.Integer.to_string(data), &dump/2)
def dump(Ecto.Date, %Ecto.DateTime{} = data),
do: Ecto.Type.dump(:string, Ecto.DateTime.to_iso8601(data), &dump/2)
def dump(Ecto.Date, %Ecto.Date{} = data),
do: Ecto.Type.dump(:string, Ecto.Date.to_iso8601(data), &dump/2)
def dump(type, data) do
Ecto.Type.dump(type, data, &dump/2)
end
@doc false
def embed_id(_), do: Flaky.alpha
@doc false
def prepare(function, query) do
{:nocache, {function, query}}
end
@doc false
def execute(_repo, _meta, {:update_all, _query}, _params, _preprocess, _opts) do
raise ArgumentError, "Riak adapter does not support update_all."
end
def execute(_repo, _meta, {:delete_all, _query}, _params, _preprocess, _opts) do
raise ArgumentError, "Riak adapter does not support delete_all."
end
@read_queries [SearchQuery, FetchQuery]
def execute(repo, _meta, {function, query}, params, preprocess, opts) do
case apply(NormalizedQuery, function, [query, params]) do
%{__struct__: read} = query when read in [FetchQuery, SearchQuery, CountQuery] ->
{rows, count} =
Connection.all(repo.__riak_pool__, query, opts)
|> Enum.map_reduce(0, &{process_document(&1, query, preprocess), &2 + 1})
{count, rows}
%WriteQuery{} = write ->
result = apply(Connection, function, [repo.__riak_pool__, write, opts])
{result, nil}
end
end
@doc false
def insert(_repo, meta, _params, {key, :id, _}, _returning, _opts) do
raise ArgumentError,
"Riak adapter does not support :id field type in models. " <>
"The #{inspect key} field in #{inspect meta.model} is tagged as such."
end
def insert(_repo, meta, _params, _autogen, [_] = returning, _opts) do
raise ArgumentError,
"Riak adapter does not support :read_after_writes in models. " <>
"The following fields in #{inspect meta.model} are tagged as such: #{inspect returning}"
end
def insert(repo, meta, params, nil, [], opts) do
normalized = NormalizedQuery.insert(meta, params, nil)
case Connection.insert(repo.__riak_pool__, normalized, opts) do
{:ok, _} -> {:ok, []}
other -> other
end
end
def insert(repo, meta, params, {pk, :binary_id, nil}, [], opts) do
normalized = NormalizedQuery.insert(meta, params, pk)
case Connection.insert(repo.__riak_pool__, normalized, opts) do
{:ok, %{inserted_id: value}} -> {:ok, [{pk, value}]}
other -> other
end
end
def insert(repo, meta, params, {pk, :binary_id, _value}, [], opts) do
normalized = NormalizedQuery.insert(meta, params, pk)
case Connection.insert(repo.__riak_pool__, normalized, opts) do
{:ok, _} -> {:ok, []}
other -> other
end
end
@doc false
def update(_repo, meta, _fields, _filter, {key, :id, _}, _returning, _opts) do
raise ArgumentError,
"Riak adapter does not support :id field type in models. " <>
"The #{inspect key} field in #{inspect meta.model} is tagged as such."
end
def update(_repo, meta, _fields, _filter, _autogen, [_|_] = returning, _opts) do
raise ArgumentError,
"Riak adapter does not support :read_after_writes in models. " <>
"The following fields in #{inspect meta.model} are tagged as such: #{inspect returning}"
end
def update(_repo, %{context: %{map: nil}} = meta, _fields, _filter, _, _, _opts) do
raise ArgumentError,
"No causal context in #{inspect meta.model}. " <>
"Get the model by id before trying to update it."
end
def update(_repo, %{context: nil} = meta, _fields, _filter, _, _, _opts) do
raise ArgumentError,
"No causal context in #{inspect meta.model}. " <>
"Get the model by id before trying to update it."
end
def update(repo, meta, fields, filter, {pk, :binary_id, _value}, [], opts) do
normalized = NormalizedQuery.update(meta, fields, filter, pk)
Connection.update(repo.__riak_pool__, normalized, opts)
end
@doc false
def delete(_repo, meta, _filter, {key, :id, _}, _opts) do
raise ArgumentError,
"Riak adapter does not support :id field type in models. " <>
"The #{inspect key} field in #{inspect meta.model} is tagged as such."
end
def delete(repo, meta, filter, {pk, :binary_id, _value}, opts) do
normalized = NormalizedQuery.delete(meta.source, meta.context, filter, pk)
Connection.delete(repo.__riak_pool__, normalized, opts)
end
defp process_document(document, %{fields: fields, pk: pk}, preprocess) do
document = Decoder.decode_document(document, pk)
Enum.map(fields, fn
{:field, name, field} ->
preprocess.(field, Map.get(document, Atom.to_string(name)), document[:context])
{:value, value, field} ->
preprocess.(field, Decoder.decode_value(value, pk), document[:context])
field ->
preprocess.(field, document, document[:context])
end)
end
@doc false
def log(repo, :ok, queue_time, query_time, fun, args) do
log(repo, {:ok, nil}, queue_time, query_time, fun, args)
end
def log(repo, return, queue_time, query_time, fun, args) do
entry =
%Ecto.LogEntry{query: &format_log(&1, fun, args), params: [],
result: return, query_time: query_time, queue_time: queue_time}
repo.log(entry)
end
defp format_log(_entry, :run_command, [command, _opts]) do
["COMMAND " | inspect(command)]
end
defp format_log(_entry, :fetch_type, [bucket, id, _opts]) do
["FETCH_TYPE", format_part("bucket", bucket), format_part("id", id)]
end
defp format_log(_entry, :update_type, [bucket, id, _opts]) do
["UPDATE_TYPE", format_part("bucket", bucket), format_part("id", id)]
end
defp format_log(_entry, :search, [index, filter, _opts]) do
["SEARCH", format_part("index", index), format_part("filter", filter)]
end
defp format_log(_entry, :delete, [coll, filter, _opts]) do
["DELETE", format_part("coll", coll), format_part("filter", filter),
format_part("many", false)]
end
defp format_part(name, value) do
[" ", name, "=" | inspect(value)]
end
end
|
lib/riak_ecto.ex
| 0.777342
| 0.466299
|
riak_ecto.ex
|
starcoder
|
defmodule AshPhoenix.FilterForm do
defstruct [
:id,
:resource,
:transform_errors,
name: "filter",
valid?: false,
negated?: false,
params: %{},
components: [],
operator: :and,
remove_empty_groups?: false
]
alias AshPhoenix.FilterForm.Predicate
require Ash.Query
@new_opts [
params: [
type: :any,
doc: "Initial parameters to create the form with",
default: %{}
],
as: [
type: :string,
default: "filter",
doc: "Set the parameter name for the form."
],
transform_errors: [
type: :any,
doc: """
Allows for manual manipulation and transformation of errors.
If possible, try to implement `AshPhoenix.FormData.Error` for the error (if it is a custom one, for example).
If that isn't possible, you can provide this function, which will get the predicate and the error, and should
return a list of AshPhoenix-formatted errors, e.g. `[{field :: atom, message :: String.t(), substitutions :: Keyword.t()}]`
"""
],
remove_empty_groups?: [
type: :boolean,
doc: """
If true, then any time a group would be made empty by removing a group or predicate, it is removed instead.
An empty form can still be added, this only affects a group if its last component is removed.
""",
default: false
]
]
@moduledoc """
Create a new filter form.
Options:
#{Ash.OptionsHelpers.docs(@new_opts)}
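## Example
A minimal sketch of building and applying a form. The `MyApp.Post` resource and its
`:title` attribute are illustrative assumptions, not part of this module:
```elixir
form =
  MyApp.Post
  |> AshPhoenix.FilterForm.new()
  |> AshPhoenix.FilterForm.add_predicate(:title, :eq, "hello")

{:ok, query} = AshPhoenix.FilterForm.filter(MyApp.Post, form)
```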
"""
def new(resource, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @new_opts)
params = opts[:params]
params = sanitize_params(params)
params =
if is_predicate?(params) do
%{
"operator" => "and",
"id" => Ash.UUID.generate(),
"components" => %{"0" => params}
}
else
params
end
form = %__MODULE__{
id: params["id"],
name: opts[:as] || "filter",
resource: resource,
params: params,
remove_empty_groups?: opts[:remove_empty_groups?],
operator: to_existing_atom(params["operator"] || :and)
}
%{
form
| components:
parse_components(resource, form, params["components"],
remove_empty_groups?: opts[:remove_empty_groups?]
)
}
|> set_validity()
end
@doc """
Updates the filter with the provided input and validates it.
At present, no validation actually occurs, but this will eventually be added.
"""
def validate(form, params \\ %{}) do
params = sanitize_params(params)
params =
if is_predicate?(params) do
%{
"operator" => "and",
"id" => Ash.UUID.generate(),
"components" => %{"0" => params}
}
else
params
end
%{
form
| params: params,
components: validate_components(form, params["components"]),
operator: to_existing_atom(params["operator"] || :and)
}
|> set_validity()
end
@doc """
Returns a filter expression that can be provided to Ash.Query.filter/2
To add this to a query, remember to use `^`, for example:
```elixir
filter = AshPhoenix.FilterForm.to_filter_expression(form)
Ash.Query.filter(MyApp.Post, ^filter)
```
Alternatively, you can use the shorthand: `filter/2` to apply the expression directly to a query.
"""
def to_filter_expression(form) do
if form.valid? do
do_to_filter_expression(form, form.resource)
else
{:error, form}
end
end
@doc """
Same as `to_filter_expression/1`, but raises on errors.
"""
def to_filter!(form) do
case to_filter_expression(form) do
{:ok, filter} ->
filter
{:error, form} ->
error =
form
|> errors()
|> Enum.map(fn
{key, message, vars} ->
"#{key}: #{AshPhoenix.replace_vars(message, vars)}"
other ->
other
end)
|> Ash.Error.to_error_class()
raise error
end
end
@doc """
Returns a flat list of all errors on all predicates in the filter.
"""
def errors(form, opts \\ [])
def errors(%__MODULE__{components: components, transform_errors: transform_errors}, opts) do
Enum.flat_map(
components,
&errors(&1, Keyword.put_new(opts, :transform_errors, transform_errors))
)
end
def errors(%Predicate{} = predicate, opts),
do: AshPhoenix.FilterForm.Predicate.errors(predicate, opts[:transform_errors])
defp do_to_filter_expression(%__MODULE__{components: []}, _), do: {:ok, true}
defp do_to_filter_expression(
%__MODULE__{components: components, operator: operator, negated?: negated?} = form,
resource
) do
{filters, components, errors?} =
Enum.reduce(components, {[], [], false}, fn component, {filters, components, errors?} ->
case do_to_filter_expression(component, resource) do
{:ok, component_filter} ->
{filters ++ [component_filter], components ++ [component], errors?}
{:error, component} ->
{filters, components ++ [component], true}
end
end)
if errors? do
{:error, %{form | components: components}}
else
expr =
Enum.reduce(filters, nil, fn component_as_filter, acc ->
if acc do
Ash.Query.BooleanExpression.new(operator, acc, component_as_filter)
else
component_as_filter
end
end)
if negated? do
{:ok, Ash.Query.Not.new(expr)}
else
{:ok, expr}
end
end
end
defp do_to_filter_expression(
%Predicate{
field: field,
value: value,
operator: operator,
negated?: negated?,
path: path
} = predicate,
resource
) do
ref = Ash.Query.expr(ref(^field, ^path))
expr =
if Ash.Filter.get_operator(operator) do
{:ok, %Ash.Query.Call{name: operator, args: [ref, value], operator?: true}}
else
if Ash.Filter.get_function(operator, resource) do
{:ok, %Ash.Query.Call{name: operator, args: [ref, value]}}
else
{:error, {:operator, "No such function or operator #{operator}", []}}
end
end
case expr do
{:ok, expr} ->
if negated? do
{:ok, Ash.Query.Not.new(expr)}
else
{:ok, expr}
end
{:error, error} ->
{:error, %{predicate | errors: predicate.errors ++ [error]}}
end
end
@doc """
Converts the form into a filter, and filters the provided query or resource with that filter.
"""
def filter(query, form) do
case to_filter_expression(form) do
{:ok, filter} ->
{:ok, Ash.Query.do_filter(query, filter)}
{:error, error} ->
{:error, error}
end
end
@doc """
Same as `filter/2` but raises on errors.
"""
def filter!(query, form) do
Ash.Query.do_filter(query, to_filter!(form))
end
defp sanitize_params(params) do
if is_predicate?(params) do
field =
case params[:field] || params["field"] do
nil -> nil
field -> to_string(field)
end
path =
case params[:path] || params["path"] do
nil -> nil
path when is_list(path) -> Enum.join(path, ".")
path when is_binary(path) -> path
end
%{
"id" => params[:id] || params["id"] || Ash.UUID.generate(),
"operator" => to_string(params[:operator] || params["operator"] || "eq"),
"negated" => params[:negated] || params["negated"] || false,
"field" => field,
"value" => to_string(params[:value] || params["value"]),
"path" => path
}
else
components = params[:components] || params["components"] || []
components =
if is_list(components) do
components
|> Enum.with_index()
|> Map.new(fn {value, index} ->
{to_string(index), sanitize_params(value)}
end)
else
if is_map(components) do
Map.new(components, fn {key, value} ->
{key, sanitize_params(value)}
end)
end
end
%{
"components" => components || %{},
"id" => params[:id] || params["id"] || Ash.UUID.generate(),
"operator" => to_string(params[:operator] || params["operator"] || "and")
}
end
end
defp parse_components(resource, parent, component_params, form_opts) do
component_params
|> Kernel.||(%{})
|> Enum.sort_by(fn {key, _value} ->
String.to_integer(key)
end)
|> Enum.map(&elem(&1, 1))
|> Enum.map(&parse_component(resource, parent, &1, form_opts))
end
defp parse_component(resource, parent, params, form_opts) do
if is_predicate?(params) do
# Eventually, components may have references w/ paths
# also, we should validate references here
new_predicate(params, parent)
else
params = Map.put_new(params, "id", Ash.UUID.generate())
new(
resource,
Keyword.merge(form_opts, params: params, as: parent.name <> "[#{params["id"]}]")
)
end
end
defp new_predicate(params, form) do
predicate = %AshPhoenix.FilterForm.Predicate{
id: params["id"],
field: to_existing_atom(params["field"]),
value: params["value"],
path: parse_path(params),
params: params,
negated?: negated?(params),
operator: to_existing_atom(params["operator"] || :eq)
}
%{predicate | errors: predicate_errors(predicate, form.resource)}
end
defp parse_path(params) do
path = params[:path] || params["path"]
case path do
"" ->
[]
nil ->
[]
path when is_list(path) ->
Enum.map(path, &to_existing_atom/1)
path ->
path
|> String.split(".")
|> Enum.map(&to_existing_atom/1)
end
end
defp negated?(params) do
params["negated"] in [true, "true"]
end
defp validate_components(form, component_params) do
form_without_components = %{form | components: []}
component_params
|> Enum.sort_by(fn {key, _} ->
String.to_integer(key)
end)
|> Enum.map(&elem(&1, 1))
|> Enum.map(&validate_component(form_without_components, &1, form.components))
end
defp validate_component(form, params, current_components) do
id = params[:id] || params["id"]
match_component =
id && Enum.find(current_components, fn %{id: component_id} -> component_id == id end)
if match_component do
case match_component do
%__MODULE__{} ->
validate(match_component, params)
%Predicate{} ->
new_predicate(params, form)
end
else
if is_predicate?(params) do
new_predicate(params, form)
else
params = Map.put_new(params, "id", Ash.UUID.generate())
new(form.resource,
params: params,
as: form.name <> "[#{params["id"]}]",
remove_empty_groups?: form.remove_empty_groups?
)
end
end
end
defp is_predicate?(params) do
[:field, :value, "field", "value"] |> Enum.any?(&Map.has_key?(params, &1))
end
defp to_existing_atom(value) when is_atom(value), do: value
defp to_existing_atom(value) do
String.to_existing_atom(value)
rescue
_ -> value
end
@doc """
Returns the minimal set of params (at the moment just strips ids) for use in a query string.
"""
def params_for_query(form) do
do_params_for_query(form.params)
end
defp do_params_for_query(params) do
if is_predicate?(params) do
Map.delete(params, "id")
else
params =
case params["components"] do
components when is_map(components) ->
new_components =
Map.new(components, fn {key, value} ->
{key, do_params_for_query(value)}
end)
Map.put(params, "components", new_components)
_ ->
Map.delete(params, "components")
end
Map.delete(params, "id")
end
end
@doc "Returns the list of available predicates for the given resource, which may be functions or operators."
def predicates(resource) do
resource
|> Ash.DataLayer.data_layer()
|> Ash.DataLayer.functions()
|> Enum.concat(Ash.Filter.builtin_functions())
|> Enum.filter(fn function ->
try do
struct(function).__predicate__? && Enum.any?(function.args, &match?([_, _], &1))
rescue
_ -> false
end
end)
|> Enum.concat(Ash.Filter.builtin_predicate_operators())
|> Enum.map(fn function_or_operator ->
function_or_operator.name()
end)
end
@doc "Returns the list of available fields, which may be attributes, calculations, or aggregates."
def fields(resource) do
resource
|> Ash.Resource.Info.public_aggregates()
|> Enum.concat(Ash.Resource.Info.public_calculations(resource))
|> Enum.concat(Ash.Resource.Info.public_attributes(resource))
|> Enum.map(& &1.name)
end
@add_predicate_opts [
to: [
type: :string,
doc:
"The group id to add the predicate to. If not set, will be added to the top level group."
],
return_id?: [
type: :boolean,
default: false,
doc: "If set to `true`, the function returns `{form, predicate_id}`"
]
]
@doc """
Add a predicate to the filter.
Options:
#{Ash.OptionsHelpers.docs(@add_predicate_opts)}
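For example, a sketch that adds a predicate to a nested group (`group_id` is an
illustrative variable holding an existing group's id):
```elixir
{form, predicate_id} =
  AshPhoenix.FilterForm.add_predicate(form, :title, :eq, "hello",
    to: group_id,
    return_id?: true
  )
```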
"""
def add_predicate(form, field, operator_or_function, value, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @add_predicate_opts)
predicate_id = Ash.UUID.generate()
predicate =
new_predicate(
%{
"id" => predicate_id,
"field" => field,
"value" => value,
"operator" => operator_or_function
},
form
)
new_form =
if opts[:to] && opts[:to] != form.id do
set_validity(%{
form
| components: Enum.map(form.components, &do_add_predicate(&1, opts[:to], predicate))
})
else
set_validity(%{form | components: form.components ++ [predicate]})
end
if opts[:return_id?] do
{new_form, predicate_id}
else
new_form
end
end
defp set_validity(%__MODULE__{components: components} = form) do
components = Enum.map(components, &set_validity/1)
if Enum.all?(components, & &1.valid?) do
%{form | components: components, valid?: true}
else
%{form | components: components, valid?: false}
end
end
defp set_validity(%Predicate{errors: []} = predicate), do: %{predicate | valid?: true}
defp set_validity(%Predicate{errors: _} = predicate), do: %{predicate | valid?: false}
@doc "Remove the predicate with the given id"
def remove_predicate(form, id) do
%{
form
| components:
Enum.flat_map(form.components, fn
%__MODULE__{} = nested_form ->
new_nested_form = remove_predicate(nested_form, id)
remove_if_empty(new_nested_form, form.remove_empty_groups?)
%Predicate{id: ^id} ->
[]
predicate ->
[predicate]
end)
}
|> set_validity()
end
defp predicate_errors(predicate, resource) do
case Ash.Resource.Info.related(resource, predicate.path) do
nil ->
[
{:operator, "Invalid path #{Enum.join(predicate.path, ".")}", []}
]
resource ->
errors =
case Ash.Resource.Info.public_field(resource, predicate.field) do
nil ->
[
{:field, "No such field #{predicate.field}", []}
]
_ ->
[]
end
if Ash.Filter.get_function(predicate.operator, resource) do
errors
else
if Ash.Filter.get_operator(predicate.operator) do
errors
else
[
{:operator, "No such operator #{predicate.operator}", []} | errors
]
end
end
end
end
defp do_add_predicate(%__MODULE__{id: id} = form, id, predicate) do
%{form | components: form.components ++ [predicate]}
end
defp do_add_predicate(%__MODULE__{} = form, id, predicate) do
%{form | components: Enum.map(form.components, &do_add_predicate(&1, id, predicate))}
end
defp do_add_predicate(other, _, _), do: other
@add_group_opts [
to: [
type: :string,
doc: "The nested group id to add the group to."
],
operator: [
type: {:one_of, [:and, :or]},
default: :and,
doc: "The operator that the group should have internally."
],
return_id?: [
type: :boolean,
default: false,
doc: "If set to `true`, the function returns `{form, predicate_id}`"
]
]
@doc """
Add a group to the filter.
Options:
#{Ash.OptionsHelpers.docs(@add_group_opts)}
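For example, a sketch that adds a nested `:or` group and captures its id:
```elixir
{form, group_id} = AshPhoenix.FilterForm.add_group(form, operator: :or, return_id?: true)
```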
"""
def add_group(form, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @add_group_opts)
group_id = Ash.UUID.generate()
group = %__MODULE__{operator: opts[:operator], id: group_id}
new_form =
if opts[:to] && opts[:to] != form.id do
set_validity(%{
form
| components: Enum.map(form.components, &do_add_group(&1, opts[:to], group))
})
else
set_validity(%{form | components: form.components ++ [group]})
end
if opts[:return_id?] do
{new_form, group_id}
else
new_form
end
end
@doc "Remove the group with the given id"
def remove_group(form, group_id) do
%{
form
| components:
Enum.flat_map(form.components, fn
%__MODULE__{id: ^group_id} ->
[]
%__MODULE__{} = nested_form ->
new_nested_form = remove_group(nested_form, group_id)
remove_if_empty(new_nested_form, form.remove_empty_groups?)
predicate ->
[predicate]
end)
}
|> set_validity()
end
@doc "Removes the group *or* component with the given id"
def remove_component(form, group_or_component_id) do
form
|> remove_group(group_or_component_id)
|> remove_predicate(group_or_component_id)
end
defp remove_if_empty(form, false), do: [form]
defp remove_if_empty(form, true) do
if Enum.empty?(form.components) do
[]
else
[form]
end
end
defp do_add_group(%AshPhoenix.FilterForm{id: id} = form, id, group) do
%{form | components: form.components ++ [group]}
end
defp do_add_group(%AshPhoenix.FilterForm{} = form, id, group) do
%{form | components: Enum.map(form.components, &do_add_group(&1, id, group))}
end
defp do_add_group(other, _, _), do: other
defimpl Phoenix.HTML.FormData do
@impl true
def to_form(form, opts) do
hidden = [id: form.id]
%Phoenix.HTML.Form{
source: form,
impl: __MODULE__,
id: opts[:id] || form.id,
name: opts[:as] || form.name,
errors: [],
data: form,
params: form.params,
hidden: hidden,
options: Keyword.put_new(opts, :method, "GET")
}
end
@impl true
def to_form(form, _phoenix_form, :components, _opts) do
form.components
|> Enum.with_index()
|> Enum.map(fn {component, index} ->
Phoenix.HTML.Form.form_for(component, "action",
transform_errors: form.transform_errors,
as: form.name <> "[components][#{index}]"
)
end)
end
def to_form(_, _, other, _) do
raise "Invalid inputs_for name #{other}. Only :components is supported"
end
@impl true
def input_type(_, _, _), do: :text_input
@impl true
def input_value(%{id: id}, _, :id), do: id
def input_value(%{negated?: negated?}, _, :negated), do: negated?
def input_value(%{operator: operator}, _, :operator), do: operator
def input_value(_, _, field) do
raise "Invalid filter form field #{field}. Only :id, :negated, and :operator are supported"
end
@impl true
def input_validations(_, _, _), do: []
end
end
|
lib/ash_phoenix/filter_form/filter_form.ex
| 0.836621
| 0.643259
|
filter_form.ex
|
starcoder
|
defmodule CanUdp.Server do
@moduledoc """
UDP CAN connection to a VIU board with CAN interfaces.
Receives and sends CAN frames in a UDP format.
## How?
This process creates a server.
Then when the first UDP packet is received, the address of the sender is set as the destination.
Subsequently, all frames sent through this module (see `write/3`) are targeted at that destination.
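## Example
A minimal, hypothetical sketch (the name, ports, host tuple and `signal_pid` are illustrative):
    {:ok, pid} = CanUdp.Server.start_link({:can0, signal_pid, 2000, {192, 168, 1, 10}, 2001})
    CanUdp.Server.write(pid, 0x123, <<1, 2, 3, 4>>)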
"""
use GenServer
require Mix
defmodule State, do: defstruct [
:name_pid, :signal_pid, :socket,
:target_host, :target_port
]
# CLIENT
def start_link({name, signal_pid, server_port, target_host, target_port}),
do: GenServer.start_link(__MODULE__, {
name, signal_pid,
server_port, target_host, target_port
}, name: name)
def write(pid, frame_id, frame_payload),
do: GenServer.cast(pid, {:write, frame_id, frame_payload})
# specifically useful for LIN auto-config nodes, where the host address is unavailable
def provide_host_adress(pid, host_ip),
do: GenServer.cast(pid, {:host_adress, host_ip})
# SERVER
def init({name_pid, signal_pid, server_port, target_host, target_port}) do
{:ok, socket} = :gen_udp.open(server_port, [:binary, reuseaddr: true])
state = %State{
name_pid: name_pid,
socket: socket,
signal_pid: signal_pid,
target_host: target_host,
target_port: target_port
}
{:ok, state}
end
def handle_info({:udp, _, _, _, data}, state) do
handle_packet(data, state)
{:noreply, state}
end
require Logger
def handle_cast({:host_adress, host_ip}, state) do
{:noreply, %State{state | target_host: host_ip}}
end
def handle_cast({:write, frame_id, frame_payload}, state) do
# Logger.debug("write data, #{inspect frame_id} #{inspect frame_payload}")
send_data(frame_id, frame_payload, state)
{:noreply, state}
end
# lin specific
def handle_cast({:write_arbitration_frame, frame_id, frame_length}, state) do
# Logger.debug("write data, #{inspect frame_id} #{inspect frame_payload}")
if(Util.Config.is_test(),
do: Util.Forwarder.send({:write_arbitration_frame, frame_id, frame_length}))
send_arbitration_frame(frame_id, frame_length, state)
{:noreply, state}
end
# Just for testing
if Mix.env() == :test do
def handle_call(:get_state, _, state), do: {:reply, state, state}
def get_state(pid), do: GenServer.call(pid, :get_state)
end
# INTERNAL
defp handle_packet(data, state) do
frames = CanUdp.parse_udp_frames(data)
Payload.Signal.handle_raw_can_frames(state.signal_pid, state.name_pid, frames)
end
defp send_data_wrapper(socket, host, port, data) do
case host do
nil -> Logger.warn "Missing dest_ip, not able to send data intended for #{inspect port}"
_ ->
:gen_udp.send(socket, host, port, data)
end
end
defp send_data(frame_id, frame_payload, state) do
data = CanUdp.make_udp_frame(frame_id, frame_payload)
send_data_wrapper(state.socket, state.target_host, state.target_port, data)
end
defp send_arbitration_frame(frame_id, expected_bytes_in_response, state) do
data = CanUdp.make_udp_frame_size(frame_id, expected_bytes_in_response)
send_data_wrapper(state.socket, state.target_host, state.target_port, data)
#Logger.debug "Sending udp frame_id #{inspect frame_id} #{inspect data}"
end
end
|
apps/app_udpcan/lib/server.ex
| 0.627038
| 0.500427
|
server.ex
|
starcoder
|
defmodule Pdf2htmlex do
@moduledoc """
Provides functions to convert PDF documents to HTML.
The command line tool [pdf2HtmlEX](http://coolwanglu.github.io/pdf2htmlEX/) must be installed and needs to be
added to your PATH.
## Examples
iex(1)> Pdf2htmlex.open("test/fixtures/simple.pdf") |> Pdf2htmlex.convert_to!("/tmp")
iex(2)> Pdf2htmlex.open("test/fixtures/simple.pdf") |> Pdf2htmlex.zoom(2.0) |> Pdf2htmlex.convert_to!("/tmp")
"""
@doc """
Adds the path to an input file (PDF) to a list that will be used to build up options that
will be used for conversion.
"""
def open(pdf_path) when is_binary(pdf_path) do
pdf_path = Path.expand(pdf_path)
unless File.regular?(pdf_path), do: raise(File.Error, reason: :enoent, action: "read", path: pdf_path)
[pdf_path]
end
@doc """
Converts the PDF with given options to a output directory
"""
def convert_to!(opts, dir) when is_list(opts) do
exec_cmd(["--dest-dir", dir | opts])
end
@doc """
Zoom ratio
"""
def zoom(opts, zoom_ratio) when is_list(opts) and is_float(zoom_ratio), do: ["--zoom", f_to_s(zoom_ratio) | opts]
@doc """
First page to convert
"""
def first_page(opts, first) when is_list(opts) and is_integer(first), do: ["--first-page", i_to_s(first) | opts]
@doc """
Last page to convert
"""
def last_page(opts, last) when is_list(opts) and is_integer(last), do: ["--last-page", i_to_s(last) | opts]
@doc """
Fit width to (in pixels)
"""
def fit_width(opts, width) when is_list(opts) and is_integer(width), do: ["--fit-width", i_to_s(width) | opts]
@doc """
Fit height to (in pixels)
"""
def fit_height(opts, height) when is_list(opts) and is_integer(height), do: ["--fit-height", i_to_s(height) | opts]
@doc """
Use CropBox instead of MediaBox
"""
def use_mediabox(opts) when is_list(opts), do: ["--use-cropbox", "0" | opts]
@doc """
Horizontal resolution for graphics in DPI (default: 144)
"""
def hdpi(opts, dpi) when is_list(opts) and is_integer(dpi), do: ["--hdpi", i_to_s(dpi) | opts]
@doc """
Vertical resolution for graphics in DPI (default: 144)
"""
def vdpi(opts, dpi) when is_list(opts) and is_integer(dpi), do: ["--vdpi", i_to_s(dpi) | opts]
@doc """
Puts CSS in external files
"""
def externalize_css(opts) when is_list(opts), do: ["--embed-css", "0" | opts]
@doc """
Puts fonts in external files
"""
def externalize_font(opts) when is_list(opts), do: ["--embed-font", "0" | opts]
@doc """
Puts images in external files
"""
def externalize_image(opts) when is_list(opts), do: ["--embed-image", "0" | opts]
@doc """
Puts Javascript in external files
"""
def externalize_javascript(opts) when is_list(opts), do: ["--embed-javascript", "0" | opts]
@doc """
Puts outline in external file
"""
def externalize_outline(opts) when is_list(opts), do: ["--embed-outline", "0" | opts]
@doc """
Puts every page in an external HTML file
"""
def split_pages(opts) when is_list(opts), do: ["--split-pages", "1" | opts]
defp exec_cmd(opts) do
cmd =
System.find_executable("pdf2htmlEX") ||
raise(RuntimeError, message: "pdf2htmlEX was not found in your PATH")
{msg, exit_status} = System.cmd(cmd, opts, stderr_to_stdout: true)
if exit_status != 0 do
raise RuntimeError, message: "The command line tool reported an error: #{msg}"
end
end
defp i_to_s(s), do: Integer.to_string(s)
defp f_to_s(f), do: Float.to_string(f, [decimals: 1, compact: true])
end
|
lib/pdf2htmlex.ex
| 0.707506
| 0.527256
|
pdf2htmlex.ex
|
starcoder
|
defmodule Sqlitex.Statement do
alias Sqlitex.Row
@moduledoc """
Provides an interface for working with sqlite prepared statements.
Care should be taken when using prepared statements directly - they are not
immutable objects like most things in Elixir. Sharing a statement between
different processes can cause problems if the processes accidentally
interleave operations on the statement. It's a good idea to create different
statements per process, or to wrap the statements up in a GenServer to prevent
interleaving operations.
## Example
```
iex(2)> {:ok, db} = Sqlitex.open(":memory:")
iex(3)> Sqlitex.query(db, "CREATE TABLE data (id, name);")
{:ok, []}
iex(4)> {:ok, statement} = Sqlitex.Statement.prepare(db, "INSERT INTO data VALUES (?, ?);")
iex(5)> Sqlitex.Statement.bind_values(statement, [1, "hello"])
iex(6)> Sqlitex.Statement.exec(statement)
:ok
iex(7)> {:ok, statement} = Sqlitex.Statement.prepare(db, "SELECT * FROM data;")
iex(8)> Sqlitex.Statement.fetch_all(statement)
{:ok, [[id: 1, name: "hello"]]}
iex(9)> Sqlitex.close(db)
:ok
```
## RETURNING Clause Support
SQLite does not support the RETURNING extension to INSERT, DELETE, and UPDATE
commands. (See https://www.postgresql.org/docs/9.6/static/sql-insert.html for
a description of the Postgres implementation of this clause.)
Ecto 2.0 relies on being able to capture this information, so we have invented our
own implementation with the following syntax:
```
;--RETURNING ON [INSERT | UPDATE | DELETE] <table>,<col>,<col>,...
```
When the `prepare/2` and `prepare!/2` functions are given a query that contains
the above returning clause, they separate this clause from the end of the query
and store it separately in the `Statement` struct. Only the portion of the query
preceding the returning clause is passed to SQLite's prepare function.
Later, when such a statement struct is passed to `fetch_all/2` or `fetch_all!/2`
the returning clause is parsed and the query is performed with the following
additional logic:
```
SAVEPOINT sp_<random>;
CREATE TEMP TABLE temp.t_<random> (<returning>);
CREATE TEMP TRIGGER tr_<random> AFTER UPDATE ON main.<table> BEGIN
INSERT INTO t_<random> SELECT NEW.<returning>;
END;
UPDATE ...; -- whatever the original statement was
DROP TRIGGER tr_<random>;
SELECT <returning> FROM temp.t_<random>;
DROP TABLE temp.t_<random>;
RELEASE sp_<random>;
```
A more detailed description of the motivations for making this change is here:
https://github.com/jazzyb/sqlite_ecto/wiki/Sqlite.Ecto's-Pseudo-Returning-Clause
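As a short, hypothetical example of the syntax in use (the table and column names
are illustrative):
```
{:ok, stmt} = Sqlitex.Statement.prepare(
  db, "INSERT INTO data VALUES (?, ?) ;--RETURNING ON INSERT data,id")
```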
"""
defstruct database: nil,
statement: nil,
returning: nil,
column_names: [],
column_types: []
@doc """
Prepare a Sqlitex.Statement
## Parameters
* `db` - The database to prepare the statement for.
* `sql` - The SQL of the statement to prepare.
## Returns
* `{:ok, statement}` on success
* See `:esqlite3.prepare` for errors.
"""
def prepare(db, sql) do
with {:ok, stmt} <- do_prepare(db, sql),
{:ok, stmt} <- get_column_names(stmt),
{:ok, stmt} <- get_column_types(stmt),
{:ok, stmt} <- extract_returning_clause(stmt, sql),
do: {:ok, stmt}
end
@doc """
Same as `prepare/2` but raises a Sqlitex.Statement.PrepareError on error.
Returns a new statement otherwise.
"""
def prepare!(db, sql) do
case prepare(db, sql) do
{:ok, statement} -> statement
{:error, reason} -> raise Sqlitex.Statement.PrepareError, reason: reason
end
end
@doc """
Binds values to a Sqlitex.Statement
## Parameters
* `statement` - The statement to bind values into.
* `values` - A list of values to bind into the statement.
## Returns
* `{:ok, statement}` on success
* See `:esqlite3.prepare` for errors.
## Value transformations
Some values will be transformed before insertion into the database, as sketched in the example below.
* `nil` - Converted to :undefined
* `true` - Converted to 1
* `false` - Converted to 0
* `datetime` - Converted into a string. See datetime_to_string
* `%Decimal` - Converted into a number.
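For instance, a sketch of these transformations (the table and values are illustrative):
```
iex> {:ok, stmt} = Sqlitex.Statement.prepare(db, "INSERT INTO t VALUES (?, ?, ?);")
iex> Sqlitex.Statement.bind_values(stmt, [nil, true, {2016, 5, 5}])
```
Here `nil` is bound as `:undefined`, `true` as `1`, and the date tuple as `"2016-05-05"`.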
"""
def bind_values(statement, values) do
case :esqlite3.bind(statement.statement, translate_bindings(values)) do
{:error, _} = error -> error
:ok -> {:ok, statement}
end
end
@doc """
Same as `bind_values/2` but raises a Sqlitex.Statement.BindValuesError on error.
Returns the statement otherwise.
"""
def bind_values!(statement, values) do
case bind_values(statement, values) do
{:ok, statement} -> statement
{:error, reason} -> raise Sqlitex.Statement.BindValuesError, reason: reason
end
end
@doc """
Fetches all rows using a statement.
Should be called after the statement has been bound.
## Parameters
* `statement` - The statement to run.
* `into` - The collection to put the results into. Defaults to an empty list.
## Returns
* `{:ok, results}`
* `{:error, error}`
"""
def fetch_all(statement, into \\ []) do
case raw_fetch_all(statement) do
{:error, _} = other -> other
raw_data ->
{:ok, Row.from(statement.column_types, statement.column_names, raw_data, into)}
end
end
defp raw_fetch_all(%__MODULE__{returning: nil, statement: statement}) do
:esqlite3.fetchall(statement)
end
defp raw_fetch_all(statement) do
returning_query(statement)
end
@doc """
Same as `fetch_all/2` but raises a Sqlitex.Statement.FetchAllError on error.
Returns the results otherwise.
"""
def fetch_all!(statement, into \\ []) do
case fetch_all(statement, into) do
{:ok, results} -> results
{:error, reason} -> raise Sqlitex.Statement.FetchAllError, reason: reason
end
end
@doc """
Runs a statement that returns no results.
Should be called after the statement has been bound.
## Parameters
* `statement` - The statement to run.
## Returns
* `:ok`
* `{:error, error}`
"""
def exec(statement) do
case :esqlite3.step(statement.statement) do
# esqlite3.step returns some odd values, so lets translate them:
:"$done" -> :ok
:"$busy" -> {:error, {:busy, "Sqlite database is busy"}}
other -> other
end
end
@doc """
Same as `exec/1` but raises a Sqlitex.Statement.ExecError on error.
Returns :ok otherwise.
"""
def exec!(statement) do
case exec(statement) do
:ok -> :ok
{:error, reason} -> raise Sqlitex.Statement.ExecError, reason: reason
end
end
defp do_prepare(db, sql) do
case :esqlite3.prepare(sql, db) do
{:ok, statement} ->
{:ok, %Sqlitex.Statement{database: db, statement: statement}}
other -> other
end
end
defp get_column_names(%Sqlitex.Statement{statement: sqlite_statement} = statement) do
names =
sqlite_statement
|> :esqlite3.column_names
|> Tuple.to_list
{:ok, %Sqlitex.Statement{statement | column_names: names}}
end
defp get_column_types(%Sqlitex.Statement{statement: sqlite_statement} = statement) do
types =
sqlite_statement
|> :esqlite3.column_types
|> Tuple.to_list
{:ok, %Sqlitex.Statement{statement | column_types: types}}
end
defp translate_bindings(params) do
Enum.map(params, fn
nil -> :undefined
true -> 1
false -> 0
date = {_yr, _mo, _da} -> date_to_string(date)
time = {_hr, _mi, _se, _usecs} -> time_to_string(time)
datetime = {{_yr, _mo, _da}, {_hr, _mi, _se, _usecs}} -> datetime_to_string(datetime)
%Decimal{sign: sign, coef: coef, exp: exp} -> sign * coef * :math.pow(10, exp)
other -> other
end)
end
defp date_to_string({yr, mo, da}) do
Enum.join [zero_pad(yr, 4), "-", zero_pad(mo, 2), "-", zero_pad(da, 2)]
end
def time_to_string({hr, mi, se, usecs}) do
Enum.join [zero_pad(hr, 2), ":", zero_pad(mi, 2), ":", zero_pad(se, 2), ".", zero_pad(usecs, 6)]
end
defp datetime_to_string({date = {_yr, _mo, _da}, time = {_hr, _mi, _se, _usecs}}) do
Enum.join [date_to_string(date), " ", time_to_string(time)]
end
defp zero_pad(num, len) do
str = Integer.to_string num
String.duplicate("0", len - String.length(str)) <> str
end
# --- Returning clause support
@pseudo_returning_statement ~r(\s*;--RETURNING\s+ON\s+)i
defp extract_returning_clause(statement, sql) do
if Regex.match?(@pseudo_returning_statement, sql) do
[_, returning_clause] = Regex.split(@pseudo_returning_statement, sql, parts: 2)
case parse_return_contents(returning_clause) do
{_table, cols, _command, _ref} = info ->
{:ok, %{statement | returning: info,
column_names: Enum.map(cols, &String.to_atom/1),
column_types: Enum.map(cols, fn _ -> nil end)}}
err ->
err
end
else
{:ok, statement}
end
end
defp parse_return_contents(<<"INSERT ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "INSERT", "NEW"}
end
defp parse_return_contents(<<"UPDATE ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "UPDATE", "NEW"}
end
defp parse_return_contents(<<"DELETE ", values::binary>>) do
[table | cols] = String.split(values, ",")
{table, cols, "DELETE", "OLD"}
end
defp parse_return_contents(_) do
{:error, :invalid_returning_clause}
end
defp returning_query(%__MODULE__{database: db,
statement: statement,
returning: {table, cols, command, ref}})
do
with_savepoint(db, fn ->
with_temp_table(db, cols, fn tmp_tbl ->
err = with_temp_trigger(db, table, tmp_tbl, cols, command, ref, fn ->
:esqlite3.fetchall(statement)
end)
case err do
{:error, _} -> err
_ ->
fields = Enum.join(cols, ", ")
:esqlite3.q("SELECT #{fields} FROM #{tmp_tbl}", db)
end
end)
end)
end
defp with_savepoint(db, func) do
sp = "sp_#{random_id()}"
[] = :esqlite3.q("SAVEPOINT #{sp}", db)
case safe_call(db, func, sp) do
{:error, _} = error ->
[] = :esqlite3.q("ROLLBACK TO SAVEPOINT #{sp}", db)
[] = :esqlite3.q("RELEASE #{sp}", db)
error
result ->
[] = :esqlite3.q("RELEASE #{sp}", db)
result
end
end
defp safe_call(db, func, sp) do
try do
func.()
rescue
e in RuntimeError ->
[] = :esqlite3.q("ROLLBACK TO SAVEPOINT #{sp}", db)
[] = :esqlite3.q("RELEASE #{sp}", db)
raise e
end
end
defp with_temp_table(db, returning, func) do
tmp = "t_#{random_id()}"
fields = Enum.join(returning, ", ")
results = case :esqlite3.q("CREATE TEMP TABLE #{tmp} (#{fields})", db) do
{:error, _} = err -> err
_ -> func.(tmp)
end
:esqlite3.q("DROP TABLE IF EXISTS #{tmp}", db)
results
end
defp with_temp_trigger(db, table, tmp_tbl, returning, command, ref, func) do
tmp = "tr_" <> random_id()
fields = Enum.map_join(returning, ", ", &"#{ref}.#{&1}")
sql = """
CREATE TEMP TRIGGER #{tmp} AFTER #{command} ON main.#{table} BEGIN
INSERT INTO #{tmp_tbl} SELECT #{fields};
END;
"""
results = case :esqlite3.q(sql, db) do
{:error, _} = err -> err
_ -> func.()
end
:esqlite3.q("DROP TRIGGER IF EXISTS #{tmp}", db)
results
end
defp random_id, do: :rand.uniform |> Float.to_string |> String.slice(2..10)
end
|
deps/sqlitex/lib/sqlitex/statement.ex
| 0.908373
| 0.816991
|
statement.ex
|
starcoder
|
defmodule WhistlerNewsReader.Parallel do
@moduledoc """
Runs a number of jobs (with an upper bound) in parallel and
awaits them to finish.
Code from https://github.com/hexpm/hex/blob/88190bc6ed7f4a95d91aa32a4c2fb642febc02df/lib/hex/parallel.ex
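## Example
A minimal sketch (the pool name, job id and job function are illustrative):
    {:ok, _pid} = WhistlerNewsReader.Parallel.start_link(:fetch_pool, max_parallel: 2)
    :ok = WhistlerNewsReader.Parallel.run(:fetch_pool, :job_1, fn -> 1 + 1 end)
    2 = WhistlerNewsReader.Parallel.await(:fetch_pool, :job_1, 5_000)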
"""
use GenServer
require Logger
def start_link(name, opts) do
GenServer.start_link(__MODULE__, new_state(opts), name: name)
end
@spec run(GenServer.server, any, (() -> any)) :: :ok
def run(name, id, fun) do
GenServer.cast(name, {:run, id, fun})
end
@spec await(GenServer.server, any, timeout) :: any
def await(name, id, timeout) do
GenServer.call(name, {:await, id}, timeout)
end
def clear(name) do
GenServer.call(name, :clear)
end
def debug(name) do
GenServer.call(name, :debug)
end
def handle_cast({:run, id, fun}, state) do
state = run_task(id, fun, state)
{:noreply, state}
end
def handle_call({:await, id}, from, state) do
if result = state.finished[id] do
state = %{state | finished: Map.delete(state.finished, id)}
{:reply, result, state}
else
state = %{state | waiting_reply: Map.put(state.waiting_reply, id, from)}
{:noreply, state}
end
end
def handle_call(:clear, _from, state) do
Enum.each(state.running, fn {%Task{pid: pid}, _} ->
Process.exit(pid, :stop)
end)
state = %{state | running: %{}, finished: %{}, waiting: :queue.new, waiting_reply: %{}}
{:reply, :ok, state}
end
def handle_call(:debug, _from, state) do
running = Map.keys(state.running)
Logger.info "Parallel - Tasks Running: #{inspect running}"
{:reply, {:ok, state}, state}
end
def handle_info({ref, message}, state) when is_reference(ref) do
tasks = Map.keys(state.running)
if task = Enum.find(tasks, &(&1.ref == ref)) do
id = state.running[task]
state = %{state | running: Map.delete(state.running, task)}
state =
if from = state.waiting_reply[id] do
GenServer.reply(from, message)
%{state | waiting_reply: Map.delete(state.waiting_reply, id)}
else
%{state | finished: Map.put(state.finished, id, message)}
end
state =
case :queue.out(state.waiting) do
{{:value, {id, fun}}, waiting} ->
state = %{state | waiting: waiting}
run_task(id, fun, state)
{:empty, _} ->
state
end
{:noreply, state}
else
{:noreply, state}
end
end
def handle_info({:DOWN, ref, _, proc, reason}, state) do
tasks = Map.keys(state.running)
if Enum.find(tasks, &(&1.ref == ref)) do
{:stop, {proc, reason}, state}
else
{:noreply, state}
end
end
defp run_task(id, fun, state) do
if Map.size(state.running) >= state.max_jobs do
%{state | waiting: :queue.in({id, fun}, state.waiting)}
else
parent = self()
task = Task.async(fn ->
Ecto.Adapters.SQL.Sandbox.allow(WhistlerNewsReader.Repo, parent, self())
fun.()
end)
%{state | running: Map.put(state.running, task, id)}
end
end
defp new_state(opts) do
%{max_jobs: opts[:max_parallel] || 10,
running: %{},
finished: %{},
waiting: :queue.new,
waiting_reply: %{}}
end
end
|
lib/whistler_news_reader/parallel.ex
| 0.693784
| 0.427935
|
parallel.ex
|
starcoder
|
defmodule Helper.Utils.Map do
@moduledoc """
utils functions for map structure
"""
@doc """
maps atom values to upcased strings
e.g:
%{hello: :world} # -> %{hello: "WORLD"}
"""
def atom_values_to_upcase(map) when is_map(map) do
map
|> Enum.reduce(%{}, fn {key, val}, acc ->
if is_atom(val) and not is_boolean(val) and not is_nil(val) do
Map.put(acc, key, val |> to_string() |> String.upcase())
else
Map.put(acc, key, val)
end
end)
end
def atom_values_to_upcase(value), do: value
def map_key_stringify(%{__struct__: _} = map) when is_map(map) do
map = Map.from_struct(map)
map |> Enum.reduce(%{}, fn {key, val}, acc -> Map.put(acc, to_string(key), val) end)
end
def map_key_stringify(map) when is_map(map) do
map |> Enum.reduce(%{}, fn {key, val}, acc -> Map.put(acc, to_string(key), val) end)
end
@doc """
Adapted from https://stackoverflow.com/a/61559842/4050784,
adjusted to recursively convert map keys from strings to atoms.
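For example (string keys only; nested maps are converted recursively):
    iex> Helper.Utils.Map.keys_to_atoms(%{"a" => %{"b" => 1}})
    %{a: %{b: 1}}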
"""
def keys_to_atoms(map) when is_map(map) do
Map.new(map, &reduce_keys_to_atoms/1)
end
def keys_to_atoms(string) when is_binary(string), do: string
defp reduce_keys_to_atoms({key, val}) when is_map(val),
# do: {String.to_existing_atom(key), keys_to_atoms(val)}
do: {String.to_atom(key), keys_to_atoms(val)}
defp reduce_keys_to_atoms({key, val}) when is_list(val),
do: {String.to_atom(key), Enum.map(val, &keys_to_atoms(&1))}
defp reduce_keys_to_atoms({key, val}), do: {String.to_atom(key), val}
@doc """
Adapted from https://stackoverflow.com/a/61559842/4050784,
adjusted to recursively convert map keys from atoms to strings.
"""
@spec keys_to_strings(map) :: map
def keys_to_strings(map) when is_map(map) do
Map.new(map, &reduce_keys_to_strings/1)
end
defp reduce_keys_to_strings({key, val}) when is_map(val),
do: {Atom.to_string(key), keys_to_strings(val)}
defp reduce_keys_to_strings({key, val}) when is_list(val),
do: {Atom.to_string(key), Enum.map(val, &keys_to_strings(&1))}
defp reduce_keys_to_strings({key, val}), do: {Atom.to_string(key), val}
@doc """
Recursively camelizes the map keys.
Usage: converting factory attrs into params for simulated GraphQL queries.
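For example (illustrative):
    iex> Helper.Utils.Map.camelize_map_key(%{user_name: "Bob"})
    %{"userName" => "Bob"}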
"""
def camelize_map_key(map, v_trans \\ :ignore) do
map_list =
Enum.map(map, fn {k, v} ->
v =
cond do
is_datetime?(v) ->
DateTime.to_iso8601(v)
is_map(v) ->
camelize_map_key(safe_map(v), v_trans)
is_binary(v) ->
handle_camelize_value_trans(v, v_trans)
true ->
v
end
map_to_camel({k, v})
end)
Enum.into(map_list, %{})
end
defp handle_camelize_value_trans(v, :ignore), do: v
defp handle_camelize_value_trans(v, :downcase), do: String.downcase(v)
defp handle_camelize_value_trans(v, :upcase), do: String.upcase(v)
defp safe_map(%{__struct__: _} = map), do: Map.from_struct(map)
defp safe_map(map), do: map
defp map_to_camel({k, v}), do: {Recase.to_camel(to_string(k)), v}
@spec snake_map_key(map) :: map
def snake_map_key(map) do
map_list =
Enum.map(map, fn {k, v} ->
v =
cond do
is_datetime?(v) ->
DateTime.to_iso8601(v)
is_map(v) ->
snake_map_key(safe_map(v))
true ->
v
end
{Recase.to_snake(to_string(k)), v}
end)
Enum.into(map_list, %{})
end
defp is_datetime?(%DateTime{}), do: true
defp is_datetime?(_), do: false
def map_atom_value(attrs, :string) do
results =
Enum.map(attrs, fn {k, v} ->
cond do
v == true or v == false ->
{k, v}
is_atom(v) ->
{k, v |> to_string() |> String.downcase()}
true ->
{k, v}
end
end)
results |> Enum.into(%{})
end
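@doc """
Recursively merges two maps, preferring the right-hand value whenever both sides
are not maps. A small illustrative sketch:
    iex> Helper.Utils.Map.deep_merge(%{a: %{b: 1, c: 2}}, %{a: %{b: 3}})
    %{a: %{b: 3, c: 2}}
"""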
def deep_merge(left, right), do: Map.merge(left, right, &deep_resolve/3)
# Key exists in both maps, and both values are maps as well.
# These can be merged recursively.
# defp deep_resolve(_key, left = %{},right = %{}) do
defp deep_resolve(_key, %{} = left, %{} = right), do: deep_merge(left, right)
# Key exists in both maps, but at least one of the values is
# NOT a map. We fall back to standard merge behavior, preferring
# the value on the right.
defp deep_resolve(_key, _left, right), do: right
end
|
lib/helper/utils/map.ex
| 0.77806
| 0.638427
|
map.ex
|
starcoder
|
defmodule SanbaseWeb.Graphql.Resolvers.PriceResolver do
require Logger
import SanbaseWeb.Graphql.Helpers.CalibrateInterval, only: [calibrate: 6]
alias Sanbase.Price
alias Sanbase.Model.Project
@total_market "TOTAL_MARKET"
@total_erc20 "TOTAL_ERC20"
@doc """
Returns a list of price points for the given slug or ticker. Optimizes the number
of queries to the DB by inspecting the requested fields.
"""
def history_price(_root, %{slug: @total_market} = args, _resolution) do
%{from: from, to: to, interval: interval} = args
with {:ok, from, to, interval} <-
calibrate(Price, @total_market, from, to, interval, 300),
{:ok, result} <- Price.timeseries_data(@total_market, from, to, interval) do
{:ok, result}
end
end
def history_price(root, %{ticker: @total_market} = args, resolution) do
args = args |> Map.delete(:ticker) |> Map.put(:slug, @total_market)
history_price(root, args, resolution)
end
def history_price(_root, %{slug: @total_erc20} = args, _resolution) do
%{from: from, to: to, interval: interval} = args
with {:ok, from, to, interval} <-
calibrate(Price, @total_erc20, from, to, interval, 300),
{:ok, result} <- Price.timeseries_data(@total_erc20, from, to, interval) do
{:ok, result}
end
end
def history_price(root, %{ticker: @total_erc20} = args, resolution) do
args = args |> Map.delete(:ticker) |> Map.put(:slug, @total_erc20)
history_price(root, args, resolution)
end
def history_price(_root, %{ticker: ticker} = args, _resolution) do
%{from: from, to: to, interval: interval} = args
with {:get_slug, slug} when not is_nil(slug) <- {:get_slug, Project.slug_by_ticker(ticker)},
{:ok, from, to, interval} <- calibrate(Price, slug, from, to, interval, 300),
{:ok, result} <- Price.timeseries_data(slug, from, to, interval) do
{:ok, result}
else
{:get_slug, nil} ->
{:error,
"The provided ticker '#{ticker}' is misspelled or there is no data for this ticker"}
error ->
{:error, "Cannot fetch history price for #{ticker}. Reason: #{inspect(error)}"}
end
end
def history_price(_root, %{slug: slug} = args, _resolution) do
%{from: from, to: to, interval: interval} = args
with {:ok, from, to, interval} <- calibrate(Price, slug, from, to, interval, 300),
{:ok, result} <- Price.timeseries_data(slug, from, to, interval) do
{:ok, result}
else
error ->
{:error, "Cannot fetch history price for #{slug}. Reason: #{inspect(error)}"}
end
end
def ohlc(_root, %{slug: slug, from: from, to: to, interval: interval}, _resolution) do
case Price.timeseries_ohlc_data(slug, from, to, interval) do
{:ok, result} ->
{:ok, result}
{:error, error} ->
{:error, "Cannot fetch ohlc for #{slug}. Reason: #{inspect(error)}"}
end
end
def projects_list_stats(_root, %{slugs: slugs, from: from, to: to}, _resolution) do
case Price.aggregated_marketcap_and_volume(slugs, from, to) do
{:ok, values} ->
{:ok, values}
_ ->
{:error, "Can't fetch combined volume and marketcap for slugs"}
end
end
end
|
lib/sanbase_web/graphql/resolvers/price_resolver.ex
| 0.819641
| 0.475971
|
price_resolver.ex
|
starcoder
|
defmodule Snowflake do
@moduledoc """
Functions that work on Snowflakes.
"""
@behaviour Ecto.Type
def type, do: :string
@discord_epoch 1_420_070_400_000
@typedoc """
The type that represents snowflakes in JSON.
In JSON, Snowflakes are typically represented as strings due
to some languages not being able to represent such a large number.
"""
@type external_snowflake :: String.t()
@typedoc """
The snowflake type.
Snowflakes are 64-bit unsigned integers used to represent discord
object ids.
"""
@type t :: 0..0xFFFFFFFFFFFFFFFF
@doc ~S"""
Returns `true` if `term` is a snowflake; otherwise returns `false`.
## Examples
```Elixir
iex> Nostrum.Snowflake.is_snowflake(89918932789497856)
true
iex> Nostrum.Snowflake.is_snowflake(-1)
false
iex> Nostrum.Snowflake.is_snowflake(0xFFFFFFFFFFFFFFFF + 1)
false
iex> Nostrum.Snowflake.is_snowflake("117789813427535878")
false
```
"""
defguard is_snowflake(term)
when is_integer(term) and term in 0..0xFFFFFFFFFFFFFFFF
@doc ~S"""
Attempts to convert a term into a snowflake.
## Examples
```Elixir
iex> Nostrum.Snowflake.cast(200317799350927360)
{:ok, 200317799350927360}
iex> Nostrum.Snowflake.cast("200317799350927360")
{:ok, 200317799350927360}
iex> Nostrum.Snowflake.cast(nil)
{:ok, nil}
iex> Nostrum.Snowflake.cast(true)
:error
iex> Nostrum.Snowflake.cast(-1)
:error
```
"""
@spec cast(term) :: {:ok, t | nil} | :error
def cast(value)
def cast(nil), do: {:ok, nil}
def cast(value) when is_snowflake(value), do: {:ok, value}
def cast(value) when is_binary(value) do
case Integer.parse(value) do
{snowflake, _} -> cast(snowflake)
_ -> :error
end
end
def cast(_), do: :error
@doc """
Same as `cast/1`, except it raises an `ArgumentError` on failure.
"""
@spec cast!(term) :: t | nil | no_return
def cast!(value) do
case cast(value) do
{:ok, res} -> res
:error -> raise ArgumentError, "Could not convert to a snowflake"
end
end
@doc ~S"""
Convert a snowflake into its external representation.
## Examples
```Elixir
iex> Nostrum.Snowflake.dump(109112383011581952)
"109112383011581952"
```
"""
def dump(snowflake) when is_snowflake(snowflake), do: {:ok, to_string(snowflake)}
def dump(str) when is_binary(str) do
{:ok, str}
end
def load(snowflake), do: cast(snowflake)
def embed_as(_format), do: :self
def equal?(term, term), do: true
def equal?(_, _), do: false
@doc """
Converts the given `datetime` into a snowflake.
If `datetime` occurred before the discord epoch, the function will return
`:error`.
The converted snowflake's last 22 bits will be zeroed out due to missing data.
## Examples
```Elixir
iex> {:ok, dt, _} = DateTime.from_iso8601("2016-05-05T21:04:13.203Z")
iex> Nostrum.Snowflake.from_datetime(dt)
{:ok, 177888205536755712}
iex> {:ok, dt, _} = DateTime.from_iso8601("1998-12-25T00:00:00.000Z")
iex> Nostrum.Snowflake.from_datetime(dt)
:error
```
"""
@spec from_datetime(DateTime.t()) :: {:ok, t} | :error
def from_datetime(%DateTime{} = datetime) do
use Bitwise
unix_time_ms = DateTime.to_unix(datetime, :millisecond)
discord_time_ms = unix_time_ms - @discord_epoch
if discord_time_ms >= 0 do
{:ok, discord_time_ms <<< 22}
else
:error
end
end
@doc """
Same as `from_datetime/1`, except it raises an `ArgumentError` on failure.
"""
@spec from_datetime!(DateTime.t()) :: t | no_return
def from_datetime!(datetime) do
case from_datetime(datetime) do
{:ok, snowflake} -> snowflake
:error -> raise(ArgumentError, "invalid datetime #{inspect(datetime)}")
end
end
@doc ~S"""
Returns the creation time of the snowflake.
## Examples
```Elixir
iex> Nostrum.Snowflake.creation_time(177888205536886784)
~U[2016-05-05 21:04:13.203Z]
```
"""
@spec creation_time(t) :: DateTime.t()
def creation_time(snowflake) when is_snowflake(snowflake) do
use Bitwise
time_elapsed_ms = (snowflake >>> 22) + @discord_epoch
{:ok, datetime} = DateTime.from_unix(time_elapsed_ms, :millisecond)
datetime
end
end
|
lib/yourbot/snowflake.ex
| 0.925676
| 0.825695
|
snowflake.ex
|
starcoder
|
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t ::
integer
| :AGGREGATION_TEMPORALITY_UNSPECIFIED
| :AGGREGATION_TEMPORALITY_DELTA
| :AGGREGATION_TEMPORALITY_CUMULATIVE
field(:AGGREGATION_TEMPORALITY_UNSPECIFIED, 0)
field(:AGGREGATION_TEMPORALITY_DELTA, 1)
field(:AGGREGATION_TEMPORALITY_CUMULATIVE, 2)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.ResourceMetrics do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
resource: SpandexOTLP.Opentelemetry.Proto.Resource.V1.Resource.t() | nil,
instrumentation_library_metrics: [
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.InstrumentationLibraryMetrics.t()
],
schema_url: String.t()
}
defstruct [:resource, :instrumentation_library_metrics, :schema_url]
field(:resource, 1, type: SpandexOTLP.Opentelemetry.Proto.Resource.V1.Resource)
field(:instrumentation_library_metrics, 2,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.InstrumentationLibraryMetrics
)
field(:schema_url, 3, type: :string)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.InstrumentationLibraryMetrics do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
instrumentation_library:
SpandexOTLP.Opentelemetry.Proto.Common.V1.InstrumentationLibrary.t() | nil,
metrics: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Metric.t()],
schema_url: String.t()
}
defstruct [:instrumentation_library, :metrics, :schema_url]
field(:instrumentation_library, 1,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.InstrumentationLibrary
)
field(:metrics, 2, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Metric)
field(:schema_url, 3, type: :string)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Metric do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data: {atom, any},
name: String.t(),
description: String.t(),
unit: String.t()
}
defstruct [:data, :name, :description, :unit]
oneof(:data, 0)
field(:name, 1, type: :string)
field(:description, 2, type: :string)
field(:unit, 3, type: :string)
field(:int_gauge, 4,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntGauge,
deprecated: true,
oneof: 0
)
field(:gauge, 5, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Gauge, oneof: 0)
field(:int_sum, 6,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntSum,
deprecated: true,
oneof: 0
)
field(:sum, 7, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Sum, oneof: 0)
field(:int_histogram, 8,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntHistogram,
deprecated: true,
oneof: 0
)
field(:histogram, 9, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Histogram, oneof: 0)
field(:summary, 11, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Summary, oneof: 0)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntGauge do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntDataPoint.t()]
}
defstruct [:data_points]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntDataPoint
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Gauge do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.NumberDataPoint.t()]
}
defstruct [:data_points]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.NumberDataPoint
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntSum do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntDataPoint.t()],
aggregation_temporality:
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality.t(),
is_monotonic: boolean
}
defstruct [:data_points, :aggregation_temporality, :is_monotonic]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntDataPoint
)
field(:aggregation_temporality, 2,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality,
enum: true
)
field(:is_monotonic, 3, type: :bool)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Sum do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.NumberDataPoint.t()],
aggregation_temporality:
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality.t(),
is_monotonic: boolean
}
defstruct [:data_points, :aggregation_temporality, :is_monotonic]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.NumberDataPoint
)
field(:aggregation_temporality, 2,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality,
enum: true
)
field(:is_monotonic, 3, type: :bool)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntHistogram do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntHistogramDataPoint.t()],
aggregation_temporality:
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality.t()
}
defstruct [:data_points, :aggregation_temporality]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntHistogramDataPoint
)
field(:aggregation_temporality, 2,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality,
enum: true
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Histogram do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.HistogramDataPoint.t()],
aggregation_temporality:
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality.t()
}
defstruct [:data_points, :aggregation_temporality]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.HistogramDataPoint
)
field(:aggregation_temporality, 2,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.AggregationTemporality,
enum: true
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Summary do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data_points: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint.t()]
}
defstruct [:data_points]
field(:data_points, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntDataPoint do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
start_time_unix_nano: non_neg_integer,
time_unix_nano: non_neg_integer,
value: integer,
exemplars: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntExemplar.t()]
}
defstruct [:labels, :start_time_unix_nano, :time_unix_nano, :value, :exemplars]
field(:labels, 1, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue)
field(:start_time_unix_nano, 2, type: :fixed64)
field(:time_unix_nano, 3, type: :fixed64)
field(:value, 4, type: :sfixed64)
field(:exemplars, 5,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntExemplar
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.NumberDataPoint do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any},
attributes: [SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue.t()],
labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
start_time_unix_nano: non_neg_integer,
time_unix_nano: non_neg_integer,
exemplars: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Exemplar.t()]
}
defstruct [:value, :attributes, :labels, :start_time_unix_nano, :time_unix_nano, :exemplars]
oneof(:value, 0)
field(:attributes, 7, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue)
field(:labels, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue,
deprecated: true
)
field(:start_time_unix_nano, 2, type: :fixed64)
field(:time_unix_nano, 3, type: :fixed64)
field(:as_double, 4, type: :double, oneof: 0)
field(:as_int, 6, type: :sfixed64, oneof: 0)
field(:exemplars, 5, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Exemplar)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntHistogramDataPoint do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
start_time_unix_nano: non_neg_integer,
time_unix_nano: non_neg_integer,
count: non_neg_integer,
sum: integer,
bucket_counts: [non_neg_integer],
explicit_bounds: [float | :infinity | :negative_infinity | :nan],
exemplars: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntExemplar.t()]
}
defstruct [
:labels,
:start_time_unix_nano,
:time_unix_nano,
:count,
:sum,
:bucket_counts,
:explicit_bounds,
:exemplars
]
field(:labels, 1, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue)
field(:start_time_unix_nano, 2, type: :fixed64)
field(:time_unix_nano, 3, type: :fixed64)
field(:count, 4, type: :fixed64)
field(:sum, 5, type: :sfixed64)
field(:bucket_counts, 6, repeated: true, type: :fixed64)
field(:explicit_bounds, 7, repeated: true, type: :double)
field(:exemplars, 8,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntExemplar
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.HistogramDataPoint do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
attributes: [SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue.t()],
labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
start_time_unix_nano: non_neg_integer,
time_unix_nano: non_neg_integer,
count: non_neg_integer,
sum: float | :infinity | :negative_infinity | :nan,
bucket_counts: [non_neg_integer],
explicit_bounds: [float | :infinity | :negative_infinity | :nan],
exemplars: [SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Exemplar.t()]
}
defstruct [
:attributes,
:labels,
:start_time_unix_nano,
:time_unix_nano,
:count,
:sum,
:bucket_counts,
:explicit_bounds,
:exemplars
]
field(:attributes, 9, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue)
field(:labels, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue,
deprecated: true
)
field(:start_time_unix_nano, 2, type: :fixed64)
field(:time_unix_nano, 3, type: :fixed64)
field(:count, 4, type: :fixed64)
field(:sum, 5, type: :double)
field(:bucket_counts, 6, repeated: true, type: :fixed64)
field(:explicit_bounds, 7, repeated: true, type: :double)
field(:exemplars, 8, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Exemplar)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint.ValueAtQuantile do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
quantile: float | :infinity | :negative_infinity | :nan,
value: float | :infinity | :negative_infinity | :nan
}
defstruct [:quantile, :value]
field(:quantile, 1, type: :double)
field(:value, 2, type: :double)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
attributes: [SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue.t()],
labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
start_time_unix_nano: non_neg_integer,
time_unix_nano: non_neg_integer,
count: non_neg_integer,
sum: float | :infinity | :negative_infinity | :nan,
quantile_values: [
SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint.ValueAtQuantile.t()
]
}
defstruct [
:attributes,
:labels,
:start_time_unix_nano,
:time_unix_nano,
:count,
:sum,
:quantile_values
]
field(:attributes, 7, repeated: true, type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue)
field(:labels, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue,
deprecated: true
)
field(:start_time_unix_nano, 2, type: :fixed64)
field(:time_unix_nano, 3, type: :fixed64)
field(:count, 4, type: :fixed64)
field(:sum, 5, type: :double)
field(:quantile_values, 6,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Metrics.V1.SummaryDataPoint.ValueAtQuantile
)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.IntExemplar do
@moduledoc false
use Protobuf, deprecated: true, syntax: :proto3
@type t :: %__MODULE__{
filtered_labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
time_unix_nano: non_neg_integer,
value: integer,
span_id: binary,
trace_id: binary
}
defstruct [:filtered_labels, :time_unix_nano, :value, :span_id, :trace_id]
field(:filtered_labels, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue
)
field(:time_unix_nano, 2, type: :fixed64)
field(:value, 3, type: :sfixed64)
field(:span_id, 4, type: :bytes)
field(:trace_id, 5, type: :bytes)
end
defmodule SpandexOTLP.Opentelemetry.Proto.Metrics.V1.Exemplar do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any},
filtered_attributes: [SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue.t()],
filtered_labels: [SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue.t()],
time_unix_nano: non_neg_integer,
span_id: binary,
trace_id: binary
}
defstruct [:value, :filtered_attributes, :filtered_labels, :time_unix_nano, :span_id, :trace_id]
oneof(:value, 0)
field(:filtered_attributes, 7,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.KeyValue
)
field(:filtered_labels, 1,
repeated: true,
type: SpandexOTLP.Opentelemetry.Proto.Common.V1.StringKeyValue,
deprecated: true
)
field(:time_unix_nano, 2, type: :fixed64)
field(:as_double, 3, type: :double, oneof: 0)
field(:as_int, 6, type: :sfixed64, oneof: 0)
field(:span_id, 4, type: :bytes)
field(:trace_id, 5, type: :bytes)
end
# source: lib/spandex_otlp/opentelemetry/proto/metrics/v1/metrics.pb.ex
defmodule Day07.Coder do
@moduledoc """
Functions for parsing and executing Intcode programs.
"""
@doc """
Executes an Intcode program with the given input value.
## Examples
iex> Day07.Coder.execute_program([3,9,8,9,10,9,4,9,99,-1,8], 8)
1
"""
def execute_program(codes, input_value) do
codes
|> build_codes_map()
|> run_code(0, input_value)
end
defp build_codes_map(codes) do
codes
|> Enum.with_index()
|> Enum.reduce(%{}, fn {value, index}, acc ->
Map.put(acc, index, value)
end)
end
# Get the value at `starting_index` and parse it as an opcode
# and then perform the appropriate action. This function is
# called from the 'action' functions recursively.
defp run_code(codes_map, starting_index, io_value) do
Map.get(codes_map, starting_index)
|> opcode_properties()
|> perform_action(codes_map, starting_index, io_value)
end
# --- Action Functions ---
# Add position one and position two and store in position three
defp perform_action(%{action: 1} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2, mode_3: mode_3} = opcode_properties
first_value = value_for_mode(mode_1, codes_map, starting_index + 1)
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
output_index = index_for_mode(mode_3, codes_map, starting_index + 3)
Map.put(codes_map, output_index, first_value + second_value)
|> run_code(starting_index + 4, io_value)
end
# Multiply position one and position two and store in position three
defp perform_action(%{action: 2} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2, mode_3: mode_3} = opcode_properties
first_value = value_for_mode(mode_1, codes_map, starting_index + 1)
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
output_index = index_for_mode(mode_3, codes_map, starting_index + 3)
Map.put(codes_map, output_index, first_value * second_value)
|> run_code(starting_index + 4, io_value)
end
# Take an input and store it in position one
defp perform_action(%{action: 3} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1} = opcode_properties
output_index = index_for_mode(mode_1, codes_map, starting_index + 1)
Map.put(codes_map, output_index, io_value)
|> run_code(starting_index + 2, io_value)
end
# Output the value at position one
defp perform_action(%{action: 4} = opcode_properties, codes_map, starting_index, _io_value) do
%{mode_1: mode_1} = opcode_properties
output_value = value_for_mode(mode_1, codes_map, starting_index + 1)
run_code(codes_map, starting_index + 2, output_value)
end
# Jump-if-true
# If the first parameter's resultant value is non-zero, it sets the instruction
# pointer to the value from the second parameter. Otherwise, it does nothing.
defp perform_action(%{action: 5} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2} = opcode_properties
value_for_mode(mode_1, codes_map, starting_index + 1)
|> case do
0 ->
run_code(codes_map, starting_index + 3, io_value)
_ ->
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
run_code(codes_map, second_value, io_value)
end
end
# Jump-if-false
# If the first parameter's resultant value is zero, it sets the instruction
# pointer to the value from the second parameter. Otherwise, it does nothing.
defp perform_action(%{action: 6} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2} = opcode_properties
value_for_mode(mode_1, codes_map, starting_index + 1)
|> case do
0 ->
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
run_code(codes_map, second_value, io_value)
_ ->
run_code(codes_map, starting_index + 3, io_value)
end
end
# Less than
# If the first parameter's resultant value is less than the second parameter's
# resultant value then store 1 in the position given by the third parameter.
# Otherwise, store 0.
defp perform_action(%{action: 7} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2, mode_3: mode_3} = opcode_properties
first_value = value_for_mode(mode_1, codes_map, starting_index + 1)
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
third_position = index_for_mode(mode_3, codes_map, starting_index + 3)
case first_value < second_value do
true ->
Map.put(codes_map, third_position, 1)
false ->
Map.put(codes_map, third_position, 0)
end
|> run_code(starting_index + 4, io_value)
end
# Equals
# If the first parameter's resultant value is equal to the second parameter's
# resultant value then store 1 in the position given by the third parameter.
# Otherwise, store 0.
defp perform_action(%{action: 8} = opcode_properties, codes_map, starting_index, io_value) do
%{mode_1: mode_1, mode_2: mode_2, mode_3: mode_3} = opcode_properties
first_value = value_for_mode(mode_1, codes_map, starting_index + 1)
second_value = value_for_mode(mode_2, codes_map, starting_index + 2)
third_position = index_for_mode(mode_3, codes_map, starting_index + 3)
case first_value == second_value do
true ->
Map.put(codes_map, third_position, 1)
false ->
Map.put(codes_map, third_position, 0)
end
|> run_code(starting_index + 4, io_value)
end
# Halt (opcode 99) and return the current I/O value
defp perform_action(%{action: 99} = _opcode_properties, _codes_map, _starting_index, io_value) do
io_value
end
# Catch-All
defp perform_action(_opcode_properties, _codes_map, _starting_index, _io_value) do
IO.puts("UNKNOWN CODE")
"ERROR"
end
# --- Helper Functions ---
defp index_for_mode(:position, codes_map, position) do
Map.get(codes_map, position)
end
defp index_for_mode(:immediate, _codes_map, position) do
position
end
defp value_for_mode(:position, codes_map, position) do
index = Map.get(codes_map, position)
Map.get(codes_map, index)
end
defp value_for_mode(:immediate, codes_map, position) do
Map.get(codes_map, position)
end
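@doc """
Splits an opcode into its two-digit action and three parameter modes.
## Examples
    iex> Day07.Coder.opcode_properties(1002)
    %{action: 2, mode_1: :position, mode_2: :immediate, mode_3: :position}
"""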
def opcode_properties(opcode) do
opcode_string =
opcode
|> Integer.to_string()
|> String.pad_leading(5, "0")
%{
action: String.slice(opcode_string, 3, 2) |> String.to_integer(),
mode_1: String.slice(opcode_string, 2, 1) |> translate_mode(),
mode_2: String.slice(opcode_string, 1, 1) |> translate_mode(),
mode_3: String.slice(opcode_string, 0, 1) |> translate_mode()
}
end
defp translate_mode("1"), do: :immediate
defp translate_mode(_), do: :position
end
# source: day_07/lib/day07/coder.ex
defmodule ExotelEx.HttpMessenger do
@behaviour ExotelEx.Messenger
@endpoint "https://api.exotel.com/v1/Accounts/"
@ets_bucket_name "exotel-rate-limited-api"
# Public API
@doc """
The send_sms/4 function sends an SMS message from a given phone number
to another, optionally with a media URL.
## Example:
```
iex(1)> ExotelEx.HttpMessenger.send_sms("15005550006", "15005550001", "test message")
%{"SMSMessage" => %{
"AccountSid" => "probe",
"ApiVersion" => nil,
"Body" => "test message",
"DateCreated" => "2017-11-12 00:24:31",
"DateSent" => nil,
"DateUpdated" => "2017-11-12 00:24:31",
"DetailedStatus" => "PENDING_TO_OPERATOR",
"DetailedStatusCode" => 21010,
"Direction" => "outbound-api",
"From" => "01139595093/SCRPBX",
"Price" => nil,
"Sid" => "6dbfdc50133d0e51ec8d793356559868",
"Status" => "queued",
"To" => "08884733565",
"Uri" => "/v1/Accounts/probe/SMS/Messages/6dbfdc50133d0e51ec8d793356559868.json"}}
```
"""
@spec send_sms(String.t(), String.t(), String.t(), String.t()) :: map()
def send_sms(from, to, body, media \\ "") do
check_rate_limit!() # raises ApiLimitExceeded if rate limit exceeded
case HTTPoison.post(send_sms_url(), sms(from, to, body, media), auth_header()) do
{:ok, response} -> process_response(response)
{:error, error} -> raise ExotelEx.Errors.ApiError, error.reason
end
end
@doc """
The sms_details/1 function fetches the details of an SMS by its sid.
## Example:
```
iex(1)> ExotelEx.HttpMessenger.sms_details("sms_sid")
%{"SMSMessage" => %{
"AccountSid" => "probe",
"ApiVersion" => nil,
"Body" => "<PASSWORD>",
"DateCreated" => "2017-11-12 00:24:31",
"DateSent" => "2017-11-12 00:24:35",
"DateUpdated" => "2017-11-12 00:24:36",
"DetailedStatus" => "DELIVERED_TO_HANDSET",
"DetailedStatusCode" => 20005,
"Direction" => "outbound-api",
"From" => "01139595093/SCRPBX",
"Price" => "0.180000",
"Sid" => "6dbfdc50133d0e51ec8d793356559868",
"Status" => "sent",
"To" => "08884733565",
"Uri" => "/v1/Accounts/probe/SMS/Messages/6dbfdc50133d0e51ec8d793356559868.json"}}
```
"""
@spec sms_details(String.t()) :: map()
def sms_details(sms_sid) do
check_rate_limit!() # raises ApiLimitExceeded if rate limit exceeded
case HTTPoison.get(sms_details_url(sms_sid), auth_header()) do
{:ok, response} -> process_response(response)
{:error, error} -> raise ExotelEx.Errors.ApiError, error.reason
end
end
@doc """
The time_to_next_bucket/0 function gets the time in seconds until the next bucket reset.
## Example:
```
iex(1)> ExotelEx.HttpMessenger.time_to_next_bucket
{:ok, 5} # 5 seconds to next bucket reset
```
"""
@spec time_to_next_bucket() :: tuple()
def time_to_next_bucket do
{_, _, ms_to_next_bucket, _, _} = ExRated.inspect_bucket(@ets_bucket_name,
time_scale_in_ms(),
api_limit())
sec_to_next_bucket = round(ms_to_next_bucket / 1000.0)
{:ok, sec_to_next_bucket}
end
# Private API
defp check_rate_limit! do
case ExRated.check_rate(@ets_bucket_name, time_scale_in_ms(), api_limit()) do
{:ok, current_count} -> {:ok, current_count}
{:error, current_count} ->
raise ExotelEx.Errors.ApiLimitExceeded, "API rate limit exceeded - #{current_count}"
end
end
defp sid do
Application.get_env(:exotel_ex, :sid)
end
defp token do
Application.get_env(:exotel_ex, :token)
end
defp time_scale_in_ms do
{time_scale, _} = Integer.parse(Application.get_env(:exotel_ex, :rate_limit_scale))
time_scale
end
defp api_limit do
{api_limit_rate, _} = Integer.parse(Application.get_env(:exotel_ex, :rate_limit_count))
api_limit_rate
end
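# Expected application config (values are hypothetical; the rate-limit
# settings are parsed from strings via Integer.parse/1 above):
#
#     config :exotel_ex,
#       sid: "your_sid",
#       token: "your_token",
#       rate_limit_scale: "60000",   # bucket window in milliseconds
#       rate_limit_count: "100"      # allowed calls per window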
defp send_sms_url do
"#{@endpoint}#{sid()}/Sms/send.json"
end
defp sms_details_url(sms_sid) do
"#{@endpoint}#{sid()}/SMS/Messages/#{sms_sid}.json"
end
defp process_response(%HTTPoison.Response{body: body}) do
# Note: `keys: :atom` is not a valid Poison option (the library expects
# `keys: :atoms`) and was silently ignored, so decoded maps keep string
# keys, as shown in the docs above.
Poison.decode!(body)
end
defp sms(from, to, body, media) do
{:form, [To: to, From: from, Body: body, MediaUrl: media]}
end
defp auth_header do
encoded_token = Base.encode64("#{sid()}:#{token()}")
[
{"Authorization", "Basic " <> encoded_token}
]
end
end
# source: lib/exotel_ex/messengers/http_messenger.ex
defmodule Tune.Matcher do
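@moduledoc """
Matches two listeners' profiles: intersects their artists and tracks,
builds a playlist of up to 30 tracks (at most 5 per shared artist,
topped up with recommendations and popular tracks), and scores the
overlap at 5 points per shared artist and 2.5 per shared track,
capped at 100.
"""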
@limit 30
@limit_tracks_per_artist 5
@artist_score 5
@track_score 2.5
def basic_match(%{id: id}, %{id: id2}, _) when id == id2, do: %{}
def basic_match(
%{artists: origin_artists, tracks: origin_tracks},
%{artists: target_artists, tracks: target_tracks},
nil
) do
all_origin_tracks = get_all_items(origin_tracks)
all_target_tracks = get_all_items(target_tracks)
all_origin_artists = get_all_items(origin_artists)
all_target_artists = get_all_items(target_artists)
tracks = intersection(all_origin_tracks, all_target_tracks)
artists = intersection(all_origin_artists, all_target_artists)
%{
tracks: tracks,
artists: artists,
score: score_for(tracks, artists)
}
end
def basic_match(origin, target, _), do: match(origin, target)
def match(%{id: id}, %{id: id2}) when id == id2, do: %{}
def match(
%{artists: origin_artists, tracks: origin_tracks, profile: %{id: session_id}},
%{artists: target_artists, tracks: target_tracks}
) do
all_origin_tracks = get_all_items(origin_tracks)
all_target_tracks = get_all_items(target_tracks)
all_origin_artists = get_all_items(origin_artists)
all_target_artists = get_all_items(target_artists)
tracks = intersection(all_origin_tracks, all_target_tracks)
artists = intersection(all_origin_artists, all_target_artists)
from_artists = get_artist_tracks(artists, all_origin_tracks, all_target_tracks)
by_artists = group_tracks_by_artists(artists, from_artists)
matched_ids = Enum.map(tracks, & &1.id)
chosen =
chosen_tracks(tracks, by_artists, session_id)
|> add_recommendations(artists, all_origin_artists, all_target_artists, session_id)
|> tracks_with_audio_features(session_id)
|> final_sort(matched_ids)
%{
tracks: tracks,
artists: artists,
from_artists: from_artists,
chosen: chosen,
by_artists: group_tracks_by_artists(artists, chosen),
score: score_for(tracks, artists)
}
end
defp get_all_items(session_map) do
session_map
|> Enum.reduce([], fn {_key, val}, acc -> acc ++ val end)
|> Enum.uniq_by(& &1.id)
end
defp intersection(origin, target) do
ids = Enum.map(origin, & &1.id)
Enum.filter(target, fn track -> Enum.member?(ids, track.id) end)
end
defp get_artist_tracks(artists, origin_tracks, target_tracks) do
all_tracks = Enum.shuffle(origin_tracks ++ target_tracks)
Enum.reduce(artists, [], fn artist, acc ->
acc ++ tracks_by_artist_id(all_tracks, artist.id)
end)
|> Enum.uniq_by(& &1.id)
end
defp group_tracks_by_artists(artists, tracks) do
Enum.reduce(artists, %{}, fn artist, acc ->
artist_tracks = tracks_by_artist_id(tracks, artist.id)
Map.put(acc, artist.id, artist_tracks)
end)
end
defp chosen_tracks(matched_tracks, by_artists, session_id) do
chosen = matched_tracks
chosen_ids = Enum.map(chosen, & &1.id)
{list, pool} =
Enum.reduce(by_artists, {chosen, by_artists}, fn {artist_id, _},
{picked_tracks, new_by_artists} = acc ->
current_pool =
Enum.filter(Map.get(new_by_artists, artist_id), fn track ->
!Enum.member?(chosen_ids, track.id)
end)
cond do
length(picked_tracks) >= @limit ||
length(tracks_by_artist_id(picked_tracks, artist_id)) >= @limit_tracks_per_artist ->
acc
length(current_pool) > 0 ->
{track, new_pool} = List.pop_at(current_pool, 0)
{uniq_tracks(picked_tracks ++ [track]), Map.put(new_by_artists, artist_id, new_pool)}
true ->
popular = popular_tracks(session_id, artist_id)
{track, new_pool} = List.pop_at(popular, 0)
{uniq_tracks(picked_tracks ++ [track]), Map.put(new_by_artists, artist_id, new_pool)}
end
end)
if length(list) == length(matched_tracks) || length(list) >= @limit do
list
else
chosen_tracks(list, pool, session_id)
end
end
def add_recommendations(tracks, artists, origin_artists, target_artists, session_id)
when length(artists) == 0 do
rest = @limit - length(tracks)
artists = Enum.take(origin_artists, 5) ++ Enum.take(target_artists, 5)
tracks ++ recommended_tracks(session_id, artists, rest)
end
def add_recommendations(tracks, artists, _, _, session_id) do
rest = @limit - length(tracks)
tracks ++ recommended_tracks(session_id, artists, rest)
end
defp recommended_tracks(session_id, artists, limit) when limit > 0 do
artist_ids = artists |> Enum.take(5) |> Enum.map(& &1.id)
Tune.spotify_session().get_recommendations_from_artists(session_id, artist_ids, limit)
|> elem(1)
end
defp recommended_tracks(_, _, _), do: []
defp popular_tracks(session_id, artist_id) do
{:ok, tracks} = Tune.spotify_session().get_popular_tracks(session_id, artist_id)
tracks
end
defp tracks_with_audio_features(tracks, session_id)
when length(tracks) > 0 and length(tracks) < 100 do
Tune.spotify_session().audio_features(session_id, tracks)
|> elem(1)
end
defp tracks_with_audio_features(_, _), do: []
defp final_sort(all_tracks, chosen_ids) do
sorted_by_energy =
Enum.sort_by(
all_tracks,
fn track ->
track.audio_features["energy"]
end,
:desc
)
chosen = Enum.filter(sorted_by_energy, fn track -> Enum.member?(chosen_ids, track.id) end)
mid_result = sorted_by_energy -- chosen
uniq_tracks(chosen ++ mid_result)
end
defp uniq_tracks(tracks) do
Enum.uniq_by(tracks, & &1.id)
end
defp tracks_by_artist_id(tracks, artist_id) do
Enum.filter(tracks, fn track ->
Enum.member?(Enum.map(track.artists, & &1.id), artist_id)
end)
end
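# Scoring example: 4 shared tracks and 3 shared artists give
# round(4 * 2.5) + 3 * 5 = 25; score_for/2 caps the result at 100.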
defp score_for(tracks, artists) do
result = round(length(tracks) * @track_score) + length(artists) * @artist_score
if result > 100, do: 100, else: result
end
end
# source: lib/tune/matcher.ex
defmodule Example do
alias Example.NS.EX
alias RDF.{IRI, Description, Graph}
import ExUnit.Assertions
@compile {:no_warn_undefined, Example.NS.EX}
defmodule User do
use Grax.Schema
@compile {:no_warn_undefined, Example.NS.EX}
schema EX.User do
property name: EX.name(), type: :string
property email: EX.email(), type: list_of(:string)
property age: EX.age(), type: :integer
field :password
field :canonical_email, from_rdf: :canonical_email
property customer_type: RDF.type(),
from_rdf: :customer_type_from_rdf,
to_rdf: :customer_type_to_rdf
link posts: EX.post(), type: list_of(Example.Post)
link comments: -EX.author(), type: list_of(%{EX.Comment => Example.Comment})
def customer_type_from_rdf(types, _description, _graph) do
{:ok, if(RDF.iri(EX.PremiumUser) in types, do: :premium_user)}
end
def customer_type_to_rdf(:premium_user, _user), do: {:ok, EX.PremiumUser}
def customer_type_to_rdf(_, _), do: {:ok, nil}
end
def canonical_email(description, _) do
{:ok,
case description[EX.email()] do
[email | _] -> "mailto:#{to_string(email)}"
_ -> nil
end}
end
end
defmodule Post do
use Grax.Schema
@compile {:no_warn_undefined, Example.NS.EX}
schema EX.Post do
property title: EX.title(), type: :string
property content: EX.content(), type: :string
link author: EX.author(), type: Example.User
link comments: EX.comment(), type: list_of(Example.Comment)
field :slug, from_rdf: :slug
end
def slug(description, _) do
{:ok,
case description[EX.title()] do
[title | _] ->
title
|> to_string()
|> String.downcase()
|> String.replace(" ", "-")
_ ->
nil
end}
end
end
defmodule Comment do
use Grax.Schema
schema EX.Comment do
property content: EX.content(), type: :string
link about: EX.about(), type: Example.Post
link author: EX.author(), type: Example.User
end
end
def user(id, opts \\ [depth: 0])
def user(EX.User0, depth: 0) do
%Example.User{
__id__: IRI.new(EX.User0),
name: "<NAME>",
age: 42,
email: ~w[<EMAIL> <EMAIL>],
customer_type: :premium_user,
canonical_email: "mailto:<EMAIL>",
posts: [RDF.iri(EX.Post0)],
comments: []
}
end
def user(EX.User1, depth: 0) do
%Example.User{
__id__: IRI.new(EX.User1),
name: "<NAME>",
email: ["<EMAIL>"],
canonical_email: "mailto:<EMAIL>",
posts: [],
comments: [RDF.iri(EX.Comment1)]
}
end
def user(EX.User2, depth: 0) do
%Example.User{
__id__: IRI.new(EX.User2),
name: "<NAME>",
email: ["<EMAIL>"],
canonical_email: "mailto:<EMAIL>",
posts: [],
comments: [RDF.iri(EX.Comment2)]
}
end
def user(EX.User0, depth: depth) do
%Example.User{user(EX.User0, depth: 0) | posts: [post(depth: depth - 1)], comments: []}
end
def post(opts \\ [depth: 1])
def post(depth: 0) do
%Example.Post{
__id__: IRI.new(EX.Post0),
title: "Lorem ipsum",
content: "Lorem ipsum dolor sit amet, …",
slug: "lorem-ipsum",
author: RDF.iri(EX.User0),
comments: [RDF.iri(EX.Comment1), RDF.iri(EX.Comment2)]
}
end
def post(depth: depth) do
%Example.Post{
post(depth: 0)
| comments: comments(depth: depth - 1),
author: user(EX.User0, depth: depth - 1)
}
end
def comments(depth: depth) do
[comment(EX.Comment1, depth: depth), comment(EX.Comment2, depth: depth)]
end
def comment(EX.Comment1, depth: 0) do
%Example.Comment{
__id__: IRI.new(EX.Comment1),
content: "First",
about: RDF.iri(EX.Post0),
author: RDF.iri(EX.User1)
}
end
def comment(EX.Comment2, depth: 0) do
%Example.Comment{
__id__: IRI.new(EX.Comment2),
content: "Second",
about: RDF.iri(EX.Post0),
author: RDF.iri(EX.User2)
}
end
def comment(EX.Comment1, depth: depth) do
%Example.Comment{
comment(EX.Comment1, depth: 0)
| author: user(EX.User1, depth: depth - 1),
about: post(depth: depth - 1)
}
end
def comment(EX.Comment2, depth: depth) do
%Example.Comment{
comment(EX.Comment2, depth: 0)
| author: user(EX.User2, depth: depth - 1),
about: post(depth: depth - 1)
}
end
defmodule WithIdSchema do
use Grax.Schema, id_spec: Example.IdSpecs.Foo
schema do
property foo: EX.foo()
end
end
defmodule WithIdSchemaNested do
use Grax.Schema, id_spec: Example.IdSpecs.Foo
schema do
property bar: EX.bar()
link foo: EX.foo(), type: Example.WithIdSchema
link more: EX.more(), type: list_of(__MODULE__)
end
end
defmodule WithBlankNodeIdSchema do
use Grax.Schema, id_spec: Example.IdSpecs.BlankNodes
schema do
property name: EX.name()
end
end
defmodule VarMappingA do
use Grax.Schema, id_spec: Example.IdSpecs.VarMapping
schema do
property name: EX.name()
end
end
defmodule VarMappingB do
use Grax.Schema, id_spec: Example.IdSpecs.VarMapping
schema do
property name: EX.name()
end
end
defmodule VarMappingC do
use Grax.Schema, id_spec: Example.IdSpecs.VarMapping
schema do
property name: EX.name()
end
end
defmodule VarMappingD do
use Grax.Schema, id_spec: Example.IdSpecs.VarMapping
schema do
property name: EX.name()
end
end
defmodule WithCustomSelectedIdSchemaA do
use Grax.Schema, id_spec: Example.IdSpecs.CustomSelector
schema do
property foo: EX.foo()
end
end
defmodule WithCustomSelectedIdSchemaB do
use Grax.Schema, id_spec: Example.IdSpecs.CustomSelector
schema do
property bar: EX.bar()
end
end
defmodule MultipleSchemasA do
use Grax.Schema, id_spec: Example.IdSpecs.MultipleSchemas
schema do
property foo: EX.foo()
end
end
defmodule MultipleSchemasB do
use Grax.Schema, id_spec: Example.IdSpecs.MultipleSchemas
schema do
property foo: EX.foo()
end
end
defmodule Untyped do
use Grax.Schema
schema do
property foo: EX.foo()
property bar: EX.bar(), type: list()
end
end
defmodule Datatypes do
use Grax.Schema
schema do
Grax.Datatype.builtins()
|> Enum.each(fn {type, _} ->
property type, apply(EX, type, []), type: type
end)
%{
integers: :integer,
numerics: :numeric
}
|> Enum.each(fn {name, type} ->
property name, apply(EX, name, []), type: list_of(type)
end)
end
end
def types(subject \\ EX.S) do
%Datatypes{
__id__: IRI.new(subject),
string: "string",
any_uri: IRI.parse(EX.foo()),
boolean: true,
integer: 42,
decimal: Decimal.from_float(0.5),
double: 3.14,
float: 3.14,
long: 42,
int: 42,
short: 42,
byte: 42,
non_negative_integer: 42,
positive_integer: 42,
unsigned_long: 42,
unsigned_int: 42,
unsigned_short: 42,
unsigned_byte: 42,
non_positive_integer: -42,
negative_integer: -42,
numeric: 42,
date_time: ~U[2020-01-01 00:00:00Z],
date: ~D[2020-01-01],
time: ~T[00:00:00]
}
end
defmodule DefaultValues do
use Grax.Schema
schema do
property foo: EX.foo(), default: "foo"
property bar: EX.bar(), type: :string, default: "bar"
property baz: EX.baz(), type: :integer, default: 42
link user: EX.user(), type: Example.User
link posts: EX.post(), type: list_of(Example.Post)
end
end
defmodule SelfLinked do
use Grax.Schema
schema do
property name: EX.name(), type: :string
link next: EX.next(), type: Example.SelfLinked, depth: 1
end
end
defmodule Circle do
use Grax.Schema
schema do
property name: EX.name(), type: :string
link link1: EX.link1(), type: list_of(Example.Circle), depth: +1
link link2: EX.link2(), type: list_of(Example.Circle), depth: +1
end
end
defmodule DepthPreloading do
use Grax.Schema
schema do
link next: EX.next(), type: Example.DepthPreloading, depth: 2
end
end
defmodule AddDepthPreloading do
use Grax.Schema, depth: +3
schema do
link next: EX.next(), type: Example.AddDepthPreloading, depth: +2
end
end
defmodule InverseProperties do
use Grax.Schema
schema do
property name: EX.name()
link foo: -EX.foo(), type: list_of(Example.User)
end
end
defmodule HeterogeneousLinks do
use Grax.Schema
schema do
property name: EX.name()
link one: EX.one(),
type: %{
EX.Post => Example.Post,
EX.Comment => Example.Comment
}
link strict_one: EX.strictOne(),
type: %{
EX.Post => Example.Post,
EX.Comment => Example.Comment
},
on_type_mismatch: :error
link many: EX.many(),
type:
list_of(%{
nil => Example.Post,
EX.Comment => Example.Comment
})
end
end
defmodule ClassDeclaration do
use Grax.Schema
schema EX.Class do
property name: EX.name()
end
end
defmodule Required do
use Grax.Schema
schema do
property foo: EX.foo(), required: true
property bar: EX.bar(), type: :integer, required: true
property baz: EX.baz(), type: list(), required: true
link l1: EX.lp1(), type: Example.User, required: true
link l2: EX.lp2(), type: list_of(Example.User), required: true
end
end
defmodule Cardinalities do
use Grax.Schema
schema do
property p1: EX.p1(), type: list(card: 2)
property p2: EX.p2(), type: list_of(:integer, card: 2..4)
property p3: EX.p3(), type: list(min: 3)
link l1: EX.lp1(), type: list_of(Example.User, card: 2..3)
link l2: EX.lp2(), type: list_of(Example.User, min: 2)
end
end
defmodule IdsAsPropertyValues do
use Grax.Schema
schema do
property foo: EX.foo()
property foos: EX.foos(), type: list()
property iri: EX.iri(), type: :iri
property iris: EX.iris(), type: list_of(:iri)
end
end
defmodule ParentSchema do
use Grax.Schema
schema do
property dp1: EX.dp1(), from_rdf: :upcase
property dp2: EX.dp2()
field :f1, default: :foo
field :f2
link lp1: EX.lp1(), type: Example.User
link lp2: EX.lp2(), type: Example.User
end
def upcase([foo], _, _), do: {:ok, foo |> to_string |> String.upcase()}
end
defmodule ChildSchema do
use Grax.Schema
schema inherit: Example.ParentSchema do
property dp2: EX.dp22()
property dp3: EX.dp3()
field :f2, from_rdf: :foo
field :f3
link lp2: EX.lp22(), type: Example.User
link lp3: EX.lp3(), type: Example.User
end
def foo(_, _), do: {:ok, :foo}
end
defmodule ChildSchemaWithClass do
use Grax.Schema
schema EX.Class < Example.ParentSchema do
end
end
defmodule AnotherParentSchema do
use Grax.Schema
schema do
property dp1: EX.dp1(), from_rdf: {Example.ParentSchema, :upcase}
property dp2: EX.dp22()
property dp3: EX.dp3()
field :f1
field :f2
field :f3
link lp1: EX.lp1(), type: Example.User
link lp3: EX.lp3(), type: Example.User
end
end
defmodule ChildOfMany do
use Grax.Schema
schema EX.Class < [Example.ParentSchema, Example.AnotherParentSchema] do
property dp2: EX.dp23()
property dp4: EX.dp4()
field :f1
field :f4
link lp4: EX.lp4(), type: Example.User
end
def foo(_, _), do: {:ok, :foo}
end
defmodule CustomMapping do
use Grax.Schema
@compile {:no_warn_undefined, Example.NS.EX}
schema do
property foo: EX.foo(), from_rdf: :to_foo, to_rdf: :from_foo
property foos: EX.foos(), type: list(), from_rdf: :to_foos, to_rdf: :from_foos
property bar: EX.bar(), type: :string, from_rdf: :to_bar, to_rdf: :from_bar
property bars: EX.bars(), type: list_of(:string), from_rdf: :to_bars, to_rdf: :from_bars
end
def to_foo([object], description, graph) do
assert %Description{} = description
assert Description.include?(description, {EX.foo(), object})
assert %Graph{} = graph
assert Graph.include?(graph, {description.subject, EX.foo(), object})
{:ok, {:foo, to_string(object)}}
end
def to_foo(_, _, _) do
{:error, "multiple :foo values found"}
end
def from_foo({:foo, objects}, mapping) do
assert %__MODULE__{} = mapping
{:ok,
objects
|> List.wrap()
|> Enum.map(&RDF.literal/1)}
end
def to_foos(objects, description, graph) do
assert %Description{} = description
assert Description.include?(description, {EX.foos(), objects})
assert %Graph{} = graph
assert Graph.include?(graph, {description.subject, EX.foos(), objects})
{:ok, Enum.map(objects, &{:foo, to_string(&1)})}
end
def from_foos(objects, _mapping) do
{:ok, Enum.map(objects, fn {:foo, object} -> RDF.literal(object) end)}
end
def to_bar([%IRI{} = iri], _, _) do
{:ok, do_to_bar(iri)}
end
def from_bar(value, _mapping) do
{:ok, apply(EX, String.to_atom(value), [])}
end
def to_bars(iris, _, _) do
{:ok, Enum.map(iris, &do_to_bar/1)}
end
def from_bars([_], _mapping) do
{:error, "not enough bars"}
end
def from_bars([value | rest], mapping) do
{:ok, apply(EX, String.to_atom(value), []),
{mapping.__id__, EX.other(), Enum.map(rest, &apply(EX, String.to_atom(&1), []))}}
end
defp do_to_bar(iri), do: IRI.parse(iri).path |> Path.basename()
end
defmodule CustomMappingOnCustomFields do
use Grax.Schema
schema do
field :uuid, from_rdf: :to_uuid
end
def to_uuid(%Description{subject: %{value: "urn:uuid:" <> uuid}}, graph) do
assert %Graph{} = graph
{:ok, uuid}
end
def to_uuid(_, _), do: {:error, "invalid id"}
end
defmodule CustomMappingInSeparateModule do
use Grax.Schema
schema do
property foo: EX.foo(),
from_rdf: {Example.SeparateCustomMappingModule, :to_foo},
to_rdf: {Example.SeparateCustomMappingModule, :from_foo}
field :bar, from_rdf: {Example.SeparateCustomMappingModule, :to_bar}
end
end
defmodule SeparateCustomMappingModule do
def to_foo([foo], _, _), do: {:ok, foo |> to_string |> String.upcase()}
def from_foo(foo, _), do: {:ok, foo |> String.downcase()}
def to_bar(_, _), do: {:ok, "bar"}
end
defmodule UserWithCallbacks do
use Grax.Schema
@compile {:no_warn_undefined, Example.NS.EX}
schema EX.User do
property name: EX.name(), type: :string
property email: EX.email(), type: list_of(:string)
property age: EX.age(), type: :integer
field :password
field :canonical_email, from_rdf: :canonical_email
field :customer_type
link posts: EX.post(), type: list_of(Example.Post)
link comments: -EX.author(), type: list_of(%{EX.Comment => Example.Comment})
end
def on_load(user, graph, opts) do
assert %__MODULE__{} = user
assert "mailto:" <> _ = user.canonical_email
assert Keyword.get(opts, :test) == 42
assert %RDF.Graph{} = graph
assert RDF.iri(EX.PremiumUser) in graph[user.__id__][RDF.type()]
{:ok, %{user | customer_type: :admin}}
end
def on_to_rdf(user, graph, opts) do
assert %__MODULE__{} = user
assert Keyword.get(opts, :test) == 42
assert %RDF.Graph{} = graph
assert RDF.iri(EX.User) in graph[user.__id__][RDF.type()]
{:ok, Graph.add(graph, user.__id__ |> RDF.type(EX.Admin))}
end
def canonical_email(description, _) do
{:ok,
case description[EX.email()] do
[email | _] -> "mailto:#{to_string(email)}"
_ -> nil
end}
end
end
end
# source: test/support/example_schemas.ex
defmodule Access do
@moduledoc """
Dictionary-like access to data structures via the `foo[bar]` syntax.
This module also empowers `Kernel`s nested update functions
`Kernel.get_in/2`, `Kernel.put_in/3`, `Kernel.update_in/3` and
`Kernel.get_and_update_in/3`.
## Examples
Out of the box, Access works with built-in dictionaries: `Keyword`
and `Map`:
iex> keywords = [a: 1, b: 2]
iex> keywords[:a]
1
iex> map = %{a: 1, b: 2}
iex> map[:a]
1
iex> star_ratings = %{1.0 => "★", 1.5 => "★☆", 2.0 => "★★"}
iex> star_ratings[1.5]
"★☆"
Furthermore, Access transparently ignores `nil` values:
iex> keywords = [a: 1, b: 2]
iex> keywords[:c][:unknown]
nil
The key comparison must be implemented using the `===` operator.
"""
use Behaviour
@type t :: list | map | nil
@type key :: any
@type value :: any
defcallback fetch(t, key) :: {:ok, value} | :error
defcallback get(t, key, value) :: value
defcallback get_and_update(t, key, (value -> {value, value})) :: {value, t}
@doc """
Fetches the value for the given key.
Returns `{:ok, value}` if the key exists in the container,
`:error` otherwise.
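
## Examples

    iex> Access.fetch([a: 1], :a)
    {:ok, 1}
    iex> Access.fetch(%{a: 1}, :b)
    :error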
"""
@spec fetch(t, key) :: {:ok, value} | :error
def fetch(container, key)
def fetch(%{__struct__: struct} = container, key) do
struct.fetch(container, key)
end
def fetch(%{} = map, key) do
:maps.find(key, map)
end
def fetch(list, key) when is_list(list) do
case :lists.keyfind(key, 1, list) do
{^key, value} -> {:ok, value}
false -> :error
end
end
def fetch(nil, _key) do
:error
end
@doc """
Fetches the value for the given key.
Returns the value if the key exists, otherwise raises `KeyError`.
"""
def fetch!(dict, key) do
case fetch(dict, key) do
{:ok, value} -> value
:error -> raise KeyError, key: key, term: dict
end
end
@doc """
Gets the container's value for the given key, falling back to `default`
if the key does not exist.
"""
@spec get(t, term, term) :: term
def get(container, key, default \\ nil)
def get(%{__struct__: struct} = container, key, default) do
struct.get(container, key, default)
end
def get(%{} = map, key, default) do
case :maps.find(key, map) do
{:ok, value} -> value
:error -> default
end
end
def get(list, key, default) when is_list(list) do
case :lists.keyfind(key, 1, list) do
{^key, value} -> value
false -> default
end
end
def get(nil, _key, default) do
default
end
@doc """
Gets and updates the container's value for the given key, in a single pass.
The argument function `fun` must receive the value for the given `key` (or
`nil` if the key doesn't exist in `container`). It must return a tuple
containing the `get` value and the new value to be stored in the `container`.
This function returns a two-element tuple.
The first element is the `get` value, as returned by `fun`.
The second element is the container, updated with the value returned by `fun`.
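
## Examples

    iex> Access.get_and_update(%{a: 1}, :a, fn current -> {current, current + 1} end)
    {1, %{a: 2}}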
"""
@spec get_and_update(t, term, (term -> {get, term})) :: {get, t} when get: var
def get_and_update(container, key, fun)
def get_and_update(%{__struct__: struct} = container, key, fun) do
struct.get_and_update(container, key, fun)
end
def get_and_update(%{} = map, key, fun) do
current_value = case :maps.find(key, map) do
{:ok, value} -> value
:error -> nil
end
{get, update} = fun.(current_value)
{get, :maps.put(key, update, map)}
end
def get_and_update(list, key, fun) when is_list(list) do
Keyword.get_and_update(list, key, fun)
end
def get_and_update(nil, key, _fun) do
raise ArgumentError,
"could not put/update key #{inspect key} on a nil value"
end
end
# Callbacks invoked when inlining code for *_in in Kernel.
# TODO: Remove me on 1.2
defmodule Access.Map do
@moduledoc false
def update!(%{} = map, key, fun) do
case :maps.find(key, map) do
{:ok, value} ->
:maps.put(key, fun.(value), map)
:error ->
raise KeyError, key: key, term: map
end
end
def update!(other, key, _fun) do
raise ArgumentError,
"could not put/update key #{inspect key}. Expected map/struct, got: #{inspect other}"
end
def get_and_update!(%{} = map, key, fun) do
case :maps.find(key, map) do
{:ok, value} ->
{get, update} = fun.(value)
{get, :maps.put(key, update, map)}
:error ->
raise KeyError, key: key, term: map
end
end
def get_and_update!(other, key, _fun) do
raise ArgumentError,
"could not put/update key #{inspect key}. Expected map/struct, got: #{inspect other}"
end
end
# source: lib/elixir/lib/access.ex
defmodule Club.AggregateCase do
@moduledoc """
This module defines the test case to be used by aggregate tests.
"""
use ExUnit.CaseTemplate
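# Usage sketch (hypothetical aggregate module): a test case opts in with
#
#     use Club.AggregateCase, aggregate: MyApp.Counter
#
# which compiles the assert_events/2,3, assert_state/2,3 and
# assert_error/2,3 helpers below against that aggregate.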
using aggregate: aggregate do
quote bind_quoted: [aggregate: aggregate] do
@aggregate_module aggregate
import Club.Factory
import Club.Fixture
# Assert that the expected events are returned when the given commands have been executed
defp assert_events(commands, expected_events) do
assert_events([], commands, expected_events)
end
defp assert_events(initial_events, commands, expected_events) do
{_aggregate, events, error} = aggregate_run(initial_events, commands)
actual_events = List.wrap(events)
assert is_nil(error)
assert actual_events == expected_events
end
defp assert_state(commands, expected_state) do
assert_state([], commands, expected_state)
end
defp assert_state(initial_events, commands, expected_state) do
{aggregate, events, error} = aggregate_run(initial_events, commands)
assert is_nil(error)
assert aggregate == expected_state
end
defp assert_error(commands, expected_error) do
assert_error([], commands, expected_error)
end
defp assert_error(initial_events, commands, expected_error) do
{_aggregate, _events, error} = aggregate_run(initial_events, commands)
assert error == expected_error
end
defp aggregate_run(initial_events, commands) do
%@aggregate_module{}
|> evolve(initial_events)
|> execute(commands)
end
# Execute one or more commands against an aggregate
defp execute(aggregate, commands) do
commands
|> List.wrap()
|> Enum.reduce({aggregate, [], nil}, fn
command, {aggregate, _events, nil} ->
case @aggregate_module.execute(aggregate, command) do
{:error, _reason} = error -> {aggregate, nil, error}
events -> {evolve(aggregate, events), events, nil}
end
_command, {aggregate, _events, _error} = reply ->
reply
end)
end
# Apply the given events to the aggregate state
defp evolve(aggregate, events) do
events
|> List.wrap()
|> Enum.reduce(aggregate, &@aggregate_module.apply(&2, &1))
end
end
end
end
# source: test/support/aggregate_case.ex
defmodule Layout do
@moduledoc """
The Layout module helps solve some of the difficulties in creating charts
and graphs. It provides an easy approach to defining the various parts of
charts such as axes, plot areas, gridlines, and titles.
A layout is simply a list of each chart area in one direction. For an X/Y
chart, two layouts can be used to define the structure of the chart. Take for
example creating an SVG chart showing lake levels (Y) by month (X). The
elements of this chart in the x direction might consist of the following
parts:
- a left margin between the left edge of the SVG and the start of the chart.
- an area that will show the title of the y-axis with text rotated 90°.
- an area for the display of the labels for each gridline in the chart.
- the plot area for the lake level data
- a right margin between the right edge of the SVG and the right of the
chart.
With a well defined width of the chart, the widths of each area could be
specified and then used for drawing text, labels, ticks, gridlines and data.
However, it becomes difficult to make all the changes should the canvas be
sized differently or if other chart parameters need to change.
Here's an example of how to setup a Layout for the horizontal direction in
the chart above. Let's assume that the width of the SVG canvas is going to
be 800. The Layout generation might look something like this.
layout =
Layout.create("chart horizontal", 800)
      |> Layout.add_element("left margin", 2.0)
      |> Layout.add_element("y axis title", 10.0)
      |> Layout.add_element("y axis labels", 20.0)
      |> Layout.add_element("plot area", 75.0)
      |> Layout.add_element("right margin", 2.0)
|> Layout.resolve
The code shows the creation of a layout with a layout name and the width of
the canvas that will be used. This is followed by layout elements defined by
the chart designer. Each element has a name and a relative length (width for
the horizontal layout in this example). The specified widths are relative and
are determined by their ratio in the total of the relative widths. This makes
it easy to change the width of a single element without redistributing the
width in other elements. In this case, the total relative widths add up to
109.0. This means, for example, that the "plot area" will be 75.0/109 or 69%
of the total chart width.
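For instance, with the 800-unit canvas above, resolving gives the
"plot area" a start of (2.0 + 10.0 + 20.0) / 109.0 * 800 ≈ 234.9 and a
length of 75.0 / 109.0 * 800 ≈ 550.5.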
The 'resolve' function traverses the layout and assigns starting points and
lengths to each of the elements based on the width of the canvas and the
relative lengths of each element.
'layout' will contain the information about the layout and all its elements.
Now assume the 3-letter month abbreviations need to be placed under the plot
area, each centered within the space allotted to its month. A simple Affine
map can be generated that easily provides position information for each of
the month names. Generation of the transform is as follows:
x_map = Layout.transform layout, "plot area", -0.5, 12
An Affine map is returned that will let the user know the center position for
the months as follows:
jan_pos = Affine.map x_map, 0
sep_pos = Affine.map x_map, 8
These values are the x position in the SVG space that should be used for
writing the centered text.
The Layout handles all the calculations to determine the start point and
width of the plot area and the Affine map automatically 'maps' the start and
length information to the area.
Of course, a second layout needs to be generated for the vertical elements of
the chart, but this is done using the same method as the horizontal. When done,
plotting data is as easy as using the x_map and y_map to move from the data
space to the canvas space.
See also the `scalar` module that can be used to automatically generate x and
y scales, tick marks and gridlines based on the data to be plotted.
"""
defstruct name: nil, elements: [],
start: 0.0, length: 0.0,
rstart: 0.0, rlength: 0.0
@doc """
Adds an element to the Layout. Each element requires a name and a relative
length.
"""
def add_element(layout, name, rlength) do
new_element = %Layout{name: name, rlength: rlength}
struct(layout, elements: layout.elements ++ [new_element])
end
@doc """
Creates a new Layout with the given name. The Layout structure is returned
and becomes the state for all other Layout calls. The caller may keep the
state stored as they choose. A name and a length must be provided.
"""
def create(name, length) do
struct(%Layout{},
name: name,
length: length,
)
end
@doc """
Returns the %Layout structure for the specified element. The structure
contains start, length, relative start and relative length info.
"""
def get_element(layout, name) do
layout.elements
|> Enum.find(fn(x) -> x.name == name end)
end
@doc """
Once all elements have been defined, the resolve function fits all the
elements with their relative lengths into the length specified in the
create call. Each element is another %Layout{} structure.
"""
def resolve(layout) do
# Get the total relative length of each element.
rlength =
layout.elements
|> Enum.reduce(0.0, fn %{rlength: rlength}, acc -> acc + rlength end)
# Calculate the scaler for the entire layout.
scale = layout.length / rlength
# Iterate through each element setting start and length based on rstart,
# rlength and scale.
elements =
layout.elements
|> Enum.scan(%Layout{start: 0.0, length: 0.0}, fn element, acc -> element_resolve(element, acc, scale) end)
# Return the updated main structure with updated elements.
struct(layout,
rlength: rlength,
elements: elements
)
end
@doc """
Creates an Affine map for the specified layout element. This Affine map
returns the physical position within the element for a position in the
input range defined by `start` and `length`.
"""
def transform(layout, name, start, length) do
element =
layout.elements
|> Enum.find(fn(x) -> x.name == name end)
x1_out = element.start
x2_out = x1_out + element.length
Affine.create [type: :linear_map, x1_in: start, x1_out: x1_out, x2_in: start+length, x2_out: x2_out]
end
# Given a new element and the accumulator holding the prior element's start
# and length, return an updated element with its start and length set.
defp element_resolve(map_in, acc_in, scale) do
start = acc_in.start + acc_in.length
length = map_in.rlength * scale
struct(map_in, start: start, length: length)
end
end
# source: lib/layout.ex