code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule Rummage.Ecto.Schema do
  @moduledoc """
  Opt-in extensions for modules that are (typically) `Ecto.Schema`s.

  Using this module is not required in order to use `Rummage`; it simply
  layers extra conveniences (`rummage_field/2` and `rummage_scope/3`) on top
  of `Ecto` and `Rummage.Ecto`.
  """

  # The closed set of scope kinds a `rummage_scope` may declare.
  @rummage_scope_types ~w{search sort paginate custom_search custom_sort custom_paginate}a

  @doc """
  Pulls the features of `Rummage.Ecto.Schema` into the calling module. It takes
  advantage of `Ecto`, `rummage_field` and `rummage_scope`.

  ## Usage:

  ```elixir
  defmodule MySchema do
    use Rummage.Ecto.Schema

    schema "my_table" do
      field :field1, :integer
      field :field2, :integer
      timestamps()
    end

    rummage_field :field1_or_field2 do
      {:fragment, "coalesce(?, ?)", :name, :description}
    end

    rummage_scope :show_page, [type: :paginate], fn(page) ->
      %{per_page: 10, page: page}
    end
  end
  ```
  """
  defmacro __using__(opts) do
    quote do
      use Ecto.Schema
      use Rummage.Ecto, unquote(opts)
      import Ecto.Query
      import unquote(__MODULE__)
    end
  end

  @doc """
  Declares a "virtual" rummage field — one that may not exist in the database
  or the schema, but can be expressed as a `fragments` query over one or more
  real fields. The field can then be searched, sorted or paginated on.

  NOTE: Currently this feature has some limitations due to limitations put on
  Ecto's fragments. Ecto 3.0 is expected to come out with `unsafe_fragment`,
  which will give this feature great flexibility. This feature is also quite
  dependent on what database engine is being used. For now, we have made
  a few fragments available (the list can be seen [here]()) which are thoroughly
  tested on postgres. If these fragments don't do it, you can use `rummage_scope`
  to accomplish a similar functionality.

  ## Usage:

  To use upper case name as rummage field:

  ```elixir
  rummage_field :upper_case_name do
    {:fragment, "upper(?)", :name}
  end
  ```

  To use the hour for created_at as rummage field:

  ```elixir
  rummage_field :created_at_hour do
    {:fragment, "date_part('hour', ?)", :inserted_at}
  end
  ```
  """
  defmacro rummage_field(field_name, do: body) do
    # Each declared field becomes a zero-arity function with a well-known
    # prefixed name, so Rummage can discover it by convention.
    generated_name = String.to_atom("__rummage_field_#{field_name}")

    quote do
      def unquote(generated_name)(), do: unquote(body)
    end
  end

  @doc """
  Declares a named rummage scope of one of the types in
  `#{inspect(@rummage_scope_types)}`. The given one-argument function is
  wrapped in a conventionally-named function that Rummage can invoke.
  """
  defmacro rummage_scope(scope_name, [type: scope_type], callback)
           when scope_type in @rummage_scope_types do
    generated_name = String.to_atom("__rummage_#{scope_type}_#{scope_name}")

    quote do
      def unquote(generated_name)(term), do: unquote(callback).(term)
    end
  end
end
|
lib/rummage_ecto/schema.ex
| 0.770162
| 0.824391
|
schema.ex
|
starcoder
|
defmodule EllipticCurve.Ecdsa do
  @moduledoc """
  Used to sign and verify signatures using the Elliptic Curve Digital Signature Algorithm (ECDSA)

  Functions:
  - `sign()`
  - `verify?()`
  """

  alias EllipticCurve.Utils.Integer, as: IntegerUtils
  alias EllipticCurve.Utils.BinaryAscii
  alias EllipticCurve.{Point, Signature, Math}

  @doc """
  Generates a message signature based on a private key

  Parameters:
  - message [string]: message that will be signed
  - privateKey [%EllipticCurve.PrivateKey]: private key data associated with the signer
  - options [keyword list]: refines request
    - hashfunc [:method]: defines the hash function applied to the message. Must be compatible with :crypto.hash. Default: :sha256;

  Returns signature:
  - signature [string]: base-64 message signature;

  ## Example:

      iex> EllipticCurve.Ecdsa.sign("my message", privateKey)
      "MEQCIFp2TrQ6RlThbEOeYin2t+Dz3TAebeK/kinZaU0Iltm4AiBXyvyCTwgjOBo5eZNssw/3shTqn8eHZyoRiToSttrRFw=="
  """
  def sign(message, privateKey, options \\ []) do
    %{hashfunc: hashfunc} = Enum.into(options, %{hashfunc: :sha256})

    numberMessage =
      :crypto.hash(hashfunc, message)
      |> BinaryAscii.numberFromString()

    curveData = privateKey.curve

    # Fresh random nonce per signature; reusing a nonce across two signatures
    # would leak the private key.
    randNum = IntegerUtils.between(1, curveData."N" - 1)

    # r = (randNum * G).x mod N
    r =
      Math.multiply(curveData."G", randNum, curveData."N", curveData."A", curveData."P").x
      |> IntegerUtils.modulo(curveData."N")

    # s = (z + r * secret) * randNum^-1 mod N
    s =
      ((numberMessage + r * privateKey.secret) * Math.inv(randNum, curveData."N"))
      |> IntegerUtils.modulo(curveData."N")

    %Signature{r: r, s: s}
  end

  @doc """
  Verifies a message signature based on a public key

  Parameters:
  - `message` [string]: message that will be signed
  - `signature` [%EllipticCurve.Signature]: signature associated with the message
  - `publicKey` [%EllipticCurve.PublicKey]: public key associated with the message signer
  - `options` [keyword list]: refines request
    - `:hashfunc` [:method]: defines the hash function applied to the message. Must be compatible with :crypto.hash. Default: :sha256;

  Returns:
  - verified [bool]: true if message, public key and signature are compatible, false otherwise;

  ## Example:

      iex> EllipticCurve.Ecdsa.verify?(message, signature, publicKey)
      true
      iex> EllipticCurve.Ecdsa.verify?(wrongMessage, signature, publicKey)
      false
      iex> EllipticCurve.Ecdsa.verify?(message, wrongSignature, publicKey)
      false
      iex> EllipticCurve.Ecdsa.verify?(message, signature, wrongPublicKey)
      false
  """
  def verify?(message, signature, publicKey, options \\ []) do
    %{hashfunc: hashfunc} = Enum.into(options, %{hashfunc: :sha256})

    curveData = publicKey.curve

    # Reject signatures whose components fall outside [1, N-1] BEFORE doing
    # any curve arithmetic (per FIPS 186-4 / SEC1 verification step 1).
    # Previously the expensive point math — including Math.inv(signature.s, N),
    # which is undefined for s = 0 — ran before this validation.
    if signature.r < 1 || signature.r >= curveData."N" ||
         signature.s < 1 || signature.s >= curveData."N" do
      false
    else
      numberMessage =
        :crypto.hash(hashfunc, message)
        |> BinaryAscii.numberFromString()

      inv = Math.inv(signature.s, curveData."N")

      # v = (z * s^-1) * G + (r * s^-1) * publicKey.point
      v =
        Math.add(
          Math.multiply(
            curveData."G",
            IntegerUtils.modulo(numberMessage * inv, curveData."N"),
            curveData."N",
            curveData."A",
            curveData."P"
          ),
          Math.multiply(
            publicKey.point,
            IntegerUtils.modulo(signature.r * inv, curveData."N"),
            curveData."N",
            curveData."A",
            curveData."P"
          ),
          curveData."A",
          curveData."P"
        )

      # Signature is valid iff v is a finite point whose x-coordinate matches r mod N.
      not Point.isAtInfinity?(v) and
        IntegerUtils.modulo(v.x, curveData."N") == signature.r
    end
  end
end
|
lib/ecdsa.ex
| 0.962054
| 0.697274
|
ecdsa.ex
|
starcoder
|
defmodule TypeCheck.TypeError.DefaultFormatter do
  @moduledoc """
  Default implementation of `TypeCheck.TypeError.Formatter`: turns problem
  tuples into human-readable, indented explanation strings.
  """
  @behaviour TypeCheck.TypeError.Formatter

  @doc """
  Formats a problem tuple like `format/1` but strips trailing whitespace,
  suitable for embedding in exception messages.
  """
  def format_wrap(problem_tuple) do
    format(problem_tuple)
    |> String.trim_trailing()
  end

  @doc """
  Transforms a `problem_tuple` into a humanly-readable explanation string.

  C.f. `TypeCheck.TypeError.Formatter` for more information about problem tuples.
  """
  @spec format(TypeCheck.TypeError.Formatter.problem_tuple()) :: String.t()
  def format(problem_tuple)

  # --- Simple scalar builtins: one flat message, no nested problem. ---

  def format({%TypeCheck.Builtin.Atom{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not an atom."
  end

  def format({%TypeCheck.Builtin.Binary{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a binary."
  end

  def format({%TypeCheck.Builtin.Bitstring{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a bitstring."
  end

  def format({%TypeCheck.Builtin.Boolean{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a boolean."
  end

  # --- FixedList: wrong kind, wrong length, or a nested element failure. ---

  def format({s = %TypeCheck.Builtin.FixedList{}, :not_a_list, _, val}) do
    problem = "`#{inspect(val)}` is not a list."
    compound_check(val, s, problem)
  end

  def format(
        {s = %TypeCheck.Builtin.FixedList{}, :different_length,
         %{expected_length: expected_length}, val}
      ) do
    problem = "`#{inspect(val)}` has #{length(val)} elements rather than #{expected_length}."
    compound_check(val, s, problem)
  end

  def format(
        {s = %TypeCheck.Builtin.FixedList{}, :element_error, %{problem: problem, index: index},
         val}
      ) do
    # Recurses into the child problem and indents it under the index header.
    compound_check(val, s, "at index #{index}:\n", format(problem))
  end

  # --- FixedMap: wrong kind, missing required keys, or a bad value. ---

  def format({s = %TypeCheck.Builtin.FixedMap{}, :not_a_map, _, val}) do
    problem = "`#{inspect(val)}` is not a map."
    compound_check(val, s, problem)
  end

  def format({s = %TypeCheck.Builtin.FixedMap{}, :missing_keys, %{keys: keys}, val}) do
    keys_str =
      keys
      |> Enum.map(&inspect/1)
      |> Enum.join(", ")

    problem = "`#{inspect(val)}` is missing the following required key(s): `#{keys_str}`."
    compound_check(val, s, problem)
  end

  def format(
        {s = %TypeCheck.Builtin.FixedMap{}, :value_error, %{problem: problem, key: key}, val}
      ) do
    compound_check(val, s, "under key `#{inspect(key)}`:\n", format(problem))
  end

  def format({%TypeCheck.Builtin.Float{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a float."
  end

  def format({%TypeCheck.Builtin.Function{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a function."
  end

  # --- Guarded types: either the wrapped type failed, or its `when` guard did. ---

  def format({s = %TypeCheck.Builtin.Guarded{}, :type_failed, %{problem: problem}, val}) do
    compound_check(val, s, format(problem))
  end

  def format({s = %TypeCheck.Builtin.Guarded{}, :guard_failed, %{bindings: bindings}, val}) do
    problem = """
    `#{Macro.to_string(s.guard)}` evaluated to false or nil.
    bound values: #{inspect(bindings)}
    """

    compound_check(val, s, "type guard:\n", problem)
  end

  def format({%TypeCheck.Builtin.Integer{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not an integer."
  end

  def format({%TypeCheck.Builtin.PosInteger{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a positive integer."
  end

  def format({%TypeCheck.Builtin.NegInteger{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a negative integer."
  end

  def format({%TypeCheck.Builtin.NonNegInteger{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a non-negative integer."
  end

  def format({s = %TypeCheck.Builtin.List{}, :not_a_list, _, val}) do
    compound_check(val, s, "`#{inspect(val)}` is not a list.")
  end

  def format(
        {s = %TypeCheck.Builtin.List{}, :element_error, %{problem: problem, index: index}, val}
      ) do
    compound_check(val, s, "at index #{index}:\n", format(problem))
  end

  def format({%TypeCheck.Builtin.Literal{value: expected_value}, :not_same_value, %{}, val}) do
    "`#{inspect(val)}` is not the same value as `#{inspect(expected_value)}`."
  end

  def format({s = %TypeCheck.Builtin.Map{}, :not_a_map, _, val}) do
    compound_check(val, s, "`#{inspect(val)}` is not a map.")
  end

  def format({s = %TypeCheck.Builtin.Map{}, :key_error, %{problem: problem}, val}) do
    compound_check(val, s, "key error:\n", format(problem))
  end

  def format({s = %TypeCheck.Builtin.Map{}, :value_error, %{problem: problem, key: key}, val}) do
    compound_check(val, s, "under key `#{inspect(key)}`:\n", format(problem))
  end

  def format({s = %TypeCheck.Builtin.NamedType{}, :named_type, %{problem: problem}, val}) do
    compound_check(val, s, format(problem))
  end

  def format({%TypeCheck.Builtin.None{}, :no_match, _, val}) do
    "`#{inspect(val)}` does not match `none()` (no value matches `none()`)."
  end

  def format({%TypeCheck.Builtin.Number{}, :no_match, _, val}) do
    "`#{inspect(val)}` is not a number."
  end

  # --- OneOf (union types): enumerates every alternative's failure. ---

  def format({s = %TypeCheck.Builtin.OneOf{}, :all_failed, %{problems: problems}, val}) do
    message =
      problems
      |> Enum.with_index()
      |> Enum.map(fn {problem, index} ->
        """
        #{index})
        #{indent(format(problem))}
        """
      end)
      |> Enum.join("\n")

    compound_check(val, s, "all possibilities failed:\n", message)
  end

  def format({s = %TypeCheck.Builtin.Range{}, :not_an_integer, _, val}) do
    compound_check(val, s, "`#{inspect(val)}` is not an integer.")
  end

  def format({s = %TypeCheck.Builtin.Range{range: range}, :not_in_range, _, val}) do
    compound_check(val, s, "`#{inspect(val)}` falls outside the range #{inspect(range)}.")
  end

  # --- FixedTuple: wrong kind, wrong arity, or a nested element failure. ---

  def format({s = %TypeCheck.Builtin.FixedTuple{}, :not_a_tuple, _, val}) do
    problem = "`#{inspect(val)}` is not a tuple."
    compound_check(val, s, problem)
  end

  def format(
        {s = %TypeCheck.Builtin.FixedTuple{}, :different_size, %{expected_size: expected_size},
         val}
      ) do
    problem = "`#{inspect(val)}` has #{tuple_size(val)} elements rather than #{expected_size}."
    compound_check(val, s, problem)
  end

  def format(
        {s = %TypeCheck.Builtin.FixedTuple{}, :element_error, %{problem: problem, index: index},
         val}
      ) do
    compound_check(val, s, "at index #{index}:\n", format(problem))
  end

  def format({s = %TypeCheck.Builtin.Tuple{}, :no_match, _, val}) do
    problem = "`#{inspect(val)}` is not a tuple."
    compound_check(val, s, problem)
  end

  # --- Function-spec failures: bad parameter or bad return value. ---

  def format({s = %TypeCheck.Spec{}, :param_error, %{index: index, problem: problem}, val}) do
    # compound_check(val, s, "at parameter no. #{index + 1}:\n", format(problem))
    # NOTE: here `val` is the full argument list of the call, not a single value.
    arguments = val |> Enum.map(&inspect/1) |> Enum.join(", ")
    call = "#{s.name}(#{arguments})"

    """
    The call `#{call}` does not adhere to spec `#{TypeCheck.Inspect.inspect_binary(s)}`. Reason:
    parameter no. #{index + 1}:
    #{indent(indent(format(problem)))}
    """
  end

  def format(
        {s = %TypeCheck.Spec{}, :return_error, %{problem: problem, arguments: arguments}, _val}
      ) do
    arguments_str = arguments |> Enum.map(&inspect/1) |> Enum.join(", ")
    call = "#{s.name}(#{arguments_str})"

    """
    The result of calling `#{call}` does not adhere to spec `#{
      TypeCheck.Inspect.inspect_binary(s)
    }`. Reason:
    Returned result:
    #{indent(indent(format(problem)))}
    """
  end

  # Shared wrapper: "<value> does not check against <type>. Reason:" followed by
  # the (optionally prefixed) child explanation, indented one level.
  defp compound_check(val, s, child_prefix \\ nil, child_problem) do
    child_str =
      if child_prefix do
        indent(child_prefix <> indent(child_problem))
      else
        indent(child_problem)
      end

    """
    `#{inspect(val)}` does not check against `#{TypeCheck.Inspect.inspect_binary(s)}`. Reason:
    #{child_str}
    """
  end

  # Prefixes every line of `str` (including the first) with one indent step.
  defp indent(str) do
    String.replace(" " <> str, "\n", "\n ")
  end
end
|
lib/type_check/type_error/default_formatter.ex
| 0.877935
| 0.5835
|
default_formatter.ex
|
starcoder
|
defmodule Snap do
  @moduledoc """
  Snap is split into 3 main components:
  * `Snap.Cluster` - clusters are wrappers around the Elasticsearch HTTP API.
    We can use this to perform low-level HTTP requests.
  * `Snap.Bulk` - a convenience wrapper around bulk operations, using `Stream`
    to stream actions (such as `Snap.Bulk.Action.Create`) to be performed
    against the `Snap.Cluster`.
  * `Snap.Indexes` - a convenience wrapper around the Elasticsearch indexes
    APIs, allowing the creation, deleting and aliasing of indexes, along with
    hotswap functionality to bulk load documents into an aliased index,
    switching to it atomically.

  Additionally, there are other supporting modules:
  * `Snap.Auth` - defines how an HTTP request is modified to include
    authentication headers. `Snap.Auth.Plain` implements HTTP Basic Auth.

  ## Set up

  `Snap.Cluster` is a wrapped around an Elasticsearch cluster. We can define
  it like so:

  ```
  defmodule MyApp.Cluster do
    use Snap.Cluster, otp_app: :my_app
  end
  ```

  The configuration for the cluster can be defined in your config:

  ```
  config :my_app, MyApp.Cluster,
    url: "http://localhost:9200",
    username: "username",
    password: "password"
  ```

  Or you can load it dynamically by implementing `c:Snap.Cluster.init/1`.

  Each cluster defines `start_link/1` which must be invoked before using the
  cluster and optionally accepts an explicit config. It creates the
  supervision tree, including the connection pool.

  Include it in your application:

  ```
  def start(_type, _args) do
    children = [
      {MyApp.Cluster, []}
    ]
    opts = [strategy: :one_for_one, name: MyApp.Supervisor]
    Supervisor.start_link(children, opts)
  end
  ```

  ## Config

  The following configuration options are supported:
  * `url` - the URL of the Elasticsearch HTTP endpoint (required)
  * `username` - the username used to access the cluster
  * `password` - the password used to access the cluster
  * `auth` - the auth module used to configure the HTTP authentication headers
    (defaults to `Snap.Auth.Plain`)
  * `pool_size` - the maximum size of the HTTP connection pool (defaults to 5)
  * `telemetry_prefix` - the prefix of the telemetry events (default to
    `[:my_app, :snap]`)

  ## Telemetry

  Snap supports sending `Telemetry` events on each HTTP request. It sends one
  event per query, of the name `[:my_app, :snap, :request]`.

  The telemetry event has the following measurements:
  * `response_time` - how long the request took to return
  * `decode_time` - how long the response took to decode into a map or
    exception
  * `total_time` - how long everything took in total

  In addition, the metadata contains a map of:
  * `method` - the HTTP method used
  * `path` - the path requested
  * `port` - the port requested
  * `host` - the host requested
  * `headers` - a list of the headers sent
  * `body` - the body sent
  * `result` - the result returned to the user
  """
  alias Snap.Request

  # The HTTP verb helpers below are generated at compile time; each simply
  # delegates to `Snap.Request.request/7`. GET and DELETE carry no request
  # body (it is always `nil`), while POST and PUT accept an optional one.

  for {verb, method} <- [get: "GET", delete: "DELETE"] do
    @doc false
    def unquote(verb)(cluster, path, params \\ [], headers \\ [], opts \\ []) do
      Request.request(cluster, unquote(method), path, nil, params, headers, opts)
    end
  end

  for {verb, method} <- [post: "POST", put: "PUT"] do
    @doc false
    def unquote(verb)(cluster, path, body \\ nil, params \\ [], headers \\ [], opts \\ []) do
      Request.request(cluster, unquote(method), path, body, params, headers, opts)
    end
  end
end
|
lib/snap.ex
| 0.93007
| 0.949576
|
snap.ex
|
starcoder
|
defmodule OMG.API.BlockQueue.Core do
  @moduledoc """
  Maintains a queue of to-be-mined blocks. Has no side-effects or side-causes.
  Note that first nonce (zero) of authority account is used to deploy RootChain.
  Every next nonce is used to submit operator blocks.
  (thus, it handles config values as internal variables)
  """
  alias OMG.API.BlockQueue
  alias OMG.API.BlockQueue.Core
  alias OMG.API.BlockQueue.GasPriceAdjustmentStrategyParams, as: GasPriceParams

  use OMG.API.LoggerExt

  defmodule BlockSubmission do
    @moduledoc false

    @type hash() :: <<_::256>>
    @type plasma_block_num() :: non_neg_integer()

    @type t() :: %__MODULE__{
            num: plasma_block_num(),
            hash: hash(),
            nonce: non_neg_integer(),
            gas_price: pos_integer()
          }
    defstruct [:num, :hash, :nonce, :gas_price]
  end

  @zero_bytes32 <<0::size(256)>>

  defstruct [
    :blocks,
    :parent_height,
    last_parent_height: 0,
    formed_child_block_num: 0,
    wait_for_enqueue: false,
    gas_price_to_use: 20_000_000_000,
    mined_child_block_num: 0,
    last_enqueued_block_at_height: 0,
    # config:
    child_block_interval: nil,
    chain_start_parent_height: nil,
    minimal_enqueue_block_gap: 1,
    finality_threshold: 12,
    gas_price_adj_params: %GasPriceParams{}
  ]

  @type t() :: %__MODULE__{
          blocks: %{pos_integer() => %BlockSubmission{}},
          # last mined block num
          mined_child_block_num: BlockQueue.plasma_block_num(),
          # newest formed block num
          formed_child_block_num: BlockQueue.plasma_block_num(),
          # current Ethereum block height
          parent_height: nil | BlockQueue.eth_height(),
          # whether we're pending an enqueue signal with a new block
          wait_for_enqueue: boolean(),
          # gas price to use when (re)submitting transactions
          gas_price_to_use: pos_integer(),
          last_enqueued_block_at_height: pos_integer(),
          # CONFIG CONSTANTS below
          # spacing of child blocks in RootChain contract, being the amount of deposit decimals per child block
          child_block_interval: pos_integer(),
          # Ethereum height at which first block was mined
          chain_start_parent_height: pos_integer(),
          # minimal gap between child blocks
          minimal_enqueue_block_gap: pos_integer(),
          # depth of max reorg we take into account
          finality_threshold: pos_integer(),
          # the gas price adjustment strategy parameters
          gas_price_adj_params: GasPriceParams.t()
        }

  @type submit_result_t() :: {:ok, <<_::256>>} | {:error, map}

  @doc """
  Creates an empty queue state, with no knowledge of previously formed blocks.
  """
  def new, do: {:ok, %__MODULE__{blocks: Map.new()}}

  @doc """
  Creates a queue state from persisted data and the root chain contract's view,
  re-enqueueing blocks known to the DB but not yet seen mined on Ethereum.
  """
  # NOTE: spec fixed — the implementation (via enqueue_existing_blocks/3) returns
  # :mined_blknum_not_found_in_db and :hashes_dont_match, never :mined_hash_not_found_in_db.
  @spec new(keyword) ::
          {:ok, Core.t()}
          | {:error, :contract_ahead_of_db | :mined_blknum_not_found_in_db | :hashes_dont_match}
  def new(
        mined_child_block_num: mined_child_block_num,
        known_hashes: known_hashes,
        top_mined_hash: top_mined_hash,
        parent_height: parent_height,
        child_block_interval: child_block_interval,
        chain_start_parent_height: chain_start_parent_height,
        minimal_enqueue_block_gap: minimal_enqueue_block_gap,
        finality_threshold: finality_threshold,
        last_enqueued_block_at_height: last_enqueued_block_at_height
      ) do
    state = %__MODULE__{
      blocks: Map.new(),
      mined_child_block_num: mined_child_block_num,
      parent_height: parent_height,
      child_block_interval: child_block_interval,
      chain_start_parent_height: chain_start_parent_height,
      minimal_enqueue_block_gap: minimal_enqueue_block_gap,
      finality_threshold: finality_threshold,
      gas_price_adj_params: %GasPriceParams{},
      last_enqueued_block_at_height: last_enqueued_block_at_height
    }

    enqueue_existing_blocks(state, top_mined_hash, known_hashes)
  end

  @doc """
  Adds a freshly-formed child block to the queue, validating that its number is
  the expected next one.
  """
  @spec enqueue_block(Core.t(), BlockQueue.hash(), BlockQueue.plasma_block_num(), pos_integer()) ::
          Core.t() | {:error, :unexpected_block_number}
  def enqueue_block(state, hash, expected_block_number, parent_height) do
    own_height = state.formed_child_block_num + state.child_block_interval

    with :ok <- validate_block_number(expected_block_number, own_height) do
      enqueue_block(state, hash, parent_height)
    end
  end

  defp validate_block_number(block_number, block_number), do: :ok
  defp validate_block_number(_, _), do: {:error, :unexpected_block_number}

  defp enqueue_block(state, hash, parent_height) do
    own_height = state.formed_child_block_num + state.child_block_interval

    block = %BlockSubmission{
      num: own_height,
      nonce: calc_nonce(own_height, state.child_block_interval),
      hash: hash
    }

    blocks = Map.put(state.blocks, own_height, block)

    %{
      state
      | formed_child_block_num: own_height,
        blocks: blocks,
        wait_for_enqueue: false,
        last_enqueued_block_at_height: parent_height
    }
  end

  # Set number of plasma block mined on the parent chain.
  # Since reorgs are possible, consecutive values of mined_child_block_num don't have to be
  # monotonically increasing. Due to construction of contract we know it does not
  # contain holes so we care only about the highest number.
  @spec set_mined(Core.t(), BlockQueue.plasma_block_num()) :: Core.t()
  defp set_mined(state, mined_child_block_num) do
    num_threshold = mined_child_block_num - state.child_block_interval * state.finality_threshold
    young? = fn {_, block} -> block.num > num_threshold end
    blocks = state.blocks |> Enum.filter(young?) |> Map.new()
    top_known_block = max(mined_child_block_num, state.formed_child_block_num)

    %{state | formed_child_block_num: top_known_block, mined_child_block_num: mined_child_block_num, blocks: blocks}
  end

  @doc """
  Set height of Ethereum chain and the height of the child chain mined on Ethereum.
  """
  @spec set_ethereum_status(Core.t(), BlockQueue.eth_height(), BlockQueue.plasma_block_num(), boolean()) ::
          {:do_form_block, Core.t()} | {:dont_form_block, Core.t()}
  def set_ethereum_status(state, parent_height, mined_child_block_num, is_empty_block) do
    new_state =
      %{state | parent_height: parent_height}
      |> set_mined(mined_child_block_num)
      |> adjust_gas_price()

    if should_form_block?(new_state, is_empty_block) do
      {:do_form_block, %{new_state | wait_for_enqueue: true}}
    else
      {:dont_form_block, new_state}
    end
  end

  # Updates gas price to use basing on :calculate_gas_price function, updates current parent height
  # and last mined child block number in the state which used by gas price calculations
  @spec adjust_gas_price(Core.t()) :: Core.t()
  defp adjust_gas_price(%Core{gas_price_adj_params: %GasPriceParams{last_block_mined: nil} = gas_params} = state) do
    # initializes last block mined
    %{state | gas_price_adj_params: GasPriceParams.with(gas_params, state.parent_height, state.mined_child_block_num)}
  end

  defp adjust_gas_price(
         %Core{blocks: blocks, parent_height: parent_height, last_parent_height: last_parent_height} = state
       ) do
    if parent_height <= last_parent_height or
         !Enum.find(blocks, to_mined_block_filter(state)) do
      state
    else
      new_gas_price = calculate_gas_price(state)
      _ = Logger.debug("using new gas price '#{inspect(new_gas_price)}'")

      new_state =
        state
        |> set_gas_price(new_gas_price)
        |> update_last_checked_mined_block_num()

      %{new_state | last_parent_height: parent_height}
    end
  end

  # Calculates the gas price basing on simple strategy to raise the gas price by gas_price_raising_factor
  # when gap of mined parent blocks is growing and dropping the price by gas_price_lowering_factor otherwise
  @spec calculate_gas_price(Core.t()) :: pos_integer()
  defp calculate_gas_price(%Core{
         formed_child_block_num: formed_child_block_num,
         mined_child_block_num: mined_child_block_num,
         gas_price_to_use: gas_price_to_use,
         parent_height: parent_height,
         gas_price_adj_params: %GasPriceParams{
           gas_price_lowering_factor: gas_price_lowering_factor,
           gas_price_raising_factor: gas_price_raising_factor,
           eth_gap_without_child_blocks: eth_gap_without_child_blocks,
           max_gas_price: max_gas_price,
           last_block_mined: {lastchecked_parent_height, lastchecked_mined_block_num}
         }
       }) do
    # Raise only when: blocks are pending, the Ethereum-height gap has been
    # exceeded, and no new child block got mined since the last check.
    multiplier =
      with true <- blocks_needs_be_mined?(formed_child_block_num, mined_child_block_num),
           true <- eth_blocks_gap_filled?(parent_height, lastchecked_parent_height, eth_gap_without_child_blocks),
           false <- new_blocks_mined?(mined_child_block_num, lastchecked_mined_block_num) do
        gas_price_raising_factor
      else
        _ -> gas_price_lowering_factor
      end

    Kernel.min(
      max_gas_price,
      Kernel.round(multiplier * gas_price_to_use)
    )
  end

  # Updates the state with information about last parent height and mined child block number
  @spec update_last_checked_mined_block_num(Core.t()) :: Core.t()
  defp update_last_checked_mined_block_num(
         %Core{
           parent_height: parent_height,
           mined_child_block_num: mined_child_block_num,
           gas_price_adj_params: %GasPriceParams{
             last_block_mined: {_lastechecked_parent_height, lastchecked_mined_block_num}
           }
         } = state
       ) do
    if lastchecked_mined_block_num < mined_child_block_num do
      %Core{
        state
        | gas_price_adj_params: GasPriceParams.with(state.gas_price_adj_params, parent_height, mined_child_block_num)
      }
    else
      state
    end
  end

  defp blocks_needs_be_mined?(formed_child_block_num, mined_child_block_num) do
    formed_child_block_num > mined_child_block_num
  end

  defp eth_blocks_gap_filled?(parent_height, last_height, eth_gap_without_child_blocks) do
    parent_height - last_height >= eth_gap_without_child_blocks
  end

  defp new_blocks_mined?(mined_child_block_num, last_mined_block_num) do
    mined_child_block_num > last_mined_block_num
  end

  defp set_gas_price(state, price) do
    %{state | gas_price_to_use: price}
  end

  @doc """
  Compares the child blocks mined in contract with formed blocks

  Picks for submission child blocks that haven't yet been seen mined on Ethereum
  """
  @spec get_blocks_to_submit(Core.t()) :: [BlockQueue.encoded_signed_tx()]
  def get_blocks_to_submit(%{blocks: blocks, formed_child_block_num: formed} = state) do
    _ = Logger.debug("preparing blocks #{inspect(first_to_mined(state))}..#{inspect(formed)} for submission")

    blocks
    |> Enum.filter(to_mined_block_filter(state))
    |> Enum.map(fn {_blknum, block} -> block end)
    |> Enum.sort_by(& &1.num)
    |> Enum.map(&Map.put(&1, :gas_price, state.gas_price_to_use))
  end

  @spec first_to_mined(Core.t()) :: pos_integer()
  defp first_to_mined(%{mined_child_block_num: mined, child_block_interval: interval}), do: mined + interval

  @spec to_mined_block_filter(Core.t()) :: ({pos_integer, BlockSubmission.t()} -> boolean)
  defp to_mined_block_filter(%{formed_child_block_num: formed} = state),
    do: fn {blknum, _} -> first_to_mined(state) <= blknum and blknum <= formed end

  @doc """
  Generates an enumerable of block numbers to be starting the BlockQueue with
  (inclusive and it takes `finality_threshold` blocks before the youngest mined block)
  """
  @spec child_block_nums_to_init_with(non_neg_integer, non_neg_integer, pos_integer, non_neg_integer) :: list
  def child_block_nums_to_init_with(mined_num, until_child_block_num, interval, finality_threshold) do
    make_range(max(interval, mined_num - finality_threshold * interval), until_child_block_num, interval)
  end

  @spec should_form_block?(Core.t(), boolean()) :: boolean()
  defp should_form_block?(
         %Core{
           parent_height: parent_height,
           last_enqueued_block_at_height: last_enqueued_block_at_height,
           minimal_enqueue_block_gap: minimal_enqueue_block_gap,
           wait_for_enqueue: wait_for_enqueue
         },
         is_empty_block
       ) do
    it_is_time = parent_height - last_enqueued_block_at_height > minimal_enqueue_block_gap
    should_form_block = it_is_time and !wait_for_enqueue and !is_empty_block

    _ =
      if !should_form_block do
        log_data = %{
          parent_height: parent_height,
          last_enqueued_block_at_height: last_enqueued_block_at_height,
          minimal_enqueue_block_gap: minimal_enqueue_block_gap,
          wait_for_enqueue: wait_for_enqueue,
          it_is_time: it_is_time,
          is_empty_block: is_empty_block
        }

        Logger.debug("Skipping forming block because: #{inspect(log_data)}")
      end

    should_form_block
  end

  defp calc_nonce(height, interval) do
    trunc(height / interval)
  end

  # :lists.seq/3 throws, so wrapper
  defp make_range(first, last, _) when first > last, do: []

  defp make_range(first, last, step) do
    :lists.seq(first, last, step)
  end

  # When restarting, we don't actually know what was the state of submission process to Ethereum.
  # Some blocks might have been submitted and lost/rejected/reorged by Ethereum in the mean time.
  # To properly restart the process we get last blocks known to DB and split them into mined
  # blocks (might still need tracking!) and blocks not yet submitted.
  # NOTE: handles both the case when there aren't any hashes in database and there are
  @spec enqueue_existing_blocks(Core.t(), BlockQueue.hash(), [{pos_integer(), BlockQueue.hash()}]) ::
          {:ok, Core.t()} | {:error, :contract_ahead_of_db | :mined_blknum_not_found_in_db | :hashes_dont_match}
  defp enqueue_existing_blocks(state, @zero_bytes32, [] = _known_hashes) do
    # we start a fresh queue from db and fresh contract
    {:ok, %{state | formed_child_block_num: 0}}
  end

  defp enqueue_existing_blocks(_state, _top_mined_hash, [] = _known_hashes) do
    # something's wrong - no hashes in db and top_mined hash isn't a zero hash as required
    {:error, :contract_ahead_of_db}
  end

  defp enqueue_existing_blocks(state, top_mined_hash, hashes) do
    with :ok <- block_number_and_hash_valid?(top_mined_hash, state.mined_child_block_num, hashes) do
      {mined_blocks, fresh_blocks} = split_existing_blocks(state, hashes)

      mined_submissions =
        for {num, hash} <- mined_blocks do
          {num,
           %BlockSubmission{
             num: num,
             hash: hash,
             nonce: calc_nonce(num, state.child_block_interval)
           }}
        end
        |> Map.new()

      state = %{
        state
        | formed_child_block_num: state.mined_child_block_num,
          blocks: mined_submissions
      }

      _ = Logger.info("Loaded with #{inspect(mined_blocks)} mined and #{inspect(fresh_blocks)} enqueued")

      {:ok, Enum.reduce(fresh_blocks, state, fn hash, acc -> enqueue_block(acc, hash, state.parent_height) end)}
    end
  end

  # splits into ones that are before top_mined_hash and those after
  # mined are zipped with their numbers to submit
  defp split_existing_blocks(%__MODULE__{mined_child_block_num: blknum}, blknums_and_hashes) do
    {mined, fresh} =
      Enum.find_index(blknums_and_hashes, &(elem(&1, 0) == blknum))
      |> case do
        nil -> {[], blknums_and_hashes}
        index -> Enum.split(blknums_and_hashes, index + 1)
      end

    fresh_hashes = Enum.map(fresh, &elem(&1, 1))

    {mined, fresh_hashes}
  end

  defp block_number_and_hash_valid?(@zero_bytes32, 0, _) do
    :ok
  end

  defp block_number_and_hash_valid?(expected_hash, blknum, blknums_and_hashes) do
    validate_block_hash(
      expected_hash,
      Enum.find(blknums_and_hashes, fn {num, _hash} -> blknum == num end)
    )
  end

  defp validate_block_hash(expected, {_blknum, blkhash}) when expected == blkhash, do: :ok
  defp validate_block_hash(_, nil), do: {:error, :mined_blknum_not_found_in_db}
  defp validate_block_hash(_, _), do: {:error, :hashes_dont_match}

  @doc """
  Interprets the result of an Ethereum submission transaction, deciding whether
  it is benign (already known / already mined) or a genuine failure.
  """
  @spec process_submit_result(BlockSubmission.t(), submit_result_t(), BlockSubmission.plasma_block_num()) ::
          :ok | {:error, atom}
  def process_submit_result(submission, submit_result, newest_mined_blknum) do
    case submit_result do
      {:ok, txhash} ->
        _ = Logger.info("Submitted #{inspect(submission)} at: #{inspect(txhash)}")
        :ok

      {:error, %{"code" => -32_000, "message" => "known transaction" <> _}} ->
        _ = Logger.debug("Submission #{inspect(submission)} is known transaction - ignored")
        :ok

      {:error, %{"code" => -32_000, "message" => "replacement transaction underpriced"}} ->
        _ = Logger.debug("Submission #{inspect(submission)} is known, but with higher price - ignored")
        :ok

      {:error, %{"code" => -32_000, "message" => "authentication needed: password or unlock"}} ->
        _ = Logger.error("It seems that authority account is locked. Check README.md")
        {:error, :account_locked}

      {:error, %{"code" => -32_000, "message" => "nonce too low"}} ->
        process_nonce_too_low(submission, newest_mined_blknum)
    end
  end

  defp process_nonce_too_low(%BlockSubmission{num: blknum} = submission, newest_mined_blknum) do
    if blknum <= newest_mined_blknum do
      # apparently the `nonce too low` error is related to the submission having been mined while it was prepared
      :ok
    else
      _ = Logger.error("Submission #{inspect(submission)} unexpectedly failed with nonce too low")
      {:error, :nonce_too_low}
    end
  end
end
|
apps/omg_api/lib/block_queue/core.ex
| 0.77928
| 0.42471
|
core.ex
|
starcoder
|
defmodule Jason.Formatter do
  @moduledoc ~S"""
  Pretty-printing and minimizing functions for JSON-encoded data.

  Input is required to be in an 8-bit-wide encoding such as UTF-8 or Latin-1
  in `t:iodata/0` format. Input must be valid JSON, invalid JSON may produce
  unexpected results or errors.
  """

  @type opts :: [
          {:indent, iodata}
          | {:line_separator, iodata}
          | {:record_separator, iodata}
          | {:after_colon, iodata}
        ]

  import Record

  # Compiled form of the user-facing options, resolved once per call:
  # :indent - one indentation level, :line - separator inside objects/arrays,
  # :record - separator between root-level values, :colon - printed after ":".
  defrecordp :opts, [:indent, :line, :record, :colon]

  @doc ~S"""
  Pretty-prints JSON-encoded `input`.

  `input` may contain multiple JSON objects or arrays, optionally separated
  by whitespace (e.g., one object per line). Objects in output will be
  separated by newlines. No trailing newline is emitted.

  ## Options

  * `:indent` - used for nested objects and arrays (default: two spaces - `"  "`);
  * `:line_separator` - used in nested objects (default: `"\n"`);
  * `:record_separator` - separates root-level objects and arrays
    (default is the value for `:line_separator` option);
  * `:after_colon` - printed after a colon inside objects (default: one space - `" "`).

  ## Examples

      iex> Jason.Formatter.pretty_print(~s|{"a":{"b": [1, 2]}}|)
      ~s|{
        "a": {
          "b": [
            1,
            2
          ]
        }
      }|
  """
  @spec pretty_print(iodata, opts) :: binary
  def pretty_print(input, opts \\ []) do
    input
    |> pretty_print_to_iodata(opts)
    |> IO.iodata_to_binary()
  end

  @doc ~S"""
  Pretty-prints JSON-encoded `input` and returns iodata.

  This function should be preferred to `pretty_print/2`, if the pretty-printed
  JSON will be handed over to one of the IO functions or sent
  over the socket. The Erlang runtime is able to leverage vectorised
  writes and avoid allocating a continuous buffer for the whole
  resulting string, lowering memory use and increasing performance.
  """
  @spec pretty_print_to_iodata(iodata, opts) :: iodata
  def pretty_print_to_iodata(input, opts \\ []) do
    opts = parse_opts(opts, "  ", "\n", nil, " ")

    # :first marks "no root value emitted yet", so no record separator is
    # printed before the very first object or array.
    depth = :first
    empty = false

    {output, _state} = pp_iodata(input, [], depth, empty, opts)

    output
  end

  @doc ~S"""
  Minimizes JSON-encoded `input`.

  `input` may contain multiple JSON objects or arrays, optionally
  separated by whitespace (e.g., one object per line). Minimized
  output will contain one object per line. No trailing newline is emitted.

  ## Options

  * `:record_separator` - controls the string used as newline (default: `"\n"`).

  ## Examples

      iex> Jason.Formatter.minimize(~s|{ "a" : "b" , "c": \n\n 2}|)
      ~s|{"a":"b","c":2}|
  """
  @spec minimize(iodata, opts) :: binary
  def minimize(input, opts \\ []) do
    input
    |> minimize_to_iodata(opts)
    |> IO.iodata_to_binary()
  end

  @doc ~S"""
  Minimizes JSON-encoded `input` and returns iodata.

  This function should be preferred to `minimize/2`, if the minimized
  JSON will be handed over to one of the IO functions or sent
  over the socket. The Erlang runtime is able to leverage vectorised
  writes and avoid allocating a continuous buffer for the whole
  resulting string, lowering memory use and increasing performance.
  """
  @spec minimize_to_iodata(iodata, opts) :: iodata
  def minimize_to_iodata(input, opts) do
    record = Keyword.get(opts, :record_separator, "\n")
    # Minimizing is pretty-printing with every separator empty except the
    # record separator between root-level values.
    opts = opts(indent: "", line: "", record: record, colon: "")

    depth = :first
    empty = false

    {output, _state} = pp_iodata(input, [], depth, empty, opts)

    output
  end

  # Folds the user's option keyword list into an opts record, normalizing each
  # value to a binary. Note: :line_separator also becomes the record separator
  # unless :record_separator was set explicitly.
  defp parse_opts([{option, value} | opts], indent, line, record, colon) do
    value = IO.iodata_to_binary(value)

    case option do
      :indent -> parse_opts(opts, value, line, record, colon)
      :record_separator -> parse_opts(opts, indent, line, value, colon)
      :after_colon -> parse_opts(opts, indent, line, record, value)
      :line_separator -> parse_opts(opts, indent, value, record || value, colon)
    end
  end

  defp parse_opts([], indent, line, record, colon) do
    opts(indent: indent, line: line, record: record || line, colon: colon)
  end

  # Precompute the default two-space indent for common nesting depths at
  # compile time, so the hot path returns a flat binary instead of a list.
  for depth <- 1..16 do
    defp tab("  ", unquote(depth)), do: unquote(String.duplicate("  ", depth))
  end

  defp tab("", _), do: ""
  defp tab(indent, depth), do: List.duplicate(indent, depth)

  # pp_iodata/5 walks arbitrary iodata byte by byte. When the current chunk is
  # exhausted it returns {output, continuation} so the caller can feed in the
  # rest of the iodata (continuation-passing style over possibly-nested lists).
  defp pp_iodata(<<>>, output_acc, depth, empty, opts) do
    {output_acc, &pp_iodata(&1, &2, depth, empty, opts)}
  end

  defp pp_iodata(<<byte, rest::binary>>, output_acc, depth, empty, opts) do
    pp_byte(byte, rest, output_acc, depth, empty, opts)
  end

  defp pp_iodata([], output_acc, depth, empty, opts) do
    {output_acc, &pp_iodata(&1, &2, depth, empty, opts)}
  end

  defp pp_iodata([byte | rest], output_acc, depth, empty, opts) when is_integer(byte) do
    pp_byte(byte, rest, output_acc, depth, empty, opts)
  end

  # Nested iodata: process the head fully, then resume with the tail via the
  # returned continuation.
  defp pp_iodata([head | tail], output_acc, depth, empty, opts) do
    {output_acc, cont} = pp_iodata(head, output_acc, depth, empty, opts)
    cont.(tail, output_acc)
  end

  # Insignificant whitespace between tokens is dropped entirely.
  defp pp_byte(byte, rest, output, depth, empty, opts) when byte in ' \n\r\t' do
    pp_iodata(rest, output, depth, empty, opts)
  end

  # Opening an object/array. `empty` tracks whether the enclosing container has
  # emitted any member yet, which decides if a line break + indent is needed.
  defp pp_byte(byte, rest, output, depth, empty, opts) when byte in '{[' do
    {out, depth} =
      cond do
        depth == :first -> {byte, 1}
        depth == 0 -> {[opts(opts, :record), byte], 1}
        empty -> {[opts(opts, :line), tab(opts(opts, :indent), depth), byte], depth + 1}
        true -> {byte, depth + 1}
      end

    empty = true
    pp_iodata(rest, [output, out], depth, empty, opts)
  end

  # Closing an empty container: emit "{}"/"[]" with no inner line break.
  defp pp_byte(byte, rest, output, depth, true = _empty, opts) when byte in '}]' do
    empty = false
    depth = depth - 1
    pp_iodata(rest, [output, byte], depth, empty, opts)
  end

  # Closing a populated container: break the line and dedent before the bracket.
  defp pp_byte(byte, rest, output, depth, false = empty, opts) when byte in '}]' do
    depth = depth - 1
    out = [opts(opts, :line), tab(opts(opts, :indent), depth), byte]
    pp_iodata(rest, [output, out], depth, empty, opts)
  end

  # A comma starts the next member on a fresh, indented line.
  defp pp_byte(byte, rest, output, depth, _empty, opts) when byte in ',' do
    empty = false
    out = [byte, opts(opts, :line), tab(opts(opts, :indent), depth)]
    pp_iodata(rest, [output, out], depth, empty, opts)
  end

  # A colon is followed by the configured :after_colon string.
  defp pp_byte(byte, rest, output, depth, empty, opts) when byte in ':' do
    out = [byte, opts(opts, :colon)]
    pp_iodata(rest, [output, out], depth, empty, opts)
  end

  # Any other byte starts a value. If it is the first member of a container,
  # break the line and indent first. A double quote switches to string mode,
  # which must pass bytes through untouched until the closing quote.
  defp pp_byte(byte, rest, output, depth, empty, opts) do
    out = if empty, do: [opts(opts, :line), tab(opts(opts, :indent), depth), byte], else: byte
    empty = false

    if byte == ?" do
      pp_string(rest, [output, out], _in_bs = false, &pp_iodata(&1, &2, depth, empty, opts))
    else
      pp_iodata(rest, [output, out], depth, empty, opts)
    end
  end

  # pp_string copies a JSON string body verbatim. `in_bs` means the previous
  # byte was a backslash, so the next byte is escaped and cannot terminate the
  # string. `cont` resumes normal formatting after the closing quote.
  defp pp_string(<<>>, output_acc, in_bs, cont) do
    {output_acc, &pp_string(&1, &2, in_bs, cont)}
  end

  defp pp_string(binary, output_acc, true = _in_bs, cont) when is_binary(binary) do
    <<byte, rest::binary>> = binary
    pp_string(rest, [output_acc, byte], false, cont)
  end

  # Fast path: scan for the next quote or backslash in one :binary.match call
  # and copy everything before it in bulk.
  defp pp_string(binary, output_acc, false = _in_bs, cont) when is_binary(binary) do
    case :binary.match(binary, ["\"", "\\"]) do
      :nomatch ->
        {[output_acc | binary], &pp_string(&1, &2, false, cont)}

      {pos, 1} ->
        {head, tail} = :erlang.split_binary(binary, pos + 1)

        case :binary.at(binary, pos) do
          ?\\ -> pp_string(tail, [output_acc | head], true, cont)
          ?" -> cont.(tail, [output_acc | head])
        end
    end
  end

  defp pp_string([], output_acc, in_bs, cont) do
    {output_acc, &pp_string(&1, &2, in_bs, cont)}
  end

  defp pp_string([byte | rest], output_acc, in_bs, cont) when is_integer(byte) do
    cond do
      in_bs -> pp_string(rest, [output_acc, byte], false, cont)
      byte == ?" -> cont.(rest, [output_acc, byte])
      true -> pp_string(rest, [output_acc, byte], byte == ?\\, cont)
    end
  end

  # Nested iodata inside a string: same head-then-tail continuation dance.
  defp pp_string([head | tail], output_acc, in_bs, cont) do
    {output_acc, cont} = pp_string(head, output_acc, in_bs, cont)
    cont.(tail, output_acc)
  end
end
|
lib/formatter.ex
| 0.899796
| 0.740597
|
formatter.ex
|
starcoder
|
defmodule Plaid.Processor do
  @moduledoc """
  [Plaid Processor API](https://plaid.com/docs/api/processors) calls and schema.
  """

  alias Plaid.Castable

  defmodule CreateTokenResponse do
    @moduledoc """
    [Plaid API /processor/token/create response schema.](https://plaid.com/docs/api/processors/#processortokencreate)
    """

    @behaviour Castable

    @type t :: %__MODULE__{
            processor_token: String.t(),
            request_id: String.t()
          }

    defstruct [:processor_token, :request_id]

    @impl true
    def cast(generic_map) do
      struct!(__MODULE__,
        processor_token: generic_map["processor_token"],
        request_id: generic_map["request_id"]
      )
    end
  end

  @doc """
  Creates a processor token from an access_token.

  Makes a `POST /processor/token/create` call, which generates any
  non-stripe processor token for a given account ID.

  Params:
  * `access_token` - access_token to create a processor token for.
  * `account_id` - ID of the account to create a processor token for.
  * `processor` - name of the processor to create a token for.

  ## Examples

      Processor.create_token("access-prod-123xxx", "account-id", "processor-name", client_id: "123", secret: "abc")
      {:ok, %Processor.CreateTokenResponse{}}

  """
  @spec create_token(String.t(), String.t(), String.t(), Plaid.config()) ::
          {:ok, CreateTokenResponse.t()} | {:error, Plaid.Error.t()}
  def create_token(access_token, account_id, processor, config) do
    payload = %{access_token: access_token, account_id: account_id, processor: processor}

    Plaid.Client.call("/processor/token/create", payload, CreateTokenResponse, config)
  end

  defmodule CreateStripeBankAccountTokenResponse do
    @moduledoc """
    [Plaid API /processor/stripe/bank_account_token/create response schema.](https://plaid.com/docs/api/processors/#processorstripebank_account_tokencreate)
    """

    @behaviour Castable

    @type t :: %__MODULE__{
            stripe_bank_account_token: String.t(),
            request_id: String.t()
          }

    defstruct [:stripe_bank_account_token, :request_id]

    @impl true
    def cast(generic_map) do
      struct!(__MODULE__,
        stripe_bank_account_token: generic_map["stripe_bank_account_token"],
        request_id: generic_map["request_id"]
      )
    end
  end

  @doc """
  Creates a stripe bank account token from an access_token.

  Makes a `POST /processor/stripe/bank_account_token/create` call, which
  generates a stripe bank account token for a given account ID.

  Params:
  * `access_token` - access_token to create a processor token for.
  * `account_id` - ID of the account to create a processor token for.

  ## Examples

      Processor.create_stripe_bank_account_token("access-prod-123xxx", "account-id", client_id: "123", secret: "abc")
      {:ok, %Processor.CreateStripeBankAccountTokenResponse{}}

  """
  @spec create_stripe_bank_account_token(String.t(), String.t(), Plaid.config()) ::
          {:ok, CreateStripeBankAccountTokenResponse.t()} | {:error, Plaid.Error.t()}
  def create_stripe_bank_account_token(access_token, account_id, config) do
    payload = %{access_token: access_token, account_id: account_id}

    Plaid.Client.call(
      "/processor/stripe/bank_account_token/create",
      payload,
      CreateStripeBankAccountTokenResponse,
      config
    )
  end

  defmodule GetAuthResponse do
    @moduledoc """
    [Plaid API /processor/auth/get response schema.](https://plaid.com/docs/api/processors/#processorauthget)
    """

    @behaviour Castable

    alias Plaid.Account
    alias Plaid.Processor.Numbers

    @type t :: %__MODULE__{
            account: Account.t(),
            numbers: Numbers.t(),
            request_id: String.t()
          }

    defstruct [:account, :numbers, :request_id]

    @impl true
    def cast(generic_map) do
      struct!(__MODULE__,
        account: Castable.cast(Account, generic_map["account"]),
        numbers: Castable.cast(Numbers, generic_map["numbers"]),
        request_id: generic_map["request_id"]
      )
    end
  end

  @doc """
  Get the bank account info given a processor_token.

  Makes a `POST /processor/auth/get` call, returning the bank account and bank
  identification number (such as the routing number, for US accounts) for a
  checking or savings account associated with the given processor_token.

  Params:
  * `processor_token` - The processor token obtained from the Plaid integration partner.

  ## Examples

      Processor.get_auth("processor-prod-123xxx", client_id: "123", secret: "abc")
      {:ok, %Processor.GetAuthResponse{}}

  """
  @spec get_auth(String.t(), Plaid.config()) ::
          {:ok, GetAuthResponse.t()} | {:error, Plaid.Error.t()}
  def get_auth(processor_token, config) do
    payload = %{processor_token: processor_token}

    Plaid.Client.call("/processor/auth/get", payload, GetAuthResponse, config)
  end

  defmodule GetBalanceResponse do
    @moduledoc """
    [Plaid API /processor/balance/get response schema.](https://plaid.com/docs/api/processors/#processorbalanceget)
    """

    @behaviour Castable

    alias Plaid.Account

    @type t :: %__MODULE__{
            account: Account.t(),
            request_id: String.t()
          }

    defstruct [:account, :request_id]

    @impl true
    def cast(generic_map) do
      struct!(__MODULE__,
        account: Castable.cast(Account, generic_map["account"]),
        request_id: generic_map["request_id"]
      )
    end
  end

  @doc """
  Get real-time balance for each of an Item's accounts.

  Makes a `POST /processor/balance/get` call, returning the balance for each of
  an Item's accounts.

  While other endpoints may return a balance object, only /processor/balance/get
  forces the available and current balance fields to be refreshed rather than cached.

  Params:
  * `processor_token` - The processor token obtained from the Plaid integration partner.

  ## Examples

      Processor.get_balance("processor-prod-123xxx", client_id: "123", secret: "abc")
      {:ok, %Processor.GetBalanceResponse{}}

  """
  @spec get_balance(String.t(), Plaid.config()) ::
          {:ok, GetBalanceResponse.t()} | {:error, Plaid.Error.t()}
  def get_balance(processor_token, config) do
    payload = %{processor_token: processor_token}

    Plaid.Client.call("/processor/balance/get", payload, GetBalanceResponse, config)
  end

  defmodule GetIdentityResponse do
    @moduledoc """
    [Plaid API /processor/identity/get response schema.](https://plaid.com/docs/api/processors/#processoridentityget)
    """

    @behaviour Castable

    alias Plaid.Account

    @type t :: %__MODULE__{
            account: Account.t(),
            request_id: String.t()
          }

    defstruct [:account, :request_id]

    @impl true
    def cast(generic_map) do
      struct!(__MODULE__,
        account: Castable.cast(Account, generic_map["account"]),
        request_id: generic_map["request_id"]
      )
    end
  end

  @doc """
  Get account holder information on file with the financial institution.

  Makes a `POST /processor/identity/get` call, which allows you to retrieve
  various account holder information on file with the financial institution,
  including names, emails, phone numbers, and addresses.

  Params:
  * `processor_token` - The processor token obtained from the Plaid integration partner.

  ## Examples

      Processor.get_identity("processor-prod-123xxx", client_id: "123", secret: "abc")
      {:ok, %Processor.GetIdentityResponse{}}

  """
  @spec get_identity(String.t(), Plaid.config()) ::
          {:ok, GetIdentityResponse.t()} | {:error, Plaid.Error.t()}
  def get_identity(processor_token, config) do
    payload = %{processor_token: processor_token}

    Plaid.Client.call("/processor/identity/get", payload, GetIdentityResponse, config)
  end
end
|
lib/plaid/processor.ex
| 0.919163
| 0.402451
|
processor.ex
|
starcoder
|
defmodule Mockery.Macro do
  @moduledoc """
  Alternative macro-based way to prepare module for mocking/asserting.
  """

  @doc """
  Prepares a module for mocking/asserting.

  Outside the :test Mix env the macro expands to the first argument unchanged
  (when Mix is not available the env is assumed to be :prod). In the :test env
  it expands to code that records `{module, global_mock}` in the process
  dictionary and evaluates to a proxy module standing in for the original.

  ## Examples

  #### Prepare for mocking

      defmodule Foo do
        import Mockery.Macro

        def foo do
          mockable(Bar).bar()
        end
      end

  #### Prepare for mocking with global mock

      # test/support/global_mocks/bar.ex
      defmodule BarGlobalMock do
        def bar, do: :mocked
      end

      # lib/foo.ex
      defmodule Foo do
        import Mockery.Macro

        def foo do
          mockable(Bar, by: BarGlobalMock).bar()
        end
      end

  ## Mockery.of/2 comparison

  * It's based on macro and process dictionary instead of on tuple calls. (Tuple calls
    are disabled by default in OTP21+ and require additional compile flag to be reenabled)
  * It doesn't support passing module names as a string as it don't create unwanted
    compile-time dependencies between modules

  ## Potential issues

  Output of `mockable/2` macro should not be bind to variable or module attribute.
  If it happens, you'll see a compilation warning at best, and in the worst case
  Mockery won't work correctly.
  """
  @spec mockable(
          mod :: module,
          opts :: [by: module]
        ) :: module
  defmacro mockable(mod, opts \\ []) do
    env = opts[:env] || mix_env()

    if env == :test do
      quote do
        mocked_calls = Process.get(Mockery.MockableModule, [])
        Process.put(Mockery.MockableModule, [{unquote(mod), unquote(opts[:by])} | mocked_calls])
        Mockery.Proxy.MacroProxy
      end
    else
      mod
    end
  end

  @compile {:inline, mix_env: 0}
  # Falls back to :prod when the Mix module is not loaded (e.g. in releases).
  defp mix_env do
    case function_exported?(Mix, :env, 0) do
      true -> Mix.env()
      false -> :prod
    end
  end
end
|
lib/mockery/macro.ex
| 0.827932
| 0.510863
|
macro.ex
|
starcoder
|
defmodule Meeseeks.Document do
  @moduledoc """
  A `Meeseeks.Document` represents a flattened, queryable view of an HTML
  document in which:

  - The nodes (element, comment, or text) have been provided an id
  - Parent-child relationships have been made explicit

  ## Examples

  The actual contents of a document become quickly unwieldy in iex, so
  the inspect value of a document is always `#Meeseeks.Document<{...}>`
  regardless of the content. The example below ignores this fact for
  educational purposes.

  ```elixir
  tuple_tree = {"html", [],
                 [{"head", [], []},
                  {"body", [],
                   [{"h1", [{"id", "greeting"}], ["Hello, World!"]},
                    {"div", [], [
                        {"p", [], ["1"]},
                        {"p", [], ["2"]},
                        {"p", [], ["3"]}]}]}]}

  document = Meeseeks.parse(tuple_tree, :tuple_tree)
  #=> %Meeseeks.Document{
  #      id_counter: 12,
  #      roots: [1],
  #      nodes: %{
  #        1 => %Meeseeks.Document.Element{attributes: [], children: [3, 2],
  #         id: 1, namespace: nil, parent: nil, tag: "html"},
  #        2 => %Meeseeks.Document.Element{attributes: [], children: [], id: 2,
  #         namespace: nil, parent: 1, tag: "head"},
  #        3 => %Meeseeks.Document.Element{attributes: [], children: [6, 4], id: 3,
  #         namespace: nil, parent: 1, tag: "body"},
  #        4 => %Meeseeks.Document.Element{attributes: [{"id", "greeting"}],
  #         children: [5], id: 4, namespace: nil, parent: 3, tag: "h1"},
  #        5 => %Meeseeks.Document.Text{content: "Hello, World!", id: 5, parent: 4},
  #        6 => %Meeseeks.Document.Element{attributes: [], children: [7, 9, 11],
  #         id: 6, namespace: nil, parent: 3, tag: "div"},
  #        7 => %Meeseeks.Document.Element{attributes: [], children: [8], id: 7,
  #         namespace: nil, parent: 6, tag: "p"},
  #        8 => %Meeseeks.Document.Text{content: "1", id: 8, parent: 7},
  #        9 => %Meeseeks.Document.Element{attributes: [], children: [10], id: 9,
  #         namespace: nil, parent: 6, tag: "p"},
  #        10 => %Meeseeks.Document.Text{content: "2", id: 10, parent: 9},
  #        11 => %Meeseeks.Document.Element{attributes: [], children: [12], id: 11,
  #         namespace: nil, parent: 6, tag: "p"},
  #        12 => %Meeseeks.Document.Text{content: "3", id: 12, parent: 11}}}

  Meeseeks.Document.children(document, 6)
  #=> [7, 9, 11]

  Meeseeks.Document.descendants(document, 6)
  #=> [7, 8, 9, 10, 11, 12]
  ```
  """

  alias Meeseeks.{Document, Error, Extractor}
  alias Meeseeks.Document.{Element, Node}

  defstruct id_counter: nil, roots: [], nodes: %{}

  @type node_id :: integer
  @type node_t :: Node.t()
  @type t :: %Document{
          id_counter: node_id | nil,
          roots: [node_id],
          nodes: %{optional(node_id) => node_t}
        }

  @doc """
  Returns the HTML of the document.
  """
  @spec html(Document.t()) :: String.t()
  def html(%Document{} = document) do
    document
    |> get_root_nodes()
    |> Enum.map(&Extractor.Html.from_node(&1, document))
    |> IO.iodata_to_binary()
    |> String.trim()
  end

  @doc """
  Returns the `Meeseeks.TupleTree` of the document.
  """
  @spec tree(Document.t()) :: Meeseeks.TupleTree.t()
  def tree(%Document{} = document) do
    document
    |> get_root_nodes()
    |> Enum.map(&Extractor.Tree.from_node(&1, document))
  end

  # Query

  @doc """
  Checks if a node_id refers to a `Meeseeks.Document.Element` in the context
  of the document.

  Raises if node_id does not exist in the document.
  """
  @spec element?(Document.t(), node_id) :: boolean | no_return
  def element?(%Document{} = document, node_id) do
    case fetch_node(document, node_id) do
      {:ok, %Element{}} -> true
      {:ok, _} -> false
      {:error, %Error{} = error} -> raise error
    end
  end

  @doc """
  Returns the node id of node_id's parent in the context of the document, or
  nil if node_id does not have a parent.

  Raises if node_id does not exist in the document.
  """
  @spec parent(Document.t(), node_id) :: node_id | nil | no_return
  def parent(%Document{} = document, node_id) do
    case fetch_node(document, node_id) do
      {:ok, %{parent: nil}} -> nil
      {:ok, %{parent: parent}} -> parent
      {:error, %Error{} = error} -> raise error
    end
  end

  @doc """
  Returns the node ids of node_id's ancestors in the context of the document.

  Returns the ancestors in reverse order: `[parent, grandparent, ...]`

  Raises if node_id does not exist in the document.
  """
  @spec ancestors(Document.t(), node_id) :: [node_id] | no_return
  def ancestors(%Document{} = document, node_id) do
    case parent(document, node_id) do
      nil -> []
      parent_id -> [parent_id | ancestors(document, parent_id)]
    end
  end

  @doc """
  Returns the node ids of node_id's children in the context of the document.

  Returns *all* children, not just those that are `Meeseeks.Document.Element`s.

  Returns children in depth-first order.

  Raises if node_id does not exist in the document.
  """
  @spec children(Document.t(), node_id) :: [node_id] | no_return
  def children(%Document{} = document, node_id) do
    case fetch_node(document, node_id) do
      # Use the aliased %Element{} for consistency with element?/2 and
      # descendants/2 (was %Document.Element{}, which resolves identically).
      {:ok, %Element{children: children}} -> children
      {:ok, _} -> []
      {:error, %Error{} = error} -> raise error
    end
  end

  @doc """
  Returns the node ids of node_id's descendants in the context of the document.

  Returns *all* descendants, not just those that are `Meeseeks.Document.Element`s.

  Returns descendants in depth-first order.

  Raises if node_id does not exist in the document.
  """
  @spec descendants(Document.t(), node_id) :: [node_id] | no_return
  def descendants(%Document{} = document, node_id) do
    case fetch_node(document, node_id) do
      {:ok, %Element{children: children}} ->
        Enum.flat_map(children, &[&1 | descendants(document, &1)])

      {:ok, _} ->
        []

      {:error, %Error{} = error} ->
        raise error
    end
  end

  @doc """
  Returns the node ids of node_id's siblings in the context of the document.

  Returns *all* siblings, **including node_id itself**, and not just those
  that are `Meeseeks.Document.Element`s.

  Returns siblings in depth-first order.

  Raises if node_id does not exist in the document.
  """
  @spec siblings(Document.t(), node_id) :: [node_id] | no_return
  def siblings(%Document{} = document, node_id) do
    with {:ok, node} <- fetch_node(document, node_id) do
      case node.parent do
        # Root nodes are each other's siblings.
        nil -> get_root_ids(document)
        parent -> children(document, parent)
      end
    else
      {:error, %Error{} = error} -> raise error
    end
  end

  @doc """
  Returns the node ids of the siblings that come before node_id in the
  context of the document.

  Returns *all* of these siblings, not just those that are `Meeseeks.Document.Element`s.

  Returns siblings in depth-first order.

  Raises if node_id does not exist in the document.
  """
  @spec previous_siblings(Document.t(), node_id) :: [node_id] | no_return
  def previous_siblings(%Document{} = document, node_id) do
    document
    |> siblings(node_id)
    |> Enum.take_while(fn id -> id != node_id end)
  end

  @doc """
  Returns the node ids of the siblings that come after node_id in the context
  of the document.

  Returns *all* of these siblings, not just those that are
  `Meeseeks.Document.Element`s.

  Returns siblings in depth-first order.

  Raises if node_id does not exist in the document.
  """
  @spec next_siblings(Document.t(), node_id) :: [node_id] | no_return
  def next_siblings(%Document{} = document, node_id) do
    document
    |> siblings(node_id)
    |> Enum.drop_while(fn id -> id != node_id end)
    |> Enum.drop(1)
  end

  @doc """
  Returns all of the document's root ids.

  Returns root ids in depth-first order.
  """
  @spec get_root_ids(Document.t()) :: [node_id]
  def get_root_ids(%Document{roots: roots}) do
    Enum.sort(roots)
  end

  @doc """
  Returns all of the document's root nodes.

  Returns nodes in depth-first order.
  """
  @spec get_root_nodes(Document.t()) :: [node_t]
  def get_root_nodes(%Document{} = document) do
    root_ids = get_root_ids(document)
    get_nodes(document, root_ids)
  end

  @doc """
  Returns all of the document's node ids.

  Returns node ids in depth-first order.
  """
  @spec get_node_ids(Document.t()) :: [node_id]
  def get_node_ids(%Document{nodes: nodes}) do
    nodes
    |> Map.keys()
    |> Enum.sort()
  end

  @doc """
  Returns all of the document's nodes.

  Returns nodes in depth-first order.
  """
  @spec get_nodes(Document.t()) :: [node_t] | no_return
  def get_nodes(document) do
    node_ids = get_node_ids(document)
    get_nodes(document, node_ids)
  end

  @doc """
  Returns a list of nodes referred to by node_ids in the context of the document.

  Returns nodes in the same order as node_ids.

  Raises if any id in node_ids does not exist in the document.
  """
  @spec get_nodes(Document.t(), [node_id]) :: [node_t] | no_return
  def get_nodes(document, node_ids) do
    Enum.map(node_ids, fn node_id ->
      case fetch_node(document, node_id) do
        {:ok, node} -> node
        {:error, error} -> raise error
      end
    end)
  end

  @doc """
  Returns a tuple of {:ok, node}, where node is the node referred to by node_id in the context of the document, or :error.
  """
  @spec fetch_node(Document.t(), node_id) :: {:ok, node_t} | {:error, Error.t()}
  def fetch_node(%Document{nodes: nodes} = document, node_id) do
    case Map.fetch(nodes, node_id) do
      {:ok, _} = ok ->
        ok

      :error ->
        {:error,
         Error.new(:document, :unknown_node, %{
           description: "No node with the provided id exists in the document",
           document: document,
           node_id: node_id
         })}
    end
  end

  @doc """
  Returns the node referred to by node_id in the context of the document, or nil.
  """
  @spec get_node(Document.t(), node_id) :: node_t | nil
  def get_node(%Document{nodes: nodes}, node_id) do
    Map.get(nodes, node_id, nil)
  end

  @doc """
  Deletes the node referenced by node_id and all its descendants from the document.

  Raises if node_id does not exist in the document.
  """
  @spec delete_node(Document.t(), node_id) :: Document.t() | no_return
  def delete_node(%Document{nodes: nodes, roots: roots} = document, node_id) do
    # Collect the node plus its whole subtree; a MapSet makes the repeated
    # membership checks below cheap instead of O(n) list scans per node.
    deleted = MapSet.new([node_id | descendants(document, node_id)])

    roots = Enum.reject(roots, &(&1 in deleted))

    nodes =
      Enum.reduce(nodes, nodes, fn {id, node}, nodes ->
        cond do
          id in deleted ->
            Map.delete(nodes, id)

          # Surviving elements must drop references to deleted children.
          Map.has_key?(node, :children) ->
            Map.put(nodes, id, %{
              node
              | children: Enum.reject(node.children, &(&1 in deleted))
            })

          true ->
            nodes
        end
      end)

    %{document | roots: roots, nodes: nodes}
  end

  # Inspect

  defimpl Inspect do
    def inspect(_document, _opts) do
      "#Meeseeks.Document<{...}>"
    end
  end
end
|
lib/meeseeks/document.ex
| 0.86148
| 0.745306
|
document.ex
|
starcoder
|
defmodule Blockchain.Block do
  @moduledoc """
  Implements a blockchain block, which is a building block of the blockchain.
  Blocks are limited to containing transactions.
  """

  alias Blockchain.Hash
  alias Blockchain.Transaction

  @enforce_keys [:current_hash, :previous_hash, :data, :timestamp, :nonce]
  defstruct @enforce_keys

  @typedoc """
  Represents a block
  """
  @type t :: %__MODULE__{
          current_hash: Hash.t(),
          previous_hash: Hash.t(),
          data: Transaction.t(),
          timestamp: DateTime.t(),
          nonce: non_neg_integer()
        }

  @doc """
  Calculates a block's hash from its constituent parts using SHA-256.

  The previous hash, timestamp, transaction, and nonce are serialized to
  binaries/strings, concatenated as a list, hashed, and wrapped in a `Hash`.
  """
  @spec calculate_hash(Hash.t(), DateTime.t(), Transaction.t(), non_neg_integer()) :: Hash.t()
  def calculate_hash(
        previous_hash,
        %DateTime{} = timestamp,
        %Transaction{} = transaction,
        nonce
      ) do
    payload = [
      Hash.to_binary(previous_hash),
      DateTime.to_string(timestamp),
      :erlang.term_to_binary(transaction),
      Integer.to_string(nonce)
    ]

    Hash.new(ExCrypto.Hash.sha256!(payload))
  end

  @doc """
  Calculates a block's hash from the block struct using SHA-256.
  """
  @spec calculate_hash(__MODULE__.t()) :: Hash.t()
  def calculate_hash(%__MODULE__{} = block) do
    calculate_hash(block.previous_hash, block.timestamp, block.data, block.nonce)
  end

  @doc """
  Determines whether a block is valid by re-calculating its hash and comparing
  the result to the block's stored `current_hash`.
  """
  @spec valid?(__MODULE__.t()) :: boolean()
  def valid?(%__MODULE__{} = block) do
    recalculated = calculate_hash(block.previous_hash, block.timestamp, block.data, block.nonce)
    block.current_hash == recalculated
  end

  @doc """
  Determines whether a block has been mined, i.e. whether the leading
  `difficulty()` bytes of its hash match the target's.
  """
  @spec mined?(Hash.t()) :: boolean()
  def mined?(block_hash) do
    prefix_length = difficulty()
    Hash.part(block_hash, 0, prefix_length) == Hash.part(target(), 0, prefix_length)
  end

  @doc """
  Implements the Hashcash procedure: builds a block from the given parts,
  incrementing the nonce until the resulting hash satisfies `mined?/1`.
  """
  @spec make_and_mine_block(Hash.t(), DateTime.t(), Transaction.t(), non_neg_integer()) ::
          __MODULE__.t()
  def make_and_mine_block(
        previous_hash,
        %DateTime{} = timestamp,
        %Transaction{} = transaction,
        nonce
      )
      when is_integer(nonce) and nonce >= 0 do
    candidate_hash = calculate_hash(previous_hash, timestamp, transaction, nonce)

    case mined?(candidate_hash) do
      true ->
        %__MODULE__{
          current_hash: candidate_hash,
          previous_hash: previous_hash,
          data: transaction,
          timestamp: timestamp,
          nonce: nonce
        }

      false ->
        # Try the next nonce until the hash meets the target.
        make_and_mine_block(previous_hash, timestamp, transaction, nonce + 1)
    end
  end

  @doc """
  Implements the Hashcash procedure starting from an existing block's fields.
  """
  @spec make_and_mine_block(__MODULE__.t()) :: __MODULE__.t()
  def make_and_mine_block(%__MODULE__{} = block) do
    make_and_mine_block(block.previous_hash, block.timestamp, block.data, block.nonce)
  end

  @doc """
  Mines a block for the given transaction at the current UTC time.
  """
  @spec mine_block(Transaction.t(), Hash.t()) :: __MODULE__.t()
  def mine_block(%Transaction{} = transaction, previous_hash) do
    make_and_mine_block(previous_hash, DateTime.utc_now(), transaction, 1)
  end

  @doc """
  Formats a block as a string suitable for printing.
  """
  @spec format(__MODULE__.t()) :: String.t()
  def format(%__MODULE__{} = block) do
    """
    Block information
    =================
    Hash: #{Hash.to_encoded_string(block.current_hash)}
    Previous Hash: #{Hash.to_encoded_string(block.previous_hash)}
    Timestamp: #{DateTime.to_string(block.timestamp)}
    Nonce: #{to_string(block.nonce)}
    Data: #{Transaction.format(block.data)}
    """
  end

  # Helper function to set the number of bytes a target will have
  @spec difficulty :: non_neg_integer()
  defp difficulty, do: 2

  # A target for comparing hashes of blocks: `difficulty()` repetitions of the
  # byte 32 (an ASCII space).
  @spec target :: Hash.t()
  defp target do
    Hash.new(String.duplicate(<<32>>, difficulty()))
  end
end
|
lib/blockchain/block.ex
| 0.833257
| 0.641317
|
block.ex
|
starcoder
|
defmodule Frettchen.Trace do
@moduledoc """
A Trace is a process that collects spans. When a span
is created it registers with the process and when it is closed
it removes itself from the process and gets sent
to the reporter that is has been configured for. A Trace can be configured
to act differently based on need. This allows you to create some traces
that get sent to Jaeger, and other that get logged or sent to null.
You can even configure the port numbers for your collectors so Traces
can go to different collectors.
"""
use GenServer
alias Frettchen.Trace
alias Jaeger.Thrift.Span
defstruct configuration: nil, id: nil, service_name: nil, spans: %{}, timeout: nil
# Public API
@doc """
Starts a new Trace process with a passed service name and options for a
custom id, configuration, and timeout in millieseconds. The current custom timeout
is 2 minutes (2 * 60 * 1000)
"""
def start(service_name, options \\ []) do
configuration = Keyword.get(options, :configuration, %Frettchen.Configuration{})
id = Keyword.get(options, :id, Frettchen.Helpers.random_id())
timeout = Keyword.get(options, :timeout, 120_000)
trace = %{%Trace{} | configuration: configuration, id: id, service_name: service_name, timeout: timeout}
{:ok, _} = GenServer.start_link(__MODULE__, trace, name: {:global, {:frettchen, trace.id}})
trace
end
@doc """
Adds a span to a trace based on the trace_id_low inside the
span. This is largely a convenience function for allowing
spans to be processed inside a pipe.
"""
def add_span(%Span{} = span) do
GenServer.cast({:global, {:frettchen, span.trace_id_low}}, {:add_span, span})
span
end
@doc """
Returns a trace processs based on the trace_low_id inside a span. Usefull
for getting a trace when a span is passed between functions.
"""
def get(%Span{} = span) do
get(span.trace_id_low)
end
@doc """
Returns a trace process based on a passed ID. Usefull for getting
a trace when just an ID reference is passed between process or
microservices.
"""
def get(id) do
case :global.whereis_name({:frettchen, id}) do
:undefined -> :undefined
pid -> :sys.get_state(pid)
end
end
@doc """
Triggers the resolution of a span. A span is sent to the collector
for distribution and then removed from the spans map inside the trace.
"""
def resolve_span(%Span{} = span) do
  # Synchronous call: the span is guaranteed to be handed to the collector
  # before this returns. The span itself is returned for pipelining.
  GenServer.call({:global, {:frettchen, span.trace_id_low}}, {:resolve_span, span})
  span
end
@doc """
Returns a map of all the spans inside a trace, keyed by span_id.
"""
def spans(%Trace{} = trace) do
  GenServer.call({:global, {:frettchen, trace.id}}, :spans)
end
@doc """
Terminates the trace process.
"""
# NOTE(review): this public terminate/1 shares its name with the GenServer
# terminate/2 callback; the arities differ so there is no clash, but a
# rename (e.g. stop/1) would be clearer.
def terminate(%Trace{} = trace) do
  GenServer.call({:global, {:frettchen, trace.id}}, :terminate)
end
# Private API
def init(state) do
  # Schedule automatic shutdown after the configured timeout so traces
  # that are never explicitly terminated do not linger forever.
  Process.send_after(self(), :shutdown, state.timeout)
  {:ok, state}
end
def handle_call(:spans, _from, trace) do
  {:reply, trace.spans, trace}
end
def handle_call({:resolve_span, span}, _from, trace) do
  # Merge the (possibly updated) span into the trace before handing the
  # merged trace to the collector, then drop the span from the kept state:
  # a resolved span no longer needs tracking by this process.
  trace = %{trace | spans: Map.merge(trace.spans, Map.put(%{}, span.span_id, span))}
  Frettchen.Collector.add({span.span_id, trace})
  {:reply, trace, %{trace | spans: Map.delete(trace.spans, span.span_id)}}
end
def handle_call(:terminate, _from, state) do
  # Stop normally; the :ok reply unblocks the caller in terminate/1.
  {:stop, :normal, :ok, state}
end
def handle_cast({:add_span, span}, trace) do
  # Index the span by its span_id inside the trace's spans map.
  {:noreply, %{trace | spans: Map.merge(trace.spans, Map.put(%{}, span.span_id, span))}}
end
@doc """
Starts the shutdown sequence after a timeout by spawning a reaper process
that will close all the remaining spans and then send an exit signal to the
trace.
"""
def handle_info(:shutdown, state) do
  # The reaper takes over: it resolves any remaining spans and is
  # responsible for ending this process (see Frettchen.Reaper).
  {:ok, pid} = Frettchen.Reaper.start_link()
  Frettchen.Reaper.absolve(pid, state)
  {:noreply, state}
end
end
|
lib/frettchen/trace.ex
| 0.815269
| 0.483344
|
trace.ex
|
starcoder
|
defmodule RDF.Diff do
  @moduledoc """
  A data structure for diffs between `RDF.Graph`s and `RDF.Description`s.

  A `RDF.Diff` is a struct consisting of two fields `additions` and `deletions`
  with `RDF.Graph`s of added and deleted statements.
  """
  alias RDF.{Description, Graph}

  @type t :: %__MODULE__{
          additions: Graph.t,
          deletions: Graph.t
        }

  defstruct [:additions, :deletions]

  @doc """
  Creates a `RDF.Diff` struct.

  Some initial additions and deletions can be provided optionally with the resp.
  `additions` and `deletions` keywords. The statements for the additions and
  deletions can be provided in any form supported by the `RDF.Graph.new/1` function.
  """
  @spec new(keyword) :: t
  def new(diff \\ []) do
    %__MODULE__{
      additions: Keyword.get(diff, :additions) |> coerce_graph(),
      deletions: Keyword.get(diff, :deletions) |> coerce_graph()
    }
  end

  # Normalizes the accepted input forms (nil, a Description, or anything
  # Graph.new/1 accepts) to a RDF.Graph. An empty description is mapped to
  # an empty graph — presumably so no empty subject entry is created;
  # TODO(review): confirm against RDF.Graph.new/1.
  defp coerce_graph(nil), do: Graph.new()
  defp coerce_graph(%Description{} = description),
    do: if Enum.empty?(description), do: Graph.new(), else: Graph.new(description)
  defp coerce_graph(data), do: Graph.new(data)

  @doc """
  Computes a diff between two `RDF.Graph`s or `RDF.Description`s.

  The first argument represents the original and the second argument the new version
  of the RDF data to be compared. Any combination of `RDF.Graph`s or
  `RDF.Description`s can be passed as first and second argument.

  ## Examples

      iex> RDF.Diff.diff(
      ...>   RDF.description(EX.S1, EX.p1, [EX.O1, EX.O2]),
      ...>   RDF.graph([
      ...>     {EX.S1, EX.p1, [EX.O2, EX.O3]},
      ...>     {EX.S2, EX.p2, EX.O4}
      ...>   ]))
      %RDF.Diff{
        additions: RDF.graph([
          {EX.S1, EX.p1, EX.O3},
          {EX.S2, EX.p2, EX.O4}
        ]),
        deletions: RDF.graph({EX.S1, EX.p1, EX.O1})
      }
  """
  @spec diff(Description.t | Graph.t, Description.t | Graph.t) :: t
  def diff(original_rdf_data, new_rdf_data)

  # Identical descriptions (same subject and statements): empty diff.
  def diff(%Description{} = description, description), do: new()

  # Same-subject descriptions: walk the original's predicates, starting with
  # the whole new description as candidate additions and an empty description
  # as deletions. Unchanged objects are removed from the additions; objects
  # missing from the new description are collected as deletions. Predicates
  # that only exist in the new description simply remain in the additions.
  def diff(%Description{subject: subject} = original_description,
           %Description{subject: subject} = new_description) do
    {additions, deletions} =
      original_description
      |> Description.predicates()
      |> Enum.reduce({new_description, Description.new(subject)},
        fn property, {additions, deletions} ->
          original_objects = Description.get(original_description, property)
          case Description.get(new_description, property) do
            nil ->
              # Predicate disappeared entirely: all its objects are deletions.
              {
                additions,
                Description.add(deletions, property, original_objects)
              }
            new_objects ->
              # Partition the original objects into kept and removed ones.
              {unchanged_objects, deleted_objects} =
                Enum.reduce(original_objects, {[], []}, fn
                  original_object, {unchanged_objects, deleted_objects} ->
                    if original_object in new_objects do
                      {[original_object | unchanged_objects], deleted_objects}
                    else
                      {unchanged_objects, [original_object | deleted_objects]}
                    end
                end)
              {
                Description.delete(additions, property, unchanged_objects),
                Description.add(deletions, property, deleted_objects),
              }
          end
        end)
    new(additions: additions, deletions: deletions)
  end

  # Descriptions about different subjects share nothing: everything old is
  # deleted, everything new is added.
  def diff(%Description{} = original_description, %Description{} = new_description),
    do: new(additions: new_description, deletions: original_description)

  def diff(%Graph{} = graph1, %Graph{} = graph2) do
    graph1_subjects = graph1 |> Graph.subjects() |> MapSet.new()
    graph2_subjects = graph2 |> Graph.subjects() |> MapSet.new()
    # Subjects present in only one graph contribute their whole description
    # to deletions resp. additions; shared subjects are diffed per subject
    # and merged into the result.
    deleted_subjects = MapSet.difference(graph1_subjects, graph2_subjects)
    added_subjects = MapSet.difference(graph2_subjects, graph1_subjects)
    graph1_subjects
    |> MapSet.intersection(graph2_subjects)
    |> Enum.reduce(
        new(
          additions: Graph.take(graph2, added_subjects),
          deletions: Graph.take(graph1, deleted_subjects)
        ),
        fn subject, diff ->
          merge(diff, diff(
            Graph.description(graph1, subject),
            Graph.description(graph2, subject)
          ))
        end)
  end

  def diff(%Description{} = description, %Graph{} = graph) do
    # Pop the description's subject out of the graph: whatever remains of
    # the graph is additions; the subject's statements are diffed normally.
    case Graph.pop(graph, description.subject) do
      {nil, graph} ->
        new(
          additions: graph,
          deletions: description
        )
      {new_description, graph} ->
        new(additions: graph)
        |> merge(diff(description, new_description))
    end
  end

  # Graph-vs-description is the mirror image of description-vs-graph, so
  # compute that diff and swap additions and deletions.
  def diff(%Graph{} = graph, %Description{} = description) do
    diff = diff(description, graph)
    %__MODULE__{ diff |
      additions: diff.deletions,
      deletions: diff.additions
    }
  end

  @doc """
  Merges two diffs.

  The diffs are merged by adding up the `additions` and `deletions` of both
  diffs respectively.
  """
  @spec merge(t, t) :: t
  def merge(%__MODULE__{} = diff1, %__MODULE__{} = diff2) do
    new(
      additions: Graph.add(diff1.additions, diff2.additions),
      deletions: Graph.add(diff1.deletions, diff2.deletions)
    )
  end

  @doc """
  Determines if a diff is empty.

  A `RDF.Diff` is empty, if its `additions` and `deletions` graphs are empty.
  """
  @spec empty?(t) :: boolean
  def empty?(%__MODULE__{} = diff) do
    Enum.empty?(diff.additions) and Enum.empty?(diff.deletions)
  end

  @doc """
  Applies a diff to a `RDF.Graph` or `RDF.Description` by deleting the `deletions` and adding the `additions` of the `diff`.

  Deletions of statements which are not present in the given graph or description
  are simply ignored.

  The result of an application is always a `RDF.Graph`, even if a `RDF.Description`
  is given and the additions from the diff are all about the subject of this description.
  """
  @spec apply(t, Description.t | Graph.t) :: Graph.t
  def apply(diff, rdf_data)

  def apply(%__MODULE__{} = diff, %Graph{} = graph) do
    graph
    |> Graph.delete(diff.deletions)
    |> Graph.add(diff.additions)
  end

  def apply(%__MODULE__{} = diff, %Description{} = description) do
    # Qualified call needed: apply/3 from Kernel would shadow this otherwise.
    __MODULE__.apply(diff, Graph.new(description))
  end
end
|
lib/rdf/diff.ex
| 0.857709
| 0.862004
|
diff.ex
|
starcoder
|
defmodule Rolodex.Utils do
  @moduledoc false

  @doc """
  Pipeline friendly dynamic struct creator.
  """
  def to_struct(data, module), do: struct(module, data)

  @doc """
  Recursively convert a keyword list into a map. Non-keyword lists and
  scalar values are returned unchanged.
  """
  def to_map_deep(data, level \\ 0)
  def to_map_deep([], 0), do: %{}
  def to_map_deep(list, level) when is_list(list) do
    if Keyword.keyword?(list) do
      for {key, value} <- list, into: %{}, do: {key, to_map_deep(value, level + 1)}
    else
      list
    end
  end
  def to_map_deep(data, _level), do: data

  @doc """
  Recursively convert all keys in a map from snake_case to camelCase.
  """
  def camelize_map(data) when not is_map(data), do: data
  def camelize_map(data) do
    for {key, value} <- data, into: %{}, do: {camelize(key), camelize_map(value)}
  end

  # Atom keys are stringified first, then camelized like any other key.
  defp camelize(key) when is_atom(key), do: key |> Atom.to_string() |> camelize()
  defp camelize(key) do
    camelized = Macro.camelize(key)
    # If Macro.camelize/1 was a no-op the key is left untouched; otherwise
    # lower the first character to get lowerCamelCase.
    if camelized == key, do: key, else: uncapitalize(camelized)
  end

  defp uncapitalize(<<first, rest::binary>>), do: String.downcase(<<first>>) <> rest

  @doc """
  Similar to Ruby's `with_indifferent_access`, this function performs an indifferent
  key lookup on a map or keyword list. Indifference means that the keys :foo and
  "foo" are considered identical. We only convert from atom -> string to avoid
  the unsafe `String.to_atom/1` function.
  """
  @spec indifferent_find(map() | keyword(), atom() | binary()) :: any()
  def indifferent_find(data, key) when is_atom(key),
    do: indifferent_find(data, Atom.to_string(key))
  def indifferent_find(data, key) do
    # Wrap hits in a tagged tuple so that nil/false values survive the search.
    data
    |> Enum.find_value(fn
      {k, v} when is_atom(k) -> if Atom.to_string(k) == key, do: {:found, v}
      {k, v} -> if k == key, do: {:found, v}
    end)
    |> case do
      {:found, value} -> value
      nil -> nil
    end
  end

  @doc """
  Grabs the description and metadata map associated with the given function via
  `@doc` annotations.
  """
  @spec fetch_doc_annotation(module(), atom()) :: {:ok, binary(), map()} | {:error, :not_found}
  def fetch_doc_annotation(controller, action) do
    # The docs entries are the last element of the Code.fetch_docs/1 tuple.
    entries = controller |> Code.fetch_docs() |> Tuple.to_list() |> List.last()

    matcher = fn
      {{:function, ^action, _arity}, _, _, _, _} -> true
      _ -> false
    end

    case Enum.find(entries, matcher) do
      {_, _, _, description, metadata} -> {:ok, description, metadata}
      _ -> {:error, :not_found}
    end
  end
end
|
lib/rolodex/utils.ex
| 0.756897
| 0.587588
|
utils.ex
|
starcoder
|
defmodule State.Helpers do
  @moduledoc """
  Helper functions for State modules.
  """
  alias Model.Trip

  @doc """
  Returns true if the given Model.Trip is on a hidden (negative priority) shape.
  """
  def trip_on_hidden_shape?(%{shape_id: shape_id}) do
    shape_id
    |> State.Shape.by_primary_id()
    |> hidden_shape?()
  end

  # A shape is hidden when it exists and carries a negative priority.
  defp hidden_shape?(%{priority: priority}) when priority < 0, do: true
  defp hidden_shape?(_), do: false

  @doc """
  Returns true if the given Model.Trip shouldn't be considered (by default) as having stops on the route.
  We ignore negative priority shapes, as well as alternate route trips.
  """
  def ignore_trip_for_route?(%Trip{} = trip) do
    cond do
      is_integer(trip.route_type) -> true
      is_boolean(trip.alternate_route) -> true
      true -> trip_on_hidden_shape?(trip)
    end
  end

  @doc """
  Returns true if the given Model.Trip shouldn't be considered (by default) as being part of the route based on the pattern.
  We ignore as alternate route trips, as well as very-atypical patterns.
  """
  @spec ignore_trip_route_pattern?(Model.Trip.t()) :: boolean
  def ignore_trip_route_pattern?(trip)
  def ignore_trip_route_pattern?(%Trip{route_type: value}) when is_integer(value), do: true
  def ignore_trip_route_pattern?(%Trip{alternate_route: flag}) when is_boolean(flag), do: true
  def ignore_trip_route_pattern?(%Trip{route_pattern_id: pattern_id})
      when is_binary(pattern_id) do
    pattern_id
    |> State.RoutePattern.by_id()
    |> atypical_pattern?()
  end
  def ignore_trip_route_pattern?(%Trip{route_pattern_id: nil}), do: true

  # Patterns with typicality below 4 are considered typical enough to keep;
  # unknown patterns (or typicality >= 4) are ignored.
  defp atypical_pattern?(%{typicality: typicality}) when typicality < 4, do: false
  defp atypical_pattern?(_), do: true

  @doc """
  Safely get the size of an ETS table.
  If the table doesn't exist, we'll return a 0 size.
  """
  @spec safe_ets_size(:ets.tab()) :: non_neg_integer
  def safe_ets_size(table) do
    size = :ets.info(table, :size)
    if is_integer(size), do: size, else: 0
  end
end
|
apps/state/lib/state/helpers.ex
| 0.860164
| 0.511229
|
helpers.ex
|
starcoder
|
defmodule Msgpax do
  @moduledoc ~S"""
  This module provides functions for serializing and de-serializing Elixir terms
  using the [MessagePack](http://msgpack.org/) format.

  ## Data conversion

  The following table shows how Elixir types are serialized to MessagePack types
  and how MessagePack types are de-serialized back to Elixir types.

  Elixir                            | MessagePack   | Elixir
  --------------------------------- | ------------- | -------------
  `nil`                             | nil           | `nil`
  `true`                            | boolean       | `true`
  `false`                           | boolean       | `false`
  `-1`                              | integer       | `-1`
  `1.25`                            | float         | `1.25`
  *N/A*<sup>1</sup>                 | NaN           | `Msgpax.NaN`<sup>2</sup>
  *N/A*<sup>1</sup>                 | +infinity     | `Msgpax.Infinity`<sup>2</sup>
  *N/A*<sup>1</sup>                 | -infinity     | `Msgpax.NegInfinity`<sup>2</sup>
  `:ok`                             | string        | `"ok"`
  `Atom`                            | string        | `"Elixir.Atom"`
  `"str"`                           | string        | `"str"`
  `"\xFF\xFF"`                      | string        | `"\xFF\xFF"`
  `#Msgpax.Bin<"\xFF">`             | binary        | `"\xFF"`<sup>3</sup>
  `%{foo: "bar"}`                   | map           | `%{"foo" => "bar"}`
  `[foo: "bar"]`                    | map           | `%{"foo" => "bar"}`
  `[1, true]`                       | array         | `[1, true]`
  `#Msgpax.Ext<4, "02:12">`         | extension     | `#Msgpax.Ext<4, "02:12">`
  `#DateTime<2017-12-06 00:00:00Z>` | extension     | `#DateTime<2017-12-06 00:00:00Z>`

  <sup>1</sup>`Msgpax.Packer` provides helper functions to facilitate the serialization of natively unsupported data types.

  <sup>2</sup>NaN and ±infinity are not enabled by default. See `unpack/2` for more information.

  <sup>3</sup>To deserialize back to `Msgpax.Bin` structs see the `unpack/2` options.
  """
  alias __MODULE__.Packer
  alias __MODULE__.Unpacker

  @doc """
  Serializes `term`.

  This function returns iodata by default; if you want to force the result to be
  a binary, you can use `IO.iodata_to_binary/1` or use the `:iodata` option (see
  the "Options" section below).

  This function returns `{:ok, iodata}` if the serialization is successful,
  `{:error, exception}` otherwise, where `exception` is a `Msgpax.PackError`
  struct which can be raised or converted to a more human-friendly error
  message with `Exception.message/1`. See `Msgpax.PackError` for all the
  possible reasons for a packing error.

  ## Options

    * `:iodata` - (boolean) if `true`, this function returns the encoded term as
      iodata, if `false` as a binary. Defaults to `true`.

  ## Examples

      iex> {:ok, packed} = Msgpax.pack("foo")
      iex> IO.iodata_to_binary(packed)
      <<163, 102, 111, 111>>
      iex> Msgpax.pack(20000000000000000000)
      {:error, %Msgpax.PackError{reason: {:too_big, 20000000000000000000}}}
      iex> Msgpax.pack("foo", iodata: false)
      {:ok, <<163, 102, 111, 111>>}

  """
  @spec pack(term, Keyword.t()) :: {:ok, iodata} | {:error, Msgpax.PackError.t() | Exception.t()}
  def pack(term, options \\ []) when is_list(options) do
    iodata? = Keyword.get(options, :iodata, true)
    try do
      Packer.pack(term)
    catch
      # Packer throws plain reasons for data it cannot encode.
      :throw, reason ->
        {:error, %Msgpax.PackError{reason: reason}}
      # Only the Msgpax.Packer protocol error is converted; any other raise
      # propagates to the caller.
      :error, %Protocol.UndefinedError{protocol: Msgpax.Packer} = exception ->
        {:error, exception}
    else
      # The `else` clauses receive the successful result of the `try` body;
      # the guard on the first clause selects the iodata vs. binary return.
      iodata when iodata? ->
        {:ok, iodata}
      iodata ->
        {:ok, IO.iodata_to_binary(iodata)}
    end
  end

  @doc """
  Works as `pack/1`, but raises if there's an error.

  This function works like `pack/1`, except it returns the `term` (instead of
  `{:ok, term}`) if the serialization is successful or raises a
  `Msgpax.PackError` exception otherwise.

  ## Options

  This function accepts the same options as `pack/2`.

  ## Examples

      iex> "foo" |> Msgpax.pack!() |> IO.iodata_to_binary()
      <<163, 102, 111, 111>>
      iex> Msgpax.pack!(20000000000000000000)
      ** (Msgpax.PackError) too big value: 20000000000000000000
      iex> Msgpax.pack!("foo", iodata: false)
      <<163, 102, 111, 111>>

  """
  @spec pack!(term, Keyword.t()) :: iodata | no_return
  def pack!(term, options \\ []) do
    case pack(term, options) do
      {:ok, result} ->
        result
      {:error, exception} ->
        raise exception
    end
  end

  @doc """
  De-serializes part of the given `iodata`.

  This function works like `unpack/2`, but instead of requiring the input to be
  a MessagePack-serialized term with nothing after that, it accepts leftover
  bytes at the end of `iodata` and only de-serializes the part of the input that
  makes sense. It returns `{:ok, term, rest}` if de-serialization is successful,
  `{:error, exception}` otherwise (where `exception` is a `Msgpax.UnpackError`
  struct).

  See `unpack/2` for more information on the supported options.

  ## Examples

      iex> Msgpax.unpack_slice(<<163, "foo", "junk">>)
      {:ok, "foo", "junk"}
      iex> Msgpax.unpack_slice(<<163, "fo">>)
      {:error, %Msgpax.UnpackError{reason: {:invalid_format, 163}}}

  """
  @spec unpack_slice(iodata, Keyword.t()) :: {:ok, any, binary} | {:error, Msgpax.UnpackError.t()}
  def unpack_slice(iodata, options \\ []) when is_list(options) do
    try do
      iodata
      |> IO.iodata_to_binary()
      |> Unpacker.unpack(options)
    catch
      # Unpacker throws plain reasons for malformed input.
      :throw, reason ->
        {:error, %Msgpax.UnpackError{reason: reason}}
    else
      {value, rest} ->
        {:ok, value, rest}
    end
  end

  @doc """
  Works like `unpack_slice/2` but raises in case of error.

  This function works like `unpack_slice/2`, but returns just `{term, rest}` if
  de-serialization is successful and raises a `Msgpax.UnpackError` exception if
  it's not.

  ## Examples

      iex> Msgpax.unpack_slice!(<<163, "foo", "junk">>)
      {"foo", "junk"}
      iex> Msgpax.unpack_slice!(<<163, "fo">>)
      ** (Msgpax.UnpackError) invalid format, first byte: 163

  """
  @spec unpack_slice!(iodata, Keyword.t()) :: {any, binary} | no_return
  def unpack_slice!(iodata, options \\ []) do
    case unpack_slice(iodata, options) do
      {:ok, value, rest} ->
        {value, rest}
      {:error, exception} ->
        raise exception
    end
  end

  @doc """
  De-serializes the given `iodata`.

  This function de-serializes the given `iodata` into an Elixir term. It returns
  `{:ok, term}` if the de-serialization is successful, `{:error, exception}`
  otherwise, where `exception` is a `Msgpax.UnpackError` struct which can be
  raised or converted to a more human-friendly error message with
  `Exception.message/1`. See `Msgpax.UnpackError` for all the possible reasons
  for an unpacking error.

  ## Options

    * `:binary` - (boolean) if `true`, then binaries are decoded as `Msgpax.Bin`
      structs instead of plain Elixir binaries. Defaults to `false`.
    * `:ext` - (module) a module that implements the `Msgpax.Ext.Unpacker`
      behaviour. For more information, see the docs for `Msgpax.Ext.Unpacker`.
    * `:nonfinite_floats` - (boolean) if `true`, deserializes NaN and ±infinity to
      "signalling" atoms (see the "Data conversion" section), otherwise errors.
      Defaults to `false`.

  ## Examples

      iex> Msgpax.unpack(<<163, "foo">>)
      {:ok, "foo"}
      iex> Msgpax.unpack(<<163, "foo", "junk">>)
      {:error, %Msgpax.UnpackError{reason: {:excess_bytes, "junk"}}}
      iex> packed = Msgpax.pack!(Msgpax.Bin.new(<<3, 18, 122, 27, 115>>))
      iex> {:ok, bin} = Msgpax.unpack(packed, binary: true)
      iex> bin
      #Msgpax.Bin<<<3, 18, 122, 27, 115>>>

  """
  @spec unpack(iodata, Keyword.t()) :: {:ok, any} | {:error, Msgpax.UnpackError.t()}
  def unpack(iodata, options \\ []) do
    # Unlike unpack_slice/2, a non-empty remainder is an error here.
    case unpack_slice(iodata, options) do
      {:ok, value, <<>>} ->
        {:ok, value}
      {:ok, _, bytes} ->
        {:error, %Msgpax.UnpackError{reason: {:excess_bytes, bytes}}}
      {:error, _} = error ->
        error
    end
  end

  @doc """
  Works like `unpack/2`, but raises in case of errors.

  This function works like `unpack/2`, but it returns `term` (instead of `{:ok,
  term}`) if de-serialization is successful, otherwise raises a
  `Msgpax.UnpackError` exception.

  ## Example

      iex> Msgpax.unpack!(<<163, "foo">>)
      "foo"
      iex> Msgpax.unpack!(<<163, "foo", "junk">>)
      ** (Msgpax.UnpackError) found excess bytes: "junk"
      iex> packed = Msgpax.pack!(Msgpax.Bin.new(<<3, 18, 122, 27, 115>>))
      iex> Msgpax.unpack!(packed, binary: true)
      #Msgpax.Bin<<<3, 18, 122, 27, 115>>>

  """
  @spec unpack!(iodata, Keyword.t()) :: any | no_return
  def unpack!(iodata, options \\ []) do
    case unpack(iodata, options) do
      {:ok, value} ->
        value
      {:error, exception} ->
        raise exception
    end
  end
end
|
lib/msgpax.ex
| 0.874373
| 0.692063
|
msgpax.ex
|
starcoder
|
defmodule Hui.URL do
  @moduledoc """
  Struct and utilities for working with Solr URLs and parameters.

  Use the module `t:Hui.URL.t/0` struct to specify
  Solr core or collection URLs with request handlers.

  ### Hui URL endpoints

  ```
    # binary
    url = "http://localhost:8983/solr/collection"
    Hui.search(url, q: "loch")

    # key referring to config setting
    url = :library
    Hui.search(url, q: "edinburgh", rows: 10)

    # Hui.URL struct
    url = %Hui.URL{url: "http://localhost:8983/solr/collection", handler: "suggest"}
    Hui.search(url, suggest: true, "suggest.dictionary": "mySuggester", "suggest.q": "el")
  ```

  `t:Hui.URL.t/0` struct also enables HTTP headers and [HTTPoison options](https://hexdocs.pm/httpoison/HTTPoison.html#request/5)
  to be specified in keyword lists. HTTPoison options provide further controls for a request, e.g. `timeout`, `recv_timeout`,
  `max_redirect`, `params` etc.

  ```
    # setting up a header and a 10s receiving connection timeout
    url = %Hui.URL{url: "..", headers: [{"accept", "application/json"}], options: [recv_timeout: 10000]}
    Hui.search(url, q: "solr rocks")
  ```
  """

  defstruct [:url, handler: "select", headers: [], options: []]

  @type headers :: HTTPoison.Base.headers
  @type options :: Keyword.t

  @typedoc """
  Struct for a Solr endpoint with a request handler and any associated HTTP headers and options.

  ## Example

  ```
    %Hui.URL{handler: "suggest", url: "http://localhost:8983/solr/collection"}
  ```

  - `url`: typical endpoint including the core or collection name. This may also be a load balancer
  endpoint fronting several Solr upstreams.
  - `handler`: name of a Solr request handler that processes requests.
  - `headers`: HTTP headers.
  - `options`: [HTTPoison options](https://hexdocs.pm/httpoison/HTTPoison.html#request/5).
  """
  @type t :: %__MODULE__{url: nil | binary, handler: nil | binary, headers: nil | headers, options: nil | options}

  @typedoc """
  Solr parameters as keyword list or structs.
  """
  @type url_params :: Keyword.t | Hui.Q.t | Hui.D.t | Hui.F.t | Hui.F.Range.t | Hui.F.Interval.t

  @doc """
  Returns a configured default Solr endpoint as `t:Hui.URL.t/0` struct.

  ```
    Hui.URL.default_url!
    %Hui.URL{handler: "select", url: "http://localhost:8983/solr/gettingstarted", headers: [{"accept", "application/json"}], options: [recv_timeout: 10000]}
  ```

  The default endpoint can be specified in application configuration as below:

  ```
    config :hui, :default,
      url: "http://localhost:8983/solr/gettingstarted",
      handler: "select", # optional
      headers: [{"accept", "application/json"}],
      options: [recv_timeout: 10000]
  ```
  """
  @spec default_url! :: t | nil
  def default_url! do
    # Returns nil when no :default endpoint is configured.
    case configured_url(:default) do
      {:ok, default_url} -> default_url
      {:error, _} -> nil
    end
  end

  @doc """
  Retrieve url configuration as `t:Hui.URL.t/0` struct.

  ## Example

      iex> Hui.URL.configured_url(:suggester)
      {:ok, %Hui.URL{handler: "suggest", url: "http://localhost:8983/solr/collection"}}

  The above retrieves the following endpoint configuration e.g. from `config.exs`:

  ```
    config :hui, :suggester,
      url: "http://localhost:8983/solr/collection",
      handler: "suggest"
  ```
  """
  # Spec fixed: the error tuple carries a Hui.Error struct (not a binary)
  # and nil is never returned by this function.
  @spec configured_url(atom) :: {:ok, t} | {:error, Hui.Error.t}
  def configured_url(config_key) do
    # Read the endpoint configuration once instead of querying the
    # application environment separately for every field. Access on nil
    # (unconfigured key) safely yields nil for each field.
    config = Application.get_env(:hui, config_key)
    url = config[:url]
    handler = config[:handler]
    headers = config[:headers] || []
    options = config[:options] || []
    case {url, handler} do
      {nil, _} -> {:error, %Hui.Error{reason: :nxdomain}}
      {_, nil} -> {:ok, %Hui.URL{url: url, headers: headers, options: options}}
      {_, _} -> {:ok, %Hui.URL{url: url, handler: handler, headers: headers, options: options}}
    end
  end

  @doc false
  @spec encode_query(url_params) :: binary
  @deprecated "Please use Hui.Encoder instead"
  # coveralls-ignore-start
  def encode_query(%Hui.H3{} = url_params), do: encode_query(url_params |> Map.to_list |> Enum.sort)
  def encode_query(%Hui.F.Range{} = url_params), do: encode_query(url_params |> Map.to_list, "facet.range", url_params.range, url_params.per_field)
  def encode_query(%Hui.F.Interval{} = url_params), do: encode_query(url_params |> Map.to_list, "facet.interval", url_params.interval, url_params.per_field)
  def encode_query(url_params) when is_map(url_params), do: encode_query(url_params |> Map.to_list)
  def encode_query([{:__struct__, Hui.Q} | tail]), do: tail |> encode_query
  def encode_query([{:__struct__, Hui.F} | tail]), do: Enum.map(tail, &prefix/1) |> encode_query
  def encode_query([{:__struct__, Hui.H} | tail]), do: Enum.map(tail, &prefix(&1, "hl")) |> encode_query
  def encode_query([{:__struct__, Hui.H1} | tail]), do: Enum.map(tail, &prefix(&1, "hl")) |> encode_query
  def encode_query([{:__struct__, Hui.H2} | tail]), do: Enum.map(tail, &prefix(&1, "hl")) |> encode_query
  def encode_query([{:__struct__, Hui.H3} | tail]), do: Enum.map(tail, &prefix(&1, "hl")) |> encode_query
  def encode_query([{:__struct__, Hui.S} | tail]), do: Enum.map(tail, &prefix(&1, "suggest")) |> encode_query
  def encode_query([{:__struct__, Hui.Sp} | tail]), do: Enum.map(tail, &prefix(&1, "spellcheck")) |> encode_query
  def encode_query([{:__struct__, Hui.M} | tail]), do: Enum.map(tail, &prefix(&1, "mlt")) |> encode_query
  def encode_query(enumerable) when is_list(enumerable), do: Enum.reject(enumerable, &invalid_param?/1) |> Enum.map_join("&", &encode/1)
  def encode_query(_), do: ""
  def encode_query([{:__struct__, _struct} | tail], prefix, field, per_field), do: Enum.map(tail, &prefix(&1, prefix, field, per_field)) |> encode_query
  # coveralls-ignore-stop

  @doc "Returns the string representation (URL path) of the given `t:Hui.URL.t/0` struct."
  @spec to_string(t) :: binary
  defdelegate to_string(uri), to: String.Chars.Hui.URL

  # coveralls-ignore-start
  defp encode({k,v}) when is_list(v), do: Enum.reject(v, &invalid_param?/1) |> Enum.map_join("&", &encode({k,&1}))
  defp encode({k,v}) when is_binary(v), do: "#{k}=#{URI.encode_www_form(v)}"
  # when value is a also struct, e.g. %Hui.F.Range/Interval{}
  defp encode({_k,v}) when is_map(v), do: encode_query(v)
  defp encode({k,v}), do: "#{k}=#{v}"
  defp encode([]), do: ""
  defp encode(v), do: v

  # kv pairs with empty, nil or [] values
  defp invalid_param?(""), do: true
  defp invalid_param?(nil), do: true
  defp invalid_param?([]), do: true
  defp invalid_param?(x) when is_tuple(x), do: is_nil(elem(x,1)) or elem(x,1) == "" or elem(x, 1) == [] or elem(x,0) == :__struct__
  defp invalid_param?(_x), do: false

  # render kv pairs according to Solr prefix /per field syntax
  # e.g. `field: "year"` to `"facet.field": "year"`, `f.[field].facet.gap`
  defp prefix({k,v}) when k == :facet, do: {k,v}
  defp prefix({k,v}, prefix \\ "facet", field \\ "", per_field \\ false) do
    case {k,prefix} do
      {:facet, _} -> {:facet, v}
      {:hl, _} -> {:hl, v}
      {:suggest, _} -> {:suggest, v}
      {:spellcheck, _} -> {:spellcheck, v}
      {:mlt, _} -> {:mlt, v}
      {:range, "facet.range"} -> {:"facet.range", v} # render the same way despite per field setting
      {:method, "facet.range"} -> {:"facet.range.method", v} # ditto
      {:interval, "facet.interval"} -> {:"facet.interval", v} # ditto
      {:per_field, _} -> {k, nil} # do not render this field
      {:per_field_method, _} -> if per_field, do: {:"f.#{field}.#{prefix}.method", v}, else: {k, nil}
      {_, _} -> if per_field, do: {:"f.#{field}.#{prefix}.#{k}", v}, else: {:"#{prefix}.#{k}", v}
    end
  end
  # coveralls-ignore-stop
end
# implement `to_string` for %Hui.URL{} in Elixir generally via the String.Chars protocol
# implement `to_string` for %Hui.URL{} in Elixir generally via the String.Chars protocol
defimpl String.Chars, for: Hui.URL do
  # Renders the endpoint as "url/handler" via iodata (?/ is the "/" byte).
  def to_string(%Hui.URL{url: url, handler: handler}) do
    IO.iodata_to_binary([url, ?/, handler])
  end
end
|
lib/hui/url.ex
| 0.91115
| 0.76366
|
url.ex
|
starcoder
|
defmodule GitHooks.Tasks.Mix do
  @moduledoc """
  Represents a Mix task that will be executed as a git hook task.

  A mix task should be configured as `{:mix_task, task_name, task_args}`,
  being `task_args` an optional configuration. See `#{__MODULE__}.new/1` for
  more information.

  For example:

  ```elixir
  config :git_hooks,
    hooks: [
      pre_commit: [
        {:mix_task, :test},
        {:mix_task, :format, ["--dry-run"]}
      ]
    ]
  ```

  See `https://hexdocs.pm/mix/Mix.Task.html#run/2` for reference.
  """

  defstruct [:task, args: [], result: nil]

  @typedoc """
  Represents a Mix task.
  """
  @type t :: %__MODULE__{
          task: Mix.Task.task_name(),
          args: [any]
        }

  @doc """
  Creates a new Mix task struct.

  This function expects a tuple or triple with `:mix_task`, the task name and
  the task args. When the args are omitted they default to `[]`.

  ### Examples

      iex> #{__MODULE__}.new({:mix_task, :test, ["--failed"]})
      %#{__MODULE__}{task: :test, args: ["--failed"]}

      iex> #{__MODULE__}.new({:mix_task, :test})
      %#{__MODULE__}{task: :test, args: []}

  """
  @spec new(
          {:mix_task, Mix.Task.task_name(), [any]}
          | {:mix_task, Mix.Task.task_name()}
        ) :: __MODULE__.t()
  def new({:mix_task, task, args}) do
    %__MODULE__{
      task: task,
      args: args
    }
  end

  # Fix: the docs (and the example config above) promise a pair form like
  # `{:mix_task, :test}`, but previously only the triple was accepted and a
  # pair raised FunctionClauseError.
  def new({:mix_task, task}), do: new({:mix_task, task, []})
end
defimpl GitHooks.Task, for: GitHooks.Tasks.Mix do
  alias GitHooks.Tasks.Mix, as: MixTask
  alias GitHooks.Printer

  # Mix tasks raise an error if they are invalid, but determining if they are
  # a success or not depends on the return value of the task.
  # @default_success_results [0, :ok, nil, {:ok, []}, {:noop, []}]
  @default_success_results [0, :ok, nil]

  # User-configured extra success returns are checked in addition to the
  # defaults above.
  @success_results GitHooks.Config.extra_success_returns() ++ @default_success_results

  # `mix test` is special-cased: it is shelled out via System.cmd/3 so its
  # output streams to stdio (with --color forced); `result` then holds the
  # exit status (0 on success).
  def run(%MixTask{task: :test, args: args} = mix_task, _opts) do
    args = ["test" | args] ++ ["--color"]
    {_, result} =
      System.cmd(
        "mix",
        args,
        into: IO.stream(:stdio, :line)
      )
    Map.put(mix_task, :result, result)
  end

  # Every other task runs in-process; `result` holds whatever the task
  # returned (compared against @success_results in success?/1).
  def run(%MixTask{task: task, args: args} = mix_task, _opts) do
    result = Mix.Task.run(task, args)
    Map.put(mix_task, :result, result)
  end

  def success?(%MixTask{result: result}) when result in @success_results, do: true
  def success?(%MixTask{result: _result}), do: false

  # Prints a success or error line for the task and returns the task so the
  # caller can keep piping it.
  def print_result(%MixTask{task: task, result: result} = mix_task) do
    case result do
      result when result in @success_results ->
        Printer.success("`#{task}` was successful")
      _ ->
        Printer.error("mix task `#{task}` failed, return result: #{inspect(result)}")
    end
    mix_task
  end
end
|
lib/tasks/mix.ex
| 0.890859
| 0.847148
|
mix.ex
|
starcoder
|
defmodule Mix.Tasks.Ecto.Migrate do
  use Mix.Task
  import Mix.Ecto

  @shortdoc "Runs the repository migrations"

  @moduledoc """
  Runs the pending migrations for the given repository.

  Migrations are expected at "priv/YOUR_REPO/migrations" directory
  of the current application but it can be configured to be any
  subdirectory of `priv` by specifying the `:priv` key under the
  repository configuration. When building the migrations path,
  "migrations" is appended to the path specified in the `:priv` key.
  For example, if `:priv` is set to "priv/YOUR_REPO/my_migrations",
  the migrations path will be "priv/YOUR_REPO/my_migrations/migrations".

  Runs all pending migrations by default. To migrate up to a specific
  version number, supply `--to version_number`. To migrate a specific
  number of times, use `--step n`.

  The repositories to migrate are the ones specified under the
  `:ecto_repos` option in the current app configuration. However,
  if the `-r` option is given, it replaces the `:ecto_repos` config.

  Since Ecto tasks can only be executed once, if you need to migrate
  multiple repositories, set `:ecto_repos` accordingly or pass the `-r`
  flag multiple times.

  If a repository has not yet been started, one will be started outside
  your application supervision tree and shutdown afterwards.

  ## Examples

      mix ecto.migrate
      mix ecto.migrate -r Custom.Repo

      mix ecto.migrate -n 3
      mix ecto.migrate --step 3

      mix ecto.migrate -v 20080906120000
      mix ecto.migrate --to 20080906120000

  ## Command line options

    * `-r`, `--repo` - the repo to migrate
    * `--all` - run all pending migrations
    * `--step` / `-n` - run n number of pending migrations
    * `--to` / `-v` - run all migrations up to and including version
    * `--quiet` - do not log migration commands
    * `--prefix` - the prefix to run migrations on
    * `--pool-size` - the pool size if the repository is started only for the task (defaults to 1)
    * `--log-sql` - log the raw sql migrations are running
  """

  # `migrator` is injectable for testing; it defaults to Ecto.Migrator.run/3.
  @doc false
  def run(args, migrator \\ &Ecto.Migrator.run/3) do
    repos = parse_repo(args)
    {opts, _, _} = OptionParser.parse args,
      switches: [all: :boolean, step: :integer, to: :integer, quiet: :boolean,
                 prefix: :string, pool_size: :integer, log_sql: :boolean],
      aliases: [n: :step, v: :to]
    # Default to running all pending migrations unless a bound (--to/--step)
    # or --all was explicitly given.
    opts =
      if opts[:to] || opts[:step] || opts[:all],
        do: opts,
        else: Keyword.put(opts, :all, true)
    # --quiet silences both migration logging and raw SQL logging.
    opts =
      if opts[:quiet],
        do: Keyword.merge(opts, [log: false, log_sql: false]),
        else: opts
    Enum.each repos, fn repo ->
      ensure_repo(repo, args)
      ensure_migrations_path(repo)
      # Starts the repo (and its apps) outside the supervision tree when it
      # is not already running; pid is nil when it was already started.
      {:ok, pid, apps} = ensure_started(repo, opts)
      pool = repo.config[:pool]
      # Sandbox-style pools expose unboxed_run/2 — presumably to run the
      # migrations outside a transactional checkout; TODO(review) confirm
      # against the pool implementation.
      migrated =
        if function_exported?(pool, :unboxed_run, 2) do
          pool.unboxed_run(repo, fn -> migrator.(repo, :up, opts) end)
        else
          migrator.(repo, :up, opts)
        end
      # Only stop the repo if this task started it.
      pid && repo.stop(pid)
      restart_apps_if_migrated(apps, migrated)
    end
  end
end
|
lib/mix/tasks/ecto.migrate.ex
| 0.811303
| 0.403537
|
ecto.migrate.ex
|
starcoder
|
defmodule Ecto.Validator do
  @moduledoc """
  Validates a given record or dict given a set of predicates.

      Ecto.Validator.record(user,
        name: present() when on_create?(user),
        age: present(message: "must be present"),
        age: greater_than(18),
        also: validate_other
      )

  Validations are passed as the second argument in the attribute-predicate
  format. Each predicate can be filtered via the `when` operator. Note `when`
  here is not limited to only guard expressions.

  The predicates above are going to receive the attribute being validated
  and its current value as argument. For example, the `present` predicate
  above is going to be called as:

      present(:name, user.name)
      present(:age, user.age, message: "must be present")

  The validator also handles a special key `:also`, which is used to pipe
  to predicates without a particular attribute. Instead, such predicates
  receive the record as argument. In this example, `validate_other` will
  be invoked as:

      validate_other(user)

  Note all predicates must return a keyword list, with the attribute error
  as key and the validation message as value.

  A handful of predicates can be found at `Ecto.Validator.Predicates`.
  """

  @doc """
  Validates a given dict given a set of predicates.
  """
  @spec dict(Macro.t, Keyword.t) :: Macro.t
  defmacro dict(value, opts) when is_list(opts) do
    # Attribute values are fetched with `Dict.get/2` using the atom key as-is.
    process opts, value, fn var, attr ->
      quote do: Dict.get(unquote(var), unquote(attr))
    end
  end

  @doc """
  Validates a given dict, with binary keys, given a set of predicates.
  """
  @spec bin_dict(Macro.t, Keyword.t) :: Macro.t
  defmacro bin_dict(value, opts) when is_list(opts) do
    # Same as `dict/2`, except the atom attribute is converted to a binary
    # key at macro-expansion time (the conversion runs outside the quote).
    process opts, value, fn var, attr ->
      quote do: Dict.get(unquote(var), unquote(atom_to_binary(attr)))
    end
  end

  @doc """
  Validates a given record given a set of predicates.
  """
  @spec record(Macro.t, Keyword.t) :: Macro.t
  defmacro record(value, opts) when is_list(opts) do
    # Records expose attributes via accessor calls (`var.attr`).
    process opts, value, fn var, attr ->
      quote do: unquote(var).unquote(attr)
    end
  end

  # No validations given: the expansion is simply an empty error list.
  defp process([], _value, _getter), do: []

  # Builds the full validation AST: binds the validated value once to `var`,
  # expands every attribute/predicate pair, and concatenates their results.
  defp process(opts, value, getter) do
    var = quote do: var

    validations =
      opts
      |> Stream.map(&process_each(&1, var, getter))
      |> concat

    quote do
      unquote(var) = unquote(value)
      unquote(validations)
    end
  end

  # Joins the quoted predicate results with `++`, so at runtime the whole
  # expansion evaluates to a single flat keyword list of errors.
  defp concat(predicates) do
    Enum.reduce(predicates, fn i, acc ->
      quote do: unquote(acc) ++ unquote(i)
    end)
  end

  # The special `:also` key pipes the whole record/dict into the predicate.
  defp process_each({ :also, function }, var, _getter) do
    handle_ops function, fn call -> Macro.pipe(var, call) end
  end

  # Regular predicates receive the attribute name followed by its value
  # (fetched via the adapter-specific `getter`).
  defp process_each({ attr, function }, var, getter) do
    handle_ops function, fn call ->
      Macro.pipe(attr, Macro.pipe(getter.(var, attr), call))
    end
  end

  # `predicate when condition`: only evaluate the predicate(s) when the
  # (arbitrary, not guard-restricted) condition holds at runtime.
  defp handle_ops({ :when, _, [left, right] }, callback) do
    quote do
      if unquote(right), do: unquote(concat(handle_and(left, callback))), else: []
    end
  end

  defp handle_ops(other, callback) do
    concat(handle_and(other, callback))
  end

  # `pred1 and pred2`: expand both predicates, preserving left-to-right order.
  defp handle_and({ :and, _, [left, right] }, callback) do
    handle_and(left, callback) ++ [callback.(right)]
  end

  defp handle_and(other, callback) do
    [callback.(other)]
  end
end
|
lib/ecto/validator.ex
| 0.920959
| 0.598459
|
validator.ex
|
starcoder
|
defmodule ExVCR.Adapter.Hackney do
  @moduledoc """
  Provides adapter methods to mock :hackney methods.
  """

  use ExVCR.Adapter

  alias ExVCR.Adapter.Hackney.Store
  alias ExVCR.Util

  defmacro __using__(_opts) do
    # Start the body store so `:hackney.body/1,2` can look up bodies that
    # were stashed during cache replay (see `hook_response_from_cache/2`).
    quote do
      Store.start
    end
  end

  defdelegate convert_from_string(string), to: ExVCR.Adapter.Hackney.Converter
  defdelegate convert_to_string(request, response), to: ExVCR.Adapter.Hackney.Converter
  defdelegate parse_request_body(request_body), to: ExVCR.Adapter.Hackney.Converter

  @doc """
  Returns the name of the mock target module.
  """
  def module_name do
    :hackney
  end

  @doc """
  Returns list of the mock target methods with function name and callback.
  """
  def target_methods(recorder) do
    # `:hackney.request/1..5` is recorded; `:hackney.body/1,2` is answered
    # from the Store when the body was cached, else passed through to meck.
    [
      {:request, &ExVCR.Recorder.request(recorder, [&1, &2, &3, &4, &5])},
      {:request, &ExVCR.Recorder.request(recorder, [&1, &2, &3, &4])},
      {:request, &ExVCR.Recorder.request(recorder, [&1, &2, &3])},
      {:request, &ExVCR.Recorder.request(recorder, [&1, &2])},
      {:request, &ExVCR.Recorder.request(recorder, [&1])},
      {:body, &handle_body_request(recorder, [&1])},
      {:body, &handle_body_request(recorder, [&1, &2])}
    ]
  end

  @doc """
  Generate key for searching response.
  """
  def generate_keys_for_request(request) do
    # Positional args of :hackney.request/5: method, url, headers, body, opts.
    url = Enum.fetch!(request, 1)
    method = Enum.fetch!(request, 0)
    request_body = Enum.fetch(request, 3) |> parse_request_body
    headers = Enum.at(request, 2, []) |> Util.stringify_keys()
    [url: url, method: method, request_body: request_body, headers: headers]
  end

  @doc """
  Callback from ExVCR.Handler when response is retrieved from the HTTP server.
  """
  def hook_response_from_server(response) do
    apply_filters(response)
  end

  # Strip blacklisted headers from responses before they are recorded.
  # Error tuples pass through untouched.
  defp apply_filters({:ok, status_code, headers, reference}) do
    filtered_headers = ExVCR.Filter.remove_blacklisted_headers(headers)
    {:ok, status_code, filtered_headers, reference}
  end

  defp apply_filters({:ok, status_code, headers}) do
    filtered_headers = ExVCR.Filter.remove_blacklisted_headers(headers)
    {:ok, status_code, filtered_headers}
  end

  defp apply_filters({:error, reason}) do
    {:error, reason}
  end

  @doc """
  Callback from ExVCR.Handler when response is retrieved from the json file cache.
  """
  def hook_response_from_cache(_request, nil), do: nil
  def hook_response_from_cache(_request, %ExVCR.Response{type: "error"} = response), do: response
  def hook_response_from_cache(_request, %ExVCR.Response{body: nil} = response), do: response

  def hook_response_from_cache([_, _, _, _, opts], %ExVCR.Response{body: body} = response) do
    if :with_body in opts || {:with_body, true} in opts do
      # Caller asked hackney to return the body inline; reply as-is.
      response
    else
      # Otherwise hackney returns a client reference and the body is fetched
      # later via :hackney.body/1,2 — stash the body keyed by a fresh ref.
      client = make_ref()
      client_key_atom = client |> inspect |> String.to_atom
      Store.set(client_key_atom, body)
      %{response | body: client}
    end
  end

  defp handle_body_request(recorder, [client]) do
    handle_body_request(recorder, [client, :infinity])
  end

  # Serves :hackney.body/2: first from the Store (cache replay), otherwise
  # passes through to the real hackney and records the filtered body.
  defp handle_body_request(recorder, [client, max_length]) do
    client_key_atom = client |> inspect |> String.to_atom
    if body = Store.get(client_key_atom) do
      Store.delete(client_key_atom)
      {:ok, body}
    else
      case :meck.passthrough([client, max_length]) do
        {:ok, body} ->
          body = ExVCR.Filter.filter_sensitive_data(body)
          client_key_string = inspect(client)
          # Back-fill the recorded response whose body was stored as the
          # client ref's string with the real (filtered) body.
          ExVCR.Recorder.update(recorder,
            fn(%{request: _request, response: response}) ->
              response.body == client_key_string
            end,
            fn(%{request: request, response: response}) ->
              %{request: request, response: %{response | body: body}}
            end
          )
          {:ok, body}
        {ret, body} ->
          {ret, body}
      end
    end
  end

  @doc """
  Returns the response from the ExVCR.Reponse record.
  """
  def get_response_value_from_cache(response) do
    if response.type == "error" do
      {:error, response.body}
    else
      case response.body do
        nil -> {:ok, response.status_code, response.headers}
        _ -> {:ok, response.status_code, response.headers, response.body}
      end
    end
  end

  @doc """
  Default definitions for stub.
  """
  def default_stub_params(:headers), do: %{"Content-Type" => "text/html"}
  def default_stub_params(:status_code), do: 200
end
|
lib/exvcr/adapter/hackney.ex
| 0.766774
| 0.438785
|
hackney.ex
|
starcoder
|
defmodule Blockchain.Account do
@moduledoc """
Represents the account state,
as defined in Section 4.1 of the Yellow Paper
"""
alias ExthCrypto.Hash.Keccak
alias MerklePatriciaTree.Trie
alias MerklePatriciaTree.TrieStorage
alias Blockchain.Account.{Address, Storage}
@empty_keccak Keccak.kec(<<>>)
@empty_trie Trie.empty_trie_root_hash()
# State defined in Section 4.1 of the Yellow Paper:
# nonce: σ_n
# balance: σ_b
# storage_root: σ_s
# code_hash: σ_c
defstruct nonce: 0,
balance: 0,
storage_root: @empty_trie,
code_hash: @empty_keccak
@type t :: %__MODULE__{
nonce: integer(),
balance: EVM.Wei.t(),
storage_root: EVM.trie_root(),
code_hash: MerklePatriciaTree.Trie.key() | nil
}
@doc """
Checks whether or not an account is a non-contract account.
If the codeHash field is the Keccak-256 hash of the empty string, then the
node represents a simple account, sometimes referred to as a “non-contract”
account. This is defined in the latter part of Section 4.1 of the Yellow Paper.
## Examples
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{})
true
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{code_hash: <<0x01, 0x02>>})
false
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{code_hash: <<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>})
true
"""
@spec is_simple_account?(t) :: boolean()
def is_simple_account?(acct) do
acct.code_hash == @empty_keccak
end
@doc """
Encodes an account such that it can be represented in RLP encoding.
This is defined as Eq.(10) `p` in the Yellow Paper.
## Examples
iex> Blockchain.Account.serialize(%Blockchain.Account{nonce: 5, balance: 10, storage_root: <<0x00, 0x01>>, code_hash: <<0x01, 0x02>>})
[5, 10, <<0x00, 0x01>>, <<0x01, 0x02>>]
iex> Blockchain.Account.serialize(%Blockchain.Account{})
[
0,
0,
<<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>,
<<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>
]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(account) do
[
account.nonce,
account.balance,
account.storage_root,
account.code_hash
]
end
@doc """
Decodes an account from an RLP encodable structure.
This is defined as Eq.(10) `p` in the Yellow Paper (reversed).
## Examples
iex> Blockchain.Account.deserialize([<<5>>, <<10>>, <<0x00, 0x01>>, <<0x01, 0x02>>])
%Blockchain.Account{nonce: 5, balance: 10, storage_root: <<0x00, 0x01>>, code_hash: <<0x01, 0x02>>}
iex> Blockchain.Account.deserialize([<<0>>, <<0>>, <<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>, <<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>])
%Blockchain.Account{}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[
nonce,
balance,
storage_root,
code_hash
] = rlp
%Blockchain.Account{
nonce: :binary.decode_unsigned(nonce),
balance: :binary.decode_unsigned(balance),
storage_root: storage_root,
code_hash: code_hash
}
end
@doc """
Loads an account from an address, as defined in Eq.(9), Eq.(10), and Eq.(12)
of the Yellow Paper.
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> MerklePatriciaTree.Trie.update_key(<<0x01::160>> |> ExthCrypto.Hash.Keccak.kec(), ExRLP.encode([5, 6, <<1>>, <<2>>]))
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>}
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> MerklePatriciaTree.Trie.update_key(<<0x01::160>> |> ExthCrypto.Hash.Keccak.kec(), nil)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
"""
@spec get_account(TrieStorage.t(), Address.t()) :: t | nil
def get_account(state, address) do
account = TrieStorage.get_key(state, Keccak.kec(address))
case account do
nil ->
nil
encoded_account ->
encoded_account
|> ExRLP.decode()
|> deserialize()
end
end
@doc """
Checks for an empty code hash and a zero nonce. This is the equivalent of any
empty account check regardless of whether the account has a balance. This
check is needed when an account hides money in one of it's contract addresses
and later creates a contract to get the money out. This technique is
documented as `Quirk #1 - hiding in plain sight` in the [Ethereum quirks and
vulns](http://swende.se/blog/Ethereum_quirks_and_vulns.html) blog post.
"""
@spec uninitialized_contract?(t()) :: boolean()
def uninitialized_contract?(account) do
account.nonce == 0 && account.code_hash == @empty_keccak
end
@doc """
Checks if an account is empty.
"""
@spec empty?(t()) :: boolean()
def empty?(account) do
account.nonce == 0 && account.balance == 0 &&
(account.code_hash == Trie.empty_trie_root_hash() || is_nil(account.code_hash) ||
is_simple_account?(account))
end
@doc """
Returns account not saved to the database yet.
"""
@spec not_persistent_account() :: t()
def not_persistent_account do
%__MODULE__{code_hash: nil}
end
@doc """
Helper function to load multiple accounts.
## Examples
iex> state = MerklePatriciaTree.Trie.update_key(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0x01::160>> |> ExthCrypto.Hash.Keccak.kec(), ExRLP.encode([5, 6, <<1>>, <<2>>]))
iex> Blockchain.Account.get_accounts(state, [<<0x01::160>>, <<0x02::160>>])
[
%Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>},
nil
]
"""
@spec get_accounts(TrieStorage.t(), [Address.t()]) :: [t | nil]
def get_accounts(state, addresses) do
for address <- addresses, do: get_account(state, address)
end
@doc """
Stores an account at a given address. This function handles serializing
the account, encoding it to RLP and placing into the given state trie.
## Examples
iex> state = Blockchain.Account.put_account(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0x01::160>>, %Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>})
iex> MerklePatriciaTree.Trie.get_key(state, <<0x01::160>> |> ExthCrypto.Hash.Keccak.kec()) |> ExRLP.decode
[<<5>>, <<6>>, <<0x01>>, <<0x02>>]
"""
@spec put_account(TrieStorage.t(), Address.t(), t, boolean()) :: TrieStorage.t()
def put_account(state, address, account, return_account \\ false) do
prepared_account = %{account | code_hash: account.code_hash || @empty_keccak}
encoded_account =
prepared_account
|> serialize()
|> ExRLP.encode()
updated_state = TrieStorage.update_key(state, Keccak.kec(address), encoded_account)
if return_account, do: {updated_state, prepared_account}, else: updated_state
end
@doc """
Completely removes an account from the world state.
This is used, for instance, after a selfdestruct.
This is defined from Eq.(71) and Eq.(80) in the Yellow Paper.
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.del_account(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.del_account(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
"""
@spec del_account(TrieStorage.t(), Address.t()) :: TrieStorage.t()
def del_account(state, address) do
account = get_account(state, address)
case account do
nil ->
state
_acc ->
TrieStorage.remove_key(state, Keccak.kec(address))
end
end
@doc """
Gets and updates an account based on a given input
function `fun`. Account passed to `fun` will be blank
instead of nil if account doesn't exist.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> Blockchain.Account.update_account(state, <<0x01::160>>, fn (acc) -> {%{acc | balance: acc.balance + 5}, state} end)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 15}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> {_state, before_acct, after_acct} = Blockchain.Account.update_account(state, <<0x01::160>>, fn (acc) -> {%{acc | balance: acc.balance + 5}, state} end, true)
iex> before_acct.balance
10
iex> after_acct.balance
15
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> Blockchain.Account.update_account(state, <<0x01::160>>, fn (acc) -> {%{acc | nonce: acc.nonce + 1}, state} end, false)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 1}
"""
@spec update_account(TrieStorage.t(), Address.t(), (t -> t)) :: TrieStorage.t()
def update_account(state, address, fun) do
update_account(state, address, fun, false)
end
@spec update_account(TrieStorage.t(), Address.t(), (t -> t), true) :: {TrieStorage.t(), t, t}
@spec update_account(TrieStorage.t(), Address.t(), (t -> t), false) :: TrieStorage.t()
def update_account(state, address, fun, true) do
account = get_account(state, address) || not_persistent_account()
{updated_account, updated_state} = fun.(account)
updated_state = put_account(updated_state, address, updated_account)
{updated_state, account, updated_account}
end
def update_account(state, address, fun, false) do
account = get_account(state, address) || not_persistent_account()
{updated_account, updated_state} = fun.(account)
updated_state = put_account(updated_state, address, updated_account)
updated_state
end
@doc """
Simple helper function to increment a nonce value.
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{nonce: 10})
iex> state
...> |> Blockchain.Account.increment_nonce(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 11}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{nonce: 10})
iex> { _state, before_acct, after_acct } = Blockchain.Account.increment_nonce(state, <<0x01::160>>, true)
iex> before_acct.nonce
10
iex> after_acct.nonce
11
"""
@spec increment_nonce(TrieStorage.t(), Address.t()) :: TrieStorage.t()
def increment_nonce(state, address) do
increment_nonce(state, address, false)
end
@spec increment_nonce(TrieStorage.t(), Address.t(), true) :: {TrieStorage.t(), t, t}
@spec increment_nonce(TrieStorage.t(), Address.t(), false) :: TrieStorage.t()
def increment_nonce(state, address, return_accounts) do
update_account(
state,
address,
fn acct ->
{%{acct | nonce: acct.nonce + 1}, state}
end,
return_accounts
)
end
@doc """
Simple helper function to adjust wei in an account. Wei may be positive (to
add wei) or negative (to remove it). This function will raise if we attempt to
reduce wei in an account to less than zero.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, 13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 23}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, -3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 7}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, -13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
** (RuntimeError) wei reduced to less than zero
"""
@spec add_wei(TrieStorage.t(), Address.t(), EVM.Wei.t()) :: TrieStorage.t()
def add_wei(state, address, delta_wei) do
update_account(
state,
address,
fn acct ->
updated_balance = acct.balance + delta_wei
if updated_balance < 0, do: raise("wei reduced to less than zero")
{%{acct | balance: updated_balance}, state}
end
)
end
@doc """
Even simpler helper function to adjust wei in an account negatively. Wei may
be positive (to subtract wei) or negative (to add it). This function will
raise if we attempt to reduce wei in an account to less than zero.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, 3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 7}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, 13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
** (RuntimeError) wei reduced to less than zero
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, -3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 13}
"""
@spec dec_wei(TrieStorage.t(), Address.t(), EVM.Wei.t()) :: TrieStorage.t()
def dec_wei(state, address, delta_wei), do: add_wei(state, address, -1 * delta_wei)
@doc """
Helper function for transferring eth for one account to another.
This handles the fact that a new account may be shadow-created if
it receives eth. See Section 8, Eq.(100-104) of the Yellow Paper.
The Yellow Paper assumes this function will always succeed (as the checks
occur before this function is called), but we'll check just in case this
function is not properly called. The only case will be if the sending account
is nil or has an insufficient balance, but we add a few extra checks just in
case.
**Note**: transferring value to an empty account still adds value to said
account, even though it's effectively a zombie.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.put_account(<<0x02::160>>, %Blockchain.Account{balance: 5})
iex> {:ok, state} = Blockchain.Account.transfer(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 8}}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> {:ok, state} = Blockchain.Account.transfer(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 3}}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0xfc00:e968:6179::de52:7100>>, %Blockchain.Account{balance: 10})
iex> Blockchain.Account.transfer(state, <<0x0fc00:db20:35b:7399::5>>, <<0x02::160>>, 12)
{:error, "sender account insufficient wei"}
iex> Blockchain.Account.transfer(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0xfc00:e968:6179::de52:7100>>, <<0x02::160>>, -3)
{:error, "wei transfer cannot be negative"}
"""
@spec transfer(TrieStorage.t(), Address.t(), Address.t(), EVM.Wei.t()) ::
{:ok, TrieStorage.t()} | {:error, String.t()}
def transfer(state, from, to, wei) do
# TODO: Decide if we want to waste the cycles to pull
# the account information when `add_wei` will do that itself.
from_account = get_account(state, from)
cond do
wei < 0 ->
{:error, "wei transfer cannot be negative"}
from_account == nil ->
{:error, "sender account does not exist"}
from_account.balance < wei ->
{:error, "sender account insufficient wei"}
true ->
{:ok,
state
|> add_wei(from, -1 * wei)
|> add_wei(to, wei)}
end
end
@doc """
Performs transfer but raises instead of returning if an error occurs.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0xfc00:e968:6179::de52:7100>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.put_account(<<0xfdf8:f53e:61e4::1860>>, %Blockchain.Account{balance: 5})
iex> state = Blockchain.Account.transfer!(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 8}}
"""
@spec transfer!(TrieStorage.t(), Address.t(), Address.t(), EVM.Wei.t()) :: TrieStorage.t()
def transfer!(state, from, to, wei) do
case transfer(state, from, to, wei) do
{:ok, state} -> state
{:error, reason} -> raise reason
end
end
@doc """
Puts code into a given account.
**Note**: we need to store the `code_hash` outside of the contract itself and
only store the KEC of the code_hash. See Section 4.1 under `codeHash` in the
Yellow Paper.
All such code fragments are contained in the
state database under their corresponding hashes for later retrieval.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_code(<<0x01::160>>, <<1, 2, 3>>)
iex> Blockchain.Account.get_account(state, <<0x01::160>>)
%Blockchain.Account{code_hash: <<241, 136, 94, 218, 84, 183, 160, 83, 49, 140, 212, 30,
32, 147, 34, 13, 171, 21, 214, 83, 129, 177, 21, 122, 54, 51, 168,
59, 253, 92, 146, 57>>}
iex> MerklePatriciaTree.DB.get(state.db, ExthCrypto.Hash.Keccak.kec(<<1, 2, 3>>))
{:ok, <<1, 2, 3>>}
"""
@spec put_code(TrieStorage.t(), Address.t(), EVM.MachineCode.t()) :: TrieStorage.t()
def put_code(state, contract_address, machine_code) do
kec = Keccak.kec(machine_code)
new_state = TrieStorage.put_raw_key!(state, kec, machine_code)
update_account(
state,
contract_address,
fn acct ->
{%{acct | code_hash: kec}, new_state}
end,
false
)
end
@doc """
Returns the machine code associated with the account at the given address.
This will return nil if the contract has no associated code (i.e. it is a
simple account).
We may return `:not_found`, indicating that we were not able to find the given
code hash in the state trie.
Alternatively, we will return `{:ok, machine_code}` where `machine_code` may
be the empty string `<<>>`.
**Note**: "it is assumed that the client will have stored the pair (KEC(I_b),
I_b) at some point prior in order to make the determination of Ib feasible"
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.machine_code(<<0x01::160>>)
:not_found
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_code(<<0x01::160>>, <<1, 2, 3>>)
...> |> Blockchain.Account.machine_code(<<0x01::160>>)
{:ok, <<1, 2, 3>>}
"""
@spec machine_code(TrieStorage.t(), Address.t() | t()) :: {:ok, binary()} | :not_found
def machine_code(state, contract_address) when is_binary(contract_address) do
# TODO: Do we have a standard for default account values
account = get_account(state, contract_address) || not_persistent_account()
machine_code(state, account)
end
def machine_code(state, account) do
case account.code_hash do
@empty_keccak ->
{:ok, <<>>}
code_hash ->
case TrieStorage.get_raw_key(state, code_hash) do
{:ok, machine_code} when is_binary(machine_code) -> {:ok, machine_code}
_ -> :not_found
end
end
end
@doc """
Stores a value in the storage root of an account. This
is defined in Section 4.1 under **storageRoot** in the
Yellow Paper.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 5)
{:ok, 9}
"""
@spec put_storage(
TrieStorage.t(),
Address.t() | {Address.t(), t()},
integer(),
integer(),
false
) :: TrieStorage.t()
@spec put_storage(
TrieStorage.t(),
Address.t() | {Address.t(), t()},
integer(),
integer(),
true
) :: {t(), TrieStorage.t()}
def put_storage(state_trie, address, key, value, return_account \\ false)
def put_storage(state_trie, address, key, value, return_account) when is_binary(address) do
account = get_account(state_trie, address) || not_persistent_account()
put_storage(state_trie, {address, account}, key, value, return_account)
end
def put_storage(state_trie, {address, account}, key, value, return_account) do
{updated_storage_trie, updated_trie} =
Storage.put(state_trie, account.storage_root, key, value)
root_hash = TrieStorage.root_hash(updated_storage_trie)
updated_account = %{account | storage_root: root_hash}
updated_state_trie = put_account(updated_trie, address, updated_account)
if return_account, do: {updated_account, updated_state_trie}, else: updated_state_trie
end
@spec remove_storage(
TrieStorage.t(),
Address.t() | {Address.t(), t()},
integer(),
false
) :: TrieStorage.t()
@spec remove_storage(
TrieStorage.t(),
Address.t() | {Address.t(), t()},
integer(),
true
) :: {t(), TrieStorage.t()}
def remove_storage(state, address, key, return_account \\ false)
def remove_storage(state, address, key, return_account) when is_binary(address) do
account = get_account(state, address) || not_persistent_account()
remove_storage(state, {address, account}, key, return_account)
end
def remove_storage(state, {address, account}, key, return_account) do
{updated_storage_trie, updated_trie} = Storage.remove(state, account.storage_root, key)
root_hash = TrieStorage.root_hash(updated_storage_trie)
updated_account = %{account | storage_root: root_hash}
updated_state = put_account(updated_trie, address, updated_account)
if return_account, do: {updated_account, updated_state}, else: updated_state
end
@doc """
Gets a value from storage root of an account.
See Section 4.1 under **storageRoot** from the Yellow Paper.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 5)
{:ok, 9}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> Blockchain.Account.get_storage(state, <<02::160>>, 5)
:account_not_found
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 55)
:key_not_found
"""
@spec get_storage(TrieStorage.t(), Address.t() | t() | nil, integer()) ::
{:ok, any()} | :account_not_found | :key_not_found
def get_storage(state, address, key) when is_binary(address) do
account = get_account(state, address)
get_storage(state, account, key)
end
def get_storage(state, account, key) do
case account do
nil ->
:account_not_found
account ->
case Storage.fetch(state, account.storage_root, key) do
nil -> :key_not_found
value -> {:ok, value |> :binary.decode_unsigned()}
end
end
end
@doc """
Sets the balance of an account to zero.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 100})
iex> state
...> |> Blockchain.Account.clear_balance(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 0}
"""
@spec clear_balance(TrieStorage.t(), Address.t()) :: TrieStorage.t()
def clear_balance(state, address) do
update_account(
state,
address,
fn acct ->
{%{acct | balance: 0}, state}
end,
false
)
end
@doc """
Resets an account to the basic struct.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 100, code_hash: "abc"})
iex> state
...> |> Blockchain.Account.reset_account(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{}
"""
@spec reset_account(TrieStorage.t(), Address.t()) :: TrieStorage.t()
def reset_account(state, address) do
put_account(state, address, not_persistent_account())
end
# Accessor for the @empty_keccak module attribute (defined earlier in this
# module, not visible here) — presumably the Keccak-256 hash of the empty
# string, used to recognize accounts without code. TODO confirm against the
# attribute definition.
@spec empty_keccak() :: <<_::256>>
def empty_keccak do
  @empty_keccak
end
# Accessor for the @empty_trie module attribute (defined earlier in this
# module, not visible here) — presumably the root hash of an empty Merkle
# Patricia trie, used to recognize accounts with empty storage. TODO confirm.
@spec empty_trie() :: <<_::256>>
def empty_trie do
  @empty_trie
end
end
|
apps/blockchain/lib/blockchain/account.ex
| 0.845337
| 0.45744
|
account.ex
|
starcoder
|
defmodule APIacFilterThrottler.Functions do
  @moduledoc """
  Throttling functions that construct keys for the `APIacFilterThrottler` plug.

  Note that except `throttle_by_subject_client_safe/1`, these functions do
  not protect against collisions. See the *Security considerations* of the
  `APIacFilterThrottler` module for further information.
  """

  @doc """
  Returns the IP address as a string

  Make sure that the `remote_ip` of the `Plug.Conn.t` is correctly set

  Example:
  ```elixir
  iex> throttle_by_ip(conn)
  "192.168.3.11"
  ```
  """
  @spec throttle_by_ip(Plug.Conn.t()) :: String.t()
  def throttle_by_ip(conn) do
    # :inet.ntoa/1 returns a charlist; convert to a binary key.
    List.to_string(:inet.ntoa(conn.remote_ip))
  end

  @doc """
  Returns the IP address concatenated to the path as a string

  Make sure that the `remote_ip` of the `Plug.Conn.t` is correctly set

  Example:
  ```elixir
  iex> throttle_by_ip_path(conn)
  "192.168.3.11/api/prices/eurusd"
  ```
  """
  @spec throttle_by_ip_path(Plug.Conn.t()) :: String.t()
  def throttle_by_ip_path(conn) do
    List.to_string(:inet.ntoa(conn.remote_ip)) <> conn.request_path
  end

  @doc """
  Returns the authenticated client as a string

  Make sure that a client is authenticated by an `APIac.Authenticator` plug, otherwise
  this function will raise an exception since you certainly don't want clients to be
  throttled, but not unauthenticated accesses

  Example:
  ```elixir
  iex> throttle_by_client(conn)
  "client32187"
  ```
  """
  @spec throttle_by_client(Plug.Conn.t()) :: String.t()
  def throttle_by_client(conn) do
    case APIac.client(conn) do
      client when is_binary(client) ->
        client

      nil ->
        raise "#{__MODULE__}: unauthenticated client, cannot throttle"
    end
  end

  @doc """
  Returns the authenticated client concatenated to the path as a string

  Make sure that a client is authenticated by an `APIac.Authenticator` plug, otherwise
  this function will raise an exception since you certainly don't want clients to be
  throttled, but not unauthenticated accesses

  Example:
  ```elixir
  iex> throttle_by_client_path(conn)
  "client32187/api/prices/eurusd"
  ```
  """
  @spec throttle_by_client_path(Plug.Conn.t()) :: String.t()
  def throttle_by_client_path(conn) do
    case APIac.client(conn) do
      client when is_binary(client) ->
        client <> conn.request_path

      nil ->
        raise "#{__MODULE__}: unauthenticated client, cannot throttle"
    end
  end

  @doc """
  Returns the IP address concatenated to client as a string. May be usefull when
  dealing with OAuth2 public clients such as mobiles apps or SPAs, when many devices
  share the same `client_id` and to limit the *global* volume of calls so as to
  protect against, for instance, application faults triggering request storms

  Make sure that the `remote_ip` of the `Plug.Conn.t` is correctly set

  Example:
  ```elixir
  iex> throttle_by_ip_client(conn)
  "172.16.17.326client10341"
  ```
  """
  @spec throttle_by_ip_client(Plug.Conn.t()) :: String.t()
  def throttle_by_ip_client(conn) do
    # NOTE(review): unlike throttle_by_client/1, this raises ArgumentError
    # (nil concatenation) rather than a descriptive error when the client is
    # unauthenticated — consider aligning the behavior.
    List.to_string(:inet.ntoa(conn.remote_ip)) <> APIac.client(conn)
  end

  @doc """
  Returns the subject concatenated to the client. Maybe be usefull when
  dealing with OAuth2 public clients such as mobiles apps or SPAs, when many devices
  share the same `client_id`, to protect against a malicious user trying to globally
  block the API

  Example:
  ```elixir
  iex> throttle_by_subject_client(conn)
  "bob23mymobileapp"
  ```
  """
  @spec throttle_by_subject_client(Plug.Conn.t()) :: String.t()
  def throttle_by_subject_client(conn) do
    APIac.subject(conn) <> APIac.client(conn)
  end

  @doc """
  Returns the IP address concatenated to subject and the client. Maybe be usefull when
  dealing with OAuth2 public clients such as mobiles apps that can be used on several
  personal devices (e.g. Android laptop, smartphone and tablet) simultaneously (however
  devices could share the same IP address)

  Example:
  ```elixir
  iex> throttle_by_ip_subject_client(conn)
  "275.33.99.208bob23mymobileapp"
  ```
  """
  @spec throttle_by_ip_subject_client(Plug.Conn.t()) :: String.t()
  def throttle_by_ip_subject_client(conn) do
    List.to_string(:inet.ntoa(conn.remote_ip)) <> APIac.subject(conn) <> APIac.client(conn)
  end

  @doc """
  Same as `throttle_by_ip_subject_client/1` but avoids collisions by using
  `:erlang.phash2/1` on the `{ip, subject, client}` tuple

  Example:
  ```elixir
  iex> throttle_by_subject_client_safe(conn)
  "37541545"
  ```
  """
  @spec throttle_by_subject_client_safe(Plug.Conn.t()) :: String.t()
  def throttle_by_subject_client_safe(conn) do
    {conn.remote_ip, APIac.subject(conn), APIac.client(conn)}
    |> :erlang.phash2()
    |> Integer.to_string()
  end
end
|
lib/apiac_filter_throttler/functions.ex
| 0.940831
| 0.794982
|
functions.ex
|
starcoder
|
defmodule AWS.API.Pricing do
  @moduledoc """
  AWS Price List Service API (AWS Price List Service) is a centralized and
  convenient way to programmatically query Amazon Web Services for services,
  products, and pricing information. The AWS Price List Service uses
  standardized product attributes such as `Location`, `Storage Class`, and
  `Operating System`, and provides prices at the SKU level. You can use the
  AWS Price List Service to build cost control and scenario planning tools,
  reconcile billing data, forecast future spend for budgeting purposes, and
  provide cost benefit analysis that compare your internal workloads with
  AWS.

  Use `GetServices` without a service code to retrieve the service codes for
  all AWS services, then `GetServices` with a service code to retrieve the
  attribute names for that service. After you have the service code and
  attribute names, you can use `GetAttributeValues` to see what values are
  available for an attribute. With the service code and an attribute name and
  value, you can use `GetProducts` to find specific products that you're
  interested in, such as an `AmazonEC2` instance, with a `Provisioned IOPS`
  `volumeType`.

  Service Endpoint

  AWS Price List Service API provides the following two endpoints:

  <ul> <li> https://api.pricing.us-east-1.amazonaws.com

  </li> <li> https://api.pricing.ap-south-1.amazonaws.com

  </li> </ul>
  """

  @doc """
  Returns the metadata for one service or a list of the metadata for all
  services. Use this without a service code to get the service codes for all
  services. Use it with a service code, such as `AmazonEC2`, to get
  information specific to that service, such as the attribute names available
  for that service. For example, some of the attribute names available for
  EC2 are `volumeType`, `maxIopsVolume`, `operation`, `locationType`, and
  `instanceCapacity10xlarge`.
  """
  def describe_services(client, input, options \\ []) do
    request(client, "DescribeServices", input, options)
  end

  @doc """
  Returns a list of attribute values. Attributes are similar to the details in
  a Price List API offer file. For a list of available attributes, see [Offer
  File
  Definitions](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/reading-an-offer.html#pps-defs)
  in the [AWS Billing and Cost Management User
  Guide](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-what-is.html).
  """
  def get_attribute_values(client, input, options \\ []) do
    request(client, "GetAttributeValues", input, options)
  end

  @doc """
  Returns a list of all products that match the filter criteria.
  """
  def get_products(client, input, options \\ []) do
    request(client, "GetProducts", input, options)
  end

  # Signs and sends a JSON request to the Price List service, decoding the
  # response body when one is present. An empty 200 body yields {:ok, nil, resp}.
  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, Poison.Parser.t() | nil, Poison.Response.t()}
          | {:error, Poison.Parser.t()}
          | {:error, HTTPoison.Error.t()}
  defp request(client, action, input, options) do
    client = %{client | service: "pricing"}
    host = build_host("api.pricing", client)
    url = build_url(host, client)

    headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.1"},
      {"X-Amz-Target", "AWSPriceListService.#{action}"}
    ]

    payload = Poison.Encoder.encode(input, %{})
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)

    case HTTPoison.post(url, payload, headers, options) do
      {:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
        {:ok, nil, response}

      {:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
        {:ok, Poison.Parser.parse!(body, %{}), response}

      {:ok, %HTTPoison.Response{body: body}} ->
        # Non-200: AWS returns a JSON error document.
        error = Poison.Parser.parse!(body, %{})
        {:error, error}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, %HTTPoison.Error{reason: reason}}
    end
  end

  # The "local" pseudo-region targets a local test endpoint.
  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end

  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end

  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end
end
|
lib/aws/api_pricing.ex
| 0.806662
| 0.667331
|
api_pricing.ex
|
starcoder
|
defmodule MatrixReloaded.Matrix do
@moduledoc """
Provides a set of functions to work with matrices.
Don't forget, numbering of row and column starts from `0` and goes
to `m - 1` and `n - 1` where `{m, n}` is dimension (size) of matrix.
"""
alias MatrixReloaded.Vector
@type t :: [Vector.t()]
@type dimension :: {pos_integer, pos_integer} | pos_integer
@type index :: {non_neg_integer, non_neg_integer}
@type submatrix :: number | Vector.t() | t()
@doc """
Builds a new matrix of the given size. A plain positive integer yields a
square matrix; a `{rows, cols}` tuple yields a rectangular one. Negative
values yield an error message. Every element is filled with `val`, which
defaults to `0`.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Examples

    iex> MatrixReloaded.Matrix.new(3)
    {:ok, [[0, 0, 0], [0, 0, 0], [0, 0, 0]]}

    iex> MatrixReloaded.Matrix.new({2, 3}, -10)
    {:ok, [[-10, -10, -10], [-10, -10, -10]]}

"""
@spec new(dimension, number) :: Result.t(String.t(), t())
def new(dimension, val \\ 0)

def new(dim, val) when is_tuple(dim) do
  dim
  |> is_dimension_ok?()
  |> Result.map(fn {rows, cols} ->
    Enum.map(1..rows, fn _ -> make_row(cols, val) end)
  end)
end

def new(dim, val) do
  dim
  |> is_dimension_ok?()
  |> Result.map(fn rows ->
    Enum.map(1..rows, fn _ -> make_row(rows, val) end)
  end)
end
@doc """
Element-wise sum of two matrices. Sizes (dimensions) of both matrices must
be same, otherwise you get an error message.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Examples

    iex> mat1 = {:ok, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}
    iex> mat2 = MatrixReloaded.Matrix.new(3,1)
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.add(&1, &2))
    {:ok,
      [
        [2, 3, 4],
        [5, 6, 7],
        [8, 9, 10]
      ]
    }

"""
@spec add(t(), t()) :: Result.t(String.t(), t())
def add(matrix1, matrix2) do
  # Comparing the size tuples covers both the row and the column dimension.
  if size(matrix1) == size(matrix2) do
    matrix1
    |> Enum.zip(matrix2)
    |> Enum.map(fn {left_row, right_row} -> Vector.add(left_row, right_row) end)
    |> Result.product()
  else
    Result.error("Sizes (dimensions) of both matrices must be same!")
  end
end
@doc """
Element-wise difference of two matrices. Sizes (dimensions) of both matrices
must be same, otherwise you get an error message.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Examples

    iex> mat1 = {:ok, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}
    iex> mat2 = MatrixReloaded.Matrix.new(3,1)
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.sub(&1, &2))
    {:ok,
      [
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]
      ]
    }

"""
@spec sub(t(), t()) :: Result.t(String.t(), t())
def sub(matrix1, matrix2) do
  # Comparing the size tuples covers both the row and the column dimension.
  if size(matrix1) == size(matrix2) do
    matrix1
    |> Enum.zip(matrix2)
    |> Enum.map(fn {left_row, right_row} -> Vector.sub(left_row, right_row) end)
    |> Result.product()
  else
    Result.error("Sizes (dimensions) of both matrices must be same!")
  end
end
@doc """
Product of two matrices. If matrix `A` has a size `n × p` and matrix `B` has
a size `p × m` then their matrix product `A*B` is matrix of size `n × m`.
Otherwise you get an error message.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Examples

    iex> mat1 = {:ok, [[1, 2], [3, 4], [5, 6], [7, 8]]}
    iex> mat2 = {:ok, [[1, 2 ,3], [4, 5, 6]]}
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.product(&1, &2))
    {:ok,
      [
        [9, 12, 15],
        [19, 26, 33],
        [29, 40, 51],
        [39, 54, 69]
      ]
    }

"""
@spec product(t(), t()) :: Result.t(String.t(), t())
def product(matrix1, matrix2) do
  {_rs1, cs1} = size(matrix1)
  {rs2, _cs2} = size(matrix2)

  if cs1 == rs2 do
    # Transpose the right-hand matrix once up front; the original recomputed
    # the transpose inside the loop, i.e. once per row of the left matrix.
    cols2 = transpose(matrix2)

    matrix1
    |> Enum.map(fn row1 ->
      Enum.map(cols2, fn col2 -> Vector.dot(row1, col2) end)
    end)
    |> Enum.map(&Result.product(&1))
    |> Result.product()
  else
    Result.error("Column size of first matrix must be same as row size of second matrix!")
  end
end
@doc """
Schur product (or the Hadamard product) of two matrices. It produces another
matrix where each element `i, j` is the product of elements `i, j` of the
original two matrices. Sizes (dimensions) of both matrices must be same,
otherwise you get an error message.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Examples

    iex> mat1 = {:ok, [[1, 2, 3], [5, 6, 7]]}
    iex> mat2 = {:ok, [[1, 2 ,3], [4, 5, 6]]}
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.schur_product(&1, &2))
    {:ok,
      [
        [1, 4, 9],
        [20, 30, 42]
      ]
    }

"""
@spec schur_product(t(), t()) :: Result.t(String.t(), t())
def schur_product(matrix1, matrix2) do
  {rs1, cs1} = size(matrix1)
  {rs2, cs2} = size(matrix2)

  case {rs1, cs1} == {rs2, cs2} do
    true ->
      [matrix1, matrix2]
      |> Enum.zip()
      |> Enum.map(fn {row1, row2} -> Vector.inner_product(row1, row2) end)
      |> Result.product()

    false ->
      Result.error(
        "Dimension of matrix {#{rs1}, #{cs1}} is not same as dimension of matrix {#{rs2}, #{cs2}}!"
      )
  end
end
@doc """
Updates the matrix by given a submatrix. The position of submatrix inside
matrix is given by index `{row_num, col_num}` and dimension of submatrix.
Size of submatrix must be less than or equal to size of matrix. Otherwise
you get an error message. The values of indices start from `0` to `matrix row size - 1`.
Similarly for `col` size.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = MatrixReloaded.Matrix.new(4)
    iex> mat |> Result.and_then(&MatrixReloaded.Matrix.update(&1, [[1,2],[3,4]], {1,2}))
    {:ok,
      [
        [0, 0, 0, 0],
        [0, 0, 1, 2],
        [0, 0, 3, 4],
        [0, 0, 0, 0]
      ]
    }

"""
@spec update(t(), submatrix, index) :: Result.t(String.t(), t())
def update(matrix, submatrix, index) do
  # Validation pipeline: each `is_*?` helper (defined later in this module,
  # not visible here) presumably returns {:ok, matrix} or {:error, msg}, so
  # the first failing check short-circuits via Result.and_then — TODO confirm.
  matrix
  |> is_index_ok?(index)
  |> Result.and_then(
    &is_submatrix_smaller_than_matrix?(&1, size(matrix), size(submatrix), :update)
  )
  |> Result.and_then(&is_submatrix_in_matrix?(&1, size(matrix), size(submatrix), index))
  # All checks passed: splice the submatrix in at `index`.
  |> Result.map(&make_update(&1, submatrix, index))
end
@doc """
Updates the matrix by given a number. The position of element in matrix
which you want to change is given by tuple `{row_num, col_num}`.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = MatrixReloaded.Matrix.new(3)
    iex> mat |> Result.and_then(&MatrixReloaded.Matrix.update_element(&1, -1, {1, 1}))
    {:ok,
      [
        [0, 0, 0],
        [0, -1, 0],
        [0, 0, 0]
      ]
    }

"""
@spec update_element(t(), number, index) :: Result.t(String.t(), t())
def update_element(matrix, el, index) when is_number(el) do
  matrix
  |> is_index_ok?(index)
  |> Result.and_then(&is_element_in_matrix?(&1, size(matrix), index))
  # A single element is written as a 1x1 submatrix, reusing make_update/3.
  |> Result.map(&make_update(&1, [[el]], index))
end
@doc """
Updates row in the matrix by given a row vector (list) of numbers. The row which
you want to change is given by tuple `{row_num, col_num}`. Both values are non
negative integers.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> {:ok, mat} = MatrixReloaded.Matrix.new(4)
    iex> MatrixReloaded.Matrix.update_row(mat, [1, 2, 3], {3, 1})
    {:ok,
      [
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 1, 2, 3]
      ]
    }

"""
@spec update_row(t(), Vector.t(), index) :: Result.t(String.t(), t())
def update_row(matrix, row, index) do
  # Validators (defined later in this module, not visible here) chain via
  # Result.and_then, so the first failure yields the error tuple.
  matrix
  |> is_index_ok?(index)
  |> Result.and_then(&is_row_ok?(&1, row))
  |> Result.and_then(&is_row_size_smaller_than_rows_of_matrix?(&1, size(matrix), length(row)))
  |> Result.and_then(&is_row_in_matrix?(&1, size(matrix), length(row), index))
  # A row vector is a 1 x n submatrix, hence the wrapping in [row].
  |> Result.map(&make_update(&1, [row], index))
end
@doc """
Updates column in the matrix by given a column vector. The column which you
want to change is given by tuple `{row_num, col_num}`. Both values are non
negative integers.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> {:ok, mat} = MatrixReloaded.Matrix.new(4)
    iex> MatrixReloaded.Matrix.update_col(mat, [[1], [2], [3]], {0, 1})
    {:ok,
      [
        [0, 1, 0, 0],
        [0, 2, 0, 0],
        [0, 3, 0, 0],
        [0, 0, 0, 0]
      ]
    }

"""
@spec update_col(t(), Vector.column(), index) :: Result.t(String.t(), t())
# A column vector is a list of single-element lists, e.g. [[1], [2], [3]];
# the guard checks the first inner list has length 1. Such a value is an
# m x 1 submatrix, so this simply delegates to update/3.
def update_col(matrix, [hd | _] = submatrix, index)
    when is_list(submatrix) and length(hd) == 1 do
  update(matrix, submatrix, index)
end
@doc """
Updates the matrix by given a submatrices. The positions (or locations) of these
submatrices are given by list of indices. Index of the individual submatrices is
tuple of two numbers. These two numbers are number row and number column of matrix
where the submatrices will be located. All submatrices must have same size (dimension).

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = MatrixReloaded.Matrix.new(5)
    iex> sub_mat = MatrixReloaded.Matrix.new(2,1)
    iex> positions = [{0,0}, {3, 3}]
    iex> [mat, sub_mat] |> Result.and_then_x(&MatrixReloaded.Matrix.update_map(&1, &2, positions))
    {:ok,
      [
        [1, 1, 0, 0, 0],
        [1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1],
        [0, 0, 0, 1, 1]
      ]
    }

"""
@spec update_map(t(), submatrix, list(index)) :: Result.t(String.t(), t())
def update_map(matrix, submatrix, position_indices) do
  # Thread the result through every position; the first failing update
  # short-circuits all subsequent ones.
  initial = {:ok, matrix}

  Enum.reduce(position_indices, initial, fn position, acc ->
    Result.and_then(acc, fn current -> update(current, submatrix, position) end)
  end)
end
@doc """
Gets a submatrix from the matrix. By index you can select a submatrix. Dimension of
submatrix is given by positive number (result then will be a square matrix) or tuple
of two positive numbers (you get then a rectangular matrix).

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_submatrix(mat, {1, 2}, 2)
    {:ok,
      [
        [1, 2],
        [3, 4]
      ]
    }

    iex> mat = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6]]
    iex> MatrixReloaded.Matrix.get_submatrix(mat, {2, 1}, {3, 3})
    {:ok,
      [
        [1, 2, 3],
        [4, 5, 6]
      ]
    }

"""
@spec get_submatrix(t(), index, dimension) :: Result.t(String.t(), t())
def get_submatrix(matrix, index, dimension) do
  # dimension_of_submatrix/2 is defined later in the module (not visible
  # here); presumably it normalizes `dimension` into the {to_row, to_col}
  # form make_get_submatrix/3 expects — TODO confirm.
  dim_sub = dimension_of_submatrix(index, dimension)

  matrix
  |> is_index_ok?(index)
  |> Result.and_then(&is_submatrix_smaller_than_matrix?(&1, size(matrix), dim_sub, :get))
  # NOTE(review): argument order here (index before dim_sub, plus :get)
  # differs from the update/3 call — the helper appears to have a distinct
  # arity/clause for the :get path; verify against its definition.
  |> Result.and_then(&is_submatrix_in_matrix?(&1, size(matrix), index, dim_sub, :get))
  |> Result.map(&make_get_submatrix(&1, index, dim_sub))
end
@doc """
Gets an element from the matrix. By index you can select an element.

Returns result, it means either tuple of `{:ok, number}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_element(mat, {2, 2})
    {:ok, 3}

"""
@spec get_element(t(), index) :: Result.t(String.t(), number)
def get_element(matrix, index) when is_tuple(index) do
  # An element is read as a 1x1 submatrix at `index`.
  dim_sub = dimension_of_submatrix(index, 1)

  matrix
  |> is_element_in_matrix?(size(matrix), index, :get)
  |> Result.map(&make_get_submatrix(&1, index, dim_sub))
  # Unwrap [[el]] down to the bare number.
  |> Result.map(fn el -> el |> hd |> hd end)
end
@doc """
Gets a whole row from the matrix. By row number you can select the row which
you want.

Returns result, it means either tuple of `{:ok, number}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_row(mat, 1)
    {:ok, [0, 0, 1, 2]}

"""
@spec get_row(t(), non_neg_integer) :: Result.t(String.t(), Vector.t())
def get_row(matrix, row_num) do
  {rs, cs} = size(matrix)

  matrix
  |> is_non_neg_integer?(row_num)
  |> Result.and_then(&is_row_num_at_matrix?(&1, {rs, cs}, row_num))
  # NOTE(review): make_get_submatrix/3 treats its last tuple as element
  # counts, so {row_num, cs} selects rows row_num.. and all cs columns; the
  # trailing hd/1 keeps only row `row_num`. Quirky but consistent with the
  # doctest above.
  |> Result.map(&make_get_submatrix(&1, {row_num, 0}, {row_num, cs}))
  |> Result.map(&hd(&1))
end
@doc """
Gets a part row from the matrix. By index and positive number you can select
the row and elements which you want.

Returns result, it means either tuple of `{:ok, number}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_row(mat, {2, 1}, 2)
    {:ok, [0, 3]}

"""
@spec get_row(t(), index, non_neg_integer) :: Result.t(String.t(), Vector.t())
def get_row(matrix, {row_num, _} = index, num_of_el) do
  {rs, cs} = size(matrix)

  matrix
  |> is_index_ok?(index)
  |> Result.and_then(&is_positive_integer?(&1, num_of_el))
  |> Result.and_then(&is_row_num_at_matrix?(&1, {rs, cs}, row_num))
  # Take `num_of_el` elements of row `row_num` starting at `index`; the
  # hd/1 unwraps the single selected row (see get_row/2 for the quirk).
  |> Result.map(&make_get_submatrix(&1, index, {row_num, num_of_el}))
  |> Result.map(&hd(&1))
end
@doc """
Gets a whole column from the matrix. By column number you can select the column
which you want.

Returns result, it means either tuple of `{:ok, number}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_col(mat, 3)
    {:ok, [[0], [2], [4], [0]]}

"""
@spec get_col(t(), non_neg_integer) :: Result.t(String.t(), Vector.column())
def get_col(matrix, col_num) do
  {rs, cs} = size(matrix)

  # After transposing, columns become rows: the transposed matrix has `cs`
  # rows of length `rs`. The original passed {rs, cs} to the bounds check
  # and extracted only `cs` elements, which mis-validated and truncated the
  # column on non-square matrices; get_col/3 already uses the transposed
  # dimensions, so this now matches it.
  matrix
  |> is_non_neg_integer?(col_num)
  |> Result.map(&transpose/1)
  |> Result.and_then(&is_row_num_at_matrix?(&1, {cs, rs}, col_num, :column))
  |> Result.map(&make_get_submatrix(&1, {col_num, 0}, {col_num, rs}))
  |> Result.map(&hd(&1))
  # Turn the flat row back into a column of singletons.
  |> Result.map(&Vector.transpose(&1))
end
@doc """
Gets a part column from the matrix. By index and positive number you can select
the column and elements which you want.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.get_col(mat, {1, 2}, 2)
    {:ok, [[1], [3]]}

"""
@spec get_col(t(), index, non_neg_integer) :: Result.t(String.t(), Vector.column())
def get_col(matrix, {row_num, col_num} = index, num_of_el) do
  {rs, cs} = size(matrix)

  matrix
  |> is_index_ok?(index)
  |> Result.and_then(&is_positive_integer?(&1, num_of_el))
  # Work on the transposed matrix, where the wanted column is a row; note
  # the bounds check accordingly uses the transposed dimensions {cs, rs}.
  |> Result.map(&transpose/1)
  |> Result.and_then(&is_row_num_at_matrix?(&1, {cs, rs}, col_num, :column))
  # Index is flipped ({col_num, row_num}) because rows/columns swapped.
  |> Result.map(&make_get_submatrix(&1, {col_num, row_num}, {col_num, num_of_el}))
  |> Result.map(&hd(&1))
  # Turn the flat row back into a column of singletons.
  |> Result.map(&Vector.transpose(&1))
end
@doc """
Creates a square diagonal matrix with the elements of vector on the main diagonal
or on lower/upper bidiagonal if diagonal number `k` is `k < 0` or `0 < k`.
This number `k` must be integer.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> MatrixReloaded.Matrix.diag([1, 2, 3])
    {:ok,
      [
        [1, 0, 0],
        [0, 2, 0],
        [0, 0, 3]
      ]
    }

    iex> MatrixReloaded.Matrix.diag([1, 2, 3], 1)
    {:ok,
      [
        [0, 1, 0, 0],
        [0, 0, 2, 0],
        [0, 0, 0, 3],
        [0, 0, 0, 0]
      ]
    }

"""
@spec diag(Vector.t(), integer()) :: Result.t(String.t(), t())
def diag(vector, k \\ 0)

def diag(vector, k) when is_list(vector) and is_integer(k) and 0 <= k do
  len = length(vector)

  if k <= len do
    # Walk the vector once with its index instead of calling Enum.at/2 for
    # every position — the original was accidentally O(n^2) on linked lists
    # (and crashed on an empty vector via the decreasing 0..-1 range).
    vector
    |> Enum.with_index()
    |> Enum.reduce(new(len + k), fn {el, i}, acc ->
      Result.and_then(acc, &update_element(&1, el, {i, i + k}))
    end)
  else
    Result.error("Length of upper bidiagonal must be less or equal to length of vector!")
  end
end

def diag(vector, k) when is_list(vector) and is_integer(k) and k < 0 do
  len = length(vector)

  if abs(k) <= len do
    # k is negative here, so `len - k` widens the matrix and `i - k` shifts
    # the element below the main diagonal.
    vector
    |> Enum.with_index()
    |> Enum.reduce(new(len - k), fn {el, i}, acc ->
      Result.and_then(acc, &update_element(&1, el, {i - k, i}))
    end)
  else
    Result.error("Length of lower bidiagonal must be less or equal to length of vector!")
  end
end
@doc """
Transpose of matrix.

## Example:

    iex> mat = [[1,2,3], [4,5,6], [7,8,9]]
    iex> MatrixReloaded.Matrix.transpose(mat)
    [
      [1, 4, 7],
      [2, 5, 8],
      [3, 6, 9]
    ]

"""
@spec transpose(t()) :: t()
# Thin public wrapper over the recursive private implementation.
def transpose(matrix), do: make_transpose(matrix)
@doc """
Flip columns of matrix in the left-right direction (i.e. about a vertical axis).

## Example:

    iex> mat = [[1,2,3], [4,5,6], [7,8,9]]
    iex> MatrixReloaded.Matrix.flip_lr(mat)
    [
      [3, 2, 1],
      [6, 5, 4],
      [9, 8, 7]
    ]

"""
@spec flip_lr(t()) :: t()
# Mirroring about the vertical axis is just reversing every row.
def flip_lr(matrix), do: Enum.map(matrix, &Enum.reverse/1)
@doc """
Flip rows of matrix in the up-down direction (i.e. about a horizontal axis).

## Example:

    iex> mat = [[1,2,3], [4,5,6], [7,8,9]]
    iex> MatrixReloaded.Matrix.flip_ud(mat)
    [
      [7, 8, 9],
      [4, 5, 6],
      [1, 2, 3]
    ]

"""
@spec flip_ud(t()) :: t()
# Mirroring about the horizontal axis is just reversing the row order.
def flip_ud(matrix), do: Enum.reverse(matrix)
@doc """
Drops the row or list of rows from the matrix. The row number (or row numbers)
must be positive integer.

Returns matrix.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.drop_row(mat, 2)
    {:ok,
      [
        [0, 0, 0, 0],
        [0, 0, 1, 2],
        [0, 0, 0, 0]
      ]
    }

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.drop_row(mat, [0, 3])
    {:ok,
      [
        [0, 0, 1, 2],
        [0, 0, 3, 4]
      ]
    }

"""
@spec drop_row(t(), non_neg_integer | [non_neg_integer]) :: Result.t(String.t(), t())
# List of row numbers: validate them all, then delete them one by one.
def drop_row(matrix, rows) when is_list(rows) do
  matrix
  |> is_all_row_numbers_ok?(rows)
  |> Result.and_then(fn checked -> make_drop_rows(checked, rows) end)
end

# Single row number.
def drop_row(matrix, row) do
  matrix
  |> is_non_neg_integer?(row)
  |> Result.and_then(fn checked -> make_drop_row(checked, row) end)
end
@doc """
Drops the column or list of columns from the matrix. The column number
(or column numbers) must be positive integer.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.drop_col(mat, 2)
    {:ok,
      [
        [0, 0, 0],
        [0, 0, 2],
        [0, 0, 4],
        [0, 0, 0]
      ]
    }

    iex> mat = [[0, 0, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4], [0, 0, 0, 0]]
    iex> MatrixReloaded.Matrix.drop_col(mat, [0, 1])
    {:ok,
      [
        [0, 0],
        [1, 2],
        [3, 4],
        [0, 0]
      ]
    }

"""
@spec drop_col(t(), non_neg_integer | [non_neg_integer]) :: Result.t(String.t(), t())
# Columns are dropped by transposing, dropping rows, and transposing back.
def drop_col(matrix, cols) when is_list(cols) do
  matrix
  |> transpose()
  |> is_all_row_numbers_ok?(cols)
  |> Result.and_then(fn checked -> make_drop_rows(checked, cols, :column) end)
  |> Result.map(&transpose/1)
end

def drop_col(matrix, col) do
  matrix
  |> transpose()
  |> is_non_neg_integer?(col)
  |> Result.and_then(fn checked -> make_drop_row(checked, col, :column) end)
  |> Result.map(&transpose/1)
end
@doc """
Concatenate matrices horizontally. Both matrices must have same a row dimension.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat1 = MatrixReloaded.Matrix.diag([1, 1, 1])
    iex> mat2 = MatrixReloaded.Matrix.diag([2, 2, 2])
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.concat_row(&1, &2))
    {:ok,
      [
        [1, 0, 0, 2, 0, 0],
        [0, 1, 0, 0, 2, 0],
        [0, 0, 1, 0, 0, 2]
      ]
    }

"""
@spec concat_row(t(), t()) :: Result.t(String.t(), t())
def concat_row(matrix1, matrix2) do
  {rows1, _cols1} = size(matrix1)
  {rows2, _cols2} = size(matrix2)

  case rows1 == rows2 do
    true ->
      # Glue matching rows together, side by side.
      [matrix1, matrix2]
      |> Enum.zip()
      |> Enum.map(fn {left, right} -> left ++ right end)
      |> Result.ok()

    false ->
      Result.error("Matrices have different row dimensions. Must be same!")
  end
end
@doc """
Concatenate matrices vertically. Both matrices must have same a column dimension.

Returns result, it means either tuple of `{:ok, matrix}` or `{:error, "msg"}`.

## Example:

    iex> mat1 = MatrixReloaded.Matrix.diag([1, 1, 1])
    iex> mat2 = MatrixReloaded.Matrix.diag([2, 2, 2])
    iex> Result.and_then_x([mat1, mat2], &MatrixReloaded.Matrix.concat_col(&1, &2))
    {:ok,
      [
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1],
        [2, 0, 0],
        [0, 2, 0],
        [0, 0, 2]
      ]
    }

"""
@spec concat_col(t(), t()) :: Result.t(String.t(), t())
def concat_col(matrix1, matrix2) do
  {_rows1, cols1} = size(matrix1)
  {_rows2, cols2} = size(matrix2)

  case cols1 == cols2 do
    true ->
      # Vertical concatenation is simply appending the row lists.
      Result.ok(matrix1 ++ matrix2)

    false ->
      Result.error("Matrices have different column dimensions. Must be same!")
  end
end
@doc """
Reshape vector or matrix. The `row` and `col` numbers must be positive number.
By the `row` or `col` number you can change shape of matrix, respectively create
new from vector.

Returns result, it means either tuple of `{:ok, vector | matrix}` or `{:error, "msg"}`.

## Example:

    iex> 1..10 |> Enum.to_list |> MatrixReloaded.Matrix.reshape(5, 2)
    {:ok,
      [
        [1, 2],
        [3, 4],
        [5, 6],
        [7, 8],
        [9, 10]
      ]
    }

    iex> MatrixReloaded.Matrix.new({3,4}) |> Result.map(&MatrixReloaded.Matrix.reshape(&1, 2, 6))
    {:ok,
      [
        [0, 0, 0, 0, 0, 0,],
        [0, 0, 0, 0, 0, 0,]
      ]
    }

"""
@spec reshape(Vector.t() | t(), pos_integer(), pos_integer()) ::
        Result.t(String.t(), Vector.t()) | Result.t(String.t(), t())
# Vector -> single column.
def reshape([el | _] = vector, row, col)
    when is_list(vector) and is_number(el) and
           is_integer(row) and row > 0 and is_integer(col) and col == 1 do
  # Fixed: transpose/1 expects a list of rows and crashed on a flat vector
  # (hd/1 of a number); Vector.transpose/1 turns a flat vector into a
  # column. The shape is now also validated and wrapped, per the @spec.
  vector
  |> is_reshapeable?(row, col)
  |> Result.map(&Vector.transpose(&1))
end

# Vector -> single row (identity on the data).
def reshape([el | _] = vector, row, col)
    when is_list(vector) and is_number(el) and
           is_integer(row) and row == 1 and is_integer(col) and col > 0 do
  # Fixed: returns {:ok, vector} after validation (the original returned
  # the bare vector, contradicting the @spec).
  is_reshapeable?(vector, row, col)
end

# Vector -> matrix.
def reshape([el | _] = vector, row, col)
    when is_list(vector) and is_number(el) and
           is_integer(row) and row > 0 and is_integer(col) and col > 0 do
  vector
  |> is_reshapeable?(row, col)
  |> Result.map(&Enum.chunk_every(&1, col))
end

# Matrix -> single row.
def reshape([r | _] = matrix, row, col)
    when is_list(matrix) and is_list(r) and
           is_integer(row) and row == 1 and is_integer(col) and col > 0 do
  matrix
  |> is_reshapeable?(row, col)
  # Fixed: List.flatten/1 returns a bare list, so it must be applied with
  # Result.map — Result.and_then leaked the unwrapped list.
  |> Result.map(&List.flatten(&1))
end

# Matrix -> matrix.
def reshape([r | _] = matrix, row, col)
    when is_list(matrix) and is_list(r) and
           is_integer(row) and row > 0 and is_integer(col) and col > 0 do
  matrix
  |> is_reshapeable?(row, col)
  |> Result.map(&List.flatten(&1))
  # Fixed: Enum.chunk_every/2 returns a bare list -> Result.map, not
  # and_then (the doctest above expects an {:ok, matrix} tuple).
  |> Result.map(&Enum.chunk_every(&1, col))
end

# Fixed guard: the original `row < 2 and col < 2` let inputs like
# (vector, 0, 5) fall through every clause and raise FunctionClauseError
# instead of returning the error tuple promised by the message.
def reshape(_matrix, row, col) when row < 1 or col < 1 do
  Result.error("'row' and 'col' number must be positive integer number greater than 0!")
end
@doc """
The size (dimensions) of the matrix.

Returns tuple of {row_size, col_size}.

## Example:

    iex> MatrixReloaded.Matrix.new({3,4}) |> Result.map(&MatrixReloaded.Matrix.size(&1))
    {:ok, {3, 4}}

"""
@spec size(t()) :: {pos_integer, pos_integer}
def size(matrix) do
  # Column count is taken from the first row (rows are assumed uniform).
  row_count = length(matrix)
  col_count = matrix |> List.first() |> length()

  {row_count, col_count}
end
# Builds a row containing `count` copies of `val`; `[]` when count is 0.
defp make_row(0, _val), do: []
defp make_row(count, val), do: [val | make_row(count - 1, val)]
# Writes `submatrix` into `matrix` with its top-left element at
# {from_row, from_col} (0-based). Cells outside that window are kept as-is.
# Assumes the caller has already validated that the submatrix fits.
defp make_update(matrix, submatrix, {from_row, from_col}) do
  {to_row, to_col} = size(submatrix)
  matrix
  |> Enum.with_index()
  |> Enum.map(fn {row, i} ->
    # Does this row index fall inside the target window?
    if i in from_row..(from_row + to_row - 1) do
      row
      |> Enum.with_index()
      |> Enum.map(fn {val, j} ->
        if j in from_col..(from_col + to_col - 1) do
          # Translate absolute coordinates into submatrix coordinates.
          submatrix |> Enum.at(i - from_row) |> Enum.at(j - from_col)
        else
          val
        end
      end)
    else
      row
    end
  end)
end
# Cuts out the submatrix whose rows lie in from_row..(from_row + to_row - 1)
# and whose columns lie in from_col..(from_col + to_col - 1), both 0-based.
# Here {to_row, to_col} is the size of the window, not an absolute corner.
defp make_get_submatrix(matrix, {from_row, from_col}, {to_row, to_col}) do
  matrix
  |> Enum.with_index()
  |> Enum.filter(fn {_row, i} ->
    i in from_row..(from_row + to_row - 1)
  end)
  |> Enum.map(fn {row, _i} ->
    row
    |> Enum.with_index()
    |> Enum.filter(fn {_col, j} ->
      j in from_col..(from_col + to_col - 1)
    end)
    |> Enum.map(fn {val, _j} -> val end)
  end)
end
# Drops every index in `rows` from the matrix, one `drop_row/2` at a time.
# `row_help` is [0, 1, ...]; subtracting it element-wise (Vector.sub)
# compensates for the index shift caused by each successive deletion.
# NOTE(review): that compensation is only correct when `rows` is sorted in
# ascending order — confirm callers guarantee this.
defp make_drop_rows(matrix, rows, vec \\ :row) do
  {row_size, col_size} = size(matrix)
  if length(rows) < row_size do
    row_help =
      0..(length(rows) - 1)
      |> Enum.to_list()
    rows
    |> Vector.sub(row_help)
    |> Result.and_then(
      &Enum.reduce(&1, {:ok, matrix}, fn r, acc ->
        acc |> Result.and_then(fn a -> drop_row(a, r) end)
      end)
    )
  else
    # Refuse to delete every row; report the matrix dimensions instead.
    Result.error(
      "It is not possible drop all the #{Atom.to_string(vec)}s from matrix! Matrix has dimensions {#{
        row_size
      }, #{col_size}}."
    )
  end
end
# Removes the row at 0-based index `row`, or errors when the index is out
# of range. `vec` is the label ("row"/"col") used in the error message.
defp make_drop_row(matrix, row, vec \\ "row") do
  {row_count, _col_count} = size(matrix)

  if row >= row_count do
    Result.error(
      "It is not possible drop the #{vec} #{row} from matrix! Numbering of #{vec}s begins from 0 to (matrix #{vec} size - 1)."
    )
  else
    matrix |> List.delete_at(row) |> Result.ok()
  end
end
# Recursive transpose: the heads of all rows become the new first row,
# then recurse on the tails. Terminates once the rows are exhausted
# (first row becomes []). Assumes a rectangular matrix.
defp make_transpose([[] | _]), do: []
defp make_transpose(matrix) do
  [Enum.map(matrix, &hd/1) | make_transpose(Enum.map(matrix, &tl/1))]
end
# The validators below share a shape: on success they return
# `Result.ok(matrix)` so checks can be chained with `Result.and_then/2`;
# on failure they return a `Result.error/1` whose text depends on the
# `method` (or `vec`) argument.

# Submatrix must be strictly smaller than the matrix in both dimensions.
defp is_submatrix_smaller_than_matrix?(
       matrix,
       {rs_mat, cs_mat},
       {rs_sub, cs_sub},
       _method
     )
     when rs_sub < rs_mat and cs_sub < cs_mat do
  Result.ok(matrix)
end
defp is_submatrix_smaller_than_matrix?(
       _matrix,
       _size_mat,
       _size_sub,
       :update
     ) do
  Result.error(
    "You can not update the matrix. Size of submatrix is same or bigger than size of matrix!"
  )
end
defp is_submatrix_smaller_than_matrix?(
       _matrix,
       _size_mat,
       _size_sub,
       :get
     ) do
  Result.error(
    "You can not get the submatrix. Size of submatrix is same or bigger than size of matrix!"
  )
end
# Bodiless head carrying the default `method` for the clauses below.
defp is_row_size_smaller_than_rows_of_matrix?(
       matrix,
       size_mat,
       size_row,
       method \\ :update
     )
# The row fits iff its length does not exceed the matrix column count.
defp is_row_size_smaller_than_rows_of_matrix?(
       matrix,
       {_rs_mat, cs_mat},
       size_r,
       _method
     )
     when size_r <= cs_mat do
  Result.ok(matrix)
end
defp is_row_size_smaller_than_rows_of_matrix?(
       _matrix,
       _size_mat,
       _size_row,
       method
     ) do
  Result.error(
    "You can not #{Atom.to_string(method)} the matrix. Size of row is bigger than row size of matrix!"
  )
end
# Bodiless head carrying the default `method`.
defp is_element_in_matrix?(
       matrix,
       size_mat,
       index,
       method \\ :update
     )
# The {from_row, from_col} position must lie inside the matrix bounds.
defp is_element_in_matrix?(
       matrix,
       {rs_mat, cs_mat},
       {from_row, from_col},
       _method
     )
     when from_row < rs_mat and from_col < cs_mat do
  Result.ok(matrix)
end
defp is_element_in_matrix?(
       _matrix,
       _size_mat,
       {from_row, from_col},
       method
     ) do
  Result.error(
    "You can not #{Atom.to_string(method)} the matrix on given position {#{from_row}, #{
      from_col
    }}. The element is outside of matrix!"
  )
end
# Bodiless head carrying the default `method`.
defp is_submatrix_in_matrix?(
       matrix,
       size_mat,
       size_sub,
       index,
       method \\ :update
     )
# The whole submatrix window must fit inside the matrix bounds.
defp is_submatrix_in_matrix?(
       matrix,
       {rs_mat, cs_mat},
       {to_row, to_col},
       {from_row, from_col},
       _method
     )
     when from_row + to_row - 1 < rs_mat and from_col + to_col - 1 < cs_mat do
  Result.ok(matrix)
end
defp is_submatrix_in_matrix?(
       _matrix,
       _size_mat,
       _size_sub,
       {from_row, from_col},
       method
     ) do
  Result.error(
    "You can not #{Atom.to_string(method)} the matrix on given position {#{from_row}, #{
      from_col
    }}. The submatrix is outside of matrix!"
  )
end
# Bodiless head carrying the default `method`.
defp is_row_in_matrix?(
       matrix,
       size_mat,
       size_row,
       index,
       method \\ :update
     )
# A row of length `s_row` placed at {from_row, from_col} must fit.
# NOTE(review): `from_row <= rs_mat` permits from_row == rs_mat (one past
# the last row) — looks like an off-by-one vs. the `<` used elsewhere;
# confirm intent before changing.
defp is_row_in_matrix?(
       matrix,
       {rs_mat, cs_mat},
       s_row,
       {from_row, from_col},
       _method
     )
     when from_row <= rs_mat and from_col + s_row <= cs_mat do
  Result.ok(matrix)
end
defp is_row_in_matrix?(
       _matrix,
       _size_mat,
       _size_row,
       {from_row, from_col},
       method
     ) do
  Result.error(
    "You can not #{Atom.to_string(method)} row in the matrix on given position {#{from_row}, #{
      from_col
    }}. A part of row is outside of matrix!"
  )
end
# Bodiless head carrying the default `vec` label used in the error text.
defp is_row_num_at_matrix?(matrix, size_mat, row_num, vec \\ :row)
# The requested row/column number must be a valid 0-based index.
defp is_row_num_at_matrix?(matrix, {rs_mat, _cs_mat}, row_num, _vec)
     when row_num < rs_mat do
  Result.ok(matrix)
end
defp is_row_num_at_matrix?(
       _matrix,
       _size_mat,
       row_num,
       vec
     ) do
  Result.error(
    "You can not get #{Atom.to_string(vec)} from the matrix. The #{Atom.to_string(vec)} number #{
      row_num
    } is outside of matrix!"
  )
end
# With a {to_row, to_col} corner, the window size is the inclusive span
# between the two corners; a scalar dimension means a square window.
# (The original `is_tuple/1` guard was redundant — the pattern already
# requires a 2-tuple — so dispatch is unchanged without it.)
defp dimension_of_submatrix({from_row, from_col}, {to_row, to_col}) do
  {to_row - from_row + 1, to_col - from_col + 1}
end

defp dimension_of_submatrix(_index, dimension), do: {dimension, dimension}
# A valid row/column vector is a non-empty list whose head is a number.
# (Only the head is inspected, exactly as before.)
defp is_row_ok?(matrix, [first | _]) when is_number(first) do
  Result.ok(matrix)
end

defp is_row_ok?(_matrix, _row) do
  Result.error("Input row (or column) vector must be only list of numbers!")
end
# Every requested row/column index must be >= 0.
defp is_all_row_numbers_ok?(matrix, row_list, vec \\ :row) do
  if Enum.all?(row_list, fn num -> 0 <= num end) do
    Result.ok(matrix)
  else
    Result.error("List of #{Atom.to_string(vec)} numbers must be greater or equal to zero!")
  end
end
# Validates a {rows, cols} dimension tuple: both must be positive integers.
defp is_dimension_ok?({rows, cols} = tpl)
     when tuple_size(tpl) == 2 and is_integer(rows) and rows > 0 and is_integer(cols) and
            cols > 0 do
  Result.ok(tpl)
end
# Any other 2-tuple is a malformed dimension.
defp is_dimension_ok?({rows, cols}) do
  Result.error(
    "The size {#{rows}, #{cols}} of matrix must be in the form {m, n} where m, n are positive integers!"
  )
end
# Scalar form: the dimension of a squared matrix must be a positive integer.
defp is_dimension_ok?(dim)
     when is_integer(dim) and 0 < dim do
  Result.ok(dim)
end
defp is_dimension_ok?(_dim) do
  Result.error("Dimension of squared matrix must be positive integer!")
end
# The index must be a {row, col} pair of non-negative integers.
# (Destructuring in the head replaces the original elem/2 guards; a
# non-2-tuple falls through to the error clause exactly as before.)
defp is_index_ok?(matrix, {row, col})
     when is_integer(row) and row >= 0 and is_integer(col) and col >= 0 do
  Result.ok(matrix)
end

defp is_index_ok?(_matrix, _index) do
  Result.error("The index must be in the form {m, n} where 0 <= m and 0 <= n !")
end
# Succeeds when `num` is an integer >= 0; any other number yields an error.
# NOTE(review): non-number input matches neither clause and raises
# FunctionClauseError — presumably intentional (let it crash); confirm.
defp is_non_neg_integer?(matrix, num) when is_integer(num) and 0 <= num do
  Result.ok(matrix)
end
defp is_non_neg_integer?(_matrix, num) when is_number(num) do
  Result.error("The integer number must be greater or equal to zero!")
end
# Succeeds when `num` is an integer > 0; any other number yields an error.
defp is_positive_integer?(matrix, num) when is_integer(num) and 0 < num do
  Result.ok(matrix)
end
defp is_positive_integer?(_matrix, num) when is_number(num) do
  Result.error("The integer number must be positive, i.e. n > 0 !")
end
# A vector is reshapeable iff it has exactly row * col elements.
defp is_reshapeable?([el | _] = vector, row, col)
     when is_list(vector) and is_number(el) and length(vector) == row * col do
  Result.ok(vector)
end
defp is_reshapeable?([el | _] = vector, _row, _col)
     when is_list(vector) and is_number(el) do
  Result.error(
    "It is not possible to reshape vector! The numbers of element of vector must be equal row * col."
  )
end
# A matrix is reshapeable iff the element counts match: row * col == rs * cs.
defp is_reshapeable?([r | _] = matrix, row, col)
     when is_list(matrix) and is_list(r) do
  {rs, cs} = size(matrix)
  if row * col == rs * cs do
    Result.ok(matrix)
  else
    Result.error(
      "It is not possible to reshape matrix! The numbers of element of matrix must be equal row * col."
    )
  end
end
end
|
lib/matrix_reloaded/matrix.ex
| 0.943256
| 0.817647
|
matrix.ex
|
starcoder
|
defmodule Quadtreex.Node do
  @moduledoc """
  A node in a quadtree.

  A quadtree node represents a bounded volume of 2 dimensional space.
  Leaf nodes store entities directly; once a leaf holds `split_size`
  entities (and its box is still larger than `min_size`) it splits into
  four quadrant children and hands its entities off to them.
  """
  alias Quadtreex.BoundingBox
  alias Quadtreex.Entity
  defstruct parent: nil, bbox: nil, min_size: 0.0, split_size: 0, children: %{}, entities: []
  @type child_map() :: %{BoundingBox.quadrant() => t()}
  @type min_size_option() :: {:min_size, float()}
  @type split_size_option() :: {:split_size, non_neg_integer()}
  @type parent_option() :: {:parent, t()}
  @type create_option :: min_size_option() | split_size_option() | parent_option()
  @type create_options :: [] | [create_option(), ...]
  @type t() :: %__MODULE__{
          parent: t() | nil,
          bbox: BoundingBox.t(),
          min_size: float(),
          split_size: pos_integer(),
          children: %{} | child_map(),
          entities: [] | [Entity.t()]
        }
  @doc """
  Creates a node covering `bbox`.

  Options: `:parent` (default `nil`), `:min_size` (default `5.0`) and
  `:split_size` (default `32`).
  """
  @spec new(BoundingBox.t(), create_options()) :: t()
  def new(%BoundingBox{} = bbox, options \\ []) do
    parent = Keyword.get(options, :parent)
    min_size = Keyword.get(options, :min_size, 5.0)
    split_size = Keyword.get(options, :split_size, 32)
    %__MODULE__{parent: parent, bbox: bbox, min_size: min_size, split_size: split_size}
  end
  @doc "A node is the root iff it has no parent."
  @spec is_root?(t()) :: boolean()
  def is_root?(%__MODULE__{parent: nil}), do: true
  def is_root?(%__MODULE__{}), do: false
  @doc "Whether `coord` falls inside this node's bounding box."
  @spec contains?(t(), BoundingBox.coordinate()) :: boolean()
  def contains?(%__MODULE__{bbox: bbox}, coord) do
    BoundingBox.contains?(bbox, coord)
  end
  @doc """
  Returns the `thing`s of every entity within `max_distance` (Euclidean)
  of `point`. Internal nodes recurse into all children; leaves do the
  actual distance test.
  """
  @spec range_query(t(), BoundingBox.coordinate(), number()) :: [] | [term()]
  def range_query(%__MODULE__{} = node, {px, py} = point, max_distance) do
    if Enum.empty?(node.children) do
      Enum.reduce(node.entities, [], fn entity, accum ->
        {ex, ey} = entity.location
        if :math.sqrt(:math.pow(ey - py, 2) + :math.pow(ex - px, 2)) <= max_distance do
          [entity.thing | accum]
        else
          accum
        end
      end)
    else
      Enum.flat_map(node.children, fn {_key, child} -> range_query(child, point, max_distance) end)
    end
  end
  @doc "Inserts `thing` at `location`, wrapping it in an `Entity`."
  @spec insert(t(), BoundingBox.coordinate(), term()) :: {:ok, t()} | {:error, :out_of_bounds}
  def insert(%__MODULE__{} = node, location, thing) do
    insert(node, %Entity{location: location, thing: thing})
  end
  @doc """
  Height of the subtree: 0 for an empty leaf, 1 for a non-empty leaf,
  otherwise 1 + the maximum height of the children.
  """
  @spec height(t()) :: non_neg_integer()
  def height(%__MODULE__{children: children, entities: entities}) do
    if Enum.empty?(children) do
      if(Enum.empty?(entities)) do
        0
      else
        1
      end
    else
      heights = Enum.map(Map.values(children), &height(&1))
      Enum.max(heights) + 1
    end
  end
  @doc """
  Deletes every entity whose payload equals `thing` from a leaf, or
  delegates to the children. Returns `{:ok, deleted?, node}` where
  `deleted?` says whether anything was removed.
  """
  @spec delete(t(), term()) :: {:ok, boolean(), t()}
  def delete(%__MODULE__{} = node, thing) do
    if Enum.empty?(node.children) do
      updated_entities = Enum.filter(node.entities, fn e -> e.thing != thing end)
      if updated_entities != node.entities do
        {:ok, true, %{node | entities: updated_entities}}
      else
        {:ok, false, node}
      end
    else
      delete_via_children(node, thing)
    end
  end
  @doc """
  Inserts an `Entity`. A full leaf is split first; an internal node routes
  the entity to the child owning its quadrant. Returns
  `{:error, :out_of_bounds}` when the location is outside this node's box.
  """
  @spec insert(t(), Entity.t()) :: {:ok, t()} | {:error, :out_of_bounds}
  def insert(%__MODULE__{} = node, %Entity{} = entity) do
    if BoundingBox.contains?(node.bbox, entity.location) do
      if Enum.empty?(node.children) do
        if should_split?(node) do
          insert(split!(node), entity)
        else
          {:ok, %{node | entities: [entity | node.entities]}}
        end
      else
        {:ok, quadrant} = BoundingBox.find_quadrant(node.bbox, entity.location)
        child = Map.fetch!(node.children, quadrant)
        case insert(child, entity) do
          {:ok, child} ->
            {:ok, %{node | children: Map.put(node.children, quadrant, child)}}
          error ->
            error
        end
      end
    else
      {:error, :out_of_bounds}
    end
  end
  # Turns a leaf into an internal node: creates the four quadrant children
  # and redistributes the leaf's entities among them.
  defp split!(%__MODULE__{children: %{}, entities: entities} = node) do
    node = %{node | children: make_children(node)}
    node = handoff_to_child!(entities, node)
    %{node | entities: []}
  end
  # Moves each entity into the child owning its quadrant.
  defp handoff_to_child!([], node) do
    %{node | entities: []}
  end
  defp handoff_to_child!([entity | t], %__MODULE__{bbox: bbox, children: children} = node) do
    {:ok, key} = BoundingBox.find_quadrant(bbox, entity.location)
    children = Map.update!(children, key, fn child -> insert!(child, entity) end)
    handoff_to_child!(t, %{node | children: children})
  end
  # Raising variant of insert/2 used during a split, where failure is a bug.
  # NOTE(review): `reason` is an atom (:out_of_bounds) but RuntimeError's
  # :message is conventionally a binary — confirm this formats acceptably
  # if it ever fires.
  defp insert!(%__MODULE__{} = node, entity) do
    case insert(node, entity) do
      {:ok, node} ->
        node
      {:error, reason} ->
        raise RuntimeError, message: reason
    end
  end
  # A leaf splits when its box can still shrink and it is at capacity.
  # Note: the `children: %{}` pattern matches ANY map (empty pattern), but
  # callers only reach this for leaves, so it acts as "no children" here.
  defp should_split?(
         %__MODULE__{split_size: split_size, entities: entities, children: %{}} = node
       ) do
    has_room?(node) and length(entities) >= split_size
  end
  defp should_split?(_node), do: false
  # Room to subdivide exists while both box dimensions exceed min_size.
  defp has_room?(%__MODULE__{bbox: bbox, min_size: min_size}) do
    bbox.width > min_size and bbox.height > min_size
  end
  # Builds the four quadrant children, inheriting this node's settings.
  defp make_children(%__MODULE__{min_size: min_size, split_size: split_size, bbox: bbox} = node) do
    options = [min_size: min_size, split_size: split_size, parent: node]
    %{
      ne: new(BoundingBox.for_quadrant(bbox, :ne), options),
      se: new(BoundingBox.for_quadrant(bbox, :se), options),
      sw: new(BoundingBox.for_quadrant(bbox, :sw), options),
      nw: new(BoundingBox.for_quadrant(bbox, :nw), options)
    }
  end
  # Delegates deletion to the children and records whether any succeeded.
  defp delete_via_children(%__MODULE__{} = node, thing) do
    case scan_and_delete(Map.keys(node.children), node.children, thing) do
      {false, _children} ->
        {:ok, false, node}
      {true, children} ->
        {:ok, true, %{node | children: children}}
    end
  end
  # Tries each child in turn; stops at the first child that deleted something.
  defp scan_and_delete([], children, _thing) do
    {false, children}
  end
  defp scan_and_delete([key | keys], children, thing) do
    child = Map.fetch!(children, key)
    case delete(child, thing) do
      {:ok, true, child} ->
        {true, Map.put(children, key, child)}
      {:ok, false, _child} ->
        scan_and_delete(keys, children, thing)
    end
  end
end
|
lib/quadtreex/node.ex
| 0.868799
| 0.6535
|
node.ex
|
starcoder
|
defmodule Aeutil.PatriciaMerkleTree do
  @moduledoc """
  This module provides an API for creating, updating and deleting
  Patricia Merkle tries. The actual handler is
  https://github.com/aeternity/elixir-merkle-patricia-tree
  """
  alias Aeutil.Serialization
  alias MerklePatriciaTree.Trie
  alias MerklePatriciaTree.Proof
  alias MerklePatriciaTree.DB.ExternalDB
  alias MerklePatriciaTree.Trie.Inspector
  alias Aecore.Persistence.Worker, as: Persistence
  @typedoc """
  The type of the value in PMT.
  """
  @type trie_value :: binary()
  @typedoc """
  Depending on the name, different data base ref will
  be used for the trie creation.
  """
  @type trie_name ::
          :accounts
          | :txs
          | :proof
          | :oracles
          | :oracles_cache
          | :naming
          | :channels
          | :contracts
          | :calls
  @doc """
  Returns the root hash of the given trie.
  """
  @spec root_hash(Trie.t()) :: binary
  def root_hash(%{root_hash: root_hash}), do: root_hash
  @doc """
  Creating new trie.
  """
  @spec new(trie_name()) :: Trie.t()
  def new(trie_name), do: Trie.new(ExternalDB.init(get_db_handlers(trie_name)))
  @doc """
  Create new trie with specific hash root
  """
  @spec new(trie_name(), binary) :: Trie.t()
  def new(trie_name, root_hash) do
    Trie.new(ExternalDB.init(get_db_handlers(trie_name)), root_hash)
  end
  # Bug fix: the original declared `@spec new(trie_name()) :: Trie.t()` here —
  # a duplicate spec for new/1 mistakenly attached to this private helper.
  # The spec now describes get_db_handlers/1 itself.
  @spec get_db_handlers(trie_name()) :: %{put: fun(), get: fun()}
  defp get_db_handlers(trie_name) do
    %{put: Persistence.db_handler_put(trie_name), get: Persistence.db_handler_get(trie_name)}
  end
  @doc """
  Retrieve value from trie.
  """
  @spec lookup(Trie.t(), Trie.key()) :: {:ok, trie_value()} | :none
  def lookup(trie, key) do
    case Trie.get(trie, key) do
      nil -> :none
      val -> {:ok, val}
    end
  end
  @doc """
  Retrieve value from trie and construct proof.
  """
  @spec lookup_with_proof(Trie.t(), Trie.key()) :: :none | {:ok, trie_value(), Trie.t()}
  def lookup_with_proof(trie, key) do
    case Proof.construct_proof({trie, key, new(:proof)}) do
      {nil, _proof} -> :none
      {val, proof} -> {:ok, val, proof}
    end
  end
  @doc """
  Check if the value already exists for this key before add it.
  If so return error message.
  """
  @spec insert(Trie.t(), Trie.key(), trie_value()) :: Trie.t() | {:error, term}
  def insert(trie, key, value) do
    case lookup(trie, key) do
      {:ok, _value} ->
        {:error, :already_present}
      :none ->
        Trie.update(trie, key, value)
    end
  end
  @doc """
  Inserts or overwrites the value for the given key.
  """
  @spec enter(Trie.t(), Trie.key(), trie_value()) :: Trie.t()
  def enter(trie, key, value), do: Trie.update(trie, key, value)
  @doc """
  Verify if value is present in the proof trie for the provided key.
  The key represents the path in the proof trie.
  """
  @spec verify_proof?(Trie.key(), trie_value(), binary(), Trie.t()) :: boolean
  def verify_proof?(key, value, root_hash, proof) do
    case Proof.verify_proof(key, value, root_hash, proof) do
      :ok ->
        true
      {:error, _} ->
        false
    end
  end
  @doc """
  Lookups the value associated with the given key in the proof trie.
  """
  @spec lookup_proof(Trie.key(), binary(), Trie.t()) :: {:ok, trie_value()} | :error
  def lookup_proof(key, root_hash, proof) do
    case Proof.lookup_proof(key, root_hash, proof) do
      nil ->
        :error
      {:error, _} ->
        :error
      val ->
        {:ok, val}
    end
  end
  @doc """
  Deleting a value for given key and reorganizing the trie
  """
  @spec delete(Trie.t(), Trie.key()) :: Trie.t()
  def delete(trie, key), do: Trie.delete(trie, key)
  @doc """
  Providing debug print of a given trie in the shell
  """
  @spec print_debug(Trie.t()) :: Trie.t() | list() | {:error, term()}
  def print_debug(trie), do: print_trie(trie, output: :as_pair, deserialize: true)
  @doc """
  Providing pretty print of a given trie in the shell.
  Depending on the atom it can print structure or key value pairs. The default output is :as_struct
  # Examples
  If we want to print as pair and no serialization is needed
      iex> Aeutil.PatriciaMerkleTree.new(:test_trie) |> Aeutil.PatriciaMerkleTree.enter("111", "val1") |> Aeutil.PatriciaMerkleTree.enter("112", "val2") |> Aeutil.PatriciaMerkleTree.print_trie([output: :as_pair, deserialize: false])
      [{"111", "v1"}, {"112", "v2"}]
  If we want to print as pair and serialization is needed
      iex> Chain.get_chain_state_by_height(1).accounts |> PatriciaMerkleTree.print_trie([output: :as_pair, deserialize: true])
      [
        {<<3, 198, 106, 104, 110, 21, 75, 215, 141, 232, 196, 72, 106, 43, 188, 85,
           47, 30, 208, 235, 189, 51, 92, 132, 247, 27, 130, 183, 118, 136, 119, 33,
           190>>,
         %Aecore.Account.Account{
           balance: 100,
           nonce: 0,
           pubkey: <<3, 198, 106, 104, 110, 21, 75, 215, 141, 232, 196, 72, 106, 43,
             188, 85, 47, 30, 208, 235, 189, 51, 92, 132, 247, 27, 130, 183, 118, 136,
             119, 33, 190>>
         }}
      ]
  If we want to print the whole struct. Returns the trie as well.
      iex> Aeutil.PatriciaMerkleTree.new(:test_trie) |> Aeutil.PatriciaMerkleTree.enter("111", "val1") |> Aeutil.PatriciaMerkleTree.enter("112", "val2") |> Aeutil.PatriciaMerkleTree.print_trie()
      ~~~~~~Trie~~~
      Node: ext (prefix: [3, 1, 3, 1, 3])
        Node: branch (value: "")
          [0] Node: <empty>
          [1] Node: leaf ([]="val1")
          [2] Node: leaf ([]="val2")
          [3] Node: <empty>
          [4] Node: <empty>
          [5] Node: <empty>
          [6] Node: <empty>
          [7] Node: <empty>
          [8] Node: <empty>
          [9] Node: <empty>
          [10] Node: <empty>
          [11] Node: <empty>
          [12] Node: <empty>
          [13] Node: <empty>
          [14] Node: <empty>
          [15] Node: <empty>
      ~~~/Trie/~~~
  If the given type is incorrect
      iex> Aeutil.PatriciaMerkleTree.new(:test_trie) |> Aeutil.PatriciaMerkleTree.enter("111", "val1") |> Aeutil.PatriciaMerkleTree.enter("112", "val2") |> Aeutil.PatriciaMerkleTree.print_trie(:wrong_type)
      {:error, "Unknown print type"}
  """
  @spec print_trie(Trie.t(), keyword()) :: Trie.t() | list() | {:error, term()}
  def print_trie(trie, opts \\ [output: :as_struct, deserialize: false]) do
    print_trie(trie, opts[:output], opts[:deserialize])
  end
  def print_trie(trie, :as_struct, _), do: Inspector.inspect_trie(trie)
  def print_trie(trie, :as_pair, false), do: Inspector.all_values(trie)
  # Deserializing variant: RLP-decodes every value before printing.
  def print_trie(trie, :as_pair, _) do
    list = Inspector.all_values(trie)
    Enum.reduce(list, [], fn {key, val}, acc ->
      [{key, elem(Serialization.rlp_decode_anything(val), 1)} | acc]
    end)
  end
  def print_trie(_, _, _), do: {:error, "Unknown print type"}
  @doc """
  Retrieving all keys of a given trie
  """
  @spec all_keys(Trie.t()) :: list(Trie.key())
  def all_keys(trie), do: Inspector.all_keys(trie)
  @doc """
  Count all keys of a given trie
  """
  @spec trie_size(Trie.t()) :: integer()
  def trie_size(trie), do: length(all_keys(trie))
end
|
apps/aeutil/lib/patricia_merkle_tree.ex
| 0.838151
| 0.482063
|
patricia_merkle_tree.ex
|
starcoder
|
defmodule PowAssent.Plug.Reauthorization do
  @moduledoc """
  This plug can reauthorize a user who signed in through a provider.
  The plug is dependent on a `:handler` that has the following methods:
  * `reauthorize?/2` - verifies the request for reauthorization condition. If
    the condition exists for the request (usually the sign in path), the
    reauthorization cookie will be fetched and deleted, the `reauthorize/2`
    callback will be called, and the connection halted.
  * `clear_reauthorization?/2` - verifies the request for clear reauthorization
    condition. If the condition exists (usually the session delete path) then
    the cookie is deleted.
  * `reauthorize/3` - the callback to handle the request when a reauthorization
    condition exists. Usually this would redirect the user.
  See `PowAssent.Phoenix.ReauthorizationPlugHandler` for a Phoenix example.
  ## Example
      plug PowAssent.Plug.Reauthorization,
        handler: MyApp.ReauthorizationHandler
  ## Configuration options
  * `:handler` - the handler module. Should either be a module or a tuple
    `{module, options}`.
  * `:reauthorization_cookie_key` - reauthorization key name. This defaults
    to "authorization_provider". If `:otp_app` is used it'll automatically
    prepend the key with the `:otp_app` value.
  * `:reauthorization_cookie_opts` - keyword list of cookie options, see
    `Plug.Conn.put_resp_cookie/4` for options. The default options are
    `[max_age: max_age, path: "/"]` where `:max_age` is 30 days.
  """
  alias Plug.Conn
  alias Pow.Config
  alias Pow.Plug, as: PowPlug
  alias PowAssent.Plug
  @cookie_key "reauthorization_provider"
  @cookie_max_age Integer.floor_div(:timer.hours(24) * 30, 1000)
  @doc false
  @spec init(Config.t()) :: {Config.t(), {module(), Config.t()}}
  def init(config) do
    handler = get_handler(config)
    config = Keyword.delete(config, :handler)
    {config, handler}
  end
  # Normalizes the :handler option to a {module, config} tuple, raising
  # when it is missing.
  defp get_handler(plug_config) do
    {handler, config} =
      plug_config
      |> Config.get(:handler)
      |> Kernel.||(raise_no_handler())
      |> case do
        {handler, config} -> {handler, config}
        handler -> {handler, []}
      end
    {handler, Keyword.put(config, :reauthorization_plug, __MODULE__)}
  end
  @doc false
  @spec call(Conn.t(), {Config.t(), {module(), Config.t()}}) :: Conn.t()
  def call(conn, {config, {handler, handler_config}}) do
    config =
      conn
      |> Plug.fetch_config()
      |> Config.merge(config)
    conn =
      conn
      |> Conn.fetch_cookies()
      |> Plug.put_create_session_callback(&store_reauthorization_provider/3)
    provider = get_reauthorization_provider(conn, {handler, handler_config}, config)
    cond do
      provider ->
        conn
        |> clear_cookie(config)
        |> handler.reauthorize(provider, handler_config)
        |> Conn.halt()
      clear_reauthorization?(conn, {handler, handler_config}) ->
        clear_cookie(conn, config)
      true ->
        conn
    end
  end
  # Persists the provider used at sign-in so a later request can offer
  # reauthorization through the same provider.
  defp store_reauthorization_provider(conn, provider, config) do
    Conn.register_before_send(conn, &Conn.put_resp_cookie(&1, cookie_key(config), provider, cookie_opts(config)))
  end
  defp cookie_key(config) do
    Config.get(config, :reauthorization_cookie_key, default_cookie_key(config))
  end
  defp default_cookie_key(config) do
    PowPlug.prepend_with_namespace(config, @cookie_key)
  end
  defp cookie_opts(config) do
    config
    |> Config.get(:reauthorization_cookie_opts, [])
    |> Keyword.put_new(:max_age, @cookie_max_age)
    |> Keyword.put_new(:path, "/")
  end
  defp get_reauthorization_provider(conn, {handler, handler_config}, config) do
    with :ok <- check_should_reauthorize(conn, {handler, handler_config}),
         {:ok, provider} <- fetch_provider_from_cookie(conn, config) do
      provider
    else
      :error -> nil
    end
  end
  defp check_should_reauthorize(conn, {handler, handler_config}) do
    case handler.reauthorize?(conn, handler_config) do
      true -> :ok
      false -> :error
    end
  end
  # Only accepts a cookie value naming a currently configured provider.
  defp fetch_provider_from_cookie(conn, config) do
    case conn.cookies[cookie_key(config)] do
      nil ->
        :error
      provider ->
        config
        |> Plug.available_providers()
        |> Enum.any?(&Atom.to_string(&1) == provider)
        |> case do
          true -> {:ok, provider}
          false -> :error
        end
    end
  end
  # Bug fix: the cookie was cleared with only `max_age: -1`, dropping the
  # `:path` (and any `:domain`/`:secure`) options it was set with. Browsers
  # only remove a cookie when those attributes match, so clearing silently
  # failed whenever non-default cookie options were configured. Reuse the
  # same options and just expire the cookie.
  defp clear_cookie(conn, config) do
    opts = Keyword.put(cookie_opts(config), :max_age, -1)
    Conn.put_resp_cookie(conn, cookie_key(config), "", opts)
  end
  defp clear_reauthorization?(conn, {handler, handler_config}),
    do: handler.clear_reauthorization?(conn, handler_config)
  @spec raise_no_handler :: no_return
  defp raise_no_handler do
    Config.raise_error("No :handler configuration option provided. It's required to set this when using #{inspect __MODULE__}.")
  end
end
|
lib/pow_assent/plug/reauthorization.ex
| 0.736495
| 0.418281
|
reauthorization.ex
|
starcoder
|
defmodule RegexRs do
  @moduledoc """
  Documentation for `RegexRs`.
  See Rust documentation for more information: https://docs.rs/regex/1.4.3/regex/index.html
  Implemented so far:
  - [x] as_str
  - [ ] capture_locations
  - [x] capture_names
  - [x] captures (+ _named)
  - [x] captures_iter (+ _named)
  - [x] captures_len
  - [ ] captures_read
  - [ ] catpures_read_at
  - [x] find
  - [ ] find_at
  - [x] find_iter
  - [x] is_match
  - [ ] is_match_at
  - [x] new
  - [x] replace
  - [x] replace_all
  - [ ] replacen
  - [ ] shortest_match
  - [ ] shortest_match_at
  - [ ] split
  - [ ] splitn
  """
  # Rustler replaces the stub bodies below (`do: error()`) with native
  # implementations when the NIF library loads.
  use Rustler,
    otp_app: :regex_rs,
    crate: :regexrust
  @doc ~S"""
  Compiles the regular expression.
  Takes an existing Elixir `Regex` or a string that has been properly escaped.
  ## Examples
      iex> {:ok, _reference} = RegexRs.new(~r/\d+/); nil
      nil
      iex> {:ok, _reference} = RegexRs.new(Regex.compile!("\\d+")); nil
      nil
      iex> {:ok, _reference} = RegexRs.new("\\d+"); nil
      nil
      iex> {:error, _error} = RegexRs.new("\\y"); nil
      nil
  """
  def new(re = %Regex{}), do: new_internal(Regex.source(re))
  def new(string) when is_binary(string), do: new_internal(string)
  defp new_internal(_string), do: error()
  @doc ~S"""
  Runs the regular expression against the given string until the first match. It
  returns a list with all captures or nil no match occurred.
  ## Examples
      iex> {:ok, re} = RegexRs.new("[a-z]+\\s+\(\\d+\)")
      iex> RegexRs.captures(re, "abc 123 def 456")
      ["abc 123", "123"]
      iex> RegexRs.captures(re, "abc abc abc abc")
      nil
  """
  def captures(_re, _string), do: error()
  @doc ~S"""
  Returns the given captures as a map, or nil if no captures are found.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/c(?P<foo>d)/); nil
      nil
      iex> RegexRs.captures_named(re, "abcd")
      %{"foo" => "d"}
      iex> {:ok, re} = RegexRs.new("(?P<alpha>[a-z]+)\\s+(?P<digits>\\d+)"); nil
      nil
      iex> RegexRs.captures_named(re, "abc 123 def 456")
      %{"alpha" => "abc", "digits" => "123"}
      iex> RegexRs.captures_named(re, "zzzzzzzzzzz")
      nil
  """
  def captures_named(_re, _string), do: error()
  @doc ~S"""
  Returns the number of captures.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/\d+/); nil
      nil
      iex> RegexRs.captures_len(re)
      1
      iex> {:ok, re} = RegexRs.new(~r/(?P<alpha>[a-z]+)\s+(?P<digits>\d+)/); nil
      nil
      iex> RegexRs.captures_len(re)
      3
  """
  def captures_len(_re), do: error()
  @doc ~S"""
  Returns a list of the capture names.
  Unnamed captures will always be represented by the string "unnamed_capture".
  The capture representing the entire expression will always be unnamed,
  and will always be at position 0.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/(?P<alpha>[a-z]+)\s+(?P<digits>\d+)/); nil
      nil
      iex> RegexRs.capture_names(re)
      ["unnamed_capture", "alpha", "digits"]
  """
  def capture_names(_re), do: error()
  @doc ~S"""
  Returns the given captures as a list of lists, or an empty list if no captures are found.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/(?P<letters>[a-z]+)\s+(?P<digits>\d+)/); nil
      nil
      iex> RegexRs.captures_iter(re, "abc 123 def 456")
      [["abc 123", "abc", "123"], ["def 456", "def", "456"]]
      iex> RegexRs.captures_iter(re, "abc abc abc abc")
      []
  """
  def captures_iter(_re, _string), do: error()
  @doc ~S"""
  Returns the given captures as a list of maps, or an empty list if no captures are found.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/(?P<letters>[a-z]+)\s+(?P<digits>\d+)/); nil
      nil
      iex> RegexRs.captures_iter_named(re, "abc 123 def 456")
      [%{"digits" => "123", "letters" => "abc"}, %{"digits" => "456", "letters" => "def"}]
      iex> RegexRs.captures_iter_named(re, "abc abc abc abc")
      []
  """
  def captures_iter_named(_re, _string), do: error()
  @doc ~S"""
  Returns the leftmost match in text. If no match exists, then nil is returned.
  Note that this should only be used if you want to discover the position of the match.
  Testing the existence of a match is faster if you use is_match.
  ## Examples
      iex> {:ok, re} = RegexRs.new("[a-z]+\\s+\(\\d+\)")
      iex> RegexRs.find(re, "abc 123 def 456")
      "abc 123"
      iex> RegexRs.find(re, "abc abc abc abc")
      nil
  """
  def find(_re, _string), do: error()
  @doc ~S"""
  Runs the regular expression against the given string until the first match. It
  returns a list with all captures or an empty list if no match occurred.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/\d+/)
      iex> RegexRs.find_iter(re, "12345 678910")
      ["12345", "678910"]
      iex> RegexRs.find_iter(re, "abcde")
      []
  """
  def find_iter(_re, _string), do: error()
  @doc ~S"""
  Returns a boolean indicating whether there was a match or not.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/\d+/); nil
      nil
      iex> RegexRs.is_match(re, "12345")
      true
      iex> RegexRs.is_match(re, "xxxxx")
      false
  """
  def is_match(_re, _string), do: error()
  @doc ~S"""
  From the Rust `regex` docs:
  Replaces the leftmost-first match with the replacement provided. The replacement
  can be a regular string (where $N and $name are expanded to match capture groups)
  or a function that takes the matches' Captures and returns the replaced string.
  If no match is found, then a copy of the string is returned unchanged.
  See: https://docs.rs/regex/1.4.3/regex/struct.Regex.html#method.replace
  ## Examples
      iex> {:ok, re} = RegexRs.new("(?P<last>[^,\\s]+),\\s+(?P<first>\\S+)"); nil
      nil
      iex> RegexRs.replace(re, "Springsteen, Bruce", "$first $last")
      "Bruce Springsteen"
      iex> RegexRs.replace(re, "Springsteen, Bruce", "${first}_$last")
      "Bruce_Springsteen"
      iex> RegexRs.replace(re, "Springsteen, Bruce", "$2 $1")
      "Bruce Springsteen"
      iex> RegexRs.replace(re, "234234902834", "$first $last")
      "234234902834"
  """
  def replace(_re, _string, _replacement), do: error()
  @doc ~S"""
  Replaces all non-overlapping matches in text with the replacement provided.
  Obeys the same conventions as `replace/3`.
  See: https://docs.rs/regex/1.4.3/regex/struct.Regex.html#method.replace_all
  ## Examples
      iex> {:ok, re} = RegexRs.new("(?P<letters>[a-z]+)\\s+(?P<digits>\\d+)")
      iex> RegexRs.replace_all(re, "abc 123 def 456", "$digits $letters")
      "123 abc 456 def"
      iex> RegexRs.replace_all(re, "abc abc abc abc", "$digits $letters")
      "abc abc abc abc"
  """
  def replace_all(_re, _string, _replacement), do: error()
  @doc ~S"""
  Returns the regex source as a binary.
  ## Examples
      iex> {:ok, re} = RegexRs.new(~r/\d+/); nil
      nil
      iex> RegexRs.as_str(re)
      "\\d+"
  """
  def as_str(_re), do: error()
  # Fallback body for every NIF stub; raised only if the native library
  # failed to load.
  defp error() do
    :erlang.nif_error(:nif_not_loaded)
  end
end
|
lib/regex_rs.ex
| 0.86009
| 0.566498
|
regex_rs.ex
|
starcoder
|
defmodule Day04 do
  @moduledoc """
  Documentation for `Day04`.
  """

  @doc """
  Plays every board against the draw sequence and returns the winning
  scores in the order the boards win. A board's score is the sum of its
  unmarked numbers multiplied by the draw that completed it; a board only
  scores once.
  """
  def bingo({draws, boards}) do
    blank_card = List.duplicate(false, 25) |> Enum.chunk_every(5)
    lookups = Enum.map(1..Enum.count(boards), fn _ -> blank_card end)
    play(draws, boards, lookups, [], MapSet.new())
  end

  # Recursion over the draw sequence; `scores` accumulates in reverse and
  # `wins` holds the indexes of boards that have already won.
  defp play([], _boards, _lookups, scores, _wins), do: Enum.reverse(scores)

  defp play([draw | rest], boards, lookups, scores, wins) do
    hits = locate(draw, boards)
    marked = apply_marks(hits, lookups)

    {score, wins} =
      marked
      |> Enum.with_index()
      |> Enum.reduce({nil, wins}, fn {lookup, idx}, {score, wins} ->
        cond do
          idx in wins ->
            {score, wins}

          winning?(lookup) ->
            {unmarked_sum(lookup, Enum.at(boards, idx)) * draw, MapSet.put(wins, idx)}

          true ->
            {score, wins}
        end
      end)

    case score do
      nil -> play(rest, boards, marked, scores, wins)
      _ -> play(rest, boards, marked, [score | scores], wins)
    end
  end

  # For each board, the {row, col} of the first row containing `draw`,
  # or nil when the board does not contain it.
  defp locate(draw, boards) do
    Enum.map(boards, fn board ->
      board
      |> Enum.with_index()
      |> Enum.map(fn {row, row_num} -> {row_num, Enum.find_index(row, &(&1 == draw))} end)
      |> Enum.filter(fn {_row, col} -> col != nil end)
      |> List.first()
    end)
  end

  # Sets the lookup bit for every board that contains the draw; boards
  # without the draw (nil position) keep their lookup unchanged.
  defp apply_marks(hits, lookups) do
    hits
    |> Enum.zip(lookups)
    |> Enum.map(fn
      {nil, lookup} ->
        lookup

      {{row, col}, lookup} ->
        List.update_at(lookup, row, fn bits -> List.replace_at(bits, col, true) end)
    end)
  end

  # A board wins when any row or any column is completely marked.
  defp winning?(lookup) do
    columns = for c <- 0..4, r <- 0..4, do: lookup |> Enum.at(r) |> Enum.at(c)

    row_win? = Enum.any?(lookup, &Enum.all?/1)
    col_win? = columns |> Enum.chunk_every(5) |> Enum.any?(&Enum.all?/1)

    row_win? or col_win?
  end

  # Sum of the board numbers whose lookup bit is still false.
  defp unmarked_sum(lookup, board) do
    lookup
    |> Enum.with_index()
    |> Enum.flat_map(fn {bits, r} ->
      bits
      |> Enum.with_index()
      |> Enum.map(fn
        {true, _c} -> 0
        {false, c} -> board |> Enum.at(r) |> Enum.at(c)
      end)
    end)
    |> Enum.sum()
  end
end
|
2021/day04/lib/day04.ex
| 0.603114
| 0.449513
|
day04.ex
|
starcoder
|
defmodule JsonDiffEx do
  @moduledoc """
  This is the documentation of JsonDiffEx.

  There are no runtime dependencies and it should be easy
  to use.

  You can use the javascript library
  [jsondiffpatch](https://github.com/benjamine/jsondiffpatch)
  with it since it gets its diff format from it.

  It contains both diff and patch.

  ## Example

  ### Diff

  Simple example:

      iex> JsonDiffEx.diff %{"test" => 1}, %{"test" => 2}
      %{"test" => [1, 2]}

  Now with list:

      iex> JsonDiffEx.diff %{"test" => [1,2,3]}, %{"test" => [2,3]}
      %{"test" => %{"_0" => [1, 0, 0], "_t" => "a"}}

  Now with a map in the map:

      iex> JsonDiffEx.diff %{"test" => %{"1": 1}}, %{"test" => %{"1": 2}}
      %{"test" => %{"1": [1, 2]}}

  Now with a map in an list in the map:

      iex> JsonDiffEx.diff %{"test" => [%{"1": 1}]}, %{"test" => [%{"1": 2}]}
      %{"test" => %{"0" => %{"1": [1, 2]}, "_t" => "a"}}

  If you have problems with using both integers and floats you can override the
  strict comparison:

      iex> JsonDiffEx.diff(%{a: 2100}, %{a: 2.1e3}, strict_equality: false)
      %{}

  ### Patch

  Simple example of a patch:

      iex> JsonDiffEx.patch %{"test" => 1}, %{"test" => [1, 2]}
      %{"test" => 2}

  Now a patch with list:

      iex> JsonDiffEx.patch %{"test" => [1,2,3]},
      ...> %{"test" => %{"_0" => [1, 0, 0], "_t" => "a"}}
      %{"test" => [2,3]}

  Now a patch with a map in the map:

      iex> JsonDiffEx.patch %{"test" => %{"1": 1}}, %{"test" => %{"1": [1, 2]}}
      %{"test" => %{"1": 2}}

  Now with a map in an list in the map:

      iex> JsonDiffEx.patch %{"test" => [%{"1": 1}]},
      ...> %{"test" => %{"0" => %{"1": [1, 2]}, "_t" => "a"}}
      %{"test" => [%{"1": 2}]}
  """

  # Whether comparisons use strict `===/2` unless the caller overrides it
  # with `strict_equality: false`.
  @default_strict_equality true

  # Marker that `do_patch_merge/3` returns for deleted keys so `do_patch/2`
  # can drop them after the merge. (Note: "sentinal" is a typo in the atom
  # name, but the atom is internal-only, so its spelling does not matter.)
  @sentinel :json_diff_ex_sentinal_value

  # True for a list-diff entry `{"_<idx>", [map, 0, 0]}`, i.e. a *map* that
  # was deleted from a list. Such entries may really be moved/edited items,
  # so they get paired with insertions in `all_checked/3`.
  @spec split_underscore_map({binary, list}) :: boolean
  defp split_underscore_map({<<"_", _::binary>>, [map, 0, 0]}) when is_map(map) do
    true
  end

  defp split_underscore_map(_) do
    false
  end

  # True for any deletion entry `{"_<idx>", [_, 0, 0]}` in a list diff.
  @spec split_underscore({binary, list}) :: boolean
  defp split_underscore({<<"_", _::binary>>, [_, 0, 0]}) do
    true
  end

  defp split_underscore(_) do
    false
  end

  # Pairs insertions with deletions at the same list index: an inserted map
  # at index `i` plus a deletion at `"_i"` becomes a nested diff of the two
  # maps. Deletions that stay unpaired are appended at the end.
  @spec all_checked(list, map, list) :: list
  defp all_checked([], deleted_map, _) do
    Map.to_list(deleted_map)
  end

  defp all_checked([head | tail], deleted_map, opts) do
    case head do
      {i, [value]} when is_map(value) ->
        neg_i = "_" <> i

        case Map.fetch(deleted_map, neg_i) do
          {:ok, [value2, 0, 0]} ->
            [
              {i, do_diff(value2, value, opts)}
              | all_checked(tail, Map.delete(deleted_map, neg_i), opts)
            ]

          :error ->
            [head | all_checked(tail, deleted_map, opts)]
        end

      _ ->
        [head | all_checked(tail, deleted_map, opts)]
    end
  end

  # Diffs two lists with the Myers difference algorithm. Deletions become
  # `"_<idx>" => [value, 0, 0]` (indexed against the old list), insertions
  # become `"<idx>" => [value]` (indexed against the new list), and
  # `"_t" => "a"` marks the result as an array diff. Returns nil when equal.
  @spec do_diff(list, list, list) :: map | nil
  defp do_diff(l1, l2, opts) when is_list(l1) and is_list(l2) do
    new_list =
      l1
      |> List.myers_difference(l2)
      |> Enum.reduce({0, 0, %{}}, fn
        # Equal runs advance both the insertion and the deletion counters.
        {:eq, equal}, {count, delete_count, acc} ->
          equal_length = length(equal)
          {count + equal_length, delete_count + equal_length, acc}

        {:del, deleted_list}, {count, delete_count, acc} ->
          {delete_count, acc3} =
            Enum.reduce(deleted_list, {delete_count, acc}, fn deleted_item, {count2, acc2} ->
              {
                count2 + 1,
                Map.put(acc2, "_" <> Integer.to_string(count2), [deleted_item, 0, 0])
              }
            end)

          {count, delete_count, acc3}

        {:ins, inserted_list}, {count, delete_count, acc} ->
          Enum.reduce(inserted_list, {count, delete_count, acc}, fn inserted_item,
                                                                    {count2, _, acc2} ->
            {count2 + 1, delete_count, Map.put(acc2, Integer.to_string(count2), [inserted_item])}
          end)
      end)
      |> elem(2)

    # Pair deleted maps with inserted maps at the same index so a moved or
    # edited map becomes a nested diff instead of a delete plus an insert.
    diff =
      case Enum.split_with(new_list, &split_underscore_map/1) do
        {[], []} ->
          new_list

        {[], _} ->
          new_list

        {deleted, check} ->
          deleted_map = Map.new(deleted)

          check
          |> all_checked(deleted_map, opts)
          |> Enum.filter(fn
            {_, nil} -> false
            _ -> true
          end)
          |> Map.new()
      end

    if diff != %{} do
      diff
      |> Enum.concat([{"_t", "a"}])
      |> Map.new()
    else
      nil
    end
  end

  # Diffs two scalar values. Equal values yield nil; differing values yield
  # the jsondiffpatch replacement format `[old, new]`. Equality is `===/2`
  # by default, or `==/2` when `strict_equality: false` is given.
  @spec do_diff(binary | integer | float, binary | integer | float, list) :: map | nil
  defp do_diff(i1, i2, opts)
       when not (is_list(i1) and is_list(i2)) and not (is_map(i1) and is_map(i2)) do
    compare =
      if Keyword.get(opts, :strict_equality, @default_strict_equality) do
        &===/2
      else
        &==/2
      end

    case compare.(i1, i2) do
      true -> nil
      false -> [i1, i2]
    end
  end

  # Diffs two maps key by key: keys only in map1 become deletions
  # `[old, 0, 0]`, keys only in map2 become insertions `[new]`, and shared
  # keys are diffed recursively. Returns nil when nothing differs.
  @spec do_diff(map, map, list) :: map | nil
  defp do_diff(map1, map2, opts) when is_map(map1) and is_map(map2) do
    keys_non_uniq = Enum.concat(Map.keys(map1), Map.keys(map2))

    diff =
      keys_non_uniq
      |> Enum.uniq()
      |> Enum.map(fn k ->
        case Map.has_key?(map1, k) do
          true ->
            case Map.has_key?(map2, k) do
              true -> {k, do_diff(Map.get(map1, k), Map.get(map2, k), opts)}
              false -> {k, [Map.get(map1, k), 0, 0]}
            end

          false ->
            {k, [Map.get(map2, k)]}
        end
      end)
      |> Enum.filter(fn {_, v} -> v !== nil end)
      |> Map.new()

    if map_size(diff) != 0 do
      diff
    else
      nil
    end
  end

  @doc """
  Diff only supports Elixir's Map format but they can contain,
  lists, other maps and anything that can be compared like strings,
  numbers and boolean.

  Returns an empty map when the two maps are equal.

  ## Options

    * `:strict_equality` - when `false`, values are compared with `==/2`
      instead of `===/2` (useful when mixing integers and floats).
      Defaults to `true`.
  """
  # NOTE(review): the spec below covers only the diff/2 arity produced by
  # the default argument; diff/3 has no spec — confirm whether intended.
  @spec diff(map, map) :: map
  def diff(map1, map2, opts \\ []) when is_map(map1) and is_map(map2) do
    case do_diff(map1, map2, opts) do
      nil -> %{}
      map -> map
    end
  end

  # Applies the deletion entries (`"_<idx>"`) of an array diff to an indexed
  # list, returning the surviving (re-indexed) elements and the diff entries
  # that remain to be applied.
  defp do_patch_delete(list, diff) do
    case Enum.split_with(diff, &split_underscore/1) do
      {[], []} ->
        {list, diff}

      {[], _} ->
        {list, diff}

      {deleted, check} ->
        delete_list =
          Enum.map(deleted, fn {<<"_", s_index::binary>>, _} -> String.to_integer(s_index) end)

        filtered_list =
          list
          |> Enum.reject(fn {_, index} -> index in delete_list end)
          |> clean_index()
          |> Enum.with_index()

        {filtered_list, Map.new(check)}
    end
  end

  # Drops the index component added by `Enum.with_index/1`.
  defp clean_index(list) do
    Enum.map(list, fn {value, _index} -> value end)
  end

  # Applies the remaining (non-deletion) entries of an array diff, in
  # ascending index order: nested map diffs are patched in place, `[value]`
  # entries are insertions, `[old, new]` entries are replacements.
  defp do_patch_list({list, diff}) do
    new_list = clean_index(list)

    diff
    |> Enum.map(fn {s_index, value} -> {String.to_integer(s_index), value} end)
    |> Enum.sort_by(fn {idx, _v} -> idx end)
    |> Enum.reduce(new_list, fn
      {index, %{} = diff_map}, acc ->
        List.update_at(acc, index, &do_patch(&1, diff_map))

      {index, [value | []]}, acc ->
        List.insert_at(acc, index, value)

      {index, [_old_value | [new_value]]}, acc ->
        List.replace_at(acc, index, new_value)
    end)
  end

  # Applies a diff to a map: unwraps insertion entries (`[new]`), merges the
  # diff via `do_patch_merge/3`, then removes keys flagged for deletion
  # (marked with @sentinel during the merge).
  defp do_patch(map1, diff1) do
    diff2 =
      diff1
      |> Enum.map(fn {k, v} ->
        case v do
          [new_value] -> {k, new_value}
          _ -> {k, v}
        end
      end)
      |> Map.new()

    map1
    |> Map.merge(diff2, &do_patch_merge/3)
    |> Enum.filter(fn {_k, v} -> v !== @sentinel end)
    |> Map.new()
  end

  # Conflict resolver for `Map.merge/3`: per key, the diff entry is either a
  # replacement (`[old, new]`), an array diff (map with `"_t" => "a"`), a
  # nested map diff, or a deletion (`[_, 0, 0]`, mapped to @sentinel so
  # `do_patch/2` can drop the key afterwards).
  defp do_patch_merge(_k, v_map, v_diff) do
    {v_map_compare, v_diff_compare} = correct_lists(v_map, v_diff)

    case v_diff_compare do
      [^v_map_compare, new_value] ->
        new_value

      new_map when is_map(new_map) ->
        case Map.get(new_map, "_t", false) === "a" do
          true ->
            v_diff2 = Map.delete(v_diff, "_t")

            v_map
            |> Enum.with_index()
            |> do_patch_delete(v_diff2)
            |> do_patch_list()

          false ->
            do_patch(v_map, v_diff)
        end

      [_, 0, 0] ->
        @sentinel
    end
  end

  # When both the current value and the diff's "old" value are lists, join
  # them into strings so the `[^old, new]` pin match in `do_patch_merge/3`
  # can compare them as single terms.
  defp correct_lists(map, [diff_key, diff_new]) when is_list(map) and is_list(diff_key) do
    {Enum.join(map, "---"), [Enum.join(diff_key, "---"), diff_new]}
  end

  defp correct_lists(map, diff) do
    {map, diff}
  end

  @doc """
  Patch only supports Elixir's Map format.
  """
  @spec patch(map, map) :: map
  def patch(map1, diff1) when is_map(map1) and is_map(diff1) do
    do_patch(map1, diff1)
  end
end
|
lib/json_diff_ex.ex
| 0.747339
| 0.66886
|
json_diff_ex.ex
|
starcoder
|
defmodule Flop.Phoenix do
@moduledoc """
Components for Phoenix and Flop.
## Introduction
Please refer to the [Readme](README.md) for an introduction.
## Customization
The default classes, attributes, texts and symbols can be overridden by
passing the `opts` assign. Since you probably will use the same `opts` in all
your templates, you can globally configure an `opts` provider function for
each component.
The functions have to return the options as a keyword list. The overrides
are deep-merged into the default options.
defmodule MyAppWeb.ViewHelpers do
import Phoenix.HTML
def pagination_opts do
[
ellipsis_attrs: [class: "ellipsis"],
ellipsis_content: "‥",
next_link_attrs: [class: "next"],
next_link_content: next_icon(),
page_links: {:ellipsis, 7},
pagination_link_aria_label: &"\#{&1}ページ目へ",
previous_link_attrs: [class: "prev"],
previous_link_content: previous_icon()
]
end
defp next_icon do
tag :i, class: "fas fa-chevron-right"
end
defp previous_icon do
tag :i, class: "fas fa-chevron-left"
end
def table_opts do
[
container: true,
container_attrs: [class: "table-container"],
no_results_content: content_tag(:p, do: "Nothing found."),
table_attrs: [class: "table"]
]
end
end
Refer to `t:pagination_option/0` and `t:table_option/0` for a list of
available options and defaults.
Once you have defined these functions, you can reference them with a
module/function tuple in `config/config.exs`.
```elixir
config :flop_phoenix,
pagination: [opts: {MyApp.ViewHelpers, :pagination_opts}],
table: [opts: {MyApp.ViewHelpers, :table_opts}]
```
## Hiding default parameters
Default values for page size and ordering are omitted from the query
parameters. If you pass the `:for` assign, the Flop.Phoenix function will
pick up the default values from the schema module deriving `Flop.Schema`.
## Links
Links are generated with `Phoenix.LiveView.Helpers.live_patch/2`. This will
lead to `<a>` tags with `data-phx-link` and `data-phx-link-state` attributes,
which will be ignored outside of LiveViews and LiveComponents.
When used within a LiveView or LiveComponent, you will need to handle the new
params in the `c:Phoenix.LiveView.handle_params/3` callback of your LiveView
module.
## Event-Based Pagination and Sorting
To make `Flop.Phoenix` use event based pagination and sorting, you need to
assign the `:event` to the pagination and table components. This will
generate an `<a>` tag with `phx-click` and `phx-value` attributes set.
You can set a different target by assigning a `:target`. The value
will be used as the `phx-target` attribute.
<Flop.Phoenix.pagination
meta={@meta}
event="paginate-pets"
target={@myself}
/>
You will need to handle the event in the `c:Phoenix.LiveView.handle_event/3`
or `c:Phoenix.LiveComponent.handle_event/3` callback of your
LiveView or LiveComponent module. The event name will be the one you set with
the `:event` option.
def handle_event("paginate-pets", %{"page" => page}, socket) do
flop = Flop.set_page(socket.assigns.meta.flop, page)
with {:ok, {pets, meta}} <- Pets.list_pets(flop) do
{:noreply, assign(socket, pets: pets, meta: meta)}
end
end
def handle_event("order_pets", %{"order" => order}, socket) do
flop = Flop.push_order(socket.assigns.meta.flop, order)
with {:ok, {pets, meta}} <- Pets.list_pets(flop) do
{:noreply, assign(socket, pets: pets, meta: meta)}
end
end
"""
use Phoenix.Component
use Phoenix.HTML
import Phoenix.LiveView.Helpers
alias Flop.Filter
alias Flop.Meta
alias Flop.Phoenix.CursorPagination
alias Flop.Phoenix.Misc
alias Flop.Phoenix.Pagination
alias Flop.Phoenix.Table
alias Phoenix.HTML.Form
@typedoc """
Defines the available options for `Flop.Phoenix.pagination/1`.
- `:current_link_attrs` - The attributes for the link to the current page.
Default: `#{inspect(Pagination.default_opts()[:current_link_attrs])}`.
- `:disabled_class` - The class which is added to disabled links. Default:
`#{inspect(Pagination.default_opts()[:disabled_class])}`.
- `:ellipsis_attrs` - The attributes for the `<span>` that wraps the
ellipsis.
Default: `#{inspect(Pagination.default_opts()[:ellipsis_attrs])}`.
- `:ellipsis_content` - The content for the ellipsis element.
Default: `#{inspect(Pagination.default_opts()[:ellipsis_content])}`.
- `:next_link_attrs` - The attributes for the link to the next page.
Default: `#{inspect(Pagination.default_opts()[:next_link_attrs])}`.
- `:next_link_content` - The content for the link to the next page.
Default: `#{inspect(Pagination.default_opts()[:next_link_content])}`.
- `:page_links` - Specifies how many page links should be rendered.
Default: `#{inspect(Pagination.default_opts()[:page_links])}`.
- `:all` - Renders all page links.
- `{:ellipsis, n}` - Renders `n` page links. Renders ellipsis elements if
there are more pages than displayed.
- `:hide` - Does not render any page links.
- `:pagination_link_aria_label` - 1-arity function that takes a page number
and returns an aria label for the corresponding page link.
Default: `&"Go to page \#{&1}"`.
- `:pagination_link_attrs` - The attributes for the pagination links.
Default: `#{inspect(Pagination.default_opts()[:pagination_link_attrs])}`.
- `:pagination_list_attrs` - The attributes for the pagination list.
Default: `#{inspect(Pagination.default_opts()[:pagination_list_attrs])}`.
- `:previous_link_attrs` - The attributes for the link to the previous page.
Default: `#{inspect(Pagination.default_opts()[:previous_link_attrs])}`.
- `:previous_link_content` - The content for the link to the previous page.
Default: `#{inspect(Pagination.default_opts()[:previous_link_content])}`.
- `:wrapper_attrs` - The attributes for the `<nav>` element that wraps the
  pagination links.
  Default: `#{inspect(Pagination.default_opts()[:wrapper_attrs])}`.
"""
@type pagination_option ::
{:current_link_attrs, keyword}
| {:disabled_class, String.t()}
| {:ellipsis_attrs, keyword}
| {:ellipsis_content, Phoenix.HTML.safe() | binary}
| {:next_link_attrs, keyword}
| {:next_link_content, Phoenix.HTML.safe() | binary}
| {:page_links, :all | :hide | {:ellipsis, pos_integer}}
| {:pagination_link_aria_label, (pos_integer -> binary)}
| {:pagination_link_attrs, keyword}
| {:pagination_list_attrs, keyword}
| {:previous_link_attrs, keyword}
| {:previous_link_content, Phoenix.HTML.safe() | binary}
| {:wrapper_attrs, keyword}
@typedoc """
Defines the available options for `Flop.Phoenix.cursor_pagination/1`.
- `:disabled_class` - The class which is added to disabled links. Default:
`#{inspect(CursorPagination.default_opts()[:disabled_class])}`.
- `:next_link_attrs` - The attributes for the link to the next page.
Default: `#{inspect(CursorPagination.default_opts()[:next_link_attrs])}`.
- `:next_link_content` - The content for the link to the next page.
Default: `#{inspect(CursorPagination.default_opts()[:next_link_content])}`.
- `:previous_link_attrs` - The attributes for the link to the previous page.
Default: `#{inspect(CursorPagination.default_opts()[:previous_link_attrs])}`.
- `:previous_link_content` - The content for the link to the previous page.
Default: `#{inspect(CursorPagination.default_opts()[:previous_link_content])}`.
- `:wrapper_attrs` - The attributes for the `<nav>` element that wraps the
pagination links.
Default: `#{inspect(CursorPagination.default_opts()[:wrapper_attrs])}`.
"""
@type cursor_pagination_option ::
{:disabled_class, String.t()}
| {:next_link_attrs, keyword}
| {:next_link_content, Phoenix.HTML.safe() | binary}
| {:previous_link_attrs, keyword}
| {:previous_link_content, Phoenix.HTML.safe() | binary}
| {:wrapper_attrs, keyword}
@typedoc """
Defines the available options for `Flop.Phoenix.table/1`.
- `:container` - Wraps the table in a `<div>` if `true`.
Default: `#{inspect(Table.default_opts()[:container])}`.
- `:container_attrs` - The attributes for the table container.
Default: `#{inspect(Table.default_opts()[:container_attrs])}`.
- `:no_results_content` - Any content that should be rendered if there are no
results. Default: `<p>No results.</p>`.
- `:table_attrs` - The attributes for the `<table>` element.
Default: `#{inspect(Table.default_opts()[:table_attrs])}`.
- `:th_wrapper_attrs` - The attributes for the `<span>` element that wraps the
header link and the order direction symbol.
Default: `#{inspect(Table.default_opts()[:th_wrapper_attrs])}`.
- `:symbol_asc` - The symbol that is used to indicate that the column is
sorted in ascending order.
Default: `#{inspect(Table.default_opts()[:symbol_asc])}`.
- `:symbol_attrs` - The attributes for the `<span>` element that wraps the
order direction indicator in the header columns.
Default: `#{inspect(Table.default_opts()[:symbol_attrs])}`.
- `:symbol_desc` - The symbol that is used to indicate that the column is
sorted in descending order.
Default: `#{inspect(Table.default_opts()[:symbol_desc])}`.
- `:symbol_unsorted` - The symbol that is used to indicate that the column is
not sorted. Default: `#{inspect(Table.default_opts()[:symbol_unsorted])}`.
- `:tbody_td_attrs`: Attributes to added to each `<td>` tag within the
`<tbody>`. Default: `#{inspect(Table.default_opts()[:tbody_td_attrs])}`.
- `:tbody_tr_attrs`: Attributes to added to each `<tr>` tag within the
`<tbody>`. Default: `#{inspect(Table.default_opts()[:tbody_tr_attrs])}`.
- `:thead_th_attrs`: Attributes to added to each `<th>` tag within the
`<thead>`. Default: `#{inspect(Table.default_opts()[:thead_th_attrs])}`.
- `:thead_tr_attrs`: Attributes to added to each `<tr>` tag within the
`<thead>`. Default: `#{inspect(Table.default_opts()[:thead_tr_attrs])}`.
"""
@type table_option ::
{:container, boolean}
| {:container_attrs, keyword}
| {:no_results_content, Phoenix.HTML.safe() | binary}
| {:symbol_asc, Phoenix.HTML.safe() | binary}
| {:symbol_attrs, keyword}
| {:symbol_desc, Phoenix.HTML.safe() | binary}
| {:symbol_unsorted, Phoenix.HTML.safe() | binary}
| {:table_attrs, keyword}
| {:tbody_td_attrs, keyword}
| {:tbody_tr_attrs, keyword}
| {:th_wrapper_attrs, keyword}
| {:thead_th_attrs, keyword}
| {:thead_tr_attrs, keyword}
@doc """
Generates a pagination element.
## Example
<Flop.Phoenix.pagination
meta={@meta}
path_helper={{Routes, :pet_path, [@socket, :index]}}
/>
## Assigns
- `meta` - The meta information of the query as returned by the `Flop` query
functions.
- `path_helper` - The path helper to use for building the link URL. Can be an
mfa tuple or a function/args tuple. If set, links will be rendered with
`live_patch/2` and the parameters have to be handled in the `handle_params/3`
callback of the LiveView module.
- `event` - If set, `Flop.Phoenix` will render links with a `phx-click`
attribute.
- `target` (optional) - Sets the `phx-target` attribute for the pagination
links.
- `opts` (optional) - Options to customize the pagination. See
`t:Flop.Phoenix.pagination_option/0`. Note that the options passed to the
function are deep merged into the default options. Since these options will
likely be the same for all the tables in a project, so it is recommended to
define them once in a function or set them in a wrapper function as
described in the `Customization` section of the module documentation.
## Page link options
By default, page links for all pages are shown. You can limit the number of
page links or disable them altogether by passing the `:page_links` option.
- `:all`: Show all page links (default).
- `:hide`: Don't show any page links. Only the previous/next links will be
shown.
- `{:ellipsis, x}`: Limits the number of page links. The first and last page
are always displayed. The `x` refers to the number of additional page links
to show.
## Pagination link aria label
For the page links, there is the `:pagination_link_aria_label` option to set
the aria label. Since the page number is usually part of the aria label, you
need to pass a function that takes the page number as an integer and returns
the label as a string. The default is `&"Go to page \#{&1}"`.
## Previous/next links
By default, the previous and next links contain the texts `Previous` and
`Next`. To change this, you can pass the `:previous_link_content` and
`:next_link_content` options.
"""
@doc section: :components
@spec pagination(map) :: Phoenix.LiveView.Rendered.t()
# Renders the pagination component; renders nothing when there is only a
# single page of results.
def pagination(assigns) do
  # Fills in defaults (:event, :target, :opts, ...) and validates assigns.
  assigns = Pagination.init_assigns(assigns)

  ~H"""
  <%= if @meta.total_pages > 1 do %>
    <Pagination.render
      event={@event}
      meta={@meta}
      opts={@opts}
      page_link_helper={
        Pagination.build_page_link_helper(@meta, @path_helper)
      }
      target={@target}
    />
  <% end %>
  """
end
@doc """
Renders a cursor pagination element.
## Example
<Flop.Phoenix.cursor_pagination
meta={@meta}
path_helper={{Routes, :pet_path, [@socket, :index]}}
/>
## Assigns
- `meta` - The meta information of the query as returned by the `Flop` query
functions.
- `path_helper` - The path helper to use for building the link URL. Can be an
mfa tuple or a function/args tuple. If set, links will be rendered with
`live_patch/2` and the parameters have to be handled in the `handle_params/3`
callback of the LiveView module.
- `event` - If set, `Flop.Phoenix` will render links with a `phx-click`
attribute.
- `target` (optional) - Sets the `phx-target` attribute for the pagination
links.
- `reverse` (optional) - By default, the `next` link moves forward with the
`:after` parameter set to the end cursor, and the `previous` link moves
backward with the `:before` parameter set to the start cursor. If `reverse`
is set to `true`, the destinations of the links are switched.
- `opts` (optional) - Options to customize the pagination. See
`t:Flop.Phoenix.cursor_pagination_option/0`. Note that the options passed to
the function are deep merged into the default options. Since these options
will likely be the same for all the tables in a project, so it is
recommended to define them once in a function or set them in a wrapper
function as described in the `Customization` section of the module
documentation.
## Handling parameters and events
If you set the `path_helper` assign, a link with query parameters is rendered.
In a LiveView, you need to handle the parameters in the
`c:Phoenix.LiveView.handle_params/3` callback.
def handle_params(params, _, socket) do
{pets, meta} = MyApp.list_pets(params)
{:noreply, assign(socket, meta: meta, pets: pets)}
end
If you use LiveView and set the `event` assign, you need to update the Flop
parameters in the `handle_event/3` callback.
def handle_event("paginate-users", %{"to" => to}, socket) do
flop = Flop.set_cursor(socket.assigns.meta, to)
{pets, meta} = MyApp.list_pets(flop)
{:noreply, assign(socket, meta: meta, pets: pets)}
end
## Getting the right parameters from Flop
This component requires the start and end cursors to be set in `Flop.Meta`. If
you pass a `Flop.Meta` struct with page or offset-based parameters, this will
result in an error. You can enforce cursor-based pagination in your query
function with the `default_pagination_type` and `pagination_types` options.
def list_pets(params) do
Flop.validate_and_run!(Pet, params,
for: Pet,
default_pagination_type: :first,
pagination_types: [:first, :last]
)
end
`default_pagination_type` ensures that Flop defaults to the right pagination
type when it cannot determine the type from the parameters. `pagination_types`
ensures that parameters for other types are not accepted.
## Order fields
The pagination cursor is based on the `ORDER BY` fields of the query. It is
important that the combination of order fields is unique across the data set.
You can use:
- the field with the primary key
- a field with a unique index
- all fields of a composite primary key or unique index
If you want to order by fields that are not unique, you can add the primary
key as the last order field. For example, if you want to order by family name
and given name, you should set the `order_by` parameter to
`[:family_name, :given_name, :id]`.
"""
@doc section: :components
@spec cursor_pagination(map) :: Phoenix.LiveView.Rendered.t()
# Renders the cursor pagination component (previous/next links only).
# Renders nothing when the meta struct carries validation errors. With
# `reverse: true` the directions of the two links are swapped.
def cursor_pagination(assigns) do
  # Fills in defaults (:event, :target, :reverse, :opts, ...) and validates.
  assigns = CursorPagination.init_assigns(assigns)

  ~H"""
  <%= unless @meta.errors != [] do %>
    <nav {@opts[:wrapper_attrs]}>
      <CursorPagination.render_link
        attrs={@opts[:previous_link_attrs]}
        content={@opts[:previous_link_content]}
        direction={if @reverse, do: :next, else: :previous}
        event={@event}
        meta={@meta}
        path_helper={@path_helper}
        opts={@opts}
        target={@target}
      />
      <CursorPagination.render_link
        attrs={@opts[:next_link_attrs]}
        content={@opts[:next_link_content]}
        direction={if @reverse, do: :previous, else: :next}
        event={@event}
        meta={@meta}
        path_helper={@path_helper}
        opts={@opts}
        target={@target}
      />
    </nav>
  <% end %>
  """
end
@doc """
Generates a table with sortable columns.
## Example
```elixir
<Flop.Phoenix.table
items={@pets}
meta={@meta}
path_helper={{Routes, :pet_path, [@socket, :index]}}
>
<:col let={pet} label="Name" field={:name}><%= pet.name %></:col>
<:col let={pet} label="Age" field={:age}><%= pet.age %></:col>
</Flop.Phoenix.table>
```
## Assigns
- `items` - The list of items to be displayed in rows. This is the result list
returned by the query.
- `meta` - The `Flop.Meta` struct returned by the query function.
- `path_helper` - The path helper to use for building the link URL. Can be an
mfa tuple or a function/args tuple. If set, links will be rendered with
`live_patch/2` and the parameters have to be handled in the `handle_params/3`
callback of the LiveView module.
- `event` (optional) - If set, `Flop.Phoenix` will render links with a
  `phx-click` attribute.
- `target` (optional) - Sets the `phx-target` attribute for the header links.
- `caption` (optional) - Content for the `<caption>` element.
- `opts` (optional) - Keyword list with additional options (see
`t:Flop.Phoenix.table_option/0`). Note that the options passed to the
function are deep merged into the default options. These options will
likely be the same for all the tables in a project, so it probably makes
sense to define them once in a function or set them in a wrapper function
as described in the `Customization` section of the module documentation.
## Flop.Schema
If you pass the `for` option when making the query with Flop, Flop Phoenix can
determine which table columns are sortable. It also hides the `order` and
`page_size` parameters if they match the default values defined with
`Flop.Schema`.
## Col slot
For each column to render, add one `<:col>` element.
```elixir
<:col let={pet} label="Name" field={:name} col_style="width: 20%;">
<%= pet.name %>
</:col>
```
- `label` - The content for the header column.
- `field` (optional) - The field name for sorting.
- `show` (optional) - Boolean value to conditionally show the column. Defaults
to `true`.
- `hide` (optional) - Boolean value to conditionally hide the column. Defaults
to `false`.
- `col_style` (optional) - If set, a `<colgroup>` element is rendered and the
value of the `col_style` assign is set as `style` attribute for the `<col>`
element of the respective column. You can set the `width`, `background` and
`border` of a column this way.
Any additional assigns will be added as attributes to the `<td>` elements.
## Foot slot
You can optionally add a `foot`. The inner block will be rendered inside
a `tfoot` element.
<Flop.Phoenix.table>
<:foot>
<tr><td>Total: <span class="total"><%= @total %></span></td></tr>
</:foot>
</Flop.Phoenix.table>
"""
@doc since: "0.6.0"
@doc section: :components
@spec table(map) :: Phoenix.LiveView.Rendered.t()
# Renders the sortable table, optionally wrapped in a container `<div>`
# (per the :container option), or the configured no-results content when
# `items` is empty.
def table(assigns) do
  # Fills in defaults (:event, :target, :caption, :opts, ...) and validates.
  assigns = Table.init_assigns(assigns)

  ~H"""
  <%= if @items == [] do %>
    <%= @opts[:no_results_content] %>
  <% else %>
    <%= if @opts[:container] do %>
      <div {@opts[:container_attrs]}>
        <Table.render
          caption={@caption}
          col={@col}
          foot={@foot}
          event={@event}
          items={@items}
          meta={@meta}
          opts={@opts}
          path_helper={@path_helper}
          target={@target}
        />
      </div>
    <% else %>
      <Table.render
        caption={@caption}
        col={@col}
        foot={@foot}
        event={@event}
        items={@items}
        meta={@meta}
        opts={@opts}
        path_helper={@path_helper}
        target={@target}
      />
    <% end %>
  <% end %>
  """
end
@doc """
Renders all inputs for a filter form including the hidden inputs.
If you need more control, you can use `filter_input/1` and `filter_label/1`
directly.
## Example
<.form let={f} for={@meta}>
<.filter_fields let={entry} form={f} fields={[:email, :name]}>
<%= entry.label %>
<%= entry.input %>
</.filter_fields>
</.form>
## Assigns
- `form` - The `Phoenix.HTML.Form`.
- `fields` - The list of fields and field options. Note that inputs will not
be rendered for fields that are not marked as filterable in the schema.
- `dynamic` (optional) - If `true`, fields are only rendered for filters that
are present in the `Flop.Meta` struct passed to the form. You can use this
for rendering filter forms that allow the user to add and remove filters
dynamically. The `fields` assign is only used for looking up the options
in that case. Defaults to `false`.
- `id` (optional) - Overrides the ID for the nested filter inputs.
- `input_opts` (optional) - Additional options passed to each input.
- `label_opts` (optional) - Additional options passed to each label.
## Inner block
The generated labels and inputs are passed to the inner block instead of being
automatically rendered. This allows you to customize the markup.
<.filter_fields let={e} form={f} fields={[:email, :name]}>
<div class="field-label"><%= e.label %></div>
<div class="field-body"><%= e.input %></div>
</.filter_fields>
## Field configuration
The fields can be passed as atoms or keywords with additional options.
fields={[:name, :email]}
Or
fields={[
name: [label: gettext("Name")],
email: [
label: gettext("Email"),
op: :ilike_and,
type: :email_input
]
]}
Options:
- `label`
- `op`
- `type`
- `default`
The value under the `:type` key matches the format used in `filter_input/1`.
Any additional options will be passed to the input (e.g. HTML classes).
## Label and input opts
You can set default attributes for all labels and inputs:
<.filter_fields
let={e}
form={f}
fields={[:name]}
input_opts={[class: "input"]}
label_opts={[class: "label"]}
>
The additional options in the type configuration are merged into the input
opts. This means you can set a default class and override it for individual
fields.
<.filter_fields
let={e}
form={f}
fields={[
:name,
:email,
role: [type: {:select, ["author", "editor"], class: "select"}]
]}
input_opts={[class: "input"]}
>
"""
@doc since: "0.12.0"
@doc section: :components
@spec filter_fields(map) :: Phoenix.LiveView.Rendered.t()
# Renders the hidden inputs plus one label/input pair per configured filter
# field, handing each pair to the caller's inner block for layout.
def filter_fields(assigns) do
  # Raises unless the form was built from a `Flop.Meta` struct.
  is_meta_form!(assigns.form)
  fields = assigns[:fields] || []

  # Per-field label/type overrides; fields without an override are dropped
  # so the components fall back to their defaults.
  labels = field_options(fields, :label)
  types = field_options(fields, :type)

  # In dynamic mode the filters already present in the meta struct decide
  # which inputs are rendered, so no field list is passed to `inputs_for`.
  inputs_for_fields = if assigns[:dynamic], do: nil, else: fields

  assigns =
    assigns
    |> assign(:fields, inputs_for_fields)
    |> assign(:labels, labels)
    |> assign(:types, types)
    |> assign_new(:id, fn -> nil end)
    |> assign_new(:input_opts, fn -> [] end)
    |> assign_new(:label_opts, fn -> [] end)

  ~H"""
  <%= filter_hidden_inputs_for(@form) %>
  <%= for ff <- inputs_for(@form, :filters, fields: @fields, id: @id) do %>
    <%= render_slot(@inner_block, %{
      label: ~H"<.filter_label form={ff} texts={@labels} {@label_opts} />",
      input: ~H"<.filter_input form={ff} types={@types} {@input_opts} />"
    }) %>
  <% end %>
  """
end

# Extracts the given per-field option (e.g. :label or :type) from a field
# configuration list, keeping only the fields that actually set it.
defp field_options(fields, key) do
  fields
  |> Enum.map(fn
    {field, opts} -> {field, opts[key]}
    field -> {field, nil}
  end)
  |> Enum.reject(fn {_, value} -> is_nil(value) end)
end
@doc """
Renders a label for the `:value` field of a filter.
This function must be used within the `Phoenix.HTML.Form.inputs_for/2`,
`Phoenix.HTML.Form.inputs_for/3` or `Phoenix.HTML.Form.inputs_for/4` block of
the filter form.
Note that `inputs_for` will not render inputs for fields that are not marked
as filterable in the schema, even if passed in the options.
## Assigns
- `form` - The filter form.
- `texts` (optional) - Either a function or a keyword list for setting the
label text depending on the field.
All additional assigns will be passed to the label.
## Example
<.form let={f} for={@meta}>
<%= filter_hidden_inputs_for(f) %>
<%= for ff <- inputs_for(f, :filters, fields: [:email]) do %>
<.filter_label form={ff} />
<.filter_input form={ff} />
<% end %>
</.form>
`Flop.Phoenix.filter_hidden_inputs_for/1` is necessary because
`Phoenix.HTML.Form.hidden_inputs_for/1` does not support lists in versions
<= 3.1.0.
## Label text
By default, the label text is inferred from the value of the `:field` key of
the filter. You can override the default type by passing a keyword list or a
function that maps fields to label texts.
<.filter_label form={ff} text={[
email: gettext("Email")
phone: gettext("Phone number")
]} />
Or
<.filter_label form={ff} text={
fn
:email -> gettext("Email")
:phone -> gettext("Phone number")
end
} />
"""
@doc since: "0.12.0"
@doc section: :components
@spec filter_label(map) :: Phoenix.LiveView.Rendered.t()
def filter_label(assigns) do
  is_filter_form!(assigns.form)

  # Everything except our own known assigns is forwarded to the label tag
  # as extra attributes.
  opts = assigns_to_attributes(assigns, [:form, :texts])

  assigns =
    assigns
    |> assign_new(:texts, fn -> nil end)
    |> assign(:opts, opts)

  # BUG FIX: the template previously referenced the local variable `opts`
  # instead of the assign `@opts`. Accessing variables inside HEEx defeats
  # LiveView change tracking and is rejected by newer LiveView versions.
  ~H"""
  <%= label @form, :value, label_text(@form, @texts), @opts %>
  """
end
# Resolves the label text for the filter's `:value` field.
#
# With no override (`nil`), humanizes the field name. With a 1-arity
# function, applies it to the field. With a keyword list, looks the field
# up, falling back to the humanized default.
defp label_text(form, nil), do: form |> input_value(:field) |> humanize()

defp label_text(form, func) when is_function(func, 1) do
  func.(input_value(form, :field))
end

defp label_text(form, mapping) when is_list(mapping) do
  safe_get(mapping, input_value(form, :field), label_text(form, nil))
end
# Looks up `key` in a keyword list, where `key` may be either an atom or a
# string. Atom keys use a plain `Keyword.get/3`; string keys are compared
# against the stringified atom keys, so no atoms are ever created from
# (potentially untrusted) string input.
defp safe_get(keyword, key, default)
     when is_list(keyword) and is_atom(key) do
  Keyword.get(keyword, key, default)
end

defp safe_get(keyword, key, default)
     when is_list(keyword) and is_binary(key) do
  match =
    Enum.find(keyword, fn {current_key, _value} ->
      Atom.to_string(current_key) == key
    end)

  case match do
    {_current_key, value} -> value
    nil -> default
  end
end
@doc """
Renders an input for the `:value` field and hidden inputs of a filter.
This function must be used within the `Phoenix.HTML.Form.inputs_for/2`,
`Phoenix.HTML.Form.inputs_for/3` or `Phoenix.HTML.Form.inputs_for/4` block of
the filter form.
## Assigns
- `form` - The filter form.
- `skip_hidden` (optional) - Disables the rendering of the hidden inputs for
the filter. Default: `false`.
- `types` (optional) - Either a function or a keyword list that maps fields
to input types
All additional assigns will be passed to the input function.
## Example
<.form let={f} for={@meta}>
<%= filter_hidden_inputs_for(f) %>
<%= for ff <- inputs_for(f, :filters, fields: [:email]) do %>
<.filter_label form={ff} />
<.filter_input form={ff} />
<% end %>
</.form>
## Types
By default, the input type is inferred from the field type in the Ecto schema.
You can override the default type by passing a keyword list or a function that
maps fields to types.
<.filter_input form={ff} types={[
email: :email_input,
phone: :telephone_input
]} />
Or
<.filter_input form={ff} types={
fn
:email -> :email_input
:phone -> :telephone_input
end
} />
The type can be given as:
- An atom referencing the input function from `Phoenix.HTML.Form`:
`:telephone_input`
- A tuple with an atom and additional options. The given list is merged into
the `opts` assign and passed to the input:
`{:telephone_input, class: "phone"}`
- A tuple with an atom, options for a select input, and additional options:
`{:select, ["Option a": "a", "Option B": "b"], class: "select"}`
- A 3-arity function taking the form, field and opts. This is useful for
custom input functions:
`fn form, field, opts -> ... end` or `&my_custom_input/3`
- A tuple with a 3-arity function and additional opts:
`{&my_custom_input/3, class: "input"}`
- A tuple with a 4-arity function, a list of options and additional opts:
`{fn form, field, options, opts -> ... end, ["Option a": "a", "Option B": "b"], class: "select"}`
"""
@doc since: "0.12.0"
@doc section: :components
@spec filter_input(map) :: Phoenix.LiveView.Rendered.t()
def filter_input(assigns) do
  is_filter_form!(assigns.form)

  # Everything except our own known assigns is forwarded to the input
  # function as extra attributes.
  extra = assigns_to_attributes(assigns, [:form, :skip_hidden, :type, :types])

  assigns =
    assigns
    |> assign(:type, type_for(assigns.form, assigns[:types]))
    |> assign(:opts, extra)
    |> assign_new(:skip_hidden, fn -> false end)

  ~H"""
  <%= unless @skip_hidden do %><%= hidden_inputs_for @form %><% end %>
  <%= render_input(@form, @type, @opts) %>
  """
end
# Renders the filter's value input, dispatching on the shape of `type`
# (see the `## Types` section of `filter_input/1` for the accepted shapes).

# An atom naming an input function from `Phoenix.HTML.Form`,
# e.g. `:text_input` or `:telephone_input`.
defp render_input(form, type, opts) when is_atom(type) do
  apply(Phoenix.HTML.Form, type, [form, :value, opts])
end
# `{function_atom, extra_opts}` — extra opts are merged over the assigns.
defp render_input(form, {type, input_opts}, opts) when is_atom(type) do
  opts = Keyword.merge(opts, input_opts)
  apply(Phoenix.HTML.Form, type, [form, :value, opts])
end
# `{function_atom, select_options, extra_opts}` — for select-style inputs
# that take an options list as their third argument.
defp render_input(form, {type, options, input_opts}, opts)
     when is_atom(type) and is_list(options) do
  opts = Keyword.merge(opts, input_opts)
  apply(Phoenix.HTML.Form, type, [form, :value, options, opts])
end
# A custom 3-arity input function `fn form, field, opts -> ... end`.
defp render_input(form, func, opts) when is_function(func, 3) do
  func.(form, :value, opts)
end
# `{custom_3_arity_function, extra_opts}`.
defp render_input(form, {func, input_opts}, opts) when is_function(func, 3) do
  opts = Keyword.merge(opts, input_opts)
  func.(form, :value, opts)
end
# `{custom_4_arity_function, select_options, extra_opts}`.
defp render_input(form, {func, options, input_opts}, opts)
     when is_function(func, 4) and is_list(options) do
  opts = Keyword.merge(opts, input_opts)
  func.(form, :value, options, opts)
end
# Determines the input type for the filter's `:value` field: derived from
# the form data by default, or overridden via a user-supplied 1-arity
# function or a keyword mapping from field name to type.
defp type_for(form, nil), do: input_type(form, :value)

defp type_for(form, func) when is_function(func, 1) do
  func.(input_value(form, :field))
end

defp type_for(form, mapping) when is_list(mapping) do
  safe_get(mapping, input_value(form, :field), type_for(form, nil))
end
# Raises unless the given form was built for a nested `Flop.Filter`
# (i.e. the forms yielded by `inputs_for(f, :filters, ...)`). Used as a
# guard at the top of `filter_label/1` and `filter_input/1` to give a
# helpful error instead of an obscure crash further down.
defp is_filter_form!(%Form{data: %Filter{}, source: %Meta{}}), do: :ok
defp is_filter_form!(_) do
  raise ArgumentError, """
  must be used with a filter form
  Example:
  <.form let={f} for={@meta}>
  <%= filter_hidden_inputs_for(f) %>
  <%= for ff <- inputs_for(f, :filters, fields: [:email]) do %>
  <.filter_label form={ff} />
  <.filter_input form={ff} />
  <% end %>
  </.form>
  """
end
# Raises unless the given form was built for a `Flop.Meta` struct (the
# top-level meta form, as opposed to a nested filter form).
defp is_meta_form!(%Form{data: %Flop{}, source: %Meta{}}), do: :ok
defp is_meta_form!(_) do
  # BUG FIX: the message previously said "filter form" (copy-pasted from
  # is_filter_form!/1), which is the opposite of what this check verifies.
  raise ArgumentError, """
  must be used with a meta form
  Example:
  <.form let={f} for={@meta}>
  <.filter_fields let={entry} form={f} fields={[:email, :name]}>
  <%= entry.label %>
  <%= entry.input %>
  </.filter_fields>
  </.form>
  """
end
@doc """
Converts a Flop struct into a keyword list that can be used as a query with
Phoenix route helper functions.
Default limits and default order parameters set via the application
environment are omitted. You can pass the `:for` option to pick up the
default options from a schema module deriving `Flop.Schema`. You can also
pass `default_limit` and `default_order` as options directly. The function
uses `Flop.get_option/2` internally to retrieve the default options.
## Examples
iex> to_query(%Flop{})
[]
iex> f = %Flop{order_by: [:name, :age], order_directions: [:desc, :asc]}
iex> to_query(f)
[order_directions: [:desc, :asc], order_by: [:name, :age]]
iex> f |> to_query |> Plug.Conn.Query.encode()
"order_directions[]=desc&order_directions[]=asc&order_by[]=name&order_by[]=age"
iex> f = %Flop{page: 5, page_size: 20}
iex> to_query(f)
[page_size: 20, page: 5]
iex> f = %Flop{first: 20, after: "g3QAAAABZAAEbmFtZW0AAAAFQXBwbGU="}
iex> to_query(f)
[first: 20, after: "g3QAAAABZAAEbmFtZW0AAAAFQXBwbGU="]
iex> f = %Flop{
...> filters: [
...> %Flop.Filter{field: :name, op: :=~, value: "Mag"},
...> %Flop.Filter{field: :age, op: :>, value: 25}
...> ]
...> }
iex> to_query(f)
[
filters: %{
0 => %{field: :name, op: :=~, value: "Mag"},
1 => %{field: :age, op: :>, value: 25}
}
]
iex> f |> to_query() |> Plug.Conn.Query.encode()
"filters[0][field]=name&filters[0][op]=%3D~&filters[0][value]=Mag&filters[1][field]=age&filters[1][op]=%3E&filters[1][value]=25"
iex> f = %Flop{page: 5, page_size: 20}
iex> to_query(f, default_limit: 20)
[page: 5]
"""
@doc since: "0.6.0"
@doc section: :miscellaneous
# BUG FIX: the spec previously declared arity 1 only, but the function has
# an optional second `opts` argument (default `[]`).
@spec to_query(Flop.t(), keyword) :: keyword
def to_query(%Flop{filters: filters} = flop, opts \\ []) do
  # Encode filters as an index-keyed map so Plug.Conn.Query can round-trip
  # them (lists of maps are not representable in query strings).
  filter_map =
    filters
    |> Stream.with_index()
    |> Enum.into(%{}, fn {filter, index} ->
      {index, Map.from_struct(filter)}
    end)

  default_limit = Flop.get_option(:default_limit, opts)
  default_order = Flop.get_option(:default_order, opts)

  # maybe_put omits parameters equal to their default, keeping URLs minimal.
  []
  |> Misc.maybe_put(:offset, flop.offset, 0)
  |> Misc.maybe_put(:page, flop.page, 1)
  |> Misc.maybe_put(:after, flop.after)
  |> Misc.maybe_put(:before, flop.before)
  |> Misc.maybe_put(:page_size, flop.page_size, default_limit)
  |> Misc.maybe_put(:limit, flop.limit, default_limit)
  |> Misc.maybe_put(:first, flop.first, default_limit)
  |> Misc.maybe_put(:last, flop.last, default_limit)
  |> Misc.maybe_put_order_params(flop, default_order)
  |> Misc.maybe_put(:filters, filter_map)
end
@doc """
Builds a path that includes query parameters for the given `Flop` struct
using the referenced Phoenix path helper function.
The first argument can be either an MFA tuple (module, function name as atom,
arguments) or a 2-tuple (function, arguments).
Default values for `limit`, `page_size`, `order_by` and `order_directions` are
omitted from the query parameters. To pick up the default parameters from a
schema module deriving `Flop.Schema`, you need to pass the `:for` option.
## Examples
iex> flop = %Flop{page: 2, page_size: 10}
iex> build_path(
...> {Flop.PhoenixTest, :route_helper, [%Plug.Conn{}, :pets]},
...> flop
...> )
"/pets?page_size=10&page=2"
iex> pet_path = fn _conn, :index, query ->
...> "/pets?" <> Plug.Conn.Query.encode(query)
...> end
iex> flop = %Flop{page: 2, page_size: 10}
iex> build_path({pet_path, [%Plug.Conn{}, :index]}, flop)
"/pets?page_size=10&page=2"
We're defining fake path helpers for the scope of the doctests. In a real
Phoenix application, you would pass something like
`{Routes, :pet_path, args}` or `{&Routes.pet_path/3, args}` as the
first argument.
You can also pass a `Flop.Meta` struct or a keyword list as the third
argument.
iex> pet_path = fn _conn, :index, query ->
...> "/pets?" <> Plug.Conn.Query.encode(query)
...> end
iex> flop = %Flop{page: 2, page_size: 10}
iex> meta = %Flop.Meta{flop: flop}
iex> build_path({pet_path, [%Plug.Conn{}, :index]}, meta)
"/pets?page_size=10&page=2"
iex> query_params = to_query(flop)
iex> build_path({pet_path, [%Plug.Conn{}, :index]}, query_params)
"/pets?page_size=10&page=2"
If the path helper takes additional path parameters, just add them to the
second argument.
iex> user_pet_path = fn _conn, :index, id, query ->
...> "/users/\#{id}/pets?" <> Plug.Conn.Query.encode(query)
...> end
iex> flop = %Flop{page: 2, page_size: 10}
iex> build_path({user_pet_path, [%Plug.Conn{}, :index, 123]}, flop)
"/users/123/pets?page_size=10&page=2"
If the last path helper argument is a query parameter list, the Flop
parameters are merged into it.
iex> pet_url = fn _conn, :index, query ->
...> "https://pets.flop/pets?" <> Plug.Conn.Query.encode(query)
...> end
iex> flop = %Flop{order_by: :name, order_directions: [:desc]}
iex> build_path({pet_url, [%Plug.Conn{}, :index, [user_id: 123]]}, flop)
"https://pets.flop/pets?user_id=123&order_directions[]=desc&order_by=name"
iex> build_path(
...> {pet_url,
...> [%Plug.Conn{}, :index, [category: "small", user_id: 123]]},
...> flop
...> )
"https://pets.flop/pets?category=small&user_id=123&order_directions[]=desc&order_by=name"
"""
@doc since: "0.6.0"
@doc section: :miscellaneous
@spec build_path(
{module, atom, [any]} | {function, [any]},
Meta.t() | Flop.t() | keyword,
keyword
) ::
String.t()
def build_path(tuple, meta_or_flop_or_params, opts \\ [])
# A Meta struct carries its Flop; unwrap it and recurse.
def build_path(tuple, %Meta{flop: flop}, opts),
  do: build_path(tuple, flop, opts)
# Convert the Flop to query params (opts control which defaults are
# omitted), then recurse into one of the param-list clauses below.
def build_path(tuple, %Flop{} = flop, opts) do
  build_path(tuple, Flop.Phoenix.to_query(flop, opts))
end
# MFA form: merge the Flop params into the helper's arguments and apply.
def build_path({module, func, args}, flop_params, _opts)
    when is_atom(module) and
    is_atom(func) and
    is_list(args) and
    is_list(flop_params) do
  final_args = build_final_args(args, flop_params)
  apply(module, func, final_args)
end
# {function, args} form: same, but with a function capture.
def build_path({func, args}, flop_params, _opts)
    when is_function(func) and
    is_list(args) and
    is_list(flop_params) do
  final_args = build_final_args(args, flop_params)
  apply(func, final_args)
end
# Appends the Flop query params to the path helper's argument list. If the
# last argument is already a (query) keyword list, the params are merged
# into it instead of appended as a new argument.
defp build_final_args(args, flop_params) do
  case List.last(args) do
    last when is_list(last) ->
      List.replace_at(args, -1, Keyword.merge(last, flop_params))

    _ ->
      args ++ [flop_params]
  end
end
@doc """
Generates hidden inputs for the given form.
This does the same as `Phoenix.HTML.Form.hidden_inputs_for/1` in versions
<= 3.1.0, except that it supports list fields. If you use a later
`Phoenix.HTML` version, you don't need this function.
"""
@doc since: "0.12.0"
@doc section: :components
@spec filter_hidden_inputs_for(Phoenix.HTML.Form.t()) ::
list(Phoenix.HTML.safe())
def filter_hidden_inputs_for(form) do
  # Render hidden inputs for every hidden field of the form, flattening
  # the per-field lists into a single list of safe HTML.
  for {field, value} <- form.hidden,
      input <- filter_hidden_inputs_for(form, field, value),
      do: input
end
# Renders hidden input(s) for one hidden field. A list value gets one
# hidden input per element, using the `name[]` convention so the values
# are decoded back into a list, with an index-suffixed id to keep DOM ids
# unique.
defp filter_hidden_inputs_for(form, k, values) when is_list(values) do
  id = input_id(form, k)
  name = input_name(form, k)
  for {v, index} <- Enum.with_index(values) do
    hidden_input(form, k,
      id: id <> "_" <> Integer.to_string(index),
      name: name <> "[]",
      value: v
    )
  end
end
# Scalar value: a single hidden input, wrapped in a list for flat_map-ing.
defp filter_hidden_inputs_for(form, k, v) do
  [hidden_input(form, k, value: v)]
end
end
|
lib/flop_phoenix.ex
| 0.812756
| 0.862004
|
flop_phoenix.ex
|
starcoder
|
defmodule Honeydew.FailureMode.Retry do
  alias Honeydew.Job
  alias Honeydew.Queue
  alias Honeydew.Processes
  alias Honeydew.FailureMode.Abandon
  alias Honeydew.FailureMode.Move

  @moduledoc """
  Instructs Honeydew to retry a job a number of times on failure.

  ## Examples

  Retry jobs in this queue 3 times:

  ```elixir
  Honeydew.start_queue(:my_queue, failure_mode: {#{inspect __MODULE__},
                                                 times: 3})
  ```

  Retry jobs in this queue 3 times and then move to another queue:

  ```elixir
  Honeydew.start_queue(:my_queue,
                       failure_mode: {#{inspect __MODULE__},
                                      times: 3,
                                      finally: {#{inspect Move},
                                                queue: :dead_letters}})
  ```
  """
  require Logger

  @behaviour Honeydew.FailureMode

  @impl true
  def validate_args!(args) when is_list(args) do
    args
    |> Enum.into(%{})
    |> validate_args!(__MODULE__)
  end

  def validate_args!(args, module \\ __MODULE__)

  def validate_args!(%{fun: fun}, _module) when is_function(fun, 3), do: :ok

  def validate_args!(%{fun: bad}, module) do
    raise ArgumentError, "You provided a bad `:fun` argument (#{inspect bad}) to the #{module} failure mode, it's expecting a function or function capture of arity three (job, failure_reason, args), for example: `&#{inspect __MODULE__}.immediate/3`"
  end

  def validate_args!(%{times: times}, module) when not is_integer(times) or times <= 0 do
    raise ArgumentError, "You provided a bad `:times` argument (#{inspect times}) to the #{module} failure mode, it's expecting a positive integer."
  end

  # BUG FIX: the original bound the tuple's module to the same variable as
  # the failure-mode module (`{module, args}` with a second parameter also
  # named `module`), so this clause only matched when the `:finally` module
  # happened to equal the failure mode itself — the validation was
  # effectively dead. Distinct names make it reject any malformed value.
  def validate_args!(%{finally: {finally_module, finally_args} = bad}, module)
      when not is_atom(finally_module) or not is_list(finally_args) do
    raise ArgumentError, "You provided a bad `:finally` argument (#{inspect bad}) to the #{module} failure mode, it's expecting `finally: {module, args}`"
  end

  # Delegate validation of the `:finally` failure mode's own arguments.
  def validate_args!(%{times: _times, finally: {m, a}}, _module) do
    m.validate_args!(a)
  end

  def validate_args!(%{times: _times}, _module), do: :ok

  def validate_args!(bad, module) do
    raise ArgumentError, "You provided bad arguments (#{inspect bad}) to the #{module} failure mode, at a minimum, it must be a list with a maximum number of retries specified, for example: `[times: 5]`"
  end

  @impl true
  def handle_failure(%Job{queue: queue, from: from} = job, reason, args) when is_list(args) do
    args = Enum.into(args, %{})
    # Defaults: abandon the job once retries are exhausted, and retry
    # immediately (no backoff) between attempts.
    args = Map.merge(%{finally: {Abandon, []},
                       fun: &immediate/3}, args)
    %{fun: fun, finally: {finally_module, finally_args}} = args

    case fun.(job, reason, args) do
      {:cont, private, delay_secs} ->
        job = %Job{job | failure_private: private, delay_secs: delay_secs, result: {:retrying, reason}}

        queue
        |> Processes.get_queue()
        |> Queue.nack(job)

        # send the error to the awaiting process, if necessary
        with {owner, _ref} <- from,
          do: send(owner, %{job | result: {:retrying, reason}})

      :halt ->
        finally_module.handle_failure(%{job | failure_private: nil, delay_secs: 0}, reason, finally_args)
    end
  end

  # First failure: initialize the retry counter, then re-dispatch.
  def immediate(%Job{failure_private: nil} = job, reason, args) do
    immediate(%Job{job | failure_private: 0}, reason, args)
  end

  # Retry with zero delay while attempts remain.
  def immediate(%Job{failure_private: times_retried} = job, reason, %{times: max_retries}) when times_retried < max_retries do
    Logger.info "Job failed because #{inspect reason}, retrying #{max_retries - times_retried} more times, job: #{inspect job}"
    {:cont, times_retried + 1, 0}
  end

  # Retries exhausted: hand off to the `:finally` failure mode.
  def immediate(_, _, _), do: :halt
end
|
lib/honeydew/failure_mode/retry.ex
| 0.837454
| 0.619975
|
retry.ex
|
starcoder
|
defmodule OMG.Watcher.BlockValidator do
  @moduledoc """
  Operations related to block validation.
  """
  alias OMG.Watcher.Block
  alias OMG.Watcher.Merkle
  alias OMG.Watcher.State.Transaction
  alias OMG.Watcher.Utxo.Position

  # Maximum number of transactions a block may carry (2^16).
  @transaction_upper_limit 2 |> :math.pow(16) |> Kernel.trunc()

  @doc """
  Executes stateless validation of a submitted block:
  - Verifies that the number of transactions falls within the accepted range.
  - Verifies that (payment and fee) transactions are correctly formed.
  - Verifies that fee transactions are correctly placed and unique per currency.
  - Verifies that there are no duplicate inputs at the block level.
  - Verifies that given Merkle root matches reconstructed Merkle root.
  """
  @spec stateless_validate(Block.t()) :: {:ok, boolean()} | {:error, atom()}
  def stateless_validate(submitted_block) do
    with :ok <- number_of_transactions_within_limit(submitted_block.transactions),
         {:ok, recovered_transactions} <- verify_transactions(submitted_block.transactions),
         {:ok, _fee_transactions} <- verify_fee_transactions(recovered_transactions),
         {:ok, _inputs} <- verify_no_duplicate_inputs(recovered_transactions),
         {:ok, _block} <- verify_merkle_root(submitted_block, recovered_transactions) do
      {:ok, true}
    end
  end

  # Recomputes the Merkle root from the recovered transactions' raw bytes
  # and compares it with the root claimed by the submitted block.
  # BUG FIX: the spec previously declared `{:error, :mismatched_merkle_root}`
  # while the implementation returns `{:error, :invalid_merkle_root}`.
  @spec verify_merkle_root(Block.t(), list(Transaction.Recovered.t())) ::
          {:ok, Block.t()} | {:error, :invalid_merkle_root}
  defp verify_merkle_root(block, transactions) do
    reconstructed_merkle_hash =
      transactions
      |> Enum.map(&Transaction.raw_txbytes/1)
      |> Merkle.hash()

    case block.hash do
      ^reconstructed_merkle_hash -> {:ok, block}
      _ -> {:error, :invalid_merkle_root}
    end
  end

  # Recovers (decodes + signature-checks) each transaction, halting at the
  # first failure. The list is reversed first so the accumulator-prepend
  # restores the original order.
  @spec verify_transactions(transactions :: list(Transaction.Signed.tx_bytes())) ::
          {:ok, list(Transaction.Recovered.t())}
          | {:error, Transaction.Recovered.recover_tx_error()}
  defp verify_transactions(transactions) do
    transactions
    |> Enum.reverse()
    |> Enum.reduce_while({:ok, []}, fn tx, {:ok, already_recovered} ->
      case Transaction.Recovered.recover_from(tx) do
        {:ok, recovered} ->
          {:cont, {:ok, [recovered | already_recovered]}}

        error ->
          {:halt, error}
      end
    end)
  end

  # A block must contain at least one and at most @transaction_upper_limit
  # transactions.
  @spec number_of_transactions_within_limit([Transaction.Signed.tx_bytes()]) :: :ok | {:error, atom()}
  defp number_of_transactions_within_limit([]), do: {:error, :empty_block}

  defp number_of_transactions_within_limit(transactions) when length(transactions) > @transaction_upper_limit do
    {:error, :transactions_exceed_block_limit}
  end

  defp number_of_transactions_within_limit(_transactions), do: :ok

  # No two transactions in the block may spend the same UTXO position.
  @spec verify_no_duplicate_inputs([Transaction.Recovered.t()]) :: {:ok, [map()]} | {:error, :block_duplicate_inputs}
  defp verify_no_duplicate_inputs(transactions) do
    all_inputs = Enum.flat_map(transactions, &Transaction.get_inputs/1)
    uniq_inputs = Enum.uniq_by(all_inputs, &Position.encode/1)

    case length(all_inputs) == length(uniq_inputs) do
      true -> {:ok, all_inputs}
      false -> {:error, :block_duplicate_inputs}
    end
  end

  # Fee transactions must sit at the tail of the block and be unique per
  # currency.
  @spec verify_fee_transactions([Transaction.Recovered.t()]) :: {:ok, [Transaction.Recovered.t()]} | {:error, atom()}
  defp verify_fee_transactions(transactions) do
    identified_fee_transactions = Enum.filter(transactions, &fee?/1)

    with :ok <- expected_index(transactions, identified_fee_transactions),
         :ok <- unique_fee_transaction_per_currency(identified_fee_transactions) do
      {:ok, identified_fee_transactions}
    end
  end

  # All fee transactions must be exactly the last N transactions of the block.
  @spec expected_index([Transaction.Recovered.t()], [Transaction.Recovered.t()]) :: :ok | {:error, atom()}
  defp expected_index(transactions, identified_fee_transactions) do
    number_of_fee_txs = length(identified_fee_transactions)
    tail = Enum.slice(transactions, -number_of_fee_txs, number_of_fee_txs)

    case identified_fee_transactions do
      ^tail -> :ok
      _ -> {:error, :unexpected_transaction_type_at_fee_index}
    end
  end

  # At most one fee transaction per currency (uniq_by is a no-op iff all
  # currencies are distinct).
  @spec unique_fee_transaction_per_currency([Transaction.Recovered.t()]) :: :ok | {:error, atom()}
  defp unique_fee_transaction_per_currency(identified_fee_transactions) do
    identified_fee_transactions
    |> Enum.uniq_by(fn fee_transaction -> fee_transaction |> get_fee_output() |> Map.get(:currency) end)
    |> case do
      ^identified_fee_transactions -> :ok
      _ -> {:error, :duplicate_fee_transaction_for_ccy}
    end
  end

  # Predicate: is this recovered transaction a fee transaction?
  # (Renamed from `is_fee` — the `is_` prefix is reserved for guards.)
  defp fee?(%Transaction.Recovered{signed_tx: %Transaction.Signed{raw_tx: %Transaction.Fee{}}}) do
    true
  end

  defp fee?(_), do: false

  # A fee transaction carries a single output; fetch it.
  defp get_fee_output(fee_transaction) do
    fee_transaction |> Transaction.get_outputs() |> Enum.at(0)
  end
end
|
apps/omg_watcher/lib/omg_watcher/block_validator.ex
| 0.866401
| 0.483466
|
block_validator.ex
|
starcoder
|
defmodule Timex.Ecto.DateTimeWithTimezone do
  @moduledoc """
  This is a special type for storing datetime + timezone information as a composite type.
  To use this, you must first make sure you have the `datetimetz` type defined in your database:
  ```sql
  CREATE TYPE datetimetz AS (
      dt timestamptz,
      tz varchar
  );
  ```
  Then you can use that type when creating your table, i.e.:
  ```sql
  CREATE TABLE example (
      id integer,
      created_at datetimetz
  );
  ```
  That's it!
  """
  use Timex

  @behaviour Ecto.Type

  def type, do: :datetimetz

  @doc """
  Handle casting to Timex.Ecto.DateTimeWithTimezone
  """
  def cast(%DateTime{} = datetime), do: {:ok, datetime}
  # Support embeds_one/embeds_many (serialized map with an "ms" key)
  def cast(%{"calendar" => _cal,
             "year" => y, "month" => m, "day" => d,
             "hour" => h, "minute" => mm, "second" => s, "ms" => ms,
             "timezone" => %{"full_name" => tzname,
                             "abbreviation" => abbr,
                             "offset_std" => offset_std,
                             "offset_utc" => offset_utc}}) do
    us = Timex.Ecto.Helpers.millisecond_to_microsecond(ms)
    {:ok, build_datetime(y, m, d, h, mm, s, us, tzname, abbr, offset_utc, offset_std)}
  end

  # Same shape, but with a "millisecond" key
  def cast(%{"calendar" => _cal,
             "year" => y, "month" => m, "day" => d,
             "hour" => h, "minute" => mm, "second" => s, "millisecond" => ms,
             "timezone" => %{"full_name" => tzname,
                             "abbreviation" => abbr,
                             "offset_std" => offset_std,
                             "offset_utc" => offset_utc}}) do
    us = Timex.Ecto.Helpers.millisecond_to_microsecond(ms)
    {:ok, build_datetime(y, m, d, h, mm, s, us, tzname, abbr, offset_utc, offset_std)}
  end

  # Flattened shape with a "microsecond" key that may be either an integer
  # or an already-constructed `{value, precision}` tuple.
  def cast(%{"calendar" => _cal,
             "year" => y, "month" => m, "day" => d,
             "hour" => h, "minute" => mm, "second" => s, "microsecond" => us,
             "time_zone" => tzname, "zone_abbr" => abbr, "utc_offset" => offset_utc, "std_offset" => offset_std}) do
    # BUG FIX: the normalized value of this `case` was previously discarded
    # (never rebound), so integer microseconds were stored un-normalized in
    # the struct instead of as a `{value, precision}` tuple.
    us =
      case us do
        us when is_integer(us) -> Timex.DateTime.Helpers.construct_microseconds({us, -1})
        {_, _} -> us
      end

    {:ok, build_datetime(y, m, d, h, mm, s, us, tzname, abbr, offset_utc, offset_std)}
  end

  def cast(input) when is_binary(input) do
    case Timex.parse(input, "{ISO:Extended}") do
      {:ok, datetime} -> {:ok, datetime}
      {:error, _} -> :error
    end
  end

  def cast(input) when is_map(input) do
    # Try converting arbitrary map shapes first; fall back to Ecto's own
    # cast (and then `load/1`) when Timex cannot produce a DateTime.
    case Timex.Convert.convert_map(input) do
      %DateTime{} = d ->
        {:ok, d}

      %_{} = result ->
        case Timex.to_datetime(result, "Etc/UTC") do
          {:error, _} ->
            case Ecto.DateTime.cast(input) do
              {:ok, d} ->
                load({{{d.year, d.month, d.day}, {d.hour, d.min, d.sec, d.usec}}, "Etc/UTC"})

              :error ->
                :error
            end

          %DateTime{} = d ->
            {:ok, d}
        end

      {:error, _} ->
        :error
    end
  end

  def cast(input) do
    case Timex.to_datetime(input, "Etc/UTC") do
      {:error, _} ->
        case Ecto.DateTime.cast(input) do
          {:ok, d} ->
            load({{{d.year, d.month, d.day}, {d.hour, d.min, d.sec, d.usec}}, "Etc/UTC"})

          :error ->
            :error
        end

      %DateTime{} = d ->
        {:ok, d}
    end
  end

  @doc """
  Load from the native Ecto representation
  """
  def load({{{y, m, d}, {h, mm, s, usec}}, timezone}) do
    secs = :calendar.datetime_to_gregorian_seconds({{y, m, d}, {h, mm, s}})

    case Timezone.resolve(timezone, secs) do
      {:error, _} ->
        :error

      %TimezoneInfo{} = tz ->
        us = Timex.DateTime.Helpers.construct_microseconds({usec, -1})
        {:ok, build_datetime(y, m, d, h, mm, s, us, tz.full_name, tz.abbreviation, tz.offset_utc, tz.offset_std)}

      # The wall time is ambiguous (e.g. DST fall-back): return both candidates.
      %AmbiguousTimezoneInfo{before: b, after: a} ->
        us = Timex.DateTime.Helpers.construct_microseconds({usec, -1})

        dt = %AmbiguousDateTime{
          :before => build_datetime(y, m, d, h, mm, s, us, b.full_name, b.abbreviation, b.offset_utc, b.offset_std),
          :after => build_datetime(y, m, d, h, mm, s, us, a.full_name, a.abbreviation, a.offset_utc, a.offset_std)
        }

        {:ok, dt}
    end
  end

  def load(_), do: :error

  @doc """
  Convert to the native Ecto representation
  """
  def dump(%DateTime{microsecond: {us, _}, time_zone: tzname} = d) do
    {:ok, {{{d.year, d.month, d.day}, {d.hour, d.minute, d.second, us}}, tzname}}
  end

  # Robustness: the Ecto.Type contract expects `:error` for values that
  # cannot be dumped, rather than a FunctionClauseError.
  def dump(_), do: :error

  def autogenerate(precision \\ :sec)

  def autogenerate(:sec) do
    {date, {h, m, s}} = :erlang.universaltime
    load({{date, {h, m, s, 0}}, "UTC"}) |> elem(1)
  end

  def autogenerate(:usec) do
    timestamp = {_, _, usec} = :os.timestamp
    {date, {h, m, s}} = :calendar.now_to_datetime(timestamp)
    load({{date, {h, m, s, usec}}, "UTC"}) |> elem(1)
  end

  # Builds the DateTime struct shared by all cast/load clauses.
  defp build_datetime(y, m, d, h, mm, s, us, tzname, abbr, offset_utc, offset_std) do
    %DateTime{
      :year => y,
      :month => m,
      :day => d,
      :hour => h,
      :minute => mm,
      :second => s,
      :microsecond => us,
      :time_zone => tzname,
      :zone_abbr => abbr,
      :utc_offset => offset_utc,
      :std_offset => offset_std
    }
  end
end
|
lib/types/datetimetz.ex
| 0.831451
| 0.667903
|
datetimetz.ex
|
starcoder
|
defmodule AshPostgres do
  @moduledoc """
  A postgres extension library for `Ash`.
  `AshPostgres.DataLayer` provides a DataLayer, and a DSL extension to configure that data layer.
  The dsl extension exposes the `postgres` section. See: `AshPostgres.DataLayer` for more.
  """
  alias Ash.Dsl.Extension

  @doc "The configured repo for a resource"
  def repo(resource), do: Extension.get_opt(resource, [:postgres], :repo, nil, true)

  @doc "The configured table for a resource"
  def table(resource), do: Extension.get_opt(resource, [:postgres], :table, nil, true)

  @doc "The configured polymorphic? for a resource"
  def polymorphic?(resource), do: Extension.get_opt(resource, [:postgres], :polymorphic?, nil, true)

  @doc "The configured unique_index_names"
  def unique_index_names(resource), do: Extension.get_opt(resource, [:postgres], :unique_index_names, [], true)

  @doc "The configured foreign_key_names"
  def foreign_key_names(resource), do: Extension.get_opt(resource, [:postgres], :foreign_key_names, [], true)

  @doc "Whether or not the resource should be included when generating migrations"
  def migrate?(resource), do: Extension.get_opt(resource, [:postgres], :migrate?, nil, true)

  @doc "A stringified version of the base_filter, to be used in a where clause when generating unique indexes"
  def base_filter_sql(resource), do: Extension.get_opt(resource, [:postgres], :base_filter_sql, nil)

  @doc "Skip generating unique indexes when generating migrations"
  def skip_unique_indexes?(resource), do: Extension.get_opt(resource, [:postgres], :skip_unique_indexes?, [])

  @doc "The template for a managed tenant"
  def manage_tenant_template(resource), do: Extension.get_opt(resource, [:postgres, :manage_tenant], :template, nil)

  @doc "Whether or not to create a tenant for a given resource"
  def manage_tenant_create?(resource), do: Extension.get_opt(resource, [:postgres, :manage_tenant], :create?, false)

  @doc "Whether or not to update a tenant for a given resource"
  def manage_tenant_update?(resource), do: Extension.get_opt(resource, [:postgres, :manage_tenant], :update?, false)
end
|
lib/ash_postgres.ex
| 0.835819
| 0.449574
|
ash_postgres.ex
|
starcoder
|
defmodule Gather.Extraction do
  @moduledoc """
  Process to wrap and manage a dataset's extraction pipeline. This is operated
  like a `Task`, in that it executes and shuts down.
  """
  import Events
  use GenServer, restart: :transient
  require Logger
  use Properties, otp_app: :service_gather
  use Annotated.Retry

  alias Gather.Extraction.SourceHandler

  # Retry policy for starting the pipeline; see the @retry annotation on
  # extract/1 below.
  @max_tries get_config_value(:max_tries, default: 10)
  @initial_delay get_config_value(:initial_delay, default: 500)

  getter(:app_name, required: true)

  def start_link(args) do
    # Only `:name` is forwarded as a GenServer option; the full args list
    # becomes the server's initial state.
    server_opts = Keyword.take(args, [:name])
    GenServer.start_link(__MODULE__, args, server_opts)
  end

  @impl GenServer
  def init(args) do
    # Trap exits so crashes of linked source/destination processes arrive
    # as messages; defer the real work to handle_continue so init returns
    # quickly.
    Process.flag(:trap_exit, true)
    {:ok, Map.new(args), {:continue, :extract}}
  end

  @dialyzer {:nowarn_function, handle_continue: 2}
  @impl GenServer
  def handle_continue(:extract, %{extract: extract} = state) do
    # Start destination + source (with retry/backoff); keep their pids in
    # state so completion/failure messages can stop the destination later.
    case extract(extract) do
      {:ok, destination_and_source} ->
        Logger.debug(fn -> "#{__MODULE__}: Started extraction: #{inspect(extract)}" end)
        {:noreply, Map.merge(state, destination_and_source)}

      {:error, reason} ->
        Logger.warn("#{__MODULE__}: Extraction Stopping: #{inspect(extract)}")
        {:stop, reason, state}
    end
  end

  @impl GenServer
  # Sent by SourceHandler when the source is exhausted: stop the
  # destination, publish the extract_end event, and shut down normally.
  def handle_info(:extract_complete, %{extract: extract, destination_pid: pid} = state) do
    Destination.stop(extract.destination, pid)
    Logger.debug(fn -> "#{__MODULE__}: Extraction Completed: #{inspect(extract)}" end)
    Brook.Event.send(Gather.Application.instance(), extract_end(), "gather", extract)
    {:stop, :normal, state}
  end

  @impl GenServer
  # Sent by SourceHandler on failure: stop the destination and crash with
  # the given reason (supervisor restart policy is :transient).
  def handle_info({:extract_failed, reason}, %{extract: extract, destination_pid: pid} = state) do
    Destination.stop(extract.destination, pid)
    Logger.warn("#{__MODULE__}: Extraction Stopping: #{inspect(extract)}")
    {:stop, reason, state}
  end

  @impl GenServer
  def handle_info(msg, state) do
    Logger.warn(fn -> "#{__MODULE__}: Received unexpected message : #{inspect(msg)}" end)
    {:noreply, state}
  end

  # Starts destination first (source needs its pid), retried with
  # exponential backoff up to @max_tries attempts.
  @retry with: exponential_backoff(@initial_delay) |> take(@max_tries)
  defp extract(extract) do
    with {:ok, destination_pid} <- start_destination(extract),
         {:ok, source_pid} <- start_source(extract, destination_pid) do
      {:ok, %{destination_pid: destination_pid, source_pid: source_pid}}
    end
  end

  def start_source(extract, destination_pid) do
    Source.start_link(extract.source, source_context(extract, destination_pid))
  end

  defp start_destination(extract) do
    Destination.start_link(
      extract.destination,
      Destination.Context.new!(
        app_name: app_name(),
        dataset_id: extract.dataset_id,
        subset_id: extract.subset_id,
        dictionary: extract.dictionary
      )
    )
  end

  # Context handed to the source; assigns carry this server's pid so
  # SourceHandler can send :extract_complete / {:extract_failed, reason}.
  defp source_context(extract, destination_pid) do
    Source.Context.new!(
      dictionary: extract.dictionary,
      handler: SourceHandler,
      app_name: :service_gather,
      dataset_id: extract.dataset_id,
      subset_id: extract.subset_id,
      decode_json: false,
      assigns: %{
        pid: self(),
        destination_pid: destination_pid,
        extract: extract
      }
    )
  end
end
|
apps/service_gather/lib/gather/extraction.ex
| 0.781914
| 0.402011
|
extraction.ex
|
starcoder
|
defmodule Sanbase.Clickhouse.HistoricalBalance do
  @moduledoc ~s"""
  Module providing functions for historical balances and balance changes.
  This module dispatches to underlaying modules and serves as common interface
  for many different database tables and schemas.
  """
  use AsyncWith
  import Sanbase.Utils.Transform, only: [maybe_apply_function: 2]
  alias Sanbase.Model.Project
  alias Sanbase.Clickhouse.HistoricalBalance.XrpBalance

  # Blockchains served by the unified `Sanbase.Balance` context. Selectors
  # resolving to any other blockchain are dispatched to a dedicated module
  # (currently only `XrpBalance` is visible here).
  @balances_aggregated_blockchains ["ethereum", "bitcoin", "bitcoin-cash", "litecoin", "binance"]
  # NOTE(review): "BEP2" is listed as supported, but there is no
  # `selector_to_args/1` clause for it, so a `%{infrastructure: "BEP2"}`
  # selector falls through to the generic "not supported" error clause
  # below — confirm whether that is intended.
  @supported_infrastructures ["BCH", "BNB", "BEP2", "BTC", "LTC", "XRP", "ETH"]
  def supported_infrastructures(), do: @supported_infrastructures

  @type selector :: %{
          optional(:infrastructure) => String.t(),
          optional(:currency) => String.t(),
          optional(:slug) => String.t(),
          optional(:contract) => String.t(),
          optional(:decimals) => non_neg_integer()
        }
  @type slug :: String.t()
  @type address :: String.t() | list(String.t())

  @typedoc ~s"""
  An interval represented as string. It has the format of number followed by one of:
  ns, ms, s, m, h, d or w - each representing some time unit
  """
  @type interval :: String.t()

  @typedoc ~s"""
  The type returned by the historical_balance/5 function
  """
  @type historical_balance_return ::
          {:ok, []}
          | {:ok, list(%{datetime: DateTime.t(), balance: number()})}
          | {:error, String.t()}

  # Guard-safe check used both in `case` clauses and function heads below.
  defguard balances_aggregated_blockchain?(blockchain)
           when blockchain in @balances_aggregated_blockchains

  @doc ~s"""
  Return a list of the assets that a given address currently holds or
  has held in the past.
  This can be combined with the historical balance query to see the historical
  balance of all currently owned assets
  """
  @spec assets_held_by_address(map()) :: {:ok, list(map())} | {:error, String.t()}
  def assets_held_by_address(%{infrastructure: infr, address: address}) do
    case selector_to_args(%{infrastructure: infr}) do
      %{blockchain: blockchain} when balances_aggregated_blockchain?(blockchain) ->
        Sanbase.Balance.assets_held_by_address(address)

      %{module: module} ->
        module.assets_held_by_address(address)

      {:error, error} ->
        {:error, error}
    end
    # On success, sort holdings by balance, biggest first; errors pass through.
    |> maybe_apply_function(fn data -> Enum.sort_by(data, &Map.get(&1, :balance), :desc) end)
  end

  @doc ~s"""
  For a given address or list of addresses returns the `slug` balance change for the
  from-to period. The returned lists indicates the address, before balance, after balance
  and the balance change
  """
  @spec balance_change(selector, address, from :: DateTime.t(), to :: DateTime.t()) ::
          __MODULE__.Behaviour.balance_change_result()
  def balance_change(selector, address, from, to) do
    case selector_to_args(selector) do
      %{blockchain: blockchain, slug: slug} when balances_aggregated_blockchain?(blockchain) ->
        Sanbase.Balance.balance_change(address, slug, from, to)

      %{module: module, asset: asset, decimals: decimals} ->
        module.balance_change(address, asset, decimals, from, to)

      {:error, error} ->
        {:error, error}
    end
  end

  @doc ~s"""
  For a given address or list of addresses returns the combined `slug` balance for each bucket
  of size `interval` in the from-to time period
  """
  @spec historical_balance(selector, address, from :: DateTime.t(), to :: DateTime.t(), interval) ::
          __MODULE__.Behaviour.historical_balance_result()
  def historical_balance(selector, address, from, to, interval) do
    case selector_to_args(selector) do
      %{blockchain: blockchain, slug: slug} when balances_aggregated_blockchain?(blockchain) ->
        Sanbase.Balance.historical_balance(address, slug, from, to, interval)

      %{module: module, asset: asset, decimals: decimals} ->
        module.historical_balance(address, asset, decimals, from, to, interval)

      {:error, error} ->
        {:error, error}
    end
  end

  @spec current_balance(selector, address | list(address)) ::
          __MODULE__.Behaviour.current_balance_result()
  def current_balance(selector, address) do
    case selector_to_args(selector) do
      %{blockchain: blockchain, slug: slug} when balances_aggregated_blockchain?(blockchain) ->
        Sanbase.Balance.current_balance(address, slug)

      %{module: module, asset: asset, decimals: decimals} ->
        module.current_balance(address, asset, decimals)

      {:error, error} ->
        {:error, error}
    end
  end

  # A selector counts as "ethereum" when any of these hold: slug is
  # "ethereum", contract is "ETH", or infrastructure is "ETH" with no slug.
  defguard is_ethereum(map)
           when (is_map_key(map, :slug) and map.slug == "ethereum") or
                  (is_map_key(map, :contract) and map.contract == "ETH") or
                  (is_map_key(map, :infrastructure) and not is_map_key(map, :slug) and
                     map.infrastructure == "ETH")

  # NOTE: the order of the `selector_to_args/1` clauses below is significant.
  # ERC-20 tokens (contract given, not ETH itself) must be tried before the
  # `is_ethereum` clause, and both before the generic ETH-slug clause; the
  # "unsupported infrastructure" error clause must come after every
  # per-infrastructure clause but before the slug-only fallback.
  def selector_to_args(
        %{infrastructure: "ETH", contract: contract, decimals: decimals} = selector
      )
      when is_binary(contract) and is_number(decimals) and decimals > 0 and
             not is_ethereum(selector) do
    %{
      module: :none,
      asset: String.downcase(contract),
      contract: String.downcase(contract),
      blockchain: Sanbase.Balance.blockchain_from_infrastructure("ETH"),
      slug: Map.get(selector, :slug),
      decimals: decimals
    }
  end

  def selector_to_args(%{} = selector) when is_ethereum(selector) do
    selector = Map.put_new(selector, :slug, "ethereum")

    # `with` without `else`: a failed match (e.g. an {:error, _} from
    # get_project_details/1) is returned unchanged to the caller.
    with %{slug: slug, contract: contract, decimals: decimals, infrastructure: "ETH"} <-
           get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("ETH"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  def selector_to_args(%{infrastructure: "ETH", slug: slug} = selector) when is_binary(slug) do
    with %{contract: contract, decimals: decimals} <- get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("ETH"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  # XRP is the only infrastructure here that dispatches to a dedicated module.
  def selector_to_args(%{infrastructure: "XRP"} = selector) do
    %{
      module: XrpBalance,
      asset: Map.get(selector, :currency, "XRP"),
      currency: Map.get(selector, :currency, "XRP"),
      blockchain: Sanbase.Balance.blockchain_from_infrastructure("XRP"),
      slug: "ripple",
      decimals: 0
    }
  end

  def selector_to_args(%{infrastructure: "BTC"} = selector) do
    selector = Map.put_new(selector, :slug, "bitcoin")

    with %{slug: slug, contract: contract, decimals: decimals} <- get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("BTC"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  def selector_to_args(%{infrastructure: "BCH"} = selector) do
    selector = Map.put_new(selector, :slug, "bitcoin-cash")

    with %{slug: slug, contract: contract, decimals: decimals} <- get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("BCH"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  def selector_to_args(%{infrastructure: "LTC"} = selector) do
    selector = Map.put_new(selector, :slug, "litecoin")

    with %{slug: slug, contract: contract, decimals: decimals} <- get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("LTC"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  def selector_to_args(%{infrastructure: "BNB"} = selector) do
    selector = Map.put_new(selector, :slug, "binance-coin")

    with %{slug: slug, contract: contract, decimals: decimals} <- get_project_details(selector) do
      %{
        module: :none,
        asset: contract,
        contract: contract,
        blockchain: Sanbase.Balance.blockchain_from_infrastructure("BNB"),
        slug: slug,
        decimals: decimals
      }
    end
  end

  # Catch-all for any selector that still carries an :infrastructure key —
  # i.e. one not matched by a clause above (for example "BEP2").
  def selector_to_args(%{infrastructure: infrastructure} = selector) do
    {:error,
     "Invalid historical balance selector. The infrastructure #{inspect(infrastructure)} is not supported. Provided selector: #{inspect(selector)}"}
  end

  # Slug-only selector: resolve the infrastructure from the project data and
  # recurse, keeping the original selector for error reporting.
  def selector_to_args(%{slug: slug} = selector) when not is_nil(slug) do
    with %{infrastructure: _} = map <- get_project_details(%{slug: slug}) do
      %{original_selector: selector} |> Map.merge(map) |> selector_to_args()
    else
      {:error, {:missing_contract, _}} ->
        {:error,
         "Invalid historical balance selector. The provided slug has no contract data available. Provided selector: #{inspect(selector)}"}

      error ->
        error
    end
  end

  def selector_to_args(selector) do
    # Prefer the selector the caller originally passed in, if it was stashed
    # by the recursive slug clause above.
    original_selector = Map.get(selector, :original_selector) || selector
    {:error, "Invalid historical balance selector: #{inspect(original_selector)}"}
  end

  # Already fully resolved — pass through untouched.
  defp get_project_details(%{contract: _, decimals: _, slug: _, infrastructure: _} = data) do
    data
  end

  defp get_project_details(%{slug: slug}) do
    # `with` without `else`: error tuples from the Project lookup propagate
    # unchanged to the `selector_to_args/1` callers above.
    with {:ok, contract, decimals, infrastructure} <-
           Project.contract_info_infrastructure_by_slug(slug) do
      %{contract: contract, decimals: decimals, slug: slug, infrastructure: infrastructure}
    end
  end
end
|
lib/sanbase/clickhouse/historical_balance/historical_balance.ex
| 0.882656
| 0.546859
|
historical_balance.ex
|
starcoder
|
defmodule Glicko.Player do
  @moduledoc """
  Convenience functions for working with Glicko players, handling conversion
  between the version-1 and version-2 rating scales.

  ## Usage

  Create a *v1* player with the default values for an unrated player.

      iex> Player.new_v1
      {1.5e3, 350.0}

  Create a *v2* player with the default values for an unrated player.

      iex> Player.new_v2
      {0.0, 2.014761872416068, 0.06}

  Create a player with custom values.

      iex> Player.new_v2([rating: 3.0, rating_deviation: 2.0, volatility: 0.05])
      {3.0, 2.0, 0.05}

  Convert a *v2* player to a *v1*. Note this drops the volatility.

      iex> Player.new_v2 |> Player.to_v1
      {1.5e3, 350.0}

  Convert a *v1* player to a *v2*.

      iex> Player.new_v1 |> Player.to_v2(0.06)
      {0.0, 2.014761872416068, 0.06}

  Note calling `to_v1` with a *v1* player or likewise with `to_v2` and a *v2* player
  will pass-through unchanged. The volatility arg in this case is ignored.

      iex> player_v2 = Player.new_v2
      iex> player_v2 == Player.to_v2(player_v2)
      true
  """

  # Glicko-2 scaling constants: v1 = v2 * scale + offset.
  @magic_version_scale 173.7178
  @magic_version_scale_rating 1500.0

  @type t :: v1 | v2
  @type v1 :: {rating, rating_deviation}
  @type v2 :: {rating, rating_deviation, volatility}
  @type version :: :v1 | :v2
  @type rating :: float
  @type rating_deviation :: float
  @type volatility :: float

  @doc """
  The recommended initial rating value for a new player.
  """
  @spec initial_rating(version) :: rating
  def initial_rating(:v1), do: 1500.0
  def initial_rating(:v2), do: :v1 |> initial_rating() |> scale_rating_to(:v2)

  @doc """
  The recommended initial rating deviation value for a new player.
  """
  @spec initial_rating_deviation(version) :: rating_deviation
  def initial_rating_deviation(:v1), do: 350.0

  def initial_rating_deviation(:v2),
    do: :v1 |> initial_rating_deviation() |> scale_rating_deviation_to(:v2)

  @doc """
  The recommended initial volatility value for a new player.
  """
  @spec initial_volatility :: volatility
  def initial_volatility, do: 0.06

  @doc """
  Creates a new v1 player.
  If not overriden, will use the default values for an unrated player.
  """
  @spec new_v1(rating: rating, rating_deviation: rating_deviation) :: v1
  def new_v1(opts \\ []) when is_list(opts) do
    rating = Keyword.get(opts, :rating, initial_rating(:v1))
    deviation = Keyword.get(opts, :rating_deviation, initial_rating_deviation(:v1))
    {rating, deviation}
  end

  @doc """
  Creates a new v2 player.
  If not overriden, will use default values for an unrated player.
  """
  @spec new_v2(rating: rating, rating_deviation: rating_deviation, volatility: volatility) :: v2
  def new_v2(opts \\ []) when is_list(opts) do
    rating = Keyword.get(opts, :rating, initial_rating(:v2))
    deviation = Keyword.get(opts, :rating_deviation, initial_rating_deviation(:v2))
    volatility = Keyword.get(opts, :volatility, initial_volatility())
    {rating, deviation, volatility}
  end

  @doc """
  Converts a v2 player to a v1.
  A v1 player will pass-through unchanged.
  Note the volatility field used in a v2 player will be lost in the conversion.
  """
  @spec to_v1(player :: t) :: v1
  def to_v1({_, _} = player), do: player

  def to_v1({rating, deviation, _volatility}),
    do: {scale_rating_to(rating, :v1), scale_rating_deviation_to(deviation, :v1)}

  @doc """
  Converts a v1 player to a v2.
  A v2 player will pass-through unchanged with the volatility arg ignored.
  """
  @spec to_v2(player :: t, volatility :: volatility) :: v2
  def to_v2(player, volatility \\ initial_volatility())
  def to_v2({_, _, _} = player, _volatility), do: player

  def to_v2({rating, deviation}, volatility),
    do: {scale_rating_to(rating, :v2), scale_rating_deviation_to(deviation, :v2), volatility}

  @doc """
  A version agnostic method for getting a player's rating.
  """
  @spec rating(player :: t, as_version :: version | nil) :: rating
  def rating(player, as_version \\ nil),
    do: convert(elem(player, 0), native_version(player), as_version, &scale_rating_to/2)

  @doc """
  A version agnostic method for getting a player's rating deviation.
  """
  @spec rating_deviation(player :: t, as_version :: version | nil) :: rating_deviation
  def rating_deviation(player, as_version \\ nil),
    do: convert(elem(player, 1), native_version(player), as_version, &scale_rating_deviation_to/2)

  @doc """
  A version agnostic method for getting a player's volatility.
  """
  @spec volatility(player :: t, default_volatility :: volatility) :: volatility
  def volatility(player, default_volatility \\ initial_volatility())
  def volatility({_, _, volatility}, _default), do: volatility
  def volatility({_, _}, default), do: default

  @doc """
  A convenience function for summarizing a player's strength as a 95%
  confidence interval.
  The lowest value in the interval is the player's rating minus twice the RD,
  and the highest value is the player's rating plus twice the RD.
  The volatility measure does not appear in the calculation of this interval.
  An example would be if a player's rating is 1850 and the RD is 50,
  the interval would range from 1750 to 1950. We would then say that we're 95%
  confident that the player's actual strength is between 1750 and 1950.
  When a player has a low RD, the interval would be narrow, so that we would
  be 95% confident about a player's strength being in a small interval of values.
  """
  @spec rating_interval(player :: t, as_version :: version | nil) ::
          {rating_low :: float, rating_high :: float}
  def rating_interval(player, as_version \\ nil) do
    center = rating(player, as_version)
    spread = rating_deviation(player, as_version) * 2
    {center - spread, center + spread}
  end

  @doc """
  Scales a player's rating.
  """
  @spec scale_rating_to(rating :: rating, to_version :: version) :: rating
  def scale_rating_to(rating, :v1),
    do: rating * @magic_version_scale + @magic_version_scale_rating

  def scale_rating_to(rating, :v2),
    do: (rating - @magic_version_scale_rating) / @magic_version_scale

  @doc """
  Scales a player's rating deviation.
  """
  @spec scale_rating_deviation_to(rating_deviation :: rating_deviation, to_version :: version) ::
          rating_deviation
  def scale_rating_deviation_to(deviation, :v1), do: deviation * @magic_version_scale
  def scale_rating_deviation_to(deviation, :v2), do: deviation / @magic_version_scale

  # The version a player tuple natively represents: 2-tuple => v1, 3-tuple => v2.
  defp native_version({_, _}), do: :v1
  defp native_version({_, _, _}), do: :v2

  # Return `value` as-is when no target version is requested or when the
  # player is already in the target version; otherwise rescale it.
  defp convert(value, _native, nil, _scale), do: value
  defp convert(value, version, version, _scale), do: value
  defp convert(value, _native, target, scale), do: scale.(value, target)
end
|
lib/glicko/player.ex
| 0.868186
| 0.555676
|
player.ex
|
starcoder
|
defmodule Freddy.Core.Queue do
  @moduledoc """
  Queue configuration
  ## Fields
    * `:name` - Queue name. If left empty, a server named queue with unique
      name will be declared.
    * `:opts` - Queue options, see below.
  ## Options
    * `:durable` - If set, keeps the Queue between restarts of the broker.
    * `:auto_delete` - If set, deletes the Queue once all subscribers disconnect.
    * `:exclusive` - If set, only one subscriber can consume from the Queue.
    * `:passive` - If set, raises an error unless the queue already exists.
    * `:nowait` - If set, the server will not respond to the method and client
      will not wait for a reply. Default is `false`.
    * `:arguments` - A set of arguments for the declaration. The syntax and semantics
      of these arguments depends on the server implementation.
  ## Examples
  ### Server-named queue
      iex> %Freddy.Core.Queue{exclusive: true, auto_delete: true}
  ### Client-named queue
      iex> %Freddy.Core.Queue{name: "notifications", durable: true}
  """

  @type t :: %__MODULE__{
          name: String.t(),
          opts: options
        }
  @type options :: [
          durable: boolean,
          auto_delete: boolean,
          exclusive: boolean,
          passive: boolean,
          nowait: boolean,
          arguments: Keyword.t()
        ]

  defstruct name: "", opts: []

  @doc """
  Create queue configuration from keyword list or `Freddy.Core.Queue` structure.
  An existing `Freddy.Core.Queue` struct passes through unchanged; a keyword
  list is validated against the struct keys (`struct!/2` raises on unknown keys).
  """
  @spec new(t | Keyword.t()) :: t
  def new(%__MODULE__{} = queue) do
    queue
  end

  def new(config) when is_list(config) do
    struct!(__MODULE__, config)
  end

  # Declares the queue on the given channel. For server-named queues the
  # broker-assigned name replaces the empty `:name` in the returned struct.
  @doc false
  @spec declare(t, Freddy.Core.Channel.t()) :: {:ok, t} | {:error, atom}
  def declare(%__MODULE__{name: name, opts: opts} = queue, %{adapter: adapter, chan: chan}) do
    case adapter.declare_queue(chan, name, opts) do
      {:ok, name} -> {:ok, %{queue | name: name}}
      error -> error
    end
  end

  # Starts a consumer on the queue; `opts` are forwarded verbatim to the
  # adapter. Fix: the previous spec omitted the optional `opts` argument
  # that the definition accepts (`opts \\ []`).
  @doc false
  @spec consume(t, pid, Freddy.Core.Channel.t(), Keyword.t()) ::
          {:ok, String.t()} | {:error, atom}
  def consume(%__MODULE__{name: name}, consumer_pid, %{adapter: adapter, chan: chan}, opts \\ []) do
    adapter.consume(chan, name, consumer_pid, opts)
  end
end
|
lib/freddy/core/queue.ex
| 0.904479
| 0.41117
|
queue.ex
|
starcoder
|
defmodule Roger.Info do
  @moduledoc """
  Get information about the current partitions, queues and jobs of the entire cluster.
  Most of the functions here are mirrored from `Roger.NodeInfo` but
  calls these function for each node through `Roger.System.call/2`.
  """
  alias Roger.{ApplySystem, Job}

  @doc """
  Retrieve combined partition info on all running and waiting partitions, over the entire cluster.
  """
  def partitions do
    gather(:partitions)
  end

  @doc """
  Retrieve all partitions that are currently started on all nodes in the cluster.
  """
  def running_partitions do
    gather(:running_partitions)
  end

  @doc """
  Retrieve all partitions that are currently waiting for start.
  """
  def waiting_partitions do
    gather(:waiting_partitions)
  end

  @doc """
  Retrieve all jobs that are currently running on the cluster.
  """
  def running_jobs do
    gather(:running_jobs)
  end

  @doc """
  Retrieve all running jobs for the given partition on the cluster.
  """
  def running_jobs(partition_id) do
    gather(:running_jobs, [partition_id])
  end

  @doc """
  Retrieve queued jobs for the given partition and queue.
  This basically does a `basic.get` AMQP command on the queue and
  requeues the message using a nack.
  """
  # NOTE(review): despite the doc wording, no explicit nack is sent here.
  # Messages are fetched with `no_ack: false` and never acked, so they are
  # returned to the queue when the channel is closed below — confirm this
  # matches the intended "requeue via nack" behavior.
  def queued_jobs(partition_id, queue_type, count \\ 100) do
    connection_name = Application.get_env(:roger, :connection_name)

    # If connection/channel setup fails, the failed match result of the
    # `with` is returned as-is (no `else` clause).
    with {:ok, amqp_conn} <- AMQP.Application.get_connection(connection_name),
         {:ok, channel} <- AMQP.Channel.open(amqp_conn) do
      queue = Roger.Queue.make_name(partition_id, queue_type)
      result = get_queue_messages(channel, queue, count)
      # Closing the channel must happen after fetching: it releases the
      # unacked deliveries back to the queue.
      AMQP.Channel.close(channel)
      result
    end
  end

  # Fetch up to `count` messages from `queue`, decoding each payload into a
  # `Roger.Job`. Stops early when the queue is empty.
  defp get_queue_messages(channel, queue, count) do
    get_queue_messages(channel, queue, count, [])
  end

  defp get_queue_messages(_, _, 0, result) do
    result
  end

  defp get_queue_messages(channel, queue, count, acc) do
    case AMQP.Basic.get(channel, queue, no_ack: false) do
      {:ok, payload, _meta} ->
        # Jobs are prepended, so the returned list is in reverse fetch order.
        {:ok, job} = Job.decode(payload)
        get_queue_messages(channel, queue, count - 1, [job | acc])

      {:empty, _} ->
        acc
    end
  end

  # Fan a `Roger.NodeInfo` function call out to every node via ApplySystem
  # and return the collected results. Crashes on an {:error, _} reply.
  defp gather(call, args \\ []) do
    {:ok, result} = ApplySystem.call({:apply, Roger.NodeInfo, call}, args)
    result
  end
end
|
lib/roger/info.ex
| 0.736874
| 0.62332
|
info.ex
|
starcoder
|
defmodule OMG.Performance.ByzantineEvents do
  @moduledoc """
  OMG network child chain server byzantine event test entrypoint. Runs performance byzantine tests.
  ## Usage
  To setup, once you have your Ethereum node and a child chain running, from a configured `iex -S mix run --no-start`
  shell do:
  ```
  use OMG.Performance
  Performance.init()
  spenders = Generators.generate_users(2)
  ```
  You probably want to prefill the child chain with transactions, see `OMG.Performance.ExtendedPerftest` or just:
  ```
  Performance.ExtendedPerftest.start(10_000, 16, randomized: false)
  ```
  (`randomized: false` is useful to test massive honest-standard-exiting, since it will create many unspent UTXOs for
  each of the spenders)
  """
  use OMG.Utils.LoggerExt
  alias OMG.Performance.HttpRPC.WatcherClient
  alias Support.WaitFor
  alias OMG.Utxo
  require Utxo

  @doc """
  For given utxo positions shuffle them and ask the watcher for exit data
  ## Usage
  On top of the generic setup (see above) do:
  ```
  alice = Enum.at(spenders, 0)
  :ok = ByzantineEvents.watcher_synchronize()
  utxo_positions = ByzantineEvents.get_exitable_utxos(alice.addr, take: 20)
  exit_datas = timeit ByzantineEvents.get_many_standard_exits(utxo_positions)
  ```
  NOTE this uses unspent UTXOs creating valid exits for `alice`. For invalid exits do
  ```
  utxo_positions = Generators.stream_utxo_positions(owned_by: alice.addr, take: 20)
  ```
  """
  @spec get_many_standard_exits(list(pos_integer())) :: list(map())
  def get_many_standard_exits(exit_positions) do
    watcher_url = Application.fetch_env!(:omg_performance, :watcher_url)

    # Shuffled to randomize request order; only_successes/1 crashes if any
    # single watcher response is not an {:ok, _} tuple (let-it-crash).
    exit_positions
    |> Enum.shuffle()
    |> Enum.map(&WatcherClient.get_exit_data(&1, watcher_url))
    |> only_successes()
  end

  @doc """
  For given standard exit datas (maps received from the Watcher) start all the exits in the root chain contract.
  Will use `owner_address` to start the exits so this address must own all the supplied UTXOs to exit.
  Will send out all transactions concurrently, fail if any of them fails and block till the last gets mined. Returns
  the receipt of the last transaction sent out.
  """
  @spec start_many_exits(list(map), OMG.Crypto.address_t()) :: {:ok, map()} | {:error, any()}
  def start_many_exits(exit_datas, owner_address) do
    map_contract_transaction(exit_datas, fn composed_exit ->
      Support.RootChainHelper.start_exit(
        composed_exit.utxo_pos,
        composed_exit.txbytes,
        composed_exit.proof,
        owner_address
      )
    end)
  end

  @doc """
  For given utxo positions shuffle them and ask the watcher for challenge data. All positions must be invalid exits
  ## Usage
  Having some invalid exits out there (see above), last of which started at `last_exit_height`, do:
  ```
  :ok = ByzantineEvents.watcher_synchronize(root_chain_height: last_exit_height)
  utxos_to_challenge = timeit ByzantineEvents.get_byzantine_events("invalid_exit")
  challenge_responses = timeit ByzantineEvents.get_many_se_challenges(utxos_to_challenge)
  ```
  """
  @spec get_many_se_challenges(list(pos_integer())) :: list(map())
  def get_many_se_challenges(positions) do
    watcher_url = Application.fetch_env!(:omg_performance, :watcher_url)

    positions
    |> Enum.shuffle()
    |> Enum.map(&WatcherClient.get_exit_challenge(&1, watcher_url))
    |> only_successes()
  end

  @doc """
  For given challenges (maps received from the Watcher) challenge all the invalid exits in the root chain contract.
  Will use `challenger_address`, which can be any well-funded address.
  Will send out all transactions concurrently, fail if any of them fails and block till the last gets mined. Returns
  the receipt of the last transaction sent out.
  """
  @spec challenge_many_exits(list(map), OMG.Crypto.address_t()) :: {:ok, map()} | {:error, any()}
  def challenge_many_exits(challenge_responses, challenger_address) do
    map_contract_transaction(challenge_responses, fn challenge ->
      Support.RootChainHelper.challenge_exit(
        challenge.exit_id,
        challenge.exiting_tx,
        challenge.txbytes,
        challenge.input_index,
        challenge.sig,
        challenger_address
      )
    end)
  end

  @doc """
  Fetches utxo positions for a given user's address.
  ## Usage
  On top of the generic setup (see above) do:
  ```
  timeit ByzantineEvents.get_exitable_utxos(alice.addr)
  ```
  Options:
    - :take - if not nil, will limit to this many results
  """
  @spec get_exitable_utxos(OMG.Crypto.address_t(), keyword()) :: list(pos_integer())
  def get_exitable_utxos(addr, opts \\ []) when is_binary(addr) do
    watcher_url = Application.fetch_env!(:omg_performance, :watcher_url)
    {:ok, utxos} = WatcherClient.get_exitable_utxos(addr, watcher_url)
    utxo_positions = Enum.map(utxos, & &1.utxo_pos)

    if opts[:take], do: Enum.take(utxo_positions, opts[:take]), else: utxo_positions
  end

  @doc """
  Blocks the caller until the watcher configured reports to be fully synced up (both child chain blocks and eth events)
  Options:
    - :root_chain_height - if not `nil`, in addition to synchronizing to current top mined child chain block, it will
      sync up till all the Watcher's services report at at least this Ethereum height
  """
  @spec watcher_synchronize(keyword()) :: :ok
  def watcher_synchronize(opts \\ []) do
    root_chain_height = Keyword.get(opts, :root_chain_height, nil)
    watcher_url = Application.fetch_env!(:omg_performance, :watcher_url)
    _ = Logger.info("Waiting for the watcher to synchronize")
    # Polls forever (`:infinity`) until watcher_synchronized?/2 returns :ok.
    :ok = WaitFor.ok(fn -> watcher_synchronized?(root_chain_height, watcher_url) end, :infinity)
    # NOTE: allowing some more time for the dust to settle on the synced Watcher
    # otherwise some of the freshest UTXOs to exit will appear as missing on the Watcher
    # related issue to remove this `sleep` and fix properly is https://github.com/omisego/elixir-omg/issues/1031
    Process.sleep(2000)
    _ = Logger.info("Watcher synchronized")
  end

  @doc """
  Gets all the byzantine events from the Watcher
  """
  @spec get_byzantine_events() :: list(map())
  def get_byzantine_events() do
    watcher_url = Application.fetch_env!(:omg_performance, :watcher_url)
    {:ok, status_response} = WatcherClient.get_status(watcher_url)
    status_response[:byzantine_events]
  end

  @doc """
  Gets byzantine events of a particular flavor from the Watcher
  """
  # NOTE(review): postprocess_byzantine_events/2 only has a clause for
  # "invalid_exit"; any other event_name raises FunctionClauseError.
  @spec get_byzantine_events(String.t()) :: list(map())
  def get_byzantine_events(event_name) do
    get_byzantine_events()
    |> Enum.filter(&(&1["event"] == event_name))
    |> postprocess_byzantine_events(event_name)
  end

  # Reduce "invalid_exit" event maps down to the bare utxo positions.
  defp postprocess_byzantine_events(events, "invalid_exit"), do: Enum.map(events, & &1["details"]["utxo_pos"])

  # Crashes on the first non-{:ok, _} response — intentional assertive style.
  defp only_successes(responses), do: Enum.map(responses, fn {:ok, response} -> response end)

  # this allows one to map a contract-transacting function over a collection nicely.
  # It initiates all the transactions concurrently. Then it waits on all of them to mine successfully.
  # Returns the last receipt result, so you can synchronize on the block number returned (and the entire bundle of txs)
  # NOTE(review): "enumberable" is a typo for "enumerable" (local name only).
  defp map_contract_transaction(enumberable, transaction_function) do
    enumberable
    |> Enum.map(transaction_function)
    |> Task.async_stream(&Support.DevHelper.transact_sync!(&1, timeout: :infinity),
      timeout: :infinity,
      max_concurrency: 10_000
    )
    |> Enum.map(fn {:ok, result} -> result end)
    |> List.last()
  end

  # This function is prepared to be called in `WaitFor.ok`.
  # It repeatedly ask for Watcher's `/status.get` until Watcher consume mined block
  # Returns :ok when synced, :repeat to make WaitFor.ok poll again.
  defp watcher_synchronized?(root_chain_height, watcher_url) do
    {:ok, status} = WatcherClient.get_status(watcher_url)

    with true <- watcher_synchronized_to_mined_block?(status),
         true <- root_chain_synced?(root_chain_height, status) do
      :ok
    else
      _ -> :repeat
    end
  end

  # No height requested => the root-chain condition is trivially satisfied.
  defp root_chain_synced?(nil, _), do: true

  defp root_chain_synced?(root_chain_height, status) do
    status
    |> Access.get(:services_synced_heights)
    |> Enum.all?(&(&1["height"] >= root_chain_height))
  end

  defp watcher_synchronized_to_mined_block?(%{
         last_mined_child_block_number: last_mined_child_block_number,
         last_validated_child_block_number: last_validated_child_block_number
       })
       when last_mined_child_block_number == last_validated_child_block_number and
              last_mined_child_block_number > 0 do
    _ = Logger.debug("Synced to blknum: #{last_validated_child_block_number}")
    true
  end

  defp watcher_synchronized_to_mined_block?(_), do: false
end
|
apps/omg_performance/lib/omg_performance/byzantine_events.ex
| 0.881971
| 0.721351
|
byzantine_events.ex
|
starcoder
|
defmodule SvgBuilder.Text do
  import XmlBuilder
  alias SvgBuilder.{Element, Units}

  @type text_t :: binary | Element.t() | [Element.t()]

  @moduledoc """
  Add and modify text elements.
  """

  # Element types permitted as children of a `text` element.
  @text_child_types [:altGlyph, :textPath, :tref, :tspan, :metadata, :desc, :title, :a]
  # Element types permitted as children of a `tspan` element (`textPath` is
  # not allowed inside `tspan`).
  # Fix: these were previously listed as *strings* ("altGlyph", "tref", ...)
  # while every element built in this module carries an *atom* tag, so such
  # children were always filtered out.
  @tspan_child_types [:altGlyph, :tref, :tspan, :metadata, :desc, :title, :a]

  @doc """
  Create a text element.
  The `text` may be a binary, text element or list of text elements.
  """
  @spec text(text_t) :: Element.t()
  def text(text) do
    element(:text, %{}, text_content(text))
  end

  @doc """
  Create a text element with glyph positions.
  x : An x coordintate or a list of x coordinates to be assigned to the
      glyphs of the content.
  y : An y coordintate or a list of y coordinates to be assigned to the
      glyphs of the content.
  text : Content of the text element.
  """
  @spec text(Units.length_list_t(), Units.length_list_t(), text_t) :: Element.t()
  def text(x, y, text) do
    element(
      :text,
      %{x: "#{Units.length_list(x)}", y: "#{Units.length_list(y)}"},
      text_content(text)
    )
  end

  @doc """
  Create a text element with glyph positions and per-glyph offsets
  (`dx`/`dy`).
  """
  @spec text(
          Units.length_list_t(),
          Units.length_list_t(),
          Units.length_list_t(),
          Units.length_list_t(),
          text_t
        ) :: Element.t()
  def text(x, y, dx, dy, text) do
    element(
      :text,
      %{
        x: "#{Units.length_list(x)}",
        y: "#{Units.length_list(y)}",
        dx: "#{Units.length_list(dx)}",
        dy: "#{Units.length_list(dy)}"
      },
      text_content(text)
    )
  end

  @doc """
  Create a tspan element.
  The `text` may be a binary, text element or list of text elements.
  """
  @spec tspan(text_t) :: Element.t()
  def tspan(text) do
    element(:tspan, %{}, tspan_content(text))
  end

  @doc """
  Create a tspan element with glyph positions.
  """
  # Fix: previously filtered children with text_content/1, inconsistent with
  # tspan/1 and admitting `textPath` children that tspan does not allow.
  @spec tspan(Units.length_list_t(), Units.length_list_t(), text_t) :: Element.t()
  def tspan(x, y, text) do
    element(
      :tspan,
      %{x: "#{Units.length_list(x)}", y: "#{Units.length_list(y)}"},
      tspan_content(text)
    )
  end

  @doc """
  Create a tspan element with glyph positions and per-glyph offsets
  (`dx`/`dy`).
  """
  @spec tspan(
          Units.length_list_t(),
          Units.length_list_t(),
          Units.length_list_t(),
          Units.length_list_t(),
          text_t
        ) :: Element.t()
  def tspan(x, y, dx, dy, text) do
    element(
      :tspan,
      %{
        x: "#{Units.length_list(x)}",
        y: "#{Units.length_list(y)}",
        dx: "#{Units.length_list(dx)}",
        dy: "#{Units.length_list(dy)}"
      },
      tspan_content(text)
    )
  end

  @doc """
  Rotate the glyphs in a text element.
  If the argument is a single number then all text is rotated.
  A list rotates each glyph by the amount corresponding to the glyph in the list.
  """
  @spec rotate(Element.t(), Units.length_list_t()) :: Element.t()
  def rotate({type, _, _} = element, rotation) when type in [:text, :tspan] do
    Element.add_attribute(element, :rotate, Units.length_list(rotation))
  end

  # Keep only children valid inside a `text` element; binaries pass through.
  defp text_content(text) when is_list(text) do
    Enum.filter(text, &is_valid_text_content/1)
  end

  defp text_content(text) when is_binary(text) do
    text
  end

  defp is_valid_text_content(t) when is_binary(t), do: true
  defp is_valid_text_content({type, _, _}) when type in @text_child_types, do: true
  defp is_valid_text_content(_), do: false

  # Keep only children valid inside a `tspan` element; binaries pass through.
  defp tspan_content(text) when is_list(text) do
    Enum.filter(text, &is_valid_tspan_content/1)
  end

  defp tspan_content(text) when is_binary(text) do
    text
  end

  defp is_valid_tspan_content(t) when is_binary(t), do: true
  defp is_valid_tspan_content({type, _, _}) when type in @tspan_child_types, do: true
  defp is_valid_tspan_content(_), do: false
end
|
lib/text.ex
| 0.829008
| 0.455501
|
text.ex
|
starcoder
|
defmodule Nacha.Record do
  @moduledoc """
  A use macro for building and formatting NACHA records.
  """

  # Injects a struct plus validate/to_string/to_iolist functions into the
  # using module. Expects `fields: [{key, type, length} | {key, type, length, default}]`
  # in `opts`; the using module may set @required (list of keys) before `use`.
  defmacro __using__(opts) do
    quote do
      # The generated module defines its own to_string/1 below, so the
      # Kernel version must be excluded to avoid an import conflict.
      import Kernel, except: [to_string: 1]
      @fields unquote(Keyword.get(opts, :fields))
      # Default @required to [] when the using module did not declare it.
      if __MODULE__ |> Module.get_attribute(:required) |> is_nil() do
        Module.put_attribute(__MODULE__, :required, [])
      end

      # Struct keys come from @fields (4-tuples carry a default value);
      # bookkeeping fields :errors and :valid? are appended.
      defstruct Enum.map(@fields, fn
                  {key, _, _} -> key
                  {key, _, _, default} -> {key, default}
                end) ++ [errors: [], valid?: false]

      @type t :: %__MODULE__{}

      @spec validate(__MODULE__.t()) :: __MODULE__.t()
      def validate(record),
        do: unquote(__MODULE__).validate_required(record, @required)

      @spec to_string(__MODULE__.t()) :: String.t()
      def to_string(%__MODULE__{} = record),
        do: unquote(__MODULE__).to_string(record, @fields)

      @spec to_iolist(__MODULE__.t()) :: iolist
      def to_iolist(%__MODULE__{} = record),
        do: unquote(__MODULE__).to_iolist(record, @fields)

      def to_iolist([%__MODULE__{} | _] = records),
        do: unquote(__MODULE__).to_iolist(records, @fields)

      def to_iolist([]), do: []
    end
  end

  # {key, type, length} or {key, type, length, default}
  @typep key_def :: {atom, atom, integer} | {atom, atom, integer, any}

  # Adds a {key, "is required"} error for every required key that is nil,
  # then sets :valid? accordingly. Works on any map with :errors/:valid? keys.
  @spec validate_required(struct, list(atom)) :: struct
  def validate_required(record, required) do
    validated =
      Enum.reduce(required, record, fn key, acc ->
        if is_nil(Map.get(acc, key)),
          do: Map.update!(acc, :errors, &[{key, "is required"} | &1]),
          else: acc
      end)

    %{validated | valid?: length(validated.errors) == 0}
  end

  # Renders a record as a fixed-width line (the trailing `to_string` here is
  # Kernel.to_string/1, flattening the iolist).
  @spec to_string(struct, list(key_def)) :: String.t()
  def to_string(record, keys), do: record |> to_iolist(keys) |> to_string

  # Multiple records become newline-separated lines.
  @spec to_iolist(list(struct), list(key_def)) :: iolist
  def to_iolist(records, keys) when is_list(records),
    do: records |> Stream.map(&to_iolist(&1, keys)) |> Enum.intersperse("\n")

  # Fields are emitted in @fields order as a nested iolist (no flattening).
  @spec to_iolist(struct, list(key_def)) :: iolist
  def to_iolist(record, keys),
    do: Enum.reduce(keys, [], &[&2, format_field(record, &1)])

  # The default value in a 4-tuple key_def is irrelevant at render time.
  defp format_field(record, {key, type, length, _}),
    do: format_field(record, {key, type, length})

  defp format_field(record, {key, type, length}),
    do:
      record
      |> Map.get(key, "")
      |> format_value(length, type)
      |> pad(length, type)

  # A nil date/time renders as an empty (space-padded) field.
  defp format_value(nil, length, type)
       when type in [:date, :time],
       do: format_value(nil, length, :string)

  # Dates render as YYMMDD (basic ISO "YYYYMMDD" with the century dropped).
  defp format_value(date, _, :date),
    do: date |> Date.to_iso8601(:basic) |> String.slice(2, 6)

  # Times render as HHMM (first four characters of basic ISO "HHMMSS").
  defp format_value(time, _, :time),
    do: time |> Time.to_iso8601(:basic) |> String.slice(0, 4)

  # Everything else is stringified and truncated to the field length.
  defp format_value(value, length, _type),
    do: value |> to_string() |> String.slice(0, length)

  # Numbers are left-padded with zeros; all other types right-padded with spaces.
  defp pad(val, length, :number), do: String.pad_leading(val, length, "0")
  defp pad(val, length, _), do: String.pad_trailing(val, length, " ")
end
|
lib/nacha/record.ex
| 0.635109
| 0.425695
|
record.ex
|
starcoder
|
defmodule Xcribe do
  @moduledoc """
  Xcribe is a library for API documentation. It generates docs from your test specs.
  Xcribe uses the `Plug.Conn` struct to fetch information about requests and uses them
  to document your API. You must give request examples (from your tests) so Xcribe
  is able to document your routes.
  Each connection sent to documenting in your tests is parsed. It is expected that the
  connection has been passed through the app `Endpoint` as a finished request.
  The parser will extract all needed info from `Conn` and uses the app `Router`
  for additional information about the request.
  The attribute `description` may be given at `document` macro call with the
  option `:as`:
      test "test name", %{conn: conn} do
        ...
        document(conn, as: "description here")
        ...
      end
  If no description is given the current test description will be used.
  ## API information
  You must provide your API information by creating a module that uses
  `Xcribe.Information` macros.
  The required info are:
  - `name` - a name for your API.
  - `description` - a description about your API.
  - `host` - your API host url
  This information is set by Xcribe macros inside the block `xcribe_info`. eg:
      defmodule YourModuleInformation do
        use Xcribe.Information
        xcribe_info do
          name "Your awesome API"
          description "The best API in the world"
          host "http://your-api.us"
        end
      end
  See `Xcribe.Information` for more details about custom information.
  ## JSON
  Xcribe uses the same json library configured for Phoenix to handle json content.
  You can configure Xcribe to use your preferred library. Poison and Jason are
  the most popular json libraries commonly used in Elixir and Xcribe works fine with both.
  ## Configuration
  You must configure at least the `information_source` and `format` for basic use.
  eg
      config :xcribe,
        information_source: YourApp.YouModuleInformation,
        format: :swagger
  #### Available configurations:
  * `:information_source` - Module that implements `Xcribe.Information` with
  API information. It's required.
  * `:output` - The name of file output with generated configuration. Default
  value changes by the format, 'api_blueprint.apib' for Blueprint and
  'app_doc.json' for swagger.
  * `:format` - Format to generate documentation, allowed `:api_blueprint` and
  `:swagger`. Default `:api_blueprint`.
  * `:env_var` - Environment variable name for active Xcribe documentation
  generator. Default is `XCRIBE_ENV`.
  * `:json_library` - The library to be used for json decode/encode (Jason
  and Poison are supported). The default is the same as `Phoenix` configuration.
  * `:serve` - Enable Xcribe serve mode. Default `false`. See more `Serving doc`
  """
  use Application
  alias Xcribe.CLI.Output
  alias Xcribe.Config
  @doc false
  def start(_type, _opts) do
    opts = [strategy: :one_for_one, name: Xcribe.Supervisor]
    # Configuration problems are reported to the CLI but do not abort startup:
    # the supervisor is started regardless of the check's outcome.
    case Config.check_configurations([:serve]) do
      {:error, errors} -> Output.print_configuration_errors(errors)
      :ok -> :ok
    end
    Supervisor.start_link(children(), opts)
  end
  @doc false
  # Convenience entry point for starting the :xcribe application manually
  # (e.g. from a script); asserts the application started successfully.
  def start(_options \\ []) do
    {:ok, _} = Application.start(:xcribe)
    :ok
  end
  # Child specs for the supervision tree: only the request recorder.
  defp children do
    [{Xcribe.Recorder, []}]
  end
end
|
lib/xcribe.ex
| 0.859251
| 0.52208
|
xcribe.ex
|
starcoder
|
defmodule AWS.MemoryDB do
  @moduledoc """
  MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that
  delivers ultra-fast performance and Multi-AZ durability for modern applications
  built using microservices architectures.
  MemoryDB stores the entire database in-memory, enabling low latency and high
  throughput data access. It is compatible with Redis, a popular open source data
  store, enabling you to leverage Redis’ flexible and friendly data structures,
  APIs, and commands.
  """
  alias AWS.Client
  alias AWS.Request
  # Static service metadata used by AWS.Request to sign and route every call.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: "Amazon MemoryDB",
      api_version: "2021-01-01",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "memory-db",
      global?: false,
      protocol: "json",
      service_id: "MemoryDB",
      signature_version: "v4",
      signing_name: "memorydb",
      target_prefix: "AmazonMemoryDB"
    }
  end
  @doc """
  Apply the service update to a list of clusters supplied.
  For more information on service updates and applying them, see [Applying the service
  updates](https://docs.aws.amazon.com/MemoryDB/latest/devguide/managing-updates.html#applying-updates).
  """
  def batch_update_cluster(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "BatchUpdateCluster", input, options)
  end
  @doc """
  Makes a copy of an existing snapshot.
  """
  def copy_snapshot(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CopySnapshot", input, options)
  end
  @doc """
  Creates an Access Control List.
  For more information, see [Authenticating users with Access Control Lists (ACLs)](https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html).
  """
  def create_acl(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateACL", input, options)
  end
  @doc """
  Creates a cluster.
  All nodes in the cluster run the same protocol-compliant engine software.
  """
  def create_cluster(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateCluster", input, options)
  end
  @doc """
  Creates a new MemoryDB parameter group.
  A parameter group is a collection of parameters and their values that are
  applied to all of the nodes in any cluster. For more information, see
  [Configuring engine parameters using parameter groups](https://docs.aws.amazon.com/MemoryDB/latest/devguide/parametergroups.html).
  """
  def create_parameter_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateParameterGroup", input, options)
  end
  @doc """
  Creates a copy of an entire cluster at a specific moment in time.
  """
  def create_snapshot(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateSnapshot", input, options)
  end
  @doc """
  Creates a subnet group.
  A subnet group is a collection of subnets (typically private) that you can
  designate for your clusters running in an Amazon Virtual Private Cloud (VPC)
  environment. When you create a cluster in an Amazon VPC, you must specify a
  subnet group. MemoryDB uses that subnet group to choose a subnet and IP
  addresses within that subnet to associate with your nodes. For more information,
  see [Subnets and subnet groups](https://docs.aws.amazon.com/MemoryDB/latest/devguide/subnetgroups.html).
  """
  def create_subnet_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateSubnetGroup", input, options)
  end
  @doc """
  Creates a MemoryDB user.
  For more information, see [Authenticating users with Access Control Lists (ACLs)](https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html).
  """
  def create_user(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateUser", input, options)
  end
  @doc """
  Deletes an Access Control List.
  The ACL must first be disassociated from the cluster before it can be deleted.
  For more information, see [Authenticating users with Access Control Lists (ACLs)](https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html).
  """
  def delete_acl(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteACL", input, options)
  end
  @doc """
  Deletes a cluster.
  It also deletes all associated nodes and node endpoints
  """
  def delete_cluster(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteCluster", input, options)
  end
  @doc """
  Deletes the specified parameter group.
  You cannot delete a parameter group if it is associated with any clusters. You
  cannot delete the default parameter groups in your account.
  """
  def delete_parameter_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteParameterGroup", input, options)
  end
  @doc """
  Deletes an existing snapshot.
  When you receive a successful response from this operation, MemoryDB immediately
  begins deleting the snapshot; you cannot cancel or revert this operation.
  """
  def delete_snapshot(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteSnapshot", input, options)
  end
  @doc """
  Deletes a subnet group.
  You cannot delete a default subnet group or one that is associated with any
  clusters.
  """
  def delete_subnet_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteSubnetGroup", input, options)
  end
  @doc """
  Deletes a user.
  The user will be removed from all ACLs and in turn removed from all clusters.
  """
  def delete_user(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteUser", input, options)
  end
  @doc """
  Returns a list of ACLs
  """
  def describe_acls(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeACLs", input, options)
  end
  @doc """
  Returns information about all provisioned clusters if no cluster identifier is
  specified, or about a specific cluster if a cluster name is supplied.
  """
  def describe_clusters(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeClusters", input, options)
  end
  @doc """
  Returns a list of the available Redis engine versions.
  """
  def describe_engine_versions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeEngineVersions", input, options)
  end
  @doc """
  Returns events related to clusters, security groups, and parameter groups.
  You can obtain events specific to a particular cluster, security group, or
  parameter group by providing the name as a parameter. By default, only the
  events occurring within the last hour are returned; however, you can retrieve up
  to 14 days' worth of events if necessary.
  """
  def describe_events(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeEvents", input, options)
  end
  @doc """
  Returns a list of parameter group descriptions.
  If a parameter group name is specified, the list contains only the descriptions
  for that group.
  """
  def describe_parameter_groups(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeParameterGroups", input, options)
  end
  @doc """
  Returns the detailed parameter list for a particular parameter group.
  """
  def describe_parameters(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeParameters", input, options)
  end
  @doc """
  Returns details of the service updates
  """
  def describe_service_updates(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeServiceUpdates", input, options)
  end
  @doc """
  Returns information about cluster snapshots.
  By default, DescribeSnapshots lists all of your snapshots; it can optionally
  describe a single snapshot, or just the snapshots associated with a particular
  cluster.
  """
  def describe_snapshots(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeSnapshots", input, options)
  end
  @doc """
  Returns a list of subnet group descriptions.
  If a subnet group name is specified, the list contains only the description of
  that group.
  """
  def describe_subnet_groups(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeSubnetGroups", input, options)
  end
  @doc """
  Returns a list of users.
  """
  def describe_users(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeUsers", input, options)
  end
  @doc """
  Used to failover a shard
  """
  def failover_shard(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "FailoverShard", input, options)
  end
  @doc """
  Lists all available node types that you can scale to from your cluster's current
  node type.
  When you use the UpdateCluster operation to scale your cluster, the value of the
  NodeType parameter must be one of the node types returned by this operation.
  """
  def list_allowed_node_type_updates(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListAllowedNodeTypeUpdates", input, options)
  end
  @doc """
  Lists all tags currently on a named resource.
  A tag is a key-value pair where the key and value are case-sensitive. You can
  use tags to categorize and track your MemoryDB resources. For more information,
  see [Tagging your MemoryDB resources](https://docs.aws.amazon.com/MemoryDB/latest/devguide/Tagging-Resources.html)
  """
  def list_tags(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListTags", input, options)
  end
  @doc """
  Modifies the parameters of a parameter group to the engine or system default
  value.
  You can reset specific parameters by submitting a list of parameter names. To
  reset the entire parameter group, specify the AllParameters and
  ParameterGroupName parameters.
  """
  def reset_parameter_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ResetParameterGroup", input, options)
  end
  @doc """
  A tag is a key-value pair where the key and value are case-sensitive.
  You can use tags to categorize and track all your MemoryDB resources. When you
  add or remove tags on clusters, those actions will be replicated to all nodes in
  the cluster. For more information, see [Resource-level permissions](https://docs.aws.amazon.com/MemoryDB/latest/devguide/iam.resourcelevelpermissions.html).
  For example, you can use cost-allocation tags to your MemoryDB resources, Amazon
  generates a cost allocation report as a comma-separated value (CSV) file with
  your usage and costs aggregated by your tags. You can apply tags that represent
  business categories (such as cost centers, application names, or owners) to
  organize your costs across multiple services. For more information, see [Using Cost Allocation
  Tags](https://docs.aws.amazon.com/MemoryDB/latest/devguide/tagging.html).
  """
  def tag_resource(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "TagResource", input, options)
  end
  @doc """
  Use this operation to remove tags on a resource
  """
  def untag_resource(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UntagResource", input, options)
  end
  @doc """
  Changes the list of users that belong to the Access Control List.
  """
  def update_acl(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateACL", input, options)
  end
  @doc """
  Modifies the settings for a cluster.
  You can use this operation to change one or more cluster configuration settings
  by specifying the settings and the new values.
  """
  def update_cluster(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateCluster", input, options)
  end
  @doc """
  Updates the parameters of a parameter group.
  You can modify up to 20 parameters in a single request by submitting a list
  parameter name and value pairs.
  """
  def update_parameter_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateParameterGroup", input, options)
  end
  @doc """
  Updates a subnet group.
  For more information, see [Updating a subnet group](https://docs.aws.amazon.com/MemoryDB/latest/devguide/ubnetGroups.Modifying.html)
  """
  def update_subnet_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateSubnetGroup", input, options)
  end
  @doc """
  Changes user password(s) and/or access string.
  """
  def update_user(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateUser", input, options)
  end
end
|
lib/aws/generated/memory_db.ex
| 0.883167
| 0.442637
|
memory_db.ex
|
starcoder
|
defmodule Scrivener.HTML do
  use Phoenix.HTML
  alias Scrivener.Page
  alias Scrivener.HTML.Parse
  alias Scrivener.HTML.Render
  # Captured at compile time so they can be interpolated into the docs below
  # and used to split caller options between Parse and Render.
  @parse_defaults Parse.defaults()
  @render_defaults Render.defaults()
  @moduledoc """
  ## Usage
  Import `Scrivener.HTML` to your view:
      defmodule SampleWeb.UserView do
        use SampleWeb, :view
        use Scrivener.HTML
      end
  Use helper functions in your template:
      <%= pagination @page %>
  Where `@page` is a `%Scrivener.Page{}` struct.
  Read `Scrivener.HTML.pagination` for more details.
  ## SEO
  See `Scrivener.HTML.SEO` for more details.
  """
  @doc false
  defmacro __using__(_) do
    quote do
      import Scrivener.HTML
      import Scrivener.HTML.SEO
    end
  end
  @doc """
  Generates the HTML pagination for a given `%Scrivener.Page{}` returned by Scrivener.
  ## Available options
  Available `options` consists of options provided by `Scrivener.HTML.Parse` and
  `Scrivener.HTML.Render`.
  Default options of `Scrivener.HTML.Parse`:
  ```
  #{inspect(@parse_defaults, pretty: true, limit: :infinity)}
  ```
  Default options of `Scrivener.HTML.Render`:
  ```
  #{inspect(@render_defaults, pretty: true, limit: :infinity)}
  ```
  All other options will be considered as extra params of links.
  ## Examples
  Call `pagination/2` with `Scrivener.HTML.Parse` options:
      iex> pagination(%Scrivener.Page{total_pages: 10, page_number: 5}, distance: 4)
  Call `pagination/2` with more options:
      iex> pagination(%Scrivener.Page{total_pages: 10, page_number: 5}, page_param: :p, distance: 4)
  Call `pagination/2` with extra options:
      iex> pagination(%Scrivener.Page{total_pages: 10, page_number: 5}, my_param: "foobar")
  ## Custom HTML output
  ### Custom HTML attrs of container
      iex> pagination(%Scrivener.Page{total_pages: 10, page_number: 5}, html_attrs: [class: "pagination"])
  ### Custom previous and next buttons
      iex> pagination(%Scrivener.Page{total_pages: 10, page_number: 5}, previous: Phoenix.HTML.raw("←"), next: Phoenix.HTML.raw("→"))
  ### Advanced customization
  Create a render module referencing `Scrivener.HTML.Render.Preset`, then use it
  by setting `:render_module` option.
  """
  def pagination(%Page{} = page, options \\ []) do
    # Options whose keys appear in Parse's defaults go to Parse; everything
    # else (Render options + extra link params) goes to Render.
    parse_options = Keyword.take(options, Keyword.keys(@parse_defaults))
    render_options = Keyword.drop(options, Keyword.keys(@parse_defaults))
    page
    |> Parse.parse(parse_options)
    |> Render.render(page, render_options)
  end
  # All recognized option keys with their default values.
  def defaults(), do: @parse_defaults ++ @render_defaults
end
|
lib/scrivener/html.ex
| 0.845496
| 0.657612
|
html.ex
|
starcoder
|
defmodule Socket.UDP do
  @moduledoc """
  This module wraps a UDP socket using `gen_udp`.
  ## Options
  When creating a socket you can pass a series of options to use for it.
  * `:as` sets the kind of value returned by recv, either `:binary` or `:list`,
    the default is `:binary`.
  * `:mode` can be either `:passive` or `:active`, default is `:passive`
  * `:local` must be a keyword list
    - `:address` the local address to use
    - `:fd` an already opened file descriptor to use
  * `:version` sets the IP version to use
  * `:broadcast` enables broadcast sending
  ## Examples
      server = Socket.UDP.open!(1337)
      { data, client } = server |> Socket.Datagram.recv!
      server |> Socket.Datagram.send! data, client
  """
  @type t :: port
  use Socket.Helpers
  @doc """
  Create a UDP socket listening on an OS chosen port, use `local` to know the
  port it was bound on.
  """
  @spec open :: { :ok, t } | { :error, Error.t }
  def open do
    open(0, [])
  end
  @doc """
  Create a UDP socket listening on an OS chosen port, use `local` to know the
  port it was bound on, raising if an error occurs.
  """
  @spec open! :: t | no_return
  defbang open
  @doc """
  Create a UDP socket listening on the given port or using the given options.
  """
  @spec open(:inet.port_number | Keyword.t) :: { :ok, t } | { :error, Error.t }
  def open(port) when port |> is_integer do
    open(port, [])
  end
  def open(options) when options |> is_list do
    open(0, options)
  end
  @doc """
  Create a UDP socket listening on the given port or using the given options,
  raising if an error occurs.
  """
  @spec open!(:inet.port_number | Keyword.t) :: t | no_return
  defbang open(port_or_options)
  @doc """
  Create a UDP socket listening on the given port and using the given options.
  """
  @spec open(:inet.port_number, Keyword.t) :: { :ok, t } | { :error, Error.t }
  def open(port, options) do
    options = Keyword.put_new(options, :mode, :passive)
    :gen_udp.open(port, arguments(options))
  end
  @doc """
  Create a UDP socket listening on the given port and using the given options,
  raising if an error occurs.
  """
  @spec open!(:inet.port_number, Keyword.t) :: t | no_return
  defbang open(port, options)
  @doc """
  Set the process which will receive the messages.
  """
  @spec process(t | port, pid) :: :ok | { :error, :closed | :not_owner | Error.t }
  def process(sock, pid) when sock |> is_port do
    :gen_udp.controlling_process(sock, pid)
  end
  @doc """
  Set the process which will receive the messages, raising if an error occurs.
  """
  @spec process!(t | port, pid) :: :ok | no_return
  def process!(sock, pid) do
    case process(sock, pid) do
      :ok ->
        :ok

      # BUGFIX: :gen_udp.controlling_process/2 returns `{ :error, reason }`
      # tuples (see the @spec of process/2 above), never bare atoms, so the
      # previous bare `:closed`/`:not_owner` clauses were unreachable and every
      # failure fell through to the generic Socket.Error clause.
      { :error, :closed } ->
        raise RuntimeError, message: "the socket is closed"

      { :error, :not_owner } ->
        raise RuntimeError, message: "the current process isn't the owner"

      { :error, code } ->
        raise Socket.Error, reason: code
    end
  end
  @doc """
  Set options of the socket.
  """
  @spec options(t, Keyword.t) :: :ok | { :error, Error.t }
  def options(sock, opts) when sock |> is_port do
    :inet.setopts(sock, arguments(opts))
  end
  @doc """
  Convert UDP options to `:inet.setopts` compatible arguments.
  """
  @spec arguments(Keyword.t) :: list
  def arguments(options) do
    options = options
      |> Keyword.put_new(:as, :binary)

    # Split options into UDP-specific ones (handled below) and generic socket
    # options (delegated to Socket.arguments/1).
    options = Enum.group_by(options, fn
      { :as, _ }         -> true
      { :local, _ }      -> true
      { :version, _ }    -> true
      { :broadcast, _ }  -> true
      { :multicast, _ }  -> true
      { :membership, _ } -> true
      _                  -> false
    end)

    { local, global } = {
      Map.get(options, true, []),
      Map.get(options, false, [])
    }

    Socket.arguments(global) ++ Enum.flat_map(local, fn
      { :as, :binary } ->
        [:binary]
      { :as, :list } ->
        [:list]
      { :local, options } ->
        Enum.flat_map(options, fn
          { :address, address } ->
            [{ :ip, Socket.Address.parse(address) }]
          { :fd, fd } ->
            [{ :fd, fd }]
        end)
      { :version, 4 } ->
        [:inet]
      { :version, 6 } ->
        [:inet6]
      { :broadcast, broadcast } ->
        [{ :broadcast, broadcast }]
      { :multicast, options } ->
        Enum.flat_map(options, fn
          { :address, address } ->
            [{ :multicast_if, Socket.Address.parse(address) }]
          { :loop, loop } ->
            [{ :multicast_loop, loop }]
          { :ttl, ttl } ->
            [{ :multicast_ttl, ttl }]
        end)
      { :membership, membership } ->
        [{ :add_membership, membership }]
    end)
  end
end
|
deps/socket/lib/socket/udp.ex
| 0.893176
| 0.544559
|
udp.ex
|
starcoder
|
defmodule Gorpo.Service do
  @moduledoc """
  Consul service definition.

  Fields:

  * `id` - a unique value for this service on the local agent
  * `name` - the name of this service
  * `tags` - a list of strings [opaque to consul] that can be used to further
    assist discovery
  * `address` - hostname or IP address of this service; if not used, the
    agent's IP address is used
  * `port` - the inet port of this service
  * `check` - the health check associated with this service
  """

  defstruct id: nil,
            name: nil,
            address: nil,
            port: nil,
            tags: [],
            check: nil

  @type t :: %__MODULE__{
    id: String.t,
    name: String.t,
    address: String.t | nil,
    port: 0..65_535 | nil,
    tags: [String.t],
    check: Gorpo.Check.t | nil
  }

  @spec dump(t) :: %{String.t => term}
  @doc """
  Encodes the service into a map that once json-encoded matches the Consul
  service definition specification. `nil` fields are omitted.
  """
  def dump(service) do
    # A nil check stays nil and is dropped with the other nil fields below.
    check = service.check && Gorpo.Check.dump(service.check)

    %{
      "ID" => service.id,
      "Name" => service.name,
      "Tags" => service.tags,
      "Port" => service.port,
      "Address" => service.address,
      "check" => check
    }
    |> Enum.reject(fn {_key, value} -> is_nil(value) end)
    |> Map.new()
  end

  @spec load(String.t, map) :: t
  @doc """
  Parses a Consul service definition into a `Service` struct. `name` is used
  when the payload carries no `"Name"` key.
  """
  def load(name, data) do
    %__MODULE__{
      id: data["ID"],
      name: Map.get(data, "Name", name),
      address: data["Address"],
      port: data["Port"],
      tags: Map.get(data, "Tags", [])
    }
  end

  @spec check_id(t) :: String.t | nil
  @doc """
  Returns the id that can be used to refer to a check associated with a given
  service, or `nil` when the service has neither an id nor a name.
  """
  def check_id(service) do
    case service.id || service.name do
      nil -> nil
      ident -> "service:" <> ident
    end
  end

  @doc """
  Returns the service id and name as a tuple.
  """
  @spec id(t) :: {String.t, String.t | nil}
  def id(%__MODULE__{id: id, name: name}) do
    {id, name}
  end
end
# JSON encoding for Poison: serialize via dump/1 so nil fields are omitted
# and keys match the Consul wire format.
defimpl Poison.Encoder, for: Gorpo.Service do
  def encode(service, opts) do
    service
    |> Gorpo.Service.dump()
    |> Poison.Encoder.encode(opts)
  end
end
|
lib/gorpo/service.ex
| 0.734024
| 0.562567
|
service.ex
|
starcoder
|
defmodule Comb do
  # Build @moduledoc from the README when present (exdoc doesn't support ```
  # fenced blocks, so those lines are stripped). Previously this used
  # File.read!/1, which made compilation crash whenever README.md was absent
  # (e.g. when the file is compiled outside the project root); fall back to a
  # static summary instead.
  @moduledoc (case File.read("README.md") do
                {:ok, readme} ->
                  readme
                  |> String.split("\n")
                  |> Enum.reject(&(String.match?(&1, ~r/```|Build Status|Documentation Status/)))
                  |> Enum.join("\n")

                {:error, _} ->
                  "Combinatorics helpers: combinations, permutations, partitions, subsets and selections."
              end)

  @doc """
  This function returns a list containing all combinations of one element
  from each `a` and `b`
  ## Example
      iex> cartesian_product(1..2, 3..4)
      [[1, 3], [1, 4], [2, 3], [2, 4]]
  """
  @spec cartesian_product(Enum.t, Enum.t) :: [list]
  def cartesian_product(a, b) do
    for x <- a, y <- b, do: [x, y]
  end

  @doc """
  Returns any combination of the elements in `enum` with exactly `k` elements.
  Repeated elements are handled intelligently.
  ## Examples
      iex> combinations([1, 2, 3], 2) |> Enum.to_list
      [[1, 2], [1, 3], [2, 3]]
      iex> combinations([1, 1, 2], 2) |> Enum.to_list
      [[1, 1], [1, 2]]
  """
  @spec combinations(Enum.t, integer) :: Enum.t
  def combinations(enum, k) do
    List.last(do_combinations(enum, k))
    |> Enum.uniq
  end

  # Dynamic-programming sweep: element i of the accumulator holds all
  # combinations of length i built from the suffix processed so far.
  defp do_combinations(enum, k) do
    combinations_by_length = [[[]]|List.duplicate([], k)]
    list = Enum.to_list(enum)
    List.foldr list, combinations_by_length, fn x, next ->
      sub = :lists.droplast(next)
      step = [[]|(for l <- sub, do: (for s <- l, do: [x|s]))]
      :lists.zipwith(&:lists.append/2, step, next)
    end
  end

  @doc """
  Returns the number of elements `combinations/2` would have returned.
  ## Examples
      iex> count_combinations([1, 2, 3], 2)
      3
  """
  # BUGFIX: the return type was `Enum.integer`, which is not a type that
  # exists; the function returns a plain integer.
  @spec count_combinations(Enum.t, integer) :: integer
  def count_combinations(enum, k), do: combinations(enum, k) |> Enum.count

  @doc """
  Returns the nth combination that `combinations/2` would have returned. `n` is
  zero based.
  ## Examples
      iex> nth_combination([1, 2, 3], 2, 1)
      [1, 3]
  """
  @spec nth_combination(Enum.t, integer, integer) :: list | no_return
  def nth_combination(enum, k, n), do: combinations(enum, k) |> Enum.fetch!(n)

  @doc """
  Partitions `enum` into any groups in sum containing all elements.
  ## Examples
      iex> partitions(1..2) |> Enum.to_list
      [[[1, 2]], [[1], [2]]]
      iex> partitions([1, 1, 2]) |> Enum.to_list
      [[[1, 1, 2]], [[1, 1], [2]], [[1], [1, 2]], [[1], [1], [2]]]
  """
  @spec partitions(Enum.t) :: Enum.t
  def partitions(enum) do
    list = Enum.to_list enum
    n = Enum.count list
    Enum.flat_map(n..1, &(do_partition_for_size(list, &1)))
    |> Enum.uniq
  end

  defp do_partitions([]), do: [[]]
  defp do_partitions(list) do
    n = Enum.count list
    Enum.flat_map(n..1, &(do_partition_for_size(list, &1)))
  end

  # For a fixed first-group size, pick that group and recursively partition
  # the remainder; groups are kept sorted so duplicates collapse via uniq.
  defp do_partition_for_size(list, size) do
    list
    |> combinations(size)
    |> Enum.flat_map(fn comb ->
      do_partitions(list -- comb)
      |> Enum.map(&(Enum.sort([comb] ++ &1)))
    end)
  end

  @doc """
  Returns a stream containing all permutations of the input `enum`. A
  permutation will contain all elements in a different order. Repeated elements
  are handled sensibly.
  ## Examples
      iex> permutations(1..3) |> Enum.to_list
      [[1, 2, 3], [2, 1, 3], [1, 3, 2], [3, 1, 2], [2, 3, 1], [3, 2, 1]]
      iex> permutations([1, 1, 2]) |> Enum.to_list
      [[1, 1, 2], [1, 2, 1], [2, 1, 1]]
  """
  @spec permutations(Enum.t) :: Enum.t
  defdelegate permutations(enum), to: Comb.TablePermutations
  # At the moment this one returns permutations in incorrect order
  # defdelegate permutations(enum), to: Comb.TablePermutations

  @doc """
  Returns the number of elements `permutations/1` would have returned.
  ## Examples
      iex> count_permutations([1, 2, 3])
      6
      iex> count_permutations([1, 1, 2])
      3
  """
  @spec count_permutations(Enum.t) :: integer
  # Multinomial coefficient: n! divided by the factorial of each element's
  # multiplicity. Uses fully-qualified calls instead of `import Comb.Math`
  # so this module compiles even when Comb.Math is not yet loaded.
  def count_permutations(enum) do
    analysis = Comb.ListAnalyzer.analyze(enum)
    if analysis |> Comb.ListAnalyzer.all_unique? do
      Comb.Math.factorial(analysis.count)
    else
      analysis.freq
      |> Map.values
      |> Enum.map(&Comb.Math.factorial/1)
      |> Enum.reduce(Comb.Math.factorial(analysis.count), &(div(&2, &1)))
    end
  end

  @doc """
  Calculates permutations starting from element `n` where `n` is zero based.
  This is more eficient than combining `permutations/1` and `Stream.drop/2`
  ## Examples
      iex> drop_permutations([1, 2, 3], 2) |> Enum.to_list
      [[1, 3, 2], [3, 1, 2], [2, 3, 1], [3, 2, 1]]
  """
  @spec drop_permutations(Enum.t, integer) :: Enum.t
  def drop_permutations(enum, n), do: permutations(enum) |> Stream.drop(n)

  @doc """
  Returns the nth permutation that `permutations/1` would have returned. `n` is
  zero based.
  ## Examples
      iex> nth_permutation([1, 2, 3], 1)
      [2, 1, 3]
  """
  @spec nth_permutation(Enum.t, integer) :: list
  def nth_permutation(enum, n), do: permutations(enum) |> Enum.fetch!(n)

  @doc """
  Returns the index in the result of what `permutations/1` would return, if
  `permutations/1` was called with `enum` sorted. `n` is zero based.
  ## Examples
      iex> permutation_index([1, 3, 2])
      2
      iex> nth_permutation([1, 2, 3], 2)
      [1, 3, 2]
  """
  @spec permutation_index(Enum.t) :: integer
  def permutation_index(enum) do
    list = Enum.to_list enum
    list
    |> Enum.sort
    |> permutations
    |> Enum.find_index(fn p -> p == list end)
  end

  @doc """
  Returns a list containing all selections of the input `enum`. A selection
  will contain `k` elements each from `enum`.
  ## Examples
      iex> selections(1..3, 2) |> Enum.to_list
      [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3]]
  """
  @spec selections(Enum.t, integer) :: Enum.t
  def selections(_, 0), do: [[]]
  def selections(enum, n) do
    list = Enum.to_list enum
    list
    |> Enum.flat_map(fn el -> Enum.map(selections(list, n - 1), &([el | &1])) end)
  end

  @doc """
  Returns a list containing all sets possible with any combination of elements
  in `enum`.
  ## Examples
      iex> subsets(1..3) |> Enum.to_list
      [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
      iex> subsets([1, 1, 2]) |> Enum.to_list
      [[], [1], [2], [1, 1], [1, 2], [1, 1, 2]]
  """
  @spec subsets(Enum.t) :: Enum.t
  def subsets(enum) do
    n = Enum.count enum
    0..n
    |> Stream.flat_map(&(do_subsets_for_n(enum, &1)))
  end

  defp do_subsets_for_n(enum, n) do
    enum
    |> combinations(n)
  end

  @doc """
  Returns the number of elements `subsets/1` would have returned.
  ## Examples
      iex> count_subsets([1, 2, 3])
      8
  """
  @spec count_subsets(Enum.t) :: integer
  def count_subsets(enum), do: subsets(enum) |> Enum.count
end
|
lib/comb.ex
| 0.82963
| 0.792906
|
comb.ex
|
starcoder
|
defmodule Huth.Client do
  alias Huth.Config
  alias Huth.Token
  @moduledoc """
  `Huth.Client` is the module through which all interaction with Huawei's APIs flows.
  ## Available Options
  The first parameter is either the token scopes or a tuple of the service
  account client email and its scopes.
  See
  [Huawei's Documentation](https://developer.huawei.com/consumer/en/doc/development/HMSCore-Guides-V5/open-platform-oauth-0000001053629189-V5#EN-US_TOPIC_0000001053629189__section12493191334711)
  for more details.
  """
  @doc """
  *Note:* Most often, you'll want to use `Huth.Token.for_scope/1` instead of this method.
  As the docs for `Huth.Token.for_scope/1` note, it will return a cached token if one
  already exists, thus saving you the cost of a round-trip to the server to generate a
  new token.
  `Huth.Client.get_access_token/1`, on the other hand will always hit the server to
  retrieve a new token.
  """
  # Bare scope defaults to the :hns_default account.
  def get_access_token(scope), do: get_access_token({:hns_default, scope}, [])
  def get_access_token(scope, opts) when is_binary(scope) and is_list(opts) do
    get_access_token({:hns_default, scope}, opts)
  end
  # Resolve the account's configured token source and dispatch to the matching
  # get_access_token/3 clause.
  def get_access_token({account, scope}, opts) when is_binary(scope) and is_list(opts) do
    {:ok, token_source} = Config.get(account, :token_source)
    get_access_token(token_source, {account, scope}, opts)
  end
  @doc false
  def get_access_token(source, info, opts \\ [])
  # Fetch an access token from Huawei's metadata service for applications running
  # on Huawei's Cloud platform.
  def get_access_token(type, scope, opts) when is_atom(type) and is_binary(scope) do
    get_access_token(type, {:hns_default, scope}, opts)
  end
  # Fetch an access token from Huawei's OAuth service using client credential
  def get_access_token(:oauth_client_credential, {account, scope}, _opts) do
    # NOTE(review): the :grand_type config key is likely a typo of "grant_type",
    # but it is a public configuration key — renaming it would break existing
    # configs. The form field below is correctly named `grant_type`.
    {:ok, grand_type} = Config.get(account, :grand_type)
    {:ok, client_id} = Config.get(account, :client_id)
    {:ok, client_secret} = Config.get(account, :client_secret)
    endpoint = Application.get_env(:huth, :endpoint, "https://oauth-login.cloud.huawei.com")
    url = "#{endpoint}/oauth2/v2/token"
    body =
      {:form,
       [
         grant_type: grand_type,
         client_id: client_id,
         client_secret: client_secret
       ]}
    headers = [{"Content-Type", "application/x-www-form-urlencoded"}]
    HTTPoison.post(url, body, headers)
    |> handle_response({account, scope})
  end
  defp handle_response(resp, opts, sub \\ nil)
  # Any 2xx response is parsed into a Token.
  defp handle_response({:ok, %{body: body, status_code: code}}, {account, scope}, sub)
       when code in 200..299,
       do: {:ok, Token.from_response_json({account, scope}, sub, body)}
  defp handle_response({:ok, %{body: body}}, _scope, _sub),
    do: {:error, "Could not retrieve token, response: #{body}"}
  # Transport-level errors are passed through unchanged.
  defp handle_response(other, _scope, _sub), do: other
end
|
lib/huth/client.ex
| 0.811601
| 0.403273
|
client.ex
|
starcoder
|
defmodule PSQ do
  @moduledoc """
  PSQ provides a purely-functional implementation of priority search queues. A
  priority search queue is a data structure that efficiently supports both
  associative operations (like those for `Map`) and priority-queue operations
  (akin to heaps in imperative languages). The implementation is based on the
  Haskell
  [PSQueue](https://hackage.haskell.org/package/PSQueue-1.1/docs/Data-PSQueue.html)
  package and the associated paper.

  PSQs can be created from lists in O(n log n) time. Once created, the minimum
  element (`min/1`) and size (`Enum.count/1`) can be accessed in O(1) time; most
  other basic operations (including `get/2`, `pop/1`, and `put/2`, and
  `delete/2`) are in O(log n).

  PSQs implement `Enumerable` and `Collectable`, so all your favorite functions
  from `Enum` and `Stream` should work as expected.

  Each entry in a PSQ has an associated *priority* and *key*. Map-like
  operations, such as `get/2`, use keys to find the entry; all entries in a PSQ
  are unique by key. Ordered operations, such as `pop/1` and `Enum.to_list/1`,
  use priority to determine order (with minimum first). Priorities need not be
  unique by entry; entries with the same priority will be popped in unspecified
  order.

  ## Examples

  There are two primary ways to determine a value's priority and key in a
  queue. The simplest is to start with an empty queue and input values with
  priorities and keys directly, through `put/4`:

      iex> q = PSQ.new |> PSQ.put(:a, "foo", 2) |> PSQ.put(:b, "bar", 1)
      iex> q |> PSQ.get(:a)
      "foo"
      iex> q |> PSQ.min
      "bar"

  Alternatively, you can specify mapper functions to determine key and priority
  for all entries in the queue. This is particularly useful for determining
  custom priorities. For example, here's a simple method to use PSQs for
  max-queues:

      iex> q = PSQ.new(&(-&1))
      iex> q = [?a, ?b, ?c, ?d, ?e] |> Enum.into(q)
      iex> q |> Enum.to_list
      [?e, ?d, ?c, ?b, ?a]

  Here's a queue that orders strings by size, using downcased strings as keys:

      iex> q = PSQ.new(&String.length/1, &String.downcase/1)
      iex> q = ["How", "is", "your", "ocelot"] |> Enum.into(q)
      iex> q |> Enum.to_list
      ["is", "How", "your", "ocelot"]
      iex> q |> PSQ.get("how")
      "How"
      iex> q |> PSQ.get("How")
      nil

  Priority and key mappers are also useful if you're inputting entries that are
  structs or maps and want to use particular fields as keys or priorities. For
  example:

      iex> q = PSQ.new(&(&1[:priority]), &(&1[:key]))
      iex> q = PSQ.put(q, %{priority: 5, key: 1})
      iex> q = PSQ.put(q, %{priority: 2, key: 2})
      iex> q = PSQ.put(q, %{priority: 1, key: 1})
      iex> q |> PSQ.min
      %{priority: 1, key: 1}
      iex> q |> PSQ.get(1)
      %{priority: 1, key: 1}
  """

  # The queue is a "tournament tree": `tree` is either `:void` or a Winner
  # tuple (see `PSQ.Winner` / `PSQ.Loser`). The mapper functions derive each
  # value's key and priority when it is inserted via put/2.
  defstruct tree: :void, key_mapper: nil, priority_mapper: nil

  alias PSQ.Winner
  alias PSQ.Loser
  alias PSQ.Entry

  @type key :: any
  @type value :: any
  @type priority :: any
  @type key_mapper :: (value -> key)
  @type priority_mapper :: (value -> priority)
  @type t :: %__MODULE__{tree: Winner.t,
                         key_mapper: key_mapper,
                         priority_mapper: priority_mapper}

  @doc """
  Returns a new empty PSQ.

  Optional params `priority_mapper` and `key_mapper` are functions to determine
  keys and priorities from values. For example, to create a max-queue of numbers
  instead of a min-queue, pass in `&(-&1)` for `priority_mapper`:

      iex> PSQ.new(&(-&1)) |> PSQ.put(3) |> PSQ.put(5) |> PSQ.put(1) |> Enum.to_list
      [5, 3, 1]

  `key_mapper` is useful if your values are structs where particular fields are
  considered a unique key:

      iex> q = PSQ.new(&(&1[:priority]), &(&1[:key]))
      iex> q = q |> PSQ.put(%{key: 1, priority: 1})
      iex> q = q |> PSQ.put(%{key: 1, priority: 3})
      iex> q |> PSQ.get(1)
      %{key: 1, priority: 3}

  `priority_mapper` and `key_mapper` both default to the identity function.
  """
  @spec new(priority_mapper, key_mapper) :: t
  def new(priority_mapper \\ &(&1), key_mapper \\ &(&1)) do
    %PSQ{key_mapper: key_mapper, priority_mapper: priority_mapper}
  end

  @doc """
  Returns a new PSQ from `list`.

  `priority_mapper` and `key_mapper` behave the same way as for `new/2`.

  ## Examples

      iex> [2, 5, 4, 1, 3] |> PSQ.from_list |> Enum.to_list
      [1, 2, 3, 4, 5]
  """
  @spec from_list(list(value), priority_mapper, key_mapper) :: t
  def from_list(list, priority_mapper \\ &(&1), key_mapper \\ &(&1)) do
    q = new(priority_mapper, key_mapper)
    list |> Enum.into(q)
  end

  @doc """
  Puts the given `value` into the queue, using `priority_mapper` and
  `key_mapper` to determine uniqueness/order (see `new`).

  If a value with the same key already exits in the queue, it will be replaced
  by the new value.

  ## Examples

      iex> q = PSQ.new(&(&1), &trunc/1)
      iex> q = PSQ.put(q, 3.89)
      iex> q = PSQ.put(q, 2.71)
      iex> q = PSQ.put(q, 3.14)
      iex> Enum.to_list(q)
      [2.71, 3.14]
  """
  @spec put(t, value) :: t
  def put(q, value)

  def put(q = %PSQ{priority_mapper: priority_mapper, key_mapper: key_mapper}, val) do
    # Derive key and priority via the queue's mappers, then defer to put/4.
    put(q, key_mapper.(val), val, priority_mapper.(val))
  end

  @doc """
  Puts the given `value` into the queue with specified `key` and
  `priority`.

  When using this function (as opposed to `put/2`), the queue's
  `priority_mapper` and `key_mapper` will be ignored. It is not recommended to
  use both mappers and direct keys/priorities for the same queue.

  ## Examples

      iex> PSQ.new |> PSQ.put(:a, 1, 1) |> PSQ.put(:a, 2, 1) |> PSQ.get(:a)
      2

      iex> PSQ.new |> PSQ.put(:a, 1, 2) |> PSQ.put(:b, 2, 1) |> Enum.to_list
      [2, 1]
  """
  @spec put(t, key, value, priority) :: t
  def put(q, key, val, priority)

  def put(q = %PSQ{tree: tree}, key, val, priority) do
    entry = Entry.new(val, priority, key)
    %PSQ{q | tree: do_put(tree, entry)}
  end

  # Inserts an entry into the tournament tree, replacing any existing entry
  # with the same key.
  @spec do_put(Winner.t, Entry.t) :: Winner.t
  # Empty tree: the new entry is a singleton winner.
  defp do_put(:void, entry), do: Winner.new(entry, :start, Entry.key(entry))
  # Singleton tree: either replace (equal keys) or "play" the two singletons
  # against each other, keeping key order (smaller max-key tree on the left).
  defp do_put(winner = {winner_entry, :start, max_key}, entry) do
    winner_key = Entry.key(winner_entry)
    entry_key = Entry.key(entry)
    cond do
      winner_key < entry_key ->
        play(winner, Winner.new(entry, :start, entry_key))
      winner_key == entry_key ->
        Winner.new(entry, :start, max_key)
      winner_key > entry_key ->
        play(Winner.new(entry, :start, entry_key), winner)
    end
  end
  # Larger tree: split into the two sub-tournaments, insert into the half the
  # key belongs to, and replay the match.
  defp do_put(winner, entry) do
    {t1, t2} = unplay(winner)
    if Entry.key(entry) <= Winner.max_key(t1) do
      play(do_put(t1, entry), t2)
    else
      play(t1, do_put(t2, entry))
    end
  end

  @doc """
  Returns and removes the value with the minimum priority from `q`. The value
  will be `nil` if the queue is empty.

  ## Examples

      iex> q = PSQ.from_list([3, 1])
      iex> {min, q} = PSQ.pop(q)
      iex> min
      1
      iex> {min, q} = PSQ.pop(q)
      iex> min
      3
      iex> {min, q} = PSQ.pop(q)
      iex> min
      nil
      iex> Enum.empty?(q)
      true
  """
  @spec pop(t) :: {value, t}
  def pop(q)

  def pop(q = %PSQ{tree: :void}) do
    {nil, q}
  end

  def pop(q = %PSQ{tree: {entry, loser, max_key}}) do
    # The overall winner is the minimum; rebuild the tree from the loser tree.
    new_winner = second_best(loser, max_key)
    {Entry.value(entry), %PSQ{q | tree: new_winner}}
  end

  @doc """
  Returns the value with the minimum priority from `q`.

  Raises `Enum.EmptyError` if the queue is empty.

  ## Examples

      iex> PSQ.from_list([-2, 3, -5]) |> PSQ.min
      -5

      iex> PSQ.from_list([-2, 3, -5], &(-&1)) |> PSQ.min
      3

      iex> PSQ.new |> PSQ.min
      ** (Enum.EmptyError) empty error
  """
  @spec min(t) :: value | no_return
  def min(q)

  def min(%PSQ{tree: :void}) do
    raise Enum.EmptyError
  end

  def min(%PSQ{tree: tree}) do
    # The tournament winner at the root is always the minimum-priority entry.
    tree |> Winner.entry |> Entry.value
  end

  @doc """
  Gets the value for specified `key`. If the key does not exist, returns `nil`.

  ## Examples

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.get(:a)
      3

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.get(:b)
      nil
  """
  @spec get(t, key) :: value
  def get(q, key) do
    case fetch(q, key) do
      {:ok, val} -> val
      :error -> nil
    end
  end

  @doc """
  Fetches the value for specified `key` and returns in a tuple. Returns
  `:error` if the key does not exist.

  ## Examples

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.fetch(:a)
      {:ok, 3}

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.fetch(:b)
      :error
  """
  @spec fetch(t, key) :: {:ok, value} | :error
  def fetch(q, key)

  def fetch(%PSQ{tree: tree}, key) do
    do_fetch(tree, key)
  end

  @doc """
  Fetches the value for specified `key`.

  If `key` does not exist, a `KeyError` is raised.

  ## Examples

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.fetch!(:a)
      3

      iex> PSQ.new |> PSQ.put(:a, 3, 1) |> PSQ.fetch!(:b)
      ** (KeyError) key :b not found in: #PSQ<min:3 size:1>
  """
  @spec fetch!(t, key) :: value | no_return
  def fetch!(q, key) do
    case fetch(q, key) do
      {:ok, val} -> val
      :error -> raise KeyError, key: key, term: q
    end
  end

  # Binary search by key: max keys on the left sub-tournament bound which half
  # can contain `key`.
  @spec do_fetch(Winner.t, key) :: {:ok, value} | :error
  defp do_fetch(:void, _), do: :error
  defp do_fetch({entry, :start, _}, key) do
    case Entry.key(entry) do
      ^key -> {:ok, Entry.value(entry)}
      _ -> :error
    end
  end
  defp do_fetch(winner, key) do
    {t1, t2} = unplay(winner)
    if key <= Winner.max_key(t1) do
      do_fetch(t1, key)
    else
      do_fetch(t2, key)
    end
  end

  @doc """
  Deletes the value associated with `key` from `q`.

  If `key` does not exist, returns `q` unchanged.

  ## Examples

      iex> PSQ.from_list([3,1,2]) |> PSQ.delete(2) |> Enum.to_list
      [1, 3]

      iex> PSQ.from_list([3,1,2]) |> PSQ.delete(4) |> Enum.to_list
      [1, 2, 3]
  """
  @spec delete(t, key) :: t
  def delete(q, key)

  def delete(q = %PSQ{tree: tree}, key) do
    new_tree = do_delete(tree, key)
    %PSQ{q | tree: new_tree}
  end

  # Like do_fetch/2, but removes the matching singleton and replays the
  # matches on the way back up.
  @spec do_delete(Winner.t, key) :: Winner.t
  defp do_delete(:void, _), do: :void
  defp do_delete(winner = {entry, :start, _}, key) do
    case Entry.key(entry) do
      ^key -> :void
      _ -> winner
    end
  end
  defp do_delete(winner, key) do
    {t1, t2} = unplay(winner)
    if key <= Winner.max_key(t1) do
      play(do_delete(t1, key), t2)
    else
      play(t1, do_delete(t2, key))
    end
  end

  @doc """
  Returns a list of all values from `q` where the value's priority is less than
  or equal to `priority`.

  ## Examples

      iex> PSQ.from_list([1, 3, 2, 5, 4]) |> PSQ.at_most(3)
      [1, 2, 3]
  """
  @spec at_most(t, priority) :: list(value)
  def at_most(q, priority)

  def at_most(%PSQ{tree: tree}, priority) do
    do_at_most(tree, priority)
  end

  @spec do_at_most(Winner.t, priority) :: list(value)
  defp do_at_most(:void, _), do: []
  # If even the winner (minimum priority) is above the bound, no entry in this
  # sub-tournament qualifies -- prune the whole subtree.
  defp do_at_most({{_, _, priority}, _, _}, max_priority) when priority > max_priority do
    []
  end
  defp do_at_most({entry, :start, _}, _) do
    [Entry.value(entry)]
  end
  defp do_at_most(winner, max_priority) do
    {t1, t2} = unplay(winner)
    do_at_most(t1, max_priority) ++ do_at_most(t2, max_priority)
  end

  # "Tournament" functions

  # Plays two adjacent sub-tournaments against each other: the lower-priority
  # winner becomes the combined winner, the other is pushed into the loser
  # tree. Callers maintain the key-order invariant k1 < k2 (left tree's keys
  # strictly below the right tree's) -- note the guard on the final clause.
  @spec play(Winner.t, Winner.t) :: Winner.t
  defp play(:void, t), do: t
  defp play(t, :void), do: t
  defp play({e1, l1, k1}, {e2, l2, k2}) when k1 < k2 do
    p1 = Entry.priority(e1)
    p2 = Entry.priority(e2)
    if p1 <= p2 do
      loser = balance(Loser.new(e2, l1, k1, l2))
      Winner.new(e1, loser, k2)
    else
      loser = balance(Loser.new(e1, l1, k1, l2))
      Winner.new(e2, loser, k2)
    end
  end

  # Inverse of play/2: splits a non-singleton winner back into the two
  # sub-tournaments it was built from, using the top loser's origin to decide
  # which side the overall winner came from.
  @spec unplay(Winner.t) :: {Winner.t, Winner.t}
  defp unplay({winner_entry, loser = {loser_entry, left, split_key, right, _}, max_key}) do
    {left_entry, right_entry} = case Loser.origin(loser) do
      :right -> {winner_entry, loser_entry}
      :left -> {loser_entry, winner_entry}
    end
    {
      Winner.new(left_entry, left, split_key),
      Winner.new(right_entry, right, max_key),
    }
  end

  # Rebuilds a winner tree from a loser tree after the overall winner has been
  # removed (used by pop/1): replays the remaining entries recursively.
  @spec second_best(Loser.t, key) :: Winner.t
  defp second_best(:start, _), do: :void
  defp second_best({entry, left, split_key, right, _}, max_key) do
    key = Entry.key(entry)
    if key <= split_key do
      play(
        Winner.new(entry, left, split_key),
        second_best(right, max_key)
      )
    else
      play(
        second_best(left, split_key),
        Winner.new(entry, right, max_key)
      )
    end
  end

  # Balancing functions

  # Weight-balance factor: a rotation is triggered when one side grows more
  # than 4x heavier than the other.
  @balance_factor 4.0

  @spec balance(Loser.t) :: Loser.t
  defp balance(:start), do: :start
  defp balance(loser = {_, left, _, right, _}) do
    l = Loser.size(left)
    r = Loser.size(right)
    cond do
      l + r < 2 -> loser
      r > (@balance_factor * l) -> balance_left(loser)
      l > (@balance_factor * r) -> balance_right(loser)
      true -> loser
    end
  end

  # Right side too heavy: rotate left (single or double, depending on which
  # grandchild is heavier).
  @spec balance_left(Loser.t) :: Loser.t
  defp balance_left(loser) do
    right = Loser.right(loser)
    rl = Loser.left(right)
    rr = Loser.right(right)
    if Loser.size(rl) < Loser.size(rr) do
      single_left(loser)
    else
      double_left(loser)
    end
  end

  # Left side too heavy: mirror image of balance_left/1.
  @spec balance_right(Loser.t) :: Loser.t
  defp balance_right(loser) do
    left = Loser.left(loser)
    ll = Loser.left(left)
    lr = Loser.right(left)
    if Loser.size(lr) < Loser.size(ll) do
      single_right(loser)
    else
      double_right(loser)
    end
  end

  # Single left rotation; the branch keeps the loser-priority ordering intact
  # by choosing which entry stays at the top.
  @spec single_left(Loser.t) :: Loser.t
  defp single_left(loser) do
    {e1, t1, k1, right, _} = loser
    {e2, t2, k2, t3, _} = right
    if Loser.origin(right) == :left && Entry.priority(e1) <= Entry.priority(e2) do
      new_left = Loser.new(e2, t1, k1, t2)
      Loser.new(e1, new_left, k2, t3)
    else
      new_left = Loser.new(e1, t1, k1, t2)
      Loser.new(e2, new_left, k2, t3)
    end
  end

  # Single right rotation; mirror image of single_left/1.
  @spec single_right(Loser.t) :: Loser.t
  defp single_right(loser) do
    {e1, left, k2, t3, _} = loser
    {e2, t1, k1, t2, _} = left
    if Loser.origin(left) == :right && Entry.priority(e1) <= Entry.priority(e2) do
      new_right = Loser.new(e2, t2, k2, t3)
      Loser.new(e1, t1, k1, new_right)
    else
      new_right = Loser.new(e1, t2, k2, t3)
      Loser.new(e2, t1, k1, new_right)
    end
  end

  # Double rotations: pre-rotate the heavy child, then single-rotate the root.
  @spec double_left(Loser.t) :: Loser.t
  defp double_left({entry, left, split_key, right, _}) do
    single_left(Loser.new(entry, left, split_key, single_right(right)))
  end

  @spec double_right(Loser.t) :: Loser.t
  defp double_right({entry, left, split_key, right, _}) do
    single_right(Loser.new(entry, single_left(left), split_key, right))
  end
end
defimpl Enumerable, for: PSQ do
  # O(1): the loser tree caches its size; the winner entry adds one.
  def count(%PSQ{tree: :void}), do: {:ok, 0}

  def count(%PSQ{tree: winner}) do
    {:ok, PSQ.Loser.size(PSQ.Winner.loser(winner)) + 1}
  end

  # Membership is a keyed lookup, mirroring PSQ.fetch/2.
  def member?(q, element) do
    {:ok, match?({:ok, _}, PSQ.fetch(q, element))}
  end

  # Reduction pops entries one at a time, so enumeration yields values in
  # ascending priority order.
  def reduce(_q, {:halt, acc}, _fun), do: {:halted, acc}
  def reduce(q, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(q, &1, fun)}
  def reduce(%PSQ{tree: :void}, {:cont, acc}, _fun), do: {:done, acc}

  def reduce(q, {:cont, acc}, fun) do
    {min_value, remaining} = PSQ.pop(q)
    reduce(remaining, fun.(min_value, acc), fun)
  end
end
defimpl Collectable, for: PSQ do
  # Collects elements by PSQ.put/2; keys and priorities come from the queue's
  # configured mapper functions.
  def into(original) do
    collector = fn
      acc, {:cont, element} -> PSQ.put(acc, element)
      acc, :done -> acc
      _acc, :halt -> :ok
    end

    {original, collector}
  end
end
defimpl Inspect, for: PSQ do
  import Inspect.Algebra

  # Renders the queue as its minimum entry and size, e.g. #PSQ<min:1 size:3>,
  # or #PSQ<empty> when it holds no entries.
  def inspect(%PSQ{tree: :void}, _opts), do: "#PSQ<empty>"

  def inspect(q, opts) do
    concat([
      "#PSQ<min:",
      to_doc(PSQ.min(q), opts),
      " size:",
      to_doc(Enum.count(q), opts),
      ">",
    ])
  end
end
|
lib/psq.ex
| 0.914958
| 0.679027
|
psq.ex
|
starcoder
|
defmodule Day11 do
  @moduledoc """
  Simulates a grid of energy levels where any cell whose level exceeds 9
  "flashes", raising all eight neighbours' levels and possibly cascading.
  (Looks like Advent of Code 2021, day 11 -- TODO confirm.)
  """

  # Total number of flashes that occur during the first `steps` steps.
  def part1(input, steps \\ 100) do
    grid = parse(input)

    Stream.iterate({grid, 0}, &next_step/1)
    |> Stream.drop(steps)
    |> Enum.take(1)
    |> hd
    |> elem(1)
  end

  # Number of steps until the first state in which every cell's level is 0,
  # i.e. the step on which all cells flash simultaneously.
  def part2(input) do
    grid = parse(input)

    Stream.iterate({grid, 0}, &next_step/1)
    |> Stream.with_index
    |> Stream.drop_while(fn {{grid, _}, _} ->
      Enum.any?(grid, fn {_, level} -> level != 0 end)
    end)
    |> Enum.take(1)
    |> hd
    |> elem(1)
  end

  # Advances the simulation by one step, threading the running flash count.
  defp next_step({grid, num_flashes}) do
    # 1. Every cell's energy level rises by one.
    grid = grid
    |> Enum.map(fn {key, level} -> {key, level + 1} end)
    |> Map.new

    # 2. Cells that went above 9 flash; cascades are handled by flash/3.
    flashes = Enum.filter(Map.keys(grid), fn key ->
      Map.fetch!(grid, key) > 9
    end)

    num_flashes = num_flashes + length(flashes)
    {grid, num_flashes} = flash(grid, flashes, num_flashes)

    # 3. Every cell that flashed (left far negative by the sentinel below)
    #    resets to 0.
    grid = Enum.reduce(grid, grid, fn {key, level}, acc ->
      if level < 0 do
        Map.put(acc, key, 0)
      else
        acc
      end
    end)

    {grid, num_flashes}
  end

  # Flashes each cell in `flashes`, collecting any neighbours pushed over 9
  # into the next wave; recurses until no new flashes occur.
  defp flash(grid, [_ | _] = flashes, num_flashes) do
    # The -1_000_000 sentinel marks a cell as "already flashed this step":
    # further neighbour increments can never lift it back above 9, so each
    # cell flashes at most once per step.
    grid = Enum.reduce(flashes, grid, fn key, grid ->
      Map.put(grid, key, -1_000_000)
    end)

    acc = {grid, []}
    {grid, flashes} = Enum.reduce(flashes, acc, &flash_one/2)
    # Several flashers may push the same neighbour over 9; count it once.
    flashes = Enum.uniq(flashes)
    flash(grid, flashes, num_flashes + length(flashes))
  end

  defp flash(grid, [], num_flashes), do: {grid, num_flashes}

  # Increments all neighbours of `key`, accumulating those that cross 9.
  defp flash_one(key, {grid, flashes}) do
    Enum.reduce(neighbors(grid, key), {grid, flashes},
      fn neighbor, {grid, flashes} ->
        level = Map.fetch!(grid, neighbor) + 1
        grid = Map.put(grid, neighbor, level)
        flashes = if level > 9, do: [neighbor | flashes], else: flashes
        {grid, flashes}
      end)
  end

  # The up-to-eight surrounding coordinates that actually exist in the grid.
  defp neighbors(map, {row, col}) do
    [{row - 1, col - 1}, {row - 1, col}, {row - 1, col + 1},
     {row, col - 1}, {row, col + 1},
     {row + 1, col - 1}, {row + 1, col}, {row + 1, col + 1}]
    |> Enum.filter(&(Map.has_key?(map, &1)))
  end

  # Debug helper. Assumes a 10x10 grid (rows/cols 0..9 hard-coded) -- TODO
  # confirm against the puzzle input. "-" marks the mid-flash sentinel,
  # "*" a level of exactly 0, "+" a cell about to flash.
  def print_grid(grid) do
    Enum.map(0..9, fn row ->
      [Enum.map(0..9, fn col ->
        key = {row, col}
        level = Map.fetch!(grid, key)
        cond do
          level < 0 -> ?\-
          level === 0 -> ?\*
          level > 9 -> ?\+
          true -> ?0 + level
        end
      end), ?\n]
    end)
    |> :io.put_chars
  end

  # Parses lines of digit characters into a %{{row, col} => level} map.
  defp parse(input) do
    Enum.map(input, fn line ->
      String.to_charlist(line)
      |> Enum.map(&(&1 - ?0))
    end)
    |> Enum.with_index
    |> Enum.flat_map(fn {list, row} ->
      Enum.with_index(list)
      |> Enum.map(fn {h, col} -> {{row, col}, h} end)
    end)
    |> Map.new
  end
end
|
day11/lib/day11.ex
| 0.605099
| 0.588623
|
day11.ex
|
starcoder
|
defmodule Chapter9.BinarySearchTree do
  @moduledoc """
  A purely functional binary search tree holding numeric values.

  The empty tree is represented by the atom `:empty_node`; non-empty trees are
  `%BST{}` structs whose left subtree holds smaller values and whose right
  subtree holds larger values. Duplicate inserts leave the tree unchanged.
  """

  defstruct [:value, :left, :right]

  alias Chapter9.BinarySearchTree, as: BST

  @type t :: %BST{value: number, left: BST.t, right: BST.t} | :empty_node

  @doc "Returns a new, empty tree."
  @spec new() :: :empty_node
  def new(), do: :empty_node

  @doc """
  Inserts `value` into the tree, returning the updated tree.
  Inserting a value that is already present returns an equal tree.
  """
  @spec insert(BST.t, number) :: BST.t
  def insert(:empty_node, value), do: %BST{value: value, left: :empty_node, right: :empty_node}

  def insert(%BST{value: v, left: l, right: r}, node) do
    cond do
      v == node -> %BST{value: v, left: l, right: r}
      v < node -> %BST{value: v, left: l, right: insert(r, node)}
      v > node -> %BST{value: v, left: insert(l, node), right: r}
    end
  end

  @doc "Returns the left subtree of a non-empty tree."
  @spec left(BST.t):: BST.t
  def left(%BST{left: left}) do
    left
  end

  @doc "Returns the right subtree of a non-empty tree."
  @spec right(BST.t):: BST.t
  def right(%BST{right: right}) do
    right
  end

  @doc "Returns `true` if `number` is present in the tree."
  @spec contains?(BST.t, number) :: boolean
  def contains?(:empty_node, _), do: false

  def contains?(%BST{value: value, left: left, right: right}, number) do
    cond do
      value == number -> true
      number < value -> contains?(left, number)
      number > value -> contains?(right, number)
    end
  end

  @doc "Returns the tree's values in ascending (in-order) order."
  @spec in_order_traversal(BST.t) :: [number]
  def in_order_traversal(tree), do: do_in_order(tree)

  # Left subtree, then this node's value, then the right subtree.
  # (The original threaded an accumulator that was never used; dropped.)
  @spec do_in_order(BST.t) :: [number]
  defp do_in_order(:empty_node), do: []

  defp do_in_order(%BST{value: value, left: left, right: right}) do
    do_in_order(left) ++ [value] ++ do_in_order(right)
  end

  @doc "Returns the smallest value in the tree, or `nil` if it is empty."
  @spec min(BST.t) :: number | nil
  def min(:empty_node), do: nil
  def min(%BST{value: value, left: :empty_node}), do: value
  def min(%BST{left: left}), do: min(left)

  @doc "Returns the largest value in the tree, or `nil` if it is empty."
  @spec max(BST.t) :: number | nil
  def max(:empty_node), do: nil
  def max(%BST{value: value, right: :empty_node}), do: value
  def max(%BST{right: right}), do: max(right)

  @doc """
  Returns the lowest common ancestor of `a` and `b`.
  Both values must be present in the tree.
  """
  @spec lowest_common_ancestor(BST.t, number, number) :: number
  def lowest_common_ancestor(tree, a, b) when a < b, do: lca(tree, a, b)
  def lowest_common_ancestor(tree, a, b) when a > b, do: lca(tree, b, a)
  # Fix: the original had no clause for a == b and raised FunctionClauseError;
  # the lowest common ancestor of a node with itself is that node.
  def lowest_common_ancestor(tree, a, b) when a == b, do: lca(tree, a, b)

  # The first node that lies between the two values (or equals one of them)
  # is their lowest common ancestor.
  @spec lca(BST.t, number, number) :: number
  defp lca(%BST{value: value}, smaller, larger) when smaller < value and larger > value do
    value
  end

  defp lca(%BST{value: value, left: left}, smaller, larger) when smaller < value and larger < value do
    lca(left, smaller, larger)
  end

  defp lca(%BST{value: value, right: right}, smaller, larger) when smaller > value and larger > value do
    lca(right, smaller, larger)
  end

  defp lca(%BST{value: value}, smaller, larger) when smaller == value or larger == value do
    value
  end
end
|
elixir/epi_book/lib/chapter_9/binary_search_tree.ex
| 0.762336
| 0.883034
|
binary_search_tree.ex
|
starcoder
|
defmodule Elixium.Store.Ledger do
  alias Elixium.Block
  alias Elixium.BlockEncoder
  alias Elixium.Utilities
  use Elixium.Store

  @moduledoc """
  Provides an interface for interacting with the blockchain stored within LevelDB. This
  is where blocks are stored and fetched
  """

  @store_dir "chaindata"
  @ets_name :chaindata

  def initialize do
    initialize(@store_dir)
    # :ordered_set keyed on block index keeps :ets.last/1 pointing at the
    # highest block (see last_block/0).
    :ets.new(@ets_name, [:ordered_set, :public, :named_table])
  end

  @doc """
  Add a block to leveldb, indexing it by its hash (this is the most likely piece of data to be unique)
  """
  def append_block(block) do
    transact @store_dir do
      &Exleveldb.put(&1, String.to_atom(block.hash), BlockEncoder.encode(block))
    end

    # ETS cache row shape: {index, hash, block}.
    :ets.insert(@ets_name, {block.index, block.hash, block})
  end

  @spec drop_block(Block) :: none
  def drop_block(block) do
    transact @store_dir do
      &Exleveldb.delete(&1, String.to_atom(block.hash))
    end

    :ets.delete(@ets_name, block.index)
  end

  @doc """
  Given a block hash, return its contents
  """
  @spec retrieve_block(String.t()) :: Block
  def retrieve_block(hash) do
    # Only check the store if we don't have this hash in our ETS cache.
    # Fix: :ets.match/2 wildcards must be the atoms :_ and :"$1"; the previous
    # charlist patterns ('_' / '$1') were literal lists that never matched, so
    # every lookup fell through to LevelDB. Also, :ets.match/2 returns one
    # *list of bindings* per object, hence the [[block]] pattern.
    case :ets.match(@ets_name, {:_, hash, :"$1"}) do
      [] -> do_retrieve_block_from_store(hash)
      [[block]] -> block
    end
  end

  # Slow path: decode the block from LevelDB.
  defp do_retrieve_block_from_store(hash) do
    # NOTE(review): append_block/1 writes under String.to_atom(block.hash)
    # while this reads with the raw hash binary -- confirm that Exleveldb
    # normalizes keys, otherwise these never round-trip.
    transact @store_dir do
      fn ref ->
        case Exleveldb.get(ref, hash) do
          {:ok, block} -> BlockEncoder.decode(block)
          err -> err
        end
      end
    end
  end

  @doc """
  Return the whole chain from leveldb
  """
  def retrieve_chain do
    # Decode every stored block and sort newest (highest index) first.
    transact @store_dir do
      fn ref ->
        ref
        |> Exleveldb.map(fn {_, block} -> BlockEncoder.decode(block) end)
        |> Enum.sort_by(& &1.index, &>=/2)
      end
    end
  end

  @doc """
  Hydrate ETS with our chain data
  """
  def hydrate do
    # Fix: cache rows as {index, hash, block}, matching the shape written by
    # append_block/1 and matched by retrieve_block/1. The previous code
    # atomized each hash (String.to_atom/1), which both broke cache hits
    # (a binary hash never matches an atom) and leaked one non-GC'd atom per
    # block.
    ets_hydrate = Enum.map(retrieve_chain(), &{&1.index, &1.hash, &1})
    :ets.insert(@ets_name, ets_hydrate)
  end

  @doc """
  Returns the most recent block on the chain
  """
  @spec last_block :: Block
  def last_block do
    # :ordered_set + :ets.last/1 -> the highest index currently cached.
    case :ets.last(@ets_name) do
      :"$end_of_table" -> :err
      key ->
        [{_index, _key, block}] = :ets.lookup(@ets_name, key)
        block
    end
  end

  # Negative heights can never exist.
  @spec block_at_height(integer) :: Atom
  def block_at_height(height) when is_integer(height) and height < 0, do: :none

  @doc """
  Returns the block at a given index
  """
  @spec block_at_height(integer) :: Block
  def block_at_height(height) when is_integer(height) do
    # Indexes are cached in their zero-padded binary encoding, so convert
    # before looking up.
    height =
      height
      |> :binary.encode_unsigned()
      |> Utilities.zero_pad(4)

    block_at_height(height)
  end

  def block_at_height(height) when is_binary(height) do
    case :ets.lookup(@ets_name, height) do
      [] -> :none
      [{_index, _key, block}] -> block
    end
  end

  @doc """
  Returns the last N blocks in the chain
  """
  @spec last_n_blocks(integer) :: list
  def last_n_blocks(n, starting_at \\ :binary.decode_unsigned(last_block().index)) do
    # Clamp at the genesis block and drop heights that are not cached.
    starting_at - (n - 1)
    |> max(0)
    |> Range.new(starting_at)
    |> Enum.map(&block_at_height/1)
    |> Enum.filter(& &1 != :none)
  end

  @doc """
  Returns the number of blocks in the chain
  """
  @spec count_blocks :: integer
  def count_blocks, do: :ets.info(@ets_name, :size)

  def empty?, do: empty?(@store_dir)
end
|
lib/store/ledger.ex
| 0.827445
| 0.492188
|
ledger.ex
|
starcoder
|
defmodule Specify.Options do
  require Specify

  @moduledoc """
  This struct represents the options you can pass
  to a call of `Specify.load/2` (or `YourModule.load/1`).

  ### Metaconfiguration

  Besides making it nice and explicit to have the options listed here,
  `Specify.Options` has itself been defined using `Specify.defconfig/2`,
  which means that it (and thus what default options are passed on to to other Specify configurations)
  can be configured in the same way.
  """

  @doc false
  # Normalizes and validates the `:sources` option. Each entry may be a
  # provider struct, a bare module exposing `new/0`, a `{module, args-map}`
  # pair, or a `{module, fun, args}` tuple; in every case the result must
  # implement `Specify.Provider` (asserted eagerly so misconfiguration fails
  # at load time rather than on first fetch).
  def list_of_sources(sources) do
    res =
      Enum.reduce_while(sources, [], fn
        source, acc ->
          case source do
            # Already a provider struct -- just check the protocol.
            source = %struct_module{} ->
              Protocol.assert_impl!(Specify.Provider, struct_module)
              {:cont, [source | acc]}

            # A bare module: build it via its `new/0` (validated below).
            source when is_atom(source) ->
              parse_source_module(source, acc)

            # {module, args}: build the struct directly from the args map.
            {module, args} when is_atom(module) and is_map(args) ->
              source = struct(module, args)
              Protocol.assert_impl!(Specify.Provider, module)
              {:cont, [source | acc]}

            # {module, fun, args}: call the factory; it must return a struct.
            {module, fun, args} when is_atom(module) and is_atom(fun) and is_map(args) ->
              source = %struct_module{} = Kernel.apply(module, fun, args)
              Protocol.assert_impl!(Specify.Provider, struct_module)
              {:cont, [source | acc]}
          end
      end)

    # reduce_while either halted with an {:error, _} tuple (from
    # parse_source_module/2) or produced the accumulated, reversed list.
    case res do
      {:error, error} -> {:error, error}
      sources_list -> {:ok, Enum.reverse(sources_list)}
    end
  end

  # Builds a source from a bare module via `module.new/0`, halting the
  # enclosing reduce with a descriptive error when no zero-arity `new` exists.
  defp parse_source_module(module, acc) do
    case module.__info__(:functions)[:new] do
      0 ->
        source = %struct_module{} = module.new()
        Protocol.assert_impl!(Specify.Provider, struct_module)
        {:cont, [source | acc]}

      _ ->
        {:halt,
         {:error,
          "`#{inspect(module)}` does not seem to have an appropriate default `new/0` function. Instead, pass a full-fledged struct (like `%#{inspect(module)}{}`), or one use one of the other ways to specify a source. \n\n(See the documentation of `Specify.Options.sources` for more information)"}}
    end
  end

  # The options struct itself is declared through Specify's own DSL, so it can
  # be configured via the same mechanisms it implements.
  Specify.defconfig do
    @doc """
    A list of structures that implement the `Specify.Provider` protocol, which will be used to fetch configuration from.
    Later entries in the list take precedence over earlier entries.
    Defaults always have the lowest precedence, and `:explicit_values` always have the highest precedence.

    A source can be:

    - A struct. Example: `%Specify.Provider.SystemEnv{}`;
    - A module that has a `new/0`-method which returns a struct. Example: `Specify.Provider.SystemEnv`;
    - A tuple, whose first argument is a module and second argument is a map of arguments. This will be turned into a full-blown struct at startup using `Kernel.struct/2`. Example: `{Specify.Provider.SystemEnv, %{prefix: "CY", optional: true}}`;
    - A {module, function, arguments}-tuple, which will be called on startup. It should return a struct. Example: `{Specify.Provider.SystemEnv, :new, ["CY", [optional: true]]}`.

    In all cases, the struct should implement the `Specify.Provider` protocol (and this is enforced at startup).
    """
    field(:sources, &Specify.Options.list_of_sources/1, default: [])

    @doc """
    A list or map (or other enumerable) representing explicit values
    that are to be used instead of what can be found in the implicit sources stack.
    """
    field(:explicit_values, :term, default: [])

    @doc """
    The error to be raised if a missing field which is required has been encountered.
    """
    field(:missing_fields_error, :term, default: Specify.MissingRequiredFieldsError)

    @doc """
    The error to be raised if a field value could not properly be parsed.
    """
    field(:parsing_error, :term, default: Specify.ParsingError)

    @doc """
    When set to `true`, rather than returning the config struct,
    a map is returned with every field-key containing a list of consecutive found values.

    This is useful for debugging.
    """
    field(:explain, :boolean, default: false)
  end

  # Append the metaconfiguration caveats to whatever @moduledoc is currently
  # set (defconfig above may have touched it), by deleting and re-putting the
  # attribute with the extra text concatenated.
  {line_number, existing_moduledoc} = Module.delete_attribute(__MODULE__, :moduledoc) || {0, ""}

  Module.put_attribute(
    __MODULE__,
    :moduledoc,
    {line_number,
     existing_moduledoc <>
       """
       ## Metaconfiguration Gotcha's

       Specify will only be able to find a source after it knows it exists.
       This means that it is impossible to define a different set of sources inside an external source.
       For this special case, Specify will look at the current process' Process dictionary,
       falling back to the Application environment (also known as the Mix environment),
       and finally falling back to an empty list of sources (its default).

       So, from lowest to highest precedence, option values are loaded in this order:

       1. Specify.Options default
       2. Application Environment `:specify`
       3. Process Dictionary `:specify` field
       4. Options passed to `Specify.defconfig`
       5. Options passed to `YourModule.load`

       Requiring Specify to be configured in such an even more general way seems highly unlikely.
       If the current approach does turn out to not be good enough for your use-case,
       please open an issue on Specify's issue tracker.
       """}
  )
end
|
lib/specify/options.ex
| 0.89873
| 0.664171
|
options.ex
|
starcoder
|
defmodule Bonny.Controller do
  @moduledoc """
  `Bonny.Controller` defines controller behaviours and generates boilerplate for generating Kubernetes manifests.

  > A custom controller is a controller that users can deploy and update on a running cluster, independently of the cluster’s own lifecycle. Custom controllers can work with any kind of resource, but they are especially effective when combined with custom resources. The Operator pattern is one example of such a combination. It allows developers to encode domain knowledge for specific applications into an extension of the Kubernetes API.

  Controllers allow for simple `add`, `modify`, `delete`, and `reconcile` handling of custom resources in the Kubernetes API.
  """

  @doc """
  Should return an operation to list resources for watching and reconciliation.

  Bonny.Controller comes with a default implementation
  """
  @callback list_operation() :: K8s.Operation.t()

  @doc """
  Bonny.Controller comes with a default implementation which returns Bonny.Config.config()
  """
  @callback conn() :: K8s.Conn.t()

  # Action Callbacks
  @callback add(map()) :: :ok | :error
  @callback modify(map()) :: :ok | :error
  @callback delete(map()) :: :ok | :error
  @callback reconcile(map()) :: :ok | :error

  @doc false
  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      # @rule accumulates RBAC rules declared in the using module; they are
      # collected into rules/0 by __before_compile__/1 below.
      Module.register_attribute(__MODULE__, :rule, accumulate: true)
      @behaviour Bonny.Controller

      # CRD defaults
      # Derived from the using module's name/config; each may be overridden by
      # setting the corresponding attribute in the controller module.
      @group Bonny.Config.group()
      @kind Bonny.Naming.module_to_kind(__MODULE__)
      @scope :namespaced
      @version Bonny.Naming.module_version(__MODULE__)
      @singular Macro.underscore(Bonny.Naming.module_to_kind(__MODULE__))
      @plural "#{@singular}s"
      @names %{}
      @additional_printer_columns []
      @before_compile Bonny.Controller

      # Each controller supervises its own watch + reconcile stream runners.
      use Supervisor

      @spec start_link(term) :: {:ok, pid}
      def start_link(_) do
        Supervisor.start_link(__MODULE__, %{}, name: __MODULE__)
      end

      @impl true
      def init(_init_arg) do
        conn = conn()
        list_operation = list_operation()

        # Two long-running streams: the watcher reacts to add/modify/delete
        # events; the reconciler periodically re-lists every resource.
        children = [
          {Bonny.Server.AsyncStreamRunner,
           id: __MODULE__.WatchServer,
           name: __MODULE__.WatchServer,
           stream: Bonny.Server.Watcher.get_stream(__MODULE__, conn, list_operation),
           termination_delay: 5_000},
          {Bonny.Server.AsyncStreamRunner,
           id: __MODULE__.ReconcileServer,
           name: __MODULE__.ReconcileServer,
           stream: Bonny.Server.Reconciler.get_stream(__MODULE__, conn, list_operation),
           termination_delay: 30_000}
        ]

        Supervisor.init(
          children,
          strategy: :one_for_one,
          max_restarts: 20,
          max_seconds: 120
        )
      end

      @impl Bonny.Controller
      def list_operation(), do: Bonny.Controller.list_operation(__MODULE__)

      @impl Bonny.Controller
      defdelegate conn(), to: Bonny.Config

      defoverridable list_operation: 0, conn: 0
    end
  end

  @doc false
  # Injected last so it sees the final values of the module attributes set (or
  # overridden) by the using controller module.
  defmacro __before_compile__(%{module: controller}) do
    # Only append the default columns when the controller declared custom ones.
    additional_printer_columns =
      case Module.get_attribute(controller, :additional_printer_columns, []) do
        [] -> quote do: []
        _ -> quote do: @additional_printer_columns ++ Bonny.CRD.default_columns()
      end

    quote do
      @doc """
      Returns the `Bonny.CRD.t()` the controller manages the lifecycle of.
      """
      @spec crd() :: Bonny.CRD.t()
      def crd() do
        %Bonny.CRD{
          group: @group,
          scope: @scope,
          version: @version,
          names: Map.merge(default_names(), @names),
          additional_printer_columns: additional_printer_columns()
        }
      end

      @doc """
      A list of RBAC rules that this controller needs to operate.

      This list will be serialized into the operator manifest when using `mix bonny.gen.manifest`.
      """
      @spec rules() :: list(map())
      def rules() do
        # Each accumulated @rule is an {apiGroup, resources, verbs} tuple.
        Enum.reduce(@rule, [], fn {api, resources, verbs}, acc ->
          rule = %{
            apiGroups: [api],
            resources: resources,
            verbs: verbs
          }

          [rule | acc]
        end)
      end

      # Names derived from the module unless overridden via @names.
      @spec default_names() :: map()
      defp default_names() do
        %{
          plural: @plural,
          singular: @singular,
          kind: @kind,
          shortNames: nil
        }
      end

      defp additional_printer_columns(), do: unquote(additional_printer_columns)
    end
  end

  # Builds the list operation used by both the watcher and the reconciler;
  # namespaced CRDs are listed within the operator's configured namespace.
  @spec list_operation(module()) :: K8s.Operation.t()
  def list_operation(controller) do
    crd = controller.crd()
    api_version = Bonny.CRD.api_version(crd)
    kind = Bonny.CRD.kind(crd)

    case crd.scope do
      :namespaced -> K8s.Client.list(api_version, kind, namespace: Bonny.Config.namespace())
      _ -> K8s.Client.list(api_version, kind)
    end
  end
end
|
lib/bonny/controller.ex
| 0.915672
| 0.464537
|
controller.ex
|
starcoder
|
defmodule Aoc2019Day1 do
  @moduledoc """
  Advent of Code 2019, Day 1: fuel requirements for launching modules.
  """

  @doc """
  Fuel required to launch a given module is based on its mass: take its
  mass, divide by three, round down, and subtract 2.

  ## Examples

      iex> Aoc2019Day1.calculate_fuel(12)
      2
      iex> Aoc2019Day1.calculate_fuel(1969)
      654
      iex> Aoc2019Day1.calculate_fuel(100756)
      33583

  """
  @spec calculate_fuel(integer()) :: integer()
  def calculate_fuel(mass) do
    # `div/2` is integer division (truncates toward zero), matching the old
    # `trunc(mass / 3)` without the integer -> float -> integer round-trip,
    # which could lose precision for very large masses.
    div(mass, 3) - 2
  end

  @doc """
  Sums the per-module fuel (part 1), ignoring the fuel's own mass.
  """
  @spec sum_of_fuel([integer()]) :: integer()
  def sum_of_fuel(modules) do
    modules |> Enum.map(&calculate_fuel/1) |> Enum.sum()
  end

  @doc """
  Part 1: total fuel for all module masses.
  """
  @spec solve_1([integer()]) :: integer()
  def solve_1(modules) do
    sum_of_fuel(modules)
  end

  @doc """
  Total fuel for a mass, accounting for the fuel's own mass recursively:
  fuel itself requires fuel, and so on, until the requirement is zero or
  negative (which is treated as requiring no further fuel).
  """
  @spec total_fuel(integer(), integer()) :: integer()
  def total_fuel(mass, acc) do
    fuel = calculate_fuel(mass)

    if fuel <= 0 do
      acc
    else
      total_fuel(fuel, acc + fuel)
    end
  end

  @spec total_fuel(integer()) :: integer()
  def total_fuel(mass) do
    total_fuel(mass, 0)
  end

  @doc """
  Sums the recursive fuel requirement (part 2) over all module masses.
  """
  @spec sum_total_fuel([integer()]) :: integer()
  def sum_total_fuel(modules) do
    modules |> Enum.map(&total_fuel/1) |> Enum.sum()
  end

  @doc """
  Part 2: total fuel for all modules, including fuel for the fuel.
  """
  @spec solve_2([integer()]) :: integer()
  def solve_2(modules) do
    sum_total_fuel(modules)
  end
end
|
lib/aoc2019_1_1.ex
| 0.795579
| 0.780035
|
aoc2019_1_1.ex
|
starcoder
|
defmodule Cassandra.Statement do
  @moduledoc """
  A struct carrying a CQL query through its prepare/execute lifecycle,
  together with the `DBConnection.Query` protocol implementation that
  encodes execute requests and decodes server responses for it.
  """

  defstruct [
    :query,                 # raw CQL string
    :options,               # execute options (minus :values)
    :params,                # CQL.QueryParams built at encode time
    :prepared,              # prepared-statement metadata from the server
    :request,               # encoded wire request (prepare or execute)
    :response,              # raw wire response
    :keyspace,              # keyspace, taken from prepared metadata
    :partition_key,         # partition key extracted from the current values
    :partition_key_picker,  # fun(values) -> partition key, if derivable
    :values,                # bind values for the query
    :connections,
    :streamer,
  ]

  @doc """
  Builds a new statement for `query`.

  The `:values` entry is split out of `options` into the `values` field
  (defaulting to `[]`); the remaining options are stored as-is.
  """
  def new(query, options \\ []) do
    %__MODULE__{
      query: query,
      options: Keyword.delete(options, :values),
      values: Keyword.get(options, :values, []),
    }
  end

  @doc """
  Like `new/2`, but lazily fills in a `:consistency` option from `defaults`
  when the caller did not provide one (read vs write consistency is chosen
  by inspecting the query text).
  """
  def new(query, options, defaults) do
    options = Keyword.put_new_lazy options, :consistency, fn ->
      consistency(query, defaults)
    end
    new(query, options)
  end

  @doc """
  Stores `values` on the statement and recomputes its partition key.
  """
  def put_values(statement, values) do
    partition_key = partition_key(statement, values)
    %__MODULE__{statement | partition_key: partition_key, values: values}
  end

  @doc """
  Recomputes the partition key from the statement's current values.
  """
  def update_pk(%__MODULE__{values: values} = statement) do
    put_values(statement, values || [])
  end

  @doc """
  Attaches prepared-statement metadata, clears transient wire state,
  derives a partition-key picker from the metadata, and recomputes the
  partition key.
  """
  def put_prepared(statement, prepared) do
    %__MODULE__{statement | prepared: prepared}
    |> clean
    |> set_pk_picker
    |> update_pk
  end

  @doc """
  Clears per-round-trip state (request, response, connections).
  """
  def clean(statement) do
    %__MODULE__{statement | request: nil, response: nil, connections: nil}
  end

  # A picker is already set; keep it.
  def set_pk_picker(%__MODULE__{partition_key_picker: picker} = statement)
  when is_function(picker)
  do
    statement
  end

  # Derive a picker from prepared metadata. Note: only single-column
  # partition keys (pk_indices: [index]) are handled here; composite keys
  # fall through to the catch-all clause below and get no picker.
  def set_pk_picker(%__MODULE__{prepared: %{metadata: %{global_spec: %{keyspace: keyspace}, pk_indices: [index]}}} = statement) do
    %__MODULE__{statement | partition_key_picker: &Enum.at(&1, index), keyspace: keyspace}
  end

  def set_pk_picker(statement), do: statement

  # Partition key is the picked element of the bind values, or nil when no
  # picker could be derived.
  defp partition_key(%__MODULE__{partition_key_picker: picker}, values)
  when is_function(picker)
  do
    picker.(values)
  end

  defp partition_key(_, _), do: nil

  # Choose the default consistency level based on whether the query reads
  # or writes; falls back to :quorum when defaults carry neither key.
  defp consistency(query, defaults) do
    key =
      if read?(query) do
        :read_consistency
      else
        :write_consistency
      end
    Keyword.get(defaults, key, :quorum)
  end

  # Only queries literally starting with "SELECT" count as reads.
  defp read?("SELECT" <> _), do: true
  defp read?(_), do: false

  defimpl DBConnection.Query do
    alias Cassandra.Statement

    # Encode an EXECUTE frame for the prepared statement with the given
    # bind values. On encode failure the CQL error term is returned as-is.
    def encode(statement, values, options) do
      params =
        (statement.options || [])
        |> Keyword.merge(options)
        |> Keyword.put(:values, values)
        |> CQL.QueryParams.new
      execute = %CQL.Execute{prepared: statement.prepared, params: params}
      with {:ok, request} <- CQL.encode(execute) do
        {request, params}
      end
    end

    # Rows results are decoded eagerly into row data.
    def decode(_statement, %CQL.Result.Rows{} = rows, _options) do
      CQL.Result.Rows.decode_rows(rows)
    end

    # Any other response: decode the frame and return its body (or the
    # decode error term as-is).
    def decode(_statement, result, _options) do
      with {:ok, %CQL.Frame{body: body}} <- CQL.decode(result) do
        body
      end
    end

    # Decode the PREPARED response; either return the bare prepared
    # metadata (for caching) or fold it into the statement.
    def describe(statement, options) do
      with {:ok, %CQL.Frame{body: %CQL.Result.Prepared{} = prepared}} <- CQL.decode(statement.response) do
        if options[:for_cache] do
          prepared
        else
          Statement.put_prepared(statement, prepared)
        end
      end
    end

    # Encode a PREPARE frame for the raw query and stash it as the pending
    # request.
    def parse(statement, _options) do
      prepare = %CQL.Prepare{query: statement.query}
      with {:ok, request} <- CQL.encode(prepare) do
        %Statement{statement | request: request}
      end
    end
  end
end
|
lib/cassandra/statement.ex
| 0.698946
| 0.485966
|
statement.ex
|
starcoder
|
defmodule Snitch.Tools.Helper.Taxonomy do
  @moduledoc """
  Provides helper functions to easily create a taxonomy.
  """

  alias Snitch.Domain.Taxonomy, as: TaxonomyDomain
  alias Snitch.Data.Schema.{Taxon, Taxonomy}
  alias Snitch.Core.Tools.MultiTenancy.Repo

  @doc """
  Creates taxonomy from the hierarchy passed.

  Structure of hierarchy should be in following format

      {"Brands",
       [
         {"Bags", []},
         {"Mugs", []},
         {"Clothing",
          [
            {"Shirts", []},
            {"T-Shirts", []}
          ]}
       ]}

  """
  @spec create_taxonomy({String.t(), []}) :: Taxonomy.t()
  def create_taxonomy({parent, children}) do
    # Insert the taxonomy record itself, named after the root node.
    taxonomy =
      %Taxonomy{name: parent}
      |> Taxonomy.changeset()
      |> Repo.insert!()

    # The root taxon shares the taxonomy's name and anchors the tree.
    taxon = Repo.preload(%Taxon{name: parent, taxonomy_id: taxonomy.id}, :taxonomy)
    root = TaxonomyDomain.add_root(taxon)

    # Recursively attach each child subtree under the root.
    for taxon <- children do
      create_taxon(taxon, root)
    end

    # Point the taxonomy at its persisted root and return the updated record.
    taxonomy
    |> Taxonomy.changeset(%{root_id: root.id})
    |> Repo.update!()
  end

  # Inserts one {name, children} subtree under `root`, then recurses into
  # `children` with the newly inserted taxon as their parent.
  defp create_taxon({parent, children}, root) do
    child =
      Repo.preload(%Taxon{name: parent, taxonomy_id: root.taxonomy_id, parent_id: root.id}, [
        :taxonomy,
        :parent
      ])

    # NOTE: `root` is rebound here to the value returned by add_taxon —
    # presumably the persisted child taxon — so the recursion below nests
    # grandchildren under it.
    root = TaxonomyDomain.add_taxon(root, child, :child)

    for taxon <- children do
      create_taxon(taxon, root)
    end
  end

  @doc """
  Converts a taxonomy (with its taxon tree loaded) into a plain map of
  `id`, `name` and the recursively converted `root` subtree.
  """
  def convert_to_map(taxonomy) do
    root = convert_taxon(taxonomy.taxons)

    %{
      id: taxonomy.id,
      name: taxonomy.name,
      root: root
    }
  end

  @doc """
  Returns the taxon's image URL, or `nil` when no image is attached.
  """
  def image_url(taxon) do
    taxon = Repo.preload(taxon, taxon_image: :image)

    case taxon.taxon_image do
      nil ->
        nil

      _ ->
        TaxonomyDomain.image_url(taxon.taxon_image.image.name, taxon)
    end
  end

  @doc """
  Converts a `{taxon, children}` tree into a nested plain map; an empty
  list converts to an empty list.
  """
  def convert_taxon([]) do
    []
  end

  def convert_taxon({taxon, children}) do
    %{
      id: taxon.id,
      name: taxon.name,
      pretty_name: "",
      permlink: "",
      parent_id: taxon.parent_id,
      taxonomy_id: taxon.taxonomy_id,
      image_url: image_url(taxon),
      taxons: Enum.map(children, &convert_taxon/1)
    }
  end
end
|
apps/snitch_core/lib/core/tools/helpers/taxonomy.ex
| 0.703957
| 0.549641
|
taxonomy.ex
|
starcoder
|
defmodule AdventOfCode.Day16 do
  @moduledoc ~S"""
  [Advent Of Code day 16](https://adventofcode.com/2018/day/16).
  """

  alias __MODULE__.Ops

  # Part 1: count samples that behave like at least three opcodes.
  def solve("1", {raw_samples, _}) do
    samples = parse_samples(raw_samples)
    Enum.count(samples, fn sample -> Enum.count(opcodes_match(sample)) >= 3 end)
  end

  # Part 2: deduce the opcode-id mapping from the samples, then run the
  # program starting from all-zero registers and return register 0.
  def solve("2", {raw_samples, ops}) do
    opcodes_map = resolve_opcodes_map(raw_samples)

    ops
    |> String.split("\n")
    |> Enum.map(&parse_line/1)
    |> Enum.reduce(reg_to_map([0, 0, 0, 0]), fn [op_id | op_args], reg ->
      Map.get(opcodes_map, op_id) |> Ops.op(op_args, reg)
    end)
    |> Enum.map(&elem(&1, 1))
    |> hd()
  end

  @doc ~S"""
      iex> opcodes_match([[3, 2, 1, 1], [9, 2, 1, 2], [3, 2, 2, 1]])
      [:addi, :mulr, :seti]
  """
  def opcodes_match(sample), do: opcodes_match(sample, Ops.codes())

  # An opcode matches a sample when applying it to the "before" registers
  # yields exactly the "after" registers.
  def opcodes_match([reg_before, [_op_id | op_args], reg_after], candidates) do
    ra = reg_to_map(reg_after)
    Enum.filter(candidates, fn c -> Ops.op(c, op_args, reg_to_map(reg_before)) == ra end)
  end

  # Narrow candidates per sample, then eliminate until every id maps to
  # exactly one opcode.
  defp resolve_opcodes_map(raw_samples) do
    raw_samples
    |> parse_samples()
    |> opcodes_map_from_samples()
    |> resolve([])
    |> Enum.into(%{})
  end

  # Start with every opcode as a candidate for every id, then intersect
  # with what each sample allows. Ids already narrowed to one candidate
  # are skipped.
  defp opcodes_map_from_samples(samples) do
    candidates_map = Enum.into(0..(Enum.count(Ops.codes()) - 1), %{}, fn i -> {i, Ops.codes()} end)

    Enum.reduce(samples, candidates_map, fn [_, [op_id | _], _] = sample, candidates_map ->
      case Map.get(candidates_map, op_id) do
        [_] ->
          candidates_map

        candidates ->
          Map.put(candidates_map, op_id, opcodes_match(sample, candidates))
      end
    end)
  end

  # Constraint propagation: repeatedly pick an id with a single remaining
  # candidate, record it, and remove that opcode from all other ids, until
  # all opcodes are resolved.
  defp resolve(opcodes_map, resolved) do
    {id, [opcode]} = Enum.find(opcodes_map, fn {_id, opcodes} -> Enum.count(opcodes) == 1 end)
    resolved = [{id, opcode} | resolved]
    if Enum.count(resolved) == Ops.count(), do: resolved, else: drop_candidate(opcode, opcodes_map) |> resolve(resolved)
  end

  defp drop_candidate(candidate, opcodes_map) do
    Enum.map(opcodes_map, fn {id, opcodes} -> {id, List.delete(opcodes, candidate)} end)
  end

  # Registers are kept as a 0..3 -> value map.
  defp reg_to_map([a, b, c, d]), do: %{0 => a, 1 => b, 2 => c, 3 => d}

  # Samples are blank-line separated; each sample is three lines
  # (before-registers, instruction, after-registers).
  defp parse_samples(samples), do: String.split(samples, "\n\n") |> Enum.map(&parse_sample/1)

  defp parse_sample(sample), do: sample |> String.split("\n") |> Enum.map(&parse_line/1)

  # Extract all integers from a line, ignoring surrounding text/brackets.
  defp parse_line(str), do: Regex.scan(~r/\d+/, str) |> Enum.map(fn [v] -> String.to_integer(v) end)
end
|
lib/advent_of_code/day_16.ex
| 0.617743
| 0.710126
|
day_16.ex
|
starcoder
|
defmodule GenStage do
@moduledoc ~S"""
Stages are data-exchange steps that send and/or receive data
from other stages.
When a stage sends data, it acts as a producer. When it receives
data, it acts as a consumer. Stages may take both producer and
consumer roles at once.
## Stage types
Besides taking both producer and consumer roles, a stage may be
called "source" if it only produces items or called "sink" if it
only consumes items.
For example, imagine the stages below where A sends data to B
that sends data to C:
[A] -> [B] -> [C]
we conclude that:
* A is only a producer (and therefore a source)
* B is both producer and consumer
* C is only a consumer (and therefore a sink)
As we will see in the upcoming Examples section, we must
specify the type of the stage when we implement each of them.
To start the flow of events, we subscribe consumers to
producers. Once the communication channel between them is
established, consumers will ask the producers for events.
We typically say the consumer is sending demand upstream.
Once demand arrives, the producer will emit items, never
emitting more items than the consumer asked for. This provides
a back-pressure mechanism.
A consumer may have multiple producers and a producer may have
multiple consumers. When a consumer asks for data, each producer
is handled separately, with its own demand. When a producer
receives demand and sends data to multiple consumers, the demand
is tracked and the events are sent by a dispatcher. This allows
producers to send data using different "strategies". See
`GenStage.Dispatcher` for more information.
Many developers tend to create layers of stages, such as A, B and
C, for achieving concurrency. If all you want is concurrency, starting
multiple instances of the same stage is enough. Layers in GenStage must
be created when there is a need for back-pressure or to route the data
in different ways.
For example, if you need the data to go over multiple steps but
without a need for back-pressure or without a need to break the
data apart, do not design it as such:
[Producer] -> [Step 1] -> [Step 2] -> [Step 3]
Instead it is better to design it as:
[Consumer]
/
[Producer]-<-[Consumer]
\
[Consumer]
where "Consumer" are multiple processes running the same code that
subscribe to the same "Producer".
## Example
Let's define the simple pipeline below:
[A] -> [B] -> [C]
where A is a producer that will emit items starting from 0,
B is a producer-consumer that will receive those items and
multiply them by a given number and C will receive those events
and print them to the terminal.
Let's start with A. Since A is a producer, its main
responsibility is to receive demand and generate events.
Those events may be in memory or an external queue system.
For simplicity, let's implement a simple counter starting
from a given value of `counter` received on `init/1`:
defmodule A do
use GenStage
def start_link(number) do
GenStage.start_link(A, number)
end
def init(counter) do
{:producer, counter}
end
def handle_demand(demand, counter) when demand > 0 do
# If the counter is 3 and we ask for 2 items, we will
# emit the items 3 and 4, and set the state to 5.
events = Enum.to_list(counter..counter+demand-1)
{:noreply, events, counter + demand}
end
end
B is a producer-consumer. This means it does not explicitly
handle the demand because the demand is always forwarded to
its producer. Once A receives the demand from B, it will send
events to B which will be transformed by B as desired. In
our case, B will receive events and multiply them by a number
given on initialization and stored as the state:
defmodule B do
use GenStage
def start_link(number) do
GenStage.start_link(B, number)
end
def init(number) do
{:producer_consumer, number}
end
def handle_events(events, _from, number) do
events = Enum.map(events, & &1 * number)
{:noreply, events, number}
end
end
C will finally receive those events and print them every second
to the terminal:
defmodule C do
use GenStage
def start_link() do
GenStage.start_link(C, :ok)
end
def init(:ok) do
{:consumer, :the_state_does_not_matter}
end
def handle_events(events, _from, state) do
# Wait for a second.
Process.sleep(1000)
# Inspect the events.
IO.inspect(events)
# We are a consumer, so we would never emit items.
{:noreply, [], state}
end
end
Now we can start and connect them:
{:ok, a} = A.start_link(0) # starting from zero
{:ok, b} = B.start_link(2) # multiply by 2
{:ok, c} = C.start_link() # state does not matter
GenStage.sync_subscribe(c, to: b)
GenStage.sync_subscribe(b, to: a)
Typically, we subscribe from bottom to top. Since A will
start producing items only when B connects to it, we want this
subscription to happen when the whole pipeline is ready. After
you subscribe all of them, demand will start flowing upstream and
events downstream.
When implementing consumers, we often set the `:max_demand` and
`:min_demand` on subscription. The `:max_demand` specifies the
maximum amount of events that must be in flow while the `:min_demand`
specifies the minimum threshold to trigger for more demand. For
example, if `:max_demand` is 1000 and `:min_demand` is 750,
the consumer will ask for 1000 events initially and ask for more
only after it receives at least 250.
In the example above, B is a `:producer_consumer` and therefore
acts as a buffer. Getting the proper demand values in B is
important: making the buffer too small may make the whole pipeline
slower, making the buffer too big may unnecessarily consume
memory.
When such values are applied to the stages above, it is easy
to see the producer works in batches. The producer A ends-up
emitting batches of 50 items which will take approximately
50 seconds to be consumed by C, which will then request another
batch of 50 items.
## `init` and `:subscribe_to`
In the example above, we have started the processes A, B, and C
independently and subscribed them later on. But most often it is
simpler to subscribe a consumer to its producer on its `c:init/1`
callback. This way, if the consumer crashes, restarting the consumer
will automatically re-invoke its `c:init/1` callback and resubscribe
it to the producer.
This approach works as long as the producer can be referenced when
the consumer starts - such as by name for a named process. For example,
if we change the process `A` and `B` to be started as follows:
# Let's call the stage in module A as A
GenStage.start_link(A, 0, name: A)
# Let's call the stage in module B as B
GenStage.start_link(B, 2, name: B)
# No need to name consumers as they won't be subscribed to
GenStage.start_link(C, :ok)
We can now change the `c:init/1` callback for C to the following:
def init(:ok) do
{:consumer, :the_state_does_not_matter, subscribe_to: [B]}
end
Subscription options as outlined in `sync_subscribe/3` can also be
given by making each subscription a tuple, with the process name or
pid as first element and the options as second:
def init(:ok) do
{:consumer, :the_state_does_not_matter, subscribe_to: [{B, options}]}
end
Similarly, we should change `B` to subscribe to `A` on `c:init/1`. Let's
also set `:max_demand` to 10 when we do so:
def init(number) do
{:producer_consumer, number, subscribe_to: [{A, max_demand: 10}]}
end
And we will no longer need to call `sync_subscribe/2`.
Another advantage of using `subscribe_to` is that it makes it straightforward
to leverage concurrency by simply starting multiple consumers that subscribe
to their producer (or producer-consumer). This can be done in the example above
by simply calling start link multiple times:
# Start 4 consumers
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
GenStage.start_link(C, :ok)
In a supervision tree, this is often done by starting multiple workers. Typically
we update each `c:start_link/1` call to start a named process:
def start_link(number) do
GenStage.start_link(A, number, name: A)
end
And the same for module `B`:
def start_link(number) do
GenStage.start_link(B, number, name: B)
end
Module `C` does not need to be updated because it won't be subscribed to.
Then we can define our supervision tree like this:
children = [
worker(A, [0]),
worker(B, [2]),
worker(C, []),
worker(C, []),
worker(C, []),
worker(C, [])
]
Supervisor.start_link(children, strategy: :rest_for_one)
Having multiple consumers is often the easiest and simplest way to leverage
concurrency in a GenStage pipeline, especially if events can be processed out
of order.
Also note that we set the supervision strategy to `:rest_for_one`. This
is important because if the producer A terminates, all of the other
processes will terminate too, since they are consuming events produced
by A. In this scenario, the supervisor will see multiple processes shutting
down at the same time, and conclude there are too many failures in a short
interval. However, if the strategy is `:rest_for_one`, the supervisor will
shut down the rest of tree, and already expect the remaining process to fail.
One downside of `:rest_for_one` though is that if a `C` process dies, any other
`C` process after it will die too. You can solve this by putting them under
their own supervisor.
Another alternative to the scenario above is to use a `ConsumerSupervisor`
for consuming the events instead of N consumers. The `ConsumerSupervisor`
will communicate with the producer respecting the back-pressure properties
and start a separate supervised process per event. The number of children
concurrently running in a `ConsumerSupervisor` is at most `max_demand` and
the average amount of children is `(max_demand + min_demand) / 2`.
## Usage guidelines
As you get familiar with GenStage, you may want to organize your stages
according to your business domain. For example, stage A does step 1 in
your company workflow, stage B does step 2 and so forth. That's an anti-
pattern.
The same guideline that applies to processes also applies to GenStage:
use processes/stages to model runtime properties, such as concurrency and
data-transfer, and not for code organization or domain design purposes.
For the latter, you should use modules and functions.
If your domain has to process the data in multiple steps, you should write
that logic in separate modules and not directly in a `GenStage`. You only add
stages according to the runtime needs, typically when you need to provide back-
pressure or leverage concurrency. This way you are free to experiment with
different `GenStage` pipelines without touching your business rules.
Finally, if you don't need back-pressure at all and you just need to process
data that is already in-memory in parallel, a simpler solution is available
directly in Elixir via `Task.async_stream/2`. This function consumes a stream
of data, with each entry running in a separate task. The maximum number of tasks
is configurable via the `:max_concurrency` option.
## Buffering
In many situations, producers may attempt to emit events while no consumers
have yet subscribed. Similarly, consumers may ask producers for events
that are not yet available. In such cases, it is necessary for producers
to buffer events until a consumer is available or buffer the consumer
demand until events arrive, respectively. As we will see next, buffering
events can be done automatically by `GenStage`, while buffering the demand
is a case that must be explicitly considered by developers implementing
producers.
### Buffering events
Due to the concurrent nature of Elixir software, sometimes a producer
may dispatch events without consumers to send those events to. For example,
imagine a `:consumer` B subscribes to `:producer` A. Next, the consumer B
sends demand to A, which starts producing events to satisfy the demand.
Now, if the consumer B crashes, the producer may attempt to dispatch the
now produced events but it no longer has a consumer to send those events to.
In such cases, the producer will automatically buffer the events until another
consumer subscribes. Note however, all of the events being consumed by
`B` in its `handle_events` at the moment of the crash will be lost.
The buffer can also be used in cases where external sources only send
events in batches larger than asked for. For example, if you are
receiving events from an external source that only sends events
in batches of 1000 and the internal demand is smaller than
that, the buffer allows you to always emit batches of 1000 events
even when the consumer has asked for less.
In all of those cases when an event cannot be sent immediately by
a producer, the event will be automatically stored and sent the next
time consumers ask for events. The size of the buffer is configured
via the `:buffer_size` option returned by `init/1` and the default
value is `10_000`. If the `buffer_size` is exceeded, an error is logged.
See the documentation for `c:init/1` for more detailed information about
the `:buffer_size` option.
### Buffering demand
In case consumers send demand and the producer is not yet ready to
fill in the demand, producers must buffer the demand until data arrives.
As an example, let's implement a producer that broadcasts messages
to consumers. For producers, we need to consider two scenarios:
1. what if events arrive and there are no consumers?
2. what if consumers send demand and there are not enough events?
One way to implement such a broadcaster is to simply rely on the internal
buffer available in `GenStage`, dispatching events as they arrive, as explained
in the previous section:
defmodule Broadcaster do
use GenStage
@doc "Starts the broadcaster."
def start_link() do
GenStage.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc "Sends an event and returns only after the event is dispatched."
def sync_notify(event, timeout \\ 5000) do
GenStage.call(__MODULE__, {:notify, event}, timeout)
end
def init(:ok) do
{:producer, :ok, dispatcher: GenStage.BroadcastDispatcher}
end
def handle_call({:notify, event}, _from, state) do
{:reply, :ok, [event], state} # Dispatch immediately
end
def handle_demand(_demand, state) do
{:noreply, [], state} # We don't care about the demand
end
end
By always sending events as soon as they arrive, if there is any demand,
we will serve the existing demand, otherwise the event will be queued in
`GenStage`'s internal buffer. In case events are being queued and not being
consumed, a log message will be emitted when we exceed the `:buffer_size`
configuration.
While the implementation above is enough to solve the constraints above,
a more robust implementation would have tighter control over the events
and demand by tracking this data locally, leaving the `GenStage` internal
buffer only for cases where consumers crash without consuming all data.
To handle such cases, we will use a two-element tuple as the broadcaster state
where the first element is a queue and the second element is the pending
demand. When events arrive and there are no consumers, we will store the
event in the queue alongside information about the process that broadcast
the event. When consumers send demand and there are not enough events, we will
increase the pending demand. Once we have both data and demand, we
acknowledge the process that has sent the event to the broadcaster and finally
broadcast the event downstream.
defmodule QueueBroadcaster do
use GenStage
@doc "Starts the broadcaster."
def start_link() do
GenStage.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc "Sends an event and returns only after the event is dispatched."
def sync_notify(event, timeout \\ 5000) do
GenStage.call(__MODULE__, {:notify, event}, timeout)
end
## Callbacks
def init(:ok) do
{:producer, {:queue.new, 0}, dispatcher: GenStage.BroadcastDispatcher}
end
def handle_call({:notify, event}, from, {queue, pending_demand}) do
queue = :queue.in({from, event}, queue)
dispatch_events(queue, pending_demand, [])
end
def handle_demand(incoming_demand, {queue, pending_demand}) do
dispatch_events(queue, incoming_demand + pending_demand, [])
end
defp dispatch_events(queue, 0, events) do
{:noreply, Enum.reverse(events), {queue, 0}}
end
defp dispatch_events(queue, demand, events) do
case :queue.out(queue) do
{{:value, {from, event}}, queue} ->
GenStage.reply(from, :ok)
dispatch_events(queue, demand - 1, [event | events])
{:empty, queue} ->
{:noreply, Enum.reverse(events), {queue, demand}}
end
end
end
Let's also implement a consumer that automatically subscribes to the
broadcaster on `c:init/1`. The advantage of doing so on initialization
is that, if the consumer crashes while it is supervised, the subscription
is automatically re-established when the supervisor restarts it.
defmodule Printer do
use GenStage
@doc "Starts the consumer."
def start_link() do
GenStage.start_link(__MODULE__, :ok)
end
def init(:ok) do
# Starts a permanent subscription to the broadcaster
# which will automatically start requesting items.
{:consumer, :ok, subscribe_to: [QueueBroadcaster]}
end
def handle_events(events, _from, state) do
for event <- events do
IO.inspect {self(), event}
end
{:noreply, [], state}
end
end
With the broadcaster in hand, now let's start the producer as well
as multiple consumers:
# Start the producer
QueueBroadcaster.start_link()
# Start multiple consumers
Printer.start_link()
Printer.start_link()
Printer.start_link()
Printer.start_link()
At this point, all consumers must have sent their demand which we were not
able to fulfill. Now by calling `QueueBroadcaster.sync_notify/1`, the event
shall be broadcast to all consumers at once as we have buffered the demand
in the producer:
QueueBroadcaster.sync_notify(:hello_world)
If we had called `QueueBroadcaster.sync_notify(:hello_world)` before any
consumer was available, the event would also have been buffered in our own
queue and served only when demand had been received.
By having control over the demand and queue, the broadcaster has
full control on how to behave when there are no consumers, when the
queue grows too large, and so forth.
## Asynchronous work and `handle_subscribe`
Both `:producer_consumer` and `:consumer` stages have been designed to do
their work in the `c:handle_events/3` callback. This means that, after
`c:handle_events/3` is invoked, both `:producer_consumer` and `:consumer`
stages will immediately send demand upstream and ask for more items, as the
stage that produced the events assumes events have been fully processed by
`c:handle_events/3`.
Such default behaviour makes `:producer_consumer` and `:consumer` stages
unfeasible for doing asynchronous work. However, given `GenStage` was designed
to run with multiple consumers, it is not a problem to perform synchronous or
blocking actions inside `handle_events/3` as you can then start multiple
consumers in order to max both CPU and IO usage as necessary.
On the other hand, if you must perform some work asynchronously,
`GenStage` comes with an option that manually controls how demand
is sent upstream, avoiding the default behaviour where demand is
sent after `c:handle_events/3`. Such can be done by implementing
the `c:handle_subscribe/4` callback and returning `{:manual, state}`
instead of the default `{:automatic, state}`. Once the consumer mode
is set to `:manual`, developers must use `GenStage.ask/3` to send
demand upstream when necessary.
Note that `:max_demand` and `:min_demand` must be manually respected when
asking for demand through `GenStage.ask/3`.
For example, the `ConsumerSupervisor` module processes events
asynchronously by starting a process for each event and this is achieved by
manually sending demand to producers. `ConsumerSupervisor`
can be used to distribute work to a limited amount of
processes, behaving similar to a pool where a new process is
started for each event. See the `ConsumerSupervisor` docs for more
information.
Setting the demand to `:manual` in `c:handle_subscribe/4` is not
only useful for asynchronous work but also for setting up other
mechanisms for back-pressure. As an example, let's implement a
consumer that is allowed to process a limited number of events
per time interval. Those are often called rate limiters:
defmodule RateLimiter do
use GenStage
def init(_) do
# Our state will keep all producers and their pending demand
{:consumer, %{}}
end
def handle_subscribe(:producer, opts, from, producers) do
# We will only allow max_demand events every 5000 milliseconds
pending = opts[:max_demand] || 1000
interval = opts[:interval] || 5000
# Register the producer in the state
producers = Map.put(producers, from, {pending, interval})
# Ask for the pending events and schedule the next time around
producers = ask_and_schedule(producers, from)
# Returns manual as we want control over the demand
{:manual, producers}
end
def handle_cancel(_, from, producers) do
# Remove the producers from the map on unsubscribe
{:noreply, [], Map.delete(producers, from)}
end
def handle_events(events, from, producers) do
# Bump the amount of pending events for the given producer
producers = Map.update!(producers, from, fn {pending, interval} ->
{pending + length(events), interval}
end)
# Consume the events by printing them.
IO.inspect(events)
# A producer_consumer would return the processed events here.
{:noreply, [], producers}
end
def handle_info({:ask, from}, producers) do
# This callback is invoked by the Process.send_after/3 message below.
{:noreply, [], ask_and_schedule(producers, from)}
end
defp ask_and_schedule(producers, from) do
case producers do
%{^from => {pending, interval}} ->
# Ask for any pending events
GenStage.ask(from, pending)
# And let's check again after interval
Process.send_after(self(), {:ask, from}, interval)
# Finally, reset pending events to 0
Map.put(producers, from, {0, interval})
%{} ->
producers
end
end
end
Let's subscribe the `RateLimiter` above to the
producer we have implemented at the beginning of the module
documentation:
{:ok, a} = GenStage.start_link(A, 0)
{:ok, b} = GenStage.start_link(RateLimiter, :ok)
# Ask for 10 items every 2 seconds
GenStage.sync_subscribe(b, to: a, max_demand: 10, interval: 2000)
Although the rate limiter above is a consumer, it could be made a
producer-consumer by changing `c:init/1` to return a `:producer_consumer`
and then forwarding the events in `c:handle_events/3`.
## Callbacks
`GenStage` is implemented on top of a `GenServer` with a few additions.
Besides exposing all of the `GenServer` callbacks, it also provides
`c:handle_demand/2` to be implemented by producers and `c:handle_events/3` to be
implemented by consumers, as shown above, as well as subscription-related
callbacks. Furthermore, all the callback responses have been modified to
potentially emit events. See the callbacks documentation for more
information.
By adding `use GenStage` to your module, Elixir will automatically
define all callbacks for you except for the following ones:
* `c:init/1` - must be implemented to choose between `:producer`, `:consumer`, or `:producer_consumer` stages
* `c:handle_demand/2` - must be implemented by `:producer` stages
* `c:handle_events/3` - must be implemented by `:producer_consumer` and `:consumer` stages
`use GenStage` also defines a `child_spec/1` function, allowing the
defined module to be put under a supervision tree in Elixir v1.5+.
The generated `child_spec/1` can be customized with the following options:
* `:id` - the child specification id, defaults to the current module
* `:start` - how to start the child process (defaults to calling `__MODULE__.start_link/1`)
* `:restart` - when the child should be restarted, defaults to `:permanent`
* `:shutdown` - how to shut down the child
For example:
use GenStage, restart: :transient, shutdown: 10_000
See the `Supervisor` docs for more information.
Although this module exposes functions similar to the ones found in
the `GenServer` API, like `call/3` and `cast/2`, developers can also
rely directly on GenServer functions such as `GenServer.multi_call/4`
and `GenServer.abcast/3` if they wish to.
### Name registration
`GenStage` is bound to the same name registration rules as a `GenServer`.
Read more about it in the `GenServer` docs.
## Message protocol overview
This section will describe the message protocol implemented
by stages. By documenting these messages, we will allow
developers to provide their own stage implementations.
### Back-pressure
When data is sent between stages, it is done by a message
protocol that provides back-pressure. The first step is
for the consumer to subscribe to the producer. Each
subscription has a unique reference.
Once subscribed, the consumer may ask the producer for messages
for the given subscription. The consumer may demand more items
whenever it wants to. A consumer must never receive more data
than it has asked for from any given producer stage.
A consumer may have multiple producers, where each demand is managed
individually (on a per-subscription basis). A producer may have multiple
consumers, where the demand and events are managed and delivered according to
a `GenStage.Dispatcher` implementation.
### Producer messages
The producer is responsible for sending events to consumers
based on demand. These are the messages that consumers can
send to producers:
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:subscribe, current, options}}` -
sent by the consumer to the producer to start a new subscription.
Before sending, the consumer MUST monitor the producer for clean-up
purposes in case of crashes. The `subscription_tag` is unique to
identify the subscription. It is typically the subscriber monitoring
reference although it may be any term.
Once sent, the consumer MAY immediately send demand to the producer.
The `current` field, when not `nil`, is a two-item tuple containing a
subscription that must be cancelled with the given reason before the
current one is accepted.
Once received, the producer MUST monitor the consumer. However, if
the subscription reference is known, it MUST send a `:cancel` message
to the consumer instead of monitoring and accepting the subscription.
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:cancel, reason}}` -
sent by the consumer to cancel a given subscription.
Once received, the producer MUST send a `:cancel` reply to the
registered consumer (which may not necessarily be the one received
in the tuple above). Keep in mind, however, there is no guarantee
such messages can be delivered in case the producer crashes before.
If the pair is unknown, the producer MUST send an appropriate cancel
reply.
* `{:"$gen_producer", from :: {consumer_pid, subscription_tag}, {:ask, demand}}` -
sent by consumers to ask demand for a given subscription (identified
by `subscription_tag`).
Once received, the producer MUST send data up to the demand. If the
pair is unknown, the producer MUST send an appropriate cancel reply.
### Consumer messages
The consumer is responsible for starting the subscription
and sending demand to producers. These are the messages that
producers can send to consumers:
* `{:"$gen_consumer", from :: {producer_pid, subscription_tag}, {:cancel, reason}}` -
sent by producers to cancel a given subscription.
It is used as a confirmation for client cancellations OR
whenever the producer wants to cancel some upstream demand.
* `{:"$gen_consumer", from :: {producer_pid, subscription_tag}, events :: [event, ...]}` -
events sent by producers to consumers.
`subscription_tag` identifies the subscription. The third argument
is a non-empty list of events. If the subscription is unknown, the
events must be ignored and a cancel message must be sent to the producer.
"""
# Internal state record of a running GenStage process. Users never build
# this struct directly; it is created and maintained by the GenStage runtime.
defstruct [
  # Callback module implementing the `GenStage` behaviour.
  :mod,
  # The callback module's own state, threaded through every callback.
  :state,
  # Stage type: :producer, :consumer or :producer_consumer (see `t:type/0`).
  :type,
  # Dispatcher module responsible for routing demand/events to consumers.
  :dispatcher_mod,
  # State owned by the dispatcher module above.
  :dispatcher_state,
  # Buffer for events emitted without matching demand (see the
  # :buffer_size option documented in `c:init/1`).
  :buffer,
  # Whether :first or :last events are kept when the buffer overflows
  # (see the :buffer_keep option documented in `c:init/1`).
  :buffer_keep,
  # NOTE(review): despite the name, this appears to track the demand mode
  # (:forward by default, see `demand/2`) — confirm against the rest of
  # the implementation, which is outside this view.
  events: :forward,
  # Monitor references — presumably for subscribed producers/consumers;
  # verify against the subscription handling code.
  monitors: %{},
  # Subscriptions where this stage acts as the consumer.
  producers: %{},
  # Subscriptions where this stage acts as the producer.
  consumers: %{}
]
@typedoc "The supported stage types."
@type type :: :producer | :consumer | :producer_consumer
@typedoc "Options used by the `subscribe*` functions"
@type subscription_options ::
{:cancel, :permanent | :transient | :temporary}
| {:min_demand, integer}
| {:max_demand, integer}
@typedoc "Option values used by the `init*` specific to `:producer` type"
@type producer_only_option :: {:demand, :forward | :accumulate}
@typedoc "Option values used by the `init*` common to `:producer` and `:producer_consumer` types"
@type producer_and_producer_consumer_option ::
{:buffer_size, non_neg_integer | :infinity}
| {:buffer_keep, :first | :last}
| {:dispatcher, module | {module, GenStage.Dispatcher.options()}}
@typedoc "Option values used by the `init*` common to `:consumer` and `:producer_consumer` types"
@type consumer_and_producer_consumer_option ::
{:subscribe_to, [module | {module, subscription_options}]}
@typedoc "Option values used by the `init*` functions when stage type is `:producer`"
@type producer_option :: producer_only_option | producer_and_producer_consumer_option
@typedoc "Option values used by the `init*` functions when stage type is `:consumer`"
@type consumer_option :: consumer_and_producer_consumer_option
@typedoc "Option values used by the `init*` functions when stage type is `:producer_consumer`"
@type producer_consumer_option ::
producer_and_producer_consumer_option | consumer_and_producer_consumer_option
@typedoc "The stage."
@type stage :: pid | atom | {:global, term} | {:via, module, term} | {atom, node}
@typedoc "The term that identifies a subscription."
@opaque subscription_tag :: reference
@typedoc "The term that identifies a subscription associated with the corresponding producer/consumer."
@type from :: {pid, subscription_tag}
@doc """
Invoked when the server is started.
`start_link/3` (or `start/3`) will block until this callback returns.
`args` is the argument term (second argument) passed to `start_link/3`
(or `start/3`).
In case of successful start, this callback must return a tuple
where the first element is the stage type, which is one of:
* `:producer`
* `:consumer`
* `:producer_consumer` (if the stage is acting as both)
For example:
def init(args) do
{:producer, some_state}
end
The returned tuple may also contain 3 or 4 elements. The third
element may be the `:hibernate` atom or a set of options defined
below.
Returning `:ignore` will cause `start_link/3` to return `:ignore`
and the process will exit normally without entering the loop or
calling `terminate/2`.
Returning `{:stop, reason}` will cause `start_link/3` to return
`{:error, reason}` and the process to exit with reason `reason`
without entering the loop or calling `terminate/2`.
## Options
This callback may return options. Some options are specific to
the chosen stage type while others are shared across all types.
### `:producer` options
* `:demand` - when `:forward`, the demand is always forwarded to
the `c:handle_demand/2` callback. When `:accumulate`, demand is
accumulated until its mode is set to `:forward` via `demand/2`.
This is useful as a synchronization mechanism, where the demand
is accumulated until all consumers are subscribed. Defaults to
`:forward`.
### `:producer` and `:producer_consumer` options
* `:buffer_size` - the size of the buffer to store events without
demand. Can be `:infinity` to signal no limit on the buffer size. Check
the "Buffer events" section of the module documentation. Defaults to
`10_000` for `:producer`, `:infinity` for `:producer_consumer`.
* `:buffer_keep` - returns whether the `:first` or `:last` entries
should be kept on the buffer in case the buffer size is exceeded.
Defaults to `:last`.
* `:dispatcher` - the dispatcher responsible for handling demands.
    Defaults to `GenStage.DemandDispatcher`. May be either an atom
representing a dispatcher module or a two-element tuple with
the dispatcher module and the dispatcher options.
### `:consumer` and `:producer_consumer` options
* `:subscribe_to` - a list of producers to subscribe to. Each element
represents either the producer module or a tuple with the producer module
and the subscription options (as defined in `sync_subscribe/2`).
"""
@callback init(args :: term) ::
{:producer, state}
| {:producer, state, [producer_option]}
| {:producer_consumer, state}
| {:producer_consumer, state, [producer_consumer_option]}
| {:consumer, state}
| {:consumer, state, [consumer_option]}
| :ignore
| {:stop, reason :: any}
when state: any
@doc """
Invoked on `:producer` stages.
This callback is invoked on `:producer` stages with the demand from
consumers/dispatcher. The producer that implements this callback must either
store the demand, or return the amount of requested events.
Must always be explicitly implemented by `:producer` stages.
## Examples
def handle_demand(demand, state) do
# We check if we're able to satisfy the demand and fetch
# events if we aren't.
events =
if length(state.events) >= demand do
state.events
else
# fetch_events()
end
# We dispatch only the requested number of events.
{to_dispatch, remaining} = Enum.split(events, demand)
{:noreply, to_dispatch, %{state | events: remaining}}
end
"""
@callback handle_demand(demand :: pos_integer, state :: term) ::
{:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason, new_state}
when new_state: term, reason: term, event: term
@doc """
Invoked when a consumer subscribes to a producer.
This callback is invoked in both producers and consumers.
`producer_or_consumer` will be `:producer` when this callback is
invoked on a consumer that subscribed to a producer, and `:consumer`
  when this callback is invoked on a producer that a consumer subscribed to.
For consumers, successful subscriptions must return one of:
* `{:automatic, new_state}` - means the stage implementation will take care
of automatically sending demand to producers. This is the default.
* `{:manual, state}` - means that demand must be sent to producers
explicitly via `ask/3`. `:manual` subscriptions must be cancelled when
`c:handle_cancel/3` is called. `:manual` can be used when a special
behaviour is desired (for example, `ConsumerSupervisor` uses `:manual`
demand in its implementation).
For producers, successful subscriptions must always return
`{:automatic, new_state}`. `:manual` mode is not supported.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:automatic, state}`.
## Examples
Let's see an example where we define this callback in a consumer that will use
`:manual` mode. In this case, we'll store the subscription (`from`) in the
state in order to be able to use it later on when asking demand via `ask/3`.
def handle_subscribe(:producer, _options, from, state) do
new_state = %{state | subscription: from}
{:manual, new_state}
end
"""
@callback handle_subscribe(
producer_or_consumer :: :producer | :consumer,
subscription_options,
from,
state :: term
) ::
{:automatic | :manual, new_state}
| {:stop, reason, new_state}
when new_state: term, reason: term
@doc """
Invoked when a consumer is no longer subscribed to a producer.
It receives the cancellation reason, the `from` tuple representing the
cancelled subscription and the state. The `cancel_reason` will be a
`{:cancel, _}` tuple if the reason for cancellation was a `GenStage.cancel/2`
call. Any other value means the cancellation reason was due to an EXIT.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:noreply, [], state}`.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_cancel(
cancellation_reason :: {:cancel | :down, reason :: term},
from,
state :: term
) ::
{:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason, new_state}
when event: term, new_state: term, reason: term
@doc """
Invoked on `:producer_consumer` and `:consumer` stages to handle events.
Must always be explicitly implemented by such types.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_events(events :: [event], from, state :: term) ::
{:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason, new_state}
when new_state: term, reason: term, event: term
@doc """
Invoked to handle synchronous `call/3` messages.
`call/3` will block until a reply is received (unless the call times out or
nodes are disconnected).
`request` is the request message sent by a `call/3`, `from` is a two-element tuple
containing the caller's PID and a term that uniquely identifies the call, and
`state` is the current state of the `GenStage`.
Returning `{:reply, reply, [events], new_state}` sends the response `reply`
to the caller after events are dispatched (or buffered) and continues the
loop with new state `new_state`. In case you want to deliver the reply before
processing events, use `reply/2` and return `{:noreply, [event],
state}`.
Returning `{:noreply, [event], new_state}` does not send a response to the
caller and processes the given events before continuing the loop with new
state `new_state`. The response must be sent with `reply/2`.
Hibernating is also supported as an atom to be returned from either
`:reply` and `:noreply` tuples.
Returning `{:stop, reason, reply, new_state}` stops the loop and `terminate/2`
is called with reason `reason` and state `new_state`. Then the `reply` is sent
as the response to the call and the process exits with reason `reason`.
Returning `{:stop, reason, new_state}` is similar to
`{:stop, reason, reply, new_state}` except that no reply is sent to the caller.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:stop, {:bad_call, request}, state}`.
"""
@callback handle_call(request :: term, from :: GenServer.from(), state :: term) ::
{:reply, reply, [event], new_state}
| {:reply, reply, [event], new_state, :hibernate}
| {:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason, reply, new_state}
| {:stop, reason, new_state}
when reply: term, new_state: term, reason: term, event: term
@doc """
Invoked to handle asynchronous `cast/2` messages.
`request` is the request message sent by a `cast/2` and `state` is the current
state of the `GenStage`.
Returning `{:noreply, [event], new_state}` dispatches the events and continues
the loop with new state `new_state`.
Returning `{:noreply, [event], new_state, :hibernate}` is similar to
`{:noreply, new_state}` except the process is hibernated before continuing the
loop. See the return values for `c:GenServer.handle_call/3` for more information
on hibernation.
Returning `{:stop, reason, new_state}` stops the loop and `terminate/2` is
called with the reason `reason` and state `new_state`. The process exits with
reason `reason`.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:stop, {:bad_cast, request}, state}`.
"""
@callback handle_cast(request :: term, state :: term) ::
{:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason :: term, new_state}
when new_state: term, event: term
@doc """
Invoked to handle all other messages.
`message` is the message and `state` is the current state of the `GenStage`. When
a timeout occurs the message is `:timeout`.
If this callback is not implemented, the default implementation by
`use GenStage` will return `{:noreply, [], state}`.
Return values are the same as `c:handle_cast/2`.
"""
@callback handle_info(message :: term, state :: term) ::
{:noreply, [event], new_state}
| {:noreply, [event], new_state, :hibernate}
| {:stop, reason :: term, new_state}
when new_state: term, event: term
@doc """
The same as `c:GenServer.terminate/2`.
"""
@callback terminate(reason, state :: term) :: term
when reason: :normal | :shutdown | {:shutdown, term} | term
@doc """
The same as `c:GenServer.code_change/3`.
"""
@callback code_change(old_vsn, state :: term, extra :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term}
when old_vsn: term | {:down, term}
@doc """
The same as `c:GenServer.format_status/2`.
"""
@callback format_status(:normal | :terminate, [pdict :: {term, term} | (state :: term), ...]) ::
(status :: term)
@optional_callbacks [
# GenStage
handle_subscribe: 4,
handle_cancel: 3,
handle_demand: 2,
handle_events: 3,
# GenServer
code_change: 3,
format_status: 2,
handle_call: 3,
handle_cast: 2,
handle_info: 2,
terminate: 2
]
@doc false
defmacro __using__(opts) do
  # Injected into any module that calls `use GenStage`: registers the
  # behaviour and defines an overridable `child_spec/1` so the module can
  # be placed directly under a supervisor.
  quote location: :keep, bind_quoted: [opts: opts] do
    @behaviour GenStage
    @doc false
    def child_spec(arg) do
      default = %{
        id: __MODULE__,
        start: {__MODULE__, :start_link, [arg]}
      }
      # `opts` are the options given to `use GenStage` (e.g. :restart,
      # :shutdown, :id) and override the default spec fields.
      Supervisor.child_spec(default, unquote(Macro.escape(opts)))
    end
    defoverridable child_spec: 1
  end
end
@doc """
Starts a `GenStage` process linked to the current process.
This is often used to start the `GenStage` as part of a supervision tree.
Once the server is started, the `init/1` function of the given `module` is
called with `args` as its arguments to initialize the stage. To ensure a
synchronized start-up procedure, this function does not return until `init/1`
has returned.
Note that a `GenStage` started with `start_link/3` is linked to the
parent process and will exit in case of crashes from the parent. The `GenStage`
will also exit due to the `:normal` reason in case it is configured to trap
exits in the `c:init/1` callback.
## Options
* `:name` - used for name registration as described in the "Name
registration" section of the module documentation
* `:debug` - if present, the corresponding function in the [`:sys`
module](http://www.erlang.org/doc/man/sys.html) is invoked
This function also accepts all the options accepted by
`GenServer.start_link/3`.
## Return values
If the stage is successfully created and initialized, this function returns
`{:ok, pid}`, where `pid` is the pid of the stage. If a process with the
specified name already exists, this function returns
`{:error, {:already_started, pid}}` with the pid of that process.
If the `c:init/1` callback fails with `reason`, this function returns
`{:error, reason}`. Otherwise, if `c:init/1` returns `{:stop, reason}`
or `:ignore`, the process is terminated and this function returns
`{:error, reason}` or `:ignore`, respectively.
"""
@spec start_link(module, term, GenServer.options()) :: GenServer.on_start()
# Delegates to GenServer, packing the callback module and its init args
# together so GenStage's own `init/1` can dispatch to `module.init(args)`.
def start_link(module, args, options \\ []) when is_atom(module) and is_list(options),
  do: GenServer.start_link(__MODULE__, {module, args}, options)
@doc """
Starts a `GenStage` process without links (outside of a supervision tree).
See `start_link/3` for more information.
"""
@spec start(module, term, GenServer.options()) :: GenServer.on_start()
# Unlinked variant of `start_link/3`; same argument packing.
def start(module, args, options \\ []) when is_atom(module) and is_list(options),
  do: GenServer.start(__MODULE__, {module, args}, options)
@doc """
Queues an info message that is delivered after all currently buffered events.
This call is synchronous and will return after the stage has queued
the info message. The message will be eventually handled by the
`handle_info/2` callback.
If the stage is a consumer, it does not have buffered events, so the
  message is queued immediately.
This function will return `:ok` if the info message is successfully queued.
"""
@spec sync_info(stage, msg :: term(), timeout) :: :ok
# Synchronously enqueues an info message behind any buffered events.
def sync_info(stage, msg, timeout \\ 5_000),
  do: call(stage, {:"$info", msg}, timeout)
@doc """
Asynchronously queues an info message that is delivered after all
currently buffered events.
If the stage is a consumer, it does not have buffered events, so the
message is queued immediately.
This call returns `:ok` regardless if the info has been successfully
queued or not. It is typically called from the stage itself.
"""
@spec async_info(stage, msg :: term()) :: :ok
# Fire-and-forget variant of `sync_info/3`.
def async_info(stage, msg), do: cast(stage, {:"$info", msg})
@doc """
Sets the demand mode for a producer.
When `:forward`, the demand is always forwarded to the `handle_demand`
callback. When `:accumulate`, demand is accumulated until its mode is
set to `:forward`. This is useful as a synchronization mechanism, where
the demand is accumulated until all consumers are subscribed. Defaults
to `:forward`.
This command is asynchronous.
"""
@spec demand(stage, :forward | :accumulate) :: :ok
# Switches a producer between forwarding and accumulating demand.
def demand(stage, mode) when mode in [:forward, :accumulate],
  do: cast(stage, {:"$demand", mode})
@doc """
Asks the consumer to subscribe to the given producer synchronously.
This call is synchronous and will return after the called consumer
sends the subscribe message to the producer. It does not, however,
wait for the subscription confirmation. Therefore this function
will return before `c:handle_subscribe/4` is called in the consumer.
In other words, it guarantees the message was sent, but it does not
guarantee a subscription has effectively been established.
This function will return `{:ok, subscription_tag}` as long as the
subscription message is sent. It will return `{:error, :not_a_consumer}`
when the stage is not a consumer. `subscription_tag` is the second element
of the two-element tuple that will be passed to `c:handle_subscribe/4`.
## Options
* `:cancel` - `:permanent` (default), `:transient` or `:temporary`.
When permanent, the consumer exits when the producer cancels or exits.
When transient, the consumer exits only if reason is not `:normal`,
`:shutdown`, or `{:shutdown, reason}`. When temporary, it never exits.
In case of exits, the same reason is used to exit the consumer.
In case of cancellations, the reason is wrapped in a `:cancel` tuple.
* `:min_demand` - the minimum demand for this subscription. See the module
documentation for more information.
* `:max_demand` - the maximum demand for this subscription. See the module
documentation for more information.
Any other option is sent to the producer stage. This may be used by
dispatchers for custom configuration. For example, if a producer uses
a `GenStage.BroadcastDispatcher`, an optional `:selector` function
that receives an event and returns a boolean limits this subscription to
receiving only those events where the selector function returns a truthy
value:
GenStage.sync_subscribe(consumer,
to: producer,
selector: fn %{key: key} -> String.starts_with?(key, "foo-") end)
"""
@spec sync_subscribe(stage, subscription_options, timeout) ::
        {:ok, subscription_tag} | {:error, :not_a_consumer} | {:error, {:bad_opts, String.t()}}
# Plain subscription: no previous subscription to cancel, hence `nil`.
def sync_subscribe(stage, opts, timeout \\ 5_000),
  do: sync_subscribe(stage, nil, opts, timeout)
@doc """
Cancels `subscription_tag` with `reason` and subscribes synchronously in one step.
`subscription_tag` is the subscription tag returned by `sync_subscribe/3`.
See `sync_subscribe/3` for examples and options.
"""
@spec sync_resubscribe(stage, subscription_tag, term, subscription_options, timeout) ::
        {:ok, subscription_tag} | {:error, :not_a_consumer} | {:error, {:bad_opts, String.t()}}
# Cancel-then-subscribe in one round trip; the {tag, reason} tuple tells
# the stage which subscription to drop first.
def sync_resubscribe(stage, subscription_tag, reason, opts, timeout \\ 5000),
  do: sync_subscribe(stage, {subscription_tag, reason}, opts, timeout)
# Shared implementation for sync_subscribe/3 and sync_resubscribe/5.
# Extracts the mandatory :to option (raising if absent) and forwards the
# remaining options to the stage.
defp sync_subscribe(stage, cancel, opts, timeout) do
  case Keyword.fetch(opts, :to) do
    {:ok, to} ->
      call(stage, {:"$subscribe", cancel, to, Keyword.delete(opts, :to)}, timeout)

    :error ->
      raise ArgumentError, "expected :to argument in sync_(re)subscribe"
  end
end
@doc """
Asks the consumer to subscribe to the given producer asynchronously.
This call returns `:ok` regardless if the subscription
effectively happened or not. It is typically called from
a stage's `c:init/1` callback.
## Options
This function accepts the same options as `sync_subscribe/4`.
"""
@spec async_subscribe(stage, subscription_options) :: :ok
# Plain asynchronous subscription: nothing to cancel, hence `nil`.
def async_subscribe(stage, opts), do: async_subscribe(stage, nil, opts)
@doc """
  Cancels `subscription_tag` with `reason` and subscribes asynchronously in one
step.
See `async_subscribe/2` for examples and options.
"""
@spec async_resubscribe(stage, subscription_tag, reason :: term, subscription_options) :: :ok
# Asynchronous cancel-then-subscribe in one step.
def async_resubscribe(stage, subscription_tag, reason, opts),
  do: async_subscribe(stage, {subscription_tag, reason}, opts)
# Shared implementation for async_subscribe/2 and async_resubscribe/4;
# mirrors the private sync_subscribe/4 but delivers via cast.
defp async_subscribe(stage, cancel, opts) do
  case Keyword.fetch(opts, :to) do
    {:ok, to} ->
      cast(stage, {:"$subscribe", cancel, to, Keyword.delete(opts, :to)})

    :error ->
      raise ArgumentError, "expected :to argument in async_(re)subscribe"
  end
end
@doc """
Asks the given demand to the producer.
`producer_subscription` is the subscription this demand will be asked on; this
term could be for example stored in the stage when received in
`c:handle_subscribe/4`.
The demand is a non-negative integer with the amount of events to
ask a producer for. If the demand is `0`, this function simply returns `:ok`
without asking for data.
This function must only be used in the cases when a consumer
sets a subscription to `:manual` mode in the `c:handle_subscribe/4`
callback.
It accepts the same options as `Process.send/3`, and returns the same value as
`Process.send/3`.
"""
@spec ask(from, non_neg_integer, [:noconnect | :nosuspend]) :: :ok | :noconnect | :nosuspend
def ask(producer_subscription, demand, opts \\ [])

# Zero demand is a no-op: nothing is sent to the producer.
def ask({_pid, _ref}, 0, _opts), do: :ok

def ask({pid, ref}, demand, opts) when is_integer(demand) and demand > 0 do
  # Message shape follows the "$gen_producer" protocol described in the
  # module documentation.
  message = {:"$gen_producer", {self(), ref}, {:ask, demand}}
  Process.send(pid, message, opts)
end
@doc """
Cancels the given subscription on the producer.
Once the producer receives the request, a confirmation
may be forwarded to the consumer (although there is no
guarantee as the producer may crash for unrelated reasons
before). This is an asynchronous request.
It accepts the same options as `Process.send/3`, and returns the same value as
`Process.send/3`.
"""
@spec cancel(from, term, [:noconnect | :nosuspend]) :: :ok | :noconnect | :nosuspend
def cancel({pid, ref} = _producer_subscription, reason, opts \\ []) do
  # Asynchronous cancellation per the "$gen_producer" protocol; the
  # producer confirms (best effort) with a :cancel reply.
  message = {:"$gen_producer", {self(), ref}, {:cancel, reason}}
  Process.send(pid, message, opts)
end
# Inline these hot-path helpers: they sit directly on the message-sending
# fast path between stages.
@compile {:inline, send_noconnect: 2, ask: 3, cancel: 3}
# Sends `msg` to `pid` without auto-connecting to remote nodes; returns
# :noconnect instead of blocking when the destination node is unreachable.
defp send_noconnect(pid, msg) do
  Process.send(pid, msg, [:noconnect])
end
@doc """
Makes a synchronous call to the `stage` and waits for its reply.
The client sends the given `request` to the stage and waits until a reply
arrives or a timeout occurs. `c:handle_call/3` will be called on the stage
to handle the request.
`stage` can be any of the values described in the "Name registration"
section of the documentation for this module.
## Timeouts
`timeout` is an integer greater than zero which specifies how many
milliseconds to wait for a reply, or the atom `:infinity` to wait
indefinitely. The default value is `5000`. If no reply is received within
the specified time, the function call fails and the caller exits. If the
caller catches the failure and continues running, and the stage is just late
with the reply, such reply may arrive at any time later into the caller's message
queue. The caller must in this case be prepared for this and discard any such
garbage messages that are two-element tuples with a reference as the first
element.
"""
@spec call(stage, term, timeout) :: term
# Thin wrapper over GenServer.call/3; requests are handled by the stage's
# `c:handle_call/3`.
def call(stage, request, timeout \\ 5000), do: GenServer.call(stage, request, timeout)
@doc """
Sends an asynchronous request to the `stage`.
This function always returns `:ok` regardless of whether
the destination `stage` (or node) exists. Therefore it
is unknown whether the destination stage successfully
handled the message.
`c:handle_cast/2` will be called on the stage to handle
the request. In case the `stage` is on a node which is
not yet connected to the caller one, the call is going to
block until a connection happens.
"""
@spec cast(stage, term) :: :ok
# Thin wrapper over GenServer.cast/2; requests are handled by the stage's
# `c:handle_cast/2`. Always returns :ok.
def cast(stage, request), do: GenServer.cast(stage, request)
@doc """
Replies to a client.
This function can be used to explicitly send a reply to a client that
called `call/3` when the reply cannot be specified in the return value
of `c:handle_call/3`.
`client` must be the `from` argument (the second argument) accepted by
`c:handle_call/3` callbacks. `reply` is an arbitrary term which will be given
back to the client as the return value of the call.
Note that `reply/2` can be called from any process, not just the `GenStage`
that originally received the call (as long as that `GenStage` communicated the
`from` argument somehow).
This function always returns `:ok`.
## Examples
def handle_call(:reply_in_one_second, from, state) do
Process.send_after(self(), {:reply, from}, 1_000)
{:noreply, [], state}
end
def handle_info({:reply, from}, state) do
GenStage.reply(from, :one_second_has_passed)
end
"""
@spec reply(GenServer.from(), term) :: :ok
def reply(client, reply)

def reply({to, tag}, reply) when is_pid(to) do
  send(to, {tag, reply})
  :ok
catch
  # Replies are best-effort: any failure while sending is swallowed so the
  # replying process never crashes on a dead or unreachable caller.
  _, _ -> :ok
end
@doc """
Stops the stage with the given `reason`.
The `c:terminate/2` callback of the given `stage` will be invoked before
exiting. This function returns `:ok` if the server terminates with the
given reason; if it terminates with another reason, the call exits.
This function keeps OTP semantics regarding error reporting.
If the reason is any other than `:normal`, `:shutdown` or
`{:shutdown, _}`, an error report is logged.
"""
@spec stop(stage, term, timeout) :: :ok
# Delegates to :gen.stop/3, which invokes `c:terminate/2` and preserves
# OTP error-reporting semantics.
def stop(stage, reason \\ :normal, timeout \\ :infinity),
  do: :gen.stop(stage, reason, timeout)
@doc """
Starts a producer stage from an enumerable (or stream).
This function will start a stage linked to the current process
that will take items from the enumerable when there is demand.
Since streams are enumerables, we can also pass streams as
arguments (in fact, streams are the most common argument to
this function).
The enumerable is consumed in batches, retrieving `max_demand`
items the first time and then `max_demand - min_demand` the
next times. Therefore, for streams that cannot produce items
that fast, it is recommended to pass a lower `:max_demand`
value as an option.
It is also expected the enumerable is able to produce the whole
batch on demand or terminate. If the enumerable is a blocking one,
for example, because it needs to wait for data from another source,
it will block until the current batch is fully filled. GenStage and
Flow were created exactly to address such issue. So if you have a
blocking enumerable that you want to use in your Flow, then it must
be implemented with GenStage and integrated with `from_stages/2`.
When the enumerable finishes or halts, the stage will exit with
`:normal` reason. This means that, if a consumer subscribes to
the enumerable stage and the `:cancel` option is set to
`:permanent`, which is the default, the consumer will also exit
  with `:normal` reason. This behaviour can be changed by setting
  the `:cancel` option to either `:transient` or `:temporary`
at the moment of subscription as described in the `sync_subscribe/3`
docs.
Keep in mind that streams that require the use of the process
inbox to work most likely won't behave as expected with this
function since the mailbox is controlled by the stage process
itself. As explained above, stateful or blocking enumerables
are generally discouraged, as `GenStage` was designed precisely
to support exchange of data in such cases.
## Options
* `:link` - when false, does not link the stage to the current
process. Defaults to `true`.
* `:dispatcher` - the dispatcher responsible for handling demands.
    Defaults to `GenStage.DemandDispatcher`. May be either an atom or
a tuple with the dispatcher and the dispatcher options.
* `:demand` - configures the demand to `:forward` or `:accumulate`
mode. See `c:init/1` and `demand/2` for more information.
All other options that would be given for `start_link/3` are
also accepted.
"""
@spec from_enumerable(Enumerable.t(), keyword()) :: GenServer.on_start()
def from_enumerable(stream, opts \\ []) do
  # :link decides whether the streamer stage is linked to the caller;
  # it defaults to true and is removed before the remaining options are
  # forwarded to start/start_link.
  {link?, opts} = Keyword.pop(opts, :link, true)

  case link? do
    true -> start_link(GenStage.Streamer, {stream, opts}, opts)
    false -> start(GenStage.Streamer, {stream, opts}, opts)
  end
end
@doc """
Creates a stream that subscribes to the given producers
and emits the appropriate messages.
It expects a list of producers to subscribe to. Each element
represents the producer or a tuple with the producer and the
subscription options as defined in `sync_subscribe/2`:
GenStage.stream([{producer, max_demand: 100}])
If the producer process exits, the stream will exit with the same
reason. If you want the stream to halt instead, set the cancel option
to either `:transient` or `:temporary` as described in the
`sync_subscribe/3` docs:
GenStage.stream([{producer, max_demand: 100, cancel: :transient}])
Once all producers are subscribed to, their demand is automatically
set to `:forward` mode. See the `:demand` and `:producers`
options below for more information.
`GenStage.stream/1` will "hijack" the inbox of the process
enumerating the stream to subscribe and receive messages
from producers. However it guarantees it won't remove or
leave unwanted messages in the mailbox after enumeration
unless one of the producers comes from a remote node.
For more information, read the "Known limitations" section
below.
## Options
* `:demand` - sets the demand in producers to `:forward` or
`:accumulate` after subscription. Defaults to `:forward` so
the stream can receive items.
* `:producers` - the processes to set the demand to `:forward`
on initialization. It defaults to the processes being subscribed
to. Sometimes the stream is subscribing to a `:producer_consumer`
instead of a `:producer`, in such cases, you can set this option
to either an empty list or the list of actual producers so their
demand is properly set.
## Known limitations
### `from_enumerable/2`
This module also provides a function called `from_enumerable/2`
which receives an enumerable (like a stream) and creates a stage
that emits data from the enumerable.
Given both `GenStage.from_enumerable/2` and `GenStage.stream/1`
require the process inbox to send and receive messages, it is
impossible to run a `stream/2` inside a `from_enumerable/2` as
the `stream/2` will never receive the messages it expects.
### Remote nodes
While it is possible to stream messages from remote nodes
such should be done with care. In particular, in case of
disconnections, there is a chance the producer will send
messages after the consumer receives its DOWN messages and
those will remain in the process inbox, violating the
common scenario where `GenStage.stream/1` does not pollute
the caller inbox. In such cases, it is recommended to
consume such streams from a separate process which will be
discarded after the stream is consumed.
"""
@spec stream([stage | {stage, keyword}], keyword) :: Enumerable.t()
def stream(subscriptions, options \\ [])
# Builds a lazy enumerable; subscriptions only happen once the stream is
# actually enumerated (see GenStage.Stream for the implementation).
def stream(subscriptions, options) when is_list(subscriptions) do
  GenStage.Stream.build(subscriptions, options)
end
# Anything other than a list of subscriptions is a caller error.
def stream(subscriptions, _options) do
  raise ArgumentError,
    "GenStage.stream/1 expects a list of subscriptions, got: #{inspect(subscriptions)}"
end
## Callbacks
@compile :inline_list_funcs
require GenStage.Utils, as: Utils
alias GenStage.Buffer
@doc false
# GenServer init callback: normalizes the user module's `init/1` return into
# one of the three stage types, or stops with :bad_return_value otherwise.
def init({mod, args}) do
  case mod.init(args) do
    {:producer, state} ->
      init_producer(mod, [], state)
    {:producer, state, opts} when is_list(opts) ->
      init_producer(mod, opts, state)
    {:producer_consumer, state} ->
      init_producer_consumer(mod, [], state)
    {:producer_consumer, state, opts} when is_list(opts) ->
      init_producer_consumer(mod, opts, state)
    {:consumer, state} ->
      init_consumer(mod, [], state)
    {:consumer, state, opts} when is_list(opts) ->
      init_consumer(mod, opts, state)
    {:stop, _} = stop ->
      stop
    :ignore ->
      :ignore
    other ->
      {:stop, {:bad_return_value, other}}
  end
end
# Validates producer options and builds the initial stage struct.
# When demand is :accumulate, incoming demands are collected in a list
# (events: []) until demand is switched to :forward.
defp init_producer(mod, opts, state) do
  with {:ok, dispatcher_mod, dispatcher_state, opts} <- init_dispatcher(opts),
       {:ok, buffer_size, opts} <-
         Utils.validate_integer(opts, :buffer_size, 10000, 0, :infinity, true),
       {:ok, buffer_keep, opts} <-
         Utils.validate_in(opts, :buffer_keep, :last, [:first, :last]),
       {:ok, demand, opts} <-
         Utils.validate_in(opts, :demand, :forward, [:accumulate, :forward]),
       :ok <- Utils.validate_no_opts(opts) do
    stage = %GenStage{
      mod: mod,
      state: state,
      type: :producer,
      buffer: Buffer.new(buffer_size),
      buffer_keep: buffer_keep,
      events: if(demand == :accumulate, do: [], else: :forward),
      dispatcher_mod: dispatcher_mod,
      dispatcher_state: dispatcher_state
    }
    {:ok, stage}
  else
    {:error, message} -> {:stop, {:bad_opts, message}}
  end
end
# Resolves the :dispatcher option — a bare module or a {module, opts}
# tuple — and initializes it. Defaults to GenStage.DemandDispatcher.
defp init_dispatcher(opts) do
  case Keyword.pop(opts, :dispatcher, GenStage.DemandDispatcher) do
    {dispatcher, opts} when is_atom(dispatcher) ->
      {:ok, dispatcher_state} = dispatcher.init([])
      {:ok, dispatcher, dispatcher_state, opts}
    {{dispatcher, dispatcher_opts}, opts}
    when is_atom(dispatcher) and is_list(dispatcher_opts) ->
      {:ok, dispatcher_state} = dispatcher.init(dispatcher_opts)
      {:ok, dispatcher, dispatcher_state, opts}
    {other, _opts} ->
      {:error, "expected :dispatcher to be an atom or a {atom, list}, got: #{inspect(other)}"}
  end
end
# Like init_producer/3, but with an infinite default buffer and an event
# queue ({queue, pending_demand}) instead of a demand mode, then
# subscribes to any producers given in :subscribe_to.
defp init_producer_consumer(mod, opts, state) do
  with {:ok, dispatcher_mod, dispatcher_state, opts} <- init_dispatcher(opts),
       {:ok, subscribe_to, opts} <- Utils.validate_list(opts, :subscribe_to, []),
       {:ok, buffer_size, opts} <-
         Utils.validate_integer(opts, :buffer_size, :infinity, 0, :infinity, true),
       {:ok, buffer_keep, opts} <-
         Utils.validate_in(opts, :buffer_keep, :last, [:first, :last]),
       :ok <- Utils.validate_no_opts(opts) do
    stage = %GenStage{
      mod: mod,
      state: state,
      type: :producer_consumer,
      buffer: Buffer.new(buffer_size),
      buffer_keep: buffer_keep,
      events: {:queue.new(), 0},
      dispatcher_mod: dispatcher_mod,
      dispatcher_state: dispatcher_state
    }
    consumer_init_subscribe(subscribe_to, stage)
  else
    {:error, message} -> {:stop, {:bad_opts, message}}
  end
end
# Consumers carry no buffer or dispatcher; they only subscribe upstream.
defp init_consumer(mod, opts, state) do
  with {:ok, subscribe_to, opts} <- Utils.validate_list(opts, :subscribe_to, []),
       :ok <- Utils.validate_no_opts(opts) do
    stage = %GenStage{mod: mod, state: state, type: :consumer}
    consumer_init_subscribe(subscribe_to, stage)
  else
    {:error, message} -> {:stop, {:bad_opts, message}}
  end
end
@doc false
# "$info": routes a sync_info message through buffer/dispatcher ordering.
def handle_call({:"$info", msg}, _from, stage) do
  producer_info(msg, stage)
end
# "$subscribe": synchronous subscription request from the client API.
def handle_call({:"$subscribe", current, to, opts}, _from, stage) do
  consumer_subscribe(current, to, opts, stage)
end
# User calls: a reply may also carry events to dispatch downstream.
def handle_call(msg, from, %{mod: mod, state: state} = stage) do
  case mod.handle_call(msg, from, state) do
    {:reply, reply, events, state} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      {:reply, reply, %{stage | state: state}}
    {:reply, reply, events, state, :hibernate} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      {:reply, reply, %{stage | state: state}, :hibernate}
    {:stop, reason, reply, state} ->
      {:stop, reason, reply, %{stage | state: state}}
    return ->
      handle_noreply_callback(return, stage)
  end
end
@doc false
# Async variants of the "$info"/"$subscribe" requests; the reply from the
# shared helpers is discarded.
def handle_cast({:"$info", msg}, stage) do
  {:reply, _, stage} = producer_info(msg, stage)
  {:noreply, stage}
end
def handle_cast({:"$demand", mode}, stage) do
  producer_demand(mode, stage)
end
def handle_cast({:"$subscribe", current, to, opts}, stage) do
  case consumer_subscribe(current, to, opts, stage) do
    {:reply, _, stage} -> {:noreply, stage}
    {:stop, reason, _, stage} -> {:stop, reason, stage}
    {:stop, _, _} = stop -> stop
  end
end
# User casts are delegated to the module's handle_cast/2.
def handle_cast(msg, %{state: state} = stage) do
  noreply_callback(:handle_cast, [msg, state], stage)
end
@doc false
# DOWN messages: the ref identifies either a producer we subscribed to
# (consumer side) or a monitored consumer (producer side); anything else
# goes to the user module.
def handle_info({:DOWN, ref, _, _, reason} = msg, stage) do
  %{producers: producers, monitors: monitors, state: state} = stage
  case producers do
    %{^ref => _} ->
      consumer_cancel(ref, :down, reason, stage)
    %{} ->
      case monitors do
        %{^ref => consumer_ref} ->
          producer_cancel(consumer_ref, :down, reason, stage)
        %{} ->
          noreply_callback(:handle_info, [msg, state], stage)
      end
  end
end
## Producer messages
def handle_info({:"$gen_producer", _, _} = msg, %{type: :consumer} = stage) do
  error_msg = 'GenStage consumer ~tp received $gen_producer message: ~tp~n'
  :error_logger.error_msg(error_msg, [Utils.self_name(), msg])
  {:noreply, stage}
end
# Incoming subscription: reject duplicates, otherwise cancel any previous
# subscription being replaced, monitor the consumer and register it.
def handle_info(
      {:"$gen_producer", {consumer_pid, ref} = from, {:subscribe, cancel, opts}},
      %{consumers: consumers} = stage
    ) do
  case consumers do
    %{^ref => _} ->
      error_msg = 'GenStage producer ~tp received duplicated subscription from: ~tp~n'
      :error_logger.error_msg(error_msg, [Utils.self_name(), from])
      msg = {:"$gen_consumer", {self(), ref}, {:cancel, :duplicated_subscription}}
      send_noconnect(consumer_pid, msg)
      {:noreply, stage}
    %{} ->
      case maybe_producer_cancel(cancel, stage) do
        {:noreply, stage} ->
          mon_ref = Process.monitor(consumer_pid)
          stage = put_in(stage.monitors[mon_ref], ref)
          stage = put_in(stage.consumers[ref], {consumer_pid, mon_ref})
          producer_subscribe(opts, from, stage)
        other ->
          other
      end
  end
end
# Demand from a known consumer is forwarded to the dispatcher; unknown
# subscriptions are cancelled back.
def handle_info(
      {:"$gen_producer", {consumer_pid, ref} = from, {:ask, counter}},
      %{consumers: consumers} = stage
    )
    when is_integer(counter) do
  case consumers do
    %{^ref => _} ->
      %{dispatcher_state: dispatcher_state} = stage
      dispatcher_callback(:ask, [counter, from, dispatcher_state], stage)
    %{} ->
      msg = {:"$gen_consumer", {self(), ref}, {:cancel, :unknown_subscription}}
      send_noconnect(consumer_pid, msg)
      {:noreply, stage}
  end
end
def handle_info({:"$gen_producer", {_, ref}, {:cancel, reason}}, stage) do
  producer_cancel(ref, :cancel, reason, stage)
end
## Consumer messages
def handle_info({:"$gen_consumer", _, _} = msg, %{type: :producer} = stage) do
  error_msg = 'GenStage producer ~tp received $gen_consumer message: ~tp~n'
  :error_logger.error_msg(error_msg, [Utils.self_name(), msg])
  {:noreply, stage}
end
# producer_consumer: queue the events and process as much as current
# downstream demand allows.
def handle_info(
      {:"$gen_consumer", {producer_pid, ref}, events},
      %{type: :producer_consumer, events: {queue, counter}, producers: producers} = stage
    )
    when is_list(events) do
  case producers do
    %{^ref => _entry} ->
      queue = put_pc_events(events, ref, queue)
      take_pc_events(queue, counter, stage)
    _ ->
      msg = {:"$gen_producer", {self(), ref}, {:cancel, :unknown_subscription}}
      send_noconnect(producer_pid, msg)
      {:noreply, stage}
  end
end
# consumer: split events into min/max batches and invoke handle_events.
def handle_info(
      {:"$gen_consumer", {producer_pid, ref} = from, events},
      %{type: :consumer, producers: producers, mod: mod, state: state} = stage
    )
    when is_list(events) do
  case producers do
    %{^ref => entry} ->
      {batches, stage} = consumer_receive(from, entry, events, stage)
      consumer_dispatch(batches, from, mod, state, stage, false)
    _ ->
      msg = {:"$gen_producer", {self(), ref}, {:cancel, :unknown_subscription}}
      send_noconnect(producer_pid, msg)
      {:noreply, stage}
  end
end
def handle_info({:"$gen_consumer", {_, ref}, {:cancel, reason}}, stage) do
  consumer_cancel(ref, :cancel, reason, stage)
end
## Catch-all messages
def handle_info(msg, %{state: state} = stage) do
  noreply_callback(:handle_info, [msg, state], stage)
end
@doc false
# Optional callback: only invoke the user module's terminate/2 when it is
# actually exported, mirroring GenServer's behaviour.
def terminate(reason, %{mod: mod, state: state}) do
  case function_exported?(mod, :terminate, 2) do
    true -> mod.terminate(reason, state)
    false -> :ok
  end
end
@doc false
# Optional callback: delegates to the user module's code_change/3 when it
# exists, rewrapping its new inner state into the stage struct.
def code_change(old_vsn, %{mod: mod, state: state} = stage, extra) do
  if function_exported?(mod, :code_change, 3) do
    case mod.code_change(old_vsn, state, extra) do
      {:ok, state} -> {:ok, %{stage | state: state}}
      other -> other
    end
  else
    # Bug fix: must return the whole stage (the GenServer state), not the
    # inner user state — otherwise a hot upgrade of a module without
    # code_change/3 would replace the stage struct with the user state.
    {:ok, stage}
  end
end
@doc false
# :sys.get_status support: merges stage information with the user module's
# optional format_status/2, falling back to raw data when it is absent or
# raises.
def format_status(opt, [pdict, %{mod: mod, state: state} = stage]) do
  case {function_exported?(mod, :format_status, 2), opt} do
    {true, :normal} ->
      data = [{~c(State), state}] ++ format_status_for_stage(stage)
      format_status(mod, opt, pdict, state, data: data)
    {true, :terminate} ->
      format_status(mod, opt, pdict, state, state)
    {false, :normal} ->
      [data: [{~c(State), state}] ++ format_status_for_stage(stage)]
    {false, :terminate} ->
      state
  end
end
# Any crash inside the user's format_status/2 falls back to the default.
defp format_status(mod, opt, pdict, state, default) do
  try do
    mod.format_status(opt, [pdict, state])
  catch
    _, _ ->
      default
  end
end
# Per-type status rows (charlists as required by :sys formatting).
defp format_status_for_stage(%{
       type: :producer,
       consumers: consumers,
       buffer: buffer,
       dispatcher_mod: dispatcher_mod
     }) do
  consumer_pids = for {_, {pid, _}} <- consumers, do: pid
  [
    {~c(Stage), :producer},
    {~c(Dispatcher), dispatcher_mod},
    {~c(Consumers), consumer_pids},
    {~c(Buffer size), Buffer.estimate_size(buffer)}
  ]
end
defp format_status_for_stage(%{
       type: :producer_consumer,
       producers: producers,
       consumers: consumers,
       buffer: buffer,
       dispatcher_mod: dispatcher_mod
     }) do
  producer_pids = for {_, {pid, _, _}} <- producers, do: pid
  consumer_pids = for {_, {pid, _}} <- consumers, do: pid
  [
    {~c(Stage), :producer_consumer},
    {~c(Dispatcher), dispatcher_mod},
    {~c(Producers), producer_pids},
    {~c(Consumers), consumer_pids},
    {~c(Buffer size), Buffer.estimate_size(buffer)}
  ]
end
defp format_status_for_stage(%{type: :consumer, producers: producers}) do
  producer_pids = for {_, {pid, _, _}} <- producers, do: pid
  [{~c(Stage), :consumer}, {~c(Producers), producer_pids}]
end
## Shared helpers
# Invokes an optional/required callback on the user module and normalizes
# its return. handle_info and handle_cancel are optional; their absence is
# tolerated (with a warning for handle_info, silently for handle_cancel).
defp noreply_callback(:handle_info, [msg, state], %{mod: mod} = stage) do
  if function_exported?(mod, :handle_info, 2) do
    handle_noreply_callback(mod.handle_info(msg, state), stage)
  else
    log = '** Undefined handle_info in ~tp~n** Unhandled message: ~tp~n'
    :error_logger.warning_msg(log, [mod, msg])
    {:noreply, %{stage | state: state}}
  end
end
defp noreply_callback(:handle_cancel, [subscription, from, state], %{mod: mod} = stage) do
  if function_exported?(mod, :handle_cancel, 3) do
    handle_noreply_callback(mod.handle_cancel(subscription, from, state), stage)
  else
    {:noreply, %{stage | state: state}}
  end
end
defp noreply_callback(callback, args, %{mod: mod} = stage) do
  handle_noreply_callback(apply(mod, callback, args), stage)
end
# Normalizes a {:noreply, events, state[, :hibernate]} or {:stop, ...}
# return, dispatching any emitted events downstream.
defp handle_noreply_callback(return, stage) do
  case return do
    {:noreply, events, state} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      {:noreply, %{stage | state: state}}
    {:noreply, events, state, :hibernate} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      {:noreply, %{stage | state: state}, :hibernate}
    {:stop, reason, state} ->
      {:stop, reason, %{stage | state: state}}
    other ->
      {:stop, {:bad_return_value, other}, stage}
  end
end
## Producer helpers
# Switches a producer between :forward and :accumulate demand modes.
defp producer_demand(:forward, %{type: :producer_consumer} = stage) do
  # That's the only mode on producer consumers.
  {:noreply, stage}
end
defp producer_demand(_mode, %{type: type} = stage) when type != :producer do
  error_msg = 'Demand mode can only be set for producers, GenStage ~tp is a ~ts'
  :error_logger.error_msg(error_msg, [Utils.self_name(), type])
  {:noreply, stage}
end
# Switching to :forward replays all accumulated demands (oldest first,
# hence the reverse) through handle_demand, stopping early on {:stop, ...}.
defp producer_demand(:forward, %{events: events} = stage) do
  stage = %{stage | events: :forward}
  if is_list(events) do
    fold_fun = fn
      d, {:noreply, %{state: state} = stage} ->
        noreply_callback(:handle_demand, [d, state], stage)
      d, {:noreply, %{state: state} = stage, _} ->
        noreply_callback(:handle_demand, [d, state], stage)
      _, {:stop, _, _} = acc ->
        acc
    end
    :lists.foldl(fold_fun, {:noreply, stage}, :lists.reverse(events))
  else
    {:noreply, stage}
  end
end
defp producer_demand(:accumulate, %{events: events} = stage) do
  if is_list(events) do
    {:noreply, stage}
  else
    {:noreply, %{stage | events: []}}
  end
end
# Runs the optional handle_subscribe callback, then registers the consumer
# with the dispatcher.
defp producer_subscribe(opts, from, stage) do
  %{mod: mod, state: state, dispatcher_state: dispatcher_state} = stage
  case maybe_subscribe(mod, :consumer, opts, from, state) do
    {:automatic, state} ->
      # Call the dispatcher after since it may generate demand and the
      # main module must know the consumer is subscribed.
      dispatcher_callback(:subscribe, [opts, from, dispatcher_state], %{stage | state: state})
    {:stop, reason, state} ->
      {:stop, reason, %{stage | state: state}}
    other ->
      {:stop, {:bad_return_value, other}, stage}
  end
end
# handle_subscribe/4 is optional; defaults to automatic demand handling.
defp maybe_subscribe(mod, type, opts, from, state) do
  if function_exported?(mod, :handle_subscribe, 4) do
    mod.handle_subscribe(type, opts, from, state)
  else
    {:automatic, state}
  end
end
# A resubscription may request cancelling a previous subscription first.
defp maybe_producer_cancel({ref, reason}, stage) do
  producer_cancel(ref, :cancel, reason, stage)
end
defp maybe_producer_cancel(nil, stage) do
  {:noreply, stage}
end
# Removes a consumer, notifies it, then informs the user module and the
# dispatcher (in that order).
defp producer_cancel(ref, kind, reason, stage) do
  %{consumers: consumers, monitors: monitors, state: state} = stage
  case Map.pop(consumers, ref) do
    {nil, _consumers} ->
      {:noreply, stage}
    {{pid, mon_ref}, consumers} ->
      Process.demonitor(mon_ref, [:flush])
      send_noconnect(pid, {:"$gen_consumer", {self(), ref}, {:cancel, reason}})
      stage = %{stage | consumers: consumers, monitors: Map.delete(monitors, mon_ref)}
      case noreply_callback(:handle_cancel, [{kind, reason}, {pid, ref}, state], stage) do
        {:noreply, %{dispatcher_state: dispatcher_state} = stage} ->
          # Call the dispatcher after since it may generate demand and the
          # main module must know the consumer is no longer subscribed.
          dispatcher_callback(:cancel, [{pid, ref}, dispatcher_state], stage)
        {:stop, _, _} = stop ->
          stop
      end
  end
end
# Applies a dispatcher callback and then satisfies any demand it produced:
# first from the buffer, then (producer) via handle_demand or the
# accumulation list, or (producer_consumer) from the upstream event queue.
defp dispatcher_callback(callback, args, %{dispatcher_mod: dispatcher_mod} = stage) do
  {:ok, counter, dispatcher_state} = apply(dispatcher_mod, callback, args)
  case stage do
    %{type: :producer_consumer, events: {queue, demand}} ->
      counter = demand + counter
      stage = %{stage | dispatcher_state: dispatcher_state, events: {queue, counter}}
      {:ok, _, stage} = take_from_buffer(counter, stage)
      %{events: {queue, counter}} = stage
      take_pc_events(queue, counter, stage)
    %{} ->
      case take_from_buffer(counter, %{stage | dispatcher_state: dispatcher_state}) do
        {:ok, 0, stage} ->
          {:noreply, stage}
        {:ok, counter, %{events: :forward, state: state} = stage} ->
          noreply_callback(:handle_demand, [counter, state], stage)
        {:ok, counter, %{events: events} = stage} when is_list(events) ->
          {:noreply, %{stage | events: [counter | events]}}
      end
  end
end
# Hands events to the dispatcher; whatever it cannot place (no consumers,
# or leftover after dispatch) goes into the buffer.
defp dispatch_events([], _length, stage) do
  stage
end
defp dispatch_events(events, _length, %{type: :consumer} = stage) do
  error_msg =
    'GenStage consumer ~tp cannot dispatch events (an empty list must be returned): ~tp~n'
  :error_logger.error_msg(error_msg, [Utils.self_name(), events])
  stage
end
# No consumers subscribed yet: buffer everything.
defp dispatch_events(events, _length, %{consumers: consumers} = stage)
     when map_size(consumers) == 0 do
  buffer_events(events, stage)
end
defp dispatch_events(events, length, stage) do
  %{dispatcher_mod: dispatcher_mod, dispatcher_state: dispatcher_state} = stage
  {:ok, events, dispatcher_state} = dispatcher_mod.dispatch(events, length, dispatcher_state)
  stage =
    case stage do
      %{type: :producer_consumer, events: {queue, demand}} ->
        # Account for what was actually delivered against pending demand.
        demand = demand - (length - length(events))
        %{stage | dispatcher_state: dispatcher_state, events: {queue, max(demand, 0)}}
      %{} ->
        %{stage | dispatcher_state: dispatcher_state}
    end
  buffer_events(events, stage)
end
# Drains up to `counter` buffered events (plus any permanent info messages
# interleaved with them) and dispatches them; returns the remaining demand.
defp take_from_buffer(counter, %{buffer: buffer} = stage) do
  case Buffer.take_count_or_until_permanent(buffer, counter) do
    :empty ->
      {:ok, counter, stage}
    {:ok, buffer, new_counter, temps, perms} ->
      # Update the buffer because dispatch events may
      # trigger more events to be buffered.
      stage = dispatch_events(temps, counter - new_counter, %{stage | buffer: buffer})
      stage = :lists.foldl(&dispatch_info/2, stage, perms)
      take_from_buffer(new_counter, stage)
  end
end
# Stores undelivered events; excess beyond the buffer size is discarded
# (with a warning) and any displaced permanent messages are dispatched.
defp buffer_events([], stage) do
  stage
end
defp buffer_events(events, %{buffer: buffer, buffer_keep: keep} = stage) do
  {buffer, excess, perms} = Buffer.store_temporary(buffer, events, keep)
  case excess do
    0 ->
      :ok
    excess ->
      error_msg = 'GenStage producer ~tp has discarded ~tp events from buffer'
      :error_logger.warning_msg(error_msg, [Utils.self_name(), excess])
  end
  :lists.foldl(&dispatch_info/2, %{stage | buffer: buffer}, perms)
end
## Info helpers
# sync/async_info: consumers deliver the message to themselves; producers
# order it behind buffered events; producer_consumers additionally order
# it behind the pending upstream event queue.
defp producer_info(msg, %{type: :consumer} = stage) do
  send(self(), msg)
  {:reply, :ok, stage}
end
defp producer_info(msg, %{type: :producer_consumer, events: {queue, demand}} = stage) do
  stage =
    if :queue.is_empty(queue) do
      buffer_or_dispatch_info(msg, stage)
    else
      %{stage | events: {:queue.in({:info, msg}, queue), demand}}
    end
  {:reply, :ok, stage}
end
defp producer_info(msg, %{type: :producer} = stage) do
  {:reply, :ok, buffer_or_dispatch_info(msg, stage)}
end
# If the buffer holds events, the info message is stored behind them as a
# permanent entry; otherwise it is forwarded to the dispatcher right away.
defp buffer_or_dispatch_info(msg, %{buffer: buffer} = stage) do
  case Buffer.store_permanent_unless_empty(buffer, msg) do
    :empty -> dispatch_info(msg, stage)
    {:ok, buffer} -> %{stage | buffer: buffer}
  end
end
defp dispatch_info(msg, stage) do
  %{dispatcher_mod: dispatcher_mod, dispatcher_state: dispatcher_state} = stage
  {:ok, dispatcher_state} = dispatcher_mod.info(msg, dispatcher_state)
  %{stage | dispatcher_state: dispatcher_state}
end
## Consumer helpers
# Subscribes to each entry of the :subscribe_to option during init,
# stopping at the first failure.
defp consumer_init_subscribe(producers, stage) do
  fold_fun = fn
    to, {:ok, stage} ->
      case consumer_subscribe(to, stage) do
        {:reply, _, stage} -> {:ok, stage}
        {:stop, reason, _, _} -> {:stop, reason}
        {:stop, reason, _} -> {:stop, reason}
      end
    _, {:stop, reason} ->
      {:stop, reason}
  end
  :lists.foldl(fold_fun, {:ok, stage}, producers)
end
# Splits incoming events into min/max demand batches (automatic mode) and
# records the updated outstanding demand; manual mode takes events as-is.
defp consumer_receive({_, ref} = from, {producer_id, cancel, {demand, min, max}}, events, stage) do
  {demand, batches} = Utils.split_batches(events, from, min, max, demand)
  stage = put_in(stage.producers[ref], {producer_id, cancel, {demand, min, max}})
  {batches, stage}
end
defp consumer_receive(_, {_, _, :manual}, events, stage) do
  {[{events, 0}], stage}
end
# Feeds each batch to handle_events, re-asking the producer for `ask`
# events after each batch; tracks whether the last return asked to
# hibernate.
defp consumer_dispatch([{batch, ask} | batches], from, mod, state, stage, _hibernate?) do
  case mod.handle_events(batch, from, state) do
    {:noreply, events, state} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      ask(from, ask, [:noconnect])
      consumer_dispatch(batches, from, mod, state, stage, false)
    {:noreply, events, state, :hibernate} when is_list(events) ->
      stage = dispatch_events(events, length(events), stage)
      ask(from, ask, [:noconnect])
      consumer_dispatch(batches, from, mod, state, stage, true)
    {:stop, reason, state} ->
      {:stop, reason, %{stage | state: state}}
    other ->
      {:stop, {:bad_return_value, other}, %{stage | state: state}}
  end
end
defp consumer_dispatch([], _from, _mod, state, stage, false) do
  {:noreply, %{stage | state: state}}
end
defp consumer_dispatch([], _from, _mod, state, stage, true) do
  {:noreply, %{stage | state: state}, :hibernate}
end
# Entry points used by consumer_init_subscribe (no previous subscription).
defp consumer_subscribe({to, opts}, stage) when is_list(opts),
  do: consumer_subscribe(nil, to, opts, stage)
defp consumer_subscribe(to, stage), do: consumer_subscribe(nil, to, [], stage)
defp consumer_subscribe(_cancel, to, _opts, %{type: :producer} = stage) do
  error_msg = 'GenStage producer ~tp cannot be subscribed to another stage: ~tp~n'
  :error_logger.error_msg(error_msg, [Utils.self_name(), to])
  {:reply, {:error, :not_a_consumer}, stage}
end
# Validates demand options, resolves and monitors the producer. If the
# producer is not alive, :permanent/:transient subscriptions stop the
# stage with :noproc (still replying {:ok, ref}); :temporary ones carry on.
defp consumer_subscribe(current, to, opts, stage) do
  with {:ok, max, _} <- Utils.validate_integer(opts, :max_demand, 1000, 1, :infinity, false),
       {:ok, min, _} <-
         Utils.validate_integer(opts, :min_demand, div(max, 2), 0, max - 1, false),
       {:ok, cancel, _} <-
         Utils.validate_in(opts, :cancel, :permanent, [:temporary, :transient, :permanent]) do
    producer_pid = GenServer.whereis(to)
    cond do
      producer_pid != nil ->
        ref = Process.monitor(producer_pid)
        msg = {:"$gen_producer", {self(), ref}, {:subscribe, current, opts}}
        send_noconnect(producer_pid, msg)
        consumer_subscribe(opts, ref, producer_pid, cancel, min, max, stage)
      cancel == :permanent or cancel == :transient ->
        {:stop, :noproc, {:ok, make_ref()}, stage}
      cancel == :temporary ->
        {:reply, {:ok, make_ref()}, stage}
    end
  else
    {:error, message} ->
      error_msg = 'GenStage consumer ~tp subscribe received invalid option: ~ts~n'
      :error_logger.error_msg(error_msg, [Utils.self_name(), message])
      {:reply, {:error, {:bad_opts, message}}, stage}
  end
end
# Runs the optional handle_subscribe callback; automatic mode immediately
# asks for max demand, manual mode leaves demand to the user module.
defp consumer_subscribe(opts, ref, producer_pid, cancel, min, max, stage) do
  %{mod: mod, state: state} = stage
  to = {producer_pid, ref}
  case maybe_subscribe(mod, :producer, opts, to, state) do
    {:automatic, state} ->
      ask(to, max, [:noconnect])
      stage = put_in(stage.producers[ref], {producer_pid, cancel, {max, min, max}})
      {:reply, {:ok, ref}, %{stage | state: state}}
    {:manual, state} ->
      stage = put_in(stage.producers[ref], {producer_pid, cancel, :manual})
      {:reply, {:ok, ref}, %{stage | state: state}}
    {:stop, reason, state} ->
      {:stop, reason, %{stage | state: state}}
    other ->
      {:stop, {:bad_return_value, other}, stage}
  end
end
# Removes a producer subscription; the actual user-visible cancellation
# may be deferred behind queued events on producer_consumers.
defp consumer_cancel(ref, kind, reason, %{producers: producers} = stage) do
  case Map.pop(producers, ref) do
    {nil, _producers} ->
      {:noreply, stage}
    {{producer_pid, mode, _}, producers} ->
      Process.demonitor(ref, [:flush])
      stage = %{stage | producers: producers}
      schedule_cancel(mode, {kind, reason}, {producer_pid, ref}, stage)
  end
end
# producer_consumer with pending events: enqueue the cancel so events
# received before it are still processed in order.
defp schedule_cancel(
       mode,
       kind_reason,
       pid_ref,
       %{type: :producer_consumer, events: {queue, demand}} = stage
     ) do
  if :queue.is_empty(queue) do
    invoke_cancel(mode, kind_reason, pid_ref, stage)
  else
    queue = :queue.in({:cancel, mode, kind_reason, pid_ref}, queue)
    {:noreply, %{stage | events: {queue, demand}}}
  end
end
defp schedule_cancel(mode, kind_reason, pid_ref, stage) do
  invoke_cancel(mode, kind_reason, pid_ref, stage)
end
# Runs handle_cancel, then stops the stage when the subscription mode
# requires it (:permanent always; :transient unless the reason is a
# normal-shutdown one).
defp invoke_cancel(mode, {_, reason} = kind_reason, {pid, _} = pid_ref, %{state: state} = stage) do
  case noreply_callback(:handle_cancel, [kind_reason, pid_ref, state], stage) do
    {:noreply, stage}
    when mode == :permanent
    when mode == :transient and not Utils.is_transient_shutdown(reason) ->
      error_msg =
        'GenStage consumer ~tp is stopping after receiving cancel from producer ~tp with reason: ~tp~n'
      :error_logger.info_msg(error_msg, [Utils.self_name(), pid, reason])
      {:stop, reason, stage}
    other ->
      other
  end
end
## Producer consumer helpers
# Enqueues a batch of upstream events tagged with its subscription ref.
defp put_pc_events(events, ref, queue) do
  :queue.in({events, ref}, queue)
end
# Replays a queued batch through consumer_receive/consumer_dispatch; if
# the producer was cancelled meanwhile, the batch is still processed with
# a synthetic from tuple and no re-ask.
defp send_pc_events(events, ref, %{mod: mod, state: state, producers: producers} = stage) do
  case producers do
    %{^ref => entry} ->
      {producer_id, _, _} = entry
      from = {producer_id, ref}
      {batches, stage} = consumer_receive(from, entry, events, stage)
      consumer_dispatch(batches, from, mod, state, stage, false)
    %{} ->
      # We queued but producer was removed
      consumer_dispatch([{events, 0}], {:pid, ref}, mod, state, stage, false)
  end
end
# Drains queued entries (events, deferred info messages and deferred
# cancels, in arrival order) while downstream demand remains.
defp take_pc_events(queue, counter, stage) when counter > 0 do
  case :queue.out(queue) do
    {{:value, {:info, msg}}, queue} ->
      take_pc_events(queue, counter, buffer_or_dispatch_info(msg, stage))
    {{:value, {:cancel, mode, kind_reason, pid_ref}}, queue} ->
      case invoke_cancel(mode, kind_reason, pid_ref, stage) do
        {:noreply, stage} ->
          take_pc_events(queue, counter, stage)
        {:noreply, stage, :hibernate} ->
          take_pc_events(queue, counter, stage)
        {:stop, _, _} = stop ->
          stop
      end
    {{:value, {events, ref}}, queue} ->
      case send_pc_events(events, ref, %{stage | events: {queue, counter}}) do
        {:noreply, %{events: {queue, counter}} = stage} ->
          take_pc_events(queue, counter, stage)
        {:noreply, %{events: {queue, counter}} = stage, :hibernate} ->
          take_pc_events(queue, counter, stage)
        {:stop, _, _} = stop ->
          stop
      end
    {:empty, queue} ->
      {:noreply, %{stage | events: {queue, counter}}}
  end
end
# It is OK to send more events than the consumer has
# asked (counter < 0) because those will always be buffered.
# Once we have taken from the buffer, the event queue will
# be adjusted again.
defp take_pc_events(queue, counter, stage) do
  {:noreply, %{stage | events: {queue, counter}}}
end
end
|
deps/gen_stage/lib/gen_stage.ex
| 0.874553
| 0.789031
|
gen_stage.ex
|
starcoder
|
defmodule AdventOfCode.Day09 do
  @moduledoc """
  Day 9: Smoke Basin — low points and basin sizes on a 2D heightmap.
  """
  import AdventOfCode.Utils

  @typep heightmap :: [[integer()]]
  @typep coordinates :: {integer(), integer()}

  @doc """
  Part 1: sum of risk levels (height + 1) over all low points.
  """
  @spec part1([binary()]) :: integer()
  def part1(args) do
    heightmap = parse_args(args)

    heightmap
    |> all_coordinates()
    |> Enum.filter(&low_point?(&1, heightmap))
    |> Enum.map(&(heightmap_at(&1, heightmap) + 1))
    |> Enum.sum()
  end

  @doc """
  Part 2: product of the three largest basin sizes.
  """
  @spec part2([binary()]) :: integer()
  def part2(args) do
    heightmap = parse_args(args)

    heightmap
    |> all_coordinates()
    |> Enum.filter(&low_point?(&1, heightmap))
    |> Enum.map(&basin_size(&1, heightmap))
    |> Enum.sort(:desc)
    |> Enum.take(3)
    |> Enum.product()
  end

  # Every {x, y} coordinate pair of the heightmap.
  @spec all_coordinates(heightmap()) :: [coordinates()]
  defp all_coordinates(map) do
    for {line, y} <- Enum.with_index(map), {_, x} <- Enum.with_index(line), do: {x, y}
  end

  # Iteratively find all coordinates belonging to a basin from a starting point.
  @spec basin_size(coordinates(), heightmap()) :: integer()
  defp basin_size(coords, heightmap), do: fill_basin([coords], MapSet.new(), heightmap)

  # Flood fill: pop a coordinate, mark it, and enqueue any unvisited
  # neighbour whose height keeps it inside the basin (< 9).
  @spec fill_basin([coordinates()], MapSet.t(coordinates()), heightmap()) :: integer()
  defp fill_basin([], marked, _heightmap), do: MapSet.size(marked)

  defp fill_basin([coords | queue], marked, heightmap) do
    unmarked_adjacent =
      coords
      |> adjacent_coordinates()
      |> Enum.reject(&MapSet.member?(marked, &1))
      |> Enum.filter(&in_basin?(heightmap_at(&1, heightmap)))

    fill_basin(unmarked_adjacent ++ queue, MapSet.put(marked, coords), heightmap)
  end

  # A low point is strictly lower than all in-bounds neighbours.
  @spec low_point?(coordinates(), heightmap()) :: boolean()
  defp low_point?(coords, heightmap) do
    height = heightmap_at(coords, heightmap)

    coords
    |> adjacent_heights(heightmap)
    |> Enum.all?(&lower?(height, &1))
  end

  # Fix: out-of-bounds neighbours yield nil, so the spec includes it.
  @spec adjacent_heights(coordinates(), heightmap()) :: [integer() | nil]
  defp adjacent_heights(coords, heightmap) do
    coords |> adjacent_coordinates() |> Enum.map(&heightmap_at(&1, heightmap))
  end

  # Von Neumann (4-way) neighbourhood.
  @spec adjacent_coordinates(coordinates()) :: [coordinates()]
  defp adjacent_coordinates({x, y}), do: [{x, y - 1}, {x - 1, y}, {x + 1, y}, {x, y + 1}]

  # nil (out of bounds) never disqualifies a low point.
  @spec lower?(integer(), integer() | nil) :: boolean()
  defp lower?(element, adjacent), do: adjacent == nil or element < adjacent

  @spec in_basin?(integer() | nil) :: boolean()
  defp in_basin?(height), do: height != nil and height < 9

  # Retrieve the element at the specified coordinates, or nil if out of bounds.
  # Generalized guard (x < 0 instead of x == -1): Enum.at would otherwise
  # wrap around on any negative index.
  @spec heightmap_at(coordinates(), heightmap()) :: integer() | nil
  defp heightmap_at({x, y}, _) when x < 0 or y < 0, do: nil
  defp heightmap_at({x, y}, heightmap), do: heightmap |> Enum.at(y, []) |> Enum.at(x)

  # Create a two-dimensional integer heightmap from the input lines.
  @spec parse_args([binary()]) :: heightmap()
  defp parse_args(args), do: Enum.map(args, &parse_line/1)

  @spec parse_line(binary()) :: [integer()]
  defp parse_line(line), do: line |> String.graphemes() |> Enum.map(&parse_int!/1)
end
|
lib/advent_of_code/day_09.ex
| 0.839339
| 0.571767
|
day_09.ex
|
starcoder
|
defmodule ExAlgo.List.CircularList do
@moduledoc """
Implementation of a circular list.
"""
@type neg_index_error :: {:error, :negative_index}
@type empty_error :: {:error, :empty_list}
@type value_type :: any()
@type t :: %__MODULE__{visited: [value_type()], upcoming: [value_type()]}
defstruct visited: [], upcoming: []
@doc """
Creates an empty circular list.
## Example
iex> CircularList.new
%CircularList{visited: [], upcoming: []}
"""
@spec new :: t()
def new, do: %__MODULE__{visited: [], upcoming: []}
@doc """
Creates a circular list from a list
## Example
iex> CircularList.from 1..3
%CircularList{upcoming: [1, 2, 3]}
"""
@spec from(Enumerable.t()) :: t()
def from(enumerable), do: %__MODULE__{upcoming: Enum.to_list(enumerable)}
@doc """
Inserts a new element on the head of the list.
## Example
iex> list = CircularList.from 1..3
iex> list |> CircularList.insert(10)
%CircularList{upcoming: [10, 1, 2, 3]}
"""
@spec insert(t(), value_type()) :: t()
def insert(%__MODULE__{upcoming: right}, element),
do: %__MODULE__{upcoming: [element | right]}
@doc """
Rewinds the list back to initial.
## Example
iex> CircularList.new == CircularList.new |> CircularList.rewind()
true
iex> list = CircularList.from 1..5
iex> list ==
...> list
...> |> CircularList.next()
...> |> CircularList.next()
...> |> CircularList.next()
...> |> CircularList.rewind()
true
"""
@spec rewind(t()) :: t()
def rewind(%__MODULE__{visited: [], upcoming: _} = list), do: list
def rewind(%__MODULE__{visited: visited, upcoming: upcoming}),
do: %__MODULE__{visited: [], upcoming: Enum.reverse(visited) ++ upcoming}
@doc """
Removes the head.

## Example

    iex> list = CircularList.from 1..3
    iex> list |> CircularList.remove()
    {1, %CircularList{visited: [], upcoming: [2, 3]}}
    iex> CircularList.new() |> CircularList.remove()
    {:error, :empty_list}
    iex> 1..3
    ...> |> CircularList.from()
    ...> |> CircularList.next()
    ...> |> CircularList.next()
    ...> |> CircularList.next()
    ...> |> CircularList.remove()
    {1, %CircularList{visited: [], upcoming: [2, 3]}}
"""
@spec remove(t()) :: {value_type(), t()} | empty_error()
def remove(%__MODULE__{visited: [], upcoming: []}), do: {:error, :empty_list}
# Cursor ran off the end: wrap around before removing.
def remove(%__MODULE__{visited: _, upcoming: []} = list), do: list |> rewind() |> remove()
# Fix: keep `visited` in the returned struct — the previous clause rebuilt the
# struct from `upcoming` only, dropping every already-visited element.
def remove(%__MODULE__{visited: visited, upcoming: [head | rest]}),
  do: {head, %__MODULE__{visited: visited, upcoming: rest}}
@doc """
Returns the head of the circular list

## Example

    iex> CircularList.from(1..10) |> CircularList.head()
    1
    iex> CircularList.new |> CircularList.head()
    {:error, :empty_list}
    iex> CircularList.from([1]) |> CircularList.next() |> CircularList.head()
    1
"""
# Fix: the spec previously declared `empty_error()` as the only return type,
# but two of the three clauses return the element itself.
@spec head(t()) :: value_type() | empty_error()
def head(%__MODULE__{visited: [], upcoming: []}), do: {:error, :empty_list}
def head(%__MODULE__{upcoming: [head | _]}), do: head
# Cursor ran off the end: wrap around to the first element.
def head(%__MODULE__{visited: left, upcoming: []}),
  do: head(%__MODULE__{visited: [], upcoming: Enum.reverse(left)})
@doc """
Moves the cursor forward.

## Example

    iex> CircularList.from(1..3) |> CircularList.next()
    %CircularList{visited: [1], upcoming: [2, 3]}
    iex> CircularList.from(1..3) |> CircularList.next() |> CircularList.next() |> CircularList.next()
    %CircularList{visited: [3, 2, 1], upcoming: []}
    iex> CircularList.from(1..2) |> CircularList.next() |> CircularList.next() |> CircularList.next()
    %CircularList{visited: [], upcoming: [1, 2]}
    iex> CircularList.new |> CircularList.next()
    {:error, :empty_list}
"""
@spec next(t()) :: t() | empty_error()
def next(%__MODULE__{visited: [], upcoming: []}), do: {:error, :empty_list}

def next(%__MODULE__{visited: seen, upcoming: [current | rest]}) do
  # Push the current element onto the visited stack and advance.
  %__MODULE__{visited: [current | seen], upcoming: rest}
end

def next(%__MODULE__{visited: seen, upcoming: []}) do
  # End of the cycle: wrap around so the next step revisits the first element.
  %__MODULE__{visited: [], upcoming: Enum.reverse(seen)}
end
@doc """
Return the element at index. Index is 0 based and must be non-negative.
Errors on empty list. Indexing wraps around the cycle, so an index beyond
the length keeps cycling (see the second example).

## Example

    iex> alias ExAlgo.List.CircularList
    iex> CircularList.from(0..10) |> CircularList.at(3)
    3
    iex> alias ExAlgo.List.CircularList
    iex> CircularList.from(0..10) |> CircularList.at(13)
    2
"""
# Fix: the index parameter was typed as `value_type()` (the element type) —
# it is an integer. The negative-index error is spelled out as a literal union
# so the spec no longer depends on an undocumented type alias.
@spec at(t(), integer()) :: value_type() | empty_error() | {:error, :negative_index}
def at(%__MODULE__{visited: [], upcoming: []}, _index), do: {:error, :empty_list}
def at(_list, index) when index < 0, do: {:error, :negative_index}
def at(list, 0), do: head(list)
# Wrap around before walking further.
def at(%__MODULE__{upcoming: []} = list, index), do: list |> rewind() |> at(index)
def at(list, index), do: list |> next() |> at(index - 1)
@doc """
Converts a circular list into a List.

## Example

    iex> CircularList.new() |> CircularList.to_list()
    []
    iex> CircularList.from(1..4)
    ...> |> CircularList.next()
    ...> |> CircularList.next()
    ...> |> CircularList.to_list()
    [1, 2, 3, 4]
"""
@spec to_list(t()) :: [value_type()]
# `Enum.reverse/2` restores the visited elements to original order and
# appends the upcoming ones in a single traversal.
def to_list(%__MODULE__{visited: seen, upcoming: pending}), do: Enum.reverse(seen, pending)
end
|
lib/ex_algo/list/circular_list.ex
| 0.862496
| 0.564339
|
circular_list.ex
|
starcoder
|
defmodule Day12 do
  @moduledoc """
  N-body / moon motion simulation (appears to be Advent of Code day 12 —
  TODO confirm): each moon has an integer 3D position and velocity, gravity
  nudges every coordinate one step toward each other moon, then velocity is
  added to position.
  """

  # Part 1: total system energy after 1000 simulation steps.
  def part1(path) do
    File.stream!(path) |> simulate_steps(1000)
  end

  # Part 2: number of steps until the system first repeats its initial state.
  def part2(path) do
    File.stream!(path) |> find_cycle()
  end

  # The three axes evolve independently, so the full cycle length is the LCM
  # of the per-axis cycle lengths (axes indexed 0..2 = x, y, z).
  def find_cycle(lines) do
    moons = parse(lines)
    0..2
    |> Enum.map(fn dir ->
      target = direction(moons, dir)
      find_period(moons, dir, target, 1)
    end)
    |> lcm()
  end

  # Steps the simulation until axis `dir` matches `target` (the initial
  # per-axis positions and velocities) again; returns the step count.
  def find_period(moons, dir, target, i) do
    moons = step(moons)
    case direction(moons, dir) do
      ^target -> i
      _ -> find_period(moons, dir, target, i+1)
    end
  end

  # Projects the whole system onto a single axis: collects every moon's
  # position and velocity along `dir` into one comparable map.
  def direction(moons, dir) do
    Enum.reduce(moons, %{vel: [], pos: []}, &direction(&1, &2, dir))
  end

  def direction(%{pos: pos, vel: vel}, acc, dir) do
    p = Enum.at(pos, dir)
    v = Enum.at(vel, dir)
    acc = Map.update!(acc, :vel, fn list -> [v | list] end)
    acc = Map.update!(acc, :pos, fn list -> [p | list] end)
    acc
  end

  # Runs `steps` simulation steps, then returns the total energy.
  def simulate_steps(lines, steps) do
    moons = parse(lines)
    1..steps
    |> Enum.reduce(moons, fn _, acc -> step(acc) end)
    |> energy()
  end

  # One simulation step: apply gravity to every velocity, then apply each
  # velocity to its position.
  defp step(moons) do
    moons
    |> Enum.map(&velocity(&1, moons))
    |> Enum.map(&position/1)
  end

  # Total energy = sum over moons of potential (sum |pos|) * kinetic (sum |vel|).
  def energy(moons) do
    moons
    |> Enum.map(fn %{pos: pos, vel: vel} ->
      abs_sum(pos) * abs_sum(vel)
    end)
    |> Enum.sum()
  end

  def position(%{pos: pos, vel: vel} = moon) do
    pos = add_xyz(pos,vel)
    %{moon | pos: pos }
  end

  # Gravity: every other moon pulls each coordinate by -1/0/+1 depending on
  # comparison. A moon compared with itself contributes 0, so including the
  # moon in `moons` is harmless.
  def velocity(%{pos: pos, vel: vel} = moon, moons) do
    vel = moons
    |> Enum.map(&Map.fetch!(&1, :pos))
    |> Enum.map(&cmp_xyz(&1, pos))
    |> Enum.reduce(vel, &add_xyz/2)
    %{moon | vel: vel}
  end

  def parse(lines) do
    Enum.map(lines, &parse_line/1)
  end

  # Parses a line like "<x=-1, y=2, z=3>" into %{pos: [-1, 2, 3], vel: [0, 0, 0]}.
  def parse_line(line) do
    ~r/<x=(-?\d+), y=(-?\d+), z=(-?\d+)>/
    |> Regex.run(line)
    |> Enum.drop(1)
    |> Enum.map(&String.to_integer/1)
    |> pack()
  end

  defp pack(pos), do: %{pos: pos, vel: [0,0,0]}

  # Least common multiple of a list via pairwise lcm(a, b) = a*b / gcd(a, b).
  defp lcm(n), do: Enum.reduce(n, fn x, acc -> div((x*acc), Integer.gcd(x,acc)) end)
  defp abs_sum(xyz), do: Enum.map(xyz, &abs/1) |> Enum.sum()
  defp cmp_xyz(m1,m2), do: zip_map(m1, m2, &cmp/1)
  defp add_xyz(m1, m2), do: zip_map(m1, m2, &add/1)
  defp zip_map(x,y, op), do: Enum.zip(x,y) |> Enum.map(op)
  defp cmp({x,y}) when x == y, do: 0
  defp cmp({x,y}) when x > y, do: 1
  defp cmp({x,y}) when x < y, do: -1
  defp add({x,y}), do: x+y
end
|
lib/day_12.ex
| 0.59749
| 0.549943
|
day_12.ex
|
starcoder
|
defmodule Indicado.MFI do
  @moduledoc """
  This is the MFI module used for calculating Money Flow Index
  """

  @typedoc """
  The argument passed to eval functions should be a list of mfi_data_map type.
  """
  @type mfi_data_map :: %{
          low: float,
          high: float,
          close: float,
          volume: float
        }

  @doc """
  Calculates MFI for the list. It needs a list of mfi_data_map and the length
  of the list should be at least 1 higher than the period.
  Returns `{:ok, mfi_list}` or `{:error, reason}`

  ## Examples

      iex> Indicado.MFI.eval([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 2)
      {:ok, [41.860465116279066, 32.432432432432435]}
      iex> Indicado.MFI.eval([%{low: 2, high: 4, close: 4, volume: 1}, %{low: 2, high: 5, close: 3, volume: 5}, %{low: 5, high: 8, close: 5, volume: 5}, %{low: 3, high: 5, close: 5, volume: 3}, %{low: 1, high: 3, close: 2, volume: 10}], 3)
      {:ok, [69.76744186046511, 47.61904761904762]}
      iex> Indicado.MFI.eval([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 5)
      {:error, :not_enough_data}
      iex> Indicado.MFI.eval([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 0)
      {:error, :bad_period}
  """
  # Fix: the success and error branches were nested inside one `{:ok, ...}`
  # tuple in the spec; they are alternative top-level return shapes.
  @spec eval(nonempty_list(mfi_data_map), pos_integer) ::
          {:ok, nonempty_list(float)} | {:error, atom}
  def eval(list, period), do: calc(list, period)

  @doc """
  Calculates MFI for the list. It needs a list of mfi_data_map and the length
  of the list should be at least 1 higher than the period.
  Raises `NotEnoughDataError` if the given list is not long enough for calculating MFI.
  Raises `BadPeriodError` if period is an unacceptable number.

  ## Examples

      iex> Indicado.MFI.eval!([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 2)
      [41.860465116279066, 32.432432432432435]
      iex> Indicado.MFI.eval!([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 5)
      ** (NotEnoughDataError) not enough data
      iex> Indicado.MFI.eval!([%{low: 1, high: 3, close: 2, volume: 1}, %{low: 2, high: 4, close: 3, volume: 2}, %{low: 1, high: 2, close: 2, volume: 5}, %{low: 3, high: 5, close: 4, volume: 1}], 0)
      ** (BadPeriodError) bad period
  """
  @spec eval!(nonempty_list(mfi_data_map), pos_integer) :: nonempty_list(float) | no_return
  def eval!(list, period) do
    case calc(list, period) do
      {:ok, result} -> result
      {:error, :not_enough_data} -> raise NotEnoughDataError
      {:error, :bad_period} -> raise BadPeriodError
    end
  end

  # Walks the list with a sliding window of `period + 1` rows, emitting one
  # MFI value per full window. Results accumulate newest-first and are
  # reversed once at the end.
  defp calc(list, period, results \\ [])
  defp calc([], _period, []), do: {:error, :not_enough_data}
  defp calc(_list, period, _results) when period < 1, do: {:error, :bad_period}
  defp calc([], _period, results), do: {:ok, Enum.reverse(results)}

  # Remaining data is shorter than one window; nothing more can be emitted.
  # NOTE(review): `length/1` in the guard is O(n) per recursion step — fine
  # for typical inputs, but worth tracking a counter for very long series.
  defp calc([_head | tail] = list, period, results) when length(list) < period + 1 do
    calc(tail, period, results)
  end

  defp calc([_head | tail] = list, period, results) do
    # Classify each consecutive-pair money flow as positive or negative, then
    # sum each side. `Enum.group_by/2` never produces empty value lists (the
    # old `{type, []}` clause was unreachable), and `put_new` supplies 0.0 for
    # a side that is entirely absent in this window.
    money_flows =
      list
      |> Enum.take(period + 1)
      |> Enum.chunk_every(2, 1, :discard)
      |> Enum.map(fn [x, y] -> money_flow(x, y) end)
      |> Enum.group_by(fn x -> if x > 0, do: :pos, else: :neg end)
      |> Map.new(fn {type, values} -> {type, Enum.sum(values)} end)
      |> Map.put_new(:pos, 0.0)
      |> Map.put_new(:neg, 0.0)

    if money_flows.neg == 0 do
      # No negative flow: the money flow ratio diverges, so MFI pins at 100.
      calc(tail, period, [100 | results])
    else
      mfr = money_flows.pos / abs(money_flows.neg)
      mfi = 100 - 100 / (1 + mfr)
      calc(tail, period, [mfi | results])
    end
  end

  # Raw money flow between two consecutive rows: typical price * volume,
  # signed by the direction of the typical-price change (0 when unchanged).
  defp money_flow(x, y) do
    case typical_price(y) - typical_price(x) do
      n when n > 0 -> typical_price(y) * y.volume
      n when n < 0 -> -1 * typical_price(y) * y.volume
      _ -> 0
    end
  end

  defp typical_price(row), do: (row.high + row.low + row.close) / 3
end
|
lib/indicado/mfi.ex
| 0.914463
| 0.827619
|
mfi.ex
|
starcoder
|
defmodule HeartCheck.Executor do
  @moduledoc """
  Handles the execution of the checks in a HeartCheck module.
  Spawns several `Task`s for the checks, execute and wait for the result.
  Handles timeouts for the checks with the `{:error, "TIMEOUT"}` result.
  """
  require Logger

  @type result ::
          {String.t(),
           {term, :ok}
           | {term, {:error, term}}
           | {term, :error}}

  @doc """
  Executes the given `HeartCheck` module.
  Returns a `Keyword.t` with the results keyed by check name.
  """
  @spec execute(HeartCheck) :: Keyword.t()
  def execute(heartcheck) do
    checks = heartcheck.checks
    ref = make_ref()
    # Global deadline for the whole batch: after `heartcheck.timeout` ms we
    # receive `{ref, :timeout}` and stop waiting for outstanding checks.
    :timer.send_after(heartcheck.timeout, self(), {ref, :timeout})
    checks
    |> Enum.map(fn t -> {t, make_task(t, heartcheck, ref)} end)
    |> recv(ref)
  end

  @spec make_task(atom, HeartCheck, reference) :: Task.t()
  defp make_task(name, heartcheck, ref) do
    Task.async(fn ->
      log("(#{inspect(ref)}) Performing #{name}")
      # :timer.tc/3 returns {elapsed_microseconds, perform_check_result}.
      {ref, name, :timer.tc(heartcheck, :perform_check, [name])}
    end)
  end

  @spec recv([atom], reference()) :: Keyword.t()
  defp recv(checks, ref) do
    # Pre-fill every check with a TIMEOUT result; each reply overwrites its
    # entry, so anything that never answers stays marked as timed out.
    timeout_by_default = fn {name, _} ->
      {name, {0, {:error, "TIMEOUT"}}}
    end
    recv(checks, Enum.map(checks, timeout_by_default), ref)
  end

  @spec recv([atom], Keyword.t(), reference()) :: Keyword.t()
  defp recv([], results, _ref) do
    results
  end

  defp recv(checks, results, ref) do
    receive do
      # Task replies arrive as `{task_ref, payload}`; we pin on our own `ref`
      # inside the payload. NOTE(review): the `is_reference(ref)` guard is
      # always true here — `ref` is already bound to a reference.
      {_, {^ref, name, {time, result}}} when is_reference(ref) ->
        log_result(name, ref, result, time)
        new_result = Keyword.put(results, name, {time, result})
        recv(Keyword.delete(checks, name), new_result, ref)
      # Deadline reached: return what we have; unanswered checks keep the
      # TIMEOUT default. NOTE(review): late task replies are not drained and
      # will remain in the caller's mailbox.
      {^ref, :timeout} ->
        log("#{inspect(ref)} Execution timed out")
        results
    end
  end

  @spec log_result(atom, reference, :ok | :error | {:error, String.t()}, integer) ::
          :ok | {:error, term}
  defp log_result(name, ref, :ok, time) do
    log("#{inspect(ref)} #{name}: OK - Time: #{time}")
  end

  defp log_result(name, ref, :error, time) do
    log("#{inspect(ref)} #{name}: ERROR: unknown Time: #{time}")
  end

  defp log_result(name, ref, {:error, reason}, time) do
    log("#{inspect(ref)} #{name}: ERROR: #{inspect(reason)} Time: #{time}")
  end

  @spec log(String.t()) :: :ok | {:error, term}
  defp log(message) do
    Logger.info("[HeartCheck] #{message}")
  end
end
|
lib/heartcheck/executor.ex
| 0.839273
| 0.640341
|
executor.ex
|
starcoder
|
defmodule Mix.Tasks.Ecto.Gen.Erd do
  @moduledoc """
  A mix task to generate an ERD (Entity Relationship Diagram) in a dot format

  ## Examples

      $ mix ecto.gen.erd
      $ mix ecto.gen.erd --output-path=ecto_erd.dot
      $ mix ecto.gen.erd && dot -Tpng ecto_erd.dot -o erd.png && xdg-open erd.png

  See output and configuration examples in EXAMPLES group of PAGES section.

  ## Command line options

    * `--output-path` - the path to the output file, defaults to `ecto_erd.dot`.
    * `--config-path` - the path to the config file, defaults to `.ecto_erd.exs`.

  ## The configuration file

  When running a `mix ecto.gen.erd` task, it tries to read a configuration file from the `.ecto_erd.exs` file in a current
  working directory. Configuration file must return a keyword list.

  ### Options

    * `:fontname` - font name, defaults to `Roboto Mono`. Must be monospaced font if more than 1 column is displayed.
    * `:columns` - list of columns which will be displayed for each node (schema/source). Set to `[]` to hide fields completelly.
      Available columns: `:name` and `:type`. Defaults to `[:name, :type]`.
    * `:map_node` - function which allows to remove the node from the diagram or to move the node to the cluster. Defaults to `Function.identity/1`,
      which means that all nodes should be displayed and all of them are outside any cluster. Use `Ecto.ERD.Node.set_cluster/2` in this function to set a cluster.
      In order to remove the node, the function must return `nil`.
    * `:otp_app` - an application which will be scanned alongside with dependent applications in order to get a list of Ecto schemas.
      Defaults to `Mix.Project.config()[:app]`. You need to configure this option only if you run a task from umbrella root.

  Default values can be represented as follows:

      # .ecto_erd.exs
      [
        fontname: "Roboto Mono",
        columns: [:name, :type],
        map_node: &Function.identity/1,
        otp_app: Mix.Project.config()[:app]
      ]
  """
  use Mix.Task

  @requirements ["app.config"]

  @impl true
  def run(args) do
    {cli_opts, _argv} =
      OptionParser.parse!(args, strict: [output_path: :string, config_path: :string])

    file_opts = cli_opts |> Keyword.get(:config_path, ".ecto_erd.exs") |> load_config()
    output_path = cli_opts[:output_path] || file_opts[:output_path] || "ecto_erd.dot"

    dot =
      file_opts
      |> resolve_otp_app()
      |> Ecto.ERD.SchemaModules.scan()
      |> Ecto.ERD.Document.new()
      |> Ecto.ERD.Document.map_nodes(file_opts[:map_node] || (&Function.identity/1))
      |> Ecto.ERD.Dot.render(
        fontname: file_opts[:fontname] || "Roboto Mono",
        columns: file_opts[:columns] || [:name, :type]
      )

    File.write!(output_path, dot)
  end

  # Evaluates the optional config file; returns [] when it does not exist.
  defp load_config(path) do
    if File.exists?(path) do
      {opts, _bindings} = Code.eval_file(path)
      opts
    else
      []
    end
  end

  # :otp_app comes from the config file when present, falls back to the current
  # Mix project, and raises when neither is available (e.g. umbrella root
  # without an explicit config).
  defp resolve_otp_app(file_opts) do
    cond do
      Keyword.has_key?(file_opts, :otp_app) -> file_opts[:otp_app]
      not is_nil(Mix.Project.config()[:app]) -> Mix.Project.config()[:app]
      true -> raise "Unable to detect `:otp_app`, please specify it explicitly"
    end
  end
end
|
lib/mix/tasks/ecto.gen.erd.ex
| 0.811303
| 0.527256
|
ecto.gen.erd.ex
|
starcoder
|
defmodule Sentry.Logger do
  require Logger

  @moduledoc """
  This is based on the Erlang [error_logger](http://erlang.org/doc/man/error_logger.html).

  To set this up, add `:ok = :error_logger.add_report_handler(Sentry.Logger)` to your application's start function. Example:

  ```elixir
  def start(_type, _opts) do
    children = [
      supervisor(Task.Supervisor, [[name: Sentry.TaskSupervisor]]),
      :hackney_pool.child_spec(Sentry.Client.hackney_pool_name(), [timeout: Config.hackney_timeout(), max_connections: Config.max_hackney_connections()])
    ]
    opts = [strategy: :one_for_one, name: Sentry.Supervisor]
    :ok = :error_logger.add_report_handler(Sentry.Logger)
    Supervisor.start_link(children, opts)
  end
  ```
  """

  # NOTE(review): GenEvent is deprecated in recent Elixir releases; this module
  # uses it because :error_logger report handlers follow the gen_event contract.
  use GenEvent

  def init(_mod, []), do: {:ok, []}

  def handle_call({:configure, new_keys}, _state), do: {:ok, :ok, new_keys}

  # Ignore events whose group leader lives on a different node — they are
  # reported by that node's own handler.
  def handle_event({_level, gl, _event}, state) when node(gl) != node() do
    {:ok, state}
  end

  # Error reports carry a proplist `message`; extract the exception, its
  # stacktrace and any stored Sentry context, then forward to Sentry.
  def handle_event({:error_report, _gl, {_pid, _type, [message | _]}}, state) when is_list(message) do
    try do
      {kind, exception, stacktrace, module} = get_exception_and_stacktrace(message[:error_info])
      |> get_initial_call_and_module(message)
      opts = (get_in(message, ~w[dictionary sentry_context]a) || %{})
      |> Map.take(Sentry.Context.context_keys)
      |> Map.to_list()
      |> Keyword.put(:event_source, :logger)
      |> Keyword.put(:stacktrace, stacktrace)
      |> Keyword.put(:error_type, kind)
      |> Keyword.put(:module, module)
      Sentry.capture_exception(exception, opts)
    rescue ex ->
      # Deliberately broad rescue: a failure while reporting must never crash
      # the error_logger handler itself, so it is logged and swallowed.
      Logger.warn(fn -> "Unable to notify Sentry due to #{inspect(ex)}! #{inspect(message)}" end)
    end
    {:ok, state}
  end

  # Catch-all: every other log event is ignored.
  def handle_event(_, state) do
    {:ok, state}
  end

  # error_info of shape {kind, {exception, nested_stack}, _outer_stack}:
  # prefer the stacktrace attached to the exception itself.
  defp get_exception_and_stacktrace({kind, {exception, sub_stack}, _stack}) when is_list(sub_stack) do
    {kind, exception, sub_stack}
  end

  defp get_exception_and_stacktrace({kind, exception, stacktrace}) do
    {kind, exception, stacktrace}
  end

  # GenServer exits will usually only report a stacktrace containing core
  # GenServer functions, which causes Sentry to group unrelated exits
  # together. This gets the `:initial_call` to help disambiguate, as it contains
  # the MFA for how the GenServer was started.
  defp get_initial_call_and_module({kind, exception, stacktrace}, error_info) do
    case Keyword.get(error_info, :initial_call) do
      {module, function, arg} ->
        {kind, exception, stacktrace ++ [{module, function, arg, []}], module}
      _ ->
        {kind, exception, stacktrace, nil}
    end
  end
end
|
lib/sentry/logger.ex
| 0.70202
| 0.633481
|
logger.ex
|
starcoder
|
defmodule ShopifyAPI.Plugs.AdminAuthenticator do
  @moduledoc """
  The ShopifyAPI.Plugs.AdminAuthenticator plug allows for easy admin authentication. The plug when included
  in your route will verify Shopify signatures, that are added to the iframe call on admin page load, and
  set a session cookie for the duration of the session.

  The plug will assign the Shop, App and AuthToken to the Conn for easy access in your admin controller.

  Make sure to include the App name in the path, in our example it is included directly in the path `"/shop-admin/:app"`.

  ## Example Usage
  ```elixir
  # Router
  pipeline :shop_admin do
    plug ShopifyAPI.Plugs.AdminAuthenticator
  end

  scope "/shop-admin/:app", YourAppWeb do
    pipe_through :browser
    pipe_through :shop_admin
    get "/", SomeAdminController, :index
  end
  ```
  """
  import Plug.Conn
  import ShopifyAPI.ConnHelpers
  require Logger

  @session_key :shopify_api_admin_authenticated

  def init(opts), do: opts

  def call(conn, _options) do
    if get_session(conn, @session_key) do
      # Already authenticated: rehydrate the conn.assigns for the app, shop and
      # auth token from the session.
      # Fix: the shop is stored under `:shop_domain` by do_authentication/1
      # below, but was read back here as `:shop_name` — a key that is never
      # written — so rehydration always produced a nil shop.
      conn
      |> assign_app(get_session(conn, :app_name))
      |> assign_shop(get_session(conn, :shop_domain))
      |> assign_auth_token()
    else
      do_authentication(conn)
    end
  end

  # First request of a session: verify the Shopify HMAC over the query params,
  # then persist the app name and shop domain for later rehydration.
  defp do_authentication(conn) do
    with {:ok, app} <- fetch_shopify_app(conn),
         true <- verify_params_with_hmac(app, conn.query_params) do
      # store the App and Shop name in the session for use on other page views
      conn
      |> assign_app(app)
      |> assign_shop()
      |> assign_auth_token()
      |> put_session(:app_name, app_name(conn))
      |> put_session(:shop_domain, shop_domain(conn))
      |> put_session(@session_key, true)
    else
      false ->
        Logger.info("#{__MODULE__} failed hmac validation")
        send_unauthorized_response(conn)
      :error ->
        send_unauthorized_response(conn)
    end
  end

  # Clears the authenticated flag and halts the pipeline with a 401.
  defp send_unauthorized_response(conn) do
    conn
    |> delete_session(@session_key)
    |> resp(401, "Not Authorized.")
    |> halt()
  end
end
|
lib/shopify_api/plugs/admin_authenticator.ex
| 0.790652
| 0.47859
|
admin_authenticator.ex
|
starcoder
|
defmodule Process do
  # This avoids crashing the compiler at build time
  @compile {:autoload, false}

  @moduledoc """
  Conveniences for working with processes and the process dictionary.

  Besides the functions available in this module, the `Kernel` module
  exposes and auto-imports some basic functionality related to processes
  available through the following functions:

    * `Kernel.spawn/1` and `Kernel.spawn/3`
    * `Kernel.spawn_link/1` and `Kernel.spawn_link/3`
    * `Kernel.spawn_monitor/1` and `Kernel.spawn_monitor/3`
    * `Kernel.self/0`
    * `Kernel.send/2`

  While this module provides low-level conveniences to work with processes,
  developers typically use abstractions such as `Agent`, `GenServer`,
  `Registry`, `Supervisor` and `Task` for building their systems and
  resort to this module for gathering information, trapping exits, links
  and monitoring.
  """

  @typedoc """
  A process destination.

  A remote or local PID, a local port, a locally registered name, or a tuple in
  the form of `{registered_name, node}` for a registered name at another node.
  """
  @type dest :: pid | port | (registered_name :: atom) | {registered_name :: atom, node}

  @doc """
  Tells whether the given process is alive on the local node.

  If the process identified by `pid` is alive (that is, it's not exiting and has
  not exited yet) than this function returns `true`. Otherwise, it returns
  `false`.

  `pid` must refer to a process running on the local node or `ArgumentError` is raised.

  Inlined by the compiler.
  """
  @spec alive?(pid) :: boolean
  defdelegate alive?(pid), to: :erlang, as: :is_process_alive

  @doc """
  Sleeps the current process for the given `timeout`.

  `timeout` is either the number of milliseconds to sleep as an
  integer or the atom `:infinity`. When `:infinity` is given,
  the current process will sleep forever, and not
  consume or reply to messages.

  **Use this function with extreme care**. For almost all situations
  where you would use `sleep/1` in Elixir, there is likely a
  more correct, faster and precise way of achieving the same with
  message passing.

  For example, if you are waiting for a process to perform some
  action, it is better to communicate the progress of such action
  with messages.

  In other words, **do not**:

      Task.start_link(fn ->
        do_something()
        ...
      end)

      # Wait until work is done
      Process.sleep(2000)

  But **do**:

      parent = self()

      Task.start_link(fn ->
        do_something()
        send(parent, :work_is_done)
        ...
      end)

      receive do
        :work_is_done -> :ok
      after
        # Optional timeout
        30_000 -> :timeout
      end

  For cases like the one above, `Task.async/1` and `Task.await/2` are
  preferred.

  Similarly, if you are waiting for a process to terminate,
  monitor that process instead of sleeping. **Do not**:

      Task.start_link(fn ->
        ...
      end)

      # Wait until task terminates
      Process.sleep(2000)

  Instead **do**:

      {:ok, pid} =
        Task.start_link(fn ->
          ...
        end)

      ref = Process.monitor(pid)

      receive do
        {:DOWN, ^ref, _, _, _} -> :task_is_down
      after
        # Optional timeout
        30_000 -> :timeout
      end

  """
  @spec sleep(timeout) :: :ok
  def sleep(timeout)
      when is_integer(timeout) and timeout >= 0
      when timeout == :infinity do
    # An empty receive with only an `after` clause blocks without consuming
    # any mailbox messages until the timeout fires.
    receive after: (timeout -> :ok)
  end

  @type spawn_opt ::
          :link
          | :monitor
          | {:priority, :low | :normal | :high}
          | {:fullsweep_after, non_neg_integer}
          | {:min_heap_size, non_neg_integer}
          | {:min_bin_vheap_size, non_neg_integer}
  @type spawn_opts :: [spawn_opt]

  @doc """
  Spawns the given function according to the given options.

  The result depends on the given options. In particular,
  if `:monitor` is given as an option, it will return a tuple
  containing the PID and the monitoring reference, otherwise
  just the spawned process PID.

  More options are available; for the comprehensive list of available options
  check `:erlang.spawn_opt/4`.

  Inlined by the compiler.

  ## Examples

      Process.spawn(fn -> 1 + 2 end, [:monitor])
      #=> {#PID<0.93.0>, #Reference<0.18808174.1939079169.202418>}
      Process.spawn(fn -> 1 + 2 end, [:link])
      #=> #PID<0.95.0>

  """
  @spec spawn((() -> any), spawn_opts) :: pid | {pid, reference}
  defdelegate spawn(fun, opts), to: :erlang, as: :spawn_opt

  @doc """
  Spawns the given function `fun` from module `mod`, passing the given `args`
  according to the given options.

  The result depends on the given options. In particular,
  if `:monitor` is given as an option, it will return a tuple
  containing the PID and the monitoring reference, otherwise
  just the spawned process PID.

  It also accepts extra options, for the list of available options
  check `:erlang.spawn_opt/4`.

  Inlined by the compiler.
  """
  @spec spawn(module, atom, list, spawn_opts) :: pid | {pid, reference}
  defdelegate spawn(mod, fun, args, opts), to: :erlang, as: :spawn_opt

  @doc """
  Returns a list of PIDs corresponding to all the
  processes currently existing on the local node.

  Note that if a process is exiting, it is considered to exist but not be
  alive. This means that for such process, `alive?/1` will return `false` but
  its PID will be part of the list of PIDs returned by this function.

  See `:erlang.processes/0` for more information.

  Inlined by the compiler.

  ## Examples

      Process.list()
      #=> [#PID<0.0.0>, #PID<0.1.0>, #PID<0.2.0>, #PID<0.3.0>, ...]

  """
  @spec list() :: [pid]
  defdelegate list(), to: :erlang, as: :processes

  @doc """
  Registers the given `pid_or_port` under the given `name` on the local node.

  See `:erlang.register/2` for more information.

  NOTE(review): unlike upstream Elixir's `Process.register/2`, there is no
  guard here rejecting names such as `nil` or `:undefined`; those are left
  for `:erlang.register/2` to reject at runtime — confirm this is intended.
  """
  def register(pid_or_port, name) when is_atom(name) do
    # Note the flipped argument order: :erlang.register takes the name first.
    :erlang.register(name, pid_or_port)
  end

  @doc """
  Returns the PID or port identifier registered under `name` or `nil` if the
  name is not registered.

  See `:erlang.whereis/1` for more information.

  ## Examples

      Process.register(self(), :test)
      Process.whereis(:test)
      #=> #PID<0.84.0>
      Process.whereis(:wrong_name)
      #=> nil

  """
  @spec whereis(atom) :: pid | port | nil
  def whereis(name) do
    nillify(:erlang.whereis(name))
  end

  @typep heap_size ::
           non_neg_integer
           | %{size: non_neg_integer, kill: boolean, error_logger: boolean}

  @typep priority_level :: :low | :normal | :high | :max

  @doc """
  Sets the given `flag` to `value` for the calling process.

  Returns the old value of `flag`.

  See `:erlang.process_flag/2` for more information.

  Inlined by the compiler.
  """
  @spec flag(:error_handler, module) :: module
  @spec flag(:max_heap_size, heap_size) :: heap_size
  @spec flag(:message_queue_data, :erlang.message_queue_data()) :: :erlang.message_queue_data()
  @spec flag(:min_bin_vheap_size, non_neg_integer) :: non_neg_integer
  @spec flag(:min_heap_size, non_neg_integer) :: non_neg_integer
  @spec flag(:priority, priority_level) :: priority_level
  @spec flag(:save_calls, 0..10000) :: 0..10000
  @spec flag(:sensitive, boolean) :: boolean
  @spec flag(:trap_exit, boolean) :: boolean
  defdelegate flag(flag, value), to: :erlang, as: :process_flag

  @doc """
  Sets the given `flag` to `value` for the given process `pid`.

  Returns the old value of `flag`.

  It raises `ArgumentError` if `pid` is not a local process.

  The allowed values for `flag` are only a subset of those allowed in `flag/2`,
  namely `:save_calls`.

  See `:erlang.process_flag/3` for more information.

  Inlined by the compiler.
  """
  @spec flag(pid, :save_calls, 0..10000) :: 0..10000
  defdelegate flag(pid, flag, value), to: :erlang, as: :process_flag

  @doc """
  Returns information about the process identified by `pid`, or returns `nil` if the process
  is not alive.

  Use this only for debugging information.

  See `:erlang.process_info/1` for more information.
  """
  @spec info(pid) :: keyword | nil
  def info(pid) do
    nillify(:erlang.process_info(pid))
  end

  @doc """
  Returns information about the process identified by `pid`,
  or returns `nil` if the process is not alive.

  See `:erlang.process_info/2` for more information.
  """
  @spec info(pid, atom | [atom]) :: {atom, term} | [{atom, term}] | nil
  def info(pid, spec)

  # Special-cased because :erlang.process_info returns a bare [] (not a tagged
  # tuple) when the process has no registered name.
  def info(pid, :registered_name) do
    case :erlang.process_info(pid, :registered_name) do
      :undefined -> nil
      [] -> {:registered_name, []}
      other -> other
    end
  end

  def info(pid, spec) when is_atom(spec) or is_list(spec) do
    nillify(:erlang.process_info(pid, spec))
  end

  # Translates Erlang's :undefined sentinel into Elixir's nil.
  @compile {:inline, nillify: 1}
  defp nillify(:undefined), do: nil
  defp nillify(other), do: other
end
|
libs/exavmlib/lib/Process.ex
| 0.858852
| 0.65397
|
Process.ex
|
starcoder
|
defmodule Lab42.Message do
@moduledoc """
A container for error messages.
Defining some severities.
Create results depending on error messages.
Convenience functions for adding, filtering and sorting messages.
"""
defstruct location: "lnb or similar", message: "text", severity: :error
@type severity_t :: :debug | :info | :warning | :error | :critical | :fatal
@type location_t :: any()
@type t :: %__MODULE__{location: location_t(), message: String.t, severity: severity_t()}
@type ts :: list(t)
@type message_list_t :: list(t()|message_t())
@type message_t :: {severity_t(), String.t, location_t()}
@type message_ts :: list(message_t())
@type result_t :: {:ok|:error, any(), list(message_t)}
severities = ~w(debug info warning error critical fatal)a
@severity_values severities |> Enum.zip(Stream.iterate(0, &(&1+1))) |> Enum.into(%{})
# Metaprogramming: generates one add_<severity>/3 function per severity
# (add_debug/3, add_info/3, ..., add_fatal/3). `index` only feeds the
# iex(n) prompt numbers interpolated into each generated doctest.
for {severity, index} <- severities |> Enum.zip(Stream.iterate(1, &(&1+1))) do
  # Make add_message
  @doc """
  Create a message with severity :#{severity} and add in front of other messages

      iex(#{2*index})> add_#{severity}([], "Just a #{severity} message", {1, 3})
      [%Lab42.Message{message: "Just a #{severity} message", severity: :#{severity}, location: {1, 3}}]
  """
  @spec unquote( :"add_#{severity}" )(ts(), String.t, any()) :: ts()
  # Prepends the newly-built message, so message lists accumulate newest-first.
  def unquote(:"add_#{severity}")(messages, message, location) do
    [unquote(:"make_#{severity}")(message, location)|messages]
  end
end
# Metaprogramming: generates one make_<severity>/2 constructor per severity
# (make_debug/2, ..., make_fatal/2). `index` only feeds the iex(n) prompt
# numbers interpolated into each generated doctest.
for {severity, index} <- severities |> Enum.zip(Stream.iterate(1, &(&1+1))) do
  # Make make_message
  @doc """
  Create a message with severity :#{severity}

      iex(#{2*index + 1})> make_#{severity}("Just a #{severity} message", {1, 3})
      %Lab42.Message{message: "Just a #{severity} message", severity: :#{severity}, location: {1, 3}}
  """
  @spec unquote( :"make_#{severity}" )(String.t, any()) :: t()
  def unquote(:"make_#{severity}")(message, location) do
    struct(__MODULE__, severity: unquote(severity), location: location, message: message)
  end
end
@doc """
Extract a value from an ok result

    iex(13)> extract!(result([], 42))
    42

However, extracting from an error result is not possible

    iex(14)> extract!({:error, 42, []})
    ** (FunctionClauseError) no function clause matching in Lab42.Message.extract!/1
"""
@spec extract!(result_t()) :: any()
def extract!(result)
# Only :ok results match; an :error result raises FunctionClauseError.
def extract!({:ok, value, _messages}), do: value
@doc """
Returns the maximum priority of messages

A list of messages can be passed in

    iex(15)> messages =
    ...(15)>   []
    ...(15)>   |> add_error("error1", 1)
    ...(15)>   |> add_info("info2", 2)
    ...(15)>   |> add_warning("warning3", 3)
    ...(15)> max_severity(messages)
    :error

However a list of message tuples is also allowed

    iex(16)> messages =
    ...(16)>   []
    ...(16)>   |> add_error("error1", 1)
    ...(16)>   |> add_fatal("fatal2", 2)
    ...(16)>   |> add_warning("warning3", 3)
    ...(16)>   |> messages()
    ...(16)> max_severity(messages)
    :fatal

In accordance of the robustness principle the last can even be mixed

    iex(17)> messages =
    ...(17)>   []
    ...(17)>   |> add_error("what an error", 42)
    ...(17)>   |> add_info("what an info", 42)
    ...(17)> max_severity([{:critical, "", nil}|messages])
    :critical

And last, but not least it might be convenient to get the severity_value instead of
the symbolic severity

    iex(18)> messages =
    ...(18)>   []
    ...(18)>   |> add_error("what an error", 42)
    ...(18)>   |> add_info("what an info", 42)
    ...(18)> max_severity([{:critical, "", nil}|messages], value: true)
    4
"""
@spec max_severity( message_list_t(), Keyword.t ) :: severity_t()
def max_severity(message_list, opts \\ []) do
  # :debug is the floor; `value: true` switches the result to the numeric scale.
  as_value = Keyword.get(opts, :value)
  _max_severity(message_list, :debug, as_value)
end
@doc """
Extract messages from a list of messages into a library agnostic form as triples.

As all the `add_*` functions create a list in reverse order, this function also
rereverses the message tuples.

    iex(19)> messages =
    ...(19)>   []
    ...(19)>   |> add_error("error1", 1)
    ...(19)>   |> add_info("info2", 2)
    ...(19)>   |> add_warning("warning3", 3)
    ...(19)> messages(messages)
    [ {:error, "error1", 1}, {:warning, "warning3", 3} ]

As you can see only messages with severity of warning and up are returned.
One can of course get messages with less severity too:

    iex(20)> messages =
    ...(20)>   []
    ...(20)>   |> add_error("error1", 1)
    ...(20)>   |> add_info("info2", 2)
    ...(20)>   |> add_debug("debug3", 3)
    ...(20)> messages(messages, severity: :info)
    [ {:error, "error1", 1}, {:info, "info2", 2} ]

And, eventually, for your convenience, instead of `severity: :debug` a shorter and more expressive `:all` can be passed in

    iex(21)> messages =
    ...(21)>   []
    ...(21)>   |> add_error("error1", 1)
    ...(21)>   |> add_info("info2", 2)
    ...(21)>   |> add_debug("debug3", 3)
    ...(21)> messages(messages, :all)
    [ {:error, "error1", 1}, {:info, "info2", 2}, {:debug, "debug3", 3} ]
"""
@spec messages(ts(), Keyword.t|:all) :: message_ts()
def messages(messages, options \\ [])

def messages(messages, :all), do: messages(messages, severity: :debug)

def messages(messages, options) do
  threshold = options |> Keyword.get(:severity, :warning) |> severity_value()

  # `add_*` accumulates newest-first, so reverse up front; the comprehension
  # then filters by severity and normalizes each entry to a plain tuple.
  for message <- Enum.reverse(messages), severity_value(message) >= threshold do
    _format_message(message)
  end
end
@doc """
Wrap a value and error messages into a result tuple, messages themselves
are converted to message tuples as with `messages`. Also warnings still
deliver an `:ok` result.
    iex(22)> messages = []
    ...(22)>   |> add_debug("hello", 1)
    ...(22)>   |> add_info("hello again", 2)
    ...(22)>   |> add_warning("world", 3)
    ...(22)> result(messages, "result")
    {:ok, "result", [{:warning, "world", 3}]}
However the presence of errors or worse returns an `:error` result.
N.B. that the input can be a mixture of `Lab42.Message` structs and
agnostic tuples.
    iex(23)> messages = [{:fatal, "that was not good", 0}]
    ...(23)>   |> add_debug("hello", 1)
    ...(23)> result(messages, "result")
    {:error, "result", [{:fatal, "that was not good", 0}]}
As with `messages` one can control what level of errors shall be included, here
is an example where warnings are suppressed
    iex(24)> messages = []
    ...(24)>   |> add_error("hello", 1)
    ...(24)>   |> add_info("hello again", 2)
    ...(24)>   |> add_warning("world", 3)
    ...(24)> result(messages, 42, severity: :error)
    {:error, 42, [{:error, "hello", 1}]}
"""
@spec result(ts(), any(), Keyword.t()) :: result_t()
def result(messages, value, options \\ []) do
  # The status is derived from ALL messages, independent of which messages
  # survive the severity filter in the returned list.
  status = _status(messages)
  {status, value, messages(messages, severity: Keyword.get(options, :severity, :warning))}
end
@doc """
Assigns to each severity a numerical value, where a higher value indicates
a higher severity.
    iex(24)> severity_value(:debug)
    0
The function extracts the severity from a message if necessary
    iex(25)> severity_value(%Lab42.Message{severity: :error})
    3
"""
@spec severity_value(t() | severity_t() | message_t()) :: number()
def severity_value(message_or_severity)

def severity_value(%__MODULE__{severity: severity}) do
  severity_value(severity)
end

def severity_value({severity, _message, _location}) do
  severity_value(severity)
end

# Severities not present in @severity_values rank above everything defined.
def severity_value(severity), do: Map.get(@severity_values, severity, 999_999)
# Normalize a message into the agnostic triple form; triples pass through as-is.
@spec _format_message(t() | message_t()) :: message_t()
defp _format_message({_, _, _} = already_a_triple), do: already_a_triple

defp _format_message(%{severity: severity, message: message, location: location}),
  do: {severity, message, location}
# Pick whichever of two severities ranks higher; ties keep the right-hand side,
# matching the original (strictly-greater-wins-left) formulation.
@spec _max(severity_t(), severity_t()) :: severity_t()
defp _max(lhs_severity, rhs_severity)

defp _max(lhs, rhs) do
  if severity_value(rhs) >= severity_value(lhs) do
    rhs
  else
    lhs
  end
end
# Fold the message list down to its highest severity, starting from
# `current_max`. When `value?` is truthy the numeric value is returned
# instead of the severity atom.
@spec _max_severity(message_list_t(), severity_t(), any()) :: severity_t() | number()
defp _max_severity(message_list, current_max, value?) do
  winner =
    Enum.reduce(message_list, current_max, fn
      {severity, _, _}, acc -> _max(severity, acc)
      %{severity: severity}, acc -> _max(severity, acc)
    end)

  if value?, do: severity_value(winner), else: winner
end
# A message list maps to :error as soon as it contains an entry of severity
# :error or worse; otherwise the overall status is :ok.
@spec _status(message_list_t()) :: :ok | :error
defp _status(messages) do
  case _max_severity(messages, :debug, true) < severity_value(:error) do
    true -> :ok
    false -> :error
  end
end
end
|
lib/lab42/message.ex
| 0.692226
| 0.495422
|
message.ex
|
starcoder
|
defmodule Lab5.CustomTCPProtocol.Server do
  @moduledoc """
  Server side of the CustomTCPProtocol library.

  Listens on a TCP port, accepts connections, and serves each client in a
  supervised `Task`. Supported line-based commands are `/exit`, `/help` and
  `/exec <arithmetic expression>`; any other line is echoed back.
  :func: start
  """
  alias Lab5.CustomTCPProtocol.Logger, as: Logger
  require Logger

  # Matches simple arithmetic expressions ("1+2*3.5": numbers joined by
  # +, -, * or /). Used to decide whether an /exec payload is evaluable.
  @regex ~r/([-+]?[0-9]*\.?[0-9]+[\/\+\-\*])+([-+]?[0-9]*\.?[0-9]+)/

  @doc """
  The options below mean:
  1. `:binary` - receives data as binaries (instead of lists)
  2. `packet: :line` - receives data line by line
  3. `active: false` - blocks on `:gen_tcp.recv/2` until data is available
  4. `reuseaddr: true` - allows us to reuse the address if the listener crashes
  """
  def start(port, verbose \\ false) do
    {:ok, socket} = :gen_tcp.listen(port,
      [:binary, packet: :line, active: false, reuseaddr: true])
    if verbose, do: Logger.log "Accepting connections on port #{port}"
    loop_acceptor(socket)
  end

  # Accept loop: every accepted client is served in its own supervised task,
  # which is also made the socket's controlling process so socket messages go
  # to the task rather than the acceptor.
  defp loop_acceptor(socket) do
    {:ok, client} = :gen_tcp.accept(socket)
    {:ok, pid} = Task.Supervisor.start_child(Lab5.CustomTCPProtocol.Server.TaskSupervisor, fn -> serve(client) end)
    :ok = :gen_tcp.controlling_process(client, pid)
    loop_acceptor(socket)
  end

  # Per-client loop: read a line, handle it, send the reply, recurse.
  # NOTE(review): after an /exit command the socket has already been closed by
  # arg_handler/2, yet respond/2 still attempts a send on it; the following
  # recv then errors and check_closed/1 terminates the task. Looks intended,
  # but worth confirming.
  defp serve(socket) do
    socket
    |> get_request()
    |> check_closed()
    |> drop_status()
    |> respond(socket)
    serve(socket)
  end

  # Strip the status tag, keeping only the payload to send back.
  defp drop_status({_status, msg}) do
    msg
  end

  # Terminate the serving task when the previous step reported an error
  # (e.g. the peer closed the connection); pass successes through unchanged.
  defp check_closed(line) do
    {status, _msg} = line
    case status do
      :error -> exit 0
      :ok -> line
    end
  end

  # Receive one line from the client and dispatch it; the handler's reply is
  # also logged. Crashes (via the {:ok, data} match) if recv fails.
  defp get_request(socket) do
    {:ok, data} = :gen_tcp.recv(socket, 0)
    {status, log_msg} = arg_handler(data, socket)
    Logger.log log_msg
    {status, log_msg}
  end

  # Write the reply back to the client.
  defp respond(data, socket) do
    :gen_tcp.send(socket, data)
  end

  # Handle /exit: close the client socket and report the outcome.
  defp exit_request_handler(socket) do
    resp = :gen_tcp.close(socket)
    case resp do
      :ok -> {:ok, "Connection closed"}
      _ -> {:error, "Some nasty shit happened"}
    end
  end

  # Handle /help with a canned reply.
  defp help_request_handler do
    {:ok, "How can I help you?"}
  end

  # Pull the first @regex match (the arithmetic expression) out of the line.
  defp extract_expr(data) do
    [head | _] = Regex.run @regex, data
    head
  end

  # Evaluate an arithmetic expression extracted from an /exec command.
  # SECURITY(review): Code.eval_string/1 executes arbitrary Elixir code with
  # full VM privileges. The @regex pre-check limits what reaches this point,
  # but this must never be exposed to untrusted input without replacing eval
  # with a proper expression parser.
  # NOTE(review): the cond below has no `true ->` fallback — a result that is
  # neither float nor integer raises CondClauseError and crashes the task.
  defp exec_request_handler(expr) do
    {result, _ctx} = Code.eval_string expr
    result_str = cond do
      is_float result -> Float.to_string result
      is_integer result -> Integer.to_string result
    end
    {:ok, result_str}
  end

  # True when the line both contains an arithmetic expression and is an
  # /exec command.
  defp complex_condition(data) do
    data =~ @regex and String.starts_with? data, "/exec"
  end

  # Dispatch a received line to the matching command handler; unknown lines
  # are acknowledged with an echo.
  defp arg_handler(data, socket) do
    cond do
      data == "/exit\r\n" -> exit_request_handler(socket)
      data == "/help\r\n" -> help_request_handler()
      complex_condition data -> data |> extract_expr |> exec_request_handler
      true -> {:ok, "Received #{data}"}
    end
  end
end
|
lab5/lib/custom_tcp_protocol/server/server.ex
| 0.760161
| 0.434521
|
server.ex
|
starcoder
|
defmodule Chatbase do
  @moduledoc """
  Provides helper methods to log data to Chatbase Bot Analytics API
  """
  @base_url "https://chatbase.com/api/"

  # Resolve the API key at call time rather than at compile time.
  # The original bound this to a module attribute, which froze the value of
  # CHATBASE_API_KEY (and any config) into the compiled beam; reading it
  # lazily picks up runtime configuration changes.
  defp api_key do
    Config.get(:chatbase, :api_key, System.get_env("CHATBASE_API_KEY"))
  end

  # Helper method to get time in milli seconds
  defp milli_seconds do
    :os.system_time(:milli_seconds)
  end

  # Encodes the map into JSON data
  defp encode_body(data) do
    Poison.encode!(data)
  end

  # Method to send single encoded message to Chatbase
  defp send(encoded_data) do
    request_post("message", encoded_data)
  end

  # Method to send multiple encoded messages to Chatbase
  defp send_all(encoded_data) do
    request_post("messages", encoded_data)
  end

  # This method is responsible for sending data to the specified
  # endpoint. It then passes the result to the decoder to retrieve
  # body in the form of map.
  defp request_post(endpoint, data) do
    Tesla.post(@base_url <> endpoint, data)
    |> decode_body()
  end

  # Just returns the body for now
  defp decode_body(%{body: body}) do
    Poison.decode!(body)
  end

  @doc """
  Log data sent by User to the Bot
  ## Parameters
  - user_id: String used as user identifier
  - platform: String used to denote platform, like, facebook, slack, alexa
  - message: String sent by the User to the Bot
  - intent: String classifying intent of the message
  - not_handled: Boolean type, if request handled by agent or not
  - feedback: Boolean type, if feedback to agent or not
  ## Examples
  ```
  cb = Chatbase.user_message("123", "alexa", "some message", "some-intent")
  ```
  """
  def user_message(user_id, platform, message \\ "", intent \\ "", not_handled \\ false, feedback \\ false) do
    data = %{
      "api_key" => api_key(),
      "type" => "user",
      "user_id" => user_id,
      "time_stamp" => milli_seconds(),
      "platform" => platform,
      "message" => message,
      "intent" => intent,
      "not_handled" => not_handled,
      "feedback" => feedback
    }
    send(encode_body(data))
  end

  @doc """
  Log data sent by Bot to the User
  ## Parameters
  - user_id: String used as user identifier
  - platform: String used to denote platform, like, facebook, slack, alexa
  - message: String sent by the Bot to the User
  - intent: String classifying intent of the message
  - not_handled: Boolean type, if request handled by agent or not
  ## Examples
  ```
  cb = Chatbase.agent_message("123", "alexa", "some message", "some-intent")
  ```
  """
  def agent_message(user_id, platform, message \\ "", intent \\ "", not_handled \\ false) do
    data = %{
      "api_key" => api_key(),
      "type" => "agent",
      "user_id" => user_id,
      "time_stamp" => milli_seconds(),
      "platform" => platform,
      "message" => message,
      "intent" => intent,
      "not_handled" => not_handled,
    }
    send(encode_body(data))
  end

  @doc """
  Log multiple messages at once, can be used in queue
  ## Parameters
  - list_of_maps: A list containing maps
  ## Examples
  ```
  user_data = %{
    "type" => "user",
    "user_id" => "123",
    "platform" => "alexa",
    "message" => "user message",
    "intent" => "some-intent"
  }
  agent_data = %{
    "type" => "agent",
    "user_id" => "123",
    "platform" => "alexa",
    "message" => "agent message",
    "intent" => "some-intent"
  }
  list_of_maps = [user_data, agent_data]
  cb = Chatbase.multiple_messages(list_of_maps)
  ```
  """
  def multiple_messages(list_of_maps) do
    # The API key and timestamp are stamped onto every message in the batch.
    private_data = %{
      "api_key" => api_key(),
      "time_stamp" => milli_seconds()
    }
    data = Enum.map(list_of_maps, fn (x) -> Map.merge(x, private_data) end)
    send_all(encode_body(%{"messages" => data}))
  end
end
|
lib/chatbase.ex
| 0.848628
| 0.445891
|
chatbase.ex
|
starcoder
|
defmodule StateServer.StateGraph do
  @moduledoc """
  tools for dealing with stategraphs.
  State graphs take the form of a keyword list of keyword lists, wherein the
  outer list is a comprehensive list of the states, and the inner lists are
  keywords list of all the transitions, with the values of the keyword list
  as the destination states.
  """

  @type t :: keyword(keyword(atom))

  @doc """
  checks the validity of the state graph.
  Should only be called when we build the state graph.
  A state graph is valid if and only if all of the following are true:
  0. the graph is a keyword of keywords.
  1. there is at least one state.
  2. there are no duplicate state definitions.
  3. there are no duplicate transition definitions emanating from any given state.
  4. all of the destinations of the transitions are states.
  """
  @spec valid?(t) :: boolean
  def valid?([]), do: false
  def valid?(stategraph) when is_list(stategraph) do
    # check to make sure everything is a keyword of keywords.
    Enum.each(stategraph, fn
      {state, transitions} when is_atom(state) ->
        Enum.each(transitions, fn
          {transition, destination} when
            is_atom(transition) and is_atom(destination) ->
              :ok
          _ -> throw :invalid
        end)
      _ -> throw :invalid
    end)

    state_names = states(stategraph)

    # check to make sure the states are unique.
    state_names == Enum.uniq(state_names) || throw :invalid

    stategraph
    |> Keyword.values
    |> Enum.all?(fn state_transitions ->
      transition_names = Keyword.keys(state_transitions)
      # check to make sure the transition names are unique for each state's transition set.
      transition_names == Enum.uniq(transition_names) || throw :invalid
      # check to make sure that the transition destinations are valid.
      state_transitions
      |> Keyword.values
      |> Enum.all?(&(&1 in state_names))
    end)
  catch
    :invalid -> false
  end
  def valid?(_), do: false

  @doc """
  returns the starting state from the state graph.
  ```elixir
  iex> StateServer.StateGraph.start([start: [t1: :state1], state1: [t2: :state2], state2: []])
  :start
  ```
  """
  @spec start(t) :: atom
  def start([{v, _} | _]), do: v

  @doc """
  lists all states in the state graph. The first element in this list will
  always be the initial state.
  ### Example
  ```elixir
  iex> StateServer.StateGraph.states([start: [t1: :state1], state1: [t2: :state2], state2: [t2: :state2]])
  [:start, :state1, :state2]
  ```
  """
  @spec states(t) :: [atom]
  def states(stategraph), do: Keyword.keys(stategraph)

  @doc """
  lists all transitions in the state graph.
  ### Example
  ```elixir
  iex> StateServer.StateGraph.transitions([start: [t1: :state1], state1: [t2: :state2], state2: [t2: :state2]])
  [:t1, :t2]
  ```
  """
  @spec transitions(t) :: [atom]
  def transitions(stategraph) do
    stategraph
    |> Keyword.values
    |> Enum.flat_map(&Keyword.keys/1)
    |> Enum.uniq
  end

  @doc """
  lists all transitions emanating from a given state.
  ### Example
  ```elixir
  iex> StateServer.StateGraph.transitions([start: [t1: :state1, t2: :state2], state1: [], state2: []], :start)
  [:t1, :t2]
  ```
  """
  @spec transitions(t, atom) :: [atom]
  def transitions(stategraph, state), do: Keyword.keys(stategraph[state])

  @doc """
  lists all state/transition pairs. Used to generate the `c:StateServer.is_transition/2` guard.
  ### Example
  ```elixir
  iex> StateServer.StateGraph.all_transitions([start: [t1: :state1, t2: :state2], state1: [], state2: [t2: :state1]])
  [start: :t1, start: :t2, state2: :t2]
  ```
  """
  @spec all_transitions(t) :: keyword
  def all_transitions(stategraph) do
    stategraph
    |> Enum.flat_map(fn
      {st, trs} -> Enum.map(trs, fn {tr, _dest} -> {st, tr} end)
    end)
  end

  @doc """
  outputs the destination state given a source state and a transition.
  ### Example
  ```elixir
  iex> StateServer.StateGraph.transition([start: [t1: :state1, t2: :state2], state1: [], state2: []], :start, :t1)
  :state1
  ```
  """
  @spec transition(t, start::atom, transition::atom) :: atom
  def transition(stategraph, start, transition) do
    stategraph
    |> Keyword.get(start)
    |> Keyword.get(transition)
  end

  @doc """
  outputs a list of terminal states of the graph. Used to generate the
  `c:StateServer.is_terminal/1` guard.
  ```elixir
  iex> StateServer.StateGraph.terminal_states(start: [t1: :state1, t2: :state2], state1: [], state2: [])
  [:state1, :state2]
  ```
  """
  @spec terminal_states(t) :: [atom]
  def terminal_states(stategraph) do
    Enum.flat_map(stategraph, fn
      {state, []} -> [state]
      _ -> []
    end)
  end

  @doc """
  outputs a list of edges of the graph. Used to generate the `c:StateServer.is_transition/3` guard.
  ```elixir
  iex> StateServer.StateGraph.edges(start: [t1: :state1, t2: :state2], state1: [t3: :start], state2: [])
  [start: {:t1, :state1}, start: {:t2, :state2}, state1: {:t3, :start}]
  ```
  """
  @spec edges(t) :: keyword({atom, atom})
  def edges(state_graph) do
    # NB: a trailing `_ -> []` clause was removed here; it was unreachable
    # after the `{state, transitions}` catch-all and produced a compiler
    # warning ("this clause cannot match").
    Enum.flat_map(state_graph, fn
      {_, []} -> []
      {state, transitions} ->
        Enum.flat_map(transitions, fn
          {transition, dest} ->
            [{state, {transition, dest}}]
        end)
    end)
  end

  @doc """
  outputs a list of terminal {state, transition} tuples of the graph. Used to generate the
  `c:StateServer.is_terminal/2` guard.
  ```elixir
  iex> StateServer.StateGraph.terminal_transitions(start: [t1: :state1, t2: :state2], state1: [], state2: [])
  [start: :t1, start: :t2]
  ```
  """
  @spec terminal_transitions(t) :: keyword(atom)
  def terminal_transitions(stategraph) do
    t_states = terminal_states(stategraph)
    Enum.flat_map(stategraph, &transitions_for_state(&1, t_states))
  end

  # emits {state, transition} pairs for transitions whose destination is terminal.
  @spec transitions_for_state({atom, keyword(atom)}, [atom]) :: keyword(atom)
  defp transitions_for_state({state, trs}, t_states) do
    Enum.flat_map(trs, fn {tr, dest} ->
      if dest in t_states, do: [{state, tr}], else: []
    end)
  end

  @doc """
  converts a list of atoms to a type which is the union of the atom literals
  """
  @spec atoms_to_typelist([atom]) :: Macro.t
  def atoms_to_typelist([]), do: nil
  def atoms_to_typelist([state]), do: state
  def atoms_to_typelist([state1, state2]), do: {:|, [], [state1, state2]}
  def atoms_to_typelist([state | rest]), do: {:|, [], [state, atoms_to_typelist(rest)]}
end
|
lib/state_server/state_graph.ex
| 0.916531
| 0.952486
|
state_graph.ex
|
starcoder
|
defmodule Nerves.Runtime.Log.KmsgParser do
  @moduledoc """
  Functions for parsing kmsg strings
  """
  alias Nerves.Runtime.Log.SyslogParser

  @doc """
  Parse out the kmsg facility, severity, and message (including the timestamp
  and host) from a kmsg-formatted string.
  See https://elixir.bootlin.com/linux/latest/source/Documentation/ABI/testing/dev-kmsg for full details.
  Most messages are of the form:
  ```text
  priority,sequence,timestamp,flag;message
  ```
  `priority` is an integer that when broken apart gives you a facility and severity.
  `sequence` is a monotonically increasing counter
  `timestamp` is the time in microseconds
  `flag` is almost always `-`
  `message` is everything else
  This parser only supports the minimum kmsg reports. The spec above describes
  more functionality, but it appears to be uncommon and I haven't seen any
  examples yet in my testing.
  """
  @spec parse(String.t()) ::
          {:ok,
           %{
             facility: SyslogParser.facility(),
             severity: SyslogParser.severity(),
             message: String.t(),
             timestamp: integer(),
             sequence: integer(),
             flags: [atom()]
           }}
          | {:error, :parse_error}
  def parse(line) do
    # The metadata (everything before the first ";") is a comma-separated
    # prefix; any failure along the way collapses into {:error, :parse_error}.
    with [meta, content] <- String.split(line, ";"),
         [prio_str, seq_str, ts_str, flag] <- String.split(meta, ",", parts: 4),
         {prio, ""} <- Integer.parse(prio_str),
         {seq, ""} <- Integer.parse(seq_str),
         {ts, ""} <- Integer.parse(ts_str),
         {:ok, facility, severity} <- SyslogParser.decode_priority(prio) do
      parsed = %{
        facility: facility,
        severity: severity,
        message: content,
        timestamp: ts,
        sequence: seq,
        flags: parse_flags(flag)
      }

      {:ok, parsed}
    else
      _ -> {:error, :parse_error}
    end
  end

  # "c" marks a continuation line; "-" (and anything unknown) carries no flags.
  defp parse_flags(flag_field) do
    case flag_field do
      "c" -> [:continue]
      _ -> []
    end
  end
end
|
lib/nerves_runtime/log/kmsg_parser.ex
| 0.792263
| 0.587825
|
kmsg_parser.ex
|
starcoder
|
defmodule UpcomingRouteDepartures do
  @moduledoc """
  UpcomingRouteDepartures are used to hold information about upcoming departures. UpcomingRouteDepartures
  have a route, a direction, and a list
  of departures. A Departure is a tuple {Headsign, [PredictedSchedule]}.
  """
  alias Routes.Route
  alias Predictions.Prediction
  alias Schedules.Schedule
  require Routes.Route

  defstruct route: %Route{},
            direction_id: 0,
            departures: []

  @type t :: %__MODULE__{
          route: Route.t(),
          direction_id: 0 | 1,
          departures: [{String.t(), [PredictedSchedule.t()]}]
        }

  @doc """
  Builds a list of {mode, [route_time]}
  Given a list of predictions, a list of schedules, a time, it will return list of
  `UpcomingRouteDepartures`'s for each mode available in the given predictions and schedules.
  """
  @spec build_mode_list([Prediction.t()], [Schedule.t()], DateTime.t()) :: [
          {Route.gtfs_route_type(), [UpcomingRouteDepartures.t()]}
        ]
  def build_mode_list(predictions, schedules, current_time) do
    predictions
    |> PredictedSchedule.group(filter_schedules(schedules))
    |> build_route_times(current_time)
    # Group by modes
    |> Enum.group_by(&Route.type_atom(&1.route.type))
    |> Enum.map(fn {mode, route_times} -> {mode, sort_route_times(route_times)} end)
  end

  # Groups valid departures by {route, direction} and builds one struct per group.
  @spec build_route_times([PredictedSchedule.t()], DateTime.t()) :: [UpcomingRouteDepartures.t()]
  defp build_route_times(predicted_schedules, current_time) do
    predicted_schedules
    |> valid_departures(current_time)
    |> Enum.group_by(&{PredictedSchedule.route(&1), PredictedSchedule.direction_id(&1)})
    |> Enum.map(&build_route_time/1)
  end

  # Builds a Route time with headsigns grouped together
  @spec build_route_time({{Route.t(), 0 | 1}, [PredictedSchedule.t()]}) ::
          UpcomingRouteDepartures.t()
  defp build_route_time({{route, direction_id}, predicted_schedules}) do
    predicted_schedules
    |> Enum.group_by(&PredictedSchedule.Display.headsign/1)
    |> Enum.map(&sorted_departures/1)
    |> do_build_route_time(route, direction_id)
  end

  @spec do_build_route_time([{String.t(), [PredictedSchedule.t()]}], Route.t(), 0 | 1) ::
          UpcomingRouteDepartures.t()
  defp do_build_route_time(grouped_predictions, route, direction_id) do
    %__MODULE__{
      route: route,
      direction_id: direction_id,
      departures: grouped_predictions
    }
  end

  # sort the departures by time
  @spec sorted_departures({String.t(), [PredictedSchedule.t()]}) ::
          {String.t(), [PredictedSchedule.t()]}
  defp sorted_departures({headsign, predicted_schedules}) do
    {headsign, Enum.sort_by(predicted_schedules, &PredictedSchedule.sort_with_status/1)}
  end

  # Departures are valid if passengers can board, and the departure time is in the future
  @spec valid_departures([PredictedSchedule.t()], DateTime.t()) :: [PredictedSchedule.t()]
  defp valid_departures(predicted_schedules, current_time) do
    predicted_schedules
    |> Enum.filter(&PredictedSchedule.departing?/1)
    |> Enum.filter(&PredictedSchedule.upcoming?(&1, current_time))
  end

  @spec sort_route_times([UpcomingRouteDepartures.t()]) :: [UpcomingRouteDepartures.t()]
  defp sort_route_times(route_times) do
    Enum.sort_by(route_times, &route_time_sorter/1)
  end

  # Remove schedules for hidden routes and subway
  defp filter_schedules({:error, _}) do
    []
  end

  defp filter_schedules(schedules) do
    schedules
    |> Enum.reject(&Route.hidden?(&1.route))
    |> Enum.reject(&Route.subway?(&1.route.type, &1.route.id))
  end

  # Sorts bus according to number and name. Busses without a
  # numbered name are sorted by name. Otherwise, it is sorted by number
  # All other routes are sorted by name
  # NB: the spec previously claimed a tuple for all inputs, but the non-bus
  # clause returns the bare route name (a binary). In Erlang term order tuples
  # sort before binaries, so bus routes always precede other modes here —
  # presumably intended; confirm before changing.
  @spec route_time_sorter(UpcomingRouteDepartures.t()) ::
          {number, number | String.t()} | String.t()
  defp route_time_sorter(%__MODULE__{route: %Route{type: 3, name: name}}) do
    case Integer.parse(name) do
      {i, ""} -> {1, i}
      _ -> {0, name}
    end
  end

  defp route_time_sorter(route_time) do
    route_time.route.name
  end
end
|
apps/site/lib/upcoming_route_departures.ex
| 0.811863
| 0.431225
|
upcoming_route_departures.ex
|
starcoder
|
defmodule Forklift.Jobs.PartitionedCompaction do
  @moduledoc """
  This job handles compacting files together for long term storage. Running at a long period,
  it takes data out of the main table and then reinserts it to reduce the total number of files in Hive.
  No compaction will be performed if no data is present for the current partition.
  This process assumes that a forklift-managed `os_partition` field is present on the table.
  """
  alias Pipeline.Writer.TableWriter.Helper.PrestigeHelper
  alias Pipeline.Writer.TableWriter.Statement
  require Logger
  import Forklift.Jobs.JobUtils

  @doc """
  Runs partitioned compaction for each dataset id, pausing the data migrator
  for the duration so the two jobs do not contend for the same tables.
  """
  def run(dataset_ids) do
    Forklift.Quantum.Scheduler.deactivate_job(:data_migrator)

    dataset_ids
    |> Enum.map(&Forklift.Datasets.get!/1)
    |> Enum.map(&partitioned_compact/1)
  after
    # Always re-enable the migrator, even if a compaction crashed.
    Forklift.Quantum.Scheduler.activate_job(:data_migrator)
  end

  defp partitioned_compact(nil) do
    Logger.warn("Dataset not found in view-state, skipping compaction")
    :abort
  end

  # Copies the current partition into a side table, deletes it from the main
  # table, reinserts it, and drops the side table — verifying row counts at
  # each step so a failure leaves a recoverable state.
  defp partitioned_compact(%{id: id, technical: %{systemName: system_name}}) do
    partition = current_partition()
    Logger.info("Beginning partitioned compaction for partition #{partition} in dataset #{id}")
    compact_table = compact_table_name(system_name, partition)

    with {:ok, initial_count} <- PrestigeHelper.count(system_name),
         {:ok, partition_count} <-
           PrestigeHelper.count_query("select count(1) from #{system_name} where os_partition = '#{partition}'"),
         {:ok, _} <- pre_check(system_name, compact_table),
         {:ok, _} <- check_for_data_to_compact(partition, partition_count),
         {:ok, _} <- create_compact_table(system_name, partition),
         {:ok, _} <-
           verify_count(compact_table, partition_count, "compact table contains all records from the partition"),
         {:ok, _} <- drop_partition(system_name, partition),
         {:ok, _} <-
           verify_count(
             system_name,
             initial_count - partition_count,
             "main table no longer contains records for the partition"
           ),
         {:ok, _} <- reinsert_compacted_data(system_name, compact_table),
         {:ok, _} <- verify_count(system_name, initial_count, "main table once again contains all records"),
         {:ok, _} <- PrestigeHelper.drop_table(compact_table) do
      Logger.info("Successfully compacted partition #{partition} in dataset #{id}")
      update_compaction_status(id, :ok)
      :ok
    else
      {:error, error} ->
        Logger.error("Error compacting dataset #{id}: " <> inspect(error))
        update_compaction_status(id, :error)
        :error

      {:abort, reason} ->
        Logger.info("Aborted compaction of dataset #{id}: " <> reason)
        :abort
    end
  end

  # Partitions are keyed by year/month, e.g. "2024_01".
  defp current_partition() do
    Timex.format!(DateTime.utc_now(), "{YYYY}_{0M}")
  end

  defp create_compact_table(table, partition) do
    %{
      table: compact_table_name(table, partition),
      as: "select * from #{table} where os_partition = '#{partition}'"
    }
    |> Statement.create()
    |> elem(1)
    |> PrestigeHelper.execute_query()
  end

  defp drop_partition(table, partition) do
    "delete from #{table} where os_partition = '#{partition}'"
    |> PrestigeHelper.execute_query()
  end

  defp reinsert_compacted_data(table, compact_table) do
    "insert into #{table} select * from #{compact_table}"
    |> PrestigeHelper.execute_query()
  end

  @doc """
  Returns the name of the per-partition side table used during compaction.
  """
  def compact_table_name(table_name, partition) do
    "#{table_name}__#{partition}__compact"
  end

  # Compaction can only proceed when the main table exists and no leftover
  # compact table (from a previously failed run) is present.
  defp pre_check(table, compact_table) do
    cond do
      PrestigeHelper.table_exists?(table) == false ->
        {:error, "Main table #{table} did not exist"}

      PrestigeHelper.table_exists?(compact_table) ->
        # Bug fix: this message previously interpolated the main table's name
        # (`table`) instead of the leftover compact table's name.
        {:error, "Compacted table #{compact_table} still exists"}

      true ->
        {:ok, :passed_pre_check}
    end
  end

  defp check_for_data_to_compact(partition, 0), do: {:abort, "No data found to compact for partition #{partition}"}
  defp check_for_data_to_compact(_partition, _count), do: {:ok, :data_found}

  defp update_compaction_status(dataset_id, :error) do
    TelemetryEvent.add_event_metrics([dataset_id: dataset_id], [:forklift_compaction_failure], value: %{status: 1})
  end

  defp update_compaction_status(dataset_id, :ok) do
    TelemetryEvent.add_event_metrics([dataset_id: dataset_id], [:forklift_compaction_failure], value: %{status: 0})
  end
end
|
apps/forklift/lib/forklift/jobs/partitioned_compaction.ex
| 0.666822
| 0.443359
|
partitioned_compaction.ex
|
starcoder
|
defmodule Turtle.Motion do
@moduledoc """
Motion for Turtle
"""
alias Turtle.Vector
@type distance :: number
@type angle :: number
@doc """
Move the turtle forward by the given amount of distance.
Move the turtle forward by the specified distance, in the direction the
turtle is headed
## Examples
iex> turtle = %{%Turtle{} | angle: 0}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> turtle = Turtle.Motion.forward(turtle, 25)
%Turtle{angle: 0, pen_down?: true, x: 25.0, y: 0.0}
iex> Turtle.Motion.position(turtle)
{25.0, 0.0}
iex> turtle = Turtle.Motion.forward(turtle, -75)
%Turtle{angle: 0, pen_down?: true, x: -50.0, y: 0.0}
iex> Turtle.Motion.position(turtle)
{-50.0, 0.0}
"""
@spec forward(Turtle.t(), distance()) :: Turtle.t()
def forward(%Turtle{} = turtle, distance), do: go(turtle, distance)
@doc """
Move the turtle backward by the given amount of distance.
Move the turtle backward by distance, opposite to the direction the turtle is
headed. Do not change the turtle’s heading.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> turtle = Turtle.Motion.backward(turtle, 30)
%Turtle{angle: 0, pen_down?: true, x: -30.0, y: 0.0}
iex> Turtle.Motion.position(turtle)
{-30.0, 0.0}
"""
@spec backward(Turtle.t(), distance()) :: Turtle.t()
def backward(%Turtle{} = turtle, distance), do: go(turtle, -distance)
@doc """
Turn turtle right (clockwise) by the given angle degrees.
## Examples
iex> turtle = Turtle.new(angle: 22)
%Turtle{angle: 22, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.heading(turtle)
22
iex> turtle = Turtle.Motion.right(turtle, 45)
%Turtle{angle: 337, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.heading(turtle)
337
"""
@spec right(Turtle.t(), angle()) :: Turtle.t()
def right(%Turtle{angle: angle} = turtle, ang) do
new_angle = angle - ang
%{turtle | angle: Integer.mod(new_angle, 360)}
end
@doc """
Turn turtle left (counterclockwise) by the given angle (degress)
## Examples
iex> turtle = Turtle.new(angle: 22)
%Turtle{angle: 22, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.heading(turtle)
22
iex> turtle = Turtle.Motion.left(turtle, 45)
%Turtle{angle: 67, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.heading(turtle)
67
"""
@spec left(Turtle.t(), angle()) :: Turtle.t()
def left(%Turtle{} = turtle, angle), do: right(turtle, -1 * angle)
@doc """
Move turtle to an absolute position. Do not change the turtle’s orientation
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> turtle = Turtle.Motion.go_to(turtle, 60, 30)
%Turtle{angle: 0, pen_down?: true, x: 60, y: 30}
iex> Turtle.Motion.position(turtle)
{60, 30}
"""
@spec go_to(Turtle.t(), number(), number()) :: Turtle.t()
def go_to(%Turtle{} = turtle, x, y), do: %{turtle | x: x, y: y}
@doc """
Move turtle to an absolute position. Do not change the turtle’s orientation
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> Turtle.Motion.go_to(turtle, {20, 80})
%Turtle{angle: 0, pen_down?: true, x: 20, y: 80}
"""
@spec go_to(Turtle.t(), Vector.t()) :: Turtle.t()
def go_to(%Turtle{} = turtle, {x, y}), do: go_to(turtle, x, y)
@doc """
Set the turtle's first coordinate to x, leave second coordinate unchanged.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> Turtle.Motion.set_x(turtle, 10)
%Turtle{angle: 0, pen_down?: true, x: 10, y: 0}
"""
@spec set_x(Turtle.t(), number()) :: Turtle.t()
def set_x(%Turtle{y: y} = turtle, x), do: go_to(turtle, x, y)
@doc """
Set the turtle's second coordinate to y, leave first coordinate unchanged.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.position(turtle)
{0, 0}
iex> Turtle.Motion.set_y(turtle, 10)
%Turtle{angle: 0, pen_down?: true, x: 0, y: 10}
"""
@spec set_y(Turtle.t(), number()) :: Turtle.t()
def set_y(%Turtle{x: x} = turtle, y), do: go_to(turtle, x, y)
@doc """
Set the orientation of the turtle to `to_angle`, given in degress
In this scenario, 0 is to the right, 90 is up, 180 is left, and 270 is down.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.set_heading(turtle, 45)
%Turtle{angle: 45, pen_down?: true, x: 0, y: 0}
"""
@spec set_heading(Turtle.t(), number()) :: Turtle.t()
def set_heading(%Turtle{} = turtle, to_angle) do
%{turtle | angle: to_angle}
end
@doc """
Move turtle to the origin
Move the turtle to the origin (coordinates: {0, 0}) and set its heading to
its start-orientation (90 degress)
## Examples
iex> turtle = %Turtle{x: 3, y: 20, angle: 45}
%Turtle{angle: 45, pen_down?: true, x: 3, y: 20}
iex> Turtle.Motion.home(turtle)
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
"""
def home(%Turtle{} = turtle) do
turtle
|> go_to(0, 0)
|> set_heading(0)
end
@doc """
Returns the turtle's current position {x, y}, as a Vector 2D.
## Examples
iex> Turtle.Motion.position(%Turtle{})
{0, 0}
"""
@spec position(Turtle.t()) :: Vector.t()
def position(%Turtle{x: x, y: y}), do: {x, y}
@doc """
Returns the turtle's current heading
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> turtle = Turtle.Motion.left(turtle, 67)
%Turtle{angle: 67, pen_down?: true, x: 0, y: 0}
iex> Turtle.Motion.heading(turtle)
67
"""
@spec heading(Turtle.t()) :: angle
def heading(%Turtle{angle: angle}), do: angle
@doc """
Returns the turtle's x coordinate.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> turtle = Turtle.Motion.right(turtle, 90)
%Turtle{angle: 270, pen_down?: true, x: 0, y: 0}
iex> turtle = Turtle.Motion.forward(turtle, 60)
%Turtle{angle: 270, pen_down?: true, x: 0.0, y: -60.0}
iex> Turtle.Motion.x_cor(turtle)
0.0
"""
@spec x_cor(Turtle.t()) :: number()
def x_cor(%Turtle{x: x}), do: x
@doc """
Returns the turtle's y coordinate.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> turtle = Turtle.Motion.forward(turtle, 60)
%Turtle{angle: 0, pen_down?: true, x: 60.0, y: 0.0}
iex> Turtle.Motion.y_cor(turtle)
0.0
"""
@spec y_cor(Turtle.t()) :: number()
def y_cor(%Turtle{y: y}), do: y
@doc """
Return the angle of the line from the turtle's position to {x, y}.
## Examples
iex> turtle = Turtle.new(x: 10, y: 10)
%Turtle{angle: 0, pen_down?: true, x: 10, y: 10}
iex> Turtle.Motion.towards(turtle, {0, 0})
225.0
"""
@spec towards(Turtle.t(), Vector.t() | Turtle.t()) :: number()
def towards(%Turtle{} = turtle, %Turtle{x: x, y: y}) do
towards(turtle, {x, y})
end
def towards(%Turtle{x: x, y: y}, vector) do
{x1, y1} = Vector.sub(vector, {x, y})
y1
|> :math.atan2(x1)
|> Kernel.*(180.0)
|> Kernel./(:math.pi())
|> fmod()
end
@doc """
Return the angle of the line from the turtle's position to {x, y}.
## Examples
iex> turtle = %Turtle{}
%Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
iex> turtle = Turtle.Motion.go_to(turtle, 10, 10)
%Turtle{angle: 0, pen_down?: true, x: 10, y: 10}
iex> Turtle.Motion.towards(turtle, 0, 0)
225.0
"""
@spec towards(Turtle.t(), number(), number()) :: number()
def towards(%Turtle{} = turtle, x, y), do: towards(turtle, {x, y})
@doc """
Return the distance from the turtle to {x,y} in turtle step units

## Examples

    iex> turtle = %Turtle{}
    %Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
    iex> Turtle.Motion.distance(turtle, {30, 40})
    50.0
    iex> pen = Turtle.new()
    %Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
    iex> pen = Turtle.Motion.forward(pen, 77)
    %Turtle{angle: 0, pen_down?: true, x: 77.0, y: 0.0}
    iex> Turtle.Motion.distance(turtle, pen)
    77.0
"""
@spec distance(Turtle.t(), Vector.t() | Turtle.t()) :: Turtle.t() | number()
def distance(%Turtle{} = turtle, %Turtle{x: x, y: y}) do
  # Another turtle is treated as the point it stands on.
  distance(turtle, {x, y})
end

def distance(turtle, vector) do
  # Euclidean norm of the difference between our position and the target.
  Vector.abs(Vector.sub(position(turtle), vector))
end
@doc """
Return the distance from the turtle to {x,y} in turtle step units

## Examples

    iex> turtle = %Turtle{}
    %Turtle{angle: 0, pen_down?: true, x: 0, y: 0}
    iex> Turtle.Motion.distance(turtle, 30, 40)
    50.0
"""
@spec distance(Turtle.t(), number(), number()) :: number()
def distance(%Turtle{} = turtle, x, y) do
  # Convenience arity: pack the coordinates and delegate to distance/2.
  distance(turtle, {x, y})
end
## Helpers

@doc "Converts an angle in degrees to radians."
@spec radians(number()) :: number()
def radians(degrees), do: degrees * :math.pi() / 180

# NOTE(review): `degress` is a typo of `degrees`; the name is kept
# because it is part of the public interface and callers may rely on it.
@doc "Converts an angle in radians to degrees."
@spec degress(number()) :: number()
def degress(radians), do: radians * 180 / :math.pi()
# Rounds `number` to `decimals` decimal places (default 2) by printing
# the float with `:erlang.float_to_list/2` and re-parsing it.
# The previous version crashed on integer input even though the spec
# promised `number()` (`float_to_list/2` requires a float), so integers
# are now converted first.
@spec truncate(number(), non_neg_integer()) :: float()
defp truncate(number, decimals \\ 2)

defp truncate(number, decimals) when is_integer(number) do
  truncate(number / 1, decimals)
end

defp truncate(number, decimals) do
  number
  |> :erlang.float_to_list(decimals: decimals)
  |> :erlang.list_to_float()
end
# Moves the turtle `distance` steps along its current heading: the
# heading is converted to radians and projected onto the x/y axes, and
# the new coordinates are truncated to two decimal places (see truncate/2).
@spec go(Turtle.t(), number()) :: Turtle.t()
defp go(%Turtle{angle: angle, x: x, y: y} = turtle, distance) do
  radians = radians(angle)
  dx = distance * :math.cos(radians)
  dy = distance * :math.sin(radians)
  %{turtle | x: truncate(x + dx), y: truncate(y + dy)}
end
# Normalizes an angle (in degrees by default) into [0, fullcircle).
# The previous implementation added `fullcircle` only once before taking
# the remainder, so inputs below `-fullcircle` (e.g. -450) still came
# out negative; taking the remainder first fixes that for any input.
@spec fmod(number(), number()) :: float()
defp fmod(number, fullcircle \\ 360) do
  remainder = :math.fmod(number, fullcircle)
  if remainder < 0, do: remainder + fullcircle, else: remainder
end
end
|
turtle/lib/turtle/motion.ex
| 0.939996
| 0.600979
|
motion.ex
|
starcoder
|
defmodule Cluster.Strategy.DNSPoll do
@moduledoc """
Assumes you have nodes that respond to the specified DNS query (A record), and which follow the node name pattern of
`<name>@<ip-address>`. If your setup matches those assumptions, this strategy will periodically poll DNS and connect
all nodes it finds.
## Options
* `polling_interval` - How often to poll in milliseconds (optional; default: 5_000)
* `query` - DNS query to use (required; e.g. "my-app.example.com")
* `node_basename` - The short name of the nodes you wish to connect to (required; e.g. "my-app")
## Usage
config :libcluster,
topologies: [
dns_poll_example: [
strategy: #{__MODULE__},
config: [
polling_interval: 5_000,
query: "my-app.example.com",
node_basename: "my-app"]]]
"""
use GenServer
import Cluster.Logger
alias Cluster.Strategy.State
alias Cluster.Strategy
@default_polling_interval 5_000
def start_link(args), do: GenServer.start_link(__MODULE__, args)
@impl true
# Seed `:meta` (the set of nodes seen on the previous poll) with an
# empty set, then fall through to the general clause.
def init([%State{meta: nil} = state]) do
  init([%State{state | :meta => MapSet.new()}])
end

# Run the first poll synchronously so the node list is populated
# immediately; do_poll/1 schedules all subsequent polls.
def init([%State{} = state]) do
  {:ok, do_poll(state)}
end
@impl true
# `:timeout` is treated exactly like an explicit `:poll` request.
def handle_info(:timeout, state), do: handle_info(:poll, state)
def handle_info(:poll, state), do: {:noreply, do_poll(state)}
# Ignore any other message.
def handle_info(_, state), do: {:noreply, state}
# One polling cycle: resolve the DNS query, disconnect nodes that
# disappeared from the answer, connect newly seen nodes, schedule the
# next poll, and store the resulting node set in `:meta`.
defp do_poll(
       %State{
         topology: topology,
         connect: connect,
         disconnect: disconnect,
         list_nodes: list_nodes
       } = state
     ) do
  new_nodelist = state |> get_nodes() |> MapSet.new()
  # Nodes present last cycle but absent from the fresh DNS answer.
  removed = MapSet.difference(state.meta, new_nodelist)

  new_nodelist =
    case Strategy.disconnect_nodes(
           topology,
           disconnect,
           list_nodes,
           MapSet.to_list(removed)
         ) do
      :ok ->
        new_nodelist

      {:error, bad_nodes} ->
        # Add back the nodes which should have been removed, but which couldn't be for some reason
        Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
          MapSet.put(acc, n)
        end)
    end

  new_nodelist =
    case Strategy.connect_nodes(
           topology,
           connect,
           list_nodes,
           MapSet.to_list(new_nodelist)
         ) do
      :ok ->
        new_nodelist

      {:error, bad_nodes} ->
        # Remove the nodes which should have been added, but couldn't be for some reason
        Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
          MapSet.delete(acc, n)
        end)
    end

  Process.send_after(self(), :poll, polling_interval(state))
  %{state | :meta => new_nodelist}
end
# Reads the poll interval (ms) from strategy config, defaulting to 5_000.
defp polling_interval(%{config: config}) do
  Keyword.get(config, :polling_interval, @default_polling_interval)
end
# Builds this cycle's node list. `query` and `node_basename` are kept as
# the raw `{:ok, value} | :error` results of `Keyword.fetch/2` so that
# resolve/4 can pattern-match missing configuration explicitly.
defp get_nodes(%State{config: config} = state) do
  query = Keyword.fetch(config, :query)
  node_basename = Keyword.fetch(config, :node_basename)

  # A custom resolver can be injected via config (useful for testing);
  # the default resolves both A and AAAA records.
  resolver =
    Keyword.get(config, :resolver, fn query ->
      query
      |> String.to_charlist()
      |> lookup_all_ips
    end)

  resolve(query, node_basename, resolver, state)
end
# query for all ips responding to a given dns query
# format ips as node names
# filter out me
defp resolve({:ok, query}, {:ok, node_basename}, resolver, %State{topology: topology})
     when is_binary(query) and is_binary(node_basename) and query != "" and node_basename != "" do
  debug(topology, "polling dns for '#{query}'")
  me = node()

  query
  |> resolver.()
  |> Enum.map(&format_node(&1, node_basename))
  |> Enum.reject(fn n -> n == me end)
end

# Both params present but at least one is not a non-empty binary.
defp resolve({:ok, invalid_query}, {:ok, invalid_basename}, _resolver, %State{
       topology: topology
     }) do
  warn(
    topology,
    "dns polling strategy is selected, but query or basename param is invalid: #{
      inspect(%{query: invalid_query, node_basename: invalid_basename})
    }"
  )

  []
end

# Both params missing from the config entirely.
defp resolve(:error, :error, _resolver, %State{topology: topology}) do
  warn(
    topology,
    "dns polling strategy is selected, but query and basename params missed"
  )

  []
end
# Resolves `q` (a charlist DNS name) to every IPv4 (:a) and IPv6 (:aaaa)
# address returned for it.
def lookup_all_ips(q) do
  for type <- [:a, :aaaa], ip <- :inet_res.lookup(q, :in, type), do: ip
end
# Turn an ip into a node name atom, assuming all other node names look
# similar to our own name. NOTE: creates atoms dynamically, which is
# bounded here by the set of addresses the DNS answer returns.
defp format_node(ip, base_name) do
  address = :inet_parse.ntoa(ip)
  :"#{base_name}@#{address}"
end
end
|
lib/strategy/dns_poll.ex
| 0.861596
| 0.436922
|
dns_poll.ex
|
starcoder
|
defmodule Pathex do
@moduledoc """
Main module. Use it inside your project to call Pathex macros
To use it just insert
```elixir
defmodule MyModule do
require Pathex
import Pathex, only: [path: 1, path: 2, "~>": 2, ...]
...
end
```
Or you can use `use`
```elixir
defmodule MyModule do
# default_mod option is optional
use Pathex, default_mod: :json
...
end
```
This will import all operators and the `path` macro
> Note:
> `use Pathex` imports the operators and `alongside/1`, and defines a
> private `path/1` macro that applies the `default_mod` modifier
> configured at `use` time
Any macro here belongs to one of two categories:
1. Macro which creates path closure (`sigil_P/2`, `path/2`, `~>/2`)
2. Macro which uses path closure as path (`over/3`, `set/3`, `view/2`, etc.)
"""
alias Pathex.Builder
alias Pathex.Combination
alias Pathex.Operations
alias Pathex.Parser
alias Pathex.QuotedParser
@typep update_args :: {pathex_compatible_structure(), (any() -> any())}
@typep force_update_args :: {pathex_compatible_structure(), (any() -> any()), any()}
@typedoc "This depends on the modifier"
@type pathex_compatible_structure :: map() | list() | Keyword.t() | tuple()
@typedoc "Value returned by non-bang path call"
@type result :: {:ok, any()} | :error
@typedoc "Also known as [path-closure](path.md)"
@type t :: (Operations.name(), force_update_args() | update_args() -> result())
@typedoc "More about [modifiers](modifiers.md)"
@type mod :: :map | :json | :naive
# `use Pathex` imports the operators and `alongside/1`, plus defines a
# private `path/1` macro that forwards to `Pathex.path/2` with the
# `:default_mod` chosen at `use` time (defaults to `:naive`).
defmacro __using__(opts) do
  mod = Keyword.get(opts, :default_mod, :naive)

  quote do
    require Pathex
    import Pathex, only: [path: 2, "~>": 2, "&&&": 2, "|||": 2, alongside: 1]

    # Compile-time shortcut: `path(p)` expands to `Pathex.path(p, default_mod)`.
    defmacrop path(p) do
      mod = unquote(mod)

      quote do
        Pathex.path(unquote(p), unquote(mod))
      end
    end
  end
end
@doc """
Macro of three arguments which applies given function
for item in the given path of given structure
and returns modified structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> inc = fn x -> x + 1 end
iex> {:ok, [0, %{x: 9}]} = over [0, %{x: 8}], path(x / :x), inc
iex> p = path "hey" / 0
iex> {:ok, %{"hey" => [2, [2]]}} = over %{"hey" => [1, [2]]}, p, inc
> Note:
> Exceptions from passed function left unhandled
iex> require Pathex; import Pathex
iex> over(%{1 => "x"}, path(1), fn x -> x + 1 end)
** (ArithmeticError) bad argument in arithmetic expression
"""
# Expands to an `:update` operation; `wrap_ok/1` tags the user
# function's result as `{:ok, _}` for the path-closure contract.
defmacro over(struct, path, func) do
  gen(path, :update, [struct, wrap_ok(func)], __CALLER__)
end
@doc """
Macro of three arguments which applies given function
for item in the given path of given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> inc = fn x -> x + 1 end
iex> [0, %{x: 9}] = over! [0, %{x: 8}], path(x / :x), inc
iex> p = path "hey" / 0
iex> %{"hey" => [2, [2]]} = over! %{"hey" => [1, [2]]}, p, inc
"""
# Same as `over/3` but unwrapped by `bang/1`: raises `Pathex.Error` on `:error`.
defmacro over!(struct, path, func) do
  path
  |> gen(:update, [struct, wrap_ok(func)], __CALLER__)
  |> bang()
end
@doc """
Macro of three arguments which sets the given value
in the given path of given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> {:ok, [0, %{x: 123}]} = set [0, %{x: 8}], path(x / :x), 123
iex> p = path "hey" / 0
iex> {:ok, %{"hey" => [123, [2]]}} = set %{"hey" => [1, [2]]}, p, 123
"""
# `:update` with a constant function: replaces the value at the path.
defmacro set(struct, path, value) do
  gen(path, :update, [struct, quote(do: fn _ -> {:ok, unquote(value)} end)], __CALLER__)
end
@doc """
Macro of three arguments which sets the given value
in the given path of given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> [0, %{x: 123}] = set! [0, %{x: 8}], path(x / :x), 123
iex> p = path "hey" / 0
iex> %{"hey" => [123, [2]]} = set! %{"hey" => [1, [2]]}, p, 123
"""
# Same as `set/3` but unwrapped by `bang/1`: raises `Pathex.Error` on `:error`.
defmacro set!(struct, path, value) do
  path
  |> gen(:update, [struct, quote(do: fn _ -> {:ok, unquote(value)} end)], __CALLER__)
  |> bang()
end
@doc """
Macro of three arguments which sets the given value
in the given path of given structure
If the path does not exist it creates the path favouring maps
when structure is unknown
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> {:ok, [0, %{x: 123}]} = force_set [0, %{x: 8}], path(x / :x), 123
iex> p = path "hey" / 0
iex> {:ok, %{"hey" => %{0 => 1}}} = force_set %{}, p, 1
If the item in path doesn't have the right type, it returns `:error`
Example:
iex> require Pathex; import Pathex
iex> p = path "hey" / "you"
iex> :error = force_set %{"hey" => {1, 2}}, p, "value"
"""
# `:force_update` creates missing intermediate structures; `value` is
# passed both as the replacement and as the default for created nodes.
defmacro force_set(struct, path, value) do
  gen(path, :force_update, [struct, quote(do: fn _ -> {:ok, unquote(value)} end), value], __CALLER__)
end
@doc """
Macro of three arguments which sets the given value
in the given path of given structure
If the path does not exist it creates the path favouring maps
when structure is unknown
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> [0, %{x: 123}] = force_set! [0, %{x: 8}], path(x / :x), 123
iex> p = path "hey" / 0
iex> %{"hey" => %{0 => 1}} = force_set! %{}, p, 1
If the item in path doesn't have the right type, it raises
Example:
iex> require Pathex; import Pathex
iex> p = path "hey" / "you"
iex> force_set! %{"hey" => {1, 2}}, p, "value"
** (Pathex.Error) Type mismatch in structure
"""
# Same as `force_set/3` but raises `Pathex.Error` on type mismatch.
defmacro force_set!(struct, path, value) do
  path
  |> gen(:force_update, [struct, quote(do: fn _ -> {:ok, unquote(value)} end), value], __CALLER__)
  |> bang("Type mismatch in structure")
end
@doc """
Macro of four arguments which applies given function
in the given path of given structure
If the path does not exist it creates the path favouring maps
when structure is unknown and inserts default value
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> {:ok, [0, %{x: {:xxx, 8}}]} = force_over([0, %{x: 8}], path(x / :x), & {:xxx, &1}, 123)
iex> p = path "hey" / 0
iex> {:ok, %{"hey" => %{0 => 1}}} = force_over(%{}, p, fn x -> x + 1 end, 1)
If the item in path doesn't have the right type, it returns `:error`
Example:
iex> require Pathex; import Pathex
iex> p = path "hey" / "you"
iex> :error = force_over %{"hey" => {1, 2}}, p, fn x -> x end, "value"
> Note:
> Default "default" value is nil
"""
# `:force_update` applying `func`; `value` is inserted where the path
# had to be created.
defmacro force_over(struct, path, func, value \\ nil) do
  gen(path, :force_update, [struct, wrap_ok(func), value], __CALLER__)
end
@doc """
Macro of four arguments which applies given function
in the given path of given structure
If the path does not exist it creates the path favouring maps
when structure is unknown and inserts default value
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> [0, %{x: {:xxx, 8}}] = force_over!([0, %{x: 8}], path(x / :x), & {:xxx, &1}, 123)
iex> p = path "hey" / 0
iex> %{"hey" => %{0 => 1}} = force_over!(%{}, p, fn x -> x + 1 end, 1)
If the item in path doesn't have the right type, it raises
Example:
iex> require Pathex; import Pathex
iex> p = path "hey" / "you"
iex> force_over! %{"hey" => {1, 2}}, p, fn x -> x end, "value"
** (Pathex.Error) Type mismatch in structure
> Note:
> Default `default` value is `nil`
"""
# Same as `force_over/4` but raises `Pathex.Error` on type mismatch.
defmacro force_over!(struct, path, func, value \\ nil) do
  path
  |> gen(:force_update, [struct, wrap_ok(func), value], __CALLER__)
  |> bang("Type mismatch in structure")
end
@doc """
Macro returns the given function applied to the value at the path
or error
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> {:ok, 9} = at [0, %{x: 8}], path(x / :x), fn x -> x + 1 end
iex> p = path "hey" / 0
iex> {:ok, {:here, 9}} = at(%{"hey" => {9, -9}}, p, & {:here, &1})
"""
# `:view` with a user function applied to the viewed value.
defmacro at(struct, path, func) do
  gen(path, :view, [struct, wrap_ok(func)], __CALLER__)
end
@doc """
Macro returns the given function applied to the value at the path
or error
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> 9 = at! [0, %{x: 8}], path(x / :x), fn x -> x + 1 end
iex> p = path "hey" / 0
iex> {:here, 9} = at!(%{"hey" => {9, -9}}, p, & {:here, &1})
"""
# Same as `at/3` but unwrapped by `bang/1`: raises `Pathex.Error` on `:error`.
defmacro at!(struct, path, func) do
  path
  |> gen(:view, [struct, wrap_ok(func)], __CALLER__)
  |> bang()
end
@doc """
Macro gets the value in the given path of the given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> {:ok, 8} = view [0, %{x: 8}], path(x / :x)
iex> p = path "hey" / 0
iex> {:ok, 9} = view %{"hey" => {9, -9}}, p
"""
# `:view` with the identity function: returns `{:ok, value} | :error`.
defmacro view(struct, path) do
  gen(path, :view, [struct, quote(do: fn x -> {:ok, x} end)], __CALLER__)
end
@doc """
Macro gets the value in the given path of the given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> 8 = view! [0, %{x: 8}], path(x / :x)
iex> p = path "hey" / 0
iex> 9 = view! %{"hey" => {9, -9}}, p
"""
# Same as `view/2` but unwrapped by `bang/1`: raises `Pathex.Error` on `:error`.
defmacro view!(struct, path) do
  path
  |> gen(:view, [struct, quote(do: fn x -> {:ok, x} end)], __CALLER__)
  |> bang()
end
@doc """
Macro gets the value in the given path of the given structure
or returns default value if not found
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> 8 = get [0, %{x: 8}], path(x / :x)
iex> p = path "hey" / "you"
iex> :default = get %{"hey" => [x: 1]}, p, :default
"""
# Like `view/2` but unwraps `{:ok, value}` and substitutes `default`
# for `:error` instead of returning the tagged tuple.
defmacro get(struct, path, default \\ nil) do
  res = gen(path, :view, [struct, quote(do: fn x -> {:ok, x} end)], __CALLER__)

  quote do
    case unquote(res) do
      {:ok, value} -> value
      :error -> unquote(default)
    end
  end
  |> set_generated()
end
@doc """
Sigil for paths. Three [modifiers](modifiers.md) are available:
* `naive` (default) paths should look like `~P["string"/:atom/1]`
* `json` paths should look like `~P[string/this_one_is_too/1/0]json`
* `map` paths should look like `~P[:x/1]map`
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> mypath = path 1 / :atom / "string" / {"tuple?"} / x
iex> structure = [0, [atom: %{"string" => %{{"tuple?"} => %{1 => 2}}}]]
iex> {:ok, 2} = view structure, mypath
"""
# Parses the sigil string with the detected modifier and compiles a
# path-closure at the call site (warning if it would expand too large).
defmacro sigil_P({_, _, [string]}, mod) do
  mod = detect_mod(mod)

  string
  |> Parser.parse(mod)
  |> assert_combination_length(__CALLER__)
  |> Builder.build(Operations.from_mod(mod))
  |> set_generated()
end
@doc """
Creates path for given structure
Example:
iex> require Pathex; import Pathex
iex> x = 1
iex> mypath = path 1 / :atom / "string" / {"tuple?"} / x
iex> structure = [0, [atom: %{"string" => %{{"tuple?"} => %{1 => 2}}}]]
iex> {:ok, 2} = view structure, mypath
Default [modifier](modifiers.md) of this `path/2` is `:naive` which means that
* every variable is treated as index / key to any of tuple, list, map, keyword
* every atom is treated as key to map or keyword
* every integer is treated as index to tuple, list or key to map
* every other data is treated as key to map
> Note:
> `-1` allows data to be prepended to the list
iex> require Pathex; import Pathex
iex> x = -1
iex> p1 = path(-1)
iex> p2 = path(x)
iex> {:ok, [1, 2]} = force_set([2], p1, 1)
iex> {:ok, [1, 2]} = force_set([2], p2, 1)
"""
# Compiles the quoted path expression into a path-closure at the call
# site. `mod` defaults to the charlist 'naive'; detect_mod/1 normalizes
# atoms, binaries and charlists to `:naive | :map | :json`.
defmacro path(quoted, mod \\ 'naive') do
  mod = detect_mod(mod)

  quoted
  |> QuotedParser.parse(__CALLER__, mod)
  |> assert_combination_length(__CALLER__)
  |> Builder.build(Operations.from_mod(mod))
  |> set_generated()
end
@doc """
Creates composition of two paths similar to concating them together
Example:
iex> require Pathex; import Pathex
iex> p1 = path :x / :y
iex> p2 = path :a / :b
iex> composed_path = p1 ~> p2
iex> {:ok, 1} = view %{x: [y: [a: [a: 0, b: 1]]]}, composed_path
"""
# Builds a sequential (concatenation-style) composition of two
# path-closures by re-quoting the operator call for the parser.
defmacro a ~> b do
  {:"~>", [], [a, b]}
  |> QuotedParser.parse_composition(:"~>")
  |> Builder.build_composition(:"~>")
  |> set_generated()
end
@doc """
Creates composition of two paths which has some inspiration from logical `and`
Example:
iex> require Pathex; import Pathex
iex> p1 = path :x / :y
iex> p2 = path :a / :b
iex> ap = p1 &&& p2
iex> {:ok, 1} = view %{x: %{y: 1}, a: [b: 1]}, ap
iex> :error = view %{x: %{y: 1}, a: [b: 2]}, ap
iex> {:ok, %{x: %{y: 2}, a: [b: 2]}} = set %{x: %{y: 1}, a: [b: 1]}, ap, 2
iex> {:ok, %{x: %{y: 2}, a: %{b: 2}}} = force_set %{}, ap, 2
"""
# This code is generated with experimental composition generator
# `and`-style composition: both paths must succeed (see @doc above).
defmacro a &&& b do
  {:"&&&", [], [a, b]}
  |> QuotedParser.parse_composition(:"&&&")
  |> Builder.build_composition(:"&&&")
  |> set_generated()
end
@doc """
Creates composition of two paths which has some inspiration from logical `or`
Example:
iex> require Pathex; import Pathex
iex> p1 = path :x / :y
iex> p2 = path :a / :b
iex> op = p1 ||| p2
iex> {:ok, 1} = view %{x: %{y: 1}, a: [b: 2]}, op
iex> {:ok, 2} = view %{x: 1, a: [b: 2]}, op
iex> {:ok, %{x: %{y: 2}, a: [b: 1]}} = set %{x: %{y: 1}, a: [b: 1]}, op, 2
iex> {:ok, %{x: %{y: 2}}} = force_set %{}, op, 2
iex> {:ok, %{x: %{}, a: [b: 1]}} = force_set %{x: %{y: 1}, a: [b: 1]}, op, 2
"""
# `or`-style composition: the first succeeding path wins (see @doc above).
defmacro a ||| b do
  {:"|||", [], [a, b]}
  |> QuotedParser.parse_composition(:"|||")
  |> Builder.build_composition(:"|||")
  |> set_generated()
end
@doc """
This macro creates compositions of paths which work along with each other
Example:
iex> require Pathex; import Pathex
iex> p1 = path :x
iex> p2 = path :y
iex> pa = alongside [p1, p2]
iex> {:ok, [1, 2]} = view(%{x: 1, y: 2}, pa)
iex> {:ok, %{x: 3, y: 3}} = set(%{x: 1, y: 2}, pa, 3)
iex> :error = set(%{x: 1}, pa, 3)
iex> {:ok, %{x: 1, y: 1}} = force_set(%{}, pa, 1)
"""
# Builds a closure that applies every path in `list` to the same input:
# `:view` collects all results (in list order, halting on the first
# `:error`), while `:update`/`:force_update` thread the structure
# through each path in turn.
defmacro alongside(list) do
  quote generated: true, bind_quoted: [list: list] do
    fn
      :view, {input_struct, func} ->
        # Reversed so prepending inside reduce_while yields list order.
        list
        |> Enum.reverse()
        |> Enum.reduce_while({:ok, []}, fn path, {:ok, res} ->
          case path.(:view, {input_struct, func}) do
            {:ok, v} -> {:cont, {:ok, [v | res]}}
            :error -> {:halt, :error}
          end
        end)

      :update, {input_struct, func} ->
        Enum.reduce_while(list, {:ok, input_struct}, fn path, {:ok, res} ->
          case path.(:update, {res, func}) do
            {:ok, res} -> {:cont, {:ok, res}}
            :error -> {:halt, :error}
          end
        end)

      :force_update, {input_struct, func, default} ->
        Enum.reduce_while(list, {:ok, input_struct}, fn path, {:ok, res} ->
          case path.(:force_update, {res, func, default}) do
            {:ok, res} -> {:cont, {:ok, res}}
            :error -> {:halt, :error}
          end
        end)
    end
  end
  |> set_generated()
end
# Helper for generating code for path operation
# Special case for inline paths
# When the path argument is a literal `path(...)` call we compile a
# single-operation function via build_only/4 instead of a full closure.
defp gen({:path, _, [path]}, op, args, caller) do
  path_func = build_only(path, op, caller)

  quote generated: true do
    unquote(path_func).(unquote_splicing(args))
  end
  |> set_generated()
end

# Case for not inlined paths
# The runtime closure is called as `path.(op, {args...})`.
defp gen(path, op, args, _caller) do
  quote generated: true do
    unquote(path).(unquote(op), {unquote_splicing(args)})
  end
  |> set_generated()
end
# Wraps the quoted one-argument function `func` so its result comes
# back tagged as `{:ok, result}`, matching the path-closure contract.
defp wrap_ok(func) do
  quote do
    fn value -> {:ok, unquote(func).(value)} end
  end
end
# Helper for generating raising functions
# Wraps `quoted` (an expression yielding `{:ok, value} | :error`) in a
# case that unwraps the value or raises `Pathex.Error` with `err_str`.
# Fixes the typo "Coundn't" in the user-facing default error message.
@spec bang(Macro.t(), binary()) :: Macro.t()
defp bang(quoted, err_str \\ "Couldn't find element in given path") do
  quote generated: true do
    case unquote(quoted) do
      {:ok, value} -> value
      :error -> raise Pathex.Error, unquote(err_str)
    end
  end
end
# Normalizes a user-supplied modifier into `:naive | :map | :json`.
# Accepts atoms, binaries and charlists; anything unrecognized falls
# back to `:naive`.
defp detect_mod(mod) when mod in ~w(naive map json)a, do: mod

defp detect_mod(str) when is_binary(str) do
  str |> String.to_charlist() |> detect_mod()
end

defp detect_mod('json'), do: :json
defp detect_mod('map'), do: :map
defp detect_mod(_unknown), do: :naive
# Compiles the `path` AST into a single-operation function for `opname`.
# After macro-expanding, three call shapes are recognized: a remote
# `Pathex.path(...)` call, an imported `path(...)` call (detected via
# the `:import` metadata), or a bare argument expression.
defp build_only(path, opname, caller, mod \\ :naive) do
  # Pick the builder for just this operation from the modifier's map.
  %{^opname => builder} = Operations.from_mod(mod)

  case Macro.prewalk(path, & Macro.expand(&1, caller)) do
    {{:".", _, [__MODULE__, :path]}, _, args} ->
      args

    {:path, meta, args} = full ->
      case Keyword.fetch(meta, :import) do
        {:ok, __MODULE__} ->
          args

        _ ->
          full
      end

    args ->
      args
  end
  |> QuotedParser.parse(caller, mod)
  |> Builder.build_only(builder)
end
# Walks the whole AST and flags every node with `generated: true` so
# the compiler does not raise warnings for dead code in emitted code.
defp set_generated(ast) do
  Macro.prewalk(ast, fn node ->
    Macro.update_meta(node, &Keyword.put(&1, :generated, true))
  end)
end
# This function raises warning if combination will lead to very big closure
@maximum_combination_size 128
# Emits a compiler warning (with a stacktrace pointing at the caller's
# file/line) when a path would expand into more clauses than
# @maximum_combination_size. Returns the combination unchanged.
defp assert_combination_length(combination, env) do
  size = Combination.size(combination)

  if size > @maximum_combination_size do
    # `env.function` is nil at the module top level.
    {func, arity} = env.function || {:nofunc, 0}
    stacktrace = [{env.module, func, arity, [file: '#{env.file}', line: env.line]}]

    """
    This path will generate too many clauses, and therefore will slow down
    the compilation and increase amount of generated code. Size of current
    combination is #{size} while suggested size is #{@maximum_combination_size}
    It would be better to split this closure in different paths with `Pathex.~>/2`
    Or change the modifier to one which generates less code: `:map` or `:json`
    """
    |> IO.warn(stacktrace)
  end

  combination
end
defmodule Error do
  @moduledoc """
  Simple exception raised by the bang! macros (`over!/3`, `view!/2`, ...).
  New fields may be added in the future.
  """

  defexception [:message]
end
end
|
lib/pathex.ex
| 0.825484
| 0.705994
|
pathex.ex
|
starcoder
|
defmodule Cldr do
@moduledoc """
Cldr provides the core functions to retrieve and manage
the CLDR data that supports formatting and localisation.
This module provides the core functions to access formatted
CLDR data, set and retrieve a current locale and validate
certain core data types such as locales, currencies and
territories.
`Cldr` functionality is packaged into several
packages that each depend on this one. These additional
modules are:
* `Cldr.Number.to_string/2` for formatting numbers and
`Cldr.Currency.to_string/2` for formatting currencies.
These functions are contained in the hex package
[ex_cldr_numbers](https://hex.pm/packages/ex_cldr_numbers).
* `Cldr.List.to_string/2` for formatting lists.
This function is contained in the hex package
[ex_cldr_lists](https://hex.pm/packages/ex_cldr_lists).
* `Cldr.Unit.to_string/2` for formatting SI units.
This function is contained in the hex package
[ex_cldr_units](https://hex.pm/packages/ex_cldr_units).
* `Cldr.DateTime.to_string/2` for formatting of dates,
times and datetimes. This function is contained in the
hex package [ex_cldr_dates_times](https://hex.pm/packages/ex_cldr_dates_times).
"""
@external_resource "priv/cldr/language_tags.ebin"
@type backend :: module()
@type territory :: atom()
alias Cldr.Config
alias Cldr.Locale
alias Cldr.LanguageTag
require Config
require Cldr.Backend.Compiler
import Kernel, except: [to_string: 1]
@doc false
# `use Cldr` stores the given options in `@cldr_opts` and registers
# `Cldr.Backend.Compiler` to generate the backend at `@before_compile`.
defmacro __using__(opts \\ []) do
  quote bind_quoted: [opts: opts] do
    @cldr_opts opts
    @before_compile Cldr.Backend.Compiler
  end
end
@doc """
Returns the version of the CLDR repository as a tuple

## Example

    iex> Cldr.version
    {37, 0, 0}

"""
# Parsed once at compile time from the configured CLDR version string.
@version Config.version()
         |> String.split(".")
         |> Enum.map(&String.to_integer/1)
         |> List.to_tuple()

@spec version :: {non_neg_integer, non_neg_integer, non_neg_integer}
def version do
  @version
end
# Above this many locales, warn the user that generation is slow.
@warn_if_greater_than 100

@doc false
# Compile-time helper: installs the configured locale data and prints
# progress. Raises `Cldr.UnknownLocaleError` if any configured locale
# name is not known to CLDR.
def install_locales(config) do
  alias Cldr.Config
  Cldr.Install.install_known_locale_names(config)
  known_locale_count = Enum.count(Config.known_locale_names(config))
  locale_string = if known_locale_count > 1, do: "locales named ", else: "locale named "

  if Enum.any?(Config.unknown_locale_names(config)) do
    raise Cldr.UnknownLocaleError,
          "Some locale names are configured that are not known to CLDR. " <>
            "Compilation cannot continue until the configuration includes only " <>
            "locales names known in CLDR.\n\n" <>
            "Configured locales names: #{inspect(Config.requested_locale_names(config))}\n" <>
            "Gettext locales names: #{inspect(Config.known_gettext_locale_names(config))}\n" <>
            "Unknown locales names: " <>
            "#{IO.ANSI.red()}#{inspect(Config.unknown_locale_names(config))}" <>
            "#{IO.ANSI.default_color()}\n"
  end

  IO.puts(
    "Generating #{inspect(config.backend)} for #{known_locale_count} " <>
      locale_string <>
      "#{inspect(Config.known_locale_names(config), limit: 5)} with " <>
      "a default locale named #{inspect(Config.default_locale_name(config))}"
  )

  if known_locale_count > @warn_if_greater_than do
    IO.puts("Please be patient, generating functions for many locales " <> "can take some time")
  end
end
@doc """
Return the backend's locale for the
current process.
Note that the locale is set per-process. If the locale
is not set for the given process then:
* return the current processes default locale
* or if not set, return the default locale of the
specified backend
* or if that is not set, return the global default locale
which is defined under the `:ex_cldr` key in `config.exs`
* Or the system-wide default locale which is
#{inspect(Cldr.Config.default_locale())}
Note that if there is no locale set for the current
process then an error is not returned - a default locale
will be returned per the rules above.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Example
iex> Cldr.put_locale(TestBackend.Cldr, "pl")
iex> Cldr.get_locale(TestBackend.Cldr)
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "pl-Latn-PL",
cldr_locale_name: "pl",
extensions: %{},
language: "pl",
locale: %{},
private_use: [],
rbnf_locale_name: "pl",
territory: :PL,
requested_locale_name: "pl",
script: "Latn",
transform: %{},
language_variant: nil
}
"""
@spec get_locale(backend()) :: LanguageTag.t()
# Resolution order: backend-specific process locale, then the shared
# `:cldr` process locale, then the backend default, then the global
# default.
def get_locale(backend) do
  Process.get(backend) ||
    Process.get(:cldr) ||
    backend.default_locale() ||
    default_locale()
end
@doc """
Return the `Cldr` locale for the
current process.
Note that the locale is set per-process. If the locale
is not set for the given process then:
* Return the global default locale
which is defined under the `:ex_cldr` key in
`config.exs`
* Or the system-wide default locale which is
#{inspect(Cldr.Config.default_locale())}
Note that if there is no locale set for the current
process then an error is not returned - a default locale
will be returned per the rules above.
## Example
iex> Cldr.put_locale(TestBackend.Cldr.Locale.new!("pl"))
iex> Cldr.get_locale()
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "pl-Latn-PL",
cldr_locale_name: "pl",
extensions: %{},
language: "pl",
locale: %{},
private_use: [],
rbnf_locale_name: "pl",
territory: :PL,
requested_locale_name: "pl",
script: "Latn",
transform: %{},
language_variant: nil
}
"""
# Process-wide locale (the `:cldr` process-dictionary key), falling
# back to the configured global default.
def get_locale do
  Process.get(:cldr) ||
    default_locale()
end
@doc """
Set the current process's locale for a specified backend.
## Arguments
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Returns
* `{:ok, locale}`
## Notes
See [rfc5646](https://tools.ietf.org/html/rfc5646) for the specification
of a language tag
## Examples
iex> Cldr.put_locale(TestBackend.Cldr, "en")
{:ok,
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "en-Latn-US",
cldr_locale_name: "en",
language_subtags: [],
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en",
script: "Latn",
territory: :US,
transform: %{},
language_variant: nil
}}
iex> Cldr.put_locale(TestBackend.Cldr, "invalid-locale")
{:error, {Cldr.LanguageTag.ParseError,
"Expected a BCP47 language tag. Could not parse the remaining \\"le\\" starting at position 13"}}
"""
@spec put_locale(backend(), Locale.locale_name() | LanguageTag.t()) ::
        {:ok, LanguageTag.t()} | {:error, {module(), String.t()}}
# A binary locale name is validated by the backend first; on success the
# parsed tag is stored in the process dictionary under the backend key.
def put_locale(backend, locale) when is_binary(locale) do
  with {:ok, locale} <- backend.validate_locale(locale) do
    Process.put(backend, locale)
    {:ok, locale}
  end
end

# An already-parsed language tag is stored as-is without validation.
def put_locale(backend, %LanguageTag{} = locale) do
  Process.put(backend, locale)
  {:ok, locale}
end
@doc """
Set the current process's locale for all backends.
This is the preferred approach.
## Arguments
* `locale` is a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
with a non-nil `:cldr_locale_name`
## Returns
* `{:ok, locale}`
## Examples
iex> Cldr.put_locale(TestBackend.Cldr.Locale.new!("en"))
{:ok,
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "en-Latn-US",
cldr_locale_name: "en",
language_subtags: [],
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en",
script: "Latn",
territory: :US,
transform: %{},
language_variant: nil
}}
"""
@spec put_locale(Locale.locale_name() | LanguageTag.t()) ::
        {:ok, LanguageTag.t()}
# Stores the locale under the shared `:cldr` key for all backends;
# requires a tag that already resolved to a CLDR locale name.
def put_locale(%Cldr.LanguageTag{cldr_locale_name: cldr_locale_name} = locale)
    when not is_nil(cldr_locale_name) do
  Process.put(:cldr, locale)
  {:ok, locale}
end
@doc """
Sets the system default locale.
The locale set here is the base-level
system default, equivalent to setting the
`:default_locale` key in `config.exs`.
## Arguments
* `locale` is a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
with a non-nil `:cldr_locale_name`
## Returns
* `{:ok, locale}`
"""
# Persists the locale as the application-wide default in the app env.
def put_default_locale(%Cldr.LanguageTag{} = locale) do
  :ok = Application.put_env(Cldr.Config.app_name(), :default_locale, locale)
  {:ok, locale}
end
@doc """
Sets the system default locale.
The locale set here is the base-level
system default, equivalent to setting the
`:default_locale` key in `config.exs`.
## Arguments
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Returns
* `{:ok, locale}` or
* `{:error, {exception, reason}}`
"""
# Validates `locale_name` against `backend` before persisting it as the
# application-wide default.
def put_default_locale(locale_name, backend \\ default_backend()) do
  with {:ok, locale} <- validate_locale(locale_name, backend) do
    put_default_locale(locale)
  end
end
@doc """
Returns the global default `locale` for a
given backend.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Returns
* The default locale for the backend
## Example
iex> Cldr.default_locale(TestBackend.Cldr)
%Cldr.LanguageTag{
backend: TestBackend.Cldr,
canonical_locale_name: "en-Latn-001",
cldr_locale_name: "en-001",
language_subtags: [],
extensions: %{},
gettext_locale_name: "en",
language: "en",
locale: %{},
private_use: [],
rbnf_locale_name: "en",
requested_locale_name: "en-001",
script: "Latn",
territory: :"001",
transform: %{},
language_variant: nil
}
"""
@spec default_locale(backend()) :: LanguageTag.t()
# Delegates to the generated backend module's own default.
def default_locale(backend) do
  backend.default_locale
end
@doc """
Returns the configured global default `locale`.

The default locale can be set with
`Cldr.put_default_locale/1`.

Alternatively the default locale may be configured in
`config.exs` under the `ex_cldr` key as follows:

    config :ex_cldr,
      default_locale: <locale_name>

## Returns

* The default locale or

* Raises an exception if no default
  backend is configured

## Notes

`Cldr.default_locale/0` returns the system-wide
default locale.

## Example

    iex> Cldr.default_locale
    %Cldr.LanguageTag{
      backend: TestBackend.Cldr,
      canonical_locale_name: "en-Latn-001",
      cldr_locale_name: "en-001",
      language_subtags: [],
      extensions: %{},
      gettext_locale_name: nil,
      language: "en",
      locale: %{},
      private_use: [],
      rbnf_locale_name: "en",
      requested_locale_name: "en-001",
      script: "Latn",
      territory: :"001",
      transform: %{},
      language_variant: nil
    }

"""
def default_locale do
  locale = Cldr.Config.default_locale()

  # A locale taken straight from configuration has no backend attached;
  # in that case associate the configured default backend (which raises
  # if none is configured).
  if locale.backend do
    locale
  else
    Map.put(locale, :backend, default_backend())
  end
end
@doc """
Returns the default territory when a locale
does not specify one and none can be inferred.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`

## Returns

* The default territory or

* Raises if no argument is supplied and there is no
  default backend configured

## Example

    iex> Cldr.default_territory(TestBackend.Cldr)
    :"001"

"""
@spec default_territory(backend()) :: atom()
def default_territory(backend \\ default_backend()) do
  # Delegates to the backend, which resolved this at compile time.
  backend.default_territory
end

@doc """
Returns the configured default backend module.

The default backend can be set with
`Cldr.put_default_backend/1`.

Alternatively the default backend may be configured in
`config.exs` under the `ex_cldr` key as follows:

    config :ex_cldr,
      default_backend: <backend_module>

## Important Note

If this function is called and no default backend
is configured an exception will be raised.

"""
@spec default_backend :: backend() | no_return
@compile {:inline, default_backend: 0}
def default_backend do
  # Reads the application environment on every call so that
  # `put_default_backend/1` takes effect immediately.
  Cldr.Config.default_backend()
end
@doc """
Set the default system-wide backend module.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`

## Returns

* `{:ok, backend}` or

* `{:error, {exception, reason}}`

"""
def put_default_backend(backend) do
  with {:ok, backend} <- validate_backend(backend) do
    # Fix: the backend was previously validated but never stored, so this
    # function was a no-op and `default_backend/0` kept returning the old
    # configured value. Persist it in the application environment, mirroring
    # `put_default_locale/1`.
    :ok = Application.put_env(Cldr.Config.app_name(), :default_backend, backend)
    {:ok, backend}
  end
end
@doc """
Returns the territory for the world

This is the outermost containment of
territories in CLDR.

CLDR does not yet consider non-terrestrial
territories.

"""
@compile {:inline, the_world: 0}
# UN M.49 code 001 = "World"; kept as an atom to match territory codes.
@the_world :"001"
def the_world do
  @the_world
end
@doc """
Return a localised string for types
that implement the `Cldr.Chars` protocol.

The `Cldr.Chars` protocol is implemented in this
library for `t:Cldr.LanguageTag.t()`.

Other CLDR-related libraries implement
the protocol for the types they support
such as `Float`, `Integer`, `Decimal`,
`Money`, `Unit` and `List`.

"""
@spec to_string(term()) :: String.t()
@compile {:inline, to_string: 1}
def to_string(term) do
  # Pure protocol dispatch; raises Protocol.UndefinedError for types
  # without a `Cldr.Chars` implementation.
  Cldr.Chars.to_string(term)
end
@doc """
Validates that a module is a CLDR backend module.

## Arguments

* `backend` is any module

## Returns

* `{:ok, backend}` is the module if a CLDR backend module or

* `{:error, {exception, reason}}` if the module is unknown or if
  the module is not a backend module.

## Examples

    iex> Cldr.validate_backend MyApp.Cldr
    {:ok, MyApp.Cldr}

    iex> Cldr.validate_backend :something_else
    {:error,
     {Cldr.UnknownBackendError,
      "The backend :something_else is not known or not a backend module."}}

"""
@spec validate_backend(backend :: atom()) :: {:ok, atom()} | {:error, {atom(), binary()}}
def validate_backend(backend) when is_atom(backend) do
  # A backend must both be compiled and expose the `__cldr__/1` marker
  # function that `use Cldr` generates.
  backend_module? =
    Cldr.Code.ensure_compiled?(backend) && function_exported?(backend, :__cldr__, 1)

  if backend_module? do
    {:ok, backend}
  else
    {:error, unknown_backend_error(backend)}
  end
end

def validate_backend(backend) do
  {:error, unknown_backend_error(backend)}
end

defp unknown_backend_error(backend) do
  message = "The backend #{inspect(backend)} is not known or not a backend module."
  {Cldr.UnknownBackendError, message}
end

@doc false
def validate_backend!(backend) do
  # Raising variant of `validate_backend/1`.
  case validate_backend(backend) do
    {:ok, backend} -> backend
    {:error, {exception, reason}} -> raise exception, reason
  end
end
@doc """
Normalise and validate a locale name.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
  or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Returns

* `{:ok, language_tag}`

* `{:error, reason}`

## Examples

    iex> Cldr.validate_locale("en", TestBackend.Cldr)
    {:ok,
     %Cldr.LanguageTag{
       backend: TestBackend.Cldr,
       canonical_locale_name: "en-Latn-US",
       cldr_locale_name: "en",
       extensions: %{},
       gettext_locale_name: "en",
       language: "en",
       locale: %{},
       private_use: [],
       rbnf_locale_name: "en",
       requested_locale_name: "en",
       script: "Latn",
       territory: :US,
       transform: %{},
       language_variant: nil
     }}

    iex> Cldr.validate_locale("zzz", TestBackend.Cldr)
    {:error, {Cldr.UnknownLocaleError, "The locale \\"zzz\\" is not known."}}

"""
@spec validate_locale(Locale.locale_name() | LanguageTag.t(), backend()) ::
        {:ok, LanguageTag.t()} | {:error, {module(), String.t()}}
def validate_locale(locale, backend \\ Cldr.default_backend())

def validate_locale(locale, backend) do
  # An explicit `nil` backend falls back to the configured default,
  # mirroring the behaviour of the default argument.
  backend = backend || Cldr.default_backend()
  backend.validate_locale(locale)
end
@doc """
Returns a list of all the locale names defined in
the CLDR repository.

Note that not necessarily all of these locales are
available since functions are only generated for configured
locales which is most cases will be a subset of locales
defined in CLDR.

See also: `requested_locales/1` and `known_locales/1`

"""
# Captured at compile time from the CLDR data files.
@all_locale_names Config.all_locale_names()
@spec all_locale_names :: [Locale.locale_name(), ...]
def all_locale_names do
  @all_locale_names
end
@doc """
Returns a list of all requested locale names.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

The list is the combination of configured locales,
`Gettext` locales and the default locale.

See also `known_locales/1` and `all_locales/0`

"""
@spec requested_locale_names(backend()) :: [Locale.locale_name(), ...] | []
def requested_locale_names(backend \\ default_backend()) do
  # Delegates to the backend; the list is fixed at backend compile time.
  backend.requested_locale_names
end

@doc """
Returns a list of the known locale names.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

Known locales are those locales which
are the subset of all CLDR locales that
have been configured for use either
directly in the `config.exs` file or
in `Gettext`.

"""
@spec known_locale_names(backend()) :: [Locale.locale_name(), ...] | []
def known_locale_names(backend \\ default_backend()) do
  # Delegates to the backend; the list is fixed at backend compile time.
  backend.known_locale_names
end

@doc """
Returns a list of the locales names that are configured,
but not known in CLDR.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

Since there is a compile-time exception raise if there are
any unknown locales this function should always
return an empty list.

"""
@spec unknown_locale_names(backend()) :: [Locale.locale_name(), ...] | []
def unknown_locale_names(backend \\ default_backend()) do
  # Expected to be `[]` in practice — see the doc note above.
  backend.unknown_locale_names
end

@doc """
Returns a list of locale names which have rules based number
formats (RBNF).

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module

"""
@spec known_rbnf_locale_names(backend()) :: [Locale.locale_name(), ...] | []
def known_rbnf_locale_names(backend \\ default_backend()) do
  # Delegates to the backend; the list is fixed at backend compile time.
  backend.known_rbnf_locale_names
end

@doc """
Returns a list of GetText locale names but in CLDR format with
underscore replaces by hyphen in order to facilitate comparisons
with Cldr locale names.

## Arguments

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

"""
@spec known_gettext_locale_names(backend()) :: [Locale.locale_name(), ...] | []
def known_gettext_locale_names(backend \\ default_backend()) do
  # Delegates to the backend; the list is fixed at backend compile time.
  backend.known_gettext_locale_names
end
@doc """
Returns either the `locale_name` or `nil` based upon
whether the locale name is configured in `Cldr`.

This is helpful when building a list of `or` expressions
to return the first known locale name from a list.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_locale_name("en-AU", TestBackend.Cldr)
    "en-AU"

    iex> Cldr.known_locale_name("en-SA", TestBackend.Cldr)
    nil

"""
@spec known_locale_name(Locale.locale_name(), backend()) :: String.t() | nil
def known_locale_name(locale_name, backend \\ default_backend())
    when is_binary(locale_name) do
  # The backend returns the name when known or a falsy value otherwise;
  # `|| nil` normalises any falsy result to `nil`.
  backend.known_locale_name(locale_name) || nil
end
@doc """
Returns a boolean indicating if the specified locale
name is configured and available in Cldr.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_locale_name?("en", TestBackend.Cldr)
    true

    iex> Cldr.known_locale_name?("!!", TestBackend.Cldr)
    false

"""
@spec known_locale_name?(Locale.locale_name(), backend()) :: boolean
def known_locale_name?(locale_name, backend \\ default_backend()) when is_binary(locale_name) do
  # Membership test against the backend's compile-time list of locales.
  Enum.member?(backend.known_locale_names(), locale_name)
end
@doc """
Returns a boolean indicating if the specified locale
name is configured and available in Cldr and supports
rules based number formats (RBNF).

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_rbnf_locale_name?("en", TestBackend.Cldr)
    true

    iex> Cldr.known_rbnf_locale_name?("!!", TestBackend.Cldr)
    false

"""
@spec known_rbnf_locale_name?(Locale.locale_name(), backend()) :: boolean
def known_rbnf_locale_name?(locale_name, backend \\ default_backend())
    when is_binary(locale_name) do
  # Membership test against the backend's compile-time RBNF locale list.
  locale_name in backend.known_rbnf_locale_names
end

@doc """
Returns a boolean indicating if the specified locale
name is configured and available in Gettext.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_gettext_locale_name?("en", TestBackend.Cldr)
    true

    iex> Cldr.known_gettext_locale_name?("!!", TestBackend.Cldr)
    false

"""
@spec known_gettext_locale_name?(Locale.locale_name(), backend) :: boolean
def known_gettext_locale_name?(locale_name, backend \\ default_backend())
    when is_binary(locale_name) do
  # Membership test against the backend's compile-time Gettext locale list.
  locale_name in backend.known_gettext_locale_names
end
@doc """
Returns either the RBNF `locale_name` or `false` based upon
whether the locale name is configured in `Cldr`
and has RBNF rules defined.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_rbnf_locale_name("en", TestBackend.Cldr)
    "en"

    iex> Cldr.known_rbnf_locale_name("en-SA", TestBackend.Cldr)
    false

"""
@spec known_rbnf_locale_name(Locale.locale_name(), backend()) :: String.t() | false
def known_rbnf_locale_name(locale_name, backend \\ default_backend())
    when is_binary(locale_name) do
  # `&&` yields `locale_name` when the predicate is true, `false` otherwise.
  backend.known_rbnf_locale_name?(locale_name) && locale_name
end
@doc """
Returns either the Gettext `locale_name` in Cldr format or
`false` based upon whether the locale name is configured in
`GetText`.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Examples

    iex> Cldr.known_gettext_locale_name("en", TestBackend.Cldr)
    "en"

    iex> Cldr.known_gettext_locale_name("en-SA", TestBackend.Cldr)
    false

"""
@spec known_gettext_locale_name(Locale.locale_name(), backend()) :: String.t() | false
def known_gettext_locale_name(locale_name, backend \\ default_backend())
    when is_binary(locale_name) do
  # Delegates to the backend, which returns the name or `false`.
  backend.known_gettext_locale_name(locale_name)
end
@doc """
Returns a boolean indicating if the specified locale
is available in CLDR.

The return value depends on whether the locale is
defined in the CLDR repository. It does not necessarily
mean the locale is configured for `Cldr`. See also
`Cldr.known_locale_name?/2`.

## Arguments

* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`

## Examples

    iex> Cldr.available_locale_name?("en-AU")
    true

    iex> Cldr.available_locale_name?("en-SA")
    false

"""
@spec available_locale_name?(Locale.locale_name() | LanguageTag.t()) :: boolean
def available_locale_name?(locale_name) when is_binary(locale_name) do
  # Checks the full CLDR repository list, not the configured subset.
  Enum.member?(Config.all_locale_names(), locale_name)
end

def available_locale_name?(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
  # For a parsed locale, check its resolved CLDR locale name.
  available_locale_name?(cldr_locale_name)
end
@doc """
Add locale-specific quotation marks around a string.

## Arguments

* `string` is any valid Elixir string

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

* `options` is a keyword list of options

## Options

* `:locale` is any valid locale name returned by `Cldr.known_locale_names/1`.
  The default is `Cldr.get_locale/0`

## Examples

    iex> Cldr.quote "Quoted String", MyApp.Cldr
    "“Quoted String”"

    iex> Cldr.quote "Quoted String", MyApp.Cldr, locale: "ja"
    "「Quoted String」"

"""
@spec quote(String.t(), backend(), Keyword.t()) :: String.t()
def quote(string, backend \\ default_backend(), options \\ [])

# Handles the call shape `Cldr.quote("s", locale: "ja")` where the options
# keyword list lands in the `backend` position and `options` gets the
# default `[]`. A `:backend` option may override the default backend.
def quote(string, options, []) when is_binary(string) and is_list(options) do
  {backend, options} = Keyword.pop(options, :backend)
  backend = backend || default_backend()
  quote(string, backend, options)
end

def quote(string, backend, options) when is_binary(string) and is_list(options) do
  backend.quote(string, options)
end
@doc """
Add locale-specific ellipsis to a string.

## Arguments

* `string` is any valid Elixir string

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

* `options` is a keyword list of options

## Options

* `:locale` is any valid locale name returned by `Cldr.known_locale_names/1`.
  The default is `Cldr.get_locale/0`

* `:location` determines where to place the ellipsis. The options are
  `:after` (the default for a single string argument), `:between` (the default
  and only valid location for an argument that is a list of two strings) and `:before`

## Examples

    iex> Cldr.ellipsis "And furthermore", MyApp.Cldr
    "And furthermore…"

    iex> Cldr.ellipsis ["And furthermore", "there is much to be done"], MyApp.Cldr, locale: "ja"
    "And furthermore…there is much to be done"

"""
@spec ellipsis(String.t() | list(String.t()), backend(), Keyword.t()) :: String.t()
def ellipsis(string, backend \\ default_backend(), options \\ [])

# Handles the call shape `Cldr.ellipsis("s", locale: "ja")` where the options
# keyword list lands in the `backend` position (same trick as `quote/3`).
def ellipsis(string, options, []) when is_list(options) do
  {backend, options} = Keyword.pop(options, :backend)
  backend = backend || default_backend()
  ellipsis(string, backend, options)
end

def ellipsis(string, backend, options) when is_list(options) do
  backend.ellipsis(string, options)
end
@doc """
Normalise and validate a gettext locale name.

## Arguments

* `locale_name` is any valid locale name returned by `Cldr.known_locale_names/1`
  or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
  Note that `Cldr.default_backend/0` will raise an exception if
  no `:default_backend` is configured under the `:ex_cldr` key in
  `config.exs`.

## Returns

* `{:ok, language_tag}`

* `{:error, reason}`

## Examples

"""
@spec validate_gettext_locale(Locale.locale_name() | LanguageTag.t(), backend()) ::
        {:ok, LanguageTag.t()} | {:error, {module(), String.t()}}
def validate_gettext_locale(locale_name, backend \\ default_backend())

# A binary name is first parsed into a LanguageTag, then re-validated.
def validate_gettext_locale(locale_name, backend)
    when is_binary(locale_name) do
  case Cldr.Locale.new(locale_name, backend) do
    {:ok, locale} -> validate_gettext_locale(locale, backend)
    {:error, reason} -> {:error, reason}
  end
end

# A parsed locale with no associated Gettext locale is an error.
def validate_gettext_locale(%LanguageTag{gettext_locale_name: nil} = locale, _backend) do
  {:error, Locale.gettext_locale_error(locale)}
end

def validate_gettext_locale(%LanguageTag{} = language_tag, _backend) do
  {:ok, language_tag}
end

# Catch-all for anything that is neither a binary nor a LanguageTag.
def validate_gettext_locale(locale, _backend) do
  {:error, Locale.gettext_locale_error(locale)}
end
@doc """
Returns a list of strings representing the calendars known to `Cldr`.

## Example

    iex> Cldr.known_calendars
    [:buddhist, :chinese, :coptic, :dangi, :ethiopic, :ethiopic_amete_alem,
     :gregorian, :hebrew, :indian, :islamic, :islamic_civil, :islamic_rgsa,
     :islamic_tbla, :islamic_umalqura, :japanese, :persian, :roc]

"""
# Captured at compile time from the CLDR data files.
@known_calendars Cldr.Config.known_calendars()
@spec known_calendars :: [atom(), ...]
def known_calendars do
  @known_calendars
end
@doc """
Normalise and validate a calendar name.

## Arguments

* `calendar` is any calendar name returned by `Cldr.known_calendars/0`

## Returns

* `{:ok, normalized_calendar_name}` or

* `{:error, {Cldr.UnknownCalendarError, message}}`

## Examples

    iex> Cldr.validate_calendar(:gregorian)
    {:ok, :gregorian}

    iex> Cldr.validate_calendar(:invalid)
    {:error, {Cldr.UnknownCalendarError, "The calendar name :invalid is invalid"}}

"""
@spec validate_calendar(atom() | String.t()) ::
        {:ok, atom()} | {:error, {module(), String.t()}}
# Fast path: a known calendar atom is returned unchanged.
def validate_calendar(calendar) when is_atom(calendar) and calendar in @known_calendars do
  {:ok, calendar}
end

# "gregory" is the name used for the locale "u" extension.
# NOTE: this clause must precede the generic binary clause below so the
# alias is recognised before the atom-conversion path runs.
def validate_calendar("gregory"), do: {:ok, :gregorian}

def validate_calendar(calendar) when is_atom(calendar) do
  {:error, unknown_calendar_error(calendar)}
end

# Binary names are downcased and converted with `String.to_existing_atom/1`
# so no new atoms are created from untrusted input; the ArgumentError raised
# for unknown names is turned into an error tuple.
def validate_calendar(calendar) when is_binary(calendar) do
  calendar
  |> String.downcase()
  |> String.to_existing_atom()
  |> validate_calendar
rescue
  ArgumentError ->
    {:error, unknown_calendar_error(calendar)}
end
@doc """
Returns an error tuple for an invalid calendar.

## Arguments

* `calendar` is any calendar name **not** returned by `Cldr.known_calendars/0`

## Returns

* `{:error, {Cldr.UnknownCalendarError, message}}`

## Examples

    iex> Cldr.unknown_calendar_error("invalid")
    {Cldr.UnknownCalendarError, "The calendar name \\"invalid\\" is invalid"}

"""
def unknown_calendar_error(calendar) do
  message = "The calendar name #{inspect(calendar)} is invalid"
  {Cldr.UnknownCalendarError, message}
end
@doc """
Returns a list of the territories known to `Cldr`.

The territories codes are defined in [UN M.49](https://en.wikipedia.org/wiki/UN_M.49)
which defines both individual territories and enclosing territories. These enclosing
territories are defined for statistical purposes and do not relate to political
alignment.

For example, the territory `:"001"` is defined as "the world".

## Example

    iex> Cldr.known_territories
    [:"001", :"002", :"003", :"005", :"009", :"011", :"013", :"014", :"015", :"017",
     :"018", :"019", :"021", :"029", :"030", :"034", :"035", :"039", :"053", :"054",
     :"057", :"061", :"142", :"143", :"145", :"150", :"151", :"154", :"155", :"202",
     :"419", :AC, :AD, :AE, :AF, :AG, :AI, :AL, :AM, :AO, :AQ, :AR, :AS, :AT, :AU,
     :AW, :AX, :AZ, :BA, :BB, :BD, :BE, :BF, :BG, :BH, :BI, :BJ, :BL, :BM, :BN, :BO,
     :BQ, :BR, :BS, :BT, :BV, :BW, :BY, :BZ, :CA, :CC, :CD, :CF, :CG, :CH, :CI, :CK,
     :CL, :CM, :CN, :CO, :CP, :CR, :CU, :CV, :CW, :CX, :CY, :CZ, :DE, :DG, :DJ, :DK,
     :DM, :DO, :DZ, :EA, :EC, :EE, :EG, :EH, :ER, :ES, :ET, :EU, :EZ, :FI, :FJ, :FK,
     :FM, :FO, :FR, :GA, :GB, :GD, :GE, :GF, :GG, :GH, :GI, :GL, :GM, :GN, :GP, :GQ,
     :GR, :GS, :GT, :GU, :GW, :GY, :HK, :HM, :HN, :HR, :HT, :HU, :IC, :ID, :IE, :IL,
     :IM, :IN, :IO, :IQ, :IR, :IS, :IT, :JE, :JM, :JO, :JP, :KE, :KG, :KH, :KI, :KM,
     :KN, :KP, :KR, :KW, :KY, :KZ, :LA, :LB, :LC, :LI, :LK, :LR, :LS, :LT, :LU, :LV,
     :LY, :MA, :MC, :MD, :ME, :MF, :MG, :MH, :MK, :ML, :MM, :MN, :MO, :MP, :MQ, :MR,
     :MS, :MT, :MU, :MV, :MW, :MX, :MY, :MZ, :NA, :NC, :NE, :NF, :NG, :NI, :NL, :NO,
     :NP, :NR, :NU, :NZ, :OM, :PA, :PE, :PF, :PG, :PH, :PK, :PL, :PM, :PN, :PR, :PS,
     :PT, :PW, :PY, :QA, :QO, :RE, :RO, :RS, :RU, :RW, :SA, :SB, :SC, :SD, :SE, :SG,
     :SH, :SI, :SJ, :SK, :SL, :SM, :SN, :SO, :SR, :SS, :ST, :SV, :SX, :SY, :SZ, :TA,
     :TC, :TD, :TF, :TG, :TH, :TJ, :TK, :TL, :TM, :TN, :TO, :TR, :TT, :TV, :TW, :TZ,
     :UA, :UG, :UM, :UN, :US, :UY, :UZ, :VA, :VC, :VE, :VG, :VI, :VN, :VU, :WF, :WS,
     :XK, :YE, :YT, :ZA, :ZM, :ZW]

"""
# Captured at compile time from the CLDR data files.
@known_territories Cldr.Config.known_territories()
@spec known_territories :: [atom(), ...]
def known_territories do
  @known_territories
end

# Returns the territory containment map captured at compile time.
# Used by `territory_chain/1` to walk from a territory to enclosing regions.
@territory_containment Cldr.Config.territory_containment()
@spec territory_containment() :: map()
def territory_containment do
  @territory_containment
end
@doc """
Returns the map of territories and subdivisions and their
child subdivisions.

The subdivision codes designate a subdivision of a country
or region. They are called various names, such as a
state in the United States, or a province in Canada.

The codes in CLDR are based on ISO 3166-2 subdivision codes.
The ISO codes have a region code followed by a hyphen, then a
suffix consisting of 1..3 ASCII letters or digits.

The CLDR codes are designed to work in a unicode_locale_id
(BCP47), and are thus all lowercase, with no hyphen. For
example, the following are valid, and mean “English as
used in California, USA”.

    en-u-sd-usca
    en-US-u-sd-usca

CLDR has additional subdivision codes. These may start with
a 3-digit region code or use a suffix of 4 ASCII letters or
digits, so they will not collide with the ISO codes. Subdivision
codes for unknown values are the region code plus "zzzz", such as
"uszzzz" for an unknown subdivision of the US. Other codes may be
added for stability.

"""
# Built at compile time. Two-letter (binary) territory keys are converted to
# existing atoms so they match `known_territories/0`; other keys (for example
# subdivision strings) are left unchanged.
@territory_subdivisions Cldr.Config.territory_subdivisions()
                        |> Enum.map(fn
                          {<<territory::binary-size(2)>>, children} ->
                            {String.to_existing_atom(territory), children}

                          other ->
                            other
                        end)
                        |> Map.new()

@spec known_territory_subdivisions :: %{atom() => list()}
def known_territory_subdivisions do
  @territory_subdivisions
end
@doc """
Returns a map of territory subdivisions with a list of
their parent subdivisions and region.

For a description of territory subdivisions see
`Cldr.known_territory_subdivisions/0`

"""
# Built at compile time. Two-letter parent codes (regions) are converted to
# existing atoms; other parents (subdivision strings) stay as binaries.
@territory_subdivision_containment Cldr.Config.territory_subdivision_containment()
                                   |> Enum.map(fn {subdivision, parents} ->
                                     parents =
                                       Enum.map(parents, fn
                                         <<territory::binary-size(2)>> ->
                                           String.to_existing_atom(territory)

                                         other ->
                                           other
                                       end)

                                     {subdivision, parents}
                                   end)
                                   |> Map.new()

@spec known_territory_subdivision_containment :: map()
def known_territory_subdivision_containment do
  @territory_subdivision_containment
end
@doc """
Normalise and validate a territory code.

## Arguments

* `territory` is any territory code returned by `Cldr.known_territories/0`

## Returns:

* `{:ok, normalized_territory_code}` or

* `{:error, {Cldr.UnknownTerritoryError, message}}`

## Examples

    iex> Cldr.validate_territory("en")
    {:error, {Cldr.UnknownTerritoryError, "The territory \\"en\\" is unknown"}}

    iex> Cldr.validate_territory("gb")
    {:ok, :GB}

    iex> Cldr.validate_territory("001")
    {:ok, :"001"}

    iex> Cldr.validate_territory(Cldr.Locale.new!("en", TestBackend.Cldr))
    {:ok, :US}

    iex> Cldr.validate_territory(%{})
    {:error, {Cldr.UnknownTerritoryError, "The territory %{} is unknown"}}

"""
@spec validate_territory(atom() | String.t()) ::
        {:ok, atom()} | {:error, {module(), String.t()}}
# Fast path: a known territory atom is returned unchanged.
def validate_territory(territory) when is_atom(territory) and territory in @known_territories do
  {:ok, territory}
end

def validate_territory(territory) when is_atom(territory) do
  {:error, unknown_territory_error(territory)}
end

# Binary codes are upcased and converted with `String.to_existing_atom/1`
# (no new atoms from untrusted input); an unknown name raises ArgumentError
# which is converted to an error tuple.
def validate_territory(territory) when is_binary(territory) do
  territory
  |> String.upcase()
  |> String.to_existing_atom()
  |> validate_territory
rescue
  ArgumentError ->
    {:error, unknown_territory_error(territory)}
end

# A parsed locale without a territory cannot be validated.
def validate_territory(%LanguageTag{territory: nil} = locale) do
  {:error, unknown_territory_error(locale)}
end

# For a parsed locale, validate its territory field.
def validate_territory(%LanguageTag{territory: territory}) do
  validate_territory(territory)
end

# Catch-all for any other term.
def validate_territory(territory) do
  {:error, unknown_territory_error(territory)}
end
@doc """
Normalise and validate a territory subdivision code.

## Arguments

* `subdivision` is any territory code returned by `Cldr.known_territory_subdivisions/0`

## Returns:

* `{:ok, normalized_subdivision_code}` or

* `{:error, {Cldr.UnknownTerritoryError, message}}`

## Examples

"""
def validate_territory_subdivision(subdivision) when is_binary(subdivision) do
  # CLDR subdivision codes are lowercase; normalise before lookup.
  subdivision |> String.downcase() |> validate_subdivision()
end

def validate_territory_subdivision(subdivision) do
  {:error, unknown_territory_error(subdivision)}
end

# A region code followed by "zzzz" means "unknown subdivision of that
# region", so only the territory part needs validating.
defp validate_subdivision(<<territory::binary-size(2), "zzzz">>) do
  validate_territory(territory)
end

defp validate_subdivision(subdivision) when is_binary(subdivision) do
  if Map.has_key?(known_territory_subdivision_containment(), subdivision) do
    {:ok, subdivision}
  else
    {:error, unknown_territory_error(subdivision)}
  end
end
@doc """
Return the territory fallback chain based upon
a locales territory (including `u` extension) and
territory containment definitions.

While CLDR also includes subdivisions in the
territory chain, this implementation does not
consider them.

## Arguments

* `territory` is either a binary or atom territory code
  or a `t:Cldr.LanguageTag`

## Returns

* `{:ok, list}` where `list` is a list of territories
  in decreasing order of containment (ie larger enclosing
  areas) or

* `{:error, {exception, reason}}` indicating an error

## Examples

    iex> Cldr.territory_chain "US"
    {:ok, [:US, :"021", :"019", :"001"]}

    iex> Cldr.territory_chain :AU
    {:ok, [:AU, :"053", :"009", :"001"]}

    iex> {:ok, locale} = Cldr.validate_locale("en-US-u-rg-CAzzzz", MyApp.Cldr)
    iex> Cldr.territory_chain locale
    {:ok, [:CA, :"021", :"019", :"001"]}

    iex> Cldr.territory_chain :"001"
    {:ok, [:"001"]}

"""
# For a locale, derive the effective territory (honouring the `u` extension,
# e.g. `-u-rg-CAzzzz`) and then build the chain for it.
def territory_chain(%LanguageTag{} = locale) do
  locale
  |> Cldr.Locale.territory_from_locale()
  |> territory_chain()
end

# The world contains everything; its chain is just itself.
def territory_chain(:"001" = the_world) do
  {:ok, [the_world]}
end

def territory_chain(territory) when is_atom(territory) do
  with {:ok, territory} <- Cldr.validate_territory(territory) do
    chain =
      territory_containment()
      |> Map.fetch!(territory)
      # NOTE(review): the containment map value appears to be a list of
      # chains of which the first is taken — confirm against
      # `Cldr.Config.territory_containment/0`'s value shape.
      |> hd()

    {:ok, [territory | chain]}
  end
end

# Binary codes are normalised to atoms first, then re-dispatched.
def territory_chain(territory) when is_binary(territory) do
  with {:ok, territory} <- validate_territory(territory) do
    territory_chain(territory)
  end
end
@doc """
Return the territory fallback chain based upon
a locales territory (including `u` extension) and
territory containment definitions.

While CLDR also includes subdivisions in the
territory chain, this implementation does not
consider them.

## Arguments

* `locale` is a binary locale name

* `backend` is any module that includes `use Cldr` and therefore
  is a `Cldr` backend module.

## Returns

* `{:ok, list}` where `list` is a list of territories
  in decreasing order of containment (ie larger enclosing
  areas) or

* `{:error, {exception, reason}}` indicating an error

## Examples

    iex> Cldr.territory_chain "en-US-u-rg-CAzzzz", MyApp.Cldr
    {:ok, [:CA, :"021", :"019", :"001"]}

"""
def territory_chain(locale_name, backend) when is_binary(locale_name) and is_atom(backend) do
  # Resolve the locale first; a validation error is passed through.
  case validate_locale(locale_name, backend) do
    {:ok, locale} -> territory_chain(locale)
    {:error, _} = error -> error
  end
end
@doc """
Returns an error tuple for an unknown territory.

## Arguments

* `territory` is any territory code **not** returned by `Cldr.known_territories/0`

## Returns

* `{:error, {Cldr.UnknownTerritoryError, message}}`

## Examples

    iex> Cldr.unknown_territory_error("invalid")
    {Cldr.UnknownTerritoryError, "The territory \\"invalid\\" is unknown"}

"""
@spec unknown_territory_error(any()) :: {Cldr.UnknownTerritoryError, String.t()}
def unknown_territory_error(territory) do
  message = "The territory #{inspect(territory)} is unknown"
  {Cldr.UnknownTerritoryError, message}
end
@doc """
Returns a list of strings representing the currencies known to `Cldr`.
## Example
iex> Cldr.known_currencies
[:XBB, :XEU, :SKK, :AUD, :CZK, :ISJ, :BRC, :IDR, :UYP, :VEF, :UAH, :KMF, :NGN,
:NAD, :LUC, :AWG, :BRZ, :AOK, :SHP, :DEM, :UGS, :ECS, :BRR, :HUF, :INR, :TPE,
:GYD, :MCF, :USS, :ALK, :TJR, :BGO, :BUK, :DKK, :LSL, :AZM, :ZRN, :MKN, :GHC,
:JMD, :NOK, :GWP, :CVE, :RUR, :BDT, :NIC, :LAK, :XFO, :KHR, :SRD, :ESB, :PGK,
:YUD, :BRN, :MAD, :PYG, :QAR, :MOP, :BOB, :CHW, :PHP, :SDG, :SEK, :KZT, :SDP,
:ZWD, :XTS, :SRG, :ANG, :CLF, :BOV, :XBA, :TMT, :TJS, :CUC, :SUR, :MAF, :BRL,
:PLZ, :PAB, :AOA, :ZWR, :UGX, :PTE, :NPR, :BOL, :MRO, :MXN, :ATS, :ARP, :KWD,
:CLE, :NLG, :TMM, :SAR, :PEN, :PKR, :RUB, :AMD, :MDL, :XRE, :AOR, :MZN, :ESA,
:XOF, :CNX, :ILR, :KRW, :CDF, :VND, :DJF, :FKP, :BIF, :FJD, :MYR, :BBD, :GEK,
:PES, :CNY, :GMD, :SGD, :MTP, :ZMW, :MWK, :BGN, :GEL, :TTD, :LVL, :XCD, :ARL,
:EUR, :UYU, :ZAL, :CSD, :ECV, :GIP, :CLP, :KRH, :CYP, :TWD, :SBD, :SZL, :IRR,
:LRD, :CRC, :XDR, :SYP, :YUM, :SIT, :DOP, :MVP, :BWP, :KPW, :GNS, :ZMK, :BZD,
:TRY, :MLF, :KES, :MZE, :ALL, :JOD, :HTG, :TND, :ZAR, :LTT, :BGL, :XPD, :CSK,
:SLL, :BMD, :BEF, :FIM, :ARA, :ZRZ, :CHF, :SOS, :KGS, :GWE, :LTL, :ITL, :DDM,
:ERN, :BAM, :BRB, :ARS, :RHD, :STD, :RWF, :GQE, :HRD, :ILP, :YUR, :AON, :BYR,
:RSD, :ZWL, :XBD, :XFU, :GBP, :VEB, :BTN, :UZS, :BGM, :BAD, :MMK, :XBC, :LUF,
:BSD, :XUA, :GRD, :CHE, :JPY, :EGP, :XAG, :LYD, :XAU, :USD, :BND, :XPT, :BRE,
:ROL, :PLN, :MZM, :FRF, :MGF, :LUL, :SSP, :DZD, :IEP, :SDD, :ADP, :AFN, :IQD,
:GHS, :TOP, :LVR, :YUN, :MRU, :MKD, :GNF, :MXP, :THB, :CNH, :TZS, :XPF, :AED,
:SVC, :RON, :BEC, :CUP, :USN, :LBP, :BOP, :BHD, :UYW, :BAN, :MDC, :VUV, :MGA,
:ISK, :COP, :BYN, :UAK, :TRL, :SCR, :KRO, :ILS, :ETB, :CAD, :AZN, :VNN, :NIO,
:COU, :EEK, :KYD, :MNT, :HNL, :WST, :PEI, :YER, :MTL, :STN, :AFA, :ARM, :HKD,
:NZD, :VES, :UYI, :MXV, :GTQ, :BYB, :XXX, :XSU, :HRK, :OMR, :BEL, :MUR, :ESP,
:YDD, :MVR, :LKR, :XAF]
"""
# Compile-time snapshot of the currency codes shipped with the CLDR data.
@known_currencies Cldr.Config.known_currencies()
@spec known_currencies :: [atom(), ...] | []
def known_currencies, do: @known_currencies
@doc """
Normalize and validate a currency code.
## Arguments
* `currency` is any ISO 4217 currency code as returned by `Cldr.known_currencies/0`
or any valid private use ISO4217 code which is a three-letter alphabetic code that
starts with "X".
## Returns
* `{:ok, normalized_currency_code}` or
* `{:error, {Cldr.UnknownCurrencyError, message}}`
## Examples
iex> Cldr.validate_currency(:USD)
{:ok, :USD}
iex> Cldr.validate_currency("USD")
{:ok, :USD}
iex> Cldr.validate_currency(:XTC)
{:ok, :XTC}
iex> Cldr.validate_currency("xtc")
{:ok, :XTC}
iex> Cldr.validate_currency("invalid")
{:error, {Cldr.UnknownCurrencyError, "The currency \\"invalid\\" is invalid"}}
iex> Cldr.validate_currency(:invalid)
{:error, {Cldr.UnknownCurrencyError, "The currency :invalid is invalid"}}
"""
# Fast path: the atom is already one of the known currency codes.
def validate_currency(currency) when is_atom(currency) and currency in @known_currencies do
{:ok, currency}
end
# Unknown atom (e.g. :usd): retry via the binary clauses so casing is
# normalised, but report any failure in terms of the original atom.
def validate_currency(currency) when is_atom(currency) do
currency
|> Atom.to_string()
|> validate_currency
|> case do
{:error, _} -> {:error, unknown_currency_error(currency)}
ok -> ok
end
end
# Private-use codes: three alphabetic characters starting with "X"/"x" are
# valid per ISO 4217 and accepted without a lookup. String.to_atom/1 is
# bounded here by the three-letter guard, so atom-table growth is limited.
def validate_currency(
<<char_1::integer-size(8), char_2::integer-size(8), char_3::integer-size(8)>> = currency
)
when Config.is_alphabetic(char_1) and Config.is_alphabetic(char_2) and
Config.is_alphabetic(char_3) and char_1 in [?x, ?X] do
{:ok, String.to_atom(String.upcase(currency))}
end
# Any other three-letter alphabetic string: upcase, convert to an existing
# atom and check membership. String.to_existing_atom/1 raises ArgumentError
# for strings that were never currency atoms; handled by the rescue below.
def validate_currency(
<<char_1::integer-size(8), char_2::integer-size(8), char_3::integer-size(8)>> = currency
)
when Config.is_alphabetic(char_1) and Config.is_alphabetic(char_2) and
Config.is_alphabetic(char_3) do
currency_code =
currency
|> String.upcase()
|> String.to_existing_atom()
if currency_code in @known_currencies do
{:ok, currency_code}
else
{:error, unknown_currency_error(currency)}
end
rescue
ArgumentError ->
{:error, unknown_currency_error(currency)}
end
# Anything else (wrong length, non-alphabetic, other terms) is invalid.
def validate_currency(invalid_currency) do
{:error, unknown_currency_error(invalid_currency)}
end
@doc """
Returns an error tuple for an invalid currency.
## Arguments
* `currency` is any currency code **not** returned by `Cldr.known_currencies/0`
## Returns
* `{Cldr.UnknownCurrencyError, message}`
## Examples
iex> Cldr.unknown_currency_error("invalid")
{Cldr.UnknownCurrencyError, "The currency \\"invalid\\" is invalid"}
"""
def unknown_currency_error(currency) do
message = "The currency " <> inspect(currency) <> " is invalid"
{Cldr.UnknownCurrencyError, message}
end
@doc """
Returns a list of atoms representing the number systems known to `Cldr`.
## Example
iex> Cldr.known_number_systems
[:adlm, :ahom, :arab, :arabext, :armn, :armnlow, :bali, :beng, :bhks, :brah,
:cakm, :cham, :cyrl, :deva, :diak, :ethi, :fullwide, :geor, :gong, :gonm, :grek,
:greklow, :gujr, :guru, :hanidays, :hanidec, :hans, :hansfin, :hant, :hantfin,
:hebr, :hmng, :hmnp, :java, :jpan, :jpanfin, :jpanyear, :kali, :khmr, :knda, :lana, :lanatham,
:laoo, :latn, :lepc, :limb, :mathbold, :mathdbl, :mathmono, :mathsanb,
:mathsans, :mlym, :modi, :mong, :mroo, :mtei, :mymr, :mymrshan, :mymrtlng,
:newa, :nkoo, :olck, :orya, :osma, :rohg, :roman, :romanlow, :saur, :segment, :shrd,
:sind, :sinh, :sora, :sund, :takr, :talu, :taml, :tamldec, :telu, :thai, :tibt,
:tirh, :vaii, :wara, :wcho]
"""
# Compile-time snapshot of the number systems shipped with the CLDR data.
@known_number_systems Cldr.Config.known_number_systems()
@spec known_number_systems :: [atom(), ...] | []
def known_number_systems, do: @known_number_systems
@doc """
Normalize and validate a number system name.
## Arguments
* `number_system` is any number system name returned by
`Cldr.known_number_systems/0`
## Returns
* `{:ok, normalized_number_system_name}` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.validate_number_system :latn
{:ok, :latn}
iex> Cldr.validate_number_system "latn"
{:ok, :latn}
iex> Cldr.validate_number_system "invalid"
{
:error,
{Cldr.UnknownNumberSystemError, "The number system :invalid is unknown"}
}
"""
@spec validate_number_system(atom() | String.t()) ::
{:ok, atom()} | {:error, {module(), String.t()}}
# Atom form: accept it only if it is one of the known number systems.
def validate_number_system(number_system) when is_atom(number_system) do
case Enum.member?(known_number_systems(), number_system) do
true -> {:ok, number_system}
false -> {:error, unknown_number_system_error(number_system)}
end
end
# Binary form: downcase and convert to an *existing* atom (never creating
# new atoms from input), then re-validate via the atom clause.
def validate_number_system(number_system) when is_binary(number_system) do
try do
number_system
|> String.downcase()
|> String.to_existing_atom()
|> validate_number_system()
rescue
ArgumentError ->
{:error, unknown_number_system_error(number_system)}
end
end
@doc """
Normalize and validate a plural type.
## Arguments
* `plural_type` is any plural type returned by
`Cldr.Number.PluralRule.known_plural_types/0`
## Returns
* `{:ok, normalized_plural_type}` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.validate_plural_type :few
{:ok, :few}
iex> Cldr.validate_plural_type "one"
{:ok, :one}
iex> Cldr.validate_plural_type "invalid"
{
:error,
{Cldr.UnknownPluralTypeError, "The plural type :invalid is unknown"}
}
"""
@spec validate_plural_type(atom() | String.t()) ::
{:ok, Cldr.Number.PluralRule.plural_type()} | {:error, {module(), String.t()}}
# Atom form: accept only the known plural types.
def validate_plural_type(plural_type) when is_atom(plural_type) do
case Enum.member?(Cldr.Number.PluralRule.known_plural_types(), plural_type) do
true -> {:ok, plural_type}
false -> {:error, unknown_plural_type_error(plural_type)}
end
end
# Binary form: downcase and convert to an *existing* atom only, then
# re-validate via the atom clause.
def validate_plural_type(plural_type) when is_binary(plural_type) do
try do
plural_type
|> String.downcase()
|> String.to_existing_atom()
|> validate_plural_type()
rescue
ArgumentError ->
{:error, unknown_plural_type_error(plural_type)}
end
end
@doc """
Returns an error tuple for an unknown number system.
## Arguments
* `number_system` is any number system name **not** returned by `Cldr.known_number_systems/0`
## Returns
* `{Cldr.UnknownNumberSystemError, message}`
## Examples
iex> Cldr.unknown_number_system_error "invalid"
{Cldr.UnknownNumberSystemError, "The number system \\"invalid\\" is invalid"}
iex> Cldr.unknown_number_system_error :invalid
{Cldr.UnknownNumberSystemError, "The number system :invalid is unknown"}
"""
# NOTE(review): the original @spec here named the wrong function
# (unknown_currency_error) and the wrong error module
# (Cldr.UnknownCurrencyError); corrected to match this function.
@spec unknown_number_system_error(any()) :: {Cldr.UnknownNumberSystemError, String.t()}
# An atom is a well-formed name that simply isn't known.
def unknown_number_system_error(number_system) when is_atom(number_system) do
{Cldr.UnknownNumberSystemError, "The number system #{inspect(number_system)} is unknown"}
end
# Any other term (typically a string) is an invalid name altogether.
def unknown_number_system_error(number_system) do
{Cldr.UnknownNumberSystemError, "The number system #{inspect(number_system)} is invalid"}
end
@doc """
Returns a list of atoms representing the number systems types known to `Cldr`.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
Note that `Cldr.default_backend/0` will raise an exception if
no `:default_backend` is configured under the `:ex_cldr` key in
`config.exs`.
## Example
iex> Cldr.known_number_system_types(TestBackend.Cldr)
[:default, :finance, :native, :traditional]
"""
# Thin delegation: the backend module owns the actual data.
def known_number_system_types(backend \\ default_backend()) do
apply(backend, :known_number_system_types, [])
end
@doc """
Normalise and validate a number system type.
## Arguments
* `number_system_type` is any number system type returned by
`Cldr.known_number_system_types/1`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module. The default is `Cldr.default_backend/0`.
Note that `Cldr.default_backend/0` will raise an exception if
no `:default_backend` is configured under the `:ex_cldr` key in
`config.exs`.
## Returns
* `{:ok, normalized_number_system_type}` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.validate_number_system_type(:default, TestBackend.Cldr)
{:ok, :default}
iex> Cldr.validate_number_system_type(:traditional, TestBackend.Cldr)
{:ok, :traditional}
iex> Cldr.validate_number_system_type(:latn, TestBackend.Cldr)
{
:error,
{Cldr.UnknownNumberSystemTypeError, "The number system type :latn is unknown"}
}
"""
@spec validate_number_system_type(String.t() | atom(), backend()) ::
{:ok, atom()} | {:error, {module(), String.t()}}
# Thin delegation: validation lives in the backend module.
def validate_number_system_type(number_system_type, backend \\ default_backend()) do
apply(backend, :validate_number_system_type, [number_system_type])
end
@doc """
Returns an error tuple for an unknown number system type.
## Options
* `number_system_type` is any number system type name **not** returned
by `Cldr.known_number_system_types/1`
## Returns
* `{Cldr.UnknownNumberSystemTypeError, message}`
## Examples
iex> Cldr.unknown_number_system_type_error("invalid")
{Cldr.UnknownNumberSystemTypeError, "The number system type \\"invalid\\" is invalid"}
iex> Cldr.unknown_number_system_type_error(:invalid)
{Cldr.UnknownNumberSystemTypeError, "The number system type :invalid is unknown"}
"""
@spec unknown_number_system_type_error(any()) :: {Cldr.UnknownNumberSystemTypeError, String.t()}
# An atom is well-formed but not a known type; anything else is invalid.
def unknown_number_system_type_error(number_system_type) when is_atom(number_system_type) do
{Cldr.UnknownNumberSystemTypeError,
 "The number system type " <> inspect(number_system_type) <> " is unknown"}
end
def unknown_number_system_type_error(number_system_type) do
{Cldr.UnknownNumberSystemTypeError,
 "The number system type " <> inspect(number_system_type) <> " is invalid"}
end
@doc """
Returns an error tuple for an unknown plural type.
## Options
* `plural_type` is any number system type name **not** returned
by `Cldr.Number.PluralRule.known_plural_types/0`
## Returns
* `{Cldr.UnknownPluralTypeError, message}`
## Examples
iex> Cldr.unknown_plural_type_error("invalid")
{Cldr.UnknownPluralTypeError, "The plural type \\"invalid\\" is invalid"}
iex> Cldr.unknown_plural_type_error(:invalid)
{Cldr.UnknownPluralTypeError, "The plural type :invalid is unknown"}
"""
@spec unknown_plural_type_error(any()) :: {Cldr.UnknownPluralTypeError, String.t()}
# An atom is well-formed but not a known type; anything else is invalid.
def unknown_plural_type_error(plural_type) when is_atom(plural_type) do
{Cldr.UnknownPluralTypeError, "The plural type " <> inspect(plural_type) <> " is unknown"}
end
def unknown_plural_type_error(plural_type) do
{Cldr.UnknownPluralTypeError, "The plural type " <> inspect(plural_type) <> " is invalid"}
end
@doc """
Normalise and validate a measurement system type.
## Arguments
* `measurement_system` is a known
measurement system.
## Returns
* `{:ok, normalized_measurement_system}` or
* `{:error, {exception, message}}`
## Examples
iex> Cldr.validate_measurement_system :metric
{:ok, :metric}
iex> Cldr.validate_measurement_system "ussystem"
{:ok, :ussystem}
iex> Cldr.validate_measurement_system "uksystem"
{:ok, :uksystem}
iex> Cldr.validate_measurement_system "something"
{:error, {Cldr.UnknownMeasurementSystemError,
"The measurement system \\"something\\" is invalid"}}
"""
# Strings are downcased before matching; atoms are matched as-is.
def validate_measurement_system(system) when is_binary(system) do
system
|> String.downcase()
|> do_validate_measurement_system
end
def validate_measurement_system(system) when is_atom(system) do
do_validate_measurement_system(system)
end
# Map of every system name *and* its alias (when one exists) to the
# canonical system name, built once at compile time.
@measurement_systems Cldr.Config.measurement_systems()
|> Enum.flat_map(fn
{k, %{alias: nil}} -> [{k, k}]
{k, %{alias: a}} -> [{k, k}, {a, k}]
end)
|> Map.new()
# Generate one clause per (name-or-alias, canonical) pair, for both the
# atom form and its string form, so lookup is a head match with no
# runtime atom creation.
for {system, canonical_system} <- @measurement_systems do
defp do_validate_measurement_system(unquote(system)),
do: {:ok, unquote(canonical_system)}
defp do_validate_measurement_system(unquote(Kernel.to_string(system))),
do: {:ok, unquote(canonical_system)}
end
# Fallback: anything not matched above is invalid.
defp do_validate_measurement_system(measurement_system) do
{:error, unknown_measurement_system_error(measurement_system)}
end
def unknown_measurement_system_error(measurement_system) do
{
Cldr.UnknownMeasurementSystemError,
"The measurement system #{inspect(measurement_system)} is invalid"
}
end
@doc """
Returns a unicode string representing a flag for a territory.
## Options
* `territory` is any valid territory code returned
by `Cldr.known_territories/0` or a `Cldr.LanguageTag.t`
## Returns
* A string representing a flag or
* An empty string if the territory is valid but no
unicode grapheme is defined. This is true for territories
that are aggregate areas such as "the world" which is
`:001`
* `{:error, {Cldr.UnknownTerritoryError, message}}`
## Notes
* If a `Cldr.LanguageTag.t` is provided, the territory is determined
by `Cldr.Locale.territory_from_locale/1`
## Examples
iex> Cldr.flag :AU
"🇦🇺"
iex> Cldr.flag :US
"🇺🇸"
iex> Cldr.flag "UN"
"🇺🇳"
iex> Cldr.flag :UK
{:error, {Cldr.UnknownTerritoryError, "The territory :UK is unknown"}}
"""
# Locale form: derive the territory from the locale; no extra validation
# is performed here (assumes territory_from_locale/1 returns an atom —
# see the Atom.to_charlist/1 call below).
def flag(%LanguageTag{} = locale) do
locale
|> Cldr.Locale.territory_from_locale
|> Atom.to_charlist()
|> generate_flag
end
# Territory form: validate first; the `with` passes the error tuple
# through unchanged on failure.
def flag(territory) do
with {:ok, territory} <- validate_territory(territory) do
territory
|> Atom.to_charlist()
|> generate_flag
end
end
# Two-letter codes only: offset each ASCII letter into the Unicode
# regional-indicator range (?A + 127_397 == 🇦) and join the pair.
defp generate_flag([_, _] = iso_code) do
iso_code
|> Enum.map(&(&1 + 127_397))
|> Kernel.to_string()
end
# Numeric/aggregate territories (e.g. :001) have no flag grapheme.
defp generate_flag(_) do
""
end
@doc false
# Resolves a {locale, backend} pair from caller-supplied options, filling
# in defaults from the current process locale / configured backend.
# Clause order matters: nil/nil, LanguageTag, binary+nil, nil+backend,
# binary+backend are tried in that sequence.
def locale_and_backend_from(options) when is_list(options) do
locale = Keyword.get(options, :locale)
backend = Keyword.get(options, :backend)
locale_and_backend_from(locale, backend)
end
# Neither given: the process's current locale carries its own backend.
def locale_and_backend_from(nil, nil) do
locale = Cldr.get_locale()
{locale, locale.backend}
end
# A parsed locale wins; its embedded backend overrides any passed backend.
def locale_and_backend_from(%Cldr.LanguageTag{} = locale, _backend) do
{locale, locale.backend}
end
# Locale name without a backend: fall back to the configured default
# backend (raises if none is configured).
def locale_and_backend_from(locale, nil) when is_binary(locale) do
{locale, Cldr.default_backend()}
end
# Backend without a locale: use that backend's current locale.
def locale_and_backend_from(nil, backend) do
{backend.get_locale(), backend}
end
# Both given as-is.
def locale_and_backend_from(locale, backend) when is_binary(locale) do
{locale, backend}
end
@doc false
# Human-readable (inspected) locale name for error messages.
def locale_name(%LanguageTag{cldr_locale_name: name}), do: inspect(name)
def locale_name(locale) when is_binary(locale), do: inspect(locale)
@doc false
# Emits a debug log entry only when the CLDR_DEBUG environment variable
# is set; a no-op (returns nil) otherwise.
def maybe_log(message) do
require Logger
if System.get_env("CLDR_DEBUG"), do: Logger.debug(message)
end
end
|
lib/cldr.ex
| 0.89684
| 0.618809
|
cldr.ex
|
starcoder
|
# NOTE(review): this module is an IntelliJ-Elixir annotator test fixture
# (issue 469). The truncated `Ecto.Changeset.` type on `changeset/1` and the
# inline <error>…</error> markup below are intentional test markers — do not
# "fix" them; doing so would invalidate the fixture.
defmodule Interpreter.Resources do
@moduledoc """
A module that exposes Ecto schema structs
"""
# Types
@typedoc """
ID that uniquely identifies the `struct`
"""
@type id :: term
@type params :: map
@typedoc """
* `:associations` - associations to load in the `struct`
"""
@type query_options :: %{optional(:associations) => atom | [atom]}
@type sandbox_access_token :: %{required(:owner) => term}
# Callbacks
@doc """
Allows access to sandbox for testing
"""
@callback allow_sandbox_access(sandbox_access_token) :: :ok | {:already, :owner | :allowed} | :not_found
@doc """
Changeset for creating a struct from the `params`
"""
@callback changeset(params) :: Ecto.Changeset.
@doc <error descr="Strings aren't allowed in types">"""
Deletes a single `struct`
# Returns
* `{:ok, struct}` - the delete succeeded and the returned struct is the state before delete
* `{:error, Ecto.Changeset.t}` - errors while deleting the `struct`. `Ecto.Changeset.t` `errors` contains errors.
"""</error>
@callback delete(struct) :: {:ok, struct} | {:error, Ecto.Changeset.t}
@doc """
Gets a single `struct`
# Returns
* `nil` - if the `id` was not found
* `struct` - if the `id` was found
"""
@callback get(id, query_options) :: nil | struct
@doc """
Inserts `changeset` into a single new `struct`
# Returns
* `{:ok, struct}` - `changeset` was inserted into `struct`
* `{:error, Ecto.Changeset.t}` - insert failed. `Ecto.Changeset.t` `errors` contain errors.
"""
@callback insert(Ecto.Changeset.t, query_options) :: {:ok, struct} | {:error, Ecto.Changeset.t}
@doc """
Inserts `params` into a single new `struct`
# Returns
* `{:ok, struct}` - params were inserted into `struct`
* `{:error, Ecto.Changeset.t}` - insert failed. `Ecto.Changeset.t` `errors` contain errors.
"""
@callback insert(params, query_options) :: {:ok, struct} | {:error, Ecto.Changeset.t}
@doc """
Gets a list of `struct`s.
"""
@callback list(query_options) :: [struct]
@doc """
# Returns
* `true` - if `allow_sandbox_access/1` should be called before any of the query methods are called
* `false` - otherwise
"""
@callback sandboxed?() :: boolean
@doc """
Updates `struct`
# Returns
* `{:ok, struct}` - the update succeeded and the returned `struct` contains the updates
* `{error, Ecto.Changeset.t}` - errors while updating `struct` with `params`. `Ecto.Changeset.t` `errors` contains
errors.
"""
@callback update(struct, params, query_options) :: {:ok, struct} | {:error, Ecto.Changeset.t}
end
|
testData/org/elixir_lang/annotator/module_attribute/issue_469.ex
| 0.88447
| 0.501831
|
issue_469.ex
|
starcoder
|
defmodule AWS.SESv2 do
@moduledoc """
Amazon SES API v2
Welcome to the Amazon SES API v2 Reference. This guide provides information
about the Amazon SES API v2, including supported operations, data types,
parameters, and schemas.
[Amazon SES](https://aws.amazon.com/pinpoint) is an AWS service that you
can use to send email messages to your customers.
If you're new to Amazon SES API v2, you might find it helpful to also
review the [Amazon Simple Email Service Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/). The *Amazon
SES Developer Guide* provides information and code samples that demonstrate
how to use Amazon SES API v2 features programmatically.
The Amazon SES API v2 is available in several AWS Regions and it provides
an endpoint for each of these Regions. For a list of all the Regions and
endpoints where the API is currently available, see [AWS Service
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region)
in the *Amazon Web Services General Reference*. To learn more about AWS
Regions, see [Managing AWS
Regions](https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
in the *Amazon Web Services General Reference*.
In each Region, AWS maintains multiple Availability Zones. These
Availability Zones are physically isolated from each other, but are united
by private, low-latency, high-throughput, and highly redundant network
connections. These Availability Zones enable us to provide very high levels
of availability and redundancy, while also minimizing latency. To learn
more about the number of Availability Zones that are available in each
Region, see [AWS Global
Infrastructure](http://aws.amazon.com/about-aws/global-infrastructure/).
"""
@doc """
Create a configuration set. *Configuration sets* are groups of rules that
you can apply to the emails that you send. You apply a configuration set to
an email by specifying the name of the configuration set when you call the
Amazon SES API v2. When you apply a configuration set to an email, all of
the rules in that configuration set are applied to the email.
"""
def create_configuration_set(client, input, options \\ []) do
request(client, :post, "/v2/email/configuration-sets", [], [], input, options, nil)
end
@doc """
Create an event destination. *Events* include message sends, deliveries,
opens, clicks, bounces, and complaints. *Event destinations* are places
that you can send information about these events to. For example, you can
send event data to Amazon SNS to receive notifications when you receive
bounces or complaints, or you can use Amazon Kinesis Data Firehose to
stream data to Amazon S3 for long-term storage.
A single configuration set can include more than one event destination.
"""
def create_configuration_set_event_destination(client, configuration_set_name, input, options \\ []) do
url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations"
request(client, :post, url, [], [], input, options, nil)
end
@doc """
Creates a new custom verification email template.
For more information about custom verification email templates, see [Using
Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def create_custom_verification_email_template(client, input, options \\ []) do
request(client, :post, "/v2/email/custom-verification-email-templates", [], [], input, options, nil)
end
@doc """
Create a new pool of dedicated IP addresses. A pool can include one or more
dedicated IP addresses that are associated with your AWS account. You can
associate a pool with a configuration set. When you send an email that uses
that configuration set, the message is sent from one of the addresses in
the associated pool.
"""
def create_dedicated_ip_pool(client, input, options \\ []) do
request(client, :post, "/v2/email/dedicated-ip-pools", [], [], input, options, nil)
end
@doc """
Create a new predictive inbox placement test. Predictive inbox placement
tests can help you predict how your messages will be handled by various
email providers around the world. When you perform a predictive inbox
placement test, you provide a sample message that contains the content that
you plan to send to your customers. Amazon SES then sends that message to
special email addresses spread across several major email providers. After
about 24 hours, the test is complete, and you can use the
`GetDeliverabilityTestReport` operation to view the results of the test.
"""
def create_deliverability_test_report(client, input, options \\ []) do
request(client, :post, "/v2/email/deliverability-dashboard/test", [], [], input, options, nil)
end
@doc """
Starts the process of verifying an email identity. An *identity* is an
email address or domain that you use when you send email. Before you can
use an identity to send email, you first have to verify it. By verifying an
identity, you demonstrate that you're the owner of the identity, and that
you've given Amazon SES API v2 permission to send email from the identity.
When you verify an email address, Amazon SES sends an email to the address.
Your email address is verified as soon as you follow the link in the
verification email.
When you verify a domain without specifying the `DkimSigningAttributes`
object, this operation provides a set of DKIM tokens. You can convert these
tokens into CNAME records, which you then add to the DNS configuration for
your domain. Your domain is verified when Amazon SES detects these records
in the DNS configuration for your domain. This verification method is known
as [Easy
DKIM](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html).
Alternatively, you can perform the verification process by providing your
own public-private key pair. This verification method is known as Bring
Your Own DKIM (BYODKIM). To use BYODKIM, your call to the
`CreateEmailIdentity` operation has to include the `DkimSigningAttributes`
object. When you specify this object, you provide a selector (a component
of the DNS record name that identifies the public key that you want to use
for DKIM authentication) and a private key.
"""
def create_email_identity(client, input, options \\ []) do
request(client, :post, "/v2/email/identities", [], [], input, options, nil)
end
@doc """
Creates the specified sending authorization policy for the given identity
(an email address or a domain).
<note> This API is for the identity owner only. If you have not verified
the identity, this API will return an error.
</note> Sending authorization is a feature that enables an identity owner
to authorize other senders to use its identities. For information about
using sending authorization, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def create_email_identity_policy(client, email_identity, policy_name, input, options \\ []) do
url = "/v2/email/identities/#{URI.encode(email_identity)}/policies/#{URI.encode(policy_name)}"
request(client, :post, url, [], [], input, options, nil)
end
@doc """
Creates an email template. Email templates enable you to send personalized
email to one or more destinations in a single API operation. For more
information, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html).
You can execute this operation no more than once per second.
"""
def create_email_template(client, input, options \\ []) do
request(client, :post, "/v2/email/templates", [], [], input, options, nil)
end
@doc """
Delete an existing configuration set.
*Configuration sets* are groups of rules that you can apply to the emails
you send. You apply a configuration set to an email by including a
reference to the configuration set in the headers of the email. When you
apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def delete_configuration_set(client, configuration_set_name, input, options \\ []) do
url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Delete an event destination.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information
about these events to. For example, you can send event data to Amazon SNS
to receive notifications when you receive bounces or complaints, or you can
use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term
storage.
"""
def delete_configuration_set_event_destination(client, configuration_set_name, event_destination_name, input, options \\ []) do
url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations/#{URI.encode(event_destination_name)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes an existing custom verification email template.
For more information about custom verification email templates, see [Using
Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
# NOTE(review): fixed broken doc link — the host path was "/es/" instead of
# "/ses/", matching every other SES doc link in this module.
def delete_custom_verification_email_template(client, template_name, input, options \\ []) do
path_ = "/v2/email/custom-verification-email-templates/#{URI.encode(template_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Delete a dedicated IP pool.
"""
def delete_dedicated_ip_pool(client, pool_name, input, options \\ []) do
url = "/v2/email/dedicated-ip-pools/#{URI.encode(pool_name)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes an email identity. An identity can be either an email address or a
domain name.
"""
def delete_email_identity(client, email_identity, input, options \\ []) do
url = "/v2/email/identities/#{URI.encode(email_identity)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes the specified sending authorization policy for the given identity
(an email address or a domain). This API returns successfully even if a
policy with the specified name does not exist.
<note> This API is for the identity owner only. If you have not verified
the identity, this API will return an error.
</note> Sending authorization is a feature that enables an identity owner
to authorize other senders to use its identities. For information about
using sending authorization, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def delete_email_identity_policy(client, email_identity, policy_name, input, options \\ []) do
url = "/v2/email/identities/#{URI.encode(email_identity)}/policies/#{URI.encode(policy_name)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Deletes an email template.
You can execute this operation no more than once per second.
"""
def delete_email_template(client, template_name, input, options \\ []) do
url = "/v2/email/templates/#{URI.encode(template_name)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Removes an email address from the suppression list for your account.
"""
def delete_suppressed_destination(client, email_address, input, options \\ []) do
url = "/v2/email/suppression/addresses/#{URI.encode(email_address)}"
request(client, :delete, url, [], [], input, options, nil)
end
@doc """
Obtain information about the email-sending status and capabilities of your
Amazon SES account in the current AWS Region.
"""
def get_account(client, options \\ []) do
request(client, :get, "/v2/email/account", [], [], nil, options, nil)
end
@doc """
Retrieve a list of the blacklists that your dedicated IP addresses appear
on.
"""
def get_blacklist_reports(client, blacklist_item_names, options \\ []) do
# Only include the query parameter when a value was supplied.
query =
case blacklist_item_names do
nil -> []
names -> [{"BlacklistItemNames", names}]
end
request(client, :get, "/v2/email/deliverability-dashboard/blacklist-report", query, [], nil, options, nil)
end
@doc """
Get information about an existing configuration set, including the
dedicated IP pool that it's associated with, whether or not it's enabled
for sending email, and more.
*Configuration sets* are groups of rules that you can apply to the emails
you send. You apply a configuration set to an email by including a
reference to the configuration set in the headers of the email. When you
apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def get_configuration_set(client, configuration_set_name, options \\ []) do
path_ = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieve a list of event destinations that are associated with a
configuration set.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information
about these events to. For example, you can send event data to Amazon SNS
to receive notifications when you receive bounces or complaints, or you can
use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term
storage.
"""
def get_configuration_set_event_destinations(client, configuration_set_name, options \\ []) do
path_ = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/event-destinations"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the custom email verification template for the template name you
specify.
For more information about custom verification email templates, see [Using
Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def get_custom_verification_email_template(client, template_name, options \\ []) do
path_ = "/v2/email/custom-verification-email-templates/#{URI.encode(template_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Get information about a dedicated IP address, including the name of the
dedicated IP pool that it's associated with, as well information about the
automatic warm-up process for the address.
"""
def get_dedicated_ip(client, ip, options \\ []) do
  # The IP address itself is a path segment, so it must be URL-encoded.
  url = "/v2/email/dedicated-ips/#{URI.encode(ip)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
List the dedicated IP addresses that are associated with your AWS account.
"""
def get_dedicated_ips(client, next_token \\ nil, page_size \\ nil, pool_name \\ nil, options \\ []) do
  # Include only the optional paging/filter parameters that were supplied.
  optional = [
    {"NextToken", next_token},
    {"PageSize", page_size},
    {"PoolName", pool_name}
  ]

  query_ = for {name, value} <- optional, not is_nil(value), do: {name, value}
  request(client, :get, "/v2/email/dedicated-ips", query_, [], nil, options, nil)
end
@doc """
Retrieve information about the status of the Deliverability dashboard for
your account. When the Deliverability dashboard is enabled, you gain access
to reputation, deliverability, and other metrics for the domains that you
use to send email. You also gain the ability to perform predictive inbox
placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon SES
and other AWS services. For more information about the features and cost of
a Deliverability dashboard subscription, see [Amazon SES
Pricing](http://aws.amazon.com/ses/pricing/).
"""
def get_deliverability_dashboard_options(client, options \\ []) do
  # Account-level resource; fixed path, no parameters.
  request(client, :get, "/v2/email/deliverability-dashboard", [], [], nil, options, nil)
end
@doc """
Retrieve the results of a predictive inbox placement test.
"""
def get_deliverability_test_report(client, report_id, options \\ []) do
  # Fetches the results of one predictive inbox placement test.
  url = "/v2/email/deliverability-dashboard/test-reports/#{URI.encode(report_id)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
Retrieve all the deliverability data for a specific campaign. This data is
available for a campaign only if the campaign sent email by using a domain
that the Deliverability dashboard is enabled for.
"""
def get_domain_deliverability_campaign(client, campaign_id, options \\ []) do
  # Campaign data exists only for domains with the Deliverability dashboard enabled.
  url = "/v2/email/deliverability-dashboard/campaigns/#{URI.encode(campaign_id)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
Retrieve inbox placement and engagement rates for the domains that you use
to send email.
"""
def get_domain_statistics_report(client, domain, end_date, start_date, options \\ []) do
  url = "/v2/email/deliverability-dashboard/statistics-report/#{URI.encode(domain)}"

  # Dates travel as query parameters; nil values are skipped, matching the
  # generated-client convention for optional query members.
  query_ =
    for {name, value} <- [{"EndDate", end_date}, {"StartDate", start_date}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, url, query_, [], nil, options, nil)
end
@doc """
Provides information about a specific identity, including the identity's
verification status, sending authorization policies, its DKIM
authentication status, and its custom Mail-From settings.
"""
def get_email_identity(client, email_identity, options \\ []) do
  # Identity can be an email address or a domain; both go in the path.
  url = "/v2/email/identities/#{URI.encode(email_identity)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
Returns the requested sending authorization policies for the given identity
(an email address or a domain). The policies are returned as a map of
policy names to policy contents. You can retrieve a maximum of 20 policies
at a time.
<note> This API is for the identity owner only. If you have not verified
the identity, this API will return an error.
</note> Sending authorization is a feature that enables an identity owner
to authorize other senders to use its identities. For information about
using sending authorization, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def get_email_identity_policies(client, email_identity, options \\ []) do
  # Retrieves all sending-authorization policies attached to the identity.
  url = "/v2/email/identities/#{URI.encode(email_identity)}/policies"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
Displays the template object (which includes the subject line, HTML part
and text part) for the template you specify.
You can execute this operation no more than once per second.
"""
def get_email_template(client, template_name, options \\ []) do
  # Fetches one email template (subject, HTML part, text part) by name.
  url = "/v2/email/templates/#{URI.encode(template_name)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
Retrieves information about a specific email address that's on the
suppression list for your account.
"""
def get_suppressed_destination(client, email_address, options \\ []) do
  # Looks up a single address on the account-level suppression list.
  url = "/v2/email/suppression/addresses/#{URI.encode(email_address)}"
  request(client, :get, url, [], [], nil, options, nil)
end
@doc """
List all of the configuration sets associated with your account in the
current region.
*Configuration sets* are groups of rules that you can apply to the emails
you send. You apply a configuration set to an email by including a
reference to the configuration set in the headers of the email. When you
apply a configuration set to an email, all of the rules in that
configuration set are applied to the email.
"""
def list_configuration_sets(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Standard paginated listing: only send the paging parameters the caller set.
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/configuration-sets", query_, [], nil, options, nil)
end
@doc """
Lists the existing custom verification email templates for your account in
the current AWS Region.
For more information about custom verification email templates, see [Using
Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def list_custom_verification_email_templates(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Paginated listing of custom verification email templates.
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/custom-verification-email-templates", query_, [], nil, options, nil)
end
@doc """
List all of the dedicated IP pools that exist in your AWS account in the
current Region.
"""
def list_dedicated_ip_pools(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Paginated listing of dedicated IP pools in the current Region.
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/dedicated-ip-pools", query_, [], nil, options, nil)
end
@doc """
Show a list of the predictive inbox placement tests that you've performed,
regardless of their statuses. For predictive inbox placement tests that are
complete, you can use the `GetDeliverabilityTestReport` operation to view
the results.
"""
def list_deliverability_test_reports(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Paginated listing of predictive inbox placement tests, any status.
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/deliverability-dashboard/test-reports", query_, [], nil, options, nil)
end
@doc """
Retrieve deliverability data for all the campaigns that used a specific
domain to send email during a specified time range. This data is available
for a domain only if you enabled the Deliverability dashboard for the
domain.
"""
def list_domain_deliverability_campaigns(client, subscribed_domain, end_date, next_token \\ nil, page_size \\ nil, start_date, options \\ []) do
  url = "/v2/email/deliverability-dashboard/domains/#{URI.encode(subscribed_domain)}/campaigns"

  # Date range plus standard paging; nil values are omitted from the query.
  optional = [
    {"EndDate", end_date},
    {"NextToken", next_token},
    {"PageSize", page_size},
    {"StartDate", start_date}
  ]

  query_ = for {name, value} <- optional, not is_nil(value), do: {name, value}
  request(client, :get, url, query_, [], nil, options, nil)
end
@doc """
Returns a list of all of the email identities that are associated with your
AWS account. An identity can be either an email address or a domain. This
operation returns identities that are verified as well as those that
aren't. This operation returns identities that are associated with Amazon
SES and Amazon Pinpoint.
"""
def list_email_identities(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Paginated listing of all identities (verified and unverified).
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/identities", query_, [], nil, options, nil)
end
@doc """
Lists the email templates present in your Amazon SES account in the current
AWS Region.
You can execute this operation no more than once per second.
"""
def list_email_templates(client, next_token \\ nil, page_size \\ nil, options \\ []) do
  # Paginated listing of stored email templates.
  query_ =
    for {name, value} <- [{"NextToken", next_token}, {"PageSize", page_size}],
        not is_nil(value),
        do: {name, value}

  request(client, :get, "/v2/email/templates", query_, [], nil, options, nil)
end
@doc """
Retrieves a list of email addresses that are on the suppression list for
your account.
"""
def list_suppressed_destinations(client, end_date \\ nil, next_token \\ nil, page_size \\ nil, reasons \\ nil, start_date \\ nil, options \\ []) do
  # All filters are optional; note the API expects the singular "Reason" key
  # even though the function argument is `reasons`.
  optional = [
    {"EndDate", end_date},
    {"NextToken", next_token},
    {"PageSize", page_size},
    {"Reason", reasons},
    {"StartDate", start_date}
  ]

  query_ = for {name, value} <- optional, not is_nil(value), do: {name, value}
  request(client, :get, "/v2/email/suppression/addresses", query_, [], nil, options, nil)
end
@doc """
Retrieve a list of the tags (keys and values) that are associated with a
specified resource. A *tag* is a label that you optionally define and
associate with a resource. Each tag consists of a required *tag key* and an
optional associated *tag value*. A tag key is a general label that acts as
a category for more specific tag values. A tag value acts as a descriptor
within a tag key.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
  # The target resource is identified via a query parameter, not the path.
  query_ = if is_nil(resource_arn), do: [], else: [{"ResourceArn", resource_arn}]
  request(client, :get, "/v2/email/tags", query_, [], nil, options, nil)
end
@doc """
Enable or disable the automatic warm-up feature for dedicated IP addresses.
"""
def put_account_dedicated_ip_warmup_attributes(client, input, options \\ []) do
  # Toggles automatic warm-up for dedicated IPs; settings travel in the body.
  request(client, :put, "/v2/email/account/dedicated-ips/warmup", [], [], input, options, nil)
end
@doc """
Update your Amazon SES account details.
"""
def put_account_details(client, input, options \\ []) do
  # NOTE: this operation is issued as POST, unlike the other put_* wrappers,
  # matching the generated API definition for PutAccountDetails.
  request(client, :post, "/v2/email/account/details", [], [], input, options, nil)
end
@doc """
Enable or disable the ability of your account to send email.
"""
def put_account_sending_attributes(client, input, options \\ []) do
  # Enables/disables account-wide email sending.
  request(client, :put, "/v2/email/account/sending", [], [], input, options, nil)
end
@doc """
Change the settings for the account-level suppression list.
"""
def put_account_suppression_attributes(client, input, options \\ []) do
  # Updates the account-level suppression list settings.
  request(client, :put, "/v2/email/account/suppression", [], [], input, options, nil)
end
@doc """
Associate a configuration set with a dedicated IP pool. You can use
dedicated IP pools to create groups of dedicated IP addresses for sending
specific types of email.
"""
def put_configuration_set_delivery_options(client, configuration_set_name, input, options \\ []) do
  # Associates the configuration set with a dedicated IP pool.
  url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/delivery-options"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Enable or disable collection of reputation metrics for emails that you send
using a particular configuration set in a specific AWS Region.
"""
def put_configuration_set_reputation_options(client, configuration_set_name, input, options \\ []) do
  # Toggles reputation-metric collection for the configuration set.
  url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/reputation-options"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Enable or disable email sending for messages that use a particular
configuration set in a specific AWS Region.
"""
def put_configuration_set_sending_options(client, configuration_set_name, input, options \\ []) do
  # Enables/disables sending for messages using this configuration set.
  url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/sending"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Specify the account suppression list preferences for a configuration set.
"""
def put_configuration_set_suppression_options(client, configuration_set_name, input, options \\ []) do
  # Sets per-configuration-set suppression list preferences.
  url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/suppression-options"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Specify a custom domain to use for open and click tracking elements in
email that you send.
"""
def put_configuration_set_tracking_options(client, configuration_set_name, input, options \\ []) do
  # Sets the custom open/click tracking domain for the configuration set.
  url = "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}/tracking-options"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Move a dedicated IP address to an existing dedicated IP pool.
<note> The dedicated IP address that you specify must already exist, and
must be associated with your AWS account.
The dedicated IP pool you specify must already exist. You can create a new
pool by using the `CreateDedicatedIpPool` operation.
</note>
"""
def put_dedicated_ip_in_pool(client, ip, input, options \\ []) do
  # Moves an existing dedicated IP into an existing pool (named in the body).
  url = "/v2/email/dedicated-ips/#{URI.encode(ip)}/pool"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
<p/>
"""
def put_dedicated_ip_warmup_attributes(client, ip, input, options \\ []) do
  # Updates warm-up attributes for a single dedicated IP.
  url = "/v2/email/dedicated-ips/#{URI.encode(ip)}/warmup"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Enable or disable the Deliverability dashboard. When you enable the
Deliverability dashboard, you gain access to reputation, deliverability,
and other metrics for the domains that you use to send email. You also gain
the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly subscription
charge, in addition to any other fees that you accrue by using Amazon SES
and other AWS services. For more information about the features and cost of
a Deliverability dashboard subscription, see [Amazon SES
Pricing](http://aws.amazon.com/ses/pricing/).
"""
def put_deliverability_dashboard_option(client, input, options \\ []) do
  # Enables or disables the (paid) Deliverability dashboard subscription.
  request(client, :put, "/v2/email/deliverability-dashboard", [], [], input, options, nil)
end
@doc """
Used to enable or disable DKIM authentication for an email identity.
"""
def put_email_identity_dkim_attributes(client, email_identity, input, options \\ []) do
  # Enables/disables DKIM signing for the identity.
  url = "/v2/email/identities/#{URI.encode(email_identity)}/dkim"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Used to configure or change the DKIM authentication settings for an email
domain identity. You can use this operation to do any of the following:
<ul> <li> Update the signing attributes for an identity that uses Bring
Your Own DKIM (BYODKIM).
</li> <li> Change from using no DKIM authentication to using Easy DKIM.
</li> <li> Change from using no DKIM authentication to using BYODKIM.
</li> <li> Change from using Easy DKIM to using BYODKIM.
</li> <li> Change from using BYODKIM to using Easy DKIM.
</li> </ul>
"""
def put_email_identity_dkim_signing_attributes(client, email_identity, input, options \\ []) do
  # NOTE: this endpoint is versioned under /v1, unlike the rest of this
  # module — this matches the published AWS SESv2 API definition for
  # PutEmailIdentityDkimSigningAttributes, so do not "fix" it to /v2.
  url = "/v1/email/identities/#{URI.encode(email_identity)}/dkim/signing"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Used to enable or disable feedback forwarding for an identity. This setting
determines what happens when an identity is used to send an email that
results in a bounce or complaint event.
If the value is `true`, you receive email notifications when bounce or
complaint events occur. These notifications are sent to the address that
you specified in the `Return-Path` header of the original email.
You're required to have a method of tracking bounces and complaints. If you
haven't set up another mechanism for receiving bounce or complaint
notifications (for example, by setting up an event destination), you
receive an email notification when these events occur (even if this setting
is disabled).
"""
def put_email_identity_feedback_attributes(client, email_identity, input, options \\ []) do
  # Enables/disables bounce & complaint feedback forwarding for the identity.
  url = "/v2/email/identities/#{URI.encode(email_identity)}/feedback"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Used to enable or disable the custom Mail-From domain configuration for an
email identity.
"""
def put_email_identity_mail_from_attributes(client, email_identity, input, options \\ []) do
  # Configures the custom MAIL FROM domain for the identity.
  url = "/v2/email/identities/#{URI.encode(email_identity)}/mail-from"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Adds an email address to the suppression list for your account.
"""
def put_suppressed_destination(client, input, options \\ []) do
  # Adds an address to the account-level suppression list.
  request(client, :put, "/v2/email/suppression/addresses", [], [], input, options, nil)
end
@doc """
Composes an email message to multiple destinations.
"""
def send_bulk_email(client, input, options \\ []) do
  # Sends one templated message to multiple destinations in a single call.
  request(client, :post, "/v2/email/outbound-bulk-emails", [], [], input, options, nil)
end
@doc """
Adds an email address to the list of identities for your Amazon SES account
in the current AWS Region and attempts to verify it. As a result of
executing this operation, a customized verification email is sent to the
specified address.
To use this operation, you must first create a custom verification email
template. For more information about creating and using custom verification
email templates, see [Using Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def send_custom_verification_email(client, input, options \\ []) do
  # Requires a pre-created custom verification email template.
  request(client, :post, "/v2/email/outbound-custom-verification-emails", [], [], input, options, nil)
end
@doc """
Sends an email message. You can use the Amazon SES API v2 to send two types
of messages:
<ul> <li> **Simple** – A standard email message. When you create this type
of message, you specify the sender, the recipient, and the message body,
and Amazon SES assembles the message for you.
</li> <li> **Raw** – A raw, MIME-formatted email message. When you send
this type of email, you have to specify all of the message headers, as well
as the message body. You can use this message type to send messages that
contain attachments. The message that you specify has to be a valid MIME
message.
</li> <li> **Templated** – A message that contains personalization tags.
When you send this type of email, Amazon SES API v2 automatically replaces
the tags with values that you specify.
</li> </ul>
"""
def send_email(client, input, options \\ []) do
  # Accepts Simple, Raw, or Templated message content in the input body.
  request(client, :post, "/v2/email/outbound-emails", [], [], input, options, nil)
end
@doc """
Add one or more tags (keys and values) to a specified resource. A *tag* is
a label that you optionally define and associate with a resource. Tags can
help you categorize and manage resources in different ways, such as by
purpose, owner, environment, or other criteria. A resource can have as many
as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value*,
both of which you define. A tag key is a general label that acts as a
category for more specific tag values. A tag value acts as a descriptor
within a tag key.
"""
def tag_resource(client, input, options \\ []) do
  # Adds tags; resource ARN and tag pairs are carried in the body.
  request(client, :post, "/v2/email/tags", [], [], input, options, nil)
end
@doc """
Creates a preview of the MIME content of an email when provided with a
template and a set of replacement data.
You can execute this operation no more than once per second.
"""
def test_render_email_template(client, template_name, input, options \\ []) do
  # Renders the named template with the replacement data in the body.
  url = "/v2/email/templates/#{URI.encode(template_name)}/render"
  request(client, :post, url, [], [], input, options, nil)
end
@doc """
Remove one or more tags (keys and values) from a specified resource.
"""
def untag_resource(client, input, options \\ []) do
  # For this DELETE, ResourceArn and TagKeys are sent as query parameters;
  # build_params lifts them out of the input map into the query list.
  {query_, input} =
    AWS.Request.build_params(
      [{"ResourceArn", "ResourceArn"}, {"TagKeys", "TagKeys"}],
      input
    )

  request(client, :delete, "/v2/email/tags", query_, [], input, options, nil)
end
@doc """
Update the configuration of an event destination for a configuration set.
*Events* include message sends, deliveries, opens, clicks, bounces, and
complaints. *Event destinations* are places that you can send information
about these events to. For example, you can send event data to Amazon SNS
to receive notifications when you receive bounces or complaints, or you can
use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term
storage.
"""
def update_configuration_set_event_destination(client, configuration_set_name, event_destination_name, input, options \\ []) do
  # Both the configuration set and the destination name are path segments.
  url =
    "/v2/email/configuration-sets/#{URI.encode(configuration_set_name)}" <>
      "/event-destinations/#{URI.encode(event_destination_name)}"

  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Updates an existing custom verification email template.
For more information about custom verification email templates, see [Using
Custom Verification Email
Templates](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-verify-address-custom.html)
in the *Amazon SES Developer Guide*.
You can execute this operation no more than once per second.
"""
def update_custom_verification_email_template(client, template_name, input, options \\ []) do
  # Replaces an existing custom verification email template by name.
  url = "/v2/email/custom-verification-email-templates/#{URI.encode(template_name)}"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Updates the specified sending authorization policy for the given identity
(an email address or a domain). This API returns successfully even if a
policy with the specified name does not exist.
<note> This API is for the identity owner only. If you have not verified
the identity, this API will return an error.
</note> Sending authorization is a feature that enables an identity owner
to authorize other senders to use its identities. For information about
using sending authorization, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
You can execute this operation no more than once per second.
"""
def update_email_identity_policy(client, email_identity, policy_name, input, options \\ []) do
  # Updates one named sending-authorization policy on the identity.
  url = "/v2/email/identities/#{URI.encode(email_identity)}/policies/#{URI.encode(policy_name)}"
  request(client, :put, url, [], [], input, options, nil)
end
@doc """
Updates an email template. Email templates enable you to send personalized
email to one or more destinations in a single API operation. For more
information, see the [Amazon SES Developer
Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html).
You can execute this operation no more than once per second.
"""
def update_email_template(client, template_name, input, options \\ []) do
  # Replaces an existing email template by name.
  url = "/v2/email/templates/#{URI.encode(template_name)}"
  request(client, :put, url, [], [], input, options, nil)
end
# NOTE: the previous spec declared `binary()` for the HTTP method, but every
# call site in this module passes an atom (:get, :put, :post, :delete); it
# also omitted `nil` for the optional input/success-status arguments.
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
        {:ok, Poison.Parser.t(), Poison.Response.t()}
        | {:error, Poison.Parser.t()}
        | {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
  # All SESv2 operations are served from the "email" endpoint but signed for
  # the "ses" service.
  client = %{client | service: "ses"}
  host = build_host("email", client)

  url =
    host
    |> build_url(path, client)
    |> add_query(query)

  additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
  headers = AWS.Request.add_headers(additional_headers, headers)

  # Encode the body before signing: SigV4 includes a hash of the payload.
  payload = encode_payload(input)
  headers = AWS.Request.sign_v4(client, method, url, headers, payload)
  perform_request(method, url, payload, headers, options, success_status_code)
end
# Default response handling used when no explicit success status code is
# expected (success_status_code == nil).
defp perform_request(method, url, payload, headers, options, nil) do
  case HTTPoison.request(method, url, payload, headers, options) do
    # NOTE(review): an empty-body 200 returns a 2-tuple here, while every
    # other success path returns {:ok, parsed_body, response}. This shape
    # mismatch is part of the generated client's public contract, so it is
    # left as-is; callers matching {:ok, _, _} will not match this clause.
    {:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
      {:ok, response}
    # 200/202/204 with a body: decode the JSON document and return it.
    {:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
    when status_code == 200 or status_code == 202 or status_code == 204 ->
      {:ok, Poison.Parser.parse!(body, %{}), response}
    # Any other HTTP status: surface the decoded error body.
    {:ok, %HTTPoison.Response{body: body}} ->
      error = Poison.Parser.parse!(body, %{})
      {:error, error}
    # Transport-level failure (DNS, timeout, refused connection, ...).
    {:error, %HTTPoison.Error{reason: reason}} ->
      {:error, %HTTPoison.Error{reason: reason}}
  end
end
# Response handling when the operation declares a specific success status
# code; any other status is treated as an error document. (No caller in
# this module currently passes a non-nil code — kept for generated-client
# parity with other AWS services.)
defp perform_request(method, url, payload, headers, options, success_status_code) do
  case HTTPoison.request(method, url, payload, headers, options) do
    # Expected status with an empty body: succeed with an empty map.
    {:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
      {:ok, %{}, response}
    # Expected status with a body: decode the JSON document.
    {:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
      {:ok, Poison.Parser.parse!(body, %{}), response}
    # Unexpected status: surface the decoded error body.
    {:ok, %HTTPoison.Response{body: body}} ->
      error = Poison.Parser.parse!(body, %{})
      {:error, error}
    # Transport-level failure.
    {:error, %HTTPoison.Error{reason: reason}} ->
      {:error, %HTTPoison.Error{reason: reason}}
  end
end
# The "local" pseudo-region short-circuits to localhost for development.
defp build_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
  Enum.join([endpoint_prefix, region, endpoint], ".")
end
# Scheme and port come from the client configuration, e.g. "https://host:443/path".
defp build_url(host, path, %{proto: proto, port: port}),
  do: "#{proto}://#{host}:#{port}#{path}"
# No query parameters: return the URL unchanged.
defp add_query(url, []), do: url

# Otherwise append "?" plus the encoded parameter list.
defp add_query(url, query), do: url <> "?" <> AWS.Util.encode_query(query)
# nil input (GET/DELETE without a body) is signed and sent as an empty string.
defp encode_payload(input) do
  case input do
    nil -> ""
    _ -> Poison.Encoder.encode(input, %{})
  end
end
end
|
lib/aws/sesv2.ex
| 0.873862
| 0.67816
|
sesv2.ex
|
starcoder
|
defmodule Cforum.Accounts.Badges do
  @moduledoc """
  The boundary for the Accounts system.

  Provides CRUD and query functions for badges and badge groups, plus
  granting badges to users — with audit logging (`Cforum.System.audited/3`)
  and user notification on a successful grant.
  """
  import Ecto.Query, warn: false
  import CforumWeb.Gettext
  alias Cforum.Repo
  alias Cforum.Accounts.{Badge, BadgeUser, BadgeGroup}
  alias Cforum.Accounts.Notifications
  alias Cforum.System
  alias Cforum.Accounts.Users

  @doc """
  Returns the list of badges.

  ## Options

    * `:order` - ordering clause (see `Cforum.OrderApi`); defaults to ascending by `:order`
    * `:limit` - paging options (see `Cforum.PagingApi`); defaults to no limit
    * `:preload` - associations to preload; defaults to `[badges_users: :user]`

  ## Examples

      iex> list_badges()
      [%Badge{}, ...]

  """
  def list_badges(query_params \\ []) do
    # NOTE(review): a :search default is merged into query_params but never
    # applied to the query below — presumably a leftover; confirm before
    # relying on a :search option here.
    query_params = Keyword.merge([order: nil, limit: nil, search: nil, preload: [badges_users: :user]], query_params)

    from(badge in Badge, preload: ^query_params[:preload])
    |> Cforum.PagingApi.set_limit(query_params[:limit])
    |> Cforum.OrderApi.set_ordering(query_params[:order], asc: :order)
    |> Repo.all()
  end

  @doc """
  Returns all badge groups ordered by name, with their badges preloaded.
  """
  def list_badge_groups() do
    from(bg in BadgeGroup, order_by: [asc: :name], preload: [:badges])
    |> Repo.all()
  end

  @doc """
  Counts the number of badges.

  ## Examples

      iex> count_badges()
      1

  """
  def count_badges() do
    from(
      badge in Badge,
      select: count("*")
    )
    |> Repo.one()
  end

  @doc """
  Gets a single badge with its `badges_users` (and their users) preloaded.

  Raises `Ecto.NoResultsError` if the Badge does not exist.

  ## Examples

      iex> get_badge!(123)
      %Badge{}

      iex> get_badge!(456)
      ** (Ecto.NoResultsError)

  """
  def get_badge!(id) do
    Repo.get!(Badge, id)
    |> Repo.preload(badges_users: :user)
  end

  @doc """
  Gets a single badge by the given `clauses` (e.g. `[badge_type: "custom"]`),
  returning `nil` when no badge matches. Pass `opts[:with]` to preload
  associations.
  """
  def get_badge_by(clauses, opts \\ []) do
    Badge
    |> Repo.get_by(clauses)
    |> Repo.maybe_preload(opts[:with])
  end

  @doc """
  Like `get_badge_by/2`, but raises `Ecto.NoResultsError` when no badge
  matches the given `clauses`.
  """
  def get_badge_by!(clauses, opts \\ []) do
    Badge
    |> Repo.get_by!(clauses)
    |> Repo.maybe_preload(opts[:with])
  end

  @doc """
  Creates a badge. The change is recorded in the audit log, attributed to
  `current_user`.

  ## Examples

      iex> create_badge(%{field: value})
      {:ok, %Badge{}}

      iex> create_badge(%{field: bad_value})
      {:error, %Ecto.Changeset{}}

  """
  def create_badge(current_user, attrs \\ %{}) do
    # System.audited/3 wraps the insert in an audit-log entry; the callback's
    # {:ok, _} / {:error, _} result is passed through unchanged.
    System.audited("create", current_user, fn ->
      %Badge{}
      |> Badge.changeset(attrs)
      |> Repo.insert()
    end)
  end

  @doc """
  Updates a badge. The change is recorded in the audit log, attributed to
  `current_user`.

  ## Examples

      iex> update_badge(badge, %{field: new_value})
      {:ok, %Badge{}}

      iex> update_badge(badge, %{field: bad_value})
      {:error, %Ecto.Changeset{}}

  """
  def update_badge(current_user, %Badge{} = badge, attrs) do
    System.audited("update", current_user, fn ->
      badge
      |> Badge.changeset(attrs)
      |> Repo.update()
    end)
  end

  @doc """
  Deletes a Badge. The deletion is recorded in the audit log, attributed to
  `current_user`.

  ## Examples

      iex> delete_badge(badge)
      {:ok, %Badge{}}

      iex> delete_badge(badge)
      {:error, %Ecto.Changeset{}}

  """
  def delete_badge(current_user, %Badge{} = badge) do
    System.audited("destroy", current_user, fn ->
      Repo.delete(badge)
    end)
  end

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking badge changes.

  ## Examples

      iex> change_badge(badge)
      %Ecto.Changeset{source: %Badge{}}

  """
  def change_badge(%Badge{} = badge) do
    Badge.changeset(badge, %{})
  end

  @doc """
  Collapses `badge.badges_users` into one entry per user, counting how many
  times each user earned the badge. The `:user` and `:created_at` values are
  taken from the first occurrence encountered for that user.
  """
  def unique_users(%Badge{} = badge) do
    badge.badges_users
    |> Enum.reduce(%{}, fn bu, acc ->
      Map.update(acc, bu.user_id, %{user: bu.user, times: 1, created_at: bu.created_at}, fn mp ->
        %{mp | times: mp[:times] + 1}
      end)
    end)
    |> Map.values()
  end

  @doc """
  Grants a badge to `user`: inserts a `BadgeUser` row (audited as
  "badge-gained"), notifies the user on success and discards the user's
  cached record.

  The badge may be given as a badge type string, a `{field, value}` lookup
  tuple, or a `%Badge{}` struct.
  """
  def grant_badge(type, user) when is_bitstring(type),
    do: grant_badge(get_badge_by(badge_type: type), user)

  def grant_badge({key, value}, user),
    do: grant_badge(get_badge_by([{key, value}]), user)

  def grant_badge(badge, user) do
    # NOTE(review): if the lookup clauses above matched no badge, `badge` is
    # nil here and badge.badge_id raises — presumably callers only pass
    # known badge types; confirm.
    System.audited("badge-gained", user, fn ->
      %BadgeUser{}
      |> BadgeUser.changeset(%{user_id: user.user_id, badge_id: badge.badge_id})
      |> Repo.insert()
    end)
    |> notify_user(user, badge)
    |> Users.discard_user_cache()
  end

  @doc """
  Creates an in-forum notification about a freshly gained badge.

  Only acts on a successful `{:ok, badge_user}` grant result; any other
  value is returned unchanged so errors propagate through the pipeline.
  """
  def notify_user({:ok, badge_user}, user, badge) do
    subject =
      gettext("You have won the %{mtype} “%{name}”!",
        mtype: CforumWeb.Views.Helpers.l10n_medal_type(badge.badge_medal_type),
        name: badge.name
      )

    Notifications.create_notification(%{
      recipient_id: user.user_id,
      subject: subject,
      oid: badge.badge_id,
      otype: "badge",
      path: CforumWeb.Router.Helpers.badge_path(CforumWeb.Endpoint, :show, badge.slug)
    })

    {:ok, badge_user}
  end

  def notify_user(val, _, _), do: val
end
|
lib/cforum/accounts/badges.ex
| 0.718693
| 0.438184
|
badges.ex
|
starcoder
|
defmodule Instream do
  @moduledoc """
  InfluxDB driver for Elixir

  ## Connections

  To connect to an InfluxDB server you need a connection module:

      defmodule MyApp.MyConnection do
        use Instream.Connection, otp_app: :my_app
      end

  The `:otp_app` name and the name of the module can be freely chosen but have
  to be linked to a corresponding configuration entry. This defined connection
  module needs to be hooked up into your supervision tree:

      children = [
        # ...
        MyApp.MyConnection,
        # ...
      ]

  Example of the matching configuration entry:

      config :my_app, MyApp.MyConnection,
        database: "my_default_database",
        host: "localhost",
        port: 8086

  More details on connections and configuration options can be found with the
  `Instream.Connection` module.

  ## Queries

  _Note:_ Most queries require a database to operate on. The following places
  will be searched (in order from top to bottom) for a configured database:

  1. `opts[:database]` parameter
  2. `Instream.Connection` configuration
  3. No database used!

  By default the response of a query will be a map decoded from your
  server's JSON response.

  Alternatively you can pass `[result_as: format]` to
  `MyApp.MyConnection.query/2` to change the result format to
  one of the following:

  - `:csv` - CSV encoded response
  - `:json` - JSON encoded response (implicit default)
  - `:raw` - raw server format (JSON string)

  ### Query Language Selection

  If not otherwise specified all queries will be sent as `InfluxQL`.
  This can be changed to `Flux` by passing the option `[query_language: :flux]`
  to `MyApp.MyConnection.query/2`.

  ### Reading Data

      # passing database to query/2
      MyApp.MyConnection.query(
        "SELECT * FROM some_measurement",
        database: "my_database"
      )

      # defining database in the query
      MyApp.MyConnection.query(~S(
        SELECT * FROM "my_database"."default"."some_measurement"
      ))

      # passing precision (= epoch) for query results
      MyApp.MyConnection.query(
        "SELECT * FROM some_measurement",
        precision: :minutes
      )

      # using parameter binding
      MyApp.MyConnection.query(
        "SELECT * FROM some_measurement WHERE field = $field_param",
        params: %{field_param: "some_value"}
      )

  ### POST Queries

  Some queries require you to switch from the regular `read only context`
  (all GET requests) to a `write context` (all POST requests).

  When not using the query builder you have to pass that information
  manually to `query/2`:

      MyApp.MyConnection.query(
        "CREATE DATABASE create_in_write_mode",
        method: :post
      )

  ### Query Timeout Configuration

  If you find your queries running into timeouts (e.g. `:hackney` not waiting
  long enough for a response) you can pass an option to the query call:

      MyApp.MyConnection.query(query, http_opts: [recv_timeout: 250])

  This value can also be set as a default using your HTTP client configuration
  (see `Instream.Connection.Config` for details). A passed configuration will
  take precedence over the connection configuration.

  ## Writing Points

  Writing data to your InfluxDB server can be done via
  `Instream.Series` modules or using raw maps.

  Please also refer to `c:Instream.Connection.write/2` for an overview
  of additional options you can use when writing data.

  ### Writing Points using Series

  Each series in your database is represented by a definition module:

      defmodule MySeries do
        use Instream.Series

        series do
          measurement "my_measurement"

          tag :bar
          tag :foo

          field :value
        end
      end

  Using this definition you can use the generated struct to create
  a data point and write it to your database:

      MyConnection.write(%MySeries{
        fields: %MySeries.Fields{value: 17},
        tags: %MySeries.Tags{bar: "bar", foo: "foo"}
      })

  More information about series definitions can be found in the
  module documentation of `Instream.Series`.

  ### Writing Points using Plain Maps

  As an alternative you can use a non-struct map to write points to a database:

      MyConnection.write([
        %{
          measurement: "my_measurement",
          fields: %{answer: 42, value: 1},
          tags: %{foo: "bar"},
          timestamp: 1_439_587_926_000_000_000
        },
        # more points possible ...
      ])

  * The field `timestamp` can be omitted, so InfluxDB will use the receive time.
  """
end
|
lib/instream.ex
| 0.893018
| 0.411406
|
instream.ex
|
starcoder
|
defmodule Resourceful.JSONAPI.Error do
  @moduledoc """
  Tools for converting errors formatted in accordance with `Resourceful.Error`
  into [JSON:API-style errors](https://jsonapi.org/format/#errors).

  JSON:API errors have a number of reserved top-level names:

  * `code`
  * `detail`
  * `id`
  * `links`
  * `meta`
  * `source`
  * `status`
  * `title`

  Resourceful errors map to JSON:API errors as follows:

  An error's `type` symbol is converted to a string for `code`. With the
  exception of `meta` and `status` the remainder of keys in an error's `context`
  are mapped to either the top-level attribute of the same name or, in the event
  the name is not a reserved name, it will be placed in `meta` which, if
  present, will always be a map.

  `status` is a bit of a special case as "status" in a JSON:API error always
  refers to an HTTP status code, but it's quite possible many errors might have
  a `status` attribute in their context that has nothing to do with HTTP. As
  such, `:http_status` may be passed either as an option or as a key in a
  context map.
  """

  @default_source_type "pointer"

  @reserved_names ~w[
    code
    detail
    id
    links
    meta
    source
    status
    title
  ]a

  @doc """
  Takes a list of errors, or an `:error` tuple with a list as the second
  element, and converts that list to JSON:API errors.
  """
  def all(errors, opts \\ [])

  def all({:error, errors}, opts), do: all(errors, opts)

  def all(errors, opts) when is_list(errors), do: Enum.map(errors, &to_map(&1, opts))

  @doc """
  Returns a map of all non-reserved attributes from a context map, with
  stringified keys, or `nil` when no such attributes exist.
  """
  def meta(error, opts \\ [])

  def meta({:error, {_, %{} = context}}, opts), do: meta(context, opts)

  def meta(%{} = context, _) do
    meta =
      context
      |> Map.drop(@reserved_names)
      |> stringify_keys()

    # An empty meta map is dropped entirely (nil) instead of serialized as {}.
    case Enum.any?(meta) do
      true -> meta
      _ -> nil
    end
  end

  def meta({:error, _}, _), do: nil

  @doc """
  Joins a source path into a query-parameter style string, e.g.
  `["fields", "albums"]` becomes `"fields[albums]"`. Returns `nil` for an
  empty path.
  """
  def parameter_source([]), do: nil

  def parameter_source(source) do
    Enum.reduce(tl(source), hd(source), fn src, str -> "#{str}[#{src}]" end)
  end

  @doc """
  Joins a source path into a JSON pointer, e.g. `["data", "attributes"]`
  becomes `"/data/attributes"`.
  """
  def pointer_source([]), do: ""

  def pointer_source(source), do: "/#{Enum.join(source, "/")}"

  @doc """
  Returns a JSON:API source map based on the `:source` attribute in an error's
  context map, or `nil` when the context has no `:source`.
  """
  def source(error, source_type \\ @default_source_type)

  def source(error, opts) when is_list(opts),
    do: source(error, Keyword.get(opts, :source_type, @default_source_type))

  def source({:error, {_, %{source: source}}}, source_type),
    do: %{source_type => source_string(source, source_type)}

  def source({:error, _}, _), do: nil

  @doc """
  Returns a JSON:API source map. Either:

  1. `%{"pointer" => "/data/attributes/problem"}`
  2. `%{"parameter" => "fields[resource_type]"}`

  Raises `CaseClauseError` for any `source_type` other than `"parameter"` or
  `"pointer"`.
  """
  def source_string(source, source_type) when is_list(source) do
    str_sources = Enum.map(source, &to_string/1)

    case source_type do
      "parameter" -> parameter_source(str_sources)
      "pointer" -> pointer_source(str_sources)
    end
  end

  @doc """
  Returns the appropriate `status` attribute based on either the context map or
  an explicitly passed `:http_status` option. The value in a context takes
  precedence. The reason for this is that the keyword will often be used in
  conjunction with `all/2` to apply a default but certain errors, when a
  situation allows for mixed errors with statuses, will want to be set
  explicitly apart from the default.
  """
  def status(error, opts \\ [])

  def status({:error, {_, %{http_status: status}}}, _), do: to_string(status)

  def status({:error, {type, _}}, opts), do: status(type, opts)

  def status({:error, type}, opts), do: status(type, opts)

  def status(type, opts) when is_atom(type) do
    # An explicit :http_status option wins; otherwise fall back to the
    # configured per-type defaults (may be nil, in which case status is nil).
    (Keyword.get(opts, :http_status) ||
       get_in(error_type_defaults(), [type, :http_status]))
    |> to_status()
  end

  def status(_, _), do: nil

  @doc """
  Converts a Resourceful error into a JSON:API error map which can then be
  converted to JSON. See module overview for details on conventions.
  """
  def to_map(error, opts \\ [])

  def to_map({:error, {_, %{}}} = error, opts) do
    Enum.reduce([:meta, :source, :status], base_error(error, opts), fn key, jerr ->
      apply_jsonapi_error_key(jerr, key, error, opts)
    end)
  end

  def to_map({:error, _} = error, opts) do
    # Errors without a context map get one added before conversion.
    error
    |> Resourceful.Error.with_context()
    |> to_map(opts)
  end

  # Adds `key`'s computed value ("meta"/"source"/"status") to the JSON:API
  # error map, skipping keys whose value is nil.
  defp apply_jsonapi_error_key(jsonapi_error, key, {:error, _} = error, opts) do
    case apply(__MODULE__, key, [error, opts]) do
      nil -> jsonapi_error
      value -> Map.put(jsonapi_error, to_string(key), value)
    end
  end

  # Builds the base JSON:API error: humanized, reserved context keys lifted to
  # the top level (stringified), plus the error type as "code".
  defp base_error({:error, {type, %{}}} = error, opts) do
    error
    |> Resourceful.Error.humanize(opts)
    |> Resourceful.Error.context()
    |> Map.take(@reserved_names)
    |> stringify_keys()
    |> Map.put("code", to_string(type))
  end

  # Fetched at runtime rather than stored in a module attribute so that
  # configuration set after compilation is honored (a compile-time
  # Application.get_env/2 in the module body freezes the value). `get_in/2`
  # on nil returns nil, matching the previous unset-config behavior.
  defp error_type_defaults, do: Application.get_env(:resourceful, :error_type_defaults)

  defp stringify_keys(map), do: Map.new(map, fn {k, v} -> {to_string(k), v} end)

  defp to_status(nil), do: nil

  defp to_status(status), do: to_string(status)
end
|
lib/resourceful/jsonapi/error.ex
| 0.841517
| 0.585072
|
error.ex
|
starcoder
|
defmodule Fex do
  @moduledoc """
  Documentation for `Fex`.

  This is an experimental package - do not use in production!

  Many Elixir functions return two types of tuples:

  - {:ok, data}
  - {:error, error}

  Together they map quite closely to the "either" structure.

  The idea is that instead of using `case`/`with` to check whether an
  operation was successful:

  ```
  with {:ok, json} <- File.read("list.json"),
       {:ok, data} <- Jason.decode(json) do
    data
    |> Enum.sum()
  else
    {:error, msg} ->
      Logger.error("Error occurred")
      IO.inspect(msg)
      0
  end
  ```

  we could use functions that are operating on
  "either" looking structures:

  ```
  File.read("list.json")
  |> Fex.chain(&Jason.decode/1)
  |> Fex.apply(&Enum.sum/1)
  |> Fex.fold(&(&1), fn msg ->
    Logger.error("Error occurred")
    IO.inspect(msg)
    0
  end)
  ```

  The above approach gives some advantages over the
  "usual" approach:

  - the developer has freedom to decide when to handle
    the "error" case, which makes it easier to divide
    code into dirty and pure parts
  - error cases can be grouped together
  - code is easier to reason about with a little bit
    of understanding of the "either" idea
  """

  @doc """
  Maps list of results using `Enum.map/2`.

  ## Examples

      iex> Fex.map({:ok, [1, 2]}, &(&1 + 2))
      {:ok, [3, 4]}

      iex> Fex.map({:error, "Error message"}, &(&1 + 2))
      {:error, "Error message"}

  """
  def map({:ok, data}, iteration_fn) do
    {:ok, Enum.map(data, iteration_fn)}
  end

  def map({:error, error}, _iteration_fn), do: {:error, error}

  @doc """
  Applies function over data, rewrapping the result in `{:ok, _}`.

  ## Examples

      iex> Fex.apply({:ok, [1, 2]}, &Enum.sum/1)
      {:ok, 3}

      iex> Fex.apply({:error, "Error message"}, &Enum.sum/1)
      {:error, "Error message"}

  """
  def apply({:ok, data}, function) do
    {:ok, function.(data)}
  end

  def apply({:error, error}, _function), do: {:error, error}

  @doc """
  Like apply but doesn't wrap the function's result in the "either" format.
  It relies on the fact that the applied function itself returns the
  "either" format.

  Useful when you have an "either" structure already and you
  need to pass it to a function that returns another "either"
  structure (using apply would cause "nesting" of eithers).

  ## Examples

      iex> Fex.chain({:ok, "{}"}, &Jason.decode/1)
      {:ok, %{}}

      iex> Fex.chain({:error, "Error message"}, &Jason.decode/1)
      {:error, "Error message"}

  """
  def chain({:ok, data}, function), do: function.(data)

  def chain({:error, error}, _function), do: {:error, error}

  @doc """
  When you would like to extract the pure value out of the "either"
  struct: applies `success_fn` to an ok value or `error_fn` to an error.

  ## Examples

      iex> Fex.fold({:ok, 5}, &(&1), &(&1))
      5

      iex> Fex.fold({:error, "Error message"}, &(&1), &(&1))
      "Error message"

  """
  def fold({:ok, data}, success_fn, _error_fn), do: success_fn.(data)

  def fold({:error, error}, _success_fn, error_fn), do: error_fn.(error)
end
|
lib/fex.ex
| 0.909355
| 0.883588
|
fex.ex
|
starcoder
|
defmodule Re.Exporters.Trovit do
  @moduledoc """
  Listing XML exporter for trovit.
  """

  # Attributes emitted for every listing, in output order.
  @exported_attributes ~w(id url title sell_type description price listing_type area rooms
    bathrooms garage_spots state city neighborhood address postal_code latitude longitude
    owner agency virtual_tour pictures)a
  @default_options %{attributes: @exported_attributes}

  # Trovit's `by_owner` flag value: 0 marks the listing as agency-published.
  @listing_agency 0

  @image_url "https://res.cloudinary.com/emcasa/image/upload/f_auto/v1513818385"
  @matterport_url "https://my.matterport.com/"

  @doc """
  Renders `listings` as a single `<trovit>` XML document string with one
  `<ad>` node per listing.
  """
  def export_listings_xml(listings, options \\ %{}) do
    options = merge_default_options(options)

    listings
    |> Enum.map(&build_node(&1, options))
    |> build_root()
    |> XmlBuilder.document()
    |> XmlBuilder.generate(format: :none)
  end

  @doc """
  Merges caller options over the defaults (`:attributes` falls back to the
  full exported attribute list).
  """
  def merge_default_options(options) do
    Map.merge(@default_options, options)
  end

  @doc """
  Builds a single `<ad>` element tuple for a listing.
  """
  def build_node(listing, options) do
    {"ad", %{}, convert_attributes(listing, options)}
  end

  defp build_root(nodes) do
    {"trovit", %{}, nodes}
  end

  @doc """
  Converts the selected attributes of a listing into XML element tuples.
  """
  def convert_attributes(listing, %{attributes: attributes}) do
    Enum.map(attributes, &convert_attribute_with_cdata(&1, listing))
  end

  # :pictures builds nested elements that handle their own CDATA escaping.
  defp convert_attribute_with_cdata(:pictures = attr, listing) do
    convert_attribute(attr, listing)
  end

  # All scalar values are wrapped as CDATA.
  defp convert_attribute_with_cdata(attr, listing) do
    {tag, attrs, value} = convert_attribute(attr, listing)
    {tag, attrs, escape_cdata(value)}
  end

  defp convert_attribute(:id, %{id: id}) do
    {"id", %{}, id}
  end

  defp convert_attribute(:url, %{id: id}) do
    {"url", %{}, build_url(frontend_url(), "/imoveis/", to_string(id))}
  end

  defp convert_attribute(:title, %{type: type, address: %{city: city}}) do
    {"title", %{}, "#{type} a venda em #{city}"}
  end

  defp convert_attribute(:sell_type, _) do
    {"type", %{}, "For Sale"}
  end

  defp convert_attribute(:description, %{description: description}) do
    {"content", %{}, description}
  end

  defp convert_attribute(:price, %{price: price}) do
    {"price", %{}, price}
  end

  defp convert_attribute(:listing_type, %{type: type}) do
    {"property_type", %{}, type}
  end

  defp convert_attribute(:area, %{area: area}) do
    {"floor_area", %{}, area}
  end

  defp convert_attribute(:rooms, %{rooms: rooms}) do
    {"rooms", %{}, rooms || 0}
  end

  defp convert_attribute(:bathrooms, %{bathrooms: bathrooms}) do
    {"bathrooms", %{}, bathrooms || 0}
  end

  defp convert_attribute(:garage_spots, %{garage_spots: garage_spots}) do
    {"parking", %{}, garage_spots || 0}
  end

  defp convert_attribute(:state, %{address: %{state: state}}) do
    {"region", %{}, expand_state(state)}
  end

  defp convert_attribute(:city, %{address: %{city: city}}) do
    {"city", %{}, city}
  end

  defp convert_attribute(:neighborhood, %{address: %{neighborhood: neighborhood}}) do
    {"city_area", %{}, neighborhood}
  end

  defp convert_attribute(:address, %{address: %{street: street, street_number: street_number}}) do
    {"address", %{}, "#{street}, #{street_number}"}
  end

  defp convert_attribute(:postal_code, %{address: %{postal_code: postal_code}}) do
    {"postcode", %{}, postal_code}
  end

  defp convert_attribute(:latitude, %{address: %{lat: lat}}) do
    {"latitude", %{}, lat}
  end

  defp convert_attribute(:longitude, %{address: %{lng: lng}}) do
    {"longitude", %{}, lng}
  end

  defp convert_attribute(:virtual_tour, %{matterport_code: nil}) do
    {"virtual_tour", %{}, nil}
  end

  defp convert_attribute(:virtual_tour, %{matterport_code: matterport_code}) do
    {"virtual_tour", %{}, build_url(@matterport_url, "/show/", "?m=#{matterport_code}")}
  end

  defp convert_attribute(:pictures, %{images: []}) do
    {"pictures", %{}, nil}
  end

  defp convert_attribute(:pictures, %{images: images}) do
    {"pictures", %{}, Enum.map(images, &build_image/1)}
  end

  defp convert_attribute(:owner, _) do
    {"by_owner", %{}, @listing_agency}
  end

  defp convert_attribute(:agency, _) do
    {"agency", %{}, "EmCasa.com"}
  end

  # Read at runtime so configuration changes take effect without recompiling
  # this module (a compile-time module attribute would freeze the value).
  defp frontend_url, do: Application.get_env(:re_integrations, :frontend_url)

  defp build_url(host, path, param) do
    host
    |> URI.merge(path)
    |> URI.merge(param)
    |> URI.to_string()
  end

  defp expand_state("RJ"), do: "Rio de Janeiro"
  defp expand_state("SP"), do: "São Paulo"
  defp expand_state(state), do: state

  defp build_image(%{filename: filename, description: description}) do
    {
      "picture",
      %{},
      [
        # Cloudinary base URL + stored filename yields the public image URL.
        # (Original source had a corrupted interpolation here; `filename` was
        # bound but never used.)
        {"picture_url", %{}, escape_cdata("#{@image_url}/#{filename}")},
        {"picture_title", %{}, escape_cdata(description)}
      ]
    }
  end

  defp escape_cdata(nil) do
    nil
  end

  defp escape_cdata(value) when is_binary(value) do
    {:cdata, value}
  end

  # Non-binary values (ints, floats) are stringified before CDATA wrapping.
  defp escape_cdata(value) do
    escape_cdata(to_string(value))
  end
end
|
apps/re/lib/exporters/trovit.ex
| 0.667473
| 0.433142
|
trovit.ex
|
starcoder
|
# Exception structs used by Cldr.Unit (and two Cldr-level grammar errors).
# Each is a minimal `defexception` with a single `:message` field and an
# `exception/1` constructor that wraps the given message as-is.
# `Cldr.Unit.NoPatternError` additionally accepts a
# `{name, grammatical_case, gender, plural}` tuple and builds a descriptive
# message from it.
defmodule Cldr.Unit.IncompatibleUnitsError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownUnitCategoryError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownUnitPreferenceError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnitNotConvertibleError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownBaseUnitError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownUsageError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnitNotTranslatableError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.InvalidSystemKeyError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownMeasurementSystemError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.NoPatternError do
  @moduledoc false
  defexception [:message]

  # Structured constructor: builds a human-readable message from the unit
  # name and the grammatical attributes for which no pattern was found.
  def exception({name, grammatical_case, gender, plural}) do
    message =
      "No format pattern was found for unit #{inspect(name)} " <>
        "with grammatical case #{inspect(grammatical_case)}, " <>
        "gender #{inspect(gender)} and plural type #{inspect(plural)}"

    %__MODULE__{message: message}
  end

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.UnknownCategoryError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.NotInvertableError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.UnknownGrammaticalCaseError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.UnknownGrammaticalGenderError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.NotParseableError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.AmbiguousUnitError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end

defmodule Cldr.Unit.CategoryMatchError do
  @moduledoc false
  defexception [:message]

  def exception(message) do
    %__MODULE__{message: message}
  end
end
|
lib/cldr/unit/exception.ex
| 0.695235
| 0.401923
|
exception.ex
|
starcoder
|
defmodule Oban.Plugins.Pruner do
  @moduledoc """
  Periodically delete completed, cancelled and discarded jobs based on age.

  ## Using the Plugin

  The following example demonstrates using the plugin without any configuration, which will prune
  jobs older than the default of 60 seconds:

      config :my_app, Oban,
        plugins: [Oban.Plugins.Pruner],
        ...

  Override the default options to prune jobs after 5 minutes:

      config :my_app, Oban,
        plugins: [{Oban.Plugins.Pruner, max_age: 300}],
        ...

  > #### 🌟 DynamicPruner {: .info}
  >
  > This plugin treats all jobs the same and only retains by time. To retain by length or
  > provide custom rules for specific queues, workers and job states see the `DynamicPruner` plugin
  > in [Oban Pro](dynamic_pruner.html).

  ## Options

  * `:interval` — the number of milliseconds between pruning attempts. The default is `30_000ms`.

  * `:limit` — the maximum number of jobs to prune at one time. The default is 10,000 to prevent
    request timeouts. Applications that steadily generate more than 10k jobs a minute should
    increase this value.

  * `:max_age` — the number of seconds after which a job may be pruned. Defaults to 60s.

  ## Instrumenting with Telemetry

  The `Oban.Plugins.Pruner` plugin adds the following metadata to the `[:oban, :plugin, :stop]` event:

  * `:pruned_count` - the number of jobs that were pruned from the database
  """

  @behaviour Oban.Plugin

  use GenServer

  import Ecto.Query, only: [join: 5, limit: 2, or_where: 3, select: 2]

  alias Oban.{Job, Peer, Plugin, Repo, Validation}

  @type option ::
          Plugin.option()
          | {:limit, pos_integer()}
          | {:max_age, pos_integer()}

  defmodule State do
    @moduledoc false

    # :timer holds the reference of the pending :prune message, so it can be
    # cancelled in terminate/2.
    defstruct [
      :conf,
      :name,
      :timer,
      interval: :timer.seconds(30),
      max_age: 60,
      limit: 10_000
    ]
  end

  @impl Plugin
  @spec start_link([option()]) :: GenServer.on_start()
  def start_link(opts) do
    GenServer.start_link(__MODULE__, opts, name: opts[:name])
  end

  @impl Plugin
  def validate(opts) do
    # Every known option is validated individually; any unrecognized key is
    # rejected with an error tuple.
    Validation.validate(opts, fn
      {:conf, _} -> :ok
      {:name, _} -> :ok
      {:interval, interval} -> Validation.validate_integer(:interval, interval)
      {:limit, limit} -> Validation.validate_integer(:limit, limit)
      {:max_age, max_age} -> Validation.validate_integer(:max_age, max_age)
      option -> {:error, "unknown option provided: #{inspect(option)}"}
    end)
  end

  @impl GenServer
  def init(opts) do
    Validation.validate!(opts, &validate/1)

    # Trap exits so terminate/2 runs on shutdown and can cancel the timer.
    Process.flag(:trap_exit, true)

    state =
      State
      |> struct!(opts)
      |> schedule_prune()

    :telemetry.execute([:oban, :plugin, :init], %{}, %{conf: state.conf, plugin: __MODULE__})

    {:ok, state}
  end

  @impl GenServer
  def terminate(_reason, state) do
    if is_reference(state.timer), do: Process.cancel_timer(state.timer)

    :ok
  end

  @impl GenServer
  def handle_info(:prune, %State{} = state) do
    meta = %{conf: state.conf, plugin: __MODULE__}

    # Each prune attempt is wrapped in a telemetry span; the stop event
    # carries :pruned_count on success (see the moduledoc).
    :telemetry.span([:oban, :plugin], meta, fn ->
      case check_leadership_and_delete_jobs(state) do
        {:ok, {pruned_count, _}} when is_integer(pruned_count) ->
          {:ok, Map.put(meta, :pruned_count, pruned_count)}

        error ->
          {:error, Map.put(meta, :error, error)}
      end
    end)

    # Re-arm the timer regardless of the outcome.
    {:noreply, schedule_prune(state)}
  end

  # Scheduling

  # Only the cluster leader prunes; non-leader nodes report a successful no-op
  # so the telemetry span still succeeds.
  defp check_leadership_and_delete_jobs(state) do
    if Peer.leader?(state.conf) do
      Repo.transaction(state.conf, fn ->
        delete_jobs(state.conf, state.max_age, state.limit)
      end)
    else
      {:ok, {0, []}}
    end
  end

  defp schedule_prune(state) do
    %{state | timer: Process.send_after(self(), :prune, state.interval)}
  end

  # Query

  defp delete_jobs(conf, seconds, limit) do
    time = DateTime.add(DateTime.utc_now(), -seconds)

    # First select up to `limit` prunable ids, then delete by joining against
    # that subquery — keeps each delete bounded in size. The age check uses a
    # different timestamp column per terminal state.
    subquery =
      Job
      |> or_where([j], j.state == "completed" and j.attempted_at < ^time)
      |> or_where([j], j.state == "cancelled" and j.cancelled_at < ^time)
      |> or_where([j], j.state == "discarded" and j.discarded_at < ^time)
      |> select([:id])
      |> limit(^limit)

    Repo.delete_all(
      conf,
      join(Job, :inner, [j], x in subquery(subquery), on: j.id == x.id)
    )
  end
end
|
lib/oban/plugins/pruner.ex
| 0.879017
| 0.59928
|
pruner.ex
|
starcoder
|
defmodule Membrane.H264.FFmpeg.Encoder do
  @moduledoc """
  Membrane element that encodes raw video frames to H264 format.

  The element expects each frame to be received in a separate buffer, so the parser
  (`Membrane.Element.RawVideo.Parser`) may be required in a pipeline before
  the encoder (e.g. when input is read from `Membrane.File.Source`).

  Additionally, the encoder has to receive proper caps with picture format and dimensions
  before any encoding takes place.

  Please check `t:t/0` for available options.
  """
  use Membrane.Filter
  use Bunch.Typespec

  alias __MODULE__.Native
  alias Membrane.Buffer
  alias Membrane.Caps.Video.{H264, Raw}
  alias Membrane.H264.FFmpeg.Common

  def_input_pad :input,
    demand_unit: :buffers,
    caps: {Raw, format: one_of([:I420, :I422]), aligned: true}

  def_output_pad :output,
    caps: {H264, stream_format: :byte_stream, alignment: :au}

  @default_crf 23

  @list_type presets :: [
               :ultrafast,
               :superfast,
               :veryfast,
               :faster,
               :fast,
               :medium,
               :slow,
               :slower,
               :veryslow,
               :placebo
             ]

  def_options crf: [
                description: """
                Constant rate factor that affects the quality of output stream.
                Value of 0 is lossless compression while 51 (for 8-bit samples)
                or 63 (10-bit) offers the worst quality.
                The range is exponential, so increasing the CRF value +6 results
                in roughly half the bitrate / file size, while -6 leads
                to roughly twice the bitrate.
                """,
                type: :int,
                default: @default_crf
              ],
              preset: [
                description: """
                Collection of predefined options providing certain encoding.
                The slower the preset choosen, the higher compression for the
                same quality can be achieved.
                """,
                type: :atom,
                spec: presets(),
                default: :medium
              ],
              profile: [
                description: """
                Defines the features that will have to be supported by decoder
                to decode video encoded with this element.
                """,
                type: :atom,
                spec: H264.profile_t(),
                default: :high
              ]

  @impl true
  def handle_init(opts) do
    # The native encoder is created lazily in handle_caps/4 once picture
    # format and dimensions are known; until then encoder_ref stays nil.
    state = Map.merge(opts, %{encoder_ref: nil})
    {:ok, state}
  end

  @impl true
  def handle_demand(:output, _size, :buffers, _ctx, %{encoder_ref: nil} = state) do
    # Wait until we have an encoder
    {:ok, state}
  end

  def handle_demand(:output, size, :buffers, _ctx, state) do
    {{:ok, demand: {:input, size}}, state}
  end

  @impl true
  def handle_process(:input, %Buffer{metadata: metadata, payload: payload}, _ctx, state) do
    %{encoder_ref: encoder_ref} = state
    # Missing presentation timestamp falls back to 0.
    pts = metadata[:pts] || 0

    with {:ok, dts_list, frames} <-
           Native.encode(payload, Common.to_h264_time_base(pts), encoder_ref) do
      bufs = wrap_frames(dts_list, frames)

      # redemand is needed until the internal buffer of encoder is filled (no buffers will be
      # generated before that) but it is a noop if the demand has been fulfilled
      actions = bufs ++ [redemand: :output]
      {{:ok, actions}, state}
    else
      {:error, reason} ->
        {{:error, reason}, state}
    end
  end

  @impl true
  def handle_caps(:input, %Raw{} = caps, _ctx, state) do
    {framerate_num, framerate_denom} = caps.framerate

    # Flush any previous encoder first so frames buffered inside it are
    # emitted before a new encoder replaces it.
    with {:ok, buffers} <- flush_encoder_if_exists(state),
         {:ok, new_encoder_ref} <-
           Native.create(
             caps.width,
             caps.height,
             caps.format,
             state.preset,
             state.profile,
             framerate_num,
             framerate_denom,
             state.crf
           ) do
      caps = create_new_caps(caps, state)
      actions = buffers ++ [caps: caps, redemand: :output]
      {{:ok, actions}, %{state | encoder_ref: new_encoder_ref}}
    else
      {:error, reason} -> {{:error, reason}, state}
    end
  end

  @impl true
  def handle_end_of_stream(:input, _ctx, state) do
    # Drain the encoder's internal buffer before propagating end_of_stream.
    with {:ok, buffers} <- flush_encoder_if_exists(state) do
      actions = buffers ++ [end_of_stream: :output, notify: {:end_of_stream, :input}]
      {{:ok, actions}, state}
    else
      {:error, reason} -> {{:error, reason}, state}
    end
  end

  @impl true
  def handle_prepared_to_stopped(_ctx, state) do
    {:ok, %{state | encoder_ref: nil}}
  end

  # No encoder yet — nothing to flush.
  defp flush_encoder_if_exists(%{encoder_ref: nil}) do
    {:ok, []}
  end

  defp flush_encoder_if_exists(%{encoder_ref: encoder_ref}) do
    with {:ok, dts_list, frames} <- Native.flush(encoder_ref) do
      buffers = wrap_frames(dts_list, frames)
      {:ok, buffers}
    end
  end

  # Pairs each decode timestamp with its encoded frame and wraps them into a
  # single buffer action; returns [] when the encoder produced nothing.
  defp wrap_frames([], []), do: []

  defp wrap_frames(dts_list, frames) do
    Enum.zip(dts_list, frames)
    |> Enum.map(fn {dts, frame} ->
      %Buffer{metadata: %{dts: Common.to_membrane_time_base(dts)}, payload: frame}
    end)
    |> then(&[buffer: {:output, &1}])
  end

  # Output caps mirror the input dimensions/framerate with the configured
  # profile and byte-stream format.
  defp create_new_caps(caps, state) do
    {:output,
     %H264{
       alignment: :au,
       framerate: caps.framerate,
       height: caps.height,
       width: caps.width,
       profile: state.profile,
       stream_format: :byte_stream
     }}
  end
end
|
lib/membrane_h264_ffmpeg/encoder.ex
| 0.86757
| 0.560403
|
encoder.ex
|
starcoder
|
defmodule Graphmath.Vec3 do
@moduledoc """
This is the 3D mathematics library for graphmath.
This submodule handles 3D vectors using tuples of floats.
"""
@type vec3 :: {float, float, float}
@doc """
`create()` creates a zeroed `vec3`.

It takes no arguments and returns `{0.0, 0.0, 0.0}`.
"""
@spec create() :: vec3
def create(), do: {0.0, 0.0, 0.0}

@doc """
`create(x, y, z)` creates a `vec3` holding the three given components.
"""
@spec create(float, float, float) :: vec3
def create(x, y, z), do: {x, y, z}

@doc """
`create(vec)` builds a `vec3` from the first three elements of a list of
three or more floats; any remaining elements are ignored.
"""
@spec create([float]) :: vec3
def create(elements) do
  [first, second, third | _rest] = elements
  {first, second, third}
end
@doc """
`add(a, b)` returns the component-wise sum of two `vec3`s.
"""
@spec add(vec3, vec3) :: vec3
def add({ax, ay, az}, {bx, by, bz}), do: {ax + bx, ay + by, az + bz}

@doc """
`subtract(a, b)` returns the component-wise difference `a - b`
(`a` is the minuend, `b` the subtrahend).
"""
@spec subtract(vec3, vec3) :: vec3
def subtract({ax, ay, az}, {bx, by, bz}), do: {ax - bx, ay - by, az - bz}

@doc """
`multiply(a, b)` returns the component-wise (Hadamard) product of two
`vec3`s.
"""
@spec multiply(vec3, vec3) :: vec3
def multiply({ax, ay, az}, {bx, by, bz}), do: {ax * bx, ay * by, az * bz}

@doc """
`scale(a, scale)` multiplies every component of `a` by the scalar `scale`.
"""
@spec scale(vec3, float) :: vec3
def scale({x, y, z}, scale), do: {x * scale, y * scale, z * scale}
@doc """
`dot(a, b)` returns the dot (inner) product of two `vec3`s:
a·b = ax*bx + ay*by + az*bz.
"""
@spec dot(vec3, vec3) :: float
def dot({ax, ay, az}, {bx, by, bz}), do: ax * bx + ay * by + az * bz

@doc """
`cross(a, b)` returns the cross product of two `vec3`s.

The result is a vector perpendicular to both inputs whose magnitude equals
the area of the parallelogram the two inputs span.
"""
@spec cross(vec3, vec3) :: vec3
def cross({ax, ay, az}, {bx, by, bz}) do
  {ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx}
end
@doc """
`length(a)` returns the Euclidean (L2) length of a `vec3`:
sqrt(x² + y² + z²).
"""
@spec length(vec3) :: float
def length({x, y, z}), do: :math.sqrt(x * x + y * y + z * z)

@doc """
`length_squared(a)` returns the squared Euclidean length (x² + y² + z²).

Often sufficient for comparisons and avoids the square root.
"""
@spec length_squared(vec3) :: float
def length_squared({x, y, z}), do: x * x + y * y + z * z

@doc """
`length_manhattan(a)` returns the plain sum of the components (x + y + z).

NOTE(review): a true Manhattan (L1) norm sums absolute values; this
implementation intentionally mirrors the historical behavior of summing the
raw components, so it can be negative.
"""
@spec length_manhattan(vec3) :: float
def length_manhattan({x, y, z}), do: x + y + z

@doc """
`normalize(a)` returns the unit vector pointing in the same direction as `a`.
"""
@spec normalize(vec3) :: vec3
def normalize({x, y, z}) do
  # Multiply by the reciprocal magnitude (same arithmetic as dividing each
  # component, computed once).
  inv = 1 / :math.sqrt(x * x + y * y + z * z)
  {x * inv, y * inv, z * inv}
end
@doc """
`lerp(a,b,t)` linearly interpolates between one `vec3` and another `vec3` along an interpolant.
`a` is the starting `vec3`.
`b` is the ending `vec3`.
`t` is the interpolant float, on the domain [0,1].
It returns a `vec3` of the form (1-t)**a** - (t)**b**.
The interpolant `t` is on the domain [0,1]. Behavior outside of that is undefined.
"""
@spec lerp(vec3, vec3, float) :: vec3
def lerp(a, b, t) do
{x, y, z} = a
{u, v, w} = b
{t * u + (1 - t) * x, t * v + (1 - t) * y, t * w + (1 - t) * z}
end
@doc """
`near(a,b, distance)` checks whether two `vec3`s are within a certain distance of each other.
`a` is the first `vec3`.
`b` is the second `vec3`.
`distance` is the distance between them as a float.
"""
@spec near(vec3, vec3, float) :: boolean
def near(a, b, distance) do
{x, y, z} = a
{u, v, w} = b
dx = u - x
dy = v - y
dz = w - z
distance > :math.sqrt(dx * dx + dy * dy + dz * dz)
end
@doc """
`rotate( v, k, theta)` rotates a vector (v) about a unit vector (k) by theta radians.
`v` is the `vec3` to be rotated.
`k` is the `vec3` axis of rotation. *It must be of unit length*.
`theta` is the angle in radians to rotate as a float.
This uses the [Formula of Rodriguez](http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula):
**V**<sub>rot</sub> = **V**cos(theta) + (**K** x **V**)sin(theta) + **K**(**K** dot **V**)(1-cos(theta))
"""
@spec rotate(vec3, vec3, float) :: vec3
def rotate(v, k, theta) do
{vx, vy, vz} = v
{kx, ky, kz} = k
ct = :math.cos(theta)
st = :math.sin(theta)
k_dot_v = vx * kx + vy * ky + vz * kz
coeff = (1.0 - ct) * k_dot_v
v
|> scale(ct)
|> add(scale(cross(k, v), st))
|> add(scale(k, coeff))
end
@doc """
`equal(a, b)` checks to see if two vec3s a and b are equivalent.
`a` is the `vec3`.
`b` is the `vec3`.
It returns true if the vectors have equal elements.
Note that due to precision issues, you may want to use `equal/3` instead.
"""
@spec equal(vec3, vec3) :: boolean
def equal({ax, ay, az}, {bx, by, bz}) do
ax == bx and ay == by and az == bz
end
@doc """
`equal(a, b, eps)` checks to see if two vec3s a and b are equivalent within some tolerance.
`a` is the `vec3`.
`b` is the `vec3`.
`eps` is the tolerance, a float.
It returns true if the vectors have equal elements within some tolerance.
"""
@spec equal(vec3, vec3, float) :: boolean
def equal({ax, ay, az}, {bx, by, bz}, eps) do
abs(ax - bx) <= eps and
abs(ay - by) <= eps and
abs(az - bz) <= eps
end
@doc """
`random_sphere()` gives a point at or within unit distance of the origin, using [this](http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/) polar method.
Another really nice exploration of this is [here](http://mathworld.wolfram.com/SpherePointPicking.html).
It returns a vec3 within at most unit distance of the origin.
"""
@spec random_sphere() :: vec3
def random_sphere() do
u = 2.0 * :rand.uniform() - 1
phi = 2.0 * :math.pi() * :rand.uniform()
x = :math.cos(phi) * :math.sqrt(1 - u * u)
y = :math.sin(phi) * :math.sqrt(1 - u * u)
z = u
{x, y, z}
end
@doc """
`random_ball()` gives a point at or within unit distance of the origin, using [the last algo here](https://karthikkaranth.me/blog/generating-random-points-in-a-sphere/).
It returns a vec3 within at most unit distance of the origin.
"""
@spec random_ball() :: vec3
def random_ball() do
u = :rand.uniform()
v = :rand.uniform()
theta = 2.0 * u * :math.pi()
phi = :math.acos(2.0 * v - 1.0)
# basically cube root
r = :math.pow(:rand.uniform(), 1 / 3)
sin_theta = :math.sin(theta)
cos_theta = :math.cos(theta)
sin_phi = :math.sin(phi)
cos_phi = :math.cos(phi)
x = r * sin_phi * cos_theta
y = r * sin_phi * sin_theta
z = r * cos_phi
{x, y, z}
end
@doc """
`random_box()` gives a point on or in the unit box [0,1]x[0,1]x[0,1].
It returns a vec3.
"""
@spec random_box() :: vec3
def random_box(), do: {:rand.uniform(), :rand.uniform(), :rand.uniform()}
@doc """
`negate(v)` creates a vector whose elements are opposite in sign to `v`.
"""
@spec negate(vec3) :: vec3
def negate({x, y, z}), do: {-1.0 * x, -1.0 * y, -1.0 * z}
@doc """
`weighted_sum(a, v1, b, v2)` returns the sum of vectors `v1` and `v2` having been scaled by `a` and `b`, respectively.
"""
@spec weighted_sum(number, vec3, number, vec3) :: vec3
def weighted_sum(a, {x, y, z}, b, {u, v, w}) do
{a * x + b * u, a * y + b * v, a * z + b * w}
end
@doc """
`scalar_triple(a,b,c)` returns the [scalar triple product](https://en.wikipedia.org/wiki/Triple_product#Scalar_triple_product) of three vectors.
We're using the `a*(b x c)` form.
"""
@spec scalar_triple(vec3, vec3, vec3) :: float
def scalar_triple({ax, ay, az}, {bx, by, bz}, {cx, cy, cz}) do
ax * (by * cz - bz * cy) + ay * (bz * cx - bx * cz) + az * (bx * cy - by * cx)
end
@doc """
`minkowski_distance(a,b,order)` returns the [Minkowski distance](https://en.wikipedia.org/wiki/Minkowski_distance) between two points `a` and b` of order `order`.
`order` needs to be greater than or equal to 1 to define a [metric space](https://en.wikipedia.org/wiki/Metric_space).
`order` 1 is equivalent to manhattan distance, 2 to Euclidean distance, otherwise all bets are off.
"""
@spec minkowski_distance( vec3, vec3, number) :: number
def minkowski_distance({x1,y1,z1}, {x2,y2,z2}, order) do
adx = abs(x2 - x1)
ady = abs(y2 - y1)
adz = abs(z2 - z1)
temp = :math.pow(adx, order) + :math.pow(ady, order) + :math.pow(adz, order)
:math.pow(temp, 1 / order)
end
@doc """
`chebyshev_distance(a,b)` returns the [Chebyshev distance](https://en.wikipedia.org/wiki/Chebyshev_distance) between two points `a` and b`.
"""
@spec chebyshev_distance( vec3, vec3) :: number
def chebyshev_distance({x1,y1,z1}, {x2,y2,z2}) do
adx = abs(x2 - x1)
ady = abs(y2 - y1)
adz = abs(z2 - z1)
max(adx, max(ady,adz))
end
@doc """
`p_norm(v,order)` returns the [P-norm](https://en.wikipedia.org/wiki/Lp_space#The_p-norm_in_finite_dimensions) of vector `v` of order `order`.
`order` needs to be greater than or equal to 1 to define a [metric space](https://en.wikipedia.org/wiki/Metric_space).
`order` 1 is equivalent to manhattan distance, 2 to Euclidean distance, otherwise all bets are off.
"""
@spec p_norm( vec3, number) :: number
def p_norm({x, y, z}, order) do
ax = abs(x)
ay = abs(y)
az = abs(z)
temp = :math.pow(ax, order) + :math.pow(ay, order) + :math.pow(az, order)
:math.pow(temp, 1 / order)
end
end
|
lib/graphmath/Vec3.ex
| 0.960828
| 0.96859
|
Vec3.ex
|
starcoder
|
defmodule Guardian.Permissions do
  @moduledoc """
  Functions for dealing with permissions sets.

  Guardian provides facilities for working with
  many permission sets in parallel.
  Guardian must be configured with its permissions at start time.

      config :guardian, Guardian,
        permissions: %{
          default: [
            :read_profile,
            :write_profile,
            :create_item,
            :read_item,
            :write_item,
            :delete_item
          ],
          admin: [
            :users_read,
            :users_write,
            :financials_read,
            :financials_write,
          ]
        }

  Guardian.Permissions encodes the permissions for each as integer bitstrings
  so you have 31 permissions per group.
  (remember javascript is only a 32 bit system)
  Guardian tokens will remain small, even with a full 31 permissions in a set.
  You should use less sets and more permissions,
  rather than more sets with fewer permissions per set.
  Permissions that are unknown are ignored.
  This is to support backwards compatibility with previously issued tokens.

  ### Example working with permissions manually

      # Accessing default permissions
      Guardian.Permissions.to_value([:read_profile, :write_profile]) # 3
      Guardian.Permissions.to_list(3) # [:read_profile, :write_profile]

      # Accessing 'admin' permissions (see config above)
      Guardian.Permissions.to_value(
        [:financials_read, :financials_write], :admin
      ) # 12
      # [:financials_read, :financials_write]
      Guardian.Permissions.to_list(12, :admin)

      # Checking permissions
      # true
      Guardian.Permissions.all?(3, [:users_read, :users_write], :admin)
      # false
      Guardian.Permissions.all?(1, [:users_read, :users_write], :admin)
      # true
      Guardian.Permissions.any?(12, [:users_read, :financial_read], :admin)
      # true
      Guardian.Permissions.any?(11, [:read_profile, :read_item])
      # false
      Guardian.Permissions.any?(11, [:delete_item, :write_item])

  ### Reading permissions from claims

  Permissions are encoded into claims under the "pem" key
  and are a map of "type": <value as integer>

      claims = %{ "pem" => %{
        "default" => 3,
        "admin" => 1
      } }

      Guardian.Permissions.from_claims(claims) # 3
      Guardian.Permissions.from_claims(claims, :admin) # 1
      # returns [:users_read]
      Guardian.Permissions.from_claims(claims) |> Guardian.Permissions.to_list

  ### Adding permissions to claims

  This will encode the permissions as a map with integer values

      Guardian.Claims.permissions(
        existing_claims,
        admin: [:users_read],
        default: [:read_item, :write_item]
      )

  Assign all permissions (and all future ones)

      max = Guardian.Permissions.max
      Guardian.Claims.permissions(existing_claims, admin: max, default: max)

  ### Signing in with permissions

  This will encode the permissions as a map with integer values

      Guardian.Plug.sign_in(
        user,
        :access,
        perms: %{ admin: [:users_read],
          default: [:read_item, :write_item] }
      )

  ### Encoding credentials with permissions

  This will encode the permissions as a map with integer values

      Guardian.encode_and_sign(
        user,
        :access,
        perms: %{
          admin: [:users_read],
          default: [:read_item, :write_item]
        }
      )
  """
  use Bitwise

  @doc """
  The maximum permissions value: -1 has all bits set, so it grants every
  current (and future) permission in a set.
  """
  def max, do: -1

  @doc """
  Fetches the list of known permissions for the given type.
  Unknown types — and strings that name no existing atom — yield `[]`.
  """
  @spec available() :: [atom]
  def available, do: available(:default)

  @spec available(atom | String.t()) :: [atom]
  def available(type) when is_binary(type) do
    try do
      available(String.to_existing_atom(type))
    rescue
      # An unknown string cannot name a configured set; treat as no perms.
      _e in ArgumentError -> []
    end
  end

  def available(type) when is_atom(type), do: Map.get(all_available(), type, [])

  @doc "Returns the full map of configured permission sets."
  def all_available, do: Enum.into(Guardian.config(:permissions, %{}), %{})

  @doc """
  Checks that `value` contains every permission in `expected` for set `key`.
  Returns `false` when `expected` resolves to no known permissions.
  """
  def all?(value, expected, key \\ :default) do
    expected_value = to_value(expected, key)

    if expected_value == 0 do
      false
    else
      (to_value(value, key) &&& expected_value) == expected_value
    end
  end

  @doc """
  Checks that `value` contains at least one permission in `expected`
  for set `key`.
  """
  def any?(value, expected, key \\ :default) do
    expected_value = to_value(expected, key)
    (to_value(value, key) &&& expected_value) > 0
  end

  @doc """
  Fetches the permissions from the claims.
  Permissions live in the "pem" key and are a map of
  "<type>": <value of permissions as integer>
  """
  @spec from_claims(map) :: integer
  def from_claims(claims), do: from_claims(claims, :default)

  @spec from_claims(map, atom | String.t()) :: integer
  def from_claims(claims, type) do
    c = Map.get(claims, "pem", %{})
    # Accept either an atom or string key for the type; default to 0 (none).
    Map.get(c, type, Map.get(c, to_string(type), 0))
  end

  @doc """
  Fetches the value as a bitstring (integer)
  of the list of permissions in the `type` list
  """
  @spec to_value(integer | list) :: integer
  def to_value(val), do: to_value(val, :default)

  @spec to_value(integer | list, atom) :: integer
  def to_value(num, _) when is_integer(num), do: num

  def to_value(list, type) when is_list(list) do
    to_value(list, 0, available(type))
  end

  @doc false
  def to_value(_, acc, []), do: acc
  def to_value([], acc, _), do: acc

  # Walk the permission names, OR-ing in the bit whose position is the
  # permission's index in the configured list. Unknown names are skipped to
  # stay backwards compatible with previously issued tokens.
  # (1 <<< idx replaces trunc(:math.pow(2, idx)): exact for any index.)
  def to_value([h | t], acc, perms) do
    idx = Enum.find_index(perms, &(&1 == h or to_string(&1) == h))

    if idx do
      to_value(t, acc ||| 1 <<< idx, perms)
    else
      to_value(t, acc, perms)
    end
  end

  @doc """
  Converts `thing` (an integer bitfield or a list of permission names) to a
  list of known permission atoms for the `:default` set.
  """
  def to_list(thing), do: to_list(thing, :default)

  @doc """
  Converts `thing` (an integer bitfield or a list of permission names) to a
  list of known permission atoms for the given set `type`.
  """
  def to_list(thing, type), do: to_list(thing, [], available(type))

  @doc false
  def to_list(_, _, []), do: []

  # When given a list of permission names (atoms or strings), keep only the
  # known ones, normalized to atoms.
  def to_list(list, _acc, perms) when is_list(list) do
    string_perms = Enum.map(perms, &to_string/1)

    list
    |> Enum.map(fn
      x when is_atom(x) ->
        if Enum.member?(perms, x), do: x

      x when is_binary(x) ->
        if Enum.member?(string_perms, x), do: String.to_existing_atom(x)

      _ ->
        nil
    end)
    |> Enum.filter(&(&1 != nil))
  end

  # When given an integer bitfield, emit the permission for each set bit.
  def to_list(num, _acc, perms) when is_integer(num) do
    for i <- 0..(length(perms) - 1),
        (num &&& 1 <<< i) != 0 do
      Enum.at(perms, i)
    end
  end
end
|
lib/guardian/permissions.ex
| 0.71889
| 0.541954
|
permissions.ex
|
starcoder
|
defmodule Rig.Config do
@moduledoc """
Rig module configuration that provides `settings/0`.
There are two ways to use this module
### Specify a list of expected keys
```
defmodule Rig.MyExample do
use Rig.Config, [:some_key, :other_key]
end
```
`Rig.Config` expects a config entry similar to this:
```
config :rig, Rig.MyExample,
some_key: ...,
other_key: ...
```
If one of the specified keys is not found, an error is thrown _at compile time_.
Otherwise, `Rig.MyExample` gets a `config/0` function that returns the
configuration converted to a map.
If there are other keys present, they'll be added to that map as well.
### Specify `:custom_validation` instead
```
defmodule Rig.MyExample do
use Rig.Config, :custom_validation
defp validate_config!(config) do
...
end
end
```
If you use :custom_validation, you should deal with the raw keyword list
by implementing `validate_config!/1` in the module.
"""
# :custom_validation variant: inject only the Confex plumbing; the caller
# is expected to define validate_config!/1 itself.
defmacro __using__(:custom_validation) do
__MODULE__.__everything_but_validation__()
end
# Key-list variant: inject the Confex plumbing plus a generated
# validate_config!/1 that checks the required keys are present.
defmacro __using__(required_keys) do
quote do
unquote(__MODULE__.__everything_but_validation__())
unquote(__MODULE__.__only_validation__(required_keys))
end
end
# Quoted code injected into every using module: wires up Confex for the
# :rig OTP app and forces config evaluation right after compilation, so
# missing values fail the build instead of failing at runtime.
@doc false
def __everything_but_validation__ do
quote do
use Confex, otp_app: :rig
@after_compile __MODULE__
def __after_compile__(env, _bytecode) do
# Make sure missing configuration values are caught early by evaluating the values here
env.module.config()
end
end
end
# Quoted code for the generated validate_config!/1: converts the keyword
# list to a map and raises (at compile time, via the @after_compile hook
# above) if any required key is missing.
@doc false
def __only_validation__(required_keys) do
quote do
defp validate_config!(nil), do: validate_config!([])
defp validate_config!(config) do
# Convert to map and make sure all required keys are present
config = Enum.into(config, %{})
required_keys = unquote(required_keys)
missing_keys = for k <- required_keys, not Map.has_key?(config, k), do: k
case missing_keys do
[] -> config
_ -> raise "Missing required settings for module #{inspect __ENV__.module}: #{inspect missing_keys}"
end
end
end
end
end
|
apps/rig/lib/rig/config.ex
| 0.915818
| 0.81468
|
config.ex
|
starcoder
|
defmodule GitHubActions.Versions do
@moduledoc """
Functions to select and filter lists and tables of versions.
The list of versions can have the following two forms.
- A simple list:
```elixir
["1", "2.0", "2.1", "3", "3.1", "3.1.1"]
```
- A table as list of keyword lists with compatible versions:
```elixir
[
[a: ["1.0.0"], b: ["1.0", "1.1", "1.2"]],
[a: ["2.0.0"], b: ["1.2", "2.0"]]
]
```
"""
alias GitHubActions.Config
alias GitHubActions.Version
alias GitHubActions.Versions.Impl
@type versions_list :: [Version.version()]
@type versions_table :: [keyword(Version.version())]
@type versions :: versions_list() | versions_table()
@type key :: atom()
@doc """
Returns the latest version from the configured versions list.
## Examples
iex> Config.config(:versions, ["1.0.0/2", "1.1.0/3"])
iex> Versions.latest()
#Version<1.1.3>
"""
@spec latest :: Version.t()
def latest, do: latest(from_config())
@doc """
Returns the latest version from the configured `versions` table by the given
`key` or from the given `versions` list.
## Examples
iex> Versions.latest(["1.0.0/2", "1.1.0/3"])
#Version<1.1.3>
iex> Config.config(:versions, [
...> [a: ["1.0.0/2", "1.1.0/3"], b: ["2.0/5"]],
...> [a: ["1.2.0/1", "1.3.0/4"], b: ["3.0/5"]]
...> ])
iex> Versions.latest(:a)
#Version<1.3.4>
iex> Versions.latest(["foo"])
** (GitHubActions.InvalidVersionError) invalid version: "foo"
iex> Versions.latest([a: "1"])
** (ArgumentError) latest/1 expected a list or table of versions or a key, got: [a: "1"]
iex> Versions.latest(:elixir)
#Version<1.13.1>
iex> Versions.latest(:otp)
#Version<24.1>
"""
@spec latest(versions() | key()) :: Version.t()
def latest(versions_or_key) when is_list(versions_or_key) do
case Impl.type(versions_or_key) do
{:list, versions} ->
Impl.latest(versions)
:error ->
raise ArgumentError,
message: """
latest/1 expected a list or table of versions or a key, \
got: #{inspect(versions_or_key)}\
"""
end
end
def latest(key) when is_atom(key), do: latest(from_config(), key)
@doc """
Returns the latest version from a `versions` table by the given `key`.
## Examples
iex> Versions.latest([
...> [a: ["1.0.0/2"], b: ["1.0.0/3"]],
...> [a: ["1.1.0/3"], b: ["1.1.0/4"]]
...> ], :a)
#Version<1.1.3>
iex> Versions.latest([a: "1"], :a)
** (ArgumentError) latest/1 expected a table of versions, got: [a: "1"]
"""
@spec latest(versions_table(), key()) :: Version.t()
def latest(versions, key) when is_list(versions) and is_atom(key) do
case Impl.type(versions) do
{:table, versions} ->
Impl.latest(versions, key)
:error ->
raise ArgumentError,
message: "latest/1 expected a table of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns the latest minor versions from the configured versions list.
## Examples
iex> Config.config(:versions, ["1.0.0/2", "1.1.0/4", "2.0.0/3"])
iex> Versions.latest_minor() |> Enum.map(&to_string/1)
["1.0.2", "1.1.4", "2.0.3"]
"""
@spec latest_minor :: [Version.t()]
def latest_minor, do: latest_minor(from_config())
@doc """
Returns the latest minor versions from the configured `versions` table by the
given `key` or from the given `versions` list.
## Examples
iex> minor_versions = Versions.latest_minor(["1.0.0/2", "1.1.0/3"])
iex> Enum.map(minor_versions, &to_string/1)
["1.0.2", "1.1.3"]
iex> Config.config(:versions, [
...> [a: ["1.0.0/2", "1.1.0/3"], b: ["2.0/5"]],
...> [a: ["1.2.0/1", "1.3.0/4"], b: ["3.0/5"]]
...> ])
iex> minor_versions = Versions.latest_minor(:a)
iex> Enum.map(minor_versions, &to_string/1)
["1.0.2", "1.1.3", "1.2.1", "1.3.4"]
iex> Versions.latest_minor(["foo"])
** (GitHubActions.InvalidVersionError) invalid version: "foo"
iex> Versions.latest_minor([a: "1"])
** (ArgumentError) latest_minor/1 expected a list or table of versions or a key, got: [a: "1"]
iex> minor_versions = Versions.latest_minor(:elixir)
iex> Enum.map(minor_versions, &to_string/1)
["1.0.5", "1.1.1", "1.2.6", "1.3.4", "1.4.5", "1.5.3", "1.6.6", "1.7.4",
"1.8.2", "1.9.4", "1.10.4", "1.11.4", "1.12.3", "1.13.1"]
iex> minor_versions = Versions.latest_minor(:otp)
iex> Enum.map(minor_versions, &to_string/1)
["17.0", "17.1", "17.2", "17.3", "17.4", "17.5", "18.0", "18.1", "18.2",
"18.3", "19.0", "19.1", "19.2", "19.3", "20.0", "20.1", "20.2", "20.3",
"21.0", "21.1", "21.2", "21.3", "22.0", "22.1", "22.2", "22.3", "23.0",
"23.1", "23.2", "23.3", "24.0", "24.1"]
"""
@spec latest_minor(versions_list() | key()) :: [Version.t()]
def latest_minor(versions_or_key) when is_list(versions_or_key) do
case Impl.type(versions_or_key) do
{_type, versions} ->
Impl.latest_minor(versions)
_error ->
raise ArgumentError,
message: """
latest_minor/1 expected a list or table of versions or a key, \
got: #{inspect(versions_or_key)}\
"""
end
end
def latest_minor(key) when is_atom(key), do: latest_minor(from_config(), key)
@doc """
Returns the latest minor versions from a `versions` table by the given `key`.
## Examples
iex> minor_versions = Versions.latest_minor([
...> [a: ["1.0.0/2"], b: ["1.0.0/3"]],
...> [a: ["1.1.0/3"], b: ["1.1.0/4"]]
...> ], :a)
iex> Enum.map(minor_versions, &to_string/1)
["1.0.2", "1.1.3"]
iex> Versions.latest_minor([a: "1"], :a)
** (ArgumentError) latest_minor/1 expected a table of versions, got: [a: "1"]
"""
@spec latest_minor(versions_table(), key()) :: [Version.t()]
def latest_minor(versions, key) when is_list(versions) and is_atom(key) do
case Impl.type(versions) do
{:table, versions} ->
Impl.latest_minor(versions, key)
:error ->
raise ArgumentError,
message: "latest_minor/1 expected a table of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns the latest major versions from the configured versions list.
## Examples
iex> Config.config(:versions, ["1.0.0/2", "1.1.0/4", "2.0.0/3"])
iex> Versions.latest_major() |> Enum.map(&to_string/1)
["1.1.4", "2.0.3"]
"""
@spec latest_major :: [Version.t()]
def latest_major, do: latest_major(from_config())
@doc """
Returns the latest major versions from the configured `versions` table by the
given `key` or from the given `versions` list.
## Examples
iex> major_versions = Versions.latest_major(["1.0.0/2", "1.1.0/3", "2.0.0/2"])
iex> Enum.map(major_versions, &to_string/1)
["1.1.3", "2.0.2"]
iex> Config.config(:versions, [
...> [a: ["1.0.0/2", "1.1.0/3"], b: ["2.0/5"]],
...> [a: ["2.2.0/1", "2.3.0/4"], b: ["3.0/5"]]
...> ])
iex> major_versions = Versions.latest_major(:a)
iex> Enum.map(major_versions, &to_string/1)
["1.1.3", "2.3.4"]
iex> Versions.latest_major(["foo"])
** (GitHubActions.InvalidVersionError) invalid version: "foo"
iex> Versions.latest_major([a: "1"])
** (ArgumentError) latest_major/1 expected a list or table of versions or a key, got: [a: "1"]
iex> major_versions = Versions.latest_major(:elixir)
iex> Enum.map(major_versions, &to_string/1)
["1.13.1"]
iex> major_versions = Versions.latest_major(:otp)
iex> Enum.map(major_versions, &to_string/1)
["17.5", "18.3", "19.3", "20.3", "21.3", "22.3", "23.3", "24.1"]
"""
@spec latest_major(versions_list() | key()) :: [Version.t()]
def latest_major(versions_or_key) when is_list(versions_or_key) do
case Impl.type(versions_or_key) do
{_type, versions} ->
Impl.latest_major(versions)
:error ->
raise ArgumentError,
message: """
latest_major/1 expected a list or table of versions or a key, \
got: #{inspect(versions_or_key)}\
"""
end
end
def latest_major(key) when is_atom(key), do: latest_major(from_config(), key)
@doc """
Returns the latest major versions from a `versions` table by the given `key`.
## Examples
iex> major_versions = Versions.latest_major([
...> [a: ["1.0.0/2"], b: ["1.0.0/3"]],
...> [a: ["2.0.0/3"], b: ["2.0.0/4"]]
...> ], :a)
iex> Enum.map(major_versions, &to_string/1)
["1.0.2", "2.0.3"]
iex> Versions.latest_major([a: "1"], :a)
** (ArgumentError) latest_major/1 expected a table of versions, got: [a: "1"]
"""
@spec latest_major(versions_table(), key()) :: [Version.t()]
def latest_major(versions, key) when is_list(versions) and is_atom(key) do
case Impl.type(versions) do
{:table, versions} ->
Impl.latest_major(versions, key)
_error ->
raise ArgumentError,
message: "latest_major/1 expected a table of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns all versions for `key` from a list of compatible versions.
This function raises a `GitHubActions.InvalidVersionError` for an invalid
version.
## Examples
iex> versions = [
...> [a: ["1.0.0"], b: ["1.0", "1.1", "1.2"]],
...> [a: ["2.0.0"], b: ["1.2", "2.0"]]
...> ]
iex> versions = Versions.get(versions, :b)
iex> hd versions
#Version<1.0>
iex> Enum.map(versions, &to_string/1)
["1.0", "1.1", "1.2", "2.0"]
iex> Versions.get([a: "1"], :a)
** (ArgumentError) get/2 expected a table of versions, got: [a: "1"]
"""
@spec get(versions_table(), key()) :: [Version.t()]
def get(versions \\ from_config(), key) when is_list(versions) do
case Impl.type(versions) do
{:table, versions} ->
Impl.get(versions, key)
_error ->
raise ArgumentError,
message: "get/2 expected a table of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns the versions from the config.
"""
@spec from_config :: versions()
def from_config, do: Config.get(:versions)
@doc """
Sorts the given `versions`.
## Examples
iex> versions = ["1.1", "11.1", "1.0", "2.1", "2.0.1", "2.0.0"]
iex> versions = Versions.sort(versions)
iex> Enum.map(versions, &to_string/1)
["1.0", "1.1", "2.0.0", "2.0.1", "2.1", "11.1"]
iex> Versions.sort([a: ["1", "2"]])
** (ArgumentError) sort/2 expected a list or table of versions, got: [a: ["1", "2"]]
"""
@spec sort([Version.version()]) :: [Version.version()]
def sort(versions) do
case Impl.type(versions) do
{:list, versions} ->
Impl.sort_list(versions)
{:table, versions} ->
Impl.sort_table(versions)
:error ->
raise ArgumentError,
message: "sort/2 expected a list or table of versions, got: #{inspect(versions)}"
end
end
@doc """
Removes all duplicated versions.
## Examples
iex> versions = Versions.expand(["1.0.0/4", "1.0.2/5"])
iex> versions |> Versions.uniq() |> Enum.map(&to_string/1)
["1.0.0", "1.0.1", "1.0.2", "1.0.3", "1.0.4", "1.0.5"]
iex> Versions.uniq([:a])
** (ArgumentError) uniq/1 expected a list or table of versions, got: [:a]
"""
@spec uniq(versions()) :: versions()
def uniq(versions) do
case Impl.type(versions) do
{_type, versions} ->
versions
:error ->
raise ArgumentError,
message: "uniq/1 expected a list or table of versions, got: #{inspect(versions)}"
end
end
@doc """
Filters the list of `versions` by the given `requirement`.
## Examples
iex> versions = ["1", "1.1.0/5", "1.2.0/1", "1.3", "2.0/1"]
iex> Versions.filter(versions, "~> 1.2")
[
%Version{major: 1, minor: 2, patch: 0},
%Version{major: 1, minor: 2, patch: 1},
%Version{major: 1, minor: 3}
]
iex> Versions.filter(versions, ">= 1.3.0")
[
%Version{major: 1, minor: 3},
%Version{major: 2, minor: 0},
%Version{major: 2, minor: 1}
]
iex> Versions.filter([:b, :a], "> 1.0.0")
** (ArgumentError) filter/2 expected a list of versions, got: [:b, :a]
iex> Versions.filter(["1", "2", "3"], "> 1")
** (Version.InvalidRequirementError) invalid requirement: "> 1"
"""
@spec filter(versions_list(), String.t()) :: [Version.t()]
def filter(versions, requirement) when is_binary(requirement) do
case Impl.type(versions) do
{:list, versions} ->
Impl.filter(versions, requirement)
_error ->
raise ArgumentError,
message: "filter/2 expected a list of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns true if `versions` contains the given `version`.
## Examples
iex> versions = ["1.0.0", "1.1.0", "1.1.1"]
iex> Versions.member?(versions, "1.1")
true
iex> Versions.member?(versions, "1.0.1")
false
iex> Versions.member?([a: "1"], "1.0.0")
** (ArgumentError) member?/2 expected a list of versions, got: [a: "1"]
"""
@spec member?(versions_list(), Version.version()) :: boolean
def member?(versions, version) do
case Impl.type(versions) do
{:list, versions} ->
Impl.member?(versions, version)
_error ->
raise ArgumentError,
message: "member?/2 expected a list of versions, got: #{inspect(versions)}"
end
end
@doc """
Returns true if `versions1` has an intersection with `versions2`.
## Examples
iex> Versions.intersection?(["1.0.0/5"], ["1.0.4/7"])
true
iex> Versions.intersection?(["1.0.0/5"], ["2.0.0/7"])
false
iex> Versions.intersection?(["1.0.0/5"], [:a])
** (ArgumentError) intersection?/2 expected two list of versions, got: ["1.0.0/5"], [:a]
"""
@spec intersection?(versions_list(), versions_list()) :: boolean()
def intersection?(versions1, versions2) do
with {:list, versions1} <- Impl.type(versions1),
{:list, versions2} <- Impl.type(versions2) do
Impl.intersection?(versions1, versions2)
else
:error ->
raise ArgumentError,
message: """
intersection?/2 expected two list of versions, \
got: #{inspect(versions1)}, #{inspect(versions2)}\
"""
end
end
@doc """
Returns the versions of `key` that are compatible with `to`.
## Examples
iex> otp = Versions.compatible(:otp, elixir: "1.6.6")
iex> Enum.map(otp, &to_string/1)
["19.0", "19.1", "19.2", "19.3", "20.0", "20.1", "20.2", "20.3", "21.0",
"21.1", "21.2", "21.3"]
iex> elixir = Versions.compatible(:elixir, otp: "20.3")
iex> Enum.map(elixir, &to_string/1)
["1.4.5", "1.5.0", "1.5.1", "1.5.2", "1.5.3", "1.6.0", "1.6.1", "1.6.2",
"1.6.3", "1.6.4", "1.6.5", "1.6.6", "1.7.0", "1.7.1", "1.7.2", "1.7.3",
"1.7.4", "1.8.0", "1.8.1", "1.8.2", "1.9.0", "1.9.1", "1.9.2", "1.9.3",
"1.9.4"]
iex> :otp |> Versions.compatible(elixir: "1.10.0") |> Enum.count()
8
iex> :otp |> Versions.compatible(elixir: "1.10.0/4") |> Enum.count()
12
iex> :otp |> Versions.compatible(elixir: ["1.10.0/4", "1.11.0/4"]) |> Enum.count()
14
iex> Versions.compatible([], :otp, elixir: "1.6.6")
** (ArgumentError) compatible/3 expected a table of versions as first argument, got: []
"""
@spec compatible(versions(), key(), [{key(), Version.version()}]) :: [Version.t()]
def compatible(versions \\ from_config(), key, [{to_key, to_versions}])
when is_atom(key) and is_atom(to_key) do
versions =
case Impl.type(versions) do
{:table, versions} ->
versions
_error ->
raise ArgumentError,
message: """
compatible/3 expected a table of versions as first argument, \
got: #{inspect(versions)}\
"""
end
to_versions =
case to_versions |> List.wrap() |> Impl.type() do
{:list, versions} ->
versions
_error ->
raise ArgumentError,
message: """
compatible/3 expected a list of versions for #{inspect(to_key)}, \
got: #{inspect(to_versions)}\
"""
end
Impl.compatible(versions, key, {to_key, to_versions})
end
@doc """
Returns `true` if the given `version1` is compatible to `version2`.
## Examples
iex> Versions.compatible?(elixir: "1.12.3", otp: "24.0")
true
iex> Versions.compatible?(elixir: "1.6.0", otp: "24.0")
false
iex> versions = [
...> [a: ["1.0.0"], b: ["1.0", "1.1", "1.2"]],
...> [a: ["2.0.0"], b: ["1.2", "2.0"]]
...> ]
iex> Versions.compatible?(versions, a: "1", b: "1.2")
true
iex> Versions.compatible?(versions, a: "2", b: "1.2")
true
iex> Versions.compatible?(versions, a: "2", b: "1")
false
iex> Versions.compatible?([], a: "1", b: "2")
** (ArgumentError) compatible?/2 expected a table of versions as first argument, got: []
"""
@spec compatible?(
versions(),
[{key(), Version.version()}]
) :: boolean()
def compatible?(versions \\ from_config(), [{key1, version1}, {key2, version2}])
when is_atom(key1) and is_atom(key2) do
versions =
case Impl.type(versions) do
{:table, versions} ->
versions
_error ->
raise ArgumentError,
message: """
compatible?/2 expected a table of versions as first argument, \
got: #{inspect(versions)}\
"""
end
version1 = Version.parse!(version1)
version2 = Version.parse!(version2)
Impl.compatible?(versions, {key1, version1}, {key2, version2})
end
@doc """
Returns the incompatible versions between `versions1` and `versions2`.
## Examples
iex> versions = Versions.incompatible(
...> elixir: ["1.9.4", "1.10.4", "1.11.4", "1.12.3"],
...> otp: ["21.3", "22.3", "23.3", "24.0"]
...> )
iex> for [{k1, v1}, {k2, v2}] <- versions do
...> [{k1, to_string(v1)}, {k2, to_string(v2)}]
...> end
[
[elixir: "1.9.4", otp: "23.3"],
[elixir: "1.9.4", otp: "24.0"],
[elixir: "1.10.4", otp: "24.0"],
[elixir: "1.12.3", otp: "21.3"]
]
"""
def incompatible(versions \\ from_config(), [{key1, versions1}, {key2, versions2}])
when is_atom(key1) and is_atom(key2) do
versions =
case Impl.type(versions) do
{:table, versions} ->
versions
_error ->
raise ArgumentError,
message: """
incompatible/2 expected a table of versions as first argument, \
got: #{inspect(versions)}\
"""
end
versions1 =
case Impl.type(versions1) do
{:list, versions} ->
versions
_error ->
raise ArgumentError,
message: """
incompatible/2 expected a list of versions for #{inspect(key1)}, \
got: #{inspect(versions1)}\
"""
end
versions2 =
case Impl.type(versions2) do
{:list, versions} ->
versions
_error ->
raise ArgumentError,
message: """
incompatible/2 expected a list of versions for #{inspect(key2)}, \
got: #{inspect(versions2)}\
"""
end
Impl.incompatible(versions, {key1, versions1}, {key2, versions2})
end
@doc """
Returns the versions matrix for the given requirements.
## Examples
iex> matrix = Versions.matrix(elixir: ">= 1.9.0", otp: ">= 22.0.0")
iex> Enum.map(matrix[:elixir], &to_string/1)
["1.9.4", "1.10.4", "1.11.4", "1.12.3", "1.13.1"]
iex> Enum.map(matrix[:otp], &to_string/1)
["22.3", "23.3", "24.1"]
iex> for [{k1, v1}, {k2, v2}] <- matrix[:exclude] do
...> [{k1, to_string(v1)}, {k2, to_string(v2)}]
...> end
[
[elixir: "1.9.4", otp: "23.3"],
[elixir: "1.9.4", otp: "24.1"],
[elixir: "1.10.4", otp: "24.1"]
]
iex> Versions.matrix([], elixir: ">= 1.9.0", otp: ">= 22.0.0")
** (ArgumentError) matrix/1 expected a table of versions as first argument, got: []
"""
def matrix(versions \\ from_config(), opts) do
case Impl.type(versions) do
{:table, versions} ->
Impl.matrix(versions, opts)
_error ->
raise ArgumentError,
message: """
matrix/1 expected a table of versions as first argument, \
got: #{inspect(versions)}\
"""
end
end
@doc """
Expands the given `versions`.

## Examples

    iex> versions = Versions.expand(["1.0/2"])
    iex> Enum.map(versions, &to_string/1)
    ["1.0", "1.1", "1.2"]

    iex> versions = Versions.expand([
    ...>   [a: ["1.0/2"], b: ["1.0.0/2"]],
    ...>   [a: ["1.1.0/1"], b: ["2.0.0/2"]]
    ...> ])
    iex> versions |> Enum.at(1) |> Keyword.get(:a) |> Enum.map(&to_string/1)
    ["1.1.0", "1.1.1"]
    iex> versions |> Enum.at(1) |> Keyword.get(:b) |> Enum.map(&to_string/1)
    ["2.0.0", "2.0.1", "2.0.2"]

    iex> Versions.expand([:a])
    ** (ArgumentError) expand/1 expected a list of versions or a table of versions, got: [:a]
"""
@spec expand(versions()) :: versions()
def expand(versions) do
  # Accepts either shape (flat list or table); `Impl.type/1` returns the
  # expanded, deduplicated and sorted result in both cases.
  case Impl.type(versions) do
    {_type, expanded} ->
      expanded

    :error ->
      # Fixed message: was "or tabel of versions got:" (typo + missing comma);
      # the doctest above is updated to match.
      raise ArgumentError,
        message: """
        expand/1 expected a list of versions or a table of versions, \
        got: #{inspect(versions)}\
        """
  end
end
defmodule Impl do
  @moduledoc false

  # Classifies and normalises a raw `versions` term.
  #
  # Returns `{:table, rows}` when `versions` is a list of keyword rows that
  # all share the same keys, `{:list, versions}` when it is a flat list of
  # version strings/structs, and `:error` otherwise. In both success cases
  # the versions are expanded, deduplicated and sorted.
  def type(versions) when is_list(versions) do
    # Exactly one of the two shape checks must succeed; anything ambiguous
    # or foreign is rejected.
    case {table?(versions), list?(versions)} do
      {true, false} ->
        {:table, expand(versions) |> uniq() |> sort_table()}

      {false, true} ->
        {:list, expand(versions) |> Enum.uniq() |> sort_list()}

      _else ->
        :error
    end
  end

  def type(_versions), do: :error

  # A table is a list of keyword rows that all use the same set of keys.
  defp table?(versions) do
    Enum.all?(versions, &Keyword.keyword?/1) &&
      versions |> Enum.map(&Keyword.keys/1) |> Enum.uniq() |> Enum.count() == 1
  end

  # A flat list holds only version strings or `Version` structs.
  defp list?(versions) do
    Enum.all?(versions, fn version ->
      cond do
        is_binary(version) -> true
        is_struct(version, Version) -> true
        true -> false
      end
    end)
  end

  # Sorts versions ascending by semantic-version order.
  def sort_list(versions) do
    Enum.sort(versions, fn a, b -> Version.compare(a, b) == :lt end)
  end

  # Sorts each row's version lists, then orders the rows themselves.
  def sort_table(versions) do
    versions
    |> Enum.map(&sort_table_row/1)
    |> sort_table_rows()
  end

  defp sort_table_row(row) do
    Enum.map(row, fn {key, list} -> {key, sort_list(list)} end)
  end

  defp sort_table_rows([]), do: []
  defp sort_table_rows([[]]), do: [[]]

  # Orders rows by the smallest version found under the first row's first
  # key; rows with an empty list for that key sort first.
  defp sort_table_rows([[{key, _version} | _versions] | _rows] = rows) do
    Enum.sort(rows, fn a, b ->
      case {Enum.at(a[key], 0), Enum.at(b[key], 0)} do
        {nil, nil} -> false
        {_, nil} -> false
        {nil, _} -> true
        {x, y} -> Version.compare(x, y) == :lt
      end
    end)
  end

  # Deduplicates the version lists inside each table row.
  defp uniq(versions) do
    Enum.map(versions, fn row -> Enum.uniq(row) end)
  end

  # Expands every entry; a single entry may expand to several versions
  # (e.g. range strings like "1.0/2").
  defp expand(versions) do
    Enum.flat_map(versions, fn version -> do_expand(version) end)
  end

  # NOTE(review): `Version.parse!/1` is fed structs as well as binaries here,
  # so `Version` presumably resolves to a project module (not stdlib
  # `Elixir.Version`, whose `parse!/1` only takes binaries) — confirm the
  # alias at the top of the file.
  defp do_expand(version) when is_binary(version) or is_struct(version) do
    version |> Version.parse!() |> List.wrap()
  end

  defp do_expand(versions) when is_list(versions) do
    [Enum.map(versions, fn {key, versions} -> {key, expand(versions)} end)]
  end

  # Latest version overall / for a given table key.
  def latest(versions), do: List.last(versions)
  def latest(versions, key), do: versions |> get(key) |> List.last()

  # Latest version of each minor/major series.
  def latest_minor(versions), do: do_latest(versions, :minor)
  def latest_minor(versions, key), do: versions |> get(key) |> do_latest(:minor)
  def latest_major(versions), do: do_latest(versions, :major)
  def latest_major(versions, key), do: versions |> get(key) |> do_latest(:major)

  # Keeps the newest version of each `precision` group (:minor or :major).
  # Assumes `versions` is sorted ascending: only `:eq` (same group, replace
  # head) and `:gt` (new group, prepend) are handled; an out-of-order `:lt`
  # would raise a CaseClauseError.
  # NOTE(review): `Version.compare/3` with a precision argument is not stdlib
  # — presumably a project `Version` module; confirm.
  defp do_latest(versions, precision) do
    Enum.reduce(versions, [], fn
      version, [] ->
        [Version.parse!(version)]

      version, [current | rest] = acc ->
        case Version.compare(version, current, precision) do
          :eq -> [Version.parse!(version) | rest]
          :gt -> [Version.parse!(version) | acc]
        end
    end)
    |> Enum.reverse()
  end

  # True when `version` compares equal to any member (value equality, not
  # term equality).
  def member?(versions, version) do
    Enum.any?(versions, fn item -> Version.compare(item, version) == :eq end)
  end

  # True when the two lists share at least one version.
  def intersection?(versions1, versions2) do
    Enum.any?(versions1, fn version -> member?(versions2, version) end)
  end

  def filter(versions, requirement) do
    Enum.filter(versions, fn version -> Version.match?(version, requirement) end)
  end

  # Collects, across all table rows, the `key` versions of every row whose
  # `to_key` versions intersect `to_versions`.
  def compatible(versions, key, {to_key, to_versions}) do
    Enum.flat_map(versions, fn row ->
      case row |> Keyword.get(to_key, []) |> intersection?(to_versions) do
        true -> Keyword.get(row, key, [])
        false -> []
      end
    end)
  end

  # True when `version2` (under `key2`) appears in some row that also
  # contains `version1` (under `key1`).
  def compatible?(versions, {key1, version1}, {key2, version2}) do
    versions
    |> compatible(key2, {key1, List.wrap(version1)})
    |> member?(version2)
  end

  # Cartesian product of the two version lists, keeping only incompatible
  # pairs, each as a two-entry keyword list.
  def incompatible(versions, {key1, versions1}, {key2, versions2}) do
    for version1 <- versions1,
        version2 <- versions2,
        compatible?(versions, {key1, version1}, {key2, version2}) == false do
      [{key1, version1}, {key2, version2}]
    end
  end

  # All versions stored under `key`, deduplicated and sorted.
  def get(versions, key) do
    versions
    |> Enum.flat_map(fn lists -> Keyword.get(lists, key, []) end)
    |> Enum.uniq()
    |> sort_list()
  end

  # Builds the CI matrix: latest Elixir minors matching the requirement,
  # latest compatible OTP majors, and the incompatible pairs to exclude.
  def matrix(versions, opts) do
    elixir =
      versions
      |> get(:elixir)
      |> filter(Keyword.fetch!(opts, :elixir))
      |> latest_minor()

    otp =
      versions
      |> compatible(:otp, {:elixir, elixir})
      |> filter(Keyword.fetch!(opts, :otp))
      |> latest_major()

    exclude = incompatible(versions, {:elixir, elixir}, {:otp, otp})

    [
      elixir: elixir,
      otp: otp,
      exclude: exclude
    ]
  end
end
end
|
lib/git_hub_actions/versions.ex
| 0.912094
| 0.756313
|
versions.ex
|
starcoder
|
defmodule FakersApiWeb.GraphQL.Resolvers.Mutation do
  @moduledoc """
  GraphQL mutation resolvers for people, addresses, contacts and their
  associations.

  Every resolver returns `{:ok, result}` on success or `{:error, message}`
  with a human-readable message suitable for a GraphQL error response.
  """

  alias FakersApi.People, as: Context

  ## People

  @doc "Creates a person from the given attributes."
  def create_person(arguments, _resolution) do
    arguments
    |> Context.create_person()
    |> ok_or("Couldn't create a person from given data. Unique fields might already exist in the database.")
  end

  @doc "Updates the person identified by `:id` with the remaining attributes."
  def update_person(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :id)

    case Context.get_person(person_id) do
      nil ->
        {:error, "No person found with given id."}

      person ->
        person
        |> Context.update_person(arguments)
        |> ok_or("Couldn't update person using given data. Perhaps a field should not be null?")
    end
  end

  ## Addresses

  @doc "Creates an address from the given attributes."
  def create_address(arguments, _resolution) do
    arguments
    |> Context.create_address()
    |> ok_or("Couldn't create an address from given data. Unique fields might already exist in the database.")
  end

  @doc "Updates the address identified by `:id` with the remaining attributes."
  def update_address(arguments, _resolution) do
    {address_id, arguments} = Map.pop(arguments, :id)

    case Context.get_address(address_id) do
      nil ->
        # Fixed: previously said "No person found" for a missing address.
        {:error, "No address found with given id."}

      address ->
        address
        |> Context.update_address(arguments)
        |> ok_or("Couldn't update address using given data. Perhaps a field should not be null?")
    end
  end

  ## Contacts

  @doc "Creates a contact from the given attributes."
  def create_contact(arguments, _resolution) do
    arguments
    |> Context.create_contact()
    |> ok_or("Couldn't create a contact from given data. Unique fields might already exist in the database.")
  end

  @doc "Updates the contact identified by `:id` with the remaining attributes."
  def update_contact(arguments, _resolution) do
    {contact_id, arguments} = Map.pop(arguments, :id)

    case Context.get_contact(contact_id) do
      nil ->
        # Fixed: previously said "No person found" for a missing contact.
        {:error, "No contact found with given id."}

      contact ->
        contact
        |> Context.update_contact(arguments)
        |> ok_or("Couldn't modify a contact from given data. Perhaps a field should not be null?")
    end
  end

  ## Deceased people

  @doc "Registers the person identified by `:person_id` as deceased."
  def create_deceased_person(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)

    case Context.get_person(person_id) do
      nil ->
        {:error, "No person found with given id."}

      person ->
        arguments
        |> Context.create_deceased_person(person)
        |> ok_or("Couldn't register person as deceased.")
    end
  end

  @doc "Removes the deceased record for the person identified by `:person_id`."
  def remove_deceased_person(%{person_id: person_id}, _resolution) do
    case Context.get_deceased_person(person_id) do
      nil ->
        {:error, "No deceased person with given id found."}

      person ->
        person
        |> Context.delete_deceased_person()
        |> ok_or("Couldn't revive person with given id.")
    end
  end

  ## Person <-> address associations

  @doc "Associates an existing person and address."
  def create_person_address(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {address_id, arguments} = Map.pop(arguments, :address_id)

    person = Context.get_person(person_id)
    address = Context.get_address(address_id)

    if is_nil(person) or is_nil(address) do
      {:error, "Person or address do not exist."}
    else
      arguments
      |> Context.create_person_address(person, address)
      |> ok_or("Could not create person and address association. Possible duplicate.")
    end
  end

  @doc "Updates an existing person/address association."
  def update_person_address(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {address_id, arguments} = Map.pop(arguments, :address_id)

    case Context.get_person_address(person_id, address_id) do
      nil ->
        {:error, "Given person address association does not exist."}

      person_address ->
        arguments
        |> Context.update_person_address(person_address)
        |> ok_or("Could not update person address association from given arguments.")
    end
  end

  @doc "Removes an existing person/address association."
  def remove_person_address(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {address_id, _arguments} = Map.pop(arguments, :address_id)

    case Context.get_person_address(person_id, address_id) do
      nil ->
        {:error, "Given person address association does not exist."}

      person_address ->
        person_address
        |> Context.delete_person_address()
        |> ok_or("Could not remove person address association from given arguments.")
    end
  end

  ## Person <-> contact associations

  @doc "Associates an existing person and contact."
  def create_person_contact(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {contact_id, arguments} = Map.pop(arguments, :contact_id)

    person = Context.get_person(person_id)
    contact = Context.get_contact(contact_id)

    if is_nil(person) or is_nil(contact) do
      {:error, "Person or contact do not exist."}
    else
      arguments
      |> Context.create_person_contact(person, contact)
      |> ok_or("Could not create person and contact association. Possible duplicate.")
    end
  end

  @doc "Updates an existing person/contact association."
  def update_person_contact(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {contact_id, arguments} = Map.pop(arguments, :contact_id)

    case Context.get_person_contact(person_id, contact_id) do
      nil ->
        {:error, "Given person contact association does not exist."}

      person_contact ->
        arguments
        |> Context.update_person_contact(person_contact)
        |> ok_or("Could not update person contact association from given arguments.")
    end
  end

  @doc "Removes an existing person/contact association."
  def remove_person_contact(arguments, _resolution) do
    {person_id, arguments} = Map.pop(arguments, :person_id)
    {contact_id, _arguments} = Map.pop(arguments, :contact_id)

    case Context.get_person_contact(person_id, contact_id) do
      nil ->
        {:error, "Given person contact association does not exist."}

      person_contact ->
        person_contact
        |> Context.delete_person_contact()
        |> ok_or("Could not remove person contact association from given arguments.")
    end
  end

  # Collapses a context result into the resolver contract: `{:ok, value}`
  # passes through, anything else becomes `{:error, message}`.
  defp ok_or({:ok, value}, _message), do: {:ok, value}
  defp ok_or(_error, message), do: {:error, message}
end
|
server/lib/fakers_api_web/graphql/resolvers/mutation.ex
| 0.646795
| 0.445771
|
mutation.ex
|
starcoder
|
defmodule HoneylixirTracing do
  @moduledoc """
  Used to trace units of work and send the information to Honeycomb.

  ## Installation

  Adding the library to your mix.exs as a dependency should suffice:

  ```
  def deps() do
    [
      {:honeylixir_tracing, "~> 0.2.0"}
    ]
  end
  ```

  This is the main entrypoint for this package, used to trace units of work by
  wrapping them in a function and report the duration as well as its relation
  to parent work.

  ## Configuration

  All of the required configuration for this package depends on configuration set for
  the `Honeylixir` project, the underlying library used for sending the data. The
  absolute minimum configuration required is to set the `team_writekey` and `dataset`
  fields:

  ```
  config :honeylixir,
    dataset: "your-dataset-name",
    team_writekey: "your-writekey"
  ```

  In addition, optional fields available for this package are as follows:

  |Name|Type|Description|Default|
  |---|---|---|---|
  |`:span_ttl_sec`|`integer`|How long an inactive span should remain in the ets table, in seconds, in case something has gone wrong|`300`|
  |`:reaper_interval_sec`|`integer`|How frequently the Reaper should attempt to cleanup the ets table of orphaned spans|`60`|

  ## Usage

  Basic usage is to wrap any unit of work in a `HoneylixirTracing.span` call. Let's
  say you had the following module in your application already:

  ```
  defmodule TestModule do
    def cool_work(arg1, arg2) do
      arg1 + arg2
    end
  end
  ```

  If you wanted to trace this function, you could do:

  ```
  defmodule TestModule do
    def cool_work(arg1, arg2) do
      span("TestModule.cool_work/2", %{"arg1" => arg1, "arg2" => arg2}, fn ->
        arg1 + arg2
      end)
    end
  end
  ```

  Another option is to wrap the business work in a private function and invoke that
  in the span function:

  ```
  defmodule TestModule do
    def cool_work(arg1, arg2) do
      span("TestModule.cool_work/2", %{"arg1" => arg1, "arg2" => arg2}, fn ->
        do_cool_work(arg1, arg2)
      end)
    end

    defp do_cool_work(arg1, arg) do
      arg1 + arg2
    end
  end
  ```

  In both cases, the return value remains the same. The result of any `span` (`span/2`, `span/3`, `span/4`) calls
  is the result of whatever function is passed in as the work.

  ### Cross-Process traces

  Given this is Elixir running on Erlang, it's quite possible a GenServer or some other
  Process-based design will appear in your system. If this is the case, there are a couple of
  rough recommendations on how to ensure predictable tracing data:

  * For synchronous work, add a final argument of `ctx`, which is a `t:HoneylixirTracing.Propagation.t/0`
    struct, to the callback. This should not be *accepted* by the Client API but instead
    built for the user directly and passed to the Server. In the callback, use that as the
    first argument to a `HoneylixirTracing.span/4` call which wraps your work.
  * For asynchronous work, do *not* start a span from a context passed in.
    Asynchronous work is akin to background work done by a web application, meaning that
    one would consider them linked spans rather than child spans. You can use the
    underlying `Honeylixir` library to send these events along. Utility functions
    may be provided in the future to help with this.

  A small example for doing this within an application for synchronous work can be
  found in the `cross_process_example` project in the `examples` directory.

  ### Adding data to the current span

  If you want to add fields to your spans after initialization or invocation, you can
  use `add_field_data/1` to add data. `add_field_data/1` accepts a Map of strings
  to any encodable entity (just like `span/2` and the underlying `Honeylixir.Event`)
  and modifies the currently active span with the information. If no span is active,
  this function does nothing.

  ```
  defmodule TestModule do
    def some_work() do
      span("TestModule.some_work/0", fn ->
        result = CoolModule.do_something_else()

        HoneylixirTracing.add_field_data(%{"cool_mod.result" => result})
      end)
    end
  end
  ```

  ## Integrations

  Check the [integrations page](INTEGRATIONS.md) for enabling and configuring any
  of the built-in integrations.

  ## The Reaper

  The `Reaper` module handles cleaning up the ets table used to store state. Two pieces
  of configuration relate to this:

  * `span_ttl_sec` -> how long a span should remain in the ets table
  * `:reaper_interval_sec` -> how frequently the Reaper should run in seconds

  If the Span TTL is set too low, it may cleanup active spans. The default is currently
  set to 5 minutes. However, if a span starts and runs for longer than 5 minutes, it
  will be deleted from the ets table. This does not inherently mean your span cannot
  be sent still. If it is still the *currently* active span and does not require a
  parent, then it will send fine. However, if your span does have a parent older than
  5 minutes, it's entirely probable you will end up with an incomplete trace.
  """
  @moduledoc since: "0.3.0"

  alias HoneylixirTracing.Span

  @typedoc """
  The return value of a `span` call: whatever the wrapped work function
  returned.
  """
  @type span_return :: any()

  @typedoc """
  A 0 arity function used as the work to be measured by the span.
  """
  @type work_function :: (() -> any())

  @doc """
  Create and send a span to Honeycomb.
  """
  @doc since: "0.2.0"
  @spec span(String.t(), work_function()) :: span_return()
  def span(span_name, work) when is_binary(span_name) and is_function(work, 0),
    do: span(span_name, %{}, work)

  @doc """
  Create and send a span to Honeycomb by optionally propogating tracing context.

  This form, `span/3`, has two possible calling signatures: the first is a non-propogated
  span with initial fields; the second accepts a propogated trace but no initial fields.
  """
  @doc since: "0.2.0"
  @spec span(HoneylixirTracing.Propagation.t() | nil, String.t(), work_function()) ::
          span_return()
  @spec span(String.t(), Honeylixir.Event.fields_map(), work_function()) :: span_return()
  def span(propagation_or_name, name_or_fields, work)

  def span(span_name, %{} = fields, work) when is_binary(span_name) and is_function(work, 0) do
    Span.setup(span_name, fields) |> do_span(work)
  end

  def span(%HoneylixirTracing.Propagation{} = prop, span_name, work)
      when is_binary(span_name) and is_function(work, 0) do
    Span.setup(prop, span_name, %{})
    |> do_span(work)
  end

  # A nil propagation is treated the same as no propagation at all.
  def span(nil, span_name, work) when is_binary(span_name) and is_function(work, 0) do
    Span.setup(span_name, %{}) |> do_span(work)
  end

  @doc """
  Create and send a span to Honeycomb by propogating tracing context.

  Accepts a `t:HoneylixirTracing.Propagation.t/0` for continuing work from another Process's trace.
  """
  @doc since: "0.2.0"
  @spec span(
          HoneylixirTracing.Propagation.t() | nil,
          String.t(),
          Honeylixir.Event.fields_map(),
          work_function()
        ) :: span_return()
  def span(%HoneylixirTracing.Propagation{} = propagation, span_name, %{} = fields, work)
      when is_binary(span_name) and is_function(work, 0) do
    Span.setup(propagation, span_name, fields)
    |> do_span(work)
  end

  def span(nil, span_name, fields, work)
      when is_binary(span_name) and is_map(fields) and is_function(work, 0) do
    Span.setup(span_name, fields) |> do_span(work)
  end

  # Makes `span` the currently active span, runs the work, and always sends
  # the span and restores the previous one afterwards — even when `work`
  # raises (the exception still propagates to the caller).
  defp do_span(%HoneylixirTracing.Span{} = span, work) do
    {:ok, previous_span} = HoneylixirTracing.Context.set_current_span(span)

    try do
      work.()
      # rescue
      #   err when is_exception(err) ->
      #     HoneylixirTracing.add_field_data(%{"error_type" => err.__struct__, "error" => err.message})
    after
      end_span(previous_span)
    end
  end

  @doc """
  Start a span and manage ending it yourself.

  See `start_span/3`.
  """
  @doc since: "0.3.0"
  @spec start_span(HoneylixirTracing.Propagation.t() | nil, String.t()) ::
          {:ok, HoneylixirTracing.Span.t() | nil}
  @spec start_span(String.t(), Honeylixir.Event.fields_map()) ::
          {:ok, HoneylixirTracing.Span.t() | nil}
  def start_span(propagation_or_name, name_or_fields)

  def start_span(%HoneylixirTracing.Propagation{} = propagation, name) when is_binary(name) do
    start_span(propagation, name, %{})
  end

  def start_span(nil, name) when is_binary(name), do: start_span(name, %{})

  def start_span(name, fields) when is_binary(name) and is_map(fields) do
    Span.setup(name, fields)
    |> HoneylixirTracing.Context.set_current_span()
  end

  @doc """
  Start a span and manage ending it yourself.

  Functionally looks and behaves much like the `span` functions. It accepts some combination of
  a propagation context, a span name, and a set of fields to start a span. The result
  is a tuple of `:ok` and whatever the previous current span was. You can use this
  in an `end_span/1` call to set the current span back to what it used to be.

  Every usage of `start_span` MUST have an `end_span` call or you may end up with
  unfinished spans or traces or other unexpected and undesirable results, such as
  a current span that lives longer than it should. If you can, try to store the
  previous span somewhere you can use to reset the current span. It is
  recommended you only use this in cases where this is impossible since in those
  places you could probably use a function in the `span` family instead. A common
  example for using this is using `:telemetry` events as spans when those events
  only give a duration rather than at least a start time.
  """
  @doc since: "0.3.0"
  @spec start_span(
          HoneylixirTracing.Propagation.t() | nil,
          String.t(),
          Honeylixir.Event.fields_map()
        ) ::
          {:ok, HoneylixirTracing.Span.t() | nil}
  def start_span(%HoneylixirTracing.Propagation{} = propagation, name, fields)
      when is_binary(name) and is_map(fields) do
    Span.setup(propagation, name, fields)
    |> HoneylixirTracing.Context.set_current_span()
  end

  def start_span(nil, name, fields)
      when is_binary(name) and is_map(fields) do
    Span.setup(name, fields)
    |> HoneylixirTracing.Context.set_current_span()
  end

  @doc """
  Used for manually ending the currently active span.

  This SHOULD only be used with `start_span` calls. Any `end_span` call SHOULD have
  a corresponding `start_span` call, though it will not result in an error if there is
  no active span. The optional `previous_span` argument is what the currently active
  span will be set to after the current one is sent.
  """
  @doc since: "0.3.0"
  def end_span(previous_span \\ nil) do
    # Send and clear whatever span is currently active (no-op when none),
    # then make `previous_span` (or nil) the active span again.
    if current_span = HoneylixirTracing.Context.current_span() do
      Span.send(current_span)
      HoneylixirTracing.Context.clear_span(current_span)
    end

    HoneylixirTracing.Context.set_current_span(previous_span)
  end

  @doc """
  Adds field data to the current span.

  This function does nothing if there is no currently active span. Any duplicate field
  names will have their contents replaced. Returns the updated span if one is active,
  `nil` otherwise.
  """
  @doc since: "0.3.0"
  # NOTE(review): the spec references `Honeylixir.Span.t()` — presumably it
  # should be `HoneylixirTracing.Span.t()`; confirm before relying on it.
  @spec add_field_data(Honeylixir.Event.fields_map()) :: Honeylixir.Span.t() | nil
  def add_field_data(fields) when is_map(fields) do
    if current_span = HoneylixirTracing.Context.current_span() do
      new_span = HoneylixirTracing.Span.add_field_data(current_span, fields)
      HoneylixirTracing.Context.set_current_span(new_span)

      new_span
    end
  end

  @doc """
  Provides a `t:Honeylixir.Propagation.t/0` for sharing tracing data between processes.

  If there is no span currently active, this will return `nil`.
  """
  @doc since: "0.2.0"
  @spec current_propagation_context() :: HoneylixirTracing.Propagation.t() | nil
  def current_propagation_context() do
    HoneylixirTracing.Context.current_span()
    |> HoneylixirTracing.Propagation.from_span()
  end

  @doc """
  Helper method for sending a `link` span annotation.

  Accepts a `t:Honeylixir.Propagation.t/0` as the data for what span to link to.
  If no span is currently active, does nothing and returns `nil`. Please consider
  this feature experimental.
  """
  @doc since: "0.3.0"
  def link_to_span(%HoneylixirTracing.Propagation{parent_id: span_id, trace_id: trace_id}) do
    if current_span = HoneylixirTracing.Context.current_span() do
      event =
        Honeylixir.Event.create(%{
          "trace.link.trace_id" => trace_id,
          "trace.link.span_id" => span_id,
          "meta.span_type" => "link",
          "trace.parent_id" => current_span.span_id,
          "trace.trace_id" => current_span.trace_id
        })

      # sample_rate 1 presumably means the link annotation is always sent —
      # confirm against Honeylixir's sampling semantics.
      %{event | sample_rate: 1} |> Honeylixir.Event.send()
    end
  end

  def link_to_span(_), do: nil
end
|
lib/honeylixir_tracing.ex
| 0.910458
| 0.884838
|
honeylixir_tracing.ex
|
starcoder
|
defmodule Graph do
  @moduledoc """
  This module serves as a graph library that enables to handle undirected weighted graphs (directed is in the works) in memory.
  It features simple operations such as adding and removing `nodes` and `edges` and finding the shortest path between nodes.

  Supported features:
  - `Nodes` with an optional heuristic `costs` (for the shortest path algorithm) and and optional `label`
  - `Edges` from and to nodes with certain `costs`
  - Adding and deleting `nodes`
  - Adding and deleting `edges`

  The `Graph` module is strucutred like so:
  - `nodes` is a `Map` that has the node_id (atom) as a key and another `Map` as value. This `Map` conatins `label` and `costs` as keys that refer to the corresponding value.
  - `edges` is a `Map` that has the node_id (atom) as a key and another `Map` as value. The key refers to the starting node of the edge and all nodes stored in the second map refer to the edge's end. Additionally, the second map stores the edge's weight mapped to the end node.
  """
  defstruct nodes: %{}, edges: %{}

  @type node_id :: atom
  @type costs :: non_neg_integer
  @type label :: term
  @type edge_info :: %{node_id => costs}
  @type node_info :: %{label: label, costs: costs}
  @type t :: %__MODULE__{
          nodes: %{node_id => node_info},
          edges: %{node_id => edge_info}
        }

  @doc """
  Creates a new undirected Graph.
  """
  @spec new :: t
  def new, do: %__MODULE__{}

  @doc ~S"""
  Adds an edge to the given graph from a to b & b to a and assigns the according costs. If the nodes a and / or b do not exist they are created (costs and label of these nodes is set to nil).
  If there are no costs set, the default value of 0 will be assigned.

  ## Example
      iex> g = Graph.new |> Graph.add_edge(:a, :b, 5) |> Graph.add_edge(:b, :c, 3)
      %Graph{
        edges: %{a: %{b: 5}, b: %{a: 5, c: 3}, c: %{b: 3}},
        nodes: %{
          a: %{costs: 0, label: nil},
          b: %{costs: 0, label: nil},
          c: %{costs: 0, label: nil}
        }
      }
      iex> Graph.add_edge(g, :b, :c, 9)
      %Graph{
        edges: %{a: %{b: 5}, b: %{a: 5, c: 9}, c: %{b: 9}},
        nodes: %{
          a: %{costs: 0, label: nil},
          b: %{costs: 0, label: nil},
          c: %{costs: 0, label: nil}
        }
      }
  """
  @spec add_edge(t, node_id, node_id, costs) :: t
  def add_edge(%__MODULE__{edges: e} = g, from, to, costs \\ 1) when is_atom(from) and is_atom(to) do
    # Ensure both endpoints exist, then store the edge in both directions
    # (the graph is undirected).
    g = g |> add_node(from) |> add_node(to)
    e = e |> update_edge(from, to, costs) |> update_edge(to, from, costs)

    %{g | edges: e}
  end

  # Inserts or overwrites the directed entry `from -> to` in the edges map.
  defp update_edge(e, from, to, costs) do
    fe = e[from]

    cond do
      fe == nil ->
        Map.put_new(e, from, %{to => costs})

      Map.has_key?(fe, to) ->
        %{e | from => %{fe | to => costs}}

      true ->
        Map.put_new(fe, to, costs) |> (&%{e | from => &1}).()
    end
  end

  @doc ~S"""
  Gets the edge connecting two nodes. Returns nil if there is no connection.

  ## Example
      iex> g = Graph.new |> Graph.add_edge(:a, :b, 3)
      %Graph{
        edges: %{a: %{b: 3}, b: %{a: 3}},
        nodes: %{a: %{costs: 0, label: nil}, b: %{costs: 0, label: nil}}
      }
      iex> Graph.get_edge(g, :a, :b)
      {:a, :b, 3}
      iex> Graph.get_edge(g, :a, :s)
      nil
  """
  @spec get_edge(t, node_id, node_id) :: {node_id, node_id, costs} | nil
  def get_edge(%__MODULE__{edges: e}, from, to) do
    case e[from] do
      nil -> nil
      edges ->
        for {^to, costs} <- Map.to_list(edges) do
          {from, to, costs}
        end |> Enum.at(0)
    end
  end

  # True when an edge exists in either direction between the two nodes.
  defp has_edge?(%__MODULE__{} = g, from, to) do
    get_edge(g, from, to) != nil || get_edge(g, to, from) != nil
  end

  @doc ~S"""
  Deletes an the edge that goes from a to b. The edge is only deleted if it really exists.

  ## Example
      iex> g = Graph.new |> Graph.add_edge(:a, :b, 5) |> Graph.add_edge(:b, :c, 5)
      %Graph{
        edges: %{a: %{b: 5}, b: %{a: 5, c: 5}, c: %{b: 5}},
        nodes: %{
          a: %{costs: 0, label: nil},
          b: %{costs: 0, label: nil},
          c: %{costs: 0, label: nil}
        }
      }
      iex> Graph.delete_edge(g, :b, :c)
      %Graph{
        edges: %{a: %{b: 5}, b: %{a: 5}},
        nodes: %{
          a: %{costs: 0, label: nil},
          b: %{costs: 0, label: nil},
          c: %{costs: 0, label: nil}
        }
      }
  """
  @spec delete_edge(t, node_id, node_id) :: t
  def delete_edge(%__MODULE__{} = g, from, to) do
    # Remove both directions independently; each may or may not exist.
    g = if has_edge?(g, from, to), do: delete_edge!(g, from, to), else: g
    g = if has_edge?(g, to, from), do: delete_edge!(g, to, from), else: g
    g
  end

  def delete_edge!(%__MODULE__{edges: e} = g, from, to) do
    e = %{e | from => Map.delete(e[from], to)}
    # Drop the source key entirely once it has no outgoing edges left.
    # BUGFIX: the original `if ..., do: e = Map.delete(e, from)` rebound `e`
    # only inside the `if` block, so empty maps were never removed and the
    # doctests above could not pass.
    e = if e[from] == %{}, do: Map.delete(e, from), else: e

    %__MODULE__{g | edges: e}
  end

  @doc ~S"""
  Adds a node to the graph with the specified information.

  ## Example
      iex> g = Graph.new |> Graph.add_node(:a, label: "This is a", costs: 2)
      %Graph{edges: %{}, nodes: %{a: %{costs: 2, label: "This is a"}}}
      iex> Graph.add_node(g, :a, costs: 5)
      %Graph{edges: %{}, nodes: %{a: %{costs: 5, label: "This is a"}}}
  """
  @spec add_node(t, node_id, node_info) :: t
  def add_node(%__MODULE__{nodes: n} = g, node, opts \\ []) when is_atom(node) do
    # Re-adding a node keeps its existing label unless a new one is given;
    # costs default to 0.
    label = Keyword.get(opts, :label, n[node][:label])

    costs = case Keyword.get(opts, :costs) do
      nil -> 0
      value -> value
    end

    %__MODULE__{g | nodes: Map.put(n, node, %{label: label, costs: costs})}
  end

  @doc ~S"""
  Gets node info for the ID. Unlike other functions, this returns the information as a map.

  ## Example
      iex> g = Graph.new |> Graph.add_node(:a, label: "This is a", costs: 4)
      %Graph{edges: %{}, nodes: %{a: %{costs: 4, label: "This is a"}}}
      iex> Graph.get_node(g, :a)
      %{costs: 4, label: "This is a"}
  """
  @spec get_node(t, node_id) :: node_info | nil
  def get_node(%__MODULE__{nodes: n}, id), do: n[id]

  @doc ~S"""
  Gets the list of all nodes that have an edge towards or from the given node.

  ## Example
      iex> Graph.new |> Graph.add_edge(:a, :b, 5) |> Graph.add_edge(:b, :c, 5) |> Graph.get_neighbors(:b)
      [:a, :c]
  """
  @spec get_neighbors(t, node_id) :: [node_id]
  def get_neighbors(%__MODULE__{nodes: n} = g, id) do
    for to <- Map.keys(n), has_edge?(g, id, to), do: to
  end

  @doc """
  Deletes a given node plus the edges it is involved in.

  ## Example
      iex> g = Graph.new |> Graph.add_node(:a) |> Graph.add_node(:b) |> Graph.add_edge(:a, :b)
      %Graph{
        edges: %{a: %{b: 1}, b: %{a: 1}},
        nodes: %{a: %{costs: 0, label: nil}, b: %{costs: 0, label: nil}}
      }
      iex> Graph.delete_node(g, :b)
      %Graph{edges: %{}, nodes: %{a: %{costs: 0, label: nil}}}
  """
  @spec delete_node(t, node_id) :: t
  def delete_node(%__MODULE__{nodes: n} = g, node) do
    g = get_neighbors(g, node) |> delete_relations(g, node)

    %__MODULE__{g | nodes: Map.delete(n, node)}
  end

  # Removes every edge between `node` and the listed neighbors.
  defp delete_relations([], %__MODULE__{} = g, _node), do: g
  defp delete_relations([h | t], %__MODULE__{} = g, node) do
    delete_relations(t, g, node) |> delete_edge(node, h)
  end

  @doc """
  Gets the total costs for a path (considering edge weights).
  Returns nil when the path is empty or contains a hop with no edge.

  ## Example
      iex> g = Graph.new |>
      ...>     Graph.add_edge(:s, :a, 3) |>
      ...>     Graph.add_edge(:a, :b, 5) |>
      ...>     Graph.add_edge(:b, :c, 10) |>
      ...>     Graph.add_edge(:c, :d, 3) |>
      ...>     Graph.add_edge(:d, :e, 4) |>
      ...>     Graph.add_edge(:b, :e, 5)
      %Graph{
        edges: %{
          a: %{b: 5, s: 3},
          b: %{a: 5, c: 10, e: 5},
          c: %{b: 10, d: 3},
          d: %{c: 3, e: 4},
          e: %{b: 5, d: 4},
          s: %{a: 3}
        },
        nodes: %{
          a: %{costs: 0, label: nil},
          b: %{costs: 0, label: nil},
          c: %{costs: 0, label: nil},
          d: %{costs: 0, label: nil},
          e: %{costs: 0, label: nil},
          s: %{costs: 0, label: nil}
        }
      }
      iex> Graph.shortest_path(g, :s, :e) |>
      ...>     Graph.path_costs(g)
      13
  """
  @spec path_costs(t, [node_id]) :: costs | nil
  def path_costs(%__MODULE__{} = g, path), do: do_path_costs(g, path)
  def path_costs(path, %__MODULE__{} = g), do: path_costs(g, path)

  # An empty path (e.g. shortest_path/3 found no route) has no defined costs.
  defp do_path_costs(_g, []), do: nil
  defp do_path_costs(_g, [_last]), do: 0
  defp do_path_costs(%__MODULE__{} = g, [f | [t | _] = rest]) do
    # Sum hop costs recursively; a single missing edge poisons the whole
    # result to nil.
    case do_path_costs(g, rest) do
      nil ->
        nil

      tail_costs ->
        if has_edge?(g, f, t), do: edge_costs(g, f, t) + tail_costs, else: nil
    end
  end

  @doc ~S"""
  Returns the costs for the edge from node A to node B. Returns nil if there is no connection.

  ## Example
      iex> g = Graph.new |>
      ...>     Graph.add_edge(:s, :a, 3) |>
      ...>     Graph.add_edge(:a, :b, 5) |>
      ...>     Graph.add_edge(:b, :c, 10) |>
      ...>     Graph.add_edge(:c, :d, 3) |>
      ...>     Graph.add_edge(:d, :e, 4) |>
      ...>     Graph.add_edge(:b, :e, 5)
      iex> Graph.edge_costs(g, :s, :a)
      3
  """
  @spec edge_costs(t, node_id, node_id) :: costs | nil
  def edge_costs(%__MODULE__{} = g, from, to) do
    case get_edge(g, from, to) do
      nil -> nil
      e -> elem(e, 2)
    end
  end

  @doc ~S"""
  Find the shortest path from a to b in the given graph.
  Returns an empty list when no path exists.

  ## Example
      iex> Graph.new |>
      ...>     Graph.add_edge(:s, :a, 3) |>
      ...>     Graph.add_edge(:a, :b, 5) |>
      ...>     Graph.add_edge(:b, :c, 10) |>
      ...>     Graph.add_edge(:c, :d, 3) |>
      ...>     Graph.add_edge(:d, :e, 4) |>
      ...>     Graph.add_edge(:b, :e, 5) |>
      ...>     Graph.shortest_path(:s, :e)
      [:s, :a, :b, :e]
  """
  @spec shortest_path(t, node_id, node_id) :: [node_id]
  def shortest_path(%__MODULE__{} = g, from, to) when is_atom(from) and is_atom(to) do
    # A*-style search: node `costs` act as the heuristic, edge weights as
    # the hop costs.
    pq = Priorityqueue.new
         |> Priorityqueue.push(from, %{costs_to: 0, costs_hop: 0, costs_heur: 0, from: nil})

    do_shortest_path(g, from, to, pq, %{})
  end

  defp do_shortest_path(%__MODULE__{} = g, from, to, pq, processed) do
    pq = case Priorityqueue.pop(pq) do
      # Queue exhausted: no path exists; fall through as an empty path.
      nil -> []

      {_, ^to, data} ->
        # Target reached: reconstruct the (reversed) path.
        Map.put(processed, to, data) |> construct_path(from, to)

      {pq, id, data} ->
        processed = Map.put(processed, id, data)

        neighbors = Enum.filter(get_neighbors(g, id),
          fn(x) -> !(Enum.member?(Map.keys(processed), x)) end)

        insert_pq(g, pq, neighbors, Map.merge(%{key: id}, data))
    end

    # A list result means the search finished (path or []); anything else is
    # the updated queue for the next iteration.
    if is_list(pq) do
      Enum.reverse(pq)
    else
      do_shortest_path(g, from, to, pq, processed)
    end
  end

  # Pushes every unprocessed neighbor with its costs relative to `previous`.
  defp insert_pq(_g, pq, [], _previous), do: pq
  defp insert_pq(%__MODULE__{} = g, pq, [h | t], previous) do
    insert_pq(g, pq, t, previous) |> Priorityqueue.push(h, calculate_costs(g, h, previous))
  end

  defp calculate_costs(g, to, from) do
    {_, _, costs_hop} = get_edge(g, from[:key], to)
    costs_heur = get_node(g, to)[:costs]
    costs_to = from[:costs_to] + costs_hop

    %{costs_to: costs_to, costs_hop: costs_hop, costs_heur: costs_heur, from: from[:key]}
  end

  # Walks the `from` back-pointers from the target to the start, producing
  # the path in reverse order (reversed once by the caller).
  defp construct_path(_processed, from, from), do: [from]
  defp construct_path(processed, from, to) do
    [to] ++ construct_path(processed, from, processed[to][:from])
  end
end
|
lib/graph.ex
| 0.923493
| 0.977905
|
graph.ex
|
starcoder
|
defmodule Ash.DataLayer.Simple do
  @moduledoc """
  A data layer that simply returns structs
  This is the data layer that is used under the hood
  by embedded resources
  """

  @transformers [
    Ash.DataLayer.Simple.Transformers.ValidateDslSections
  ]

  use Ash.Dsl.Extension, transformers: @transformers, sections: []

  alias Ash.Query.Operator.{
    Eq,
    GreaterThan,
    GreaterThanOrEqual,
    In,
    IsNil,
    LessThan,
    LessThanOrEqual
  }

  # Filter operators this data layer can evaluate at runtime.
  @supported_operators [
    In,
    Eq,
    LessThan,
    GreaterThan,
    LessThanOrEqual,
    GreaterThanOrEqual,
    IsNil
  ]

  def can?(_, :create), do: true
  def can?(_, :update), do: true
  def can?(_, :destroy), do: true
  def can?(_, :sort), do: true
  def can?(_, {:sort, _}), do: true
  def can?(_, :filter), do: true
  def can?(_, :boolean_filter), do: true

  # One clause per supported operator, generated at compile time.
  for operator <- @supported_operators do
    def can?(_, {:filter_operator, %unquote(operator){}}), do: true
  end

  def can?(_, _), do: false

  defmodule Query do
    @moduledoc false
    defstruct [:data, :resource, :filter, :api, sort: []]
  end

  def resource_to_query(resource, api), do: %Query{data: [], resource: resource, api: api}

  def run_query(%{data: data, sort: sort, api: api, filter: filter}, _resource) do
    # Apply the filter in memory, then sort the survivors.
    matching = Enum.filter(data, &Ash.Filter.Runtime.matches?(api, &1, filter))

    {:ok, Ash.Actions.Sort.runtime_sort(matching, sort)}
  end

  def filter(query, filter, _resource), do: {:ok, %{query | filter: filter}}

  def sort(query, sort, _resource), do: {:ok, %{query | sort: sort}}

  def set_context(_resource, query, context) do
    {:ok, %{query | data: Map.get(context, :data) || []}}
  end

  def create(_resource, changeset), do: Ash.Changeset.apply_attributes(changeset)

  def update(_resource, changeset), do: Ash.Changeset.apply_attributes(changeset)

  def destroy(_resource, _changeset), do: :ok
end
|
lib/ash/data_layer/simple/simple.ex
| 0.781872
| 0.532243
|
simple.ex
|
starcoder
|
defmodule Grizzly.Transport do
  @moduledoc """
  Behaviour and functions for communicating to `zipgateway`
  """

  defmodule Response do
    @moduledoc """
    The response from parse response
    """
    alias Grizzly.ZWave.Command

    @type t() :: %__MODULE__{
            port: :inet.port_number() | nil,
            ip_address: :inet.ip_address() | nil,
            command: Command.t()
          }

    @enforce_keys [:command]
    defstruct port: nil, ip_address: nil, command: nil
  end

  alias Grizzly.ZWave.{Command, DecodeError}

  @opaque t() :: %__MODULE__{impl: module(), assigns: map()}

  @type socket() :: :ssl.sslsocket() | :inet.socket()

  @type args() :: [
          ip_address: :inet.ip_address(),
          port: :inet.port_number(),
          transport: t()
        ]

  @type parse_opt() :: {:raw, boolean()}

  @typedoc """
  After starting a server, options can be passed back to the caller so that
  the caller can do any other work it might see fit.

  Options:

  * `:strategy` - informs the caller whether the transport needs to wait for
    connections to accept or whether the socket can just process incoming
    messages. A strategy of `:accept` means the socket is okay to start
    accepting new connections.
  """
  @type listen_option() :: {:strategy, :none | :accept}

  @enforce_keys [:impl]
  defstruct assigns: %{}, impl: nil

  @callback open(keyword()) :: {:ok, t()} | {:error, :timeout}

  @callback listen(t()) :: {:ok, t(), [listen_option()]} | {:error, any()}

  @callback accept(t()) :: {:ok, t()} | {:error, any()}

  @callback handshake(t()) :: {:ok, t()} | {:error, any()}

  @callback send(t(), binary(), keyword()) :: :ok

  @callback parse_response(any(), [parse_opt()]) ::
              {:ok, Response.t() | binary() | :connection_closed} | {:error, DecodeError.t()}

  @callback close(t()) :: :ok

  @doc """
  Make a new `Grizzly.Transport`.

  If you need to optionally assign some priv data you can map that into this
  function.
  """
  @spec new(module(), map()) :: t()
  def new(impl, assigns \\ %{}), do: %__MODULE__{impl: impl, assigns: assigns}

  @doc """
  Update the assigns with this field and value.
  """
  @spec assigns(t(), atom(), any()) :: t()
  def assigns(transport, key, value) do
    %__MODULE__{transport | assigns: Map.put(transport.assigns, key, value)}
  end

  @doc """
  Get the assign value for the field, or `default` when it was never set.
  """
  @spec assign(t(), atom(), any()) :: any()
  def assign(transport, key, default \\ nil), do: Map.get(transport.assigns, key, default)

  @doc """
  Listen using a transport.
  """
  @spec listen(t()) :: {:ok, t(), [listen_option()]} | {:error, any()}
  def listen(transport) do
    # Delegate to the implementation module carried by the struct.
    %__MODULE__{impl: impl_module} = transport
    impl_module.listen(transport)
  end

  @doc """
  Accept a new connection.
  """
  @spec accept(t()) :: {:ok, t()} | {:error, any()}
  def accept(transport) do
    %__MODULE__{impl: impl_module} = transport
    impl_module.accept(transport)
  end

  @doc """
  Perform the handshake.
  """
  @spec handshake(t()) :: {:ok, t()} | {:error, any()}
  def handshake(transport) do
    %__MODULE__{impl: impl_module} = transport
    impl_module.handshake(transport)
  end

  @doc """
  Open the transport.
  """
  @spec open(module(), args()) :: {:ok, t()} | {:error, :timeout}
  def open(transport_module, args), do: transport_module.open(args)

  @doc """
  Send binary data using a transport.
  """
  @spec send(t(), binary(), keyword()) :: :ok
  def send(transport, binary, opts \\ []) do
    %__MODULE__{impl: impl_module} = transport
    impl_module.send(transport, binary, opts)
  end

  @doc """
  Parse the response for the transport.
  """
  @spec parse_response(t(), any()) ::
          {:ok, Response.t() | binary() | :connection_closed} | {:error, DecodeError.t()}
  def parse_response(transport, response, opts \\ []) do
    %__MODULE__{impl: impl_module} = transport
    # The transport itself is prepended so the implementation can read assigns.
    impl_module.parse_response(response, [transport: transport] ++ opts)
  end
end
|
lib/grizzly/transport.ex
| 0.864925
| 0.456046
|
transport.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.