defmodule Cog.Pipeline.Tracker do
@pipelines :cog_pipelines
@max_records 500
@short_id_length 15
alias Cog.Events.Util
require Record
Record.defrecordp :pipeline, [id: nil, pid: nil, user: nil, text: nil, count: 0, state: :running, started: nil, finished: nil]
@doc "Configures ETS tables"
def init() do
:ets.new(@pipelines, [:set, :public, :named_table, {:keypos, 2}])
end
def start_pipeline(id, pid, text, user, started) do
pline = pipeline(id: id, pid: pid, user: user, text: text, started: started)
:ets.insert_new(@pipelines, pline)
end
def finish_pipeline(id, finished) do
case :ets.lookup(@pipelines, id) do
[] ->
:ok
[pline] ->
updated = pline
|> pipeline(finished: finished)
|> pipeline(state: :finished)
:ets.insert(@pipelines, updated)
end
prune_old_records(@max_records)
end
def update_pipeline(id, opts) do
case :ets.lookup(@pipelines, id) do
[] ->
false
[pline] ->
updated = update_record(pline, opts)
:ets.insert(@pipelines, updated)
end
end
def all_pipelines() do
:ets.tab2list(@pipelines)
|> Enum.map(&pipeline_to_map/1)
|> Enum.sort(&by_started/2)
end
def pipeline_pid(id) do
results = if String.length(id) == @short_id_length do
pipelines_by(short_id: id)
else
pipelines_by(id: id)
end
case results do
[] ->
nil
[pline] ->
if pipeline(pline, :state) == :finished do
nil
else
pipeline(pline, :pid)
end
end
end
def pipelines_by(user: user) do
:ets.select(@pipelines, [{{:pipeline, :_, :_, user, :_, :_, :_, :_, :_}, [], [:"$_"]}])
|> Enum.map(&pipeline_to_map/1)
|> Enum.sort(&by_started/2)
end
def pipelines_by(state: state) do
:ets.select(@pipelines, [{{:pipeline, :_, :_, :_, :_, :_, state, :_, :_}, [], [:"$_"]}])
|> Enum.map(&pipeline_to_map/1)
|> Enum.sort(&by_started/2)
end
def pipelines_by(id: id) do
:ets.lookup(@pipelines, id)
end
def pipelines_by(short_id: sid) do
:ets.select(@pipelines, [{{:pipeline, :"$1", :_, :_, :_, :_, :_, :_, :_}, [], [:"$1"]}])
|> Enum.filter(&(String.starts_with?(&1, sid)))
|> Enum.flat_map(&(pipelines_by(id: &1)))
end
def prune_old_records(max) do
  count = :ets.info(@pipelines, :size)
  if count > max do
    # Select full ids directly from the table; the display maps built by
    # `pipelines_by/1` carry shortened ids, which are not valid ETS keys.
    finished = :ets.select(@pipelines, [{{:pipeline, :"$1", :_, :_, :_, :_, :finished, :_, :_}, [], [:"$1"]}])
    prune_old_records(finished, count - max)
  end
end
defp prune_old_records([], _), do: :ok
defp prune_old_records(_, 0), do: :ok
defp prune_old_records([id | rest], count) do
  :ets.delete(@pipelines, id)
  prune_old_records(rest, count - 1)
end
defp update_record(pline, []), do: pline
defp update_record(pline, [{:count, v}|rest]) do
updated = pipeline(pline, :count) + v
update_record(pipeline(pline, count: updated), rest)
end
defp update_record(pline, [{:state, v}|rest]) do
update_record(pipeline(pline, state: v), rest)
end
defp pipeline_to_map(pline) do
entry = %{id: short_id(pipeline(pline, :id)),
user: pipeline(pline, :user),
text: pipeline(pline, :text),
processed: pipeline(pline, :count),
state: pipeline(pline, :state),
started: pipeline(pline, :started)}
elapsed = case pipeline(pline, :finished) do
nil ->
Util.elapsed(entry.started, DateTime.utc_now(), :milliseconds)
finished ->
Util.elapsed(entry.started, finished, :milliseconds)
end
Map.put(entry, :elapsed, elapsed)
|> Map.put(:started, format_timestamp(entry.started))
end
defp short_id(id) do
String.slice(id, 0, @short_id_length)
end
defp by_started(p1, p2) do
p1.started >= p2.started
end
defp format_timestamp(ts) do
DateTime.to_iso8601(ts) |> String.replace(~r/\.[0-9]+Z$/, "Z")
end
end
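# A minimal usage sketch of the tracker API above; the pipeline id, command
# text, and user are hypothetical, and `init/0` must run first so the named
# ETS table exists:
#
#     Cog.Pipeline.Tracker.init()
#     Cog.Pipeline.Tracker.start_pipeline("3bc8692e3f8d4f0d8d21", self(), "echo hi", "alice", DateTime.utc_now())
#     Cog.Pipeline.Tracker.update_pipeline("3bc8692e3f8d4f0d8d21", count: 1)
#     Cog.Pipeline.Tracker.finish_pipeline("3bc8692e3f8d4f0d8d21", DateTime.utc_now())
#     Cog.Pipeline.Tracker.all_pipelines()
#     #=> [%{id: "3bc8692e3f8d4f0", user: "alice", text: "echo hi", state: :finished, ...}]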
# Source: lib/cog/pipeline/tracker.ex
defmodule Meeseeks.Result do
@moduledoc """
Results are the product of running selections on a document, and package
together a node id and the `Meeseeks.Document` for which that id is
valid.
Results are generally used in one of two ways: either data, such as an
element's tag, is extracted from a result, or further selections are run
using the result as a source.
When a result is used as a source for further selection, the original
document the result came from is used for context, meaning that questions
about the result's ancestors may be asked, but also that queries involving
ancestors need to account for the whole document, not just the contents of
the result.
## Examples
iex> import Meeseeks.CSS
iex> document = Meeseeks.parse("<div><ul><li>1</li><li>2</li></ul></div>")
#Meeseeks.Document<{...}>
iex> ul = Meeseeks.one(document, css("ul"))
#Meeseeks.Result<{ <ul><li>1</li><li>2</li></ul> }>
iex> Meeseeks.tag(ul)
"ul"
iex> Meeseeks.all(ul, css("li")) |> List.last()
#Meeseeks.Result<{ <li>2</li> }>
"""
alias Meeseeks.{Document, Result, TupleTree}
@enforce_keys [:document, :id]
defstruct document: nil, id: nil
@type t :: %Result{document: Document.t(), id: Document.node_id()}
@doc """
Returns the value for attribute in result, or nil if there isn't one.
"""
@spec attr(Result.t(), String.t()) :: String.t() | nil
def attr(result, attribute)
def attr(%Result{id: id, document: document}, attribute) do
node = Document.get_node(document, id)
Document.Node.attr(node, attribute)
end
@doc """
Returns the result's attributes list, which may be empty, or nil if
result represents a node without attributes.
"""
@spec attrs(Result.t()) :: [{String.t(), String.t()}] | nil
def attrs(result)
def attrs(%Result{id: id, document: document}) do
node = Document.get_node(document, id)
Document.Node.attrs(node)
end
@doc """
Returns the combined data of result or result's children, which may be an
empty string.
Once the data has been combined the whitespace is compacted by replacing
all instances of more than one whitespace character with a single space
and then trimmed.
Data is the content of `<script>` or `<style>` tags, or the content of
comments starting with "[CDATA[" and ending with "]]". The latter behavior
is to support the extraction of CDATA from HTML, since HTML5 parsers parse
CDATA as comments.
## Options
* `:collapse_whitespace` - Boolean determining whether or not to replace
blocks of whitespace with a single space character. Defaults to `true`.
* `:trim` - Boolean determining whether or not to trim the resulting
text. Defaults to `true`.
"""
@spec data(Result.t(), Keyword.t()) :: String.t()
def data(result, opts \\ [])
def data(%Result{id: id, document: document}, opts) do
node = Document.get_node(document, id)
Document.Node.data(node, document, opts)
end
@doc """
Returns a map of result's data attributes, or nil if result represents a
node without attributes.
Behaves like HTMLElement.dataset; only valid data attributes are included,
and attribute names have "data-" removed and are converted to camelCase.
See: https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/dataset
"""
@spec dataset(Result.t()) :: %{optional(String.t()) => String.t()} | nil
def dataset(result) do
case attrs(result) do
nil -> nil
[] -> %{}
attributes -> attributes_to_dataset(attributes)
end
end
defp attributes_to_dataset(attributes) do
Enum.reduce(attributes, %{}, fn {attribute, value}, dataset ->
case Regex.run(~r/^data-([a-z0-9\-\.\:\_]+)$/, attribute) do
[_, raw_name] -> Map.put(dataset, dataset_name(raw_name), value)
_ -> dataset
end
end)
end
defp dataset_name(raw_name) do
Regex.replace(~r/\-([a-z])/, raw_name, fn _, c ->
String.upcase(c)
end)
end
@doc """
Returns the combined HTML of result and its descendants.
"""
@spec html(Result.t()) :: String.t()
def html(result)
def html(%Result{id: id, document: document}) do
node = Document.get_node(document, id)
Document.Node.html(node, document)
end
@doc """
Returns the combined text of result or result's children, which may be an
empty string.
Once the text has been combined the whitespace is compacted by replacing
all instances of more than one whitespace character with a single space
and then trimmed.
## Options
* `:collapse_whitespace` - Boolean determining whether or not to replace
blocks of whitespace with a single space character. Defaults to `true`.
* `:trim` - Boolean determining whether or not to trim the resulting
text. Defaults to `true`.
"""
@spec own_text(Result.t(), Keyword.t()) :: String.t()
def own_text(result, opts \\ [])
def own_text(%Result{id: id, document: document}, opts) do
node = Document.get_node(document, id)
Document.Node.own_text(node, document, opts)
end
@doc """
Returns result's tag, or nil if result represents a node without a tag.
"""
@spec tag(Result.t()) :: String.t() | nil
def tag(result)
def tag(%Result{id: id, document: document}) do
node = Document.get_node(document, id)
Document.Node.tag(node)
end
@doc """
Returns the combined text of result or result's descendants, which may be
an empty string.
Once the text has been combined the whitespace is compacted by replacing
all instances of more than one whitespace character with a single space
and then trimmed.
## Options
* `:collapse_whitespace` - Boolean determining whether or not to replace
blocks of whitespace with a single space character. Defaults to `true`.
* `:trim` - Boolean determining whether or not to trim the resulting
text. Defaults to `true`.
"""
@spec text(Result.t(), Keyword.t()) :: String.t()
def text(result, opts \\ [])
def text(%Result{id: id, document: document}, opts) do
node = Document.get_node(document, id)
Document.Node.text(node, document, opts)
end
@doc """
Returns a `Meeseeks.TupleTree` of result and its descendants.
"""
@spec tree(Result.t()) :: TupleTree.node_t()
def tree(result)
def tree(%Result{id: id, document: document}) do
node = Document.get_node(document, id)
Document.Node.tree(node, document)
end
end
defimpl Inspect, for: Meeseeks.Result do
@moduledoc false
alias Meeseeks.Result
def inspect(result, _opts) do
result_html =
Result.html(result)
|> String.replace(~r/[\s]+/, " ")
"#Meeseeks.Result<{ #{result_html} }>"
end
end
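# A short extraction sketch, assuming the default Meeseeks HTML parser is
# available; the "data-" name conversion follows `dataset/1` above, and the
# markup is hypothetical:
#
#     import Meeseeks.CSS
#     doc = Meeseeks.parse(~s(<div data-first-name="Jane">Hi</div>))
#     div = Meeseeks.one(doc, css("div"))
#     Meeseeks.Result.dataset(div)
#     #=> %{"firstName" => "Jane"}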
# Source: lib/meeseeks/result.ex
defmodule Nostrum.Struct.User.Flags do
@moduledoc """
Struct representing the flags a user account can have
"""
import Bitwise
defstruct staff: false,
partner: false,
hypesquad_events: false,
bug_hunter_level_1: false,
hypesquad_bravery: false,
hypesquad_brilliance: false,
hypesquad_balance: false,
early_supporter: false,
team_user: false,
system: false,
bug_hunter_level_2: false,
verified_bot: false,
verified_developer: false
@typedoc """
Discord Employee
"""
@type staff :: boolean
@typedoc """
Discord Partner
"""
@type partner :: boolean
@typedoc """
HypeSquad Events
"""
@type hypesquad_events :: boolean
@typedoc """
Bug Hunter (Level 1)
"""
@type bug_hunter_level_1 :: boolean
@typedoc """
HypeSquad Bravery
"""
@type hypesquad_bravery :: boolean
@typedoc """
HypeSquad Brilliance
"""
@type hypesquad_brilliance :: boolean
@typedoc """
HypeSquad Balance
"""
@type hypesquad_balance :: boolean
@typedoc """
Early Supporter
"""
@type early_supporter :: boolean
@typedoc """
Team User
"""
@type team_user :: boolean
@typedoc """
System user
"""
@type system :: boolean
@typedoc """
Bug Hunter (Level 2)
"""
@type bug_hunter_level_2 :: boolean
@typedoc """
Verified bot
"""
@type verified_bot :: boolean
@typedoc """
Verified developer
"""
@type verified_developer :: boolean
@type flags :: %__MODULE__{
staff: staff,
partner: partner,
hypesquad_events: hypesquad_events,
bug_hunter_level_1: bug_hunter_level_1,
hypesquad_bravery: hypesquad_bravery,
hypesquad_brilliance: hypesquad_brilliance,
hypesquad_balance: hypesquad_balance,
early_supporter: early_supporter,
team_user: team_user,
system: system,
bug_hunter_level_2: bug_hunter_level_2,
verified_bot: verified_bot,
verified_developer: verified_developer
}
@type t :: flags
@flag_values [
staff: 1 <<< 0,
partner: 1 <<< 1,
hypesquad_events: 1 <<< 2,
bug_hunter_level_1: 1 <<< 3,
hypesquad_bravery: 1 <<< 6,
hypesquad_brilliance: 1 <<< 7,
hypesquad_balance: 1 <<< 8,
early_supporter: 1 <<< 9,
team_user: 1 <<< 10,
system: 1 <<< 12,
bug_hunter_level_2: 1 <<< 14,
verified_bot: 1 <<< 16,
verified_developer: 1 <<< 17
]
@doc """
Constructs a flag struct based on an integer from the Discord API (either public_flags or flags).
## Examples
```elixir
iex> Nostrum.Struct.User.Flags.from_integer(131842)
%Nostrum.Struct.User.Flags{
bug_hunter_level_1: false,
bug_hunter_level_2: false,
early_supporter: true,
hypesquad_balance: true,
hypesquad_bravery: false,
hypesquad_brilliance: false,
hypesquad_events: false,
partner: true,
staff: false,
system: false,
team_user: false,
verified_bot: false,
verified_developer: true
}
```
"""
@spec from_integer(integer()) :: t
def from_integer(flag_value) do
boolean_list =
Enum.map(@flag_values, fn {flag, value} ->
{flag, (flag_value &&& value) == value}
end)
struct(__MODULE__, boolean_list)
end
@doc """
Convert a flag struct to an integer value.
## Examples
```elixir
iex> my_flags = %Nostrum.Struct.User.Flags{
...> bug_hunter_level_1: false,
...> bug_hunter_level_2: false,
...> early_supporter: true,
...> hypesquad_balance: true,
...> hypesquad_bravery: false,
...> hypesquad_brilliance: false,
...> hypesquad_events: false,
...> partner: true,
...> staff: false,
...> system: false,
...> team_user: false,
...> verified_bot: false,
...> verified_developer: true
...> }
iex> Nostrum.Struct.User.Flags.to_integer(my_flags)
131842
```
"""
@spec to_integer(t) :: integer()
def to_integer(flag_struct) do
booleans =
flag_struct
|> Map.from_struct()
|> Map.to_list()
Enum.reduce(booleans, 0, fn {flag, enabled}, flag_value ->
case enabled do
true -> flag_value ||| @flag_values[flag]
false -> flag_value
end
end)
end
end
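# As a sanity check of the bitfield layout (values taken from the
# @flag_values table above): 131842 = 2 + 256 + 512 + 131072, i.e.
# partner (1 <<< 1) + hypesquad_balance (1 <<< 8) + early_supporter (1 <<< 9)
# + verified_developer (1 <<< 17), matching the doctest. Round-tripping
# should therefore hold:
#
#     131842
#     |> Nostrum.Struct.User.Flags.from_integer()
#     |> Nostrum.Struct.User.Flags.to_integer()
#     #=> 131842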
# Source: lib/nostrum/struct/user/flags.ex
defmodule ExState do
@moduledoc """
`ExState` loads and persists workflow execution to a database through Ecto.
The `ExState.Execution` is built through the subject's `:workflow`
association.
## Setup
defmodule ShipmentWorkflow do
use ExState.Definition
workflow "shipment" do
subject :shipment, Shipment
initial_state :preparing
state :preparing do
state :packing do
on :packed, :sealing
end
state :sealing do
on :unpack, :packing
on :sealed, :sealed
end
state :sealed do
final
end
on_final :shipping
end
state :shipping do
on :shipped, :in_transit
end
state :in_transit do
on :arrival, :arrived
end
state :arrived do
on :accepted, :complete
on :return, :returning
end
state :returning do
on :arrival, :returned
end
state :returned do
on :replace, :preparing
end
state :complete do
final
end
end
end
defmodule Shipment do
use Ecto.Schema
use ExState.Ecto.Subject
schema "shipments" do
has_workflow ShipmentWorkflow
end
end
## Creating
    shipment = %Shipment{id: 1}
    {:ok, execution} = ExState.create(shipment)
## Updating
    shipment = %Shipment{id: 1}
    {:ok, shipment} =
      shipment
      |> ExState.load()
      |> ExState.Execution.transition!(:packed)
      |> ExState.Execution.transition!(:sealed)
      |> ExState.persist()
    shipment.workflow.state #=> "shipping"
    {:error, reason} = ExState.transition(shipment, :return)
    reason #=> "no transition from state shipping for event :return"
"""
import Ecto.Query
alias ExState.Execution
alias ExState.Result
alias ExState.Ecto.Workflow
alias ExState.Ecto.WorkflowStep
alias ExState.Ecto.Subject
alias Ecto.Multi
alias Ecto.Changeset
defp repo do
Application.fetch_env!(:ex_state, :repo)
end
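# The repo is looked up from application config at call time, so the host
# application must provide one; a minimal setup sketch, assuming `MyApp.Repo`
# is your Ecto repo:
#
#     # config/config.exs
#     config :ex_state, repo: MyApp.Repo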
@spec create(struct()) :: {:ok, Execution.t()} | {:error, any()}
def create(subject) do
create_multi(subject)
|> repo().transaction()
|> Result.Multi.extract(:subject)
|> Result.map(&load/1)
end
@spec create!(struct()) :: Execution.t()
def create!(subject) do
create(subject) |> Result.get()
end
@spec create_multi(struct()) :: Multi.t()
def create_multi(%queryable{} = subject) do
Multi.new()
|> Multi.insert(:workflow, create_changeset(subject))
|> Multi.run(:subject, fn _repo, %{workflow: workflow} ->
subject
|> queryable.changeset(%{workflow_id: workflow.id})
|> repo().update()
|> Result.map(&Subject.put_workflow(&1, workflow))
end)
end
@spec load(struct()) :: Execution.t() | nil
def load(subject) do
with workflow when not is_nil(workflow) <- get(subject),
definition <- Subject.workflow_definition(subject),
execution <- Execution.continue(definition, workflow.state),
execution <- Execution.put_subject(execution, subject),
execution <- with_completed_steps(execution, workflow),
execution <- Execution.with_meta(execution, :workflow, workflow) do
execution
end
end
defp get(subject) do
subject
|> Ecto.assoc(Subject.workflow_association(subject))
|> preload(:steps)
|> repo().one()
end
defp with_completed_steps(execution, workflow) do
completed_steps = Workflow.completed_steps(workflow)
Enum.reduce(completed_steps, execution, fn step, execution ->
Execution.with_completed(execution, step.state, step.name, step.decision)
end)
end
@spec transition(struct(), any(), keyword()) :: {:ok, struct()} | {:error, any()}
def transition(subject, event, opts \\ []) do
load(subject)
|> Execution.with_meta(:opts, opts)
|> Execution.transition(event)
|> map_execution_error()
|> Result.flat_map(&persist/1)
end
@spec complete(struct(), any(), keyword()) :: {:ok, struct()} | {:error, any()}
def complete(subject, step_id, opts \\ []) do
load(subject)
|> Execution.with_meta(:opts, opts)
|> Execution.complete(step_id)
|> map_execution_error()
|> Result.flat_map(&persist/1)
end
@spec decision(struct(), any(), any(), keyword()) :: {:ok, struct()} | {:error, any()}
def decision(subject, step_id, decision, opts \\ []) do
load(subject)
|> Execution.with_meta(:opts, opts)
|> Execution.decision(step_id, decision)
|> map_execution_error()
|> Result.flat_map(&persist/1)
end
defp map_execution_error({:error, reason, _execution}), do: {:error, reason}
defp map_execution_error(result), do: result
@spec persist(Execution.t()) :: {:ok, struct()} | {:error, any()}
def persist(execution) do
actions_multi =
Enum.reduce(Enum.reverse(execution.actions), Multi.new(), fn action, multi ->
Multi.run(multi, action, fn _, _ ->
case Execution.execute_action(execution, action) do
{:ok, execution, result} -> {:ok, {execution, result}}
e -> e
end
end)
end)
Multi.new()
|> Multi.run(:workflow, fn _repo, _ ->
workflow = Map.fetch!(execution.meta, :workflow)
opts = Map.get(execution.meta, :opts, [])
update_workflow(workflow, execution, opts)
end)
|> Multi.append(actions_multi)
|> repo().transaction()
|> case do
{:ok, %{workflow: workflow} = results} ->
actions_multi
|> Multi.to_list()
|> List.last()
|> case do
nil ->
{:ok, Subject.put_workflow(Execution.get_subject(execution), workflow)}
{action, _} ->
case Map.get(results, action) do
nil ->
{:ok, Subject.put_workflow(Execution.get_subject(execution), workflow)}
{execution, _} ->
{:ok, Subject.put_workflow(Execution.get_subject(execution), workflow)}
end
end
{:error, _, reason, _} ->
{:error, reason}
end
end
defp update_workflow(workflow, execution, opts) do
workflow
|> update_changeset(execution, opts)
|> repo().update()
end
defp create_changeset(subject) do
params =
Subject.workflow_definition(subject)
|> Execution.new()
|> Execution.put_subject(subject)
|> Execution.dump()
Workflow.new(params)
|> Changeset.cast_assoc(:steps,
required: true,
with: fn step, params ->
step
|> WorkflowStep.changeset(params)
end
)
end
defp update_changeset(workflow, execution, opts) do
params =
execution
|> Execution.dump()
|> put_existing_step_ids(workflow)
workflow
|> Workflow.changeset(params)
|> Changeset.cast_assoc(:steps,
required: true,
with: fn step, params ->
step
|> WorkflowStep.changeset(params)
|> WorkflowStep.put_completion(Enum.into(opts, %{}))
end
)
end
defp put_existing_step_ids(params, workflow) do
Map.update(params, :steps, [], fn steps ->
Enum.map(steps, fn step -> put_existing_step_id(step, workflow.steps) end)
end)
end
defp put_existing_step_id(step, existing_steps) do
Enum.find(existing_steps, fn existing_step ->
step.state == existing_step.state and step.name == existing_step.name
end)
|> case do
nil ->
step
existing_step ->
Map.put(step, :id, existing_step.id)
end
end
end
# Source: lib/ex_state.ex
defmodule Astro.Utils do
@moduledoc false
import Astro.Guards
@radians_to_degrees 180.0 / :math.pi()
def to_degrees(radians) do
radians * @radians_to_degrees
end
def to_radians(degrees) do
degrees / @radians_to_degrees
end
@doc """
Calculates the modulo of a number (integer, float).
Note that this function uses `floored division` whereas the builtin `rem`
function uses `truncated division`. See `Decimal.rem/2` if you want a
`truncated division` function for Decimals that will return the same value as
the BIF `rem/2` but in Decimal form.
See [Wikipedia](https://en.wikipedia.org/wiki/Modulo_operation) for an
explanation of the difference.
## Examples
iex> Cldr.Math.mod(1234.0, 5)
4.0
iex> Cldr.Math.mod(Decimal.new("1234.456"), 5)
#Decimal<4.456>
iex> Cldr.Math.mod(Decimal.new("123.456"), Decimal.new("3.4"))
#Decimal<1.056>
iex> Cldr.Math.mod Decimal.new("123.456"), 3.4
#Decimal<1.056>
"""
@spec mod(number, number) :: number
def mod(number, modulus) when is_float(number) and is_number(modulus) do
number - Float.floor(number / modulus) * modulus
end
def mod(number, modulus) when is_integer(number) and is_integer(modulus) do
modulo =
number
|> Integer.floor_div(modulus)
|> Kernel.*(modulus)
number - modulo
end
def mod(number, modulus) when is_integer(number) and is_number(modulus) do
modulo =
number
|> Kernel./(modulus)
|> Float.floor()
|> Kernel.*(modulus)
number - modulo
end
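# Floored and truncated division differ when the operands have opposite
# signs; a quick check against the clauses above:
#
#     Astro.Utils.mod(-5, 3)
#     #=> 1
#     rem(-5, 3)
#     #=> -2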
@doc """
Returns the remainder and dividend of two numbers.
"""
@spec div_mod(number, number) :: {number, number}
def div_mod(n1, n2) when is_integer(n1) and is_integer(n2) do
div = div(n1, n2)
mod = n2 - div * n2
{div, mod}
end
def div_mod(n1, n2) when is_number(n1) and is_number(n2) do
div = n1 / n2
mod = n2 - div * n2
{div, mod}
end
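# Worked example for the integer clause: div(7, 3) is 2 and 7 - 2 * 3 is 1:
#
#     Astro.Utils.div_mod(7, 3)
#     #=> {2, 1}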
def normalize_location({lng, lat, alt}) when is_lat(lat) and is_lng(lng) and is_alt(alt) do
%Geo.PointZ{coordinates: {lng, lat, alt}}
end
def normalize_location({lng, lat}) when is_lat(lat) and is_lng(lng) do
%Geo.PointZ{coordinates: {lng, lat, 0.0}}
end
def normalize_location(%Geo.Point{coordinates: {lng, lat}}) when is_lat(lat) and is_lng(lng) do
%Geo.PointZ{coordinates: {lng, lat, 0.0}}
end
def normalize_location(%Geo.PointZ{coordinates: {lng, lat, alt}} = location)
when is_lat(lat) and is_lng(lng) and is_alt(alt) do
location
end
def cos(degrees) do
degrees
|> to_radians
|> :math.cos()
end
end
# Source: lib/astro/utils.ex
defmodule Univrse.Recipient do
@moduledoc """
A Univrse Recipient is a structure attached to an `t:Univrse.Envelope.t/0`
that helps the intended recipient(s) decrypt the encrypted payload.
An encrypted Envelope may contain one or multiple Recipient structures.
Where the Envelope is intended for a single recipient, the Recipient structure
is merely a set of headers that helps the intended recipient identify what key
and algorithm is needed to decrypt the payload.
Where the Envelope is intended for multiple recipients, a Recipient structure
may also contain an encrypted Key. In this case, the intended recipient must
decrypt the content encryption key, which they can then use to decrypt the
payload.
"""
alias Univrse.{Alg, Envelope, Header, Key}
import Univrse.Util, only: [tag_binary: 1]
defstruct header: %Header{},
key: nil
@typedoc "Recipient struct"
@type t :: %__MODULE__{
header: Header.t,
key: binary | Key.t | nil
}
@doc """
Decrypts the Envelope or Recipient, using the given encryption key.
If an Envelope is being decrypted and it contains multiple recipients, it is
assumed the key belongs to the first recipient. Otherwise, see
`Envelope.decrypt_at/4`.
A keyword list of options can be given for the relevant encryption algorithm.
"""
@spec decrypt(t | Envelope.t, Key.t, keyword) :: {:ok, t | Envelope.t} | {:error, any}
def decrypt(envelope_or_recipient, key, opts \\ [])
def decrypt(%__MODULE__{header: header, key: encrypted} = recipient, %Key{} = key, opts)
when is_binary(encrypted)
do
alg = Map.get(header.headers, "alg")
opts = header.headers
|> Map.take(["epk", "iv", "tag"])
|> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
|> Keyword.merge(opts)
recipient.key
|> Alg.decrypt(alg, key, opts)
|> case do
{:ok, result} ->
with {:ok, %Key{} = key} <- Key.decode(result) do
{:ok, Map.put(recipient, :key, key)}
end
{:error, error} ->
{:error, error}
end
end
def decrypt(%Envelope{payload: payload, recipient: recipient} = env, %Key{} = key, opts)
when is_binary(payload) and not is_nil(recipient)
do
# Get the first header if list of recipients
header = case recipient do
%__MODULE__{header: header, key: nil} -> header
[%__MODULE__{header: header, key: nil} | _] -> header
end
alg = Map.get(header.headers, "alg")
aad = CBOR.encode(["enc", env.header, Keyword.get(opts, :aad, "")])
opts = header.headers
|> Map.take(["epk", "iv", "tag"])
|> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
|> Keyword.merge(opts)
|> Keyword.put(:aad, aad)
env.payload
|> Alg.decrypt(alg, key, opts)
|> case do
{:ok, payload} ->
Envelope.decode_payload(env, payload)
{:error, error} ->
{:error, error}
end
end
@doc """
Encrypts the Envelope payload using the given key or list of keys.
A map of headers must be given, including at least the encryption `alg` value.
A keyword list of options can be given for the relevant encryption algorithm.
Where a list of keys is given, the first key is taken as the content key and
used to encrypt the payload. The content key is then encrypted by each
subsequent key and included in the Recipient structs that are attached to the
Envelope.
When encrypting to multiple recipients, it is possible to specify different
algorithms for each key by giving a list of tuple pairs. The first element of
each pair is the key and the second is a map of headers.
## Examples
Encrypts for a single recipient:
Recipient.encrypt(env, aes_key, %{"alg" => "A128GCM"})
Encrypts for multiple recipients using the same algorithm:
Recipient.encrypt(env, [aes_key, rec_key], %{"alg" => "A128GCM"})
Encrypts for multiple recipients using different algorithms:
Recipient.encrypt(env, [
aes_key,
{rec1_key, %{"alg" => "ECDH-ES+A128GCM"}},
{rec2_key, %{"alg" => "ECDH-ES+A128GCM"}}
], %{"alg" => "A128GCM"})
"""
@spec encrypt(Envelope.t | Key.t, Key.t | list(Key.t) | list({Key.t, map}), map, keyword) ::
{:ok, Envelope.t | t | list(t)} |
{:error, any}
def encrypt(envelope_or_key, key, headers, opts \\ [])
def encrypt(%Envelope{} = env, [master | keys], headers, opts) do
{mkey, mheaders} = merge_key_headers(master, headers)
with {:ok, env} <- encrypt(env, mkey, mheaders, opts),
{:ok, recipients} <- encrypt(mkey, keys, headers, opts)
do
env = Enum.reduce(recipients, env, & Envelope.push(&2, &1))
{:ok, env}
end
end
def encrypt(%Envelope{header: header} = env, %Key{} = key, %{"alg" => alg} = headers, opts) do
aad = CBOR.encode(["enc", header, Keyword.get(opts, :aad, "")])
opts = headers
|> Map.take(["iv"])
|> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
|> Keyword.merge(opts)
|> Keyword.put(:aad, aad)
env
|> Envelope.encode_payload()
|> Alg.encrypt(alg, key, opts)
|> case do
{:ok, encrypted, new_headers} ->
headers = Map.merge(headers, new_headers)
recipient = wrap(nil, headers)
env = env
|> Map.put(:payload, encrypted)
|> Envelope.push(recipient)
{:ok, env}
{:error, error} ->
{:error, error}
end
end
def encrypt(%Key{} = rkey, keys, headers, opts) when is_list(keys) do
Enum.reduce_while(keys, [], fn key, result ->
{key, headers} = merge_key_headers(key, headers)
case encrypt(rkey, key, headers, opts) do
{:ok, %__MODULE__{} = recipient} ->
{:cont, [recipient | result]}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
result when is_list(result) ->
{:ok, Enum.reverse(result)}
{:error, error} ->
{:error, error}
end
end
def encrypt(%Key{} = rkey, key, %{"alg" => alg} = headers, opts) do
aad = Keyword.get(opts, :aad, "")
opts = headers
|> Map.take(["iv"])
|> Enum.map(fn {k, v} -> {String.to_atom(k), v} end)
|> Keyword.merge(opts)
|> Keyword.put(:aad, aad)
rkey
|> Key.encode()
|> Alg.encrypt(alg, key, opts)
|> case do
{:ok, encrypted, new_headers} ->
headers = Map.merge(headers, new_headers)
recipient = wrap(encrypted, headers)
{:ok, recipient}
{:error, error} ->
{:error, error}
end
end
@doc """
Wraps the given key and headers in a new Recipient struct.
"""
@spec wrap(binary | nil, map | Header.t) :: t
def wrap(key, headers \\ %{})
def wrap(key, %Header{} = header),
do: %__MODULE__{header: header, key: key}
def wrap(key, %{} = headers),
do: %__MODULE__{header: Header.wrap(headers), key: key}
# Merges key headers with recipient headers
defp merge_key_headers({key, key_headers}, headers),
do: {key, Map.merge(headers, key_headers)}
defp merge_key_headers(key, headers), do: {key, headers}
defimpl CBOR.Encoder do
alias Univrse.Recipient
def encode_into(%Recipient{header: header, key: key}, acc) do
CBOR.Encoder.encode_into([header, tag_binary(key)], acc)
end
end
end
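# `wrap/2` is the low-level constructor used by `encrypt/4`; for instance, a
# recipient carrying headers only (no encrypted content key):
#
#     Univrse.Recipient.wrap(nil, %{"alg" => "A128GCM"})
#     #=> %Univrse.Recipient{header: %Univrse.Header{...}, key: nil}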
# Source: lib/univrse/recipient.ex
defmodule BankAccount do
@moduledoc """
A bank account that supports access from multiple processes.
"""
@typedoc """
An account handle.
"""
@opaque account :: pid
defp manage_account({:closed, balance}, _any_command) do
{{:error, :account_closed}, {:closed, balance}}
end
defp manage_account({:open, balance}, command) do
case command do
:close ->
{nil, {:closed, balance}}
:balance ->
{balance, {:open, balance}}
{:update, amount} ->
{:ok, {:open, balance + amount}}
end
end
@doc """
Open the bank. Makes the account available.
"""
@spec open_bank() :: account
def open_bank() do
{:ok, pid} = Agent.start(fn -> {:open, 0} end)
pid
end
@doc """
Close the bank. Makes the account unavailable.
Be aware that this does NOT terminate the underlying process. This helps to
differentiate non-existing/deleted accounts from closed ones. Also this
reserves the PID so it can't even theoretically be reused for another bank
account.
## Examples
iex> account = BankAccount.open_bank()
iex> Process.alive?(account)
true
iex> BankAccount.close_bank(account)
iex> Process.alive?(account)
true
iex> BankAccount.balance(account)
{:error, :account_closed}
"""
@spec close_bank(account) :: nil | {:error, :account_closed}
def close_bank(account) do
Agent.get_and_update(account, fn s -> manage_account(s, :close) end)
end
@doc """
Deletes the bank.
Be aware that this frees the PID of the bank to be (theoretically) reused in
the future. Client code is responsible for sweeping all references to this account.
## Examples
iex> account = BankAccount.open_bank()
iex> Process.alive?(account)
true
iex> BankAccount.delete_bank(account)
iex> Process.alive?(account)
false
"""
def delete_bank(account) do
Agent.stop(account)
end
@doc """
Get the account's balance.
"""
@spec balance(account) :: integer
def balance(account) do
Agent.get_and_update(account, fn s -> manage_account(s, :balance) end)
end
@doc """
Update the account's balance by adding the given amount which may be negative.
"""
@spec update(account, integer) :: any
def update(account, amount) do
Agent.get_and_update(account, fn s -> manage_account(s, {:update, amount}) end)
end
end
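# A short usage sketch; updates are serialized through the Agent, so
# concurrent callers see a consistent balance:
#
#     account = BankAccount.open_bank()
#     BankAccount.update(account, 100) #=> :ok
#     BankAccount.update(account, -30) #=> :ok
#     BankAccount.balance(account)     #=> 70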
# Source: elixir/bank-account/lib/bank_account.ex
defprotocol Bamboo.Formatter do
@moduledoc ~S"""
Converts data to email addresses.
Implementations of the `Bamboo.Formatter` protocol convert a given data
structure to a two item tuple of `{name, address}` or an address string. The
`opts` argument is a map with the key `:type` and a value of `:from`, `:to`,
`:cc`, or `:bcc`. The options argument allows functions to pattern match an
address type and format a given data structure differently for different
types of addresses.
## Simple example
Let's say you have a user struct like this.
defmodule MyApp.User do
defstruct first_name: nil, last_name: nil, email: nil
end
Bamboo can automatically format this struct if you implement the `Bamboo.Formatter`
protocol.
defimpl Bamboo.Formatter, for: MyApp.User do
# Used by `to`, `bcc`, `cc` and `from`
def format_email_address(user, _opts) do
fullname = "#{user.first_name} #{user.last_name}"
{fullname, user.email}
end
end
Now you can create emails like this, and the user will be formatted correctly.
user = %User{first_name: "John", last_name: "Doe", email: "<EMAIL>"}
Bamboo.Email.new_email(from: user)
## Customize formatting based on from, to, cc or bcc
By pattern matching the `opts` argument, you can format a given data
structure differently for different types of addresses. For example, if you
want to provide the name of the app when sending email on behalf of a user,
you can format the name for all `type: :from` addresses.
defimpl Bamboo.Formatter, for: MyApp.User do
# Include the app name when used in a from address
def format_email_address(user, %{type: :from}) do
fullname = "#{user.first_name} #{user.last_name}"
{fullname <> " (Sent from MyApp)", user.email}
end
# Just use the name for all other types
def format_email_address(user, _opts) do
fullname = "#{user.first_name} #{user.last_name}"
{fullname, user.email}
end
end
"""
@doc ~S"""
Receives data and opts and returns a string or a two item tuple `{name, address}`
opts is a map with the key `:type` and a value of
`:from`, `:to`, `:cc` or `:bcc`. You can pattern match on this to customize
the address.
"""
@type opts :: %{type: :from | :to | :cc | :bcc}
@spec format_email_address(any, opts) :: Bamboo.Email.address()
def format_email_address(data, opts)
end
defimpl Bamboo.Formatter, for: List do
def format_email_address(email_addresses, opts) do
email_addresses |> Enum.map(&Bamboo.Formatter.format_email_address(&1, opts))
end
end
defimpl Bamboo.Formatter, for: BitString do
def format_email_address(email_address, _opts) do
{nil, email_address}
end
end
defimpl Bamboo.Formatter, for: Tuple do
def format_email_address(already_formatted_email, _opts) do
already_formatted_email
end
end
defimpl Bamboo.Formatter, for: Map do
def format_email_address(invalid_address, _opts) do
raise ArgumentError, """
The format of the address was invalid. Got #{inspect(invalid_address)}.
Expected a string, e.g. "<EMAIL>", a 2 item tuple {name, address}, or
something that implements the Bamboo.Formatter protocol.
Example:
defimpl Bamboo.Formatter, for: MyApp.User do
def format_email_address(user, _opts) do
{user.name, user.email}
end
end
"""
end
end
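# The built-in implementations compose: a list is formatted element by
# element, strings become `{nil, address}` tuples, and tuples pass through
# untouched. A quick sketch with hypothetical addresses:
#
#     Bamboo.Formatter.format_email_address("dev@example.com", %{type: :to})
#     #=> {nil, "dev@example.com"}
#
#     Bamboo.Formatter.format_email_address([{"Jane", "jane@example.com"}], %{type: :cc})
#     #=> [{"Jane", "jane@example.com"}]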
# Source: lib/bamboo/formatter.ex
defmodule Transpose1 do
def transpose(m) do
attach_row(m, [])
end
@doc """
Given a matrix and a result, make the first row into a column,
attach it to the result, and then recursively attach the
remaining rows to that new result.
When the original matrix has no rows remaining, the result
matrix is complete, but each row needs to be reversed.
"""
def attach_row([], result) do
reverse_rows(result, [])
end
def attach_row(row_list, result) do
[first_row | other_rows] = row_list
new_result = make_column(first_row, result)
attach_row(other_rows, new_result)
end
@doc """
Make a row into a column. There are three clauses:
When there are no more entries in the row, the column you are
making is complete.
Make the row into a column when the result matrix is empty.
Create the first item as a singleton list. Follow it with
the result of making the remaining entries in the column.
Make a row into a column when the result matrix is not empty.
Do this by adding the first item at the beginning of the first row of
the result. That list is followed by the result of making the remaining entries in the column.
"""
def make_column([], result) do # my job here is done
result
end
def make_column(row, []) do
[first_item | other_items] = row
[[first_item] | make_column(other_items, [])]
end
def make_column(row, result) do
[first_item | other_items] = row
[first_row | other_rows] = result
[[first_item | first_row] | make_column(other_items, other_rows)]
end
@doc """
Reverse the order of items in each row of a matrix. This constructs
a new matrix whose rows are in reverse order, so you need to reverse
the final result.
"""
def reverse_rows([], result) do
Enum.reverse(result)
end
def reverse_rows(rows, result) do
[first | others] = rows
reverse_rows(others, [Enum.reverse(first) | result])
end
end
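# Worked example: each row of the input becomes a column of the result.
#
#     Transpose1.transpose([[1, 2, 3], [4, 5, 6]])
#     #=> [[1, 4], [2, 5], [3, 6]]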
# Source: triangles/lib/transpose.ex
defmodule Scholar.Metrics do
@moduledoc """
Metric functions.
Metrics are used to measure the performance and compare
the performance of any kind of classifier in
easy-to-understand terms.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
import Nx.Defn
# Standard Metrics
@doc ~S"""
Computes the accuracy of the given predictions
for binary and multi-class classification problems.
## Examples
iex> Scholar.Metrics.accuracy(Nx.tensor([1, 0, 0]), Nx.tensor([1, 0, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.accuracy(y_true, y_pred)
#Nx.Tensor<
f32
0.6000000238418579
>
"""
defn accuracy(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
y_pred
|> Nx.equal(y_true)
|> Nx.mean()
end
@doc ~S"""
Computes the precision of the given predictions with respect to
the given targets for binary classification problems.
## Examples
iex> Scholar.Metrics.binary_precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn binary_precision(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
true_positives = binary_true_positives(y_true, y_pred)
false_positives = binary_false_positives(y_true, y_pred)
true_positives
|> Nx.divide(true_positives + false_positives + 1.0e-16)
end
@doc ~S"""
Computes the precision of the given predictions with respect to
the given targets for multi-class classification problems.
## Options
* `:num_classes` - Number of classes contained in the input tensors
## Examples
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.precision(y_true, y_pred, num_classes: 3)
#Nx.Tensor<
f32[3]
[0.6666666865348816, 1.0, 0.25]
>
"""
defn precision(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes])
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
cm = confusion_matrix(y_true, y_pred, opts)
true_positives = Nx.take_diagonal(cm)
false_positives = Nx.subtract(Nx.sum(cm, axes: [0]), true_positives)
true_positives
|> Nx.divide(true_positives + false_positives + 1.0e-16)
end
@doc ~S"""
Computes the recall of the given predictions with respect to
the given targets for binary classification problems.
## Examples
iex> Scholar.Metrics.binary_recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn binary_recall(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
true_positives = binary_true_positives(y_true, y_pred)
false_negatives = binary_false_negatives(y_true, y_pred)
Nx.divide(true_positives, false_negatives + true_positives + 1.0e-16)
end
@doc ~S"""
Computes the recall of the given predictions with respect to
the given targets for multi-class classification problems.
## Options
* `:num_classes` - Number of classes contained in the input tensors
## Examples
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.recall(y_true, y_pred, num_classes: 3)
#Nx.Tensor<
f32[3]
[0.6666666865348816, 0.5, 1.0]
>
"""
defn recall(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes])
assert_shape_pattern(y_true, {_})
assert_shape(y_pred, Nx.shape(y_true))
cm = confusion_matrix(y_true, y_pred, opts)
true_positive = Nx.take_diagonal(cm)
false_negative = Nx.subtract(Nx.sum(cm, axes: [1]), true_positive)
Nx.divide(true_positive, true_positive + false_negative + 1.0e-16)
end
defnp binary_true_positives(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
y_pred
|> Nx.equal(y_true)
|> Nx.logical_and(Nx.equal(y_pred, 1))
|> Nx.sum()
end
defnp binary_false_negatives(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
y_pred
|> Nx.not_equal(y_true)
|> Nx.logical_and(Nx.equal(y_pred, 0))
|> Nx.sum()
end
defnp binary_true_negatives(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
y_pred
|> Nx.equal(y_true)
|> Nx.logical_and(Nx.equal(y_pred, 0))
|> Nx.sum()
end
defnp binary_false_positives(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
y_pred
|> Nx.not_equal(y_true)
|> Nx.logical_and(Nx.equal(y_pred, 1))
|> Nx.sum()
end
@doc ~S"""
Computes the sensitivity of the given predictions with respect
to the given targets for binary classification problems.
## Examples
iex> Scholar.Metrics.binary_sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.6666666865348816
>
"""
defn binary_sensitivity(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
binary_recall(y_true, y_pred)
end
@doc ~S"""
Computes the sensitivity of the given predictions with respect
to the given targets for multi-class classification problems.
## Options
* `:num_classes` - Number of classes contained in the input tensors
## Examples
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.sensitivity(y_true, y_pred, num_classes: 3)
#Nx.Tensor<
f32[3]
[0.6666666865348816, 0.5, 1.0]
>
"""
defn sensitivity(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes])
assert_shape_pattern(y_true, {_})
assert_shape(y_pred, Nx.shape(y_true))
recall(y_true, y_pred, opts)
end
@doc ~S"""
Computes the specificity of the given predictions with respect
to the given targets for binary classification problems.
## Examples
iex> Scholar.Metrics.binary_specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
f32
0.0
>
"""
defn binary_specificity(y_true, y_pred) do
assert_shape_pattern(y_true, {_})
assert_shape(y_true, Nx.shape(y_pred))
true_negatives = binary_true_negatives(y_true, y_pred)
false_positives = binary_false_positives(y_true, y_pred)
Nx.divide(true_negatives, false_positives + true_negatives + 1.0e-16)
end
@doc ~S"""
Computes the specificity of the given predictions with respect
to the given targets for multi-class classification problems.
## Options
* `:num_classes` - Number of classes contained in the input tensors
## Examples
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.specificity(y_true, y_pred, num_classes: 3)
#Nx.Tensor<
f32[3]
[0.8571428656578064, 1.0, 0.6666666865348816]
>
"""
defn specificity(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes])
assert_shape_pattern(y_true, {_})
assert_shape(y_pred, Nx.shape(y_true))
cm = confusion_matrix(y_true, y_pred, opts)
true_positive = Nx.take_diagonal(cm)
false_positive = Nx.subtract(Nx.sum(cm, axes: [0]), true_positive)
false_negative = Nx.subtract(Nx.sum(cm, axes: [1]), true_positive)
true_negative = Nx.subtract(Nx.sum(cm), false_negative + false_positive + true_positive)
Nx.divide(true_negative, false_positive + true_negative + 1.0e-16)
end
@doc ~S"""
Calculates the confusion matrix given rank-1 tensors which represent
the expected (`y_true`) and predicted (`y_pred`) classes.
## Options
* `:num_classes` - required. Number of classes contained in the input tensors
## Examples
iex> y_true = Nx.tensor([0, 0, 1, 1, 2, 2], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 1, 0, 2, 2, 2], type: {:u, 32})
iex> Scholar.Metrics.confusion_matrix(y_true, y_pred, num_classes: 3)
#Nx.Tensor<
s64[3][3]
[
[1, 1, 0],
[1, 0, 1],
[0, 0, 2]
]
>
"""
defn confusion_matrix(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes])
assert_shape_pattern(y_true, {_})
assert_shape(y_pred, Nx.shape(y_true))
num_classes =
transform(opts[:num_classes], fn num_classes ->
num_classes || raise ArgumentError, "missing option :num_classes"
end)
zeros = Nx.broadcast(0, {num_classes, num_classes})
indices = Nx.stack([y_true, y_pred], axis: 1)
updates = Nx.broadcast(1, {Nx.size(y_true)})
Nx.indexed_add(zeros, indices, updates)
end
@doc ~S"""
Calculates F1 score given rank-1 tensors which represent
the expected (`y_true`) and predicted (`y_pred`) classes.
## Options
* `:num_classes` - required. Number of classes contained in the input tensors
* `:average` - optional. This determines the type of averaging performed on the data.
* `:macro`. Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
* `:weighted`. Calculate metrics for each label, and find their average weighted by
support (the number of true instances for each label).
* `:micro`. Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* If not specified, then the f1 scores for each class are returned.
## Examples
iex> y_true = Nx.tensor([0, 1, 1, 1, 1, 0, 2, 1, 0, 1], type: {:u, 32})
iex> y_pred = Nx.tensor([0, 2, 1, 1, 2, 2, 2, 0, 0, 1], type: {:u, 32})
iex> Scholar.Metrics.f1_score(y_true, y_pred, num_classes: 3, average: nil)
#Nx.Tensor<
f32[3]
[0.6666666865348816, 0.6666666865348816, 0.4000000059604645]
>
iex> Scholar.Metrics.f1_score(y_true, y_pred, num_classes: 3, average: :macro)
#Nx.Tensor<
f32
0.5777778029441833
>
iex> Scholar.Metrics.f1_score(y_true, y_pred, num_classes: 3, average: :weighted)
#Nx.Tensor<
f32
0.64000004529953
>
iex> Scholar.Metrics.f1_score(y_true, y_pred, num_classes: 3, average: :micro)
#Nx.Tensor<
f32
0.6000000238418579
>
"""
defn f1_score(y_true, y_pred, opts \\ []) do
opts = keyword!(opts, [:num_classes, :average])
assert_shape_pattern(y_true, {_})
assert_shape(y_pred, Nx.shape(y_true))
num_classes =
transform(opts[:num_classes], fn num_classes ->
num_classes || raise ArgumentError, "missing option :num_classes"
end)
transform(opts[:average], fn average ->
if Elixir.Kernel.==(average, :micro) do
accuracy(y_true, y_pred)
else
cm = confusion_matrix(y_true, y_pred, num_classes: num_classes)
true_positive = Nx.take_diagonal(cm)
false_positive = Nx.subtract(Nx.sum(cm, axes: [0]), true_positive)
false_negative = Nx.subtract(Nx.sum(cm, axes: [1]), true_positive)
precision = Nx.divide(true_positive, true_positive + false_positive + 1.0e-16)
recall = Nx.divide(true_positive, true_positive + false_negative + 1.0e-16)
per_class_f1 =
Nx.divide(
Nx.multiply(2, Nx.multiply(precision, recall)),
precision + recall + 1.0e-16
)
case average do
nil ->
per_class_f1
:macro ->
Nx.mean(per_class_f1)
:weighted ->
support = Nx.sum(Nx.equal(y_true, Nx.iota({num_classes, 1})), axes: [1])
Nx.sum(Nx.multiply(per_class_f1, Nx.divide(support, Nx.sum(support) + 1.0e-16)))
end
end
end)
end
@doc ~S"""
Calculates the mean absolute error of predictions
with respect to targets.
$$\text{MAE} = \frac{1}{n} \sum_{i=1}^{n} |\hat{y_i} - y_i|$$
## Examples
iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
iex> Scholar.Metrics.mean_absolute_error(y_true, y_pred)
#Nx.Tensor<
f32
0.5
>
"""
defn mean_absolute_error(y_true, y_pred) do
assert_shape(y_true, Nx.shape(y_pred))
y_true
|> Nx.subtract(y_pred)
|> Nx.abs()
|> Nx.mean()
end
# Combinators
@doc """
Returns a function which computes a running average given current average,
new observation, and current iteration.
## Examples
iex> cur_avg = 0.5
iex> iteration = 1
iex> y_true = Nx.tensor([0, 1, 1])
iex> y_pred = Nx.tensor([0, 1, 1])
iex> avg_acc = Scholar.Metrics.running_average(&Scholar.Metrics.accuracy/2)
iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)
#Nx.Tensor<
f32
0.75
>
"""
def running_average(metric) do
&running_average_impl(&1, apply(metric, &2), &3)
end
defnp running_average_impl(avg, obs, i) do
avg
|> Nx.multiply(i)
|> Nx.add(obs)
|> Nx.divide(Nx.add(i, 1))
end
@doc """
Returns a function which computes a running sum given current sum,
new observation, and current iteration.
## Examples
iex> cur_sum = 12
iex> iteration = 2
iex> y_true = Nx.tensor([0, 1, 0, 1])
iex> y_pred = Nx.tensor([1, 1, 0, 1])
iex> fps = Scholar.Metrics.running_sum(&Scholar.Metrics.mean_absolute_error/2)
iex> fps.(cur_sum, [y_true, y_pred], iteration)
#Nx.Tensor<
f32
12.25
>
"""
def running_sum(metric) do
&running_sum_impl(&1, apply(metric, &2), &3)
end
defnp running_sum_impl(sum, obs, _) do
Nx.add(sum, obs)
end
end
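# Because these are `defn` functions, they can be JIT compiled with any
# supported Nx compiler; a sketch assuming the EXLA backend is installed:
#
#     jitted = Nx.Defn.jit(&Scholar.Metrics.accuracy/2, compiler: EXLA)
#     jitted.(Nx.tensor([1, 0, 0]), Nx.tensor([1, 0, 1]))
#     #=> #Nx.Tensor<f32 0.6666666865348816>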
# Source: lib/scholar/metrics.ex
defmodule Stache.Tokenizer do
@moduledoc false
defmodule State do
@moduledoc false
defstruct [
line: 0,
pos: 0,
pos_start: 0,
start: 0,
buffer: "",
mode: :text,
delim_start: {"{{", 2},
delim_end: {"}}", 2},
tokens: []
]
end
@default_delimeters {"{{", "}}"}
@doc """
Tokenizes the given binary.
Returns `{:ok, tokens}`, where each token takes one of the following forms:
* `{:text, meta, contents}`
* `{:section, meta, contents}`
* `{:inverted, meta, contents}`
* `{:end, meta, contents}`
* `{:partial, meta, contents}`
* `{:double, meta, contents}`
* `{:triple, meta, contents}`
Or {:error, line, error} in the case of errors.
"""
def tokenize(template, opts \\ []) do
line = Keyword.get(opts, :line, 1)
delimeters = Keyword.get(opts, :delimeters, @default_delimeters)
state = set_delimeters(%State{line: line, start: line}, delimeters)
with {:ok, tokens} <- tokenize_loop(template, state)
do
{:ok, strip(tokens)}
end
end
defp set_delimeters(state, {fst, snd}) do
%State{state|
delim_start: {fst, String.length(fst)},
delim_end: {snd, String.length(snd)}
}
end
defp strip(tokens) do
tokens
|> Enum.chunk_by(fn t -> elem(t, 1) |> Access.get(:line) end)
|> Enum.map(&strip_standalone/1)
|> List.flatten
|> Enum.reject(&comment_or_delimeter?/1)
end
defp strip_standalone(line) do
filtered = Enum.filter(line, fn
{:text, _, contents} -> String.strip(contents) != ""
_ -> true
end)
# If there is only one token on a line other than whitespace, and the token is
# a control structure, we can remove the line entirely from the template.
case filtered do
[{:partial, meta, tag}] ->
indentation = case line do
[{:text, _, contents}|_] -> String.length(contents)
_ -> 0
end
[{:partial, Map.put(meta, :indent, indentation), tag}]
[{tag, _, _}] when tag in [:delimeter, :comment, :end, :section, :inverted] -> filtered
_ -> line
end
end
defp comment_or_delimeter?({:comment, _, _}), do: true
defp comment_or_delimeter?({:delimeter, _, _}), do: true
defp comment_or_delimeter?(_), do: false
defp delimeter_change(state = %State{line: line, buffer: buffer}) do
delimeters =
buffer
|> String.split
|> Enum.reject(&String.contains?(&1, "="))
case delimeters do
[fst, snd] ->
state = set_delimeters(state, {fst, snd})
{:ok, state}
_ -> {:error, line, "Improper delimeter change"}
end
end
defp next_state(stream, next, state = %State{mode: mode, pos: pos}, inc) do
new_pos = pos + inc
boundary = case {mode, next} do
# We just read a closing delimeter. It belongs to the current token.
{_, :text} -> new_pos
# We just read an opening delimeter. It belongs to the next token.
{:text, _} -> pos
end
state = add_token(%State{state|pos: boundary})
tokenize_loop(stream, %State{state|pos: new_pos, mode: next})
end
defp add_token(state = %State{mode: :text, buffer: ""}), do: state
defp add_token(state = %State{start: start, mode: mode, buffer: buffer, pos: pos}) do
contents = case mode do
:text -> buffer
:comment -> buffer
_ -> String.strip(buffer)
end
{fst, _} = state.delim_start
{snd, _} = state.delim_end
delimeters = {fst, snd}
meta = %{delimeters: delimeters, line: start, pos_start: state.pos_start, pos_end: pos}
tokens = [{mode, meta, contents}|state.tokens]
%State{state|tokens: tokens, buffer: "", start: state.line, pos_start: pos}
end
defp tokenize_loop("", state = %State{mode: :text}) do
state = add_token(state)
{:ok, Enum.reverse(state.tokens)}
end
defp tokenize_loop("", %State{line: line}), do: {:error, line, "Unexpected EOF"}
defp tokenize_loop(stream, state = %State{mode: :text, line: line, buffer: buffer}) do
{delim, dsize} = state.delim_start
case stream do
"{{{" <> stream ->
next_state(stream, :triple, state, 3)
<<"{{":: binary, s :: binary-size(1), stream::binary>>
when s in ["!", "="] ->
next = case s do
"!" -> :comment
"=" -> :delimeter
end
next_state(stream, next, state, 3)
<<^delim::binary-size(dsize), s::binary-size(1), stream::binary>>
when s in ["#", "^", "/", ">"] ->
mode = case s do
"#" -> :section
"^" -> :inverted
"/" -> :end
">" -> :partial
end
next_state(stream, mode, state, String.length(delim) + 1)
<<^delim::binary-size(dsize), stream::binary>> ->
next_state(stream, :double, state, String.length(delim))
"\n" <> stream ->
next_state(stream, :text, %State{state|line: line + 1, buffer: buffer <> "\n"}, 1)
_ ->
{c, stream} = String.next_codepoint(stream)
tokenize_loop(stream, %State{state|buffer: buffer <> c, pos: state.pos + 1})
end
end
defp tokenize_loop("\n" <> stream, state = %State{line: line, buffer: buffer}) do
tokenize_loop(stream, %State{state|line: line + 1, buffer: buffer <> "\n", pos: state.pos + 1})
end
defp tokenize_loop("=}}" <> stream, state = %State{mode: :delimeter}) do
with {:ok, state} <- delimeter_change(state),
do: next_state(stream, :text, state, 3)
end
defp tokenize_loop("}}}" <> stream, state = %State{mode: :triple}) do
# We've found a closing }}} after an open {{{.
next_state(stream, :text, state, 3)
end
defp tokenize_loop(stream, state = %State{mode: m, start: start, line: line, buffer: buffer}) do
{delim, dsize} = state.delim_end
{sdelim, sdsize} = state.delim_start
case stream do
<<^delim::binary-size(dsize), "\n", stream::binary>>
when start != line and m in [:comment, :section, :inverted, :end] ->
next_state(stream, :text, %State{state|line: line + 1}, String.length(delim) + 1)
<<^delim::binary-size(dsize), stream::binary>>
when m in [:double, :comment, :section, :inverted, :end, :partial] ->
next_state(stream, :text, state, String.length(delim))
<<^delim::binary-size(dsize), _::binary>> ->
{:error, line, "Unexpected \"#{delim}\""}
<<^sdelim::binary-size(sdsize), _::binary>> ->
{:error, line, "Unexpected \"#{sdelim}\""}
"{{{" <> _ -> {:error, line, "Unexpected \"{{{\"."}
"}}}" <> _ -> {:error, line, "Unexpected \"}}}\"."}
_ ->
{c, stream} = String.next_codepoint(stream)
tokenize_loop(stream, %State{state|pos: state.pos + 1, line: line, buffer: buffer <> c})
end
end
end
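# A quick sketch of the resulting token stream (meta maps elided):
#
#     Stache.Tokenizer.tokenize("Hello {{name}}!")
#     #=> {:ok, [{:text, %{...}, "Hello "}, {:double, %{...}, "name"}, {:text, %{...}, "!"}]}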
# Source: lib/tokenizer.ex
defmodule Astarte.Core.Mapping.EndpointsAutomaton do
alias Astarte.Core.Mapping
@doc """
Returns `:ok` and the endpoint matching the given `path`, using a previously built automaton (`{transitions, accepting_states}`).
If the path is incomplete, one or more endpoints are guessed and `:guessed` is returned together with the list of candidate endpoints.
"""
def resolve_path(path, {transitions, accepting_states}) do
path_tokens = String.split(path, "/", trim: true)
states = do_transitions(path_tokens, [0], transitions)
cond do
states == [] ->
{:error, :not_found}
length(states) == 1 and accepting_states[hd(states)] != nil ->
{:ok, accepting_states[hd(states)]}
true ->
states = force_transitions(states, transitions, accepting_states)
guessed_endpoints =
for state <- states do
accepting_states[state]
end
{:guessed, guessed_endpoints}
end
end
@doc """
Builds the automaton for the given `mappings`. Returns `:ok` and the automaton tuple if the build succeeded, otherwise `:error` and the reason.
"""
def build(mappings) do
nfa = do_build(mappings)
if is_valid?(nfa, mappings) do
{:ok, nfa}
else
{:error, :overlapping_mappings}
end
end
@doc """
Returns `true` if `nfa` is valid for the given `mappings`.
"""
def is_valid?(nfa, mappings) do
Enum.all?(mappings, fn mapping ->
resolve_path(mapping.endpoint, nfa) == {:ok, mapping.endpoint}
end)
end
@doc """
Returns a list of likely invalid endpoints for the given list of `mappings`.
"""
def lint(mappings) do
nfa = do_build(mappings)
mappings
|> Enum.filter(fn mapping ->
resolve_path(mapping.endpoint, nfa) != {:ok, mapping.endpoint}
end)
|> Enum.map(fn mapping -> mapping.endpoint end)
end
defp do_transitions([], current_states, _transitions) do
current_states
end
defp do_transitions(_tokens, [], _transitions) do
[]
end
defp do_transitions([token | tail_tokens], current_states, transitions) do
next_states =
List.foldl(current_states, [], fn state, acc ->
if Mapping.is_placeholder?(token) do
all_state_transitions = state_transitions(transitions, state)
all_state_transitions ++ acc
else
transition_list = Map.get(transitions, {state, token}) |> List.wrap()
epsi_transition_list = Map.get(transitions, {state, ""}) |> List.wrap()
transition_list ++ epsi_transition_list ++ acc
end
end)
do_transitions(tail_tokens, next_states, transitions)
end
defp force_transitions(current_states, transitions, accepting_states) do
next_states =
List.foldl(current_states, [], fn state, acc ->
good_state =
if accepting_states[state] == nil do
state_transitions(transitions, state)
else
[state]
end
good_state ++ acc
end)
finished =
Enum.all?(next_states, fn state ->
accepting_states[state]
end)
if finished do
next_states
else
force_transitions(next_states, transitions, accepting_states)
end
end
defp state_transitions(transitions, state) do
Enum.reduce(transitions, [], fn
{{^state, _}, next_state}, acc ->
[next_state | acc]
_transition, acc ->
acc
end)
end
defp do_build(mappings) do
{transitions, _, accepting_states} = List.foldl(mappings, {%{}, [], %{}}, &parse_endpoint/2)
{transitions, accepting_states}
end
def parse_endpoint(mapping, {transitions, states, accepting_states}) do
["" | path_tokens] =
mapping.endpoint
|> Mapping.normalize_endpoint()
|> String.split("/")
{states, _, _, transitions} =
List.foldl(path_tokens, {states, 0, "", transitions}, fn token,
{states, previous_state,
partial_endpoint, transitions} ->
new_partial_endpoint = "#{partial_endpoint}/#{token}"
candidate_previous =
Enum.find_index(states, fn state -> state == new_partial_endpoint end)
if candidate_previous != nil do
{states, candidate_previous, new_partial_endpoint, transitions}
else
states = states ++ [partial_endpoint]
new_state = length(states)
{states, new_state, new_partial_endpoint,
Map.put(transitions, {previous_state, token}, new_state)}
end
end)
accepting_states = Map.put(accepting_states, length(states), mapping.endpoint)
{transitions, states, accepting_states}
end
end
|
lib/astarte_core/mapping/endpoints_automaton.ex
| 0.876443
| 0.624966
|
endpoints_automaton.ex
|
starcoder
|
defmodule Plug.Static do
@moduledoc """
A plug for serving static assets.
It expects two options on initialization:
* `:at` - the request path to reach for static assets.
It must be a binary.
* `:from` - the filesystem path to read static assets from.
It must be a binary, containing a file system path,
or an atom representing the application name,
    where assets will be served from its "priv/static" directory.
The preferred form is to use `:from` with an atom, since
it will make your application independent from the starting
directory.
If a static asset cannot be found, it simply forwards
the connection to the rest of the stack.
## Examples
  This filter can be mounted in a Plug.Builder as follows:
defmodule MyPlug do
use Plug.Builder
plug Plug.Static, at: "/public", from: :my_app
plug :not_found
def not_found(conn, _) do
          Plug.Conn.send_resp(conn, 404, "not found")
end
end
"""
@behaviour Plug.Wrapper
@allowed_methods ~w(GET HEAD)
import Plug.Conn
def init(opts) do
at = Keyword.fetch!(opts, :at)
from = Keyword.fetch!(opts, :from)
unless is_atom(from) or is_binary(from) do
raise ArgumentError, message: ":from must be an atom or a binary"
end
{Plug.Router.Utils.split(at), from, opts[:gzip]}
end
def wrap(conn, {at, from, gzip}, fun) do
if conn.method in @allowed_methods do
wrap(conn, at, from, gzip, fun)
else
fun.(conn)
end
end
def wrap(conn, at, from, gzip, fun) do
segments = subset(at, conn.path_info)
segments = for segment <- List.wrap(segments), do: URI.decode(segment)
path = path(from, segments)
cond do
segments in [nil, []] ->
fun.(conn)
invalid_path?(segments) ->
send_resp(conn, 400, "Bad request")
true ->
case file_encoding(conn, path, gzip) do
{conn, path} ->
conn
|> put_resp_header("content-type", Plug.MIME.path(List.last(segments)))
|> put_resp_header("cache-control", "public, max-age=31536000")
|> send_file(200, path)
:error ->
fun.(conn)
end
end
end
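  # Prefers a precompressed "<path>.gz" sibling when the client accepts gzip;
  # otherwise serves the plain file, or returns :error if neither exists.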
defp file_encoding(conn, path, gzip) do
path_gz = path <> ".gz"
cond do
gzip && gzip?(conn) && File.regular?(path_gz) ->
{put_resp_header(conn, "content-encoding", "gzip"), path_gz}
File.regular?(path) ->
{conn, path}
true ->
:error
end
end
defp gzip?(conn) do
fun = &(:binary.match(&1, ["gzip", "*"]) != :nomatch)
Enum.any? get_req_header(conn, "accept-encoding"), fn accept ->
Enum.any?(Plug.Conn.Utils.list(accept), fun)
end
end
defp path(from, segments) when is_atom(from),
do: Path.join([:code.priv_dir(from), "static" | segments])
defp path(from, segments),
do: Path.join([from | segments])
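  # Strips the mount-point segments (`at`) from the request path, returning
  # the remaining segments, or nil when the request is outside the mount.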
defp subset([h|expected], [h|actual]),
do: subset(expected, actual)
defp subset([], actual),
do: actual
defp subset(_, _), do:
nil
defp invalid_path?([h|_]) when h in [".", "..", ""], do: true
defp invalid_path?([h|t]) do
case :binary.match(h, ["/", "\\", ":"]) do
{_, _} -> true
:nomatch -> invalid_path?(t)
end
end
defp invalid_path?([]), do: false
end
|
lib/plug/static.ex
| 0.830937
| 0.555073
|
static.ex
|
starcoder
|
defmodule Mix.Releases.App do
@moduledoc """
Represents important metadata about a given application.
"""
defstruct name: nil,
vsn: nil,
applications: [],
included_applications: [],
start_type: nil,
path: nil
@type start_type :: :permanent | :temporary | :transient | :load | :none
@type t :: %__MODULE__{
name: atom(),
vsn: String.t(),
applications: [atom()],
included_applications: [atom()],
start_type: start_type,
path: nil | String.t()
}
@valid_start_types [:permanent, :temporary, :transient, :load, :none]
@doc """
Create a new Application struct from an application name
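  ## Example
      # Illustrative sketch; returns nil if the application cannot be loaded.
      Mix.Releases.App.new(:logger)
      #=> %Mix.Releases.App{name: :logger, start_type: nil, ...}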
"""
  @spec new(atom) :: nil | __MODULE__.t() | {:error, term}
def new(name),
do: do_new(name, nil)
@doc """
Same as new/1, but specify the application's start type
"""
  @spec new(atom, start_type | nil) :: nil | __MODULE__.t() | {:error, term}
def new(name, start_type) when is_atom(name) and start_type in @valid_start_types,
do: do_new(name, start_type)
def new(name, nil) when is_atom(name),
do: do_new(name, nil)
def new(name, start_type) do
{:error, {:apps, {:invalid_start_type, name, start_type}}}
end
defp do_new(name, start_type) do
_ = Application.load(name)
case Application.spec(name) do
nil ->
nil
spec ->
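        # Note: vsn is built as an Erlang charlist here, although the typespec
        # above declares String.t().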
vsn = '#{Keyword.get(spec, :vsn)}'
apps = Keyword.get(spec, :applications, [])
included = Keyword.get(spec, :included_applications, [])
path = Application.app_dir(name)
%__MODULE__{
name: name,
vsn: vsn,
start_type: start_type,
applications: apps,
included_applications: included,
path: path
}
end
end
@doc """
Determines if the provided start type is a valid one.
"""
@spec valid_start_type?(atom) :: boolean()
def valid_start_type?(start_type) when start_type in @valid_start_types, do: true
def valid_start_type?(_), do: false
end
|
lib/mix/lib/releases/models/app.ex
| 0.830697
| 0.42913
|
app.ex
|
starcoder
|
defmodule JISHOCALLER do
@moduledoc """
Wrapper for the Jisho API found on https://jisho.org/
Search for a word both in Japanese and English and return the result.
"""
@doc """
Search by a word or a term. Returns a JSON result from the call.
Parameters:
- Word or term: "String"
Result:
- A list of Maps where each map is a word that has been found.
- If successful then {:ok, data} is returned where data is a list of maps or it returns {:ok, "No data"} if there is nothing in the result.
- If unsuccessful this will return {:error, reason}
Examples:
Searching using an English word:
iex> JISHOCALLER.search("dog")
Searching using a term:
iex> JISHOCALLER.search("fast car")
  Search using Romaji:
iex> JISHOCALLER.search("hanako")
Searching using Kana:
iex> JISHOCALLER.search("にほん")
Searching using Kanji:
iex> JISHOCALLER.search("招き猫")
"""
def search(word) do
url_for(word)
    |> receive_result
end
@doc """
Search by a word or a term with tags. Returns a JSON result from the call.
Parameters:
- Word or term : "String"
- List of Strings ["String", "String" . . .]
Result:
- A list of Maps where each map is a word that has been found using the tag(s).
- If successful then {:ok, data} is returned where data is a list of maps or it returns {:ok, "No data"} if there is nothing in the result.
- If unsuccessful this will return {:error, reason}
Examples:
No tags (This is the same as just using search):
iex> JISHOCALLER.search("dog", [])
Using only tags:
iex> JISHOCALLER.search("", ["jlpt-n5"])
Using a term and a tag:
iex> JISHOCALLER.search("animal", ["jlpt-n5"])
Using multiple tags:
iex> JISHOCALLER.search("出来る", ["jlpt-n5", "verb"])
"""
def search(word, tags) do
tags = merge_tags(tags) |> URI.encode_www_form
url_for(word) <> tags
    |> receive_result
end
@doc """
Search by a word or a term with tags and a page. Returns a JSON result from the call.
Parameters:
- Word or term : "String"
- List of Strings ["String", "String" . . .]
- Page Number: Integer
Result:
- A list of Maps where each map is a word that has been found using the word, tag(s), and page number.
- If successful then {:ok, data} is returned where data is a list of maps or it returns {:ok, "No data"} if there is nothing in the result.
- If unsuccessful this will return {:error, reason}
Examples:
A term, no tags, and a page:
iex> JISHOCALLER.search("差す", [], 1)
Using only tags and page:
iex> JISHOCALLER.search("", ["jlpt-n5"], 30)
Using a term, a tag and a page:
iex> JISHOCALLER.search("差す", ["verb"], 2)
Using a term, multiple tags and a page:
iex> JISHOCALLER.search("出来る", ["jlpt-n5", "verb"], 1)
"""
def search(word, tags, page) when is_integer(page) and page > 0 do
tags = merge_tags(tags) |> URI.encode_www_form
url_for(word) <> tags <> "&page=#{page}"
    |> receive_result
end
@doc """
Search using tags. Returns a JSON result from the call.
Parameters:
- List of Strings ["String", "String" . . .]
Result:
- A list of Maps where each map is a word that has been found using the tag(s).
- If successful then {:ok, data} is returned where data is a list of maps or it returns {:ok, "No data"} if there is nothing in the result.
- If unsuccessful this will return {:error, reason}
Examples:
One tag:
iex> JISHOCALLER.search_by_tags(["jlpt-n5"])
Multiple tags:
iex> JISHOCALLER.search_by_tags(["jlpt-n5", "verb"])
"""
def search_by_tags(tags) do
merged = merge_tags(tags) |> URI.encode_www_form
url_for("") <> merged
    |> receive_result
end
@doc """
Search using tags and a page. Returns a JSON result from the call.
Parameters:
- List of Strings ["String", "String" . . .]
- Page Number: Integer
Result:
- A list of Maps where each map is a word that has been found using the tag(s).
- If successful then {:ok, data} is returned where data is a list of maps or it returns {:ok, "No data"} if there is nothing in the result.
- If unsuccessful this will return {:error, reason}
Examples:
One tag and page number:
iex> JISHOCALLER.search_by_tags(["jlpt-n5"], 1)
Multiple tags and page number:
iex> JISHOCALLER.search_by_tags(["jlpt-n5", "verb"], 3)
"""
def search_by_tags(tags, page) when is_integer(page) and page > 0 do
merged = merge_tags(tags) |> URI.encode_www_form
url_for("") <> merged <> "&page=#{page}"
    |> receive_result
end
  defp receive_result(url) do
    HTTPoison.get(url, [timeout: 10_000, recv_timeout: 10_000])
    |> parse_json
  end
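  # Tags are appended to the keyword as " #tag" fragments (e.g. "dog #jlpt-n5")
  # and URL-encoded; jisho.org interprets "#..." fragments as tag filters.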
defp merge_tags([]), do: ""
defp merge_tags(tags) do
Stream.map(tags, &(check_string(&1)))
|> Stream.map(&(String.trim(&1)))
|> Stream.map(&(add_hashcode(&1)))
|> Enum.reduce(fn x, acc -> acc <> x end)
end
defp check_string(tag) when is_binary(tag), do: tag
defp check_string(_), do: ""
defp add_hashcode(""), do: ""
defp add_hashcode(tag), do: " ##{tag}"
defp url_for(word) do
word = URI.encode(word)
"https://jisho.org/api/v1/search/words?keyword=#{word}"
end
  defp parse_json({:ok, %HTTPoison.Response{body: body, status_code: 200}}) do
    body
    |> JSON.decode!
    |> get_data
  end
  # Treat any non-200 HTTP status as an error instead of crashing with a
  # FunctionClauseError.
  defp parse_json({:ok, %HTTPoison.Response{status_code: status_code}}) do
    {:error, "unexpected status code #{status_code}"}
  end
  defp parse_json({:error, %HTTPoison.Error{id: _, reason: reason}}) do
    {:error, reason}
  end
  defp get_data(json) do
    try do
      meta_check(json["data"], json["meta"])
    rescue
      _ -> {:error, "error with getting data"}
    end
  end
  defp meta_check(data, %{"status" => 200}) do
    check_empty_data(data)
  end
  defp meta_check(_data, %{"status" => _}) do
    {:error, "meta status code not 200"}
  end
  defp check_empty_data([]) do
    {:ok, "No data"}
  end
  defp check_empty_data(data) do
    {:ok, data}
  end
end
|
lib/jishocaller.ex
| 0.869452
| 0.593256
|
jishocaller.ex
|
starcoder
|
defmodule Appsignal.Ecto do
@moduledoc """
Integration for logging Ecto queries
If you're using Ecto 3, attach `Appsignal.Ecto` to Telemetry query events in
your application's `start/2` function:
```
:telemetry.attach(
"appsignal-ecto",
[:my_app, :repo, :query],
&Appsignal.Ecto.handle_event/4,
nil
)
```
For versions of Telemetry < 0.3.0, you'll need to call it slightly
differently:
```
Telemetry.attach(
"appsignal-ecto",
[:my_app, :repo, :query],
Appsignal.Ecto,
:handle_event,
nil
)
```
On Ecto 2, add the `Appsignal.Ecto` module to your Repo's logger
configuration instead. The `Ecto.LogEntry` logger is the default logger for
Ecto and needs to be set as well to keep the original Ecto logger behavior
intact.
```
config :my_app, MyApp.Repo,
loggers: [Appsignal.Ecto, Ecto.LogEntry]
```
"""
require Logger
@transaction Application.get_env(:appsignal, :appsignal_transaction, Appsignal.Transaction)
def handle_event(_event, event_data, metadata, _config) do
do_handle_event(transaction(), event_data, metadata)
end
def log(entry) do
do_log(transaction(), entry)
end
defp transaction do
Appsignal.TransactionRegistry.lookup(self())
end
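  # Telemetry >= 0.4 passes measurements as a map (hence the :total_time
  # clause); older versions passed the duration as a bare integer.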
defp do_handle_event(%Appsignal.Transaction{} = transaction, %{total_time: duration}, metadata) do
@transaction.record_event(
transaction,
"query.ecto",
"",
metadata.query,
convert_time_unit(duration),
1
)
end
defp do_handle_event(%Appsignal.Transaction{} = transaction, duration, metadata)
when is_integer(duration) do
@transaction.record_event(
transaction,
"query.ecto",
"",
metadata.query,
convert_time_unit(duration),
1
)
end
defp do_handle_event(_transaction, _duration, _metadata), do: nil
defp do_log(%Appsignal.Transaction{} = transaction, entry) do
duration = (entry.queue_time || 0) + (entry.query_time || 0) + (entry.decode_time || 0)
@transaction.record_event(
transaction,
"query.ecto",
"",
entry.query,
convert_time_unit(duration),
1
)
entry
end
defp do_log(_transaction, entry), do: entry
defp convert_time_unit(time) do
# Converts the native time to a value in nanoseconds.
System.convert_time_unit(time, :native, 1_000_000_000)
end
end
|
lib/appsignal/ecto.ex
| 0.879244
| 0.777342
|
ecto.ex
|
starcoder
|
defmodule AstraeaVirgoWeb.SubmissionView do
use AstraeaVirgoWeb, :view
@moduledoc """
Response for Submission API
"""
@doc """
Response
## index.json
  Response for the index Submission API: `GET /api/submissions`
Response: list of Object
  | field     | type | required | description                         |
  |-----------|------|----------|-------------------------------------|
  | id        | ID   | yes      | ID of this submission               |
  | language  | ID   | yes      | ID of the programming language used |
  | problem   | ID   | yes      | ID of the submitted problem         |
  | submitter | ID   | yes      | ID of the submitter                 |
  | time      | Time | yes      | time of the submission              |
  | judgement | ID   | yes      | judgement result of the submission  |
  Note: The final results will be arranged in increasing order of time
Example:
```json
[
{
"id": "a61d1e78-5d62-4aa6-a1e9-f756174eec26",
"language": "c",
"problem": "1000",
"submitter": "IQHV2FVS00",
"time": "2014-06-25T11:00:00+01",
"judgement": "AC"
},
{
"id": "ec1cd4e4-7e85-47f6-8d61-1297d5415c66",
"language": "cpp",
"problem": "1002",
"submitter": "IQHV2FVS00",
"time": "2014-06-25T12:00:00+01",
"judgement": "WA"
}
]
```
## show.json
  Response for the show Submission API: `GET /api/submissions/<submission_id>`
Response: Object
  | field     | type | required | description                         |
  |-----------|------|----------|-------------------------------------|
  | id        | ID   | yes      | ID of this submission               |
  | language  | ID   | yes      | ID of the programming language used |
  | problem   | ID   | yes      | ID of the submitted problem         |
  | submitter | ID   | yes      | ID of the submitter                 |
  | time      | Time | yes      | time of the submission              |
  | judgement | ID   | yes      | judgement result of the submission  |
Example:
```json
{
"id": "a61d1e78-5d62-4aa6-a1e9-f756174eec26",
"language": "c",
"problem": "1000",
"submitter": "IQHV2FVS00",
"time": "2014-06-25T11:00:00+01",
"judgement": "AC"
}
```
## detail.json
  Response for the show Submission Detail API
- `GET /api/submissions/<submission_id>/detail`
Response: Object
  | field     | type   | required | null | description                   |
  |-----------|--------|----------|------|-------------------------------|
  | id        | ID     | yes      | no   | ID of this submission         |
  | submitter | ID     | yes      | no   | ID of the submitter           |
  | judgement | ID     | yes      | no   | judgement result              |
  | code      | Base64 | yes      | no   | submitted code                |
  | message   | string | no       | yes  | message returned by the judge |
Example:
```json
{
"id": "a61d1e78-5d62-4aa6-a1e9-f756174eec26",
"submitter": "IQHV2FVS00",
"judgement": "AC",
"code": "I2luY2x1ZGUgPHN0ZGlvLmg+CmludCBtYWluKHZvaWQpIHsKICBwcmludGYoIkhlbGxvIFdvcmxkISIpOwp9"
}
```
## create.json
  Response for the create Submission API
- `POST /api/submissions`
Response: Object
  | field         | type | required | null | description          |
  |---------------|------|----------|------|----------------------|
  | submission_id | ID   | yes      | no   | ID of the submission |
"""
def render("index.json", assigns), do: assigns.data
def render("show.json", assigns), do: assigns.data
def render("detail.json", assigns), do: assigns.data
def render("create.json", assigns) do
%{
submission_id: assigns.submission_id
}
end
end
|
lib/virgo_web/views/submission_view.ex
| 0.763307
| 0.686035
|
submission_view.ex
|
starcoder
|
defmodule Bolt.Sips.Internals.PackStream.DecoderV2 do
@moduledoc false
_module_doc = """
Bolt V2 has specification for decoding:
- Temporal types:
- Local Date
- Local Time
- Local DateTime
- Time with Timezone Offset
- DateTime with Timezone Id
- DateTime with Timezone Offset
- Duration
- Spatial types:
- Point2D
- Point3D
  For documentation about these types' representation in the Bolt binary format,
please see `Bolt.Sips.Internals.PackStream.EncoderV2`.
Functions from this module are not meant to be used directly.
Use `Decoder.decode(data, bolt_version)` for all decoding purposes.
"""
use Bolt.Sips.Internals.PackStream.Markers
alias Bolt.Sips.Internals.PackStream.Decoder
alias Bolt.Sips.Types.{TimeWithTZOffset, DateTimeWithTZOffset, Duration, Point}
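  # Each decode clause below receives {signature, struct_binary, struct_size}
  # and returns [decoded_value | rest], where rest is any remaining data
  # decoded alongside the struct.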
# Local Date
@spec decode({integer(), binary(), integer()}, integer()) :: list() | {:error, :not_implemented}
def decode({@date_signature, struct, @date_struct_size}, bolt_version) do
{[date], rest} = Decoder.decode_struct(struct, @date_struct_size, bolt_version)
[Date.add(~D[1970-01-01], date) | rest]
end
# Local Time
def decode({@local_time_signature, struct, @local_time_struct_size}, bolt_version) do
{[time], rest} = Decoder.decode_struct(struct, @local_time_struct_size, bolt_version)
[Time.add(~T[00:00:00.000], time, :nanosecond) | rest]
end
# Local DateTime
def decode({@local_datetime_signature, struct, @local_datetime_struct_size}, bolt_version) do
{[seconds, nanoseconds], rest} =
Decoder.decode_struct(struct, @local_datetime_struct_size, bolt_version)
ndt =
NaiveDateTime.add(
~N[1970-01-01 00:00:00.000],
seconds * 1_000_000_000 + nanoseconds,
:nanosecond
)
[ndt | rest]
end
# Time with Zone Offset
def decode({@time_with_tz_signature, struct, @time_with_tz_struct_size}, bolt_version) do
{[time, offset], rest} =
Decoder.decode_struct(struct, @time_with_tz_struct_size, bolt_version)
t = TimeWithTZOffset.create(Time.add(~T[00:00:00.000], time, :nanosecond), offset)
[t | rest]
end
# Datetime with zone Id
def decode(
{@datetime_with_zone_id_signature, struct, @datetime_with_zone_id_struct_size},
bolt_version
) do
{[seconds, nanoseconds, zone_id], rest} =
Decoder.decode_struct(struct, @datetime_with_zone_id_struct_size, bolt_version)
naive_dt =
NaiveDateTime.add(
~N[1970-01-01 00:00:00.000],
seconds * 1_000_000_000 + nanoseconds,
:nanosecond
)
dt = Bolt.Sips.TypesHelper.datetime_with_micro(naive_dt, zone_id)
[dt | rest]
end
# Datetime with zone offset
def decode(
{@datetime_with_zone_offset_signature, struct, @datetime_with_zone_offset_struct_size},
bolt_version
) do
    {[seconds, nanoseconds, zone_offset], rest} =
      Decoder.decode_struct(struct, @datetime_with_zone_offset_struct_size, bolt_version)
naive_dt =
NaiveDateTime.add(
~N[1970-01-01 00:00:00.000],
seconds * 1_000_000_000 + nanoseconds,
:nanosecond
)
dt = DateTimeWithTZOffset.create(naive_dt, zone_offset)
[dt | rest]
end
# Duration
def decode({@duration_signature, struct, @duration_struct_size}, bolt_version) do
{[months, days, seconds, nanoseconds], rest} =
Decoder.decode_struct(struct, @duration_struct_size, bolt_version)
duration = Duration.create(months, days, seconds, nanoseconds)
[duration | rest]
end
# Point2D
def decode({@point2d_signature, struct, @point2d_struct_size}, bolt_version) do
{[srid, x, y], rest} = Decoder.decode_struct(struct, @point2d_struct_size, bolt_version)
point = Point.create(srid, x, y)
[point | rest]
end
# Point3D
def decode({@point3d_signature, struct, @point3d_struct_size}, bolt_version) do
{[srid, x, y, z], rest} = Decoder.decode_struct(struct, @point3d_struct_size, bolt_version)
point = Point.create(srid, x, y, z)
[point | rest]
end
def decode(_, _) do
{:error, :not_implemented}
end
end
|
lib/bolt_sips/internals/pack_stream/decoder_v2.ex
| 0.824956
| 0.635435
|
decoder_v2.ex
|
starcoder
|
defmodule D6 do
@moduledoc """
--- Day 6: Custom Customs ---
As your flight approaches the regional airport where you'll switch to a much larger plane, customs declaration forms are distributed to the passengers.
The form asks a series of 26 yes-or-no questions marked a through z. All you need to do is identify the questions for which anyone in your group answers "yes". Since your group is just you, this doesn't take very long.
However, the person sitting next to you seems to be experiencing a language barrier and asks if you can help. For each of the people in their group, you write down the questions for which they answer "yes", one per line. For example:
abcx
abcy
abcz
In this group, there are 6 questions to which anyone answered "yes": a, b, c, x, y, and z. (Duplicate answers to the same question don't count extra; each question counts at most once.)
  Another group asks for your help, then another, and eventually you've collected answers from every group on the plane (your puzzle input). Each group's answers are separated by a blank line, and within each group, each person's answers are on a single line.
For each group, count the number of questions to which anyone answered "yes". What is the sum of those counts?
--- Part Two ---
As you finish the last group's customs declaration, you notice that you misread one word in the instructions:
You don't need to identify the questions to which anyone answered "yes"; you need to identify the questions to which everyone answered "yes"!
For each group, count the number of questions to which everyone answered "yes". What is the sum of those counts?
"""
@behaviour Day
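  # `input` is assumed to be a list of lines without trailing newlines; blank
  # lines (which parse to empty MapSets) separate the groups.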
def solve(input) do
{part_1, part_2} =
input
|> Enum.map(fn line -> line |> to_charlist |> MapSet.new() end)
|> Enum.chunk_by(fn line -> line == MapSet.new() end)
|> Enum.map(fn [hd | rest] ->
{any, all} =
Enum.reduce(rest, {hd, hd}, fn line, {any, all} ->
{MapSet.union(line, any), MapSet.intersection(line, all)}
end)
{MapSet.size(any), MapSet.size(all)}
end)
|> Enum.reduce(fn {any, all}, {acc_any, acc_all} -> {acc_any + any, acc_all + all} end)
{part_1, part_2}
end
end
|
lib/days/06.ex
| 0.802052
| 0.72783
|
06.ex
|
starcoder
|
defmodule Rummage.Phoenix do
@moduledoc """
`Rummage.Phoenix` is a support framework for `Phoenix` that can be used to manipulate
`Phoenix` collections and `Ecto` models with Search, Sort and Paginate operations.
It accomplishes the above operations by using `Rummage.Ecto`, to paginate `Ecto`
queries and adds Phoenix and HTML support to views and controllers.
Each operation: Search, Sort and Paginate have their hooks defined in `Rummage.Ecto`
and is configurable.
The best part about rummage is that all the three operations: `Search`, `Sort` and
`Paginate` integrate seamlessly and can be configured separately. To check out their
seamless integration, please check the information below.
If you want to check a sample application that uses Rummage, please check
[this link](https://github.com/Excipients/rummage_phoenix_example).
"""
@doc """
`:default_per_page` can also be set at run time
in the `config.exs` file
## Examples
Returns default `per_page` set in the config
(2 in `Rummage.Phoenix`'s test env):
iex> alias Rummage.Phoenix
iex> Phoenix.default_per_page
2
"""
def default_per_page do
config(:default_per_page, Rummage.Ecto.Config.per_page())
end
@doc """
`:default_helpers` can also be set at run time
in the `config.exs` file
## Examples
Returns default `helpers` set in the config
(Rummage.Phoenix.Router.Helpers in `Rummage.Phoenix`'s test env):
iex> alias Rummage.Phoenix
iex> Phoenix.default_helpers
Rummage.Phoenix.Router.Helpers
"""
def default_helpers do
config(:default_helpers, nil)
end
@doc """
`:default_theme` can also be set at run time
in the `config.exs` file
## Examples
Returns default `theme` set in the config
(:bootstrap in `Rummage.Phoenix`'s test env):
iex> alias Rummage.Phoenix
iex> Phoenix.default_theme
:bootstrap
"""
def default_theme do
config(:default_theme, :bootstrap)
end
@doc """
`:default_max_page_links` can also be set at run time
in the `config.exs` file
## Examples
Returns default `default_max_page_links` set in the config
(5 in `Rummage.Phoenix`'s test env):
iex> alias Rummage.Phoenix
iex> Phoenix.default_max_page_links
5
"""
def default_max_page_links do
config(:default_max_page_links, 5)
end
  # Returns the raw config keyword list for Rummage.Phoenix.
  defp config do
Application.get_env(:rummage_phoenix, Rummage.Phoenix, [])
end
@doc """
`config` returns the value associated with the given `key` and returns `default` if
the value is `nil`.
## Examples
Returns value corresponding to config or returns the default value:
iex> alias Rummage.Phoenix
iex> Phoenix.config(:x, "default")
"default"
"""
def config(key, default \\ nil) do
config()
|> Keyword.get(key, default)
|> resolve_config(default)
end
@doc """
  `resolve_system_config` returns the value of the given system environment
  variable, or the specified default if the variable is not set
## Examples
Returns value corresponding to a system variable config or returns the default value:
iex> alias Rummage.Phoenix
iex> Phoenix.resolve_system_config({:system, "some random config"}, "default")
"default"
"""
  @spec resolve_system_config({:system, String.t()}, term) :: term
def resolve_system_config({:system, var_name}, default) do
System.get_env(var_name) || default
end
  # Resolve {:system, var} tuples via resolve_system_config/2; any other value
  # is returned as-is.
  defp resolve_config({:system, _var_name} = system_var, default),
    do: resolve_system_config(system_var, default)
  defp resolve_config(value, _default), do: value
end
|
lib/rummage_phoenix.ex
| 0.768212
| 0.692967
|
rummage_phoenix.ex
|
starcoder
|
defmodule Mix.Tasks.Ecto.Squash do
use Mix.Task
require Logger
import Mix.Generator
import Mix.Ecto
import Mix.EctoSQL
@shortdoc "Squashes several migrations into one"
@aliases [
r: :repo,
t: :to,
y: :yes
]
@switches [
to: :integer,
yes: :boolean,
repo: [:string, :keep],
migrations_path: :string,
no_compile: :boolean,
no_deps_check: :boolean
# XXX: No support for prefix yet.
# prefix: :string,
]
@moduledoc """
  Replaces several migrations with a single SQL-based migration that applies
  the schema, plus a second migration that makes sure all of the squashed
  migrations (and nothing else) have been applied before migrating further.
## Examples
  Squash migrations up to and including 20210601033528 into a single one:
mix ecto.squash --to 20210601033528
mix ecto.squash --to 20210601033528 -r Custom.Repo
The repository must be set under `:ecto_repos` in the
current app configuration or given via the `-r` option.
  The SQL migration's filename is prefixed with the timestamp of the latest
  squashed migration, so it won't be applied when the squashed migrations are
  already present. The other generated migration gets a timestamp one second
  later.
By default, the migration will be generated to the
"priv/YOUR_REPO/migrations" directory of the current application
but it can be configured to be any subdirectory of `priv` by
specifying the `:priv` key under the repository configuration.
## Command line options
  * `--to VERSION` - squash migrations up to and including VERSION
* `-y`, `--yes` - migrate to specified version, remove squashed migrations
and migrate to latest version without asking to confirm actions
* `-r REPO`, `--repo REPO` - the REPO to generate migration for
* `--migrations-path PATH` - the PATH to run the migrations from,
defaults to `priv/repo/migrations`
* `--no-compile` - does not compile applications before running
* `--no-deps-check` - does not check dependencies before running
## Configuration
If `:ecto_sql` app configuration specifies a custom migration module,
the generated migration code will use that rather than the default
`Ecto.Migration`:
config :ecto_sql, migration_module: MyApplication.CustomMigrationModule
"""
@impl true
def run(args) do
[repo, opts] = parse_args(args)
# Start ecto_sql explicitly before as we don't need
# to restart those apps if migrated.
{:ok, _} = Application.ensure_all_started(:ecto_sql)
ensure_repo(repo, args)
to = opts[:to]
migrate_opts =
["-r", inspect(repo)] ++
if path = opts[:migrations_path], do: ["--migrations-path", path], else: []
if yes?(opts, "Migrate to #{to}? (Mandatory to proceed)") do
migrate_to(repo, migrate_opts, to)
else
Logger.warn("Need to apply migrations to proceed.")
exit(:normal)
end
migrations = get_migrations(repo)
path = opts[:migrations_path] || Path.join(source_repo_priv(repo), "migrations")
unless File.dir?(path), do: create_directory(path)
remove_squashed_migrations(path, migrations, opts)
squash_path = create_squash_migration(path, repo, to)
EctoSquash.Postgres.structure_dump(path, repo.config())
checker_path = create_checker_migration(path, repo, migrations, to)
[squash_path, checker_path]
end
defp parse_args(args) do
repo =
case parse_repo(args) do
[repo] ->
repo
[repo | _] ->
Mix.raise(
"repo ambiguity: several repos available - " <>
"please specify which repo to use with -r, " <>
"e.g. -r #{inspect(repo)}"
)
end
case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do
{opts, []} ->
opts[:to] ||
Mix.raise(
"`--to` option is mandatory, which is stupid and hopefully will be fixed, " <>
"got: #{inspect(Enum.join(args, " "))}"
)
[repo, opts]
{_, _} ->
Mix.raise(
"ecto.squash supports no arguments, " <>
"got: #{inspect(Enum.join(args, " "))}"
)
end
end
defp yes?(opts, question) do
opts[:yes] || Mix.shell().yes?(question)
end
defp migrate_to(repo, migrate_opts, to) do
migrate_opts_to = migrate_opts ++ ["--to"]
# Migrate forward if we're behind.
Mix.Task.run("ecto.migrate", migrate_opts_to ++ [Integer.to_string(to)])
# Migrate backwards if we're ahead.
# XXX: ecto.rollback rolls back migration specified with `--to` as well.
# Offset index +1 to keep that migration.
migrations = get_migrations(repo)
index = migrations |> Enum.find_index(fn {_, id, _} -> id == to end)
case Enum.at(migrations, index + 1) do
{_dir, next_migration_id, _name} ->
Mix.Task.run("ecto.rollback", migrate_opts_to ++ [Integer.to_string(next_migration_id)])
# Migration is nil when squashing all migrations.
nil ->
nil
end
end
defp get_migrations(repo) do
{:ok, migrations, _apps} =
Ecto.Migrator.with_repo(repo, fn repo ->
Ecto.Migrator.migrations(repo)
end)
migrations
end
defp migration_module do
case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do
migration_module when is_atom(migration_module) -> migration_module
other -> Mix.raise("Expected :migration_module to be a module, got: #{inspect(other)}")
end
end
defp remove_squashed_migrations(path, migrations, opts) do
rm_list =
migrations
|> Enum.map(fn {_dir, id, _name} -> id end)
|> Enum.filter(fn id -> id <= opts[:to] end)
|> Enum.flat_map(fn id -> Path.wildcard(Path.join(path, "#{id}_*.exs")) end)
    if yes?(
         opts,
         "Remove squashed migrations up to and including #{opts[:to]} (#{length(rm_list)})?"
       ) do
Enum.each(rm_list, fn path -> File.rm!(path) end)
end
end
defp create_squash_migration(path, repo, to) do
# ID matches that of the last migration squashed to prevent newly created
# migration from being applied, since all migrations it contains
# are already applied.
file = Path.join(path, "#{to}_apply_squashed_migrations.exs")
assigns = [mod: Module.concat([repo, Migrations, SquashMigrations]), repo: repo]
create_file(file, sql_migration_template(assigns))
file
end
defp create_checker_migration(path, repo, migrations, to) do
file = Path.join(path, "#{to + 1}_ensure_migrated_squash.exs")
ids =
migrations
|> Enum.filter(fn {dir, _id, _name} -> dir == :up end)
|> Enum.map(fn {_dir, id, _name} -> id end)
|> Enum.sort()
assigns = [
mod: Module.concat([repo, Migrations, EnsureMigratedSquash]),
repo: repo,
migration_ids: ids
]
create_file(file, checker_migration_template(assigns))
file
end
embed_template(:sql_migration, """
defmodule <%= inspect @mod %> do
use <%= inspect migration_module() %>
def up do
repo = <%= inspect @repo %>
{:ok, _path} = repo.__adapter__.structure_load(__DIR__, repo.config())
end
end
""")
embed_template(:checker_migration, ~S"""
defmodule <%= inspect @mod %> do
use <%= inspect migration_module() %>
alias Ecto.Migration.SchemaMigration
def up do
needed_migrations = MapSet.new(
<%= inspect @migration_ids, limit: :infinity, pretty: true %>
)
repo = <%= inspect @repo %>
# XXX: No support for prefix yet.
{migration_repo, query, all_opts} = SchemaMigration.versions(repo, repo.config(), nil)
has_migrations = migration_repo.all(query, all_opts)
|> MapSet.new()
if needed_migrations != has_migrations do
raise "Missing migrations: #{inspect MapSet.difference(needed_migrations, has_migrations)}
extra migrations: #{inspect MapSet.difference(has_migrations, needed_migrations)}"
end
end
def down do
end
end
""")
end
|
lib/mix/tasks/ecto.squash.ex
| 0.710929
| 0.451931
|
ecto.squash.ex
|
starcoder
|
defmodule SseParser do
@moduledoc """
  Server-sent event parser according to the W3C spec, using NimbleParsec.
ref: https://www.w3.org/TR/2009/WD-eventsource-20090421
ABNF:
```abnf
stream = [ bom ] *event
event = *( comment / field ) end-of-line
comment = colon *any-char end-of-line
field = 1*name-char [ colon [ space ] *any-char ] end-of-line
end-of-line = ( cr lf / cr / lf / eof )
eof = < matches repeatedly at the end of the stream >
; characters
lf = %x000A ; U+000A LINE FEED
cr = %x000D ; U+000D CARRIAGE RETURN
space = %x0020 ; U+0020 SPACE
colon = %x003A ; U+003A COLON
bom = %xFEFF ; U+FEFF BYTE ORDER MARK
name-char = %x0000-0009 / %x000B-000C / %x000E-0039 / %x003B-10FFFF
; a Unicode character other than U+000A LINE FEED, U+000D CARRIAGE RETURN, or U+003A COLON
any-char = %x0000-0009 / %x000B-000C / %x000E-10FFFF
; a Unicode character other than U+000D CARRIAGE RETURN or U+003A COLON
```
"""
alias SseParser.{Event, Stream}
@type field() :: {String.t(), String.t() | nil}
@type comment() :: String.t()
@type event() :: [field() | comment()]
@type error() :: {:error, String.t(), String.t(), map(), {integer(), integer()}, integer()}
import NimbleParsec
lf = utf8_char([0x0A])
cr = utf8_char([0x0D])
space = utf8_char([0x20])
colon = utf8_char([0x3A])
name_char =
choice([
utf8_char([0x00..0x09]),
utf8_char([0x0B..0x0C]),
utf8_char([0x0E..0x39]),
utf8_char([0x3B..0x10FFFF])
])
any_char =
choice([
utf8_char([0x00..0x09]),
utf8_char([0x0B..0x0C]),
utf8_char([0x0E..0x10FFFF])
])
stream_char =
choice([
any_char,
space,
colon
])
end_of_line =
choice([
concat(cr, lf),
cr,
lf
])
field_name =
name_char
|> times(min: 1)
|> tag(:name)
|> post_traverse({:stringify, []})
field_value =
colon
|> ignore()
|> optional(ignore(space))
|> times(any_char, min: 1)
|> tag(:value)
|> post_traverse({:stringify, []})
field =
concat(
field_name,
optional(field_value)
)
|> tag(:field)
|> ignore(end_of_line)
comment =
ignore(colon)
|> repeat(any_char)
|> ignore(end_of_line)
|> tag(:comment)
|> post_traverse({:stringify, []})
event =
repeat(choice([comment, field]))
|> ignore(end_of_line)
|> tag(:event)
|> post_traverse({:escape_event, []})
|> repeat()
|> eos()
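  # The stream combinator below consumes only complete events (text terminated
  # by two end-of-line markers); whatever follows the last blank line is left
  # as the unparsed rest for the next chunk.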
stream =
choice([
stream_char,
space,
colon,
concat(end_of_line, stream_char)
])
|> repeat()
|> concat(end_of_line)
|> concat(end_of_line)
|> repeat()
defparsecp(:event_parser, event)
defparsecp(:stream_parser, stream)
@doc ~S"""
  Parses a string into SSE events, returning the parsed events and the unparsed
  part of the input; the unparsed part can be fed back in when the next chunk
  of the SSE stream arrives.
## Examples
iex> SseParser.feed(":Order 3 submitted\nevent: order-submitted\nreference: order 3\n\n")
{:ok, [["Order 3 submitted", {"event", "order-submitted"}, {"reference", "order 3"}]], ""}
iex> SseParser.feed(":Test event")
{:ok, [], ":Test event"}
iex> {:ok, [], rest} = SseParser.feed(":Test event")
iex> {:ok, [], rest} = SseParser.feed(rest <> "\nname: test")
iex> SseParser.feed(rest <> "\n\n")
{:ok, [["Test event", {"name", "test"}]], ""}
"""
@type feed_error() :: {:error, String.t(), String.t(), map(), {integer(), integer()}, integer()}
@type feed_success() :: {:ok, [event()], String.t()}
@spec feed(String.t()) :: feed_success() | feed_error()
def feed(data) do
with {:ok, stream, rest, _context, _link, _column} <- stream_parser(data),
{:ok, events, _rest, _context, _link, _column} <- stream |> to_string() |> event_parser() do
{:ok, events, rest}
end
end
@doc """
  Interprets a parsed event stream according to the standard:
  - comments are ignored
  - the id field is reduced to the last received value
  - the event field is reduced to the last received value
  - the retry field is reduced to the last received value that is an integer
  - data fields are joined by newline
  - any other field is ignored
  In every case, a field without a value (`{name, nil}`) is ignored.
## Examples
iex> SseParser.interpret([[{"data", "d1"}, {"data", "d2"}, {"event", "put"}, {"event", "patch"}, {"event", nil}]])
[%SseParser.Event{event: "patch", data: "d1\\nd2"}]
"""
@spec interpret([event()]) :: [Event.t()]
def interpret(events) do
Enum.map(events, fn parts ->
Enum.reduce(parts, %Event{}, fn
{"id", id}, event when is_bitstring(id) and bit_size(id) > 0 ->
%Event{event | id: id}
{"event", name}, event when is_bitstring(name) and bit_size(name) > 0 ->
%Event{event | event: name}
{"data", data}, event when is_bitstring(data) and bit_size(data) > 0 ->
interpret_data(event, data)
{"retry", interval}, event when is_bitstring(interval) and bit_size(interval) > 0 ->
interpret_interval(event, interval)
_, event ->
event
end)
end)
end
@doc ~S"""
  Reduces events to update the stream state.
## Examples
iex> SseParser.streamify(%Stream{}, [
iex> %SseParser.Event{
iex> id: "1",
iex> event: "a"
iex> },
iex> %SseParser.Event{
iex> event: "b",
iex> retry: 2345
iex> }
iex>])
{
[
%SseParser.Event{id: "1", event: "a"},
%SseParser.Event{id: "1", event: "b", retry: 2345}
],
%SseParser.Stream{last_event_id: "1", retry: 2345}
}
"""
@doc since: "3.1.0"
@spec streamify(Stream.t(), [Event.t()]) :: {[Event.t()], Stream.t()}
def streamify(stream, events) do
stream =
Enum.reduce(events, stream, fn event, stream ->
if is_nil(event.retry) do
stream
else
%Stream{stream | retry: event.retry}
end
end)
Enum.map_reduce(events, stream, fn event, stream ->
if is_nil(event.id) do
{%Event{event | id: stream.last_event_id}, stream}
else
{event, %Stream{stream | last_event_id: event.id}}
end
end)
end
@doc ~S"""
  First feeds data to the parser and then interprets it; see `SseParser.feed/1`
  and `SseParser.interpret/1`.
  ## Examples
iex> SseParser.feed_and_interpret(":Order 3 submitted\nevent: order-submitted\nid: 3\n\n")
{:ok, [%SseParser.Event{id: "3", event: "order-submitted"}], ""}
"""
@type feed_and_interpret_success() :: {:ok, [Event.t()], String.t()}
@spec feed_and_interpret(String.t()) :: feed_and_interpret_success() | feed_error()
def feed_and_interpret(data) do
with {:ok, events, buffer} <- feed(data) do
{:ok, interpret(events), buffer}
end
end
@doc ~S"""
  Applies feed, interpret, and streamify to a string.
  ## Examples
iex> SseParser.feed_interpret_stream("id: a\nevent: b\ndata: c\n\nevent: d\n\n", %Stream{})
{
:ok,
[
%SseParser.Event{
id: "a",
event: "b",
data: "c"
},
%SseParser.Event{
id: "a",
event: "d"
}
],
"",
%SseParser.Stream{
last_event_id: "a"
}
}
"""
@doc since: "3.1.0"
@type feed_interpret_stream_success() :: {:ok, [Event.t()], String.t(), Stream.t()}
@spec feed_interpret_stream(String.t(), Stream.t()) ::
feed_interpret_stream_success() | feed_error()
def feed_interpret_stream(data, stream) do
with {:ok, events, buffer} <- feed_and_interpret(data),
{events, stream} <- streamify(stream, events) do
{:ok, events, buffer, stream}
end
end
defp stringify(_rest, args, context, _line, _offset) do
args = Enum.map(args, &{elem(&1, 0), &1 |> elem(1) |> to_string()})
{args, context}
end
defp escape_event(_rest, [event: parts], context, _line, _offset) do
parts =
Enum.map(parts, fn
{:comment, comment} -> comment
{:field, [name: name]} -> {name, nil}
{:field, [name: name, value: value]} -> {name, value}
end)
{[parts], context}
end
defp interpret_interval(event, interval) do
case Integer.parse(interval) do
{interval, ""} -> %{event | retry: interval}
_ -> event
end
end
defp interpret_data(event, data) do
case event do
%Event{data: d} when d in [nil, ""] ->
%Event{event | data: data}
event ->
%Event{event | data: "#{event.data}\n#{data}"}
end
end
end
|
lib/sse_parser.ex
| 0.813831
| 0.570511
|
sse_parser.ex
|
starcoder
|
defmodule ExWire.Packet.Capability.Eth.Status do
@moduledoc """
Status messages establish a proper Eth Wire connection, and verify the two
  clients are compatible.
```
**Status** [`+0x00`: `P`, `protocolVersion`: `P`, `networkId`: `P`,
`td`: `P`, `bestHash`: `B_32`, `genesisHash`: `B_32`]
Inform a peer of its current ethereum state. This message should be sent
after the initial handshake and prior to any ethereum related messages.
* `protocolVersion` is one of:
* `0x00` for PoC-1;
* `0x01` for PoC-2;
* `0x07` for PoC-3;
* `0x09` for PoC-4.
* `0x17` for PoC-5.
* `0x1c` for PoC-6.
* `61` for PV61
* `62` for PV62
* `63` for PV63
* `networkId`: 0=Olympic (disused), 1=Frontier (mainnet), 2=Morden (disused),
3=Ropsten (testnet), 4=Rinkeby
* `td`: Total Difficulty of the best chain. Integer, as found in block header.
* `bestHash`: The hash of the best (i.e. highest TD) known block.
* `genesisHash`: The hash of the Genesis block.
```
"""
alias Blockchain.Chain
alias ExWire.Bridge.Sync
require Logger
@behaviour ExWire.Packet
@sync Application.get_env(:ex_wire, :sync_mock, Sync)
@network_id_to_chain_name %{
0 => :olympic,
1 => :foundation,
2 => :morden,
3 => :ropsten,
4 => :rinkeby
}
@type t :: %__MODULE__{
protocol_version: integer(),
network_id: integer(),
total_difficulty: integer(),
best_hash: binary(),
genesis_hash: binary()
}
defstruct [
:protocol_version,
:network_id,
:total_difficulty,
:best_hash,
:genesis_hash
]
@doc """
Create a Status packet to return
Note: we are currently reflecting values based on the packet received, but
that should not be the case. We should provide the total difficulty of the
best chain found in the block header, the best hash, and the genesis hash of
our blockchain.
"""
@spec new(integer, binary, binary) :: t()
def new(total_difficulty, genesis_hash, best_hash) do
%__MODULE__{
protocol_version: ExWire.Config.protocol_version(),
network_id: ExWire.Config.chain().params.network_id,
total_difficulty: total_difficulty,
best_hash: best_hash,
genesis_hash: genesis_hash
}
end
@doc """
Returns the relative message id offset for this message.
This will help determine what its message ID is relative to other Packets in the same Capability.
"""
@impl true
@spec message_id_offset() :: 0
def message_id_offset do
0x00
end
@doc """
Given a Status packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.Capability.Eth.Status{protocol_version: 0x63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
...> |> ExWire.Packet.Capability.Eth.Status.serialize
[0x63, 3, 10, <<5>>, <<4>>]
"""
@impl true
@spec serialize(t) :: ExRLP.t()
def serialize(packet = %__MODULE__{}) do
[
packet.protocol_version,
packet.network_id,
packet.total_difficulty,
packet.best_hash,
packet.genesis_hash
]
end
@doc """
Given an RLP-encoded Status packet from Eth Wire Protocol, decodes into a
Status packet.
## Examples
iex> ExWire.Packet.Capability.Eth.Status.deserialize([<<0x63>>, <<3>>, <<10>>, <<5>>, <<4>>])
%ExWire.Packet.Capability.Eth.Status{protocol_version: 0x63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
"""
@impl true
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[
protocol_version,
network_id,
total_difficulty,
best_hash,
genesis_hash
] = rlp
%__MODULE__{
protocol_version: :binary.decode_unsigned(protocol_version),
network_id: :binary.decode_unsigned(network_id),
total_difficulty: :binary.decode_unsigned(total_difficulty),
best_hash: best_hash,
genesis_hash: genesis_hash
}
end
@doc """
Handles a Status message.
We should decide whether or not we want to continue communicating with
this peer. E.g. do our network and protocol versions match?
"""
@impl true
@spec handle(ExWire.Packet.packet()) :: ExWire.Packet.handle_response()
def handle(packet = %__MODULE__{}) do
if packet.protocol_version == ExWire.Config.protocol_version() do
Exth.trace(fn -> "[Packet] Got Status: #{inspect(packet)}" end)
{total_difficulty, genesis_hash, block_hash} =
case @sync.get_best_block_and_chain() do
{:ok, block, chain} ->
{block.header.difficulty, chain.genesis.parent_hash, block.block_hash}
{:error, error} ->
_ = Logger.debug(fn -> "Error calling Sync.get_best_block_hash #{error}" end)
get_default_difficulty_genesis_hash_and_best_hash(packet)
end
{:send, new(total_difficulty, genesis_hash, block_hash)}
else
# TODO: We need to follow up on disconnection packets with disconnection
# ourselves
_ =
        Logger.debug(fn ->
          "[Packet] Disconnecting due to incompatible protocol version (them #{
            packet.protocol_version
          }, us: #{ExWire.Config.protocol_version()})"
end)
{:disconnect, :useless_peer}
end
end
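  # Fallback when the sync bridge is unavailable: load the chain matching the
  # peer's network id and report its genesis as our best block, or mirror the
  # peer's own values if the chain cannot be loaded.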
defp get_default_difficulty_genesis_hash_and_best_hash(packet) do
chain = Chain.load_chain(@network_id_to_chain_name[packet.network_id])
{0, chain.genesis.parent_hash, chain.genesis.parent_hash}
rescue
_ ->
{packet.total_difficulty, packet.genesis_hash, packet.genesis_hash}
end
end
|
apps/ex_wire/lib/ex_wire/packet/capability/eth/status.ex
| 0.833731
| 0.832645
|
status.ex
|
starcoder
|
defmodule VistaClient.Extractors do
@moduledoc """
  Helper functions to extract and convert certain attributes from a parsed JSON
map. For example, IDs are transmitted as strings by VISTA, so we're converting
them to integers.
## Synopsis
iex> VistaClient.Extractors.extract_id %{"ID" => "1001"}
{:ok, 1001}
iex> VistaClient.Extractors.extract_id %{"id" => 1001}
{:error, :unparsable_id}
"""
@doc "Extracts the id from a map with an \"ID\" field containing an ID string"
@type id :: integer()
@type map_with_id :: %{String.t() => String.t()}
@spec extract_id(map_with_id, String.t()) :: {:ok, id()} | {:error, :unparsable_id}
def extract_id(map, id_field \\ "ID") do
with {:ok, id_string} when is_binary(id_string) <- Map.fetch(map, id_field),
{id_int, _} <- Integer.parse(id_string) do
{:ok, id_int}
else
      _ -> {:error, :unparsable_id}
end
end
defp version_priority("OmU"), do: 3
defp version_priority("OV"), do: 2
defp version_priority("OmUeng"), do: 1
defp version_priority(_), do: 0
  defp version_string_for(attributes) do
    attributes
    |> Enum.map(fn v -> {v, version_priority(v)} end)           # add priorities
    |> Enum.reject(fn {_v, prio} -> prio == 0 end)              # drop unknown versions
    |> Enum.max_by(fn {_v, prio} -> prio end, fn -> :none end)  # highest-priority element
    |> case do
      {version, _prio} -> version
      :none -> ""
    end
  end
@doc """
  Given the \"SessionAttributesNames\" field from a Session map, takes the list and
  returns a tuple containing the attributes as-is and a version string.
The version string is the attribute with highest priority in the list of known
version attributes. Unknown attributes will be discarded.
"""
@type version :: String.t()
@type attributes :: list(String.t())
@type map_with_atttrs :: %{required(String.t()) => attributes}
@spec extract_attributes(map_with_atttrs) :: {:ok, {version, attributes}} | {:error, :unparsable_session_attributes}
def extract_attributes(map) do
with {:ok, attributes} when is_list(attributes) <- Map.fetch(map, "SessionAttributesNames"),
version_string <- version_string_for(attributes) do
{:ok, {version_string, attributes}}
else
      _ -> {:error, :unparsable_session_attributes}
end
end
@doc """
  Naive string-to-date conversion. Assumes lack of timezone info.
## Examples
iex> #{__MODULE__}.extract_date("2019-02-26T20:00:00")
{:ok, ~D[2019-02-26]}
"""
def extract_date(map = %{}, key) when is_binary(key) do
with {:ok, value} <- Map.fetch(map, key) do
extract_date(value)
else
:error -> {:error, {:key_not_found, key}}
end
end
def extract_date(datetime = %DateTime{}) do
{:ok, DateTime.to_date(datetime)}
end
def extract_date(datetime = %NaiveDateTime{}) do
{:ok, NaiveDateTime.to_date(datetime)}
end
def extract_date(string) when is_binary(string) do
with {:ok, dt} <- extract_datetime(string),
result <- extract_date(dt), do: result
end
@doc """
Naive string-to-datetime conversion. Assumes lack of timezone info.
## Examples
iex> #{__MODULE__}.extract_datetime("2019-02-26T20:00:00")
{:ok, ~N[2019-02-26 20:00:00]}
"""
def extract_datetime(map = %{}, key) when is_binary(key) do
with {:ok, value} <- Map.fetch(map, key) do
extract_datetime(value)
else
:error -> {:error, {:key_not_found, key}}
end
end
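  # DateTime.from_iso8601/1 requires a UTC offset, so a fake "+00:00" is
  # appended and the result is converted back to a NaiveDateTime.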
def extract_datetime(string) when is_binary(string) do
with {:ok, dt, _offset} <- DateTime.from_iso8601(string <> "+00:00"),
naive_dt <- DateTime.to_naive(dt) do
{:ok, naive_dt}
end
end
end
|
lib/structs/extractors.ex
| 0.804943
| 0.474936
|
extractors.ex
|
starcoder
|
defmodule Commanded.Aggregate.Multi do
@moduledoc """
Use `Commanded.Aggregate.Multi` to generate multiple events from a single
command.
This can be useful when you want to emit multiple events that depend upon the
aggregate state being updated.
## Example
In the example below, money is withdrawn from the bank account and the
updated balance is used to check whether the account is overdrawn.
defmodule BankAccount do
defstruct [
account_number: nil,
balance: 0,
state: nil,
]
alias Commanded.Aggregate.Multi
def withdraw(
%BankAccount{state: :active} = account,
%WithdrawMoney{amount: amount})
when is_number(amount) and amount > 0
do
account
|> Multi.new()
|> Multi.execute(&withdraw_money(&1, amount))
|> Multi.execute(&check_balance/1)
end
defp withdraw_money(%BankAccount{account_number: account_number, balance: balance}, amount) do
%MoneyWithdrawn{
account_number: account_number,
amount: amount,
balance: balance - amount
}
end
defp check_balance(%BankAccount{account_number: account_number, balance: balance})
when balance < 0
do
%AccountOverdrawn{account_number: account_number, balance: balance}
end
defp check_balance(%BankAccount{}), do: []
end
"""
alias Commanded.Aggregate.Multi
@type t :: %__MODULE__{
aggregate: struct(),
executions: list(function()),
}
defstruct [
aggregate: nil,
executions: [],
]
@doc """
Create a new `Commanded.Aggregate.Multi` struct.
"""
@spec new(aggregate :: struct()) :: Multi.t
def new(aggregate), do: %Multi{aggregate: aggregate}
@doc """
Adds a command execute function to the multi.
"""
@spec execute(Multi.t, function()) :: Multi.t
def execute(%Multi{executions: executions} = multi, execute_fun)
when is_function(execute_fun)
do
%Multi{multi |
executions: [execute_fun | executions],
}
end
@doc """
Run the execute functions contained within the multi, returning the updated
aggregate state and any created events.
"""
@spec run(Multi.t) :: {aggregate :: struct(), list(event :: struct())} | {:error, reason :: any()}
def run(%Multi{aggregate: aggregate, executions: executions}) do
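    # throw/catch short-circuits the reduction as soon as any execute function
    # returns an error tuple.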
try do
executions
|> Enum.reverse()
|> Enum.reduce({aggregate, []}, fn (execute_fun, {aggregate, events}) ->
pending_events =
case execute_fun.(aggregate) do
{:error, _reason} = error -> throw(error)
pending_events -> List.wrap(pending_events)
end
{apply_events(aggregate, pending_events), events ++ pending_events}
end)
catch
{:error, _reason} = error -> error
end
end
defp apply_events(aggregate, events) do
Enum.reduce(events, aggregate, &aggregate.__struct__.apply(&2, &1))
end
end
|
lib/commanded/aggregates/multi.ex
| 0.89301
| 0.607721
|
multi.ex
|
starcoder
|
defmodule Square.Locations do
@moduledoc """
Documentation for `Square.Locations`.
"""
@doc """
Provides information of all locations of a business.
Most other Connect API endpoints have a required `location_id` path parameter.
The `id` field of the `Location` map returned by this endpoint correspond to that `location_id` parameter.
```
def list_locations(client)
```
### Response Type
[`List Locations Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/list-locations-response.md)
### Example Usage
iex> Square.client |> Square.Locations.list_locations()
"""
@spec list_locations(Tesla.Client.t()) :: {:error, any} | {:ok, Tesla.Env.t()}
def list_locations(client), do: Tesla.get(client, "locations")
@doc """
Creates a location.
For more information about locations, see [Locations API Overview](https://developer.squareup.com/docs/locations-api).
```
def create_location(client, body)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `body` | [`Create Location Request Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/create-location-request.md) | Body, Required | A map containing the fields to POST for the request.<br><br>See the corresponding map definition for field details. |
### Response Type
[`Create Location Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/create-location-response.md)
### Example Usage
  iex> body = %{
location: %{
name: "New location name",
description: "My new location",
address: %{
address_line_1: "1234 Peachtree St. NE",
administrative_district_level_1: "GA",
locality: "Atlanta",
postal_code: "30309"
}
}
}
iex> Square.client |> Square.Locations.create_location(body)
"""
@spec create_location(Tesla.Client.t(), map) :: {:error, any} | {:ok, Tesla.Env.t()}
def create_location(client, body \\ %{}), do: Tesla.post(client, "locations", body)
@doc """
Retrieves details of a location.
```
def retrieve_location(client, location_id)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `location_id` | `String` | Template, Required | The ID of the location to retrieve. |
### Response Type
[`Retrieve Location Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/retrieve-location-response.md)
### Example Usage
iex> location_id = "location_id4"
iex> Square.client |> Square.Locations.retrieve_location(location_id)
"""
@spec retrieve_location(Tesla.Client.t(), binary) :: {:error, any} | {:ok, Tesla.Env.t()}
def retrieve_location(client, location_id), do: Tesla.get(client, "locations/#{location_id}")
@doc """
Updates a location.
```
def update_location(client, location_id, body)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `location_id` | `String` | Template, Required | The ID of the location to update. |
| `body` | [`Update Location Request Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/update-location-request.md) | Body, Required | A map containing the fields to POST for the request.<br><br>See the corresponding map definition for field details. |
### Response Type
[`Update Location Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/update-location-response.md)
### Example Usage
iex> location_id = "location_id4"
iex> body = %{
location: %{
name: "New location name",
description: "Updated location",
address: %{
address_line_1: "1234 Peachtree St. NE",
administrative_district_level_1: "GA",
locality: "Atlanta",
postal_code: "30309"
},
      business_hours: %{
        periods: [
          %{ day_of_week: "MON", start_local_time: "09:00", end_local_time: "17:00" }
        ]
      },
twitter_username: "twitter",
instagram_username: "instagram"
}
}
iex> Square.client |> Square.Locations.update_location(location_id, body)
"""
@spec update_location(Tesla.Client.t(), binary, map) :: {:error, any} | {:ok, Tesla.Env.t()}
def update_location(client, location_id, body \\ %{}),
do: Tesla.put(client, "locations/#{location_id}", body)
end
|
lib/api/locations_api.ex
| 0.919967
| 0.831006
|
locations_api.ex
|
starcoder
|
defmodule Crux.Structs.Presence do
@moduledoc """
Represents a Discord [Presence Object](https://discord.com/developers/docs/topics/gateway#presence-update-presence-update-event-fields).
  Differences compared to the Discord API object:
- `:user` is just the user id
"""
@moduledoc since: "0.1.0"
@behaviour Crux.Structs
alias Crux.Structs
alias Crux.Structs.{Emoji, Snowflake, User, Util}
alias Crux.Structs.Presence.ActivityFlags
require Snowflake
defstruct [
:user,
:guild_id,
:status,
:activities,
:client_status
]
@typedoc """
Represents an [Activity Object](https://discord.com/developers/docs/topics/gateway#activity-object).
"""
@typedoc since: "0.2.3"
@type activity :: %{
required(:name) => String.t(),
required(:type) => integer(),
optional(:url) => nil | String.t(),
required(:created_at) => integer(),
optional(:timestamps) => %{start: integer(), stop: integer()},
optional(:application_id) => Snowflake.t(),
optional(:details) => String.t() | nil,
optional(:state) => String.t() | nil,
optional(:emoji) => Emoji.t() | nil,
optional(:party) => %{id: String.t(), size: [integer()]},
optional(:assets) => %{
large_image: String.t(),
large_text: String.t(),
small_image: String.t(),
small_text: String.t()
},
optional(:secrets) => %{
join: String.t(),
spectate: String.t(),
match: String.t()
},
optional(:instance) => boolean(),
optional(:flags) => ActivityFlags.t()
}
@typedoc since: "0.1.0"
@type t :: %__MODULE__{
user: Snowflake.t(),
guild_id: Snowflake.t() | nil,
status: String.t(),
activities: [activity()],
client_status: %{required(atom()) => atom()}
}
@typedoc """
All available types that can be resolved into a user id.
"""
@typedoc since: "0.2.1"
@type id_resolvable :: User.id_resolvable()
@doc """
Resolves the id of a `t:Crux.Structs.Presence.t/0`
> Automatically invoked by `Crux.Structs.resolve_id/2`
```elixir
iex> %Crux.Structs.Presence{user: 218348062828003328}
...> |> Crux.Structs.Presence.resolve_id()
218348062828003328
```
For more examples see `Crux.Structs.User.resolve_id/1`
"""
@doc since: "0.2.1"
@spec resolve_id(id_resolvable()) :: Snowflake.t() | nil
defdelegate resolve_id(resolvable), to: User
@doc """
Creates a `t:Crux.Structs.Presence.t/0` struct from raw data.
> Automatically invoked by `Crux.Structs.create/2`.
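A minimal sketch with a hand-written payload (values are illustrative):
```elixir
iex> Crux.Structs.Presence.create(%{
...>   "user" => %{"id" => "218348062828003328"},
...>   "status" => "online",
...>   "activities" => []
...> })
%Crux.Structs.Presence{
  user: 218348062828003328,
  guild_id: nil,
  status: "online",
  activities: [],
  client_status: nil
}
```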
"""
@doc since: "0.1.0"
@spec create(data :: map()) :: t()
def create(data) do
presence =
data
|> Util.atomify()
|> Map.update!(:user, Util.map_to_id())
|> Map.update(:guild_id, nil, &Snowflake.to_snowflake/1)
|> Map.update(:activities, nil, fn activities ->
Enum.map(activities, &create_activity/1)
end)
struct(__MODULE__, presence)
end
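  # Normalize a raw activity map one field at a time: convert a string
  # application id to a snowflake, resolve non-integer flags, then build the
  # emoji struct. The remaining clauses return the activity unchanged.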
defp create_activity(%{application_id: application_id} = activity)
when not Snowflake.is_snowflake(application_id) do
activity
|> Map.update!(:application_id, &Snowflake.to_snowflake/1)
|> create_activity()
end
defp create_activity(%{flags: flags} = activity)
when not is_integer(flags) do
activity
|> Map.update!(:flags, &ActivityFlags.resolve/1)
|> create_activity()
end
defp create_activity(%{emoji: nil} = activity) do
activity
end
defp create_activity(%{emoji: _emoji} = activity) do
Map.update!(activity, :emoji, &Structs.create(&1, Emoji))
end
defp create_activity(activity) do
activity
end
end
lib/structs/presence.ex
defmodule Clover.Robot do
@moduledoc """
A Robot.
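Robots receive incoming messages from an adapter and dispatch them to
matching scripts. A minimal sketch (module and pattern are illustrative;
the block's return shape is assumed to be `{message, data}` and is
ultimately defined by `Clover.Script`):
```elixir
defmodule MyRobot do
  use Clover.Robot

  # Reply with "pong" whenever a directed message matches ~r/ping/i.
  respond ~r/ping/i, msg, _match, data do
    {say(msg, "pong"), data}
  end
end
```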
"""
use GenStateMachine, callback_mode: [:handle_event_function, :state_enter]
@callback handle_connected(connection_state :: map, data :: data()) ::
{:ok, data()} | {:error, Clover.Error.t()}
@callback init(arg :: any, data :: any) :: {:ok, data()}
@callback scripts() :: [script]
@optional_callbacks [
handle_connected: 2,
init: 2,
scripts: 0
]
alias Clover.{
Adapter,
Error,
Message,
Script,
User
}
alias Clover.Robot.MessageSupervisor
alias Clover.Util.Logger
@type state :: :normal
@type data :: map
@type action :: GenStateMachine.action()
@type actions :: [action]
@type message_action :: :say | :reply | :emote
@type script :: Script.t()
@type name :: String.t()
defmodule Builder do
@moduledoc false
defmacro script(module, options \\ []) do
add_script_module(module, options)
end
defmacro overhear(pattern, function) when is_atom(function) do
add_script(:overhear, pattern, {__CALLER__.module, function})
end
defmacro overhear(pattern, msg, match, data, do: block) do
script = {__CALLER__.module, unique_script_name()}
add_script_block(:overhear, pattern, script, msg, match, data, block)
end
defmacro respond(pattern, function) when is_atom(function) do
add_script(:respond, pattern, {__CALLER__.module, function})
end
defmacro respond(pattern, msg, match, data, do: block) do
script = {__CALLER__.module, unique_script_name()}
add_script_block(:respond, pattern, script, msg, match, data, block)
end
@doc false
defmacro __before_compile__(_env) do
quote do
def scripts, do: @scripts
end
end
@doc false
defmacro __after_compile__(env, _bytecode) do
# Check {mod, fun} scripts and raise error if they are not defined
for %{respond: respond} <- Module.get_attribute(env.module, :scripts) do
case respond do
{mod, fun} when is_atom(mod) and is_atom(fun) ->
unless Module.defines?(mod, {fun, 3}) do
raise(Error.exception({:not_exported, {mod, fun, 3}}))
end
_ ->
:ok
end
end
end
defp add_script(match_mode, pattern, script) do
quote do
@scripts Script.new(unquote(match_mode), unquote(pattern), unquote(script))
end
end
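    # Registers the script and defines a uniquely named function wrapping the
    # user-supplied block as its handler.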
defp add_script_block(match_mode, pattern, {mod, fun}, msg, match, data, block) do
quote do
@scripts Script.new(unquote(match_mode), unquote(pattern), unquote({mod, fun}))
def unquote(fun)(unquote(msg), unquote(match), unquote(data)) do
unquote(block)
end
end
end
def add_script_module(mod, _options) do
quote do
@scripts Script.new(:overhear, unquote(Macro.escape(~r/^.*$/)), unquote(mod))
end
end
defp unique_script_name do
String.to_atom("__script_#{System.unique_integer([:positive, :monotonic])}__")
end
end
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
@behaviour Clover.Robot
import Clover.Robot.Builder,
only: [script: 1, script: 2, overhear: 2, overhear: 5, respond: 2, respond: 5]
import Clover.Message, only: [say: 2, say: 3, typing: 1, typing: 2]
Module.register_attribute(__MODULE__, :scripts, accumulate: true)
@before_compile Clover.Robot.Builder
@after_compile Clover.Robot.Builder
end
end
@doc false
def child_spec(arg, opts \\ []) do
default = %{
id: __MODULE__,
start: {__MODULE__, :start_link, [arg, opts]}
}
Supervisor.child_spec(default, [])
end
@spec start_link(any, list) :: GenServer.on_start()
def start_link(arg, opts \\ [])
def start_link(arg, opts) do
GenStateMachine.start_link(__MODULE__, arg, opts)
end
@doc false
def init({name, {mod, arg}, adapter_mod}) do
Process.flag(:trap_exit, true)
state = :uninitialized
data = %{
mod: mod,
name: name,
adapter: adapter_mod,
me: nil
}
{:ok, data} =
if function_exported?(mod, :init, 2),
do: mod.init(arg, data),
else: {:ok, data}
{:ok, state, data}
end
@spec outgoing(name :: name, Message.t()) :: :ok
def outgoing(robot_name, %Message{action: action, delay: delay} = message)
when is_integer(delay) and action in [:say, :typing] do
log(:debug, "outgoing delayed", inspect: message)
cast_after(robot_name, {:outgoing, message}, delay)
end
def outgoing(robot_name, %Message{action: action} = message) when action in [:say, :typing] do
log(:debug, "outgoing immediate", inspect: message)
cast(robot_name, {:outgoing, message})
end
def name(robot_name) do
call(robot_name, :name)
end
@doc false
def assigns(robot_name) do
call(robot_name, :assigns)
end
@doc false
def put_assign(robot_name, key, value) do
call(robot_name, {:put_assign, key, value})
end
@doc false
def get_assign(robot_name, key) do
call(robot_name, {:get_assign, key})
end
@spec scripts(robot :: module) :: [script]
def scripts(robot) do
if function_exported?(robot, :scripts, 0),
do: robot.scripts(),
else: []
end
def incoming(robot_name, message, _context \\ %{}) do
cast(robot_name, {:incoming, message})
end
def connected(robot_name, connection_state) do
call(robot_name, {:connected, connection_state})
end
defp call(robot_name, message) do
robot_name
|> Clover.whereis_robot()
|> GenServer.call(message)
end
defp cast(robot_name, message) do
robot_name
|> Clover.whereis_robot()
|> GenServer.cast(message)
end
defp cast_after(robot_name, message, delay) do
cast(robot_name, {:delay, message, delay})
end
def via_tuple(name) do
{:via, Registry, {Clover.registry(), name}}
end
def terminate(reason, _state, _data) do
log(:info, "terminate", inspect: reason)
end
@doc false
def handle_event(:cast, {:incoming, raw_message}, _state, data) do
log(:debug, "message", inspect: raw_message)
%{name: name, me: me, mod: mod, adapter: adapter, connection: connection} = data
worker_state = %{
adapter: adapter,
connection: connection,
me: me,
name: name,
robot: mod
}
{:ok, _worker} = MessageSupervisor.dispatch(name, raw_message, worker_state)
:keep_state_and_data
end
@doc false
def handle_event(:cast, {:outgoing, message}, _state, %{name: name}) do
log(:debug, "outgoing", inspect: message)
Adapter.outgoing(name, message)
:keep_state_and_data
end
@doc false
# Send event to self after delay. Comes to handle_event/4 with :info tag
def handle_event(:cast, {:delay, message, delay}, _state, _data) do
Process.send_after(self(), message, delay)
:keep_state_and_data
end
@doc false
def handle_event({:call, from}, {:connected, connection_state}, _state, %{mod: mod} = data) do
log(:debug, "connected", inspect: connection_state)
data =
data
|> put_connection(connection_state)
|> put_me(connection_state)
if function_exported?(mod, :handle_connected, 2) do
case mod.handle_connected(connection_state, data) do
{:ok, new_data} -> {:next_state, :connected, new_data, [{:reply, from, :ok}]}
{:error, error} -> {:next_state, :disconnected, data, [{:reply, from, {:error, error}}]}
end
else
{:next_state, :connected, data, [{:reply, from, :ok}]}
end
end
@doc false
def handle_event({:call, from}, :name, _state, data) do
%User{name: name} = Map.get(data, :me, %User{})
{:keep_state_and_data, [{:reply, from, name}]}
end
@doc false
def handle_event({:call, from}, :assigns, _state, data) do
{:keep_state_and_data, [{:reply, from, Map.get(data, :assigns, %{})}]}
end
@doc false
def handle_event({:call, from}, {:put_assign, key, value}, _state, data) do
assigns =
data
|> Map.get(:assigns, %{})
|> Map.put(key, value)
data = Map.put(data, :assigns, assigns)
{:keep_state, data, [{:reply, from, value}]}
end
@doc false
def handle_event({:call, from}, {:get_assign, key}, _state, data) do
assigns = Map.get(data, :assigns, %{})
value = Map.get(assigns, key)
{:keep_state_and_data, [{:reply, from, value}]}
end
@doc false
def handle_event(:info, {:outgoing, message}, _state, _data) do
GenServer.cast(self(), {:outgoing, message})
:keep_state_and_data
end
@doc false
def handle_event(_type, _event, _state, _data) do
:keep_state_and_data
end
# State handling
defp put_me(data, connection_state) do
case Map.get(connection_state, :me) do
%User{} = me -> Map.put(data, :me, me)
_ -> Map.put(data, :me, nil)
end
end
defp put_connection(data, connection_state) do
Map.put(data, :connection, connection_state)
end
defp log(level, message, opts) do
Logger.log(level, "robot", message, opts)
end
end
lib/robot/robot.ex
defprotocol Timber.Contextable do
@moduledoc """
Converts a data structure into a `Timber.Context.t`. This is called on any data structure passed
in the `Timber.add_context/1` function.
For example, this protocol is how we're able to support `Keyword.t` types:
```elixir
Timber.add_context(build: %{version: "1.0"})
```
This is achieved by:
```elixir
defimpl Timber.Contextable, for: Map do
def to_context(map) when map_size(map) == 1 do
[type] = Map.keys(map)
[data] = Map.values(map)
%Timber.Contexts.CustomContext{
type: type,
data: data
}
end
end
```
## What about custom contexts and structs?
We recommend defining a struct and calling `use Timber.Contexts.CustomContext` in that module.
This takes care of everything automatically. See `Timber.Contexts.CustomContext` for examples.
"""
@doc """
Converts the data structure into a `Timber.Context.t`.
"""
@spec to_context(any) :: Timber.Context.t
def to_context(data)
end
defimpl Timber.Contextable, for: Timber.Contexts.CustomContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.HTTPContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.JobContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.OrganizationContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.RuntimeContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.SessionContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.SystemContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: Timber.Contexts.UserContext do
def to_context(context), do: context
end
defimpl Timber.Contextable, for: List do
def to_context(list) do
if Keyword.keyword?(list) do
list
|> Enum.into(%{})
|> Timber.Contextable.to_context()
else
raise "The provided list is not a Keyword.t and therefore cannot be converted " <>
"to a Timber context"
end
end
end
defimpl Timber.Contextable, for: Map do
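  # A map with explicit :type and :data keys becomes a CustomContext directly;
  # a single-entry map is treated as %{type => data}.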
def to_context(%{type: type, data: data}) do
%Timber.Contexts.CustomContext{
type: type,
data: data
}
end
def to_context(map) when map_size(map) == 1 do
[type] = Map.keys(map)
[data] = Map.values(map)
%Timber.Contexts.CustomContext{
type: type,
data: data
}
end
end
lib/timber/contextable.ex
defmodule Logger.Translator do
@moduledoc """
Default translation for Erlang log messages.
Logger allows developers to rewrite log messages provided by
Erlang applications into a format more compatible with Elixir
log messages by providing a translator.
A translator is simply a tuple containing a module and a function
that can be added and removed via the `Logger.add_translator/1` and
`Logger.remove_translator/1` functions and is invoked for every Erlang
message above the minimum log level with four arguments:
* `min_level` - the current Logger level
* `level` - the level of the message being translated
* `kind` - if the message is a report or a format
* `data` - the data to format. If it is a report, it is a tuple
with `{report_type, report_data}`, if it is a format, it is a
tuple with `{format_message, format_args}`
The function must return:
* `{:ok, chardata}` - if the message was translated with its translation
* `:skip` - if the message is not meant to be translated nor logged
* `:none` - if there is no translation, which triggers the next translator
See the function `translate/4` in this module for an example implementation
and the default messages translated by Logger.
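A minimal sketch of a custom translator (module name and message shape are
illustrative):
```elixir
defmodule MyTranslator do
  # Translate a specific format message into chardata.
  def translate(_min_level, :error, :format, {'my_app oops' ++ _, _args}) do
    {:ok, "my_app oops (translated)"}
  end

  # Fall through to the next translator for everything else.
  def translate(_min_level, _level, _kind, _data), do: :none
end

Logger.add_translator({MyTranslator, :translate})
```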
"""
def translate(min_level, :error, :format, message) do
case message do
{'** Generic server ' ++ _, [name, last, state, reason]} ->
msg = "GenServer #{inspect name} terminating\n"
msg =
  if min_level == :debug do
    msg <> "Last message: #{inspect last}\n" <> "State: #{inspect state}\n"
  else
    msg
  end
{:ok, msg <> "** (exit) " <> format_otp_exit(reason)}
{'** gen_event handler ' ++ _, [name, manager, last, state, reason]} ->
msg = "GenEvent handler #{inspect name} installed in #{inspect manager} terminating\n"
msg =
  if min_level == :debug do
    msg <> "Last message: #{inspect last}\n" <> "State: #{inspect state}\n"
  else
    msg
  end
{:ok, msg <> "** (exit) " <> format_otp_exit(reason)}
{'** Task ' ++ _, [name, starter, function, args, reason]} ->
msg = "Task #{inspect name} started from #{inspect starter} terminating\n" <>
"Function: #{inspect function}\n" <>
" Args: #{inspect args}\n" <>
"** (exit) " <> format_otp_exit(reason)
{:ok, msg}
_ ->
:none
end
end
def translate(_min_level, :info, :report,
{:std_info, [application: app, exited: reason, type: _type]}) do
{:ok, "Application #{app} exited: #{Application.format_error(reason)}"}
end
def translate(min_level, :error, :report, {:supervisor_report, data}) do
translate_supervisor(min_level, data)
end
def translate(min_level, :error, :report, {:crash_report, data}) do
translate_crash(min_level, data)
end
def translate(min_level, :info, :report, {:progress, data}) do
translate_progress(min_level, data)
end
def translate(_min_level, _level, _kind, _message) do
:none
end
defp translate_supervisor(min_level,
[supervisor: sup, errorContext: context,
reason: reason,
offender: [{:pid, pid}, {:name, name} | offender]])
when is_pid(pid) and context !== :shutdown do
{:ok, ["Child ", inspect(name), " of Supervisor ",
sup_name(sup), ?\s, sup_context(context), ?\n,
"Pid: ", inspect(pid), ?\n,
child_info(min_level, offender), ?\n,
"** (exit) " | offender_reason(reason, context)]}
end
defp translate_supervisor(min_level,
[supervisor: sup, errorContext: context,
reason: reason,
offender: [{:pid, _pid},
{:name, name} | offender]]) do
{:ok, ["Child ", inspect(name), " of Supervisor ",
sup_name(sup), ?\s, sup_context(context), ?\n,
child_info(min_level, offender), ?\n,
"** (exit) " | offender_reason(reason, context)]}
end
defp translate_supervisor(min_level,
[supervisor: sup, errorContext: context,
reason: reason,
offender: [{:pid, pid} | offender]]) do
{:ok, ["Child of Supervisor ",
sup_name(sup), ?\s, sup_context(context), ?\n,
"Pid: ", inspect(pid), ?\n,
child_info(min_level, offender), ?\n,
"** (exit) " | offender_reason(reason, context)]}
end
defp translate_supervisor(min_level,
[supervisor: sup, errorContext: context,
reason: reason,
offender: [{:nb_children, n},
{:name, name} | offender]]) do
{:ok, ["Children ", inspect(name), " of Supervisor ",
sup_name(sup), ?\s, sup_context(context), ?\n,
"Number: ", inspect(n), ?\n,
child_info(min_level, offender), ?\n,
"** (exit) " | offender_reason(reason, context)]}
end
defp translate_supervisor(_min_level, _other), do: :none
defp translate_progress(_min_level,
[application: app, started_at: node_name]) do
{:ok, ["Application ", to_string(app), " started at " | inspect(node_name)]}
end
defp translate_progress(min_level,
[supervisor: sup,
started: [{:pid, pid}, {:name, name} | started]]) do
{:ok, ["Child ", inspect(name), " of Supervisor ",
sup_name(sup), " started\n",
"Pid: ", inspect(pid), ?\n |
child_info(min_level, started)]}
end
defp translate_progress(min_level,
[supervisor: sup,
started: [{:pid, pid} | started]]) do
{:ok, ["Child of Supervisor ", sup_name(sup), " started\n",
"Pid: ", inspect(pid), ?\n |
child_info(min_level, started)]}
end
defp translate_progress(_min_level, _other), do: :none
defp sup_name({:local, name}), do: inspect(name)
defp sup_name({:global, name}), do: inspect(name)
defp sup_name({:via, _mod, name}), do: inspect(name)
defp sup_name({pid, mod}), do: [inspect(pid), " (", inspect(mod), ?)]
defp sup_context(:start_error), do: "failed to start"
defp sup_context(:child_terminated), do: "terminated"
defp sup_context(:shutdown), do: "caused shutdown"
defp sup_context(:shutdown_error), do: "shutdown abnormally"
defp child_info(min_level, [{:mfargs, {mod, fun, args}} | debug]) do
["Start Call: ", Exception.format_mfa(mod, fun, args) |
child_debug(min_level, debug)]
end
defp child_info(min_level, [{:mfa, {mod, fun, args}} | debug]) do
["Start Call: ", Exception.format_mfa(mod, fun, args) |
child_debug(min_level, debug)]
end
defp child_info(min_level, [{:mod, mod} | debug]) do
["Start Module: ", inspect(mod) |
child_debug(min_level, debug)]
end
defp child_debug(:debug,
[restart_type: restart, shutdown: shutdown, child_type: type]) do
[?\n,
"Restart: ", inspect(restart), ?\n,
"Shutdown: ", inspect(shutdown), ?\n,
"Type: ", inspect(type)]
end
defp child_debug(_min_level, _child) do
[]
end
# If start call raises reason will be of form {:EXIT, reason}
defp offender_reason({:EXIT, reason}, :start_error) do
Exception.format_exit(reason)
end
defp offender_reason(reason, _context) do
Exception.format_exit(reason)
end
defp translate_crash(min_level,
[[{:initial_call, _} = initial_call,
{:pid, pid},
{:registered_name, name},
{:error_info, {kind, exception, stack}} | crashed],
linked]) do
{:ok, ["Process ", crash_name(pid, name) , " terminating\n",
crash_info(min_level, [initial_call | crashed]),
crash_linked(min_level, linked) |
Exception.format(kind, exception, stack)]}
end
defp translate_crash(min_level,
[[{:pid, pid},
{:registered_name, name},
{:error_info, {kind, exception, stack}} | crashed],
linked]) do
{:ok, ["Process ", crash_name(pid, name) , " terminating\n",
crash_info(min_level, crashed),
crash_linked(min_level, linked) |
Exception.format(kind, exception, stack)]}
end
defp crash_name(pid, []), do: inspect(pid)
defp crash_name(pid, name), do: [inspect(name), " (", inspect(pid), ?)]
defp crash_info(min_level, info, prefix \\ [])
defp crash_info(min_level,
[{:initial_call, {mod, fun, args}} | info], prefix) do
[prefix, "Initial Call: ", crash_call(mod, fun, args), ?\n |
crash_info(min_level, info, prefix)]
end
defp crash_info(min_level,
[{:current_function, {mod, fun, args}} | info], prefix) do
[prefix, "Current Call: ", crash_call(mod, fun, args), ?\n |
crash_info(min_level, info, prefix)]
end
defp crash_info(min_level, [{:current_function, []} | info], prefix) do
crash_info(min_level, info, prefix)
end
defp crash_info(min_level,
[{:ancestors, ancestors} | debug], prefix) do
[prefix, "Ancestors: ", inspect(ancestors), ?\n |
crash_debug(min_level, debug, prefix)]
end
defp crash_call(mod, fun, arity) when is_integer(arity) do
Exception.format_mfa(mod, fun, arity)
end
defp crash_call(mod, fun, args) do
Exception.format_mfa(mod, fun, length(args))
end
defp crash_debug(:debug,
[messages: msgs, links: links, dictionary: dict,
trap_exit: trap, status: status, heap_size: heap_size,
stack_size: stack_size, reductions: reductions], prefix) do
[prefix, "Messages: ", inspect(msgs), ?\n,
prefix, "Links: ", inspect(links), ?\n,
prefix, "Dictionary: ", inspect(dict), ?\n,
prefix, "Trapping Exits: ", inspect(trap), ?\n,
prefix, "Status: ", inspect(status), ?\n,
prefix, "Heap Size: ", inspect(heap_size), ?\n,
prefix, "Stack Size: ", inspect(stack_size), ?\n,
prefix, "Reductions: ", inspect(reductions), ?\n]
end
defp crash_debug(_min_level, _info, _prefix) do
[]
end
defp crash_linked(_min_level, []), do: []
defp crash_linked(min_level, neighbours) do
Enum.reduce(neighbours, "Neighbours:\n", fn({:neighbour, info}, acc) ->
[acc | crash_neighbour(min_level, info)]
end)
end
defp crash_neighbour(min_level,
[{:pid, pid}, {:registered_name, []} | info]) do
prefix = " "
[prefix, inspect(pid), ?\n |
crash_info(min_level, info, [prefix | prefix])]
end
defp crash_neighbour(min_level,
[{:pid, pid}, {:registered_name, name} | info]) do
prefix = " "
[prefix, inspect(name), " (", inspect(pid), ")\n" |
crash_info(min_level, info, [prefix | prefix])]
end
# OTP process rewrite the :undef error to these reasons when logging
@gen_undef [:"module could not be loaded", :"function not exported"]
defp format_otp_exit({undef, [{mod, fun, args, _info} | _ ] = stacktrace} = reason)
when undef in @gen_undef and is_atom(mod) and is_atom(fun) do
cond do
is_list(args) ->
format_undef(mod, fun, length(args), undef, stacktrace)
is_integer(args) ->
format_undef(mod, fun, args, undef, stacktrace)
true ->
Exception.format_exit(reason)
end
end
defp format_otp_exit(reason) do
Exception.format_exit(reason)
end
defp format_undef(mod, fun, arity, undef, stacktrace) do
opts = [module: mod, function: fun, arity: arity, reason: undef]
exception = UndefinedFunctionError.exception(opts)
Exception.format_exit({exception, stacktrace})
end
end
lib/logger/lib/logger/translator.ex
defmodule AWS.Polly do
@moduledoc """
Amazon Polly is a web service that makes it easy to synthesize speech from text.
The Amazon Polly service provides API operations for synthesizing high-quality
speech from plain text and Speech Synthesis Markup Language (SSML), along with
managing pronunciation lexicons that enable you to get the best results for
your application domain.
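A minimal usage sketch (client construction follows `aws-elixir`
conventions and may differ by version; credentials and values are
placeholders):
```elixir
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

{:ok, body, _response} =
  AWS.Polly.synthesize_speech(client, %{
    "OutputFormat" => "mp3",
    "Text" => "Hello from Polly",
    "VoiceId" => "Joanna"
  })
```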
"""
@doc """
Deletes the specified pronunciation lexicon stored in an AWS Region.
A lexicon which has been deleted is not available for speech synthesis, nor is
it possible to retrieve it using either the `GetLexicon` or `ListLexicon` APIs.
For more information, see [Managing Lexicons](https://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
"""
def delete_lexicon(client, name, input, options \\ []) do
path_ = "/v1/lexicons/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Returns the list of voices that are available for use when requesting speech
synthesis.
Each voice speaks a specified language, is either male or female, and is
identified by an ID, which is the ASCII version of the voice name.
When synthesizing speech ( `SynthesizeSpeech` ), you provide the voice ID for
the voice you want from the list of voices returned by `DescribeVoices`.
For example, you want your news reader application to read news in a specific
language, but giving a user the option to choose the voice. Using the
`DescribeVoices` operation you can provide the user with a list of available
voices to select from.
You can optionally specify a language code to filter the available voices. For
example, if you specify `en-US`, the operation returns a list of all available
US English voices.
This operation requires permissions to perform the `polly:DescribeVoices`
action.
"""
def describe_voices(client, engine \\ nil, include_additional_language_codes \\ nil, language_code \\ nil, next_token \\ nil, options \\ []) do
path_ = "/v1/voices"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(language_code) do
[{"LanguageCode", language_code} | query_]
else
query_
end
query_ = if !is_nil(include_additional_language_codes) do
[{"IncludeAdditionalLanguageCodes", include_additional_language_codes} | query_]
else
query_
end
query_ = if !is_nil(engine) do
[{"Engine", engine} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns the content of the specified pronunciation lexicon stored in an AWS
Region.
For more information, see [Managing Lexicons](https://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
"""
def get_lexicon(client, name, options \\ []) do
path_ = "/v1/lexicons/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Retrieves a specific SpeechSynthesisTask object based on its TaskID.
This object contains information about the given speech synthesis task,
including the status of the task, and a link to the S3 bucket containing the
output of the task.
"""
def get_speech_synthesis_task(client, task_id, options \\ []) do
path_ = "/v1/synthesisTasks/#{URI.encode(task_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of pronunciation lexicons stored in an AWS Region.
For more information, see [Managing Lexicons](https://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
"""
def list_lexicons(client, next_token \\ nil, options \\ []) do
path_ = "/v1/lexicons"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of SpeechSynthesisTask objects ordered by their creation date.
This operation can filter the tasks by their status, for example, allowing users
to list only tasks that are completed.
"""
def list_speech_synthesis_tasks(client, max_results \\ nil, next_token \\ nil, status \\ nil, options \\ []) do
path_ = "/v1/synthesisTasks"
headers = []
query_ = []
query_ = if !is_nil(status) do
[{"Status", status} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"MaxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Stores a pronunciation lexicon in an AWS Region.
If a lexicon with the same name already exists in the region, it is overwritten
by the new lexicon. Lexicon operations have eventual consistency, therefore, it
might take some time before the lexicon is available to the SynthesizeSpeech
operation.
For more information, see [Managing Lexicons](https://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html).
"""
def put_lexicon(client, name, input, options \\ []) do
path_ = "/v1/lexicons/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@doc """
Allows the creation of an asynchronous synthesis task, by starting a new
`SpeechSynthesisTask`.
This operation requires all the standard information needed for speech
synthesis, plus the name of an Amazon S3 bucket for the service to store the
output of the synthesis task and two optional parameters (OutputS3KeyPrefix and
SnsTopicArn). Once the synthesis task is created, this operation will return a
SpeechSynthesisTask object, which will include an identifier of this task as
well as the current status.
"""
def start_speech_synthesis_task(client, input, options \\ []) do
path_ = "/v1/synthesisTasks"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes.
SSML input must be valid, well-formed SSML. Some alphabets might not be
available with all the voices (for example, Cyrillic might not be read at all by
English voices) unless phoneme mapping is used. For more information, see [How it
Works](https://docs.aws.amazon.com/polly/latest/dg/how-text-to-speech-works.html).
"""
def synthesize_speech(client, input, options \\ []) do
path_ = "/v1/speech"
headers = []
query_ = []
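    # Promote selected response headers (content type, request character count)
    # into the decoded body map so callers can read them alongside the payload.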
case request(client, :post, path_, query_, headers, input, options, 200) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Content-Type", "ContentType"},
{"x-amzn-RequestCharacters", "RequestCharacters"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "polly"}
host = build_host("polly", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
lib/aws/generated/polly.ex
defmodule DynamicSupervisor do
@moduledoc ~S"""
A supervisor that starts children dynamically.
The `Supervisor` module was designed to handle mostly static children
that are started in the given order when the supervisor starts. A
`DynamicSupervisor` starts with no children. Instead, children are
started on demand via `start_child/2`. When a dynamic supervisor
terminates, all children are shutdown at the same time, with no guarantee
of ordering.
## Examples
A dynamic supervisor is started with no children, often under a
supervisor with the supervision strategy (the only strategy currently
supported is `:one_for_one`) and a name:
children = [
{DynamicSupervisor, strategy: :one_for_one, name: MyApp.DynamicSupervisor}
]
Supervisor.start_link(children, strategy: :one_for_one)
The options given in the child specification are documented in `start_link/1`.
Once the dynamic supervisor is running, we can start children
with `start_child/2`, which receives a child specification:
{:ok, agent1} = DynamicSupervisor.start_child(MyApp.DynamicSupervisor, {Agent, fn -> %{} end})
Agent.update(agent1, &Map.put(&1, :key, "value"))
Agent.get(agent1, & &1)
#=> %{key: "value"}
{:ok, agent2} = DynamicSupervisor.start_child(MyApp.DynamicSupervisor, {Agent, fn -> %{} end})
Agent.get(agent2, & &1)
#=> %{}
DynamicSupervisor.count_children(MyApp.DynamicSupervisor)
#=> %{active: 2, specs: 2, supervisors: 0, workers: 2}
## Module-based supervisors
Similar to `Supervisor`, dynamic supervisors also support module-based
supervisors.
defmodule MyApp.DynamicSupervisor do
# Automatically defines child_spec/1
use DynamicSupervisor
def start_link(arg) do
DynamicSupervisor.start_link(__MODULE__, arg, name: __MODULE__)
end
@impl true
def init(_arg) do
DynamicSupervisor.init(strategy: :one_for_one)
end
end
See the `Supervisor` docs for a discussion of when you may want to use
module-based supervisors.
## Name registration
A supervisor is bound to the same name registration rules as a `GenServer`.
Read more about these rules in the documentation for `GenServer`.
## Migrating from Supervisor's :simple_one_for_one
In case you were using the deprecated `:simple_one_for_one` strategy from
the `Supervisor` module, you can migrate to the `DynamicSupervisor` in
a few steps.
Imagine the given "old" code:
defmodule MySupervisor do
use Supervisor
def start_link(arg) do
Supervisor.start_link(__MODULE__, arg, name: __MODULE__)
end
def start_child(foo, bar, baz) do
# This will start child by calling MyWorker.start_link(initial_arg, foo, bar, baz)
Supervisor.start_child(__MODULE__, [foo, bar, baz])
end
@impl true
def init(initial_arg) do
children = [
# Or the deprecated: worker(MyWorker, [initial_arg])
%{id: MyWorker, start: {MyWorker, :start_link, [initial_arg]}}
]
Supervisor.init(children, strategy: :simple_one_for_one)
end
end
It can be upgraded to the DynamicSupervisor like this:
defmodule MySupervisor do
use DynamicSupervisor
def start_link(arg) do
DynamicSupervisor.start_link(__MODULE__, arg, name: __MODULE__)
end
def start_child(foo, bar, baz) do
# If MyWorker is not using the new child specs, we need to pass a map:
# spec = %{id: MyWorker, start: {MyWorker, :start_link, [foo, bar, baz]}}
spec = {MyWorker, foo: foo, bar: bar, baz: baz}
DynamicSupervisor.start_child(__MODULE__, spec)
end
@impl true
def init(initial_arg) do
DynamicSupervisor.init(
strategy: :one_for_one,
extra_arguments: [initial_arg]
)
end
end
The difference is that the `DynamicSupervisor` expects the child specification
at the moment `start_child/2` is called, and no longer on the init callback.
If there are any initial arguments given on initialization, such as `[initial_arg]`,
it can be given in the `:extra_arguments` flag on `DynamicSupervisor.init/1`.
"""
@behaviour GenServer
@doc """
Callback invoked to start the supervisor and during hot code upgrades.
Developers typically invoke `DynamicSupervisor.init/1` at the end of
their init callback to return the proper supervision flags.
"""
@callback init(args :: term) :: {:ok, sup_flags()} | :ignore
@typedoc "The supervisor flags returned on init"
@type sup_flags() :: %{
strategy: strategy(),
intensity: non_neg_integer(),
period: pos_integer(),
max_children: non_neg_integer() | :infinity,
extra_arguments: [term()]
}
@typedoc "Option values used by the `start*` functions"
@type option :: {:name, Supervisor.name()} | init_option()
@typedoc "Options used by the `start*` functions"
@type options :: [option, ...]
@typedoc "Options given to `start_link/2` and `init/1`"
@type init_option ::
{:strategy, strategy()}
| {:max_restarts, non_neg_integer()}
| {:max_seconds, pos_integer()}
| {:max_children, non_neg_integer() | :infinity}
| {:extra_arguments, [term()]}
@typedoc "Supported strategies"
@type strategy :: :one_for_one
@typedoc "Return values of `start_child` functions"
@type on_start_child ::
{:ok, pid}
| {:ok, pid, info :: term}
| :ignore
| {:error, {:already_started, pid} | :max_children | term}
defstruct [
:args,
:extra_arguments,
:mod,
:name,
:strategy,
:max_children,
:max_restarts,
:max_seconds,
children: %{},
restarts: []
]
@doc """
Returns a specification to start a dynamic supervisor under a supervisor.
See `Supervisor`.
"""
@since "1.6.1"
def child_spec(opts) when is_list(opts) do
id =
case Keyword.get(opts, :name, DynamicSupervisor) do
name when is_atom(name) -> name
{:global, name} -> name
{:via, _module, name} -> name
end
%{
id: id,
start: {DynamicSupervisor, :start_link, [opts]},
type: :supervisor
}
end
@doc false
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
@behaviour DynamicSupervisor
@doc """
Returns a specification to start this module under a supervisor.
See `Supervisor`.
"""
def child_spec(arg) do
default = %{
id: __MODULE__,
start: {__MODULE__, :start_link, [arg]},
type: :supervisor
}
Supervisor.child_spec(default, unquote(Macro.escape(opts)))
end
defoverridable child_spec: 1
end
end
@doc """
Starts a supervisor with the given options.
The `:strategy` is a required option and the currently supported
value is `:one_for_one`. The remaining options can be found in the
`init/1` docs.
The `:name` option can also be used to register a supervisor name.
The supported values are described under the "Name registration"
section in the `GenServer` module docs.
If the supervisor is successfully spawned, this function returns
`{:ok, pid}`, where `pid` is the PID of the supervisor. If the supervisor
is given a name and a process with the specified name already exists,
the function returns `{:error, {:already_started, pid}}`, where `pid`
is the PID of that process.
Note that a supervisor started with this function is linked to the parent
process and exits not only on crashes but also if the parent process exits
with `:normal` reason.
"""
@since "1.6.0"
@spec start_link(options) :: Supervisor.on_start()
def start_link(options) when is_list(options) do
keys = [:extra_arguments, :max_children, :max_seconds, :max_restarts, :strategy]
{sup_opts, start_opts} = Keyword.split(options, keys)
start_link(Supervisor.Default, init(sup_opts), start_opts)
end
@doc """
Starts a module-based supervisor process with the given `module` and `arg`.
To start the supervisor, the `c:init/1` callback will be invoked in the given
`module`, with `arg` as its argument. The `c:init/1` callback must return a
supervisor specification which can be created with the help of the `init/1`
function.
If the `c:init/1` callback returns `:ignore`, this function returns
`:ignore` as well and the supervisor terminates with reason `:normal`.
If it fails or returns an incorrect value, this function returns
`{:error, term}` where `term` is a term with information about the
error, and the supervisor terminates with reason `term`.
The `:name` option can also be given in order to register a supervisor
name, the supported values are described in the "Name registration"
section in the `GenServer` module docs.
"""
@since "1.6.0"
@spec start_link(module, term, GenServer.options()) :: Supervisor.on_start()
def start_link(mod, args, opts \\ []) do
GenServer.start_link(__MODULE__, {mod, args, opts[:name]}, opts)
end
@doc """
Dynamically adds a child specification to `supervisor` and starts that child.
`child_spec` should be a valid child specification as detailed in the
"child_spec/1" section of the documentation for `Supervisor`. The child
process will be started as defined in the child specification.
If the child process start function returns `{:ok, child}` or `{:ok, child,
info}`, then child specification and PID are added to the supervisor and
this function returns the same value.
If the child process start function returns `:ignore`, then no child is added
to the supervision tree and this function returns `:ignore` too.
If the child process start function returns an error tuple or an erroneous
value, or if it fails, the child specification is discarded and this function
returns `{:error, error}` where `error` is a term containing information about
the error and child specification.
If the supervisor already has as many children as the `:max_children` limit
set at supervisor initialization (see `init/1`), then this function returns
`{:error, :max_children}`.
"""
@since "1.6.0"
@spec start_child(Supervisor.supervisor(), :supervisor.child_spec() | {module, term} | module) ::
on_start_child()
def start_child(supervisor, {_, _, _, _, _, _} = child_spec) do
validate_and_start_child(supervisor, child_spec)
end
def start_child(supervisor, child_spec) do
validate_and_start_child(supervisor, Supervisor.child_spec(child_spec, []))
end
defp validate_and_start_child(supervisor, child_spec) do
case validate_child(child_spec) do
{:ok, child} -> call(supervisor, {:start_child, child})
error -> {:error, error}
end
end
defp validate_child(%{id: _, start: {mod, _, _} = start} = child) do
restart = Map.get(child, :restart, :permanent)
type = Map.get(child, :type, :worker)
modules = Map.get(child, :modules, [mod])
shutdown =
case type do
:worker -> Map.get(child, :shutdown, 5_000)
:supervisor -> Map.get(child, :shutdown, :infinity)
end
validate_child(start, restart, shutdown, type, modules)
end
defp validate_child({_, start, restart, shutdown, type, modules}) do
validate_child(start, restart, shutdown, type, modules)
end
defp validate_child(other) do
{:invalid_child_spec, other}
end
defp validate_child(start, restart, shutdown, type, modules) do
with :ok <- validate_start(start),
:ok <- validate_restart(restart),
:ok <- validate_shutdown(shutdown),
:ok <- validate_type(type),
:ok <- validate_modules(modules) do
{:ok, {start, restart, shutdown, type, modules}}
end
end
defp validate_start({m, f, args}) when is_atom(m) and is_atom(f) and is_list(args), do: :ok
defp validate_start(mfa), do: {:invalid_mfa, mfa}
defp validate_type(type) when type in [:supervisor, :worker], do: :ok
defp validate_type(type), do: {:invalid_child_type, type}
defp validate_restart(restart) when restart in [:permanent, :temporary, :transient], do: :ok
defp validate_restart(restart), do: {:invalid_restart_type, restart}
defp validate_shutdown(shutdown) when is_integer(shutdown) and shutdown > 0, do: :ok
defp validate_shutdown(shutdown) when shutdown in [:infinity, :brutal_kill], do: :ok
defp validate_shutdown(shutdown), do: {:invalid_shutdown, shutdown}
defp validate_modules(:dynamic), do: :ok
defp validate_modules(mods) do
if is_list(mods) and Enum.all?(mods, &is_atom/1) do
:ok
else
{:invalid_modules, mods}
end
end
@doc """
Terminates the given child identified by `pid`.
If successful, this function returns `:ok`. If there is no process with
the given PID, this function returns `{:error, :not_found}`.
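A minimal sketch, reusing the named supervisor from the module examples:

    {:ok, pid} = DynamicSupervisor.start_child(MyApp.DynamicSupervisor, {Agent, fn -> %{} end})
    :ok = DynamicSupervisor.terminate_child(MyApp.DynamicSupervisor, pid)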
"""
@since "1.6.0"
@spec terminate_child(Supervisor.supervisor(), pid) :: :ok | {:error, :not_found}
def terminate_child(supervisor, pid) when is_pid(pid) do
call(supervisor, {:terminate_child, pid})
end
@doc """
Returns a list with information about all children.
Note that calling this function when supervising a large number
of children under low memory conditions can cause an out of memory
exception.
This function returns a list of tuples containing:
* `id` - it is always `:undefined` for dynamic supervisors
* `child` - the pid of the corresponding child process or the
atom `:restarting` if the process is about to be restarted
* `type` - `:worker` or `:supervisor` as defined in the child
specification
* `modules` - as defined in the child specification
"""
@since "1.6.0"
@spec which_children(Supervisor.supervisor()) :: [
{:undefined, pid | :restarting, :worker | :supervisor, :supervisor.modules()}
]
def which_children(supervisor) do
call(supervisor, :which_children)
end
@doc """
Returns a map containing count values for the supervisor.
The map contains the following keys:
* `:specs` - the number of children processes
* `:active` - the count of all actively running child processes managed by
this supervisor
* `:supervisors` - the count of all supervisors whether or not the child
process is still alive
* `:workers` - the count of all workers, whether or not the child process
is still alive
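For example, with the two agents from the module examples running:

    DynamicSupervisor.count_children(MyApp.DynamicSupervisor)
    #=> %{active: 2, specs: 2, supervisors: 0, workers: 2}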
"""
@since "1.6.0"
@spec count_children(Supervisor.supervisor()) :: %{
specs: non_neg_integer,
active: non_neg_integer,
supervisors: non_neg_integer,
workers: non_neg_integer
}
def count_children(supervisor) do
call(supervisor, :count_children) |> :maps.from_list()
end
@doc """
Synchronously stops the given supervisor with the given `reason`.
It returns `:ok` if the supervisor terminates with the given
reason. If it terminates with another reason, the call exits.
This function keeps OTP semantics regarding error reporting.
If the reason is any other than `:normal`, `:shutdown` or
`{:shutdown, _}`, an error report is logged.
"""
@since "1.7.0"
@spec stop(Supervisor.supervisor(), reason :: term, timeout) :: :ok
def stop(supervisor, reason \\ :normal, timeout \\ :infinity) do
GenServer.stop(supervisor, reason, timeout)
end
@doc """
Receives a set of options that initializes a dynamic supervisor.
This is typically invoked at the end of the `c:init/1` callback of
module-based supervisors. See the sections "Module-based supervisors"
in the module documentation for more information.
The options received by this function are also supported by `start_link/2`.
This function returns a tuple containing the supervisor options.
## Examples
def init(_arg) do
DynamicSupervisor.init(max_children: 1000, strategy: :one_for_one)
end
## Options
* `:strategy` - the restart strategy option. The only supported
value is `:one_for_one`, which means that no other children are
terminated if a child process terminates. You can learn more
about strategies in the `Supervisor` module docs.
* `:max_restarts` - the maximum number of restarts allowed in
a time frame. Defaults to `3`.
* `:max_seconds` - the time frame in which `:max_restarts` applies.
Defaults to `5`.
* `:max_children` - the maximum amount of children to be running
under this supervisor at the same time. When `:max_children` is
exceeded, `start_child/2` returns `{:error, :max_children}`. Defaults
to `:infinity`.
* `:extra_arguments` - arguments that are prepended to the arguments
specified in the child spec given to `start_child/2`. Defaults to
an empty list.
"""
@since "1.6.0"
@spec init([init_option]) :: {:ok, sup_flags()}
def init(options) when is_list(options) do
unless strategy = options[:strategy] do
raise ArgumentError, "expected :strategy option to be given"
end
intensity = Keyword.get(options, :max_restarts, 3)
period = Keyword.get(options, :max_seconds, 5)
max_children = Keyword.get(options, :max_children, :infinity)
extra_arguments = Keyword.get(options, :extra_arguments, [])
flags = %{
strategy: strategy,
intensity: intensity,
period: period,
max_children: max_children,
extra_arguments: extra_arguments
}
{:ok, flags}
end
## Callbacks
@impl true
def init({mod, args, name}) do
Process.put(:"$initial_call", {:supervisor, mod, 1})
Process.flag(:trap_exit, true)
case mod.init(args) do
{:ok, flags} when is_map(flags) ->
name =
cond do
is_nil(name) -> {self(), mod}
is_atom(name) -> {:local, name}
is_tuple(name) -> name
end
state = %DynamicSupervisor{mod: mod, args: args, name: name}
case init(state, flags) do
{:ok, state} -> {:ok, state}
{:error, reason} -> {:stop, {:supervisor_data, reason}}
end
:ignore ->
:ignore
other ->
{:stop, {:bad_return, {mod, :init, other}}}
end
end
defp init(state, flags) do
extra_arguments = Map.get(flags, :extra_arguments, [])
max_children = Map.get(flags, :max_children, :infinity)
max_restarts = Map.get(flags, :intensity, 1)
max_seconds = Map.get(flags, :period, 5)
strategy = Map.get(flags, :strategy, :one_for_one)
with :ok <- validate_strategy(strategy),
:ok <- validate_restarts(max_restarts),
:ok <- validate_seconds(max_seconds),
:ok <- validate_dynamic(max_children),
:ok <- validate_extra_arguments(extra_arguments) do
{:ok,
%{
state
| extra_arguments: extra_arguments,
max_children: max_children,
max_restarts: max_restarts,
max_seconds: max_seconds,
strategy: strategy
}}
end
end
defp validate_strategy(strategy) when strategy in [:one_for_one], do: :ok
defp validate_strategy(strategy), do: {:error, {:invalid_strategy, strategy}}
defp validate_restarts(restart) when is_integer(restart) and restart >= 0, do: :ok
defp validate_restarts(restart), do: {:error, {:invalid_intensity, restart}}
defp validate_seconds(seconds) when is_integer(seconds) and seconds > 0, do: :ok
defp validate_seconds(seconds), do: {:error, {:invalid_period, seconds}}
defp validate_dynamic(:infinity), do: :ok
defp validate_dynamic(dynamic) when is_integer(dynamic) and dynamic >= 0, do: :ok
defp validate_dynamic(dynamic), do: {:error, {:invalid_max_children, dynamic}}
defp validate_extra_arguments(list) when is_list(list), do: :ok
defp validate_extra_arguments(extra), do: {:error, {:invalid_extra_arguments, extra}}
@impl true
def handle_call(:which_children, _from, state) do
%{children: children} = state
reply =
for {pid, args} <- children do
case args do
{:restarting, {_, _, _, type, modules}} ->
{:undefined, :restarting, type, modules}
{_, _, _, type, modules} ->
{:undefined, pid, type, modules}
end
end
{:reply, reply, state}
end
def handle_call(:count_children, _from, state) do
%{children: children} = state
specs = map_size(children)
{active, workers, supervisors} =
Enum.reduce(children, {0, 0, 0}, fn
{_pid, {:restarting, {_, _, _, :worker, _}}}, {active, worker, supervisor} ->
{active, worker + 1, supervisor}
{_pid, {:restarting, {_, _, _, :supervisor, _}}}, {active, worker, supervisor} ->
{active, worker, supervisor + 1}
{_pid, {_, _, _, :worker, _}}, {active, worker, supervisor} ->
{active + 1, worker + 1, supervisor}
{_pid, {_, _, _, :supervisor, _}}, {active, worker, supervisor} ->
{active + 1, worker, supervisor + 1}
end)
reply = [specs: specs, active: active, supervisors: supervisors, workers: workers]
{:reply, reply, state}
end
def handle_call({:terminate_child, pid}, _from, %{children: children} = state) do
case children do
%{^pid => info} ->
:ok = terminate_children(%{pid => info}, state)
{:reply, :ok, delete_child(pid, state)}
%{} ->
{:reply, {:error, :not_found}, state}
end
end
def handle_call({:start_task, args, restart, shutdown}, from, state) do
{init_restart, init_shutdown} = Process.get(Task.Supervisor)
restart = restart || init_restart
shutdown = shutdown || init_shutdown
child = {{Task.Supervised, :start_link, args}, restart, shutdown, :worker, [Task.Supervised]}
handle_call({:start_child, child}, from, state)
end
def handle_call({:start_child, child}, _from, state) do
%{children: children, max_children: max_children} = state
if map_size(children) < max_children do
handle_start_child(child, state)
else
{:reply, {:error, :max_children}, state}
end
end
defp handle_start_child({{m, f, args} = mfa, restart, shutdown, type, modules}, state) do
%{extra_arguments: extra} = state
case reply = start_child(m, f, extra ++ args) do
{:ok, pid, _} ->
{:reply, reply, save_child(pid, mfa, restart, shutdown, type, modules, state)}
{:ok, pid} ->
{:reply, reply, save_child(pid, mfa, restart, shutdown, type, modules, state)}
_ ->
{:reply, reply, state}
end
end
defp start_child(m, f, a) do
try do
apply(m, f, a)
catch
kind, reason ->
{:error, exit_reason(kind, reason, __STACKTRACE__)}
else
{:ok, pid, extra} when is_pid(pid) -> {:ok, pid, extra}
{:ok, pid} when is_pid(pid) -> {:ok, pid}
:ignore -> :ignore
{:error, _} = error -> error
other -> {:error, other}
end
end
defp save_child(pid, mfa, restart, shutdown, type, modules, state) do
mfa = mfa_for_restart(mfa, restart)
put_in(state.children[pid], {mfa, restart, shutdown, type, modules})
end
defp mfa_for_restart({m, f, _}, :temporary), do: {m, f, :undefined}
defp mfa_for_restart(mfa, _), do: mfa
defp exit_reason(:exit, reason, _), do: reason
defp exit_reason(:error, reason, stack), do: {reason, stack}
defp exit_reason(:throw, value, stack), do: {{:nocatch, value}, stack}
@impl true
def handle_cast(_msg, state) do
{:noreply, state}
end
@impl true
def handle_info({:EXIT, pid, reason}, state) do
case maybe_restart_child(pid, reason, state) do
{:ok, state} -> {:noreply, state}
{:shutdown, state} -> {:stop, :shutdown, state}
end
end
def handle_info({:"$gen_restart", pid}, state) do
%{children: children} = state
case children do
%{^pid => restarting_args} ->
{:restarting, child} = restarting_args
case restart_child(pid, child, state) do
{:ok, state} -> {:noreply, state}
{:shutdown, state} -> {:stop, :shutdown, state}
end
# We may hit this clause if we send $gen_restart and then
# someone calls terminate_child, removing the child.
%{} ->
{:noreply, state}
end
end
def handle_info(msg, state) do
:error_logger.error_msg('DynamicSupervisor received unexpected message: ~p~n', [msg])
{:noreply, state}
end
@impl true
def code_change(_, %{mod: mod, args: args} = state, _) do
case mod.init(args) do
{:ok, flags} when is_map(flags) ->
case init(state, flags) do
{:ok, state} -> {:ok, state}
{:error, reason} -> {:error, {:supervisor_data, reason}}
end
:ignore ->
{:ok, state}
error ->
error
end
end
@impl true
def terminate(_, %{children: children} = state) do
:ok = terminate_children(children, state)
end
defp terminate_children(children, state) do
{pids, times, stacks} = monitor_children(children)
size = map_size(pids)
timers =
Enum.reduce(times, %{}, fn {time, pids}, acc ->
Map.put(acc, :erlang.start_timer(time, self(), :kill), pids)
end)
stacks = wait_children(pids, size, timers, stacks)
for {pid, {child, reason}} <- stacks do
report_error(:shutdown_error, reason, pid, child, state)
end
:ok
end
defp monitor_children(children) do
Enum.reduce(children, {%{}, %{}, %{}}, fn
{_, {:restarting, _}}, acc ->
acc
{pid, {_, restart, _, _, _} = child}, {pids, times, stacks} ->
case monitor_child(pid) do
:ok ->
times = exit_child(pid, child, times)
{Map.put(pids, pid, child), times, stacks}
{:error, :normal} when restart != :permanent ->
{pids, times, stacks}
{:error, reason} ->
{pids, times, Map.put(stacks, pid, {child, reason})}
end
end)
end
defp monitor_child(pid) do
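  # Monitor and unlink the child; if an :EXIT message is already in the
  # mailbox, flush the matching :DOWN and report the exit reason instead.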
ref = Process.monitor(pid)
Process.unlink(pid)
receive do
{:EXIT, ^pid, reason} ->
receive do
{:DOWN, ^ref, :process, ^pid, _} -> {:error, reason}
end
after
0 -> :ok
end
end
defp exit_child(pid, {_, _, shutdown, _, _}, times) do
case shutdown do
:brutal_kill ->
Process.exit(pid, :kill)
times
:infinity ->
Process.exit(pid, :shutdown)
times
time ->
Process.exit(pid, :shutdown)
Map.update(times, time, [pid], &[pid | &1])
end
end
defp wait_children(_pids, 0, timers, stacks) do
for {timer, _} <- timers do
_ = :erlang.cancel_timer(timer)
receive do
{:timeout, ^timer, :kill} -> :ok
after
0 -> :ok
end
end
stacks
end
defp wait_children(pids, size, timers, stacks) do
receive do
{:DOWN, _ref, :process, pid, reason} ->
case pids do
%{^pid => child} ->
stacks = wait_child(pid, child, reason, stacks)
wait_children(pids, size - 1, timers, stacks)
%{} ->
wait_children(pids, size, timers, stacks)
end
{:timeout, timer, :kill} ->
for pid <- Map.fetch!(timers, timer), do: Process.exit(pid, :kill)
wait_children(pids, size, Map.delete(timers, timer), stacks)
end
end
defp wait_child(pid, {_, _, :brutal_kill, _, _} = child, reason, stacks) do
case reason do
:killed -> stacks
_ -> Map.put(stacks, pid, {child, reason})
end
end
defp wait_child(pid, {_, restart, _, _, _} = child, reason, stacks) do
case reason do
{:shutdown, _} -> stacks
:shutdown -> stacks
:normal when restart != :permanent -> stacks
reason -> Map.put(stacks, pid, {child, reason})
end
end
defp maybe_restart_child(pid, reason, %{children: children} = state) do
case children do
%{^pid => {_, restart, _, _, _} = child} ->
maybe_restart_child(restart, reason, pid, child, state)
%{} ->
{:ok, state}
end
end
defp maybe_restart_child(:permanent, reason, pid, child, state) do
report_error(:child_terminated, reason, pid, child, state)
restart_child(pid, child, state)
end
defp maybe_restart_child(_, :normal, pid, _child, state) do
{:ok, delete_child(pid, state)}
end
defp maybe_restart_child(_, :shutdown, pid, _child, state) do
{:ok, delete_child(pid, state)}
end
defp maybe_restart_child(_, {:shutdown, _}, pid, _child, state) do
{:ok, delete_child(pid, state)}
end
defp maybe_restart_child(:transient, reason, pid, child, state) do
report_error(:child_terminated, reason, pid, child, state)
restart_child(pid, child, state)
end
defp maybe_restart_child(:temporary, reason, pid, child, state) do
report_error(:child_terminated, reason, pid, child, state)
{:ok, delete_child(pid, state)}
end
defp delete_child(pid, %{children: children} = state) do
%{state | children: Map.delete(children, pid)}
end
defp restart_child(pid, child, state) do
case add_restart(state) do
{:ok, %{strategy: strategy} = state} ->
case restart_child(strategy, pid, child, state) do
{:ok, state} ->
{:ok, state}
{:try_again, state} ->
send(self(), {:"$gen_restart", pid})
{:ok, state}
end
{:shutdown, state} ->
report_error(:shutdown, :reached_max_restart_intensity, pid, child, state)
{:shutdown, delete_child(pid, state)}
end
end
defp add_restart(state) do
%{max_seconds: max_seconds, max_restarts: max_restarts, restarts: restarts} = state
now = :erlang.monotonic_time(1)
restarts = add_restart([now | restarts], now, max_seconds)
state = %{state | restarts: restarts}
if length(restarts) <= max_restarts do
{:ok, state}
else
{:shutdown, state}
end
end
defp add_restart(restarts, now, period) do
for then <- restarts, now <= then + period, do: then
end
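# A small worked example with assumed values: given max_seconds: 5 and
# prior restarts at monotonic seconds [98, 94], a restart at now = 100
# keeps [100, 98] and drops 94, since 94 + 5 < 100. With max_restarts: 1,
# length([100, 98]) > 1, so add_restart/1 returns {:shutdown, state}.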
defp restart_child(:one_for_one, current_pid, child, state) do
{{m, f, args} = mfa, restart, shutdown, type, modules} = child
%{extra_arguments: extra} = state
case start_child(m, f, extra ++ args) do
{:ok, pid, _} ->
state = delete_child(current_pid, state)
{:ok, save_child(pid, mfa, restart, shutdown, type, modules, state)}
{:ok, pid} ->
state = delete_child(current_pid, state)
{:ok, save_child(pid, mfa, restart, shutdown, type, modules, state)}
:ignore ->
{:ok, delete_child(current_pid, state)}
{:error, reason} ->
report_error(:start_error, reason, {:restarting, current_pid}, child, state)
state = put_in(state.children[current_pid], {:restarting, child})
{:try_again, state}
end
end
defp report_error(error, reason, pid, child, %{name: name, extra_arguments: extra}) do
:error_logger.error_report(
:supervisor_report,
supervisor: name,
errorContext: error,
reason: reason,
offender: extract_child(pid, child, extra)
)
end
defp extract_child(pid, {{m, f, args}, restart, shutdown, type, _modules}, extra) do
[
pid: pid,
id: :undefined,
mfargs: {m, f, extra ++ args},
restart_type: restart,
shutdown: shutdown,
child_type: type
]
end
@impl true
def format_status(:terminate, [_pdict, state]) do
state
end
def format_status(_, [_pdict, %{mod: mod} = state]) do
[data: [{~c"State", state}], supervisor: [{~c"Callback", mod}]]
end
## Helpers
@compile {:inline, call: 2}
defp call(supervisor, req) do
GenServer.call(supervisor, req, :infinity)
end
end
|
lib/elixir/lib/dynamic_supervisor.ex
| 0.879432
| 0.515925
|
dynamic_supervisor.ex
|
starcoder
|
defmodule Mix.Tasks.Phx.Gen.Context do
@shortdoc "Generates a context with functions around an Ecto schema"
@moduledoc """
Generates a context with functions around an Ecto schema.
mix phx.gen.context Accounts User users name:string age:integer
The first argument is the context module followed by the schema module
and its plural name (used as the schema table name).
The context is an Elixir module that serves as an API boundary for
the given resource. A context often holds many related resources.
Therefore, if the context already exists, it will be augmented with
functions for the given resource. Note a resource may also be split
over distinct contexts (such as Accounts.User and Payments.User).
The schema is responsible for mapping the database fields into an
Elixir struct.
Overall, this generator will add the following files to lib/your_app:
* a context module in accounts/accounts.ex, serving as the API boundary
* a schema in accounts/user.ex, with a `users` table
A migration file for the repository and test files for the context
will also be generated.
## Generating without a schema
In some cases, you may wish to bootstrap the context module and
tests, but leave the internal implementation of the context and schema
to yourself. Use the `--no-schema` flag to accomplish this.
## table
By default, the table name for the migration and schema will be
the plural name provided for the resource. To customize this value,
a `--table` option may be provided. For example:
mix phx.gen.context Accounts User users --table cms_users
## binary_id
Generated migration can use `binary_id` for schema's primary key
and its references with option `--binary-id`.
## Default options
This generator uses default options provided in the `:generators`
configuration of your application. These are the defaults:
config :your_app, :generators,
migration: true,
binary_id: false,
sample_binary_id: "11111111-1111-1111-1111-111111111111"
You can override those options per invocation by providing corresponding
switches, e.g. `--no-binary-id` to use normal ids despite the default
configuration or `--migration` to force generation of the migration.
Read the documentation for `phx.gen.schema` for more information on
attributes.
"""
use Mix.Task
alias Mix.Phoenix.{Context, Schema}
alias Mix.Tasks.Phx.Gen
@switches [binary_id: :boolean, table: :string, web: :string,
schema: :boolean, context: :boolean, context_app: :string]
@default_opts [schema: true, context: true]
@doc false
def run(args) do
if Mix.Project.umbrella? do
Mix.raise "mix phx.gen.context can only be run inside an application directory"
end
{context, schema} = build(args)
binding = [context: context, schema: schema]
paths = Mix.Phoenix.generator_paths()
prompt_for_conflicts(context)
context
|> copy_new_files(paths, binding)
|> print_shell_instructions()
end
defp prompt_for_conflicts(context) do
context
|> files_to_be_generated()
|> Mix.Phoenix.prompt_for_conflicts()
end
@doc false
def build(args) do
{opts, parsed, _} = parse_opts(args)
[context_name, schema_name, plural | schema_args] = validate_args!(parsed)
schema_module = inspect(Module.concat(context_name, schema_name))
schema = Gen.Schema.build([schema_module, plural | schema_args], opts, __MODULE__)
context = Context.new(context_name, schema, opts)
{context, schema}
end
defp parse_opts(args) do
{opts, parsed, invalid} = OptionParser.parse(args, switches: @switches)
merged_opts =
@default_opts
|> Keyword.merge(opts)
|> put_context_app(opts[:context_app])
{merged_opts, parsed, invalid}
end
defp put_context_app(opts, nil), do: opts
defp put_context_app(opts, string) do
Keyword.put(opts, :context_app, String.to_atom(string))
end
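# For example (hypothetical argv), parse_opts/1 merges the defaults with
# the parsed switches:
#
#   parse_opts(["Accounts", "User", "users", "--table", "cms_users"])
#   #=> {[schema: true, context: true, table: "cms_users"],
#   #    ["Accounts", "User", "users"], []}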
@doc false
def files_to_be_generated(%Context{schema: schema}) do
if schema.generate? do
Gen.Schema.files_to_be_generated(schema)
else
[]
end
end
@doc false
def copy_new_files(%Context{schema: schema} = context, paths, binding) do
if schema.generate?, do: Gen.Schema.copy_new_files(schema, paths, binding)
inject_schema_access(context, paths, binding)
inject_tests(context, paths, binding)
context
end
defp inject_schema_access(%Context{file: file} = context, paths, binding) do
unless Context.pre_existing?(context) do
Mix.Generator.create_file(file, Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context.ex", binding))
end
paths
|> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/#{schema_access_template(context)}", binding)
|> inject_eex_before_final_end(file, binding)
end
defp write_file(content, file) do
File.write!(file, content)
end
defp inject_tests(%Context{test_file: test_file} = context, paths, binding) do
unless Context.pre_existing_tests?(context) do
Mix.Generator.create_file(test_file, Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context_test.exs", binding))
end
paths
|> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/test_cases.exs", binding)
|> inject_eex_before_final_end(test_file, binding)
end
defp inject_eex_before_final_end(content_to_inject, file_path, binding) do
file = File.read!(file_path)
if String.contains?(file, content_to_inject) do
:ok
else
Mix.shell.info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path)])
file
|> String.trim_trailing()
|> String.trim_trailing("end")
|> EEx.eval_string(binding)
|> Kernel.<>(content_to_inject)
|> Kernel.<>("end\n")
|> write_file(file_path)
end
end
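# A sketch of the injection against a hypothetical context file: the
# trailing "end" is trimmed off, the remaining source is evaluated as EEx
# against `binding`, the already-rendered `content_to_inject` is appended,
# and a fresh "end\n" closes the module again.
#
#   # before: "defmodule Accounts do\n  ...\nend\n"
#   # after:  "defmodule Accounts do\n  ...\n<injected functions>end\n"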
@doc false
def print_shell_instructions(%Context{schema: schema}) do
if schema.generate? do
Gen.Schema.print_shell_instructions(schema)
else
:ok
end
end
defp schema_access_template(%Context{schema: schema}) do
if schema.generate? do
"schema_access.ex"
else
"access_no_schema.ex"
end
end
defp validate_args!([context, schema, _plural | _] = args) do
cond do
not Context.valid?(context) ->
raise_with_help "Expected the context, #{inspect context}, to be a valid module name"
not Schema.valid?(schema) ->
raise_with_help "Expected the schema, #{inspect schema}, to be a valid module name"
context == schema ->
raise_with_help "The context and schema should have different names"
true ->
args
end
end
defp validate_args!(_) do
raise_with_help "Invalid arguments"
end
@doc false
@spec raise_with_help(String.t) :: no_return()
def raise_with_help(msg) do
Mix.raise """
#{msg}
mix phx.gen.html, phx.gen.json and phx.gen.context expect a
context module name, followed by singular and plural names of
the generated resource, ending with any number of attributes.
For example:
mix phx.gen.html Accounts User users name:string
mix phx.gen.json Accounts User users name:string
mix phx.gen.context Accounts User users name:string
The context serves as the API boundary for the given resource.
Multiple resources may belong to a context and a resource may be
split over distinct contexts (such as Accounts.User and Payments.User).
"""
end
end
|
assets/node_modules/phoenix/lib/mix/tasks/phx.gen.context.ex
| 0.859605
| 0.503479
|
phx.gen.context.ex
|
starcoder
|
defmodule Macro.Env do
@moduledoc """
A struct that holds compile time environment information.
The current environment can be accessed at any time as
`__ENV__/0`. Inside macros, the caller environment can be
accessed as `__CALLER__/0`.
An instance of `Macro.Env` must not be modified by hand. If you need to
create a custom environment to pass to `Code.eval_quoted/3`, use the
following trick:
def make_custom_env do
import SomeModule, only: [some_function: 2]
alias A.B.C
__ENV__
end
You may then call `make_custom_env()` to get a struct with the desired
imports and aliases included.
It contains the following fields:
* `module` - the current module name
* `file` - the current file name as a binary
* `line` - the current line as an integer
* `function` - a tuple as `{atom, integer}`, where the first
element is the function name and the second its arity; it is
`nil` if not inside a function
* `context` - the context of the environment; it can be `nil`
(default context), `:guard` (inside a guard) or `:match` (inside a match)
* `aliases` - a list of two-element tuples, where the first
element is the aliased name and the second one the actual name
* `requires` - the list of required modules
* `functions` - a list of functions imported from each module
* `macros` - a list of macros imported from each module
* `macro_aliases` - a list of aliases defined inside the current macro
* `context_modules` - a list of modules defined in the current context
* `lexical_tracker` - PID of the lexical tracker which is responsible for
keeping user info
The following fields pertain to variable handling and must not be accessed or
relied on. To get a list of all variables, see `vars/1`:
* `current_vars`
* `unused_vars`
* `prematch_vars`
* `contextual_vars`
The following fields are deprecated and must not be accessed or relied on:
* `vars` - a list keeping all defined variables as `{var, context}`
"""
@type name_arity :: {atom, arity}
@type file :: binary
@type line :: non_neg_integer
@type aliases :: [{module, module}]
@type macro_aliases :: [{module, {term, module}}]
@type context :: :match | :guard | nil
@type requires :: [module]
@type functions :: [{module, [name_arity]}]
@type macros :: [{module, [name_arity]}]
@type context_modules :: [module]
@type lexical_tracker :: pid | nil
@type variable :: {atom, atom | term}
@typep vars :: [variable]
@typep var_type :: :term
@typep var_version :: non_neg_integer
@typep unused_vars :: %{{variable, var_version} => non_neg_integer | false}
@typep current_vars :: %{variable => {var_version, var_type}}
@typep prematch_vars :: current_vars | :warn | :raise | :pin | :apply
@typep contextual_vars :: [atom]
@type t :: %{
__struct__: __MODULE__,
module: atom,
file: file,
line: line,
function: name_arity | nil,
context: context,
requires: requires,
aliases: aliases,
functions: functions,
macros: macros,
macro_aliases: aliases,
context_modules: context_modules,
vars: vars,
unused_vars: unused_vars,
current_vars: current_vars,
prematch_vars: prematch_vars,
lexical_tracker: lexical_tracker,
contextual_vars: contextual_vars
}
# TODO: Remove :vars field on v2.0
def __struct__ do
%{
__struct__: __MODULE__,
module: nil,
file: "nofile",
line: 0,
function: nil,
context: nil,
requires: [],
aliases: [],
functions: [],
macros: [],
macro_aliases: [],
context_modules: [],
vars: [],
unused_vars: %{},
current_vars: %{},
prematch_vars: :warn,
lexical_tracker: nil,
contextual_vars: []
}
end
def __struct__(kv) do
Enum.reduce(kv, __struct__(), fn {k, v}, acc -> :maps.update(k, v, acc) end)
end
@doc """
Returns a list of variables in the current environment.
Each variable is identified by a tuple of two elements,
where the first element is the variable name as an atom
and the second element is its context, which may be an
atom or an integer.
"""
@doc since: "1.7.0"
@spec vars(t) :: [variable]
def vars(env)
def vars(%{__struct__: Macro.Env, current_vars: current_vars}) do
Map.keys(current_vars)
end
@doc """
Checks if a variable belongs to the environment.
"""
@doc since: "1.7.0"
@spec has_var?(t, variable) :: boolean()
def has_var?(env, var)
def has_var?(%{__struct__: Macro.Env, current_vars: current_vars}, var) do
Map.has_key?(current_vars, var)
end
@doc """
Returns a keyword list containing the file and line
information as keys.
"""
@spec location(t) :: keyword
def location(env)
def location(%{__struct__: Macro.Env, file: file, line: line}) do
[file: file, line: line]
end
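# For example, evaluated inside a module (values are illustrative):
#
#   Macro.Env.location(__ENV__)
#   #=> [file: "lib/my_app/example.ex", line: 12]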
@doc """
Returns a `Macro.Env` in the match context.
"""
@spec to_match(t) :: t
def to_match(%{__struct__: Macro.Env, context: :match} = env) do
env
end
def to_match(%{__struct__: Macro.Env, current_vars: vars} = env) do
%{env | context: :match, prematch_vars: vars}
end
@doc """
Returns whether the compilation environment is currently
inside a guard.
"""
@spec in_guard?(t) :: boolean
def in_guard?(env)
def in_guard?(%{__struct__: Macro.Env, context: context}), do: context == :guard
@doc """
Returns whether the compilation environment is currently
inside a match clause.
"""
@spec in_match?(t) :: boolean
def in_match?(env)
def in_match?(%{__struct__: Macro.Env, context: context}), do: context == :match
@doc """
Returns the environment stacktrace.
"""
@spec stacktrace(t) :: list
def stacktrace(%{__struct__: Macro.Env} = env) do
cond do
is_nil(env.module) ->
[{:elixir_compiler, :__FILE__, 1, relative_location(env)}]
is_nil(env.function) ->
[{env.module, :__MODULE__, 0, relative_location(env)}]
true ->
{name, arity} = env.function
[{env.module, name, arity, relative_location(env)}]
end
end
defp relative_location(env) do
[file: String.to_charlist(Path.relative_to_cwd(env.file)), line: env.line]
end
end
|
lib/elixir/lib/macro/env.ex
| 0.856197
| 0.450903
|
env.ex
|
starcoder
|
defmodule Benchee.Formatters.Console do
@moduledoc """
Formatter to transform the statistics output into a structure suitable for
output through `IO.write` on the console.
"""
@behaviour Benchee.Formatter
alias Benchee.Suite
alias Benchee.Formatters.Console.{Memory, RunTime}
def format(suite), do: format(suite, %{})
@doc """
Formats the benchmark statistics to a report suitable for output on the CLI.
Returns a list of lists, where each list element is a group belonging to one
specific input. So if only one (or no) input was given through `:inputs`,
then there's just one list inside.
## Examples
```
iex> scenarios = [
...> %Benchee.Benchmark.Scenario{
...> name: "My Job", input_name: "My input", run_time_statistics: %Benchee.Statistics{
...> average: 200.0,
...> ips: 5000.0,
...> std_dev_ratio: 0.1,
...> median: 190.0,
...> percentiles: %{99 => 300.1},
...> sample_size: 200
...> },
...> memory_usage_statistics: %Benchee.Statistics{}
...> },
...> %Benchee.Benchmark.Scenario{
...> name: "Job 2", input_name: "My input", run_time_statistics: %Benchee.Statistics{
...> average: 400.0,
...> ips: 2500.0,
...> std_dev_ratio: 0.2,
...> median: 390.0,
...> percentiles: %{99 => 500.1},
...> sample_size: 200
...> },
...> memory_usage_statistics: %Benchee.Statistics{}
...> }
...> ]
iex> suite = %Benchee.Suite{
...> scenarios: scenarios,
...> configuration: %Benchee.Configuration{
...> unit_scaling: :best,
...> }
...> }
iex> Benchee.Formatters.Console.format(suite, %{comparison: false, extended_statistics: false})
[["\n##### With input My input #####", "\nName ips average deviation median 99th %\n",
"My Job 5 K 200 ns ±10.00% 190 ns 300.10 ns\n",
"Job 2 2.50 K 400 ns ±20.00% 390 ns 500.10 ns\n"]]
```
"""
@impl true
@spec format(Suite.t(), map) :: [any]
def format(%Suite{scenarios: scenarios, configuration: config}, options) do
if Map.has_key?(options, :unit_scaling), do: warn_unit_scaling()
config =
config
|> Map.take([:unit_scaling, :title])
|> Map.merge(options)
scenarios
|> Enum.reduce([], &update_grouped_list/2)
|> Enum.map(fn {input, scenarios} ->
generate_output(scenarios, config, input)
end)
end
# Normally one would prepend to lists and not append. In this case that led
# to 2 `Enum.reverse` calls scattered around. As these lists are usually very
# small (mostly less than 10 elements) I opted for `++` here.
defp update_grouped_list(scenario, grouped_scenarios) do
case List.keyfind(grouped_scenarios, scenario.input_name, 0) do
{_, group} ->
new_tuple = {scenario.input_name, group ++ [scenario]}
List.keyreplace(grouped_scenarios, scenario.input_name, 0, new_tuple)
_ ->
grouped_scenarios ++ [{scenario.input_name, [scenario]}]
end
end
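# A sketch with hypothetical scenarios: a second scenario for an input
# that is already grouped is appended to that input's group.
#
#   update_grouped_list(%{input_name: "small", name: "b"},
#     [{"small", [%{input_name: "small", name: "a"}]}])
#   #=> [{"small", [%{input_name: "small", name: "a"},
#   #               %{input_name: "small", name: "b"}]}]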
def write(suite), do: write(suite, %{})
@doc """
Takes the output of `format/1` and writes that to the console.
"""
@impl true
@spec write(any, map) :: :ok | {:error, String.t()}
def write(output, _options) do
IO.write(output)
rescue
_ -> {:error, "Unknown Error"}
end
defp warn_unit_scaling do
IO.puts(
"unit_scaling is now a top level configuration option, avoid passing it as a formatter option."
)
end
defp generate_output(scenarios, config, input) do
[
suite_header(input, config)
| RunTime.format_scenarios(scenarios, config) ++ Memory.format_scenarios(scenarios, config)
]
end
defp suite_header(input, config) do
"#{title_header(config)}#{input_header(input)}"
end
defp title_header(%{title: nil}), do: ""
defp title_header(%{title: title}), do: "\n*** #{title} ***\n"
@no_input_marker Benchee.Benchmark.no_input()
defp input_header(input) when input == @no_input_marker, do: ""
defp input_header(input), do: "\n##### With input #{input} #####"
end
|
lib/benchee/formatters/console.ex
| 0.865281
| 0.853119
|
console.ex
|
starcoder
|
defmodule AdventOfCode.Day14 do
@mask_regex ~r/^mask = ([01X]{36})$/
@mem_assignment ~r/^mem\[(\d+)\] = (\d+)$/
def convert_to_binary(num) do
default_binary = (for x <- 0..35, do: {x, 0}) |> Enum.into(%{})
{_, bin} =
num
|> :erlang.integer_to_list(2)
|> Enum.reverse()
|> Enum.reduce({0, default_binary}, fn bin, {pos, acc} ->
bin = bin - 48
acc = Map.put(acc, pos, bin)
{pos + 1, acc}
end)
bin
end
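# Example: convert_to_binary(11) returns a 36-entry map of bit positions
# in which positions 0, 1 and 3 hold 1 (11 = 0b1011) and the rest hold 0.
# binary_to_int/1 inverts it, so binary_to_int(convert_to_binary(11)) == 11.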
def binary_to_int(bin) do
bin
|> Map.to_list()
|> Enum.sort(:desc)
|> Enum.map(fn {_, v} -> v + 48 end)
|> :erlang.list_to_integer(2)
end
def parse_mask(mask) do
mask
|> Enum.reverse()
|> Enum.reduce({0, %{}}, fn x, {pos, acc} ->
case x do
"X" -> {pos + 1, acc}
"0" -> {pos + 1, Map.put(acc, pos, 0)}
"1" -> {pos + 1, Map.put(acc, pos, 1)}
end
end)
end
def parse_mask(mask, :v2) do
mask
|> Enum.reverse()
|> Enum.reduce({0, %{}}, fn x, {pos, acc} ->
case x do
"X" -> {pos + 1, Map.put(acc, pos, :X)}
"0" -> {pos + 1, Map.put(acc, pos, 0)}
"1" -> {pos + 1, Map.put(acc, pos, 1)}
end
end)
end
def compute_addresses(bin_pos, _, 36) do
[binary_to_int(bin_pos)]
end
def compute_addresses(bin_pos, mask, pos) do
%{^pos => value} = mask
case value do
0 -> compute_addresses(bin_pos, mask, pos + 1)
1 ->
new_bin_pos = Map.put(bin_pos, pos, 1)
compute_addresses(new_bin_pos, mask, pos + 1)
:X ->
one_bin_pos = Map.put(bin_pos, pos, 1)
zero_bin_pos = Map.put(bin_pos, pos, 0)
left_results = compute_addresses(one_bin_pos, mask, pos + 1)
right_results = compute_addresses(zero_bin_pos, mask, pos + 1)
left_results ++ right_results
end
end
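# Sketch: given a v2 mask that is :X at positions 0 and 1 and 0 everywhere
# else, compute_addresses/3 expands a base address into four concrete
# addresses, one per combination of the two floating bits (00, 01, 10, 11).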
def day14() do
input =
"day14_input"
|> AdventOfCode.read_file()
{memory, _mask} =
Enum.reduce(input, {%{}, %{}}, fn line, {mem, mask} ->
case Regex.run(@mask_regex, line) do
nil ->
[_, pos, value] = Regex.run(@mem_assignment, line)
{pos, _} = Integer.parse(pos)
{value, _} = Integer.parse(value)
bin_value = convert_to_binary(value)
bin_value = Map.merge(bin_value, mask)
new_value = binary_to_int(bin_value)
mem = Map.put(mem, pos, new_value)
{mem, mask}
[_, value] ->
{_, mask} =
value
|> String.graphemes()
|> parse_mask()
{mem, mask}
end
end)
part1 = Enum.reduce(memory, 0, fn {_k, v}, acc -> v + acc end)
{memory, _} =
Enum.reduce(input, {%{}, %{}}, fn line, {mem, mask} ->
case Regex.run(@mask_regex, line) do
nil ->
[_, pos, value] = Regex.run(@mem_assignment, line)
{pos, _} = Integer.parse(pos)
{value, _} = Integer.parse(value)
bin_pos = convert_to_binary(pos)
all_positions = compute_addresses(bin_pos, mask, 0)
mem = Enum.reduce(all_positions, mem, fn pos, mem ->
Map.put(mem, pos, value)
end)
{mem, mask}
[_, value] ->
{_, mask} =
value
|> String.graphemes()
|> parse_mask(:v2)
{mem, mask}
end
end)
part2 = Enum.reduce(memory, 0, fn {_k, v}, acc -> v + acc end)
{part1, part2}
end
end
|
lib/day14.ex
| 0.530723
| 0.562207
|
day14.ex
|
starcoder
|
defmodule Mix.Tasks.Absinthe.Schema.Json do
require Logger
use Mix.Task
import Mix.Generator
@shortdoc "Generate a schema.json file for an Absinthe schema"
@default_filename "./schema.json"
@moduledoc """
Generate a schema.json file
## Usage
absinthe.schema.json [FILENAME] [OPTIONS]
The JSON codec to be used needs to be included in your `mix.exs` dependencies. If using the default codec,
see the Jason [installation instructions](https://hexdocs.pm/jason).
## Options
* `--schema` - The name of the `Absinthe.Schema` module defining the schema to be generated.
Default: As [configured](https://hexdocs.pm/mix/Mix.Config.html) for `:absinthe` `:schema`
* `--json-codec` - Codec to use to generate the JSON file (see [Custom Codecs](#module-custom-codecs)).
Default: [`Jason`](https://hexdocs.pm/jason/)
* `--pretty` - Whether to pretty-print.
Default: `false`
## Examples
Write to default path `#{@default_filename}` using the `:schema` configured for the `:absinthe` application:
$ mix absinthe.schema.json
Write to default path `#{@default_filename}` using the `MySchema` schema:
$ mix absinthe.schema.json --schema MySchema
Write to path `/path/to/schema.json` using the `MySchema` schema, with pretty-printing:
$ mix absinthe.schema.json --schema MySchema --pretty /path/to/schema.json
Write to default path `#{@default_filename}` using the `MySchema` schema and a custom JSON codec, `MyCodec`:
$ mix absinthe.schema.json --schema MySchema --json-codec MyCodec
## Custom Codecs
Any module that provides `encode!/2` can be used as a custom codec:
encode!(value, options)
* `value` will be provided as a Map containing the generated schema.
* `options` will be a keyword list with a `:pretty` boolean, indicating whether the user requested pretty-printing.
The function should return a string to be written to the output file.
"""
defmodule Options do
@moduledoc false
defstruct filename: nil, schema: nil, json_codec: nil, pretty: false
@type t() :: %__MODULE__{
filename: String.t(),
schema: module(),
json_codec: module(),
pretty: boolean()
}
end
@doc "Callback implementation for `Mix.Task.run/1`, which receives a list of command-line args."
@spec run(argv :: [binary()]) :: any()
def run(argv) do
Application.ensure_all_started(:absinthe)
Mix.Task.run("loadpaths", argv)
Mix.Project.compile(argv)
opts = parse_options(argv)
case generate_schema(opts) do
{:ok, content} -> write_schema(content, opts.filename)
{:error, error} -> raise error
end
end
@doc false
@spec generate_schema(Options.t()) :: {:ok, String.t()} | {:error, term()}
def generate_schema(%Options{
pretty: pretty,
schema: schema,
json_codec: json_codec
}) do
with {:ok, result} <- Absinthe.Schema.introspect(schema),
content <- json_codec.encode!(result, pretty: pretty) do
{:ok, content}
else
{:error, reason} -> {:error, reason}
error -> {:error, error}
end
end
@doc false
@spec parse_options([String.t()]) :: Options.t()
def parse_options(argv) do
parse_options = [strict: [schema: :string, json_codec: :string, pretty: :boolean]]
{opts, args, _} = OptionParser.parse(argv, parse_options)
%Options{
filename: args |> List.first() || @default_filename,
schema: find_schema(opts),
json_codec: json_codec_as_atom(opts),
pretty: Keyword.get(opts, :pretty, false)
}
end
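# For example (hypothetical argv; note find_schema/1 uses
# Module.safe_concat/1, so the schema module must already be loaded):
#
#   parse_options(["/tmp/schema.json", "--schema", "MySchema", "--pretty"])
#   #=> %Options{filename: "/tmp/schema.json", schema: MySchema,
#   #     json_codec: Jason, pretty: true}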
defp json_codec_as_atom(opts) do
opts
|> Keyword.fetch(:json_codec)
|> case do
{:ok, codec} -> Module.concat([codec])
_ -> Jason
end
end
defp find_schema(opts) do
case Keyword.get(opts, :schema, Application.get_env(:absinthe, :schema)) do
nil ->
raise "No --schema given or :schema configured for the :absinthe application"
value ->
[value] |> Module.safe_concat()
end
end
defp write_schema(content, filename) do
create_directory(Path.dirname(filename))
create_file(filename, content, force: true)
end
end
|
lib/mix/tasks/absinthe.schema.json.ex
| 0.834373
| 0.46035
|
absinthe.schema.json.ex
|
starcoder
|
defmodule Cashtrail.QueryBuilder do
@moduledoc """
This module is responsible for generating queries that contexts use to
filter and search by schema fields.
"""
import Ecto.Query
@doc """
Returns an `Ecto.Query` with the queries based on the given filters and
allowed fields, or the given `Ecto.Queryable` without changes.
Only the params whose keys appear in the `allowed_filters` param are used.
## Expected arguments
* query - The `Ecto.Queryable` that the query will be performed.
* params - A `map` whose keys are the fields and whose values are the data
to be filtered. The keys can be either `string` or `atom`, and each value
must be the same type as the data in the database, or a list of values of
that type.
* allowed_fields - A `list` of `atom` with the fields that will be used to
perform the query. The query will be based only on params whose keys match
this list.
## Examples
iex> Cashtrail.QueryBuilder.build_filter(Cashtrail.Users.User, nil, [])
Cashtrail.Users.User
iex> Cashtrail.QueryBuilder.build_filter(Cashtrail.Users.User, %{first_name: "my name"}, [:first_name])
#Ecto.Query<from u0 in Cashtrail.Users.User, where: u0.first_name == ^"my name">
iex> Cashtrail.QueryBuilder.build_filter(Cashtrail.Users.User, %{first_name: ["my", "name"]}, [:first_name])
#Ecto.Query<from u0 in Cashtrail.Users.User, where: u0.first_name in ^["my", "name"]>
iex> Cashtrail.QueryBuilder.build_filter(Cashtrail.Users.User, %{"first_name" => "my name"}, [:first_name])
#Ecto.Query<from u0 in Cashtrail.Users.User, where: u0.first_name == ^"my name">
iex> Cashtrail.QueryBuilder.build_filter(Cashtrail.Users.User, %{"first_name" => ["my", "name"]}, [:first_name])
#Ecto.Query<from u0 in Cashtrail.Users.User, where: u0.first_name in ^["my", "name"]>
"""
@spec build_filter(Ecto.Queryable.t(), nil | map, list(atom)) ::
Ecto.Query.t() | Ecto.Queryable.t()
def build_filter(query, nil, _), do: query
def build_filter(query, params, allowed_filters) do
params
|> Enum.map(&convert_key_to_atom/1)
|> Enum.filter(&filter_allowed(&1, allowed_filters))
|> Enum.reduce(query, fn
{key, value}, query when is_list(value) ->
from(q in query, where: field(q, ^key) in ^value)
{key, value}, query ->
from(q in query, where: field(q, ^key) == ^value)
end)
end
defp convert_key_to_atom({key, value}) when is_binary(key),
do: {String.to_existing_atom(key), value}
defp convert_key_to_atom(term), do: term
defp filter_allowed({key, _}, allowed_filters), do: key in allowed_filters
@doc """
Returns an `Ecto.Query` with the queries based on the given term and fields,
or the given `Ecto.Queryable` without changes.
The search is implemented using `ILIKE` on the fields of the given queryable
schema, and the term must be a `string`.
## Expected arguments
* query - The `Ecto.Queryable` that the query will be performed.
* term - A `string` with the text that will be searched.
* fields - A `list` of `atom` with the fields that will be used to perform
the query. The given fields must be string or text columns; otherwise you
will get an error from Ecto.
## Examples
iex> Cashtrail.QueryBuilder.build_search(Cashtrail.Users.User, nil, [])
Cashtrail.Users.User
iex> Cashtrail.QueryBuilder.build_search(Cashtrail.Users.User, "my name", [:first_name, :last_name])
#Ecto.Query<from u0 in Cashtrail.Users.User, or_where: ilike(u0.first_name, ^"%my name%"), or_where: ilike(u0.last_name, ^"%my name%")>
"""
@spec build_search(Ecto.Queryable.t(), nil | String.t(), list(atom) | keyword()) ::
Ecto.Query.t() | Ecto.Queryable.t()
def build_search(query, nil, _), do: query
def build_search(query, term, fields) do
do_build_search(query, "%#{term}%", fields)
end
defp do_build_search(query, _, []), do: query
defp do_build_search(query, term, [{relation, fields} | tail]) do
query =
join(query, :inner, [q], r in assoc(q, ^relation))
|> build_relation_search(term, fields)
do_build_search(query, term, tail)
end
defp do_build_search(query, term, [field | tail]) do
query = from(q in query, or_where: ilike(field(q, ^field), ^term))
do_build_search(query, term, tail)
end
defp build_relation_search(query, _term, []), do: query
defp build_relation_search(query, term, [field | tail]) do
query = or_where(query, [_q, r], ilike(field(r, ^field), ^term))
build_relation_search(query, term, tail)
end
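# A sketch of a relation search (the schema and association names are
# hypothetical): a keyword entry joins the association and ORs ilike/2
# conditions over its fields as well as the top-level ones.
#
#   build_search(User, "ann", [:first_name, contacts: [:email]])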
end
|
apps/cashtrail/lib/cashtrail/query_builder.ex
| 0.866627
| 0.600481
|
query_builder.ex
|
starcoder
|
defmodule Bamboo.Mailer do
@moduledoc """
Functions for delivering emails using adapters and delivery strategies.
Adds `deliver_now/1` and `deliver_later/1` functions to the mailer module it
is used by.
Bamboo [ships with several adapters][available-adapters]. It is also possible
to create your own adapter.
See the ["Getting Started" section of the README][getting-started] for an
example of how to set up and configure a mailer for use.
[available-adapters]: https://github.com/thoughtbot/bamboo/tree/master/lib/bamboo/adapters
[getting-started]: https://hexdocs.pm/bamboo/readme.html#getting-started
## Example
Creating a Mailer is as simple as defining a module in your application and
using the `Bamboo.Mailer`.
# some/path/within/your/app/mailer.ex
defmodule MyApp.Mailer do
use Bamboo.Mailer, otp_app: :my_app
end
The mailer requires some configuration within your application.
# config/config.exs
config :my_app, MyApp.Mailer,
adapter: Bamboo.MandrillAdapter, # Specify your preferred adapter
api_key: "my_api_key" # Specify adapter-specific configuration
You will also want to define an email module for building email structs
that your mailer can send. See `Bamboo.Email` for more information.
# some/path/within/your/app/email.ex
defmodule MyApp.Email do
import Bamboo.Email
def welcome_email do
new_email(
to: "<EMAIL>",
from: "<EMAIL>",
subject: "Welcome to the app.",
html_body: "<strong>Thanks for joining!</strong>",
text_body: "Thanks for joining!"
)
end
end
You are now able to send emails with your mailer module wherever you see
fit within your application.
"""
@cannot_call_directly_error """
cannot call Bamboo.Mailer directly. Instead implement your own Mailer module
with: use Bamboo.Mailer, otp_app: :my_app
"""
require Logger
alias Bamboo.Formatter
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@spec deliver_now(Bamboo.Email.t(), Enum.t()) :: Bamboo.Email.t() | {any, Bamboo.Email.t()}
def deliver_now(email, opts \\ []) do
config = build_config()
Bamboo.Mailer.deliver_now(config.adapter, email, config, opts)
end
@spec deliver_later(Bamboo.Email.t()) :: Bamboo.Email.t()
def deliver_later(email) do
config = build_config()
Bamboo.Mailer.deliver_later(config.adapter, email, config)
end
otp_app = Keyword.fetch!(opts, :otp_app)
defp build_config, do: Bamboo.Mailer.build_config(__MODULE__, unquote(otp_app))
@spec deliver(any()) :: no_return()
def deliver(_email) do
raise """
you called deliver/1, but it has been renamed to deliver_now/1 to add clarity.
Use deliver_now/1 to send right away, or deliver_later/1 to send in the background.
"""
end
end
end
@doc """
Deliver an email right away.
Call your mailer with `deliver_now/1` to send an email right away. Call
`deliver_later/1` if you want to send in the background.
Pass in an argument of `response: true` if you need access to the response
from delivering the email. This returns a tuple of the `Email` struct and the
response from calling `deliver` with your adapter. This is useful if you need
access to any data sent back from your email provider in the response.
Email.welcome_email |> Mailer.deliver_now(response: true)
"""
def deliver_now(_email, _opts \\ []) do
raise @cannot_call_directly_error
end
@doc """
Deliver an email in the background.
Call your mailer with `deliver_later/1` to send an email using the configured
`deliver_later_strategy`. If no `deliver_later_strategy` is set,
`Bamboo.TaskSupervisorStrategy` will be used. See
`Bamboo.DeliverLaterStrategy` to learn how to change how emails are delivered
with `deliver_later/1`.
"""
def deliver_later(_email) do
raise @cannot_call_directly_error
end
@doc false
def deliver_now(adapter, email, config, response: true) do
email = email |> validate_and_normalize(adapter)
if email.to == [] && email.cc == [] && email.bcc == [] do
debug_unsent(email)
email
else
debug_sent(email, adapter)
response = adapter.deliver(email, config)
{email, response}
end
end
@doc false
def deliver_now(adapter, email, config, _opts) do
email = email |> validate_and_normalize(adapter)
if email.to == [] && email.cc == [] && email.bcc == [] do
debug_unsent(email)
else
debug_sent(email, adapter)
adapter.deliver(email, config)
end
email
end
@doc false
def deliver_later(adapter, email, config) do
email = email |> validate_and_normalize(adapter)
if email.to == [] && email.cc == [] && email.bcc == [] do
debug_unsent(email)
else
debug_sent(email, adapter)
config.deliver_later_strategy.deliver_later(adapter, email, config)
end
email
end
defp debug_sent(email, adapter) do
Logger.debug(fn ->
"""
Sending email with #{inspect(adapter)}:
#{inspect(email, limit: 150)}
"""
end)
end
defp debug_unsent(email) do
Logger.debug(fn ->
"""
Email was not sent because recipients are empty.
Full email - #{inspect(email, limit: 150)}
"""
end)
end
defp validate_and_normalize(email, adapter) do
email |> validate(adapter) |> normalize_addresses
end
defp validate(email, adapter) do
email
|> validate_from_address
|> validate_recipients
|> validate_attachment_support(adapter)
end
defp validate_attachment_support(%{attachments: []} = email, _adapter), do: email
defp validate_attachment_support(email, adapter) do
if function_exported?(adapter, :supports_attachments?, 0) && adapter.supports_attachments? do
email
else
raise "the #{adapter} does not support attachments yet."
end
end
defp validate_from_address(%{from: nil}) do
raise Bamboo.EmptyFromAddressError, nil
end
defp validate_from_address(%{from: {_, nil}}) do
raise Bamboo.EmptyFromAddressError, nil
end
defp validate_from_address(email), do: email
defp validate_recipients(%Bamboo.Email{} = email) do
if Enum.all?(
Enum.map([:to, :cc, :bcc], &Map.get(email, &1)),
&is_nil_recipient?/1
) do
raise Bamboo.NilRecipientsError, email
else
email
end
end
defp is_nil_recipient?(nil), do: true
defp is_nil_recipient?({_, nil}), do: true
defp is_nil_recipient?([]), do: false
defp is_nil_recipient?([_ | _] = recipients) do
Enum.all?(recipients, &is_nil_recipient?/1)
end
defp is_nil_recipient?(_), do: false
@doc """
Wraps to, cc and bcc addresses in a list and normalizes email addresses.
Also formats the from address. Email normalization/formatting is done by
implementations of the [Bamboo.Formatter] protocol.
"""
def normalize_addresses(email) do
%{
email
| from: format(email.from, :from),
to: format(List.wrap(email.to), :to),
cc: format(List.wrap(email.cc), :cc),
bcc: format(List.wrap(email.bcc), :bcc)
}
end
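# A sketch with hypothetical addresses: a bare string recipient is wrapped
# in a list and pushed through the Bamboo.Formatter protocol, which for
# strings yields a {name, address} tuple with a nil name.
#
#   normalize_addresses(%Bamboo.Email{from: "me@example.com",
#     to: "you@example.com"})
#   # to becomes [{nil, "you@example.com"}]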
defp format(record, type) do
Formatter.format_email_address(record, %{type: type})
end
@doc false
def parse_opts(mailer, opts) do
Logger.warn(
"#{__MODULE__}.parse_opts/2 has been deprecated. Use #{__MODULE__}.build_config/2"
)
otp_app = Keyword.fetch!(opts, :otp_app)
build_config(mailer, otp_app)
end
def build_config(mailer, otp_app) do
otp_app
|> Application.fetch_env!(mailer)
|> Map.new()
|> handle_adapter_config
end
defp handle_adapter_config(base_config = %{adapter: adapter}) do
adapter.handle_config(base_config)
|> Map.put_new(:deliver_later_strategy, Bamboo.TaskSupervisorStrategy)
end
end
|
lib/bamboo/mailer.ex
| 0.727879
| 0.491761
|
mailer.ex
|
starcoder
|
defmodule AWS.Shield do
@moduledoc """
AWS Shield Advanced
This is the *AWS Shield Advanced API Reference*.
This guide is for developers who need detailed information about the AWS Shield
Advanced API actions, data types, and errors. For detailed information about AWS
WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF
and AWS Shield Advanced APIs, see the [AWS WAF and AWS Shield Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "AWS Shield",
api_version: "2016-06-02",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "shield",
global?: true,
protocol: "json",
service_id: "Shield",
signature_version: "v4",
signing_name: "shield",
target_prefix: "AWSShield_20160616"
}
end
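# A hedged usage sketch; building the %AWS.Client{} is assumed to follow
# the aws-elixir docs, and the input map here is a placeholder:
#
#   # client = ... an %AWS.Client{} configured with your credentials ...
#   # AWS.Shield.list_protections(client, %{})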
@doc """
Authorizes the DDoS Response Team (DRT) to access the specified Amazon S3 bucket
containing your AWS WAF logs.
You can associate up to 10 Amazon S3 buckets with your subscription.
To use the services of the DRT and make an `AssociateDRTLogBucket` request, you
must be subscribed to the [Business Support plan](https://aws.amazon.com/premiumsupport/business-support/) or the
[Enterprise Support plan](https://aws.amazon.com/premiumsupport/enterprise-support/).
"""
def associate_drt_log_bucket(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateDRTLogBucket", input, options)
end
@doc """
Authorizes the DDoS Response Team (DRT), using the specified role, to access
your AWS account to assist with DDoS attack mitigation during potential attacks.
This enables the DRT to inspect your AWS WAF configuration and create or update
AWS WAF rules and web ACLs.
You can associate only one `RoleArn` with your subscription. If you submit an
`AssociateDRTRole` request for an account that already has an associated role,
the new `RoleArn` will replace the existing `RoleArn`.
Prior to making the `AssociateDRTRole` request, you must attach the
[AWSShieldDRTAccessPolicy](https://console.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) managed policy to the role you will specify in the request. For more information
see [Attaching and Detaching IAM Policies](
https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html).
The role must also trust the service principal ` drt.shield.amazonaws.com`. For
more information, see [IAM JSON Policy Elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html).
The DRT will have access only to your AWS WAF and Shield resources. By
submitting this request, you authorize the DRT to inspect your AWS WAF and
Shield configuration and create and update AWS WAF rules and web ACLs on your
behalf. The DRT takes these actions only if explicitly authorized by you.
You must have the `iam:PassRole` permission to make an `AssociateDRTRole`
request. For more information, see [Granting a User Permissions to Pass a Role to an AWS
Service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html).
To use the services of the DRT and make an `AssociateDRTRole` request, you must
be subscribed to the [Business Support plan](https://aws.amazon.com/premiumsupport/business-support/) or the
[Enterprise Support plan](https://aws.amazon.com/premiumsupport/enterprise-support/).
"""
def associate_drt_role(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateDRTRole", input, options)
end
@doc """
Adds health-based detection to the Shield Advanced protection for a resource.
Shield Advanced health-based detection uses the health of your AWS resource to
improve responsiveness and accuracy in attack detection and mitigation.
You define the health check in Route 53 and then associate it with your Shield
Advanced protection. For more information, see [Shield Advanced Health-Based Detection](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html#ddos-advanced-health-check-option)
in the [AWS WAF and AWS Shield Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def associate_health_check(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateHealthCheck", input, options)
end
@doc """
Initializes proactive engagement and sets the list of contacts for the DDoS
Response Team (DRT) to use.
You must provide at least one phone number in the emergency contact list.
After you have initialized proactive engagement using this call, to disable or
enable proactive engagement, use the calls `DisableProactiveEngagement` and
`EnableProactiveEngagement`.
This call defines the list of email addresses and phone numbers that the DDoS
Response Team (DRT) can use to contact you for escalations to the DRT and to
initiate proactive customer support.
The contacts that you provide in the request replace any contacts that were
already defined. If you already have contacts defined and want to use them,
retrieve the list using `DescribeEmergencyContactSettings` and then provide it
to this call.
"""
def associate_proactive_engagement_details(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"AssociateProactiveEngagementDetails",
input,
options
)
end
@doc """
Enables AWS Shield Advanced for a specific AWS resource.
The resource can be an Amazon CloudFront distribution, Elastic Load Balancing
load balancer, AWS Global Accelerator accelerator, Elastic IP Address, or an
Amazon Route 53 hosted zone.
You can add protection to only a single resource with each CreateProtection
request. If you want to add protection to multiple resources at once, use the
[AWS WAF console](https://console.aws.amazon.com/waf/). For more information see [Getting Started with AWS Shield
Advanced](https://docs.aws.amazon.com/waf/latest/developerguide/getting-started-ddos.html)
and [Add AWS Shield Advanced Protection to more AWS Resources](https://docs.aws.amazon.com/waf/latest/developerguide/configure-new-protection.html).
"""
def create_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateProtection", input, options)
end
@doc """
Creates a grouping of protected resources so they can be handled as a
collective.
This resource grouping improves the accuracy of detection and reduces false
positives.
"""
def create_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateProtectionGroup", input, options)
end
@doc """
Activates AWS Shield Advanced for an account.
When you initially create a subscription, your subscription is set to be
automatically renewed at the end of the existing subscription period. You can
change this by submitting an `UpdateSubscription` request.
"""
def create_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSubscription", input, options)
end
@doc """
Deletes an AWS Shield Advanced `Protection`.
"""
def delete_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProtection", input, options)
end
@doc """
Removes the specified protection group.
"""
def delete_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProtectionGroup", input, options)
end
@doc """
Removes AWS Shield Advanced from an account.
AWS Shield Advanced requires a 1-year subscription commitment. You cannot delete
a subscription prior to the completion of that commitment.
"""
def delete_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSubscription", input, options)
end
@doc """
Describes the details of a DDoS attack.
"""
def describe_attack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAttack", input, options)
end
@doc """
Provides information about the number and type of attacks AWS Shield has
detected in the last year for all resources that belong to your account,
regardless of whether you've defined Shield protections for them.
This operation is available to Shield customers as well as to Shield Advanced
customers.
The operation returns data for the time range of midnight UTC, one year ago, to
midnight UTC, today. For example, if the current time is `2020-10-26 15:39:32
PDT`, equal to `2020-10-26 22:39:32 UTC`, then the time range for the attack
data returned is from `2019-10-26 00:00:00 UTC` to `2020-10-26 00:00:00 UTC`.
The time range indicates the period covered by the attack statistics data items.
"""
def describe_attack_statistics(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAttackStatistics", input, options)
end
@doc """
Returns the current role and list of Amazon S3 log buckets used by the DDoS
Response Team (DRT) to access your AWS account while assisting with attack
mitigation.
"""
def describe_drt_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDRTAccess", input, options)
end
@doc """
A list of email addresses and phone numbers that the DDoS Response Team (DRT)
can use to contact you if you have proactive engagement enabled, for escalations
to the DRT and to initiate proactive customer support.
"""
def describe_emergency_contact_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEmergencyContactSettings", input, options)
end
@doc """
Lists the details of a `Protection` object.
"""
def describe_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProtection", input, options)
end
@doc """
Returns the specification for the specified protection group.
"""
def describe_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProtectionGroup", input, options)
end
@doc """
Provides details about the AWS Shield Advanced subscription for an account.
"""
def describe_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSubscription", input, options)
end
@doc """
Removes authorization from the DDoS Response Team (DRT) to notify contacts about
escalations to the DRT and to initiate proactive customer support.
"""
def disable_proactive_engagement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableProactiveEngagement", input, options)
end
@doc """
Removes the DDoS Response Team's (DRT) access to the specified Amazon S3 bucket
containing your AWS WAF logs.
To make a `DisassociateDRTLogBucket` request, you must be subscribed to the
[Business Support plan](https://aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support
plan](https://aws.amazon.com/premiumsupport/enterprise-support/). However, if
you are not subscribed to one of these support plans, but had been previously
and had granted the DRT access to your account, you can submit a
`DisassociateDRTLogBucket` request to remove this access.
"""
def disassociate_drt_log_bucket(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateDRTLogBucket", input, options)
end
@doc """
Removes the DDoS Response Team's (DRT) access to your AWS account.
To make a `DisassociateDRTRole` request, you must be subscribed to the [Business Support plan](https://aws.amazon.com/premiumsupport/business-support/) or the
[Enterprise Support plan](https://aws.amazon.com/premiumsupport/enterprise-support/). However, if
you are not subscribed to one of these support plans, but had been previously
and had granted the DRT access to your account, you can submit a
`DisassociateDRTRole` request to remove this access.
"""
def disassociate_drt_role(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateDRTRole", input, options)
end
@doc """
Removes health-based detection from the Shield Advanced protection for a
resource.
Shield Advanced health-based detection uses the health of your AWS resource to
improve responsiveness and accuracy in attack detection and mitigation.
You define the health check in Route 53 and then associate or disassociate it
with your Shield Advanced protection. For more information, see [Shield Advanced Health-Based
Detection](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html#ddos-advanced-health-check-option)
in the [AWS WAF and AWS Shield Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def disassociate_health_check(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateHealthCheck", input, options)
end
@doc """
Authorizes the DDoS Response Team (DRT) to use email and phone to notify
contacts about escalations to the DRT and to initiate proactive customer
support.
"""
def enable_proactive_engagement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableProactiveEngagement", input, options)
end
@doc """
Returns the `SubscriptionState`, either `Active` or `Inactive`.
"""
def get_subscription_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSubscriptionState", input, options)
end
@doc """
Returns all ongoing DDoS attacks or all DDoS attacks during a specified time
period.
"""
def list_attacks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAttacks", input, options)
end
@doc """
Retrieves the `ProtectionGroup` objects for the account.
"""
def list_protection_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProtectionGroups", input, options)
end
@doc """
Lists all `Protection` objects for the account.
"""
def list_protections(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProtections", input, options)
end
@doc """
Retrieves the resources that are included in the protection group.
"""
def list_resources_in_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListResourcesInProtectionGroup", input, options)
end
@doc """
Gets information about AWS tags for a specified Amazon Resource Name (ARN) in
AWS Shield.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Adds or updates tags for a resource in AWS Shield.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes tags from a resource in AWS Shield.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates the details of the list of email addresses and phone numbers that the
DDoS Response Team (DRT) can use to contact you if you have proactive engagement
enabled, for escalations to the DRT and to initiate proactive customer support.
"""
def update_emergency_contact_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEmergencyContactSettings", input, options)
end
@doc """
Updates an existing protection group.
A protection group is a grouping of protected resources so they can be handled
as a collective. This resource grouping improves the accuracy of detection and
reduces false positives.
"""
def update_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateProtectionGroup", input, options)
end
@doc """
Updates the details of an existing subscription.
Only enter values for parameters you want to change. Empty parameters are not
updated.
"""
def update_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSubscription", input, options)
end
end
|
lib/aws/generated/shield.ex
| 0.872768
| 0.610831
|
shield.ex
|
starcoder
|
defmodule Tournamex do
@moduledoc """
# Tournamex
Simple package for managing online tournament system.
Tournamex only has a function for generating matchlist from list.
We will add functions gradually.
"""
@doc """
Generates a matchlist.
"""
@spec generate_matchlist([integer()]) :: {:ok, [any()]} | {:error, String.t()}
def generate_matchlist(list) when is_list(list) do
case generate(list) do
list when is_list(list) -> {:ok, list}
tuple when is_tuple(tuple) -> tuple
scala -> {:ok, [scala]}
end
end
def generate_matchlist(_), do: {:error, "Argument is not a list"}
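# Example: generate_matchlist([1, 2, 3]) might return {:ok, [3, [1, 2]]};
# the exact nesting varies between calls because entrants are shuffled.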
@spec generate([any()]) :: integer() | [any()] | {:error, String.t()}
defp generate(list) when length(list) > 0 do
shuffled = Enum.shuffle(list)
case length(shuffled) do
1 ->
hd(shuffled)
2 ->
shuffled
_ ->
b =
Enum.slice(shuffled, 0..trunc(length(shuffled)/2 -1))
|> generate()
c =
Enum.slice(shuffled, trunc(length(shuffled)/2)..length(shuffled)-1)
|> generate()
[b,c]
end
end
defp generate(_), do: {:error, "No entrants"}
@doc """
Initialize match list with fight result.
"""
@spec initialize_match_list_with_fight_result([any()], [any()]) :: [any()]
def initialize_match_list_with_fight_result(match_list, result \\ []) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
x when is_integer(match) ->
acc ++ [%{"user_id" => x, "is_loser" => false}]
x when is_list(match) ->
acc ++ [initialize_match_list_with_fight_result(x)]
x -> x
end
end)
end
@doc """
Initialize match list of teams with fight result.
"""
@spec initialize_match_list_of_team_with_fight_result([any()], [any()]) :: [any()]
def initialize_match_list_of_team_with_fight_result(match_list, result \\ []) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
x when is_integer(match) ->
acc ++ [%{"team_id" => x, "is_loser" => false}]
x when is_list(match) ->
acc ++ [initialize_match_list_of_team_with_fight_result(x)]
x -> x
end
end)
end
@doc """
Renew match list with loser.
"""
@spec renew_match_list_with_loser([any()], integer()) :: [any()]
def renew_match_list_with_loser(match_list, loser) when is_integer(loser) do
renew_defeat(match_list, loser)
end
def renew_match_list_with_loser(match_list, _), do: match_list
@spec renew_defeat([any()], integer(), [any()]) :: [any()]
defp renew_defeat(match_list, loser, result \\ []) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
x when is_map(match) ->
cond do
x["user_id"] == loser ->
acc ++ [Map.put(x, "is_loser", true)]
x["team_id"] == loser ->
acc ++ [Map.put(x, "is_loser", true)]
true ->
acc ++ [x]
end
x when is_list(match) ->
acc ++ [renew_defeat(x, loser)]
x -> x
end
end)
end
@doc """
Win count increment.
"""
@spec win_count_increment([any()], integer()) :: [any()]
def win_count_increment(match_list, user_id) when is_integer(user_id) do
renew_win_count(match_list, user_id)
end
def win_count_increment(match_list, _), do: match_list
defp renew_win_count(match_list, user_id, result \\ []) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
x when is_map(match) ->
cond do
x["user_id"] == user_id ->
count = x["win_count"]
acc ++ [Map.put(x, "win_count", count+1)]
x["team_id"] == user_id ->
count = x["win_count"]
acc ++ [Map.put(x, "win_count", count+1)]
true ->
acc ++ [x]
end
x when is_list(match) ->
acc ++ [renew_win_count(x, user_id)]
x -> x
end
end)
end
@doc """
Check if the user has already lost.
"""
@spec check_lose?([any()], integer()) :: boolean() | nil
def check_lose?(match_list, user_id) when is_integer(user_id) do
check?(match_list, user_id)
end
def check_lose?(_, _), do: nil
defp check?(match_list, user_id, result \\ false) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
  x when is_map(x) ->
    if x["user_id"] == user_id do
      x["is_loser"]
    else
      acc
    end
  x when is_list(x) -> check?(x, user_id, acc)
  _ -> acc
end
end)
end
@doc """
Put value on bracket list.
The second argument `key` should be a user id or a team id.
"""
@spec put_value_on_brackets([any()], integer() | String.t() | atom(), map(), [any()]) :: [any()]
def put_value_on_brackets(match_list, key, value, result \\ []) when is_map(value) do
Enum.reduce(match_list, result, fn match, acc ->
case match do
x when is_list(x) ->
acc ++ [put_value_on_brackets(x, key, value)]
x when is_map(x) ->
cond do
x["user_id"] == key ->
acc ++ [Map.merge(x, value)]
x["team_id"] == key ->
acc ++ [Map.merge(x, value)]
true ->
acc ++ [x]
end
x ->
acc ++ [x]
end
end)
end
@doc """
Delete losers from match list.
"""
@spec delete_loser([any()], [integer()] | integer()) :: [any()]
def delete_loser(list, loser) when is_integer(loser) do
delete_loser(list, [loser])
end
def delete_loser([a, b], loser) when is_integer(a) and is_integer(b) do
list = [a, b] -- loser
if length(list) == 1, do: hd(list), else: list
end
def delete_loser(list, loser) do
  case list do
    [[a, b], [c, d]] -> [delete_loser([a, b], loser), delete_loser([c, d], loser)]
    [a, [b, c]] when is_integer(a) and [a] == loser -> [b, c]
    [a, [b, c]] -> [a, delete_loser([b, c], loser)]
    [[a, b], c] when is_integer(c) and [c] == loser -> [a, b]
    [[a, b], c] -> [delete_loser([a, b], loser), c]
    [a] when is_integer(a) -> []
    a when is_integer(a) -> []
    # any other shape is a malformed bracket
    _ -> raise "Bad Argument"
  end
end
@doc """
Returns data which is presenting tournament brackets.
"""
@spec brackets_with_fight_result([any()]) :: {:ok, [any()]} | {:error, String.t()}
def brackets_with_fight_result(match_list) do
{:ok, align_with_fight_result(match_list)}
end
defp align_with_fight_result(match_list, result \\ []) do
Enum.reduce(match_list, result, fn x, acc ->
case x do
x when is_list(x) ->
align_with_fight_result(x, acc)
x when is_map(x) ->
if hd(match_list) == x do
[fr(match_list) | acc]
else
acc
end
_ -> acc
end
end)
end
defp fr(list) do
Enum.reduce(list, [], fn element, acc ->
case element do
x when is_map(x) -> [x | acc]
_ -> [nil | acc]
end
end)
end
@doc """
Returns data which is presenting tournament brackets.
"""
@spec brackets([any()]) :: {:ok, [any()]} | {:error, String.t()}
def brackets(match_list) do
{:ok, align(match_list)}
end
defp align(match_list, result \\ []) do
Enum.reduce(match_list, result, fn x, acc ->
case x do
x when is_list(x) ->
align(x, acc)
x when is_integer(x) and hd(match_list) == x ->
[ml(match_list) | acc]
x when is_integer(x) ->
acc
_ ->
raise "invalid list"
end
end)
end
# Length of this list should be 2.
defp ml(list) do
Enum.reduce(list, [], fn element, acc ->
case element do
x when is_integer(x) -> [x | acc]
_ -> [nil | acc]
end
end)
end
end
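
# --- Usage sketch (illustrative only; assumes the enclosing module above is
# --- named Tournamex, per its file path, and is run in iex). Bracket shapes
# --- vary between runs because generate/1 shuffles the entrants.
{:ok, match_list} = Tournamex.generate_matchlist([1, 2, 3, 4])
# e.g. [[3, 1], [4, 2]]

match_list
|> Tournamex.initialize_match_list_with_fight_result()
|> Tournamex.renew_match_list_with_loser(3)
# the map for entrant 3 now carries "is_loser" => true
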
defmodule ExCubicIngestion.Schema.CubicLoad do
@moduledoc """
Contains information on the objects passing through the 'incoming' S3 bucket, as well
as their status while transitioning through the various steps in the data pipeline process.
"""
use Ecto.Schema
import Ecto.Query
alias Ecto.Changeset
alias ExCubicIngestion.Repo
alias ExCubicIngestion.Schema.CubicTable
@derive {Jason.Encoder,
only: [
:id,
:table_id,
:status,
:s3_key,
:s3_modified,
:s3_size,
:deleted_at,
:inserted_at,
:updated_at
]}
@type t :: %__MODULE__{
id: integer() | nil,
table_id: integer(),
status: String.t() | nil,
s3_key: String.t() | nil,
s3_modified: DateTime.t() | nil,
s3_size: integer() | nil,
deleted_at: DateTime.t() | nil,
inserted_at: DateTime.t() | nil,
updated_at: DateTime.t() | nil
}
schema "cubic_loads" do
field(:table_id, :integer)
# @todo specify the different statuses
field(:status, :string)
field(:s3_key, :string)
field(:s3_modified, :utc_datetime)
field(:s3_size, :integer)
field(:deleted_at, :utc_datetime)
timestamps(type: :utc_datetime)
end
@spec not_deleted :: Ecto.Queryable.t()
defp not_deleted do
from(load in __MODULE__, where: is_nil(load.deleted_at))
end
@spec get!(integer()) :: t()
def get!(id) do
Repo.get!(not_deleted(), id)
end
@spec insert_new_from_objects_with_table([map()], CubicTable.t()) ::
{:ok, [t()]} | {:error, term()}
def insert_new_from_objects_with_table(objects, table) do
Repo.transaction(fn ->
# query loads to see what we can ignore when inserting
# usually happens when objects have not been moved out of 'incoming' bucket
recs = get_by_objects(objects)
# create a list of objects that have not been added to database
new_objects = Enum.filter(objects, &not_added(&1, recs))
# insert new objects
Enum.map(new_objects, &insert_from_object_with_table(&1, table))
end)
end
@spec insert_from_object_with_table(map(), CubicTable.t()) :: Ecto.Schema.t()
def insert_from_object_with_table(object, table) do
last_modified = parse_and_drop_msec(object[:last_modified])
size = String.to_integer(object[:size])
Repo.insert!(%__MODULE__{
table_id: table.id,
status: "ready",
s3_key: object[:key],
s3_modified: last_modified,
s3_size: size
})
end
@spec get_by_objects(list()) :: [t()]
def get_by_objects(objects) do
# put together filters based on the object info
filters =
Enum.map(objects, fn object ->
last_modified = parse_and_drop_msec(object[:last_modified])
{object[:key], last_modified}
end)
# we only want to query if we have filters, because otherwise the query would
# return the whole table
if Enum.empty?(filters) do
[]
else
query_with_filters =
Enum.reduce(filters, __MODULE__, fn {s3_key, s3_modified}, query ->
# query
from(load in query,
or_where:
is_nil(load.deleted_at) and load.s3_key == ^s3_key and
load.s3_modified == ^s3_modified
)
end)
Repo.all(query_with_filters)
end
end
@spec not_added(map(), list()) :: boolean()
def not_added(load_object, load_recs) do
key = load_object[:key]
last_modified = parse_and_drop_msec(load_object[:last_modified])
not Enum.any?(
load_recs,
fn r -> r.s3_key == key and r.s3_modified == last_modified end
)
end
@spec get_status_ready :: [t()]
def get_status_ready do
query =
from(load in not_deleted(),
where: load.status == "ready",
order_by: [load.s3_modified, load.s3_key]
)
Repo.all(query)
end
@spec get_status_ready_for :: [t()]
def get_status_ready_for do
query =
from(load in not_deleted(),
where: load.status in ["ready_for_archiving", "ready_for_erroring"]
)
Repo.all(query)
end
@spec get_many_with_table([integer()]) :: [{t(), CubicTable.t()}]
def get_many_with_table(load_rec_ids) do
Repo.all(
from(load in not_deleted(),
join: table in CubicTable,
on: table.id == load.table_id,
where: load.id in ^load_rec_ids,
select: {load, table}
)
)
end
@spec change(t(), %{required(atom()) => term()}) :: Changeset.t()
def change(load_rec, changes) do
Changeset.change(load_rec, changes)
end
# @todo consider making this more specific to use cases
@spec update(t(), map()) :: t()
def update(load_rec, changes) do
{:ok, load_rec} =
Repo.transaction(fn ->
Repo.update!(change(load_rec, changes))
end)
load_rec
end
@spec query_many([integer()]) :: Ecto.Queryable.t()
def query_many(load_rec_ids) do
from(load in not_deleted(), where: load.id in ^load_rec_ids, select: load)
end
# @todo consider making this more specific to use cases
@spec update_many([integer()], Keyword.t()) :: [t()]
def update_many(load_rec_ids, change) do
{:ok, {_count, updated_load_recs}} =
Repo.transaction(fn ->
Repo.update_all(
query_many(load_rec_ids),
set: change
)
end)
updated_load_recs
end
# private
@spec parse_and_drop_msec(String.t()) :: DateTime.t()
defp parse_and_drop_msec(datetime) do
{:ok, datetime_with_msec, _offset} = DateTime.from_iso8601(datetime)
DateTime.truncate(datetime_with_msec, :second)
end
end
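
# --- Usage sketch (illustrative; exercises only the pure dedup helper
# --- not_added/2, so no database access is needed) ---
object = %{key: "incoming/sample.csv", last_modified: "2022-01-01T10:49:50.000Z", size: "10"}
recs = [%{s3_key: "incoming/sample.csv", s3_modified: ~U[2022-01-01 10:49:50Z]}]
ExCubicIngestion.Schema.CubicLoad.not_added(object, recs)
#=> false -- already recorded, so insert_new_from_objects_with_table/2 would skip it
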
defmodule ExDimensions.Math do
@moduledoc """
This module contains the required macros to perform math on unit quantities.
It must be `use`d in the current scope for the math functions to work properly.
Math on standard values will still work as expected if this module is `use`d.
Using these math functions will ensure proper dimensional analysis is performed
on quantities. The following rules are enforced:
* Quantities with different units may not be added or subtracted
* Quantities may not be added or subtracted with plain scalar numbers
* Quantities may be multiplied and divided with scalars
* Quantities with different units that are multiplied and divided will have
their units changed as needed
Any math operations that violate these rules will result in an ArithmeticError
that will bubble up to the caller.
"""
@moduledoc since: "0.1.0"
defmacro __using__(_opts) do
quote do
import Kernel,
except: [
{:+, 2},
{:-, 2},
{:*, 2},
{:/, 2},
{:<, 2},
{:<=, 2},
{:>, 2},
{:>=, 2},
{:==, 2}
]
def %{value: v1, units: u, denom: d} + %{value: v2, units: u, denom: d} do
%ExDimensions.Quantity{value: v1 + v2, units: u, denom: d}
end
def %{value: v1, units: u, denom: d} - %{value: v2, units: u, denom: d} do
%ExDimensions.Quantity{value: v1 - v2, units: u, denom: d}
end
def %{value: v1, units: u, denom: d} * x when is_number(x) do
%ExDimensions.Quantity{value: v1 * x, units: u, denom: d}
end
def %{value: v1, units: u, denom: []} *
%{value: v2, units: u2, denom: []} do
%ExDimensions.Quantity{value: v1 * v2, units: u ++ u2, denom: []}
end
def %{value: v1, units: u, denom: d} *
%{value: v2, units: u2, denom: d2} do
%ExDimensions.Quantity{
value: v1 * v2,
units: u ++ u2,
denom: d ++ d2
}
end
def %{value: v1, units: u, denom: d} / x when is_number(x) do
%ExDimensions.Quantity{value: v1 / x, units: u, denom: d}
end
def %{value: v1, units: [u | u_rest], denom: []} /
%{value: v2, units: [u | u_rest], denom: []} do
v1 / v2
end
def %{value: v1, units: u, denom: []} / %{value: v2, units: u2, denom: []} do
{units, denom} = cancel_units(u, u2)
%ExDimensions.Quantity{value: v1 / v2, units: units, denom: denom}
end
def %{value: v1, units: u, denom: d} > %{value: v2, units: u, denom: d} do
v1 > v2
end
def %{value: v1, units: u, denom: d} >= %{value: v2, units: u, denom: d} do
v1 >= v2
end
def %{value: v1, units: u, denom: d} < %{value: v2, units: u, denom: d} do
v1 < v2
end
def %{value: v1, units: u, denom: d} <= %{value: v2, units: u, denom: d} do
v1 <= v2
end
def %{value: v1, units: u, denom: d} == %{value: v2, units: u, denom: d} do
v1 == v2
end
defp cancel_units(u1, u2) do
u2
|> Enum.reduce({u1, u2}, fn u, {num, denom} ->
if Enum.member?(num, u) do
{List.delete(num, u), List.delete(denom, u)}
else
{num, denom}
end
end)
end
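# ^^^ repeats a unit, e.g. :m ^^^ 2 yields [:m, :m]; this appears intended for
# building squared/cubed units (an inference from List.duplicate/2 below).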
def u ^^^ num do
List.duplicate(u, num)
end
def left + right do
Kernel.+(left, right)
end
def left - right do
Kernel.-(left, right)
end
def left * right do
Kernel.*(left, right)
end
def left / right do
Kernel./(left, right)
end
def left < right do
Kernel.<(left, right)
end
def left <= right do
Kernel.<=(left, right)
end
def left > right do
Kernel.>(left, right)
end
def left >= right do
Kernel.>=(left, right)
end
def left == right do
Kernel.==(left, right)
end
end
end
end
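
# --- Usage sketch (illustrative; assumes the %ExDimensions.Quantity{} struct
# --- matched by the operator clauses above) ---
defmodule ExDimensions.MathDemo do
  use ExDimensions.Math

  def demo do
    m = %ExDimensions.Quantity{value: 3.0, units: ["m"], denom: []}
    s = %ExDimensions.Quantity{value: 2.0, units: ["s"], denom: []}
    # Same-unit quantities add; dividing by a different unit moves it into the denominator.
    {m + m, m / s}
    #=> {%ExDimensions.Quantity{value: 6.0, units: ["m"], denom: []},
    #    %ExDimensions.Quantity{value: 1.5, units: ["m"], denom: ["s"]}}
  end
end
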
defmodule Blockchain.Transaction.Validity do
@moduledoc """
This module is responsible for transaction validation,
as defined in the Yellow Paper.
"""
alias Block.Header
alias Blockchain.{Account, Chain, Transaction}
alias MerklePatriciaTree.Trie
@doc """
Validates the conditions that must hold before we're willing to
execute a transaction, as specified in Section 6.2 of the Yellow
Paper, Eq.(65) and Eq.(66).
"""
@spec validate(Trie.t(), Transaction.t(), Block.Header.t(), Chain.t()) ::
:valid | {:invalid, atom()}
def validate(state, trx, block_header, chain) do
evm_config = Chain.evm_config(chain, block_header.number)
with :ok <- validate_signature(trx, chain, evm_config),
{:ok, sender_address} <- Transaction.Signature.sender(trx, chain.params.network_id) do
errors =
[]
|> check_intrinsic_gas(trx, evm_config)
|> check_account_validity(trx, state, sender_address)
|> check_gas_limit(trx, block_header)
if errors == [], do: :valid, else: {:invalid, errors}
end
end
defp check_account_validity(errors, trx, state, sender_address) do
sender_account = Account.get_account(state, sender_address)
if sender_account do
errors
|> check_sender_nonce(trx, sender_account)
|> check_balance(trx, sender_account)
else
errors
end
end
defp validate_signature(trx, chain, evm_config) do
max_s_value = evm_config.max_signature_s
if Transaction.Signature.is_signature_valid?(trx.r, trx.s, trx.v, chain.params.network_id,
max_s: max_s_value
) do
:ok
else
{:invalid, :invalid_sender}
end
end
@spec check_sender_nonce([atom()], Transaction.t(), Account.t()) :: [atom()]
defp check_sender_nonce(errors, transaction, account) do
if account.nonce != transaction.nonce do
[:nonce_mismatch | errors]
else
errors
end
end
@spec check_intrinsic_gas([atom()], Transaction.t(), EVM.Configuration.t()) :: [atom()]
defp check_intrinsic_gas(errors, transaction, config) do
intrinsic_gas_cost = Transaction.intrinsic_gas_cost(transaction, config)
if intrinsic_gas_cost > transaction.gas_limit do
[:insufficient_intrinsic_gas | errors]
else
errors
end
end
@spec check_balance([atom()], Transaction.t(), Account.t()) :: [atom()]
defp check_balance(errors, transaction, account) do
value = transaction.gas_limit * transaction.gas_price + transaction.value
if value > account.balance do
[:insufficient_balance | errors]
else
errors
end
end
@spec check_gas_limit([atom()], Transaction.t(), Block.Header.t()) :: [atom()]
defp check_gas_limit(errors, transaction, header) do
if transaction.gas_limit > Header.available_gas(header) do
[:over_gas_limit | errors]
else
errors
end
end
end
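
# --- Pattern note (illustrative) ---
# The check_* helpers above thread a plain error list through a pipeline, so a
# single validation pass collects every violation instead of stopping at the
# first. The same idiom in isolation, with hypothetical checks:
add_error = fn errors, failed?, tag -> if failed?, do: [tag | errors], else: errors end

[]
|> add_error.(true, :insufficient_intrinsic_gas)
|> add_error.(false, :insufficient_balance)
|> add_error.(true, :nonce_mismatch)
#=> [:nonce_mismatch, :insufficient_intrinsic_gas]
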
defmodule Roger.Partition.Worker do
@moduledoc """
Handles the decoding and execution of a single job.
Besides running the job, various administrative tasks need to be
performed as well, namely:
- Check whether the job has not been cancelled in the meantime
- Check whether another job is currently running with the same
execution_key, and if so, delay this current job until the
currently running one finishes
- On job failure, the job needs to be queued in the retry queue, if
the job is marked retryable. By default, jobs are *not* retried.
"""
require Logger
alias Roger.{Job, GProc, Queue, Partition.Retry}
alias Roger.Partition.Global
# after how long the wait queue for execution_key-type jobs expires
@execution_waiting_expiry 1800 * 1000
use GenServer, restart: :transient
def start_link(worker_input) do
GenServer.start_link(__MODULE__, worker_input)
end
def name(job_id) do
{:roger_job_worker, job_id}
end
## Server interface
defmodule State do
@moduledoc false
@type t :: %__MODULE__{
partition_id: String.t(),
meta: map,
raw_payload: binary,
channel: AMQP.Channel.t(),
worker_task_pid: pid,
job: Job.t()
}
defstruct partition_id: nil, meta: nil, raw_payload: nil, channel: nil, worker_task_pid: nil, job: nil
end
def init([partition_id, channel, payload, meta]) do
state = %State{
partition_id: partition_id,
channel: channel,
meta: meta,
raw_payload: payload
}
Process.flag(:trap_exit, true)
{:ok, state, 0}
end
@doc """
This will make sure the worker task is killed when the worker gets stopped.
"""
@spec terminate(any(), State.t()) :: any()
def terminate(_reason, state) do
if state.worker_task_pid do
Process.exit(state.worker_task_pid, :kill)
end
end
@doc """
This function starts processing the job as soon as the worker GenServer is started up.
"""
@spec handle_info(:timeout, State.t()) :: {:noreply, State.t()} | {:stop, :normal, State.t()}
def handle_info(:timeout, state) do
case Job.decode(state.raw_payload) do
{:ok, job} ->
job = %Job{job | started_at: Roger.now()}
cond do
Global.cancelled?(state.partition_id, job.id, :remove) ->
job_cancel(job, state)
{:stop, :normal, state}
job_waiting?(job, state) ->
job_waiting(job, state)
{:stop, :normal, state}
true ->
pid = job_startup(job, state)
{:noreply, %{state | worker_task_pid: pid, job: job}}
end
{:error, message} ->
# Decode error
Logger.debug("Job decoding error: #{inspect(message)} #{inspect(state.raw_payload)}")
job_done(nil, :ack, state)
{:stop, :normal, state}
end
end
# When the job is finished, it sends a message to the GenServer to finish off the worker task.
@spec handle_info(:job_finished, State.t()) :: {:stop, :normal, State.t()}
def handle_info(:job_finished, state) do
{:stop, :normal, state}
end
# When the job errors, the async job task sends a message to this worker so it can unregister and shut down correctly.
@spec handle_info(:job_errored, State.t()) :: {:stop, :normal, State.t()}
def handle_info(:job_errored, state) do
state.job.id
|> name()
|> GProc.unregp()
GProc.unregp({:roger_job_worker_meta, state.partition_id, state.job.id})
{:stop, :normal, state}
end
# If a timeout is set on the job and the job exceeds it, this callback is invoked and shuts the job down correctly.
@spec handle_info(:handle_job_timeout, State.t()) :: {:stop, :normal, State.t()}
def handle_info(:handle_job_timeout, %{worker_task_pid: pid, job: job} = state) when is_pid(pid) do
Process.exit(pid, :kill)
handle_error(job, {:timeout, "Job stopped because of timeout"}, nil, state, nil)
{:stop, :normal, state}
end
# This handles a hard crash of the job process.
@spec handle_info({:DOWN, reference(), :process, pid(), String.t()}, State.t()) :: {:stop, :normal, State.t()}
def handle_info({:DOWN, _ref, :process, _child, reason}, state) do
handle_error(state.job, {:worker_crash, reason}, nil, state, nil)
{:stop, :normal, state}
end
# Called when the job needs to be cancelled: it kills the running job task and re-enters the timeout handler to finish the job correctly.
@spec handle_call(:cancel_job, any(), State.t()) :: {:reply, :ok, State.t(), 0}
def handle_call(:cancel_job, _source, state) do
Process.exit(state.worker_task_pid, :kill)
{:reply, :ok, state, 0}
end
defp execute_job(job, state, parent) do
before_run_state = callback(:before_run, [state.partition_id, job])
try do
result = Job.execute(job)
job_done(job, :ack, state)
callback(:after_run, [state.partition_id, job, result, before_run_state])
catch
type, exception ->
handle_error(job, {type, exception}, before_run_state, state, __STACKTRACE__)
send(parent, :job_errored)
end
end
defp handle_error(job, {type, exception}, before_run_state, state, stacktrace) do
cb =
with true <- Job.retryable?(job),
{:ok, :buried} <- Retry.retry(state.channel, state.partition_id, job) do
:on_buried
else
_ -> :on_error
end
job_done(job, :ack, state)
callback(cb, [state.partition_id, job, {type, exception}, stacktrace, before_run_state])
end
defp job_startup(job, state) do
GProc.regp(name(job.id))
GProc.regp({:roger_job_worker_meta, state.partition_id, job.id}, job)
parent = self()
{pid, _ref} =
spawn_monitor(fn ->
execute_job(job, state, parent)
send(parent, :job_finished)
end)
if job.max_execution_time != :infinity do
Process.send_after(self(), :handle_job_timeout, job.max_execution_time * 1000)
end
pid
end
defp job_waiting?(job, state) do
job.execution_key != nil && Global.executing?(state.partition_id, job.execution_key, :add)
end
defp job_waiting(job, state) do
# put job in the waiting queue,
:ok = put_execution_waiting(job, state)
# then ack it.
AMQP.Basic.ack(state.channel, state.meta.delivery_tag)
end
defp job_cancel(job, state) do
callback(:on_cancel, [state.partition_id, job])
job_done(job, :ack, state)
end
# Run at the end of the job, either ack'ing or nack'ing the message.
defp job_done(job, ack_or_nack, state) do
if job != nil do
if job.queue_key != nil do
:ok = Global.remove_queued(state.partition_id, job.queue_key)
end
if job.execution_key != nil do
# mark as "free"
:ok = Global.remove_executed(state.partition_id, job.execution_key)
# check if there are any messages in the waiting queue
check_execution_waiting(job, state)
end
end
meta = state.meta
if meta != nil do
if Process.alive?(state.channel.pid) do
Kernel.apply(AMQP.Basic, ack_or_nack, [state.channel, meta.delivery_tag])
end
end
end
# Run the given worker callback, if a callback module has been defined.
defp callback(callback, args) when is_atom(callback) do
mod = Application.get_env(:roger, :callbacks)
# Make sure module is loaded so function_exported? works correctly
Code.ensure_loaded(mod)
if mod != nil do
try do
# We never want the callback to crash the worker process.
if function_exported?(mod, callback, length(args)) do
Kernel.apply(mod, callback, args)
else
nil
end
catch
kind, e ->
  Logger.error("Worker error in callback function #{mod}.#{callback}: #{kind}:#{inspect(e)}")
end
end
end
# Put in the waiting queue
defp put_execution_waiting(job, state) do
Job.enqueue(job, state.partition_id, execution_waiting_queue(job, state, :unprefixed))
end
# Get the next message from the job's execution waiting queue, and
# enqueues it back on the Job's main queue, if there is any
defp check_execution_waiting(job, state) do
name = execution_waiting_queue(job, state)
case AMQP.Basic.get(state.channel, name) do
{:ok, payload, meta} ->
# enqueue the job again
{:ok, job} = Job.decode(payload)
:ok = Job.enqueue(job, state.partition_id)
# ack it to have it removed from waiting queue
:ok = AMQP.Basic.ack(state.channel, meta.delivery_tag)
{:empty, _} ->
# FIXME delete waiting queue when empty - this can error
:ok
end
end
# Return the name of the execution waiting queue. The queue gets
# declared on the AMQP side as well. Returns the queue either
# prefixed with the partition or unprefixed.
defp execution_waiting_queue(job, state, return \\ :prefixed) do
bare_name = "execution-waiting-#{job.execution_key}"
name = Queue.make_name(state.partition_id, bare_name)
{:ok, _} =
AMQP.Queue.declare(state.channel, name, durable: true, arguments: [{"x-expires", @execution_waiting_expiry}])
case return do
:prefixed -> name
:unprefixed -> bare_name
end
end
end
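
# --- Callback module sketch (hypothetical; the names and arities mirror the
# --- callback/2 invocations above, wired up via `config :roger, :callbacks`) ---
defmodule MyApp.RogerCallbacks do
  require Logger

  # Whatever before_run/2 returns is handed back to after_run/4 and the error callbacks.
  def before_run(_partition_id, _job), do: %{started_at: System.monotonic_time(:millisecond)}

  def after_run(partition_id, job, _result, %{started_at: t0}) do
    Logger.info("job #{job.id} on #{partition_id} took #{System.monotonic_time(:millisecond) - t0}ms")
  end

  def on_error(_partition_id, job, {kind, error}, _stacktrace, _before_run_state) do
    Logger.error("job #{job.id} failed: #{kind} #{inspect(error)}")
  end
end
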
defmodule Day02.Coder do
@moduledoc """
Functions for parsing and executing Intcode programs.
"""
@doc """
Sets up the data for, executes, and returns the result of the gravity assist program.
"""
def calculate_result(noun, verb) do
"./intcode.txt"
|> Aoc.get_file_list_values()
|> execute_program([{1, noun}, {2, verb}])
|> hd()
end
@doc """
Executes the gravity assist program.
## Examples
iex> Day02.Coder.execute_program([1,0,0,0,99])
[2,0,0,0,99]
iex> Day02.Coder.execute_program([2,3,0,3,99])
[2,3,0,6,99]
iex> Day02.Coder.execute_program([2,4,4,5,99,0])
[2,4,4,5,99,9801]
iex> Day02.Coder.execute_program([1,1,1,4,99,5,6,0,99])
[30,1,1,4,2,5,6,0,99]
iex> Day02.Coder.execute_program([1,9,10,3,2,3,11,0,99,30,40,50])
[3500,9,10,70,2,3,11,0,99,30,40,50]
iex> Day02.Coder.execute_program([1,1,1,3,2,3,11,0,99,30,40,50], [{1, 9}, {2, 10}])
[3500,9,10,70,2,3,11,0,99,30,40,50]
"""
def execute_program(codes, replacement_values \\ []) do
codes_map =
codes
|> build_codes_map()
|> replace_values(replacement_values)
|> run_code(0)
Map.keys(codes_map)
|> Enum.sort()
|> Enum.map(fn key ->
Map.get(codes_map, key)
end)
end
defp build_codes_map(codes) do
codes
|> Enum.with_index()
|> Enum.reduce(%{}, fn {value, index}, acc ->
Map.put(acc, index, value)
end)
end
defp replace_values(codes_map, new_values) do
new_values
|> Enum.reduce(codes_map, fn {position, value}, acc ->
Map.put(acc, position, value)
end)
end
defp run_code(codes_map, starting_index) do
action = Map.get(codes_map, starting_index)
case action do
99 ->
codes_map
_ ->
first_position = Map.get(codes_map, starting_index + 1)
second_position = Map.get(codes_map, starting_index + 2)
output_position = Map.get(codes_map, starting_index + 3)
update_map(codes_map, action, first_position, second_position, output_position)
|> run_code(starting_index + 4)
end
end
defp update_map(codes_map, 1, first_position, second_position, output_position) do
Map.put(
codes_map,
output_position,
Map.get(codes_map, first_position) + Map.get(codes_map, second_position)
)
end
defp update_map(codes_map, 2, first_position, second_position, output_position) do
Map.put(
codes_map,
output_position,
Map.get(codes_map, first_position) * Map.get(codes_map, second_position)
)
end
end
defmodule ExVatcheck.VIESClient.XMLParser do
@moduledoc """
A module for parsing XML responses from VIES client requests into Elixir maps.
"""
@type response :: %{
country_code: binary,
vat_number: binary,
request_date: binary,
valid: boolean,
name: binary | nil,
address: binary | nil
}
@check_vat_service_url SweetXml.sigil_x(
"//wsdl:definitions/wsdl:service[name=checkVatService]/wsdl:port[name=checkVatPort]/wsdlsoap:address/@location"
)
@check_vat_fault SweetXml.sigil_x("//soap:Envelope/soap:Body/soap:Fault/faultstring/text()")
@check_vat_response SweetXml.sigil_x("//soap:Envelope/soap:Body/checkVatResponse")
@check_vat_response_fields [
country_code: SweetXml.sigil_x("./countryCode/text()"),
vat_number: SweetXml.sigil_x("./vatNumber/text()"),
request_date: SweetXml.sigil_x("./requestDate/text()"),
valid: SweetXml.sigil_x("./valid/text()"),
name: SweetXml.sigil_x("./name/text()"),
address: SweetXml.sigil_x("./address/text()")
]
@doc ~S"""
The `parse_service/1` function parses the URL of the checkVatService from the
VIES WSDL response.
The WSDL has the following structure:
```
<wsdl:definitions ...>
...
<wsdl:service name="checkVatService">
<wsdl:port name="checkVatPort" binding="impl:checkVatBinding">
<wsdlsoap:address location="https://ec.europa.eu/taxation_customs/vies/services/checkVatService"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>
```
"""
@spec parse_service(binary) :: {:ok, binary} | {:error, binary}
def parse_service(wsdl_response) do
case SweetXml.xpath(wsdl_response, @check_vat_service_url) do
nil -> {:error, :invalid_wsdl}
url -> {:ok, to_string(url)}
end
end
@doc ~S"""
The `parse_response/1` function parses the XML response returned by requests to
the checkVatService.
When the service is available, the response has the following structure:
```
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<checkVatResponse xmlns="urn:ec.europa.eu:taxud:vies:services:checkVat:types">
<countryCode>BE</countryCode>
<vatNumber>0829071668</vatNumber>
<requestDate>2016-01-16+01:00</requestDate>
<valid>true</valid>
<name><NAME></name>
<address>RUE LONGUE 93 1320 BEAUVECHAIN</address>
</checkVatResponse>
</soap:Body>
</soap:Envelope>
```
Sometimes, the VIES service is unavailable (see http://ec.europa.eu/taxation_customs/vies/help.html).
When the service is unavailable, the response has the following structure:
```
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<soap:Fault>
...
</soap:Fault>
</soap:Body>
</soap:Envelope>
```
"""
@spec parse_response(binary) :: {:ok, map} | {:error, binary}
def parse_response(response_body) do
if fault = SweetXml.xpath(response_body, @check_vat_fault) do
{:error, fault |> to_string() |> format_fault()}
else
body = SweetXml.xpath(response_body, @check_vat_response, @check_vat_response_fields)
{:ok, format_fields(body)}
end
end
@spec format_fields(map) :: response
defp format_fields(body) do
%{
country_code: format_field(body.country_code),
vat_number: format_field(body.vat_number),
request_date: body.request_date |> format_field() |> format_date(),
valid: body.valid == 'true',
name: format_field(body.name),
address: format_field(body.address)
}
end
@spec format_field(charlist | nil) :: binary | nil
defp format_field(nil), do: nil
defp format_field(charlist), do: to_string(charlist)
@spec format_date(binary) :: binary
defp format_date(<<date::binary-size(10), "+", _time::binary-size(5)>>), do: date
defp format_date(date), do: date
@spec format_fault(binary) :: binary
defp format_fault(fault) do
if String.contains?(fault, "MS_UNAVAILABLE") do
"Service unavailable"
else
"Unknown error: #{fault}"
end
end
end
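
# --- Usage sketch (illustrative; assumes SweetXml is available, as the module
# --- above already requires) ---
response = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
  <soap:Body>
    <checkVatResponse xmlns="urn:ec.europa.eu:taxud:vies:services:checkVat:types">
      <countryCode>BE</countryCode>
      <vatNumber>0829071668</vatNumber>
      <requestDate>2016-01-16+01:00</requestDate>
      <valid>true</valid>
    </checkVatResponse>
  </soap:Body>
</soap:Envelope>
"""

ExVatcheck.VIESClient.XMLParser.parse_response(response)
#=> {:ok, %{country_code: "BE", vat_number: "0829071668", request_date: "2016-01-16",
#           valid: true, name: nil, address: nil}}
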
defmodule Max do
@moduledoc """
A matrix library in pure Elixir based on `:array`.
[Erlang array documentation](http://erlang.org/doc/man/array.html)
## Examples
iex> matrix = Max.new(5, 5, default: 2) # 5x5 matrix with default value 2
iex> Max.get(matrix, {0, 0})
2
iex> matrix = Max.set(matrix, {0, 0}, 8)
iex> Max.get(matrix, {0, 0})
8
## Enumerable protocol
`Max` implements the Enumerable protocol, so all Enum functions can be used:
iex> matrix = Max.new(10, 10, default: 8)
iex> Enum.max(matrix)
8
iex> Enum.member?(matrix, 7)
false
"""
@compile {:inline, get: 2, set: 3, index_to_position: 2, position_to_index: 2, size: 1}
@enforce_keys [:array, :rows, :columns]
defstruct [:array, :rows, :columns]
@type t :: %Max{
array: tuple,
rows: pos_integer,
columns: pos_integer
}
@type position :: {row :: non_neg_integer, col :: non_neg_integer}
@doc """
Returns a new `%Max{}` struct with the given `rows` and `columns` size.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
Max.new(10, 5) # 10 x 5 matrix
Max.new(10, 5, default: 70) # 70 as a default value
"""
@spec new(pos_integer, pos_integer, list) :: t
def new(rows, columns, options \\ [])
when is_integer(rows) and is_integer(columns) and rows > 0 and columns > 0 do
default = Keyword.get(options, :default, 0)
array = :array.new(rows * columns, fixed: true, default: default)
%Max{
array: array,
rows: rows,
columns: columns
}
end
@doc """
Converts a flat list to a new `%Max{}` struct with the given `rows` & `columns` size.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
iex> matrix = Max.from_list([1,2,3,4,5,6], 2, 3)
iex> matrix |> Max.to_list_of_lists
[[1,2,3], [4, 5, 6]]
"""
@spec from_list(nonempty_list, pos_integer, pos_integer, list) :: t
def from_list(list, rows, columns, options \\ []) do
default = Keyword.get(options, :default, 0)
array =
:array.resize(
rows * columns,
:array.from_list(list, default)
)
|> :array.fix()
%Max{
array: array,
rows: rows,
columns: columns
}
end
@doc """
Converts a list of lists matrix to a new `%Max{}` struct.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
iex> matrix = %Max{rows: 2, columns: 3} = Max.from_list_of_lists([[1,2,3], [4, 5, 6]])
iex> matrix |> Max.to_list_of_lists
[[1,2,3], [4, 5, 6]]
"""
@spec from_list_of_lists(nonempty_list(nonempty_list), list) :: t
def from_list_of_lists([h | _] = list, options \\ []) do
default = Keyword.get(options, :default, 0)
rows = length(list)
columns = length(h)
array =
:array.resize(
rows * columns,
:array.from_list(List.flatten(list), default)
)
|> :array.fix()
%Max{
array: array,
rows: rows,
columns: columns
}
end
@doc """
Returns the default value for matrix.
## Examples
iex> matrix = Max.from_list_of_lists([[1,2], [3,4]])
iex> matrix |> Max.default()
0
iex> matrix = Max.new(5, 5, default: "preciz")
iex> matrix |> Max.default()
"preciz"
"""
@spec default(t) :: any
def default(%Max{array: array}), do: :array.default(array)
@doc """
Returns the size of matrix. (rows * columns)
## Examples
iex> matrix = Max.new(5, 5)
iex> Max.size(matrix)
25
"""
@spec size(t) :: pos_integer
def size(%Max{rows: rows, columns: columns}) do
rows * columns
end
@doc """
Returns the sparse size of the `:array`.
Erlang array docs:
"Gets the number of entries in the array up until the last non-default-valued entry. That is, returns I+1 if I is the last non-default-valued entry in the array, or zero if no such entry exists."
## Examples
iex> matrix = Max.new(5, 5)
iex> Max.sparse_size(matrix)
0
"""
@spec sparse_size(t) :: non_neg_integer
def sparse_size(%Max{array: array}) do
:array.sparse_size(array)
end
@doc """
Returns the array index corresponding to the given position tuple.
`:array` indices are 0 based.
## Examples
iex> matrix = Max.new(5, 5)
iex> matrix |> Max.position_to_index({0, 0})
0
iex> matrix |> Max.position_to_index({1, 0})
5
"""
@spec position_to_index(t, position) :: non_neg_integer
def position_to_index(%Max{rows: rows, columns: columns}, {row, col})
when row >= 0 and row < rows and col >= 0 and col < columns do
row * columns + col
end
@doc """
Returns the position tuple corresponding to the given array index.
## Examples
iex> matrix = Max.new(10, 10)
iex> matrix |> Max.index_to_position(11)
{1, 1}
iex> matrix |> Max.index_to_position(4)
{0, 4}
"""
@spec index_to_position(t, non_neg_integer) :: position
def index_to_position(%Max{columns: columns}, index) do
{div(index, columns), rem(index, columns)}
end
@doc """
Returns value at `position` from the given `matrix`.
## Examples
iex> matrix = Max.identity(5)
iex> matrix |> Max.get({1, 1})
1
"""
@spec get(t, position) :: any
def get(%Max{array: array} = matrix, position) do
index = position_to_index(matrix, position)
:array.get(index, array)
end
@doc """
Sets `value` at `position` in `matrix`.
Returns `%Max{}` struct.
## Examples
iex> matrix = Max.new(10, 10)
iex> matrix = matrix |> Max.set({1, 3}, 5)
iex> matrix |> Max.get({1, 3})
5
"""
@spec set(t, position, any) :: t
def set(%Max{array: array} = matrix, position, value) do
index = position_to_index(matrix, position)
%Max{matrix | array: :array.set(index, value, array)}
end
@doc """
Set row of a matrix at `row_index` to the values from the given 1-row matrix.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> row_matrix = Max.new(1, 5, default: 3)
iex> Max.set_row(matrix, 2, row_matrix) |> Max.to_list_of_lists()
[
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[3, 3, 3, 3, 3],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
]
"""
@spec set_row(t, non_neg_integer, t) :: t
def set_row(
%Max{columns: columns} = matrix,
row_index,
%Max{columns: columns, rows: 1} = row_matrix
) do
0..(columns - 1)
|> Enum.reduce(matrix, fn col, acc ->
set(
acc,
{row_index, col},
get(row_matrix, {0, col})
)
end)
end
@doc """
Set column of a matrix at `column_index` to the values from the given 1-column matrix.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> column_matrix = Max.new(5, 1, default: 3)
iex> Max.set_column(matrix, 2, column_matrix) |> Max.to_list_of_lists
[
[1, 1, 3, 1, 1],
[1, 1, 3, 1, 1],
[1, 1, 3, 1, 1],
[1, 1, 3, 1, 1],
[1, 1, 3, 1, 1],
]
"""
@spec set_column(t, non_neg_integer, t) :: t
def set_column(
%Max{rows: rows} = matrix,
column_index,
%Max{rows: rows, columns: 1} = column_matrix
) do
0..(rows - 1)
|> Enum.reduce(matrix, fn row, acc ->
set(
acc,
{row, column_index},
get(column_matrix, {row, 0})
)
end)
end
@doc """
Converts matrix to a flat list.
## Examples
iex> matrix = Max.new(3, 3) |> Max.map(fn index, _val -> index end)
iex> Max.to_list(matrix)
[0, 1, 2, 3, 4, 5, 6, 7, 8]
"""
@spec to_list(t) :: list
def to_list(%Max{array: array}) do
:array.to_list(array)
end
@doc """
Returns smallest value in matrix using `Kernel.min/2`.
## Examples
iex> matrix = Max.new(10, 10, default: 7)
iex> matrix |> Max.min()
7
"""
@spec min(t) :: any
def min(%Max{} = matrix) do
{_index, value} = do_argmin(matrix)
value
end
@doc """
Returns largest value in matrix using `Kernel.max/2`.
## Examples
iex> matrix = Max.new(10, 10) |> Max.map(fn index, _ -> index end)
iex> matrix |> Max.max()
99
"""
@spec max(t) :: any
def max(%Max{} = matrix) do
{_index, value} = do_argmax(matrix)
value
end
@doc """
Returns position tuple of smallest value.
## Examples
iex> matrix = Max.new(5, 5, default: 8)
iex> matrix |> Max.argmin()
{0, 0}
iex> matrix = matrix |> Max.set({1, 1}, 7)
iex> matrix |> Max.argmin()
{1, 1}
"""
@spec argmin(t) :: position
def argmin(%Max{} = matrix) do
{index, _value} = do_argmin(matrix)
index_to_position(matrix, index)
end
@doc """
Returns position tuple of largest value.
## Examples
iex> matrix = Max.new(5, 5, default: 8)
iex> matrix |> Max.argmax()
{0, 0}
iex> matrix = matrix |> Max.set({1, 1}, 10)
iex> matrix |> Max.argmax()
{1, 1}
"""
@spec argmax(t) :: position
def argmax(%Max{} = matrix) do
{index, _value} = do_argmax(matrix)
index_to_position(matrix, index)
end
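# Shared scan strategy for do_argmin/do_argmax: when some slots still hold the
# default (sparse_size < size), the default value itself is a candidate, so the
# fold is seeded with {0, default} and default-valued entries can be skipped;
# otherwise every entry must be visited and the first one seeds the accumulator.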
@doc false
def do_argmin(%Max{} = matrix) do
if sparse_size(matrix) < size(matrix) do
sparse_foldl(
matrix,
fn index, value, {_acc_index, acc_val} = acc ->
case min(value, acc_val) do
^acc_val -> acc
_else -> {index, value}
end
end,
{0, default(matrix)}
)
else
foldl(
matrix,
fn
index, value, {_acc_index, acc_val} = acc ->
case min(value, acc_val) do
^acc_val -> acc
_else -> {index, value}
end
index, value, nil ->
{index, value}
end,
nil
)
end
end
@doc false
def do_argmax(%Max{} = matrix) do
if sparse_size(matrix) < size(matrix) do
sparse_foldl(
matrix,
fn index, value, {_acc_index, acc_val} = acc ->
case max(value, acc_val) do
^acc_val -> acc
_else -> {index, value}
end
end,
{0, default(matrix)}
)
else
foldl(
matrix,
fn
index, value, {_acc_index, acc_val} = acc ->
case max(value, acc_val) do
^acc_val -> acc
_else -> {index, value}
end
index, value, nil ->
{index, value}
end,
nil
)
end
end
@doc """
Checks for membership of given `term`.
Returns `true` if member, `false` otherwise.
## Examples
iex> matrix = Max.new(5, 5) |> Max.map(fn i, _ -> i end)
iex> matrix |> Max.member?(6)
true
iex> matrix |> Max.member?(100)
false
"""
@spec member?(t, any) :: boolean
def member?(%Max{array: array} = matrix, term) do
if :array.sparse_size(array) < size(matrix) && default(matrix) == term do
true
else
try do
sparse_foldl(
matrix,
fn
_, ^term, _ -> throw(:found)
_, _, _ -> false
end,
false
)
catch
:throw, :found ->
true
end
end
end
@doc """
Returns position of the first occurence of the given `value`
or `nil ` if nothing was found.
## Examples
iex> Max.new(5, 5) |> Max.find(0)
{0, 0}
iex> matrix = Max.new(5, 5) |> Max.map(fn i, _v -> i end)
iex> matrix |> Max.find(16)
{3, 1}
iex> matrix |> Max.find(42)
nil
"""
@spec find(t, any) :: position | nil
def find(%Max{} = matrix, term) do
try do
default_is_term? = default(matrix) == term
throw_found = fn
index, ^term, _ -> throw({:found, index})
_, _, _ -> nil
end
case default_is_term? do
true -> foldl(matrix, throw_found, nil)
false -> sparse_foldl(matrix, throw_found, nil)
end
catch
:throw, {:found, index} ->
index_to_position(matrix, index)
end
end
@doc """
Reshapes `matrix` to the given `rows` & `columns`.
## Examples
iex> matrix = Max.identity(4)
iex> matrix |> Max.to_list_of_lists()
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
iex> matrix |> Max.reshape(2, 8) |> Max.to_list_of_lists()
[
[1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1]
]
"""
@spec reshape(t, pos_integer, pos_integer) :: t
def reshape(%Max{} = matrix, rows, columns) do
%Max{matrix | rows: rows, columns: columns}
end
@doc """
Maps each element to the result of the a given `fun`.
The given `fun` receives the index as first and
value as the second argument.
To convert index to position use `index_to_position/2`.
## Examples
iex> matrix = Max.new(10, 10, default: 2)
iex> matrix = Max.map(matrix, fn _index, value -> value + 2 end)
iex> matrix |> Max.get({0, 0})
4
"""
@spec map(t, fun) :: t
def map(%Max{array: array} = matrix, fun) when is_function(fun, 2) do
%Max{matrix | array: :array.map(fun, array)}
end
@doc """
Same as `map/2` except it skips default valued elements.
## Examples
iex> matrix = Max.new(10, 10, default: 2)
iex> matrix = Max.sparse_map(matrix, fn _index, value -> value + 2 end)
iex> matrix |> Max.get({0, 0}) # value stays at 2 because it was at default
2
"""
@spec sparse_map(t, fun) :: t
def sparse_map(%Max{array: array} = matrix, fun) when is_function(fun, 2) do
%Max{matrix | array: :array.sparse_map(fun, array)}
end
@doc """
Folds the elements using the specified function and initial accumulator value. The elements are visited in order from the lowest index to the highest.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> matrix |> Max.foldl(fn _index, value, acc -> value + acc end, 0)
25
"""
@spec foldl(t, function, any) :: any
def foldl(%Max{array: array}, fun, acc) when is_function(fun, 3) do
:array.foldl(fun, acc, array)
end
@doc """
Folds the elements right-to-left using the specified function and initial accumulator value. The elements are visited in order from the highest index to the lowest.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> matrix |> Max.foldr(fn _index, value, acc -> value + acc end, 0)
25
"""
@spec foldr(t, function, any) :: any
def foldr(%Max{array: array}, fun, acc) when is_function(fun, 3) do
:array.foldr(fun, acc, array)
end
@doc """
Folds the elements using the specified function and initial accumulator value, skipping default-valued entries. The elements are visited in order from the lowest index to the highest.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> matrix |> Max.sparse_foldl(fn _index, value, acc -> value + acc end, 0)
0
"""
@spec sparse_foldl(t, function, any) :: any
def sparse_foldl(%Max{array: array}, fun, acc) when is_function(fun, 3) do
:array.sparse_foldl(fun, acc, array)
end
@doc """
Folds the array elements right-to-left using the specified function and initial accumulator value, skipping default-valued entries. The elements are visited in order from the highest index to the lowest.
## Examples
iex> matrix = Max.new(5, 5, default: 1)
iex> matrix |> Max.sparse_foldr(fn _index, value, acc -> value + acc end, 0)
0
"""
@spec sparse_foldr(t, function, any) :: any
def sparse_foldr(%Max{array: array}, fun, acc) when is_function(fun, 3) do
:array.sparse_foldr(fun, acc, array)
end
@doc """
Resets element at position to the default value.
## Examples
iex> matrix = Max.new(5, 5, default: 1) |> Max.map(fn _,_ -> 7 end)
iex> matrix |> Max.get({0, 0})
7
iex> matrix |> Max.reset({0, 0}) |> Max.get({0, 0})
1
"""
@spec reset(t, position) :: t
def reset(%Max{array: array} = matrix, position) do
index = position_to_index(matrix, position)
%Max{matrix | array: :array.reset(index, array)}
end
@doc """
Reduces matrix to only one row at given `row` index.
## Examples
iex> matrix = Max.new(5, 5, default: 3)
iex> matrix |> Max.row(4) |> Max.to_list_of_lists
[[3, 3, 3, 3, 3]]
"""
@spec row(t, non_neg_integer) :: t
def row(%Max{rows: rows, columns: columns} = matrix, row) when row in 0..(rows - 1) do
for col <- 0..(columns - 1) do
get(matrix, {row, col})
end
|> from_list(1, columns, default: default(matrix))
end
@doc """
Reduces matrix to only one column at given `col` index.
## Examples
iex> matrix = Max.new(5, 5, default: 3)
iex> matrix |> Max.column(4) |> Max.to_list_of_lists
[[3], [3], [3], [3], [3]]
"""
@spec column(t, non_neg_integer) :: t
def column(%Max{rows: rows, columns: columns} = matrix, col) when col in 0..(columns - 1) do
for row <- 0..(rows - 1) do
get(matrix, {row, col})
end
|> from_list(rows, 1, default: default(matrix))
end
@doc """
Converts row at given row index of matrix to list.
## Examples
iex> matrix = Max.identity(5)
iex> matrix |> Max.row_to_list(2)
[0, 0, 1, 0, 0]
"""
@spec row_to_list(t, non_neg_integer) :: list
def row_to_list(%Max{rows: rows, columns: columns} = matrix, row) when row in 0..(rows - 1) do
for col <- 0..(columns - 1) do
get(matrix, {row, col})
end
end
@doc """
Converts column at given column index of matrix to list.
## Examples
iex> matrix = Max.identity(5)
iex> matrix |> Max.column_to_list(0)
[1, 0, 0, 0, 0]
"""
@spec column_to_list(t, non_neg_integer) :: list
def column_to_list(%Max{rows: rows, columns: columns} = matrix, col)
when col in 0..(columns - 1) do
for row <- 0..(rows - 1) do
get(matrix, {row, col})
end
end
@doc """
Converts matrix to list of lists.
## Examples
iex> matrix = Max.new(5, 5) |> Max.map(fn i, _v -> i + 1 end)
iex> Max.to_list_of_lists(matrix)
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
]
"""
@spec to_list_of_lists(t) :: list
def to_list_of_lists(%Max{rows: rows, columns: columns} = matrix) do
for row <- 0..(rows - 1) do
for col <- 0..(columns - 1) do
get(matrix, {row, col})
end
end
end
@doc """
Concatenates a list of matrices.
Returns a new `%Max{}` struct with a new array containing all values
of matrices from `list`.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
iex> matrix = Max.new(3, 3) |> Max.map(fn i, _v -> i end)
iex> matrix |> Max.to_list_of_lists()
[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
]
iex> Max.concat([matrix, matrix], :rows) |> Max.to_list_of_lists()
[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
]
iex> Max.concat([matrix, matrix], :columns) |> Max.to_list_of_lists()
[
[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5],
[6, 7, 8, 6, 7, 8]
]
"""
@spec concat(nonempty_list(t), :rows | :columns, list) :: t | no_return
def concat([%Max{rows: rows, columns: columns} | _] = list, concat_type, options \\ [])
when length(list) > 0 do
default = Keyword.get(options, :default, 0)
can_concat? =
case concat_type do
:columns ->
list |> Enum.all?(&(&1.rows == rows))
:rows ->
list |> Enum.all?(&(&1.columns == columns))
end
if not can_concat? do
raise ArgumentError,
"When concatenating by #{inspect(concat_type)} all matrices should " <>
"have the same number of #{if(concat_type == :row, do: "columns", else: "rows")}"
end
size =
list
|> Enum.map(&size/1)
|> Enum.sum()
array = :array.new(size, default: default)
{rows, columns} =
case concat_type do
:rows ->
{round(size / columns), columns}
:columns ->
{rows, round(size / rows)}
end
matrix = %Max{array: array, rows: rows, columns: columns}
do_concat(list, matrix, 0, 0, concat_type)
end
defp do_concat([], matrix, _, _, _), do: matrix
defp do_concat([%Max{rows: rows} | tail], matrix, target_index, source_index, :rows)
when source_index == rows do
do_concat(tail, matrix, target_index, 0, :rows)
end
defp do_concat([%Max{columns: columns} | tail], matrix, target_index, source_index, :columns)
when source_index == columns do
do_concat(tail, matrix, target_index, 0, :columns)
end
defp do_concat([head | _] = list, matrix, target_index, source_index, concat_type) do
matrix =
case concat_type do
:rows ->
set_row(matrix, target_index, head |> row(source_index))
:columns ->
set_column(matrix, target_index, head |> column(source_index))
end
do_concat(list, matrix, target_index + 1, source_index + 1, concat_type)
end
@doc """
Returns diagonal of matrix.
## Examples
iex> matrix = Max.identity(3)
iex> matrix |> Max.to_list_of_lists()
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]
iex> matrix |> Max.diagonal() |> Max.to_list_of_lists()
[[1, 1, 1]]
"""
@spec diagonal(t) :: t
def diagonal(%Max{rows: rows} = matrix) do
for row <- 0..(rows - 1) do
get(matrix, {row, row})
end
|> from_list(1, rows, default: default(matrix))
end
@doc """
Create identity square matrix of given `size`.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
iex> Max.identity(5) |> Max.to_list_of_lists()
[
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]
]
"""
@spec identity(pos_integer, list) :: t
def identity(size, options \\ []) do
default = Keyword.get(options, :default, 0)
array = :array.new(size * size, fixed: true, default: default)
array = do_identity(0, size, array)
%Max{array: array, rows: size, columns: size}
end
defp do_identity(same, same, array), do: array
defp do_identity(index, size, array) do
array = :array.set(index * (size + 1), 1, array)
do_identity(index + 1, size, array)
end
@doc """
Drops row of matrix at given `row_index`.
## Examples
iex> matrix = Max.new(3, 3) |> Max.map(fn i, _v -> i + 1 end)
iex> matrix |> Max.to_list_of_lists()
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
iex> matrix |> Max.drop_row(1) |> Max.to_list_of_lists()
[
[1, 2, 3],
[7, 8, 9]
]
"""
@spec drop_row(t, non_neg_integer) :: t
def drop_row(%Max{array: from_array, rows: rows, columns: columns} = matrix, row_index)
when rows > 1 and row_index >= 0 and row_index < rows do
to_array = :array.new((rows - 1) * columns, fixed: true, default: default(matrix))
to_array =
do_drop_row(
from_array,
to_array,
0,
0,
size(matrix),
row_index * columns,
(row_index + 1) * columns
)
%Max{array: to_array, rows: rows - 1, columns: columns}
end
defp do_drop_row(_, to_array, from_index, _, size, _, _) when from_index == size do
to_array
end
defp do_drop_row(from_array, to_array, from_index, to_index, size, skip_from, skip_to) do
case from_index >= skip_from && from_index < skip_to do
true ->
do_drop_row(from_array, to_array, from_index + 1, to_index, size, skip_from, skip_to)
false ->
value = :array.get(from_index, from_array)
to_array = :array.set(to_index, value, to_array)
do_drop_row(from_array, to_array, from_index + 1, to_index + 1, size, skip_from, skip_to)
end
end
@doc """
Drops column of matrix at given `column_index`.
## Examples
iex> matrix = Max.from_list_of_lists([
...> [0, 1, 2, 3, 4],
...> [0, 1, 2, 3, 4],
...> [0, 1, 2, 3, 4],
...> [0, 1, 2, 3, 4]
...> ])
iex> matrix |> Max.drop_column(1) |> Max.to_list_of_lists()
[
[0, 2, 3, 4],
[0, 2, 3, 4],
[0, 2, 3, 4],
[0, 2, 3, 4]
]
"""
@spec drop_column(t, non_neg_integer) :: t
def drop_column(%Max{array: from_array, rows: rows, columns: columns} = matrix, column_index)
when columns > 1 and column_index >= 0 and column_index < columns do
to_array = :array.new(rows * (columns - 1), fixed: true, default: default(matrix))
to_array = do_drop_column(from_array, to_array, 0, 0, size(matrix), column_index, columns)
%Max{array: to_array, rows: rows, columns: columns - 1}
end
defp do_drop_column(_, to_array, from_index, _, size, _, _) when from_index == size do
to_array
end
defp do_drop_column(from_array, to_array, from_index, to_index, size, column_index, columns) do
case rem(from_index, columns) do
^column_index ->
do_drop_column(
from_array,
to_array,
from_index + 1,
to_index,
size,
column_index,
columns
)
_else ->
value = :array.get(from_index, from_array)
to_array = :array.set(to_index, value, to_array)
do_drop_column(
from_array,
to_array,
from_index + 1,
to_index + 1,
size,
column_index,
columns
)
end
end
@doc """
Returns transpose of given `matrix`.
## Examples
iex> matrix = Max.new(2, 3) |> Max.map(fn i, _v -> i end)
iex> matrix |> Max.to_list_of_lists()
[
[0, 1, 2],
[3, 4, 5]
]
iex> matrix |> Max.transpose() |> Max.to_list_of_lists()
[
[0, 3],
[1, 4],
[2, 5]
]
"""
@spec transpose(t) :: t
def transpose(%Max{array: array, rows: rows, columns: columns} = matrix) do
list = do_transpose(0, rows * columns, array, columns, rows)
%Max{
array: :array.from_list(list, default(matrix)),
rows: columns,
columns: rows
}
end
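# do_transpose/5 emits values in the *output* matrix's row-major order: output
# index i maps to source position {rem(i, rows), div(i, rows)}, i.e. source
# index rem(i, rows) * columns + div(i, rows).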
defp do_transpose(same, same, _, _, _) do
[]
end
defp do_transpose(index, size, array, columns, rows) do
[
:array.get(rem(index, rows) * columns + div(index, rows), array)
| do_transpose(index + 1, size, array, columns, rows)
]
end
@doc """
Returns the sum of all numbers in the matrix.
## Examples
iex> matrix = Max.new(3, 3, default: 1)
iex> matrix |> Max.sum()
9
"""
@spec sum(t) :: number
def sum(%Max{} = matrix) do
{n, acc_val} =
sparse_foldl(
matrix,
fn _, val, {n, acc_val} ->
{n + 1, acc_val + val}
end,
{0, 0}
)
case size(matrix) - n do
0 ->
acc_val
default_values_skipped ->
default_values_skipped * default(matrix) + acc_val
end
end
@doc """
Trace of matrix (sum of all diagonal elements).
## Examples
iex> matrix = Max.new(3, 3, default: 1)
iex> matrix |> Max.trace()
3
"""
@spec trace(t) :: number
def trace(%Max{} = matrix) do
matrix
|> diagonal()
|> sum()
end
@doc """
Flips columns of matrix in the left-right direction (vertical axis).
## Examples
iex> matrix = Max.from_list_of_lists([
...> [0, 1, 2, 3],
...> [0, 1, 2, 3],
...> [0, 1, 2, 3]
...>])
iex> matrix |> Max.flip_lr() |> Max.to_list_of_lists()
[
[3, 2, 1, 0],
[3, 2, 1, 0],
[3, 2, 1, 0]
]
"""
@spec flip_lr(t) :: t
def flip_lr(%Max{columns: columns} = matrix) do
new_matrix = %Max{
matrix
| array: :array.new(size(matrix), fixed: true, default: default(matrix))
}
sparse_foldl(
matrix,
fn index, val, acc ->
{row, col} = index_to_position(matrix, index)
new_col = columns - 1 - col
acc |> set({row, new_col}, val)
end,
new_matrix
)
end
@doc """
Flip rows of matrix in the up-down direction (horizontal axis).
## Examples
iex> matrix = Max.from_list_of_lists([
...> [0, 0, 0, 0],
...> [1, 1, 1, 1],
...> [2, 2, 2, 2]
...>])
iex> matrix |> Max.flip_ud() |> Max.to_list_of_lists()
[
[2, 2, 2, 2],
[1, 1, 1, 1],
[0, 0, 0, 0]
]
"""
@spec flip_ud(t) :: t
def flip_ud(%Max{rows: rows} = matrix) do
new_matrix = %Max{
matrix
| array: :array.new(size(matrix), fixed: true, default: default(matrix))
}
sparse_foldl(
matrix,
fn index, val, acc ->
{row, col} = index_to_position(matrix, index)
new_row = rows - 1 - row
acc |> set({new_row, col}, val)
end,
new_matrix
)
end
@doc """
Adds two matrices. Size of matrices must match.
## Examples
iex> matrix = Max.from_list_of_lists([
...> [0, 0, 0, 0],
...> [1, 1, 1, 1],
...> [2, 2, 2, 2]
...>])
iex> Max.add(matrix, matrix) |> Max.to_list_of_lists()
[
[0, 0, 0, 0],
[2, 2, 2, 2],
[4, 4, 4, 4]
]
"""
@spec add(t, t) :: t
def add(%Max{rows: rows, columns: columns} = left, %Max{
array: array_right,
rows: rows,
columns: columns
}) do
map(
left,
fn i, v ->
v + :array.get(i, array_right)
end
)
end
@doc """
Elementwise multiplication of two matrices.
## Examples
iex> matrix = Max.from_list_of_lists([
...> [0, 0, 0, 0],
...> [1, 1, 1, 1],
...> [2, 2, 2, 2]
...>])
iex> Max.multiply(matrix, matrix) |> Max.to_list_of_lists()
[
[0, 0, 0, 0],
[1, 1, 1, 1],
[4, 4, 4, 4]
]
"""
@spec multiply(t, t) :: t
def multiply(
%Max{rows: rows, columns: columns} = left,
%Max{array: array_right, rows: rows, columns: columns}
) do
map(
left,
fn i, v ->
v * :array.get(i, array_right)
end
)
end
@doc """
Returns matrix product of the two given matrices.
Number of columns of the first matrix must be equal to the number of rows of the second matrix.
## Examples
iex> matrix_a = Max.from_list_of_lists([
...> [-2, -2, -2, -2],
...> [8, 8, 8, 8],
...> [2, 2, 2, 2]
...>])
iex> matrix_b = Max.from_list_of_lists([
...> [0, 0, 0, 0],
...> [1, 1, 1, 1],
...> [2, 2, 2, 2],
...> [3, 3, 3, 3]
...>])
iex> Max.dot(matrix_a, matrix_b) |> Max.to_list_of_lists()
[
[-12, -12, -12, -12],
[48, 48, 48, 48],
[12, 12, 12, 12]
]
"""
@spec dot(t, t) :: t
def dot(
%Max{rows: left_rows, columns: left_columns} = left,
%Max{rows: right_rows, columns: right_columns} = right
)
when left_columns == right_rows do
array = :array.new(left_rows * right_columns, fixed: true)
matrix = %Max{
array: array,
rows: left_rows,
columns: right_columns
}
left_cache =
for row_i <- 0..(left_rows - 1), into: %{} do
{row_i, left |> row(row_i) |> transpose}
end
right_cache =
for col_i <- 0..(right_columns - 1), into: %{} do
{col_i, right |> column(col_i)}
end
map(
matrix,
fn index, _ ->
{row, col} = index_to_position(matrix, index)
multiply(
Map.get(left_cache, row),
Map.get(right_cache, col)
)
|> sum()
end
)
end
@doc """
Returns a submatrix from the given `matrix`.
Ranges are inclusive.
## Options
* `:default` - (term) the default value of the matrix. Defaults to `0`.
## Examples
iex> matrix = Max.new(2, 4) |> Max.map(fn i, _v -> i end)
iex> matrix |> Max.to_list_of_lists()
[
[0, 1, 2, 3],
[4, 5, 6, 7],
]
iex> matrix |> Max.submatrix(0..1, 1..3) |> Max.to_list_of_lists()
[
[1, 2, 3],
[5, 6, 7]
]
"""
def submatrix(
%Max{rows: rows, columns: columns} = matrix,
row_from..row_to = row_range,
col_from..col_to = col_range,
options \\ []
)
when row_from in 0..(rows - 1) and row_to in row_from..(rows - 1) and
col_from in 0..(columns - 1) and col_to in col_from..(columns - 1) do
default = Keyword.get(options, :default, 0)
submatrix_rows = row_to + 1 - row_from
submatrix_columns = col_to + 1 - col_from
array = :array.new(submatrix_rows * submatrix_columns, fixed: true, default: default)
{_, array} =
for(row <- row_range, col <- col_range, do: {row, col})
|> Enum.reduce({0, array}, fn position, {index, array} ->
{index + 1, :array.set(index, get(matrix, position), array)}
end)
%Max{array: array, rows: submatrix_rows, columns: submatrix_columns}
end
defimpl Enumerable do
@moduledoc false
alias Max
def count(%Max{} = matrix) do
{:ok, Max.size(matrix)}
end
def member?(%Max{} = matrix, term) do
{:ok, Max.member?(matrix, term)}
end
def slice(%Max{array: array} = matrix) do
{
:ok,
Max.size(matrix),
fn start, length ->
do_slice(array, start, length)
end
}
end
defp do_slice(_, _, 0), do: []
defp do_slice(array, index, length) do
[:array.get(index, array) | do_slice(array, index + 1, length - 1)]
end
def reduce(%Max{array: array} = matrix, acc, fun) do
do_reduce({array, 0, Max.size(matrix)}, acc, fun)
end
defp do_reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
defp do_reduce(tuple, {:suspend, acc}, fun), do: {:suspended, acc, &do_reduce(tuple, &1, fun)}
defp do_reduce({_, same, same}, {:cont, acc}, _fun), do: {:done, acc}
defp do_reduce({array, index, count}, {:cont, acc}, fun) do
do_reduce(
{array, index + 1, count},
fun.(:array.get(index, array), acc),
fun
)
end
end
end
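
# --- Usage sketch (illustrative; most calls below also appear in the
# --- module's doctests) ---
a = Max.from_list_of_lists([[1, 2], [3, 4]])

a |> Max.dot(Max.identity(2)) |> Max.to_list_of_lists()
#=> [[1, 2], [3, 4]]

a |> Max.transpose() |> Max.to_list_of_lists()
#=> [[1, 3], [2, 4]]

# The Enumerable implementation makes Enum functions work directly:
Enum.sum(a)
#=> 10
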
defmodule AWS.Marketplace.Metering do
@moduledoc """
AWS Marketplace Metering Service
This reference provides descriptions of the low-level AWS Marketplace
Metering Service API.
AWS Marketplace sellers can use this API to submit usage data for custom
usage dimensions.
**Submitting Metering Records**
* *MeterUsage* - Submits the metering record for a Marketplace
product. MeterUsage is called from an EC2 instance.
* *BatchMeterUsage* - Submits the metering record for a set of
customers. BatchMeterUsage is called from a software-as-a-service (SaaS)
application.
**Accepting New Customers**
* *ResolveCustomer* - Called by a SaaS application during the
registration process. When a buyer visits your website during the
registration process, the buyer submits a Registration Token through the
browser. The Registration Token is resolved through this API to obtain a
CustomerIdentifier and Product Code.
"""
@doc """
BatchMeterUsage is called from a SaaS application listed on the AWS
Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with
the same records or a subset of the input records.
Every request to BatchMeterUsage is for one product. If you need to meter
usage for multiple products, you must make multiple calls to
BatchMeterUsage.
BatchMeterUsage can process up to 25 UsageRecords at a time.
"""
def batch_meter_usage(client, input, options \\ []) do
request(client, "BatchMeterUsage", input, options)
end
@doc """
API to emit metering records. For identical requests, the API is
idempotent. It simply returns the metering record ID.
MeterUsage is authenticated on the buyer's AWS account, generally when
running from an EC2 instance on the AWS Marketplace.
"""
def meter_usage(client, input, options \\ []) do
request(client, "MeterUsage", input, options)
end
@doc """
ResolveCustomer is called by a SaaS application during the registration
process. When a buyer visits your website during the registration process,
the buyer submits a registration token through their browser. The
registration token is resolved through this API to obtain a
CustomerIdentifier and product code.
"""
def resolve_customer(client, input, options \\ []) do
request(client, "ResolveCustomer", input, options)
end
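  # A hedged calling sketch. The client map shape below is an assumption
  # inferred from `request/4`, `get_host/2` and `get_url/2` (the signing
  # credentials needed by `AWS.Request.sign_v4` are omitted):
  #
  #     client = %{region: "us-east-1", endpoint: "amazonaws.com",
  #                proto: "https", port: 443}
  #     AWS.Marketplace.Metering.resolve_customer(client, %{
  #       "RegistrationToken" => registration_token
  #     })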
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, HTTPoison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "metering.marketplace"}
host = get_host("metering.marketplace", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSMPMeteringService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
| lib/aws/marketplace_metering.ex |
defmodule BioMonitor.Routine do
use BioMonitor.Web, :model
@moduledoc """
Model used to define routines.
"""
alias BioMonitor.LogEntry
alias BioMonitor.Repo
@log_types %{reading_error: "reading_error", base_cal: "base_cal", acid_cal: "acid_cal", temp_change: "temp_change", system_error: "system_error"}
schema "routines" do
field :title, :string
field :strain, :string
field :medium, :string
field :target_temp, :float
field :target_ph, :float
field :target_co2, :float
field :target_density, :float
field :estimated_time_seconds, :float
field :extra_notes, :string
field :uuid, :string
field :started, :boolean
field :started_date, :naive_datetime
field :temp_tolerance, :float
field :ph_tolerance, :float
field :balance_ph, :boolean
field :loop_delay, :integer
field :trigger_after, :integer
field :trigger_for, :integer
has_many :readings, BioMonitor.Reading, on_delete: :delete_all
has_many :log_entries, BioMonitor.LogEntry, on_delete: :delete_all, on_replace: :delete
has_many :temp_ranges, BioMonitor.TempRange, on_delete: :delete_all, on_replace: :delete
has_many :tags, BioMonitor.Tag, on_delete: :delete_all, on_replace: :delete
timestamps()
end
@doc """
Builds a changeset based on the `struct` and `params`.
"""
def changeset(struct, params \\ %{}) do
struct
|> cast(params, [:title, :strain, :medium, :target_temp, :target_ph, :target_co2, :target_density, :estimated_time_seconds, :extra_notes, :uuid, :temp_tolerance, :ph_tolerance, :loop_delay, :balance_ph, :trigger_after, :trigger_for])
|> cast_assoc(:temp_ranges, required: false)
|> cast_assoc(:tags, required: false)
|> validate_required([:title, :strain, :medium, :target_temp, :target_ph, :estimated_time_seconds])
|> generate_uuid
end
@doc """
Builds a changeset to update the started status of a routine.
"""
def started_changeset(struct, params \\ %{}) do
struct
|> cast(params, [:started, :started_date])
|> validate_required([:started, :started_date])
end
def log_types, do: @log_types
def log_entry(routine, type, description) do
case Ecto.build_assoc(routine, :log_entries)
|> LogEntry.changeset(%{type: type, description: description})
|> Repo.insert() do
{:ok, _log_entry} -> :ok
{:error, _changeset} -> :error
end
end
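  # A minimal usage sketch (routine and description hypothetical):
  #
  #     Routine.log_entry(routine, Routine.log_types().temp_change,
  #       "Target temperature changed to 30.5 C")
  #     #=> :ok | :error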
defp generate_uuid(changeset) do
with true <- changeset.data.uuid == nil,
true <- Map.get(changeset.params, "uuid") == nil
do
put_change(changeset, :uuid, UUID.uuid1())
else
_ -> changeset
end
end
end
| web/models/routine.ex |
defmodule GuardGen do
@moduledoc """
GuardGen provides a macro that can be used as a guard test to generate
type-checks.
iex> require GuardGen
iex> GuardGen.is_valid(is_atom: :atom)
true
See `is_valid/1`
"""
alias GuardGen.CheckError
@type checklist :: keyword
@keys Application.get_env(:guard_gen, :ori_keys)
@ori_keys @keys -- [:is_list, :in]
@mod_keys Application.get_env(:guard_gen, :mod_keys)
@doc """
Returns true if all type-check evaluations are true. All type-checks provided
by `Kernel` can be used in the `checklist`. You can also use `Kernel.in/2` inside
the checklist. Multiple type-checks are supported in addition to single
type-checks, using modified keys formed by changing the prefix from `is_` to
`are_` followed by the plural form of the word.
## Arguments
* `checklist` - Keyword of type-checks that will get evaluated.
## Examples
With a single type-check, you write the type-check as you would when using
`Kernel` type-checks: you write the function as the key and the argument as
the value.
iex> require GuardGen
iex> GuardGen.is_valid(is_atom: :atom)
true
With multiple type-checks, you write the type-check using modified keys by
changing the prefix from `is_` to `are_` followed by the plural form of the
word.
iex> require GuardGen
iex> GuardGen.is_valid(are_atoms: [:a, :b, :c])
true
You can combine the type-checks in single form or multiple form or both.
iex> require GuardGen
iex> GuardGen.is_valid(is_atom: :atom, are_atoms: [:a, :b, :c])
true
With a type-check that takes two arguments, you pass the arguments in a list, for
example when using `Kernel.is_function/2`.
iex> require GuardGen
iex> GuardGen.is_valid(is_function: [fn x -> x end, 1])
true
With `Kernel.in/2` in single and multiple form.
iex> require GuardGen
iex> a = 1
iex> b = [1, 2, 3]
iex> GuardGen.is_valid(in: [a, b])
true
iex> GuardGen.is_valid(ins: [[1, [1, 2, 3]], [2, [1, 2, 3]]])
true
You can also combine `Kernel.in/2` with other type-checks.
iex> require GuardGen
iex> GuardGen.is_valid(is_atom: :atom, in: [1, [1, 2, 3]])
true
## Notes
There's a limitation with checks that take two arguments, like `is_function/2`
and `in/2`: you can't pass the whole argument list as a single variable. If you
do so, it will get evaluated with its single-argument counterpart, or raise
`GuardGen.CheckError` because no such check is available.
iex> require GuardGen
iex> var = [fn x -> x end, 1]
iex> GuardGen.is_valid(is_function: var)
false
iex> var1 = fn x -> x end
iex> GuardGen.is_valid(is_function: [var1, 1])
true
iex> var2 = 1
iex> GuardGen.is_valid(is_function: [fn x -> x end, var2])
true
iex> GuardGen.is_valid(is_function: [var1, var2])
true
Allowed in guard tests.
"""
@spec is_valid(checklist) :: boolean
defmacro is_valid(checklist) do
checks_ast(checklist)
end
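  # Because the macro expands into plain `Kernel` checks joined with `and`,
  # it can be used in guard position, e.g. (hypothetical module):
  #
  #     defmodule Example do
  #       require GuardGen
  #
  #       def tag(x) when GuardGen.is_valid(is_atom: x), do: {:atom, x}
  #       def tag(x), do: {:other, x}
  #     end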
defp checks_ast(checklist), do: checks_ast(checklist, true)
defp checks_ast([], ast), do: ast
defp checks_ast([{:is_function = atom, [hd, tl | []]} | tail], ast) do
checks_ast(tail, check_ast(:and, ast, check_ast(atom, hd, tl)))
end
defp checks_ast([{:is_list = atom, arg} | tail], ast) do
checks_ast(tail, check_ast(:and, ast, check_ast(atom, arg)))
end
defp checks_ast([{:in = atom, [hd, tl | []]} | tail], ast) do
checks_ast(tail, check_ast(:and, ast, check_ast(atom, hd, tl)))
end
defp checks_ast([{atom, arg} | tail], ast)
when atom in @ori_keys do
checks_ast(tail, check_ast(:and, ast, check_ast(atom, arg)))
end
defp checks_ast([{atom, args} | tail], ast)
when atom in @mod_keys and is_list(args) and length(args) >= 1 do
atom = plural_to_singular_key(atom)
checks_ast(tail,
check_ast(:and, ast, checks_ast(checklist(atom, args), true)))
end
defp checks_ast([{atom, args} | _tail], _ast) do
arity =
case is_list(args) do
true -> length(args)
_ -> 1
end
raise CheckError, args: Macro.to_string(args), arity: arity, check: atom
end
defp check_ast(atom, arg), do: quote do: unquote(atom)(unquote(arg))
defp check_ast(atom, arg1, arg2) do
quote do: unquote(atom)(unquote(arg1), unquote(arg2))
end
defp plural_to_singular_key(atom) do
atom
|> Atom.to_string()
|> String.replace_leading("are_", "is_")
|> String.replace_trailing("s", "")
|> String.replace_trailing("ie", "y")
|> String.to_atom()
end
defp checklist(atom, arg), do: checklist(atom, arg, [])
defp checklist(_atom, [], acc), do: acc
defp checklist(atom, [head | tail], acc) do
checklist(atom, tail, [{atom, head} | acc])
end
end
| lib/guard_gen.ex |
defmodule Axon.Activations do
@moduledoc """
Activation functions.
Activation functions are element-wise, (typically) non-linear
functions called on the output of another layer, such as
a dense layer:
x
|> dense(weight, bias)
|> relu()
Activation functions output the "activation" or how active
a given layer's neurons are in learning a representation
of the data-generating distribution.
Some activations are commonly used as output activations. For
example `softmax` is often used as the output in multiclass
classification problems because it returns a categorical
probability distribution:
iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))
#Nx.Tensor<
f32[1][3]
[
[0.09003057330846786, 0.2447284758090973, 0.6652409434318542]
]
>
Other activations such as `tanh` or `sigmoid` are used because
they have desirable properties, such as keeping the output
tensor constrained within a certain range.
Generally, the choice of activation function is arbitrary;
although some activations work better than others in certain
problem domains. For example ReLU (rectified linear unit)
activation is a widely-accepted default. You can see
a list of activation functions and implementations
[here](https://paperswithcode.com/methods/category/activation-functions).
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
import Nx.Defn
import Axon.Shared
@doc ~S"""
Continuously-differentiable exponential linear unit activation.
$$f(x_i) = \max(0, x_i) + \min(0, \alpha * (e^{\frac{x_i}{\alpha}} - 1))$$
## Options
* `alpha` - $\alpha$ in CELU formulation. Must be non-zero.
Defaults to `1.0`
## Examples
iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
f32[7]
[-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
#Nx.Tensor<
bf16[2][3]
[
[-0.62890625, -0.86328125, -0.94921875],
[1.0, 2.0, 3.0]
]
>
### Error cases
iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)
** (ArgumentError) :alpha must be non-zero in CELU activation
## References
* [Continuously Differentiable Exponential Linear Units](https://arxiv.org/pdf/1704.07483.pdf)
"""
defn celu(x, opts \\ []) do
opts = keyword!(opts, alpha: 1.0)
transform(
  opts[:alpha],
  fn alpha ->
    if alpha == 0,
      do: raise(ArgumentError, ":alpha must be non-zero in CELU activation")
  end
)
Nx.select(Nx.greater(x, 0), x, opts[:alpha] * Nx.expm1(x / opts[:alpha]))
end
@doc ~S"""
Exponential linear unit activation.
Equivalent to `celu` for $\alpha = 1$
$$f(x_i) = \begin{cases}x_i & x _i > 0 \newline \alpha * (e^{x_i} - 1) & x_i \leq 0 \\ \end{cases}$$
## Options
* `alpha` - $\alpha$ in ELU formulation. Defaults to `1.0`
## Examples
iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
f32[7]
[-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
#Nx.Tensor<
bf16[2][3]
[
[-0.62890625, -0.86328125, -0.94921875],
[1.0, 2.0, 3.0]
]
>
## References
* [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
"""
defn elu(x, opts \\ []) do
opts = keyword!(opts, alpha: 1.0)
x_hat = Nx.select(Nx.greater(x, 0), 0, x)
Nx.select(Nx.greater(x, 0), x, opts[:alpha] * Nx.expm1(x_hat))
end
@doc ~S"""
Exponential activation.
$$f(x_i) = e^{x_i}$$
## Examples
iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.049787066876888275, 0.1353352814912796, 0.3678794503211975, 1.0, 2.7182817459106445, 7.389056205749512, 20.08553695678711]
>
iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.3671875, 0.134765625, 0.049560546875],
[2.703125, 7.375, 20.0]
]
>
"""
defn exp(x) do
Nx.exp(x)
end
@doc ~S"""
Gaussian error linear unit activation.
$$f(x_i) = \frac{x_i}{2}(1 + \mathrm{erf}(\frac{x_i}{\sqrt{2}}))$$
## Examples
iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-0.0040496885776519775, -0.04550027847290039, -0.15865525603294373, 0.0, 0.8413447141647339, 1.9544997215270996, 2.995950222015381]
>
iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-0.16015625, -0.046875, -0.005859375],
[0.83984375, 1.953125, 2.984375]
]
>
## References
* [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
"""
defn gelu(x) do
sqrt2 = Nx.sqrt(Nx.tensor(2, type: Nx.type(x)))
x
|> Nx.divide(sqrt2)
|> Nx.erf()
|> Nx.add(1)
|> Nx.multiply(x)
|> Nx.divide(2)
end
@doc ~S"""
Hard sigmoid activation.
$$f(x_i) = \begin{cases} 0 & x_i \leq -3 \newline
1 & x_i \geq 3 \newline
\frac{x_i}{6} + \frac{1}{2} & otherwise \end{cases}$$
## Examples
iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.0, 0.1666666716337204, 0.3333333432674408, 0.5, 0.6666666865348816, 0.8333333134651184, 1.0]
>
iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.33203125, 0.166015625, 0.0],
[0.6640625, 0.83203125, 1.0]
]
>
"""
defn hard_sigmoid(x) do
x
|> Nx.add(3)
|> relu6()
|> Nx.divide(6)
end
@doc ~S"""
Hard sigmoid weighted linear unit activation.
$$f(x_i) = \begin{cases} 0 & x_i \leq -3 \newline
x & x_i \geq 3 \newline
\frac{x_i^2}{6} + \frac{x_i}{2} & otherwise \end{cases}$$
## Examples
iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.0, -0.3333333432674408, -0.3333333432674408, 0.0, 0.6666666865348816, 1.6666666269302368, 3.0]
>
iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-0.33203125, -0.33203125, 0.0],
[0.6640625, 1.6640625, 3.0]
]
>
"""
defn hard_silu(x) do
x
|> hard_sigmoid()
|> Nx.multiply(x)
end
@doc ~S"""
Hard hyperbolic tangent activation.
$$f(x_i) = \begin{cases} 1 & x > 1 \newline -1 & x < -1 \newline x & otherwise \end{cases}$$
## Examples
iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0]
>
iex> Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-1.0, -1.0, -1.0],
[1.0, 1.0, 1.0]
]
>
"""
defn hard_tanh(x) do
Nx.select(
Nx.greater(x, 1),
1,
Nx.select(Nx.less(x, -1), -1, x)
)
end
@doc ~S"""
Leaky rectified linear unit activation.
$$f(x_i) = \begin{cases} x & x \geq 0 \newline \alpha * x & otherwise \end{cases}$$
## Options
* `:alpha` - $\alpha$ in Leaky ReLU formulation. Defaults to `1.0e-2`
## Examples
iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)
#Nx.Tensor<
f32[data: 7]
[-1.5, -1.0, -0.5, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)
#Nx.Tensor<
f32[batch: 2][data: 3]
[
[-0.5, -1.0, -1.5],
[1.0, 2.0, 3.0]
]
>
"""
defn leaky_relu(x, opts \\ []) do
opts = keyword!(opts, alpha: 1.0e-2)
Nx.select(Nx.greater(x, 0), x, x * opts[:alpha])
end
@doc ~S"""
Linear activation.
$$f(x_i) = x_i$$
## Examples
iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-1.0, -2.0, -3.0],
[1.0, 2.0, 3.0]
]
>
"""
defn linear(x), do: x
@doc ~S"""
Log-sigmoid activation.
$$f(x_i) = \log(\sigma(x_i))$$
## Examples
iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-3.0485873222351074, -2.1269280910491943, -1.3132617473602295, -0.6931471824645996, -0.3132616877555847, -0.12692801654338837, -0.04858734831213951]
>
iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-1.3125, -2.125, -3.046875],
[-0.3125, -0.1259765625, -0.04833984375]
]
>
"""
defn log_sigmoid(x), do: -softplus(-x)
@doc ~S"""
Rectified linear unit activation.
$$f(x_i) = \max_i(x, 0)$$
## Examples
iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.0, 0.0, 0.0],
[1.0, 2.0, 3.0]
]
>
"""
defn relu(x) do
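    # ReLU's derivative is undefined at 0; the custom gradient below passes
    # the upstream gradient `g` through only where x > 0 and pins it to 0
    # elsewhere, instead of relying on the autograd of `Nx.max/2`.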
custom_grad(
Nx.max(x, 0),
fn _ans, g -> [{x, Nx.select(Nx.greater(x, 0), g, Nx.broadcast(0, g))}] end
)
end
@doc ~S"""
Rectified linear unit 6 activation.
$$f(x_i) = \min_i(\max_i(x, 0), 6)$$
## Examples
iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
#Nx.Tensor<
f32[7]
[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
>
iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.0, 0.0, 0.0],
[1.0, 2.0, 3.0]
]
>
## References
* [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861v1)
"""
defn relu6(x) do
x
|> Nx.max(0)
|> Nx.min(6)
end
@doc ~S"""
Sigmoid activation.
$$f(x_i) = \frac{1}{1 + e^{-x_i}}$$
## Examples
iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.04742587357759476, 0.11920291930437088, 0.2689414322376251, 0.5, 0.7310585975646973, 0.8807970881462097, 0.9525741338729858]
>
iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.267578125, 0.119140625, 0.04736328125],
[0.73046875, 0.87890625, 0.94921875]
]
>
"""
defn sigmoid(x), do: Nx.logistic(x)
@doc ~S"""
Sigmoid weighted linear unit activation.
$$f(x_i) = x_i \sigma(x_i)$$
## Examples
iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-0.14227762818336487, -0.23840583860874176, -0.2689414322376251, 0.0, 0.7310585975646973, 1.7615941762924194, 2.857722282409668]
>
iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-0.267578125, -0.23828125, -0.1416015625],
[0.73046875, 1.7578125, 2.84375]
]
>
## References
* [Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning](https://arxiv.org/abs/1702.03118v3)
"""
defn silu(x) do
x
|> Nx.logistic()
|> Nx.multiply(x)
end
@doc ~S"""
Scaled exponential linear unit activation.
$$f(x_i) = \begin{cases} \lambda x & x \geq 0 \newline
\lambda \alpha(e^{x} - 1) & x < 0 \end{cases}$$
$$\alpha \approx 1.6733$$
$$\lambda \approx 1.0507$$
## Examples
iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-1.670568823814392, -1.5201665163040161, -1.1113307476043701, 0.0, 1.0507010221481323, 2.1014020442962646, 3.1521029472351074]
>
iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-1.09375, -1.5078125, -1.6640625],
[1.046875, 2.09375, 3.140625]
]
>
## References
* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515v5)
"""
defn selu(x) do
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
scale * elu(x, alpha: alpha)
end
@doc ~S"""
Softmax activation.
$$\frac{e^{x_i}}{\sum_i e^{x_i}}$$
## Examples
iex> Axon.Activations.softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.0015683004166930914, 0.004263082519173622, 0.011588259600102901, 0.03150015324354172, 0.08562629669904709, 0.23275642096996307, 0.6326975226402283]
>
iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.011962890625, 0.00439453125, 0.001617431640625],
[0.08837890625, 0.240234375, 0.65625]
]
>
"""
defn softmax(x) do
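    # Subtract the max before exponentiating so `Nx.exp/1` cannot overflow
    # for large inputs; softmax is shift-invariant, so the result is the same.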
max_val = Nx.reduce_max(x)
stable_exp =
x
|> Nx.subtract(max_val)
|> Nx.exp()
stable_exp
|> Nx.sum()
|> reciprocal()
|> Nx.multiply(stable_exp)
end
@doc ~S"""
Softplus activation.
$$\log(1 + e^{x_i})$$
## Examples
iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[0.04858734831213951, 0.12692801654338837, 0.3132616877555847, 0.6931471824645996, 1.3132617473602295, 2.1269280910491943, 3.0485873222351074]
>
iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[0.3125, 0.1259765625, 0.04833984375],
[1.3125, 2.125, 3.046875]
]
>
"""
defn softplus(x) do
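    # Numerically stable form: log(1 + e^x) = max(0, x) + log1p(e^-|x|),
    # which avoids overflow in `Nx.exp/1` for large positive inputs.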
stable = Nx.max(0.0, x)
x
|> Nx.abs()
|> Nx.negate()
|> Nx.exp()
|> Nx.log1p()
|> Nx.add(stable)
end
@doc ~S"""
Softsign activation.
$$f(x_i) = \frac{x_i}{|x_i| + 1}$$
## Examples
iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-0.75, -0.6666666865348816, -0.5, 0.0, 0.5, 0.6666666865348816, 0.75]
>
iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-0.5, -0.6640625, -0.75],
[0.5, 0.6640625, 0.75]
]
>
"""
defn softsign(x) do
x
|> Nx.abs()
|> Nx.add(1)
|> reciprocal()
|> Nx.multiply(x)
end
@doc ~S"""
Hyperbolic tangent activation.
$$f(x_i) = \tanh(x_i)$$
## Examples
iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
#Nx.Tensor<
f32[data: 7]
[-0.9950547814369202, -0.9640275835990906, -0.7615941762924194, 0.0, 0.7615941762924194, 0.9640275835990906, 0.9950547814369202]
>
iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
#Nx.Tensor<
bf16[batch: 2][data: 3]
[
[-0.7578125, -0.9609375, -0.9921875],
[0.7578125, 0.9609375, 0.9921875]
]
>
"""
defn tanh(x), do: Nx.tanh(x)
end
| lib/axon/activations.ex |
defmodule Stripe.Plan do
@moduledoc """
Work with Stripe plan objects.
You can:
- Create a plan
- Retrieve a plan
- Update a plan
- Delete a plan
- List all plans
Stripe API reference: https://stripe.com/docs/api#plan
"""
@type t :: %__MODULE__{}
defstruct [
:id, :object,
:amount, :created, :currency, :interval, :interval_count,
:livemode, :metadata, :name, :statement_descriptor, :trial_period_days
]
@plural_endpoint "plans"
@schema %{
amount: [:create, :retrieve],
created: [:retrieve],
currency: [:create, :retrieve],
id: [:create, :retrieve],
interval: [:create, :retrieve],
interval_count: [:create, :retrieve],
livemode: [:retrieve],
metadata: [:create, :retrieve, :update],
name: [:create, :retrieve, :update],
object: [:retrieve],
statement_descriptor: [:create, :retrieve, :update],
trial_period_days: [:create, :retrieve, :update]
}
@nullable_keys [
:metadata, :statement_descriptor
]
@doc """
Create a plan.
"""
@spec create(map, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def create(changes, opts \\ []) do
Stripe.Request.create(@plural_endpoint, changes, @schema, opts)
end
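  # A minimal creation sketch (field values hypothetical; see the @schema
  # above for the accepted keys):
  #
  #     Stripe.Plan.create(%{id: "gold", amount: 5000, currency: "usd",
  #                          interval: "month", name: "Gold plan"})
  #     #=> {:ok, %Stripe.Plan{}} | {:error, error}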
@doc """
Retrieve a plan.
"""
@spec retrieve(binary, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def retrieve(id, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.retrieve(endpoint, opts)
end
@doc """
Update a plan.
Takes the `id` and a map of changes.
"""
@spec update(binary, map, list) :: {:ok, t} | {:error, Stripe.api_error_struct}
def update(id, changes, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.update(endpoint, changes, @schema, @nullable_keys, opts)
end
@doc """
Delete a plan.
"""
@spec delete(binary, list) :: :ok | {:error, Stripe.api_error_struct}
def delete(id, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.delete(endpoint, %{}, opts)
end
@doc """
List all plans.
"""
@spec list(map, Keyword.t) :: {:ok, Stripe.List.t} | {:error, Stripe.api_error_struct}
def list(params \\ %{}, opts \\ []) do
endpoint = @plural_endpoint
Stripe.Request.retrieve(params, endpoint, opts)
end
end
| lib/stripe/plan.ex |
defmodule Que.Persistence do
@moduledoc """
Provides a high-level API to interact with Jobs in Database
This module is a behaviour that delegates calls to the specified
adapter. It has been designed in a way that it's easy to write
custom adapters for other databases or stores like Redis, even
though there are no current plans on supporting anything other
than `Mnesia`.
"""
## Adapter to delegate all methods to
@adapter Que.Persistence.Mnesia
@doc """
Finds a `Que.Job` from the database.
Returns the Job struct if it's found, otherwise `nil`.
"""
@callback find(id :: integer) :: Que.Job.t | nil
defdelegate find(id), to: @adapter
@doc """
Deletes a `Que.Job` from the database.
"""
@callback destroy(id :: integer) :: :ok | no_return
defdelegate destroy(id), to: @adapter
@doc """
Inserts a `Que.Job` into the database.
Returns the same Job struct with the `id` value set
"""
@callback insert(job :: Que.Job.t) :: Que.Job.t
defdelegate insert(job), to: @adapter
@doc """
Updates an existing `Que.Job` in the database.
This method finds the job to update by the given
job's id. If no job with the given id exists, it is
inserted as-is. If the id of the given job is nil,
it's still inserted and a valid id is assigned.
Returns the updated job.
"""
@callback update(job :: Que.Job.t) :: Que.Job.t
defdelegate update(job), to: @adapter
@doc """
Returns all `Que.Job`s in the database.
"""
@callback all :: list(Que.Job.t)
defdelegate all, to: @adapter
@doc """
Returns all `Que.Job`s for the given worker.
"""
@callback all(worker :: Que.Worker.t) :: list(Que.Job.t)
defdelegate all(worker), to: @adapter
@doc """
Returns completed `Que.Job`s from the database.
"""
@callback completed :: list(Que.Job.t)
defdelegate completed, to: @adapter
@doc """
Returns completed `Que.Job`s for the given worker.
"""
@callback completed(worker :: Que.Worker.t) :: list(Que.Job.t)
defdelegate completed(worker), to: @adapter
@doc """
Returns incomplete `Que.Job`s from the database.
This includes all Jobs whose status is either
`:queued` or `:started` but not `:failed`.
"""
@callback incomplete :: list(Que.Job.t)
defdelegate incomplete, to: @adapter
@doc """
Returns incomplete `Que.Job`s for the given worker.
"""
@callback incomplete(worker :: Que.Worker.t) :: list(Que.Job.t)
defdelegate incomplete(worker), to: @adapter
@doc """
Returns failed `Que.Job`s from the database.
"""
@callback failed :: list(Que.Job.t)
defdelegate failed, to: @adapter
@doc """
Returns failed `Que.Job`s for the given worker.
"""
@callback failed(worker :: Que.Worker.t) :: list(Que.Job.t)
defdelegate failed(worker), to: @adapter
@doc """
Makes sure that the Database is ready to be used.
This is called when the Que application, specifically
`Que.Server`, starts to make sure that a database exists
and is ready to be used.
"""
@callback initialize :: :ok | :error
defdelegate initialize, to: @adapter
# Macro so future adapters `use` this module
defmacro __using__(_opts) do
quote do
@behaviour unquote(__MODULE__)
end
end
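  # A hedged sketch of a custom adapter (module and store hypothetical);
  # every callback above must be implemented against the target store:
  #
  #     defmodule Que.Persistence.InMemory do
  #       use Que.Persistence
  #
  #       def initialize, do: :ok
  #       def find(_id), do: nil
  #       # ... implement insert/1, update/1, destroy/1, all/0, all/1,
  #       # completed/0..1, incomplete/0..1 and failed/0..1 similarly
  #     end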
end
| lib/que/persistence/persistence.ex |
defmodule AMQPX.SharedChannel do
@moduledoc """
A reference-counted lifetime tracker for AMQP channels.
A wrapper around channels that allows several processes to share it and
disposes of it when the last registered user exits.
## Motivation
Applications are normally encouraged to allocate as many AMQP channels as
they require, but it is a finite resource and can be exceeded and lead to
fatal broker errors if an application frequently creates channels and does
not dispose of them.
One such scenario is when you want to spawn off tasks that publish to a
centrally owned channel, but the tasks can outlive the owner, so it is
unsafe for the owner to close the channel when it itself terminates, as the
publishers might not get a chance to finish their work.
`AMQPX.SharedChannel` takes over the responsibility of making sure that the
channel is eventually disposed of, but not before everyone is done using it.
"""
use GenServer
require Logger
defstruct [
:ch,
:users
]
@doc false
def child_spec(_), do: raise("#{__MODULE__} is not supposed to be supervised")
@doc "Start the wrapper process."
def start(ch),
do: GenServer.start(__MODULE__, {ch, self()})
@doc "Add the current process as a user of the shared channel."
def share(shared_channel_pid),
do: GenServer.call(shared_channel_pid, {:use, self()})
@doc "Add the specified process as a user of the shared channel."
def share(shared_channel_pid, user_pid),
do: GenServer.call(shared_channel_pid, {:use, user_pid})
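  # A hedged usage sketch: the owner wraps its channel, and every task that
  # may outlive the owner registers itself before publishing.
  #
  #     {:ok, shared} = AMQPX.SharedChannel.start(channel)
  #
  #     Task.start(fn ->
  #       AMQPX.SharedChannel.share(shared)
  #       # ... publish on `channel` ...
  #     end)
  #
  # The channel is closed only after the owner and all registered users exit.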
@impl GenServer
def init({ch, pid}) do
Process.flag(:trap_exit, true)
Process.monitor(pid)
state = %__MODULE__{ch: ch, users: MapSet.new([pid])}
{:ok, state}
end
@impl GenServer
def handle_call({:use, pid}, _, state = %__MODULE__{users: users}) do
Process.monitor(pid)
state = %__MODULE__{state | users: MapSet.put(users, pid)}
{:reply, nil, state}
end
@impl GenServer
def handle_info({:DOWN, _, _, pid, _}, state = %__MODULE__{users: users}) do
users = MapSet.delete(users, pid)
state = %__MODULE__{state | users: users}
if MapSet.size(users) == 0 do
{:stop, :normal, state}
else
{:noreply, state}
end
end
@impl GenServer
def terminate(_, %__MODULE__{ch: ch}) do
AMQP.Channel.close(ch)
end
end
| lib/amqpx/shared_channel.ex |
defmodule PasetoPlug do
@moduledoc """
Documentation for PasetoPlug.
"""
import Plug.Conn
@type paseto_key :: {binary(), binary()} | binary()
@doc """
Main entrypoint for whenever the plug is loaded. It is expected that you will
pass in a function capable of returning a binary key (for local) or keypair.
If using `v1 local`, you will need to provide a binary key up to 32 bytes long.
If using `v1 public`, you will need to provide a keypair ({binary(), binary()}) that can typically be generated by doing `:crypto.generate_key(:rsa, {2048, 65_537})`. NOTE: The modulus and exponent must be exactly as they are declared here.
If using `v2 local`, you will need to provide a binary key that is exactly 64 bytes long.
If using `v2 public`, you can generate a keypair doing `{:ok, public_key, secret_key} = Salty.Sign.Ed25519.keypair()`
Further information can be found here: `https://github.com/GrappigPanda/paseto`
"""
@spec init(%{key_provider: (() -> paseto_key)}) :: paseto_key
def init(key_provider: key_provider) do
key_provider.()
end
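  # A hedged pipeline sketch (`MyApp.Keys.paseto_key/0` is hypothetical):
  #
  #     plug PasetoPlug, key_provider: &MyApp.Keys.paseto_key/0
  #
  # The provider is invoked once by `init/1`, and the resulting key (or
  # keypair) is handed to `call/2` on every request.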
@doc """
This is where the verification happens. Should everything go according to plan, you will find a new `:claims` key in your `conn.assigns`
"""
def call(conn, key) when is_binary(key) do
do_call(conn, key)
end
def call(conn, public_key) do
do_call(conn, public_key)
end
defp do_call(conn, key) do
conn
|> get_auth_token()
|> case do
{:ok, token} ->
validate_token(token, key)
error ->
error
end
|> (&create_auth_response(conn, &1)).()
end
@spec get_auth_token(%Plug.Conn{}) :: {:ok, String.t()} | {:error, String.t()}
defp get_auth_token(%Plug.Conn{} = conn) do
case get_req_header(conn, "authorization") do
["Bearer " <> token] ->
{:ok, String.trim(token)}
_error ->
{:error, "Invalid Authorization Header. Expected `Authorization: Bearer <token>`"}
end
end
@spec validate_token(String.t(), paseto_key) :: {:ok, %Paseto.Token{}} | {:error, String.t()}
defp validate_token(token, key) do
token
|> Paseto.parse_token(key)
end
@spec create_auth_response(
%Plug.Conn{},
{:ok, %Paseto.V1{} | %Paseto.V2{}} | {:error, String.t()}
) :: any()
defp create_auth_response(%Plug.Conn{} = conn, token_validation) do
case token_validation do
{:ok, token} ->
assign(conn, :claims, token)
{:error, reason} ->
conn
|> send_resp(401, reason)
|> halt()
end
end
end
| lib/paseto_plug.ex |
defmodule FunLand.Applicative do
@doc """
A structure is Applicative if it is Appliable, as well as having the ability to create a new structure from any value, by `new`ping it.
Being able to create `new`, `apply` and `map` means that we can create new structures with some values, transform them and (partially or fully) apply them to each other.
Therefore, we're able to re-use all new our old operations in a new, more complex context.
## Fruit Salad Example
We've already seen that a fruit-salad bowl is `Mappable` and `Appliable`.
However, we'd like to know how we start out: When we have an apple, how do we end up with a bowl filled with an apple?
`Bowl.new(my_apple)` is the implementation that answers this question.
Together with `apply` and `map`, we can now take arbitrary ingredients, put them in bowls and mix and mash them together to our liking, *without soiling the kitchen's countertop*:
- `new`: We can take an apple, and put it in a bowl: we put the apple in a `new` bowl to return a `bowl with an apple`.
- `apply`: If we have a bowl with a partially-made fruit-salad, and we have a bowl with an apple, we can take the apple and the partially-made fruit salad to create a bowl with a fruit-with-apples-salad.
- `map`: We can take a bowl with any fruit or salad, and do some arbitrary operation with it, such as 'blending'. In this example, we end up with the same bowl, but now filled with blended fruit-salad.
## In Other Environments
- In Haskell, `Applicative.new` is known as `pure` as well as `return`.
- In Category Theory, something that is Applicative is known by its more official name, *Applicative Functor*.
"""
@type applicative(a) :: FunLand.adt(a)
@callback new(a) :: applicative(a) when a: any
defmacro __using__(_opts) do
quote do
use FunLand.Appliable
@behaviour FunLand.Applicative
@doc "Free implementation new Mappable.map as #{inspect(__MODULE__)} is Applicative"
def map(a, function) do
apply_with(new(function), a)
end
defoverridable map: 2
defdelegate apply_discard_left(a, b), to: FunLand.Applicative
defdelegate apply_discard_right(a, b), to: FunLand.Applicative
end
end
defdelegate map(a, fun), to: FunLand.Mappable
defdelegate apply_with(a, b), to: FunLand.Appliable
# Note the difference between the callback and this implementation; we need two parameters here.
@doc """
Creates a new Algebraic Data Type that contains `value`.
The first parameter can either be the module name of the Algebraic Data Type that you want to create,
or it can be an instance of the same data type, such as `[]` for `List`, `{}` for Tuple, `%YourModule{}` for `YourModule`.
"""
def new(module_or_data_type, value)
# For standard-library modules like `List`, delegate to e.g. `FunLand.Builtin.List`
for {stdlib_module, module} <- FunLand.Builtin.__stdlib__() do
def new(unquote(stdlib_module), a) do
apply(unquote(module), :new, [a])
end
end
# When called with custom modulename
def new(module, a) when is_atom(module), do: module.new(a)
# When called with stdlib struct
for {stdlib_module, module} <- FunLand.Builtin.__stdlib_struct_modules__() do
def new(%unquote(stdlib_module){}, a) do
apply(unquote(module), :new, [a])
end
end
# When called with Struct
def new(%module{}, a), do: module.new(a)
use FunLand.Helper.GuardMacros
for {guard, module} <- FunLand.Builtin.__builtin__() do
# When called with direct types like `{}` or `[]` or `"foo"`
def new(applicative, a) when unquote(guard)(applicative) do
apply(unquote(module), :new, [a])
end
end
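  # A hedged dispatch sketch (results assume the builtin List implementation
  # wraps the value in a list):
  #
  #     FunLand.Applicative.new(List, 1) #=> [1]
  #     FunLand.Applicative.new([], 1)   #=> [1]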
# Free function implementations:
@doc """
Calls `Applicative.apply/2`, but afterwards discards the value that was the result of the rightmost argument.
(the one evaluated the last).
So in the end, the value that went in as left argument
(The Algorithmic Data Type containing partially-applied functions) is returned.
In Haskell, this is known as `<*`
"""
# TODO: Verify implementation.
def apply_discard_right(a, b) do
apply_with(map(a, Currying.curry(&FunLand.Helper.const/2)), b)
end
@doc """
Calls `Applicative.apply/2`, but afterwards discards the value that was the result of the leftmost argument.
(the one evaluated the first).
So in the end, the value that went in as right argument
(the Algebraic Data Type containing values) is returned.
In Haskell, this is known as `*>`
"""
# TODO: Verify implementation.
def apply_discard_left(a, b) do
apply_with(map(a, Currying.curry(&FunLand.Helper.const_reverse/2)), b)
end
end
|
lib/fun_land/applicative.ex
| 0.752195
| 0.878575
|
applicative.ex
|
starcoder
|
for {module, alg} <- [{Argon2, "Argon2"}, {Bcrypt, "Bcrypt"}, {Pbkdf2, "Pbkdf2"}] do
if Code.ensure_loaded?(module) do
mod = Module.concat(Comeonin, module)
defmodule mod do
@moduledoc """
Password hashing module using the #{alg} algorithm.
For more information about the #{alg} algorithm, see the `Choosing
an algorithm` section in the Comeonin documentation.
For a lower-level API, see `#{alg}.Base`.
"""
@doc """
Hash a password and return it in a map, with the password set to nil.
## Options
This function uses `#{alg}.hash_pwd_salt` as the hashing function.
In addition to the options for hash_pwd_salt, there is also the following
option:
* `:hash_key` - the name of the key for the password hash
  (defaults to `:password_hash`)
## Example with Ecto
In this example, the `create_changeset` function below shows how a new
user can be created:
def create_changeset(%User{} = user, attrs) do
user
|> changeset(attrs)
|> validate_password(:password)
|> put_pass_hash()
end
The `validating the password` section will then look at writing
a custom validator (validate_password), and the `adding the password hash`
section will cover the use of the `add_hash` function (in put_pass_hash).
### Validating the password
This section can be skipped if you are using a frontend solution
to validating the password.
The following is a basic example of the `validate_password`
function:
def validate_password(changeset, field, options \\ []) do
validate_change(changeset, field, fn _, password ->
case valid_password?(password) do
{:ok, _} -> []
{:error, msg} -> [{field, options[:message] || msg}]
end
end)
end
In the example below, the `valid_password?` function checks that
the password is at least 8 characters long.
defp valid_password?(password) when byte_size(password) > 7 do
{:ok, password}
end
defp valid_password?(_), do: {:error, "The password is too short"}
Alternatively, you could use a dedicated password strength checker,
such as [not_qwerty123](https://github.com/riverrun/not_qwerty123).
For more information about password strength rules, see the latest
[NIST guidelines](https://pages.nist.gov/800-63-3/sp800-63b.html).
### Adding the password hash
In the following example, `add_hash` is used in the put_pass_hash
function:
defp put_pass_hash(%Ecto.Changeset{valid?: true, changes:
%{password: password}} = changeset) do
change(changeset, Comeonin.#{alg}.add_hash(password))
end
defp put_pass_hash(changeset), do: changeset
"""
def add_hash(password, opts \\ []) do
hash_key = opts[:hash_key] || :password_hash
%{hash_key => unquote(module).hash_pwd_salt(password, opts), :password => nil}
end
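      # Sketch of the returned shape (hash value hypothetical):
      #
      #     add_hash("moon gravity")
      #     #=> %{password_hash: "$2b$12$...", password: nil}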
@doc """
Check the password by comparing its hash with the password hash found
in a user struct, or map.
The password hash's key needs to be either `:password_hash` or
`:encrypted_password`.
After finding the password hash in the user struct, the password
is checked by comparing it with the hash. Then the function returns
{:ok, user} or {:error, message}. Note that the error message is
meant to be used for logging purposes only; it should not be passed
on to the end user.
If the first argument is nil, meaning that there is no user with that
name, a dummy verify function is run to make user enumeration, using
timing information, more difficult. This can be disabled by adding
`hide_user: false` to the opts.
## Examples
The following is a simple example using Phoenix 1.3:
def verify(%{"password" => password} = attrs) do
  MyApp.Accounts.get_by(attrs)
  |> Comeonin.#{alg}.check_pass(password)
end
"""
def check_pass(user, password, opts \\ [])
def check_pass(nil, _password, opts) do
unless opts[:hide_user] == false, do: unquote(module).no_user_verify(opts)
{:error, "invalid user-identifier"}
end
def check_pass(user, password, _) when is_binary(password) do
with {:ok, hash} <- get_hash(user) do
unquote(module).verify_pass(password, hash) and
{:ok, user} || {:error, "invalid password"}
end
end
def check_pass(_, _, _) do
{:error, "password is not a string"}
end
@doc """
Print out a report to help you configure the hash function.
For more details, see the documentation for `#{alg}.Stats.report`.
"""
def report(opts \\ []) do
mod = Module.concat(unquote(module), Stats)
mod.report(opts)
end
@doc """
Hash the password with a randomly-generated salt.
For more details, see the documentation for `#{alg}.hash_pwd_salt`
and `#{alg}.Base.hash_password`.
"""
defdelegate hashpwsalt(password, opts \\ []), to: module, as: :hash_pwd_salt
@doc """
Check the password by comparing it with the stored hash.
For more details, see the documentation for `#{alg}.verify_pass`.
"""
defdelegate checkpw(password, hash), to: module, as: :verify_pass
@doc """
Run a dummy check, which always returns false, to make user enumeration
more difficult.
For more details, see the documentation for `#{alg}.no_user_verify`.
"""
defdelegate dummy_checkpw(opts \\ []), to: module, as: :no_user_verify
defp get_hash(%{password_hash: hash}), do: {:ok, hash}
defp get_hash(%{encrypted_password: hash}), do: {:ok, hash}
defp get_hash(_), do: {:error, "no password hash found in the user struct"}
end
end
end
| lib/comeonin/base.ex |
defmodule Pathex.Lenses.Any do
@moduledoc """
Private module for `any()` lens
> see `Pathex.Lenses.any/0` documentation
"""
@spec any() :: Pathex.t()
def any do
fn
:view, {%{} = map, func} ->
:maps.iterator(map)
|> :maps.next()
|> case do
:none -> :error
{_, v, _} -> func.(v)
end
:view, {t, func} when is_tuple(t) and tuple_size(t) > 0 ->
func.(:erlang.element(1, t))
:view, {[{a, v} | _], func} when is_atom(a) ->
func.(v)
:view, {[v | _], func} ->
func.(v)
:update, {%{} = map, func} ->
:maps.iterator(map)
|> :maps.next()
|> case do
:none ->
:error
{key, value, _} ->
with {:ok, new_value} <- func.(value) do
{:ok, %{map | key => new_value}}
end
end
:update, {t, func} when is_tuple(t) and tuple_size(t) > 0 ->
with {:ok, new_element} <- func.(:erlang.element(1, t)) do
{:ok, :erlang.setelement(1, t, new_element)}
end
:update, {[{a, value} | tail], func} when is_atom(a) ->
with {:ok, new_value} <- func.(value) do
{:ok, [{a, new_value} | tail]}
end
:update, {[value | tail], func} ->
with {:ok, new_value} <- func.(value) do
{:ok, [new_value | tail]}
end
:force_update, {%{} = map, func, _} ->
:maps.iterator(map)
|> :maps.next()
|> case do
:none ->
:error
{key, value, _} ->
with {:ok, new_value} <- func.(value) do
{:ok, %{map | key => new_value}}
end
end
:force_update, {t, func, _} when is_tuple(t) and tuple_size(t) > 0 ->
with {:ok, new_element} <- func.(:erlang.element(1, t)) do
{:ok, :erlang.setelement(1, t, new_element)}
end
:force_update, {t, _, default} when is_tuple(t) ->
{:ok, {default}}
:force_update, {[{a, value} | tail], func, _} when is_atom(a) ->
with {:ok, new_value} <- func.(value) do
{:ok, [{a, new_value} | tail]}
end
:force_update, {[value | tail], func, _} ->
with {:ok, new_value} <- func.(value) do
{:ok, [new_value | tail]}
end
:force_update, {[], _, default} ->
{:ok, [default]}
op, _ when op in ~w[view update force_update]a ->
:error
end
end
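  # The returned closure dispatches on the operation tag; calling it directly
  # shows the contract implemented by the clauses above:
  #
  #     lens = Pathex.Lenses.Any.any()
  #     lens.(:view, {%{a: 1}, fn v -> {:ok, v} end})
  #     #=> {:ok, 1}
  #     lens.(:update, {[1, 2, 3], fn v -> {:ok, v + 10} end})
  #     #=> {:ok, [11, 2, 3]}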
end
| lib/pathex/lenses/any.ex |
defmodule AWS.DirectConnect do
@moduledoc """
AWS Direct Connect links your internal network to an AWS Direct Connect
location over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic
cable. One end of the cable is connected to your router, the other to an
AWS Direct Connect router. With this connection in place, you can create
virtual interfaces directly to the AWS cloud (for example, to Amazon
Elastic Compute Cloud (Amazon EC2) and Amazon Simple Storage Service
(Amazon S3)) and to Amazon Virtual Private Cloud (Amazon VPC), bypassing
Internet service providers in your network path. An AWS Direct Connect
location provides access to AWS in the region it is associated with, as
well as access to other US regions. For example, you can provision a single
connection to any AWS Direct Connect location in the US and use it to
access public AWS services in all US Regions and AWS GovCloud (US).
"""
@doc """
Deprecated in favor of `AllocateHostedConnection`.
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth for use by a
hosted connection on the given interconnect.
> Note: This is intended for use by AWS Direct Connect partners only.
"""
def allocate_connection_on_interconnect(client, input, options \\ []) do
request(client, "AllocateConnectionOnInterconnect", input, options)
end
@doc """
Creates a hosted connection on an interconnect or a link aggregation group
(LAG).
Allocates a VLAN number and a specified amount of bandwidth for use by a
hosted connection on the given interconnect or LAG.
> Note: This is intended for use by AWS Direct Connect partners only.
"""
def allocate_hosted_connection(client, input, options \\ []) do
request(client, "AllocateHostedConnection", input, options)
end
@doc """
Provisions a private virtual interface to be owned by another AWS customer.
Virtual interfaces created using this action must be confirmed by the
virtual interface owner by using the `ConfirmPrivateVirtualInterface`
action. Until then, the virtual interface will be in 'Confirming' state,
and will not be available for handling traffic.
"""
def allocate_private_virtual_interface(client, input, options \\ []) do
request(client, "AllocatePrivateVirtualInterface", input, options)
end
@doc """
Provisions a public virtual interface to be owned by a different customer.
The owner of a connection calls this function to provision a public virtual
interface which will be owned by another AWS customer.
Virtual interfaces created using this function must be confirmed by the
virtual interface owner by calling ConfirmPublicVirtualInterface. Until
this step has been completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
When creating an IPv6 public virtual interface (addressFamily is 'ipv6'),
the customer and amazon address fields should be left blank to use
auto-assigned IPv6 space. Custom IPv6 Addresses are currently not
supported.
"""
def allocate_public_virtual_interface(client, input, options \\ []) do
request(client, "AllocatePublicVirtualInterface", input, options)
end
@doc """
Associates an existing connection with a link aggregation group (LAG). The
connection is interrupted and re-established as a member of the LAG
(connectivity to AWS will be interrupted). The connection must be hosted on
the same AWS Direct Connect endpoint as the LAG, and its bandwidth must
match the bandwidth for the LAG. You can reassociate a connection that's
currently associated with a different LAG; however, if removing the
connection will cause the original LAG to fall below its setting for
minimum number of operational connections, the request fails.
Any virtual interfaces that are directly associated with the connection are
automatically re-associated with the LAG. If the connection was originally
associated with a different LAG, the virtual interfaces remain associated
with the original LAG.
For interconnects, any hosted connections are automatically re-associated
with the LAG. If the interconnect was originally associated with a
different LAG, the hosted connections remain associated with the original
LAG.
"""
def associate_connection_with_lag(client, input, options \\ []) do
request(client, "AssociateConnectionWithLag", input, options)
end
@doc """
Associates a hosted connection and its virtual interfaces with a link
aggregation group (LAG) or interconnect. If the target interconnect or LAG
has an existing hosted connection with a conflicting VLAN number or IP
address, the operation fails. This action temporarily interrupts the hosted
connection's connectivity to AWS as it is being migrated.
> Note: This is intended for use by AWS Direct Connect partners only.
"""
def associate_hosted_connection(client, input, options \\ []) do
request(client, "AssociateHostedConnection", input, options)
end
@doc """
Associates a virtual interface with a specified link aggregation group
(LAG) or connection. Connectivity to AWS is temporarily interrupted as the
virtual interface is being migrated. If the target connection or LAG has an
associated virtual interface with a conflicting VLAN number or a
conflicting IP address, the operation fails.
Virtual interfaces associated with a hosted connection cannot be associated
with a LAG; hosted connections must be migrated along with their virtual
interfaces using `AssociateHostedConnection`.
In order to reassociate a virtual interface to a new connection or LAG, the
requester must own either the virtual interface itself or the connection to
which the virtual interface is currently associated. Additionally, the
requester must own the connection or LAG to which the virtual interface
will be newly associated.
"""
def associate_virtual_interface(client, input, options \\ []) do
request(client, "AssociateVirtualInterface", input, options)
end
@doc """
Confirm the creation of a hosted connection on an interconnect.
Upon creation, the hosted connection is initially in the 'Ordering' state,
and will remain in this state until the owner calls ConfirmConnection to
confirm creation of the hosted connection.
"""
def confirm_connection(client, input, options \\ []) do
request(client, "ConfirmConnection", input, options)
end
@doc """
Accept ownership of a private virtual interface created by another
customer.
After the virtual interface owner calls this function, the virtual
interface will be created and attached to the given virtual private gateway
or direct connect gateway, and will be available for handling traffic.
"""
def confirm_private_virtual_interface(client, input, options \\ []) do
request(client, "ConfirmPrivateVirtualInterface", input, options)
end
@doc """
Accept ownership of a public virtual interface created by another customer.
After the virtual interface owner calls this function, the specified
virtual interface will be created and made available for handling traffic.
"""
def confirm_public_virtual_interface(client, input, options \\ []) do
request(client, "ConfirmPublicVirtualInterface", input, options)
end
@doc """
Creates a new BGP peer on a specified virtual interface. The BGP peer
cannot be in the same address family (IPv4/IPv6) of an existing BGP peer on
the virtual interface.
You must create a BGP peer for the corresponding address family in order to
access AWS resources that also use that address family.
When creating an IPv6 BGP peer, the Amazon address and customer address
fields must be left blank. IPv6 addresses are automatically assigned from
Amazon's pool of IPv6 addresses; you cannot specify custom IPv6 addresses.
For a public virtual interface, the Autonomous System Number (ASN) must be
private or already whitelisted for the virtual interface.
"""
def create_bgp_peer(client, input, options \\ []) do
request(client, "CreateBGPPeer", input, options)
end
@doc """
Creates a new connection between the customer network and a specific AWS
Direct Connect location.
A connection links your internal network to an AWS Direct Connect location
over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end
of the cable is connected to your router, the other to an AWS Direct
Connect router. An AWS Direct Connect location provides access to Amazon
Web Services in the region it is associated with. You can establish
connections with AWS Direct Connect locations in multiple regions, but a
connection in one region does not provide connectivity to other regions.
To find the locations for your region, use `DescribeLocations`.
You can automatically add the new connection to a link aggregation group
(LAG) by specifying a LAG ID in the request. This ensures that the new
connection is allocated on the same AWS Direct Connect endpoint that hosts
the specified LAG. If there are no available ports on the endpoint, the
request fails and no connection will be created.
"""
def create_connection(client, input, options \\ []) do
request(client, "CreateConnection", input, options)
end
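  # A hedged call sketch (input keys follow the AWS API; values hypothetical):
  #
  #     AWS.DirectConnect.create_connection(client, %{
  #       "bandwidth" => "1Gbps",
  #       "connectionName" => "my-connection",
  #       "location" => "EqDC2"
  #     })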
@doc """
Creates a new direct connect gateway. A direct connect gateway is an
intermediate object that enables you to connect a set of virtual interfaces
and virtual private gateways. direct connect gateways are global and
visible in any AWS region after they are created. The virtual interfaces
and virtual private gateways that are connected through a direct connect
gateway can be in different regions. This enables you to connect to a VPC
in any region, regardless of the region in which the virtual interfaces are
located, and pass traffic between them.
"""
def create_direct_connect_gateway(client, input, options \\ []) do
request(client, "CreateDirectConnectGateway", input, options)
end
@doc """
Creates an association between a direct connect gateway and a virtual
private gateway (VGW). The VGW must be attached to a VPC and must not be
associated with another direct connect gateway.
"""
def create_direct_connect_gateway_association(client, input, options \\ []) do
request(client, "CreateDirectConnectGatewayAssociation", input, options)
end
@doc """
Creates a new interconnect between an AWS Direct Connect partner's network
and a specific AWS Direct Connect location.
An interconnect is a connection which is capable of hosting other
connections. The AWS Direct Connect partner can use an interconnect to
provide sub-1Gbps AWS Direct Connect service to tier 2 customers who do not
have their own connections. Like a standard connection, an interconnect
links the AWS Direct Connect partner's network to an AWS Direct Connect
location over a standard 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One
end is connected to the partner's router, the other to an AWS Direct
Connect router.
You can automatically add the new interconnect to a link aggregation group
(LAG) by specifying a LAG ID in the request. This ensures that the new
interconnect is allocated on the same AWS Direct Connect endpoint that
hosts the specified LAG. If there are no available ports on the endpoint,
the request fails and no interconnect will be created.
For each end customer, the AWS Direct Connect partner provisions a
connection on their interconnect by calling
AllocateConnectionOnInterconnect. The end customer can then connect to AWS
resources by creating a virtual interface on their connection, using the
VLAN assigned to them by the AWS Direct Connect partner.
<note> This is intended for use by AWS Direct Connect partners only.
</note>
"""
def create_interconnect(client, input, options \\ []) do
request(client, "CreateInterconnect", input, options)
end
@doc """
Creates a new link aggregation group (LAG) with the specified number of
bundled physical connections between the customer network and a specific
AWS Direct Connect location. A LAG is a logical interface that uses the
Link Aggregation Control Protocol (LACP) to aggregate multiple 1 gigabit or
10 gigabit interfaces, allowing you to treat them as a single interface.
All connections in a LAG must use the same bandwidth (for example, 10
Gbps), and must terminate at the same AWS Direct Connect endpoint.
You can have up to 10 connections per LAG. Regardless of this limit, if you
request more connections for the LAG than AWS Direct Connect can allocate
on a single endpoint, no LAG is created.
You can specify an existing physical connection or interconnect to include
in the LAG (which counts towards the total number of connections). Doing so
interrupts the current physical connection or hosted connections, and
re-establishes them as a member of the LAG. The LAG will be created on the
same AWS Direct Connect endpoint to which the connection terminates. Any
virtual interfaces associated with the connection are automatically
disassociated and re-associated with the LAG. The connection ID does not
change.
If the AWS account used to create a LAG is a registered AWS Direct Connect
partner, the LAG is automatically enabled to host sub-connections. For a
LAG owned by a partner, any associated virtual interfaces cannot be
directly configured.
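A minimal input sketch (field names follow the CreateLag request shape;
values are illustrative):

    input = %{
      "numberOfConnections" => 2,
      "location" => "EqDC2",
      "connectionsBandwidth" => "1Gbps",
      "lagName" => "my-lag"
    }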
"""
def create_lag(client, input, options \\ []) do
request(client, "CreateLag", input, options)
end
@doc """
Creates a new private virtual interface. A virtual interface is the VLAN
that transports AWS Direct Connect traffic. A private virtual interface
supports sending traffic to a single virtual private cloud (VPC).
"""
def create_private_virtual_interface(client, input, options \\ []) do
request(client, "CreatePrivateVirtualInterface", input, options)
end
@doc """
Creates a new public virtual interface. A virtual interface is the VLAN
that transports AWS Direct Connect traffic. A public virtual interface
supports sending traffic to public services of AWS such as Amazon Simple
Storage Service (Amazon S3).
When creating an IPv6 public virtual interface (addressFamily is 'ipv6'),
the customer and Amazon address fields should be left blank to use
auto-assigned IPv6 space. Custom IPv6 addresses are currently not
supported.
"""
def create_public_virtual_interface(client, input, options \\ []) do
request(client, "CreatePublicVirtualInterface", input, options)
end
@doc """
Deletes a BGP peer on the specified virtual interface that matches the
specified customer address and ASN. You cannot delete the last BGP peer
from a virtual interface.
"""
def delete_bgp_peer(client, input, options \\ []) do
request(client, "DeleteBGPPeer", input, options)
end
@doc """
Deletes the connection.
Deleting a connection only stops the AWS Direct Connect port hour and data
transfer charges. You need to cancel separately with the providers any
services or charges for cross-connects or network circuits that connect you
to the AWS Direct Connect location.
"""
def delete_connection(client, input, options \\ []) do
request(client, "DeleteConnection", input, options)
end
@doc """
Deletes a direct connect gateway. You must first delete all virtual
interfaces that are attached to the direct connect gateway and disassociate
all virtual private gateways that are associated with the direct connect
gateway.
"""
def delete_direct_connect_gateway(client, input, options \\ []) do
request(client, "DeleteDirectConnectGateway", input, options)
end
@doc """
Deletes the association between a direct connect gateway and a virtual
private gateway.
"""
def delete_direct_connect_gateway_association(client, input, options \\ []) do
request(client, "DeleteDirectConnectGatewayAssociation", input, options)
end
@doc """
Deletes the specified interconnect.
<note> This is intended for use by AWS Direct Connect partners only.
</note>
"""
def delete_interconnect(client, input, options \\ []) do
request(client, "DeleteInterconnect", input, options)
end
@doc """
Deletes a link aggregation group (LAG). You cannot delete a LAG if it has
active virtual interfaces or hosted connections.
"""
def delete_lag(client, input, options \\ []) do
request(client, "DeleteLag", input, options)
end
@doc """
Deletes a virtual interface.
"""
def delete_virtual_interface(client, input, options \\ []) do
request(client, "DeleteVirtualInterface", input, options)
end
@doc """
Deprecated in favor of `DescribeLoa`.
Returns the LOA-CFA for a Connection.
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that your APN partner or service provider uses when establishing
your cross connect to AWS at the colocation facility. For more information,
see [Requesting Cross Connects at AWS Direct Connect
Locations](http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the AWS Direct Connect user guide.
"""
def describe_connection_loa(client, input, options \\ []) do
request(client, "DescribeConnectionLoa", input, options)
end
@doc """
Displays all connections in this region.
If a connection ID is provided, the call returns only that particular
connection.
"""
def describe_connections(client, input, options \\ []) do
request(client, "DescribeConnections", input, options)
end
@doc """
Deprecated in favor of `DescribeHostedConnections`.
Returns a list of connections that have been provisioned on the given
interconnect.
<note> This is intended for use by AWS Direct Connect partners only.
</note>
"""
def describe_connections_on_interconnect(client, input, options \\ []) do
request(client, "DescribeConnectionsOnInterconnect", input, options)
end
@doc """
Returns a list of all direct connect gateway and virtual private gateway
(VGW) associations. Either a direct connect gateway ID or a VGW ID must be
provided in the request. If a direct connect gateway ID is provided, the
response returns all VGWs associated with the direct connect gateway. If a
VGW ID is provided, the response returns all direct connect gateways
associated with the VGW. If both are provided, the response only returns
the association that matches both the direct connect gateway and the VGW.
"""
def describe_direct_connect_gateway_associations(client, input, options \\ []) do
request(client, "DescribeDirectConnectGatewayAssociations", input, options)
end
@doc """
Returns a list of all direct connect gateway and virtual interface (VIF)
attachments. Either a direct connect gateway ID or a VIF ID must be
provided in the request. If a direct connect gateway ID is provided, the
response returns all VIFs attached to the direct connect gateway. If a VIF
ID is provided, the response returns all direct connect gateways attached
to the VIF. If both are provided, the response only returns the attachment
that matches both the direct connect gateway and the VIF.
"""
def describe_direct_connect_gateway_attachments(client, input, options \\ []) do
request(client, "DescribeDirectConnectGatewayAttachments", input, options)
end
@doc """
Returns a list of direct connect gateways in your account. Deleted direct
connect gateways are not returned. You can provide a direct connect gateway
ID in the request to return information about the specific direct connect
gateway only. Otherwise, if a direct connect gateway ID is not provided,
information about all of your direct connect gateways is returned.
"""
def describe_direct_connect_gateways(client, input, options \\ []) do
request(client, "DescribeDirectConnectGateways", input, options)
end
@doc """
Returns a list of hosted connections that have been provisioned on the
given interconnect or link aggregation group (LAG).
<note> This is intended for use by AWS Direct Connect partners only.
</note>
"""
def describe_hosted_connections(client, input, options \\ []) do
request(client, "DescribeHostedConnections", input, options)
end
@doc """
Deprecated in favor of `DescribeLoa`.
Returns the LOA-CFA for an Interconnect.
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that is used when establishing your cross connect to AWS at the
colocation facility. For more information, see [Requesting Cross Connects
at AWS Direct Connect
Locations](http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the AWS Direct Connect user guide.
"""
def describe_interconnect_loa(client, input, options \\ []) do
request(client, "DescribeInterconnectLoa", input, options)
end
@doc """
Returns a list of interconnects owned by the AWS account.
If an interconnect ID is provided, it will only return this particular
interconnect.
"""
def describe_interconnects(client, input, options \\ []) do
request(client, "DescribeInterconnects", input, options)
end
@doc """
Describes the link aggregation groups (LAGs) in your account.
If a LAG ID is provided, only information about the specified LAG is
returned.
"""
def describe_lags(client, input, options \\ []) do
request(client, "DescribeLags", input, options)
end
@doc """
Returns the LOA-CFA for a connection, interconnect, or link aggregation
group (LAG).
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a
document that is used when establishing your cross connect to AWS at the
colocation facility. For more information, see [Requesting Cross Connects
at AWS Direct Connect
Locations](http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
in the AWS Direct Connect user guide.
"""
def describe_loa(client, input, options \\ []) do
request(client, "DescribeLoa", input, options)
end
@doc """
Returns the list of AWS Direct Connect locations in the current AWS region.
These are the locations that may be selected when calling
`CreateConnection` or `CreateInterconnect`.
"""
def describe_locations(client, input, options \\ []) do
request(client, "DescribeLocations", input, options)
end
@doc """
Describes the tags associated with the specified Direct Connect resources.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Returns a list of virtual private gateways owned by the AWS account.
You can create one or more AWS Direct Connect private virtual interfaces
linking to a virtual private gateway. A virtual private gateway can be
managed via the Amazon Virtual Private Cloud (VPC) console or the [EC2
CreateVpnGateway](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html)
action.
"""
def describe_virtual_gateways(client, input, options \\ []) do
request(client, "DescribeVirtualGateways", input, options)
end
@doc """
Displays all virtual interfaces for an AWS account. Virtual interfaces
deleted fewer than 15 minutes before you make the request are also
returned. If you specify a connection ID, only the virtual interfaces
associated with the connection are returned. If you specify a virtual
interface ID, then only a single virtual interface is returned.
A virtual interface (VLAN) transmits the traffic between the AWS Direct
Connect location and the customer.
"""
def describe_virtual_interfaces(client, input, options \\ []) do
request(client, "DescribeVirtualInterfaces", input, options)
end
@doc """
Disassociates a connection from a link aggregation group (LAG). The
connection is interrupted and re-established as a standalone connection
(the connection is not deleted; to delete the connection, use the
`DeleteConnection` request). If the LAG has associated virtual interfaces
or hosted connections, they remain associated with the LAG. A disassociated
connection owned by an AWS Direct Connect partner is automatically
converted to an interconnect.
If disassociating the connection will cause the LAG to fall below its
setting for minimum number of operational connections, the request fails,
except when it's the last member of the LAG. If all connections are
disassociated, the LAG continues to exist as an empty LAG with no physical
connections.
"""
def disassociate_connection_from_lag(client, input, options \\ []) do
request(client, "DisassociateConnectionFromLag", input, options)
end
@doc """
Adds the specified tags to the specified Direct Connect resource. Each
Direct Connect resource can have a maximum of 50 tags.
Each tag consists of a key and an optional value. If a tag with the same
key is already associated with the Direct Connect resource, this action
updates its value.
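A minimal input sketch (field names follow the TagResource request shape;
values are illustrative):

    input = %{
      "resourceArn" => "arn:aws:directconnect:us-east-1:123456789012:dxcon/dxcon-abc123",
      "tags" => [%{"key" => "team", "value" => "network"}]
    }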
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes one or more tags from the specified Direct Connect resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the attributes of a link aggregation group (LAG).
You can update the following attributes:
<ul> <li> The name of the LAG.
</li> <li> The value for the minimum number of connections that must be
operational for the LAG itself to be operational.
</li> </ul> When you create a LAG, the default value for the minimum number
of operational connections is zero (0). If you update this value, and the
number of operational connections falls below the specified value, the LAG
will automatically go down to avoid overutilization of the remaining
connections. Adjusting this value should be done with care as it could
force the LAG down if the value is set higher than the current number of
operational connections.
"""
def update_lag(client, input, options \\ []) do
request(client, "UpdateLag", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "directconnect"}
host = get_host("directconnect", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "OvertureService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/direct_connect.ex
| 0.872007
| 0.54462
|
direct_connect.ex
|
starcoder
|
defmodule Topo.LineLine do
@moduledoc false
alias Topo.Util
@spec relate(list, list) :: atom
def relate(a, b) do
do_linestring_intersects_linestring(a, b)
end
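# Illustrative sketch of the possible results (coordinates are made up;
# linestrings are lists of {x, y} tuples):
#
#     relate([{0, 0}, {2, 2}], [{0, 2}, {2, 0}])  # crossing           => :interior
#     relate([{0, 0}, {2, 0}], [{1, 0}, {3, 0}])  # collinear overlap  => :edge
#     relate([{0, 0}, {1, 0}], [{0, 2}, {1, 2}])  # no contact         => :disjoint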
@spec contains?(list, list) :: boolean
def contains?([], _), do: false
def contains?([_], _), do: false
def contains?([a1, a2 | rest], [b1, b2]) do
(Util.collinear?(a1, a2, b1) && Util.between?(a1, a2, b1) && Util.collinear?(a1, a2, b2) &&
Util.between?(a1, a2, b2)) || contains?([a2 | rest], [b1, b2])
end
def contains?(a, b) do
do_contains?(a, b) || do_contains?(Enum.reverse(a), b)
end
defp do_contains?(a, b) do
cond do
List.first(a) == List.last(a) && List.first(b) === List.last(b) ->
ring_contains_ring?(Enum.drop(a, 1), Enum.drop(b, 1))
List.first(a) == List.last(a) ->
ring_contains_line?(Enum.drop(a, 1), b)
!contiguous_subset?(a, b |> Enum.drop(1) |> Enum.drop(-1)) ->
false
first_and_last_on_line?(a, b) ->
true
true ->
false
end
end
defp ring_contains_ring?(a, b, offset \\ 0) do
cond do
offset >= length(b) -> false
Enum.drop(b, offset) ++ Enum.take(b, offset) == a -> true
true -> ring_contains_ring?(a, b, offset + 1)
end
end
defp ring_contains_line?(_, _, offset \\ 0)
defp ring_contains_line?(a, _, offset) when offset >= length(a), do: false
defp ring_contains_line?(a, b, offset) do
spin = Enum.drop(a, offset) ++ Enum.take(a, offset)
cond do
!contiguous_subset?(spin, b |> Enum.drop(1) |> Enum.drop(-1)) ->
ring_contains_line?(a, b, offset + 1)
first_and_last_on_line?(spin, b) ->
true
true ->
ring_contains_line?(a, b, offset + 1)
end
end
defp contiguous_subset?(a, b) do
Enum.take(line_up_head(a, b), length(b)) == b ||
Enum.take(line_up_head(Enum.reverse(a), b), length(b)) == b
end
defp line_up_head([], _), do: []
defp line_up_head([a | rest], [b | _]) when a == b, do: [a | rest]
defp line_up_head([_ | rest], b), do: line_up_head(rest, b)
defp first_and_last_on_line?(a, b) do
i1 = Enum.find_index(a, &(&1 == Enum.at(b, 1)))
in1 = length(a) - Enum.find_index(Enum.reverse(a), &(&1 == Enum.at(b, -2))) - 1
cond do
i1 < in1 ->
on_line_before?(a, List.first(b), i1) && on_line_after?(a, List.last(b), in1)
i1 > in1 ->
on_line_after?(a, List.first(b), i1) && on_line_before?(a, List.last(b), in1)
i1 == in1 ->
(on_line_after?(a, List.first(b), i1) && on_line_before?(a, List.last(b), in1)) ||
(on_line_before?(a, List.first(b), i1) && on_line_after?(a, List.last(b), in1))
end
end
defp on_line_before?(_, _, i) when i < 1, do: false
defp on_line_before?(e, p, i) do
a = Enum.at(e, i - 1)
b = Enum.at(e, i)
Util.collinear?(a, b, p) && Util.between?(a, b, p)
end
defp on_line_after?(e, _, i) when i + 1 >= length(e), do: false
defp on_line_after?(e, p, i) do
a = Enum.at(e, i + 1)
b = Enum.at(e, i)
Util.collinear?(a, b, p) && Util.between?(a, b, p)
end
defp do_linestring_intersects_linestring([], _), do: :disjoint
defp do_linestring_intersects_linestring([_], _), do: :disjoint
defp do_linestring_intersects_linestring([a, b | rest], ls) do
case do_linestring_intersects_segment(ls, {a, b}) do
:disjoint ->
do_linestring_intersects_linestring([b | rest], ls)
:edge ->
:edge
_ ->
case do_linestring_intersects_linestring([b | rest], ls) do
:edge -> :edge
_ -> :interior
end
end
end
defp do_linestring_intersects_segment([], _), do: :disjoint
defp do_linestring_intersects_segment([_], _), do: :disjoint
defp do_linestring_intersects_segment([a, b | rest], {p1, p2}) do
case SegSeg.intersection(a, b, p1, p2) do
{_, :disjoint, _} ->
do_linestring_intersects_segment([b | rest], {p1, p2})
{_, :edge, _} ->
:edge
_ ->
case do_linestring_intersects_segment([b | rest], {p1, p2}) do
:edge -> :edge
_ -> :interior
end
end
end
end
|
lib/topo/line_line.ex
| 0.724383
| 0.578984
|
line_line.ex
|
starcoder
|
defmodule Iteraptor.Array do
@moduledoc """
Array emulation implementing the `Access` behaviour. Indexing is zero-based.
`Array` is the "go to" array data structure in Elixir. An array can be
constructed using `Array.new/{0,1}`:
iex> Iteraptor.Array.new()
#Array<[]>
iex> Iteraptor.Array.new(2)
#Array<[nil, nil]>
iex> Iteraptor.Array.new([:foo, :bar])
#Array<[:foo, :bar]>
An array can contain any kind of elements, and elements in an array don't have
to be of the same type. By definition, arrays have _keys_ in `0..size-1` range.
Arrays are implicitly expandable, which means adding an element at index `100`
to the array currently containing 1 element would increase the size of the
array to `101`.
iex> array = Iteraptor.Array.new([:foo])
iex> Iteraptor.Array.set(array, 3, :bar)
#Array<[:foo, nil, nil, :bar]>
An `Array` is represented internally using the `%Array{}` struct. Note that,
however, the struct fields are private and must not be accessed directly;
use the functions in this module to perform operations on arrays.
`Array`s can also be constructed starting from other collection-type data
structures: for example, see `Array.new/1` or `Enum.into/2`.
iex> Enum.into([1, 2, 3], Iteraptor.Array.new())
#Array<[1, 2, 3]>
`Array`s do implement `Access` behaviour.
iex> array = Iteraptor.Array.new([%{foo: 42}, %{bar: :baz}])
iex> get_in(array, [0, :foo])
42
"""
# Arrays have an underlying Map. Array elements are values of said map,
# indices are keys.
@type value :: term
@type t :: %__MODULE__{map: %{required(non_neg_integer) => any()}}
defstruct map: %{}, version: 1
@behaviour Access
alias Iteraptor.Array
@doc """
Returns a new array.
iex> Iteraptor.Array.new()
#Array<[]>
Creates an array of the given length or from an enumerable. Might be used to
wrap an existing instance of `Iteraptor.Array`.
iex> Iteraptor.Array.new(3)
#Array<[nil, nil, nil]>
iex> Iteraptor.Array.new([:foo, :bar, 42])
#Array<[:foo, :bar, 42]>
A transformation function might also be passed as the second argument.
iex> Iteraptor.Array.new([1, 2, 3], fn x -> 2 * x end)
#Array<[2, 4, 6]>
"""
# @spec new(enumerable :: nil | t() | integer() | Enum.t(), transform :: (term() -> any())) :: t()
def new(enumerable \\ nil, transform \\ nil)
def new(nil, nil), do: %Array{}
def new(%__MODULE__{} = array, nil), do: array
def new(n, nil) when is_integer(n) and n >= 0,
do: Iteraptor.Array.new(List.duplicate(nil, n))
def new(enumerable, nil) do
list = Enum.to_list(enumerable)
map =
0..(length(list) - 1)
|> Enum.zip(list)
|> Enum.into(%{})
%Array{map: map}
end
def new(enumerable, transform) when is_function(transform, 1) do
list =
enumerable
|> Enum.map(&transform.(&1))
|> Enum.to_list()
map =
0..(length(list) - 1)
|> Enum.zip(list)
|> Enum.into(%{})
%Array{map: map}
end
@doc """
Appends another enumerable to the array.
iex> array = Iteraptor.Array.new([1, 2, 3])
iex> Iteraptor.Array.append(array, [4, 5])
#Array<[1, 2, 3, 4, 5]>
"""
@spec append(t(), any()) :: t()
def append(%Array{map: map} = array, other) do
# Keys are contiguous 0..size-1, so the last used index is map_size - 1
# (Map.keys/1 ordering is not guaranteed)
index = map_size(map) - 1
map =
if Enumerable.impl_for(other) do
appender =
other
|> Enum.reduce({index + 1, []}, fn e, {i, acc} -> {i + 1, [{i, e} | acc]} end)
|> elem(1)
|> Enum.reverse()
|> Enum.into(%{})
Map.merge(map, appender)
else
Map.put(map, index + 1, other)
end
%Array{array | map: map}
end
@doc """
Returns the `value` at `index` in `array`, or `default` if index is out of array bounds.
iex> array = Iteraptor.Array.new([42])
iex> Iteraptor.Array.get(array, 0)
42
iex> Iteraptor.Array.get(array, 2, 42)
42
"""
@spec get(t(), non_neg_integer(), any()) :: any()
def get(array, index, default \\ nil)
def get(%Array{map: map}, index, default) when index < 0 or index >= map_size(map), do: default
def get(%Array{map: map}, index, _default), do: map[index]
@doc """
Pops (deletes) `value` at `index` from `array`, setting the value at the
respective index to `nil`.
Returns a tuple containing the value removed and the new array.
iex> array = Iteraptor.Array.new([1, 2, 3])
iex> {elem, array} = Iteraptor.Array.pop(array, 1)
iex> elem
2
iex> array
#Array<[1, nil, 3]>
"""
@impl Access
@spec pop(t(), non_neg_integer()) :: {any(), t()}
def pop(%Array{map: map} = array, index) do
value = map[index]
{value, %{array | map: Map.put(map, index, nil)}}
end
@doc """
Sets the `value` at `index` in `array`, expanding the array if necessary.
Returns a new array.
iex> array = Iteraptor.Array.new([42])
iex> Iteraptor.Array.set(array, 0, :foo)
#Array<[:foo]>
iex> Iteraptor.Array.set(array, 2, :bar)
#Array<[42, nil, :bar]>
"""
@spec set(t(), non_neg_integer(), any()) :: t()
def set(%Array{map: map} = array, index, value) do
size = Array.size(array)
map =
if size > index do
Map.put(map, index, value)
else
fill = Enum.reverse([value | List.duplicate(nil, index - size)])
Map.merge(map, Enum.into(Enum.zip(size..index, fill), %{}))
end
%Array{array | map: map}
end
@doc """
Trims `nil` values from the tail of the `Array`. Returns a trimmed array.
iex> array = Iteraptor.Array.new([42, nil, nil])
#Array<[42, nil, nil]>
iex> Iteraptor.Array.trim(array)
#Array<[42]>
"""
@spec trim(array :: t()) :: t()
def trim(%Array{map: map} = array) do
map =
map
# Sort by index first: map enumeration order is not guaranteed
|> Enum.sort()
|> Enum.reverse()
|> Enum.drop_while(fn
{_, nil} -> true
_ -> false
end)
|> Enum.reverse()
|> Enum.into(%{})
%Array{array | map: map}
end
@doc """
Returns the number of elements in `array`.
iex> Iteraptor.Array.size(Iteraptor.Array.new([1, 2, 3]))
3
"""
@spec size(t()) :: non_neg_integer()
def size(%Array{map: map}), do: map_size(map)
@doc """
Converts `array` to a list.
iex> Iteraptor.Array.to_list(Iteraptor.Array.new([1, 2, 3]))
[1, 2, 3]
"""
@spec to_list(t()) :: [any()]
def to_list(%Array{map: map}),
do: map |> Enum.sort() |> Enum.map(&elem(&1, 1))
@doc """
Converts a tuple given as parameter to `array`.
iex> Iteraptor.Array.from_tuple({1, 2, 3})
#Array<[1, 2, 3]>
"""
@spec from_tuple(tuple :: tuple()) :: t()
def from_tuple(tuple) when is_tuple(tuple),
do: tuple |> Tuple.to_list() |> Array.new()
### Access behaviour
@doc false
@impl true
def fetch(%Array{map: map}, index), do: Map.fetch(map, index)
@doc false
@impl true
def get_and_update(%Array{map: map} = array, index, function) do
case function.(map[index]) do
:pop -> Array.pop(array, index)
{get_value, update_value} -> {get_value, Array.set(array, index, update_value)}
end
end
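# Illustrative Access usage via the callbacks above:
#
#     array = Iteraptor.Array.new([1, 2, 3])
#     update_in(array[1], &(&1 * 10))
#     #=> #Array<[1, 20, 3]>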
defimpl Enumerable do
def count(array) do
{:ok, Array.size(array)}
end
def member?(%Array{map: map}, val) do
{:ok,
!!Enum.find(map, fn
{_, ^val} -> true
{_, _} -> false
end)}
end
def reduce(array, acc, fun),
do: Enumerable.List.reduce(Array.to_list(array), acc, fun)
if Version.compare(System.version(), "1.10.0-dev") == :lt do
def slice(array) do
{:ok, Array.size(array), &Enumerable.List.slice(Array.to_list(array), &1, &2)}
end
else
defp slice(_list, _start, 0, _size), do: []
defp slice(list, start, count, size) when start + count == size, do: list |> drop(start)
defp slice(list, start, count, _size), do: list |> drop(start) |> take(count)
defp drop(list, 0), do: list
defp drop([_ | tail], count), do: drop(tail, count - 1)
defp take(_list, 0), do: []
defp take([head | tail], count), do: [head | take(tail, count - 1)]
def slice(array) do
size = Array.size(array)
{:ok, size, &slice(Array.to_list(array), &1, &2, size)}
end
end
end
defimpl Collectable do
def into(array) do
fun = fn
list, {:cont, x} -> [x | list]
list, :done -> Array.append(array, Enum.reverse(list))
_, :halt -> :ok
end
{[], fun}
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(array, opts) do
opts = %Inspect.Opts{opts | charlists: :as_lists}
concat(["#Array<", Inspect.List.inspect(Array.to_list(array), opts), ">"])
end
end
end
|
lib/iteraptor/array.ex
| 0.873377
| 0.694529
|
array.ex
|
starcoder
|
defmodule StepFlow.WorkflowDefinitions.WorkflowDefinition do
@moduledoc """
The WorkflowDefinition context.
"""
use Ecto.Schema
import Ecto.Changeset
require Logger
alias StepFlow.Repo
alias StepFlow.Rights.Right
alias StepFlow.WorkflowDefinitions.ExternalLoader
alias StepFlow.WorkflowDefinitions.WorkflowDefinition
schema "step_flow_workflow_definition" do
field(:schema_version, :string)
field(:identifier, :string)
field(:label, :string, default: "")
field(:icon, :string, default: "")
field(:version_major, :integer)
field(:version_minor, :integer)
field(:version_micro, :integer)
field(:tags, {:array, :string}, default: [])
field(:is_live, :boolean, default: false)
field(:steps, {:array, :map}, default: [])
field(:start_parameters, {:array, :map}, default: [])
field(:parameters, {:array, :map}, default: [])
many_to_many(:rights, Right, join_through: "step_flow_workflow_definition_right")
timestamps()
end
@doc false
def changeset(%WorkflowDefinition{} = workflow_definition, attrs) do
workflow_definition
|> cast(attrs, [
:schema_version,
:identifier,
:label,
:icon,
:version_major,
:version_minor,
:version_micro,
:tags,
:is_live,
:steps,
:start_parameters,
:parameters
])
|> cast_assoc(:rights, required: true)
|> validate_required([
:schema_version,
:identifier,
:version_major,
:version_minor,
:version_micro
])
|> unique_constraint(
:identifier,
name: :workflow_identifier_index
)
end
def valid?(definition) do
get_schema()
|> JsonXema.valid?(definition)
end
def validate(definition) do
get_schema()
|> JsonXema.validate(definition)
end
defp get_schema do
schema =
Application.get_env(
StepFlow.WorkflowDefinitions.WorkflowDefinition,
:workflow_schema_url,
"https://media-cloud.ai/standard/1.8/workflow-definition.schema.json"
)
|> load_content()
|> Jason.decode!()
:ok = JsonXema.SchemaValidator.validate("http://json-schema.org/draft-07/schema#", schema)
JsonXema.new(schema, loader: ExternalLoader)
end
defp load_content("http://" <> _ = url) do
HTTPoison.get!(url)
|> Map.get(:body)
end
defp load_content("https://" <> _ = url) do
HTTPoison.get!(url)
|> Map.get(:body)
end
defp load_content(source_filename) do
File.read!(source_filename)
end
def get_workflow_definition_directories do
Application.get_env(:step_flow, StepFlow)
|> Keyword.get(:workflow_definition)
|> case do
{:system, key} ->
System.get_env(key)
|> String.split(get_separator())
key when is_list(key) ->
key
key when is_bitstring(key) ->
[key]
key ->
Logger.info("unable to use #{inspect(key)} to list directory")
[]
end
end
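# Illustrative configuration shapes matching the cases above (the env var
# name is made up; the :workflow_definition key is what this function reads):
#
#     config :step_flow, StepFlow,
#       workflow_definition: {:system, "WORKFLOW_DEFINITIONS"}
#
#     config :step_flow, StepFlow,
#       workflow_definition: ["/etc/workflows", "/opt/workflows"]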
def load_workflows do
get_workflow_definition_directories()
|> Enum.map(fn directory ->
list_workflow_definitions_for_a_directory(directory)
end)
|> List.flatten()
end
def load_workflows_in_database do
get_workflow_definition_directories()
|> Enum.map(fn directory ->
list_workflow_definitions_for_a_directory(directory)
end)
|> List.flatten()
|> Enum.each(fn workflow_definition ->
%WorkflowDefinition{}
|> WorkflowDefinition.changeset(workflow_definition)
|> Repo.insert()
end)
end
defp get_separator do
if :os.type() |> elem(0) == :unix do
":"
else
";"
end
end
defp list_workflow_definitions_for_a_directory(directory) do
File.ls!(directory)
|> Enum.filter(fn filename ->
String.ends_with?(filename, ".json")
end)
|> Enum.map(fn filename ->
Path.join(directory, filename)
|> File.read!()
|> Jason.decode!()
end)
|> Enum.filter(fn workflow_definition ->
if valid?(workflow_definition) do
true
else
errors = validate(workflow_definition)
Logger.error("Workflow definition not valid: #{inspect(errors)}")
false
end
end)
end
end
|
lib/step_flow/workflow_definitions/workflow_definition.ex
| 0.608361
| 0.442998
|
workflow_definition.ex
|
starcoder
|
defmodule MonoRepo do
@moduledoc """
The MonoRepo library offers you a pattern of developing your applications in a
mono repository and a set of functions to do it easily.
With the mono repo pattern, your umbrella applications can nest other mono or umbrella
applications. The library is split into 3 modules named after application
lifecycle phases: Build, Test, Release. For specific functionality documentation
please refer to corresponding module docs.
### Root application
It is the top-level application, which must be an umbrella application. It is
recommended to use an empty string ("") as the *:apps_path* value to avoid
dependency declaration issues.
### Parent application
Any linked application that is closer to the project's root application is a
parent application. The /1 functions from the `MonoRepo.Build` module can be used
to target them if you want to keep common configuration or dependencies in a
parent's folder instead of the root one.
### Child application
Any linked application that is further from the project's root application is
a child application. Child applications can be developed as standalone apps,
except that their mix.exs can be set to use the root's/parent's folders for
keeping dependencies and build artefacts.
Applications should be nested in the parent's apps folder. For example:
"app0/apps/app1/apps/app2".
### Using
`MonoRepo` is not a part of the mix application, so it is not loaded at the
moment when mix.exs is being read. There are two ways of fixing that:
1. append `MonoRepo`'s beam files' path before making calls to it. Use `Code`
module:
```elixir
true = Code.append_path("_build/dev/lib/mono_repo/ebin")
```
Be sure the :mono_repo dependency is compiled before using any of its modules.
2. Put the `MonoRepo` code under the `Mix.Project.MonoRepo` namespace, compile it,
copy it to the mix lib folder (`/usr/lib/elixir/lib/mix/ebin/` on my machine) and
add the alien modules to the modules list in `mix.app`. After those manipulations,
you will be able to import `MonoRepo` modules. This is dirty and not recommended,
but very convenient and effective.
### Build
Most of the time you'll want to keep all dependencies and build artefacts in
one place to avoid duplication, version mismatches and manual searches through
convoluted paths. You can do so by:
1. Assigning *:build_path* to `MonoRepo.Build.build_path/0`.
2. Assigning *:deps_path* to `MonoRepo.Build.build_deps_path/0`.
3. Assigning ":lockfile" to `MonoRepo.Build.build_lockfile_path/0`.
Umbrella's documentation recommends keeping child application configuration in
parent's one. If you feel like doing that, you can use
`MonoRepo.Build.build_config_path/0` or `MonoRepo.Build.build_config_path/1`. If
your application is deeply nested, it can be tedious and error-prone to type
dots in the path to a parent app. Consider typing *build_config_path()* instead of
*"../../../../../../config/config.exs"*. Personally, I prefer to have the application's
configuration at hand. For releases, a separate configuration file is declared.
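A sketch of a child application's `project/0` using these helpers (assuming
`MonoRepo.Build` is loadable as described above):
```elixir
import MonoRepo.Build

def project do
  [
    app: :my_child_app,
    version: "0.1.0",
    build_path: build_path(),
    deps_path: build_deps_path(),
    lockfile: build_lockfile_path()
  ]
end
```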
### Test
It is possible to run all children's tests at once by properly using the
functions of the `MonoRepo.Test` module. The general recommendation is to use
the root-level project folder mainly for testing purposes.
1. Build a unified configuration by running `MonoRepo.Test.build_config_files()`.
This will traverse the root application's tree and copy all *config.exs* and
*test.exs* configuration files into two single files under the *./config* path.
2. Assign the *:test_paths* key in the root application to
`MonoRepo.Test.build_test_paths/0`.
3. Assign the *:deps* key to `MonoRepo.Test.build_deps/0`. If you need to define
other dependencies, you can do it in a standard way within the *deps/0* function
and concatenate both lists with the `++` operator.
Now run `mix test` to run all tests in all child applications. Step one level
down into the apps folder, repeat the setup and run it again to test the
applications one level lower.
If you use the *:apps_path* key in your `Mix.Project` declaration, testing won't be
available; even direct calls to test files won't work. The workaround is to have
a separate *mix.exs* file or to comment out the *:apps_path* line while testing.
Testing requires applications and their dependencies to be started. That's why
we use `build_deps/0` as the value of the *:deps* key.
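A root-level `project/0` sketch following the steps above (assuming
`MonoRepo.Test` is loadable):
```elixir
import MonoRepo.Test

def project do
  [
    app: :my_root_app,
    version: "0.1.0",
    test_paths: build_test_paths(),
    deps: build_deps() ++ deps()
  ]
end
```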
### Release
Release does not hack or alter Mix.Release functionality.
To make a release you must describe it in *rel/definitions.exs*, following the
instructions in the `MonoRepo.Release` module. *rel/"release name".exs* must hold
a regular application configuration specific to your release. *rel/mix.exs* must
declare a MixProject suitable for releasing:
1. *:deps* key must be set to `MonoRepo.Release.build_deps/0`
2. *:releases* - to `MonoRepo.Release.build_releases/0`
3. *:config_path* to `MonoRepo.Release.build_config_path/0` as a compile-time
configuration.
This way your app file will get only the necessary arguments and your VM won't get
rebooted after configuration loading, unless you use a releases.exs configuration as well.
Keep *mono_repo* as a dependency in the release mix project file to avoid
erasing its beam files.
If you need run-time configuration, release.exs will be searched for in the *config*
folder and loaded by `Mix.Release`. If you've got any other dependencies, you
can define them in *deps/0* as usual and concatenate that list with the release
dependencies, like this: `deps: deps() ++ build_deps()`.
`MonoRepo.Release` uses the release name command-line argument, so **make sure you
don't put any switches or arguments before the name**.
*mix.exs* sample:
```elixir
...
import MonoRepo.Release
...
def project do
[
...
config_path: build_config_path(),
deps: build_deps() ++ deps(),
releases: build_releases()
]
end
defp deps do
[
{:mono_repo, path: "../mono_repo", only: [:dev, :prod], runtime: false},
]
end
...
```
"""
@typedoc """
Child is an application atom whose position is lower in a mono repo hierarchy.
"""
@type child :: atom()
@typedoc """
Parent is an application atom whose position is higher in a mono repo hierarchy.
"""
@type parent :: atom()
@typedoc """
Root is an application atom whose position is the highest in a mono repo
hierarchy.
"""
@type root :: atom()
@doc """
Returns the version read from the "version" file in the application's root
directory. The aim is to avoid forgetting to update the version in either the
development mix.exs or rel/mix.exs. The dev mix.exs should read the version from
this file using this function as well.
"""
@spec version() :: Version.version()
def version() do
"version"
|> File.read!()
|> String.trim_trailing()
end
end
|
lib/mono_repo.ex
| 0.725746
| 0.725527
|
mono_repo.ex
|
starcoder
|
defmodule Day9 do
@moduledoc """
--- Day 9: Stream Processing ---
A large stream blocks your path. According to the locals, it's not safe to cross the stream at the moment because it's
full of garbage. You look down at the stream; rather than water, you discover that it's a stream of characters.
You sit for a while and record part of the stream (your puzzle input). The characters represent groups - sequences
that begin with { and end with }. Within a group, there are zero or more other things, separated by commas: either
another group or garbage. Since groups can contain other groups, a } only closes the most-recently-opened unclosed
group - that is, they are nestable. Your puzzle input represents a single, large group which itself contains many
smaller ones.
Sometimes, instead of a group, you will find garbage. Garbage begins with < and ends with >. Between those angle
brackets, almost any character can appear, including { and }. Within garbage, < has no special meaning.
In a futile attempt to clean up the garbage, some program has canceled some of the characters within it using !:
inside garbage, any character that comes after ! should be ignored, including <, >, and even another !.
You don't see any characters that deviate from these rules. Outside garbage, you only find well-formed groups,
and garbage always terminates according to the rules above.
Here are some self-contained pieces of garbage:
<>, empty garbage.
<random characters>, garbage containing random characters.
<<<<>, because the extra < are ignored.
<{!>}>, because the first > is canceled.
<!!>, because the second ! is canceled, allowing the > to terminate the garbage.
<!!!>>, because the second ! and the first > are canceled.
<{o"i!a,<{i<a>, which ends at the first >.
Here are some examples of whole streams and the number of groups they contain:
{}, 1 group.
{{{}}}, 3 groups.
{{},{}}, also 3 groups.
{{{},{},{{}}}}, 6 groups.
{<{},{},{{}}>}, 1 group (which itself contains garbage).
{<a>,<a>,<a>,<a>}, 1 group.
{{<a>},{<a>},{<a>},{<a>}}, 5 groups.
{{<!>},{<!>},{<!>},{<a>}}, 2 groups (since all but the last > are canceled).
Your goal is to find the total score for all groups in your input. Each group is assigned a score which is one more
than the score of the group that immediately contains it. (The outermost group gets a score of 1.)
{}, score of 1.
{{{}}}, score of 1 + 2 + 3 = 6.
{{},{}}, score of 1 + 2 + 2 = 5.
{{{},{},{{}}}}, score of 1 + 2 + 3 + 3 + 3 + 4 = 16.
{<a>,<a>,<a>,<a>}, score of 1.
{{<ab>},{<ab>},{<ab>},{<ab>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<!!>},{<!!>},{<!!>},{<!!>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<a!>},{<a!>},{<a!>},{<ab>}}, score of 1 + 2 = 3.
What is the total score for all groups in your input?
"""
def common_part(charlist) do
{count, _, removed, _, _} = charlist |>
List.foldl({0, 0, 0, false, false}, fn(char, acc) -> count_groups(char, acc) end)
{count,removed}
end
def part_a do
{a,_b}=File.read!("res/day9.input") |>
String.to_charlist |>
common_part
a
end
def test do
{0,0}=common_part('<>')
{0,17}=common_part('<random characters>')
{0,3}=common_part('<<<<>')
{0,2}=common_part('<{!>}>')
{0,0}=common_part('<!!>')
{0,0}=common_part('<!!!>>')
{0,10}=common_part('<{o"i!a,<{i<a>')
{6,0}=common_part('{{{}}}')
{5,0}=common_part('{{},{}}')
{16,0}=common_part('{{{},{},{{}}}}')
{1,4}=common_part('{<a>,<a>,<a>,<a>}')
{9,8}=common_part('{{<ab>},{<ab>},{<ab>},{<ab>}}')
{9,0}=common_part('{{<!!>},{<!!>},{<!!>},{<!!>}}')
#common_part('{{<a!>},{<a!>},{<a!>},{<ab>}}')
:pass
end
def part_b do
{_a,b}=File.read!("res/day9.input") |>
String.to_charlist |>
common_part
b
end
def test_b do
# common_part/1 returns {score, removed}; match on the relevant element
{_, 0} = common_part('<>')
{_, 17} = common_part('<random characters>')
{_, 3} = common_part('<<<<>')
{_, 2} = common_part('<{!>}>')
{_, 0} = common_part('<!!>')
{_, 0} = common_part('<!!!>>')
{_, 10} = common_part('<{o"i!a,<{i<a>')
{6, _} = common_part('{{{}}}')
{5, _} = common_part('{{},{}}')
{16, _} = common_part('{{{},{},{{}}}}')
{1, _} = common_part('{<a>,<a>,<a>,<a>}')
{9, _} = common_part('{{<ab>},{<ab>},{<ab>},{<ab>}}')
{9, _} = common_part('{{<!!>},{<!!>},{<!!>},{<!!>}}')
{3, _} = common_part('{{<a!>},{<a!>},{<a!>},{<ab>}}')
:pass
end
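# Accumulator for count_groups/2:
#   {score, depth, removed_count, in_garbage?, cancel_next?}
# Opening a group adds (1 + current depth) to the score; removed_count
# counts non-canceled characters inside garbage.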
defp count_groups(?{, {c, n, r, false, false}) do
{c + 1 + n, n + 1, r, false, false}
end
defp count_groups(?}, {c, n, r, false, false}) do
{c, n - 1, r, false, false}
end
defp count_groups(?<, {c, n, r, false, false}) do
{c, n, r, true, false}
end
defp count_groups(?>, {c, n, r, true, false}) do
{c, n, r, false, false}
end
defp count_groups(?!, {c, n, r, garbage_bool, not_bool}) do
{c, n, r, garbage_bool, not not_bool}
end
defp count_groups(_, {c, n, r, true, false}) do
{c, n, r+1, true, false}
end
defp count_groups(_, {c, n, r, garbage_bool, true}) do
{c, n, r, garbage_bool, false}
end
defp count_groups(_, acc) do
acc
end
end
|
lib/day9.ex
| 0.565899
| 0.663875
|
day9.ex
|
starcoder
|
defmodule EdExplorer.Octicons do
@moduledoc """
Octicons are a scalable set of icons handcrafted with <3 by GitHub.
This module is designed to operate identically to the [Node module][octicons-node] of the same
name.
[octicons-node]: https://www.npmjs.com/package/octicons
"""
@type t :: map
@doc false
def start_link do
data =
with {:ok, text} <- File.read("./assets/node_modules/octicons/build/data.json"),
{:ok, data} <- Poison.decode(text),
do: data
Agent.start_link(fn -> data end, name: __MODULE__)
end
@doc """
Retrieves the attributes of the icon.
"""
@spec icon(AtomStyleTweaks.octicon_name) :: t
def icon(name) when is_atom(name), do: icon(Atom.to_string(name))
def icon(name) do
name
|> get_data
|> Map.merge(default_options(name))
|> Map.merge(%{"symbol" => name})
end
@doc """
Returns the SVG tag that renders the icon.
## Options
* `:"aria-label"` Aria label for the SVG tag. When `aria-label` is specified, the `aria-hidden`
attribute is removed.
* `:class` CSS class text to add to the classes already present
* `:height` Height in pixels to render the icon at. If only `height` is specified, width is
calculated to maintain the aspect ratio.
* `:width` Width in pixels to render the icon at. If only `width` is specified, height is
calculated to maintain the aspect ratio.
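## Example

An illustrative call (assumes `start_link/0` has been called so the icon
data is loaded, and that the named icon exists in that data):

    EdExplorer.Octicons.toSVG("alert", height: 32)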
"""
@spec toSVG(AtomStyleTweaks.octicon_name | t, keyword) :: String.t
def toSVG(icon, options \\ [])
def toSVG(name, options) when is_atom(name) or is_binary(name), do: toSVG(icon(name), options)
# Stringify keyword keys so they match the string-keyed option patterns below
def toSVG(icon_data, options) when is_list(options),
  do: toSVG(icon_data, Map.new(options, fn {k, v} -> {to_string(k), v} end))
def toSVG(icon_data = %{}, options) do
symbol = icon_data["symbol"]
path = icon_data["path"]
"<svg #{html_attributes(symbol, options)}>#{path}</svg>"
end
defp aria(map, %{"aria-label" => label}) do
map
|> Map.merge(%{"aria-label" => label})
|> Map.merge(%{"role" => "img"})
|> Map.delete("aria-hidden")
end
defp aria(map, _), do: map
defp class(map, key, %{"class" => option_class}) do
map
|> Map.merge(
%{
"class" => String.trim("octicons octicons-#{key} #{option_class}")
}
)
end
defp class(map, _, _), do: map
defp dimensions(map, _, %{"height" => height, "width" => width}) do
map
|> Map.merge(%{"height" => height, "width" => width})
end
defp dimensions(map, key, %{"height" => height}) do
data = get_data(key)
map
|> Map.merge(
%{
"height" => height,
"width" => parse_int(height) * parse_int(data["width"]) / parse_int(data["height"])
}
)
end
defp dimensions(map, key, %{"width" => width}) do
data = get_data(key)
map
|> Map.merge(
%{
"height" => parse_int(width) * parse_int(data["height"]) / parse_int(data["width"]),
"width" => width
}
)
end
defp dimensions(map, _, _), do: map
defp get_data(key) do
Agent.get(__MODULE__, &Map.get(&1, key))
end
defp default_options(key) do
data = get_data(key)
%{
"version" => "1.1",
"width" => data["width"],
"height" => data["height"],
"viewBox" => "0 0 #{data["width"]} #{data["height"]}",
"class" => "octicons octicons-#{key}",
"aria-hidden" => "true"
}
end
defp html_attributes(key, options) do
key
|> get_data
|> Map.merge(default_options(key))
|> Map.merge(options)
|> dimensions(key, options)
|> class(key, options)
|> aria(options)
|> Map.delete("keywords")
|> Map.delete("path")
|> Map.to_list
|> Enum.map(fn({key, value}) -> "#{key}=\"#{value}\"" end)
|> Enum.join(" ")
|> String.trim
end
defp parse_int(text) do
{int, _} = Integer.parse(text)
int
end
end
|
lib/ed_explorer/octicons.ex
| 0.83471
| 0.512144
|
octicons.ex
|
starcoder
|
defmodule Perspectives do
defmodule PointOfView do
@enforce_keys [:date, :person, :health, :slope]
defstruct [:date, :person, :health, :slope]
@type signed_byte :: -128..127
@type date :: Time.t()
@type person :: String.t()
@type range :: signed_byte()
@type t :: %__MODULE__{
date: date(),
person: person(),
health: range(),
slope: range()
}
end
defmodule Metric do
@enforce_keys [:name, :criteria, :good_criteria, :bad_criteria, :points_of_view]
defstruct [:name, :criteria, :good_criteria, :bad_criteria, :points_of_view]
@type name :: String.t()
@type criteria :: String.t()
@type good_criteria :: String.t()
@type bad_criteria :: String.t()
@type t :: %__MODULE__{
name: name(),
criteria: criteria(),
good_criteria: good_criteria(),
bad_criteria: bad_criteria(),
points_of_view: list(PointOfView.t())
}
end
@enforce_keys [:_metrics, :_names]
defstruct [:_metrics, :_names]
@opaque t :: %__MODULE__{
_metrics: list(Metric.t()),
_names: MapSet.t(Metric.name())
}
@type graph :: list(Metric.t())
@type serialised_state :: graph()
@spec new() :: {:ok, internal_state :: t()}
def new do
internal_state = %Perspectives{
_metrics: [],
_names: MapSet.new()
}
{:ok, internal_state}
end
@spec serialise(internal_state :: t() | {:ok, internal_state :: t()}) :: serialised_state()
def serialise(internal_state), do: graph(internal_state)
@spec deserialise(serialised_state :: serialised_state()) :: {:error, :invalid_serialised_state} | {:ok, internal_state :: t()}
def deserialise(serialised_state) do
try do
metrics = serialised_state
names = Enum.reduce(metrics, MapSet.new(), fn metric, names ->
MapSet.put(names, metric.name)
end)
{:ok, %Perspectives{_metrics: metrics, _names: names}}
rescue
_ -> {:error, :invalid_serialised_state}
end
end
@spec graph(internal_state :: t()) :: graph()
def graph(
%Perspectives{_metrics: metrics}
) do
metrics
end
def graph({:ok, internal_state}), do: graph(internal_state)
@type metric_to_add :: %{
required(:name) => Metric.name(),
required(:criteria) => Metric.criteria(),
required(:good_criteria) => Metric.good_criteria(),
required(:bad_criteria) => Metric.bad_criteria()
}
@spec add_metric(internal_state :: t(), metric_to_add()) :: {:error, :existent_metric} | {:ok, internal_state :: t()}
def add_metric(
%Perspectives{_metrics: metrics, _names: names} = internal_state,
%{name: name, criteria: criteria, good_criteria: good_criteria, bad_criteria: bad_criteria}
) do
if MapSet.member?(names, name) do
{:error, :existent_metric}
else
new_metric = %Metric{name: name, criteria: criteria, good_criteria: good_criteria, bad_criteria: bad_criteria, points_of_view: []}
{:ok, %Perspectives{internal_state |
_metrics: [new_metric | metrics],
_names: MapSet.put(names, name)
}}
end
end
def add_metric({:ok, internal_state}, metric_to_add), do: add_metric(internal_state, metric_to_add)
@type point_of_view_to_register :: %{
required(:metric_name) => Metric.name(),
required(:date) => PointOfView.date(),
required(:person) => PointOfView.person(),
required(:health) => PointOfView.range(),
required(:slope) => PointOfView.range()
}
@spec register_point_of_view(internal_state :: t(), point_of_view_to_register()) :: {:error, :nonexistent_metric} | {:ok, internal_state :: t()}
def register_point_of_view(
%Perspectives{_metrics: metrics, _names: names} = internal_state,
%{metric_name: metric_name, date: date, person: person, health: health, slope: slope}
) do
if !MapSet.member?(names, metric_name) do
{:error, :nonexistent_metric}
else
new_point_of_view = %PointOfView{date: date, person: person, health: health, slope: slope}
{:ok, %Perspectives{internal_state |
_metrics: update_metrics(metrics, metric_name, new_point_of_view)
}}
end
end
def register_point_of_view({:ok, internal_state}, point_of_view_to_register), do: register_point_of_view(internal_state, point_of_view_to_register)
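# Illustrative usage (values are made up); every public function also accepts
# the {:ok, state} tuple from the previous call, so calls pipe directly:
#
#     Perspectives.new()
#     |> Perspectives.add_metric(%{name: "morale", criteria: "team morale",
#       good_criteria: "high", bad_criteria: "low"})
#     |> Perspectives.register_point_of_view(%{metric_name: "morale",
#       date: ~T[12:00:00], person: "ana", health: 50, slope: 10})
#     |> Perspectives.graph()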
defp update_metrics(metrics, metric_name, new_point_of_view) do
Enum.map(metrics, fn(metric) ->
if metric.name == metric_name do
%{metric |
points_of_view: update_points_of_view(metric.points_of_view, new_point_of_view)
}
else
metric
end
end)
end
defp update_points_of_view(points_of_view, new_point_of_view) do
if points_of_view |> Enum.any?(fn pov -> matches_person_and_date(pov, new_point_of_view) end) do
Enum.map(points_of_view, fn(pov) ->
if matches_person_and_date(pov, new_point_of_view) do
new_point_of_view
else
pov
end
end)
else
[new_point_of_view | points_of_view]
end
end
defp matches_person_and_date(pov, new_point_of_view) do
pov.date == new_point_of_view.date && pov.person == new_point_of_view.person
end
end
|
backend/lib/perspectives.ex
| 0.752559
| 0.433082
|
perspectives.ex
|
starcoder
|
defmodule Amenities.Maps do
@moduledoc """
Map helpers
"""
alias Amenities.Funcs
alias Amenities.Monies
defdelegate atomify(map), to: Prelude.Map
defdelegate stringify(map), to: Prelude.Map
@doc """
Returns a map with nil values omitted when the given condition is true; otherwise returns the input unchanged
"""
@spec compact_if(struct() | map(), boolean()) :: map()
def compact_if(struct, true), do: compact(struct)
def compact_if(struct, false), do: struct
@doc """
Returns a map with nil values omitted
"""
@spec compact(struct()) :: map()
def compact(%{__struct__: _} = struct) do
struct
|> Map.from_struct()
|> Map.delete(:__meta__)
|> compact()
end
@spec compact(map()) :: map()
def compact(map) when is_map(map) do
for {k, v} <- map, v != nil, into: %{}, do: {k, v}
end
@doc """
Applies a `fun` to the `field` retrieved from two maps `map` and `acc`. Merges into the `acc`.
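Illustrative example:

    merge_by(%{a: 1}, :a, %{a: 2}, &Kernel.+/2)
    #=> %{a: 3}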
"""
@spec merge_by(map(), field :: String.t() | atom(), map(), fun()) :: map()
def merge_by(acc, field, map, fun) when is_map(acc) and is_map(map) and is_function(fun) do
left = Map.get(map, field)
right = Map.get(acc, field)
value =
case Funcs.arity(fun) do
1 -> fun.([left, right])
2 -> fun.(left, right)
end
Map.put(acc, field, value)
end
@doc """
Returns the list of keys of the struct defined by `module`, omitting `:__meta__`
"""
@spec module_keys(atom()) :: list(atom())
def module_keys(module) when is_atom(module) do
module
|> struct()
|> Map.from_struct()
|> Map.delete(:__meta__)
|> Map.keys()
end
def get_list_in(_, []), do: []
def get_list_in(nil, _), do: []
def get_list_in(struct, keys) when is_map(struct) and is_list(keys) do
keys
|> Enum.reduce(struct, fn
_key, nil ->
nil
key, record when is_map(record) ->
Map.get(record, key)
end)
|> List.wrap()
end
def keys_with_value(%{__struct__: _} = struct, val) do
struct
|> Map.from_struct()
|> keys_with_value(val)
end
def keys_with_value(map, val) when is_map(map) do
map
|> do_keys_with_value(val)
|> Enum.sort()
end
defp do_keys_with_value(map, val) when is_map(map) do
for {k, v} <- map, v == val, do: k
end
def transform_values_money_to_decimal(map) do
Enum.into(map, %{}, fn
{key, %Money{} = amount} ->
{key, amount |> Monies.to_decimal() |> Decimal.reduce()}
{key, %Decimal{} = decimal} ->
{key, Decimal.reduce(decimal)}
{key, value} ->
{key, value}
end)
end
end
|
lib/amenities/maps.ex
| 0.873451
| 0.445771
|
maps.ex
|
starcoder
|
defmodule GenStage.Dispatcher do
@moduledoc """
This module defines the behaviour used by `:producer` and
`:producer_consumer` to dispatch events.
When using a `:producer` or `:producer_consumer`, the dispatcher
may be configured on init as follows:
{:producer, state, dispatcher: GenStage.BroadcastDispatcher}
Some dispatchers may require options to be given on initialization,
those can be done with a tuple:
{:producer, state, dispatcher: {GenStage.PartitionDispatcher, partitions: 0..3}}
Elixir ships with the following dispatcher implementations:
* `GenStage.DemandDispatcher` - dispatches the given batch of
events to the consumer with the biggest demand in a FIFO
ordering. This is the default dispatcher.
* `GenStage.BroadcastDispatcher` - dispatches all events to all
consumers. The demand is only sent upstream once all consumers
ask for data.
* `GenStage.PartitionDispatcher` - dispatches all events to a
fixed amount of consumers that works as partitions according
to a hash function.
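A custom dispatcher is any module implementing the callbacks in this
behaviour. Below is a deliberately naive, illustrative sketch that supports
at most one consumer; it is not suitable for production use:

    defmodule MyApp.SingleConsumerDispatcher do
      @behaviour GenStage.Dispatcher

      # No consumer subscribed yet
      def init(_opts), do: {:ok, nil}

      # State becomes {subscription, pending_demand}; a second subscriber
      # raises, which is the limitation of this naive example
      def subscribe(_opts, from, nil), do: {:ok, 0, {from, 0}}

      def cancel(_from, {_subscription, _demand}), do: {:ok, 0, nil}

      def ask(demand, from, {from, pending}), do: {:ok, demand, {from, pending + demand}}

      # Without a consumer, everything is left over and buffered upstream
      def dispatch(events, _length, nil), do: {:ok, events, nil}

      # Deliver as many events as were asked for; return the rest as leftovers
      def dispatch(events, _length, {{pid, ref} = from, demand}) do
        {deliver, leftover} = Enum.split(events, demand)

        unless deliver == [] do
          Process.send(pid, {:"$gen_consumer", {self(), ref}, deliver}, [:noconnect])
        end

        {:ok, leftover, {from, demand - length(deliver)}}
      end

      # This dispatcher keeps no buffer of its own, so deliver immediately
      def info(msg, state) do
        send(self(), msg)
        {:ok, state}
      end
    end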
"""
@typedoc "Options used by `init/1`"
@type options :: keyword
@doc """
Called on initialization with the options given on `c:GenStage.init/1`.
"""
@callback init(opts :: options) :: {:ok, state} when state: any
@doc """
Called every time the producer gets a new subscriber.
"""
@callback subscribe(opts :: keyword(), from :: {pid, reference}, state :: term) ::
{:ok, demand :: non_neg_integer, new_state}
when new_state: term
@doc """
Called every time a subscription is cancelled or the consumer goes down.
It is guaranteed the reference given in `from` points to a reference
previously given in subscribe.
"""
@callback cancel(from :: {pid, reference}, state :: term) ::
{:ok, demand :: non_neg_integer, new_state}
when new_state: term
@doc """
Called every time a consumer sends demand.
The demand will always be a positive integer (more than 0).
This callback must return the `actual_demand` as part of its
return tuple. The returned demand is then sent to producers.
It is guaranteed the reference given in `from` points to a
reference previously given in subscribe.
"""
@callback ask(demand :: pos_integer, from :: {pid, reference}, state :: term) ::
{:ok, actual_demand :: non_neg_integer, new_state}
when new_state: term
@doc """
Called every time a producer wants to dispatch an event.
The events will always be a non-empty list. This callback may
receive more events than previously asked for and therefore must
return events it cannot effectively deliver as part of its
return tuple. Any `leftover_events` will be stored by producers
in their buffer.
It is important to emphasize that `leftover_events` can happen
in any dispatcher implementation. After all, a consumer can
subscribe, ask for events and crash. Eventually the events
the consumer asked for will be dispatched while the consumer no longer
exists, meaning they must be returned as leftover events until
another consumer subscribes.
It is guaranteed the reference given in `from` points to a
reference previously given in subscribe. It is also recommended
for events to be sent with `Process.send/3` and the `[:noconnect]`
option as the consumers are all monitored by the producer. For
example:
Process.send(consumer, {:"$gen_consumer", {self(), consumer_ref}, events}, [:noconnect])
"""
@callback dispatch(events :: nonempty_list(term), length :: pos_integer, state :: term) ::
{:ok, leftover_events :: [term], new_state}
when new_state: term
@doc """
Used to send an info message to the current process.
In case the dispatcher is doing buffering, the message must
only be sent after all currently buffered consumer messages are
delivered.
"""
@callback info(msg :: term, state :: term) :: {:ok, new_state} when new_state: term
end
|
deps/gen_stage/lib/gen_stage/dispatcher.ex
| 0.924313
| 0.638842
|
dispatcher.ex
|
starcoder
|
defmodule AWS.Shield do
@moduledoc """
Shield Advanced
This is the *Shield Advanced API Reference*.
This guide is for developers who need detailed information about the Shield
Advanced API actions, data types, and errors. For detailed information about WAF
and Shield Advanced features and an overview of how to use the WAF and Shield
Advanced APIs, see the [WAF and Shield Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
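Every function in this module takes an `AWS.Client`, an input map, and an
optional keyword list, and performs a POST against the Shield JSON API. A
usage sketch (the credentials and the resource ARN are placeholders):

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

    AWS.Shield.create_protection(client, %{
      "Name" => "web-distribution",
      "ResourceArn" => "arn:aws:cloudfront::111122223333:distribution/EXAMPLE"
    })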
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "AWS Shield",
api_version: "2016-06-02",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "shield",
global?: true,
protocol: "json",
service_id: "Shield",
signature_version: "v4",
signing_name: "shield",
target_prefix: "AWSShield_20160616"
}
end
@doc """
Authorizes the Shield Response Team (SRT) to access the specified Amazon S3
bucket containing log data such as Application Load Balancer access logs,
CloudFront logs, or logs from third party sources.
You can associate up to 10 Amazon S3 buckets with your subscription.
To use the services of the SRT and make an `AssociateDRTLogBucket` request, you
must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the
[Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/).
"""
def associate_drt_log_bucket(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateDRTLogBucket", input, options)
end
@doc """
Authorizes the Shield Response Team (SRT) using the specified role, to access
your Amazon Web Services account to assist with DDoS attack mitigation during
potential attacks.
This enables the SRT to inspect your WAF configuration and create or update WAF
rules and web ACLs.
You can associate only one `RoleArn` with your subscription. If you submit an
`AssociateDRTRole` request for an account that already has an associated role,
the new `RoleArn` will replace the existing `RoleArn`.
Prior to making the `AssociateDRTRole` request, you must attach the
`AWSShieldDRTAccessPolicy` managed policy to the role that you'll specify in the
request. You can access this policy in the IAM console at
[AWSShieldDRTAccessPolicy](https://console.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy). For more information, see [Adding and removing IAM identity
permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html).
The role must also trust the service principal `drt.shield.amazonaws.com`. For
more information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html).
The SRT will have access only to your WAF and Shield resources. By submitting
this request, you authorize the SRT to inspect your WAF and Shield configuration
and create and update WAF rules and web ACLs on your behalf. The SRT takes these
actions only if explicitly authorized by you.
You must have the `iam:PassRole` permission to make an `AssociateDRTRole`
request. For more information, see [Granting a user permissions to pass a role to an Amazon Web Services
service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html).
To use the services of the SRT and make an `AssociateDRTRole` request, you must
be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the
[Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/).
"""
def associate_drt_role(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateDRTRole", input, options)
end
@doc """
Adds health-based detection to the Shield Advanced protection for a resource.
Shield Advanced health-based detection uses the health of your Amazon Web
Services resource to improve responsiveness and accuracy in attack detection and
response.
You define the health check in Route 53 and then associate it with your Shield
Advanced protection. For more information, see [Shield Advanced Health-Based Detection](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html#ddos-advanced-health-check-option)
in the *WAF Developer Guide*.
"""
def associate_health_check(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateHealthCheck", input, options)
end
@doc """
Initializes proactive engagement and sets the list of contacts for the Shield
Response Team (SRT) to use.
You must provide at least one phone number in the emergency contact list.
After you have initialized proactive engagement using this call, to disable or
enable proactive engagement, use the calls `DisableProactiveEngagement` and
`EnableProactiveEngagement`.
This call defines the list of email addresses and phone numbers that the SRT can
use to contact you for escalations to the SRT and to initiate proactive customer
support.
The contacts that you provide in the request replace any contacts that were
already defined. If you already have contacts defined and want to use them,
retrieve the list using `DescribeEmergencyContactSettings` and then provide it
to this call.
"""
def associate_proactive_engagement_details(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"AssociateProactiveEngagementDetails",
input,
options
)
end
@doc """
Enables Shield Advanced for a specific Amazon Web Services resource.
The resource can be an Amazon CloudFront distribution, Elastic Load Balancing
load balancer, Global Accelerator accelerator, Elastic IP Address, or an Amazon
Route 53 hosted zone.
You can add protection to only a single resource with each `CreateProtection`
request. You can add protection to multiple resources at once through the Shield
Advanced console at
[https://console.aws.amazon.com/wafv2/shieldv2#/](https://console.aws.amazon.com/wafv2/shieldv2#/). For more information, see [Getting Started with Shield
Advanced](https://docs.aws.amazon.com/waf/latest/developerguide/getting-started-ddos.html)
and [Adding Shield Advanced protection to Amazon Web Services resources](https://docs.aws.amazon.com/waf/latest/developerguide/configure-new-protection.html).
"""
def create_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateProtection", input, options)
end
@doc """
Creates a grouping of protected resources so they can be handled as a
collective.
This resource grouping improves the accuracy of detection and reduces false
positives.
"""
def create_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateProtectionGroup", input, options)
end
@doc """
Activates Shield Advanced for an account.
When you initially create a subscription, your subscription is set to be
automatically renewed at the end of the existing subscription period. You can
change this by submitting an `UpdateSubscription` request.
"""
def create_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSubscription", input, options)
end
@doc """
Deletes a Shield Advanced `Protection`.
"""
def delete_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProtection", input, options)
end
@doc """
Removes the specified protection group.
"""
def delete_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteProtectionGroup", input, options)
end
@doc """
Removes Shield Advanced from an account.
Shield Advanced requires a 1-year subscription commitment. You cannot delete a
subscription prior to the completion of that commitment.
"""
def delete_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSubscription", input, options)
end
@doc """
Describes the details of a DDoS attack.
"""
def describe_attack(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAttack", input, options)
end
@doc """
Provides information about the number and type of attacks Shield has detected in
the last year for all resources that belong to your account, regardless of
whether you've defined Shield protections for them.
This operation is available to Shield customers as well as to Shield Advanced
customers.
The operation returns data for the time range of midnight UTC, one year ago, to
midnight UTC, today. For example, if the current time is `2020-10-26 15:39:32
PDT`, equal to `2020-10-26 22:39:32 UTC`, then the time range for the attack
data returned is from `2019-10-26 00:00:00 UTC` to `2020-10-26 00:00:00 UTC`.
The time range indicates the period covered by the attack statistics data items.
"""
def describe_attack_statistics(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAttackStatistics", input, options)
end
@doc """
Returns the current role and list of Amazon S3 log buckets used by the Shield
Response Team (SRT) to access your Amazon Web Services account while assisting
with attack mitigation.
"""
def describe_drt_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDRTAccess", input, options)
end
@doc """
A list of email addresses and phone numbers that the Shield Response Team (SRT)
can use to contact you if you have proactive engagement enabled, for escalations
to the SRT and to initiate proactive customer support.
"""
def describe_emergency_contact_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEmergencyContactSettings", input, options)
end
@doc """
Lists the details of a `Protection` object.
"""
def describe_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProtection", input, options)
end
@doc """
Returns the specification for the specified protection group.
"""
def describe_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeProtectionGroup", input, options)
end
@doc """
Provides details about the Shield Advanced subscription for an account.
"""
def describe_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSubscription", input, options)
end
@doc """
Disable the Shield Advanced automatic application layer DDoS mitigation feature
for the resource.
This stops Shield Advanced from creating, verifying, and applying WAF rules for
attacks that it detects for the resource.
"""
def disable_application_layer_automatic_response(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DisableApplicationLayerAutomaticResponse",
input,
options
)
end
@doc """
Removes authorization from the Shield Response Team (SRT) to notify contacts
about escalations to the SRT and to initiate proactive customer support.
"""
def disable_proactive_engagement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableProactiveEngagement", input, options)
end
@doc """
Removes the Shield Response Team's (SRT) access to the specified Amazon S3
bucket containing the logs that you shared previously.
"""
def disassociate_drt_log_bucket(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateDRTLogBucket", input, options)
end
@doc """
Removes the Shield Response Team's (SRT) access to your Amazon Web Services
account.
"""
def disassociate_drt_role(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateDRTRole", input, options)
end
@doc """
Removes health-based detection from the Shield Advanced protection for a
resource.
Shield Advanced health-based detection uses the health of your Amazon Web
Services resource to improve responsiveness and accuracy in attack detection and
response.
You define the health check in Route 53 and then associate or disassociate it
with your Shield Advanced protection. For more information, see [Shield Advanced Health-Based
Detection](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html#ddos-advanced-health-check-option)
in the *WAF Developer Guide*.
"""
def disassociate_health_check(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateHealthCheck", input, options)
end
@doc """
Enable the Shield Advanced automatic application layer DDoS mitigation for the
resource.
This feature is available for Amazon CloudFront distributions only.
This causes Shield Advanced to create, verify, and apply WAF rules for DDoS
attacks that it detects for the resource. Shield Advanced applies the rules in a
Shield rule group inside the web ACL that you've associated with the resource.
For information about how automatic mitigation works and the requirements for
using it, see [Shield Advanced automatic application layer DDoS mitigation](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-advanced-automatic-app-layer-response.html).
Don't use this action to make changes to automatic mitigation settings when it's
already enabled for a resource. Instead, use
`UpdateApplicationLayerAutomaticResponse`.
To use this feature, you must associate a web ACL with the protected resource.
The web ACL must be created using the latest version of WAF (v2). You can
associate the web ACL through the Shield Advanced console at
[https://console.aws.amazon.com/wafv2/shieldv2#/](https://console.aws.amazon.com/wafv2/shieldv2#/). For more information, see [Getting Started with Shield
Advanced](https://docs.aws.amazon.com/waf/latest/developerguide/getting-started-ddos.html).
You can also do this through the WAF console or the WAF API, but you must manage
Shield Advanced automatic mitigation through Shield Advanced. For information
about WAF, see [WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def enable_application_layer_automatic_response(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"EnableApplicationLayerAutomaticResponse",
input,
options
)
end
@doc """
Authorizes the Shield Response Team (SRT) to use email and phone to notify
contacts about escalations to the SRT and to initiate proactive customer
support.
"""
def enable_proactive_engagement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableProactiveEngagement", input, options)
end
@doc """
Returns the `SubscriptionState`, either `Active` or `Inactive`.
"""
def get_subscription_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSubscriptionState", input, options)
end
@doc """
Returns all ongoing DDoS attacks or all DDoS attacks during a specified time
period.
"""
def list_attacks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAttacks", input, options)
end
@doc """
Retrieves the `ProtectionGroup` objects for the account.
"""
def list_protection_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProtectionGroups", input, options)
end
@doc """
Lists all `Protection` objects for the account.
"""
def list_protections(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListProtections", input, options)
end
@doc """
Retrieves the resources that are included in the protection group.
"""
def list_resources_in_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListResourcesInProtectionGroup", input, options)
end
@doc """
Gets information about Amazon Web Services tags for a specified Amazon Resource
Name (ARN) in Shield.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Adds or updates tags for a resource in Shield.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes tags from a resource in Shield.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates an existing Shield Advanced automatic application layer DDoS mitigation
configuration for the specified resource.
"""
def update_application_layer_automatic_response(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"UpdateApplicationLayerAutomaticResponse",
input,
options
)
end
@doc """
Updates the details of the list of email addresses and phone numbers that the
Shield Response Team (SRT) can use to contact you if you have proactive
engagement enabled, for escalations to the SRT and to initiate proactive
customer support.
"""
def update_emergency_contact_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEmergencyContactSettings", input, options)
end
@doc """
Updates an existing protection group.
A protection group is a grouping of protected resources so they can be handled
as a collective. This resource grouping improves the accuracy of detection and
reduces false positives.
"""
def update_protection_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateProtectionGroup", input, options)
end
@doc """
Updates the details of an existing subscription.
Only enter values for parameters you want to change. Empty parameters are not
updated.
"""
def update_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSubscription", input, options)
end
end
|
lib/aws/generated/shield.ex
| 0.87168
| 0.547343
|
shield.ex
|
starcoder
|
defmodule AtomTweaks.Ecto.Markdown do
@moduledoc """
An `Ecto.Type` that handles the conversion between a Markdown-formatted string in the database and
an `AtomTweaks.Markdown` struct in memory.
Use this as the type of the database field in the schema:
```
defmodule AtomTweaks.Tweaks.Tweak do
use Ecto.Schema
schema "tweaks" do
field :title, :string
field :code, :string
field :type, :string
field :description, AtomTweaks.Ecto.Markdown
belongs_to :user, AtomTweaks.Accounts.User, foreign_key: :created_by, type: :binary_id
timestamps()
end
end
```
All other references in the codebase should be to the `AtomTweaks.Markdown` module. See that
module's documentation on how it is used.
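For illustration, casting accepts either a raw Markdown string or an
existing struct, and dumping stores only the raw text (a sketch of the
behavior implemented below):
```
cast("# Title")
#=> {:ok, %AtomTweaks.Markdown{text: "# Title"}}

dump(%AtomTweaks.Markdown{text: "# Title"})
#=> {:ok, "# Title"}
```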
**See:** [Beyond Functions in Elixir: Refactoring for Maintainability][beyond-functions] for
details on this pattern.
[beyond-functions]: https://blog.usejournal.com/beyond-functions-in-elixir-refactoring-for-maintainability-5c73daba77f3
"""
@behaviour Ecto.Type
@doc """
Returns the underlying schema type for the custom type.
See: `c:Ecto.Type.type/0`
"""
@impl Ecto.Type
def type, do: :string
@doc """
Casts the given input to the custom type.
See: `c:Ecto.Type.cast/1`
"""
@impl Ecto.Type
def cast(binary) when is_binary(binary) do
{:ok, %AtomTweaks.Markdown{text: binary}}
end
def cast(markdown = %AtomTweaks.Markdown{}), do: {:ok, markdown}
def cast(_other), do: :error
@doc """
Loads the given term into a custom type.
See: `c:Ecto.Type.load/1`
"""
@impl Ecto.Type
def load(binary) when is_binary(binary) do
{:ok, %AtomTweaks.Markdown{text: binary, html: AtomTweaks.Markdown.to_html(binary)}}
end
def load(_other), do: :error
@doc """
Dumps the given term into an Ecto native type.
See: `c:Ecto.Type.dump/1`
"""
@impl Ecto.Type
def dump(%AtomTweaks.Markdown{text: binary}) when is_binary(binary) do
{:ok, binary}
end
def dump(binary) when is_binary(binary), do: {:ok, binary}
def dump(_other), do: :error
end
|
lib/atom_tweaks/ecto/markdown.ex
| 0.832883
| 0.832917
|
markdown.ex
|
starcoder
|
defmodule Eactivitypub.Types do
defmodule ID do
@moduledoc """
A hash of a User for ETS.
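For illustration (a sketch; output varies per call, since the nonce and
timestamp are generated at creation time):

    id = Eactivitypub.Types.ID.create()
    Eactivitypub.Types.ID.hash(id)
    #=> a 64-character uppercase hex digest of the SHA3-256 hash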
"""
@enforce_keys [:created, :nonce]
defstruct created: nil, nonce: nil
@type t :: %__MODULE__{
created: DateTime.t(),
nonce: integer()
}
@spec create :: t()
def create() do
nonce = :erlang.unique_integer()
created = DateTime.utc_now()
%__MODULE__{created: created, nonce: nonce}
end
@spec hash(t()) :: binary
def hash(id) do
Base.encode16(
:crypto.hash(:sha3_256, [to_charlist(id.nonce), to_charlist(DateTime.to_unix(id.created))])
)
end
end
defmodule Mention do
@moduledoc """
A WebFingerable representation of a User.
"""
@enforce_keys [:username, :hostname]
defstruct username: nil, hostname: nil
@type t :: %__MODULE__{username: binary, hostname: binary}
@spec decode!(binary) :: Eactivitypub.Types.Mention.t()
@doc """
Unsafely decodes a federated mention into an Eactivitypub one.
## Examples
iex> Eactivitypub.Types.Mention.decode!("chlorophytus@example.com")
%Eactivitypub.Types.Mention{hostname: "example.com", username: "chlorophytus"}
"""
def decode!(mention) do
# Pass 1: Capture a possibly invalid username but a valid host
# For Pass 1 we will use https://tools.ietf.org/html/rfc3986#appendix-B
%{"user" => user!, "host" => host} =
Regex.named_captures(~r/^((?<user>[[:graph:]]+)\@)(?<host>[^\/?#]*)$/u, mention)
# Pass 2: Evaluate the username
cond do
not Regex.match?(~r/^.*(\@).*$/, user!) ->
%__MODULE__{username: user!, hostname: host}
end
end
@spec decode(binary) ::
{:error, :invalid_host | :invalid_user} | {:ok, t()}
@doc """
Decodes a federated mention into an Eactivitypub one.
## Examples
iex> Eactivitypub.Types.Mention.decode("chlorophytus@example.com")
{:ok, %Eactivitypub.Types.Mention{hostname: "example.com", username: "chlorophytus"}}
"""
def decode(mention) do
case Regex.named_captures(~r/^((?<user>[[:graph:]]+)\@)(?<host>[^\/?#]*)$/u, mention) do
%{"user" => user!, "host" => host} ->
if not Regex.match?(~r/^.*(\@).*$/, user!) do
{:ok, %__MODULE__{username: user!, hostname: host}}
else
{:error, :invalid_user}
end
_ ->
{:error, :invalid_host}
end
end
@spec encode(t()) :: binary
@doc """
Encodes an Eactivitypub mention into a federated one. No sanitisation is performed.
## Examples
iex> Eactivitypub.Types.Mention.encode(%Eactivitypub.Types.Mention{hostname: "example.com", username: "chlorophytus"})
"<EMAIL>"
"""
def encode(mention) do
"#{mention.username}@#{mention.hostname}"
end
end
defmodule User do
@moduledoc """
Stores a user. These can be distributed on-the-fly.
NOTE: `hashable_id` is not required, though implementing it internally is
advised. This is a preliminary statement.
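For illustration, the minimal construction (enforced keys only; the
values are placeholders):

    %Eactivitypub.Types.User{
      preferred_username: %Eactivitypub.Types.Mention{
        username: "alice",
        hostname: "example.com"
      },
      type: :person,
      manually_approves_followers: false,
      discoverable: true
    }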
"""
@enforce_keys [:preferred_username, :type, :manually_approves_followers, :discoverable]
defstruct hashable_id: nil,
preferred_username: nil,
name: nil,
summary: nil,
type: :person,
url: nil,
icon: nil,
image: nil,
manually_approves_followers: false,
discoverable: true,
public_key: nil,
featured: [],
also_known_as: []
@type t :: %__MODULE__{
hashable_id: ID.t(),
preferred_username: Mention.t(),
name: binary,
summary: binary,
type: :person | :service | :application,
url: binary,
icon: binary,
image: binary,
manually_approves_followers: boolean,
discoverable: boolean,
public_key: :public_key.public_key(),
featured: list,
also_known_as: [Mention.t()]
}
end
end
|
lib/eactivitypub/types.ex
| 0.867233
| 0.425098
|
types.ex
|
starcoder
|
defmodule Chex.Chexers do
@moduledoc """
The Chexers context.
"""
import Ecto.Query, warn: false
alias Chex.Repo
alias Chex.Chexers.SlackChex
@doc """
Returns the list of slackchex.
## Examples
iex> list_slackchex()
[%SlackChex{}, ...]
"""
def list_slackchex do
Repo.all(SlackChex)
end
@doc """
Gets a single slack_chex.
Raises `Ecto.NoResultsError` if the Slack chex does not exist.
## Examples
iex> get_slack_chex!(123)
%SlackChex{}
iex> get_slack_chex!(456)
** (Ecto.NoResultsError)
"""
def get_slack_chex!(id), do: Repo.get!(SlackChex, id)
@doc """
Creates a slack_chex.
## Examples
iex> create_slack_chex(%{field: value})
{:ok, %SlackChex{}}
iex> create_slack_chex(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_slack_chex(attrs \\ %{}) do
presence = get_user_slack_status(attrs)
IO.puts("#################")
IO.inspect(presence)
IO.puts("#################")
%SlackChex{}
|> SlackChex.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a slack_chex.
## Examples
iex> update_slack_chex(slack_chex, %{field: new_value})
{:ok, %SlackChex{}}
iex> update_slack_chex(slack_chex, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_slack_chex(%SlackChex{} = slack_chex, attrs) do
slack_chex
|> SlackChex.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a SlackChex.
## Examples
iex> delete_slack_chex(slack_chex)
{:ok, %SlackChex{}}
iex> delete_slack_chex(slack_chex)
{:error, %Ecto.Changeset{}}
"""
def delete_slack_chex(%SlackChex{} = slack_chex) do
Repo.delete(slack_chex)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking slack_chex changes.
## Examples
iex> change_slack_chex(slack_chex)
%Ecto.Changeset{source: %SlackChex{}}
"""
def change_slack_chex(%SlackChex{} = slack_chex) do
SlackChex.changeset(slack_chex, %{})
end
# The pattern-matching clause must come before the catch-all,
# otherwise it can never match.
defp get_user_slack_status(%{"user" => username}) do
IO.inspect(username)
# refactor: call the Slack API
Slack.Web.Users.get_presence(username)
end
defp get_user_slack_status(attrs) do
IO.inspect(attrs)
end
end
|
apps/chex/lib/chex/chexers/chexers.ex
| 0.794544
| 0.447219
|
chexers.ex
|
starcoder
|
defmodule Gateway.Bridge do
@moduledoc """
Implements the Litebridge protocol as a server.
Litebridge is a protocol running over websockets
that uses the gateway as a server and the rest
component as a client to share information between the two.
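A sketch of the framing as implemented by the handlers below (JSON
encoded; the op numbers are taken from this module):

    server -> client  {"op": 0, "hb_interval": 20000}  # hello
    client -> server  {"op": 1, "password": "..."}     # hello ack / auth
    client -> server  {"op": 2}                        # heartbeat
    server -> client  {"op": 3}                        # heartbeat ack
    op 4 / op 5: request/response ("n" nonce, "w" request, "a" args, "r" result)
    op 6: dispatch, a request with no response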
"""
require Logger
@behaviour :cowboy_websocket
defmodule State do
@moduledoc false
defstruct [:heartbeat, :identify, :encoding, :compress]
end
def hb_interval() do
20_000
end
def encode(map, state) do
encoded = case state.encoding do
"json" ->
Poison.encode!(map)
end
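# NOTE: compression is not implemented; when a client requests it,
# an empty frame is currently produced instead of a compressed one.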
if state.compress do
""
else
encoded
end
end
def decode(data, state) do
case state.encoding do
"json" ->
Poison.decode!(data)
end
end
def init(req, _state) do
{peer_ip, peer_port} = :cowboy_req.peer(req)
Logger.info "litebridge: new #{inspect peer_ip}:#{inspect peer_port}"
{:cowboy_websocket, req, %State{heartbeat: false,
identify: false,
encoding: "json",
compress: false}}
end
def terminate(reason, _req, _state) do
Logger.info "Terminated from #{inspect reason}"
Litebridge.remove self()
:ok
end
def hb_timer() do
interv = hb_interval()
# The world is really bad.
:erlang.start_timer(interv + 1000, self(), :req_heartbeat)
end
def websocket_init(state) do
hb_timer()
hello = encode(%{op: 0,
hb_interval: hb_interval()}, state)
Litebridge.register self()
{:reply, {:text, hello}, state}
end
# payload handlers
def websocket_handle({:text, frame}, state) do
payload = decode(frame, state)
%{"op" => opcode} = payload
handle_payload(opcode, payload, state)
end
def websocket_handle(_any_frame, state) do
{:ok, state}
end
# erlang timer handlers
def websocket_info({:timeout, _ref, :req_heartbeat}, state) do
case state.heartbeat do
true ->
hb_timer()
{:ok, Map.put(state, :heartbeat, false)}
false ->
{:reply, {:close, 4003, "Heartbeat timeout"}, state}
end
end
def websocket_info({:send, packet}, state) do
{:reply, {:text, encode(packet, state)}, state}
end
# specific payload handlers
@doc """
Handle OP 1 Hello ACK.
Checks in application data if the
provided password is correct
"""
def handle_payload(1, payload, state) do
correct = Application.fetch_env!(:gateway, :bridge_password)
given = payload["password"]
if correct == given do
{:ok, Map.put(state, :identify, true)}
else
{:reply, {:close, 4001, "Authentication failed"}, state}
end
end
@doc """
Handle OP 2 Heartbeat
If the client is not properly identified through
OP 1 Hello ACK, it is disconnected.
"""
def handle_payload(2, _payload, state) do
case state.identify do
true ->
hb_ack = encode(%{op: 3}, state)
{:reply, {:text, hb_ack}, Map.put(state, :heartbeat, true)}
false ->
{:reply, {:close, 4002, "Not Authenticated"}, state}
end
end
@doc """
Handle OP 4 Request
Handle a specific request from the client.
Sends OP 5 Response.
"""
def handle_payload(4, payload, state) do
# payload was already decoded in websocket_handle/2
%{"n" => nonce,
"w" => request,
"a" => args} = payload
{response, new_state} = bridge_request(request, args, state)
response_payload = encode(%{op: 5,
n: nonce,
r: response
}, new_state)
{:reply, {:text, response_payload}, new_state}
end
@doc """
Request all subscribers that are in a guild.
"""
def bridge_request("GET_SUBSCRIBERS", [guild_id], state) do
guild_pid = Guild.Registry.get(guild_id)
{GenGuild.get_subs(guild_pid), state}
end
@doc """
Handle OP 5 Response.
"""
def handle_payload(5, payload, state) do
%{"n" => nonce,
"r" => response} = payload
Logger.debug fn ->
"Got a response for #{nonce}, #{inspect response}"
end
Litebridge.process_response(nonce, response)
{:ok, state}
end
@doc """
Handle OP 6 Dispatch.
Do a request that won't have any response back.
"""
def handle_payload(6, payload, state) do
# payload was already decoded in websocket_handle/2
%{
"w" => request,
"a" => args
} = payload
new_state = bridge_dispatch(request, args, state)
{:ok, new_state}
end
def bridge_dispatch("NEW_GUILD", [guild_id, owner_id], state) do
# get GenGuild
guild_pid = Guild.Registry.get(guild_id)
state_pids = State.Registry.get(owner_id, guild_id)
# ???
GenGuild.subscribe(guild_pid, owner_id)
Enum.each(state_pids, fn state_pid ->
GenGuild.add_presence(guild_pid, state_pid)
end)
Presence.dispatch(guild_id, fn guild_pid, state_pid ->
# We need to fill an entire guild payload here.
# Fuck.
{"GUILD_CREATE", Guild.guild_dump(guild_pid, state_pid)}
end)
state
end
def bridge_dispatch("DISPATCH", %{
"guild" => u_guild_id,
"event" => [u_event_name, event_data]
}, state) do
# dispatch to all members of a guild
guild_id = to_string(u_guild_id)
event_name = to_string(u_event_name)
_guild_pid = Guild.Registry.get(guild_id)
Presence.dispatch(guild_id, fn _guild_pid, _state_pid ->
{event_name, event_data}
end)
state
end
def bridge_request("DISPATCH", %{"user" => user_id}, state) do
# dispatch to one user (all of the user's shards will receive)
state
end
def bridge_dispatch("DISPATCH_MEMBER", [guild_id, user_id], state) do
# dispatch to a *single* user in a gulid
state
end
def bridge_dispatch("DISPATCH_CHANNEL", [guild_id, channel_id], state) do
# dispatch to all users in a channel
state
end
end
|
lib/bridge/websocket.ex
| 0.58166
| 0.402128
|
websocket.ex
|
starcoder
|
defmodule Harnais.Runner.Suite.Map.Helper do
@moduledoc false
def runner_suite_map_new_old_tuple(v) do
{v, :new_value}
end
def runner_suite_map_value() do
42
end
def runner_suite_map_passthru(v) do
v
end
end
defmodule Harnais.Runner.Suite.Map do
@moduledoc false
require Harnais.Runner.Suite.Map.Helper, as: HRTMH
use Harnais.Attribute
use Harnais.Attribute.Data
@harnais_runner_tests_state_deep @harnais_state_deep
@harnais_runner_suite_map %{
default:
[
[:r, :delete, [:a]],
[:r, :delete, [:x]],
[:r, :drop, [[:a, :b, :c]]],
[:r, :drop, [[:a, :x]]],
[:r, :equal?, [@harnais_runner_tests_state_deep]],
[:r, :equal?, [%{}]],
[:r, :fetch, [:a]],
[:r, :fetch, [:x]],
[:r, :fetch!, [:a]],
[{:e, KeyError}, :fetch!, [:x]],
[:r, :get, [:a]],
[:r, :get, [:x]],
[:r, :get_and_update, [:a, &HRTMH.runner_suite_map_new_old_tuple/1]],
[:r, :get_and_update, [:x, &HRTMH.runner_suite_map_new_old_tuple/1]],
[:r, :get_and_update!, [:a, &HRTMH.runner_suite_map_new_old_tuple/1]],
[{:e, KeyError}, :get_and_update!, [:x, &HRTMH.runner_suite_map_new_old_tuple/1]],
[:r, :get_lazy, [:a, &HRTMH.runner_suite_map_value/0]],
[:r, :get_lazy, [:x, &HRTMH.runner_suite_map_value/0]],
[:r, :has_key?, [:a]],
[:r, :has_key?, [:x]],
[:r, :keys],
[:r, :merge, [%{a: 1, b: 2}]],
[:r, :new],
[:r, :new, [&HRTMH.runner_suite_map_passthru/1]],
[:r, :pop, [:a]],
[:r, :pop, [:a, 42]],
[:r, :pop, [:x]],
[:r, :pop, [:x, 42]],
[:r, :pop_lazy, [:a, &HRTMH.runner_suite_map_value/0]],
[:r, :put, [:a, 42]],
[:r, :put, [:x, 42]],
[:r, :put_new, [:a, 42]],
[:r, :put_new, [:x, 42]],
[:r, :put_new_lazy, [:a, &HRTMH.runner_suite_map_value/0]],
[:r, :put_new_lazy, [:x, &HRTMH.runner_suite_map_value/0]],
[:r, :split, [[:a]]],
[:r, :split, [[:a, :x]]],
[:r, :take, [[:a, :c]]],
[:r, :take, [[:a, :x]]],
[:r, :to_list],
[:r, :update, [:a, 42, &HRTMH.runner_suite_map_passthru/1]],
[:r, :update, [:x, 42, &HRTMH.runner_suite_map_passthru/1]],
[:r, :update!, [:a, &HRTMH.runner_suite_map_passthru/1]],
[{:e, KeyError}, :update!, [:x, &HRTMH.runner_suite_map_passthru/1]],
[:r, :values]
]
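# Each row above is [flag, call], [flag, call, args], [flag, call, args, value]
# or [flag, call, args, value, result]; normalise each into a keyword
# test spec, comparing results against the Map module.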
|> Stream.map(fn
[flag, call] ->
[f: flag, c: call, a: [], compare: [d: Map, c: call, a: []]]
[flag, call, args] ->
[f: flag, c: call, a: args, compare: [d: Map, c: call, a: args]]
[flag, call, args, value] ->
[f: flag, c: call, a: args, v: value, compare: [d: Map, c: call, a: args, v: value]]
[flag, call, args, value, result] ->
[f: flag, c: call, a: args, v: value, r: result]
end)
|> Enum.map(fn test_spec ->
with {:ok, test_spec} <-
test_spec
|> Harnais.Runner.Prova.Utility.prova_spec_normalise() do
test_spec
else
{:error, %{__struct__: _} = error} -> raise error
end
end)
}
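# Generate tests_get/1 and tests_get/2 wrappers that delegate to
# Harnais.Runner.Suite.Utility with the suite map as the first argument.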
@args_vars 0..5 |> Enum.map(fn n -> "arg#{n}" |> String.to_atom() |> Macro.var(nil) end)
@wrappers [tests_get: 1, tests_get: 2]
for {name, arity} <- @wrappers do
args = @args_vars |> Enum.take(arity)
def unquote(name)(unquote_splicing(args)) do
Harnais.Runner.Suite.Utility.unquote(name)(
@harnais_runner_suite_map,
unquote_splicing(args)
)
end
end
end
|
lib/harnais/runner/suite/map.ex
| 0.549278
| 0.586523
|
map.ex
|
starcoder
|
defmodule Lapin.Connection do
@moduledoc """
RabbitMQ connection handler
This module handles the RabbitMQ connection. It also provides a behaviour for
worker module implementation. The worker module should use the `Lapin.Connection`
behaviour and implement the callbacks it needs.
When using the `Lapin.Connection` behaviour, a `publish/4` function is injected
into the worker module as a shortcut to `Lapin.Connection.publish/5`. It removes
the need to pass in the connection and is publicly callable, publishing messages
on the connection configured for the implementing module.
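A minimal worker sketch (module, exchange and routing key names are
placeholders):

    defmodule MyApp.Worker do
      use Lapin.Connection

      def handle_deliver(_channel, message) do
        IO.inspect(message.payload)
        :ok
      end
    end

    MyApp.Worker.publish("some_exchange", "some_routing_key", "hello")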
"""
use Connection
require Logger
import Lapin.Utils, only: [check_mandatory_params: 2]
alias Lapin.{Message, Channel}
@typedoc """
Connection configuration
The following keys are supported:
- module: module using the `Lapin.Connection` behaviour
- uri: AMQP URI (String.t | URI.t)
- host: broker hostname (string | charlist), *default: 'localhost'*
- port: broker port (string | integer), *default: 5672*
- virtual_host: broker vhost (string), *default: "/"*
- username: username (string)
- password: password (string)
- auth_mechanisms: broker auth_mechanisms ([:amqplain | :external | :plain]), *default: amqp_client default*
- ssl_options: ssl options ([:ssl:ssl_option]), *default: none*
- channels: channels to configure ([Channel.config]), *default: []*
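For example (a sketch; the values are placeholders):

    [module: MyApp.Worker, uri: "amqp://guest:guest@localhost", channels: []]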
"""
@type config :: [channels: [Channel.config()]]
@typedoc "Connection"
@type t :: GenServer.server()
@typedoc "Callback result"
@type on_callback :: :ok | {:error, message :: String.t()}
@typedoc "Reason for message rejection"
@type reason :: term
@typedoc "`handle_deliver/2` callback result"
@type on_deliver :: :ok | {:reject, reason} | term
@doc """
Called when receiving a `basic.cancel` from the broker.
"""
@callback handle_cancel(Channel.t()) :: on_callback
@doc """
Called when receiving a `basic.cancel_ok` from the broker.
"""
@callback handle_cancel_ok(Channel.t()) :: on_callback
@doc """
Called when receiving a `basic.consume_ok` from the broker.
This signals successul registration as a consumer.
"""
@callback handle_consume_ok(Channel.t()) :: on_callback
@doc """
Called when receiving a `basic.deliver` from the broker.
Return values from this callback determine message acknowledgement:
- `:ok`: Message was processed by the consumer and should be removed from queue
- `{:reject, reason}`: Message was not processed and should be rejected
Any other return value requeues the message to prevent data loss.
A crash in the callback code will, however, reject the message if it was
already delivered once before, to prevent redelivery loops.
The `reason` term can be used by the application
to signal the reason of rejection and is logged in debug.
"""
@callback handle_deliver(Channel.t(), Message.t()) :: on_deliver
@doc """
Called when completing a `basic.publish` with the broker.
Message transmission to the broker is successful when this callback is called.
"""
@callback handle_publish(Channel.t(), Message.t()) :: on_callback
@doc """
Called when receiving a `basic.return` from the broker.
This signals an undeliverable returned message from the broker.
"""
@callback handle_return(Channel.t(), Message.t()) :: on_callback
@doc """
Called before `handle_deliver/2` to get the payload type.
Should return a data type instance to decode the payload into.
A `Lapin.Message.Payload` implementation must be provided for this type. The
default implementation leaves the payload unaltered.
"""
@callback payload_for(Channel.t(), Message.t()) :: Message.Payload.t()
defmacro __using__(_) do
quote do
alias Lapin.{Channel, Message}
@behaviour Lapin.Connection
def handle_cancel(_channel), do: :ok
def handle_cancel_ok(_channel), do: :ok
def handle_consume_ok(_channel), do: :ok
def handle_deliver(_channel, _message), do: :ok
def handle_publish(_channel, _message), do: :ok
def handle_return(_channel, _message), do: :ok
def payload_for(_channel, _message), do: <<>>
defoverridable Lapin.Connection
def publish(exchange, routing_key, message, options \\ []) do
Lapin.Connection.publish(__MODULE__, exchange, routing_key, message, options)
end
end
end
@backoff 1_000
@connection_default_params [connection_timeout: @backoff]
@default_rabbitmq_host 'localhost'
@default_rabbitmq_port 5672
@doc """
Starts a `Lapin.Connection` with the specified configuration
"""
@spec start_link(config, options :: GenServer.options()) :: GenServer.on_start()
def start_link(configuration, options \\ []) do
{:ok, configuration} = cleanup_configuration(configuration)
Connection.start_link(__MODULE__, configuration, options)
end
def init(configuration) do
{:connect, :init, %{configuration: configuration, channels: [], connection: nil, module: nil}}
end
@doc """
Closes the connection
"""
@spec close(connection :: t) :: :ok
def close(connection), do: GenServer.stop(connection)
def terminate(_reason, %{connection: nil}), do: :ok
def terminate(_reason, %{connection: connection}) do
AMQP.Connection.close(connection)
end
@doc """
Publishes a message to the specified exchange with the given routing_key
"""
@spec publish(
connection :: t,
Channel.exchange(),
Channel.routing_key(),
Message.Payload.t(),
options :: Keyword.t()
) :: on_callback
def publish(connection, exchange, routing_key, payload, options \\ []) do
Connection.call(connection, {:publish, exchange, routing_key, payload, options})
end
def handle_call(
{:publish, _exchange, _routing_key, _payload, _options},
_from,
%{connection: nil} = state
) do
{:reply, {:error, :not_connected}, state}
end
def handle_call(
{:publish, exchange, routing_key, payload, options},
_from,
%{channels: channels, module: module} = state
) do
with channel when not is_nil(channel) <-
Channel.get(channels, exchange, routing_key, :producer),
%Channel{pattern: pattern} <- channel,
amqp_channel when not is_nil(amqp_channel) <- channel.amqp_channel,
mandatory <- pattern.publisher_mandatory(channel),
persistent <- pattern.publisher_persistent(channel),
options <- Keyword.merge([mandatory: mandatory, persistent: persistent], options),
content_type <- Message.Payload.content_type(payload),
meta <- %{content_type: content_type},
{:ok, payload} <- Message.Payload.encode(payload),
:ok <- AMQP.Basic.publish(amqp_channel, exchange, routing_key, payload, options) do
message = %Message{meta: Enum.into(options, meta), payload: payload}
if not pattern.publisher_confirm(channel) or AMQP.Confirm.wait_for_confirms(amqp_channel) do
Logger.debug(fn -> "Published #{inspect(message)} on #{inspect(channel)}" end)
{:reply, module.handle_publish(channel, message), state}
else
error = "Error publishing #{inspect(message)}"
Logger.debug(fn -> error end)
{:reply, {:error, error}, state}
end
else
:passive ->
error = "Cannot publish, channel role is :passive"
Logger.error(error)
{:reply, {:error, error}, state}
:consumer ->
error = "Cannot publish, channel role is :consumer"
Logger.error(error)
{:reply, {:error, error}, state}
nil ->
error =
"Error publishing message: no channel for exchange '#{exchange}' with routing key '#{
routing_key
}'"
Logger.debug(fn -> error end)
{:reply, {:error, error}, state}
{:error, error} ->
Logger.debug(fn -> "Error sending message: #{inspect(error)}" end)
{:reply, {:error, error}, state}
end
end
def handle_info(
{:basic_cancel, %{consumer_tag: consumer_tag}},
%{channels: channels, module: module} = state
) do
with channel when not is_nil(channel) <- Channel.get(channels, consumer_tag) do
Logger.debug(fn -> "Broker cancelled consumer for #{inspect(channel)}" end)
module.handle_cancel(channel)
else
nil ->
Logger.warn("Broker cancelled consumer_tag '#{consumer_tag}' for locally unknown channel")
{:error, error} ->
Logger.error("Error canceling consumer_tag '#{consumer_tag}': #{error}")
end
{:stop, :normal, state}
end
def handle_info(
{:basic_cancel_ok, %{consumer_tag: consumer_tag}},
%{channels: channels, module: module} = state
) do
with channel when not is_nil(channel) <- Channel.get(channels, consumer_tag),
:ok <- module.handle_cancel_ok(channel) do
Logger.debug(fn -> "Broker confirmed cancelling consumer for #{inspect(channel)}" end)
else
nil ->
Logger.debug(fn ->
"Broker confirmed cancelling consumer for locally unknown tag '#{consumer_tag}'"
end)
error ->
Logger.error("Error handling broker cancel for '#{consumer_tag}': #{inspect(error)}")
end
{:noreply, state}
end
def handle_info(
{:basic_consume_ok, %{consumer_tag: consumer_tag}},
%{channels: channels, module: module} = state
) do
with channel when not is_nil(channel) <- Channel.get(channels, consumer_tag),
:ok <- module.handle_consume_ok(channel) do
Logger.debug(fn -> "Broker registered consumer for #{inspect(channel)}" end)
else
nil ->
Logger.warn(
"Broker registered consumer_tag '#{consumer_tag}' for locally unknown channel"
)
error ->
Logger.error("Error handling broker register for '#{consumer_tag}': #{inspect(error)}")
end
{:noreply, state}
end
def handle_info(
{:basic_return, payload, %{exchange: exchange, routing_key: routing_key} = meta},
%{channels: channels, module: module} = state
) do
message = %Message{meta: meta, payload: payload}
with channel when not is_nil(channel) <-
Channel.get(channels, exchange, routing_key, :producer),
:ok <- module.handle_return(channel, message) do
Logger.debug(fn -> "Broker returned message #{inspect(message)}" end)
else
nil ->
Logger.warn("Broker returned message #{inspect(message)} for locally unknown channel")
error ->
Logger.debug(fn -> "Error handling returned message: #{inspect(error)}" end)
end
{:noreply, state}
end
def handle_info({:DOWN, _, :process, _pid, _reason}, state) do
Logger.warn("Connection down, restarting...")
{:stop, :normal, state}
end
def handle_info(
{:basic_deliver, payload, %{consumer_tag: consumer_tag} = meta},
%{channels: channels, module: module} = state
) do
message = %Message{meta: meta, payload: payload}
with channel when not is_nil(channel) <- Channel.get(channels, consumer_tag) do
spawn(fn -> consume(module, channel, meta, payload) end)
else
nil ->
Logger.error("Error processing message #{inspect(message)}, no local channel")
end
{:noreply, state}
end
defp consume(
module,
%Channel{pattern: pattern} = channel,
%{delivery_tag: delivery_tag, redelivered: redelivered} = meta,
payload
) do
message = %Message{meta: meta, payload: payload}
with consumer_ack <- pattern.consumer_ack(channel),
payload_for <- module.payload_for(channel, message),
content_type <- Message.Payload.content_type(payload_for),
message <- %Message{message | meta: Map.put(meta, :content_type, content_type)},
{:ok, payload} <- Message.Payload.decode_into(payload_for, payload),
message <- %Message{message | payload: payload},
:ok <- module.handle_deliver(channel, message) do
Logger.debug(fn -> "Consuming message #{delivery_tag}" end)
consume_ack(consumer_ack, channel.amqp_channel, delivery_tag)
else
{:reject, reason} ->
AMQP.Basic.reject(channel.amqp_channel, delivery_tag, requeue: false)
Logger.debug(fn -> "Rejected message #{delivery_tag}: #{inspect(reason)}" end)
reason ->
AMQP.Basic.reject(channel.amqp_channel, delivery_tag, requeue: not redelivered)
Logger.debug(fn -> "Requeued message #{delivery_tag}: #{inspect(reason)}" end)
end
rescue
exception ->
AMQP.Basic.reject(channel.amqp_channel, delivery_tag, requeue: not redelivered)
Logger.error("Rejected message #{delivery_tag}: #{inspect(exception)}")
end
defp consume_ack(true = _consumer_ack, amqp_channel, delivery_tag) do
# AMQP.Basic.ack/2 returns :ok on success; an error tuple is also
# truthy, so an `if` here could never reach its else branch.
case AMQP.Basic.ack(amqp_channel, delivery_tag) do
:ok ->
Logger.debug(fn -> "Consumed message #{delivery_tag} successfully, ACK sent" end)
:ok
error ->
Logger.debug(fn -> "ACK failed for message #{delivery_tag}: #{inspect(error)}" end)
:error
end
end
defp consume_ack(false = _consumer_ack, _amqp_channel, delivery_tag) do
Logger.debug(fn -> "Consumed message #{delivery_tag}, ACK not required" end)
:ok
end
def connect(_info, %{configuration: configuration} = state) do
module = Keyword.get(configuration, :module)
with channels <- Keyword.get(configuration, :channels, []),
configuration <- Keyword.merge(@connection_default_params, configuration),
{:ok, connection} <- AMQP.Connection.open(configuration),
channels <- Enum.map(channels, &Channel.create(connection, &1)) do
Process.monitor(connection.pid)
{:ok, %{state | module: module, channels: channels, connection: connection}}
else
{:error, error} ->
Logger.error(fn ->
"Connection error: #{error} for #{module}, backing off for #{@backoff}"
end)
{:backoff, @backoff, state}
end
end
defp cleanup_configuration(configuration) do
with :ok <- check_mandatory_params(configuration, [:module]),
{uri, configuration} <-
Keyword.get_and_update(configuration, :uri, fn uri ->
{map_uri(uri), :pop}
end),
configuration <- Keyword.merge(configuration, uri),
{_, configuration} <-
Keyword.get_and_update(configuration, :host, fn host ->
{host, map_host(host)}
end),
{_, configuration} <-
Keyword.get_and_update(configuration, :port, fn port ->
{port, map_port(port)}
end),
{_, configuration} <-
Keyword.get_and_update(configuration, :virtual_host, fn vhost ->
{vhost, map_vhost(vhost)}
end),
{_, configuration} =
Keyword.get_and_update(configuration, :auth_mechanisms, fn
mechanisms when is_list(mechanisms) ->
{mechanisms, Enum.map(mechanisms, &map_auth_mechanism(&1))}
_ ->
:pop
end) do
{:ok, configuration}
else
{:error, :missing_params, missing_params} ->
params = Enum.join(missing_params, ", ")
error =
"Error creating connection #{inspect(configuration)}: missing mandatory params: #{
params
}"
Logger.error(error)
{:error, error}
end
end
defp map_uri(nil), do: []
defp map_uri(uri) when is_binary(uri) do
uri
|> URI.parse()
|> map_uri()
end
defp map_uri(%URI{} = uri) do
uri
|> Map.from_struct()
|> Enum.to_list()
|> uri_to_list()
end
defp uri_to_list(uri) when is_list(uri) do
with {path, uri} <- Keyword.pop(uri, :path),
{userinfo, uri} <- Keyword.pop(uri, :userinfo),
uri <- Keyword.drop(uri, [:authority, :query, :fragment, :scheme]),
[username, password] <- map_userinfo(userinfo) do
uri
|> Keyword.put(:virtual_host, map_vhost(path))
|> Keyword.put(:username, username)
|> Keyword.put(:password, password)
|> Enum.reject(fn {_k, v} -> v === nil end)
end
end
defp map_userinfo(userinfo) when is_binary(userinfo) do
parts =
userinfo
|> String.split(":", parts: 2)
[Enum.at(parts, 0), Enum.at(parts, 1)]
end
defp map_userinfo(_), do: [nil, nil]
defp map_vhost(nil), do: "/"
defp map_vhost(path) do
case String.replace_leading(path, "/", "") do
"" -> "/"
vhost -> vhost
end
end
defp map_auth_mechanism(:amqplain), do: &:amqp_auth_mechanisms.amqplain/3
defp map_auth_mechanism(:external), do: &:amqp_auth_mechanisms.external/3
defp map_auth_mechanism(:plain), do: &:amqp_auth_mechanisms.plain/3
defp map_auth_mechanism(auth_mechanism), do: auth_mechanism
defp map_host(nil), do: @default_rabbitmq_host
defp map_host(host) when is_binary(host), do: String.to_charlist(host)
defp map_host(host), do: host
defp map_port(nil), do: @default_rabbitmq_port
defp map_port(port) when is_binary(port), do: String.to_integer(port)
defp map_port(port), do: port
end
|
lib/lapin/connection.ex
| 0.886107
| 0.471649
|
connection.ex
|
starcoder
|
defmodule AWS.Support do
@moduledoc """
AWS Support
The AWS Support API reference is intended for programmers who need detailed
information about the AWS Support operations and data types. This service
enables you to manage your AWS Support cases programmatically. It uses HTTP
methods that return results in JSON format.
The AWS Support service also exposes a set of [Trusted
Advisor](http://aws.amazon.com/premiumsupport/trustedadvisor/) features.
You can retrieve a list of checks and their descriptions, get check
results, specify checks to refresh, and get the refresh status of checks.
The following list describes the AWS Support case management operations:
* **Service names, issue categories, and available severity levels.** The
`DescribeServices` and `DescribeSeverityLevels` operations return AWS
service names, service codes, service categories, and problem severity
levels. You use these values when you call the `CreateCase` operation.
* **Case creation, case details, and case resolution.** The
`CreateCase`, `DescribeCases`, `DescribeAttachment`, and `ResolveCase`
operations create AWS Support cases, retrieve information about cases, and
resolve cases.
* **Case communication.** The `DescribeCommunications`,
`AddCommunicationToCase`, and `AddAttachmentsToSet` operations retrieve and
add communications and attachments to AWS Support cases.
The following list describes the operations available from the
AWS Support service for Trusted Advisor:
* `DescribeTrustedAdvisorChecks` returns the list of checks that
run against your AWS resources.
* Using the `checkId` for a specific check returned by
`DescribeTrustedAdvisorChecks`, you can call
`DescribeTrustedAdvisorCheckResult` to obtain the results for the check you
specified.
* `DescribeTrustedAdvisorCheckSummaries` returns summarized
results for one or more Trusted Advisor checks.
* `RefreshTrustedAdvisorCheck` requests that Trusted Advisor rerun
a specified check.
* `DescribeTrustedAdvisorCheckRefreshStatuses` reports the refresh
status of one or more checks.
For authentication of requests, AWS Support uses [Signature
Version 4 Signing
Process](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
See [About the AWS Support
API](http://docs.aws.amazon.com/awssupport/latest/user/Welcome.html) in the
*AWS Support User Guide* for information about how to use this service to
create and manage your support cases, and how to call Trusted Advisor for
results of checks on your resources.
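All operations in this module take a client, an input map, and an optional
keyword list. A usage sketch (how the client is constructed is assumed
here; see the library documentation):

    AWS.Support.describe_services(client, %{})
    AWS.Support.describe_cases(client, %{"includeResolvedCases" => false})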
"""
@doc """
Adds one or more attachments to an attachment set. If an `attachmentSetId`
is not specified, a new attachment set is created, and the ID of the set is
returned in the response. If an `attachmentSetId` is specified, the
attachments are added to the specified set, if it exists.
An attachment set is a temporary container for attachments that are to be
added to a case or case communication. The set is available for one hour
after it is created; the `expiryTime` returned in the response indicates
when the set expires. The maximum number of attachments in a set is 3, and
the maximum size of any attachment in the set is 5 MB.
"""
def add_attachments_to_set(client, input, options \\ []) do
request(client, "AddAttachmentsToSet", input, options)
end
@doc """
Adds additional customer communication to an AWS Support case. You use the
`caseId` value to identify the case to add communication to. You can list a
set of email addresses to copy on the communication using the
`ccEmailAddresses` value. The `communicationBody` value contains the text
of the communication.
The response indicates the success or failure of the request.
This operation implements a subset of the features of the AWS Support
Center.
"""
def add_communication_to_case(client, input, options \\ []) do
request(client, "AddCommunicationToCase", input, options)
end
@doc """
Creates a new case in the AWS Support Center. This operation is modeled on
the behavior of the AWS Support Center [Create
Case](https://console.aws.amazon.com/support/home#/case/create) page. Its
parameters require you to specify the following information:
* **issueType.** The type of issue for the case. You can specify
either "customer-service" or "technical." If you do not indicate a value,
the default is "technical."
* **serviceCode.** The code for an AWS service. You obtain the
`serviceCode` by calling `DescribeServices`.
* **categoryCode.** The category for the service defined for the
`serviceCode` value. You also obtain the category code for a service by
calling `DescribeServices`. Each AWS service defines its own set of
category codes.
* **severityCode.** A value that indicates the urgency of the
case, which in turn determines the response time according to your service
level agreement with AWS Support. You obtain the SeverityCode by calling
`DescribeSeverityLevels`.
* **subject.** The **Subject** field on the AWS Support Center
[Create Case](https://console.aws.amazon.com/support/home#/case/create)
page.
* **communicationBody.** The **Description** field on the AWS
Support Center [Create
Case](https://console.aws.amazon.com/support/home#/case/create) page.
* **attachmentSetId.** The ID of a set of attachments that has
been created by using `AddAttachmentsToSet`.
* **language.** The human language in which AWS Support handles
the case. English and Japanese are currently supported.
* **ccEmailAddresses.** The AWS Support Center **CC** field on the
[Create Case](https://console.aws.amazon.com/support/home#/case/create)
page. You can list email addresses to be copied on any correspondence about
the case. The account that opens the case is already identified by passing
the AWS Credentials in the HTTP POST method or in a method or function call
from one of the programming languages supported by an [AWS
SDK](http://aws.amazon.com/tools/).
Note: to add additional communication or attachments to an
existing case, use `AddCommunicationToCase`.
A successful `CreateCase` request returns an AWS Support case
number. Case numbers are used by the `DescribeCases` operation to retrieve
existing AWS Support cases.
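For illustration, a minimal request map built from the parameters above
(a sketch; the values are placeholders):

    input = %{
      "issueType" => "technical",
      "serviceCode" => "amazon-ec2",
      "categoryCode" => "instance-issue",
      "severityCode" => "low",
      "subject" => "Instance unreachable",
      "communicationBody" => "Description of the problem..."
    }
    AWS.Support.create_case(client, input)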
"""
def create_case(client, input, options \\ []) do
request(client, "CreateCase", input, options)
end
@doc """
Returns the attachment that has the specified ID. Attachment IDs are
generated by the case management system when you add an attachment to a
case or case communication. Attachment IDs are returned in the
`AttachmentDetails` objects that are returned by the
`DescribeCommunications` operation.
"""
def describe_attachment(client, input, options \\ []) do
request(client, "DescribeAttachment", input, options)
end
@doc """
Returns a list of cases that you specify by passing one or more case IDs.
In addition, you can filter the cases by date by setting values for the
`afterTime` and `beforeTime` request parameters. You can set values for the
`includeResolvedCases` and `includeCommunications` request parameters to
control how much information is returned.
Case data is available for 12 months after creation. If a case was created
more than 12 months ago, a request for data might cause an error.
The response returns the following in JSON format:
* One or more `CaseDetails` data types.
* One or more `nextToken` values, which specify where to paginate
the returned records represented by the `CaseDetails` objects.
"""
def describe_cases(client, input, options \\ []) do
request(client, "DescribeCases", input, options)
end
@doc """
Returns communications (and attachments) for one or more support cases. You
can use the `afterTime` and `beforeTime` parameters to filter by date. You
can use the `caseId` parameter to restrict the results to a particular
case.
Case data is available for 12 months after creation. If a case was created
more than 12 months ago, a request for data might cause an error.
You can use the `maxResults` and `nextToken` parameters to control the
pagination of the result set. Set `maxResults` to the number of cases you
want displayed on each page, and use `nextToken` to specify the resumption
of pagination.
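
## Example

A sketch of paginating communications, assuming `next_token` holds a
`nextToken` value from a previous response (the case ID is illustrative):

    AWS.Support.describe_communications(client, %{
      "caseId" => "case-12345678910-2013-c4c1d2bf33c5cf47",
      "maxResults" => 10,
      "nextToken" => next_token
    })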
"""
def describe_communications(client, input, options \\ []) do
request(client, "DescribeCommunications", input, options)
end
@doc """
Returns the current list of AWS services and a list of service categories
that applies to each one. You then use service names and categories in your
`CreateCase` requests. Each AWS service has its own set of categories.
The service codes and category codes correspond to the values that are
displayed in the **Service** and **Category** drop-down lists on the AWS
Support Center [Create
Case](https://console.aws.amazon.com/support/home#/case/create) page. The
values in those fields, however, do not necessarily match the service codes
and categories returned by the `DescribeServices` request. Always use the
service codes and categories obtained programmatically. This practice
ensures that you always have the most recent set of service and category
codes.
"""
def describe_services(client, input, options \\ []) do
request(client, "DescribeServices", input, options)
end
@doc """
Returns the list of severity levels that you can assign to an AWS Support
case. The severity level for a case is also a field in the `CaseDetails`
data type included in any `CreateCase` request.
"""
def describe_severity_levels(client, input, options \\ []) do
request(client, "DescribeSeverityLevels", input, options)
end
@doc """
Returns the refresh status of the Trusted Advisor checks that have the
specified check IDs. Check IDs can be obtained by calling
`DescribeTrustedAdvisorChecks`.
Note: Some checks are refreshed automatically, and their refresh statuses
cannot be retrieved by using this operation. Use of the
`DescribeTrustedAdvisorCheckRefreshStatuses` operation for these checks
causes an `InvalidParameterValue` error.
"""
def describe_trusted_advisor_check_refresh_statuses(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckRefreshStatuses", input, options)
end
@doc """
Returns the results of the Trusted Advisor check that has the specified
check ID. Check IDs can be obtained by calling
`DescribeTrustedAdvisorChecks`.
The response contains a `TrustedAdvisorCheckResult` object, which contains
these three objects:
* `TrustedAdvisorCategorySpecificSummary`
* `TrustedAdvisorResourceDetail`
* `TrustedAdvisorResourcesSummary`

In addition, the response contains these fields:

* **status.** The alert status of the check: "ok" (green), "warning"
(yellow), "error" (red), or "not_available".
* **timestamp.** The time of the last refresh of the check.
* **checkId.** The unique identifier for the check.
"""
def describe_trusted_advisor_check_result(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckResult", input, options)
end
@doc """
Returns the summaries of the results of the Trusted Advisor checks that
have the specified check IDs. Check IDs can be obtained by calling
`DescribeTrustedAdvisorChecks`.
The response contains an array of `TrustedAdvisorCheckSummary` objects.
"""
def describe_trusted_advisor_check_summaries(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckSummaries", input, options)
end
@doc """
Returns information about all available Trusted Advisor checks, including
name, ID, category, description, and metadata. You must specify a language
code; English ("en") and Japanese ("ja") are currently supported. The
response contains a `TrustedAdvisorCheckDescription` for each check.
"""
def describe_trusted_advisor_checks(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorChecks", input, options)
end
@doc """
Requests a refresh of the Trusted Advisor check that has the specified
check ID. Check IDs can be obtained by calling
`DescribeTrustedAdvisorChecks`.
Note: Some checks are refreshed automatically, and they cannot be
refreshed by using this operation. Use of the `RefreshTrustedAdvisorCheck`
operation for these checks causes an `InvalidParameterValue` error.

The response contains a `TrustedAdvisorCheckRefreshStatus` object, which
contains these fields:

* **status.** The refresh status of the check: "none", "enqueued",
"processing", "success", or "abandoned".
* **millisUntilNextRefreshable.** The amount of time, in milliseconds,
until the check is eligible for refresh.
* **checkId.** The unique identifier for the check.
"""
def refresh_trusted_advisor_check(client, input, options \\ []) do
request(client, "RefreshTrustedAdvisorCheck", input, options)
end
@doc """
Takes a `caseId` and returns the initial state of the case along with the
state of the case after the call to `ResolveCase` has completed.
"""
def resolve_case(client, input, options \\ []) do
request(client, "ResolveCase", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, HTTPoison.Response.t} |
{:error, {binary(), binary()}} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "support"}
host = get_host("support", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSSupport_20130415.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/support.ex
| 0.875121
| 0.645085
|
support.ex
|
starcoder
|
defmodule DoomFire.FireState do
use GenServer
def update_whole_state(column, {state, row, update_function}) do
new_state = update_function.(row, column, state)
{new_state, row, update_function}
end
def update_row(row, {state, width, update_function}) do
{new_state, _, _} =
Enum.reduce(0..width, {state, row, update_function}, &update_whole_state/2)
{new_state, width, update_function}
end
def update_state({state, width, height}, update_function) do
{new_state, _, _} =
Enum.reduce(0..height, {state, width, update_function}, &update_row/2)
new_state
end
def update_state_initiate({state, width, height}) do
update_fun = fn row, column, state ->
# When visiting row 0, compute an index near the bottom of the matrix
# and seed that pixel with the maximum fire intensity (36)
if row == 0 do
index = column + (width - 1) * (height - 1) - width
index_row = Kernel.trunc(index / (width - 1))
index_column =
cond do
index_row > 0 -> Kernel.trunc(rem(index, index_row * (width - 1)))
true -> 0
end
DoomFire.Utils.TupleMatrix.put(state, index_row, index_column, 36)
else
state
end
end
update_state({state, width, height}, update_fun)
end
def generate_decay() do
# Random decay in 0..3 (`:rand.uniform(100)` returns 1..100)
Kernel.trunc(:rand.uniform(100) * 3 / 100)
end
def update_state_fire_propagation({state, width, height}) do
update_fun = fn row, column, state ->
if row + 1 < height - 1 && column < width - 1 do
value = row * (width - 1) + column
below_value = DoomFire.Utils.TupleMatrix.get(state, row + 1, column)
decay = generate_decay()
# Clamp the decayed pixel index and the propagated intensity at zero
update_pixel_decay = max(value - decay, 0)
below_value_intensity = max(below_value - decay, 0)
upd_row = Kernel.trunc(update_pixel_decay / (width - 1))
upd_column =
cond do
upd_row > 0 -> Kernel.trunc(rem(update_pixel_decay, upd_row * (width - 1)))
true -> 0
end
DoomFire.Utils.TupleMatrix.put(state, upd_row, upd_column, below_value_intensity)
else
state
end
end
update_state({state, width, height}, update_fun)
end
@impl true
def init(_) do
{:ok, width} = :io.columns()
{:ok, height} = :io.rows()
{:ok,
{List.duplicate(List.duplicate(0, width + 1) |> List.to_tuple(), height + 1)
|> List.to_tuple(), width, height}}
end
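# A hedged usage sketch (requires a terminal, since `init/1` reads the
# window size via `:io.columns/0` and `:io.rows/0`; names are illustrative):
#
#   {:ok, pid} = GenServer.start_link(DoomFire.FireState, nil)
#   GenServer.cast(pid, :initiate)
#   {state, _width, _height} = GenServer.call(pid, :state)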
@impl true
def handle_call(:state, _from, {state, width, height}) do
GenServer.cast(self(), :update)
{:reply, {state, width, height}, {state, width, height}}
end
@impl true
def handle_cast(:update, {state, width, height}) do
state = update_state_fire_propagation({state, width, height})
{:noreply, {state, width, height}}
end
def handle_cast(:initiate, {state, width, height}) do
state = update_state_initiate({state, width, height})
{:noreply, {state, width, height}}
end
end
|
playground/elixir-algorithm-render-in-terminal/lib/fire_state.ex
| 0.668447
| 0.459561
|
fire_state.ex
|
starcoder
|
defmodule ExImageInfo.Types.TIFF do
@moduledoc false
@behaviour ExImageInfo.Detector
# https://en.wikipedia.org/wiki/TIFF
require Bitwise
@mime "image/tiff"
@ftype_ii "TIFFII"
@ftype_mm "TIFFMM"
@signature_ii << "II", 0x2A00::size(16) >>
@signature_mm << "MM", 0x002A::size(16) >>
## Public API
def seems?(<< @signature_ii, _rest::binary >>), do: true
def seems?(<< @signature_mm, _rest::binary >>), do: true
def seems?(_), do: false
def info(<< @signature_ii, rest::binary >>), do: parse_tiff(false, rest, 4 + byte_size rest)
def info(<< @signature_mm, rest::binary >>), do: parse_tiff(true, rest, 4 + byte_size rest)
def info(_), do: nil
def type(<< @signature_ii, _rest::binary >>), do: {@mime, @ftype_ii}
def type(<< @signature_mm, _rest::binary >>), do: {@mime, @ftype_mm}
def type(_), do: nil
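# A hedged usage sketch (the file path and the reported dimensions are
# illustrative):
#
#   binary = File.read!("photo.tif")
#   ExImageInfo.Types.TIFF.seems?(binary) #=> true
#   ExImageInfo.Types.TIFF.info(binary)   #=> {"image/tiff", 800, 600, "TIFFII"}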
## Private
defp parse_tiff(false = b_e, << idf_off::little-size(32), rest::binary >>, fsize) do
parse_tiff_block(b_e, idf_off, rest, fsize)
end
defp parse_tiff(true = b_e, << idf_off::size(32), rest::binary >>, fsize) do
parse_tiff_block(b_e, idf_off, rest, fsize)
end
defp parse_tiff(_, _, _), do: nil
defp parse_tiff_block(b_e, idf_off, rest, fsize) do
buff_size = 1024
buff_size = if (idf_off + buff_size) > fsize, do: fsize - idf_off - 10, else: buff_size
idf_off_pos = idf_off - 4 - 4 # @signature_xx (4), idf_off::size(32) (4)
case rest do
<< _skip1::bytes-size(idf_off_pos), _skip2::bytes-size(2), buff_idf::bytes-size(buff_size), _::binary >> ->
tags = parse_tiff_tags(b_e, buff_idf, %{})
ftype = if b_e == false, do: @ftype_ii, else: @ftype_mm
w = Map.get(tags, 256)
h = Map.get(tags, 257)
if w != nil and h != nil, do: {@mime, w, h, ftype}, else: nil
_ -> nil
end
end
defp parse_tiff_nexttags(<< _skip::bytes-size(12), rest::binary >>) do
if byte_size(rest) > 12, do: rest, else: <<>>
end
defp parse_tiff_nexttags(_rest), do: <<>>
defp parse_tiff_tags(_b_e, <<>>, tags) do
tags
end
defp parse_tiff_tags(true = b_e, << code::size(16), type::size(16), length::size(32), low::size(16), high::size(16), rest::binary >> = buff, tags) do
parse_tiff_tags(b_e, code, type, length, low, high, rest, buff, tags)
end
defp parse_tiff_tags(false = b_e, << code::little-size(16), type::little-size(16), length::little-size(32), low::little-size(16), high::little-size(16), rest::binary >> = buff, tags) do
parse_tiff_tags(b_e, code, type, length, low, high, rest, buff, tags)
end
defp parse_tiff_tags(b_e, code, type, length, low, high, << _rest::binary >>, buff, tags) do
if code == 0 do
tags
else
tags = if length == 1 and (type == 3 or type == 4) do
val = (Bitwise.<<< high, 16) + low
Map.put(tags, code, val)
else
tags
end
buff = parse_tiff_nexttags(buff)
parse_tiff_tags(b_e, buff, tags)
end
end
end
|
lib/ex_image_info/types/tiff.ex
| 0.636579
| 0.475605
|
tiff.ex
|
starcoder
|
defmodule ScrapyCloudEx.Endpoints.Storage.Logs do
@moduledoc """
Wraps the [Logs](https://doc.scrapinghub.com/api/logs.html) endpoint.
The logs API lets you work with logs from your crawls.
"""
import ScrapyCloudEx.Endpoints.Guards
alias ScrapyCloudEx.Endpoints.Helpers
alias ScrapyCloudEx.Endpoints.Storage.QueryParams
alias ScrapyCloudEx.HttpAdapter.RequestConfig
@base_url "https://storage.scrapinghub.com/logs"
@typedoc """
Integer log level.
| Value | Log level |
| ----- | --------- |
| 10 | DEBUG |
| 20 | INFO |
| 30 | WARNING |
| 40 | ERROR |
| 50 | CRITICAL |
"""
@type log_level :: 10 | 20 | 30 | 40 | 50
@typedoc """
A log object.
Map with the following keys:
* `"message"` - the log message (`t:String.t/0`).
* `"level"` - the integer log level (`t:log_level/0`).
* `"time"` - the UNIX timestamp of the message, in milliseconds (`t:integer/0`).
"""
@type log_object :: %{required(String.t()) => integer() | String.t() | log_level()}
@doc """
Retrieves logs for a given job.
The `composite_id` must have at least 3 sections (i.e. refer to a job).
The following parameters are supported in the `params` argument:
* `:format` - the [format](ScrapyCloudEx.Endpoints.Storage.html#module-format) to be used
for returning results. Can be `:json`, `:xml`, `:csv`, `:text`, or `:jl`. Defaults to `:json`.
* `:pagination` - [pagination parameters](ScrapyCloudEx.Endpoints.Storage.html#module-pagination).
* `:meta` - [meta parameters](ScrapyCloudEx.Endpoints.Storage.html#module-meta-parameters) to show.
The `opts` value is documented [here](ScrapyCloudEx.Endpoints.html#module-options).
See docs [here](https://doc.scrapinghub.com/api/logs.html#logs-project-id-spider-id-job-id) (GET method).
## Example
```
ScrapyCloudEx.Endpoints.Storage.Logs.get("API_KEY", "14/13/12")
```
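
A sketch with the `:format` parameter from the list above (the API key and
composite ID are illustrative, as in the example above):

```
ScrapyCloudEx.Endpoints.Storage.Logs.get("API_KEY", "14/13/12", format: :csv)
```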
"""
@spec get(String.t(), String.t(), Keyword.t(), Keyword.t()) ::
ScrapyCloudEx.result([log_object()])
def get(api_key, composite_id, params \\ [], opts \\ [])
when is_api_key(api_key)
when is_binary(composite_id) and composite_id != ""
when is_list(params)
when is_list(opts) do
with %QueryParams{error: nil} = query_params <- params |> QueryParams.from_keywords() do
query_string = query_params |> QueryParams.to_query()
base_url = [@base_url, composite_id] |> Enum.join("/")
RequestConfig.new()
|> RequestConfig.put(:api_key, api_key)
|> RequestConfig.put(:url, "#{base_url}?#{query_string}")
|> RequestConfig.put(:headers, Keyword.get(opts, :headers, []))
|> RequestConfig.put(:opts, opts)
|> Helpers.make_request()
else
%QueryParams{error: error} -> {:error, error}
error -> {:error, error}
end
end
end
|
lib/endpoints/storage/logs.ex
| 0.815122
| 0.691484
|
logs.ex
|
starcoder
|
defmodule AWS.WorkSpaces do
@moduledoc """
Amazon WorkSpaces Service
Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft
Windows and Amazon Linux desktops for your users.
"""
@doc """
Associates the specified connection alias with the specified directory to enable
cross-Region redirection.
For more information, see [ Cross-Region Redirection for Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
"""
def associate_connection_alias(client, input, options \\ []) do
request(client, "AssociateConnectionAlias", input, options)
end
@doc """
Associates the specified IP access control group with the specified directory.
"""
def associate_ip_groups(client, input, options \\ []) do
request(client, "AssociateIpGroups", input, options)
end
@doc """
Adds one or more rules to the specified IP access control group.
This action gives users permission to access their WorkSpaces from the CIDR
address ranges specified in the rules.
"""
def authorize_ip_rules(client, input, options \\ []) do
request(client, "AuthorizeIpRules", input, options)
end
@doc """
Copies the specified image from the specified Region to the current Region.
"""
def copy_workspace_image(client, input, options \\ []) do
request(client, "CopyWorkspaceImage", input, options)
end
@doc """
Creates the specified connection alias for use with cross-Region redirection.
For more information, see [ Cross-Region Redirection for Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def create_connection_alias(client, input, options \\ []) do
request(client, "CreateConnectionAlias", input, options)
end
@doc """
Creates an IP access control group.
An IP access control group provides you with the ability to control the IP
addresses from which users are allowed to access their WorkSpaces. To specify
the CIDR address ranges, add rules to your IP access control group and then
associate the group with your directory. You can add rules when you create the
group or at any time using `AuthorizeIpRules`.
There is a default IP access control group associated with your directory. If
you don't associate an IP access control group with your directory, the default
group is used. The default group includes a default rule that allows users to
access their WorkSpaces from anywhere. You cannot modify the default IP access
control group for your directory.
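
## Example

A hedged sketch; the rule shape follows the WorkSpaces `CreateIpGroup`
API, and the group name and CIDR values are illustrative:

    AWS.WorkSpaces.create_ip_group(client, %{
      "GroupName" => "office-ips",
      "GroupDesc" => "Allowed office ranges",
      "UserRules" => [%{"ipRule" => "203.0.113.0/24", "ruleDesc" => "HQ"}]
    })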
"""
def create_ip_group(client, input, options \\ []) do
request(client, "CreateIpGroup", input, options)
end
@doc """
Creates the specified tags for the specified WorkSpaces resource.
"""
def create_tags(client, input, options \\ []) do
request(client, "CreateTags", input, options)
end
@doc """
Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are created.
"""
def create_workspaces(client, input, options \\ []) do
request(client, "CreateWorkspaces", input, options)
end
@doc """
Deletes the specified connection alias.
For more information, see [ Cross-Region Redirection for Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
**If you will no longer be using a fully qualified domain name (FQDN) as the
registration code for your WorkSpaces users, you must take certain precautions
to prevent potential security issues.** For more information, see [ Security Considerations if You Stop Using Cross-Region
Redirection](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html#cross-region-redirection-security-considerations).
To delete a connection alias that has been shared, the shared account must first
disassociate the connection alias from any directories it has been associated
with. Then you must unshare the connection alias from the account it has been
shared with. You can delete a connection alias only after it is no longer shared
with any accounts or associated with any directories.
"""
def delete_connection_alias(client, input, options \\ []) do
request(client, "DeleteConnectionAlias", input, options)
end
@doc """
Deletes the specified IP access control group.
You cannot delete an IP access control group that is associated with a
directory.
"""
def delete_ip_group(client, input, options \\ []) do
request(client, "DeleteIpGroup", input, options)
end
@doc """
Deletes the specified tags from the specified WorkSpaces resource.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Deletes the specified image from your account.
To delete an image, you must first delete any bundles that are associated with
the image and unshare the image if it is shared with other accounts.
"""
def delete_workspace_image(client, input, options \\ []) do
request(client, "DeleteWorkspaceImage", input, options)
end
@doc """
Deregisters the specified directory.
This operation is asynchronous and returns before the WorkSpace directory is
deregistered. If any WorkSpaces are registered to this directory, you must
remove them before you can deregister the directory.
"""
def deregister_workspace_directory(client, input, options \\ []) do
request(client, "DeregisterWorkspaceDirectory", input, options)
end
@doc """
Retrieves a list that describes the configuration of Bring Your Own License
(BYOL) for the specified account.
"""
def describe_account(client, input, options \\ []) do
request(client, "DescribeAccount", input, options)
end
@doc """
Retrieves a list that describes modifications to the configuration of Bring Your
Own License (BYOL) for the specified account.
"""
def describe_account_modifications(client, input, options \\ []) do
request(client, "DescribeAccountModifications", input, options)
end
@doc """
Retrieves a list that describes one or more specified Amazon WorkSpaces clients.
"""
def describe_client_properties(client, input, options \\ []) do
request(client, "DescribeClientProperties", input, options)
end
@doc """
Describes the permissions that the owner of a connection alias has granted to
another AWS account for the specified connection alias.
For more information, see [ Cross-Region Redirection for Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def describe_connection_alias_permissions(client, input, options \\ []) do
request(client, "DescribeConnectionAliasPermissions", input, options)
end
@doc """
Retrieves a list that describes the connection aliases used for cross-Region
redirection.
For more information, see [ Cross-Region Redirection for Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def describe_connection_aliases(client, input, options \\ []) do
request(client, "DescribeConnectionAliases", input, options)
end
@doc """
Describes one or more of your IP access control groups.
"""
def describe_ip_groups(client, input, options \\ []) do
request(client, "DescribeIpGroups", input, options)
end
@doc """
Describes the specified tags for the specified WorkSpaces resource.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Retrieves a list that describes the available WorkSpace bundles.
You can filter the results using either bundle ID or owner, but not both.
"""
def describe_workspace_bundles(client, input, options \\ []) do
request(client, "DescribeWorkspaceBundles", input, options)
end
@doc """
Describes the available directories that are registered with Amazon WorkSpaces.
"""
def describe_workspace_directories(client, input, options \\ []) do
request(client, "DescribeWorkspaceDirectories", input, options)
end
@doc """
Describes the permissions that the owner of an image has granted to other AWS
accounts for an image.
"""
def describe_workspace_image_permissions(client, input, options \\ []) do
request(client, "DescribeWorkspaceImagePermissions", input, options)
end
@doc """
Retrieves a list that describes one or more specified images, if the image
identifiers are provided.
Otherwise, all images in the account are described.
"""
def describe_workspace_images(client, input, options \\ []) do
request(client, "DescribeWorkspaceImages", input, options)
end
@doc """
Describes the snapshots for the specified WorkSpace.
"""
def describe_workspace_snapshots(client, input, options \\ []) do
request(client, "DescribeWorkspaceSnapshots", input, options)
end
@doc """
Describes the specified WorkSpaces.
You can filter the results by using the bundle identifier, directory identifier,
or owner, but you can specify only one filter at a time.
"""
def describe_workspaces(client, input, options \\ []) do
request(client, "DescribeWorkspaces", input, options)
end
@doc """
Describes the connection status of the specified WorkSpaces.
"""
def describe_workspaces_connection_status(client, input, options \\ []) do
request(client, "DescribeWorkspacesConnectionStatus", input, options)
end
@doc """
Disassociates a connection alias from a directory.
Disassociating a connection alias disables cross-Region redirection between two
directories in different AWS Regions. For more information, see [ Cross-Region Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
"""
def disassociate_connection_alias(client, input, options \\ []) do
request(client, "DisassociateConnectionAlias", input, options)
end
@doc """
Disassociates the specified IP access control group from the specified
directory.
"""
def disassociate_ip_groups(client, input, options \\ []) do
request(client, "DisassociateIpGroups", input, options)
end
@doc """
Imports the specified Windows 10 Bring Your Own License (BYOL) image into Amazon
WorkSpaces.
The image must be an already licensed Amazon EC2 image that is in your AWS
account, and you must own the image. For more information about creating BYOL
images, see [ Bring Your Own Windows Desktop Licenses](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html).
"""
def import_workspace_image(client, input, options \\ []) do
request(client, "ImportWorkspaceImage", input, options)
end
@doc """
Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you
can use for the network management interface when you enable Bring Your Own
License (BYOL).
The management network interface is connected to a secure Amazon WorkSpaces
management network. It is used for interactive streaming of the WorkSpace
desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage
the WorkSpace.
"""
def list_available_management_cidr_ranges(client, input, options \\ []) do
request(client, "ListAvailableManagementCidrRanges", input, options)
end
@doc """
Migrates a WorkSpace from one operating system or bundle type to another, while
retaining the data on the user volume.
The migration process recreates the WorkSpace by using a new root volume from
the target bundle image and the user volume from the last available snapshot of
the original WorkSpace. During migration, the original `D:\Users\%USERNAME%`
user profile folder is renamed to
`D:\Users\%USERNAME%MMddyyTHHmmss%.NotMigrated`. A new `D:\Users\%USERNAME%\`
folder is generated by the new OS. Certain files in the old user profile are
moved to the new user profile.
For available migration scenarios, details about what happens during migration,
and best practices, see [Migrate a WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/migrate-workspaces.html).
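
## Example

A hedged sketch; the WorkSpace and bundle IDs are illustrative:

    AWS.WorkSpaces.migrate_workspace(client, %{
      "SourceWorkspaceId" => "ws-0123456789abc",
      "BundleId" => "wsb-0123456789abc"
    })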
"""
def migrate_workspace(client, input, options \\ []) do
request(client, "MigrateWorkspace", input, options)
end
@doc """
Modifies the configuration of Bring Your Own License (BYOL) for the specified
account.
"""
def modify_account(client, input, options \\ []) do
request(client, "ModifyAccount", input, options)
end
@doc """
Modifies the properties of the specified Amazon WorkSpaces clients.
"""
def modify_client_properties(client, input, options \\ []) do
request(client, "ModifyClientProperties", input, options)
end
@doc """
Modifies the self-service WorkSpace management capabilities for your users.
For more information, see [Enable Self-Service WorkSpace Management Capabilities for Your
Users](https://docs.aws.amazon.com/workspaces/latest/adminguide/enable-user-self-service-workspace-management.html).
"""
def modify_selfservice_permissions(client, input, options \\ []) do
request(client, "ModifySelfservicePermissions", input, options)
end
@doc """
Specifies which devices and operating systems users can use to access their
WorkSpaces.
For more information, see [ Control Device Access](https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html#control-device-access).
"""
def modify_workspace_access_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceAccessProperties", input, options)
end
@doc """
Modify the default properties used to create WorkSpaces.
"""
def modify_workspace_creation_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceCreationProperties", input, options)
end
@doc """
Modifies the specified WorkSpace properties.
For important information about how to modify the size of the root and user
volumes, see [ Modify a WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html).
"""
def modify_workspace_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceProperties", input, options)
end
@doc """
Sets the state of the specified WorkSpace.
To maintain a WorkSpace without being interrupted, set the WorkSpace state to
`ADMIN_MAINTENANCE`. WorkSpaces in this state do not respond to requests to
reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is
not stopped. Users cannot log into a WorkSpace in the `ADMIN_MAINTENANCE` state.
"""
def modify_workspace_state(client, input, options \\ []) do
request(client, "ModifyWorkspaceState", input, options)
end
@doc """
Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is `AVAILABLE` or `UNHEALTHY`.
This operation is asynchronous and returns before the WorkSpaces have rebooted.
"""
def reboot_workspaces(client, input, options \\ []) do
request(client, "RebootWorkspaces", input, options)
end
@doc """
Rebuilds the specified WorkSpace.
You cannot rebuild a WorkSpace unless its state is `AVAILABLE`, `ERROR`,
`UNHEALTHY`, `STOPPED`, or `REBOOTING`.
Rebuilding a WorkSpace is a potentially destructive action that can result in
the loss of data. For more information, see [Rebuild a WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html).
This operation is asynchronous and returns before the WorkSpaces have been
completely rebuilt.
"""
def rebuild_workspaces(client, input, options \\ []) do
request(client, "RebuildWorkspaces", input, options)
end
@doc """
Registers the specified directory.
This operation is asynchronous and returns before the WorkSpace directory is
registered. If this is the first time you are registering a directory, you will
need to create the workspaces_DefaultRole role before you can register a
directory. For more information, see [ Creating the workspaces_DefaultRole Role](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role).
"""
def register_workspace_directory(client, input, options \\ []) do
request(client, "RegisterWorkspaceDirectory", input, options)
end
@doc """
Restores the specified WorkSpace to its last known healthy state.
You cannot restore a WorkSpace unless its state is ` AVAILABLE`, `ERROR`,
`UNHEALTHY`, or `STOPPED`.
Restoring a WorkSpace is a potentially destructive action that can result in the
loss of data. For more information, see [Restore a WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/restore-workspace.html).
This operation is asynchronous and returns before the WorkSpace is completely
restored.
"""
def restore_workspace(client, input, options \\ []) do
request(client, "RestoreWorkspace", input, options)
end
@doc """
Removes one or more rules from the specified IP access control group.
"""
def revoke_ip_rules(client, input, options \\ []) do
request(client, "RevokeIpRules", input, options)
end
@doc """
Starts the specified WorkSpaces.
You cannot start a WorkSpace unless it has a running mode of `AutoStop` and a
state of `STOPPED`.
"""
def start_workspaces(client, input, options \\ []) do
request(client, "StartWorkspaces", input, options)
end
@doc """
Stops the specified WorkSpaces.
You cannot stop a WorkSpace unless it has a running mode of `AutoStop` and a
state of `AVAILABLE`, `IMPAIRED`, `UNHEALTHY`, or `ERROR`.
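
## Example

A hedged sketch; the request shape follows the WorkSpaces `StopWorkspaces`
API and the WorkSpace ID is illustrative:

    AWS.WorkSpaces.stop_workspaces(client, %{
      "StopWorkspaceRequests" => [%{"WorkspaceId" => "ws-0123456789abc"}]
    })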
"""
def stop_workspaces(client, input, options \\ []) do
request(client, "StopWorkspaces", input, options)
end
@doc """
Terminates the specified WorkSpaces.
Terminating a WorkSpace is a permanent action and cannot be undone. The user's
data is destroyed. If you need to archive any user data, contact Amazon Web
Services before terminating the WorkSpace.
You can terminate a WorkSpace that is in any state except `SUSPENDED`.
This operation is asynchronous and returns before the WorkSpaces have been
completely terminated.
"""
def terminate_workspaces(client, input, options \\ []) do
request(client, "TerminateWorkspaces", input, options)
end
@doc """
Shares or unshares a connection alias with one account by specifying whether
that account has permission to associate the connection alias with a directory.
If the association permission is granted, the connection alias is shared with
that account. If the association permission is revoked, the connection alias is
unshared with the account. For more information, see [ Cross-Region Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
To delete a connection alias that has been shared, the shared
account must first disassociate the connection alias from any directories it has
been associated with. Then you must unshare the connection alias from the
account it has been shared with. You can delete a connection alias only after it
is no longer shared with any accounts or associated with any directories.
"""
def update_connection_alias_permission(client, input, options \\ []) do
request(client, "UpdateConnectionAliasPermission", input, options)
end
@doc """
Replaces the current rules of the specified IP access control group with the
specified rules.
"""
def update_rules_of_ip_group(client, input, options \\ []) do
request(client, "UpdateRulesOfIpGroup", input, options)
end
@doc """
Shares or unshares an image with one account by specifying whether that account
has permission to copy the image.
If the copy image permission is granted, the image is shared with that account.
If the copy image permission is revoked, the image is unshared with the account.
To delete an image that has been shared, you must unshare the image
before you delete it.
Sharing Bring Your Own License (BYOL) images across AWS accounts
isn't supported at this time in the AWS GovCloud (US-West) Region. To share BYOL
images across accounts in the AWS GovCloud (US-West) Region, contact AWS
Support.
"""
def update_workspace_image_permission(client, input, options \\ []) do
request(client, "UpdateWorkspaceImagePermission", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "workspaces"}
host = build_host("workspaces", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "WorkspacesService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/work_spaces.ex
| 0.874279
| 0.49109
|
work_spaces.ex
|
starcoder
|
defmodule ZenMonitor.Local.Connector do
@moduledoc """
`ZenMonitor.Local.Connector` performs a variety of duties. For every remote that the local
node is interested in monitoring processes on, there will be a dedicated
`ZenMonitor.Local.Connector`. This collection of Connectors is managed by a `GenRegistry`
registered under the `ZenMonitor.Local.Connector` atom.
# Connecting and Monitoring the remote `ZenMonitor.Proxy`
Connectors, as their name suggests, connect to the `ZenMonitor.Proxy` on the remote node that they
are responsible for. They do this using standard ERTS Distribution, by invoking the remote
Proxy's ping command. A remote is considered compatible if the ping command returns the `:pong`
atom; otherwise it will be marked incompatible.
Connectors manage their remote node's status in the global node status cache, and provide
facilities for efficient querying of remote status, see `compatibility/1` and
`cached_compatibility/1`
# Batching and Updating the remote `ZenMonitor.Proxy`
When a local process wishes to monitor a remote process, the Connector will be informed of this
fact with a call to `monitor/3`. The Connector is responsible for maintaining a local record of
this monitor for future fan-out and for efficiently batching up these requests to be delivered
to the remote ZenMonitor.Proxy.
# Fan-out of Dead Summaries
Periodically, the `ZenMonitor.Proxy` (technically the `ZenMonitor.Proxy.Batcher`) on the remote
node will send a "Dead Summary". This is a message from the remote that informs the Connector
of all the processes the Connector has monitored that have gone down since the last summary.
The Connector uses its local records to generate a batch of _down dispatches_. These are
messages that look identical to the messages provided by `Process.monitor/1` when a process goes
down. It is sometimes necessary for the original monitoring process to be able to discern
whether the `:DOWN` message originated from ERTS or from ZenMonitor, to aid this, ZenMonitor
will wrap the original reason in a tuple of `{:zen_monitor, original_reason}`.
The fan-out messages are sent to `ZenMonitor.Local` for eventual delivery via
`ZenMonitor.Local.Dispatcher`, see those modules for more information.
# Fan-out of nodedown / ZenMonitor.Proxy down
The Connector is also responsible for monitoring the remote node and dealing with nodedown (or
the node becoming incompatible, either due to the `ZenMonitor.Proxy` crashing or a code change).
If the Connector detects that the remote it is responsible for is down or no longer compatible,
it will fire every established monitor with `{:zen_monitor, :nodedown}`. It uses the same
mechanism as for Dead Summaries, see `ZenMonitor.Local` and `ZenMonitor.Local.Dispatcher` for
more information.
"""
use GenServer
use Instruments.CustomFunctions, prefix: "zen_monitor.local.connector"
alias ZenMonitor.Local
alias ZenMonitor.Local.Tables
@base_penalty 1_000
@maximum_penalty 60_000
@max_attempt :math.ceil(:math.log2(@maximum_penalty))
@chunk_size 5000
@sweep_interval 100
@type t :: __MODULE__
@type compatibility :: :compatible | :incompatible
@type cached_compatibility :: compatibility | :miss | {:expired, integer} | :unavailable
@type death_certificate :: {pid, reason :: any}
@type down_dispatch :: {pid, {:DOWN, reference, :process, pid, {:zen_monitor, any}}}
defmodule State do
@moduledoc """
Maintains the internal state for the Connector
- `monitors` is an ETS table for keeping track of monitors for the purpose of fan-out.
- `remote_node_monitored` is a flag used to track whether or not the remote node has been
monitored
- `remote_proxy_ref` is the monitoring reference of the remote node's ZenMonitor.Proxy
- `remote` is the remote node for which the Connector is responsible.
- `batch` is the queue of instructions pending until the next sweep.
- `length` is the current length of the batch queue (calculating queue length is an O(n)
operation, so it is simpler to track it as elements are added / removed)
"""
@type t :: %__MODULE__{
monitors: :ets.tab(),
remote_node_monitored: boolean(),
remote_proxy_ref: reference() | nil,
remote: node(),
length: integer(),
batch: :queue.queue()
}
defstruct [
:monitors,
:remote,
:remote_proxy_ref,
remote_node_monitored: false,
length: 0,
batch: :queue.new()
]
end
## Client
def start_link(remote) do
GenServer.start_link(__MODULE__, remote)
end
@doc """
Get a connector from the registry by destination
"""
@spec get(target :: ZenMonitor.destination()) :: pid()
def get(target) do
target
|> ZenMonitor.find_node()
|> get_for_node()
end
@doc """
Get a connector from the registry by remote node
"""
@spec get_for_node(remote :: node()) :: pid()
def get_for_node(remote) when is_atom(remote) do
case GenRegistry.lookup(__MODULE__, remote) do
{:ok, connector} ->
connector
{:error, :not_found} ->
{:ok, connector} = GenRegistry.lookup_or_start(__MODULE__, remote, [remote])
connector
end
end
@doc """
Asynchronously monitors a pid.
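
A hedged usage sketch (in practice `ZenMonitor.monitor/1` is the usual
entry point; the pid and ref here are illustrative):

    ref = make_ref()
    ZenMonitor.Local.Connector.monitor(remote_pid, ref, self())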
"""
@spec monitor(target :: ZenMonitor.destination(), ref :: reference(), subscriber :: pid()) ::
:ok
def monitor(target, ref, subscriber) do
target
|> get()
|> GenServer.cast({:monitor, target, ref, subscriber})
end
@doc """
Retrieves all the monitors established between the target and the subscriber
"""
@spec monitors(target :: ZenMonitor.destination(), subscriber :: pid()) :: [reference()]
def monitors(target, subscriber) do
target
|> get()
|> GenServer.call({:monitors, target, subscriber})
end
@doc """
Asynchronously demonitors a pid.
"""
@spec demonitor(target :: ZenMonitor.destination(), ref :: reference()) :: :ok
def demonitor(target, ref) do
target
|> get()
|> GenServer.cast({:demonitor, target, ref})
end
@doc """
Determine the effective compatibility of a remote node
This will attempt a fast client-side lookup in the ETS table. Only a positive `:compatible`
record will result in `:compatible`, otherwise the effective compatibility is `:incompatible`
"""
@spec compatibility(remote :: node()) :: compatibility
def compatibility(remote) do
case cached_compatibility(remote) do
:compatible ->
:compatible
_ ->
:incompatible
end
end
@doc """
Check the cached compatibility status for a remote node
This will only perform a fast client-side lookup in the ETS table. If an authoritative entry is
found it will be returned (either `:compatible`, `:incompatible`, or `:unavailable`). If no
entry is found then `:miss` is returned. If an expired entry is found then
`{:expired, attempts}` is returned.
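
A hedged sketch (the node name is illustrative; the result depends on the
current cache contents):

    ZenMonitor.Local.Connector.cached_compatibility(:"app@remote-host")
    #=> :miss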
"""
@spec cached_compatibility(remote :: node()) :: cached_compatibility
def cached_compatibility(remote) do
case :ets.lookup(Tables.nodes(), remote) do
[] ->
:miss
[{^remote, :compatible}] ->
:compatible
[{^remote, {:incompatible, enforce_until, attempt}}] ->
if enforce_until < ZenMonitor.now() do
{:expired, attempt}
else
:incompatible
end
[{^remote, :unavailable}] ->
:unavailable
end
end
@doc """
Connect to the provided remote
This function will not consult the cache before calling into the GenServer; the GenServer will
consult the cache before attempting to connect. This allows many callers to connect
concurrently while guaranteeing that only one attempt actually performs network work.
If the compatibility of a remote host is needed instead, callers should use the
`compatibility/1` or `cached_compatibility/1` functions. `compatibility/1` will provide the
effective compatibility, `cached_compatibility/1` is mainly used internally but can provide more
detailed information about the cache status of the remote. Neither of these methods,
`compatibility/1` nor `cached_compatibility/1`, will perform network work or call into the
GenServer.
"""
@spec connect(remote :: node()) :: compatibility
def connect(remote) do
remote
|> get_for_node()
|> GenServer.call(:connect)
end
@doc """
Gets the sweep interval from the Application Environment
The sweep interval is the number of milliseconds to wait between sweeps, see
ZenMonitor.Local.Connector's @sweep_interval for the default value.
This can be controlled at boot and runtime with the {:zen_monitor, :connector_sweep_interval}
setting, see `ZenMonitor.Local.Connector.sweep_interval/1` for runtime convenience
functionality.
"""
@spec sweep_interval() :: integer
def sweep_interval do
Application.get_env(:zen_monitor, :connector_sweep_interval, @sweep_interval)
end
@doc """
Puts the sweep interval into the Application Environment
This is a simple convenience function for overwriting the
{:zen_monitor, :connector_sweep_interval} setting at runtime.
"""
@spec sweep_interval(value :: integer) :: :ok
def sweep_interval(value) do
Application.put_env(:zen_monitor, :connector_sweep_interval, value)
end
@doc """
Gets the chunk size from the Application Environment
The chunk size is the maximum number of subscriptions that will be sent during each sweep, see
ZenMonitor.Local.Connector's @chunk_size for the default value.
This can be controlled at boot and runtime with the {:zen_monitor, :connector_chunk_size}
setting, see `ZenMonitor.Local.Connector.chunk_size/1` for runtime convenience functionality.
"""
@spec chunk_size() :: integer
def chunk_size do
Application.get_env(:zen_monitor, :connector_chunk_size, @chunk_size)
end
@doc """
Puts the chunk size into the Application Environment
This is a simple convenience function for overwriting the {:zen_monitor, :connector_chunk_size}
setting at runtime.
"""
@spec chunk_size(value :: integer) :: :ok
def chunk_size(value) do
Application.put_env(:zen_monitor, :connector_chunk_size, value)
end
## Server
def init(remote) do
schedule_sweep()
monitors = :ets.new(:monitors, [:private, :ordered_set])
{:ok, %State{remote: remote, monitors: monitors}}
end
@doc """
Synchronous connect handler
Attempts to connect to the remote, this handler does check the cache before connecting to avoid
a thundering herd.
"""
def handle_call(:connect, _from, %State{} = state) do
{result, state} = do_compatibility(state)
{:reply, result, state}
end
@doc """
Returns all the monitors between a target and a subscriber
"""
def handle_call({:monitors, target, subscriber}, _from, %State{} = state) do
size = :ets.info(state.monitors, :size)
monitors =
if size == 0 do
# Don't bother doing the match on an empty table
[]
else
case :ets.match(state.monitors, {{target, :"$1"}, subscriber}, size) do
:"$end_of_table" ->
# Match failed
[]
{monitors, _} ->
# Unwrap the references
List.flatten(monitors)
end
end
{:reply, monitors, state}
end
@doc """
Handles establishing a new monitor
1. Records the monitor into the internal ETS table
2. If this is the first monitor for the pid, adds it to the queue for subsequent dispatch to
the ZenMonitor.Proxy during the next sweep.
"""
def handle_cast(
{:monitor, target, ref, subscriber},
%State{batch: batch, length: length, monitors: monitors} = state
) do
# Check if we should subscribe to this target (this check has to happen before we insert the
# new monitor otherwise the new monitor will always be found and we will never enqueue
# anything)
should_subscribe? = unknown_target?(monitors, target)
# Always add it to the monitor table
:ets.insert(monitors, {{target, ref}, subscriber})
# Enqueue the subscribe instruction if it isn't already monitored
new_state =
if should_subscribe? do
increment("enqueue", 1, tags: ["op:subscribe"])
%State{state | batch: :queue.in({:subscribe, target}, batch), length: length + 1}
else
state
end
{:noreply, new_state}
end
@doc """
Handles demonitoring a reference for a given pid
Cleans up the internal ETS record if it exists
"""
def handle_cast(
{:demonitor, target, ref},
%State{batch: batch, length: length, monitors: monitors} = state
) do
# Remove it from the monitors table
:ets.delete(monitors, {target, ref})
# If that was the last monitor for the target, we should unsubscribe. Unlike monitor we have
# to perform this check after the delete or else the row we are deleting will always make the
# target known.
should_unsubscribe? = unknown_target?(monitors, target)
# Enqueue the unsubscribe instruction if the target no longer exists
state =
if should_unsubscribe? do
increment("enqueue", 1, tags: ["op:unsubscribe"])
%State{state | batch: :queue.in({:unsubscribe, target}, batch), length: length + 1}
else
state
end
{:noreply, state}
end
@doc """
Handles nodedown for the Connector's remote
When the remote node goes down, every monitor maintained by the Connector should fire
"""
def handle_info({:nodedown, remote}, %State{remote: remote} = state) do
# Mark this node as unavailable
{:incompatible, state} = do_mark_unavailable(state)
# Mark the remote node as unmonitored (any monitors that existed were just consumed)
state = %State{state | remote_node_monitored: false}
# Dispatch down to everyone
{:noreply, do_down(state)}
end
@doc """
Handles when the proxy crashes because of noconnection
This reason indicates that we have lost connection with the remote node, mark it as unavailable.
"""
def handle_info({:DOWN, ref, :process, _, :noconnection}, %State{remote_proxy_ref: ref} = state) do
# Mark this node as unavailable
{:incompatible, state} = do_mark_unavailable(state)
# Clear the remote_proxy_ref
state = %State{state | remote_proxy_ref: nil}
# Dispatch down to everyone
{:noreply, do_down(state)}
end
@doc """
Handles when the proxy crashes for any other reason
Penalize the remote as incompatible and let the normal remote recovery take care of it.
"""
def handle_info({:DOWN, ref, :process, _, _}, %State{remote_proxy_ref: ref} = state) do
# Mark this node as incompatible
{:incompatible, state} = do_mark_incompatible(state, 1)
# Clear the remote_proxy_ref
state = %State{state | remote_proxy_ref: nil}
# Dispatch down to everyone
{:noreply, do_down(state)}
end
@doc """
Handle the dead summary from the remote
Periodically the remote node will send us a summary of everything that has died that we have
monitored.
Connector will find and consume all the matching monitors and enqueue the appropriate messages
for each monitor with ZenMonitor.Local
"""
def handle_info(
{:dead, remote, death_certificates},
%State{remote: remote, monitors: monitors} = state
) do
death_certificates
|> messages_for_death_certificates(monitors)
|> Local.enqueue()
{:noreply, state}
end
@doc """
Handle the periodic sweep
If the remote is compatible this will create a subscription summary up to chunk_size of all the
pids that need monitoring since the last sweep. This will be sent to the remote for monitoring.
If the remote is incompatible, all pids since the last sweep will have their monitors fire with
`{:zen_monitor, :nodedown}`
"""
def handle_info(:sweep, %State{} = state) do
new_state =
case do_compatibility(state) do
{:compatible, state} ->
do_sweep(state)
{:incompatible, state} ->
do_down(state)
end
schedule_sweep()
{:noreply, new_state}
end
@doc """
Handle other info
If a call times out, the remote end might still reply and that would result in a handle_info
"""
def handle_info(_, %State{} = state) do
increment("unhandled_info")
{:noreply, state}
end
## Private
@spec do_compatibility(state :: State.t()) :: {compatibility, State.t()}
defp do_compatibility(%State{remote: remote} = state) do
case cached_compatibility(remote) do
:miss ->
do_connect(state, 1)
{:expired, attempt} ->
do_connect(state, attempt + 1)
:unavailable ->
do_connect(state, 1)
hit ->
{hit, state}
end
end
@spec do_connect(State.t(), attempt :: integer) :: {compatibility, State.t()}
defp do_connect(%State{remote: remote} = state, attempt) do
try do
with {:known_node, true} <- {:known_node, known_node?(remote)},
{:ping, :pong} <-
{:ping, ZenMonitor.gen_module().call({ZenMonitor.Proxy, remote}, :ping)} do
do_mark_compatible(state)
else
{:known_node, false} ->
do_mark_unavailable(state)
{:ping, _} ->
do_mark_incompatible(state, attempt)
end
catch
:exit, {{:nodedown, _node}, _} ->
do_mark_unavailable(state)
:exit, _ ->
do_mark_incompatible(state, attempt)
end
end
@spec do_sweep(state :: State.t()) :: State.t()
defp do_sweep(%State{batch: batch, length: length} = state) do
{summary, overflow, new_length} = chunk(batch, length)
increment("sweep", length - new_length)
do_subscribe(state, summary)
%State{state | batch: overflow, length: new_length}
end
@spec chunk(batch :: :queue.queue(), length :: integer) :: {[pid], :queue.queue(), integer}
defp chunk(batch, length) do
size = chunk_size()
if length <= size do
{:queue.to_list(batch), :queue.new(), 0}
else
{summary, overflow} = :queue.split(size, batch)
{:queue.to_list(summary), overflow, length - size}
end
end
@spec do_subscribe(state :: State.t(), summary :: []) :: :ok
defp do_subscribe(%State{}, []), do: :ok
defp do_subscribe(%State{remote: remote}, summary) do
ZenMonitor.gen_module().cast({ZenMonitor.Proxy, remote}, {:process, self(), summary})
end
@spec do_down(state :: State.t()) :: State.t()
defp do_down(%State{monitors: monitors} = state) do
# Generate messages for every monitor
messages =
for [{{pid, ref}, subscriber}] <- :ets.match(monitors, :"$1") do
{subscriber, {:DOWN, ref, :process, pid, {:zen_monitor, :nodedown}}}
end
# Clear the monitors table
:ets.delete_all_objects(monitors)
# Enqueue the messages with ZenMonitor.Local
Local.enqueue(messages)
# Return a new empty state
%State{state | batch: :queue.new(), length: 0}
end
@spec do_mark_compatible(State.t()) :: {:compatible, State.t()}
defp do_mark_compatible(%State{remote: remote} = state) do
state =
state
|> monitor_remote_node()
|> monitor_remote_proxy()
:ets.insert(Tables.nodes(), {remote, :compatible})
{:compatible, state}
end
@spec do_mark_incompatible(State.t(), attempt :: integer) :: {:incompatible, State.t()}
defp do_mark_incompatible(%State{remote: remote} = state, attempt) do
state = monitor_remote_node(state)
:ets.insert(
Tables.nodes(),
{remote, {:incompatible, ZenMonitor.now() + penalty(attempt), attempt}}
)
{:incompatible, state}
end
@spec do_mark_unavailable(State.t()) :: {:incompatible, State.t()}
defp do_mark_unavailable(%State{remote: remote} = state) do
:ets.insert(Tables.nodes(), {remote, :unavailable})
{:incompatible, state}
end
@spec monitor_remote_node(State.t()) :: State.t()
defp monitor_remote_node(%State{remote_node_monitored: true} = state), do: state
defp monitor_remote_node(%State{remote_node_monitored: false, remote: remote} = state) do
Node.monitor(remote, true)
%State{state | remote_node_monitored: true}
end
@spec monitor_remote_proxy(State.t()) :: State.t()
defp monitor_remote_proxy(%State{remote_proxy_ref: nil, remote: remote} = state) do
%State{state | remote_proxy_ref: Process.monitor({ZenMonitor.Proxy, remote})}
end
defp monitor_remote_proxy(%State{} = state), do: state
@spec messages_for_death_certificates(
death_certificates :: [death_certificate],
monitors :: :ets.tab()
) :: [down_dispatch]
defp messages_for_death_certificates(death_certificates, monitors) do
do_messages_for_death_certificates(death_certificates, monitors, [])
end
@spec do_messages_for_death_certificates(
death_certificates :: [death_certificate],
monitors :: :ets.tab(),
acc :: [down_dispatch]
) :: [down_dispatch]
defp do_messages_for_death_certificates([], _monitors, acc), do: Enum.reverse(acc)
defp do_messages_for_death_certificates([{pid, reason} | rest], monitors, acc) do
acc =
monitors
|> :ets.match({{pid, :"$1"}, :"$2"})
|> Enum.reduce(acc, fn [ref, subscriber], acc ->
# Consume the monitor
:ets.delete(monitors, {pid, ref})
# Add the new message into the accumulator
[{subscriber, {:DOWN, ref, :process, pid, {:zen_monitor, reason}}} | acc]
end)
do_messages_for_death_certificates(rest, monitors, acc)
end
@spec known_node?(remote :: node()) :: boolean()
defp known_node?(remote) do
remote == Node.self() or remote in Node.list()
end
@spec penalty(attempt :: integer) :: integer
defp penalty(attempt) do
min(@maximum_penalty, @base_penalty * round(:math.pow(2, min(attempt, @max_attempt))))
end
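# A worked sketch of the exponential backoff, assuming the module defaults
# of @base_penalty 1_000 and @maximum_penalty 60_000:
#
#   penalty(1) #=> 2_000
#   penalty(2) #=> 4_000
#   penalty(5) #=> 32_000
#   penalty(6) #=> 60_000 (capped at @maximum_penalty)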
@spec unknown_target?(monitors :: :ets.tid(), target :: pid) :: boolean
defp unknown_target?(monitors, target) do
# ETS does not make for the most readable code, here's what the following line does.
# Perform a match on the internal monitors table looking for keys that start with
# {target, ...}
# Since we are just interested to see if there are any, but don't care about the content, we
# set the other fields to :_ to ignore them.
# The target is known if there are _any_ results, so we apply a limit to the match of just 1
# result.
# This means that we either get back a tuple of {[[]]], continuation} or :"$end_of_table"
# :"$end_of_table" implies that the match for a single item found nothing, therefore the
# target does not exist and is unknown
:ets.match(monitors, {{target, :_}, :_}, 1) == :"$end_of_table"
end
@spec schedule_sweep() :: reference
defp schedule_sweep do
Process.send_after(self(), :sweep, sweep_interval())
end
end
|
lib/zen_monitor/local/connector.ex
| 0.923541
| 0.72337
|
connector.ex
|
starcoder
|
defmodule Faker.Phone.EnGb do
  @moduledoc """
  This follows the rules of
  [Telephone numbers in the United Kingdom](https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Kingdom).
  """
  import Faker, only: [samplerp: 2]
@prefixes %{
"International dialling" => ["0"],
"Geographic numbers with area codes - for list see telephone area codes" => ["1", "2"],
"Geographic rate numbers - used by public sector and not-for-profit bodies" => ["30"],
"Geographic rate numbers - new allocations" => ["33"],
"Geographic rate numbers - migrating numbers from matching 084 numbers" => ["34"],
"Geographic rate numbers - migrating numbers from matching 087 numbers" => ["37"],
"Corporate numbers" => ["55"],
"Location independent electronic communications service (VoIP)" => ["56"],
"Personal numbering service" => ["70"],
"Radiopaging services" => ["76"],
"Freephone numbers" => ["80"],
"Internet for schools" => ["82"],
"Basic revenue share numbers" => ["84"],
"Higher rate revenue share numbers" => ["87"],
"Premium Rate Services (PRS) revenue share numbers" => ["90", "91"],
"Sexual Entertainment Services (SES) revenue share at a premium rate" => ["908", "909", "98"]
}
@doc """
Returns a random UK phone number
## Examples
iex> Faker.Phone.EnGb.number()
"+44054264610"
iex> Faker.Phone.EnGb.number()
"+44562970523"
iex> Faker.Phone.EnGb.number()
"+447502 030320"
iex> Faker.Phone.EnGb.number()
"+447933 760337"
"""
@spec number() :: String.t()
def number do
if Faker.random_between(0, 1) == 0 do
landline_number()
else
cell_number()
end
end
@doc """
Returns a random UK landline phone number
## Examples
iex> Faker.Phone.EnGb.landline_number()
"+44331542646"
iex> Faker.Phone.EnGb.landline_number()
"+44560832970"
iex> Faker.Phone.EnGb.landline_number()
"+44023570203"
iex> Faker.Phone.EnGb.landline_number()
"+44703209733"
"""
@spec landline_number() :: String.t()
def landline_number do
"+44#{number_prefix()}"
|> random_numbers_until(12)
end
samplerp(:cell_number_format, [
"074## ######",
"075## ######",
"076## ######",
"077## ######",
"078## ######",
"079## ######",
"+4474## ######",
"+4475## ######",
"+4476## ######",
"+4477## ######",
"+4478## ######",
"+4479## ######"
])
@doc """
Returns a random UK mobile phone number
## Examples
iex> Faker.Phone.EnGb.cell_number()
"+447415 426461"
iex> Faker.Phone.EnGb.cell_number()
"07483 297052"
iex> Faker.Phone.EnGb.cell_number()
"+447557 020303"
iex> Faker.Phone.EnGb.cell_number()
"+447609 733760"
"""
@spec cell_number() :: String.t()
def cell_number do
Faker.format(cell_number_format())
end
@doc """
Returns a random UK mobile phone number
## Examples
iex> Faker.Phone.EnGb.mobile_number()
"+447415 426461"
iex> Faker.Phone.EnGb.mobile_number()
"07483 297052"
iex> Faker.Phone.EnGb.mobile_number()
"+447557 020303"
iex> Faker.Phone.EnGb.mobile_number()
"+447609 733760"
"""
@spec mobile_number() :: String.t()
defdelegate mobile_number, to: __MODULE__, as: :cell_number
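  # Appends random digits to `out` until the whole string is `count` characters
  # long; the prefix (including "+44") counts toward the total.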
defp random_numbers_until(out, count) do
char_count =
out
|> String.to_charlist()
|> Enum.count()
format = String.duplicate("#", count - char_count)
"#{out}#{Faker.format(format)}"
end
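  # Picks a random prefix category from @prefixes, then a random prefix within
  # that category.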
defp number_prefix do
numbers = Map.values(@prefixes)
type = Enum.at(numbers, Faker.random_between(0, Enum.count(@prefixes) - 1))
Enum.at(type, Faker.random_between(0, Enum.count(type) - 1))
end
end
|
lib/faker/phone/en_gb.ex
| 0.550366
| 0.593786
|
en_gb.ex
|
starcoder
|
defmodule Elastic.HTTP do
@moduledoc ~S"""
Used to make raw calls to Elastic Search.
Each function returns a tuple indicating whether or not the request
succeeded or failed (`:ok` or `:error`), the status code of the response,
and then the processed body of the response.
For example, a request like this:
```elixir
Elastic.HTTP.get("/answer/_search")
```
Would return a response like this:
```
{:ok, 200,
%{"_shards" => %{"failed" => 0, "successful" => 5, "total" => 5},
"hits" => %{"hits" => [%{"_id" => "1", "_index" => "answer", "_score" => 1.0,
"_source" => %{"text" => "I like using Elastic Search"}, "_type" => "answer"}],
"max_score" => 1.0, "total" => 1}, "timed_out" => false, "took" => 7}}
```
"""
alias Elastic.ResponseHandler
@doc """
Makes a request using the GET HTTP method, and can take a body.
```
Elastic.HTTP.get("/answer/_search", body: %{query: ...})
```
"""
def get(url, options \\ []) do
request(:get, url, options)
end
@doc """
Makes a request using the POST HTTP method, and can take a body.
"""
def post(url, options \\ []) do
request(:post, url, options)
end
@doc """
Makes a request using the PUT HTTP method:
```
Elastic.HTTP.put("/answers/answer/1", body: %{
text: "I like using Elastic Search"
})
```
"""
def put(url, options \\ []) do
request(:put, url, options)
end
@doc """
Makes a request using the DELETE HTTP method:
```
Elastic.HTTP.delete("/answers/answer/1")
```
"""
def delete(url, options \\ []) do
request(:delete, url, options)
end
@doc """
Makes a request using the HEAD HTTP method:
```
Elastic.HTTP.head("/answers")
```
"""
def head(url, options \\ []) do
request(:head, url, options)
end
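  @doc """
  Makes a request to the `_bulk` endpoint.
  The body is expected to be newline-delimited JSON; a trailing newline is
  appended, since the bulk API requires the payload to end with one. A minimal
  sketch (the action and document below are illustrative):
  ```
  Elastic.HTTP.bulk(body: ~s({"index": {"_index": "answer"}}\n{"text": "Hello"}))
  ```
  """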
def bulk(options) do
body = Keyword.get(options, :body, "") <> "\n"
options = Keyword.put(options, :body, body)
request(:post, "_bulk", options)
end
# Private helpers
  defp request(method, url, options) do
    request_options =
      []
      |> Keyword.put(:method, method)
      |> Keyword.put(:url, url)
      |> Keyword.put(:headers, Keyword.new())
      |> Keyword.put(:body, Keyword.get(options, :body, %{}))
      |> Keyword.put(:query, Keyword.get(options, :query, []))
    # Pass the caller's options (plus the URL) to the client builder so that
    # per-request settings such as :basic_auth are not silently dropped; the
    # builder also needs :url to pick the content-type middleware.
    options
    |> Keyword.put(:url, url)
    |> client()
    |> Tesla.request(request_options)
    |> process_response()
  end
defp process_response(response) do
ResponseHandler.process(response)
end
  defp client(options) do
    middleware =
      [
        {Tesla.Middleware.BaseUrl, Application.get_env(:elastic, :base_url, "http://localhost:9200")},
        {Tesla.Middleware.Timeout, timeout: Application.get_env(:elastic, :timeout, 30_000)},
        Elastic.Middleware.AWSMiddleware
      ]
      |> add_basic_auth_middleware(options)
      |> add_content_type_middleware_headers(options)
    Tesla.client(middleware)
  end
defp add_content_type_middleware_headers(middleware, options) do
case Keyword.get(options, :url) do
"_bulk" ->
middleware
|> add_content_type_middleware_header("application/x-ndjson")
|> add_json_middleware(:decode)
_ ->
add_json_middleware(middleware, :full)
end
end
defp add_content_type_middleware_header(middleware, content_type) do
[{Tesla.Middleware.Headers, [{"content-type", content_type}]} | middleware]
end
defp add_json_middleware(middleware, :decode), do: [Tesla.Middleware.DecodeJson | middleware]
defp add_json_middleware(middleware, :encode), do: [Tesla.Middleware.EncodeJson | middleware]
defp add_json_middleware(middleware, :full), do: [Tesla.Middleware.JSON | middleware]
defp add_json_middleware(middleware, _), do: middleware
  defp add_basic_auth_middleware(middleware, options) do
case Keyword.get(options, :basic_auth, Elastic.basic_auth()) do
{username, password} ->
[{Tesla.Middleware.BasicAuth, %{username: username, password: password}} | middleware]
_ ->
middleware
end
end
end
|
lib/elastic/http.ex
| 0.901554
| 0.801392
|
http.ex
|
starcoder
|
defmodule Kitto.Notifier do
@moduledoc """
Module responsible for broadcasting events across connections.
"""
use Supervisor
import Agent, only: [start_link: 2, update: 2, get: 2]
@doc """
Starts the notifier supervision tree
"""
def start_link, do: Supervisor.start_link(__MODULE__, :ok, name: :notifier_sup)
@doc false
def init(:ok) do
children = [
worker(__MODULE__, [], function: :start_connections_cache, id: make_ref()),
worker(__MODULE__, [], function: :start_notifier_cache, id: make_ref())
]
supervise(children, strategy: :one_for_one)
end
@doc """
Starts the connections cache agent
"""
def start_connections_cache, do: start_link(fn -> [] end, name: :notifier_connections)
@doc """
Starts the notifier cache agent
"""
def start_notifier_cache, do: start_link(fn -> %{} end, name: :notifier_cache)
@doc """
Every new SSE connection gets all the cached payloads for each job.
The last broadcasted payload of each job is cached
"""
  @spec initial_broadcast!(pid()) :: :ok
def initial_broadcast!(pid) do
cache() |> Enum.each(fn ({topic, data}) -> broadcast!(pid, topic, data) end)
end
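  # Illustrative usage (topic and payload are assumptions):
  #
  #   Kitto.Notifier.broadcast!("sample", %{value: 42})
  #
  # The payload is merged with an `updated_at` timestamp before delivery.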
@doc """
Emits a server-sent event to each of the active connections with the given
topic and payload
"""
  @spec broadcast!(atom() | String.t(), atom() | map() | list()) :: :ok
def broadcast!(data, topic) when is_atom(topic), do: broadcast!(topic, data)
def broadcast!(topic, data) do
unless topic == "_kitto", do: cache(topic, data)
connections() |> Enum.each(fn (connection) -> broadcast!(connection, topic, data) end)
end
@doc """
  Emits a server-sent event with the given topic and payload to a specific
  process
"""
  @spec broadcast!(pid(), atom() | String.t(), map()) :: {:broadcast, {String.t(), map()}}
def broadcast!(pid, topic, data) when is_atom(topic), do: broadcast!(pid, topic |> to_string, data)
def broadcast!(pid, topic, data) do
if !Process.alive?(pid), do: delete(pid)
send pid, {:broadcast, {topic, data |> Map.merge(updated_at())}}
end
@doc """
Updates the list of connections to use for broadcasting
"""
@spec register(Conn.t()) :: Conn.t()
def register(conn) do
notifier_connections() |> update(&(&1 ++ [conn]))
conn
end
@doc """
Returns cached broadcasts
"""
@spec cache() :: map()
def cache, do: notifier_cache() |> get(&(&1))
@doc """
Resets the broadcast cache
"""
@spec clear_cache() :: :ok
def clear_cache, do: notifier_cache() |> update(fn (_) -> %{} end)
@doc """
Caches the given payload with the key provided as the first argument
"""
def cache(topic, data) when is_atom(topic), do: cache(topic |> to_string, data)
def cache(topic, data), do: notifier_cache() |> update(&(Map.merge(&1, %{topic => data})))
@doc """
Removes a connection from the connections list
"""
@spec delete(Conn.t()) :: :ok
def delete(conn), do: notifier_connections() |> update(&(&1 |> List.delete(conn)))
@doc """
Returns the registered connections
"""
@spec connections() :: [Conn.t()]
def connections, do: notifier_connections() |> get(&(&1))
defp notifier_connections, do: Process.whereis(:notifier_connections)
defp notifier_cache, do: Process.whereis(:notifier_cache)
defp updated_at, do: %{updated_at: :os.system_time(:seconds)}
end
|
lib/kitto/notifier.ex
| 0.808521
| 0.487978
|
notifier.ex
|
starcoder
|
defmodule PaEss.Utilities do
@moduledoc """
Some simple helpers for working with the PA/ESS system
"""
require Logger
@space "21000"
@abbreviation_replacements [
{~r"\bOL\b", "Orange Line"},
{~r"\bBL\b", "Blue Line"},
{~r"\bRL\b", "Red Line"},
{~r"\bGL\b", "Green Line"},
{~r"\bNB\b", "Northbound"},
{~r"\bSB\b", "Southbound"},
{~r"\bEB\b", "Eastbound"},
{~r"\bWB\b", "Westbound"},
{~r"\bDesign Ctr\b", "Design Center "},
{~r"\b88 Blk Flcn\b", "88 Black Falcon Avenue"},
{~r"\b23 Dry Dock\b", "23 Dry Dock Avenue"},
{~r"\b21 Dry Dock\b", "21 Dry Dock Avenue"},
{~r"\bTide St\b", "Tide Street"},
{~r"\bHarbor St\b", "Harbor Street"},
{~r"\bSilvr Ln Wy\b", "Silver Line Way"},
{~r"\bWTC\b", "World Trade Center"},
{~r"\bHerald St\b", "Herald Street"},
{~r"\bE Berkeley\b", "East Berkley Street"},
{~r"\bNewton St\b", "Newton Street"},
{~r"\bWo'?ster Sq\b", "Worcester Square"},
{~r"\bMass Ave\b", "Massachusetts Avenue"},
{~r"\bLenox St\b", "Lenox Street"},
{~r"\bMelnea Cass\b", "Melnea Cass Boulevard"},
{~r"\bEastern Ave\b", "Eastern Avenue"},
{~r"\bBox Dist\b", "Box District"},
{~r"\bBellingham\b", "Bellingham Square"},
{~r"\bMedfd/Tufts\b", "Medford Tufts"},
{~r"\bBall Sq\b", "Ball Square"},
{~r"\bMagoun Sq\b", "Magoun Square"},
{~r"\bGilman Sq\b", "Gilman Square"},
{~r"\bE Somervlle\b", "East Somerville"},
{~r"\bUnion Sq\b", "Union Square"},
{~r"\bScience Pk\b", "Science Park West End"},
{~r"\bHynes\b", "Hynes Convention Center"},
{~r"\bNortheast'?n\b", "Northeastern"},
{~r"\bMFA\b", "Museum of Fine Arts"},
{~r"\bLngwd Med \b", "Longwood Medical Area"},
{~r"\bBrigham Cir\b", "Brigham Circle"},
{~r"\bFenwood Rd\b", "Fenwood Road"},
{~r"\bMission Pk\b", "Mission Park"},
{~r"\bBack o'?Hill\b", "Back of the Hill"},
{~r"\bHeath St\b", "Heath Street"},
{~r"\bB'?kline Vil\b", "Brookline Village"},
{~r"\bB'?kline Hls\b", "Brookline Hills"},
{~r"\bB'?consfield\b", "Beaconsfield"},
{~r"\bChestnut Hl\b", "Chestnut Hill"},
{~r"\bNewton Ctr\b", "Newton Centre"},
{~r"\bNewton Hlnd\b", "Newton Highlands"},
{~r"\bSt Mary'?s\b", "Saint Mary's Street"},
{~r"\bHawes St\b", "Hawes Street"},
{~r"\bKent St\b", "Kent Street"},
{~r"\bCoolidge Cn\b", "Coolidge Corner"},
{~r"\bSummit Ave\b", "Summit Avenue"},
{~r"\bBrandon Hll\b", "Brandon Hall"},
{~r"\bFairbanks\b", "Fairbanks Street"},
{~r"\bWashington \b", "Washington Square"},
{~r"\bTappan St\b", "Tappan Street"},
{~r"\bDean Rd\b", "Dean Road"},
{~r"\bEnglew'?d Av\b", "Englewood Avenue"},
{~r"\bClvlnd Cir\b", "Cleveland Circle"},
{~r"\bBlandford\b", "Blandford Street"},
{~r"\bBU East\b", "Boston University East"},
{~r"\bBU Central\b", "Boston University Central"},
{~r"\bBU West\b", "Boston University West"},
{~r"\bSt Paul St\b", "Saint Paul Street"},
{~r"\bBabcock St\b", "Babcock Street"},
{~r"\bPackards Cn\b", "Packard's Corner"},
{~r"\bHarvard Ave\b", "Harvard Avenue"},
{~r"\bGriggs St\b", "Griggs Street"},
{~r"\bAllston St\b", "Allston Street"},
{~r"\bWarren St\b", "Warren Street"},
{~r"\bWashington \b", "Washington Street"},
{~r"\bSutherland\b", "Sutherland Road"},
{~r"\bChiswick Rd\b", "Chiswick Road"},
{~r"\bChestnut Hl\b", "Chestnut Hill Avenue"},
{~r"\bSouth St\b", "South Street"},
{~r"\bBoston Coll\b", "Boston College"},
{~r"\bSullivan Sq\b", "Sullivan Square"},
{~r"\bCom College\b", "Community College"},
{~r"\bMass Ave\b", "Massachusetts Avenue"},
{~r"\bRoxbury Xng\b", "Roxbury Crossing"},
{~r"\bJackson Sq\b", "Jackson Square"},
{~r"\bGreen St\b", "Green Street"},
{~r"\bFrst Hills\b", "Forest Hills"},
{~r"\bRevere Bch\b", "Revere Beach"},
{~r"\bSuffolk Dns\b", "Suffolk Downs"},
{~r"\bOrient Hts\b", "Orient Heights"},
{~r"\bKendall/MIT\b", "Kendall MIT"},
{~r"\bCharles/MGH\b", "Charles MGH"},
{~r"\bFields Cnr\b", "Fields Corner"},
{~r"\bCedar Grv\b", "Cedar Grove"},
{~r"\bCentral Ave\b", "Central Avenue"},
{~r"\bValley Rd\b", "Valley Road"},
{~r"\bCapen St\b", "Capen Street"},
{~r"\bN Quincy\b", "North Quincy"},
{~r"\bQuincy Adms\b", "Quincy Adams"},
{~r"\bDownt'?n Xng\b", "Downtown Crossing"},
{~r"\bSouth Sta\b", "South Station"},
{~r"\bPark St\b", "Park Street"},
{~r"\bJFK/Umass\b", "JFK Umass"},
{~r"\bQuincy Ctr\b", "Quincy Center"},
{~r"\bTufts Med\b", "Tufts Medical Center"},
{~r"\bMalden Ctr\b", "Malden Center"},
{~r"\bNorth Sta\b", "North Station"},
{~r"\bGov'?t Ctr\b", "Government Center"},
{~r/\bSVC\b/i, "Service"}
]
@spec valid_range?(integer(), Content.Audio.language()) :: boolean()
def valid_range?(n, :english) do
n > 0 and n < 60
end
def valid_range?(n, :spanish) do
n > 0 and n < 21
end
@spec valid_destination?(PaEss.destination(), Content.Audio.language()) :: boolean()
def valid_destination?(destination, language) when not is_nil(destination) do
language == :english or destination in [:chelsea, :south_station]
end
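  # e.g. number_var(5, :english) #=> "5505", number_var(5, :spanish) #=> "37005"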
@spec number_var(integer(), Content.Audio.language()) :: String.t() | nil
def number_var(n, :english) do
if valid_range?(n, :english) do
Integer.to_string(5500 + n)
else
nil
end
end
def number_var(n, :spanish) do
if valid_range?(n, :spanish) do
Integer.to_string(37000 + n)
else
nil
end
end
@doc "Recording of the time from 12:01 to 12:59, given the minutes"
@spec time_var(integer()) :: String.t()
def time_var(n) when n > 0 and n < 60 do
Integer.to_string(9100 + n)
end
def countdown_minutes_var(n) when n >= 0 and n < 30 do
Integer.to_string(5000 + n)
end
def countdown_minutes_var(n) when n >= 30 do
Integer.to_string(5030)
end
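  # Illustrative example (the variable IDs and :audio av_type are assumptions):
  #
  #   take_message(["4000", "5001"], :audio)
  #   #=> {:canned, {"105", ["4000", "21000", "5001"], :audio}}
  #
  # "21000" is the @space variable; the message id is 102 plus the number of
  # interspersed variables.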
@doc "Constructs message from TAKE variables"
@spec take_message([String.t()], Content.Audio.av_type()) :: Content.Audio.canned_message()
def take_message(vars, av_type) do
vars_with_spaces = Enum.intersperse(vars, @space)
{:canned, {take_message_id(vars_with_spaces), vars_with_spaces, av_type}}
end
@spec take_message_id([String.t()]) :: String.t()
def take_message_id(vars) do
Integer.to_string(102 + length(vars))
end
@doc "Take ID for terminal destinations"
@spec destination_var(PaEss.destination()) :: {:ok, String.t()} | {:error, :unknown}
def destination_var(:alewife), do: {:ok, "4000"}
def destination_var(:ashmont), do: {:ok, "4016"}
def destination_var(:braintree), do: {:ok, "4021"}
def destination_var(:mattapan), do: {:ok, "4100"}
def destination_var(:bowdoin), do: {:ok, "4055"}
def destination_var(:wonderland), do: {:ok, "4044"}
def destination_var(:oak_grove), do: {:ok, "4022"}
def destination_var(:forest_hills), do: {:ok, "4043"}
def destination_var(:lechmere), do: {:ok, "4056"}
def destination_var(:north_station), do: {:ok, "4027"}
def destination_var(:government_center), do: {:ok, "4061"}
def destination_var(:park_street), do: {:ok, "4007"}
def destination_var(:kenmore), do: {:ok, "4070"}
def destination_var(:boston_college), do: {:ok, "4202"}
def destination_var(:cleveland_circle), do: {:ok, "4203"}
def destination_var(:reservoir), do: {:ok, "4076"}
def destination_var(:riverside), do: {:ok, "4084"}
def destination_var(:heath_street), do: {:ok, "4204"}
def destination_var(_), do: {:error, :unknown}
@spec headsign_to_destination(String.t()) :: {:ok, PaEss.destination()} | {:error, :unknown}
def headsign_to_destination("Alewife"), do: {:ok, :alewife}
def headsign_to_destination("Ashmont"), do: {:ok, :ashmont}
def headsign_to_destination("Braintree"), do: {:ok, :braintree}
def headsign_to_destination("Mattapan"), do: {:ok, :mattapan}
def headsign_to_destination("Bowdoin"), do: {:ok, :bowdoin}
def headsign_to_destination("Wonderland"), do: {:ok, :wonderland}
def headsign_to_destination("Oak Grove"), do: {:ok, :oak_grove}
def headsign_to_destination("Forest Hills"), do: {:ok, :forest_hills}
def headsign_to_destination("Chelsea"), do: {:ok, :chelsea}
def headsign_to_destination("South Station"), do: {:ok, :south_station}
def headsign_to_destination("Lechmere"), do: {:ok, :lechmere}
def headsign_to_destination("North Station"), do: {:ok, :north_station}
def headsign_to_destination("Government Center"), do: {:ok, :government_center}
def headsign_to_destination("Park Street"), do: {:ok, :park_street}
def headsign_to_destination("Kenmore"), do: {:ok, :kenmore}
def headsign_to_destination("Boston College"), do: {:ok, :boston_college}
def headsign_to_destination("Cleveland Circle"), do: {:ok, :cleveland_circle}
def headsign_to_destination("Reservoir"), do: {:ok, :reservoir}
def headsign_to_destination("Riverside"), do: {:ok, :riverside}
def headsign_to_destination("Heath Street"), do: {:ok, :heath_street}
def headsign_to_destination("Northbound"), do: {:ok, :northbound}
def headsign_to_destination("Southbound"), do: {:ok, :southbound}
def headsign_to_destination("Eastbound"), do: {:ok, :eastbound}
def headsign_to_destination("Westbound"), do: {:ok, :westbound}
def headsign_to_destination(_unknown), do: {:error, :unknown}
@spec destination_to_sign_string(PaEss.destination()) :: String.t()
def destination_to_sign_string(:alewife), do: "Alewife"
def destination_to_sign_string(:ashmont), do: "Ashmont"
def destination_to_sign_string(:braintree), do: "Braintree"
def destination_to_sign_string(:mattapan), do: "Mattapan"
def destination_to_sign_string(:bowdoin), do: "Bowdoin"
def destination_to_sign_string(:wonderland), do: "Wonderland"
def destination_to_sign_string(:oak_grove), do: "Oak Grove"
def destination_to_sign_string(:forest_hills), do: "Frst Hills"
def destination_to_sign_string(:chelsea), do: "Chelsea"
def destination_to_sign_string(:south_station), do: "South Sta"
def destination_to_sign_string(:lechmere), do: "Lechmere"
def destination_to_sign_string(:north_station), do: "North Sta"
def destination_to_sign_string(:government_center), do: "Govt Ctr"
def destination_to_sign_string(:park_street), do: "Park St"
def destination_to_sign_string(:kenmore), do: "Kenmore"
def destination_to_sign_string(:boston_college), do: "Boston Col"
def destination_to_sign_string(:cleveland_circle), do: "Clvlnd Cir"
def destination_to_sign_string(:reservoir), do: "Reservoir"
def destination_to_sign_string(:riverside), do: "Riverside"
def destination_to_sign_string(:heath_street), do: "Heath St"
def destination_to_sign_string(:northbound), do: "Northbound"
def destination_to_sign_string(:southbound), do: "Southbound"
def destination_to_sign_string(:eastbound), do: "Eastbound"
def destination_to_sign_string(:westbound), do: "Westbound"
@spec destination_to_ad_hoc_string(PaEss.destination()) ::
{:ok, String.t()} | {:error, :unknown}
def destination_to_ad_hoc_string(:alewife), do: {:ok, "Alewife"}
def destination_to_ad_hoc_string(:ashmont), do: {:ok, "Ashmont"}
def destination_to_ad_hoc_string(:braintree), do: {:ok, "Braintree"}
def destination_to_ad_hoc_string(:mattapan), do: {:ok, "Mattapan"}
def destination_to_ad_hoc_string(:bowdoin), do: {:ok, "Bowdoin"}
def destination_to_ad_hoc_string(:wonderland), do: {:ok, "Wonderland"}
def destination_to_ad_hoc_string(:oak_grove), do: {:ok, "Oak Grove"}
def destination_to_ad_hoc_string(:forest_hills), do: {:ok, "Forest Hills"}
def destination_to_ad_hoc_string(:chelsea), do: {:ok, "Chelsea"}
def destination_to_ad_hoc_string(:south_station), do: {:ok, "South Station"}
def destination_to_ad_hoc_string(:lechmere), do: {:ok, "Lechmere"}
def destination_to_ad_hoc_string(:north_station), do: {:ok, "North Station"}
def destination_to_ad_hoc_string(:government_center), do: {:ok, "Government Center"}
def destination_to_ad_hoc_string(:park_street), do: {:ok, "Park Street"}
def destination_to_ad_hoc_string(:kenmore), do: {:ok, "Kenmore"}
def destination_to_ad_hoc_string(:boston_college), do: {:ok, "Boston College"}
def destination_to_ad_hoc_string(:cleveland_circle), do: {:ok, "Cleveland Circle"}
def destination_to_ad_hoc_string(:reservoir), do: {:ok, "Reservoir"}
def destination_to_ad_hoc_string(:riverside), do: {:ok, "Riverside"}
def destination_to_ad_hoc_string(:heath_street), do: {:ok, "Heath Street"}
def destination_to_ad_hoc_string(:northbound), do: {:ok, "Northbound"}
def destination_to_ad_hoc_string(:southbound), do: {:ok, "Southbound"}
def destination_to_ad_hoc_string(:eastbound), do: {:ok, "Eastbound"}
def destination_to_ad_hoc_string(:westbound), do: {:ok, "Westbound"}
def destination_to_ad_hoc_string(_unknown), do: {:error, :unknown}
@spec route_to_ad_hoc_string(String.t()) :: {:ok, String.t()} | {:error, :unknown}
def route_to_ad_hoc_string("Red"), do: {:ok, "Red Line"}
def route_to_ad_hoc_string("Blue"), do: {:ok, "Blue Line"}
def route_to_ad_hoc_string("Orange"), do: {:ok, "Orange Line"}
def route_to_ad_hoc_string("Mattapan"), do: {:ok, "Mattapan"}
def route_to_ad_hoc_string("Green-B"), do: {:ok, "B"}
def route_to_ad_hoc_string("Green-C"), do: {:ok, "C"}
def route_to_ad_hoc_string("Green-D"), do: {:ok, "D"}
def route_to_ad_hoc_string("Green-E"), do: {:ok, "E"}
def route_to_ad_hoc_string(_unknown), do: {:error, :unknown}
@spec ad_hoc_trip_description(PaEss.destination(), String.t() | nil) ::
{:ok, String.t()} | {:error, :unknown}
def ad_hoc_trip_description(destination, route_id \\ nil)
def ad_hoc_trip_description(destination, nil)
when destination in [:eastbound, :westbound, :southbound, :northbound] do
case destination_to_ad_hoc_string(destination) do
{:ok, destination_string} ->
{:ok, "#{destination_string} train"}
_ ->
{:error, :unknown}
end
end
def ad_hoc_trip_description(destination, route_id)
when destination == :eastbound and route_id in ["Green-B", "Green-C", "Green-D", "Green-E"] do
ad_hoc_trip_description(destination)
end
def ad_hoc_trip_description(destination, route_id)
when destination in [:eastbound, :westbound, :southbound, :northbound] do
case {destination_to_ad_hoc_string(destination), route_to_ad_hoc_string(route_id)} do
{{:ok, destination_string}, {:ok, route_string}} ->
{:ok, "#{destination_string} #{route_string} train"}
{{:ok, _destination_string}, {:error, :unknown}} ->
ad_hoc_trip_description(destination)
_ ->
{:error, :unknown}
end
end
def ad_hoc_trip_description(destination, nil) do
case destination_to_ad_hoc_string(destination) do
{:ok, destination_string} ->
{:ok, "train to #{destination_string}"}
_ ->
{:error, :unknown}
end
end
def ad_hoc_trip_description(destination, route_id)
when destination in [:lechmere, :north_station, :government_center, :park_street, :kenmore] and
route_id in ["Green-B", "Green-C", "Green-D", "Green-E"] do
ad_hoc_trip_description(destination)
end
def ad_hoc_trip_description(destination, route_id) do
case {destination_to_ad_hoc_string(destination), route_to_ad_hoc_string(route_id)} do
{{:ok, destination_string}, {:ok, route_string}} ->
{:ok, "#{route_string} train to #{destination_string}"}
{{:ok, _destination_string}, {:error, :unknown}} ->
ad_hoc_trip_description(destination)
_ ->
{:error, :unknown}
end
end
@spec green_line_branch_var(Content.Utilities.green_line_branch()) :: String.t()
def green_line_branch_var(:b), do: "536"
def green_line_branch_var(:c), do: "537"
def green_line_branch_var(:d), do: "538"
def green_line_branch_var(:e), do: "539"
@spec replace_abbreviations(String.t()) :: String.t()
def replace_abbreviations(text) when is_binary(text) do
Enum.reduce(
@abbreviation_replacements,
text,
fn {abbr, replacement}, text ->
String.replace(text, abbr, replacement)
end
)
end
end
|
lib/pa_ess/utilities.ex
| 0.57069
| 0.509093
|
utilities.ex
|
starcoder
|
defmodule Crony.DualMap do
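  @moduledoc """
  A map addressable by two independent keys ("left" and "right"), kept in sync
  as a pair of maps whose entries point at each other via `{:assoc, key}`
  markers, or `:nothing` once an association has been severed.

  A rough usage sketch, inferred from the functions below:

      {:ok, dm} = DualMap.put_new(%DualMap{}, {:a, 1}, "value")
      DualMap.fetch_left(dm, :a)            #=> {:ok, "value"}
      DualMap.fetch_right(dm, 1)            #=> {:ok, "value"}
      DualMap.put_new(dm, {:a, 2}, "other") #=> {:error, :key_collision}

  `put_brutal/3` deletes entries displaced by a new pair, whereas
  `put_unsafe/3` merely de-associates them.
  """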
use Brex.Result
alias __MODULE__
@compile {:inline, deassociated_right_for: 2, deassociated_left_for: 2}
defstruct left: %{},
right: %{}
  @type t(left, right, val) :: %DualMap{
          left: %{required(left) => {val, {:assoc, right} | :nothing}},
          right: %{required(right) => {val, {:assoc, left} | :nothing}}
        }
  def put_new(dualmap, {key_left, key_right}, value) do
preexisting_key? =
Map.has_key?(dualmap.left, key_left) || Map.has_key?(dualmap.right, key_right)
case preexisting_key? do
true ->
{:error, :key_collision}
false ->
{:ok,
%{
dualmap
| left: Map.put(dualmap.left, key_left, {value, {:assoc, key_right}}),
right: Map.put(dualmap.right, key_right, {value, {:assoc, key_left}})
}}
end
end
def put_brutal(dualmap, {key_left, key_right}, value) do
new_right =
case dualmap.left do
%{^key_left => {_, {:assoc, key}}} ->
Map.delete(dualmap.right, key)
_ ->
dualmap.right
end
|> Map.put(key_right, {value, {:assoc, key_left}})
new_left =
case dualmap.right do
%{^key_right => {_, {:assoc, key}}} ->
Map.delete(dualmap.left, key)
_ ->
dualmap.left
end
|> Map.put(key_left, {value, {:assoc, key_right}})
%{
dualmap
| left: new_left,
right: new_right
}
end
def put_unsafe(dualmap, {key_left, key_right}, value) do
new_right =
case dualmap.left do
%{^key_left => {_, {:assoc, key}}} ->
deassociated_right_for(dualmap, key)
_ ->
dualmap.right
end
|> Map.put(key_right, {value, {:assoc, key_left}})
new_left =
case dualmap.right do
%{^key_right => {_, {:assoc, key}}} ->
deassociated_left_for(dualmap, key)
_ ->
dualmap.left
end
|> Map.put(key_left, {value, {:assoc, key_right}})
%{
dualmap
| left: new_left,
right: new_right
}
end
def delete_left(dualmap, key_left) do
case Map.has_key?(dualmap.left, key_left) do
true ->
{_, key_right} = Map.fetch!(dualmap.left, key_left)
new_right =
case key_right do
:nothing -> dualmap.right
{:assoc, key} -> Map.delete(dualmap.right, key)
end
%{
dualmap
| left: Map.delete(dualmap.left, key_left),
right: new_right
}
false ->
dualmap
end
end
def delete_right(dualmap, key_right) do
case Map.has_key?(dualmap.right, key_right) do
true ->
{_, key_left} = Map.fetch!(dualmap.right, key_right)
new_left =
case key_left do
:nothing -> dualmap.left
{:assoc, key} -> Map.delete(dualmap.left, key)
end
%{
dualmap
| left: new_left,
right: Map.delete(dualmap.right, key_right)
}
false ->
dualmap
end
end
def fetch_left(dualmap, key_left) do
Map.fetch(dualmap.left, key_left)
|> normalize_error(:not_found)
|> fmap(fn {value, _} ->
value
end)
end
def fetch_right(dualmap, key_right) do
Map.fetch(dualmap.right, key_right)
|> normalize_error(:not_found)
|> fmap(fn {value, _} ->
value
end)
end
def associated_right(dualmap, key_left) do
Map.fetch(dualmap.left, key_left)
|> normalize_error(:not_found)
|> fmap(fn {_, key_right} ->
key_right
end)
end
def associated_left(dualmap, key_right) do
Map.fetch(dualmap.right, key_right)
|> normalize_error(:not_found)
|> fmap(fn {_, key_left} ->
key_left
end)
end
def keys(dualmap) do
Enum.map(dualmap.left, fn {left, {_, right}} ->
{{:assoc, left}, right}
end)
end
def keys_left(dualmap) do
Enum.map(dualmap.left, fn {left, _} ->
left
end)
end
def keys_right(dualmap) do
Enum.map(dualmap.right, fn {right, _} ->
right
end)
end
def to_list(dualmap) do
lefts =
Stream.map(dualmap.left, fn {left, {value, right}} ->
{{{:assoc, left}, right}, value}
end)
dualmap.right
    |> Stream.filter(fn
      {_right, {_value, :nothing}} -> true
      _ -> false
    end)
|> Stream.map(fn {right, {value, _}} ->
{{:nothing, {:assoc, right}}, value}
end)
|> Stream.concat(lefts)
|> Enum.to_list()
end
defp deassociated_right_for(dualmap, key) do
Map.fetch!(dualmap.right, key)
|> case do
{value, {:assoc, _}} ->
Map.put(dualmap.right, key, {value, :nothing})
_ ->
dualmap.right
end
end
defp deassociated_left_for(dualmap, key) do
Map.fetch!(dualmap.left, key)
|> case do
{value, {:assoc, _}} ->
Map.put(dualmap.left, key, {value, :nothing})
_ ->
dualmap.left
end
end
end
defimpl Inspect, for: Crony.DualMap do
import Inspect.Algebra
def inspect(dualmap, _opts) do
inspect_opts = %Inspect.Opts{}
dualmap_renderer = fn {{left, right}, value}, _opts ->
rl =
case left do
{:assoc, key} -> inspect(key)
:nothing -> ""
end
rr =
case right do
{:assoc, key} -> inspect(key)
:nothing -> ""
end
"{#{rl},#{rr}} => #{inspect(value)}"
end
concat([
"#DualMap<",
container_doc(
"%{",
Crony.DualMap.to_list(dualmap),
"}",
inspect_opts,
dualmap_renderer
),
">"
])
end
end
|
lib/crony/dual_map.ex
| 0.724675
| 0.601359
|
dual_map.ex
|
starcoder
|
defmodule Zaryn.OracleChain do
@moduledoc """
  Manage the network-based oracles: verify and add new oracle transactions to the system, and request the last update.
  The ZARYN price is the first network oracle; it is used by many algorithms such as transaction fees, node rewards, and smart contracts.
"""
alias __MODULE__.MemTable
alias __MODULE__.MemTableLoader
alias __MODULE__.Scheduler
alias __MODULE__.Services
alias __MODULE__.Summary
alias Zaryn.PubSub
alias Zaryn.TransactionChain.Transaction
alias Zaryn.TransactionChain.TransactionData
@doc """
Determines if the oracle transaction is valid.
This operation will check the data from the service providers
"""
@spec valid_services_content?(binary()) :: boolean()
def valid_services_content?(content) when is_binary(content) do
with {:ok, data} <- Jason.decode(content),
true <- Services.verify_correctness?(data) do
true
else
{:error, _} ->
false
false ->
false
end
end
@doc """
Determines if the oracle summary is valid.
This operation will check the data from the previous oracle transactions
"""
@spec valid_summary?(binary(), list(Transaction.t())) :: boolean()
def valid_summary?(content, oracle_chain) when is_binary(content) do
with {:ok, data} <- Jason.decode(content),
true <-
%Summary{transactions: oracle_chain, aggregated: parse_summary_data(data)}
|> Summary.verify?() do
true
else
{:error, _} ->
true
false ->
false
end
end
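  # Converts a map of %{unix_timestamp_string => service_data} into
  # %{DateTime.t() => parsed_data}, silently dropping entries that fail to parse.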
defp parse_summary_data(data) do
Enum.map(data, fn {timestamp, service_data} ->
with {timestamp, _} <- Integer.parse(timestamp),
{:ok, datetime} <- DateTime.from_unix(timestamp),
{:ok, data} <- Services.parse_data(service_data) do
{datetime, data}
else
_ ->
nil
end
end)
|> Enum.filter(& &1)
|> Enum.into(%{})
end
@doc """
Load the transaction in the memtable
"""
@spec load_transaction(Transaction.t()) :: :ok
def load_transaction(tx = %Transaction{type: :oracle, data: %TransactionData{content: content}}) do
MemTableLoader.load_transaction(tx)
PubSub.notify_new_oracle_data(content)
end
def load_transaction(tx = %Transaction{type: :oracle_summary}),
do: MemTableLoader.load_transaction(tx)
def load_transaction(%Transaction{}), do: :ok
@doc """
  Get the ZARYN price at the given date.
  Returns the EUR and USD prices.
  If the price is not found, the default values are used (0.05 EUR / 0.07 USD).
"""
  @spec get_zaryn_price(DateTime.t()) :: list({atom(), float()})
def get_zaryn_price(date = %DateTime{}) do
case MemTable.get_oracle_data("zaryn", date) do
{:ok, prices} ->
Enum.map(prices, fn {pair, price} -> {String.to_existing_atom(pair), price} end)
_ ->
[eur: 0.05, usd: 0.07]
end
end
def config_change(changed_conf) do
changed_conf
|> Keyword.get(Scheduler)
|> Scheduler.config_change()
end
end
|
lib/zaryn/oracle_chain.ex
| 0.807233
| 0.483405
|
oracle_chain.ex
|
starcoder
|
defmodule Estructura do
@moduledoc ~S"""
`Estructura` is a set of extensions for Elixir structures,
such as `Access` implementation, `Enumerable` and `Collectable`
implementations, validations and test data generation via `StreamData`.
`Estructura` simplifies the following
* `Access` implementation for structs
* `Enumerable` implementation for structs (as maps)
* `Collectable` implementation for one of struct’s fields (as `MapSet` does)
* `StreamData` generation of structs for property-based testing
### Use Options
  `use Estructura` accepts six keyword arguments.
* `access: true | false | :lazy` whether to generate the `Access` implementation, default `true`;
when `true` or `:lazy`, it also produces `put/3` and `get/3` methods to be used with `coercion`
and `validation`, when `:lazy`, instances of `Estructura.Lazy` are understood as values
* `coercion: boolean() | [key()]` whether to generate the bunch of `coerce_×××/1` functions
to be overwritten by implementations, default `false`
* `validation: boolean() | [key()]` whether to generate the bunch of `validate_×××/1` functions
to be overwritten by implementations, default `false`
  * `enumerable: boolean()` whether to generate the `Enumerable` protocol implementation, default `false`
* `collectable: false | key()` whether to generate the `Collectable` protocol implementation,
default `false`; if non-falsey atom is given, it must point to a struct field where `Collectable`
  would collect. Should be one of `list()`, `map()`, `MapSet.t()`, `bitstring()`
* `generator: %{optional(key()) => Estructura.Config.generator()}` the instructions
for the `__generate__/{0,1}` functions that would produce the target structure values suitable
for usage in `StreamData` property testing; the generated `__generator__/1` function is overwritable.
Please note, that setting `coercion` and/or `validation` to truthy values has effect
if and only if `access` has been also set to `true`.
Typical example of usage would be:
```elixir
defmodule MyStruct do
use Estructura,
access: true,
coercion: [:foo], # requires `c:MyStruct.Coercible.coerce_foo/1` impl
validation: true, # requires `c:MyStruct.Validatable.validate_×××/1` impls
enumerable: true,
collectable: :bar,
generator: [
foo: {StreamData, :integer},
bar: {StreamData, :list_of, [{StreamData, :string, [:alphanumeric]}]},
baz: {StreamData, :fixed_map,
[[key1: {StreamData, :integer}, key2: {StreamData, :integer}]]}
]
defstruct foo: 42, bar: [], baz: %{}
@impl MyStruct.Coercible
def coerce_foo(value) when is_integer(value), do: {:ok, value}
def coerce_foo(value) when is_float(value), do: {:ok, round(value)}
def coerce_foo(value) when is_binary(value) do
case Integer.parse(value) do
{value, ""} -> {:ok, value}
_ -> {:error, "#{value} is not a valid integer value"}
end
end
def coerce_foo(value),
do: {:error, "Cannot coerce value given for `foo` field (#{inspect(value)})"}
@impl MyStruct.Validatable
def validate_foo(value) when value >= 0, do: {:ok, value}
def validate_foo(_), do: {:error, ":foo must be positive"}
@impl MyStruct.Validatable
def validate_bar(value), do: {:ok, value}
@impl MyStruct.Validatable
def validate_baz(value), do: {:ok, value}
end
```
The above would allow the following to be done with the structure:
```elixir
s = %MyStruct{}
put_in s, [:foo], :forty_two
#⇒ %MyStruct{foo: :forty_two, bar: [], baz: %{}}
for i <- [1, 2, 3], into: s, do: i
#⇒ %MyStruct{foo: 42, bar: [1, 2, 3], baz: %{}}
Enum.map(s, &elem(&1, 1))
#⇒ [42, [], %{}]
MyStruct.__generator__() |> Enum.take(3)
#⇒ [
# %MyStruct{bar: [], baz: %{key1: 0, key2: 0}, foo: -1},
# %MyStruct{bar: ["g", "xO"], baz: %{key1: -1, key2: -2}, foo: 2},
# %MyStruct{bar: ["", "", ""], baz: %{key1: -3, key2: 1}, foo: -1}
# ]
```
### Coercion
When `coercion: true | [key()]` is passed as an argument to `use Estructura`,
the nested behaviour `Coercible` is generated and the target module claims to implement it.
To make a coercion work with `MyStruct.put/3` and `put_in/3` provided
by `Access` implementation, the consumer module should implement `MyStruct.Coercible`
behaviour.
For the consumer convenience, the warnings for not implemented functions will be issued by compiler.
### Validation
When `validation: true | [key()]` is passed as an argument to `use Estructura`,
the nested behaviour `Validatable` is generated and the target module claims to implement it.
To make a validation work with `MyStruct.put/3` and `put_in/3` provided
by `Access` implementation, the consumer module should implement `MyStruct.Validatable`
behaviour.
For the consumer convenience, the warnings for not implemented functions will be issued by compiler.
### Generation
If `generator` keyword argument has been passed, `MyStruct.__generate__/{0,1}` can be
used to generate instances of this struct for `StreamData` property based tests.
```elixir
property "generation" do
check all %MyStruct{foo: foo, bar: bar, baz: baz} <- MyStruct.__generator__() do
assert match?(%{key1: v1, key2: v2} when is_integer(v1) and is_integer(v2), baz)
assert is_integer(foo)
assert is_binary(bar)
end
end
```
### Lazy
If `access: :lazy` is passed as an option, the struct content might be instantiated lazily,
upon first access through `Kernel.×××_in/{2,3}` family.
This might be explicitly helpful when the real content requires a significant time
to load and/or store. Consider the full response from the web server, including
the gzipped content, which might in turn be a huge text file. Or an attachment to an email.
Instead of unarchiving the content, one might use `Lazy` as
```elixir
defmodule Response do
@moduledoc false
use Estructura, access: :lazy
def extract(file), do: {:ok, ZipHelper.unzip(file)}
defstruct __lazy_data__: nil,
file: Estructura.Lazy.new(&Response.extract/1)
end
response = %Response{__lazy_data__: zipped_content}
# immediate response
response |> get_in([:file])
# unzip and return
{unzipped, struct_with_cached_value} = response |> pop_in([:file])
# unzip and return the value, alter the struct with it
```
See `Estructura.Lazy` for details and options, see `Estructura.LazyMap` for
the implementation of lazy map.
"""
use Boundary
@doc false
defmacro __using__(opts) do
quote do
@__estructura__ struct!(Estructura.Config, unquote(opts))
@before_compile {Estructura.Hooks, :inject_estructura}
if @__estructura__.access == :lazy and
is_nil(Enum.find(Module.get_attribute(__MODULE__, :derive), &match?({Inspect, _}, &1))) do
@derive {Inspect, except: [:__lazy_data__]}
end
end
end
end
|
lib/estructura.ex
| 0.930142
| 0.920576
|
estructura.ex
|
starcoder
|
defmodule Cat.Either do
@moduledoc """
Either `left` or `right`.
Implements protocols:
* `Functor`
* `Applicative`
  * `Monad`
  * `MonadError`
"""
alias Cat.Maybe
defmodule Left do
@enforce_keys [:v]
defstruct [:v]
end
defmodule Right do
@enforce_keys [:v]
defstruct [:v]
end
@type left(a) :: %Left{v: a}
@type right(a) :: %Right{v: a}
@type t(l, r) :: left(l) | right(r)
@spec left(l) :: t(l, none) when l: var
def left(l), do: %Left{v: l}
@spec left?(t(any, any)) :: boolean
def left?(%Left{}), do: true
def left?(_), do: false
@spec maybe_left(t(l, any)) :: Maybe.t(l) when l: var
def maybe_left(%Left{v: l}), do: l
def maybe_left(%Right{}), do: nil
@spec right(r) :: t(none, r) when r: var
def right(r), do: %Right{v: r}
@spec right?(t(any, any)) :: boolean
def right?(%Right{}), do: true
def right?(_), do: false
@spec maybe_right(t(any, r)) :: Maybe.t(r) when r: var
def maybe_right(%Right{v: r}), do: r
def maybe_right(%Left{}), do: nil
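  @doc """
  Eliminates the `Either` by applying `case_left` to a `Left` value or
  `case_right` to a `Right` value. For example:

      fold(left(1), &(&1 + 1), &(&1 * 2))  #=> 2
      fold(right(3), &(&1 + 1), &(&1 * 2)) #=> 6
  """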
@spec fold(t(l, r), (l -> a), (r -> a)) :: a when l: var, r: var, a: var
def fold(%Left{v: l}, case_left, _), do: case_left.(l)
def fold(%Right{v: r}, _, case_right), do: case_right.(r)
  @spec swap(t(l, r)) :: t(r, l) when l: var, r: var
def swap(%Left{v: l}), do: %Right{v: l}
def swap(%Right{v: r}), do: %Left{v: r}
@spec sample() :: t(:sample, none)
def sample(), do: %Left{v: :sample}
end
alias Cat.Either
alias Cat.Either.{Left, Right}
defimpl Cat.Functor, for: [Either, Left, Right] do
@type t(r) :: Either.t(any, r)
@spec map(t(a), (a -> b)) :: t(b) when a: var, b: var
def map(%Right{v: a}, f), do: %Right{v: f.(a)}
def map(either, _), do: either
@spec as(t(any), a) :: t(a) when a: var
defdelegate as(t, a), to: Cat.Functor.Default
end
defimpl Cat.Applicative, for: [Either, Left, Right] do
@type t(r) :: Either.t(any, r)
@spec pure(t(any), a) :: t(a) when a: var
def pure(_, a), do: %Right{v: a}
@spec ap(t((a -> b)), t(a)) :: t(b) when a: var, b: var
def ap(%Right{v: f}, %Right{v: a}), do: %Right{v: f.(a)}
def ap(%Right{}, l=%Left{}), do: l
def ap(l, _), do: l
@spec product(t(a), t(b)) :: t({a, b}) when a: var, b: var
defdelegate product(ta, tb), to: Cat.Applicative.Default
@spec product_l(t(a), t(any)) :: t(a) when a: var
defdelegate product_l(ta, tb), to: Cat.Applicative.Default
@spec product_r(t(any), t(b)) :: t(b) when b: var
defdelegate product_r(ta, tb), to: Cat.Applicative.Default
@spec map2(t(a), t(b), (a, b -> c)) :: t(c) when a: var, b: var, c: var
defdelegate map2(ta, tb, f), to: Cat.Applicative.Default
end
defimpl Cat.Monad, for: [Either, Left, Right] do
@type t(r) :: Either.t(any, r)
@spec flat_map(t(a), (a -> t(b))) :: t(b) when a: var, b: var
def flat_map(%Right{v: a}, f), do: f.(a)
def flat_map(l=%Left{}, _), do: l
@spec flat_tap(t(a), (a -> t(no_return))) :: t(a) when a: var
defdelegate flat_tap(ta, f), to: Cat.Monad.Default
@spec flatten(t(t(a))) :: t(a) when a: var
defdelegate flatten(tta), to: Cat.Monad.Default
end
defimpl Cat.MonadError, for: [Either, Left, Right] do
@type t(r) :: Either.t(any, r)
@spec raise(t(any), any) :: t(none)
def raise(_, error), do: %Left{v: error}
@spec recover(t(a), (any -> t(a))) :: t(a) when a: var
def recover(%Left{v: error}, f), do: f.(error)
def recover(right, _), do: right
@spec on_error(t(a), (error -> t(no_return))) :: t(a) when a: var, error: any
defdelegate on_error(ta, f), to: Cat.MonadError.Default
@spec lift_ok_or_error(t(any), Cat.MonadError.ok_or_error(a)) :: t(a) when a: var
def lift_ok_or_error(_, {:ok, a}), do: %Right{v: a}
def lift_ok_or_error(_, {:error, e}), do: %Left{v: e}
@spec attempt(t(a)) :: t(Cat.MonadError.ok_or_error(a)) when a: var
defdelegate attempt(ta), to: Cat.MonadError.Default
end
|
lib/cat/data/either.ex
| 0.881066
| 0.599602
|
either.ex
|
starcoder
|
defmodule ContentSecurityPolicy do
@moduledoc """
Provides functions for interacting with Content Security Policies.
A Content Security Policy is a header which determines which assets the
browser is allowed to retrieve.
See https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP for more in depth
documentation.
"""
alias ContentSecurityPolicy.Directive
alias ContentSecurityPolicy.Policy
@doc """
Converts a `ContentSecurityPolicy.Policy` struct to a valid content security
policy string.
## Examples
iex> policy = %ContentSecurityPolicy.Policy{default_src: ["'self'"]}
iex> ContentSecurityPolicy.serialize(policy)
"default-src 'self';"
"""
def serialize(%Policy{} = csp) do
csp
|> Map.from_struct()
|> filter_empty_sources
|> stringify_and_hyphenate_directives
|> join_sources_with_spaces
|> format_each_directive
|> join_directives_with_spaces
end
  defp filter_empty_sources(policy) do
    Enum.reject(policy, fn {_directive, source} -> empty?(source) end)
  end
  defp empty?(nil), do: true
  defp empty?([]), do: true
  defp empty?(""), do: true
  defp empty?(_), do: false
defp stringify_and_hyphenate_directives(policy) do
Enum.map(policy, fn {directive, source} ->
updated_directive =
directive
|> to_string
|> String.replace("_", "-")
{updated_directive, source}
end)
end
defp join_sources_with_spaces(policy) do
Enum.map(policy, fn {directive, sources} -> {directive, Enum.join(sources, " ")} end)
end
defp format_each_directive(policy) do
Enum.map(policy, fn {directive, sources} -> "#{directive} #{sources};" end)
end
defp join_directives_with_spaces(directives) when is_list(directives) do
Enum.join(directives, " ")
end
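  # Illustrative usage (assuming the directive starts out empty):
  #
  #   policy = %ContentSecurityPolicy.Policy{}
  #   policy = ContentSecurityPolicy.add_source_value(policy, :default_src, "'self'")
  #   ContentSecurityPolicy.serialize(policy)
  #   #=> "default-src 'self';"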
@doc """
Adds a single source value to a directive on the given policy.
"""
@spec add_source_value(Policy.t(), Directive.valid_directive(), String.t()) ::
Policy.t()
  def add_source_value(policy, directive, source_value) do
    Directive.validate_directive!(directive)
    current_source_values = Map.get(policy, directive) || []
    new_source_values = Enum.uniq(current_source_values ++ [source_value])
    Map.put(policy, directive, new_source_values)
  end
@doc """
Generates a random base 64 encoded string for use in Content Security Policy
nonces.
"""
@spec generate_nonce(bytes :: pos_integer()) :: String.t()
def generate_nonce(bytes \\ 32) do
bytes
|> :crypto.strong_rand_bytes
|> Base.encode64(padding: false)
end
end
|
lib/content_security_policy.ex
| 0.92427
| 0.414188
|
content_security_policy.ex
|
starcoder
|
defmodule Regex do
@moduledoc ~S"""
Regular expressions for Elixir built on top of Erlang's `re` module.
As the `re` module, Regex is based on PCRE
(Perl Compatible Regular Expressions). More information can be
found in the [`re` documentation](http://www.erlang.org/doc/man/re.html).
Regular expressions in Elixir can be created using `Regex.compile!/2`
or using the special form with [`~r`](Kernel.html#sigil_r/2):
# A simple regular expressions that matches foo anywhere in the string
~r/foo/
# A regular expression with case insensitive and unicode options
~r/foo/iu
A Regex is represented internally as the `Regex` struct. Therefore,
`%Regex{}` can be used whenever there is a need to match on them.
## Modifiers
The modifiers available when creating a Regex are:
* `unicode` (u) - enables unicode specific patterns like `\p` and changes
modifiers like `\w`, `\W`, `\s` and friends to also match on unicode.
It expects valid unicode strings to be given on match
* `caseless` (i) - add case insensitivity
* `dotall` (s) - causes dot to match newlines and also set newline to
anycrlf; the new line setting can be overridden by setting `(*CR)` or
`(*LF)` or `(*CRLF)` or `(*ANY)` according to re documentation
* `multiline` (m) - causes `^` and `$` to mark the beginning and end of
each line; use `\A` and `\z` to match the end or beginning of the string
* `extended` (x) - whitespace characters are ignored except when escaped
and allow `#` to delimit comments
* `firstline` (f) - forces the unanchored pattern to match before or at the
first newline, though the matched text may continue over the newline
* `ungreedy` (U) - inverts the "greediness" of the regexp
(the previous `r` option is deprecated in favor of `U`)
The options not available are:
* `anchored` - not available, use `^` or `\A` instead
* `dollar_endonly` - not available, use `\z` instead
* `no_auto_capture` - not available, use `?:` instead
* `newline` - not available, use `(*CR)` or `(*LF)` or `(*CRLF)` or
`(*ANYCRLF)` or `(*ANY)` at the beginning of the regexp according to the
re documentation
## Captures
  Many functions in this module allow controlling what to capture in a regex
  match via the `:capture` option. The supported values are:
* `:all` - all captured subpatterns including the complete matching string
(this is the default)
* `:first` - only the first captured subpattern, which is always the
complete matching part of the string; all explicitly captured subpatterns
are discarded
  * `:all_but_first` - all but the first matching subpattern, i.e. all
explicitly captured subpatterns, but not the complete matching part of
the string
* `:none` - do not return matching subpatterns at all
* `:all_names` - captures all names in the Regex
* `list(binary)` - a list of named captures to capture
"""
defstruct re_pattern: nil, source: "", opts: ""
@type t :: %__MODULE__{re_pattern: term, source: binary, opts: binary}
defmodule CompileError do
defexception message: "regex could not be compiled"
end
@doc """
Compiles the regular expression.
The given options can either be a binary with the characters
representing the same regex options given to the `~r` sigil,
or a list of options, as expected by the [Erlang `re` docs](http://www.erlang.org/doc/man/re.html).
It returns `{:ok, regex}` in case of success,
`{:error, reason}` otherwise.
## Examples
iex> Regex.compile("foo")
{:ok, ~r"foo"}
iex> Regex.compile("*foo")
{:error, {'nothing to repeat', 0}}
"""
@spec compile(binary, binary | [term]) :: {:ok, t} | {:error, any}
def compile(source, options \\ "")
def compile(source, options) when is_binary(options) do
case translate_options(options, []) do
{:error, rest} ->
{:error, {:invalid_option, rest}}
translated_options ->
compile(source, translated_options, options)
end
end
def compile(source, options) when is_list(options) do
compile(source, options, "")
end
defp compile(source, opts, doc_opts) when is_binary(source) do
case :re.compile(source, opts) do
{:ok, re_pattern} ->
{:ok, %Regex{re_pattern: re_pattern, source: source, opts: doc_opts}}
error ->
error
end
end
@doc """
Compiles the regular expression according to the given options.
Fails with `Regex.CompileError` if the regex cannot be compiled.
"""
  @spec compile!(binary, binary | [term]) :: t
def compile!(source, options \\ "") do
case compile(source, options) do
{:ok, regex} -> regex
{:error, {reason, at}} -> raise Regex.CompileError, message: "#{reason} at position #{at}"
end
end
@doc """
Returns a boolean indicating whether there was a match or not.
## Examples
iex> Regex.match?(~r/foo/, "foo")
true
iex> Regex.match?(~r/foo/, "bar")
false
"""
@spec match?(t, String.t) :: boolean
def match?(%Regex{re_pattern: compiled}, string) when is_binary(string) do
:re.run(string, compiled, [{:capture, :none}]) == :match
end
@doc """
Returns true if the given argument is a regex.
## Examples
iex> Regex.regex?(~r/foo/)
true
iex> Regex.regex?(0)
false
"""
@spec regex?(t) :: true
@spec regex?(any) :: false
def regex?(%Regex{}), do: true
def regex?(_), do: false
@doc """
Runs the regular expression against the given string until the first match.
It returns a list with all captures or `nil` if no match occurred.
## Options
* `:return` - set to `:index` to return indexes. Defaults to `:binary`.
* `:capture` - what to capture in the result. Check the moduledoc for `Regex`
to see the possible capture values.
## Examples
iex> Regex.run(~r/c(d)/, "abcd")
["cd", "d"]
iex> Regex.run(~r/e/, "abcd")
nil
iex> Regex.run(~r/c(d)/, "abcd", return: :index)
[{2,2},{3,1}]
"""
@spec run(t, binary, [term]) :: nil | [binary] | [{integer, integer}]
def run(regex, string, options \\ [])
def run(%Regex{re_pattern: compiled}, string, options) when is_binary(string) do
return = Keyword.get(options, :return, :binary)
captures = Keyword.get(options, :capture, :all)
case :re.run(string, compiled, [{:capture, captures, return}]) do
:nomatch -> nil
:match -> []
{:match, results} -> results
end
end
@doc """
Returns the given captures as a map or `nil` if no captures are
found. The option `:return` can be set to `:index` to get indexes
back.
## Examples
iex> Regex.named_captures(~r/c(?<foo>d)/, "abcd")
%{"foo" => "d"}
iex> Regex.named_captures(~r/a(?<foo>b)c(?<bar>d)/, "abcd")
%{"bar" => "d", "foo" => "b"}
iex> Regex.named_captures(~r/a(?<foo>b)c(?<bar>d)/, "efgh")
nil
"""
@spec named_captures(t, String.t, [term]) :: map | nil
def named_captures(regex, string, options \\ []) when is_binary(string) do
names = names(regex)
options = Keyword.put(options, :capture, names)
results = run(regex, string, options)
if results, do: Enum.zip(names, results) |> Enum.into(%{})
end
@doc """
Returns the underlying `re_pattern` in the regular expression.
"""
@spec re_pattern(t) :: term
def re_pattern(%Regex{re_pattern: compiled}) do
compiled
end
@doc """
Returns the regex source as a binary.
## Examples
iex> Regex.source(~r(foo))
"foo"
"""
@spec source(t) :: String.t
def source(%Regex{source: source}) do
source
end
@doc """
Returns the regex options as a string.
## Examples
iex> Regex.opts(~r(foo)m)
"m"
"""
@spec opts(t) :: String.t
def opts(%Regex{opts: opts}) do
opts
end
@doc """
Returns a list of names in the regex.
## Examples
iex> Regex.names(~r/(?<foo>bar)/)
["foo"]
"""
@spec names(t) :: [String.t]
def names(%Regex{re_pattern: re_pattern}) do
{:namelist, names} = :re.inspect(re_pattern, :namelist)
names
end
@doc """
Same as `run/3`, but scans the target several times collecting all
matches of the regular expression. A list of lists is returned,
where each entry in the primary list represents a match and each
entry in the secondary list represents the captured contents.
## Options
* `:return` - set to `:index` to return indexes. Defaults to `:binary`.
* `:capture` - what to capture in the result. Check the moduledoc for `Regex`
to see the possible capture values.
## Examples
iex> Regex.scan(~r/c(d|e)/, "abcd abce")
[["cd", "d"], ["ce", "e"]]
iex> Regex.scan(~r/c(?:d|e)/, "abcd abce")
[["cd"], ["ce"]]
iex> Regex.scan(~r/e/, "abcd")
[]
"""
@spec scan(t, String.t, [term]) :: [[String.t]]
def scan(regex, string, options \\ [])
def scan(%Regex{re_pattern: compiled}, string, options) when is_binary(string) do
return = Keyword.get(options, :return, :binary)
captures = Keyword.get(options, :capture, :all)
options = [{:capture, captures, return}, :global]
case :re.run(string, compiled, options) do
:match -> []
:nomatch -> []
{:match, results} -> results
end
end
@doc """
Splits the given target into the number of parts specified.
## Options
* `:parts` - when specified, splits the string into the given number of
parts. If not specified, `:parts` defaults to `:infinity`, which will
split the string into the maximum number of parts possible based on the
given pattern.
* `:trim` - when true, remove blank strings from the result.
* `:on` - specifies which captures and order to split the string
on. Check the moduledoc for `Regex` to see the possible capture
values. Defaults to `:first` which means captures inside the
Regex does not affect the split result.
## Examples
iex> Regex.split(~r/-/, "a-b-c")
["a","b","c"]
iex> Regex.split(~r/-/, "a-b-c", [parts: 2])
["a","b-c"]
iex> Regex.split(~r/-/, "abc")
["abc"]
iex> Regex.split(~r//, "abc")
["a", "b", "c", ""]
iex> Regex.split(~r/a(?<second>b)c/, "abc")
["", ""]
iex> Regex.split(~r/a(?<second>b)c/, "abc", on: [:second])
["a", "c"]
"""
@spec split(t, String.t, [term]) :: [String.t]
def split(regex, string, options \\ [])
def split(%Regex{}, "", opts) do
if Keyword.get(opts, :trim, false) do
[]
else
[""]
end
end
def split(%Regex{re_pattern: compiled}, string, opts) when is_binary(string) do
on = Keyword.get(opts, :on, :first)
case :re.run(string, compiled, [:global, capture: on]) do
{:match, matches} ->
do_split(matches, string, 0,
parts_to_index(Keyword.get(opts, :parts, :infinity)),
Keyword.get(opts, :trim, false))
:match ->
[string]
:nomatch ->
[string]
end
end
defp parts_to_index(:infinity), do: 0
defp parts_to_index(n) when is_integer(n) and n > 0, do: n
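  # Walks the match positions, emitting the slice of the subject between the
  # previous offset and each match (skipping the matched part itself) while
  # honouring the :parts counter and the :trim option.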
defp do_split(_, string, offset, _counter, true) when byte_size(string) <= offset,
do: []
defp do_split(_, string, offset, 1, _trim),
do: [binary_part(string, offset, byte_size(string) - offset)]
defp do_split([], string, offset, _counter, _trim),
do: [binary_part(string, offset, byte_size(string) - offset)]
defp do_split([[{pos, _}|h]|t], string, offset, counter, trim) when pos - offset < 0,
do: do_split([h|t], string, offset, counter, trim)
defp do_split([[]|t], string, offset, counter, trim),
do: do_split(t, string, offset, counter, trim)
defp do_split([[{pos, length}|h]|t], string, offset, counter, trim) do
new_offset = pos + length
keep = pos - offset
if keep == 0 and (length == 0 or trim) do
do_split([h|t], string, new_offset, counter, trim)
else
<<_::binary-size(offset), part::binary-size(keep), _::binary>> = string
[part|do_split([h|t], string, new_offset, counter - 1, trim)]
end
end
@doc ~S"""
  Receives a regex, a binary and a replacement, and returns a new
  binary where all matches are replaced by the replacement.
The replacement can be either a string or a function. The string
is used as a replacement for every match and it allows specific
captures to be accessed via `\N` or `\g{N}`, where `N` is the
capture. In case `\0` is used, the whole match is inserted.
When the replacement is a function, the function may have arity
N where each argument maps to a capture, with the first argument
being the whole match. If the function expects more arguments
than captures found, the remaining arguments will receive `""`.
## Options
* `:global` - when `false`, replaces only the first occurrence
(defaults to `true`)
## Examples
iex> Regex.replace(~r/d/, "abc", "d")
"abc"
iex> Regex.replace(~r/b/, "abc", "d")
"adc"
iex> Regex.replace(~r/b/, "abc", "[\\0]")
"a[b]c"
iex> Regex.replace(~r/a(b|d)c/, "abcadc", "[\\1]")
"[b][d]"
iex> Regex.replace(~r/a(b|d)c/, "abcadc", fn _, x -> "[#{x}]" end)
"[b][d]"
"""
@spec replace(t, String.t, String.t | (... -> String.t), [term]) :: String.t
def replace(regex, string, replacement, options \\ [])
def replace(regex, string, replacement, options) when is_binary(replacement) do
do_replace(regex, string, precompile_replacement(replacement), options)
end
def replace(regex, string, replacement, options) when is_function(replacement) do
{:arity, arity} = :erlang.fun_info(replacement, :arity)
do_replace(regex, string, {replacement, arity}, options)
end
defp do_replace(%Regex{re_pattern: compiled}, string, replacement, options) do
opts = if Keyword.get(options, :global) != false, do: [:global], else: []
opts = [{:capture, :all, :index}|opts]
case :re.run(string, compiled, opts) do
:nomatch ->
string
{:match, [mlist|t]} when is_list(mlist) ->
apply_list(string, replacement, [mlist|t]) |> IO.iodata_to_binary
{:match, slist} ->
apply_list(string, replacement, [slist]) |> IO.iodata_to_binary
end
end
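# A string replacement is precompiled into a list of binaries (literal
# segments) and integers (capture indexes), so `\N`, `\g{N}`, and `\\` are
# parsed only once per call rather than once per match.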
defp precompile_replacement(""),
do: []
defp precompile_replacement(<<?\\, ?g, ?{, rest :: binary>>) when byte_size(rest) > 0 do
{ns, <<?}, rest :: binary>>} = pick_int(rest)
[List.to_integer(ns) | precompile_replacement(rest)]
end
defp precompile_replacement(<<?\\, ?\\, rest :: binary>>) do
[<<?\\>> | precompile_replacement(rest)]
end
defp precompile_replacement(<<?\\, x, rest :: binary>>) when x in ?0..?9 do
{ns, rest} = pick_int(rest)
[List.to_integer([x|ns]) | precompile_replacement(rest)]
end
defp precompile_replacement(<<x, rest :: binary>>) do
case precompile_replacement(rest) do
[head | t] when is_binary(head) ->
[<<x, head :: binary>> | t]
other ->
[<<x>> | other]
end
end
defp pick_int(<<x, rest :: binary>>) when x in ?0..?9 do
{found, rest} = pick_int(rest)
{[x|found], rest}
end
defp pick_int(bin) do
{[], bin}
end
defp apply_list(string, replacement, list) do
apply_list(string, string, 0, replacement, list)
end
defp apply_list(_, "", _, _, []) do
[]
end
defp apply_list(_, string, _, _, []) do
string
end
defp apply_list(whole, string, pos, replacement, [[{mpos, _} | _] | _] = list) when mpos > pos do
length = mpos - pos
<<untouched :: binary-size(length), rest :: binary>> = string
[untouched | apply_list(whole, rest, mpos, replacement, list)]
end
defp apply_list(whole, string, pos, replacement, [[{pos, length} | _] = head | tail]) do
<<_ :: size(length)-binary, rest :: binary>> = string
new_data = apply_replace(whole, replacement, head)
[new_data | apply_list(whole, rest, pos + length, replacement, tail)]
end
defp apply_replace(string, {fun, arity}, indexes) do
apply(fun, get_indexes(string, indexes, arity))
end
defp apply_replace(_, [bin], _) when is_binary(bin) do
bin
end
defp apply_replace(string, repl, indexes) do
indexes = List.to_tuple(indexes)
for part <- repl do
cond do
is_binary(part) ->
part
part >= tuple_size(indexes) ->
""
true ->
get_index(string, elem(indexes, part))
end
end
end
defp get_index(_string, {pos, _len}) when pos < 0 do
""
end
defp get_index(string, {pos, len}) do
<<_ :: size(pos)-binary, res :: size(len)-binary, _ :: binary>> = string
res
end
defp get_indexes(_string, _, 0) do
[]
end
defp get_indexes(string, [], arity) do
[""|get_indexes(string, [], arity - 1)]
end
defp get_indexes(string, [h|t], arity) do
[get_index(string, h)|get_indexes(string, t, arity - 1)]
end
{:ok, pattern} = :re.compile(~S"[.^$*+?()[{\\\|\s#]", [:unicode])
@escape_pattern pattern
@doc ~S"""
Escapes a string to be literally matched in a regex.
## Examples
iex> Regex.escape(".")
"\\."
iex> Regex.escape("\\what if")
"\\\\what\\ if"
"""
@spec escape(String.t) :: String.t
def escape(string) when is_binary(string) do
:re.replace(string, @escape_pattern, "\\\\&", [:global, {:return, :binary}])
end
# Helpers
@doc false
# Unescape map function used by Macro.unescape_string.
def unescape_map(?f), do: ?\f
def unescape_map(?n), do: ?\n
def unescape_map(?r), do: ?\r
def unescape_map(?t), do: ?\t
def unescape_map(?v), do: ?\v
def unescape_map(?a), do: ?\a
def unescape_map(_), do: false
# Private Helpers
defp translate_options(<<?u, t :: binary>>, acc), do: translate_options(t, [:unicode, :ucp|acc])
defp translate_options(<<?i, t :: binary>>, acc), do: translate_options(t, [:caseless|acc])
defp translate_options(<<?x, t :: binary>>, acc), do: translate_options(t, [:extended|acc])
defp translate_options(<<?f, t :: binary>>, acc), do: translate_options(t, [:firstline|acc])
defp translate_options(<<?U, t :: binary>>, acc), do: translate_options(t, [:ungreedy|acc])
defp translate_options(<<?s, t :: binary>>, acc), do: translate_options(t, [:dotall, {:newline, :anycrlf}|acc])
defp translate_options(<<?m, t :: binary>>, acc), do: translate_options(t, [:multiline|acc])
# TODO: Deprecate by 1.2
# TODO: Remove by 1.3
defp translate_options(<<?r, t :: binary>>, acc), do: translate_options(t, [:ungreedy|acc])
defp translate_options(<<>>, acc), do: acc
defp translate_options(rest, _acc), do: {:error, rest}
end
| lib/elixir/lib/regex.ex | 0.92792 | 0.766206 | regex.ex | starcoder |
defmodule Opencensus.Plug.Trace do
@moduledoc """
Template method for creating `Plug` to trace your `Plug` requests.
## Usage
1. Create your own `Plug` module:
```elixir
defmodule MyApp.TracingPlug do
use Opencensus.Plug.Trace
end
```
2. Add it to your pipeline, ex. for Phoenix:
```elixir
defmodule MyAppWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :my_app
plug MyApp.TracingPlug
end
```
## Configuration
This module defines 2 callbacks, which allow you to configure your
span and also provide a way to add custom attributes assigned to the span:
- `c:span_name/2` - defaults to the request path
- `c:span_status/2` - defaults to mapping the response code to an OpenCensus
span status, see `:opencensus.http_status_to_trace_status/1`.
You can also use the `attributes` argument in `use`, which must be either a
list of attribute names, each the name of a 1-argument function in the
current module that returns the string value of the attribute, or a
map/keyword list whose values are one of:
- `atom` - the name of the function to be called
- `{module, function}` - which will call `apply(module, function, [conn])`
- `{module, function, args}` - which will prepend `conn` to the given arguments
and call `apply(module, function, [conn | args])`
Finally, you can configure which propagation format to use for tracing by
setting the `propagation_format` argument in `use` to either `:b3` or
`:tracecontext`. If none is given, it defaults to `:tracecontext`.
Example:
```elixir
defmodule MyAppWeb.TraceWithCustomAttribute do
use Opencensus.Plug.Trace, attributes: [:method], propagation_format: :b3
def method(conn), do: conn.method
end
```
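A sketch of the map/keyword form, assuming a hypothetical `MyApp.Attrs`
module with a `user_id/1` function that receives the conn and returns a
string:
```elixir
defmodule MyAppWeb.TraceWithMFAAttribute do
  # `MyApp.Attrs.user_id/1` is a hypothetical helper, not part of this library
  use Opencensus.Plug.Trace, attributes: [user_id: {MyApp.Attrs, :user_id}]
end
```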
"""
@enforce_keys [:span_name, :tags, :conn_fields]
defstruct @enforce_keys
@doc """
Return the name for the current span. By default returns the request path.
"""
@callback span_name(Plug.Conn.t(), options :: term()) :: String.t()
@doc """
Return a tuple containing the span status and message. By default returns the
status assigned by the [default mapping](https://opencensus.io/tracing/span/status/)
and an empty message.
"""
@callback span_status(Plug.Conn.t(), options :: term()) :: {integer(), String.t()}
defmacro __using__(opts) do
attributes = Keyword.get(opts, :attributes, [])
propagation_format = Keyword.get(opts, :propagation_format, :tracecontext)
quote do
@behaviour Plug
@behaviour unquote(__MODULE__)
def init(opts), do: opts
def call(conn, opts) do
parent_span_ctx =
case unquote(propagation_format) do
:tracecontext -> :oc_propagation_http_tracecontext.from_headers(conn.req_headers)
:b3 -> :oc_propagation_http_b3.from_headers(conn.req_headers)
end
:ocp.with_span_ctx(parent_span_ctx)
user_agent =
conn
|> Plug.Conn.get_req_header("user-agent")
|> List.first()
default_attributes = %{
"http.host" => conn.host,
"http.method" => conn.method,
"http.path" => conn.request_path,
"http.user_agent" => user_agent,
"http.url" => Plug.Conn.request_url(conn)
# TODO: How do we get this?
# "http.route" => ""
}
attributes = Opencensus.Plug.get_tags(conn, __MODULE__, unquote(attributes))
:ocp.with_child_span(span_name(conn, opts), Map.merge(default_attributes, attributes))
span_ctx = :ocp.current_span_ctx()
:ok = unquote(__MODULE__).set_logger_metadata(span_ctx)
conn
|> Plug.Conn.put_private(:opencensus_span_ctx, span_ctx)
|> unquote(__MODULE__).put_ctx_resp_header(span_ctx, unquote(propagation_format))
|> Plug.Conn.register_before_send(fn conn ->
{status, msg} = span_status(conn, opts)
:oc_trace.put_attribute("http.status_code", Integer.to_string(conn.status), span_ctx)
:oc_trace.put_attribute("span.kind", "SERVER", span_ctx)
:oc_trace.set_status(status, msg, span_ctx)
:oc_trace.set_kind(:opencensus.span_kind_server(), span_ctx)
:oc_trace.finish_span(span_ctx)
:ocp.with_span_ctx(parent_span_ctx)
conn
end)
end
def span_name(conn, _opts), do: conn.request_path
def span_status(conn, _opts),
do: {:opencensus.http_status_to_trace_status(conn.status), ""}
defoverridable span_name: 2, span_status: 2
end
end
## PRIVATE
require Record
Record.defrecordp(
:ctx,
Record.extract(:span_ctx, from_lib: "opencensus/include/opencensus.hrl")
)
@doc false
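# Formats the trace id and span id as lowercase hex (via `:io_lib.format/2`
# with a base-16 control sequence) before putting them in `Logger.metadata/1`.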
def set_logger_metadata(span) do
trace_id = List.to_string(:io_lib.format("~.16b", [ctx(span, :trace_id)]))
span_id = List.to_string(:io_lib.format("~16.16.0b", [ctx(span, :span_id)]))
Logger.metadata(
trace_id: trace_id,
span_id: span_id,
trace_options: ctx(span, :trace_options)
)
:ok
end
@doc false
def put_ctx_resp_header(conn, span_ctx, :tracecontext) do
headers =
for {k, v} <- :oc_propagation_http_tracecontext.to_headers(span_ctx) do
{String.downcase(k), List.to_string(v)}
end
Plug.Conn.prepend_resp_headers(conn, headers)
end
def put_ctx_resp_header(conn, span_ctx, :b3) do
headers =
for {k, v} <- :oc_propagation_http_b3.to_headers(span_ctx) do
cond do
is_list(v) -> {String.downcase(k), List.to_string(v)}
true -> {String.downcase(k), v}
end
end
Plug.Conn.prepend_resp_headers(conn, headers)
end
end
| lib/opencensus/plug/trace.ex | 0.807271 | 0.810854 | trace.ex | starcoder |
defmodule RDF.XSD.Datatype.Test.Case do
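@moduledoc """
`ExUnit.CaseTemplate` for testing XSD datatypes.
A minimal usage sketch (the datatype and option values below are
illustrative, not taken from a real test module):
    defmodule BooleanTest do
      use RDF.XSD.Datatype.Test.Case,
        datatype: RDF.XSD.Boolean,
        name: "boolean",
        primitive: true,
        valid: %{true => {true, nil, "true"}},
        invalid: ["yo"]
    end
`valid` maps each input to a `{value, lexical, canonicalized}` tuple, as
consumed by the generated tests below.
"""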
use ExUnit.CaseTemplate
alias RDF.XSD
using(opts) do
datatype = Keyword.fetch!(opts, :datatype)
datatype_name = Keyword.fetch!(opts, :name)
datatype_iri =
Keyword.get(opts, :iri, RDF.NS.XSD.__base_iri__ <> datatype_name)
valid = Keyword.get(opts, :valid)
invalid = Keyword.get(opts, :invalid)
primitive = Keyword.get(opts, :primitive)
base = unless primitive, do: Keyword.fetch!(opts, :base)
base_primitive = unless primitive, do: Keyword.fetch!(opts, :base_primitive)
applicable_facets = Keyword.get(opts, :applicable_facets, [])
facets = Keyword.get(opts, :facets)
quote do
alias RDF.XSD
alias RDF.XSD.Datatype
alias RDF.TestDatatypes.{Age, DecimalUnitInterval, DoubleUnitInterval, FloatUnitInterval}
alias unquote(datatype)
import unquote(__MODULE__)
doctest unquote(datatype)
@moduletag datatype: unquote(datatype)
if unquote(valid) do
@valid unquote(valid)
@invalid unquote(invalid)
test "registration" do
assert unquote(datatype) in RDF.Literal.Datatype.Registry.builtin_datatypes()
assert unquote(datatype) in RDF.Literal.Datatype.Registry.builtin_xsd_datatypes()
assert unquote(datatype) |> RDF.Literal.Datatype.Registry.builtin_datatype?()
assert unquote(datatype) |> RDF.Literal.Datatype.Registry.builtin_xsd_datatype?()
assert RDF.Literal.Datatype.get(unquote(datatype_iri)) == unquote(datatype)
assert XSD.Datatype.get(unquote(datatype_iri)) == unquote(datatype)
end
test "primitive/0" do
assert unquote(datatype).primitive?() == unquote(!!primitive)
end
test "base/0" do
if unquote(primitive) do
assert unquote(datatype).base == nil
else
assert unquote(datatype).base == unquote(base)
end
end
test "base_primitive/0" do
if unquote(primitive) do
assert unquote(datatype).base_primitive == unquote(datatype)
else
assert unquote(datatype).base_primitive == unquote(base_primitive)
end
end
test "derived_from?/1" do
assert unquote(datatype).derived_from?(unquote(datatype)) == false
unless unquote(primitive) do
assert unquote(datatype).derived_from?(unquote(base)) == true
assert unquote(datatype).derived_from?(unquote(base_primitive)) == true
end
end
describe "datatype?/1" do
test "with itself" do
assert unquote(datatype).datatype?(unquote(datatype)) == true
end
test "with non-RDF values" do
assert unquote(datatype).datatype?(self()) == false
assert unquote(datatype).datatype?(Elixir.Enum) == false
assert unquote(datatype).datatype?(:foo) == false
end
unless unquote(primitive) do
test "on a base datatype" do
# We're using apply here to suppress "nil.datatype?/1 is undefined" warnings caused by the primitives
assert apply(unquote(base), :datatype?, [unquote(datatype)]) == true
assert apply(unquote(base_primitive), :datatype?, [unquote(datatype)]) == true
end
end
end
test "applicable_facets/0" do
assert MapSet.new(unquote(datatype).applicable_facets()) ==
MapSet.new(unquote(applicable_facets))
end
if unquote(facets) do
test "facets" do
Enum.each(unquote(facets), fn {facet, value} ->
assert apply(unquote(datatype), facet, []) == value
end)
end
end
test "name/0" do
assert unquote(datatype).name() == unquote(datatype_name)
end
test "id/0" do
assert unquote(datatype).id() == RDF.iri(unquote(datatype_iri))
end
describe "general datatype?/1" do
test "on the exact same datatype" do
assert (unquote(datatype).datatype?(unquote(datatype))) == true
Enum.each(@valid, fn {input, _} ->
literal = unquote(datatype).new(input)
assert (unquote(datatype).datatype?(literal)) == true
assert (unquote(datatype).datatype?(literal.literal)) == true
end)
end
unless unquote(primitive) do
test "on the base datatype" do
assert (unquote(base).datatype?(unquote(datatype))) == true
Enum.each(@valid, fn {input, _} ->
literal = unquote(datatype).new(input)
assert (unquote(base).datatype?(literal)) == true
assert (unquote(base).datatype?(literal.literal)) == true
end)
end
test "on the base primitive datatype" do
assert (unquote(base_primitive).datatype?(unquote(datatype))) == true
Enum.each(@valid, fn {input, _} ->
literal = unquote(datatype).new(input)
assert (unquote(base_primitive).datatype?(literal)) == true
assert (unquote(base_primitive).datatype?(literal.literal)) == true
end)
end
end
end
test "datatype_id/1" do
Enum.each(@valid, fn {input, _} ->
assert (unquote(datatype).new(input) |> unquote(datatype).datatype_id()) == RDF.iri(unquote(datatype_iri))
end)
end
test "language/1" do
Enum.each(@valid, fn {input, _} ->
assert (unquote(datatype).new(input) |> unquote(datatype).language()) == nil
end)
end
describe "general new" do
Enum.each(@valid, fn {input, {value, lexical, _}} ->
expected = %RDF.Literal{
literal: %unquote(datatype){value: value, uncanonical_lexical: lexical}
}
@tag example: %{input: input, output: expected}
test "valid: #{unquote(datatype)}.new(#{inspect(input)})", %{example: example} do
assert unquote(datatype).new(example.input) == example.output
end
end)
Enum.each(@invalid, fn value ->
expected = %RDF.Literal{
literal: %unquote(datatype){
uncanonical_lexical: unquote(datatype).init_invalid_lexical(value, [])
}
}
@tag example: %{input: value, output: expected}
test "invalid: #{unquote(datatype)}.new(#{inspect(value)})",
%{example: example} do
assert unquote(datatype).new(example.input) == example.output
end
end)
test "canonicalize option" do
Enum.each(@valid, fn {input, _} ->
assert unquote(datatype).new(input, canonicalize: true) ==
unquote(datatype).new(input) |> unquote(datatype).canonical()
end)
Enum.each(@invalid, fn input ->
assert unquote(datatype).new(input, canonicalize: true) ==
unquote(datatype).new(input) |> unquote(datatype).canonical()
end)
end
end
describe "general new!" do
test "with valid values, it behaves the same as new" do
Enum.each(@valid, fn {input, _} ->
assert unquote(datatype).new!(input) == unquote(datatype).new(input)
assert unquote(datatype).new!(input) ==
unquote(datatype).new(input)
assert unquote(datatype).new!(input, canonicalize: true) ==
unquote(datatype).new(input, canonicalize: true)
end)
end
test "with invalid values, it raises an error" do
Enum.each(@invalid, fn value ->
assert_raise ArgumentError, fn -> unquote(datatype).new!(value) end
assert_raise ArgumentError, fn ->
unquote(datatype).new!(value, canonicalize: true)
end
end)
end
end
describe "general value" do
Enum.each(@valid, fn {input, {value, _, canonicalized}} ->
@tag example: %{input: input, value: value}
test "of valid #{unquote(datatype)}.new(#{inspect(input)})",
%{example: example} do
assert unquote(datatype).new(example.input) |> unquote(datatype).value() ==
example.value
end
end)
Enum.each(@invalid, fn value ->
@tag example: %{input: value, value: value}
test "of invalid #{unquote(datatype)}.new(#{inspect(value)})", %{example: example} do
assert unquote(datatype).new(example.input) |> unquote(datatype).value() == nil
end
end)
end
describe "general lexical" do
Enum.each(@valid, fn {input, {_, lexical, canonicalized}} ->
lexical = lexical || canonicalized
@tag example: %{input: input, lexical: lexical}
test "of valid #{unquote(datatype)}.new(#{inspect(input)})",
%{example: example} do
assert unquote(datatype).new(example.input) |> unquote(datatype).lexical() ==
example.lexical
end
end)
Enum.each(@invalid, fn value ->
lexical = unquote(datatype).init_invalid_lexical(value, [])
@tag example: %{input: value, lexical: lexical}
test "of invalid #{unquote(datatype)}.new(#{inspect(value)}) == #{inspect(lexical)}",
%{example: example} do
assert unquote(datatype).new(example.input) |> unquote(datatype).lexical() ==
example.lexical
end
end)
end
describe "general canonicalization" do
Enum.each(@valid, fn {input, {value, _, _}} ->
expected = %RDF.Literal{literal: %unquote(datatype){value: value}}
@tag example: %{input: input, output: expected}
test "#{unquote(datatype)} #{inspect(input)}", %{example: example} do
assert unquote(datatype).new(example.input) |> unquote(datatype).canonical() ==
example.output
end
end)
Enum.each(@valid, fn {input, {_, _, canonicalized}} ->
@tag example: %{input: input, canonicalized: canonicalized}
test "lexical of canonicalized #{unquote(datatype)} #{inspect(input, limit: 4)} is #{
inspect(canonicalized, limit: 4)
}",
%{example: example} do
assert unquote(datatype).new(example.input)
|> unquote(datatype).canonical()
|> unquote(datatype).lexical() ==
example.canonicalized
end
end)
Enum.each(@valid, fn {input, {_, _, canonicalized}} ->
@tag example: %{input: input, canonicalized: canonicalized}
test "canonical? for #{unquote(datatype)} #{inspect(input)}", %{example: example} do
literal = unquote(datatype).new(example.input)
assert unquote(datatype).canonical?(literal) ==
(unquote(datatype).lexical(literal) == example.canonicalized)
end
end)
test "does not change the XSD datatype value when it is invalid" do
Enum.each(@invalid, fn value ->
assert unquote(datatype).new(value) |> unquote(datatype).canonical() ==
unquote(datatype).new(value)
end)
end
test "canonical_lexical with valid literals" do
Enum.each(@valid, fn {input, {_, _, canonicalized}} ->
assert unquote(datatype).new(input) |> unquote(datatype).canonical_lexical() ==
canonicalized
end)
end
test "canonical_lexical with invalid literals" do
Enum.each(@invalid, fn value ->
assert unquote(datatype).new(value) |> unquote(datatype).canonical_lexical() ==
nil
end)
end
end
describe "general validation" do
Enum.each(Map.keys(@valid), fn value ->
@tag value: value
test "#{inspect(value)} as a #{unquote(datatype)} is valid", %{value: value} do
assert unquote(datatype).valid?(unquote(datatype).new(value))
end
end)
Enum.each(@invalid, fn value ->
@tag value: value
test "#{inspect(value)} as a #{unquote(datatype)} is invalid", %{value: value} do
refute unquote(datatype).valid?(unquote(datatype).new(value))
end
end)
end
end
test "String.Chars protocol implementation" do
Enum.each(@valid, fn {input, _} ->
assert unquote(datatype).new(input) |> to_string() ==
unquote(datatype).new(input) |> unquote(datatype).lexical()
end)
end
end
end
def dt(value) do
{:ok, date, _} = DateTime.from_iso8601(value)
date
end
end
| test/support/xsd_datatype_case.ex | 0.7586 | 0.721204 | xsd_datatype_case.ex | starcoder |
defmodule MapSets do
use Koans
@intro "My name is Set, MapSet."
@set MapSet.new([1, 2, 3, 4, 5])
koan "I am very similar to a list" do
assert Enum.fetch(@set, 0) == {:ok, 1}
end
koan "However, I do not allow duplication" do
new_set = MapSet.new([1, 1, 2, 3, 3, 3])
assert MapSet.size(new_set) == 3
end
def sorted?(set) do
list = MapSet.to_list(set)
sorted = Enum.sort(list)
list == sorted
end
koan "You cannot depend on my order" do
new_set = MapSet.new(1..33)
assert sorted?(new_set) == false
# Note: The number "33" is actually special here. Erlang uses a different
# implementation for maps after 32 elements which does not maintain order.
# http://stackoverflow.com/a/40408469
# What do you think the answer to this assertion is?
assert sorted?(@set) == true
end
koan "Does this value exist in the map set?" do
assert MapSet.member?(@set, 3) == true
end
koan "I am merely another collection, but you can perform some operations on me" do
new_set = MapSet.new(@set, fn x -> 3 * x end)
#MapSet<[3, 6, 9, 12, 15]>
assert MapSet.member?(new_set, 15) == true
assert MapSet.member?(new_set, 1) == false
end
koan "Add this value into a map set" do
modified_set = MapSet.put(@set, 6)
assert MapSet.member?(modified_set, 6) == true
end
koan "Delete this value from the map set" do
modified_set = MapSet.delete(@set, 1)
assert MapSet.member?(modified_set, 1) == false
end
koan "How large is my map set?" do
assert MapSet.size(@set) == 5
end
koan "Are these maps twins?" do
new_set = MapSet.new([1, 2, 3])
assert MapSet.equal?(@set, new_set) == false
end
koan "I want only the common values in both sets" do
intersection_set = MapSet.intersection(@set, MapSet.new([5, 6, 7]))
#MapSet<[5]>
assert MapSet.member?(intersection_set, 5) == true
end
koan "Unify my sets" do
new_set = MapSet.union(@set, MapSet.new([1, 5, 6, 7]))
#MapSet<[1, 2, 3, 4, 5, 6, 7]>
assert MapSet.size(new_set) == 7
end
koan "I want my set in a list" do
assert MapSet.to_list(@set) == [1, 2, 3, 4, 5]
end
end
| lib/koans/09_map_sets.ex | 0.79538 | 0.489748 | 09_map_sets.ex | starcoder |
defmodule Cadet.Test.Seeds do
@moduledoc """
This module contains functions that seed more complex setups into the DB for tests.
"""
import Cadet.Factory
@doc """
This sets up the common assessments environment by inserting relevant entries into the DB.
Returns a map of the following format:
%{
accounts: %{
avenger: avenger,
mentor: mentor,
group: group,
students: students,
admin: admin
},
users: %{
staff: avenger,
student: List.first(students),
admin: admin
},
assessments: %{
path: %{
assessment: assessment,
programming_questions: programming_questions,
mcq_questions: mcq_questions,
submissions: submissions,
programming_answers: programming_answers,
mcq_answers: mcq_answers
},
mission: ...,
contest: ...,
sidequest: ...
}
}
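A usage sketch in a test setup (the destructuring shown is illustrative):
    %{users: %{student: student}} = Cadet.Test.Seeds.assessments()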
"""
def assessments do
if Cadet.Env.env() == :test do
# User and Group
avenger = insert(:user, %{name: "avenger", role: :staff})
mentor = insert(:user, %{name: "mentor", role: :staff})
group = insert(:group, %{leader: avenger, mentor: mentor})
students = insert_list(5, :student, %{group: group})
admin = insert(:user, %{name: "admin", role: :admin})
assessments =
Enum.reduce(
Cadet.Assessments.AssessmentType.__enum_map__(),
%{},
fn type, acc -> Map.put(acc, type, insert_assessments(type, students)) end
)
%{
accounts: %{
avenger: avenger,
mentor: mentor,
group: group,
students: students,
admin: admin
},
users: %{
staff: avenger,
student: List.first(students),
admin: admin
},
assessments: assessments
}
end
end
defp insert_assessments(assessment_type, students) do
assessment = insert(:assessment, %{type: assessment_type, is_published: true})
programming_questions =
Enum.map(1..3, fn id ->
insert(:programming_question, %{
display_order: id,
assessment: assessment,
max_grade: 200,
max_xp: 1000
})
end)
mcq_questions =
Enum.map(4..6, fn id ->
insert(:mcq_question, %{
display_order: id,
assessment: assessment,
max_grade: 40,
max_xp: 500
})
end)
submissions =
students
|> Enum.take(2)
|> Enum.map(&insert(:submission, %{assessment: assessment, student: &1}))
# Programming Answers
programming_answers =
Enum.map(submissions, fn submission ->
Enum.map(programming_questions, fn question ->
insert(:answer, %{
grade: 200,
xp: 1000,
question: question,
submission: submission,
answer: build(:programming_answer)
})
end)
end)
mcq_answers =
Enum.map(submissions, fn submission ->
Enum.map(mcq_questions, fn question ->
insert(:answer, %{
grade: 40,
xp: 500,
question: question,
submission: submission,
answer: build(:mcq_answer)
})
end)
end)
%{
assessment: assessment,
programming_questions: programming_questions,
mcq_questions: mcq_questions,
submissions: submissions,
programming_answers: programming_answers,
mcq_answers: mcq_answers
}
end
end
| test/support/seeds.ex | 0.717507 | 0.465873 | seeds.ex | starcoder |
defmodule DawdleDB.Handler do
@moduledoc """
Defines a handler for database events on a single table.
To define an event handler, `use DawdleDB.Handler` and provide the Ecto schema
type that you wish to handle. Then, override the callbacks
`c:handle_insert/1`, `c:handle_update/2`, and `c:handle_delete/1`
as appropriate.
## Examples
```
defmodule MyApp.TestDBHandler do
use DawdleDB.Handler, type: MyApp.MySchema
alias MyApp.MySchema
def handle_insert(%MySchema{} = new) do
# Do something...
end
def handle_update(%MySchema{} = new, old) do
# Do something else...
end
def handle_delete(%MySchema{} = old) do
# Default case
end
end
```
"""
alias Ecto.Changeset
alias Ecto.Schema
@doc """
This function is called when DawdleDB pulls an insert event for the specified
table from the queue. The function is executed for its side effects
and the return value is ignored.
"""
@callback handle_insert(new :: Schema.t()) :: any()
@doc """
This function is called when DawdleDB pulls an update event for the specified
table from the queue. The function is executed for its side effects and the
return value is ignored.
"""
@callback handle_update(new :: Schema.t(), old :: Schema.t()) :: any()
@doc """
This function is called when DawdleDB pulls a delete event for the specified
table from the queue. The function is executed for its side effects and the
return value is ignored.
"""
@callback handle_delete(old :: Schema.t()) :: any()
defmacro __using__(opts) do
type =
opts
|> Keyword.get(:type)
|> DawdleDB.Handler._expand_alias(__CALLER__)
table = type.__schema__(:source)
quote do
use Dawdle.Handler, only: [DawdleDB.Event]
import DawdleDB.Handler, only: [_rehydrate: 2]
@behaviour DawdleDB.Handler
@impl true
def handle_event(%DawdleDB.Event{table: unquote(table)} = e) do
case e.action do
:insert ->
handle_insert(_rehydrate(unquote(type), e.new))
:update ->
handle_update(
_rehydrate(unquote(type), e.new),
_rehydrate(unquote(type), e.old)
)
:delete ->
handle_delete(_rehydrate(unquote(type), e.old))
end
end
# Catch-all handler
def handle_event(_event), do: :ok
@impl true
def handle_insert(_new), do: :ok
@impl true
def handle_update(_new, _old), do: :ok
@impl true
def handle_delete(_old), do: :ok
defoverridable handle_insert: 1, handle_update: 2, handle_delete: 1
end
end
@doc false
# credo:disable-for-lines:5 Credo.Check.Readability.Specs
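# Rebuilds a `type` struct from the raw event payload: re-stringifies keys of
# `:map`-typed fields, casts all non-embed schema fields, then casts embeds.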
def _rehydrate(_type, nil), do: nil
def _rehydrate(type, data) do
data = fix_maps(data, type)
type.__struct__
|> Changeset.cast(
data,
type.__schema__(:fields) -- type.__schema__(:embeds)
)
|> rehydrate_embeds()
|> Changeset.apply_changes()
end
@spec rehydrate_embeds(Changeset.t()) :: Changeset.t()
defp rehydrate_embeds(changeset) do
changeset.data.__struct__.__schema__(:embeds)
|> Enum.reduce(changeset, &Changeset.cast_embed(&2, &1))
end
# Ecto recommends maps always use string keys rather than atoms, however on
# load we end up converting these back to atoms. Assume that people followed
# the advice, and re-stringify the keys on embedded maps.
defp fix_maps(data, type) do
map_keys =
Enum.filter(Map.keys(data), fn k -> type.__schema__(:type, k) == :map end)
Enum.reduce(map_keys, data, fn k, d ->
new_map =
d
|> Map.fetch!(k)
|> Enum.map(fn {k, v} -> {to_string(k), v} end)
|> Enum.into(%{})
Map.put(d, k, new_map)
end)
end
@doc false
# credo:disable-for-lines:5 Credo.Check.Readability.Specs
def _expand_alias({:__aliases__, _, _} = ast, env),
do: Macro.expand(ast, %{env | function: {:__schema__, 2}})
def _expand_alias(ast, _env),
do: ast
end
| lib/dawdle_db/handler.ex | 0.848109 | 0.590691 | handler.ex | starcoder |
defmodule ElhexDelivery.PostalCode.Navigator do
use GenServer
alias ElhexDelivery.PostalCode.{Cache, Store}
alias :math, as: Math
# @radius 6371 # km
@radius 3959 # miles
def init(init_arg) do
{:ok, init_arg}
end
def start_link do
GenServer.start_link(__MODULE__, [], name: :postal_code_navigator)
end
def get_distance(from, to) do
GenServer.call(:postal_code_navigator, {:get_distance, from, to})
end
# Callbacks
def handle_call({:get_distance, from, to}, _from, state) do
distance = do_get_distance(from, to)
{:reply, distance, state}
end
defp do_get_distance(from, to) do
from = format_postal_code(from)
to = format_postal_code(to)
case Cache.get_distance(from, to) do
nil ->
{lat1, long1} = get_geolocation(from)
{lat2, long2} = get_geolocation(to)
distance = calculate_distance({lat1, long1}, {lat2, long2})
Cache.set_distance(from, to, distance)
distance
distance -> distance
end
end
defp get_geolocation(postal_code) do
Store.get_geolocation(postal_code)
end
defp format_postal_code(postal_code) when is_binary(postal_code), do: postal_code
defp format_postal_code(postal_code) when is_integer(postal_code) do
postal_code = Integer.to_string(postal_code)
format_postal_code(postal_code)
end
defp format_postal_code(postal_code) do
error = "unexpected `postal_code`, received: (#{inspect(postal_code)})"
raise ArgumentError, error
end
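# Haversine great-circle distance:
#   a = sin²(Δlat/2) + cos(lat1) * cos(lat2) * sin²(Δlong/2)
#   c = 2 * atan2(√a, √(1 - a))
#   distance = @radius * c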
defp calculate_distance({lat1, long1}, {lat2, long2}) do
lat_diff = degrees_to_radians(lat2 - lat1)
long_diff = degrees_to_radians(long2 - long1)
lat1 = degrees_to_radians(lat1)
lat2 = degrees_to_radians(lat2)
cos_lat1 = Math.cos(lat1)
cos_lat2 = Math.cos(lat2)
sin_lat_diff_sq = Math.sin(lat_diff / 2) |> Math.pow(2)
sin_long_diff_sq = Math.sin(long_diff / 2) |> Math.pow(2)
a = sin_lat_diff_sq + (cos_lat1 * cos_lat2 * sin_long_diff_sq)
c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a))
@radius * c |> Float.round(2)
end
defp degrees_to_radians(degrees) do
degrees * (Math.pi / 180)
end
end
| lib/postal_code/navigator.ex | 0.755186 | 0.482368 | navigator.ex | starcoder |
defmodule Spear.ExpectationViolation do
@moduledoc """
A structure representing how an append request's expectations were violated
## Expectations
A client may exert expectations on write requests which will fail the request
if violated. Any of these values may be passed to the `:expect` option of
`Spear.append/4`:
* `:any` - (default) any stream. Cannot be violated.
* `:exists` - the EventStoreDB stream must exist prior to the proposed events
being written
* `:empty` - the EventStoreDB stream must **not** exist prior to the
proposed events being written
* `revision` - any positive integer representing the current size of the
stream. The head of the EventStoreDB stream must match this revision number
in order for the append request to succeed.
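For example, a sketch of exerting a revision expectation (here the stream
head is expected to be at revision `2`, i.e. three events written so far):
    iex> Spear.append(events, conn, "my_stream", expect: 2)
    :ok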
If an expectation is violated, the return signature will be
`{:error, %Spear.ExpectationViolation{}}`, which gives information about
the expectation and the current revision. See `t:t/0`.
Expectations may also be set in `Spear.delete_stream/3`, although if
expectations set on a deletion request are violated, EventStoreDB returns
a gRPC error response instead of this struct.
"""
defstruct [:current, :expected]
@typedoc """
A structure representing how an append request's expectations were violated
This struct is returned on calls to `Spear.append/4` which set an expectation
on the current stream revision with the `:expect` option.
`:current` is not the number of events in the EventStoreDB stream but rather
the current event revision. If three events are appended to an empty stream,
the `:current` will be `2`. Note that deletions do not reset a stream's
revision number.
## Examples
# say EventStoreDB stream "stream_that_should_be_empty" has 6 events
iex> Spear.append(events, conn, "stream_that_should_be_empty", expect: :empty)
{:error, %Spear.ExpectationViolation{current: 5, expected: :empty}}
# say EventStoreDB stream "stream_that_should_have_events" has no events
iex> Spear.append(events, conn, "stream_that_should_have_events", expect: :exists)
{:error, %Spear.ExpectationViolation{current: :empty, expected: :exists}}
"""
@typedoc since: "0.1.0"
@type t :: %__MODULE__{
current: pos_integer() | :empty,
expected: pos_integer() | :empty | :exists | :any
}
end
| lib/spear/expectation_violation.ex | 0.868548 | 0.683155 | expectation_violation.ex | starcoder |
if Code.ensure_loaded?(Plug) do
defmodule Guardian.Plug do
@moduledoc """
Provides functions for the implementation module for dealing with
Guardian in a Plug environment
```elixir
defmodule MyApp.Tokens do
use Guardian, otp_app: :my_app
# ... snip
end
```
Your implementation module will be given a `Plug` module for
interacting with plug.
If you're using Guardian in your application most of the setters will
be uninteresting. They're mostly for library authors and Guardian itself.
The usual functions you'd use in your application are:
### `sign_in(conn, resource, claims \\ %{}, opts \\ [])`
Sign in a resource for your application.
This will generate a token for your resource according to
your TokenModule and `subject_for_token` callback.
`sign_in` will also cache the `resource`, `claims`, and `token` on the
connection.
```elixir
conn = MyApp.Guardian.Plug.sign_in(conn, resource, my_custom_claims)
```
If there is a session present the token will be stored in the session
to provide traditional session based authentication.
"""
defmodule UnauthenticatedError do
defexception message: "Unauthenticated", status: 401
end
@default_key "default"
@default_cookie_options [max_age: 60 * 60 * 24 * 7 * 4]
import Guardian, only: [returning_tuple: 1]
import Guardian.Plug.Keys
import Plug.Conn
alias Guardian.Plug, as: GPlug
alias GPlug.Pipeline
alias __MODULE__.UnauthenticatedError
defmacro __using__(impl) do
quote do
def implementation, do: unquote(impl)
def put_current_token(conn, token, opts \\ []),
do: GPlug.put_current_token(conn, token, opts)
def put_current_claims(conn, claims, opts \\ []),
do: GPlug.put_current_claims(conn, claims, opts)
def put_current_resource(conn, resource, opts \\ []),
do: GPlug.put_current_resource(conn, resource, opts)
def current_token(conn, opts \\ []), do: GPlug.current_token(conn, opts)
def current_claims(conn, opts \\ []), do: GPlug.current_claims(conn, opts)
def current_resource(conn, opts \\ []), do: GPlug.current_resource(conn, opts)
def authenticated?(conn, opts \\ []), do: GPlug.authenticated?(conn, opts)
def sign_in(conn, resource, claims \\ %{}, opts \\ []),
do: GPlug.sign_in(conn, implementation(), resource, claims, opts)
def sign_out(conn, opts \\ []), do: GPlug.sign_out(conn, implementation(), opts)
def remember_me(conn, resource, claims \\ %{}, opts \\ []),
do: GPlug.remember_me(conn, implementation(), resource, claims, opts)
def remember_me_from_token(conn, token, claims \\ %{}, opts \\ []),
do: GPlug.remember_me_from_token(conn, implementation(), token, claims, opts)
end
end
def session_active?(conn) do
key = :seconds |> System.os_time() |> to_string()
get_session(conn, key) == nil
rescue
ArgumentError -> false
end
@spec authenticated?(Plug.Conn.t(), Guardian.opts()) :: true | false
def authenticated?(conn, opts) do
key =
conn
|> fetch_key(opts)
|> token_key()
conn.private[key] != nil
end
@doc """
Provides the default key for the location of a token in the session and connection
"""
@spec default_key() :: String.t()
def default_key, do: @default_key
@spec current_claims(Plug.Conn.t(), Guardian.opts()) :: Guardian.Token.claims() | nil
def current_claims(conn, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> claims_key()
conn.private[key]
end
@spec current_resource(Plug.Conn.t(), Guardian.opts()) :: any | nil
def current_resource(conn, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> resource_key()
conn.private[key]
end
@spec current_token(Plug.Conn.t(), Guardian.opts()) :: Guardian.Token.token() | nil
def current_token(conn, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> token_key()
conn.private[key]
end
@spec put_current_token(Plug.Conn.t(), Guardian.Token.token() | nil, Guardian.opts()) ::
Plug.Conn.t()
def put_current_token(conn, token, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> token_key()
put_private(conn, key, token)
end
@spec put_current_claims(Plug.Conn.t(), Guardian.Token.claims() | nil, Guardian.opts()) ::
Plug.Conn.t()
def put_current_claims(conn, claims, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> claims_key()
put_private(conn, key, claims)
end
@spec put_current_resource(Plug.Conn.t(), resource :: any | nil, Guardian.opts()) ::
Plug.Conn.t()
def put_current_resource(conn, resource, opts \\ []) do
key =
conn
|> fetch_key(opts)
|> resource_key()
put_private(conn, key, resource)
end
@spec sign_in(Plug.Conn.t(), module, any, Guardian.Token.claims(), Guardian.opts()) ::
Plug.Conn.t()
def sign_in(conn, impl, resource, claims \\ %{}, opts \\ []) do
with {:ok, token, full_claims} <- Guardian.encode_and_sign(impl, resource, claims, opts),
{:ok, conn} <- add_data_to_conn(conn, resource, token, full_claims, opts),
{:ok, conn} <-
returning_tuple({impl, :after_sign_in, [conn, resource, token, full_claims, opts]}) do
if session_active?(conn) do
key =
conn
|> fetch_key(opts)
|> token_key()
conn
|> put_session(key, token)
|> configure_session(renew: true)
else
conn
end
else
err -> handle_unauthenticated(conn, err, opts)
end
end
@spec sign_out(Plug.Conn.t(), module, Guardian.opts()) :: Plug.Conn.t()
def sign_out(conn, impl, opts) do
key = Keyword.get(opts, :key, :all)
result = do_sign_out(conn, impl, key, opts)
case result do
{:ok, conn} -> conn
{:error, reason} -> handle_unauthenticated(conn, reason, opts)
end
end
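@doc """
Puts a refresh token for the resource in a response cookie so that the
session can later be re-established from it. A minimal sketch, assuming the
`MyApp.Tokens` implementation module from the moduledoc example:
    conn = MyApp.Tokens.Plug.remember_me(conn, resource)
"""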
@spec remember_me(Plug.Conn.t(), module, any, Guardian.Token.claims(), Guardian.opts()) ::
Plug.Conn.t()
def remember_me(conn, mod, resource, claims \\ %{}, opts \\ []) do
opts = Keyword.put_new(opts, :token_type, "refresh")
key = fetch_token_key(conn, opts)
case Guardian.encode_and_sign(mod, resource, claims, opts) do
{:ok, token, new_claims} ->
put_resp_cookie(conn, key, token, cookie_options(mod, new_claims))
{:error, _} = err ->
handle_unauthenticated(conn, err, opts)
end
end
@spec remember_me_from_token(
Plug.Conn.t(),
module,
Guardian.Token.token(),
Guardian.Token.claims(),
Guardian.opts()
) :: Plug.Conn.t()
def remember_me_from_token(conn, mod, token, claims_to_check \\ %{}, opts \\ []) do
token_type = Keyword.get(opts, :token_type, "refresh")
key = fetch_token_key(conn, opts)
with {:ok, claims} <- Guardian.decode_and_verify(mod, token, claims_to_check, opts),
{:ok, _old, {new_t, full_new_c}} <-
Guardian.exchange(mod, token, claims["typ"], token_type, opts) do
put_resp_cookie(conn, key, new_t, cookie_options(mod, full_new_c))
else
{:error, _} = err -> handle_unauthenticated(conn, err, opts)
end
end
defp fetch_token_key(conn, opts) do
conn
|> Pipeline.fetch_key(opts)
|> token_key()
|> Atom.to_string()
end
defp cookie_options(mod, %{"exp" => timestamp}) do
max_age = timestamp - Guardian.timestamp()
[max_age: max_age] ++ cookie_options(mod, %{})
end
defp cookie_options(mod, _claims) do
mod.config(:cookie_options, []) ++ @default_cookie_options
end
defp add_data_to_conn(conn, resource, token, claims, opts) do
conn =
conn
|> put_current_token(token, opts)
|> put_current_claims(claims, opts)
|> put_current_resource(resource, opts)
{:ok, conn}
end
defp cleanup_session({:ok, conn}, opts) do
conn =
if session_active?(conn) do
key =
conn
|> fetch_key(opts)
|> token_key()
conn
|> delete_session(key)
|> configure_session(renew: true)
else
conn
end
{:ok, conn}
end
defp cleanup_session({:error, _} = err, _opts), do: err
defp cleanup_session(err, _opts), do: {:error, err}
defp clear_key(key, {:ok, conn}, impl, opts), do: do_sign_out(conn, impl, key, opts)
defp clear_key(_, err, _, _), do: err
defp fetch_key(conn, opts),
do: Keyword.get(opts, :key) || Pipeline.current_key(conn) || default_key()
defp remove_data_from_conn(conn, opts) do
conn =
conn
|> put_current_token(nil, opts)
|> put_current_claims(nil, opts)
|> put_current_resource(nil, opts)
{:ok, conn}
end
defp revoke_token(conn, impl, key, opts) do
token = current_token(conn, key: key)
with {:ok, _} <- impl.revoke(token, opts), do: {:ok, conn}
end
defp do_sign_out(%{private: private} = conn, impl, :all, opts) do
private
|> Map.keys()
|> Enum.map(&key_from_other/1)
|> Enum.filter(&(&1 != nil))
|> Enum.uniq()
|> Enum.reduce({:ok, conn}, &clear_key(&1, &2, impl, opts))
|> cleanup_session(opts)
end
defp do_sign_out(conn, impl, key, opts) do
with {:ok, conn} <- returning_tuple({impl, :before_sign_out, [conn, key, opts]}),
{:ok, conn} <- revoke_token(conn, impl, key, opts),
{:ok, conn} <- remove_data_from_conn(conn, key: key) do
if session_active?(conn) do
{:ok, delete_session(conn, token_key(key))}
else
{:ok, conn}
end
end
end
defp handle_unauthenticated(conn, reason, opts) do
error_handler = Pipeline.current_error_handler(conn)
if error_handler do
conn
|> halt()
|> error_handler.auth_error({:unauthenticated, reason}, opts)
else
raise UnauthenticatedError, inspect(reason)
end
end
end
end
| lib/guardian/plug.ex | 0.733356 | 0.796372 | plug.ex | starcoder |