| code (string, 114-1.05M) | path (string, 3-312) | quality_prob (float64, 0.5-0.99) | learning_prob (float64, 0.2-1) | filename (string, 3-168) | kind (1 class) |
|---|---|---|---|---|---|
defmodule Bitmap.Binary do
@moduledoc """
Contains functions to create and work with a [bitmap](https://en.wikipedia.org/wiki/Bitmap).
Bitmaps, also known as bit arrays or bit sets, are a fast, space-efficient
data structure for lookups.
The module has been designed to be pipe-friendly, so pipe 'em up
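For example, calls chain naturally (a minimal sketch):
Bitmap.Binary.new(8)
|> Bitmap.Binary.set(0)
|> Bitmap.Binary.toggle(3)
|> Bitmap.Binary.to_string()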
"""
import Kernel, except: [to_string: 1]
@behaviour Bitmap
@type t :: binary
@type argt :: non_neg_integer | [any] | Range.t
@type index :: non_neg_integer
@type bit :: 1 | 0
@set_bit 1
@unset_bit 0
@doc """
Creates and returns a bitmap of size corresponding to the `argument` passed.
If `argument` is
- integer, size of bitmap is equal to the `argument`
- range, size of bitmap is equal to the length of `argument`
- list, size of bitmap is equal to the length of `argument`
> Note: All bits are set to 0 by default
## Examples
iex> Bitmap.Binary.new(400)
<<0::size(400)>>
iex> Bitmap.Binary.new([1,2,3,4,5])
<<0::size(5)>>
iex> Bitmap.Binary.new(1..25)
<<0::size(25)>>
"""
@spec new(argt) :: __MODULE__.t
def new(argument)
def new(size) when is_integer(size), do: <<0::size(size)>>
def new(list) when is_list(list), do: new(length(list))
def new(a..b), do: new(abs(b - a) + 1)
@doc """
Returns the bit value at `index` in the bitmap
## Examples
iex> bm = Bitmap.Binary.new(5)
iex> Bitmap.Binary.at(bm, 2)
0
iex> bm = Bitmap.Binary.set(bm, 2)
iex> Bitmap.Binary.at(bm, 2)
1
"""
@spec at(__MODULE__.t, index) :: bit
def at(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
bitmap |> split_at(index) |> elem(1)
end
@doc """
Returns a boolean representing whether the bit at position `index`
is set or not
## Examples
iex> bm = Bitmap.Binary.new(5) |> Bitmap.Binary.set(1) |> Bitmap.Binary.set(3)
iex> Bitmap.Binary.set?(bm, 1)
true
iex> Bitmap.Binary.set?(bm, 4)
false
"""
@spec set?(__MODULE__.t, index) :: boolean
def set?(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
at(bitmap, index) == @set_bit
end
@doc """
Sets the bit at `index` in the bitmap and returns the new bitmap
Index can also have a value `:all` in which case all bits
will be set like in set_all
## Examples
iex> Bitmap.Binary.set(Bitmap.Binary.new(5), 3)
<<2::size(5)>>
iex> Bitmap.Binary.set(Bitmap.Binary.new(1..10), 2)
<<32, 0::size(2)>>
"""
@spec set(__MODULE__.t, index) :: __MODULE__.t
def set(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
set_bit(bitmap, index, @set_bit)
end
def set(bitmap, :all), do: set_all(bitmap)
@doc """
Sets all bits in the bitmap and returns a new bitmap
## Examples
iex> Bitmap.Binary.set_all(Bitmap.Binary.new(10))
<<255, 3::size(2)>>
iex> Bitmap.Binary.set_all(Bitmap.Binary.new(100))
<<255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15::size(4)>>
"""
@spec set_all(__MODULE__.t) :: __MODULE__.t
def set_all(bitmap) do
import Bitmap.Utils, only: [pow: 2]
bitmap_size = bit_size(bitmap)
<<pow(2, bitmap_size)-1::size(bitmap_size)>>
end
@doc """
Returns a boolean representing whether the bit at position `index`
is unset or not
## Examples
iex> bm = Bitmap.Binary.new(5) |> Bitmap.Binary.set(1) |> Bitmap.Binary.set(3)
iex> Bitmap.Binary.unset?(bm, 1)
false
iex> Bitmap.Binary.unset?(bm, 4)
true
"""
@spec unset?(__MODULE__.t, index) :: boolean
def unset?(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
at(bitmap, index) == @unset_bit
end
@doc """
Unsets the bit at `index` in the bitmap and returns the new bitmap
Index can also have a value `:all` in which case all bits
will be unset like in unset_all
## Examples
iex> bm = Bitmap.Binary.new(10) |> Bitmap.Binary.set(4) |> Bitmap.Binary.set(8)
iex> Bitmap.Binary.unset(bm, 4)
<<0, 2::size(2)>>
iex> Bitmap.Binary.unset(bm, 8)
<<8, 0::size(2)>>
"""
@spec unset(__MODULE__.t, index) :: __MODULE__.t
def unset(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
set_bit(bitmap, index, @unset_bit)
end
def unset(bitmap, :all), do: unset_all(bitmap)
@doc """
Unsets all bits in the bitmap and returns a new bitmap
## Examples
iex> bm = Bitmap.Binary.new(10) |> Bitmap.Binary.set(4) |> Bitmap.Binary.set(8)
iex> Bitmap.Binary.unset_all(bm)
<<0, 0::size(2)>>
"""
@spec unset_all(__MODULE__.t) :: __MODULE__.t
def unset_all(bitmap) do
bitmap_size = bit_size(bitmap)
<<0::size(bitmap_size)>>
end
@doc """
Toggles the bit at `index` in the bitmap and returns the new bitmap
i.e. it sets the bit to 1 if it was 0 or sets the bit to 0 if it was 1
Index can also have a value `:all` in which case all bits will be toggled
like in toggle_all
## Examples
iex> bm = Bitmap.Binary.new(10) |> Bitmap.Binary.set(4) |> Bitmap.Binary.set(8)
iex> Bitmap.Binary.toggle(bm, 3)
<<24, 2::size(2)>>
iex> Bitmap.Binary.toggle(bm, 6)
<<10, 2::size(2)>>
"""
@spec toggle(__MODULE__.t, index) :: __MODULE__.t
def toggle(bitmap, index) when index >= 0 and index < bit_size(bitmap) do
{prefix, bit, rest} = split_at(bitmap, index)
case bit do
1 -> <<prefix::size(index), @unset_bit::size(1), rest::bitstring>>
0 -> <<prefix::size(index), @set_bit::size(1), rest::bitstring>>
end
end
def toggle(bitmap, :all), do: toggle_all(bitmap)
@doc """
Toggles all bits in the bitmap and returns a new bitmap
## Examples
iex> bm = Bitmap.Binary.new(10) |> Bitmap.Binary.set(4) |> Bitmap.Binary.set(8)
iex> Bitmap.Binary.toggle_all(bm)
<<247, 1::size(2)>>
"""
@spec toggle_all(__MODULE__.t) :: __MODULE__.t
def toggle_all(bitmap) do
toggle_binary(bitmap, bit_size(bitmap), <<>>)
end
@doc """
Returns the string representation of the bitmap
Note: This can be very long for huge bitmaps.
"""
@spec to_string(__MODULE__.t) :: String.t
def to_string(bitmap) do
to_string(bitmap, <<>>)
end
@doc """
Inspects the bitmap and returns the string representation of the bitmap
Note: This can be very long for huge bitmaps.
"""
@spec inspect(__MODULE__.t) :: String.t
def inspect(bitmap) do
bitmap |> to_string |> IO.inspect
end
defp to_string(<<>>, acc), do: String.reverse(acc)
defp to_string(<<bit::size(1), rest::bitstring>>, acc) do
case bit do
1 -> to_string(rest, "#{@set_bit}" <> acc)
0 -> to_string(rest, "#{@unset_bit}" <> acc)
end
end
defp set_bit(bitmap, index, bit) do
{prefix, o_bit, rest} = split_at(bitmap, index)
cond do
o_bit == bit -> bitmap
true -> <<prefix::size(index), bit::size(1), rest::bitstring>>
end
end
defp split_at(bitmap, index) do
<<prefix::size(index), bit::size(1), rest::bitstring>> = bitmap
{prefix, bit, rest}
end
defp toggle_binary(_bitmap, 0, acc), do: reverse_binary(acc)
defp toggle_binary(<<bit::size(1), rest::bitstring>>, size, acc) do
case bit do
1 -> toggle_binary(rest, size-1, <<@unset_bit::size(1), acc::bitstring>>)
0 -> toggle_binary(rest, size-1, <<@set_bit::size(1), acc::bitstring>>)
end
end
defp reverse_binary(binary), do: reverse_binary(binary, bit_size(binary), <<>>)
defp reverse_binary(_binary, 0, acc), do: acc
defp reverse_binary(<<bit::size(1), rest::bitstring>>, size, acc) do
reverse_binary(rest, size-1, <<bit::size(1), acc::bitstring>>)
end
end
| lib/bitmap/binary.ex | 0.914463 | 0.620305 | binary.ex | starcoder |
defmodule Gettext.Interpolation.Default do
@moduledoc """
Default Interpolation Implementation
Replaces `%{binding_name}` with a string value.
"""
@behaviour Gettext.Interpolation
@type interpolatable :: [String.t() | atom]
# Extracts interpolations from a given string.
# This function extracts all interpolations in the form `%{interpolation}`
# contained inside `str`, converts them to atoms and then returns a list of
# string and interpolation keys.
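# For example, `to_interpolatable("Hello %{name}!")` returns
# `["Hello ", :name, "!"]`.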
@doc false
@spec to_interpolatable(String.t()) :: interpolatable
def to_interpolatable(string) do
start_pattern = :binary.compile_pattern("%{")
end_pattern = :binary.compile_pattern("}")
string
|> to_interpolatable(_current = "", _acc = [], start_pattern, end_pattern)
|> Enum.reverse()
end
defp to_interpolatable(string, current, acc, start_pattern, end_pattern) do
case :binary.split(string, start_pattern) do
# If we have one element, no %{ was found so this is the final part of the
# string.
[rest] ->
prepend_if_not_empty(current <> rest, acc)
# If we found a %{ but it's followed by an immediate }, then we just
# append %{} to the current string and keep going.
[before, "}" <> rest] ->
new_current = current <> before <> "%{}"
to_interpolatable(rest, new_current, acc, start_pattern, end_pattern)
# Otherwise, we found the start of a binding.
[before, binding_and_rest] ->
case :binary.split(binding_and_rest, end_pattern) do
# If we don't find the end of this binding, it means we're at a string
# like "foo %{ no end". In this case we consider no bindings to be
# there.
[_] ->
[current <> string | acc]
# This is the case where we found a binding, so we put it in the acc
# and keep going.
[binding, rest] ->
new_acc = [String.to_atom(binding) | prepend_if_not_empty(before, acc)]
to_interpolatable(rest, "", new_acc, start_pattern, end_pattern)
end
end
end
defp prepend_if_not_empty("", list), do: list
defp prepend_if_not_empty(string, list), do: [string | list]
@doc """
Interpolate a message or interpolatable with the given bindings.
This function takes a message and some bindings and returns an `{:ok,
interpolated_string}` tuple if interpolation is successful. If it encounters
a binding in the message that is missing from `bindings`, it returns
`{:missing_bindings, incomplete_string, missing_bindings}` where
`incomplete_string` is the string with only the present bindings interpolated
and `missing_bindings` is a list of atoms representing bindings that are in
`interpolatable` but not in `bindings`.
## Examples
iex> msgid = "Hello %{name}, you have %{count} unread messages"
iex> good_bindings = %{name: "José", count: 3}
iex> Gettext.Interpolation.Default.runtime_interpolate(msgid, good_bindings)
{:ok, "Hello José, you have 3 unread messages"}
iex> Gettext.Interpolation.Default.runtime_interpolate(msgid, %{name: "José"})
{:missing_bindings, "Hello José, you have %{count} unread messages", [:count]}
iex> msgid = "Hello %{name}, you have %{count} unread messages"
iex> interpolatable = Gettext.Interpolation.Default.to_interpolatable(msgid)
iex> good_bindings = %{name: "José", count: 3}
iex> Gettext.Interpolation.Default.runtime_interpolate(interpolatable, good_bindings)
{:ok, "Hello José, you have 3 unread messages"}
iex> Gettext.Interpolation.Default.runtime_interpolate(interpolatable, %{name: "José"})
{:missing_bindings, "Hello José, you have %{count} unread messages", [:count]}
"""
@impl Gettext.Interpolation
def runtime_interpolate(message, %{} = bindings) when is_binary(message),
do: message |> to_interpolatable() |> runtime_interpolate(bindings)
def runtime_interpolate(interpolatable, bindings)
when is_list(interpolatable) and is_map(bindings) do
interpolate(interpolatable, bindings, [], [])
end
defp interpolate([string | segments], bindings, strings, missing) when is_binary(string) do
interpolate(segments, bindings, [string | strings], missing)
end
defp interpolate([atom | segments], bindings, strings, missing) when is_atom(atom) do
case bindings do
%{^atom => value} ->
interpolate(segments, bindings, [to_string(value) | strings], missing)
%{} ->
strings = ["%{" <> Atom.to_string(atom) <> "}" | strings]
interpolate(segments, bindings, strings, [atom | missing])
end
end
defp interpolate([], _bindings, strings, []) do
{:ok, IO.iodata_to_binary(Enum.reverse(strings))}
end
defp interpolate([], _bindings, strings, missing) do
missing = missing |> Enum.reverse() |> Enum.uniq()
{:missing_bindings, IO.iodata_to_binary(Enum.reverse(strings)), missing}
end
# Returns all the interpolation keys contained in the given string or list of
# segments.
# This function returns a list of all the interpolation keys (patterns in the
# form `%{interpolation}`) contained in its argument.
# If the argument is a segment list, that is, a list of strings and atoms where
# atoms represent interpolation keys, then only the atoms in the list are
# returned.
@doc false
@spec keys(String.t() | interpolatable) :: [atom]
def keys(string_or_interpolatable)
def keys(string) when is_binary(string), do: string |> to_interpolatable() |> keys()
def keys(interpolatable) when is_list(interpolatable),
do: interpolatable |> Enum.filter(&is_atom/1) |> Enum.uniq()
@doc """
Compile a static message to interpolate with dynamic bindings.
This macro takes a static message and some dynamic bindings. The generated
code will return an `{:ok, interpolated_string}` tuple if the interpolation
is successful. If it encounters a binding in the message that is missing from
`bindings`, it returns `{:missing_bindings, incomplete_string, missing_bindings}`,
where `incomplete_string` is the string with only the present bindings interpolated
and `missing_bindings` is a list of atoms representing bindings that are in
`interpolatable` but not in `bindings`.
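## Examples
A minimal sketch (the `:translation` type argument here is illustrative):
iex> require Gettext.Interpolation.Default
iex> Gettext.Interpolation.Default.compile_interpolate(:translation, "Hello %{name}", %{name: "Mary"})
{:ok, "Hello Mary"}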
"""
@impl Gettext.Interpolation
defmacro compile_interpolate(translation_type, message, bindings) do
unless is_binary(message) do
raise """
#{__MODULE__}.compile_interpolate/2 can only be used at compile time with static messages.
Alternatively use #{__MODULE__}.runtime_interpolate/2.
"""
end
interpolatable = to_interpolatable(message)
keys = keys(interpolatable)
match_clause = match_clause(keys)
compile_string = compile_string(interpolatable)
case {keys, translation_type} do
# If no keys are in the message, the message can be returned without interpolation
{[], _translation_type} ->
quote do: {:ok, unquote(message)}
# If the message only contains the key `count` and it is a plural translation,
# gettext ensures that `count` is always set. Therefore the dynamic interpolation
# will never be needed.
{[:count], :plural_translation} ->
quote do
unquote(match_clause) = unquote(bindings)
{:ok, unquote(compile_string)}
end
{_keys, _translation_type} ->
quote do
case unquote(bindings) do
unquote(match_clause) ->
{:ok, unquote(compile_string)}
%{} = other_bindings ->
unquote(__MODULE__).runtime_interpolate(unquote(interpolatable), other_bindings)
end
end
end
end
# Compiles a list of atoms into a "match" map. For example `[:foo, :bar]` gets
# compiled to `%{foo: foo, bar: bar}`. All generated variables are under the
# current `__MODULE__`.
defp match_clause(keys) do
{:%{}, [], Enum.map(keys, &{&1, Macro.var(&1, __MODULE__)})}
end
# Compiles a string into a binary with `%{var}` patterns turned into `var`
# variables, namespaced inside the current `__MODULE__`.
defp compile_string(interpolatable) do
parts =
Enum.map(interpolatable, fn
key when is_atom(key) ->
quote do: to_string(unquote(Macro.var(key, __MODULE__))) :: binary
str ->
str
end)
{:<<>>, [], parts}
end
@impl Gettext.Interpolation
def message_format, do: "elixir-format"
end
| api/deps/gettext/lib/gettext/interpolation/default.ex | 0.91854 | 0.538134 | default.ex | starcoder |
defmodule Fixtures.Helper do
@moduledoc false
@doc false
defmacro __using__(_opts \\ []) do
quote do
@before_compile unquote(__MODULE__)
@on_definition {unquote(__MODULE__), :on_def}
require unquote(__MODULE__)
Module.register_attribute(__MODULE__, :generators, accumulate: true)
end
end
@doc false
defmacro __before_compile__(env) do
env.module
|> Module.get_attribute(:generators)
|> Enum.reduce(nil, &generator/2)
end
# Builds the AST for one generated accessor: the recorded @doc, an optional
# @spec, and a `fun(settings, key, opts \\ [])` definition.
defp generator({fun, doc, spec}, acc) do
spec =
if spec do
{:@, [context: Elixir, import: Kernel],
[
{:spec, [context: Elixir],
[
{:"::", [],
[
{fun, [],
[
{{:., [], [{:__aliases__, [alias: false], [:Keyword]}, :t]}, [], []},
{:atom, [], Elixir},
{{:., [], [{:__aliases__, [alias: false], [:Keyword]}, :t]}, [], []}
]},
spec
]}
]}
]}
end
quote do
unquote(acc)
@doc unquote(doc)
unquote(spec)
def unquote(fun)(settings, key, opts \\ []) do
case Keyword.fetch(settings, key) do
:error -> unquote(fun)(opts)
{:ok, v} -> if(Keyword.keyword?(v), do: unquote(fun)(v), else: v)
end
end
end
end
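# Invoked for every function definition in the using module; records each
# one-argument `def` (its name, current @doc, and last @spec) so that
# __before_compile__/1 can generate a matching `fun(settings, key, opts)`
# accessor for it.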
@doc false
@spec on_def(Macro.Env.t(), atom, atom, term, term, term) :: term
def on_def(env, kind, name, args, _guards, _body) do
if kind == :def and Enum.count(args) == 1 do
doc =
case Module.get_attribute(env.module, :doc) do
{_, d} -> d
d -> d
end
spec =
case Module.get_attribute(env.module, :spec) do
[{:spec, {_, _, x}, _} | _] -> List.last(x)
_ -> nil
end
Module.put_attribute(env.module, :generators, {name, doc, spec})
end
end
@doc false
@spec generate_fixture(Macro.Env.t(), module, Keyword.t(), term) :: term
def generate_fixture(caller, model, opts, block) do
model = Macro.expand(model, caller)
persist =
cond do
r = opts[:ecto_repo] -> {Macro.expand(r, caller), :insert}
r = opts[:persist] -> Macro.expand(r, caller)
:test -> nil
end
block_content =
case block[:do] do
a = {:def, _, _} -> [a]
{_, _, a} -> a
a -> [a]
end
gen_create =
unless has_create?(block_content) do
fields = List.last(block_content)
field_generation =
Enum.reduce(
fields,
quote do
%{}
end,
fn {field, {fun, _, _}}, acc ->
quote do
Map.put(unquote(acc), unquote(field), unquote(fun)(opts, unquote(field)))
end
end
)
quote do
def create(opts) do
{:ok, struct!(unquote(model), unquote(field_generation))}
end
end
end
user_create =
if has_create?(block_content),
do: block,
else: [do: {:__block__, [], Enum.slice(block_content, 0..-2)}]
quote do
defmodule unquote(Module.concat([Fixtures.Impl, model])) do
@moduledoc false
alias unquote(model)
@doc false
@spec create(Keyword.t()) :: {:ok, term}
def create(opts \\ [])
unquote(gen_create)
unquote(user_create)
unquote(generate_persist(persist))
end
end
end
@spec has_create?(term) :: boolean
defp has_create?(block) do
Enum.any?(block, fn m ->
case m do
{:def, _, [{:create, _, _} | _]} -> true
_ -> false
end
end)
end
@spec generate_persist({module, atom} | module | any) :: term
defp generate_persist(nil), do: nil
defp generate_persist({mod, fun}) do
quote do
@doc """
See: `#{unquote(mod)}.#{unquote(fun)}`.
## Examples
```elixir
iex> persist(field: :value)
```
"""
@spec persist(Keyword.t()) :: {:ok, struct}
def persist(opts \\ []) do
with {:ok, d} <- create(opts), do: unquote(mod).unquote(fun)(d, opts)
end
end
end
defp generate_persist(persist) when is_atom(persist) do
quote do
@doc """
See: `#{unquote(persist)}`.
## Examples
```elixir
iex> persist(field: :value)
```
"""
@spec persist(Keyword.t()) :: {:ok, struct}
def persist(opts \\ []) do
with {:ok, d} <- create(opts), do: unquote(persist).persist(d, opts)
end
end
end
defp generate_persist(_), do: nil
end
| lib/fixtures/helper.ex | 0.678433 | 0.609698 | helper.ex | starcoder |
defmodule Brook.Storage.Postgres.Query do
@moduledoc """
Abstracts the Postgrex SQL query functions away from
the storage behaviour implementation.
Collects the commands run directly against the Postgrex
API in a single location for creates, inserts, selects,
and deletes.
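A minimal write-then-read sketch (illustrative; assumes a running Postgrex
connection `conn` and an existing "view_schema.state" table):
postgres_upsert(conn, "view_schema.state", "people", "key-1", value)
{:ok, values} = postgres_get(conn, "view_schema.state", "people")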
"""
@typedoc "The pid of the Postgrex connection process"
@type conn :: pid
@typedoc "The combination of the view application view schema and table in 'schema_name.table_name' format"
@type schema_table :: String.t()
@typedoc "The timestamp of an event's creation"
@type timestamp :: non_neg_integer
@typedoc "The serialized and compressed version of a Brook event"
@type event :: binary()
require Logger
import Brook.Storage.Postgres.Statement
@pg_already_exists "42P07"
@spec postgres_upsert(
conn(),
schema_table(),
Brook.view_collection(),
Brook.view_key(),
Brook.view_value()
) :: :ok | {:error, Brook.reason()}
def postgres_upsert(conn, view_table, collection, key, value) do
case Postgrex.query(conn, upsert_stmt(view_table), [collection, key, value]) do
{:ok, %Postgrex.Result{num_rows: 1}} -> :ok
error_result -> error_result
end
end
@spec postgres_insert_event(
conn(),
schema_table(),
Brook.view_collection(),
Brook.view_key(),
Brook.event_type(),
timestamp(),
event()
) :: :ok | {:error, Brook.reason()}
def postgres_insert_event(conn, events_table, collection, key, type, timestamp, event) do
case Postgrex.query(conn, insert_event_stmt(events_table), [
collection,
key,
type,
timestamp,
event
]) do
{:ok, %Postgrex.Result{num_rows: 1}} -> :ok
error_result -> error_result
end
end
@spec postgres_delete(conn(), schema_table(), Brook.view_collection(), Brook.view_key()) ::
:ok | {:error, Brook.reason()}
def postgres_delete(conn, view_table, collection, key) do
case Postgrex.query(conn, delete_stmt(view_table), [collection, key]) do
{:ok, %Postgrex.Result{num_rows: rows, rows: nil}} when rows in [0, 1] -> :ok
error_result -> error_result
end
end
@spec postgres_get(conn(), schema_table(), Brook.view_collection(), Brook.view_key() | nil) ::
{:ok, [Brook.view_value()]} | {:error, Brook.reason()}
def postgres_get(conn, view_table, collection, key \\ nil) do
{key_filter, key_variable} = if key, do: {true, [key]}, else: {false, []}
case Postgrex.query(conn, get_stmt(view_table, key_filter), [collection] ++ key_variable) do
{:ok, %Postgrex.Result{rows: rows}} -> {:ok, List.flatten(rows)}
error_result -> error_result
end
end
@spec postgres_get_events(
conn(),
schema_table(),
Brook.view_collection(),
Brook.view_key(),
Brook.event_type() | nil
) ::
{:ok, [event()]} | {:error, Brook.reason()}
def postgres_get_events(conn, events_table, collection, key, type \\ nil) do
{type_filter, type_variable} = if type, do: {true, [type]}, else: {false, []}
case Postgrex.query(
conn,
get_events_stmt(events_table, type_filter),
[collection, key] ++ type_variable
) do
{:ok, %Postgrex.Result{rows: rows}} -> {:ok, List.flatten(rows)}
error_result -> error_result
end
end
@spec schema_create(conn(), String.t()) :: :ok | {:error, Brook.reason()}
def schema_create(conn, schema) do
case Postgrex.query(conn, create_schema_stmt(schema), []) do
{:ok, %Postgrex.Result{}} ->
Logger.info(fn -> "Schema #{schema} successfully created" end)
:ok
error ->
error
end
end
@spec view_table_create(conn(), schema_table()) :: :ok | {:error, Brook.reason()}
def view_table_create(conn, view_table) do
with {:ok, %Postgrex.Result{messages: []}} <-
Postgrex.query(conn, create_view_stmt(view_table), []) do
Logger.info(fn -> "Table #{view_table} created with indices : key" end)
:ok
else
{:ok, %Postgrex.Result{messages: [%{code: @pg_already_exists}]}} ->
Logger.info(fn ->
"Table #{view_table} already exists; skipping index creation"
end)
:ok
error ->
error
end
end
@spec events_table_create(pid(), schema_table(), schema_table()) ::
:ok | {:error, Brook.reason()}
def events_table_create(conn, view_table, events_table) do
type_field = "type"
timestamp_field = "create_ts"
with {:ok, %Postgrex.Result{messages: []}} <-
Postgrex.query(conn, create_events_stmt(view_table, events_table), []),
{:ok, %Postgrex.Result{}} <-
Postgrex.query(conn, create_index_stmt(events_table, type_field), []),
{:ok, %Postgrex.Result{}} <-
Postgrex.query(conn, create_index_stmt(events_table, timestamp_field), []) do
Logger.info(fn ->
"Table #{events_table} created with indices : #{type_field}, #{timestamp_field}"
end)
:ok
else
{:ok, %Postgrex.Result{messages: [%{code: @pg_already_exists}]}} ->
Logger.info(fn ->
"Table #{events_table} already exists; skipping index creation"
end)
:ok
error ->
error
end
end
end
| lib/brook/storage/postgres/query.ex | 0.770939 | 0.40031 | query.ex | starcoder |
defmodule Joi.Type.Integer do
@moduledoc false
import Joi.Validator.Skipping
import Joi.Util
import Joi.Validator.Max, only: [max_validate: 4]
import Joi.Validator.Min, only: [min_validate: 4]
import Joi.Validator.Inclusion, only: [inclusion_validate: 4]
import Joi.Validator.Greater, only: [greater_validate: 4]
import Joi.Validator.Less, only: [less_validate: 4]
@t :integer
@default_options [
required: true,
min: nil,
max: nil,
greater: nil,
less: nil
]
def message_map(options) do
field = options[:label] || options[:path] |> List.last()
limit = options[:limit]
inclusion = options[:inclusion]
%{
"#{@t}.base" => "#{field} must be a #{@t}",
"#{@t}.required" => "#{field} is required",
"#{@t}.max" => "#{field} must be less than or equal to #{limit}",
"#{@t}.min" => "#{field} must be greater than or equal to #{limit}",
"#{@t}.inclusion" => "#{field} must be one of #{inspect(inclusion)}",
"#{@t}.greater" => "#{field} must be greater than #{limit}",
"#{@t}.less" => "#{field} must be less than #{limit}"
}
end
def message(code, options) do
message_map(options) |> Map.get(code)
end
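# Illustrative flow: `validate_field(:age, %{age: "18"}, min: 1)` merges the
# default options, converts "18" to the integer 18, then runs the inclusion,
# min, max, greater and less validators in order.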
def validate_field(field, params, options) when is_list(options) do
options = Keyword.merge(@default_options, options) |> Enum.into(%{})
validate_field(field, params, options)
end
def validate_field(field, params, options) do
unless_skipping(:integer, field, params, options) do
with {:ok, params} <- convert(field, params, options),
{:ok, params} <- inclusion_validate(:integer, field, params, options),
{:ok, params} <- min_validate(:integer, field, params, options),
{:ok, params} <-
max_validate(:integer, field, params, options),
{:ok, params} <- greater_validate(:integer, field, params, options),
{:ok, params} <- less_validate(:integer, field, params, options) do
{:ok, params}
else
{:error, msg} -> {:error, msg}
end
end
end
defp convert(field, params, options) do
# NOTE: do not convert decimal
raw_value = params[field]
cond do
raw_value == nil ->
{:ok, params}
is_float(raw_value) ->
{:ok, Map.put(params, field, round(raw_value))}
is_integer(raw_value) ->
{:ok, params}
String.valid?(raw_value) && string_to_integer(raw_value) ->
{:ok, Map.put(params, field, string_to_integer(raw_value))}
true ->
error("#{@t}.base", path: path(field, options), value: raw_value)
end
end
@doc """
Returns an integer when given an integer or float string; otherwise returns `nil`
Examples:
iex> string_to_integer("1")
1
iex> string_to_integer("01")
1
iex> string_to_integer("01k")
nil
iex> string_to_integer("1k")
nil
iex> string_to_integer("1.1")
1
iex> string_to_integer("1.1k")
nil
"""
def string_to_integer(str) do
case Integer.parse(str) do
# integer string, like "1", "2"
{num, ""} ->
num
{num, maybe_float} ->
case Float.parse("0" <> maybe_float) do
{float, ""} when is_float(float) -> num
_ -> nil
end
_ ->
nil
end
end
end
| lib/joi/type/integer.ex | 0.726814 | 0.438605 | integer.ex | starcoder |
defmodule Faker.Pokemon.It do
import Faker, only: [sampler: 2]
@moduledoc """
Functions for Pokemon names in Italian
"""
@doc """
Returns a Pokemon name
## Examples
iex> Faker.Pokemon.It.name()
"Magmortar"
iex> Faker.Pokemon.It.name()
"Gastly"
iex> Faker.Pokemon.It.name()
"Mienshao"
iex> Faker.Pokemon.It.name()
"Articuno"
"""
@spec name() :: String.t()
sampler(:name, [
"Abomasnow",
"Abra",
"Absol",
"Accelgor",
"Aegislash",
"Aerodactyl",
"Aggron",
"Aipom",
"Alakazam",
"Alomomola",
"Altaria",
"Amaura",
"Ambipom",
"Amoonguss",
"Ampharos",
"Anorith",
"Araquanid",
"Arbok",
"Arcanine",
"Arceus",
"Archen",
"Archeops",
"Ariados",
"Armaldo",
"Aromatisse",
"Aron",
"Articuno",
"Audino",
"Aurorus",
"Avalugg",
"Axew",
"Azelf",
"Azumarill",
"Azurill",
"Bagon",
"Baltoy",
"Banette",
"Barbaracle",
"Barboach",
"Basculin",
"Bastiodon",
"Bayleef",
"Beartic",
"Beautifly",
"Beedrill",
"Beheeyem",
"Beldum",
"Bellossom",
"Bellsprout",
"Bergmite",
"Bewear",
"Bibarel",
"Bidoof",
"Binacle",
"Bisharp",
"Blacephalon",
"Blastoise",
"Blaziken",
"Blissey",
"Blitzle",
"Boldore",
"Bonsly",
"Bouffalant",
"Bounsweet",
"Braixen",
"Braviary",
"Breloom",
"Brionne",
"Bronzong",
"Bronzor",
"Bruxish",
"Budew",
"Buizel",
"Bulbasaur",
"Buneary",
"Bunnelby",
"Burmy",
"Butterfree",
"Buzzwole",
"Cacnea",
"Cacturne",
"Camerupt",
"Carbink",
"Carnivine",
"Carracosta",
"Carvanha",
"Cascoon",
"Castform",
"Caterpie",
"Celebi",
"Celesteela",
"Chandelure",
"Chansey",
"Charizard",
"Charjabug",
"Charmander",
"Charmeleon",
"Chatot",
"Cherrim",
"Cherubi",
"Chesnaught",
"Chespin",
"Chikorita",
"Chimchar",
"Chimecho",
"Chinchou",
"Chingling",
"Cinccino",
"Clamperl",
"Clauncher",
"Clawitzer",
"Claydol",
"Clefable",
"Clefairy",
"Cleffa",
"Cloyster",
"Cobalion",
"Cofagrigus",
"Combee",
"Combusken",
"Comfey",
"Conkeldurr",
"Corphish",
"Corsola",
"Corviknight",
"Cosmog",
"Cosmoem",
"Cottonee",
"Crabrawler",
"Crabominable",
"Cradily",
"Cranidos",
"Crawdaunt",
"Cresselia",
"Croagunk",
"Crobat",
"Croconaw",
"Crustle",
"Cryogonal",
"Cubchoo",
"Cubone",
"Cutiefly",
"Cyndaquil",
"Darkrai",
"Darmanitan",
"Dartrix",
"Darumaka",
"Decidueye",
"Dedenne",
"Deerling",
"Deino",
"Delcatty",
"Delibird",
"Delphox",
"Deoxys",
"Dewgong",
"Dewott",
"Dewpider",
"Dhelmise",
"Dialga",
"Diancie",
"Diggersby",
"Diglett",
"Ditto",
"Dodrio",
"Doduo",
"Donphan",
"Doublade",
"Dragalge",
"Dragonair",
"Dragonite",
"Drampa",
"Drapion",
"Dratini",
"Drednaw",
"Drifblim",
"Drifloon",
"Drilbur",
"Drowzee",
"Druddigon",
"Ducklett",
"Dugtrio",
"Dunsparce",
"Duosion",
"Durant",
"Dusclops",
"Dusknoir",
"Duskull",
"Dustox",
"Dwebble",
"Eelektrik",
"Eelektross",
"Eevee",
"Ekans",
"Eldegoss",
"Electabuzz",
"Electivire",
"Electrike",
"Electrode",
"Elekid",
"Elgyem",
"Emboar",
"Emolga",
"Empoleon",
"Entei",
"Escavalier",
"Espeon",
"Espurr",
"Excadrill",
"Exeggcute",
"Exeggutor",
"Exploud",
"Farfetch'd",
"Fearow",
"Feebas",
"Fennekin",
"Feraligatr",
"Ferroseed",
"Ferrothorn",
"Finneon",
"Flaaffy",
"Flabébé",
"Flareon",
"Fletchinder",
"Fletchling",
"Floatzel",
"Floette",
"Florges",
"Flygon",
"Fomantis",
"Foongus",
"Forretress",
"Fraxure",
"Frillish",
"Froakie",
"Frogadier",
"Froslass",
"Furfrou",
"Furret",
"Gabite",
"Gallade",
"Galvantula",
"Garbodor",
"Garchomp",
"Gardevoir",
"Gastly",
"Gastrodon",
"Genesect",
"Gengar",
"Geodude",
"Gible",
"Gigalith",
"Girafarig",
"Giratina",
"Glaceon",
"Glalie",
"Glameow",
"Gligar",
"Gliscor",
"Gloom",
"Gogoat",
"Golbat",
"Goldeen",
"Golduck",
"Golem",
"Golett",
"Golisopod",
"Golurk",
"Goodra",
"Goomy",
"Gorebyss",
"Gossifleur",
"Gothita",
"Gothitelle",
"Gothorita",
"Gourgeist",
"Granbull",
"Graveler",
"Greninja",
"Grimer",
"Grookey",
"Grotle",
"Groudon",
"Grovyle",
"Growlithe",
"Grubbin",
"Grumpig",
"Gulpin",
"Gumshoos",
"Gurdurr",
"Guzzlord",
"Gyarados",
"Hakamo-o",
"Happiny",
"Hariyama",
"Haunter",
"Hawlucha",
"Haxorus",
"Heatmor",
"Heatran",
"Heliolisk",
"Helioptile",
"Heracross",
"Herdier",
"Hippopotas",
"Hippowdon",
"Hitmonchan",
"Hitmonlee",
"Hitmontop",
"Ho-Oh",
"Honchkrow",
"Honedge",
"Hoothoot",
"Hoopa",
"Hoppip",
"Horsea",
"Houndoom",
"Houndour",
"Huntail",
"Hydreigon",
"Hypno",
"Igglybuff",
"Illumise",
"Impidimp",
"Infernape",
"Incineroar",
"Inkay",
"Ivysaur",
"Jangmo-o",
"Jellicent",
"Jigglypuff",
"Jirachi",
"Jolteon",
"Joltik",
"Jumpluff",
"Jynx",
"Kabuto",
"Kabutops",
"Kadabra",
"Kakuna",
"Kangaskhan",
"Karrablast",
"Kartana",
"Kecleon",
"Keldeo",
"Kingdra",
"Kingler",
"Kirlia",
"Klang",
"Klefki",
"Klink",
"Klinklang",
"Koffing",
"Komala",
"Kommo-o",
"Krabby",
"Kricketot",
"Kricketune",
"Krokorok",
"Krookodile",
"Kyogre",
"Kyurem",
"Lairon",
"Lampent",
"Landorus",
"Lanturn",
"Lapras",
"Larvesta",
"Larvitar",
"Latias",
"Latios",
"Leafeon",
"Leavanny",
"Ledian",
"Ledyba",
"Lickilicky",
"Lickitung",
"Liepard",
"Lileep",
"Lilligant",
"Lillipup",
"Linoone",
"Litleo",
"Litten",
"Litwick",
"Lombre",
"Lopunny",
"Lotad",
"Loudred",
"Lucario",
"Ludicolo",
"Lugia",
"Lumineon",
"Lunala",
"Lunatone",
"Lurantis",
"Luvdisc",
"Luxio",
"Luxray",
"Lycanroc",
"Machamp",
"Machoke",
"Machop",
"Magby",
"Magcargo",
"Magearna",
"Magikarp",
"Magmar",
"Magmortar",
"Magnemite",
"Magneton",
"Magnezone",
"Makuhita",
"Malamar",
"Mamoswine",
"Manaphy",
"Mandibuzz",
"Manectric",
"Mankey",
"Mantine",
"Mantyke",
"Maractus",
"Mareanie",
"Mareep",
"Marill",
"Marowak",
"Marshadow",
"Marshtomp",
"Masquerain",
"Mawile",
"Medicham",
"Meditite",
"Meganium",
"Melmetal",
"Meloetta",
"Meltan",
"Meowstic",
"Meowth",
"Mesprit",
"Metagross",
"Metang",
"Metapod",
"Mew",
"Mewtwo",
"Mienfoo",
"Mienshao",
"Mightyena",
"Milotic",
"Miltank",
"<NAME>.",
"Mimikyu",
"Minccino",
"Minior",
"Minun",
"Misdreavus",
"Mismagius",
"Moltres",
"Monferno",
"Morelull",
"Mothim",
"Mr. Mime",
"Mudbray",
"Mudkip",
"Mudsdale",
"Muk",
"Munchlax",
"Munna",
"Murkrow",
"Musharna",
"Naganadel",
"Natu",
"Necrozma",
"Nidoking",
"Nidoqueen",
"Nidoran♀",
"Nidoran♂",
"Nidorina",
"Nidorino",
"Nihilego",
"Nincada",
"Ninetales",
"Ninjask",
"Noctowl",
"Noibat",
"Noivern",
"Nosepass",
"Numel",
"Nuzleaf",
"Octillery",
"Oddish",
"Omanyte",
"Omastar",
"Onix",
"Oranguru",
"Oricorio",
"Oshawott",
"Pachirisu",
"Palkia",
"Palossand",
"Palpitoad",
"Pancham",
"Pangoro",
"Panpour",
"Pansage",
"Pansear",
"Paras",
"Parasect",
"Passimian",
"Patrat",
"Pawniard",
"Pelipper",
"Persian",
"Petilil",
"Phanpy",
"Phantump",
"Pheromosa",
"Phione",
"Pichu",
"Pidgeot",
"Pidgeotto",
"Pidgey",
"Pidove",
"Pignite",
"Pikachu",
"Pikipek",
"Piloswine",
"Pineco",
"Pinsir",
"Piplup",
"Plusle",
"Poipole",
"Politoed",
"Poliwag",
"Poliwhirl",
"Poliwrath",
"Ponyta",
"Poochyena",
"Popplio",
"Porygon",
"Porygon2",
"Porygon-Z",
"Primarina",
"Primeape",
"Prinplup",
"Probopass",
"Psyduck",
"Pumpkaboo",
"Pupitar",
"Purrloin",
"Purugly",
"Pyroar",
"Pyukumuku",
"Quagsire",
"Quilava",
"Quilladin",
"Qwilfish",
"Raichu",
"Raikou",
"Ralts",
"Rampardos",
"Rapidash",
"Raticate",
"Rattata",
"Rayquaza",
"Regice",
"Regigigas",
"Regirock",
"Registeel",
"Relicanth",
"Remoraid",
"Reshiram",
"Reuniclus",
"Rhydon",
"Rhyhorn",
"Rhyperior",
"Ribombee",
"Riolu",
"Rockruff",
"Roggenrola",
"Roselia",
"Roserade",
"Rotom",
"Rowlet",
"Rufflet",
"Sableye",
"Salamence",
"Salandit",
"Salazzle",
"Samurott",
"Sandile",
"Sandshrew",
"Sandslash",
"Sandygast",
"Sawk",
"Sawsbuck",
"Scatterbug",
"Sceptile",
"Scizor",
"Scolipede",
"Scorbunny",
"Scrafty",
"Scraggy",
"Scyther",
"Seadra",
"Seaking",
"Sealeo",
"Seedot",
"Seel",
"Seismitoad",
"Sentret",
"Serperior",
"Servine",
"Seviper",
"Sewaddle",
"Sharpedo",
"Shaymin",
"Shedinja",
"Shelgon",
"Shellder",
"Shellos",
"Shelmet",
"Shieldon",
"Shiftry",
"Shiinotic",
"Shinx",
"Shroomish",
"Shuckle",
"Shuppet",
"Sigilyph",
"Silcoon",
"Silvally",
"Simipour",
"Simisage",
"Simisear",
"Skarmory",
"Skiddo",
"Skiploom",
"Skitty",
"Skorupi",
"Skrelp",
"Skuntank",
"Slaking",
"Slakoth",
"Sliggoo",
"Slowbro",
"Slowking",
"Slowpoke",
"Slugma",
"Slurpuff",
"Smeargle",
"Smoochum",
"Sneasel",
"Snivy",
"Snorlax",
"Snorunt",
"Snover",
"Snubbull",
"Sobble",
"Solgaleo",
"Solosis",
"Solrock",
"Spearow",
"Spewpa",
"Spheal",
"Spinarak",
"Spinda",
"Spiritomb",
"Spoink",
"Spritzee",
"Squirtle",
"Stakataka",
"Stantler",
"Staraptor",
"Staravia",
"Starly",
"Starmie",
"Staryu",
"Steelix",
"Steenee",
"Stoutland",
"Stufful",
"Stunfisk",
"Stunky",
"Sudowoodo",
"Suicune",
"Sunflora",
"Sunkern",
"Surskit",
"Swablu",
"Swadloon",
"Swalot",
"Swampert",
"Swanna",
"Swellow",
"Swinub",
"Swirlix",
"Swoobat",
"Sylveon",
"Taillow",
"Talonflame",
"Tangela",
"Tangrowth",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Tauros",
"Teddiursa",
"Tentacool",
"Tentacruel",
"Tepig",
"Terrakion",
"Throh",
"Thundurus",
"Timburr",
"<NAME>",
"Tirtouga",
"Togedemaru",
"Togekiss",
"Togepi",
"Togetic",
"Torchic",
"Torkoal",
"Tornadus",
"Torracat",
"Torterra",
"Totodile",
"Toucannon",
"Toxapex",
"Toxicroak",
"Tranquill",
"Trapinch",
"Treecko",
"Trevenant",
"Tropius",
"Trubbish",
"Trumbeak",
"Tsareena",
"Turtonator",
"Turtwig",
"Tympole",
"Tynamo",
"Typhlosion",
"Tyranitar",
"Tyrantrum",
"Tyrogue",
"Tyrunt",
"Umbreon",
"Unfezant",
"Unown",
"Ursaring",
"Uxie",
"Vanillish",
"Vanillite",
"Vanilluxe",
"Vaporeon",
"Venipede",
"Venomoth",
"Venonat",
"Venusaur",
"Vespiquen",
"Vibrava",
"Victini",
"Victreebel",
"Vigoroth",
"Vikavolt",
"Vileplume",
"Virizion",
"Vivillon",
"Volbeat",
"Volcanion",
"Volcarona",
"Voltorb",
"Vullaby",
"Vulpix",
"Wailmer",
"Wailord",
"Walrein",
"Wartortle",
"Watchog",
"Weavile",
"Weedle",
"Weepinbell",
"Weezing",
"Whimsicott",
"Whirlipede",
"Whiscash",
"Whismur",
"Wigglytuff",
"Wimpod",
"Wingull",
"Wishiwashi",
"Wobbuffet",
"Woobat",
"Wooloo",
"Wooper",
"Wormadam",
"Wurmple",
"Wynaut",
"Xatu",
"Xerneas",
"Xurkitree"
])
@doc """
Returns a location from Pokemon universe
## Examples
iex> Faker.Pokemon.It.location()
"Arenipoli"
iex> Faker.Pokemon.It.location()
"Spiraria"
iex> Faker.Pokemon.It.location()
"Novartopoli"
iex> Faker.Pokemon.It.location()
"Castel Vanità"
"""
@spec location() :: String.t()
sampler(:location, [
"Accesso al Parco",
"Albanova",
"Alisopoli",
"Altoripoli",
"Amarantopoli",
"Aranciopoli",
"Area Provviste",
"Area Sfida",
"Area Svago",
"Arenipoli",
"Austropoli",
"Azalina",
"Azzurropoli",
"Batikopoli",
"Biancavilla",
"Bluruvia",
"Boreduopoli",
"<NAME>",
"<NAME>",
"Brunifoglia",
"Canalipoli",
"Castel Vanità",
"Celestopoli",
"Ceneride",
"Ciclamipoli",
"Città Nera",
"Cromleburgo",
"Cuordilava",
"Cuoripoli",
"Duefoglie",
"Ebanopoli",
"Ferrugipoli",
"Fiordoropoli",
"Fiorlisopoli",
"Fiorpescopoli",
"Flemminia",
"Flemminia",
"Fluxopoli",
"Foresta Bianca",
"Forestopoli",
"Fortebrezza",
"Fractalopoli",
"Frescovilla",
"Fucsiapoli",
"Giardinfiorito",
"Giubilopoli",
"Grecalopoli",
"Hau'oli",
"Ingresso Safari",
"Iridopoli",
"Is<NAME>",
"Kantai",
"Konikoni",
"Lavandonia",
"Levantopoli",
"Libecciopoli",
"Lili",
"Luminopoli",
"Malie",
"Memoride",
"Mentania",
"Mineropoli",
"Mistralopoli",
"Mogania",
"Nevepoli",
"Novartopoli",
"Ohana",
"Olivinopoli",
"Orocea",
"Petalipoli",
"Petroglifari",
"Plumbeopoli",
"Poggiovento",
"Poh",
"Ponentopoli",
"<NAME>",
"<NAME>",
"<NAME>",
"Pratopoli",
"Primisola",
"Quartisola",
"Quintisola",
"<NAME>",
"Romantopoli",
"Roteolia",
"Rupepoli",
"Sabbiafine",
"Sciroccopoli",
"Secondisola",
"Sestisola",
"Settimisola",
"Smeraldopoli",
"Soffiolieve",
"Soffiolieve",
"Solarosa",
"Spiraria",
"Temperopoli",
"Terzisola",
"Venturia",
"Verdeazzupoli",
"<NAME>",
"<NAME>",
"Violapoli",
"Yantaropoli",
"Zafferanopoli",
"Zefiropoli",
"Zondopoli"
])
end
| lib/faker/pokemon/it.ex | 0.530236 | 0.48182 | it.ex | starcoder |
defmodule Serum.Plugins.SitemapGenerator do
@moduledoc """
A Serum plugin that creates a sitemap so that search engines can index posts.
## Using the Plugin
# serum.exs:
%{
server_root: "https://example.io",
plugins: [
{Serum.Plugins.SitemapGenerator, only: :prod}
]
}
"""
@behaviour Serum.Plugin
serum_ver = Version.parse!(Mix.Project.config()[:version])
serum_req = "~> #{serum_ver.major}.#{serum_ver.minor}"
require EEx
alias Serum.GlobalBindings
alias Serum.Page
alias Serum.Post
def name, do: "Create sitemap for search engine"
def version, do: "1.2.0"
def elixir, do: ">= 1.8.0"
def serum, do: unquote(serum_req)
def description do
"Create a sitemap so that the search engine can index posts."
end
def implements, do: [build_succeeded: 3]
def build_succeeded(_src, dest, args) do
{pages, posts} = get_items(args[:for])
dest
|> create_file(pages, posts)
|> Serum.File.write()
|> case do
{:ok, _} -> :ok
{:error, _} = error -> error
end
end
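# The :for argument selects which items end up in the sitemap: it may be nil
# (defaults to [:posts]), a single atom, or a list containing :pages and/or
# :posts.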
@spec get_items(term()) :: {[Page.t()], [Post.t()]}
defp get_items(arg)
defp get_items(nil), do: get_items([:posts])
defp get_items(arg) when not is_list(arg), do: get_items([arg])
defp get_items(arg) do
pages = if :pages in arg, do: GlobalBindings.get(:all_pages), else: []
posts = if :posts in arg, do: GlobalBindings.get(:all_posts), else: []
{pages, posts}
end
sitemap_path =
:serum
|> :code.priv_dir()
|> Path.join("build_resources")
|> Path.join("sitemap.xml.eex")
EEx.function_from_file(:defp, :sitemap_xml, sitemap_path, [
:pages,
:posts,
:transformer,
:server_root
])
@spec create_file(binary(), [Page.t()], [Post.t()]) :: Serum.File.t()
defp create_file(dest, pages, posts) do
%Serum.File{
dest: Path.join(dest, "sitemap.xml"),
out_data: sitemap_xml(pages, posts, &to_w3c_format/1, get_server_root())
}
end
defp to_w3c_format(erl_datetime) do
# reference to https://www.w3.org/TR/NOTE-datetime
Timex.format!(erl_datetime, "%Y-%m-%d", :strftime)
end
defp get_server_root do
:site
|> GlobalBindings.get()
|> Map.fetch!(:server_root)
end
end
| lib/serum/plugins/sitemap_generator.ex | 0.671794 | 0.434821 | sitemap_generator.ex | starcoder |
defmodule Exchange.Order do
@moduledoc """
A struct representing an Order to be placed in the Exchange
side: :buy or :sell
type: :market, :limit, :marketable_limit or :stop_loss
trader_id: Alchemist or the user_id
exp_time: unix timestamp in milliseconds when the order expires
"""
defstruct order_id: nil,
trader_id: nil,
side: :buy,
price: 0,
size: 0,
stop: 0,
initial_size: 0,
type: :market,
exp_time: nil,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond),
modified_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond),
ticker: nil
@type price_in_cents :: integer
@type size_in_grams :: integer
@type order :: %Exchange.Order{
order_id: String.t(),
trader_id: String.t(),
side: atom,
price: price_in_cents,
size: size_in_grams,
initial_size: size_in_grams,
type: atom,
ticker: atom,
exp_time: integer | atom,
stop: integer
}
@doc """
Decodes the payload to an Order struct
## Parameters
- payload: map with necessary parameters to populate the struct
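A minimal sketch (values are illustrative; `side` and `type` must be strings):
Exchange.Order.decode_from_jason(%{order_id: "o-1", trader_id: "t-1", side: "buy", type: "limit", price: 1_000, size: 10, ticker: "AUXLND"})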
"""
@spec decode_from_jason(map) :: Exchange.Order.order()
def decode_from_jason(order) do
ticker = Map.get(order, :ticker)
ticker =
if is_atom(ticker) do
ticker
else
String.to_atom(ticker)
end
%Exchange.Order{
order_id: Map.get(order, :order_id),
trader_id: Map.get(order, :trader_id),
side: Map.get(order, :side) |> String.to_atom(),
price: Map.get(order, :price),
size: Map.get(order, :size),
stop: Map.get(order, :stop),
initial_size: Map.get(order, :initial_size),
type: Map.get(order, :type) |> String.to_atom(),
exp_time: Map.get(order, :exp_time),
acknowledged_at: Map.get(order, :acknowledged_at),
modified_at: Map.get(order, :modified_at),
ticker: ticker
}
end
@doc """
Sets the price of an order based on the given order book
## Parameters
order: Order to assign the price
order_book: Reference order book
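For example (illustrative), a market buy is priced just below the book's maximum price:
assign_prices(%Exchange.Order{type: :market, side: :buy}, order_book)
# => returns the order with price: order_book.max_price - 1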
"""
@spec assign_prices(
order :: Exchange.Order.order(),
order_book :: Exchange.OrderBook.order_book()
) :: Exchange.Order.order()
def assign_prices(%Exchange.Order{type: :market, side: :buy} = order, order_book) do
order |> Map.put(:price, order_book.max_price - 1)
end
def assign_prices(%Exchange.Order{type: :market, side: :sell} = order, order_book) do
order |> Map.put(:price, order_book.min_price + 1)
end
def assign_prices(%Exchange.Order{type: :marketable_limit, side: :buy} = order, order_book) do
order |> Map.put(:price, order_book.ask_min)
end
def assign_prices(%Exchange.Order{type: :marketable_limit, side: :sell} = order, order_book) do
order |> Map.put(:price, order_book.bid_max)
end
def assign_prices(
%Exchange.Order{type: :stop_loss, side: :buy, price: price, stop: stop} = order,
order_book
) do
case order_book.ask_min >= price * (1 + stop / 100) do
true ->
order |> Map.put(:price, order_book.max_price - 1)
_ ->
order
end
end
def assign_prices(
%Exchange.Order{type: :stop_loss, side: :sell, price: price, stop: stop} = order,
order_book
) do
case order_book.bid_max <= price * (1 - stop / 100) do
true ->
order |> Map.put(:price, order_book.min_price + 1)
_ ->
order
end
end
def assign_prices(order, _order_book) do
order
end
@doc """
Checks whether an order's price is within the bounds of the given order book.
## Parameters
order: Order to validate the price
order_book: Reference order book
"""
def validate_price(%Exchange.Order{type: type} = order, order_book)
when type == :limit or type == :stop_loss do
cond do
order.price < order_book.max_price and order.price > order_book.min_price ->
:ok
order.price > order_book.max_price ->
{:error, :max_price_exceeded}
order.price < order_book.min_price ->
{:error, :behind_min_price}
end
end
def validate_price(_order, _order_book) do
:ok
end
end
defimpl Jason.Encoder, for: Exchange.Order do
def encode(value, opts) do
Jason.Encode.map(
Map.take(value, [
:order_id,
:trader_id,
:side,
:price,
:size,
:stop,
:initial_size,
:type,
:exp_time,
:acknowledged_at,
:modified_at,
:ticker
]),
opts
)
end
end
| lib/exchange/order.ex | 0.837968 | 0.658146 | order.ex | starcoder |
defmodule Blockchain.Transaction do
@moduledoc """
Implements a blockchain transaction
"""
alias Blockchain.Hash
alias Blockchain.TransactionIO
alias Blockchain.Wallet
@enforce_keys [:signature, :from, :to, :value, :inputs, :outputs]
defstruct @enforce_keys
@typedoc """
Represents a transaction
"""
@type t :: %__MODULE__{
signature: Hash.t() | nil,
from: Wallet.t(),
to: Wallet.t(),
value: number(),
inputs: [TransactionIO.t()],
outputs: [TransactionIO.t()]
}
@doc """
Creates a new transaction
"""
@spec new(Wallet.t(), Wallet.t(), number(), [TransactionIO.t()]) :: __MODULE__.t()
def new(%Wallet{} = from, %Wallet{} = to, value, inputs \\ []) when is_number(value) do
%__MODULE__{
signature: nil,
from: from,
to: to,
value: value,
inputs: inputs,
outputs: []
}
end
@doc """
Updates the transaction's value but does not do any processing
"""
@spec update_value(__MODULE__.t(), number()) :: __MODULE__.t()
def update_value(%__MODULE__{} = transaction, new_value) when is_number(new_value) do
%__MODULE__{transaction | signature: nil, value: new_value}
end
@doc """
The from and to wallets and the transaction value are hashed using SHA256, and
then the hash is signed (i.e., encrypted) using the from wallet's private key
and RSA
"""
@spec sign(Wallet.t(), Wallet.t(), number()) :: Hash.t()
def sign(%Wallet{} = from, %Wallet{} = to, value) do
{:ok, signed_transaction} =
ExPublicKey.sign(
[
:erlang.term_to_binary(from),
:erlang.term_to_binary(to),
to_string(value)
],
from.private_key
)
Hash.new(signed_transaction)
end
@doc """
The from and to wallets and the transaction value are hashed using SHA256, and
then the hash is signed (i.e., encrypted) using the from wallet's private key
and RSA
"""
@spec sign(__MODULE__.t()) :: Hash.t()
def sign(%__MODULE__{from: from, to: to, value: value} = _transaction) do
sign(from, to, value)
end
@doc """
Processes a transaction: consumes its inputs, creates one output paying `value` to the receiver and one returning the leftover to the sender, and signs the result
"""
@spec process(__MODULE__.t()) :: __MODULE__.t()
def process(
%__MODULE__{
from: from,
to: to,
value: value,
inputs: inputs,
outputs: outputs
} = transaction
) do
inputs_sum =
inputs
|> Enum.map(fn %TransactionIO{value: value} = _input -> value end)
|> Enum.sum()
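# Whatever is left over after paying `value` is returned to the sender as
# change in a second output.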
leftover = inputs_sum - value
new_outputs = [
TransactionIO.new(value, to),
TransactionIO.new(leftover, from)
]
%__MODULE__{
transaction
| signature: sign(transaction),
outputs: new_outputs ++ outputs
}
end
@doc """
Validates a transaction's signature
"""
@spec valid_signature?(__MODULE__.t()) :: boolean()
def valid_signature?(%__MODULE__{} = transaction) do
public_key = transaction.from.public_key
{:ok, verified?} =
ExPublicKey.verify(
[
:erlang.term_to_binary(transaction.from),
:erlang.term_to_binary(transaction.to),
to_string(transaction.value)
],
Hash.to_binary(transaction.signature),
public_key
)
verified?
end
@doc """
Validates a transaction
"""
@spec valid?(__MODULE__.t()) :: boolean()
def valid?(%__MODULE__{inputs: inputs, outputs: outputs} = transaction) do
sum_inputs = sum_transaction_io_list(inputs)
sum_outputs = sum_transaction_io_list(outputs)
Enum.all?([
valid_signature?(transaction),
Enum.all?(outputs, &TransactionIO.valid?/1),
sum_inputs >= sum_outputs
])
end
@spec format(__MODULE__.t()) :: String.t()
def format(%__MODULE__{from: from, to: to, value: value} = _transaction) do
from_address =
:erlang.binary_part(ExPublicKey.RSAPublicKey.get_fingerprint(from.public_key), 60, 4)
to_address =
:erlang.binary_part(ExPublicKey.RSAPublicKey.get_fingerprint(to.public_key), 60, 4)
"... #{from_address} ... sends ... #{to_address} ... an amount of #{value}.\n"
end
# Helper function to sum all `TransactionIO` values
@spec sum_transaction_io_list([TransactionIO.t()]) :: integer()
defp sum_transaction_io_list(transaction_io_list) do
transaction_io_list
|> Enum.map(fn %TransactionIO{value: value} = _input -> value end)
|> Enum.sum()
end
end
| lib/blockchain/transaction.ex | 0.896547 | 0.567547 | transaction.ex | starcoder |
defmodule WebpackStatic.Plug do
@moduledoc """
Phoenix plug to proxy a locally running instance of the webpack dev server.<br />
This plug will only serve assets when the env parameter has the value of `:dev`.<br />
Phoenix will be allowed a chance to resolve any assets not resolved by webpack.<br />
## Installation
```
defp deps do
[
{:WebpackStaticPlug, "~> 0.1.1"}
]
end
```
And run:
$ mix deps.get
## Usage
Add WebpackStatic.Plug as a plug in the phoenix project's endpoint.
## Arguments
* **port** - *(required)* The port that the webpack dev server is listening on.
* **webpack_assets** - *(required)* a list of the paths in the static folder that webpack will serve. The plug will ignore requests to any other path.
* **env** - *(required)* the current environment the project is running under.
* **manifest_path** - *(optional)* relative path of the webpack manifest file, resolved from the static folder.
## Example
in `endpoint.ex`
```
plug WebpackStatic.Plug,
port: 9000, webpack_assets: ~w(css fonts images js),
env: Mix.env, manifest_path: "/manifest.json"
```
"""
alias HTTPotion, as: Http
alias Plug.Conn, as: Conn
require Poison
@doc false
def init(args) do
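# Sort the options by key so the keyword list matches the fixed order
# pattern-matched in call/2.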
List.keysort(args, 0)
end
@doc false
def call(conn, [
{:env, env},
{:manifest_path, manifest_path},
{:port, port},
{:webpack_assets, assets}
]) do
if env == :dev do
manifest_task = Task.async(fn -> get_manifest(manifest_path, port) end)
manifest = Task.await(manifest_task)
case manifest do
{:error, message} -> raise message
{:ok, manifest} -> serve_asset(conn, port, assets, manifest)
nil -> serve_asset(conn, port, assets, nil)
end
else
conn
end
end
defp get_manifest(path, port) when is_binary(path) do
url =
"http://localhost:#{port}"
|> URI.merge(path)
|> URI.to_string()
response = Http.get(url, headers: [Accept: "application/json"])
case response do
%HTTPotion.Response{status_code: code} when code == 404 ->
{:error, "Error: could not find manifest located at #{url}"}
%HTTPotion.Response{body: body, status_code: code} when code >= 400 ->
{:error, "Error: fetching manifest, status:#{code} body:#{body}"}
%HTTPotion.Response{body: body} ->
Poison.decode(body)
%HTTPotion.ErrorResponse{message: message} ->
{:error, "Error: fetching manifest: #{message}"}
end
end
defp get_manifest(_, _), do: nil
defp serve_asset(
conn = %Plug.Conn{
path_info: [uri, file_name],
req_headers: req_headers
},
port,
assets,
manifest
) do
requested_path = "#{uri}/#{file_name}"
actual_path =
case manifest do
%{^requested_path => value} -> value
_ -> requested_path
end
url =
"http://localhost:#{port}"
|> URI.merge(actual_path)
|> URI.to_string()
asset_type =
uri
|> String.split("/")
|> hd
if Enum.any?(assets, &(&1 == asset_type)) do
Http.get(
url,
stream_to: self(),
headers: req_headers
)
response = receive_response(conn)
case response do
{:not_found, conn} -> conn
{:error, message} -> raise message
{:ok, conn} -> Conn.halt(conn)
end
else
conn
end
end
defp serve_asset(conn = %Plug.Conn{}, _, _, _), do: conn
defp receive_response(conn) do
receive do
%HTTPotion.AsyncChunk{chunk: chunk} ->
case Conn.chunk(conn, chunk) do
{:ok, conn} -> receive_response(conn)
{:error, reason} -> {:error, "Error fetching webpack resource: #{reason}"}
end
%HTTPotion.AsyncHeaders{status_code: status} when status == 404 ->
{:not_found, conn}
%HTTPotion.AsyncHeaders{
status_code: code,
headers: %HTTPotion.Headers{
hdrs: headers
}
}
when code < 400 ->
headers
|> Map.to_list()
|> Enum.reduce(conn, fn {key, value}, acc ->
Conn.put_resp_header(acc, key, value)
end)
|> Conn.send_chunked(code)
|> receive_response()
%HTTPotion.AsyncEnd{} ->
{:ok, conn}
%HTTPotion.ErrorResponse{message: message} ->
{:error, "Error fetching webpack resource: #{message}"}
%HTTPotion.AsyncHeaders{
status_code: code
}
when code >= 400 ->
{:error, "Webpack responded with error code: #{code}"}
after
15_000 -> {:error, "Error fetching webpack resource: Timeout exceeded"}
end
end
end
| lib/webpack_static.ex | 0.864982 | 0.727056 | webpack_static.ex | starcoder |
defmodule AWS.MarketplaceMetering do
@moduledoc """
AWS Marketplace Metering Service
This reference provides descriptions of the low-level AWS Marketplace
Metering Service API.
AWS Marketplace sellers can use this API to submit usage data for custom
usage dimensions.
For information on the permissions you need to use this API, see [AWS
Marketing metering and entitlement API
permissions](https://docs.aws.amazon.com/marketplace/latest/userguide/iam-user-policy-for-aws-marketplace-actions.html)
in the *AWS Marketplace Seller Guide.*
**Submitting Metering Records**

  * *MeterUsage* - Submits the metering record for a Marketplace product.
    MeterUsage is called from an EC2 instance or a container running on EKS
    or ECS.
  * *BatchMeterUsage* - Submits the metering record for a set of customers.
    BatchMeterUsage is called from a software-as-a-service (SaaS) application.

**Accepting New Customers**

  * *ResolveCustomer* - Called by a SaaS application during the registration
    process. When a buyer visits your website during the registration process,
    the buyer submits a Registration Token through the browser. The
    Registration Token is resolved through this API to obtain a
    CustomerIdentifier and Product Code.

**Entitlement and Metering for Paid Container Products**

  * Paid container software products sold through AWS Marketplace must
    integrate with the AWS Marketplace Metering Service and call the
    RegisterUsage operation for software entitlement and metering. Free and
    BYOL products for Amazon ECS or Amazon EKS aren't required to call
    RegisterUsage, but you can do so if you want to receive usage data in your
    seller reports. For more information on using the RegisterUsage operation,
    see [Container-Based Products](https://docs.aws.amazon.com/marketplace/latest/userguide/container-based-products.html).

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use
CloudTrail to verify that the SaaS metering records that you sent are
accurate by searching for records with the eventName of BatchMeterUsage. You
can also use CloudTrail to audit records over time. For more information, see
the *[AWS CloudTrail User Guide](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html)*.
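**Example**
A minimal sketch (illustrative; `client` is an `AWS.Client` configured with
credentials, region, and endpoint, and the record values are placeholders):
input = %{
"ProductCode" => "my-product-code",
"UsageRecords" => [
%{"CustomerIdentifier" => "customer-id", "Dimension" => "users", "Quantity" => 1, "Timestamp" => 1_600_000_000}
]
}
AWS.MarketplaceMetering.batch_meter_usage(client, input)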
"""
@doc """
BatchMeterUsage is called from a SaaS application listed on the AWS
Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with
the same records or a subset of the input records.
Every request to BatchMeterUsage is for one product. If you need to meter
usage for multiple products, you must make multiple calls to
BatchMeterUsage.
BatchMeterUsage can process up to 25 UsageRecords at a time.
"""
def batch_meter_usage(client, input, options \\ []) do
request(client, "BatchMeterUsage", input, options)
end
@doc """
API to emit metering records. For identical requests, the API is
idempotent. It simply returns the metering record ID.
MeterUsage is authenticated on the buyer's AWS account using credentials
from the EC2 instance, ECS task, or EKS pod.
"""
def meter_usage(client, input, options \\ []) do
request(client, "MeterUsage", input, options)
end
@doc """
Paid container software products sold through AWS Marketplace must
integrate with the AWS Marketplace Metering Service and call the
RegisterUsage operation for software entitlement and metering. Free and
BYOL products for Amazon ECS or Amazon EKS aren't required to call
RegisterUsage, but you may choose to do so if you would like to receive
usage data in your seller reports. The sections below explain the behavior
of RegisterUsage. RegisterUsage performs two primary functions: metering
and entitlement.
<ul> <li> *Entitlement*: RegisterUsage allows you to verify that the
customer running your paid software is subscribed to your product on AWS
Marketplace, enabling you to guard against unauthorized use. Your container
image that integrates with RegisterUsage is only required to guard against
unauthorized use at container startup, as such a
CustomerNotSubscribedException/PlatformNotSupportedException will only be
thrown on the initial call to RegisterUsage. Subsequent calls from the same
Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a
CustomerNotSubscribedException, even if the customer unsubscribes while the
Amazon ECS task or Amazon EKS pod is still running.
</li> <li> *Metering*: RegisterUsage meters software use per ECS task, per
hour, or per pod for Amazon EKS with usage prorated to the second. A
minimum of 1 minute of usage applies to tasks that are short lived. For
example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a
service configured as a Daemon Set, then Amazon ECS or Amazon EKS will
launch a task on all 10 cluster nodes and the customer will be charged: (10
* hourly_rate). Metering for software use is automatically handled by the
AWS Marketplace Metering Control Plane -- your software is not required to
perform any metering specific actions, other than call RegisterUsage once
for metering of software use to commence. The AWS Marketplace Metering
Control Plane will also continue to bill customers for running ECS tasks
and Amazon EKS pods, regardless of the customers subscription state,
removing the need for your software to perform entitlement checks at
runtime.
</li> </ul>
"""
def register_usage(client, input, options \\ []) do
request(client, "RegisterUsage", input, options)
end
@doc """
ResolveCustomer is called by a SaaS application during the registration
process. When a buyer visits your website during the registration process,
the buyer submits a registration token through their browser. The
registration token is resolved through this API to obtain a
CustomerIdentifier and product code.
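  ## Example
  An illustrative sketch; `registration_token` is the token submitted by the
  buyer's browser and `client` is assumed to be an already-configured client
  struct.
      resolve_customer(client, %{"RegistrationToken" => registration_token})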
"""
def resolve_customer(client, input, options \\ []) do
request(client, "ResolveCustomer", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "aws-marketplace"}
host = build_host("metering.marketplace", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSMPMeteringService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/marketplace_metering.ex
| 0.833663
| 0.536131
|
marketplace_metering.ex
|
starcoder
|
defmodule Bitcoinex.Address do
@moduledoc """
Bitcoinex.Address supports Base58 and Bech32 address encoding and validation.
"""
alias Bitcoinex.{Segwit, Base58, Network}
@typedoc """
The address_type describes the address type to use.
Four address types are supported:
* p2pkh: Pay-to-Public-Key-Hash
* p2sh: Pay-to-Script-Hash
* p2wpkh: Pay-to-Witness-Public-Key-Hash
  * p2wsh: Pay-to-Witness-Script-Hash
"""
@type address_type :: :p2pkh | :p2sh | :p2wpkh | :p2wsh
@address_types ~w(p2pkh p2sh p2wpkh p2wsh)a
@doc """
Accepts a public key hash, network, and address_type and returns its address.
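  ## Example
  A sketch of the calling convention; the all-zero binary below is a
  placeholder for a real 20-byte HASH160, and `:mainnet` is assumed to be a
  configured network name.
      Bitcoinex.Address.encode(<<0::160>>, :mainnet, :p2pkh)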
"""
@spec encode(binary, Bitcoinex.Network.network_name(), address_type) :: String.t()
def encode(pubkey_hash, network_name, :p2pkh) do
network = Network.get_network(network_name)
decimal_prefix = network.p2pkh_version_decimal_prefix
Base58.encode(<<decimal_prefix>> <> pubkey_hash)
end
def encode(script_hash, network_name, :p2sh) do
network = Network.get_network(network_name)
decimal_prefix = network.p2sh_version_decimal_prefix
Base58.encode(<<decimal_prefix>> <> script_hash)
end
@doc """
Checks if the address is valid.
  Both encoding and network are checked.
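  ## Example
  For instance, the well-known genesis coinbase address should validate as a
  mainnet address.
      Bitcoinex.Address.is_valid?("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa", :mainnet)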
"""
@spec is_valid?(String.t(), Bitcoinex.Network.network_name()) :: boolean
def is_valid?(address, network_name) do
Enum.any?(@address_types, &is_valid?(address, network_name, &1))
end
@doc """
Checks if the address is valid and matches the given address_type.
  Both encoding and network are checked.
"""
@spec is_valid?(String.t(), Bitcoinex.Network.network_name(), address_type) :: boolean
def is_valid?(address, network_name, :p2pkh) do
network = apply(Bitcoinex.Network, network_name, [])
is_valid_base58_check_address?(address, network.p2pkh_version_decimal_prefix)
end
def is_valid?(address, network_name, :p2sh) do
network = apply(Bitcoinex.Network, network_name, [])
is_valid_base58_check_address?(address, network.p2sh_version_decimal_prefix)
end
def is_valid?(address, network_name, :p2wpkh) do
case Segwit.decode_address(address) do
{:ok, {^network_name, witness_version, witness_program}}
when witness_version == 0 and length(witness_program) == 20 ->
true
# network is not same as network set in config
{:ok, {_network_name, _, _}} ->
false
{:error, _error} ->
false
end
end
def is_valid?(address, network_name, :p2wsh) do
case Segwit.decode_address(address) do
{:ok, {^network_name, witness_version, witness_program}}
when witness_version == 0 and length(witness_program) == 32 ->
true
# network is not same as network set in config
{:ok, {_network_name, _, _}} ->
false
{:error, _error} ->
false
end
end
@doc """
Returns a list of supported address types.
"""
def supported_address_types() do
@address_types
end
defp is_valid_base58_check_address?(address, valid_prefix) do
case Base58.decode(address) do
{:ok, <<^valid_prefix::8, _::binary>>} ->
true
_ ->
false
end
end
@doc """
Decodes an address and returns the address_type.
"""
@spec decode_type(String.t(), Bitcoinex.Network.network_name()) ::
{:ok, address_type} | {:error, :decode_error}
def decode_type(address, network_name) do
case Enum.find(@address_types, &is_valid?(address, network_name, &1)) do
nil -> {:error, :decode_error}
type -> {:ok, type}
end
end
end
|
server/bitcoinex/lib/address.ex
| 0.852859
| 0.572036
|
address.ex
|
starcoder
|
defmodule Docker do
@moduledoc false
defmodule Error do
@moduledoc false
defexception [:command, :args, :status, :output]
def message(%{command: command, args: args, status: status, output: output}) do
"Failed on docker #{Enum.join([command | args], " ")} (#{status}):\n#{output}"
end
end
@doc """
Checks whether docker is available and ready to be run.
Returns false if:
1. Docker is not installed or the `docker` command cannot be found.
  2. You're on Mac or Windows, but Docker Machine is not set up.
Otherwise returns true and Docker should be ready for use.
"""
def ready? do
case cmd("info", []) do
{_, 0} -> true
_ -> false
end
end
@doc """
Determines the Docker host address.
Checks for the `DOCKER_HOST` environment variable set by Docker Machine or
falls back to `127.0.0.1`.
The containers we start for testing publish their local SSH port (22) to
random ports on the host machine. On Mac and Windows the host machine is
  the Docker Machine `DOCKER_HOST`. Systems running Docker Engine directly
  publish ports to localhost (127.0.0.1).
Returns the name (or IP address) of the configured Docker host.
"""
def host do
case System.get_env("DOCKER_HOST") do
addr when is_binary(addr) -> Map.get(URI.parse(addr), :host)
nil -> "127.0.0.1"
end
end
@doc """
Builds a tagged Docker image from a Dockerfile.
Returns the image ID.
"""
def build!(tag, path) do
cmd!("build", ["--quiet", "--tag", tag, path])
end
@doc """
Runs a command in a new container.
Returns the command output.
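  ## Example
  An illustrative call; the options, image, and command are placeholders.
      Docker.run!(["--rm"], "alpine:3.12", "echo", ["hello"])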
"""
def run!(options \\ [], image, command \\ nil, args \\ [])
def run!(options, image, nil, []) do
cmd!("run", options ++ [image])
end
def run!(options, image, command, args) do
cmd!("run", options ++ [image, command] ++ args)
end
@doc """
Runs a command in a running container.
Returns the command output.
"""
def exec!(options \\ [], container, command, args \\ []) do
cmd!("exec", options ++ [container, command] ++ args)
end
@doc """
Kills one or more running containers.
Returns a list of the killed containers' IDs.
"""
def kill!(options \\ [], containers) do
"kill"
|> cmd!(options ++ List.wrap(containers))
|> String.split("\n")
end
@doc """
Runs a docker command with the given arguments.
Returns a tuple containing the command output and exit status.
For details, see [`System.cmd/3`](https://hexdocs.pm/elixir/System.html#cmd/3).
"""
def cmd(command, args \\ []) do
System.cmd("docker", [command | args], stderr_to_stdout: true)
end
@doc """
Runs a docker command with the given arguments.
Returns the command output or, if the command exits with a non-zero status,
raises a `Docker.Error`.
"""
def cmd!(command, args \\ []) do
{output, status} = cmd(command, args)
case status do
0 -> String.trim(output)
_ -> raise Error, command: command, args: args, status: status, output: output
end
end
end
|
test/support/docker.ex
| 0.805709
| 0.470554
|
docker.ex
|
starcoder
|
defmodule Estuary.DataWriter do
@moduledoc """
Implementation of `Pipeline.Writer` for Estuary's edges.
"""
require Logger
alias Estuary.Datasets.DatasetSchema
alias Estuary.DataReader
@behaviour Pipeline.Writer
@table_writer Application.get_env(:estuary, :table_writer)
@impl Pipeline.Writer
@doc """
Ensures a table exists using `:table_writer` from
Estuary's application environment.
"""
def init(args) do
:ok = @table_writer.init(args)
rescue
e -> {:error, e, "Presto Error"}
end
@impl Pipeline.Writer
@doc """
Writes data to PrestoDB and Kafka using `:table_writer` from
Estuary's application environment.
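  ## Example
  A sketch of the expected event shape: every event must carry the "author",
  "create_ts", "data" and "type" keys (the values below are placeholders);
  otherwise `{:error, events, "Required field missing"}` is returned.
      events = [
        %{
          "author" => "ingester",
          "create_ts" => 1_600_000_000,
          "data" => "{}",
          "type" => "event:write"
        }
      ]
      Estuary.DataWriter.write(events)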
"""
def write(events, _ \\ []) do
payload = make_datawriter_payload(events)
case get_errors(payload) do
[] ->
@table_writer.write(payload,
table: DatasetSchema.table_name(),
schema: DatasetSchema.schema()
)
_ ->
{:error, events, "Required field missing"}
end
rescue
_ -> {:error, events, "Presto Error"}
end
@impl Pipeline.Writer
def compact(_ \\ []) do
Logger.info("Beginning #{DatasetSchema.table_name()} compaction")
DataReader.terminate()
@table_writer.compact(table: DatasetSchema.table_name())
DataReader.init()
Logger.info("Completed #{DatasetSchema.table_name()} compaction")
:ok
rescue
error ->
Logger.error("#{DatasetSchema.table_name()} failed to compact: #{inspect(error)}")
{:error, error}
end
defp make_datawriter_payload(events) do
Enum.map(events, &make_payload/1)
end
defp make_payload(%{
"author" => author,
"create_ts" => create_ts,
"data" => data,
"type" => type
}) do
%{
payload: %{
"author" => author,
"create_ts" => create_ts,
"data" => data,
"type" => type
}
}
end
defp make_payload(event) do
{:error, event, "Required field missing"}
end
defp get_errors(payload) do
Enum.filter(payload, &match?({:error, _, _}, &1))
end
end
|
apps/estuary/lib/estuary/data_writer.ex
| 0.798187
| 0.520496
|
data_writer.ex
|
starcoder
|
defmodule StepFlow.Amqp.CommonConsumer do
@moduledoc """
Definition of a Common Consumer of RabbitMQ queue.
To implement a consumer,
```elixir
defmodule MyModule do
use StepFlow.Amqp.CommonConsumer, %{
queue: "name_of_the_rabbit_mq_queue",
    exchange: "name_of_exchange",
consumer: &MyModule.consume/4
}
def consume(channel, tag, redelivered, payload) do
...
Basic.ack(channel, tag)
end
end
```
"""
alias StepFlow.Amqp.CommonConsumer
@doc false
defmacro __using__(opts) do
quote do
use GenServer
use AMQP
      alias StepFlow.Amqp.CommonEmitter
      alias StepFlow.Amqp.Helpers
      require Logger
@doc false
def start_link do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
@doc false
def init(:ok) do
rabbitmq_connect()
end
# Confirmation sent by broker after registering this process as consumer
def handle_info({:basic_consume_ok, %{consumer_tag: _consumer_tag}}, channel) do
{:noreply, channel}
end
# Sent by broker when consumer is unexpectedly cancelled (such as after queue deletion)
def handle_info({:basic_cancel, %{consumer_tag: _consumer_tag}}, channel) do
{:stop, :normal, channel}
end
# Confirmation sent by broker to consumer process after a Basic.cancel
def handle_info({:basic_cancel_ok, %{consumer_tag: _consumer_tag}}, channel) do
{:noreply, channel}
end
def handle_info(
{:basic_deliver, payload, %{delivery_tag: tag, redelivered: redelivered} = headers},
channel
) do
queue = unquote(opts).queue
data =
payload
|> Jason.decode!()
Logger.info("#{__MODULE__}: receive message on queue: #{queue}")
max_retry_to_timeout =
StepFlow.Configuration.get_var_value(StepFlow.Amqp, :max_retry_to_timeout, 10)
Logger.debug("#{__MODULE__} #{inspect(headers)}")
max_retry_reached =
with headers when headers != :undefined <- Map.get(headers, :headers),
{"x-death", :array, death} <- List.keyfind(headers, "x-death", 0),
{:table, table} <- List.first(death),
{"count", :long, count} <- List.keyfind(table, "count", 0) do
count > max_retry_to_timeout
else
_ -> false
end
if max_retry_reached do
Logger.warn("#{__MODULE__}: timeout message sent to queue: #{queue}_timeout")
CommonEmitter.publish(queue <> "_timeout", payload)
AMQP.Basic.ack(channel, tag)
else
unquote(opts).consumer.(channel, tag, redelivered, data)
end
{:noreply, channel}
end
      def handle_info({:DOWN, _, :process, _pid, _reason}, _) do
        {:ok, chan} = rabbitmq_connect()
        {:noreply, chan}
      end
def terminate(_reason, state) do
AMQP.Connection.close(state.conn)
end
defp rabbitmq_connect do
url = Helpers.get_amqp_connection_url()
case AMQP.Connection.open(url) do
{:ok, connection} ->
init_amqp_connection(connection)
{:error, message} ->
Logger.error(
"#{__MODULE__}: unable to connect to: #{url}, reason: #{inspect(message)}"
)
# Reconnection loop
:timer.sleep(10_000)
rabbitmq_connect()
end
end
defp init_amqp_connection(connection) do
Process.monitor(connection.pid)
{:ok, channel} = AMQP.Channel.open(connection)
queue = unquote(opts).queue
exchange_name = unquote(opts).exchange
if Map.has_key?(unquote(opts), :prefetch_count) do
:ok = AMQP.Basic.qos(channel, prefetch_count: unquote(opts).prefetch_count)
end
CommonConsumer.create_queues(channel, queue)
Logger.warn("#{__MODULE__}: bind #{queue}")
AMQP.Queue.bind(channel, queue, exchange_name, routing_key: queue)
Logger.warn("#{__MODULE__}: connected to queue #{queue}")
{:ok, _consumer_tag} = AMQP.Basic.consume(channel, queue)
{:ok, channel}
end
end
end
def create_queues(channel, queue) do
AMQP.Queue.declare(channel, "job_response_not_found", durable: true)
AMQP.Queue.declare(channel, queue <> "_timeout", durable: true)
    AMQP.Exchange.topic(channel, "job_response",
      durable: true,
      arguments: [{"alternate-exchange", :longstr, "job_response_not_found"}]
    )
    AMQP.Exchange.topic(channel, "worker_response",
      durable: true,
      arguments: [{"alternate-exchange", :longstr, "worker_response_not_found"}]
    )
    AMQP.Queue.declare(channel, "direct_messaging_not_found", durable: true)
    AMQP.Queue.declare(channel, queue <> "_timeout", durable: true)
    AMQP.Exchange.declare(channel, "direct_messaging", :headers,
      durable: true,
      arguments: [{"alternate-exchange", :longstr, "direct_messaging_not_found"}]
    )
    AMQP.Exchange.fanout(channel, "job_response_delayed", durable: true)
    {:ok, _job_response_delayed_queue} =
      AMQP.Queue.declare(channel, "job_response_delayed",
arguments: [
{"x-message-ttl", :short, 5000},
{"x-dead-letter-exchange", :longstr, ""}
]
)
AMQP.Queue.bind(channel, "job_response_delayed", "job_response_delayed", routing_key: "*")
AMQP.Queue.declare(channel, queue,
durable: true,
arguments: [
{"x-dead-letter-exchange", :longstr, "job_response_delayed"},
{"x-dead-letter-routing-key", :longstr, queue}
]
)
end
end
|
lib/step_flow/amqp/common_consumer.ex
| 0.733547
| 0.590897
|
common_consumer.ex
|
starcoder
|
defmodule Chatter.PeerData do
require Record
require Chatter.NetID
require Chatter.BroadcastID
alias Chatter.NetID
alias Chatter.BroadcastID
Record.defrecord :peer_data,
id: nil,
broadcast_seqno: 0,
seen_ids: []
@type t :: record( :peer_data,
id: NetID.t,
broadcast_seqno: integer,
seen_ids: list(BroadcastID.t) )
@spec new(NetID.t) :: t
def new(id)
when NetID.is_valid(id)
do
peer_data(id: id)
end
defmacro is_valid(data) do
case Macro.Env.in_guard?(__CALLER__) do
true ->
quote do
is_tuple(unquote(data)) and tuple_size(unquote(data)) == 4 and
:erlang.element(1, unquote(data)) == :peer_data and
# id
NetID.is_valid(:erlang.element(2, unquote(data))) and
# broadcast_seqno
is_integer(:erlang.element(3, unquote(data))) and
:erlang.element(3, unquote(data)) >= 0 and
# seen ids
is_list(:erlang.element(4, unquote(data)))
end
false ->
quote bind_quoted: binding() do
is_tuple(data) and tuple_size(data) == 4 and
:erlang.element(1, data) == :peer_data and
# id
NetID.is_valid(:erlang.element(2, data)) and
# broadcast_seqno
is_integer(:erlang.element(3, data)) and
:erlang.element(3, data) >= 0 and
# seen ids
is_list(:erlang.element(4, data))
end
end
end
@spec valid?(t) :: boolean
def valid?(data)
when is_valid(data)
do
true
end
def valid?(_), do: false
@spec id(t) :: NetID.t
def id(d)
when is_valid(d)
do
peer_data(d, :id)
end
  @spec inc_broadcast_seqno(t) :: t
def inc_broadcast_seqno(d)
when is_valid(d)
do
peer_data(d, broadcast_seqno: (1+peer_data(d, :broadcast_seqno)))
end
@spec broadcast_seqno(t) :: integer
def broadcast_seqno(d)
when is_valid(d)
do
peer_data(d, :broadcast_seqno)
end
@spec broadcast_seqno(t, integer) :: t
def broadcast_seqno(d, v)
when is_valid(d) and
is_integer(v) and
v >= 0
do
peer_data(d, broadcast_seqno: v)
end
@spec max_broadcast_seqno(t, integer) :: t
def max_broadcast_seqno(d, v)
when is_valid(d) and is_integer(v) and v >= 0
do
peer_data(d, broadcast_seqno: max(v, peer_data(d, :broadcast_seqno)))
end
@spec seen_ids(t) :: list(BroadcastID.t)
def seen_ids(d)
when is_valid(d)
do
peer_data(d, :seen_ids)
end
@spec merge_seen_ids(t, list(BroadcastID.t)) :: t
def merge_seen_ids(d, []), do: d
def merge_seen_ids(d, ids)
when is_valid(d) and
is_list(ids)
do
old_ids = peer_data(d, :seen_ids)
peer_data(d, seen_ids: BroadcastID.merge_lists(old_ids, ids))
end
end
|
lib/peer_data.ex
| 0.659076
| 0.40204
|
peer_data.ex
|
starcoder
|
defmodule OptimusHash do
@moduledoc """
OptimusHash is a small library to do integer hashing based on Knuth's
multiplicative hashing algorithm. The algorithm is fast, reversible and has
zero collisions.
This comes in very handy when you have e.g. integer-based primary keys in your
database and you don't want to expose them to the outside world.
## Usage
To get started, you will need three values: a prime number, the modular
multiplicative inverse of that prime number, and a random number. There is a
built-in task to generate those values for you—see the section about
[seeding](#module-seeding).
**Warning**: Before you use this library in production, you should think about
the largest possible ID you will have. OptimusHash supports IDs up to
2,147,483,647 by default. If you need larger IDs, you will need to pass the
`max_size` option to `new/1`. Since this change will affect the results of
`encode/2` and `decode/2` you have to plan ahead.
## Seeding
This package comes with a Mix task to generate the required configuration
values for you. The task requires the `openssl` binary to be installed on your
system. If you don't want to or can't install it, you will have to calculate
or find a prime number yourself. A good starting point is the [the list of
the first fifty million primes](https://primes.utm.edu/lists/small/millions/).
$ mix optimus_hash.seed
Configuration:
- prime: 2120909159
- mod_inverse: 1631586903
- random: 1288598321
- max_size: 31
Code:
```
OptimusHash.new(
prime: 2_120_909_159,
mod_inverse: 1_631_586_903,
random: 1_288_598_321,
max_size: 31
)
```
*Please do not use the example values used in this documentation for your
production environment. That would be silly.*
  You can set the size of the largest possible ID by passing `--bits=40`. If you
already have a prime number you can pass it in as the first argument:
`mix optimus_hash.seed --bits=62 3665010176750768309`.
"""
alias __MODULE__
alias OptimusHash.Helpers
use Bitwise
defstruct prime: nil,
mod_inverse: nil,
random: nil,
max_id: nil
@type t :: %__MODULE__{
prime: non_neg_integer,
mod_inverse: non_neg_integer,
random: non_neg_integer,
max_id: non_neg_integer
}
@doc """
Creates a new struct containing the configuration options for OptimusHash.
This struct must be passed as the first argument to `encode/2` and `decode/2`.
**NOTE:** Keep this configuration values secret.
## Options
* `:prime` - A prime number which is smaller than `:max_id`.
* `:mod_inverse` - The [modular multiplicative inverse](https://en.wikipedia.org/wiki/Modular_multiplicative_inverse)
of the provided prime number. Must fulfill the constraint
`(prime * mod_inverse) & max_id == 1`
* `:random` - A random integer smaller than `:max_id`
* `:max_size` (optional) - The maximum number of bits for the largest id.
Defaults to `31`
* `:validate` (optional) - Flag to toggle prime number and mod inverse
validation. Defaults to `true`
## Examples
iex> OptimusHash.new([prime: 1580030173, mod_inverse: 59260789, random: 1163945558])
%OptimusHash{prime: 1580030173, mod_inverse: 59260789, random: 1163945558, max_id: 2147483647}
"""
@spec new(Keyword.t()) :: OptimusHash.t()
def new(opts) do
prime = Keyword.get(opts, :prime)
mod_inverse = Keyword.get(opts, :mod_inverse)
random = Keyword.get(opts, :random)
max_id = trunc(:math.pow(2, Keyword.get(opts, :max_size, 31))) - 1
if prime > max_id do
raise ArgumentError,
"Argument :prime is larger than the largest possible id with :max_size"
end
if random > max_id do
raise ArgumentError,
"Argument :random is larger than the largest possible id with :max_size"
end
if Keyword.get(opts, :validate, true) do
if !is_integer(prime) || !Helpers.is_prime?(prime) do
raise ArgumentError, "Argument :prime is not a prime number"
end
if (prime * mod_inverse &&& max_id) !== 1 do
raise ArgumentError, "Argument :mod_inverse is invalid"
end
end
%OptimusHash{prime: prime, mod_inverse: mod_inverse, random: random, max_id: max_id}
end
@doc """
Encodes the given number and returns the result.
      iex> o = OptimusHash.new([prime: 1580030173, mod_inverse: 59260789, random: 1163945558])
      iex> OptimusHash.encode(o, 1)
      458_047_115
"""
@spec encode(OptimusHash.t(), non_neg_integer) :: non_neg_integer
def encode(o, number) when is_integer(number) do
(number * o.prime &&& o.max_id) ^^^ o.random
end
def encode(_, _), do: nil
@doc """
Decodes the given number and returns the result.
      iex> o = OptimusHash.new([prime: 1580030173, mod_inverse: 59260789, random: 1163945558])
      iex> OptimusHash.decode(o, 458_047_115)
      1
"""
@spec decode(OptimusHash.t(), non_neg_integer) :: non_neg_integer
def decode(o, number) when is_integer(number) do
(number ^^^ o.random) * o.mod_inverse &&& o.max_id
end
def decode(_, _), do: nil
end
|
lib/optimus_hash.ex
| 0.909244
| 0.897111
|
optimus_hash.ex
|
starcoder
|
defmodule Pushex.Sandbox do
@moduledoc """
Sandbox where notifications get saved when the application is running in sandbox mode.
This is meant to be used in tests, and should not be used in production.
  Note that all operations are dependent on the `pid`, so the process calling
  `Pushex.send_notification/2` and the process calling
  `Pushex.Sandbox.list_notifications/1` must be the same; otherwise the `pid`
  must be passed explicitly.
"""
use GenServer
@doc """
Records the notification. This is used by `Pushex.ResponseHandler.Sandbox` to record
requests and responses.
"""
@spec record_notification(Pushex.GCM.response, Pushex.GCM.request, {pid, reference}) :: :ok
def record_notification(response, request, info) do
GenServer.call(__MODULE__, {:record_notification, response, request, info})
end
@doc """
Wait until a notification arrives.
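  ## Example
  An illustrative call that waits up to 200ms for at least two notifications
  recorded for the calling process.
      Pushex.Sandbox.wait_notifications(timeout: 200, count: 2)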
"""
@spec wait_notifications([pid: pid, timeout: non_neg_integer, count: non_neg_integer]) :: [{Pushex.GCM.response, Pushex.GCM.request, {pid, reference}}]
def wait_notifications(opts \\ []) do
pid = opts[:pid] || self()
timeout = opts[:timeout] || 100
count = opts[:count] || 1
case list_notifications(pid: pid) do
notifications when length(notifications) < count and timeout > 0 ->
receive do
after 10 ->
wait_notifications(pid: pid, timeout: timeout - 10, count: count)
end
notifications -> notifications
end
end
@doc """
List recorded notifications keeping their order of arrival.
"""
@spec list_notifications([pid: pid]) :: [{Pushex.GCM.response, Pushex.GCM.request, {pid, reference}}]
def list_notifications(opts \\ []) do
pid = opts[:pid] || self()
GenServer.call(__MODULE__, {:list_notifications, pid})
end
@doc """
Clear all the recorded notifications.
"""
@spec clear_notifications([pid: pid]) :: :ok
def clear_notifications(opts \\ []) do
pid = opts[:pid] || self()
GenServer.call(__MODULE__, {:clear_notifications, pid})
end
@doc false
def start_link do
GenServer.start_link(__MODULE__, %{}, name: __MODULE__)
end
@doc false
def handle_call({:record_notification, response, request, {pid, _ref} = info}, _from, state) do
notifications = [{response, request, info} | Map.get(state, pid, [])]
{:reply, :ok, Map.put(state, pid, notifications)}
end
@doc false
def handle_call({:list_notifications, pid}, _from, state) do
notifications = Map.get(state, pid, []) |> Enum.reverse
{:reply, notifications, state}
end
@doc false
def handle_call({:clear_notifications, pid}, _from, state) do
{:reply, :ok, Map.put(state, pid, [])}
end
end
|
lib/pushex/sandbox.ex
| 0.777342
| 0.432243
|
sandbox.ex
|
starcoder
|
defmodule Bowling do
@doc """
Creates a new game of bowling that can be used to store the results of
the game
There are several possible states of the game:
* :ball_1 means waiting for the first roll of the frame
* {:ball_2, roll_1} means waiting for the second roll of the frame after a first roll_1 < 10
* :spare means waiting for the first roll of the frame after a spare
* :strike means waiting for the first roll of the frame after a strike
* {:strike, :strike} means waiting for the first roll of the frame after two consecutive strikes
* {:strike, roll_1} means waiting for the second roll of the frame after a strike and a first roll_1 < 10
* {:strike_fill, roll_1} means waiting for a second fill ball
* :fill_done means there are no more fill balls expected
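  ## Example
  An illustrative sequence of calls (not a doctest); a spare in the first
  frame means the next roll is counted twice.
      game = Bowling.start()
      game = Bowling.roll(game, 6)
      game = Bowling.roll(game, 4)
      game = Bowling.roll(game, 5)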
"""
@max_pins 10
@frames 10
@spec start() :: any
def start do
%{score: 0, frame: 1, state: :ball_1}
end
@doc """
Records the number of pins knocked down on a single roll. Returns `any`
unless there is something wrong with the given number of pins, in which
case it returns a helpful message.
"""
@spec roll(any, integer) :: any | String.t()
# Invalid number of pins
  def roll(_game, roll) when roll < 0,
do: {:error, "Negative roll is invalid"}
def roll(_game, roll) when roll > @max_pins,
do: {:error, "Pin count exceeds pins on the lane"}
def roll(%{state: {:ball_2, roll_1}}, roll_2) when roll_1 + roll_2 > @max_pins,
do: {:error, "Pin count exceeds pins on the lane"}
def roll(%{state: {:strike, roll_1}}, roll_2) when roll_1 + roll_2 > @max_pins,
do: {:error, "Pin count exceeds pins on the lane"}
def roll(%{state: {:strike_fill, roll_1}}, roll_2)
when roll_1 != @max_pins and roll_1 + roll_2 > @max_pins,
do: {:error, "Pin count exceeds pins on the lane"}
# Fill balls
def roll(%{frame: frame, state: :strike} = game, roll) when frame > @frames,
do: %{game | state: {:strike_fill, roll}}
def roll(%{frame: frame, score: score, state: {:strike, :strike}} = game, roll)
when frame > @frames,
do: %{game | score: score + 2 * @max_pins + roll, state: {:strike_fill, roll}}
def roll(%{frame: frame, score: score, state: {:strike_fill, roll_1}} = game, roll_2)
when frame > @frames,
do: %{game | score: score + @max_pins + roll_1 + roll_2, state: :fill_done}
def roll(%{frame: frame, score: score, state: :spare} = game, roll) when frame > @frames,
do: %{game | score: score + @max_pins + roll, state: :fill_done}
# Too many rolls
def roll(%{frame: frame}, _roll) when frame > @frames,
do: {:error, "Cannot roll after game is over"}
# Game scoring
def roll(%{frame: frame, state: :ball_1} = game, @max_pins),
do: %{game | frame: frame + 1, state: :strike}
def roll(%{state: :ball_1} = game, roll), do: %{game | state: {:ball_2, roll}}
def roll(%{frame: frame, state: {:ball_2, roll_1}} = game, roll_2)
when roll_1 + roll_2 == @max_pins,
do: %{game | frame: frame + 1, state: :spare}
def roll(%{frame: frame, score: score, state: {:ball_2, roll_1}} = game, roll_2),
do: %{game | frame: frame + 1, score: score + roll_1 + roll_2, state: :ball_1}
def roll(%{frame: frame, score: score, state: :spare} = game, @max_pins),
do: %{game | frame: frame + 1, score: score + 2 * @max_pins, state: :strike}
def roll(%{score: score, state: :spare} = game, roll),
do: %{game | score: score + @max_pins + roll, state: {:ball_2, roll}}
def roll(%{frame: frame, state: :strike} = game, @max_pins),
do: %{game | frame: frame + 1, state: {:strike, :strike}}
def roll(%{state: :strike} = game, roll), do: %{game | state: {:strike, roll}}
def roll(%{frame: frame, score: score, state: {:strike, :strike}} = game, @max_pins),
do: %{game | frame: frame + 1, score: score + 3 * @max_pins, state: {:strike, :strike}}
def roll(%{score: score, state: {:strike, :strike}} = game, roll),
do: %{game | score: score + 2 * @max_pins + roll, state: {:strike, roll}}
def roll(%{frame: frame, score: score, state: {:strike, roll_1}} = game, roll_2)
when roll_1 + roll_2 == @max_pins,
do: %{game | frame: frame + 1, score: score + 2 * @max_pins, state: :spare}
def roll(%{frame: frame, score: score, state: {:strike, roll_1}} = game, roll_2),
do: %{
game
| frame: frame + 1,
score: score + @max_pins + 2 * (roll_1 + roll_2),
state: :ball_1
}
@doc """
Returns the score of a given game of bowling if the game is complete.
If the game isn't complete, it returns a helpful message.
"""
@spec score(any) :: integer | String.t()
def score(%{frame: frame}) when frame < @frames,
do: {:error, "Score cannot be taken until the end of the game"}
def score(%{score: score, state: :ball_1}), do: score
def score(%{score: score, state: :fill_done}), do: score
def score(%{state: _other}), do: {:error, "Score cannot be taken until the end of the game"}
end
|
elixir/bowling/lib/bowling.ex
| 0.877129
| 0.645888
|
bowling.ex
|
starcoder
|
defmodule Csp.AC3 do
@moduledoc """
Pure AC-3 algorithm implementation.
Also provides `reduce/3` helper that can be used as an inference part of search algorithms.
"""
alias Csp.Constraint
@type unassigned :: [Csp.variable()]
@type domain_reduction :: {Csp.t(), Csp.assignment(), unassigned()}
@type reduce_result :: {:ok, Csp.t(), Csp.assignment(), unassigned()} | :no_solution
@doc """
Tries to solve `csp` with AC-3 algorithm, applying node and arc consistency.
Only considers unary and binary constraints; will skip n-ary constraints where n > 2.
Returns a tuple `{status, csp}`.
The returned `csp` will possibly have reduced `domains`.
If all variables have domain length of 1, we found a solution (`:solved` status is returned).
If any variable has a domain length of 0, we proved that `csp` is not solvable,
and `:no_solution` status is returned.
  If neither of those conditions is true, `:reduced` status is returned, irrespective of
  any actual domain reduction occurring.
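  ## Example
  A minimal calling sketch; it assumes `csp` was already built elsewhere with
  `:domains` and `:constraints` fields.
      case Csp.AC3.solve(csp) do
        {:solved, solved} -> solved.domains
        {:reduced, reduced} -> reduced.domains
        {:no_solution, _} -> :no_solution
      end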
"""
@spec solve(Csp.t()) :: {Csp.solver_status(), Csp.t()}
def solve(csp) do
csp = solve(csp, csp.constraints)
status = analyze(csp)
{status, csp}
end
@doc """
Reduces the `csp` with AC-3.
Accepts `csp` with `assignment` (map from variables to their assigned values),
and a list of `unassigned` variables.
Compared to `solve/2`, apart from tracking the assignment, it also uses
simplified version of domain reduction for constraint: it doesn't attempt
to track affected constraints when reducing some constraint's domain.
Returns `:no_solution` if an inconsistency is detected, or a tuple of
`{:ok, csp, assignment, unassigned}`, where domains of `csp` are reduced,
`assignment` is amended with inferred variable assignments, and
`unassigned` list is updated to reflect those assignment changes.
"""
@spec reduce(Csp.t(), Csp.assignment(), unassigned()) :: reduce_result()
def reduce(csp, assignment, unassigned) do
reduce(csp, assignment, unassigned, csp.constraints)
end
## Helpers
@spec solve(Csp.t(), [constraint :: any()]) :: Csp.t()
defp solve(csp, constraints)
defp solve(csp, [] = _constraint), do: csp
defp solve(csp, [constraint | rest]) do
case Constraint.arguments(constraint) do
# node consistency for unary constraints
[variable] ->
original_domain = Map.fetch!(csp.domains, variable)
reduced_domain =
Enum.filter(original_domain, fn value ->
Constraint.satisfies?(constraint, %{variable => value})
end)
{csp, affected_dependents} = reduce_domain(csp, constraint, variable, original_domain, reduced_domain)
constraints =
case affected_dependents do
[] -> rest
_ -> Enum.uniq(rest ++ affected_dependents)
end
solve(csp, constraints)
# arc consistency for binary constraints
[x, y] ->
{csp, constraints_to_consider_from_x} = enforce_arc_consistency(csp, constraint, x, y)
{csp, constraints_to_consider_from_y} = enforce_arc_consistency(csp, constraint, y, x)
constraints = Enum.uniq(rest ++ constraints_to_consider_from_x ++ constraints_to_consider_from_y)
solve(csp, constraints)
# skip higher arity constraints
k_ary when is_list(k_ary) ->
solve(csp, rest)
end
end
@spec enforce_arc_consistency(Csp.t(), Constraint.t(), Csp.variable(), Csp.variable()) ::
{Csp.t(), [Constraint.t()]}
defp enforce_arc_consistency(csp, constraint, x, y) do
x_original_domain = Map.fetch!(csp.domains, x)
y_original_domain = Map.fetch!(csp.domains, y)
x_reduced_domain =
Enum.filter(x_original_domain, fn x_value ->
Enum.any?(y_original_domain, fn y_value ->
Constraint.satisfies?(constraint, %{x => x_value, y => y_value})
end)
end)
reduce_domain(csp, constraint, x, x_original_domain, x_reduced_domain)
end
@spec reduce_domain(Csp.t(), Constraint.t(), Csp.variable(), Csp.domain(), Csp.domain()) ::
{Csp.t(), [Constraint.t()]}
defp reduce_domain(csp, constraint, variable, original_domain, reduced_domain) do
if length(reduced_domain) < length(original_domain) do
csp = %{csp | domains: Map.put(csp.domains, variable, reduced_domain)}
affected_dependents =
Csp.constraints_on(csp, variable)
|> List.delete(constraint)
{csp, affected_dependents}
else
{csp, []}
end
end
@spec analyze(Csp.t()) :: Csp.solver_status()
defp analyze(csp) do
Enum.reduce_while(csp.domains, :solved, fn {_variable, domain}, status ->
case length(domain) do
0 -> {:halt, :no_solution}
1 -> {:cont, status}
_ -> {:halt, :reduced}
end
end)
end
@spec reduce(Csp.t(), Csp.assignment(), unassigned(), [Csp.constraint()]) :: reduce_result()
defp reduce(csp, assignment, unassigned, constraints)
defp reduce(csp, assignment, unassigned, []), do: {:ok, csp, assignment, unassigned}
defp reduce(csp, assignment, unassigned, [constraint | remaining_constraints]) do
case Constraint.arguments(constraint) do
# node consistency for unary constraints
[variable] ->
original_domain = Map.fetch!(csp.domains, variable)
reduced_domain =
Enum.filter(original_domain, fn value ->
Constraint.satisfies?(constraint, %{variable => value})
end)
case reduced_domain do
[] ->
:no_solution
_ ->
{csp, assignment, unassigned} =
reduce_domain_and_assign(csp, variable, assignment, unassigned, reduced_domain)
reduce(csp, assignment, unassigned, remaining_constraints)
end
# arc consistency for binary constraints
[x, y] ->
{csp, assignment, unassigned} =
enforce_arc_consistency_and_assign(csp, constraint, assignment, unassigned, x, y)
{csp, assignment, unassigned} =
enforce_arc_consistency_and_assign(csp, constraint, assignment, unassigned, y, x)
reduce(csp, assignment, unassigned, remaining_constraints)
# skip higher arity constraints
k_ary when is_list(k_ary) ->
reduce(csp, assignment, unassigned, remaining_constraints)
end
end
@spec reduce_domain_and_assign(
Csp.t(),
Csp.variable(),
Csp.assignment(),
unassigned(),
Csp.domain()
) :: domain_reduction()
defp reduce_domain_and_assign(csp, variable, assignment, unassigned, reduced_domain) do
original_domain = Map.fetch!(csp.domains, variable)
domain_length = length(reduced_domain)
if domain_length < length(original_domain) do
csp = %{csp | domains: Map.put(csp.domains, variable, reduced_domain)}
if domain_length == 1 do
assignment = Map.put(assignment, variable, hd(reduced_domain))
unassigned = List.delete(unassigned, variable)
{csp, assignment, unassigned}
else
{csp, assignment, unassigned}
end
else
{csp, assignment, unassigned}
end
end
@spec enforce_arc_consistency_and_assign(
Csp.t(),
Csp.constraint(),
Csp.assignment(),
unassigned(),
Csp.variable(),
Csp.variable()
) :: domain_reduction()
defp enforce_arc_consistency_and_assign(csp, constraint, assignment, unassigned, x, y) do
x_original_domain = Map.fetch!(csp.domains, x)
y_original_domain = Map.fetch!(csp.domains, y)
x_reduced_domain =
Enum.filter(x_original_domain, fn x_value ->
Enum.any?(y_original_domain, fn y_value ->
Constraint.satisfies?(constraint, %{x => x_value, y => y_value})
end)
end)
reduce_domain_and_assign(csp, x, assignment, unassigned, x_reduced_domain)
end
end
|
lib/csp/ac3.ex
| 0.928676
| 0.838911
|
ac3.ex
|
starcoder
|
defmodule Urania do
@moduledoc """
Efficient and elegant data access for Elixir.
It's a port of a Clojure library named Urania.
A brief explanation blatantly stolen from
https://funcool.github.io/urania/latest/ ensues:
Oftentimes, your business logic relies on remote data that you need to fetch
from different sources: databases, caches, web services, or third party APIs,
and you can’t mess things up. Urania helps you to keep your business logic
clear of low-level details while performing efficiently:
* batch multiple requests to the same data source
* request data from multiple data sources concurrently
* cache previous requests
Having all this gives you the ability to access remote data sources in a
concise and consistent way, while the library handles batching and overlapping
requests to multiple data sources behind the scenes.
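  A data source is any struct that implements the `Urania.DataSource` protocol
  (`identity/1` and `fetch/2`). A minimal sketch follows; the `FriendsOf`
  struct and the `MyApp.Social.friends_of/1` call are illustrative assumptions,
  not part of this library:
  ```elixir
  defmodule FriendsOf do
    defstruct [:id]
  end
  defimpl Urania.DataSource, for: FriendsOf do
    # Requests with the same identity are deduplicated and cached within a run.
    def identity(%FriendsOf{id: id}), do: id
    def fetch(%FriendsOf{id: id}, _env), do: MyApp.Social.friends_of(id)
  end
  # Data sources can be used directly as muses:
  %FriendsOf{id: 1} |> Urania.map(&length/1) |> Urania.run!()
  ```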
"""
alias Pinky, as: P
defmodule Done do
@moduledoc false
defstruct [:value]
end
defmodule UMap do
@moduledoc false
defstruct [:f, :values]
end
defmodule FlatMap do
@moduledoc false
defstruct [:f, :values]
end
defmodule Value do
@moduledoc false
defstruct [:value]
end
defmodule Impl do
@moduledoc false
def is_data_source(x) do
!!Urania.DataSource.impl_for(x)
end
def is_ast(x) do
!!Urania.AST.impl_for(x)
end
def is_composed_ast(x) do
!!Urania.ComposedAST.impl_for(x)
end
def assert_not_ast!(x) do
      if is_ast(x), do: throw("Value is already an AST: #{inspect(x)}")
end
def is_batched_source(x) do
!!Urania.BatchedSource.impl_for(x)
end
def inject_into(env, node) do
if is_data_source(node) do
cached_or(env, node)
else
Urania.AST.inject(node, env)
end
end
def comp(f, g) do
fn x -> f.(g.(x)) end
end
def identity(x) do
x
end
def resource_name(res), do: to_string(Map.get(res, :__struct__))
def cache_id(res), do: Urania.DataSource.identity(res)
def cached_or(env, res) do
cache = env[:cache]
cached = cache |> Map.get(resource_name(res), %{}) |> Map.get(cache_id(res), :not_found)
if :not_found == cached do
%UMap { f: &identity/1, values: [res] }
else
%Done { value: cached }
end
end
def run_fetch(env, muse) do
P.promise(fn -> Urania.DataSource.fetch(muse, env) end)
end
def run_fetch_multi(env, muse, muses) do
P.promise(fn -> Urania.BatchedSource.fetch_multi(muse, muses, env) end)
end
def fetch_many_caching(opts, sources) do
ids = Enum.map(sources, &cache_id/1)
response_tasks = Enum.map(sources, fn x -> run_fetch(opts, x) end)
P.all(response_tasks)
|> P.map(fn responses ->
Enum.reduce(Enum.zip(ids, responses), %{}, fn({id, response}, acc) -> Map.put(acc, id, response) end)
end)
end
def fetch_one_caching(opts, source) do
run_fetch(opts, source)
|> P.map(fn result ->
%{cache_id(source) => result}
end)
end
def fetch_sources(opts, [source]) do
fetch_one_caching(opts, source)
end
def fetch_sources(opts, [head | tail]) do
if is_batched_source(head) do
run_fetch_multi(opts, head, tail)
else
fetch_many_caching(opts, [head | tail])
end
end
def dedupe_sources(sources) do
values = sources |> Enum.group_by(&cache_id/1) |> Map.values
Enum.map(values, &List.first/1)
end
def fetch_resource(opts, {resource_name, sources}) do
fetch_sources(opts, dedupe_sources(sources))
|> P.map(fn results ->
{resource_name, results}
end)
end
def next_level(node) do
if is_data_source(node) do
[node]
else
children = Urania.AST.children(node)
if children do
Enum.concat(Enum.map(Urania.AST.children(node), &next_level/1))
else
[]
end
end
end
def interpret_ast(node, opts) do
ast_node = inject_into(opts, node)
requests = next_level(ast_node)
if Enum.empty?(requests) do
if Urania.AST.done?(ast_node) do
P.resolved({ast_node.value, opts[:cache]})
else
interpret_ast(ast_node, opts)
end
else
requests_by_type = Map.to_list(Enum.group_by(requests, &resource_name/1))
response_tasks = Enum.map(requests_by_type, fn x -> fetch_resource(opts, x) end)
P.all(response_tasks)
|> P.flat_map(fn responses ->
cache_map = Enum.reduce(responses, %{}, fn ({k, v}, acc) -> Map.put(acc, k, v) end)
next_cache = Map.merge(opts[:cache], cache_map, &Map.merge/2)
next_opts = Map.put(opts, :cache, next_cache)
interpret_ast(ast_node, next_opts)
end)
end
end
def run_defaults, do: %{cache: %{}}
end
defprotocol DataSource do
def identity(this)
def fetch(this, env)
end
defprotocol BatchedSource do
def fetch_multi(this, resources, env)
end
defprotocol AST do
@moduledoc false
def children(this)
def inject(this, env)
def done?(this)
end
defprotocol ComposedAST do
@moduledoc false
def compose_ast(this, f)
end
defimpl ComposedAST, for: Done do
def compose_ast(this, f2), do: %Done { value: f2.(this.value) }
end
defimpl AST, for: Done do
def children(_), do: nil
def done?(_), do: true
def inject(this, _), do: this
end
defimpl ComposedAST, for: UMap do
import Urania.Impl
def compose_ast(this, f2), do: %UMap { f: comp(f2, this.f), values: this.values }
end
defimpl AST, for: UMap do
def children(this), do: this.values
def done?(_), do: false
def inject(this, env) do
import Urania.Impl
next = Enum.map(this.values, fn x -> inject_into(env, x) end)
if Enum.all?(next, &AST.done?/1) do
if Enum.count(next) == 1 do
%Done { value: this.f.(List.first(next).value) }
else
%Done { value: this.f.(Enum.map(next, &(&1.value))) }
end
else
%UMap { f: this.f, values: next }
end
end
end
defimpl ComposedAST, for: FlatMap do
import Urania.Impl
def compose_ast(this, f2), do: %UMap { f: comp(f2, this.f), values: this.values }
end
defimpl AST, for: FlatMap do
import Urania.Impl
def children(this), do: this.values
def done?(_), do: false
def inject(this, env) do
next = Enum.map(this.values, fn x -> inject_into(env, x) end)
if Enum.all?(next, &AST.done?/1) do
result = if Enum.count(next) == 1 do
this.f.(List.first(next).value)
else
this.f.(Enum.map(next, &(&1.value)))
end
result = inject_into(env, result)
if is_data_source(result) do
%UMap { f: &identity/1, values: [result] }
else
result
end
else
%FlatMap { f: this.f, values: next }
end
end
end
defimpl ComposedAST, for: Value do
def compose_ast(this, f2), do: %UMap { f: f2, values: [this.value] }
end
defimpl AST, for: Value do
import Urania.Impl
def children(this), do: [this.value]
def done?(_), do: false
def inject(this, env) do
next = Impl.inject_into(env, this.value)
if AST.done?(next) do
%Done { value: next }
else
next
end
end
end
@doc """
Constructs a muse that will evaluate to a predefined value.
## Examples
iex> Urania.run!(Urania.value(3))
{:ok, 3}
"""
def value(v) do
import Urania.Impl
assert_not_ast!(v)
%Done { value: v }
end
@doc """
Returns a new muse that will have a function applied to its value.
## Examples
iex> Urania.value(3) |> Urania.map(fn x -> x + 1 end) |> Urania.run!
{:ok, 4}
"""
def map(muses, f) when is_list(muses) do
import Urania.Impl
if Enum.count(muses) == 1 and is_composed_ast(List.first(muses)) do
ComposedAST.compose_ast(List.first(muses), f)
else
%UMap { f: f, values: muses }
end
end
def map(muse, f) do
%UMap { f: f, values: [muse] }
end
@doc """
Returns a new muse that will have a function applied to its value, assuming
the function will return another muse.
## Examples
iex> Urania.value(3) |> Urania.flat_map(fn x -> Urania.value(x + 1) end) |> Urania.run!
{:ok, 4}
"""
def flat_map(muses, f) when is_list(muses) do
%FlatMap { f: f, values: muses }
end
def flat_map(muse, f) do
flat_map([muse], f)
end
@doc """
Groups a list of muses and returns a new muse that will evaluate to a list of
all the muses' results.
## Examples
iex> Urania.collect([Urania.value(3), Urania.value(5)]) |> Urania.run!
{:ok, [3, 5]}
"""
def collect([]) do
value([])
end
def collect(muses) do
import Urania.Impl
map(muses, &identity/1)
end
@doc """
Groups a list of muses and returns a new muse that will evaluate to a list of
all the muses' results.
## Examples
iex> [Urania.value(3), Urania.value(5)]
...> |> Urania.traverse(fn x -> Urania.value(x + 1) end)
...> |> Urania.run!
{:ok, [4, 6]}
"""
def traverse(muses, f) do
flat_map(muses, fn xs -> collect(Enum.map(xs, f)) end)
end
@doc """
Runs a Urania muse and returns a Pinky promise of { result, data_source_cache_map }.
## Examples
iex> Urania.value(3) |> Urania.execute |> Pinky.extract
{:ok, {3, %{}}}
"""
def execute(ast_node) do
import Urania.Impl
execute(ast_node, run_defaults())
end
def execute(ast_node, opts) do
import Urania.Impl
interpret_ast(ast_node, Map.merge(run_defaults(), opts))
end
@doc """
Runs a Urania muse and returns a Pinky promise of the result, discarding the cache.
## Examples
iex> Urania.value(3) |> Urania.run |> Pinky.extract
{:ok, 3}
"""
def run(ast) do
import Urania.Impl
run(ast, run_defaults())
end
def run(ast, opts) do
execute(ast, opts) |> P.map(fn ({val, _cache}) -> val end)
end
@doc """
Runs a Urania muse and extracts it from the promise. It blocks until the
run is completed.
## Examples
iex> Urania.value(3) |> Urania.run!
{:ok, 3}
"""
def run!(ast) do
P.extract(run(ast))
end
def run!(ast, opts) do
P.extract(run(ast, opts))
end
end
|
lib/urania.ex
| 0.684791
| 0.628664
|
urania.ex
|
starcoder
|
defmodule Akd.DestinationResolver do
@moduledoc """
  This module defines helper functions which can be used to resolve
  a destination based on the deployment and the destination type.
"""
alias Akd.{Destination, Deployment}
@doc """
This function takes a `destination` variable and a `Deployment.t` struct.
`destination` variable could be either a `Destination.t` struct or one of the
atoms: `:build, :publish, :local`
This function returns a resolved `Destination.t` struct.
## Examples
When a `Destination.t` struct is passed:
iex> destination = Akd.Destination.local()
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.DestinationResolver.resolve(destination, deployment)
%Akd.Destination{user: :current, host: :local, path: "."}
When `:build` is passed:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("build"),
...> publish_to: Akd.Destination.local("publish"),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.DestinationResolver.resolve(:build, deployment)
%Akd.Destination{user: :current, host: :local, path: "build"}
When `:publish` is passed:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("build"),
...> publish_to: Akd.Destination.local("publish"),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.DestinationResolver.resolve(:publish, deployment)
%Akd.Destination{user: :current, host: :local, path: "publish"}
When `:local` is passed:
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("build"),
...> publish_to: Akd.Destination.local("publish"),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.DestinationResolver.resolve(:local, deployment)
%Akd.Destination{user: :current, host: :local, path: "."}
"""
@spec resolve(Destination.t() | :build | :publish | :local, Deployment.t()) :: Destination.t()
def resolve(dest, deployment)
def resolve(%Destination{} = dest, _deployment), do: dest
def resolve(:build, deployment), do: deployment.build_at
def resolve(:publish, deployment), do: deployment.publish_to
def resolve(:local, _deployment), do: Destination.local()
end
|
lib/akd/helpers/destination_resolver.ex
| 0.865025
| 0.585309
|
destination_resolver.ex
|
starcoder
|
defmodule Cldr.Http do
@moduledoc """
Supports securely downloading https content.
"""
@doc """
Securely download https content from
a URL.
  This function uses the built-in `:httpc`
  client but enables certificate verification,
  which `:httpc` does not enable by default.
### Arguments
* `url` is a binary URL
### Returns
* `{:ok, body}` if the return is successful
* `{:error, error}` if the download is
unsuccessful. An error will also be logged
in these cases.
### Certificate stores
In order to keep dependencies to a minimum,
`get/1` attempts to locate an already installed
certificate store. It will try to locate a
store in the following order which is intended
to satisfy most host systems. The certificate
store is expected to be a path name on the
host system.
```elixir
# A certificate store configured by the
# developer
Application.get_env(:ex_cldr, :cacertfile)
# Populated if hex package `CAStore` is configured
CAStore.file_path()
# Populated if hex package `certfi` is configured
:certifi.cacertfile()
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL 6
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenELEC
"/etc/pki/tls/cacert.pem",
# CentOS/RHEL 7
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
# Open SSL on MacOS
"/usr/local/etc/openssl/cert.pem",
# MacOS & Alpine Linux
"/etc/ssl/cert.pem"
```
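  ### Example
  A minimal sketch; the URL is a placeholder and the body is returned in
  whatever form `:httpc` delivers it.
      case Cldr.Http.get("https://example.com/data.json") do
        {:ok, body} -> body
        {:error, _reason} -> nil
      end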
"""
@spec get(String.t) :: {:ok, binary} | {:error, any}
def get(url) when is_binary(url) do
require Logger
url = String.to_charlist(url)
case :httpc.request(:get, {url, headers()}, https_opts(), []) do
{:ok, {{_version, 200, 'OK'}, _headers, body}} ->
{:ok, body}
{_, {{_version, code, message}, _headers, _body}} ->
Logger.bare_log(
:error,
"Failed to download #{url}. " <>
"HTTP Error: (#{code}) #{inspect(message)}"
)
{:error, code}
{:error, {:failed_connect, [{_, {host, _port}}, {_, _, sys_message}]}} ->
Logger.bare_log(
:error,
"Failed to connect to #{inspect(host)} to download #{inspect url}"
)
{:error, sys_message}
      {:error, other} ->
Logger.bare_log(
:error,
"Failed to download #{inspect url}. Error #{inspect other}"
)
{:error, other}
end
end
defp headers do
# [{'Connection', 'close'}]
[]
end
@certificate_locations [
# Configured cacertfile
Application.get_env(:ex_cldr, :cacertfile),
# Populated if hex package CAStore is configured
if(Code.ensure_loaded?(CAStore), do: CAStore.file_path()),
# Populated if hex package certfi is configured
if(Code.ensure_loaded?(:certifi), do: :certifi.cacertfile() |> List.to_string),
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL 6
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenELEC
"/etc/pki/tls/cacert.pem",
# CentOS/RHEL 7
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
# Open SSL on MacOS
"/usr/local/etc/openssl/cert.pem",
# MacOS & Alpine Linux
"/etc/ssl/cert.pem"
]
|> Enum.reject(&is_nil/1)
defp certificate_store do
@certificate_locations
|> Enum.find(&File.exists?/1)
|> raise_if_no_cacertfile!
|> :erlang.binary_to_list
end
defp raise_if_no_cacertfile!(nil) do
raise RuntimeError, """
No certificate trust store was found.
Tried looking for: #{inspect @certificate_locations}
A certificate trust store is required in
order to download locales for your configuration.
Since ex_cldr could not detect a system
installed certificate trust store one of the
following actions may be taken:
1. Install the hex package `castore`. It will
be automatically detected after recompilation.
2. Install the hex package `certifi`. It will
    be automatically detected after recompilation.
3. Specify the location of a certificate trust store
by configuring it in `config.exs`:
config :ex_cldr,
cacertfile: "/path/to/cacertfile",
...
"""
end
defp raise_if_no_cacertfile!(file) do
file
end
defp https_opts do
[ssl:
[
verify: :verify_peer,
cacertfile: certificate_store(),
customize_hostname_check: [
match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
]
]
]
end
end
|
lib/cldr/http/http.ex
| 0.80406
| 0.69311
|
http.ex
|
starcoder
|
defmodule Scenic.Primitive.Style do
@moduledoc """
Modify the look of a primitive by applying a Style.
Styles are optional modifiers that you can put on any primitive. Each style does a specific thing and some only affect certain primitives.
There is a fixed list of primitive styles which are understood by the drivers. Some Components may introduce their own optional styles, but the only ones sent down to the drivers for rendering are contained in the list below.
In general, the primitive styles are each defined in their own module, but you apply them as options in a primitive's option list.
For example, to use the style defined in the module Scenic.Primitive.Style.Font you would define an option on a text primitive like this:
graph =
Graph.build
|> text( "Styled Text", font: :roboto_slab )
## Primitive Styles
* [`Cap`](Scenic.Primitive.Style.Cap.html) sets how to draw the end of a line.
* [`ClearColor`](Scenic.Primitive.Style.ClearColor.html) sets the background color.
* [`Fill`](Scenic.Primitive.Style.Fill.html) fills in a primitive with a [paint style](overview_styles.html#primitive-paint-styles).
* [`Font`](Scenic.Primitive.Style.Font.html) sets the font to use to draw text.
* [`FontBlur`](Scenic.Primitive.Style.FontBlur.html) applies a blur effect to text.
  * [`FontSize`](Scenic.Primitive.Style.FontSize.html) sets the point size of text.
* [`Hidden`](Scenic.Primitive.Style.Hidden.html) a flag that sets if a primitive is drawn at all.
* [`Join`](Scenic.Primitive.Style.Join.html) sets how to render the intersection of two lines. Works on the intersections of other primitives as well.
* [`MiterLimit`](Scenic.Primitive.Style.MiterLimit.html) sets whether or not to miter a joint if the intersection of two lines is very sharp.
* [`Scissor`](Scenic.Primitive.Style.Scissor.html) defines a rectangle that drawing will be clipped to.
* [`Stroke`](Scenic.Primitive.Style.Stroke.html) defines how to draw the edge of a primitive. Specifies both a width and a [paint style](overview_styles.html#primitive-paint-styles).
* [`TextAlign`](Scenic.Primitive.Style.TextAlign.html) sets the alignment of text relative to the starting point. Examples: :left, :center, or :right
* [`Theme`](Scenic.Primitive.Style.Theme.html) a collection of default colors. Usually passed to components, telling them how to draw in your preferred color scheme.
## Primitive Paint Styles
  The `Fill` and `Stroke` styles accept a paint type. This describes what to fill or stroke the primitive with.
There is a fixed set of paint types that the drivers know how to render.
* [`BoxGradient`](Scenic.Primitive.Style.Paint.BoxGradient.html) fills a primitive with a box gradient.
* [`Color`](Scenic.Primitive.Style.Paint.Color.html) fills a primitive with a solid color.
* [`Image`](Scenic.Primitive.Style.Paint.Image.html) fills a primitive with an image that is loaded into `Scenic.Cache`.
* [`LinearGradient`](Scenic.Primitive.Style.Paint.LinearGradient.html) fills a primitive with a linear gradient.
* [`RadialGradient`](Scenic.Primitive.Style.Paint.RadialGradient.html) fills a primitive with a radial gradient.
### Specifying Paint
  When you use either `Fill` or `Stroke`, you specify the paint in a tuple like this.
graph =
Graph.build
|> circle( 100, fill: {:color, :green}, stroke: {2, {:color, :blue}} )
Each paint type has specific values it expects in order to draw. See the documentation for that paint type for details.
### Color Paint
Specifying a solid color to paint is very common, so has a shortcut. If you simply set a valid color as the paint type, it is assumed that you mean `Color`.
graph =
Graph.build
|> circle( 100, fill: :green, stroke: {2, :blue} ) # simple color
|> rect( {100, 200}, fill: {:green, 128} ) # color with alpha
|> rect( {100, 100}, fill: {10, 20, 30, 40} ) # red, green, blue, alpha
"""
alias Scenic.Primitive.Style
# import IEx
@style_name_map %{
:hidden => Style.Hidden,
:clear_color => Style.ClearColor,
:texture_wrap => Style.TextureWrap,
:texture_filter => Style.TextureFilter,
:fill => Style.Fill,
:stroke => Style.Stroke,
:join => Style.Join,
:cap => Style.Cap,
:miter_limit => Style.MiterLimit,
:font => Style.Font,
:font_blur => Style.FontBlur,
:font_size => Style.FontSize,
:text_align => Style.TextAlign,
:text_height => Style.TextHeight,
:scissor => Style.Scissor,
:theme => Style.Theme
}
@primitive_styles [
:hidden,
:clear_color,
:texture_wrap,
:texture_filter,
:fill,
:stroke,
:join,
:cap,
:miter_limit,
:font,
:font_blur,
:font_size,
:text_align,
:text_height,
:scissor,
:theme
]
@callback info(data :: any) :: bitstring
@callback verify(any) :: boolean
# ===========================================================================
defmodule FormatError do
defexception message: nil, module: nil, data: nil
end
# ===========================================================================
# defmacro __using__([type_code: type_code]) when is_integer(type_code) do
defmacro __using__(_opts) do
quote do
@behaviour Scenic.Primitive.Style
@doc false
def verify!(data) do
case verify(data) do
true ->
data
false ->
raise FormatError, message: info(data), module: __MODULE__, data: data
end
end
@doc false
def normalize(data), do: data
# --------------------------------------------------------
defoverridable normalize: 1
end
# quote
end
# defmacro
# ===========================================================================
@doc false
def verify(style_key, style_data) do
case Map.get(@style_name_map, style_key) do
# don't verify non-primitives
nil -> true
module -> module.verify(style_data)
end
end
# ===========================================================================
@doc false
def verify!(style_key, style_data) do
case Map.get(@style_name_map, style_key) do
nil -> style_data
module -> module.verify!(style_data)
end
end
# ===========================================================================
# normalize the format of the style data
@doc false
def normalize(style_type, data)
def normalize(style_type, data) do
case Map.get(@style_name_map, style_type) do
nil ->
nil
mod ->
mod.verify!(data)
mod.normalize(data)
end
end
# ===========================================================================
# filter a style map so only the primitive types remain
@doc false
def primitives(style_map)
def primitives(style_map) do
Enum.reduce(@primitive_styles, %{}, fn k, acc ->
case Map.get(style_map, k) do
nil -> acc
v -> Map.put(acc, k, normalize(k, v))
end
end)
end
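# Illustrative usage (a sketch, not part of Scenic): keys that are not primitive
# styles are dropped, and the surviving values are verified and normalized by
# their style modules. The exact normalized form of the fill depends on the
# Color paint style.
#
#   Scenic.Primitive.Style.primitives(%{fill: :green, width: 100})
#   # => %{fill: <normalized color paint>}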
end
defmodule AWS.RDS do
@moduledoc """
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service that makes it
easier to set up, operate, and scale a relational database in the cloud.
It provides cost-efficient, resizeable capacity for an industry-standard
relational database and manages common database administration tasks, freeing up
developers to focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL,
Microsoft SQL Server, Oracle, or Amazon Aurora database server. These
capabilities mean that the code, applications, and tools you already use today
with your existing databases work with Amazon RDS without modification. Amazon
RDS automatically backs up your database and maintains the database software
that powers your DB instance. Amazon RDS is flexible: you can scale your DB
instance's compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front investments, and
you pay only for the resources you use.
This interface reference for Amazon RDS contains documentation for a programming
or command line interface you can use to manage Amazon RDS. Amazon RDS is
asynchronous, which means that some interfaces might require techniques such as
polling or callback functions to determine when a command has been applied. In
this reference, the parameter descriptions indicate whether a command is applied
immediately, on the next instance reboot, or during the maintenance window. The
reference structure is as follows, and we list following some related topics
from the user guide.
## Amazon RDS API Reference
* For the alphabetical list of API actions, see [API Actions](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Operations.html).
* For the alphabetical list of data types, see [Data Types](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Types.html).
* For a list of common query parameters, see [Common Parameters](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonParameters.html).
* For descriptions of the error codes, see [Common Errors](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonErrors.html).
## Amazon RDS User Guide
* For a summary of the Amazon RDS interfaces, see [Available RDS Interfaces](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces).
* For more information about how to use the Query API, see [Using the Query
API](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Using_the_Query_API.html).
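## Usage Sketch
The following is a minimal, illustrative example of calling this module. The
client constructor, credential values, and the exact shape of the return tuple
come from the surrounding `AWS` library and are assumptions, not part of the RDS
API itself.

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
    # List the RDS DB instances visible to this account and region.
    {:ok, result, _http_response} = AWS.RDS.describe_db_instances(client, %{})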
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Amazon RDS",
api_version: "2014-10-31",
content_type: "application/x-www-form-urlencoded",
credential_scope: nil,
endpoint_prefix: "rds",
global?: false,
protocol: "query",
service_id: "RDS",
signature_version: "v4",
signing_name: "rds",
target_prefix: nil
}
end
@doc """
Associates an Identity and Access Management (IAM) role with an Amazon Aurora DB
cluster.
For more information, see [Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your
Behalf](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.Authorizing.html)
in the *Amazon Aurora User Guide*.
This action only applies to Aurora DB clusters.
"""
def add_role_to_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddRoleToDBCluster", input, options)
end
@doc """
Associates an AWS Identity and Access Management (IAM) role with a DB instance.
To add a role to a DB instance, the status of the DB instance must be
`available`.
"""
def add_role_to_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddRoleToDBInstance", input, options)
end
@doc """
Adds a source identifier to an existing RDS event notification subscription.
"""
def add_source_identifier_to_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddSourceIdentifierToSubscription", input, options)
end
@doc """
Adds metadata tags to an Amazon RDS resource.
These tags can also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in a Condition statement in an IAM
policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html).
"""
def add_tags_to_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToResource", input, options)
end
@doc """
Applies a pending maintenance action to a resource (for example, to a DB
instance).
"""
def apply_pending_maintenance_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ApplyPendingMaintenanceAction", input, options)
end
@doc """
Enables ingress to a DBSecurityGroup using one of two forms of authorization.
First, EC2 or VPC security groups can be added to the DBSecurityGroup if the
application using the database is running on EC2 or VPC instances. Second, IP
ranges are available if the application accessing your database is running on
the Internet. Required parameters for this API are one of CIDR range,
EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).
You can't authorize ingress from an EC2 security group in one AWS Region to an
Amazon RDS DB instance in another. You can't authorize ingress from a VPC
security group in one VPC to an Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the [Wikipedia Tutorial](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
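An illustrative call using the EC2 security group form described above (the
`DBSecurityGroupName` key and the literal values are assumptions):

    AWS.RDS.authorize_db_security_group_ingress(client, %{
      "DBSecurityGroupName" => "mydbsecuritygroup",
      "EC2SecurityGroupName" => "myec2securitygroup",
      "EC2SecurityGroupOwnerId" => "123456789012"
    })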
"""
def authorize_db_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AuthorizeDBSecurityGroupIngress", input, options)
end
@doc """
Backtracks a DB cluster to a specific time, without creating a new DB cluster.
For more information on backtracking, see [ Backtracking an Aurora DB Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Backtrack.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora MySQL DB clusters.
"""
def backtrack_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BacktrackDBCluster", input, options)
end
@doc """
Cancels an export task in progress that is exporting a snapshot to Amazon S3.
Any data that has already been written to the S3 bucket isn't removed.
"""
def cancel_export_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelExportTask", input, options)
end
@doc """
Copies the specified DB cluster parameter group.
This action only applies to Aurora DB clusters.
"""
def copy_db_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyDBClusterParameterGroup", input, options)
end
@doc """
Copies a snapshot of a DB cluster.
To copy a DB cluster snapshot from a shared manual DB cluster snapshot,
`SourceDBClusterSnapshotIdentifier` must be the Amazon Resource Name (ARN) of
the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another AWS Region. In that
case, the AWS Region where you call the `CopyDBClusterSnapshot` action is the
destination AWS Region for the encrypted DB cluster snapshot to be copied to. To
copy an encrypted DB cluster snapshot from another AWS Region, you must provide
the following values:
* `KmsKeyId` - The AWS Key Management System (AWS KMS) key
identifier for the key to use to encrypt the copy of the DB cluster snapshot in
the destination AWS Region.
* `PreSignedUrl` - A URL that contains a Signature Version 4 signed
request for the `CopyDBClusterSnapshot` action to be called in the source AWS
Region where the DB cluster snapshot is copied from. The pre-signed URL must be
a valid request for the `CopyDBClusterSnapshot` API action that can be executed
in the source AWS Region that contains the encrypted DB cluster snapshot to be
copied.
The pre-signed URL request must contain the following parameter values:
* `KmsKeyId` - The AWS KMS key identifier for the
customer master key (CMK) to use to encrypt the copy of the DB cluster snapshot
in the destination AWS Region. This is the same identifier for both the
`CopyDBClusterSnapshot` action that is called in the destination AWS Region, and
the action contained in the pre-signed URL.
* `DestinationRegion` - The name of the AWS Region that
the DB cluster snapshot is to be created in.
* `SourceDBClusterSnapshotIdentifier` - The DB cluster
snapshot identifier for the encrypted DB cluster snapshot to be copied. This
identifier must be in the Amazon Resource Name (ARN) format for the source AWS
Region. For example, if you are copying an encrypted DB cluster snapshot from
the us-west-2 AWS Region, then your `SourceDBClusterSnapshotIdentifier` looks
like the following example:
`arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115`.
To learn how to generate a Signature Version 4 signed request, see [
Authenticating Requests: Using Query Parameters (AWS Signature Version
4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
and [ Signature Version 4 Signing Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
If you are using an AWS SDK tool or the AWS CLI, you can specify `SourceRegion`
(or `--source-region` for the AWS CLI) instead of specifying `PreSignedUrl`
manually. Specifying `SourceRegion` autogenerates a pre-signed URL that is a
valid request for the operation that can be executed in the source AWS Region.
* `TargetDBClusterSnapshotIdentifier` - The identifier for the new
copy of the DB cluster snapshot in the destination AWS Region.
* `SourceDBClusterSnapshotIdentifier` - The DB cluster snapshot
identifier for the encrypted DB cluster snapshot to be copied. This identifier
must be in the ARN format for the source AWS Region and is the same value as the
`SourceDBClusterSnapshotIdentifier` in the pre-signed URL.
To cancel the copy operation once it is in progress, delete the target DB
cluster snapshot identified by `TargetDBClusterSnapshotIdentifier` while that DB
cluster snapshot is in "copying" status.
For more information on copying encrypted DB cluster snapshots from one AWS
Region to another, see [ Copying a Snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html)
in the *Amazon Aurora User Guide.*
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
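An illustrative cross-Region copy using `SourceRegion` instead of a manually
built pre-signed URL. The parameter names come from the description above; the
identifiers other than the example ARN are assumptions:

    AWS.RDS.copy_db_cluster_snapshot(client, %{
      "SourceDBClusterSnapshotIdentifier" =>
        "arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115",
      "TargetDBClusterSnapshotIdentifier" => "copy-of-aurora-cluster1-snapshot",
      "KmsKeyId" => "my-destination-region-kms-key",
      "SourceRegion" => "us-west-2"
    })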
"""
def copy_db_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyDBClusterSnapshot", input, options)
end
@doc """
Copies the specified DB parameter group.
"""
def copy_db_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyDBParameterGroup", input, options)
end
@doc """
Copies the specified DB snapshot.
The source DB snapshot must be in the `available` state.
You can copy a snapshot from one AWS Region to another. In that case, the AWS
Region where you call the `CopyDBSnapshot` action is the destination AWS Region
for the DB snapshot copy.
For more information about copying snapshots, see [Copying a DB Snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html#USER_CopyDBSnapshot)
in the *Amazon RDS User Guide.*
"""
def copy_db_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyDBSnapshot", input, options)
end
@doc """
Copies the specified option group.
"""
def copy_option_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyOptionGroup", input, options)
end
@doc """
Creates a custom Availability Zone (AZ).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere
cluster.
For more information about RDS on VMware, see the [ RDS on VMware User Guide.](https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html)
"""
def create_custom_availability_zone(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCustomAvailabilityZone", input, options)
end
@doc """
Creates a new Amazon Aurora DB cluster.
You can use the `ReplicationSourceIdentifier` parameter to create the DB cluster
as a read replica of another DB cluster or Amazon RDS MySQL DB instance. For
cross-region replication where the DB cluster identified by
`ReplicationSourceIdentifier` is encrypted, you must also specify the
`PreSignedUrl` parameter.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def create_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBCluster", input, options)
end
@doc """
Creates a new custom endpoint and associates it with an Amazon Aurora DB
cluster.
This action only applies to Aurora DB clusters.
"""
def create_db_cluster_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBClusterEndpoint", input, options)
end
@doc """
Creates a new DB cluster parameter group.
Parameters in a DB cluster parameter group apply to all of the instances in a DB
cluster.
A DB cluster parameter group is initially created with the default parameters
for the database engine used by instances in the DB cluster. To provide custom
values for any of the parameters, you must modify the group after creating it
using `ModifyDBClusterParameterGroup`. Once you've created a DB cluster
parameter group, you need to associate it with your DB cluster using
`ModifyDBCluster`. When you associate a new DB cluster parameter group with a
running DB cluster, you need to reboot the DB instances in the DB cluster
without failover for the new DB cluster parameter group and associated settings
to take effect.
After you create a DB cluster parameter group, you should wait at least 5
minutes before creating your first DB cluster that uses that DB cluster
parameter group as the default parameter group. This allows Amazon RDS to fully
complete the create action before the DB cluster parameter group is used as the
default for a new DB cluster. This is especially important for parameters that
are critical when creating the default database for a DB cluster, such as the
character set for the default database defined by the `character_set_database`
parameter. You can use the *Parameter Groups* option of the [Amazon RDS console](https://console.aws.amazon.com/rds/) or the
`DescribeDBClusterParameters` action to verify that your DB cluster parameter
group has been created or modified.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def create_db_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBClusterParameterGroup", input, options)
end
@doc """
Creates a snapshot of a DB cluster.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def create_db_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBClusterSnapshot", input, options)
end
@doc """
Creates a new DB instance.
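A minimal illustrative input map (all values and the specific parameter set are
assumptions; see the RDS `CreateDBInstance` reference for the required
parameters of each engine):

    AWS.RDS.create_db_instance(client, %{
      "DBInstanceIdentifier" => "mydbinstance",
      "DBInstanceClass" => "db.t3.micro",
      "Engine" => "mysql",
      "MasterUsername" => "admin",
      "MasterUserPassword" => "change-me",
      "AllocatedStorage" => 20
    })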
"""
def create_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBInstance", input, options)
end
@doc """
Creates a new DB instance that acts as a read replica for an existing source DB
instance.
You can create a read replica for a DB instance running MySQL, MariaDB, Oracle,
PostgreSQL, or SQL Server. For more information, see [Working with Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)
in the *Amazon RDS User Guide*.
Amazon Aurora doesn't support this action. Call the `CreateDBInstance` action to
create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB
instance attributes (including DB security groups and DB parameter groups) are
inherited from the source DB instance, except as specified.
Your source DB instance must have backup retention enabled.
"""
def create_db_instance_read_replica(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBInstanceReadReplica", input, options)
end
@doc """
Creates a new DB parameter group.
A DB parameter group is initially created with the default parameters for the
database engine used by the DB instance. To provide custom values for any of the
parameters, you must modify the group after creating it using
*ModifyDBParameterGroup*. Once you've created a DB parameter group, you need to
associate it with your DB instance using *ModifyDBInstance*. When you associate
a new DB parameter group with a running DB instance, you need to reboot the DB
instance without failover for the new DB parameter group and associated settings
to take effect.
After you create a DB parameter group, you should wait at least 5 minutes before
creating your first DB instance that uses that DB parameter group as the default
parameter group. This allows Amazon RDS to fully complete the create action
before the parameter group is used as the default for a new DB instance. This is
especially important for parameters that are critical when creating the default
database for a DB instance, such as the character set for the default database
defined by the `character_set_database` parameter. You can use the *Parameter
Groups* option of the [Amazon RDS console](https://console.aws.amazon.com/rds/)
or the *DescribeDBParameters* command to verify that your DB parameter group has
been created or modified.
"""
def create_db_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBParameterGroup", input, options)
end
@doc """
Creates a new DB proxy.
"""
def create_db_proxy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBProxy", input, options)
end
@doc """
Creates a `DBProxyEndpoint`.
Only applies to proxies that are associated with Aurora DB clusters. You can use
DB proxy endpoints to specify read/write or read-only access to the DB cluster.
You can also use DB proxy endpoints to access a DB proxy through a different VPC
than the proxy's default VPC.
"""
def create_db_proxy_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBProxyEndpoint", input, options)
end
@doc """
Creates a new DB security group.
DB security groups control access to a DB instance.
A DB security group controls access to EC2-Classic DB instances that are not in
a VPC.
"""
def create_db_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBSecurityGroup", input, options)
end
@doc """
Creates a snapshot of a DB instance.
The source DB instance must be in the `available` or `storage-optimization`
state.
"""
def create_db_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBSnapshot", input, options)
end
@doc """
Creates a new DB subnet group.
DB subnet groups must contain at least one subnet in at least two AZs in the AWS
Region.
"""
def create_db_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDBSubnetGroup", input, options)
end
@doc """
Creates an RDS event notification subscription.
This action requires a topic Amazon Resource Name (ARN) created by either the
RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you
must create a topic in Amazon SNS and subscribe to the topic. The ARN is
displayed in the SNS console.
You can specify the type of source (`SourceType`) that you want to be notified
of and provide a list of RDS sources (`SourceIds`) that triggers the events. You
can also provide a list of event categories (`EventCategories`) for events that
you want to be notified of. For example, you can specify `SourceType` =
`db-instance`, `SourceIds` = `mydbinstance1`, `mydbinstance2` and
`EventCategories` = `Availability`, `Backup`.
If you specify both the `SourceType` and `SourceIds`, such as `SourceType` =
`db-instance` and `SourceIdentifier` = `myDBInstance1`, you are notified of all
the `db-instance` events for the specified source. If you specify a `SourceType`
but do not specify a `SourceIdentifier`, you receive notice of the events for
that source type for all your RDS sources. If you don't specify either the
SourceType or the `SourceIdentifier`, you are notified of events generated from
all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you
specify an encrypted SNS topic, event notifications aren't sent for the topic.
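An illustrative input map based on the example values above. `SubscriptionName`
and `SnsTopicArn` are additional required parameters of the underlying API, and
the list encoding shown here is an assumption handled by the library's query
serializer:

    AWS.RDS.create_event_subscription(client, %{
      "SubscriptionName" => "my-rds-events",
      "SnsTopicArn" => "arn:aws:sns:us-east-1:123456789012:my-topic",
      "SourceType" => "db-instance",
      "SourceIds" => ["mydbinstance1", "mydbinstance2"],
      "EventCategories" => ["Availability", "Backup"]
    })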
"""
def create_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateEventSubscription", input, options)
end
@doc """
Creates an Aurora global database spread across multiple AWS Regions.
The global database contains a single primary cluster with read-write
capability, and a read-only secondary cluster that receives data from the
primary cluster through high-speed replication performed by the Aurora storage
subsystem.
You can create a global database that is initially empty, and then add a primary
cluster and a secondary cluster to it. Or you can specify an existing Aurora
cluster during the create operation, and this cluster becomes the primary
cluster of the global database.
This action only applies to Aurora DB clusters.
"""
def create_global_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGlobalCluster", input, options)
end
@doc """
Creates a new option group.
You can create up to 20 option groups.
"""
def create_option_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOptionGroup", input, options)
end
@doc """
Deletes a custom Availability Zone (AZ).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere
cluster.
For more information about RDS on VMware, see the [ RDS on VMware User Guide.](https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html)
"""
def delete_custom_availability_zone(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCustomAvailabilityZone", input, options)
end
@doc """
The DeleteDBCluster action deletes a previously provisioned DB cluster.
When you delete a DB cluster, all automated backups for that DB cluster are
deleted and can't be recovered. Manual DB cluster snapshots of the specified DB
cluster are not deleted.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def delete_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBCluster", input, options)
end
@doc """
Deletes a custom endpoint and removes it from an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
"""
def delete_db_cluster_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBClusterEndpoint", input, options)
end
@doc """
Deletes a specified DB cluster parameter group.
The DB cluster parameter group to be deleted can't be associated with any DB
clusters.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def delete_db_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBClusterParameterGroup", input, options)
end
@doc """
Deletes a DB cluster snapshot.
If the snapshot is being copied, the copy operation is terminated.
The DB cluster snapshot must be in the `available` state to be deleted.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def delete_db_cluster_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBClusterSnapshot", input, options)
end
@doc """
The DeleteDBInstance action deletes a previously provisioned DB instance.
When you delete a DB instance, all automated backups for that instance are
deleted and can't be recovered. Manual DB snapshots of the DB instance to be
deleted by `DeleteDBInstance` are not deleted.
If you request a final DB snapshot the status of the Amazon RDS DB instance is
`deleting` until the DB snapshot is created. The API action `DescribeDBInstance`
is used to monitor the status of this operation. The action can't be canceled or
reverted once submitted.
When a DB instance is in a failure state and has a status of `failed`,
`incompatible-restore`, or `incompatible-network`, you can only delete it when
you skip creation of the final snapshot with the `SkipFinalSnapshot` parameter.
If the specified DB instance is part of an Amazon Aurora DB cluster, you can't
delete the DB instance if both of the following conditions are true:
* The DB cluster is a read replica of another Amazon Aurora DB
cluster.
* The DB instance is the only instance in the DB cluster.
To delete a DB instance in this case, first call the
`PromoteReadReplicaDBCluster` API action to promote the DB cluster so it's no
longer a read replica. After the promotion completes, then call the
`DeleteDBInstance` API action to delete the final instance in the DB cluster.
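An illustrative call that skips the final snapshot (the identifier value is an
assumption; this is a sketch, not a recommended default):

    AWS.RDS.delete_db_instance(client, %{
      "DBInstanceIdentifier" => "mydbinstance",
      "SkipFinalSnapshot" => true
    })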
"""
def delete_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBInstance", input, options)
end
@doc """
Deletes automated backups using the `DbiResourceId` value of the source DB
instance or the Amazon Resource Name (ARN) of the automated backups.
"""
def delete_db_instance_automated_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBInstanceAutomatedBackup", input, options)
end
@doc """
Deletes a specified DB parameter group.
The DB parameter group to be deleted can't be associated with any DB instances.
"""
def delete_db_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBParameterGroup", input, options)
end
@doc """
Deletes an existing DB proxy.
"""
def delete_db_proxy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBProxy", input, options)
end
@doc """
Deletes a `DBProxyEndpoint`.
Doing so removes the ability to access the DB proxy using the endpoint that you
defined. The endpoint that you delete might have provided capabilities such as
read/write or read-only operations, or using a different VPC than the DB proxy's
default VPC.
"""
def delete_db_proxy_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBProxyEndpoint", input, options)
end
@doc """
Deletes a DB security group.
The specified DB security group must not be associated with any DB instances.
"""
def delete_db_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBSecurityGroup", input, options)
end
@doc """
Deletes a DB snapshot.
If the snapshot is being copied, the copy operation is terminated.
The DB snapshot must be in the `available` state to be deleted.
"""
def delete_db_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBSnapshot", input, options)
end
@doc """
Deletes a DB subnet group.
The specified database subnet group must not be associated with any DB
instances.
"""
def delete_db_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDBSubnetGroup", input, options)
end
@doc """
Deletes an RDS event notification subscription.
"""
def delete_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteEventSubscription", input, options)
end
@doc """
Deletes a global database cluster.
The primary and secondary clusters must already be detached or destroyed first.
This action only applies to Aurora DB clusters.
"""
def delete_global_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteGlobalCluster", input, options)
end
@doc """
Deletes the installation medium for a DB engine that requires an on-premises
customer provided license, such as Microsoft SQL Server.
"""
def delete_installation_media(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteInstallationMedia", input, options)
end
@doc """
Deletes an existing option group.
"""
def delete_option_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOptionGroup", input, options)
end
@doc """
Removes the association between one or more `DBProxyTarget` data structures and a
`DBProxyTargetGroup`.
"""
def deregister_db_proxy_targets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterDBProxyTargets", input, options)
end
@doc """
Lists all of the attributes for a customer account.
The attributes include Amazon RDS quotas for the account, such as the number of
DB instances allowed. The description for a quota includes the quota name,
current usage toward that quota, and the quota's maximum value.
This command doesn't take any parameters.
"""
def describe_account_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccountAttributes", input, options)
end
@doc """
Lists the set of CA certificates provided by Amazon RDS for this AWS account.
"""
def describe_certificates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCertificates", input, options)
end
@doc """
Returns information about custom Availability Zones (AZs).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere
cluster.
For more information about RDS on VMware, see the [ RDS on VMware User Guide.](https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html)
"""
def describe_custom_availability_zones(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCustomAvailabilityZones", input, options)
end
@doc """
Returns information about backtracks for a DB cluster.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora MySQL DB clusters.
"""
def describe_db_cluster_backtracks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusterBacktracks", input, options)
end
@doc """
Returns information about endpoints for an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
"""
def describe_db_cluster_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusterEndpoints", input, options)
end
@doc """
Returns a list of `DBClusterParameterGroup` descriptions.
If a `DBClusterParameterGroupName` parameter is specified, the list will contain
only the description of the specified DB cluster parameter group.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def describe_db_cluster_parameter_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusterParameterGroups", input, options)
end
@doc """
Returns the detailed parameter list for a particular DB cluster parameter group.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def describe_db_cluster_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusterParameters", input, options)
end
@doc """
Returns a list of DB cluster snapshot attribute names and values for a manual DB
cluster snapshot.
When sharing snapshots with other AWS accounts,
`DescribeDBClusterSnapshotAttributes` returns the `restore` attribute and a list
of IDs for the AWS accounts that are authorized to copy or restore the manual DB
cluster snapshot. If `all` is included in the list of values for the `restore`
attribute, then the manual DB cluster snapshot is public and can be copied or
restored by all AWS accounts.
To add or remove access for an AWS account to copy or restore a manual DB
cluster snapshot, or to make the manual DB cluster snapshot public or private,
use the `ModifyDBClusterSnapshotAttribute` API action.
This action only applies to Aurora DB clusters.
"""
def describe_db_cluster_snapshot_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeDBClusterSnapshotAttributes",
input,
options
)
end
@doc """
Returns information about DB cluster snapshots.
This API action supports pagination.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def describe_db_cluster_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusterSnapshots", input, options)
end
@doc """
Returns information about provisioned Aurora DB clusters.
This API supports pagination.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This operation can also return information for Amazon Neptune DB instances and
Amazon DocumentDB instances.
"""
def describe_db_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBClusters", input, options)
end
@doc """
Returns a list of the available DB engines.
"""
def describe_db_engine_versions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBEngineVersions", input, options)
end
@doc """
Displays backups for both current and deleted instances.
For example, use this operation to find details about automated backups for
previously deleted instances. Current instances with retention periods greater
than zero (0) are returned for both the `DescribeDBInstanceAutomatedBackups` and
`DescribeDBInstances` operations.
All parameters are optional.
"""
def describe_db_instance_automated_backups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBInstanceAutomatedBackups", input, options)
end
@doc """
Returns information about provisioned RDS instances.
This API supports pagination.
This operation can also return information for Amazon Neptune DB instances and
Amazon DocumentDB instances.
"""
def describe_db_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBInstances", input, options)
end
@doc """
Returns a list of DB log files for the DB instance.
"""
def describe_db_log_files(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBLogFiles", input, options)
end
@doc """
Returns a list of `DBParameterGroup` descriptions.
If a `DBParameterGroupName` is specified, the list will contain only the
description of the specified DB parameter group.
"""
def describe_db_parameter_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBParameterGroups", input, options)
end
@doc """
Returns the detailed parameter list for a particular DB parameter group.
"""
def describe_db_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBParameters", input, options)
end
@doc """
Returns information about DB proxies.
"""
def describe_db_proxies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBProxies", input, options)
end
@doc """
Returns information about DB proxy endpoints.
"""
def describe_db_proxy_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBProxyEndpoints", input, options)
end
@doc """
Returns information about DB proxy target groups, represented by
`DBProxyTargetGroup` data structures.
"""
def describe_db_proxy_target_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBProxyTargetGroups", input, options)
end
@doc """
Returns information about `DBProxyTarget` objects.
This API supports pagination.
"""
def describe_db_proxy_targets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBProxyTargets", input, options)
end
@doc """
Returns a list of `DBSecurityGroup` descriptions.
If a `DBSecurityGroupName` is specified, the list will contain only the
descriptions of the specified DB security group.
"""
def describe_db_security_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBSecurityGroups", input, options)
end
@doc """
Returns a list of DB snapshot attribute names and values for a manual DB
snapshot.
When sharing snapshots with other AWS accounts, `DescribeDBSnapshotAttributes`
returns the `restore` attribute and a list of IDs for the AWS accounts that are
authorized to copy or restore the manual DB snapshot. If `all` is included in
the list of values for the `restore` attribute, then the manual DB snapshot is
public and can be copied or restored by all AWS accounts.
To add or remove access for an AWS account to copy or restore a manual DB
snapshot, or to make the manual DB snapshot public or private, use the
`ModifyDBSnapshotAttribute` API action.
"""
def describe_db_snapshot_attributes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBSnapshotAttributes", input, options)
end
@doc """
Returns information about DB snapshots.
This API action supports pagination.
"""
def describe_db_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBSnapshots", input, options)
end
@doc """
Returns a list of DBSubnetGroup descriptions.
If a DBSubnetGroupName is specified, the list will contain only the descriptions
of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the [Wikipedia Tutorial](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
"""
def describe_db_subnet_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDBSubnetGroups", input, options)
end
@doc """
Returns the default engine and system parameter information for the cluster
database engine.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
"""
def describe_engine_default_cluster_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeEngineDefaultClusterParameters",
input,
options
)
end
@doc """
Returns the default engine and system parameter information for the specified
database engine.
"""
def describe_engine_default_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEngineDefaultParameters", input, options)
end
@doc """
Displays a list of categories for all event source types, or, if specified, for
a specified source type.
You can see a list of the event categories and source types in [
Events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
in the *Amazon RDS User Guide.*
"""
def describe_event_categories(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventCategories", input, options)
end
@doc """
Lists all the subscription descriptions for a customer account.
The description for a subscription includes `SubscriptionName`, `SNSTopicARN`,
`CustomerID`, `SourceType`, `SourceID`, `CreationTime`, and `Status`.
If you specify a `SubscriptionName`, lists the description for that
subscription.
"""
def describe_event_subscriptions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventSubscriptions", input, options)
end
@doc """
Returns events related to DB instances, DB clusters, DB parameter groups, DB
security groups, DB snapshots, and DB cluster snapshots for the past 14 days.
Events specific to a particular DB instance, DB cluster, DB parameter group,
DB security group, DB snapshot, or DB cluster snapshot can be obtained by
providing the name as a parameter.
By default, the past hour of events are returned.
"""
def describe_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEvents", input, options)
end
@doc """
Returns information about a snapshot export to Amazon S3.
This API operation supports pagination.
"""
def describe_export_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeExportTasks", input, options)
end
@doc """
Returns information about Aurora global database clusters.
This API supports pagination.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def describe_global_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGlobalClusters", input, options)
end
@doc """
Describes the available installation media for a DB engine that requires an
on-premises customer provided license, such as Microsoft SQL Server.
"""
def describe_installation_media(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeInstallationMedia", input, options)
end
@doc """
Describes all available options.
"""
def describe_option_group_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOptionGroupOptions", input, options)
end
@doc """
Describes the available option groups.
"""
def describe_option_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOptionGroups", input, options)
end
@doc """
Returns a list of orderable DB instance options for the specified engine.
"""
def describe_orderable_db_instance_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrderableDBInstanceOptions", input, options)
end
@doc """
Returns a list of resources (for example, DB instances) that have at least one
pending maintenance action.
"""
def describe_pending_maintenance_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePendingMaintenanceActions", input, options)
end
@doc """
Returns information about reserved DB instances for this account, or about a
specified reserved DB instance.
"""
def describe_reserved_db_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeReservedDBInstances", input, options)
end
@doc """
Lists available reserved DB instance offerings.
"""
def describe_reserved_db_instances_offerings(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeReservedDBInstancesOfferings",
input,
options
)
end
@doc """
Returns a list of the source AWS Regions where the current AWS Region can create
a read replica, copy a DB snapshot from, or replicate automated backups from.
This API action supports pagination.
"""
def describe_source_regions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSourceRegions", input, options)
end
@doc """
You can call `DescribeValidDBInstanceModifications` to learn what modifications
you can make to your DB instance.
You can use this information when you call `ModifyDBInstance`.
"""
def describe_valid_db_instance_modifications(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeValidDBInstanceModifications",
input,
options
)
end
@doc """
Downloads all or a portion of the specified log file, up to 1 MB in size.
"""
def download_db_log_file_portion(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DownloadDBLogFilePortion", input, options)
end
@doc """
Forces a failover for a DB cluster.
A failover for a DB cluster promotes one of the Aurora Replicas (read-only
instances) in the DB cluster to be the primary instance (the cluster writer).
Amazon Aurora will automatically fail over to an Aurora Replica, if one exists,
when the primary instance fails. You can force a failover when you want to
simulate a failure of a primary instance for testing. Because each instance in a
DB cluster has its own endpoint address, you will need to clean up and
re-establish any existing connections that use those endpoint addresses when the
failover is complete.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def failover_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "FailoverDBCluster", input, options)
end
@doc """
Initiates the failover process for an Aurora global database (`GlobalCluster`).
A failover for an Aurora global database promotes one of secondary read-only DB
clusters to be the primary DB cluster and demotes the primary DB cluster to
being a secondary (read-only) DB cluster. In other words, the roles of the
current primary DB cluster and the selected (target) DB cluster are switched.
The selected secondary DB cluster assumes full read/write capabilities for the
Aurora global database.
For more information about failing over an Amazon Aurora global database, see
[Managed planned failover for Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-disaster-recovery.managed-failover)
in the *Amazon Aurora User Guide.*
This action applies to `GlobalCluster` (Aurora global databases) only. Use this
action only on healthy Aurora global databases with running Aurora DB clusters
and no Region-wide outages, to test disaster recovery scenarios or to
reconfigure your Aurora global database topology.
"""
def failover_global_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "FailoverGlobalCluster", input, options)
end
@doc """
Imports the installation media for a DB engine that requires an on-premises
customer provided license, such as SQL Server.
"""
def import_installation_media(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ImportInstallationMedia", input, options)
end
@doc """
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html)
in the *Amazon RDS User Guide*.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Override the system-default Secure Sockets Layer/Transport Layer Security
(SSL/TLS) certificate for Amazon RDS for new DB instances temporarily, or remove
the override.
By using this operation, you can specify an RDS-approved SSL/TLS certificate for
new DB instances that is different from the default certificate provided by RDS.
You can also use this operation to remove the override, so that new DB instances
use the default certificate provided by RDS.
You might need to override the default certificate in the following situations:
* You already migrated your applications to support the latest
certificate authority (CA) certificate, but the new CA certificate is not yet
the RDS default CA certificate for the specified AWS Region.
* RDS has already moved to a new default CA certificate for the
specified AWS Region, but you are still in the process of supporting the new CA
certificate. In this case, you temporarily need additional time to finish your
application changes.
For more information about rotating your SSL/TLS certificate for RDS DB engines,
see [ Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html)
in the *Amazon RDS User Guide*.
For more information about rotating your SSL/TLS certificate for Aurora DB
engines, see [ Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html)
in the *Amazon Aurora User Guide*.
"""
def modify_certificates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCertificates", input, options)
end
@doc """
Set the capacity of an Aurora Serverless DB cluster to a specific value.
Aurora Serverless scales seamlessly based on the workload on the DB cluster. In
some cases, the capacity might not scale fast enough to meet a sudden change in
workload, such as a large number of new transactions. Call
`ModifyCurrentDBClusterCapacity` to set the capacity explicitly.
After this call sets the DB cluster capacity, Aurora Serverless can
automatically scale the DB cluster based on the cooldown period for scaling up
and the cooldown period for scaling down.
For more information about Aurora Serverless, see [Using Amazon Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html)
in the *Amazon Aurora User Guide*.
If you call `ModifyCurrentDBClusterCapacity` with the default `TimeoutAction`,
connections that prevent Aurora Serverless from finding a scaling point might be
dropped. For more information about scaling points, see [ Autoscaling for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling)
in the *Amazon Aurora User Guide*.
This action only applies to Aurora DB clusters.
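An illustrative call (the identifier, capacity value, and the `TimeoutAction`
value are assumptions):

    AWS.RDS.modify_current_db_cluster_capacity(client, %{
      "DBClusterIdentifier" => "my-serverless-cluster",
      "Capacity" => 8,
      "TimeoutAction" => "ForceApplyCapacityChange"
    })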
"""
def modify_current_db_cluster_capacity(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCurrentDBClusterCapacity", input, options)
end
@doc """
Modify a setting for an Amazon Aurora DB cluster.
You can change one or more database configuration parameters by specifying these
parameters and the new values in the request. For more information on Amazon
Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def modify_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBCluster", input, options)
end
@doc """
Modifies the properties of an endpoint in an Amazon Aurora DB cluster.
This action only applies to Aurora DB clusters.
"""
def modify_db_cluster_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBClusterEndpoint", input, options)
end
@doc """
Modifies the parameters of a DB cluster parameter group.
To modify more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A maximum of 20 parameters
can be modified in a single request.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
Changes to dynamic parameters are applied immediately. Changes to static
parameters require a reboot without failover to the DB cluster associated with
the parameter group before the change can take effect.
After you create a DB cluster parameter group, you should wait at least 5
minutes before creating your first DB cluster that uses that DB cluster
parameter group as the default parameter group. This allows Amazon RDS to fully
complete the create action before the parameter group is used as the default for
a new DB cluster. This is especially important for parameters that are critical
when creating the default database for a DB cluster, such as the character set
for the default database defined by the `character_set_database` parameter. You
can use the *Parameter Groups* option of the [Amazon RDS console](https://console.aws.amazon.com/rds/) or the
`DescribeDBClusterParameters` action to verify that your DB cluster parameter
group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless
cluster, Aurora applies the update immediately. The cluster restart might
interrupt your workload. In that case, your application must reopen any
connections and retry any transactions that were active when the parameter
changes took effect.
This action only applies to Aurora DB clusters.
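An illustrative call modifying a single parameter. The entry keys
`ParameterName`, `ParameterValue`, and `ApplyMethod` come from the description
above; the group name and the exact nesting of the `Parameters` list are
assumptions:

    AWS.RDS.modify_db_cluster_parameter_group(client, %{
      "DBClusterParameterGroupName" => "my-cluster-params",
      "Parameters" => [
        %{
          "ParameterName" => "character_set_database",
          "ParameterValue" => "utf8mb4",
          "ApplyMethod" => "pending-reboot"
        }
      ]
    })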
"""
def modify_db_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBClusterParameterGroup", input, options)
end
@doc """
Adds an attribute and values to, or removes an attribute and values from, a
manual DB cluster snapshot.
To share a manual DB cluster snapshot with other AWS accounts, specify `restore`
as the `AttributeName` and use the `ValuesToAdd` parameter to add a list of IDs
of the AWS accounts that are authorized to restore the manual DB cluster
snapshot. Use the value `all` to make the manual DB cluster snapshot public,
which means that it can be copied or restored by all AWS accounts.
Don't add the `all` value for any manual DB cluster snapshots that contain
private information that you don't want available to all AWS accounts.
If a manual DB cluster snapshot is encrypted, it can be shared, but only by
specifying a list of authorized AWS account IDs for the `ValuesToAdd` parameter.
You can't use `all` as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB cluster
snapshot, or whether a manual DB cluster snapshot is public or private, use the
`DescribeDBClusterSnapshotAttributes` API action. The accounts are returned as
values for the `restore` attribute.
This action only applies to Aurora DB clusters.
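As an illustrative sketch, sharing a snapshot with a single AWS account might
look like this (keys mirror the AWS API parameter names, the account ID is a
placeholder, and the exact input shape depends on the client library):

    input = %{
      "DBClusterSnapshotIdentifier" => "my-cluster-snapshot",
      "AttributeName" => "restore",
      "ValuesToAdd" => ["123456789012"]
    }

    modify_db_cluster_snapshot_attribute(client, input)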
"""
def modify_db_cluster_snapshot_attribute(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBClusterSnapshotAttribute", input, options)
end
@doc """
Modifies settings for a DB instance.
You can change one or more database configuration parameters by specifying these
parameters and the new values in the request. To learn what modifications you
can make to your DB instance, call `DescribeValidDBInstanceModifications` before
you call `ModifyDBInstance`.
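A hedged sketch of that flow, assuming the generated
`describe_valid_db_instance_modifications/2` wrapper in this module and input
keys that mirror the AWS API parameter names (identifiers and values are
placeholders):

    # Check which modifications the instance currently supports
    describe_valid_db_instance_modifications(client, %{"DBInstanceIdentifier" => "mydb"})

    # Then apply the desired change
    modify_db_instance(client, %{
      "DBInstanceIdentifier" => "mydb",
      "DBInstanceClass" => "db.r5.large",
      "ApplyImmediately" => true
    })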
"""
def modify_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBInstance", input, options)
end
@doc """
Modifies the parameters of a DB parameter group.
To modify more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A maximum of 20 parameters
can be modified in a single request.
Changes to dynamic parameters are applied immediately. Changes to static
parameters require a reboot without failover to the DB instance associated with
the parameter group before the change can take effect.
After you modify a DB parameter group, you should wait at least 5 minutes before
creating your first DB instance that uses that DB parameter group as the default
parameter group. This allows Amazon RDS to fully complete the modify action
before the parameter group is used as the default for a new DB instance. This is
especially important for parameters that are critical when creating the default
database for a DB instance, such as the character set for the default database
defined by the `character_set_database` parameter. You can use the *Parameter
Groups* option of the [Amazon RDS console](https://console.aws.amazon.com/rds/)
or the *DescribeDBParameters* command to verify that your DB parameter group has
been created or modified.
"""
def modify_db_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBParameterGroup", input, options)
end
@doc """
Changes the settings for an existing DB proxy.
"""
def modify_db_proxy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBProxy", input, options)
end
@doc """
Changes the settings for an existing DB proxy endpoint.
"""
def modify_db_proxy_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBProxyEndpoint", input, options)
end
@doc """
Modifies the properties of a `DBProxyTargetGroup`.
"""
def modify_db_proxy_target_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBProxyTargetGroup", input, options)
end
@doc """
Updates a manual DB snapshot with a new engine version.
The snapshot can be encrypted or unencrypted, but not shared or public.
Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.
"""
def modify_db_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBSnapshot", input, options)
end
@doc """
Adds an attribute and values to, or removes an attribute and values from, a
manual DB snapshot.
To share a manual DB snapshot with other AWS accounts, specify `restore` as the
`AttributeName` and use the `ValuesToAdd` parameter to add a list of IDs of the
AWS accounts that are authorized to restore the manual DB snapshot. Use the
value `all` to make the manual DB snapshot public, which means it can be copied
or restored by all AWS accounts.
Don't add the `all` value for any manual DB snapshots that contain private
information that you don't want available to all AWS accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying
a list of authorized AWS account IDs for the `ValuesToAdd` parameter. You can't
use `all` as a value for that parameter in this case.
To view which AWS accounts have access to copy or restore a manual DB snapshot,
or whether a manual DB snapshot is public or private, use the
`DescribeDBSnapshotAttributes` API action. The accounts are returned as values
for the `restore` attribute.
"""
def modify_db_snapshot_attribute(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBSnapshotAttribute", input, options)
end
@doc """
Modifies an existing DB subnet group.
DB subnet groups must contain at least one subnet in at least two AZs in the AWS
Region.
"""
def modify_db_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyDBSubnetGroup", input, options)
end
@doc """
Modifies an existing RDS event notification subscription.
You can't modify the source identifiers using this call. To change source
identifiers for a subscription, use the `AddSourceIdentifierToSubscription` and
`RemoveSourceIdentifierFromSubscription` calls.
You can see a list of the event categories for a given source type
(`SourceType`) in
[Events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
in the *Amazon RDS User Guide* or by using the `DescribeEventCategories`
operation.
"""
def modify_event_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyEventSubscription", input, options)
end
@doc """
Modify a setting for an Amazon Aurora global cluster.
You can change one or more database configuration parameters by specifying these
parameters and the new values in the request. For more information on Amazon
Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def modify_global_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyGlobalCluster", input, options)
end
@doc """
Modifies an existing option group.
"""
def modify_option_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyOptionGroup", input, options)
end
@doc """
Promotes a read replica DB instance to a standalone DB instance.
Backup duration is a function of the amount of changes to the
database since the previous backup. If you plan to promote a read replica to a
standalone instance, we recommend that you enable backups and complete at least
one backup prior to promotion. In addition, a read replica cannot be promoted to
a standalone instance when it is in the `backing-up` status. If you have enabled
backups on your read replica, configure the automated backup window so that
daily backups do not interfere with read replica promotion.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
"""
def promote_read_replica(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PromoteReadReplica", input, options)
end
@doc """
Promotes a read replica DB cluster to a standalone DB cluster.
This action only applies to Aurora DB clusters.
"""
def promote_read_replica_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PromoteReadReplicaDBCluster", input, options)
end
@doc """
Purchases a reserved DB instance offering.
"""
def purchase_reserved_db_instances_offering(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"PurchaseReservedDBInstancesOffering",
input,
options
)
end
@doc """
You might need to reboot your DB instance, usually for maintenance reasons.
For example, if you make certain modifications, or if you change the DB
parameter group associated with the DB instance, you must reboot the instance
for the changes to take effect.
Rebooting a DB instance restarts the database engine service. Rebooting a DB
instance results in a momentary outage, during which the DB instance status is
set to rebooting.
For more information about rebooting, see [Rebooting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RebootInstance.html)
in the *Amazon RDS User Guide.*
"""
def reboot_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RebootDBInstance", input, options)
end
@doc """
Associate one or more `DBProxyTarget` data structures with a
`DBProxyTargetGroup`.
"""
def register_db_proxy_targets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterDBProxyTargets", input, options)
end
@doc """
Detaches an Aurora secondary cluster from an Aurora global database cluster.
The cluster becomes a standalone cluster with read-write capability instead of
being read-only and receiving data from a primary cluster in a different region.
This action only applies to Aurora DB clusters.
"""
def remove_from_global_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveFromGlobalCluster", input, options)
end
@doc """
Disassociates an AWS Identity and Access Management (IAM) role from an Amazon
Aurora DB cluster.
For more information, see [Authorizing Amazon Aurora MySQL to Access Other AWS Services on Your Behalf
](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.Authorizing.html)
in the *Amazon Aurora User Guide*.
This action only applies to Aurora DB clusters.
"""
def remove_role_from_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveRoleFromDBCluster", input, options)
end
@doc """
Disassociates an AWS Identity and Access Management (IAM) role from a DB
instance.
"""
def remove_role_from_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveRoleFromDBInstance", input, options)
end
@doc """
Removes a source identifier from an existing RDS event notification
subscription.
"""
def remove_source_identifier_from_subscription(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"RemoveSourceIdentifierFromSubscription",
input,
options
)
end
@doc """
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html)
in the *Amazon RDS User Guide.*
"""
def remove_tags_from_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTagsFromResource", input, options)
end
@doc """
Modifies the parameters of a DB cluster parameter group to the default value.
To reset specific parameters, submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB cluster parameter group, specify the
`DBClusterParameterGroupName` and `ResetAllParameters` parameters.
When resetting the entire group, dynamic parameters are updated immediately and
static parameters are set to `pending-reboot` to take effect on the next DB
instance restart or `RebootDBInstance` request. You must call `RebootDBInstance`
for every DB instance in your DB cluster that you want the updated static
parameter to apply to.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
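For example, resetting every parameter in a group could look like this sketch
(keys mirror the AWS API parameter names; the group name is a placeholder):

    reset_db_cluster_parameter_group(client, %{
      "DBClusterParameterGroupName" => "my-aurora-params",
      "ResetAllParameters" => true
    })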
"""
def reset_db_cluster_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResetDBClusterParameterGroup", input, options)
end
@doc """
Modifies the parameters of a DB parameter group to the engine/system default
value.
To reset specific parameters, provide a list of the following: `ParameterName`
and `ApplyMethod`. To reset the entire DB parameter group, specify the
`DBParameterGroup` name and `ResetAllParameters` parameters. When resetting the
entire group, dynamic parameters are updated immediately and static parameters
are set to `pending-reboot` to take effect on the next DB instance restart or
`RebootDBInstance` request.
"""
def reset_db_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResetDBParameterGroup", input, options)
end
@doc """
Creates an Amazon Aurora DB cluster from MySQL data stored in an Amazon S3
bucket.
Amazon RDS must be authorized to access the Amazon S3 bucket and the data must
be created using the Percona XtraBackup utility as described in [ Migrating Data from MySQL by Using an Amazon S3
Bucket](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.ExtMySQL.html#AuroraMySQL.Migrating.ExtMySQL.S3)
in the *Amazon Aurora User Guide*.
This action only restores the DB cluster, not the DB instances for that DB
cluster. You must invoke the `CreateDBInstance` action to create DB instances
for the restored DB cluster, specifying the identifier of the restored DB
cluster in `DBClusterIdentifier`. You can create DB instances only after the
`RestoreDBClusterFromS3` action has completed and the DB cluster is available.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters. The source DB engine must be
MySQL.
"""
def restore_db_cluster_from_s3(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBClusterFromS3", input, options)
end
@doc """
Creates a new DB cluster from a DB snapshot or DB cluster snapshot.
This action only applies to Aurora DB clusters.
The target DB cluster is created from the source snapshot with a default
configuration. If you don't specify a security group, the new DB cluster is
associated with the default security group.
This action only restores the DB cluster, not the DB instances for that DB
cluster. You must invoke the `CreateDBInstance` action to create DB instances
for the restored DB cluster, specifying the identifier of the restored DB
cluster in `DBClusterIdentifier`. You can create DB instances only after the
`RestoreDBClusterFromSnapshot` action has completed and the DB cluster is
available.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
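A hedged sketch of the restore-then-add-instances flow described above, assuming
the `create_db_instance/2` wrapper in this module and AWS API parameter names as
input keys (identifiers and values are placeholders):

    restore_db_cluster_from_snapshot(client, %{
      "DBClusterIdentifier" => "restored-cluster",
      "SnapshotIdentifier" => "my-cluster-snapshot",
      "Engine" => "aurora-mysql"
    })

    # Once the restored cluster is available, add DB instances to it
    create_db_instance(client, %{
      "DBInstanceIdentifier" => "restored-cluster-instance-1",
      "DBClusterIdentifier" => "restored-cluster",
      "DBInstanceClass" => "db.r5.large",
      "Engine" => "aurora-mysql"
    })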
"""
def restore_db_cluster_from_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBClusterFromSnapshot", input, options)
end
@doc """
Restores a DB cluster to an arbitrary point in time.
Users can restore to any point in time before `LatestRestorableTime` for up to
`BackupRetentionPeriod` days. The target DB cluster is created from the source
DB cluster with the same configuration as the original DB cluster, except that
the new DB cluster is created with the default DB security group.
This action only restores the DB cluster, not the DB instances for that DB
cluster. You must invoke the `CreateDBInstance` action to create DB instances
for the restored DB cluster, specifying the identifier of the restored DB
cluster in `DBClusterIdentifier`. You can create DB instances only after the
`RestoreDBClusterToPointInTime` action has completed and the DB cluster is
available.
For more information on Amazon Aurora, see [ What Is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def restore_db_cluster_to_point_in_time(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBClusterToPointInTime", input, options)
end
@doc """
Creates a new DB instance from a DB snapshot.
The target database is created from the source database restore point with most
of the source's original configuration, including the default security group and
DB parameter group. By default, the new DB instance is created as a Single-AZ
deployment, except when the instance is a SQL Server instance that has an option
group associated with mirroring. In this case, the instance becomes a Multi-AZ
deployment, not a Single-AZ deployment.
If you want to replace your original DB instance with the new, restored DB
instance, then rename your original DB instance before you call the
RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances with
the same name. After you have renamed your original DB instance with a different
identifier, then you can pass the original name of the DB instance as the
DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action.
The result is that you replace the original DB instance with the DB instance
created from the snapshot.
If you are restoring from a shared manual DB snapshot, the
`DBSnapshotIdentifier` must be the ARN of the shared DB snapshot.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora,
use `RestoreDBClusterFromSnapshot`.
"""
def restore_db_instance_from_db_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBInstanceFromDBSnapshot", input, options)
end
@doc """
Amazon Relational Database Service (Amazon RDS) supports importing MySQL
databases by using backup files.
You can create a backup of your on-premises database, store it on Amazon Simple
Storage Service (Amazon S3), and then restore the backup file onto a new Amazon
RDS DB instance running MySQL. For more information, see [Importing Data into an Amazon RDS MySQL DB
Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MySQL.Procedural.Importing.html)
in the *Amazon RDS User Guide.*
"""
def restore_db_instance_from_s3(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBInstanceFromS3", input, options)
end
@doc """
Restores a DB instance to an arbitrary point in time.
You can restore to any point in time before the time identified by the
LatestRestorableTime property. You can restore to a point up to the number of
days specified by the BackupRetentionPeriod property.
The target database is created with most of the original configuration, but in a
system-selected Availability Zone, with the default security group, the default
subnet group, and the default DB parameter group. By default, the new DB
instance is created as a single-AZ deployment except when the instance is a SQL
Server instance that has an option group that is associated with mirroring; in
this case, the instance becomes a mirrored deployment and not a single-AZ
deployment.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora,
use `RestoreDBClusterToPointInTime`.
"""
def restore_db_instance_to_point_in_time(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreDBInstanceToPointInTime", input, options)
end
@doc """
Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or
EC2 or VPC Security Groups.
Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC,
or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
EC2SecurityGroupId).
"""
def revoke_db_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RevokeDBSecurityGroupIngress", input, options)
end
@doc """
Starts a database activity stream to monitor activity on the database.
For more information, see [Database Activity Streams](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html)
in the *Amazon Aurora User Guide*.
"""
def start_activity_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartActivityStream", input, options)
end
@doc """
Starts an Amazon Aurora DB cluster that was stopped using the AWS console, the
stop-db-cluster AWS CLI command, or the StopDBCluster action.
For more information, see [ Stopping and Starting an Aurora Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def start_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartDBCluster", input, options)
end
@doc """
Starts an Amazon RDS DB instance that was stopped using the AWS console, the
stop-db-instance AWS CLI command, or the StopDBInstance action.
For more information, see [ Starting an Amazon RDS DB instance That Was Previously
Stopped](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StartInstance.html)
in the *Amazon RDS User Guide.*
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora DB
clusters, use `StartDBCluster` instead.
"""
def start_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartDBInstance", input, options)
end
@doc """
Enables replication of automated backups to a different AWS Region.
For more information, see [ Replicating Automated Backups to Another AWS Region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReplicateBackups.html)
in the *Amazon RDS User Guide.*
"""
def start_db_instance_automated_backups_replication(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"StartDBInstanceAutomatedBackupsReplication",
input,
options
)
end
@doc """
Starts an export of a snapshot to Amazon S3.
The provided IAM role must have access to the S3 bucket.
"""
def start_export_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartExportTask", input, options)
end
@doc """
Stops a database activity stream that was started using the AWS console, the
`start-activity-stream` AWS CLI command, or the `StartActivityStream` action.
For more information, see [Database Activity Streams](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html)
in the *Amazon Aurora User Guide*.
"""
def stop_activity_stream(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopActivityStream", input, options)
end
@doc """
Stops an Amazon Aurora DB cluster.
When you stop a DB cluster, Aurora retains the DB cluster's metadata, including
its endpoints and DB parameter groups. Aurora also retains the transaction logs
so you can do a point-in-time restore if necessary.
For more information, see [ Stopping and Starting an Aurora Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html)
in the *Amazon Aurora User Guide.*
This action only applies to Aurora DB clusters.
"""
def stop_db_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopDBCluster", input, options)
end
@doc """
Stops an Amazon RDS DB instance.
When you stop a DB instance, Amazon RDS retains the DB instance's metadata,
including its endpoint, DB parameter group, and option group membership. Amazon
RDS also retains the transaction logs so you can do a point-in-time restore if
necessary.
For more information, see [ Stopping an Amazon RDS DB Instance Temporarily](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html)
in the *Amazon RDS User Guide.*
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora
clusters, use `StopDBCluster` instead.
"""
def stop_db_instance(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopDBInstance", input, options)
end
@doc """
Stops automated backup replication for a DB instance.
For more information, see [ Replicating Automated Backups to Another AWS Region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReplicateBackups.html)
in the *Amazon RDS User Guide.*
"""
def stop_db_instance_automated_backups_replication(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"StopDBInstanceAutomatedBackupsReplication",
input,
options
)
end
end
# End of file: lib/aws/generated/rds.ex
defmodule Phoenix.LiveView do
@moduledoc """
Live views are stateful views which update the browser on state changes.
## Configuration
A `:signing_salt` configuration is required in your endpoint's
`:live_view` configuration, for example:
config :my_app, AppWeb.Endpoint,
...,
live_view: [signing_salt: ...]
You can generate a secure, random signing salt with the
`mix phx.gen.secret 32` task.
## Life-cycle
A Live View begins as a regular HTTP request and HTML response,
and then upgrades to a stateful view on client connect,
guaranteeing a regular HTML page even if JavaScript is disabled.
Any time a stateful view changes or updates its socket assigns, it is
automatically re-rendered and the updates are pushed to the client.
You begin by rendering a Live View from your router or controller
while providing *session* data to the view, which represents request info
necessary for the view, such as params, cookie session info, etc.
The session is signed and stored on the client, then provided back
to the server when the client connects, or reconnects to the stateful
view. When a view is rendered from the controller, the `mount/2` callback
is invoked with the provided session data and the Live View's socket.
The `mount/2` callback wires up socket assigns necessary for rendering
the view. After mounting, `render/1` is invoked and the HTML is sent
as a regular HTML response to the client.
After rendering the static page with a signed session, the live
views connect from the client where stateful views are spawned
to push rendered updates to the browser, and receive client events
via phx bindings. Just like the controller flow, `mount/2` is invoked
with the signed session, and socket state, where mount assigns
values for rendering. However, in the connected client case, a
Live View process is spawned on the server, pushes the result of
`render/1` to the client and continues on for the duration of the
connection. If at any point during the stateful life-cycle a
crash is encountered, or the client connection drops, the client
gracefully reconnects to the server, passing its signed session
back to `mount/2`.
## Usage
First, a Live View requires two callbacks: `mount/2` and `render/1`:
defmodule AppWeb.ThermostatView do
def render(assigns) do
~L\"""
Current temperature: <%= @temperature %>
\"""
end
def mount(%{id: id, current_user_id: user_id}, socket) do
case Thermostat.get_user_reading(user_id, id) do
{:ok, temperature} ->
{:ok, assign(socket, :temperature, temperature)}
{:error, reason} ->
{:error, reason}
end
end
end
The `render/1` callback receives the `socket.assigns` and is responsible
for returning rendered content. You can use `Phoenix.LiveView.sigil_L/2`
to inline Live View templates. If you want to use `Phoenix.HTML` helpers,
remember to `use Phoenix.HTML` at the top of your `LiveView`.
With a Live View defined, you first define the `socket` path in your endpoint,
and point it to `Phoenix.LiveView.Socket`:
defmodule AppWeb.Endpoint do
use Phoenix.Endpoint
socket "/live", Phoenix.LiveView.Socket
...
end
Next, you can serve Live Views directly from your router:
defmodule AppWeb.Router do
use Phoenix.Router
import Phoenix.LiveView.Router
scope "/", AppWeb do
live "/thermostat", ThermostatView
end
end
Or you can `live_render` your view from any controller:
defmodule AppWeb.ThermostatController do
...
alias Phoenix.LiveView
def show(conn, %{"id" => id}) do
LiveView.Controller.live_render(conn, AppWeb.ThermostatView, session: %{
id: id,
current_user_id: get_session(conn, :user_id),
})
end
end
As we saw in the life-cycle section, you pass `:session` data about the
request to the view, such as the current user's id in the cookie session,
and parameters from the request. A regular HTML response is sent with a
signed token embedded in the DOM containing your Live View session data.
Next, your client code connects to the server:
import LiveSocket from "phoenix_live_view"
let liveSocket = new LiveSocket("/live")
liveSocket.connect()
After the client connects, `mount/2` will be invoked inside a spawned
Live View process. At this point, you can use `connected?/1` to
conditionally perform stateful work, such as subscribing to pubsub topics,
sending messages, etc. For example, you can periodically update a Live View
with a timer:
defmodule DemoWeb.ThermostatView do
use Phoenix.LiveView
...
def mount(%{id: id, current_user_id: user_id}, socket) do
if connected?(socket), do: :timer.send_interval(30000, self(), :update)
case Thermostat.get_user_reading(user_id, id) do
{:ok, temperature} ->
{:ok, assign(socket, temperature: temperature, id: id)}
{:error, reason} ->
{:error, reason}
end
end
def handle_info(:update, socket) do
{:ok, temperature} = Thermostat.get_reading(socket.assigns.id)
{:noreply, assign(socket, :temperature, temperature)}
end
end
We used `connected?(socket)` on mount to send our view a message every 30s if
the socket is in a connected state. We receive `:update` in a
`handle_info` just like a GenServer, and update our socket assigns. Whenever
a socket's assigns change, `render/1` is automatically invoked, and the
updates are sent to the client.
## LiveEEx Templates
`Phoenix.LiveView`'s built-in templates, provided by the `.leex`
extension or the `~L` sigil, stand for Live EEx. They are similar
to regular `.eex` templates except they are designed to
minimize the amount of data sent over the wire by tracking
changes.
When you first render a `.leex` template, it will send
all of the static and dynamic parts of the template to
the client. After that, any change you do on the server
will now send only the dynamic parts and only if those
parts have changed.
The tracking of changes is done via assigns. Therefore,
if part of your template does this:
<%= something_with_user(@user) %>
That particular section will be re-rendered only if the
`@user` assign changes between events. Therefore, you
MUST pass all of the data to your templates via assigns
and avoid performing direct operations on the template
as much as possible. For example, if you perform this
operation in your template:
<%= for user <- Repo.all(User) do %>
<%= user.name %>
<% end %>
Then Phoenix will never re-render the section above, even
if the amount of users in the database changes. Instead,
you need to store the users as assigns in your LiveView
before it renders the template:
assign(socket, :users, Repo.all(User))
Generally speaking, **data loading should never happen inside
the template**, regardless of whether you are using LiveView or not.
The difference is that LiveView enforces those as best
practices.
Another restriction of LiveView is that, in order to track
variables, it may make some macros incompatible with `.leex`
templates. However, this would only happen if those macros
are injecting or accessing user variables, which are not
recommended in the first place. Overall, `.leex` templates
do their best to be compatible with any Elixir code, sometimes
even turning off optimizations to keep compatibility.
## Bindings
Phoenix supports DOM element bindings for client-server interaction. For
example, to react to a click on a button, you would render the element:
<button phx-click="inc_temperature">+</button>
Then on the server, all Live View bindings are handled with the `handle_event`
callback, for example:
def handle_event("inc_temperature", _value, socket) do
{:ok, new_temp} = Thermostat.inc_temperature(socket.assigns.id)
{:noreply, assign(socket, :temperature, new_temp)}
end
### Click Events
The `phx-click` binding is used to send click events to the server. The
`value` passed to `handle_event` is chosen on the client with the following
priority:
* An optional `"phx-value"` binding on the clicked element
* The clicked element's `value` property
* An empty string
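For example, a minimal sketch of a `phx-value` binding (the event name and step
logic are illustrative), where the bound value arrives as the second argument of
`handle_event/3`:

    <button phx-click="inc_by" phx-value="2">+2</button>

    def handle_event("inc_by", value, socket) do
      step = String.to_integer(value)
      {:noreply, update(socket, :temperature, &(&1 + step))}
    end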
### Form Events
To handle form changes and submissions, use the `phx-change` and `phx-submit`
events. In general, it is preferred to handle input changes at the form level,
where all form fields are passed to the Live View's callback given any
single input change. For example, to handle real-time form validation and
saving, your template would use both `phx_change` and `phx_submit` bindings:
<%= form_for @changeset, "#", [phx_change: :validate, phx_submit: :save], fn f -> %>
<%= label f, :username %>
<%= text_input f, :username %>
<%= error_tag f, :username %>
<%= label f, :email %>
<%= text_input f, :email %>
<%= error_tag f, :email %>
<%= submit "Save" %>
<% end %>
Next, your Live View picks up the events in `handle_event` callbacks:
def render(assigns) ...
def mount(_session, socket) do
{:ok, assign(socket, %{changeset: Accounts.change_user(%User{})})}
end
def handle_event("validate", %{"user" => params}, socket) do
changeset =
%User{}
|> Accounts.change_user(params)
|> Map.put(:action, :insert)
{:noreply, assign(socket, changeset: changeset)}
end
def handle_event("save", %{"user" => user_params}, socket) do
case Accounts.create_user(user_params) do
{:ok, user} ->
{:stop,
socket
|> put_flash(:info, "user created")
|> redirect(to: Routes.user_path(AppWeb.Endpoint, AppWeb.User.ShowView, user))}
{:error, %Ecto.Changeset{} = changeset} ->
{:noreply, assign(socket, changeset: changeset)}
end
end
The validate callback simply updates the changeset based on all form input
values, then assigns the new changeset to the socket. If the changeset
changes, such as generating new errors, `render/1` is invoked and
the form is re-rendered.
Likewise for `phx-submit` bindings, the save callback is invoked and
persistence is attempted. On success, a `:stop` tuple is returned and the
socket is annotated for redirect with `Phoenix.LiveView.redirect/2`,
otherwise the socket assigns are updated with the errored changeset to be
re-rendered for the client.
*Note*: For proper form error tag updates, the error tag must specify which
input it belongs to. This is accomplished with the `data-phx-error-for` attribute.
For example, your `AppWeb.ErrorHelpers` may use this function:
def error_tag(form, field) do
Enum.map(Keyword.get_values(form.errors, field), fn error ->
content_tag(:span, translate_error(error),
class: "help-block",
data: [phx_error_for: input_id(form, field)]
)
end)
end
### Key Events
The onkeypress, onkeydown, and onkeyup events are supported via
the `phx-keypress`, `phx-keydown`, and `phx-keyup` bindings. When
pushed, the value sent to the server will be the event's keyCode.
By default, the bound element will be the event listener, but an
optional `phx-target` may be provided which may be `"document"`,
`"window"`, or the DOM id of a target element, for example:
@up_key 38
@down_key 40
def render(assigns) do
~L\"""
<div id="thermostat" phx-keyup="update_temp" phx-target="document">
Current temperature: <%= @temperature %>
</div>
\"""
end
def handle_event("update_temp", @up_key, socket) do
{:ok, new_temp} = Thermostat.inc_temperature(socket.assigns.id)
{:noreply, assign(socket, :temperature, new_temp)}
end
def handle_event("update_temp", @down_key, socket) do
{:ok, new_temp} = Thermostat.dec_temperature(socket.assigns.id)
{:noreply, assign(socket, :temperature, new_temp)}
end
def handle_event("update_temp", _key, socket) do
{:noreply, socket}
end
"""
alias Phoenix.LiveView
alias Phoenix.LiveView.Socket
@type unsigned_params :: map
@type from :: binary
@callback mount(session :: map, Socket.t()) ::
{:ok, Socket.t()} | {:stop, Socket.t()}
@callback render(Socket.assigns()) :: Phoenix.LiveView.Rendered.t()
@callback terminate(
reason :: :normal | :shutdown | {:shutdown, :left | :closed | term},
Socket.t()
) :: term
@callback handle_event(event :: binary, unsigned_params, Socket.t()) ::
{:noreply, Socket.t()} | {:stop, Socket.t()}
@optional_callbacks terminate: 2, mount: 2, handle_event: 3
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__), except: [render: 2]
@behaviour unquote(__MODULE__)
@impl unquote(__MODULE__)
def mount(_session, socket), do: {:ok, socket}
@impl unquote(__MODULE__)
def terminate(_reason, state), do: {:ok, state}
defoverridable mount: 2, terminate: 2
end
end
@doc """
Renders a Live View within an originating plug request or
within a parent Live View.
## Options
* `:session` - the map of session data to sign and send
to the client. When connecting from the client, the Live View
will receive the signed session from the client and verify
the contents before proceeding with `mount/2`.
## Examples
# within eex template
<%= live_render(@conn, MyApp.ThermostatLive) %>
# within leex template
<%= live_render(@socket, MyApp.ThermostatLive) %>
"""
def live_render(conn_or_socket, view, opts \\ []) do
opts = Keyword.put_new(opts, :session, %{})
do_live_render(conn_or_socket, view, opts)
end
defp do_live_render(%Plug.Conn{} = conn, view, opts) do
endpoint = Phoenix.Controller.endpoint_module(conn)
case LiveView.View.static_render(endpoint, view, opts) do
{:ok, content} ->
content
{:stop, {:redirect, _opts}} ->
raise RuntimeError, """
attempted to redirect from #{inspect(view)} while rendering Plug request.
Redirects from live renders inside a Plug request are not supported.
"""
end
end
defp do_live_render(%Socket{} = parent, view, opts) do
case LiveView.View.nested_static_render(parent, view, opts) do
{:ok, content} -> content
{:stop, reason} -> throw({:stop, reason})
end
end
@doc """
Returns true if the socket is connected.
Useful for checking the connectivity status when mounting the view.
For example, on initial page render, the view is mounted statically,
rendered, and the HTML is sent to the client. Once the client
connects to the server, a Live View is then spawned and mounted
statefully within a process. Use `connected?/1` to conditionally
perform stateful work, such as subscribing to pubsub topics,
sending messages, etc.
## Examples
defmodule DemoWeb.ClockView do
use Phoenix.LiveView
...
def mount(_session, socket) do
if connected?(socket), do: :timer.send_interval(1000, self(), :tick)
{:ok, assign(socket, date: :calendar.local_time())}
end
def handle_info(:tick, socket) do
{:noreply, assign(socket, date: :calendar.local_time())}
end
end
"""
def connected?(%Socket{} = socket) do
LiveView.View.connected?(socket)
end
@doc """
Adds key value pairs to socket assigns.
A single key-value pair may be passed, or a keyword list or map
of assigns may be provided to be merged into the existing
socket assigns.
## Examples
iex> assign(socket, :name, "Elixir")
iex> assign(socket, name: "Elixir", logo: "💧")
"""
def assign(%Socket{} = socket, key, value) do
assign(socket, [{key, value}])
end
def assign(%Socket{} = socket, attrs)
when is_map(attrs) or is_list(attrs) do
Enum.reduce(attrs, socket, fn {key, val}, acc ->
case Map.fetch(acc.assigns, key) do
{:ok, ^val} -> acc
{:ok, _old_val} -> do_assign(acc, key, val)
:error -> do_assign(acc, key, val)
end
end)
end
defp do_assign(%Socket{assigns: assigns, changed: changed} = acc, key, val) do
new_changed = Map.put(changed || %{}, key, true)
new_assigns = Map.put(assigns, key, val)
%Socket{acc | assigns: new_assigns, changed: new_changed}
end
@doc """
Updates an existing key in the socket assigns.
The update function receives the current key's value and
returns the updated value. Raises if the key does not exist.
## Examples
iex> update(socket, :count, fn count -> count + 1 end)
iex> update(socket, :count, &(&1 + 1))
"""
def update(%Socket{assigns: assigns} = socket, key, func) do
case Map.fetch(assigns, key) do
{:ok, val} -> assign(socket, key, func.(val))
:error -> raise KeyError, key: key, term: assigns
end
end
@doc """
Adds a flash message to the socket to be displayed on redirect.
*Note*: the `Phoenix.LiveView.Flash` plug must be plugged in
your browser's pipeline for flash to be supported, for example:
pipeline :browser do
plug :accepts, ["html"]
plug :fetch_session
plug Phoenix.LiveView.Flash
...
end
## Examples
iex> put_flash(socket, :info, "It worked!")
iex> put_flash(socket, :error, "You can't access that page")
"""
def put_flash(%Socket{private: private} = socket, kind, msg) do
new_private = Map.update(private, :flash, %{kind => msg}, &Map.put(&1, kind, msg))
%Socket{socket | private: new_private}
end
@doc """
Annotates the socket for redirect to a destination path.
*Note*: Live View redirects rely on instructing the client
to perform a `window.location` update on the provided
redirect location.
TODO: support `:external` and validate that `:to` is a local path
## Options
* `:to` - the path to redirect to
"""
def redirect(%Socket{} = socket, opts) do
LiveView.View.put_redirect(socket, Keyword.fetch!(opts, :to))
end
@doc """
Provides `~L` sigil with HTML safe Live EEx syntax inside source files.
iex> ~L"\""
...> Hello <%= "world" %>
...> "\""
{:safe, ["Hello ", "world", "\\n"]}
"""
defmacro sigil_L({:<<>>, _, [expr]}, []) do
EEx.compile_string(expr, engine: Phoenix.LiveView.Engine, line: __CALLER__.line + 1)
end
end
# End of file: lib/phoenix_live_view.ex
defmodule Vix.Vips.Interpolate do
@moduledoc """
Make interpolators for operators like `affine` and `mapim`.
"""
alias __MODULE__
alias Vix.Nif
alias Vix.Type
@behaviour Type
defstruct [:ref]
@typedoc """
Represents an instance of VipsInterpolate
"""
@type t() :: %Interpolate{ref: reference()}
@impl Type
def typespec do
quote do
unquote(__MODULE__).t()
end
end
@impl Type
def default(nil), do: :unsupported
@impl Type
def to_nif_term(interpolate, _data) do
case interpolate do
%Interpolate{ref: ref} ->
ref
value ->
raise ArgumentError, message: "expected Vix.Vips.Interpolate. given: #{inspect(value)}"
end
end
@impl Type
def to_erl_term(ref), do: %Interpolate{ref: ref}
@doc """
Make a new interpolator by name.
Make a new interpolator from the libvips class nickname. For example:
```elixir
{:ok, interpolate} = Interpolate.new("bilinear")
```
You can get a list of all supported interpolators from the command-line with:
```shell
$ vips -l interpolate
```
See for example `affine`.
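As a sketch of how the interpolator might be consumed (this assumes
`Vix.Vips.Operation.affine/3` accepts an `:interpolate` option, which is not
verified here):
```elixir
{:ok, interpolate} = Interpolate.new("bicubic")
# Scale an image by 2x in both dimensions using the custom interpolator
{:ok, scaled} =
  Vix.Vips.Operation.affine(image, [2.0, 0.0, 0.0, 2.0], interpolate: interpolate)
```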
"""
@spec new(String.t()) :: {:ok, __MODULE__.t()} | {:error, term()}
def new(name) do
if String.valid?(name) do
Nif.nif_interpolate_new(name)
|> wrap_type()
else
{:error, "expected UTF-8 binary string"}
end
end
@doc """
Make a new interpolator by name.
Make a new interpolator from the libvips class nickname. For example:
```elixir
interpolate = Interpolate.new!("bilinear")
```
You can get a list of all supported interpolators from the command-line with:
```shell
$ vips -l interpolate
```
See for example `affine`.
"""
@spec new!(String.t()) :: __MODULE__.t()
def new!(name) do
case new(name) do
{:ok, interpolate} ->
interpolate
{:error, error} ->
raise error
end
end
defp wrap_type({:ok, ref}), do: {:ok, %Interpolate{ref: ref}}
defp wrap_type(value), do: value
end
# End of file: lib/vix/vips/interpolate.ex
defmodule Exglicko2 do
@moduledoc """
Tools for working with Glicko-2 ratings.
Players are represented by a `Exglicko2.Player` struct.
You can get a new, default struct with the `new_player/0` function.
iex> Exglicko2.new_player()
%Exglicko2.Player{rating: 0.0, deviation: 2.0, volatility: 0.06}
Once your players have ratings, the games can begin!
Game results are represented by a number ranging from zero to one,
with a one representing a win, and a zero representing a loss.
Ratings are updated with a list of game results passed to the `update_player/3` function.
Game results are batched into a list of tuples, with the first element being the opponent's
`Exglicko2.Player` struct and the second being the resulting score.
This function also accepts an optional system constant, which governs how much ratings are allowed to change.
This value must be between 0.4 and 1.2, and is 0.5 by default.
iex> player = Exglicko2.Player.new(0.0, 1.2, 0.06)
iex> results = [
...> {Exglicko2.new_player(-0.6, 0.2, 0), 1},
...> {Exglicko2.new_player(0.3, 0.6, 0), 0},
...> {Exglicko2.new_player(1.2, 1.7, 0), 0}
...> ]
iex> Exglicko2.update_player(player, results, tau: 0.5)
%Exglicko2.Player{rating: -0.21522518921916625, deviation: 0.8943062104659615, volatility: 0.059995829968027437}
Here is some guidance on the optimal number of games to pass into the `update_player/3` function,
directly from the original paper:
> The Glicko-2 system works best when the number of games in a rating period is moderate to large,
> say an average of at least 10-15 games per player in a rating period.
> The length of time for a rating period is at the discretion of the administrator.
If you use the older Glicko rating system,
you can convert a player back-and-forth using the `Exglicko2.Player.from_glicko/1` and `Exglicko2.Player.to_glicko/1` functions.
iex> Exglicko2.Player.from_glicko({1500.0, 350, 0.06})
%Exglicko2.Player{rating: 0.0, deviation: 2.014761872416068, volatility: 0.06}
"""
alias Exglicko2.Player
@doc """
Create a new player with a default rating.
"""
def new_player do
Player.new()
end
@doc """
Create a new player with the given rating.
"""
def new_player(rating, deviation, volatility) do
Player.new(rating, deviation, volatility)
end
@doc """
Update a player's rating based on game results.
Each player is represented by a tuple of the player's rating, their rating deviation, and their rating volatility.
Game results are batched into a list of tuples, with the first element being the opponent's values,
and the second being the resulting score between zero and one.
You can also specify a system constant, called `:tau`, which governs how much ratings are allowed to change.
This value must be between 0.4 and 1.2, and the default is 0.5.
## Example
A player with a rating of 0.0, a deviation of 1.2, and a volatility of 0.06 plays three games.
- Against the first opponent, they win. Thus the score is 1.
- Against the second opponent, they lose. Thus the score is 0.
- Against the third opponent, they lose again. Thus the score is 0.
The result is that the player's rating drops to about -0.2, their deviation drops to about 0.9, and their volatility drops slightly.
iex> player = Exglicko2.Player.new(0.0, 1.2, 0.06)
iex> results = [
...> {Exglicko2.Player.new(-0.6, 0.2, 0.06), 1},
...> {Exglicko2.Player.new(0.3, 0.6, 0.06), 0},
...> {Exglicko2.Player.new(1.2, 1.7, 0.06), 0}
...> ]
iex> Exglicko2.update_player(player, results, tau: 0.5)
%Exglicko2.Player{rating: -0.21522518921916625, deviation: 0.8943062104659615, volatility: 0.059995829968027437}
"""
def update_player(%Player{} = player, results, opts \\ []) do
system_constant = Keyword.get(opts, :tau, 0.5)
if not is_number(system_constant) or system_constant < 0.4 or system_constant > 1.2 do
raise "System constant must be a number between 0.4 and 1.2, but it was #{inspect system_constant}"
end
Player.update_rating(player, results, system_constant)
end
@doc """
Updates a whole team of players with `update_rating/3`.
Instead of individual player structs, pass in lists of players, like this:
iex> team_one = [
...> Exglicko2.new_player(-0.6, 0.2, 0.06),
...> Exglicko2.new_player(0.3, 0.6, 0.06),
...> Exglicko2.new_player(1.2, 1.7, 0.06)
...> ]
...> team_two = [
...> Exglicko2.new_player(-0.6, 0.2, 0.06),
...> Exglicko2.new_player(0.3, 0.6, 0.06),
...> Exglicko2.new_player(1.2, 1.7, 0.06)
...> ]
...> results = [
...> {team_two, 1}
...> ]
...> Exglicko2.update_team(team_one, results)
[
%Exglicko2.Player{
rating: -0.5727225148150104,
deviation: 0.20801152963424144,
volatility: 0.05999777767142373
},
%Exglicko2.Player{
rating: 0.45366492480429327,
deviation: 0.581562104768686,
volatility: 0.059997452826507966
},
%Exglicko2.Player{
rating: 1.7340823171025699,
deviation: 1.3854013493398154,
volatility: 0.05999869242065375
}
]
"""
def update_team(team, results, opts \\ []) when is_list(team) do
results =
Enum.map(results, fn {opponents, result} ->
{Player.composite(opponents), result}
end)
Enum.map(team, fn player ->
update_player(player, results, opts)
end)
end
end
# End of file: lib/exglicko2.ex
defmodule Tortoise311 do
@moduledoc """
A MQTT client for Elixir.
`Tortoise311` provides ways of publishing messages to, and receiving
messages from one or many MQTT brokers via TCP or SSL. The design
philosophy of Tortoise311 is to hide the protocol specific details from
the user, and expose interfaces and a connection life cycle that
should feel natural to Elixir, while not limiting the capability of
what one can do with the MQTT protocol.
First off, connection to a broker happens through a connection
specification. This results in a process that can be supervised,
either by the application the connection should live and die with,
or by being supervised by the Tortoise311 application itself. Once the
connection is established the Tortoise311 application should do its
best to keep that connection open, by automatically sending keep
alive messages (as the protocol specifies), and eventually attempt
to reconnect if the connection should drop.
Secondly, a connection is specified with a user defined callback
module, following the `Tortoise311.Handler`-behaviour, which allows the
user to hook into certain events happening in the life cycle of the
connection. This way code can get executed when:
- The connection is established
- The client has been disconnected from the broker
- A topic filter subscription has been accepted (or declined)
- A topic filter has been successfully unsubscribed
- A message is received on one of the subscribed topic filters
Besides this there are hooks for the usual life-cycle events one
would expect, such as `init/1` and `terminate/2`.
Thirdly, publishing is handled in such a way that the semantics of
the levels of Quality of Service, specified by the MQTT protocol, are
mapped to the Elixir message passing semantics. Tortoise311 exposes an
interface for publishing messages that hides the protocol details of
message delivery (retrieval of acknowledge, release, complete
messages) and instead provides `Tortoise311.publish/4`, which will
deliver the message to the broker and receive a response in the
process mailbox when a message with a QoS>0 has been handed to the
server. This allows the user to keep track of the messages that have
been delivered, or to simply use the `Tortoise311.publish_sync/4`
form that will block the calling process until the message has been
safely handed to the broker. Messages with QoS1 or QoS2 are stored
in a process until they are delivered, so once they are published
the client should retry delivery to make sure they reach their
destination.
An alternative way of posting messages is implemented in
`Tortoise311.Pipe`, which provides a data structure that among other
things keeps a reference to the connection socket. This allows for an
efficient way of posting messages because the data can get shot
directly onto the wire without having to copy the message between
processes (unless the message has a QoS of 1 or 2, in which case
they will end up in a process to ensure they will get
delivered). The pipe will automatically renew its connection socket
if the connection has been dropped, so ideally this message sending
approach should be fast and efficient.
"""
alias Tortoise311.Package
alias Tortoise311.Connection
alias Tortoise311.Connection.Inflight
@typedoc """
An identifier used to identify the client on the server.
Most servers accept a maximum of 23 UTF-8 encoded bytes for a client
id, and only the characters:
- "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
Tortoise311 accepts atoms as client ids, but they will be converted to
a string before going on the wire. Be careful with atoms such as
`Example` because they are expanded to the atom `:"Elixir.Example"`,
so it is really easy to hit the maximum byte limit. Solving this is
easy, just add a `:` before the client id such as `:Example`.
"""
@type client_id() :: atom() | String.t()
@typedoc """
A 16-bit number identifying a message in a message exchange.
Some MQTT packages are part of a message exchange and need an
identifier so the server and client can distinguish between multiple
in-flight messages.
Tortoise311 will assign package identifiers to packages that need them,
so outside of tests (where it is beneficial to assert on the
identifier of a package) it should be left to Tortoise311 itself;
just leave it as `nil`.
"""
@type package_identifier() :: 0x0001..0xFFFF | nil
@typedoc """
What Quality of Service (QoS) mode should be used.
Quality of Service is one of 0, 1, and 2 denoting the following:
- `0` no quality of service. The message is a fire and forget.
- `1` at least once delivery. The receiver will respond with an
acknowledge message, so the sender will be certain that the
message has reached the destination. It is possible that a message
will be delivered twice though, as the package identifier for a
publish will be relinquished when the message has been
acknowledged, so a package with the same identifier will be
treated as a new message though it might be a re-transmission.
- `2` exactly once delivery. The receiver will only receive the
message once. This happens by having a more elaborate message
exchange than the QoS=1 variant.
There is a difference in the semantics of assigning a QoS to a
publish and a subscription. When assigned to a publish the message
will get delivered to the server with the requested QoS; that is, if
it accepts that level of QoS for the given topic.
When used in the context of a subscription it should be read as *the
maximum QoS*. When messages are published to the subscribed topic
the message will get on-warded with the same QoS as it was
delivered with, or downgraded to the maximum QoS of the subscription
for the given subscribing client. That is, if the client subscribes
with a maximum QoS=2 and a message is published to said topic with a
QoS=1, the message will get downgraded to QoS=1 when on-warded to
the client.
"""
@type qos() :: 0..2
@typedoc """
A topic for a message.
According to the MQTT 3.1.1 specification a valid topic must be at
least one character long. They are case sensitive and can include
space characters.
MQTT topics consist of topic levels which are delimited with forward
slashes `/`. A topic with a leading or trailing forward slash is
allowed but they create distinct topics from the ones without;
`/sports/tennis/results` is different from
`sports/tennis/results`. While a topic level normally requires at
least one character, the topic `/` (a single forward slash) is valid.
The server will drop the connection if it receives an invalid topic.
"""
@type topic() :: String.t()
@typedoc """
A topic filter for a subscription.
The topic filter is different from a `topic` because it is allowed
to contain wildcard characters:
- `+` is a single level wildcard which is allowed to stand on any
position in the topic filter. For instance: `sport/+/results` will
match `sport/tennis/results`, `sport/soccer/results`, etc.
- `#` is a multi-level wildcard and is only allowed to be on the
last position of the topic filter. For instance: `sport/#` will
match `sport/tennis/results`, `sport/tennis/announcements`, etc.
The server will reject any invalid topic filter and close the
connection.
"""
@type topic_filter() :: String.t()
@typedoc """
An optional message payload.
A message can optionally have a payload. The payload is a series of
bytes and for MQTT 3.1.1 the payload has no defined structure; any
series of bytes will do, and the client has to make sense of it.
The payload will be `nil` if there is no payload. This is done to
distinguish between a zero byte binary and the absence of a payload.
"""
@type payload() :: binary() | nil
@doc """
Publish a message to the MQTT broker.
The publish function requires a `client_id` and a valid MQTT
topic. If no `payload` is set an empty zero byte message will get
sent to the broker.
Optionally an options list can get passed to the publish, making it
possible to specify if the message should be retained on the server,
and with what quality of service the message should be published
with.
* `retain` indicates, when set to `true`, that the broker should
retain the message for the topic. Retained messages are
delivered to clients when they subscribe to the topic. Only one
message at a time can be retained for a given topic, so sending
a new one will overwrite the old. `retain` defaults to `false`.
* `qos` sets the quality of service, an integer of 0, 1, or 2. The
`qos` defaults to `0`.
Publishing a message with the payload *hello* to the topic *foo/bar*
with a *QoS1* could look like this:
Tortoise311.publish("client_id", "foo/bar", "hello", qos: 1)
Notice that if you want to send a message with an empty payload with
options, you will have to set the payload to nil like this:
Tortoise311.publish("client_id", "foo/bar", nil, retain: true)
## Return Values
The specified Quality of Service for a given publish will alter the
behaviour of the return value. When publishing a message with a QoS0
an `:ok` will simply get returned. This is because QoS0 is "fire
and forget"; no effort is made to ensure that the message reaches
its destination (though it very likely will).
:ok = Tortoise311.publish("client_id", "foo/bar", nil, qos: 0)
When a message is published using either a QoS1 or QoS2, Tortoise311
will ensure that the message is delivered. A unique reference will
get returned and eventually a message will get delivered to the
process mailbox, containing the result of the publish when it has
been handed over:
{:ok, ref} = Tortoise311.publish("client_id", "foo/bar", nil, qos: 2)
receive do
{{Tortoise311, "client_id"}, ^ref, result} ->
IO.inspect({:result, result})
after
5000 ->
{:error, :timeout}
end
Be sure to implement a `handle_info/2` in `GenServer` processes that
publish messages using `Tortoise311.publish/4`. Notice that the message
delivered to the mailbox has the structure:
{{Tortoise311, "client_id"}, ^ref, result}
It is possible to send to multiple clients and blanket match on
results designated for a given client id, and the message is tagged
with `Tortoise311` so it is easy to see where the message originated
from.
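As an illustrative sketch (not part of the library; it assumes the
`ref` returned by the publish was stored under `:pending` in the
process state), such a clause could look like:
def handle_info({{Tortoise311, _client_id}, ref, result}, %{pending: ref} = state) do
# handle the outcome of the publish, e.g. log it or retry on error
IO.inspect({:publish_result, result})
{:noreply, %{state | pending: nil}}
end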
"""
@spec publish(client_id(), topic(), payload, [options]) ::
:ok | {:ok, reference()} | {:error, :unknown_connection} | {:error, :timeout}
when payload: binary() | nil,
options:
{:qos, qos()}
| {:retain, boolean()}
| {:identifier, package_identifier()}
| {:timeout, non_neg_integer()}
def publish(client_id, topic, payload \\ nil, opts \\ []) do
qos = Keyword.get(opts, :qos, 0)
publish = %Package.Publish{
topic: topic,
qos: qos,
payload: payload,
retain: Keyword.get(opts, :retain, false)
}
timeout = Keyword.get(opts, :timeout, :infinity)
with {:ok, {transport, socket}} <- Connection.connection(client_id, timeout: timeout) do
case publish do
%Package.Publish{qos: 0} ->
encoded_publish = Package.encode(publish)
apply(transport, :send, [socket, encoded_publish])
%Package.Publish{qos: qos} when qos in [1, 2] ->
Inflight.track(client_id, {:outgoing, publish})
end
else
{:error, :unknown_connection} ->
{:error, :unknown_connection}
{:error, :timeout} ->
{:error, :timeout}
end
end
@doc """
Synchronously send a message to the MQTT broker.
This is very similar to `Tortoise311.publish/4` with the difference
that it will block the calling process until the message has been
handed over to the server; the configuration options are the same
with the addition of the `timeout` option which specifies how long
we are willing to wait for a reply. By default the timeout is set
to `:infinity`; it is advisable to set it to a reasonable amount in
milliseconds as it could otherwise block forever.
msg = "Hello, from the World of Tomorrow !"
case Tortoise311.publish_sync("my_client_id", "foo/bar", msg, qos: 2, timeout: 200) do
:ok ->
:done
{:error, :timeout} ->
:timeout
end
Notice: It does not make sense to use `publish_sync/4` for a publish
with QoS=0, because that will return instantly anyway. It is
supported for consistency, and QoS=0 is the default QoS.
See the documentation for `Tortoise311.publish/4` for configuration.
"""
@spec publish_sync(client_id(), topic(), payload, [options]) ::
:ok | {:error, :unknown_connection} | {:error, :timeout}
when payload: binary() | nil,
options:
{:qos, qos()}
| {:retain, boolean()}
| {:identifier, package_identifier()}
| {:timeout, timeout()}
def publish_sync(client_id, topic, payload \\ nil, opts \\ []) do
timeout = Keyword.get(opts, :timeout, :infinity)
qos = Keyword.get(opts, :qos, 0)
publish = %Package.Publish{
topic: topic,
qos: qos,
payload: payload,
retain: Keyword.get(opts, :retain, false)
}
with {:ok, {transport, socket}} <- Connection.connection(client_id, timeout: timeout) do
case publish do
%Package.Publish{qos: 0} ->
encoded_publish = Package.encode(publish)
apply(transport, :send, [socket, encoded_publish])
%Package.Publish{qos: qos} when qos in [1, 2] ->
Inflight.track_sync(client_id, {:outgoing, publish}, timeout)
end
else
{:error, :unknown_connection} ->
{:error, :unknown_connection}
{:error, :timeout} ->
{:error, :timeout}
end
end
end
| lib/tortoise311.ex | 0.938379 | 0.547222 | tortoise311.ex | starcoder |
defmodule Kvasir.Offset do
@moduledoc ~S"""
Kafka offset per partition.
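## Example
An illustrative sketch (partition numbers and offsets are made up):
offset = Kvasir.Offset.create(%{0 => 10, 1 => 5})
offset = Kvasir.Offset.set(offset, 1, 7)
Kvasir.Offset.partition(offset, 1)
#=> 7
Kvasir.Offset.compare(offset, %{0 => 10, 1 => 7})
#=> :eq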
"""
@typedoc @moduledoc
@type t :: %__MODULE__{} | :earliest
defstruct partitions: %{},
offset: 0
@spec create(map | list) :: t
def create(partitions \\ [])
def create(offset = %__MODULE__{}), do: offset
def create(partitions) do
%__MODULE__{
partitions:
Enum.into(partitions, %{}, fn
{p, o} when is_binary(p) -> {String.to_integer(p), o}
{p, o} -> {p, o}
p -> {p, 0}
end)
}
end
@spec create(non_neg_integer, non_neg_integer) :: t
def create(partition, offset), do: %__MODULE__{partitions: %{partition => offset}}
@spec offset(t) :: non_neg_integer
def offset(%__MODULE__{partitions: partitions}) do
partitions
|> Map.values()
|> Enum.sum()
end
@spec set(t, t) :: t
def set(:earliest, set), do: set(create(), set)
def set(t = %__MODULE__{partitions: partitions}, %__MODULE__{partitions: set}),
do: %{t | partitions: Map.merge(partitions, set)}
@spec set(t, non_neg_integer, non_neg_integer) :: t
def set(t = %__MODULE__{partitions: partitions}, partition, offset),
do: %{t | partitions: Map.put(partitions, partition, offset)}
@spec partition(t, non_neg_integer) :: t
def partition(:earliest, _partition), do: :earliest
def partition(%__MODULE__{partitions: partitions}, partition),
do: Map.get(partitions, partition, 0)
@spec partitions(t) :: [non_neg_integer]
def partitions(%__MODULE__{partitions: partitions}), do: Map.keys(partitions)
@spec compare(t, t | map | list) :: :lt | :eq | :gt
def compare(:earliest, :earliest), do: :eq
def compare(:earliest, %__MODULE__{partitions: to}), do: if(to == %{}, do: :eq, else: :lt)
def compare(%__MODULE__{partitions: from}, :earliest), do: if(from == %{}, do: :eq, else: :gt)
def compare(%__MODULE__{partitions: partitions}, %__MODULE__{partitions: to}),
do: do_compare(partitions, Enum.to_list(to))
def compare(%__MODULE__{partitions: partitions}, to),
do: do_compare(partitions, Enum.to_list(to))
@spec do_compare(map, list) :: :lt | :eq | :gt
def do_compare(_, []), do: :eq
def do_compare(partitions, [{p, o} | t]) do
cmp = Map.get(partitions, p)
cond do
is_nil(cmp) -> do_compare(partitions, t)
cmp == o -> do_compare(partitions, t)
cmp < o -> :lt
cmp > o -> :gt
end
end
defimpl Inspect, for: __MODULE__ do
def inspect(offset, _opts), do: "#Kvasir.Offset<#{offset}>"
end
defimpl String.Chars, for: __MODULE__ do
def to_string(%Kvasir.Offset{partitions: partitions}) do
case Enum.to_list(partitions) do
[] -> "0"
[{0, o}] -> Kernel.to_string(o)
offsets -> offsets |> Enum.map(fn {p, o} -> "#{p}:#{o}" end) |> Enum.join(",")
end
end
end
defimpl Jason.Encoder, for: __MODULE__ do
alias Jason.Encoder.Map
def encode(%Kvasir.Offset{partitions: partitions}, opts), do: Map.encode(partitions, opts)
end
end
| lib/kvasir/offset.ex | 0.863233 | 0.506836 | offset.ex | starcoder |
defmodule Ecto.Model.Schema do
@moduledoc """
Defines a schema for a model.
A schema is a struct with associated metadata that is persisted to a
repository. Every schema model is also a struct, that means that you work
with models just like you would work with structs.
## Example
defmodule User do
use Ecto.Model.Schema
schema "users" do
field :name, :string
field :age, :integer, default: 0
has_many :posts, Post
end
end
This module also automatically imports `from/2` from `Ecto.Query`
as a convenience.
## Schema defaults
When using the block syntax, the created model uses the default
of a primary key named `:id`, of type `:integer`. This can be
customized by passing `primary_key: false` to schema:
schema "weather", primary_key: false do
...
end
Or by passing a tuple in the format `{field, type, opts}`:
schema "weather", primary_key: {:custom_field, :string, []} do
...
end
Implicit defaults can be specified via the `@schema_defaults` attribute.
This is useful if you want to use a different default primary key
through your entire application.
The supported options are:
* `primary_key` - either `false`, or a `{field, type, opts}` tuple
* `foreign_key_type` - sets the type for any `belongs_to` associations.
This can be overridden using the `:type` option
to the `belongs_to` statement. Defaults to
type `:integer`
## Example
defmodule MyApp.Model do
defmacro __using__(_) do
quote do
@schema_defaults primary_key: {:uuid, :string, []},
foreign_key_type: :string
use Ecto.Model
end
end
end
defmodule MyApp.Post do
use MyApp.Model
schema "posts" do
has_many :comments, MyApp.Comment
end
end
defmodule MyApp.Comment do
use MyApp.Model
schema "comments" do
belongs_to :post, MyApp.Comment
end
end
Any models using `MyApp.Model` will get the `:uuid` field, with type
`:string`, as the primary key.
The `belongs_to` association on `MyApp.Comment` will also now require
that `:post_id` be of `:string` type to reference the `:uuid` of a
`MyApp.Post` model.
## Setting Primary Keys with Schema Defaults
In the example above, the `:uuid` primary key field needs to be
explicitly set by the developer before the Model can be inserted
or updated in a database.
To set a primary key, the developer **must** call the function
`Ecto.Model.put_primary_key/2`.
Example:
uuid = "some_uuid"
# Don't do this
post = %MyApp.Post{uuid: uuid}
# Do this instead
post = Ecto.Model.put_primary_key(%MyApp.Post{}, uuid)
This must be done in order to ensure that any associations of the Model
are appropriately updated.
## Reflection
Any schema module will generate the `__schema__` function that can be used for
runtime introspection of the schema.
* `__schema__(:source)` - Returns the source as given to `schema/2`;
* `__schema__(:field_type, field)` - Returns the type of the given field;
* `__schema__(:field_names)` - Returns a list of all field names;
* `__schema__(:associations)` - Returns a list of all association field names;
* `__schema__(:association, field)` - Returns the given field's association
reflection;
* `__schema__(:primary_key)` - Returns the field that is the primary key or
`nil` if there is none;
* `__schema__(:allocate, values)` - Creates a new model struct from the given
field values;
* `__schema__(:keywords, model)` - Returns a keyword list of all non-virtual
fields and their values;
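As an illustration, given the `User` schema defined at the top of this
module's documentation:
User.__schema__(:source)           #=> "users"
User.__schema__(:field_names)      #=> [:id, :name, :age]
User.__schema__(:field_type, :age) #=> :integer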
"""
@doc false
defmacro __using__(_) do
quote do
# TODO: Move those imports out to Ecto.Model
import Ecto.Query, only: [from: 2]
import Ecto.Model, only: [primary_key: 1, put_primary_key: 2, scoped: 2]
import Ecto.Model.Schema, only: [schema: 2, schema: 3]
end
end
@doc """
Defines a schema with a source name and field definitions.
"""
defmacro schema(source, opts \\ [], block)
defmacro schema(source, opts, [do: block]) do
quote do
opts = (Module.get_attribute(__MODULE__, :schema_defaults) || [])
|> Keyword.merge(unquote(opts))
@ecto_primary_key nil
@ecto_source unquote(source)
Module.register_attribute(__MODULE__, :assign_fields, accumulate: true)
Module.register_attribute(__MODULE__, :struct_fields, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_fields, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true)
@ecto_foreign_key_type opts[:foreign_key_type]
case opts[:primary_key] do
nil ->
Ecto.Model.Schema.field(:id, :integer, primary_key: true)
false ->
:ok
{name, type, opts} ->
Ecto.Model.Schema.field(name, type, Keyword.put(opts, :primary_key, true))
_other ->
raise ArgumentError, message: ":primary_key must be false or {name, type, opts}"
end
try do
import Ecto.Model.Schema
unquote(block)
after
:ok
end
fields = @ecto_fields |> Enum.reverse
assocs = @ecto_assocs |> Enum.reverse
def __schema__(:source), do: @ecto_source
Module.eval_quoted __MODULE__, [
Ecto.Model.Schema.__assign__(@assign_fields, @ecto_primary_key),
Ecto.Model.Schema.__struct__(@struct_fields),
Ecto.Model.Schema.__fields__(fields),
Ecto.Model.Schema.__assocs__(__MODULE__, assocs, @ecto_primary_key, fields),
Ecto.Model.Schema.__primary_key__(@ecto_primary_key),
Ecto.Model.Schema.__helpers__(fields, @ecto_primary_key) ]
end
end
## API
@doc """
Defines a field on the model schema with given name and type, will also create
a struct field.
## Options
* `:default` - Sets the default value on the schema and the struct
* `:virtual` - When true, the field is not persisted
* `:primary_key` - When true, the field is set as primary key
"""
defmacro field(name, type \\ :string, opts \\ []) do
quote do
Ecto.Model.Schema.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts))
end
end
@doc ~S"""
Indicates a one-to-many association with another model, where the current
model has zero or more records of the other model. The other model often
has a `belongs_to` field with the reverse association.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.HasMany` for more information. See the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other model, defaults to: `:"#{model}_id"`;
* `:references` - Sets the key on the current model to be used for the
association, defaults to the primary key on the model;
## Examples
defmodule Post do
schema "posts" do
has_many :comments, Comment
end
end
# Get all comments for a given post
post = Repo.get(Post, 42)
comments = Repo.all(post.comments)
# The comments can come preloaded on the post struct
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments))
post.comments.all #=> [ %Comment{...}, ... ]
# Or via an association join
[post] = Repo.all(from(p in Post,
where: p.id == 42,
left_join: c in p.comments,
select: assoc(p, c)))
post.comments.all #=> [ %Comment{...}, ... ]
"""
defmacro has_many(name, queryable, opts \\ []) do
quote do
Ecto.Model.Schema.__has_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
@doc ~S"""
Indicates a one-to-one association with another model, where the current model
has zero or one records of the other model. The other model often has a
`belongs_to` field with the reverse association.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.HasOne` for more information. Check the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other model, defaults to: `:"#{model}_id"`;
* `:references` - Sets the key on the current model to be used for the
association, defaults to the primary key on the model;
## Examples
defmodule Post do
schema "posts" do
has_one :permalink, Permalink
end
end
# The permalink can come preloaded on the post record
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink))
post.permalink.get #=> %Permalink{...}
# Or via an association join
[post] = Repo.all(from(p in Post,
where: p.id == 42,
left_join: pl in p.permalink,
select: assoc(p, pl)))
post.permalink.get #=> %Permalink{...}
"""
defmacro has_one(name, queryable, opts \\ []) do
quote do
Ecto.Model.Schema.__has_one__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
@doc ~S"""
Indicates a one-to-one association with another model, the current model
belongs to zero or one records of the other model. The other model
often has a `has_one` or a `has_many` field with the reverse association.
Compared to `has_one` this association should be used where you would place
the foreign key on an SQL table.
Creates a virtual field called `name`. The association can be accessed via
this field, see `Ecto.Associations.BelongsTo` for more information. Check the
examples to see how to perform queries on the association and
`Ecto.Query.join/3` for joins.
## Options
* `:foreign_key` - Sets the foreign key field name, defaults to:
`:"#{other_model}_id"`;
* `:references` - Sets the key on the other model to be used for the
association, defaults to: `:id`;
* `:type` - Sets the type of `:foreign_key`. Defaults to: `:integer`;
## Examples
defmodule Comment do
schema "comments" do
belongs_to :post, Post
end
end
# The post can come preloaded on the comment record
[comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post))
comment.post.get #=> %Post{...}
# Or via an association join
[comment] = Repo.all(from(c in Comment,
where: c.id == 42,
left_join: p in c.post,
select: assoc(c, p)))
comment.post.get #=> %Post{...}
"""
defmacro belongs_to(name, queryable, opts \\ []) do
quote do
Ecto.Model.Schema.__belongs_to__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
end
end
## Callbacks
# TODO: Check that the opts are valid for the given type,
# especially check the default value
@doc false
def __field__(mod, name, type, opts) do
check_type!(type, opts[:virtual])
if opts[:primary_key] do
if pk = Module.get_attribute(mod, :ecto_primary_key) do
raise ArgumentError, message: "primary key already defined as `#{pk}`"
else
Module.put_attribute(mod, :ecto_primary_key, name)
end
end
Module.put_attribute(mod, :assign_fields, {name, type})
put_struct_field(mod, name, opts[:default])
unless opts[:virtual] do
Module.put_attribute(mod, :ecto_fields, {name, type, opts})
end
end
@doc false
def __has_many__(mod, name, queryable, opts) do
assoc = Ecto.Associations.HasMany.Proxy.__assoc__(:new, name, mod)
put_struct_field(mod, name, assoc)
opts = [queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, {name, :has_many, opts})
end
@doc false
def __has_one__(mod, name, queryable, opts) do
assoc = Ecto.Associations.HasOne.Proxy.__assoc__(:new, name, mod)
put_struct_field(mod, name, assoc)
opts = [queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, {name, :has_one, opts})
end
@doc false
def __belongs_to__(mod, name, queryable, opts) do
opts = opts
|> Keyword.put_new(:references, :id)
|> Keyword.put_new(:foreign_key, :"#{name}_id")
foreign_key_type =
opts[:type] || Module.get_attribute(mod, :ecto_foreign_key_type) || :integer
__field__(mod, opts[:foreign_key], foreign_key_type, [])
assoc = Ecto.Associations.BelongsTo.Proxy.__assoc__(:new, name, mod)
put_struct_field(mod, name, assoc)
opts = [queryable: queryable] ++ opts
Module.put_attribute(mod, :ecto_assocs, {name, :belongs_to, opts})
end
defp put_struct_field(mod, name, assoc) do
fields = Module.get_attribute(mod, :struct_fields)
if List.keyfind(fields, name, 0) do
raise ArgumentError, message: "field/association `#{name}` is already set on schema"
end
Module.put_attribute(mod, :struct_fields, {name, assoc})
end
## Helpers
@doc false
def __assign__(assign_fields, primary_key) do
map = assign_fields |> Enum.into(%{}) |> Map.delete(primary_key) |> Macro.escape()
quote do
def __assign__ do
unquote(map)
end
end
end
@doc false
def __struct__(struct_fields) do
quote do
defstruct unquote(Macro.escape(struct_fields))
end
end
@doc false
def __fields__(fields) do
quoted = Enum.map(fields, fn {name, type, _opts} ->
quote do
def __schema__(:field_type, unquote(name)), do: unquote(type)
end
end)
field_names = Enum.map(fields, &elem(&1, 0))
quoted ++ [ quote do
def __schema__(:field_type, _), do: nil
def __schema__(:field_names), do: unquote(field_names)
end ]
end
@doc false
def __assocs__(module, assocs, primary_key, fields) do
quoted = Enum.map(assocs, fn {name, type, opts} ->
pk = opts[:references] || primary_key
if is_nil(pk) do
raise ArgumentError, message: "need to set :references option for " <>
"association #{inspect name} when model has no primary key"
end
if type in [:has_many, :has_one] do
unless List.keyfind(fields, pk, 0) do
raise ArgumentError, message: "model does not have the field #{inspect pk} used by " <>
"association #{inspect name}, please set the :references option accordingly"
end
end
refl = Ecto.Associations.create_reflection(type, name,
module, pk, opts[:queryable], opts[:foreign_key])
quote do
def __schema__(:association, unquote(name)) do
unquote(Macro.escape(refl))
end
end
end)
assoc_names = Enum.map(assocs, &elem(&1, 0))
quote do
def __schema__(:associations), do: unquote(assoc_names)
unquote(quoted)
def __schema__(:association, _), do: nil
end
end
@doc false
def __primary_key__(primary_key) do
quote do
def __schema__(:primary_key), do: unquote(primary_key)
end
end
@doc false
def __helpers__(fields, primary_key) do
field_names = Enum.map(fields, &elem(&1, 0))
quote do
# TODO: This can be optimized
def __schema__(:allocate, values) do
zip = Enum.zip(unquote(field_names), values)
pk = Dict.get(zip, unquote(primary_key))
model = struct(__MODULE__, zip)
model = if pk, do: Ecto.Model.put_primary_key(model, pk), else: model
model
end
def __schema__(:keywords, model, opts \\ []) do
keep_pk = Keyword.get(opts, :primary_key, true)
primary_key = unquote(primary_key)
values = Map.take(model, unquote(field_names))
Map.to_list(if keep_pk do
values
else
Map.delete(values, primary_key)
end)
end
end
end
defp check_type!(type, virtual?) do
cond do
type == :any and not virtual? ->
raise ArgumentError, "only virtual fields can have type :any"
Ecto.Query.Types.primitive?(type) ->
true
true ->
raise ArgumentError, "unknown field type `#{inspect type}`"
end
end
end
| lib/ecto/model/schema.ex | 0.856917 | 0.542742 | schema.ex | starcoder |
defmodule Sourceror.Code do
@moduledoc false
alias Sourceror.Code.Formatter
alias Sourceror.Code.Normalizer
@spec string_to_quoted_with_comments(List.Chars.t(), keyword) ::
{:ok, Macro.t(), list(map())} | {:error, {location :: keyword, term, term}}
def string_to_quoted_with_comments(string, opts \\ [])
when is_binary(string) and is_list(opts) do
charlist = to_charlist(string)
file = Keyword.get(opts, :file, "nofile")
line = Keyword.get(opts, :line, 1)
column = Keyword.get(opts, :column, 1)
Process.put(:code_formatter_comments, [])
opts = [preserve_comments: &preserve_comments/5] ++ opts
with {:ok, tokens} <- :sourceror_elixir.string_to_tokens(charlist, line, column, file, opts),
{:ok, forms} <- :sourceror_elixir.tokens_to_quoted(tokens, file, opts) do
comments = Enum.reverse(Process.get(:code_formatter_comments))
{:ok, forms, comments}
end
after
Process.delete(:code_formatter_comments)
end
@spec string_to_quoted_with_comments!(List.Chars.t(), keyword) :: {Macro.t(), list(map())}
def string_to_quoted_with_comments!(string, opts \\ []) do
case string_to_quoted_with_comments(string, opts) do
{:ok, forms, comments} ->
{forms, comments}
{:error, {location, error, token}} ->
:sourceror_errors.parse_error(location, Keyword.get(opts, :file, "nofile"), error, token)
end
end
defp preserve_comments(line, column, tokens, comment, rest) do
comments = Process.get(:code_formatter_comments)
comment = %{
line: line,
column: column,
previous_eol_count: previous_eol_count(tokens),
next_eol_count: next_eol_count(rest, 0),
text: List.to_string(comment)
}
Process.put(:code_formatter_comments, [comment | comments])
end
defp next_eol_count('\s' ++ rest, count), do: next_eol_count(rest, count)
defp next_eol_count('\t' ++ rest, count), do: next_eol_count(rest, count)
defp next_eol_count('\n' ++ rest, count), do: next_eol_count(rest, count + 1)
defp next_eol_count('\r\n' ++ rest, count), do: next_eol_count(rest, count + 1)
defp next_eol_count(_, count), do: count
defp previous_eol_count([{token, {_, _, count}} | _])
when token in [:eol, :",", :";"] and count > 0 do
count
end
defp previous_eol_count([]), do: 1
defp previous_eol_count(_), do: 0
@spec quoted_to_algebra(Macro.t(), keyword) :: Inspect.Algebra.t()
def quoted_to_algebra(quoted, opts \\ []) do
quoted
|> Normalizer.normalize(opts)
|> Formatter.to_algebra(opts)
end
end
| lib/sourceror/code.ex | 0.748444 | 0.451447 | code.ex | starcoder |
defmodule ExW3.Utils do
@type invalid_hex_string :: :invalid_hex_string
@type negative_integer :: :negative_integer
@type non_integer :: :non_integer
@type eth_hex :: String.t()
@doc "Convert eth hex string to integer"
@spec hex_to_integer(eth_hex) ::
{:ok, non_neg_integer} | {:error, invalid_hex_string}
def hex_to_integer(hex) do
case hex do
"0x" <> hex -> {:ok, String.to_integer(hex, 16)}
_ -> {:error, :invalid_hex_string}
end
rescue
ArgumentError ->
{:error, :invalid_hex_string}
end
@doc "Convert an integer to eth hex string"
@spec integer_to_hex(non_neg_integer) ::
{:ok, eth_hex} | {:error, negative_integer | non_integer}
def integer_to_hex(i) do
case i do
i when i < 0 -> {:error, :negative_integer}
i -> {:ok, "0x" <> Integer.to_string(i, 16)}
end
rescue
ArgumentError ->
{:error, :non_integer}
end
@doc "Returns a 0x prepended 32 byte hash of the input string"
@spec keccak256(String.t()) :: String.t()
def keccak256(string) do
{:ok, hash} = ExKeccak.hash_256(string)
"0x#{Base.encode16(hash, case: :lower)}"
end
@unit_map %{
:noether => 0,
:wei => 1,
:kwei => 1_000,
:Kwei => 1_000,
:babbage => 1_000,
:femtoether => 1_000,
:mwei => 1_000_000,
:Mwei => 1_000_000,
:lovelace => 1_000_000,
:picoether => 1_000_000,
:gwei => 1_000_000_000,
:Gwei => 1_000_000_000,
:shannon => 1_000_000_000,
:nanoether => 1_000_000_000,
:nano => 1_000_000_000,
:szabo => 1_000_000_000_000,
:microether => 1_000_000_000_000,
:micro => 1_000_000_000_000,
:finney => 1_000_000_000_000_000,
:milliether => 1_000_000_000_000_000,
:milli => 1_000_000_000_000_000,
:ether => 1_000_000_000_000_000_000,
:kether => 1_000_000_000_000_000_000_000,
:grand => 1_000_000_000_000_000_000_000,
:mether => 1_000_000_000_000_000_000_000_000,
:gether => 1_000_000_000_000_000_000_000_000_000,
:tether => 1_000_000_000_000_000_000_000_000_000_000
}
@doc "Converts the value to whatever unit key is provided. See unit map for details."
@spec to_wei(integer, atom) :: integer
def to_wei(num, key) do
if @unit_map[key] do
num * @unit_map[key]
else
throw("#{key} not valid unit")
end
end
@doc "Converts the value to whatever unit key is provided. See unit map for details."
@spec from_wei(integer, atom) :: integer | float | no_return
def from_wei(num, key) do
if @unit_map[key] do
num / @unit_map[key]
else
throw("#{key} not valid unit")
end
end
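# Illustrative conversions (the values follow directly from @unit_map):
#
#   ExW3.Utils.to_wei(1, :ether)              #=> 1_000_000_000_000_000_000
#   ExW3.Utils.from_wei(1_000_000_000, :gwei) #=> 1.0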
@doc "Returns a checksummed address conforming to EIP-55"
@spec to_checksum_address(String.t()) :: String.t()
def to_checksum_address(address) do
address = address |> String.downcase() |> String.replace(~r/^0x/, "")
{:ok, hash_bin} = ExKeccak.hash_256(address)
hash =
hash_bin
|> Base.encode16(case: :lower)
|> String.replace(~r/^0x/, "")
keccak_hash_list =
hash
|> String.split("", trim: true)
|> Enum.map(fn x -> elem(Integer.parse(x, 16), 0) end)
list_arr =
for n <- 0..(String.length(address) - 1) do
number = Enum.at(keccak_hash_list, n)
cond do
number >= 8 -> String.upcase(String.at(address, n))
true -> String.downcase(String.at(address, n))
end
end
"0x" <> List.to_string(list_arr)
end
@doc "Checks if the address is a valid checksummed address"
@spec is_valid_checksum_address(String.t()) :: boolean
def is_valid_checksum_address(address) do
ExW3.Utils.to_checksum_address(address) == address
end
@doc "converts Ethereum style bytes to string"
@spec bytes_to_string(binary()) :: binary()
def bytes_to_string(bytes) do
bytes
|> Base.encode16(case: :lower)
|> String.replace_trailing("0", "")
|> Base.decode16!(case: :lower)
end
@doc "Converts an Ethereum address into a form that can be used by the ABI encoder"
@spec format_address(binary()) :: integer()
def format_address(address) do
address
|> String.slice(2..-1)
|> Base.decode16!(case: :lower)
|> :binary.decode_unsigned()
end
@doc "Converts bytes to Ethereum address"
@spec to_address(binary()) :: binary()
def to_address(bytes) do
Enum.join(["0x", bytes |> Base.encode16(case: :lower)], "")
end
end
| lib/exw3/utils.ex | 0.856017 | 0.475788 | utils.ex | starcoder |
if Code.ensure_loaded?(Absinthe) do
defmodule Datacop.AbsintheMiddleware.Authorize do
@moduledoc """
Performs authorization for the given resolution.
With Datacop we are able to get `{:dataloader, _dataloader_config}` while authorizing, when
we work with batched fields. To process the result we accumulate these params for all fields.
When the data is ready, we call the appropriate callback and process the results
to return a resolution with either a resolved state or an Absinthe error.
## Example
```elixir
middleware(Datacop.AbsintheMiddleware.Authorize, {:view_users, MyApp.MyContext})
middleware(Datacop.AbsintheMiddleware.Authorize, {:view_users, MyApp.MyContext, opts})
```
We are also able to run this middleware from the resolve function, with a custom callback function:
```elixir
{:middleware, Datacop.AbsintheMiddleware.Authorize,
callback: fn
:ok -> {:ok, true}
error -> {:ok, false}
end}
```
In the latter case this middleware uses `Absinthe.Middleware.Dataloader` under the hood for the `{:dataloader, _config}`
authorization result, and resolves the value with the custom callback.
The `source` field from the resolution is used as the subject when no `:subject` option is given.
You can also pass a function to fetch the loader or actor struct from `resolution.context`, with options like:
```elixir
[actor: &(&1.current_user), loader: &(&1.loader)]
```
"""
@behaviour Absinthe.Middleware
@type opts() :: [
actor: (context :: map() -> Datacop.actor()) | Datacop.actor(),
subject: any(),
loader: (context :: map() -> Dataloader.t()) | Dataloader.t(),
callback: (:ok | {:error, Datacop.UnauthorizedError.t()} -> {:ok, any()} | {:error, map})
]
@impl Absinthe.Middleware
def call(%{state: :unresolved} = resolution, {action, module}), do: call(resolution, {action, module, []})
@impl Absinthe.Middleware
def call(%{state: :unresolved} = resolution, {action, module, opts}) do
actor = get_actor(resolution, opts)
subject = Keyword.get(opts, :subject, resolution.source)
custom_resolver = Keyword.get(opts, :callback)
action
|> module.authorize(actor, subject)
|> Datacop.Policy.normalize_output()
|> process(resolution, module, custom_resolver, opts)
end
@impl Absinthe.Middleware
def call(%{state: :suspended} = resolution, callback) do
resolution.context.loader
|> callback.()
|> case do
:ok -> %{resolution | state: :unresolved}
error -> Absinthe.Resolution.put_result(resolution, error)
end
end
@impl Absinthe.Middleware
def call(resolution, _params), do: resolution
defp process(result, resolution, module, resolver, opts) when is_nil(resolver) do
case result do
{:dataloader, %{source_name: source_name, batch_key: batch_key, inputs: inputs}} ->
loader = resolution |> get_loader(module, opts) |> Dataloader.load(source_name, batch_key, inputs)
on_load = on_load(source_name, batch_key, inputs, opts)
context = Map.put(resolution.context, :loader, loader)
middleware = [{__MODULE__, on_load} | resolution.middleware]
%{resolution | state: :suspended, context: context, middleware: middleware}
:ok ->
resolution
error ->
Absinthe.Resolution.put_result(resolution, error)
end
end
defp process(result, resolution, module, resolver, opts) when not is_nil(resolver) do
case result do
{:dataloader, %{source_name: source_name, batch_key: batch_key, inputs: inputs}} ->
loader = resolution |> get_loader(module, opts) |> Dataloader.load(source_name, batch_key, inputs)
on_load = on_load(source_name, batch_key, inputs, opts)
context = Map.put(resolution.context, :loader, loader)
middleware = [{Absinthe.Middleware.Dataloader, {loader, on_load}} | resolution.middleware]
%{resolution | context: context, middleware: middleware}
result ->
Absinthe.Resolution.put_result(resolution, resolver.(result))
end
end
defp on_load(source_name, batch_key, inputs, opts) do
callback = Keyword.get(opts, :callback, &Function.identity/1)
fn loader ->
loader
|> Dataloader.get(source_name, batch_key, inputs)
|> Datacop.Policy.normalize_output()
|> callback.()
end
end
defp get_actor(resolution, opts) do
case opts[:actor] do
get_actor when is_function(get_actor, 1) -> get_actor.(resolution.context)
actor -> actor
end
end
defp get_loader(resolution, module, opts) do
case opts[:loader] do
nil -> Datacop.default_loader(module)
get_loader when is_function(get_loader, 1) -> get_loader.(resolution.context)
loader -> loader
end
end
end
end
| lib/datacop/absinthe_middleware/authorize.ex | 0.918192 | 0.795698 | authorize.ex | starcoder |
defmodule AWS.Snowball do
@moduledoc """
AWS Snowball is a petabyte-scale data transport solution that uses secure
devices to transfer large amounts of data between your on-premises data
centers and Amazon Simple Storage Service (Amazon S3). The Snowball
commands described here provide access to the same functionality that is
available in the AWS Snowball Management Console, which enables you to
create and manage jobs for Snowball. To transfer data locally with a
Snowball device, you'll need to use the Snowball client or the Amazon S3
API adapter for Snowball. For more information, see the [User
Guide](https://docs.aws.amazon.com/AWSImportExport/latest/ug/api-reference.html).
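An illustrative call (the client map below is an assumption based on
what `request/4` expects, not an official example; credentials are
placeholders):
client = %{access_key_id: "AKIA...", secret_access_key: "...",
region: "us-east-1", endpoint: "amazonaws.com",
proto: "https", port: 443, service: nil}
{:ok, result, _response} = AWS.Snowball.list_jobs(client, %{})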
"""
@doc """
Cancels a cluster job. You can only cancel a cluster job while it's in the
`AwaitingQuorum` status. You'll have at least an hour after creating a
cluster job to cancel it.
"""
def cancel_cluster(client, input, options \\ []) do
request(client, "CancelCluster", input, options)
end
@doc """
Cancels the specified job. You can only cancel a job before its `JobState`
value changes to `PreparingAppliance`. Requesting the `ListJobs` or
`DescribeJob` action returns a job's `JobState` as part of the response
element data returned.
"""
def cancel_job(client, input, options \\ []) do
request(client, "CancelJob", input, options)
end
@doc """
Creates an address for a Snowball to be shipped to. In most regions,
addresses are validated at the time of creation. The address you provide
must be located within the serviceable area of your region. If the address
is invalid or unsupported, then an exception is thrown.
"""
def create_address(client, input, options \\ []) do
request(client, "CreateAddress", input, options)
end
@doc """
Creates an empty cluster. Each cluster supports five nodes. You use the
`CreateJob` action separately to create the jobs for each of these nodes.
The cluster does not ship until these five node jobs have been created.
"""
def create_cluster(client, input, options \\ []) do
request(client, "CreateCluster", input, options)
end
@doc """
Creates a job to import or export data between Amazon S3 and your
on-premises data center. Your AWS account must have the right trust
policies and permissions in place to create a job for Snowball. If you're
creating a job for a node in a cluster, you only need to provide the
`clusterId` value; the other job attributes are inherited from the cluster.
"""
def create_job(client, input, options \\ []) do
request(client, "CreateJob", input, options)
end
@doc """
Takes an `AddressId` and returns specific details about that address in the
form of an `Address` object.
"""
def describe_address(client, input, options \\ []) do
request(client, "DescribeAddress", input, options)
end
@doc """
Returns a specified number of `ADDRESS` objects. Calling this API in one of
the US regions will return addresses from the list of all addresses
associated with this account in all US regions.
"""
def describe_addresses(client, input, options \\ []) do
request(client, "DescribeAddresses", input, options)
end
@doc """
Returns information about a specific cluster including shipping
information, cluster status, and other important metadata.
"""
def describe_cluster(client, input, options \\ []) do
request(client, "DescribeCluster", input, options)
end
@doc """
Returns information about a specific job including shipping information,
job status, and other important metadata.
"""
def describe_job(client, input, options \\ []) do
request(client, "DescribeJob", input, options)
end
@doc """
Returns a link to an Amazon S3 presigned URL for the manifest file
associated with the specified `JobId` value. You can access the manifest
file for up to 60 minutes after this request has been made. To access the
manifest file after 60 minutes have passed, you'll have to make another
call to the `GetJobManifest` action.
The manifest is an encrypted file that you can download after your job
enters the `WithCustomer` status. The manifest is decrypted by using the
`UnlockCode` code value, when you pass both values to the Snowball through
the Snowball client when the client is started for the first time.
As a best practice, we recommend that you don't save a copy of an
`UnlockCode` value in the same location as the manifest file for that job.
Saving these separately helps prevent unauthorized parties from gaining
access to the Snowball associated with that job.
The credentials of a given job, including its manifest file and unlock
code, expire 90 days after the job is created.
"""
def get_job_manifest(client, input, options \\ []) do
request(client, "GetJobManifest", input, options)
end
@doc """
Returns the `UnlockCode` code value for the specified job. A particular
`UnlockCode` value can be accessed for up to 90 days after the associated
job has been created.
The `UnlockCode` value is a 29-character code with 25 alphanumeric
characters and 4 hyphens. This code is used to decrypt the manifest file
when it is passed along with the manifest to the Snowball through the
Snowball client when the client is started for the first time.
As a best practice, we recommend that you don't save a copy of the
`UnlockCode` in the same location as the manifest file for that job. Saving
these separately helps prevent unauthorized parties from gaining access to
the Snowball associated with that job.
"""
def get_job_unlock_code(client, input, options \\ []) do
request(client, "GetJobUnlockCode", input, options)
end
@doc """
Returns information about the Snowball service limit for your account, and
also the number of Snowballs your account has in use.
The default service limit for the number of Snowballs that you can have at
one time is 1. If you want to increase your service limit, contact AWS
Support.
"""
def get_snowball_usage(client, input, options \\ []) do
request(client, "GetSnowballUsage", input, options)
end
@doc """
Returns an Amazon S3 presigned URL for an update file associated with a
specified `JobId`.
"""
def get_software_updates(client, input, options \\ []) do
request(client, "GetSoftwareUpdates", input, options)
end
@doc """
Returns an array of `JobListEntry` objects of the specified length. Each
`JobListEntry` object is for a job in the specified cluster and contains a
job's state, a job's ID, and other information.
"""
def list_cluster_jobs(client, input, options \\ []) do
request(client, "ListClusterJobs", input, options)
end
@doc """
Returns an array of `ClusterListEntry` objects of the specified length.
Each `ClusterListEntry` object contains a cluster's state, a cluster's ID,
and other important status information.
"""
def list_clusters(client, input, options \\ []) do
request(client, "ListClusters", input, options)
end
@doc """
This action returns a list of the different Amazon EC2 Amazon Machine
Images (AMIs) that are owned by your AWS account that would be supported
for use on a Snowball Edge device. Currently, supported AMIs are based on
the CentOS 7 (x86_64) - with Updates HVM, Ubuntu Server 14.04 LTS (HVM),
and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the AWS
Marketplace.
"""
def list_compatible_images(client, input, options \\ []) do
request(client, "ListCompatibleImages", input, options)
end
@doc """
Returns an array of `JobListEntry` objects of the specified length. Each
`JobListEntry` object contains a job's state, a job's ID, and a value that
indicates whether the job is a job part, in the case of export jobs.
Calling this API action in one of the US regions will return jobs from the
list of all jobs associated with this account in all US regions.
"""
def list_jobs(client, input, options \\ []) do
request(client, "ListJobs", input, options)
end
@doc """
While a cluster's `ClusterState` value is in the `AwaitingQuorum` state,
you can update some of the information associated with a cluster. Once the
cluster changes to a different job state, usually 60 minutes after the
cluster being created, this action is no longer available.
"""
def update_cluster(client, input, options \\ []) do
request(client, "UpdateCluster", input, options)
end
@doc """
While a job's `JobState` value is `New`, you can update some of the
information associated with a job. Once the job changes to a different job
state, usually within 60 minutes of the job being created, this action is
no longer available.
"""
def update_job(client, input, options \\ []) do
request(client, "UpdateJob", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "snowball"}
host = build_host("snowball", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSIESnowballJobManagementService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
| lib/aws/snowball.ex | 0.877398 | 0.674546 | snowball.ex | starcoder |
defmodule Pushover.Model.Message do
@moduledoc """
A message.
## Attributes
* `data` (*type:* `String.t`, *default:* `nil`) - Your message.
* `device` (*type:* `String.t`, *default:* `nil`) - Your user's device name(s) to send the message directly (multiple devices may be separated by a comma).
* `title` (*type:* `String.t`, *default:* `nil`) - Your message's title, otherwise your app name is used.
* `url` (*type:* `String.t`, *default:* `nil`) - A supplementary URL to show with your message.
* `url_title` (*type:* `String.t`, *default:* `nil`) - A title for your supplementary URL, otherwise just the URL is given.
* `priority` (*type:* `integer()`, *default:* `nil`) - Send as -2 to generate no notification/alert, -1 to always send as a quiet notification, 0 (default), 1 to display as high-priority and bypass the user's quiet hours, or 2 to also require confirmation from the user. Retry and expire are required if sending emergency-priority (2) notifications.
* `retry` (*type:* `integer()`, *default:* `nil`) - How often (in seconds) the Pushover servers will send the same notification to the user.
* `expire` (*type:* `integer()`, *default:* `nil`) - How many seconds your notification will continue to be retried.
* `sound` (*type:* `String.t`, *default:* `nil`) - The name of the sound to override the user's default sound choice.
* `timestamp` (*type:* `integer()`, *default:* `nil`) - a Unix timestamp of your message's date and time to display to the user, rather than the time your message is received by Pushover's servers.
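## Example
An illustrative struct (all field values are hypothetical):
%Pushover.Model.Message{data: "Backup finished", title: "Nightly job",
priority: 1, url: "https://example.com/status"}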
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:data => String.t(),
:device => String.t(),
:title => String.t(),
:url => String.t(),
:url_title => String.t(),
:priority => integer(),
:retry => integer(),
:expire => integer(),
:sound => String.t(),
:timestamp => integer(),
}
field(:data)
field(:device)
field(:title)
field(:url)
field(:url_title)
field(:priority)
field(:expire)
field(:retry)
field(:sound)
field(:timestamp)
end
defimpl Poison.Decoder, for: Pushover.Model.Message do
def decode(value, options) do
Pushover.Model.Message.decode(value, options)
end
end
defimpl Poison.Encoder, for: Pushover.Model.Message do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| lib/pushover/model/message.ex | 0.77569 | 0.464841 | message.ex | starcoder |
defmodule LearnKit.Regression.Polynomial do
@moduledoc """
Module for Polynomial Regression algorithm
"""
defstruct factors: [], results: [], coefficients: [], degree: 2
alias LearnKit.Regression.Polynomial
use Polynomial.Calculations
use LearnKit.Regression.Score
@type factors :: [number]
@type results :: [number]
@type coefficients :: [number]
@type degree :: integer
@doc """
Creates polynomial predictor with data_set
## Parameters
- factors: Array of predictor variables
- results: Array of criterion variables
## Examples
iex> predictor = LearnKit.Regression.Polynomial.new([1, 2, 3, 4], [3, 6, 10, 15])
%LearnKit.Regression.Polynomial{factors: [1, 2, 3, 4], results: [3, 6, 10, 15], coefficients: [], degree: 2}
"""
@spec new(factors, results) :: %Polynomial{factors: factors, results: results, coefficients: [], degree: 2}
def new(factors, results) when is_list(factors) and is_list(results) do
%Polynomial{factors: factors, results: results}
end
def new(_, _), do: Polynomial.new([], [])
def new, do: Polynomial.new([], [])
@doc """
Fit train data
## Parameters
- predictor: %LearnKit.Regression.Polynomial{}
- options: keyword list with options
## Options
- degree: nth degree of polynomial model, default set to 2
## Examples
iex> predictor = predictor |> LearnKit.Regression.Polynomial.fit
%LearnKit.Regression.Polynomial{
coefficients: [0.9999999999998295, 1.5000000000000853, 0.4999999999999787],
degree: 2,
factors: [1, 2, 3, 4],
results: [3, 6, 10, 15]
}
iex> predictor = predictor |> LearnKit.Regression.Polynomial.fit([degree: 3])
%LearnKit.Regression.Polynomial{
coefficients: [1.0000000000081855, 1.5000000000013642, 0.5,
8.526512829121202e-14],
degree: 3,
factors: [1, 2, 3, 4],
results: [3, 6, 10, 15]
}
"""
@spec fit(%Polynomial{factors: factors, results: results}, keyword) :: %Polynomial{factors: factors, results: results, coefficients: coefficients, degree: degree}
def fit(%Polynomial{factors: factors, results: results}, options \\ []) do
degree = options[:degree] || 2
matrix = matrix(factors, degree)
xys = x_y_matrix(factors, results, degree + 1, [])
coefficients = matrix |> Matrix.inv() |> Matrix.mult(xys) |> List.flatten()
%Polynomial{factors: factors, results: results, coefficients: coefficients, degree: degree}
end
@doc """
Predict using the polynomial model
## Parameters
- predictor: %LearnKit.Regression.Polynomial{}
- samples: Array of variables
## Examples
iex> predictor |> LearnKit.Regression.Polynomial.predict([5,6])
{:ok, [20.999999999999723, 27.999999999999574]}
"""
@spec predict(%Polynomial{coefficients: coefficients, degree: degree}, list) :: {:ok, list}
def predict(polynomial = %Polynomial{coefficients: _, degree: _}, samples) when is_list(samples) do
{:ok, do_predict(polynomial, samples)}
end
@doc """
Predict using the polynomial model
## Parameters
- predictor: %LearnKit.Regression.Polynomial{}
- sample: Sample variable
## Examples
iex> predictor |> LearnKit.Regression.Polynomial.predict(5)
{:ok, 20.999999999999723}
"""
@spec predict(%Polynomial{coefficients: coefficients, degree: degree}, number) :: {:ok, number}
def predict(%Polynomial{coefficients: coefficients, degree: degree}, sample) do
ordered_coefficients = coefficients |> Enum.reverse()
{:ok, substitute_coefficients(ordered_coefficients, sample, degree, 0.0)}
end
end
| lib/learn_kit/regression/polynomial.ex | 0.940051 | 0.988949 | polynomial.ex | starcoder |
defmodule Durex do
@moduledoc """
Parse durations, such as `"1s"`, to its numerical millisecond value, e.g. `1_000`,
so you can do things such as:
```
"1s"
|> Durex.ms!()
|> Process.sleep()
```
## Examples
iex> Durex.ms "3s"
{:ok, 3_000}
iex> Durex.ms "1h"
{:ok, 3600_000}
# Works with float too
iex> Durex.ms "0.5s"
{:ok, 500}
iex> Durex.ms "1.5h"
{:ok, 5400_000}
# Cannot ms duration less than 1ms | 1.0ms
iex> Durex.ms "0.5ms"
:error
# Fractional durations in ms will be truncated
iex> Durex.ms "1.5ms"
{:ok, 1}
Of course, there is also the bang version `ms!/1`:
# Bang version available
iex> Durex.ms! "3s"
3_000
#### Supported units
* `ms` (for millisecond)
* `s` (for second)
* `m` (for minute)
* `h` (for hour)
* `d` (for days)
* `w` (for week)
#### Performance Notes
* Parsing durations which include integers is about 4x faster
than their version containing floats.
So instead of parsing "0.5s", use "500ms" for maximum performance.
* To benchmark, run: `$ mix run bench/ms.exs`
"""
@type duration :: bitstring
defmacrop s_to_ms(s), do: quote(do: 1_000 * unquote(s))
defmacrop m_to_ms(m), do: quote(do: 1_000 * 60 * unquote(m))
defmacrop h_to_ms(h), do: quote(do: 1_000 * 60 * 60 * unquote(h))
defmacrop d_to_ms(d), do: quote(do: 1_000 * 60 * 60 * 24 * unquote(d))
defmacrop w_to_ms(w), do: quote(do: 1_000 * 60 * 60 * 24 * 7 * unquote(w))
@doc "Parse duration as milliseconds"
@spec ms(duration) :: {:ok, pos_integer} | :error
def ms(duration) when is_bitstring(duration) do
case Integer.parse(duration) do
{ms, "ms"} when ms >= 1 ->
{:ok, ms}
{s, "s"} when s > 0 ->
{:ok, s_to_ms(s)}
{m, "m"} when m > 0 ->
{:ok, m_to_ms(m)}
{h, "h"} when h > 0 ->
{:ok, h_to_ms(h)}
{d, "d"} when d > 0 ->
{:ok, d_to_ms(d)}
{w, "w"} when w > 0 ->
{:ok, w_to_ms(w)}
_ ->
case Float.parse(duration) do
{ms, "ms"} when ms >= 1.0 -> {:ok, trunc(ms)}
{s, "s"} when s > 0.0 -> {:ok, trunc(s_to_ms(s))}
{m, "m"} when m > 0.0 -> {:ok, trunc(m_to_ms(m))}
{h, "h"} when h > 0.0 -> {:ok, trunc(h_to_ms(h))}
{d, "d"} when d > 0.0 -> {:ok, trunc(d_to_ms(d))}
{w, "w"} when w > 0.0 -> {:ok, trunc(w_to_ms(w))}
_ -> :error
end
end
end
@doc "Parse duration but raise if it fails"
@spec ms!(duration) :: pos_integer
def ms!(duration) do
case ms(duration) do
{:ok, ms} -> ms
:error -> raise ArgumentError, "cannot parse #{inspect(duration)}"
end
end
end
| lib/durex.ex | 0.932184 | 0.855308 | durex.ex | starcoder |
defmodule Raxx.Request do
@moduledoc """
HTTP requests to a Raxx application are encapsulated in a `Raxx.Request` struct.
A request has all the properties of the url it was sent to.
In addition it has optional content, in the body.
As well as a variable number of headers that contain meta data.
Where appropriate URI properties are named from this definition.
> scheme:[//[user:password@]host[:port]][/]path[?query][#fragment]
from [wikipedia](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier#Syntax)
The contents are itemised below:
| **scheme** | `http` or `https`, depending on the transport used. |
| **authority** | The location of the hosting server, as a binary. e.g. `www.example.com`. Plus an optional port number, separated from the hostname by a colon |
| **method** | The HTTP request method, such as `:GET` or `:POST`, as an atom. This cannot ever be `nil`. It is always uppercase. |
| **path** | The remainder of the request URL's “path”, split into segments. It designates the virtual “location” of the request's target within the application. This may be an empty list, if the requested URL targets the application root. |
| **raw_path** | The request URL's "path" |
| **query** | the URL query string. |
| **headers** | The headers from the HTTP request as a proplist of strings. Note all headers will be downcased, e.g. `[{"content-type", "text/plain"}]` |
| **body** | The body content sent with the request |
"""
@typedoc """
Method to indicate the desired action to be performed on the identified resource.
"""
@type method :: atom
@typedoc """
Scheme describing protocol used.
"""
@type scheme :: :http | :https
@typedoc """
Elixir representation for an HTTP request.
"""
@type t :: %__MODULE__{
scheme: scheme,
authority: binary,
method: method,
path: [binary],
raw_path: binary,
query: binary | nil,
headers: Raxx.headers(),
body: Raxx.body()
}
defstruct scheme: nil,
authority: nil,
method: nil,
path: [],
raw_path: "",
query: nil,
headers: [],
body: nil
@default_ports %{
http: 80,
https: 443
}
@doc """
Return the host value for the request.
The `t:Raxx.Request.t/0` struct contains `authority` field, which
may contain the port number. This function returns the host value which
won't include the port number.
"""
def host(%__MODULE__{authority: authority}) do
hd(String.split(authority, ":"))
end
@doc """
Return the port number used for the request.
If no port number is explicitly specified in the request url, the
default one for the scheme is used.
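For example (an illustrative request with no explicit port):
Raxx.Request.port(%Raxx.Request{scheme: :https, authority: "example.com"})
#=> 443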
"""
@spec port(t, %{optional(atom) => :inet.port_number()}) :: :inet.port_number()
def port(%__MODULE__{scheme: scheme, authority: authority}, default_ports \\ @default_ports) do
case String.split(authority, ":") do
[_host] ->
Map.get(default_ports, scheme)
[_host, port_string] ->
case Integer.parse(port_string) do
{port, _} when port in 0..65535 ->
port
end
end
end
@doc """
Returns an `URI` struct corresponding to the url used in the provided request.
**NOTE**: the `userinfo` field of the `URI` will always be `nil`, even if there
is `Authorization` header basic auth information contained in the request.
The `fragment` will also be `nil`, as the servers don't have access to it.
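For example (an illustrative request; only URL-relevant fields shown):
request = %Raxx.Request{scheme: :https, authority: "example.com:8443",
raw_path: "/search", query: "q=raxx"}
Raxx.Request.uri(request)
#=> %URI{scheme: "https", host: "example.com", port: 8443,
#=> path: "/search", query: "q=raxx", userinfo: nil, ...}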
"""
@spec uri(t) :: URI.t()
def uri(%__MODULE__{} = request) do
scheme =
case request.scheme do
nil -> nil
atom when is_atom(atom) -> Atom.to_string(atom)
end
%URI{
authority: request.authority,
host: Raxx.request_host(request),
path: request.raw_path,
port: port(request),
query: request.query,
scheme: scheme,
# you can't provide userinfo in a http request url (anymore)
# pulling it out of Authorization headers would go against the
# main use-case for this function
userinfo: nil
}
end
end
| lib/raxx/request.ex | 0.898566 | 0.616128 | request.ex | starcoder |
defmodule Statistics.Distributions.Normal do
@moduledoc """
The normal, or gaussian, distribution
When invoking the distribution functions without parameters,
a distribution with mean of 0 and standard deviation of 1 is assumed.
"""
alias Statistics.Math
alias Statistics.Math.Functions
@doc """
Probability density function
Roughly the expectation of a given value in the distribution
## Examples
iex> Statistics.Distributions.Normal.pdf().(0)
0.3989422804014327
iex> Statistics.Distributions.Normal.pdf(0.2, 1).(1.3)
0.21785217703255055
"""
@spec pdf :: fun
def pdf do
pdf(0, 1)
end
@spec pdf(number, number) :: fun
def pdf(mu, sigma) do
fn x ->
numexp = Math.pow(x - mu, 2) / (2 * Math.pow(sigma, 2))
denom = sigma * Math.sqrt(2 * Math.pi())
numer = Math.pow(Math.e(), numexp * -1)
numer / denom
end
end
@doc """
The cumulative density function
The probability that a value lies below `x`
Cumulative gives a probability that a statistic
is less than Z. This equates to the area of the distribution below Z.
e.g: Pr(Z = 0.69) = 0.7549. This value is usually given in Z tables.
## Examples
iex> Statistics.Distributions.Normal.cdf().(2)
0.9772499371127437
iex> Statistics.Distributions.Normal.cdf(0,1).(0)
0.5000000005
"""
@spec cdf :: fun
def cdf() do
cdf(0, 1)
end
@spec cdf(number, number) :: fun
def cdf(mu, sigma) do
denom = sigma * Math.sqrt(2)
fn x ->
0.5 * (1.0 + Functions.erf((x - mu) / denom))
end
end
@doc """
The percentile-point function
Get the maximum point which lies below the given probability.
This is the inverse of the cdf
## Examples
iex> Statistics.Distributions.Normal.ppf().(0.025)
-1.96039491692534
iex> Statistics.Distributions.Normal.ppf(7, 2.1).(0.25)
5.584202805909036
"""
@spec ppf :: fun
def ppf() do
ppf(0, 1)
end
@spec ppf(number, number) :: fun
def ppf(mu, sigma) do
res = fn p ->
mu + p * sigma
end
fn x ->
cond do
x < 0.5 ->
res.(-Functions.inv_erf(Math.sqrt(-2.0 * Math.ln(x))))
x >= 0.5 ->
res.(Functions.inv_erf(Math.sqrt(-2.0 * Math.ln(1 - x))))
end
end
end
@doc """
Draw a random number from a normal distribution
`rand/0` will return a random number from a normal distribution
with a mean of 0 and a standard deviation of 1.
`rand/2` allows you to provide the mean and standard deviation
parameters of the distribution from which the random number is drawn
Uses the [rejection sampling method](https://en.wikipedia.org/wiki/Rejection_sampling)
## Examples
iex> Statistics.Distributions.Normal.rand()
1.5990817245679434
iex> Statistics.Distributions.Normal.rand(22, 2.3)
23.900248900049736
"""
@spec rand() :: number
def rand do
rand(0, 1)
end
@spec rand(number, number) :: number
def rand(mu, sigma), do: rand(mu, sigma, pdf(0, 1))
defp rand(mu, sigma, rpdf) do
# Note: an alternate method exists and may be better
# Inverse transform sampling - https://en.wikipedia.org/wiki/Inverse_transform_sampling
# ----
# Generate a random number between -10 and +10
# (the probability of 10 occurring in a Normal(0,1) distribution is
# too small to calculate with the precision available to us)
x = Math.rand() * 20 - 10
cond do
rpdf.(x) > Math.rand() ->
# transpose to specified distribution
mu - x * sigma
true ->
# keep trying
rand(mu, sigma, rpdf)
end
end
end
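# Usage sketch for Statistics.Distributions.Normal (editor's addition, not part
# of the library): pdf/2, cdf/2 and ppf/2 return closures, so a parameterised
# distribution can be built once and reused. Values in the comments are approximate.
iq_cdf = Statistics.Distributions.Normal.cdf(100, 15)
iq_ppf = Statistics.Distributions.Normal.ppf(100, 15)
iq_cdf.(130)
#=> ~0.977, the fraction of the distribution below 130
iq_ppf.(0.975)
#=> ~129.4, the value below which 97.5% of the distribution lies
Statistics.Distributions.Normal.rand(100, 15)
#=> a single random draw; the value differs on every call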
# lib/statistics/distributions/normal.ex
defmodule PlayfabEx.Server.SharedGroupData do
use Interface
@doc """
Adds users to the set of those able to update both the shared data, as well as the set of users in the group. Only users in the group (and the server) can add new members. Shared Groups are designed for sharing data between a very small number of players, please see our guide:
[online docs](https://api.playfab.com/documentation/server/method/AddSharedGroupMembers)
"""
@spec add_shared_group_members(map()) :: {:ok, map} | {:error, String.t}
definterface add_shared_group_members(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
@doc """
Requests the creation of a shared group object, containing key/value pairs which may be updated by all members of the group. Shared Groups are designed for sharing data between a very small number of players, please see our guide:
[online docs](https://api.playfab.com/documentation/server/method/CreateSharedGroup)
"""
@spec create_shared_group(map()) :: {:ok, map} | {:error, String.t}
definterface create_shared_group(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
@doc """
Removes users from the set of those able to update the shared data and the set of users in the group. Only users in the group can remove members. If as a result of the call, zero users remain with access, the group and its associated data will be deleted. Shared Groups are designed for sharing data between a very small number of players, please see our guide:
[online docs](https://api.playfab.com/documentation/server/method/DeleteSharedGroup)
"""
@spec delete_shared_group(map()) :: {:ok, map} | {:error, String.t}
definterface delete_shared_group(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
@doc """
[online docs](https://api.playfab.com/documentation/server/method/GetSharedGroupData)
"""
@spec get_shared_group_data(map()) :: {:ok, map} | {:error, String.t}
definterface get_shared_group_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
@doc """
[online docs](https://api.playfab.com/documentation/server/method/RemoveSharedGroupMembers)
"""
@spec remove_shared_group_members(map()) :: {:ok, map} | {:error, String.t}
definterface remove_shared_group_members(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
@doc """
[online docs](https://api.playfab.com/documentation/server/method/UpdateSharedGroupData)
"""
@spec update_shared_group_data(map()) :: {:ok, map} | {:error, String.t}
definterface update_shared_group_data(params), to: (Application.get_env(:playfab_ex, PlayfabEx.Server.SharedGroupData) || PlayfabEx.Server.Default.SharedGroupData)
end
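# Usage sketch for PlayfabEx.Server.SharedGroupData (editor's addition): each
# definterface above dispatches at runtime to the module configured under the
# :playfab_ex application environment, falling back to
# PlayfabEx.Server.Default.SharedGroupData. A custom or test implementation can
# be swapped in via configuration; MyApp.FakeSharedGroupData below is a
# hypothetical module exporting the same functions.
#
#     # config/test.exs
#     config :playfab_ex, PlayfabEx.Server.SharedGroupData, MyApp.FakeSharedGroupData
#
# Parameter keys follow the PlayFab API documentation linked in each @doc.
case PlayfabEx.Server.SharedGroupData.create_shared_group(%{"SharedGroupId" => "demo-group"}) do
  {:ok, response} -> response
  {:error, reason} -> reason
end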
# lib/server/shared_group_data.ex
defmodule ExWire.P2P.Server do
@moduledoc """
Server handling peer to peer communication.
It starts a TCP server to handle incoming and outgoing RLPx, DevP2P and Eth Wire
connections.
Once a connection is up, it's possible to add a subscriber to the different
packets that are sent over the connection. This is the primary way of handling
packets.
Note: incoming connections are not fully tested at this moment.
Note: we do not currently store the token needed to restart connections (this upsets some peers).
"""
use GenServer
require Logger
alias ExWire.{P2P, TCP}
@doc """
Child spec definition to be used by a supervisor when wanting to supervise an
inbound TCP connection.
We spawn a temporary child process for each inbound connection.
"""
def child_spec([:inbound, socket]) do
%{
id: ExWire.P2P.Inbound,
start: {__MODULE__, :start_link, [{:inbound, socket}]},
restart: :temporary
}
end
@doc """
Child spec definition to be used by a supervisor when wanting to supervise an
outbound TCP connection.
We spawn a temporary child process for each outbound connection.
"""
def child_spec([:outbound, peer, subscribers]) do
%{
id: ExWire.P2P.Outbound,
start: {__MODULE__, :start_link, [:outbound, peer, subscribers]},
restart: :temporary
}
end
@doc """
Starts an outbound or inbound peer to peer connection.
"""
def start_link(:outbound, peer, subscribers \\ []) do
GenServer.start_link(__MODULE__, %{
is_outbound: true,
peer: peer,
subscribers: subscribers
})
end
def start_link({:inbound, socket}) do
GenServer.start_link(__MODULE__, %{
is_outbound: false,
socket: socket
})
end
@doc """
Client function for sending a packet over to a peer.
"""
@spec send_packet(pid(), struct()) :: :ok
def send_packet(pid, packet) do
GenServer.cast(pid, {:send, %{packet: packet}})
end
@doc """
Client function to subscribe to incoming packets.
A subscription can be in one of two forms:
1. Provide a `{:server, server_pid}`, and we will send a packet to that
process with the contents `{:packet, packet, peer}` for each received packet.
2. Provide a `{module, function, arguments}`, and we will apply that function
with the provided arguments along with the packet.
"""
@spec subscribe(pid(), {module(), atom(), list()} | {:server, pid()}) :: :ok
def subscribe(pid, subscription) do
GenServer.call(pid, {:subscribe, subscription})
end
@doc """
Client function to disconnect from tcp connection
"""
def disconnect(pid) do
GenServer.cast(pid, :disconnect)
end
@doc """
Initialize by opening up a `gen_tcp` connection to given host and port.
"""
def init(%{is_outbound: true, peer: peer}) do
{:ok, socket} = TCP.connect(peer.host, peer.port)
Logger.debug(fn ->
"[Network] [#{peer}] Established outbound connection with #{peer.host}."
end)
state = P2P.new_outbound_connection(socket, peer)
{:ok, state}
end
def init(%{is_outbound: false, socket: socket}) do
state = P2P.new_inbound_connection(socket)
{:ok, state}
end
@doc """
Allows a client to subscribe to incoming packets. Subscribers must be in the form
of `{module, function, args}`, in which case we'll call `module.function(packet, ...args)`,
or `{:server, server_pid}` for a GenServer, in which case we'll send a message
`{:packet, packet, peer}`.
"""
def handle_call({:subscribe, {_module, _function, _args} = mfa}, _from, state) do
updated_state =
Map.update(state, :subscribers, [mfa], fn subscribers -> [mfa | subscribers] end)
{:reply, :ok, updated_state}
end
def handle_call({:subscribe, {:server, _server_pid} = server}, _from, state) do
updated_state =
Map.update(state, :subscribers, [server], fn subscribers -> [server | subscribers] end)
{:reply, :ok, updated_state}
end
@doc """
Handle inbound communication from a peer node via tcp.
"""
def handle_info({:tcp, _socket, data}, state) do
new_state = P2P.handle_message(state, data)
{:noreply, new_state}
end
@doc """
Function triggered when tcp closes the connection
"""
def handle_info({:tcp_closed, _socket}, state) do
peer = Map.get(state, :peer, :unknown)
Logger.warn("[Network] [#{peer}] Peer closed connection")
Process.exit(self(), :normal)
{:noreply, state}
end
@doc """
Server function for sending packets to a peer.
"""
def handle_cast({:send, %{packet: packet}}, state) do
updated_state = P2P.send_packet(state, packet)
{:noreply, updated_state}
end
@doc """
Server function handling disconnecting from tcp connection.
"""
def handle_cast(:disconnect, state = %{socket: socket}) do
TCP.shutdown(socket)
{:noreply, Map.delete(state, :socket)}
end
end
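# Usage sketch for ExWire.P2P.Server (editor's addition): wiring up an outbound
# connection and subscribing the calling process to incoming packets. `peer` is
# a placeholder for a peer struct produced by ExWire's discovery layer, and
# `packet` for any ExWire packet struct.
{:ok, pid} = ExWire.P2P.Server.start_link(:outbound, peer, [])
:ok = ExWire.P2P.Server.subscribe(pid, {:server, self()})
:ok = ExWire.P2P.Server.send_packet(pid, packet)
# Every packet received on the connection now arrives in this process as
# {:packet, packet, peer}.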
# apps/ex_wire/lib/ex_wire/p2p/server.ex
defmodule Coxir.API do
@moduledoc """
Used to interact with Discord's API while
keeping track of all the rate limits.
"""
alias Coxir.API.Base
@table :rates
@doc false
def create_tables do
:ets.new @table, [:set, :public, :named_table]
end
@doc """
Performs an API request.
Returns raw data, the atom `:ok`
or a map containing error information.
"""
@spec request(atom, String.t(), String.t(), Keyword.t(), Keyword.t()) :: :ok | map
def request(method, route, body \\ "", options \\ [], headers \\ []) do
route
|> route_param
|> route_limit
|> case do
nil ->
Base.request(method, route, body, headers, options)
|> response(route)
limit ->
Process.sleep(limit)
request(method, route, body, options, headers)
end
end
@doc """
Performs a multipart API request.
Refer to `request/5` for more information.
"""
@spec request_multipart(atom, String.t(), String.t(), Keyword.t(), Keyword.t()) :: :ok | map
def request_multipart(method, route, body, options \\ [], headers \\ []) do
body = body
|> Enum.to_list
body = {:multipart, body}
headers = [
{"Content-Type", "multipart/form-data"}
| headers
]
request(method, route, body, options, headers)
end
defp response({_atom, struct}, route) do
struct
|> case do
%{body: body, headers: headers, status_code: code} ->
route = route
|> route_param
reset = headers["X-RateLimit-Reset"]
remaining = headers["X-RateLimit-Remaining"]
{final, reset, remaining} = \
headers["X-RateLimit-Global"]
|> case do
nil ->
{route, reset, remaining}
_global ->
retry = headers["Retry-After"]
reset = current_time() + retry
{:global, reset, 0}
end
if reset && remaining do
remote = headers["Date"]
|> date_header
offset = (remote - current_time())
|> abs
{final, remaining, reset + offset}
|> update_limit
end
cond do
final != route ->
unlock(route)
!(reset && remaining) ->
unlock(route)
true ->
:ignore
end
cond do
code in [204] ->
:ok
code in [200, 201, 304] ->
body
true ->
%{error: body, code: code}
end
%{reason: reason} ->
unlock(route)
%{error: reason}
end
end
defp route_param(route) do
~r/(channels|guilds)\/([0-9]{15,})+/i
|> Regex.run(route)
|> case do
[match, _route, _param] ->
match
nil ->
route
end
end
defp route_limit(route) do
ignore = \
reset(:global)
|> case do
0 -> reset(route)
n -> n
end
remaining = \
count(:global)
|> case do
false -> count(route)
other -> other
end
cond do
ignore > 0 ->
nil
remaining > -1 ->
nil
true ->
250
end
end
defp count(route) do
arguments = \
[@table, route, {2, -1}]
arguments = \
case route do
:global ->
arguments
_other ->
tuple = {route, 1, :lock}
arguments ++ [tuple]
end
try do
apply(:ets, :update_counter, arguments)
rescue
_ -> false
end
end
defp reset(route) do
return = \
case route do
:global ->
{:"$1", nil, 0}
_other ->
{:"$1", 0, :lock}
end
fun = \
[{
{:"$1", :"$2", :"$3"},
[{
:andalso,
{:==, :"$1", route},
{:"/=", :"$3", :lock},
{:<, {:-, :"$3", current_time()}, 0}
}],
[{return}]
}]
:ets.select_replace(@table, fun)
end
defp unlock(route) do
fun = \
[{
{:"$1", :"$2", :"$3"},
[{
:andalso,
{:==, :"$1", route},
{:==, :"$3", :lock}
}],
[{
{:"$1", 1, :"$3"}
}]
}]
:ets.select_replace(@table, fun)
end
defp update_limit({route, remaining, reset}) do
fun = \
[{
{:"$1", :"$2", :"$3"},
[{
:andalso,
{:==, :"$1", route},
{:==, :"$3", :lock}
}],
[{
{:"$1", remaining, reset}
}]
}]
:ets.select_replace(@table, fun)
end
defp current_time do
DateTime.utc_now
|> DateTime.to_unix(:millisecond)
end
defp date_header(header) do
header
|> String.to_charlist
|> :httpd_util.convert_request_date
|> :calendar.datetime_to_gregorian_seconds
|> :erlang.-(62_167_219_200)
|> :erlang.*(1000)
end
end
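# Usage sketch for Coxir.API (editor's addition): the :rates ETS table must
# exist before the first request, which is assumed to happen once at
# application start via create_tables/0. The "gateway" route is illustrative.
Coxir.API.create_tables()

case Coxir.API.request(:get, "gateway") do
  %{error: reason, code: code} -> {:error, code, reason}
  %{error: reason} -> {:error, reason}
  body -> body
end
# Requests that hit a rate limit are retried automatically once the limit
# recorded in the ETS table expires.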
# lib/coxir/api/api.ex
defmodule Membrane.Query do
@moduledoc ~S"""
The `Membrane.Query` module evaluates a query against a map or struct. It evaluates every condition through
the `Membrane.Parser` module.
## Query Conditions
The query conditions are divided into 4 types.
* [Logical Conditions](#module-logical-conditions)
* [Atom Conditions](#module-atom-conditions)
* [Property Conditions](#module-property-conditions)
* [List Conditions](#module-list-conditions)
### Logical Conditions
These are conditions which are handled by `Membrane.Parser.LogicalParser` module.
The condition will be of format `keyword: value`.
| Keyword | Meaning |
| ------- | ------- |
| `:gt` | Greater than |
| `:gte` | Greater than or equal to |
| `:lt` | Lesser than |
| `:lte` | Lesser than or equal to |
| `:eq` | Equal to |
| `:neq` | Not Equal to |
### Examples
iex> data = %{a: 20, b: "hello", c: [1, 2, 3], d: %{e: 30}}
iex> Membrane.Query.process(data, a: [gt: 10])
true
iex> Membrane.Query.process(data, a: [lt: 15])
false
# Underneath, it's a simple greater or lesser operator
# Hence it also supports strings or mixed types
iex> Membrane.Query.process(data, b: [lt: "Hello"]) # => "hello" < "Hello"
false
iex> Membrane.Query.process(data, b: [gte: 5]) # => "hello" >= 5
true
# With multiple conditions
iex> Membrane.Query.process(data, a: [lte: 20], c: [eq: [1, 2, 3]], d: [e: [gt: 20]])
true
### Atom Conditions
These are conditions which are handled by `Membrane.Parser.AtomParser` module.
The condition will be of format `attribute: keyword`.
| Keyword | Meaning |
| ------- | ------- |
| `:exists` | True if attribute exists |
| `:notexists` | True if attribute does not exist |
| `:integer` | True if the attribute is an `integer` |
| `:float` | True if the attribute is a `float` |
| `:number` | True if the attribute is a `number` |
| `:string` | True if the attribute is a `string` |
| `:list` | True if the attribute is a `list` |
| `:map` | True if the attribute is a `map` |
### Examples
iex> data = %{a: 20, b: "hello", c: [1, 2, 3], d: %{e: 30}}
iex> Membrane.Query.process(data, a: :exists)
true
iex> Membrane.Query.process(data, d: [e: :notexists])
false
iex> Membrane.Query.process(data, b: :string)
true
### Property Conditions
These are conditions which are handled by `Membrane.Parser.PropertyParser` module.
The condition will be of format `keyword: value | condition`.
This also handles a `Regex` value. It returns `true` if the attribute's value matches the regex.
| Keyword | Meaning |
| ------- | ------- |
| `:len` | Length of the attribute must equal the value; if a condition is passed, evaluation is against the attribute's length |
### Examples
iex> data = %{a: 20, b: "hello", c: [1, 2, 3], d: %{e: 30}}
iex> Membrane.Query.process(data, c: [len: 3])
true
# Length is not defined for a number.
iex> Membrane.Query.process(data, a: [len: 10])
false
# With Regex
iex> Membrane.Query.process(data, b: ~r'\w+ll\w+')
true
# Internally a number is converted to a string, hence regex works even against a number
iex> Membrane.Query.process(data, d: [e: ~r'\d+'])
true
### List Conditions
These are conditions which are handled by `Membrane.Parser.ListParser` module.
The condition will be of the format `keyword: value`. In this condition either the attribute's value
or the keyword's value has to be a list.
| Keyword | Meaning |
| ------- | ------- |
| `:in` | True if value of attribute is in keyword's value |
| `:nin` | True if value of attribute is not in keyword's value |
| `:has` | True if value of attribute has keyword's value |
| `:nha` | True if value of attribute does not have keyword's value |
### Examples
iex> data = %{a: 20, b: "hello", c: [1, 2, 3], d: %{e: 30}}
iex> Membrane.Query.process(data, a: [in: [10, 20, 30]])
true
iex> Membrane.Query.process(data, c: [has: 2])
true
iex> Membrane.Query.process(data, d: [e: [nin: [10, 20, 30]]])
false
"""
alias Membrane.Parser
@doc ~S"""
Evaluates the struct against a query and returns `true` or `false`.
## Examples
iex> alias Membrane.Query
Membrane.Query
iex> document = %{a: 100, b: 20, c: -1, meta: %{creator: "max"}}
%{a: 100, b: 20, c: -1, meta: %{creator: "max"}}
iex> Query.process(document, a: :exists, meta: [creator: "max"])
true
iex> Query.process(document, a: :exists, c: [gt: 0])
false
"""
@spec process(map | struct, list) :: boolean
def process(document, [{name, value} | rest]) do
Map.get(document, name)
|> parse(value)
|> case do
true -> process(document, rest)
false -> false
end
end
def process(_document, []) do
true
end
defp parse(value, compare) when is_list(compare) and is_map(value) do
process(value, compare)
end
defp parse(value, compare) do
Parser.parse(value, compare)
end
end
# lib/membrane/query.ex
defmodule CoursePlanner.Classes.Calendars do
@moduledoc """
This module provides helper functions to populate the json for the calendar view
"""
import Ecto.Query
alias CoursePlanner.{Repo, Courses.OfferedCourse}
alias Ecto.Changeset
def get_user_classes(user, true, week_range) do
case user.role do
"Student" -> get_student_classes(user.id, week_range)
"Teacher" -> get_teacher_classes(user.id, week_range)
_ -> get_all_classes(week_range)
end
end
def get_user_classes(_user, false, week_range) do
get_all_classes(week_range)
end
def get_student_classes(user_id, week_range) do
query = from oc in OfferedCourse,
join: s in assoc(oc, :students),
join: c in assoc(oc, :classes),
preload: [:term, :course, :teachers, students: s, classes: c],
where: ^user_id == s.id and
c.date >= ^week_range.beginning_of_week and c.date <= ^week_range.end_of_week
Repo.all(query)
end
def get_teacher_classes(user_id, week_range) do
query = from oc in OfferedCourse,
join: t in assoc(oc, :teachers),
join: c in assoc(oc, :classes),
preload: [:term, :course, teachers: t, classes: c],
where: ^user_id == t.id and
c.date >= ^week_range.beginning_of_week and c.date <= ^week_range.end_of_week
Repo.all(query)
end
def get_all_classes(week_range) do
query = from oc in OfferedCourse,
join: c in assoc(oc, :classes),
preload: [:term, :course, :teachers, classes: c],
where: c.date >= ^week_range.beginning_of_week and c.date <= ^week_range.end_of_week
Repo.all(query)
end
def get_week_range(date) do
%{
beginning_of_week: Timex.beginning_of_week(date),
end_of_week: Timex.end_of_week(date)
}
end
def validate(params) do
data = %{}
types = %{date: :date, my_classes: :boolean}
{data, types}
|> Changeset.cast(params, Map.keys(types))
end
def format_errors(changeset_errors) do
errors =
Enum.reduce(changeset_errors, %{}, fn({error_field, {error_message, _}}, out) ->
Map.put(out, error_field, error_message)
end)
%{errors: errors}
end
end
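# Usage sketch for CoursePlanner.Classes.Calendars (editor's addition):
# building the calendar data for the week containing a given date.
# `current_user` is a placeholder for a user struct with :id and :role fields.
week_range = CoursePlanner.Classes.Calendars.get_week_range(Timex.today())
classes = CoursePlanner.Classes.Calendars.get_user_classes(current_user, true, week_range)
# `classes` is a list of OfferedCourse structs with :term, :course, :teachers
# and :classes preloaded; passing false as the second argument returns all
# classes in the week regardless of the user.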
# lib/course_planner/classes/calendars.ex
defmodule Speed.Deck do
@moduledoc """
Provides functions for working with a deck of playing cards.
"""
@typedoc "A list of binaries representing playing cards."
@type t :: [card]
@typedoc "A binary representing a playing card."
@type card :: binary
@typedoc "The successful result of dealing to a player"
@type dealt :: {list(card), list(card), card}
@suits ~w( ♠ ♣ ♥ ♦ )
@numbers ~w( A 2 3 4 5 6 7 8 9 10 J Q K )
@doc """
Deals the specified `deck` to two players, returning those players’ hands, draw
piles and face-down cards, plus the remaining deck.
## Examples
iex> ["A♠", "2♠", "3♠"] |> Speed.Deck.deal
{:error, "Not enough cards in the deck"}
iex> shuffled_deck = Speed.Deck.new |> Speed.Deck.shuffle
...> {{[_, _, _, _, _], [_, _, _, _, _, _, _, _, _, _, _, _, _, _, _], _}, {[_, _, _, _, _], [_, _, _, _, _, _, _, _, _, _, _, _, _, _, _], _}, remaining_deck} = shuffled_deck |> Speed.Deck.deal
...> remaining_deck |> length
10
"""
@spec deal(t) :: {dealt, dealt, t} | Speed.error
def deal(deck) do
if length(deck) < 42 do
{:error, "Not enough cards in the deck"}
else
hands_cards = deck |> Enum.take(10)
hand1 = hands_cards |> Enum.take_every(2)
hand2 = hands_cards |> Enum.drop(1) |> Enum.take_every(2)
draw_piles_cards = deck |> Enum.drop(10) |> Enum.take(30)
draw_pile1 = draw_piles_cards |> Enum.take_every(2)
draw_pile2 = draw_piles_cards |> Enum.drop(1) |> Enum.take_every(2)
face_down_cards = deck |> Enum.drop(40) |> Enum.take(2)
face_down_card1 = face_down_cards |> Enum.at(0)
face_down_card2 = face_down_cards |> Enum.at(1)
remaining_deck = deck |> Enum.drop(42)
{{hand1, draw_pile1, face_down_card1},
{hand2, draw_pile2, face_down_card2},
remaining_deck}
end
end
@doc """
Constructs a deck using the specified `options`.
## Example
iex> Speed.Deck.new
["A♠", "2♠", "3♠", "4♠", "5♠", "6♠", "7♠", "8♠", "9♠", "10♠", "J♠", "Q♠", "K♠", "A♣", "2♣", "3♣", "4♣", "5♣", "6♣", "7♣", "8♣", "9♣", "10♣", "J♣", "Q♣", "K♣", "A♥", "2♥", "3♥", "4♥", "5♥", "6♥", "7♥", "8♥", "9♥", "10♥", "J♥", "Q♥", "K♥", "A♦", "2♦", "3♦", "4♦", "5♦", "6♦", "7♦", "8♦", "9♦", "10♦", "J♦", "Q♦", "K♦"]
iex> Speed.Deck.new jokers: true
["A♠", "2♠", "3♠", "4♠", "5♠", "6♠", "7♠", "8♠", "9♠", "10♠", "J♠", "Q♠", "K♠", "A♣", "2♣", "3♣", "4♣", "5♣", "6♣", "7♣", "8♣", "9♣", "10♣", "J♣", "Q♣", "K♣", "A♥", "2♥", "3♥", "4♥", "5♥", "6♥", "7♥", "8♥", "9♥", "10♥", "J♥", "Q♥", "K♥", "A♦", "2♦", "3♦", "4♦", "5♦", "6♦", "7♦", "8♦", "9♦", "10♦", "J♦", "Q♦", "K♦", "🃏", "🃏"]
iex> Speed.Deck.new jokers: false
["A♠", "2♠", "3♠", "4♠", "5♠", "6♠", "7♠", "8♠", "9♠", "10♠", "J♠", "Q♠", "K♠", "A♣", "2♣", "3♣", "4♣", "5♣", "6♣", "7♣", "8♣", "9♣", "10♣", "J♣", "Q♣", "K♣", "A♥", "2♥", "3♥", "4♥", "5♥", "6♥", "7♥", "8♥", "9♥", "10♥", "J♥", "Q♥", "K♥", "A♦", "2♦", "3♦", "4♦", "5♦", "6♦", "7♦", "8♦", "9♦", "10♦", "J♦", "Q♦", "K♦"]
"""
@spec new(keyword(jokers: boolean)) :: t
def new(options \\ [])
def new(jokers: true), do: new() ++ ~w( 🃏 🃏 )
def new(_options) do
for suit <- @suits, number <- @numbers, do: "#{number}#{suit}"
end
@doc """
Shuffles the specified `deck`.
## Examples
iex> deck = Speed.Deck.new
...> shuffled_deck = Speed.Deck.shuffle(deck)
...> length(shuffled_deck) == length(deck)
true
iex> deck = Speed.Deck.new
...> shuffled1 = Speed.Deck.shuffle(deck)
...> shuffled2 = Speed.Deck.shuffle(deck)
...> shuffled1 == shuffled2
false
"""
@spec shuffle(t) :: t
defdelegate shuffle(deck), to: Enum
end
# lib/speed/deck.ex
defmodule Nixa.Tree.ID3Regressor do
@moduledoc """
Implementation of ID3 regressor decision tree algorithm
"""
import Nixa.Tree.Shared
import Nixa.Shared
defmodule WrappedModel do
defstruct [
root: nil,
binning_strategy: [],
binning_borders: []
]
end
@doc """
Train a model using the provided inputs and targets
"""
def fit(inputs, targets, opts \\ []) when is_list(inputs) and is_list(targets) do
binning_strategy = Keyword.get(opts, :binning_strategy, {:uniform, 10})
{binning_strategy, binning_borders} = calc_binning(inputs, binning_strategy)
xform_inputs = inputs
|> Enum.map(fn inst -> Nixa.Discretizers.transform_instance(inst, binning_borders) end)
num_attrs = inputs |> Enum.fetch!(0) |> Nx.size()
root = build_tree({xform_inputs, targets}, MapSet.new(0..(num_attrs - 1)), opts)
%WrappedModel{
root: root,
binning_strategy: binning_strategy,
binning_borders: binning_borders
}
end
@doc """
Predict a value using a trained model
"""
def predict(%WrappedModel{} = wrapped_model, inputs) when is_list(inputs) do
model = wrapped_model.root
binning_borders = wrapped_model.binning_borders
inputs
|> Enum.map(fn inst -> Nixa.Discretizers.transform_instance(inst, binning_borders) end)
|> Enum.map(fn i -> traverse_tree(model, i) end)
end
### Internal functions
defp traverse_tree(node, input) do
cond do
node.children != nil and !Enum.empty?(node.children) ->
cond do
node.attr == nil ->
nil
true ->
attr_val = input[0][node.attr] |> Nx.to_scalar()
child = Map.get(node.children, attr_val)
if child == nil, do: node.target, else: traverse_tree(child, input)
end
node.target != nil -> node.target
true ->
nil
end
end
defp build_tree({inputs, targets}, attrs, opts) do
h = calc_targets_entropy(targets)
if Nx.to_scalar(h) == 0.0 do
# Base case where there is only one target value
t = Nx.concatenate(targets)
%Nixa.Tree.Node{target: t[0]}
else
# Find split attribute
split_arg = attrs
|> Enum.map(fn a -> Task.async(fn -> calc_info_gain(inputs, targets, a, h) |> Nx.new_axis(0) end) end)
|> Task.await_many(:infinity)
|> Nx.concatenate()
|> Nx.argmax()
|> Nx.to_scalar()
split_a = Enum.fetch!(attrs, split_arg)
rem_attrs = MapSet.delete(attrs, split_a)
split_vals = get_split_vals(inputs, split_a)
children = split_vals
|> Enum.map(fn val -> Task.async(fn -> {Nx.to_scalar(val[0]), create_child(inputs, targets, split_a, val, rem_attrs, opts)} end) end)
|> Task.await_many(:infinity)
|> Map.new()
%Nixa.Tree.Node{
attr: split_a,
children: children,
target: get_mean_target(targets)
}
end
end
defp create_child(inputs, targets, split_a, split_val, rem_attrs, opts) do
{v_inputs, v_targets} = filter_inputs_targets(inputs, targets, split_a, split_val)
cond do
Enum.empty?(v_inputs) or Enum.empty?(v_targets) ->
%Nixa.Tree.Node{
target: get_mean_target(targets)
}
MapSet.size(rem_attrs) == 0 ->
%Nixa.Tree.Node{
target: get_mean_target(v_targets)
}
true -> build_tree({v_inputs, v_targets}, rem_attrs, opts)
end
end
end
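# Usage sketch for Nixa.Tree.ID3Regressor (editor's addition): shapes below are
# an assumption based on how fit/3 and predict/2 index their inputs (each
# instance is a 1 x num_attrs tensor, each target a one-element tensor).
inputs = Enum.map([[1.0, 2.0], [2.0, 1.0], [3.0, 3.0]], fn row -> Nx.tensor([row]) end)
targets = Enum.map([0.5, 0.7, 1.1], fn t -> Nx.tensor([t]) end)

model = Nixa.Tree.ID3Regressor.fit(inputs, targets)
Nixa.Tree.ID3Regressor.predict(model, inputs)
#=> one predicted target per input instance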
# lib/nixa/tree/id3_regressor.ex
defmodule LayoutOMatic.ComponentLayout do
@moduledoc """
Handles Auto-Layouts for Scenic Components.
Each Scenic component differs slightly in how it is sized and positioned. While most components are positioned from their top-left-most point, passing the next starting point varies per component.
Component sizing is based on font metrics: by determining the dimensions of the font, a width and height are calculated and applied to the component. The Layout-O-Matic takes care of all of this
for you. Width and height can also be passed as style arguments on a component, in which case those dimensions are used instead.
Auto-layout, while a made-up term, means that components are automatically laid out in equal rows and columns. Other types of layouts may be added in the future.
A layout requires the component, a starting {x, y}, the grid {x, y} and the graph. Optionally, a layout can apply padding to a group, which pads the group's elements within the group's grid.
Options here include `:padding-top`, `:padding-right`, `:padding-bottom` and `:padding-left`, each followed by an integer number of pixels to pad by. Percentages are not currently supported.
Padding shorthand is also supported: {10, 10, 5} applies 10 px to the top, 10 px to the right and left, and 5 px to the bottom; with this pattern a single value applies to all sides.
To mimic a fixed position, use a separate grid or scene which occupies that space of the viewport, and use the max {x, y} of that grid/scene as the min {x, y} for every subsequent scene.
Objects can be positioned relative to other elements by passing `:absolute, <group_id_to_position_relative_to>, {top_pixels, right_pixels, bottom_pixels, left_pixels}`.
"""
alias Scenic.Graph
alias LayoutOMatic.Button
alias LayoutOMatic.Checkbox
alias LayoutOMatic.Dropdown
alias LayoutOMatic.Slider
alias LayoutOMatic.TextField
alias LayoutOMatic.Toggle
import Scenic.Primitives
defmodule Layout do
defstruct component: %Scenic.Primitive{},
starting_xy: {},
max_xy: {},
grid_xy: {},
graph: %{}
end
@spec auto_layout(Scenic.Graph.t(), atom, [atom]) :: {:ok, Scenic.Graph.t()}
def auto_layout(graph, group_id, list_of_comp_ids) do
rect_id =
group_id
|> Atom.to_string()
|> String.split("_group")
|> hd()
|> String.to_atom()
[%{transforms: %{translate: grid_xy}}] = Graph.get(graph, group_id)
[%{data: max_xy}] = Graph.get(graph, rect_id)
graph =
Enum.reduce(list_of_comp_ids, [], fn c_id, acc ->
[%{data: {comp_type, _}} = component] = Graph.get(graph, c_id)
layout =
case acc do
[] ->
%Layout{
component: component,
starting_xy: grid_xy,
max_xy: max_xy,
grid_xy: grid_xy,
graph: graph
}
_ ->
acc
end
do_layout(comp_type, layout, c_id)
end)
|> Map.get(:graph)
{:ok, graph}
end
defp do_layout(Scenic.Component.Button, layout, c_id) do
case Button.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Checkbox, layout, c_id) do
case Checkbox.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Dropdown, layout, c_id) do
case Dropdown.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.RadioGroup, _layout, _c_id) do
nil
end
defp do_layout(Scenic.Component.Input.Slider, layout, c_id) do
case Slider.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.TextField, layout, c_id) do
case TextField.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
defp do_layout(Scenic.Component.Input.Toggle, layout, c_id) do
case Toggle.translate(layout) do
{:ok, {x, y}, new_layout} ->
new_graph = Graph.modify(Map.get(new_layout, :graph), c_id, &update_opts(&1, t: {x, y}))
Map.put(new_layout, :graph, new_graph)
{:error, error} ->
{:error, error}
end
end
end
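# Usage sketch for LayoutOMatic.ComponentLayout (editor's addition):
# auto_layout/3 derives the grid from a rect primitive (here :sidebar) and a
# group translated to the grid origin whose id carries a "_group" suffix
# (:sidebar_group); the listed component ids are then laid out inside that
# grid. How the graph itself is built is omitted here.
{:ok, graph} =
  LayoutOMatic.ComponentLayout.auto_layout(graph, :sidebar_group, [:ok_button, :cancel_button])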
# lib/layouts/components/autolayout.ex
defmodule Automaton.Types.TWEANN.Neuron do
@moduledoc """
A signal-processing element. It accepts signals, accumulates them
into an ordered vector, then processes this input vector to produce
an output, and finally passes the output to other elements it is
connected to.
Neurons are represented with the tuple: {id, cx_id, af, input_id_ps, output_ids}
• id, a unique id (useful for datastores)
• cx_id, id of the cortex for this neuron
• af, name of the function the neuron applies to the extended dot product (dot product + bias).
• input_id_ps, [{id1, Weights}, ..., {idN, Weights}, Bias]
• output_ids, list of neuron ids to which it fans out its output signal
"""
defstruct id: nil, cx_id: nil, af: nil, input_id_ps: [], output_ids: []
@doc ~S"""
When gen/1 is executed it spawns the neuron element and immediately begins to
wait for its initial state message.
"""
def gen(exoself_pid) do
spawn(fn -> loop(exoself_pid) end)
end
def loop(exoself_pid) do
receive do
{^exoself_pid, {id, cortex_pid, af, input_id_ps, output_pids}} ->
loop(id, cortex_pid, af, {input_id_ps, input_id_ps}, output_pids, 0)
end
end
@doc ~S"""
The neuron process waits for vector signals from all the processes that it's
connected from, taking the dot product of the input and weight vectors, and
then adding it to the accumulator. Once all the signals from input_pids are
received, the accumulator contains the dot product to which the neuron then
adds the bias and executes the activation function on. After fanning out the
output signal, the neuron again returns to waiting for incoming signals. When
the neuron receives the {cortex_pid, get_backup} message, it forwards to the
cortex its full m_input_id_ps list, and its Id. Once the training/learning
algorithm is added to the system, the m_input_id_ps would contain a full set of
the most recent and updated version of the weights.
"""
def loop(
id,
cortex_pid,
af,
{[{input_pid, weights} | input_id_ps], m_input_id_ps},
output_pids,
acc
) do
receive do
{^input_pid, :forward, input} ->
result = dot(input, weights, 0)
loop(id, cortex_pid, af, {input_id_ps, m_input_id_ps}, output_pids, result + acc)
{^cortex_pid, :get_backup} ->
send(cortex_pid, {self(), id, m_input_id_ps})
loop(
id,
cortex_pid,
af,
{[{input_pid, weights} | input_id_ps], m_input_id_ps},
output_pids,
acc
)
{^cortex_pid, :terminate} ->
:ok
end
end
def loop(id, cortex_pid, af, {[bias], m_input_id_ps}, output_pids, acc) do
output = apply(__MODULE__, af, [acc + bias])
for output_pid <- output_pids, do: send(output_pid, {self(), :forward, [output]})
loop(id, cortex_pid, af, {m_input_id_ps, m_input_id_ps}, output_pids, 0)
end
def loop(id, cortex_pid, af, {[], m_input_id_ps}, output_pids, acc) do
output = apply(__MODULE__, af, [acc])
for output_pid <- output_pids, do: send(output_pid, {self(), :forward, [output]})
loop(id, cortex_pid, af, {m_input_id_ps, m_input_id_ps}, output_pids, 0)
end
def dot([i | input], [w | weights], acc), do: dot(input, weights, i * w + acc)
def dot([], [], acc), do: acc
@doc ~S"""
Though in this current implementation the neuron has only the tanh/1 and
sigmoid/1 function available to it, we will later extend the system to allow
different neurons to use different activation functions.
"""
def tanh(val), do: :math.tanh(val)
@doc """
Sigmoid function
"""
@spec sigmoid(number) :: float
def sigmoid(x) do
1 / (1 + :math.exp(-x))
end
end
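# Usage sketch for Automaton.Types.TWEANN.Neuron (editor's addition): a neuron
# is spawned by the exoself, initialised with a single state message, and then
# speaks the :forward / :get_backup / :terminate protocol handled by loop/6.
# cortex_pid, input_pid and output_pid are placeholders for other processes.
neuron = Automaton.Types.TWEANN.Neuron.gen(self())
send(neuron, {self(), {:n1, cortex_pid, :tanh, [{input_pid, [0.5, -0.2]}, 0.1], [output_pid]}})
send(neuron, {input_pid, :forward, [1.0, 2.0]})
# After all weighted inputs arrive, the neuron sends {neuron, :forward, [output]}
# to every pid in its output list, where output = tanh(dot(input, weights) + bias).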
# lib/automata/automaton_types/neuroevolution/neuron.ex
defmodule Cldr.Locale.Backend do
@moduledoc false
def define_locale_backend(config) do
quote location: :keep, bind_quoted: [config: Macro.escape(config)] do
defmodule Locale do
@moduledoc false
if Cldr.Config.include_module_docs?(config.generate_docs) do
@moduledoc """
Backend module that provides functions
to define new locales and display human-readable
locale names for presentation purposes.
"""
end
alias Cldr.{Locale, LanguageTag}
def new(locale_name), do: Locale.new(locale_name, unquote(config.backend))
def new!(locale_name), do: Locale.new!(locale_name, unquote(config.backend))
@doc """
Returns the territory from a language tag or
locale name.
## Arguments
* `locale` is any language tag returned by
`#{inspect(__MODULE__)}.new/1`
or a locale name in the list returned by
`#{inspect(config.backend)}.known_locale_names/0`
## Returns
* A territory code as an atom
## Examples
iex> #{inspect(__MODULE__)}.territory_from_locale "en-US"
:US
iex> #{inspect(__MODULE__)}.territory_from_locale "en-US-u-rg-GBzzzz"
:GB
"""
@spec territory_from_locale(LanguageTag.t() | Locale.locale_name()) ::
Locale.territory_code()
@doc since: "2.18.2"
def territory_from_locale(%LanguageTag{} = locale) do
Locale.territory_from_locale(locale)
end
def territory_from_locale(locale) do
Locale.territory_from_locale(locale, unquote(config.backend))
end
@doc """
Returns the time zone from a language tag or
locale name.
## Arguments
* `locale` is any language tag returned by
`#{inspect(__MODULE__)}.new/1`
or a locale name in the list returned by
`#{inspect(config.backend)}.known_locale_names/0`
## Returns
* A time zone ID as a string or
* `:error` if no time zone can be determined
## Examples
iex> #{inspect(__MODULE__)}.timezone_from_locale "en-US-u-tz-ausyd"
"Australia/Sydney"
"""
@doc since: "2.19.0"
@spec timezone_from_locale(LanguageTag.t() | Locale.locale_name()) ::
String.t() | {:error, {module(), String.t()}}
def timezone_from_locale(%LanguageTag{} = locale) do
Locale.timezone_from_locale(locale)
end
def timezone_from_locale(locale) do
Locale.timezone_from_locale(locale, unquote(config.backend))
end
@doc """
Returns the "best fit" locale for a given territory.
Using the population percentage data from CLDR, the
language most commonly spoken in the given territory
is used to form a locale name which is then validated
against the given backend.
First a territory-specific locale is validated and if
that fails, only the base language is validated.
For example, if the territory is `AU` then the
language most spoken is "en". First, the locale "en-AU"
is validated and if that fails, "en" is validated.
## Arguments
* `territory` is any ISO 3166 Alpha-2 territory
code that can be validated by `Cldr.validate_territory/1`
## Returns
* `{:ok, language_tag}` or
* `{:error, {exception, reason}}`
## Examples
iex> #{inspect(__MODULE__)}.locale_for_territory(:AU)
#{config.backend}.validate_locale(:"en-AU")
iex> #{inspect(__MODULE__)}.locale_for_territory(:US)
#{config.backend}.validate_locale(:"en-US")
iex> #{inspect(__MODULE__)}.locale_for_territory(:ZZ)
{:error, {Cldr.UnknownTerritoryError, "The territory :ZZ is unknown"}}
"""
@doc since: "2.26.0"
@spec locale_for_territory(Locale.territory_code()) ::
{:ok, LanguageTag.t()} | {:error, {module(), String.t()}}
def locale_for_territory(territory) do
Locale.locale_for_territory(territory)
end
@doc """
Returns a "best fit" locale for a host name.
## Arguments
* `host` is any valid host name
* `options` is a keyword list of options. The default
is `[]`.
## Options
* `:tlds` is a list of territory codes as upper-cased
atoms that are to be considered as top-level domains.
See `Cldr.Locale.locale_from_host/2` for the default
list.
## Returns
* `{:ok, language_tag}` or
* `{:error, {exception, reason}}`
## Notes
Certain top-level domains have become associated with content
unrelated to the territory for which the domain is registered.
Therefore Google (and perhaps others) do not associate these
TLDs as belonging to the territory but rather are considered
generic top-level domain names.
## Examples
iex> #{inspect(__MODULE__)}.locale_from_host "a.b.com.au"
#{config.backend}.validate_locale(:"en-AU")
iex> #{inspect(__MODULE__)}.locale_from_host("a.b.com.tv")
{:error,
{Cldr.UnknownLocaleError, "No locale was identified for territory \\"tv\\""}}
iex> #{inspect(__MODULE__)}.locale_from_host("a.b.com")
{:error,
{Cldr.UnknownLocaleError, "No locale was identified for territory \\"com\\""}}
"""
@doc since: "2.26.0"
@spec locale_from_host(String.t(), Keyword.t()) ::
{:ok, LanguageTag.t()} | {:error, {module(), String.t()}}
def locale_from_host(host, options \\ []) do
Locale.locale_from_host(host, unquote(config.backend), options)
end
@doc """
Returns the last segment of a host that might
be a territory.
## Arguments
* `host` is any valid host name
## Returns
* `{:ok, territory}` or
* `{:error, {exception, reason}}`
## Examples
iex> Cldr.Locale.territory_from_host("a.b.com.au")
{:ok, :AU}
iex> Cldr.Locale.territory_from_host("a.b.com")
{:error,
{Cldr.UnknownLocaleError, "No locale was identified for territory \\"com\\""}}
"""
@doc since: "2.26.0"
@spec territory_from_host(String.t()) ::
{:ok, Locale.territory_code()} | {:error, {module(), String.t()}}
def territory_from_host(host) do
Cldr.Locale.territory_from_host(host)
end
@doc """
Returns the list of fallback locales, starting
with the provided locale.
Fallbacks are a list of locale names which can
be used to resolve translation or other localization
data if such localised data does not exist for
this specific locale. After locale-specific fallbacks
are determined, the default locale and its fallbacks
are added to the chain.
## Arguments
* `locale` is any `LanguageTag.t`
## Returns
* `{:ok, list_of_locales}` or
* `{:error, {exception, reason}}`
## Examples
In these examples the default locale is `:"en-001"`.
#{inspect __MODULE__}.fallback_locales(#{inspect __MODULE__}.new!("fr-CA"))
=> {:ok,
[#Cldr.LanguageTag<fr-CA [validated]>, #Cldr.LanguageTag<fr [validated]>,
#Cldr.LanguageTag<en [validated]>]}
# Fallbacks are typically formed by progressively
# stripping variant, territory and script from the
# given locale name. But not always - there are
# certain fallbacks that take a different path.
#{inspect __MODULE__}.fallback_locales(#{inspect __MODULE__}.new!("nb"))
=> {:ok,
[#Cldr.LanguageTag<nb [validated]>, #Cldr.LanguageTag<no [validated]>,
#Cldr.LanguageTag<en [validated]>]}
"""
@spec fallback_locales(LanguageTag.t() | Cldr.Locale.locale_reference) ::
{:ok, [LanguageTag.t(), ...]} | {:error, {module(), binary()}}
@doc since: "2.26.0"
def fallback_locales(%LanguageTag{} = locale) do
Cldr.Locale.fallback_locales(locale)
end
@doc """
Returns the list of fallback locales, starting
with the provided locale name.
Fallbacks are a list of locale names which can
be used to resolve translation or other localization
data if such localised data does not exist for
this specific locale. After locale-specific fallbacks
are determined, the default locale and its fallbacks
are added to the chain.
## Arguments
* `locale_name` is any locale name returned by
`#{inspect config.backend}.known_locale_names/0`
## Returns
* `{:ok, list_of_locales}` or
* `{:error, {exception, reason}}`
## Examples
In these examples the default locale is `:"en-001"`.
#{inspect __MODULE__}.fallback_locales(:"fr-CA")
=> {:ok,
[#Cldr.LanguageTag<fr-CA [validated]>, #Cldr.LanguageTag<fr [validated]>,
#Cldr.LanguageTag<en [validated]>]}
# Fallbacks are typically formed by progressively
# stripping variant, territory and script from the
# given locale name. But not always - there are
# certain fallbacks that take a different path.
#{inspect __MODULE__}.fallback_locales(:nb))
=> {:ok,
[#Cldr.LanguageTag<nb [validated]>, #Cldr.LanguageTag<no [validated]>,
#Cldr.LanguageTag<en [validated]>]}
"""
@doc since: "2.26.0"
def fallback_locales(locale_name) do
Cldr.Locale.fallback_locales(locale_name, unquote(config.backend))
end
@doc """
Returns the list of fallback locale names, starting
with the provided locale.
Fallbacks are a list of locale names which can
be used to resolve translation or other localization
data if such localised data does not exist for
this specific locale. After locale-specific fallbacks
are determined, the default locale and its fallbacks
are added to the chain.
## Arguments
* `locale` is any `Cldr.LanguageTag.t`
## Returns
* `{:ok, list_of_locale_names}` or
* `{:error, {exception, reason}}`
## Examples
In these examples the default locale is `:"en-001"`.
iex> #{inspect __MODULE__}.fallback_locale_names(#{inspect __MODULE__}.new!("fr-CA"))
{:ok, [:"fr-CA", :fr, :"en-001", :en]}
# Fallbacks are typically formed by progressively
# stripping variant, territory and script from the
# given locale name. But not always - there are
# certain fallbacks that take a different path.
iex> #{inspect __MODULE__}.fallback_locale_names(#{inspect __MODULE__}.new!("nb"))
{:ok, [:nb, :no, :"en-001", :en]}
"""
@spec fallback_locale_names(LanguageTag.t() | Cldr.Locale.locale_reference) ::
{:ok, [Cldr.Locale.locale_name, ...]} | {:error, {module(), binary()}}
@doc since: "2.26.0"
def fallback_locale_names(%LanguageTag{} = locale) do
Cldr.Locale.fallback_locale_names(locale)
end
@doc """
Returns the list of fallback locale names, starting
with the provided locale name.
Fallbacks are a list of locale names which can
be used to resolve translation or other localization
data if such localised data does not exist for
this specific locale. After locale-specific fallbacks
are determined, the default locale and its fallbacks
are added to the chain.
## Arguments
* `locale_name` is any locale name returned by
`#{inspect config.backend}.known_locale_names/0`
## Returns
* `{:ok, list_of_locale_names}` or
* `{:error, {exception, reason}}`
## Examples
In these examples the default locale is `:"en-001"`.
iex> #{inspect __MODULE__}.fallback_locale_names(:"fr-CA")
{:ok, [:"fr-CA", :fr, :"en-001", :en]}
# Fallbacks are typically formed by progressively
# stripping variant, territory and script from the
# given locale name. But not always - there are
# certain fallbacks that take a different path.
iex> #{inspect __MODULE__}.fallback_locale_names(:nb)
{:ok, [:nb, :no, :"en-001", :en]}
"""
@doc since: "2.26.0"
def fallback_locale_names(locale_name) do
Cldr.Locale.fallback_locale_names(locale_name, unquote(config.backend))
end
end
end
end
end
# lib/cldr/backend/locale.ex
defmodule Formular.Compiler do
@moduledoc """
This module is used to compile the code into Elixir modules.
"""
@scope_and_binding_ops ~w[-> def]a
@scope_ops ~w[for]a
@binding_ops ~w[<- =]a
@doc """
Create an Elixir module from the raw code (AST).
The created module will have two public functions:
- `run/1` which accepts a binding keyword list and executes the code.
- `used_variables/0` which returns a list of variable names that have
been used in the code.
## Usage
```elixir
iex> ast = quote do: a + b
...> Formular.Compiler.create_module(MyMod, ast)
...> MyMod.run(a: 1, b: 2)
3
...> MyMod.used_variables()
[:a, :b]
```
"""
@spec create_module(module(), Macro.t(), Macro.Env.t()) :: {:module, module()}
def create_module(module, raw_ast, env \\ %Macro.Env{}) do
Module.create(
module,
mod_body(raw_ast, env),
env
)
{:module, module}
end
defp mod_body(raw_ast, env) do
quote do
unquote(importing(env))
unquote(def_run(raw_ast))
unquote(def_used_variables(raw_ast))
end
end
defp importing(%{functions: functions, macros: macros}) do
default = [{Kernel, [def: 2]}]
merge_f = fn _, a, b -> a ++ b end
imports =
default
|> Keyword.merge(functions, merge_f)
|> Keyword.merge(macros, merge_f)
for {mod, fun_list} <- imports do
quote do
import unquote(mod), only: unquote(fun_list)
end
end
end
defp def_run(raw_ast) do
{ast, args} = inject_vars(raw_ast)
quote do
def run(binding) do
unquote(def_args(args))
unquote(ast)
end
end
end
defp def_args(args) do
for arg <- args do
quote do
unquote(Macro.var(arg, __MODULE__)) = Keyword.fetch!(binding, unquote(arg))
end
end
end
@doc false
def extract_vars(ast),
do: do_extract_vars(ast) |> MapSet.to_list()
defp inject_vars(ast) do
collection = do_extract_vars(ast)
{
set_hygiene(ast, __MODULE__),
MapSet.to_list(collection)
}
end
defp do_extract_vars(ast) do
initial_vars = {
_bound_scopes = [MapSet.new([])],
_collection = MapSet.new([])
}
pre = fn
{:cond, _, [[do: cond_do_bock]]} = ast, acc ->
acc =
for {:->, _, [left, _right]} <- cond_do_bock,
unbind_var <- do_extract_vars(left),
reduce: acc do
acc ->
collect_var_if_unbind(acc, unbind_var)
end
{ast, acc}
{op, _, [left | _]} = ast, acc when op in @scope_and_binding_ops ->
bound = do_extract_vars(left)
{ast, acc |> push_scope() |> collect_bound(bound)}
{op, _, _} = ast, acc when op in @scope_ops ->
{ast, push_scope(acc)}
{op, _, [left, _]} = ast, acc when op in @binding_ops ->
bound = do_extract_vars(left)
{ast, collect_bound(acc, bound)}
{:^, _, [{pinned, _, _}]} = ast, acc when is_atom(pinned) ->
{ast, delete_unbound(acc, pinned)}
ast, vars ->
{ast, vars}
end
post = fn
{op, _, _} = ast, acc
when op in @scope_ops
when op in @scope_and_binding_ops ->
{ast, pop_scope(acc)}
{var, _meta, context} = ast, acc
when is_atom(var) and is_atom(context) ->
if defined?(var, acc) do
{ast, acc}
else
{ast, collect_var(acc, var)}
end
ast, vars ->
{ast, vars}
end
{^ast, {_, collection}} = Macro.traverse(ast, initial_vars, pre, post)
collection
end
defp push_scope({scopes, collection}),
do: {[MapSet.new([]) | scopes], collection}
defp pop_scope({scopes, collection}),
do: {tl(scopes), collection}
defp collect_var_if_unbind({scopes, collection}, var) do
if Enum.all?(scopes, &(var not in &1)) do
{scopes, MapSet.put(collection, var)}
else
{scopes, collection}
end
end
defp collect_var({scopes, collection}, unbind_var),
do: {scopes, MapSet.put(collection, unbind_var)}
defp delete_unbound({[scope | tail], collection}, var),
do: {[MapSet.delete(scope, var) | tail], collection}
defp collect_bound({[scope | tail], collection}, bounds),
do: {[MapSet.union(scope, bounds) | tail], collection}
defp defined?(var, {scopes, _}),
do: Enum.any?(scopes, &(var in &1))
defp set_hygiene(ast, hygiene_context) do
Macro.postwalk(ast, fn
{var, meta, context} when is_atom(var) and is_atom(context) ->
{var, meta, hygiene_context}
other ->
other
end)
end
defp def_used_variables(raw_ast) do
vars = extract_vars(raw_ast)
quote do
def used_variables do
[unquote_splicing(vars)]
end
end
end
end
# lib/formular/compiler.ex
defmodule ETag.Plug.Options do
@moduledoc """
Applies defaults and validates the given options for the plug. Allowed options are:
- `generator`
- `methods`
- `status_codes`
For details on their usage, values and defaults take a look at the `ETag.Plug` module.
"""
@spec sanitize!(Keyword.t()) :: Keyword.t()
def sanitize!(opts) do
unless Keyword.keyword?(opts) do
raise ArgumentError,
"Expected to receive a Keyword list as " <>
"options but instead received: #{inspect(opts)}"
end
opts
|> with_default!(:generator)
|> with_default!(:methods)
|> with_default!(:status_codes)
|> do_sanitize!()
end
defp with_default!(opts, key) do
Keyword.put_new_lazy(opts, key, fn -> config!(key) end)
end
defp config!(key), do: Application.fetch_env!(:etag_plug, key)
defp do_sanitize!(opts) do
opts
|> Keyword.update!(:generator, &validate_generator!/1)
|> Keyword.update!(:methods, &validate_and_uppercase_methods!/1)
|> Keyword.update!(:status_codes, &validate_status_codes!/1)
end
defp validate_generator!(generator) do
unless is_atom(generator) do
raise ArgumentError,
"Expected the generator to be a module but received: #{inspect(generator)}"
end
generator
end
defp validate_and_uppercase_methods!(methods) do
methods =
Enum.map(methods, fn
method when is_binary(method) ->
String.upcase(method)
method ->
raise ArgumentError,
"Expected the methods to be strings but received: #{inspect(method)}"
end)
with [] <- methods do
raise ArgumentError, "Received an empty list for `methods` which makes no sense!"
end
end
defp validate_status_codes!(status_codes) do
status_codes = Enum.map(status_codes, &Plug.Conn.Status.code/1)
with [] <- status_codes do
raise ArgumentError, "Received an empty list for `status_codes` which makes no sense!"
end
end
end
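# Usage sketch for ETag.Plug.Options (editor's addition): options are normally
# passed to the plug, which hands them to sanitize!/1; keys that are omitted
# fall back to the :etag_plug application environment. MyApp.ETagGenerator is a
# hypothetical generator module.
ETag.Plug.Options.sanitize!(
  generator: MyApp.ETagGenerator,
  methods: ["get", "put"],
  status_codes: [:ok, :created]
)
#=> [generator: MyApp.ETagGenerator, methods: ["GET", "PUT"], status_codes: [200, 201]]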
# lib/etag/plug/options.ex
defmodule Ffaker.KoKr.Address do
@moduledoc"""
A module containing functions for generating Korean address data.
"""
use Ffaker
alias Ffaker.KoKr.Name
import Ffaker, only: [numerify: 1]
@building_dongs ~w(가 나 다 라 마 바 ## ###)
@building_suffixes ~w(빌라 아파트 연립 마을 타운 타워)
@street_suffixes ~w(로 #로 가 #가 거리 길)
@town_suffixes ~w(동 리 마을)
@metropolitan_cities ~w(서울특별시 부산광역시 대구광역시 인천광역시
광주광역시 대전광역시 울산광역시 세종특별자치시)
@provinces ~w(경기도 강원도 충청북도 충청남도 전라북도 전라남도 경상북도
경상남도 제주특별자치도)
@doc"""
Returns a postal code for a road-name address.
## Examples
iex> Ffaker.KoKr.Address.postal_code
"12345"
"""
@spec postal_code() :: String.t
def postal_code do
numerify("#####")
end
@doc"""
Returns a postal code in the old address format.
## Examples
iex> Ffaker.KoKr.Address.old_postal_code
"123-456"
"""
@spec old_postal_code() :: String.t
def old_postal_code do
numerify("###-###")
end
@doc"""
Returns a road-name address.
## Examples
iex> Ffaker.KoKr.Address.road_address
"전라남도 수원시 영통구 병철5로"
"""
@spec road_address() :: String.t
def road_address do
prefix =
["#{metropolitan_city()} #{borough()}", "#{province()} #{city()}"]
|> Enum.random
suffix = Enum.random([street(), "#{street()} (#{city()})"])
"#{prefix} #{suffix}"
end
@doc"""
Returns an old-style (land-lot) address.
## Examples
iex> Ffaker.KoKr.Address.land_address
"경상북도 안산시 단원구 예준마을 821-65"
"""
@spec land_address() :: String.t
def land_address do
prefix =
["#{metropolitan_city()} #{borough()}", "#{province()} #{city()}"]
|> Enum.random
"#{prefix} #{town()} #{land_number()}"
end
@doc"""
Returns a land-lot number for an old-style address.
## Examples
iex> Ffaker.KoKr.Address.land_number
"1192-2"
"""
@spec land_number() :: String.t
def land_number do
~w(### ###-# ###-## #### ####-#) |> Enum.random |> numerify
end
@doc"""
Returns a building name.
## Examples
iex> Ffaker.KoKr.Address.building_name
"대영빌딩"
"""
@spec building_name() :: String.t
def building_name do
Name.first_name <> Enum.random(@building_suffixes)
end
@doc"""
Returns a building name and unit number.
## Examples
iex> Ffaker.KoKr.Address.address_detail
"대영빌딩 123호"
"""
@spec address_detail() :: String.t
def address_detail do
dong = @building_dongs |> Enum.random |> numerify
suffix = numerify("###호")
name = building_name()
Enum.random([name, name <> dong <> suffix, name <> suffix])
end
@doc"""
Returns a street name.
## Examples
iex> Ffaker.KoKr.Address.street
"대영2로"
"""
@spec street() :: String.t
def street do
suffix = @street_suffixes |> Enum.random |> numerify
Name.first_name <> suffix
end
@doc"""
Returns a town (dong) name.
## Examples
iex> Ffaker.KoKr.Address.town
"대영마을"
"""
@spec town() :: String.t
def town do
Name.first_name <> Enum.random(@town_suffixes)
end
@doc"""
Returns a borough (gu) name.
## Examples
iex> Ffaker.KoKr.Address.borough
"종로구"
"""
@spec borough() :: String.t
def borough do
Enum.random(~F(boroughs))
end
@doc"""
Returns a city name.
## Examples
iex> Ffaker.KoKr.Address.city
"화성시"
"""
@spec city() :: String.t
def city do
Enum.random(~F(cities))
end
@doc"""
Returns a province name.
## Examples
iex> Ffaker.KoKr.Address.province
"경기도"
"""
@spec province() :: String.t
def province do
Enum.random(@provinces)
end
@doc"""
Returns a metropolitan city name.
## Examples
iex> Ffaker.KoKr.Address.metropolitan_city
"서울특별시"
"""
@spec metropolitan_city() :: String.t
def metropolitan_city do
Enum.random(@metropolitan_cities)
end
end
# lib/ffaker/ko_kr/address.ex
defmodule StellarBase.XDR.ClaimOfferAtomV0 do
@moduledoc """
Representation of Stellar `ClaimOfferAtomV0` type.
ClaimOfferAtomV0 is a ClaimOfferAtom with the AccountID discriminant stripped
off, leaving a raw ed25519 public key to identify the source account. This is
used for backwards compatibility starting from the protocol 17/18 boundary.
If an "old-style" ClaimOfferAtom is parsed with this XDR definition, it will
be parsed as a "new-style" ClaimAtom containing a ClaimOfferAtomV0.
"""
alias StellarBase.XDR.{Asset, Int64, UInt256}
@behaviour XDR.Declaration
@struct_spec XDR.Struct.new(
seller_ed25519: UInt256,
offer_id: Int64,
asset_sold: Asset,
amount_sold: Int64,
asset_bought: Asset,
amount_bought: Int64
)
@type t :: %__MODULE__{
seller_ed25519: UInt256.t(),
offer_id: Int64.t(),
asset_sold: Asset.t(),
amount_sold: Int64.t(),
asset_bought: Asset.t(),
amount_bought: Int64.t()
}
defstruct [:seller_ed25519, :offer_id, :asset_sold, :amount_sold, :asset_bought, :amount_bought]
@spec new(
seller_ed25519 :: UInt256.t(),
offer_id :: Int64.t(),
asset_sold :: Asset.t(),
amount_sold :: Int64.t(),
asset_bought :: Asset.t(),
amount_bought :: Int64.t()
) :: t()
def new(
%UInt256{} = seller_ed25519,
%Int64{} = offer_id,
%Asset{} = asset_sold,
%Int64{} = amount_sold,
%Asset{} = asset_bought,
%Int64{} = amount_bought
),
do: %__MODULE__{
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
}
@impl true
def encode_xdr(%__MODULE__{
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
}) do
[
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
]
|> XDR.Struct.new()
|> XDR.Struct.encode_xdr()
end
@impl true
def encode_xdr!(%__MODULE__{
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
}) do
[
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
]
|> XDR.Struct.new()
|> XDR.Struct.encode_xdr!()
end
@impl true
def decode_xdr(bytes, struct \\ @struct_spec)
def decode_xdr(bytes, struct) do
case XDR.Struct.decode_xdr(bytes, struct) do
{:ok,
{%XDR.Struct{
components: [
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
]
}, rest}} ->
{:ok,
{new(seller_ed25519, offer_id, asset_sold, amount_sold, asset_bought, amount_bought),
rest}}
error ->
error
end
end
@impl true
def decode_xdr!(bytes, struct \\ @struct_spec)
def decode_xdr!(bytes, struct) do
{%XDR.Struct{
components: [
seller_ed25519: seller_ed25519,
offer_id: offer_id,
asset_sold: asset_sold,
amount_sold: amount_sold,
asset_bought: asset_bought,
amount_bought: amount_bought
]
}, rest} = XDR.Struct.decode_xdr!(bytes, struct)
{new(seller_ed25519, offer_id, asset_sold, amount_sold, asset_bought, amount_bought), rest}
end
end
# lib/xdr/transactions/claim_offer_atom_v0.ex
defmodule Glock.Conn do
@moduledoc """
Defines the glock connection struct that serves as
the configuration state of an initialized glock process.
The struct tracks all configuration settings and arguments
passed into the connection when it is initialized and provides
common default values for all settings except for the host
and path of the remote websocket server.
Provides utility functions for creating and ensuring the proper
default values are set within the connection struct.
For the various connection options please see
https://ninenines.eu/docs/en/gun/2.0/manual/gun
https://www.erlang.org/doc/man/ssl.html#type-tls_client_option
By default the connection uses TLS with no verification of the host name,
which covers the common cases with minimal configuration.
For an unencrypted connection, pass `transport: :tcp` and sensible defaults
are applied for the rest.
To verify the host name, set `host_name_verify: 'example.com'` (a charlist or
a binary is accepted) and, again, sensible defaults are applied for the rest.
For anything more custom, pass a fully nested spec and it will override the
defaults below.
"""
@type t :: %__MODULE__{
client: pid,
connect_opts: %{
connect_timeout: non_neg_integer,
retry: non_neg_integer,
retry_timeout: non_neg_integer,
transport: :tcp | :tls,
tls_opts: %{
verify: :verify_none | :verify_peer,
cacerts: fun(),
depth: integer(),
server_name_indication: charlist() | nil,
reuse_sessions: boolean(),
verify_fun: tuple() | nil
},
# This can also be :http2, etc
protocols: [:http],
# Request http 1.1 from the server
# Typically %{version: :"HTTP/1.1"}
# Note: the second value is an atom
http_opts: map()
},
handler_init_args: term,
headers: [binary],
host: charlist,
monitor: reference,
path: charlist,
port: non_neg_integer,
stream: reference,
stream_state: term,
ws_opts: %{
compress: boolean,
closing_timeout: non_neg_integer,
keepalive: non_neg_integer
}
}
defstruct client: nil,
connect_opts: %{
connect_timeout: 60_000,
retry: 10,
retry_timeout: 300,
transport: :tls,
tls_opts: [
# If you want to use verify_peer instead you should fill in server_name_indication and verify_fun
verify: :verify_none,
# Alternatively something like
# cacertfile: CAStore.file_path(),
cacerts: :certifi.cacerts(),
depth: 99,
# Make sure this matches :check_hostname
# server_name_indication: 'example.com',
reuse_sessions: false
# verify_fun: {&:ssl_verify_hostname.verify_fun/3, [check_hostname: 'example.com']}
],
protocols: [:http],
http_opts: %{version: :"HTTP/1.1"}
},
handler_init_args: %{},
headers: [],
host: nil,
monitor: nil,
path: nil,
port: 443,
stream: nil,
stream_state: nil,
ws_opts: %{
compress: false,
closing_timeout: 15_000,
keepalive: 5_000
}
# See docs for better info
# https://ninenines.eu/docs/en/gun/2.0/manual/gun/
# https://www.erlang.org/doc/man/ssl.html#type-client_option
@allowed_gun_opts [
# non_neg_integer
:connect_timeout,
# gun_cookies:store()
:cookie_store,
# non_neg_integer
:domain_lookup_timeout,
# map
:http_opts,
# map
:http2_opts,
# :http or :http2, etc. (see docs)
:protocols,
# non_neg_integer
:retry,
# function
:retry_fun,
# pos_integer
:retry_timeout,
# boolean
:supervise,
# map
:tcp_opts,
# pos_integer
:tls_handshake_timeout,
# keyword https://www.erlang.org/doc/man/ssl.html#type-client_option
:tls_opts,
# boolean
:trace,
# :tcp | :tls
:transport
]
# Any of these gun options may be passed to new/1 and will be merged over the defaults
@doc """
Reduces over a keyword list of arguments for configuring the
glock process and adds them to an empty instance of the `Glock.Conn.t`
struct. Configs are merged with values passed by the user superseding
default values with the exception of the http protocol which is locked
to HTTP/1.1 for websocket compatibility.
"""
@spec new(keyword) :: Glock.Conn.t()
def new(opts) do
opts
|> Enum.reduce(%__MODULE__{}, &put_opts/2)
|> validate_required()
end
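  # Usage sketch (hypothetical host and path), assuming gun is the underlying
  # websocket client as described in the moduledoc:
  #
  #   conn = Glock.Conn.new(host: "echo.example.com", path: "/ws",
  #                         ws_opts: %{keepalive: 10_000})
  #
  #   conn.connect_opts.transport  #=> :tls (default)
  #   conn.port                    #=> 443 (default)
  #
  # Pass `transport: :tcp` for an unencrypted connection, or
  # `host_name_verify: "echo.example.com"` to switch tls_opts to :verify_peer.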
defp validate_required(%__MODULE__{host: host, path: path}) when host == nil or path == nil do
raise Glock.ConnError,
message:
"Must supply valid socket host and path. Binary strings are accepted for both. Received: #{inspect(host: host, path: path)}"
end
defp validate_required(conn), do: conn
defp put_opts({:host, host}, conn) when is_binary(host) do
Map.put(conn, :host, to_charlist(host))
end
# This can be a charlist or ip address
# https://www.erlang.org/doc/man/inet.html#type-ip_address
defp put_opts({:host, host}, conn) do
Map.put(conn, :host, host)
end
# path should be binary
defp put_opts({:path, path}, conn) when is_list(path) do
Map.put(conn, :path, to_string(path))
end
defp put_opts({:path, path}, conn) when is_binary(path) do
Map.put(conn, :path, path)
end
defp put_opts({:connect_opts, value}, conn) do
merged_opts =
conn.connect_opts
|> Map.merge(value, fn _key, _default, override -> override end)
%{conn | connect_opts: merged_opts}
end
# If they want no security strip out tls_opts
defp put_opts({:transport, :tcp}, %{connect_opts: connect_opts} = conn) do
%{
conn
| connect_opts:
Map.delete(connect_opts, :tls_opts) |> Map.update!(:transport, fn _ -> :tcp end)
}
end
# If they want to verify the host put that into default tls_opts
defp put_opts({:host_name_verify, host_name}, %{connect_opts: connect_opts} = conn) do
%{
conn
| connect_opts: update_in(connect_opts, [:tls_opts], fn _ -> put_verify_peer(host_name) end)
}
end
defp put_opts({opt, value}, %{connect_opts: connect_opts} = conn)
when opt in @allowed_gun_opts do
%{conn | connect_opts: update_in(connect_opts, [opt], fn _ -> value end)}
end
# ws_opts is passed on the websocket upgrade, so it doesn't go into connect_opts
defp put_opts({:ws_opts, value}, %{ws_opts: ws_opts} = conn) do
%{
conn
| ws_opts: Map.merge(ws_opts, value, fn _key, _default, override -> override end)
}
end
defp put_opts({key, value}, conn), do: Map.put(conn, key, value)
# If a binary is sent in, convert it to a charlist and forward
defp put_verify_peer(host_name) when is_binary(host_name) do
host_name |> to_charlist() |> put_verify_peer
end
defp put_verify_peer(host_name) do
[
verify: :verify_peer,
cacerts: :certifi.cacerts(),
depth: 99,
server_name_indication: host_name,
reuse_sessions: false,
verify_fun: {&:ssl_verify_hostname.verify_fun/3, [check_hostname: host_name]}
]
end
end
|
lib/glock/conn.ex
|
defmodule Snapex7.Client do
use GenServer
require Logger
@c_timeout 5000
@block_types [
OB: 0x38,
DB: 0x41,
SDB: 0x42,
FC: 0x43,
SFC: 0x44,
FB: 0x45,
SFB: 0x46
]
@connection_types [
PG: 0x01,
OP: 0x02,
S7_basic: 0x03
]
@area_types [
PE: 0x81,
PA: 0x82,
MK: 0x83,
DB: 0x84,
CT: 0x1C,
TM: 0x1D
]
@word_types [
bit: 0x01,
byte: 0x02,
word: 0x04,
d_word: 0x06,
real: 0x08,
counter: 0x1C,
timer: 0x1D
]
defmodule State do
@moduledoc false
# port: C port process
# controlling_process: where events get sent
# queued_messages: queued messages when in passive mode
# ip: the address of the server
# rack: the rack of the server.
# slot: the slot of the server.
# is_active: active or passive mode
defstruct port: nil,
controlling_process: nil,
queued_messages: [],
ip: nil,
rack: nil,
slot: nil,
state: nil,
is_active: false
end
@doc """
Start up a Snap7 Client GenServer.
"""
@spec start_link([term]) :: {:ok, pid} | {:error, term} | {:error, :einval}
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, [], opts)
end
@doc """
Stop the Snap7 Client GenServer.
"""
@spec stop(GenServer.server()) :: :ok
def stop(pid) do
GenServer.stop(pid)
end
# Administrative functions.
  @type connect_opt ::
          {:ip, bitstring}
          | {:rack, 0..7}
          | {:slot, 1..31}
          | {:local_tsap, integer}
          | {:remote_tsap, integer}
          | {:active, boolean}
@doc """
Connect to a S7 server.
The following options are available:
* `:active` - (`true` or `false`) specifies whether data is received as
messages or by calling "Data I/O functions".
* `:ip` - (string) PLC/Equipment IPV4 Address (e.g., "192.168.0.1")
* `:rack` - (int) PLC Rack number (0..7).
* `:slot` - (int) PLC Slot number (1..31).
For more info see pg. 96 of the Snap7 docs.
"""
@spec connect_to(GenServer.server(), [connect_opt]) :: :ok | {:error, map()} | {:error, :einval}
def connect_to(pid, opts \\ []) do
GenServer.call(pid, {:connect_to, opts})
end
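  # Usage sketch (hypothetical PLC address):
  #
  #   {:ok, pid} = Snapex7.Client.start_link()
  #   :ok = Snapex7.Client.connect_to(pid, ip: "192.168.0.1", rack: 0, slot: 1)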
@doc """
Sets the connection resource type, i.e the way in which the Clients connects to a PLC.
"""
@spec set_connection_type(GenServer.server(), atom()) ::
:ok | {:error, map()} | {:error, :einval}
def set_connection_type(pid, connection_type) do
GenServer.call(pid, {:set_connection_type, connection_type})
end
@doc """
Sets internally (IP, LocalTSAP, RemoteTSAP) Coordinates
The following options are available:
* `:ip` - (string) PLC/Equipment IPV4 Address (e.g., "192.168.0.1")
* `:local_tsap` - (int) Local TSAP (PC TSAP) // 0.
* `:remote_tsap` - (int) Remote TSAP (PLC TSAP) // 0.
"""
@spec set_connection_params(GenServer.server(), [connect_opt]) ::
:ok | {:error, map()} | {:error, :einval}
def set_connection_params(pid, opts \\ []) do
GenServer.call(pid, {:set_connection_params, opts})
end
@doc """
Connects the client to the PLC with the parameters specified in the previous call of
`connect_to/2` or `set_connection_params/2`.
"""
@spec connect(GenServer.server()) :: :ok | {:error, map()} | {:error, :einval}
def connect(pid) do
GenServer.call(pid, :connect)
end
@doc """
Gracefully disconnects the Client from the PLC.
"""
@spec disconnect(GenServer.server()) :: :ok | {:error, map()} | {:error, :einval}
def disconnect(pid) do
GenServer.call(pid, :disconnect)
end
@doc """
Reads an internal Client object parameter.
For more info see pg. 89 of the Snap7 docs.
"""
@spec get_params(GenServer.server(), integer()) :: :ok | {:error, map()} | {:error, :einval}
def get_params(pid, param_number) do
GenServer.call(pid, {:get_params, param_number})
end
@doc """
Sets an internal Client object parameter.
"""
@spec set_params(GenServer.server(), integer(), integer()) ::
:ok | {:error, map()} | {:error, :einval}
def set_params(pid, param_number, value) do
GenServer.call(pid, {:set_params, param_number, value})
end
@type data_io_opt ::
{:area, atom}
| {:db_number, integer}
| {:start, integer}
| {:amount, integer}
| {:word_len, atom}
| {:data, bitstring}
# Data I/O functions
@doc """
Reads a data area from a PLC.
The following options are available:
* `:area` - (atom) Area Identifier (see @area_types).
* `:db_number` - (int) DB number, if `area: :DB` otherwise is ignored.
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words to read/write.
* `:word_len` - (atom) Word size (see @word_types).
For more info see pg. 104 of the Snap7 docs.
"""
@spec read_area(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def read_area(pid, opts) do
GenServer.call(pid, {:read_area, opts})
end
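  # Usage sketch (hypothetical DB layout), reading 4 bytes from DB1 on an
  # already connected client `pid`:
  #
  #   {:ok, bytes} =
  #     Snapex7.Client.read_area(pid,
  #       area: :DB, db_number: 1, start: 0, amount: 4, word_len: :byte)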
@doc """
Writes a data area to a PLC.
The following options are available:
* `:area` - (atom) Area Identifier (see @area_types).
* `:db_number` - (int) DB number, if `area: :DB` otherwise is ignored.
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words to read/write.
* `:word_len` - (atom) Word size (see @word_types).
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec write_area(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def write_area(pid, opts) do
GenServer.call(pid, {:write_area, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC DB.
It simply internally calls read_area/2 with
* `area: :DB`
* `word_len: :byte`
The following options are available:
* `:db_number` - (int) DB number (0..0xFFFF).
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec db_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def db_read(pid, opts) do
GenServer.call(pid, {:db_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC DB.
It simply internally calls write_area/2 with
* `area: :DB`
* `word_len: :byte`
The following options are available:
* `:db_number` - (int) DB number (0..0xFFFF).
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec db_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def db_write(pid, opts) do
GenServer.call(pid, {:db_write, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC process outputs.
It simply internally calls read_area/2 with
* `area: :PA`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec ab_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def ab_read(pid, opts) do
GenServer.call(pid, {:ab_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC process outputs.
It simply internally calls write_area/2 with
* `area: :PA`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec ab_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def ab_write(pid, opts) do
GenServer.call(pid, {:ab_write, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC process inputs.
It simply internally calls read_area/2 with
* `area: :PE`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec eb_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def eb_read(pid, opts) do
GenServer.call(pid, {:eb_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC process inputs.
It simply internally calls write_area/2 with
* `area: :PE`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec eb_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def eb_write(pid, opts) do
GenServer.call(pid, {:eb_write, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC merkers.
It simply internally calls read_area/2 with
* `area: :MK`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec mb_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def mb_read(pid, opts) do
GenServer.call(pid, {:mb_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC merkers.
It simply internally calls write_area/2 with
* `area: :MK`
* `word_len: :byte`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec mb_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def mb_write(pid, opts) do
GenServer.call(pid, {:mb_write, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC Timers.
It simply internally calls read_area/2 with
* `area: :TM`
* `word_len: :timer`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec tm_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def tm_read(pid, opts) do
GenServer.call(pid, {:tm_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC Timers.
It simply internally calls write_area/2 with
* `area: :TM`
* `word_len: :timer`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec tm_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def tm_write(pid, opts) do
GenServer.call(pid, {:tm_write, opts})
end
@doc """
This is a lean function of read_area/2 to read PLC Counters.
It simply internally calls read_area/2 with
* `area: :CT`
* `word_len: :counter`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec ct_read(GenServer.server(), [data_io_opt]) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def ct_read(pid, opts) do
GenServer.call(pid, {:ct_read, opts})
end
@doc """
This is a lean function of write_area/2 to write PLC Counters.
It simply internally calls write_area/2 with
* `area: :CT`
* `word_len: :counter`
The following options are available:
* `:start` - (int) An offset to start.
* `:amount` - (int) Amount of words (bytes) to read/write.
* `:data` - (bitstring) buffer to write.
For more info see pg. 104 of the Snap7 docs.
"""
@spec ct_write(GenServer.server(), [data_io_opt]) :: :ok | {:error, map()} | {:error, :einval}
def ct_write(pid, opts) do
GenServer.call(pid, {:ct_write, opts})
end
@doc """
This function allows reading different kinds of variables from a PLC in a single call.
With it you can read DB, inputs, outputs, Merkers, Timers and Counters.
The following options are available:
* `:data` - (list of maps) a list of requests (maps with @data_io_opt options as keys) to read from PLC.
For more info see pg. 119 of the Snap7 docs.
"""
@spec read_multi_vars(GenServer.server(), list) ::
{:ok, bitstring} | {:error, map()} | {:error, :einval}
def read_multi_vars(pid, opt) do
GenServer.call(pid, {:read_multi_vars, opt})
end
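  # Usage sketch (hypothetical addresses): each entry in :data is a map keyed
  # by the data_io_opt options above:
  #
  #   Snapex7.Client.read_multi_vars(pid,
  #     data: [
  #       %{area: :DB, db_number: 1, start: 0, amount: 2, word_len: :byte},
  #       %{area: :MK, start: 10, amount: 1, word_len: :byte}
  #     ]
  #   )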
@doc """
This function allows writing different kinds of variables to a PLC in a single call.
With it you can write DB, inputs, outputs, Merkers, Timers and Counters.
The following options are available:
* `:data` - (list of maps) a list of requests (maps with @data_io_opt options as keys) to write to the PLC.
For more info see pg. 119 of the Snap7 docs.
"""
@spec write_multi_vars(GenServer.server(), [data_io_opt]) ::
:ok | {:error, map()} | {:error, :einval}
def write_multi_vars(pid, opts) do
GenServer.call(pid, {:write_multi_vars, opts})
end
# Directory functions
@doc """
This function returns the number of AG blocks, divided by type.
"""
@spec list_blocks(GenServer.server()) :: {:ok, list} | {:error, map()} | {:error, :einval}
def list_blocks(pid) do
GenServer.call(pid, :list_blocks)
end
@doc """
This function returns the AG list of a specified block type.
"""
@spec list_blocks_of_type(GenServer.server(), atom(), integer()) ::
{:ok, list} | {:error, map} | {:error, :einval}
def list_blocks_of_type(pid, block_type, n_items) do
GenServer.call(pid, {:list_blocks_of_type, block_type, n_items})
end
@doc """
Return detail information about an AG given block.
This function is very useful if you need to read or write data in a DB
whose size you do not know in advance (see pg. 127).
"""
@spec get_ag_block_info(GenServer.server(), atom(), integer()) ::
{:ok, list} | {:error, map} | {:error, :einval}
def get_ag_block_info(pid, block_type, block_num) do
GenServer.call(pid, {:get_ag_block_info, block_type, block_num})
end
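  # Usage sketch: discover the size of a DB before reading it (hypothetical
  # DB number; the exact keys in `info` depend on the C port's reply):
  #
  #   {:ok, info} = Snapex7.Client.get_ag_block_info(pid, :DB, 1)
  #   # use the reported data size as the :amount of a later db_read/2 call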
@doc """
Return detailed information about a block present in a user buffer.
This function is usually used in conjunction with full_upload/2.
An uploaded block saved to disk can be loaded into a user buffer
and checked with this function.
"""
@spec get_pg_block_info(GenServer.server(), bitstring()) ::
{:ok, list} | {:error, map} | {:error, :einval}
def get_pg_block_info(pid, buffer) do
GenServer.call(pid, {:get_pg_block_info, buffer})
end
# Block Oriented functions
@doc """
Uploads a block from AG. (gets a block from PLC)
The whole block (including header and footer) is copied into the user buffer (as bytes).
"""
@spec full_upload(GenServer.server(), atom(), integer(), integer()) ::
{:ok, bitstring} | {:error, map} | {:error, :einval}
def full_upload(pid, block_type, block_num, bytes2read) do
GenServer.call(pid, {:full_upload, block_type, block_num, bytes2read})
end
@doc """
Uploads a block from AG. (gets a block from PLC)
Only the block body (without header and footer) is copied into the user buffer (as bytes).
"""
@spec upload(GenServer.server(), atom(), integer(), integer()) ::
{:ok, bitstring} | {:error, map} | {:error, :einval}
def upload(pid, block_type, block_num, bytes2read) do
GenServer.call(pid, {:upload, block_type, block_num, bytes2read})
end
@doc """
Downloads a block into AG. (sends a block to the PLC)
The whole block (including header and footer) must be available into the user buffer.
"""
@spec download(GenServer.server(), integer(), bitstring()) ::
:ok | {:error, map} | {:error, :einval}
def download(pid, block_num, buffer) do
GenServer.call(pid, {:download, block_num, buffer})
end
@doc """
Deletes a block from AG.
(There is no undo function available.)
"""
@spec delete(GenServer.server(), atom(), integer()) :: :ok | {:error, map} | {:error, :einval}
def delete(pid, block_type, block_num) do
GenServer.call(pid, {:delete, block_type, block_num})
end
@doc """
Uploads a DB from AG.
This function is equivalent to upload/4 with block_type = :DB but it uses a
different approach so it's not subject to the security level set.
Only data is uploaded.
"""
@spec db_get(GenServer.server(), integer(), integer()) ::
{:ok, list} | {:error, map} | {:error, :einval}
def db_get(pid, db_number, size \\ 65536) do
GenServer.call(pid, {:db_get, db_number, size})
end
@doc """
Fills a DB in AG with a given byte without the need of specifying its size.
"""
@spec db_fill(GenServer.server(), integer(), integer()) ::
{:ok, list} | {:error, map} | {:error, :einval}
def db_fill(pid, db_number, fill_char) do
GenServer.call(pid, {:db_fill, db_number, fill_char})
end
# Date/Time functions
@doc """
Reads PLC date and time, if successful, returns `{:ok, date, time}`
"""
@spec get_plc_date_time(GenServer.server()) ::
{:ok, term, term} | {:error, map} | {:error, :einval}
def get_plc_date_time(pid) do
GenServer.call(pid, :get_plc_date_time)
end
@type plc_time_opt ::
{:sec, 0..59}
| {:min, 0..59}
| {:hour, 0..23}
| {:mday, 1..31}
| {:mon, 1..12}
| {:year, integer}
| {:wday, 0..6}
| {:yday, 0..365}
| {:isdst, integer}
@doc """
Sets PLC date and time.
The following options are available:
* `:sec` - (int) seconds after the minute (0..59).
* `:min` - (int) minutes after the hour (0..59).
* `:hour` - (int) hour since midnight (0..23).
* `:mday` - (int) day of the month (1..31).
* `:mon` - (int) month since January (1..12).
* `:year` - (int) year (1900...).
* `:wday` - (int) days since Sunday (0..6).
* `:yday` - (int) days since January 1 (0..365).
* `:isdst` - (int) Daylight Saving Time flag.
The default for all options is the minimum value.
"""
@spec set_plc_date_time(GenServer.server(), [plc_time_opt]) ::
:ok | {:error, map} | {:error, :einval}
def set_plc_date_time(pid, opts \\ []) do
GenServer.call(pid, {:set_plc_date_time, opts})
end
@doc """
Sets the PLC date and time in accord to the PC system Date/Time.
"""
@spec set_plc_system_date_time(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def set_plc_system_date_time(pid) do
GenServer.call(pid, :set_plc_system_date_time)
end
# System info functions
@doc """
Reads a partial list of given ID and INDEX
See System Software for S7-300/400 System and Standard Functions
Volume 1 and Volume 2 for ID and INDEX info (chapter 13.3), look for
TIA Portal Information Systems for DR data type.
"""
@spec read_szl(GenServer.server(), integer, integer) ::
{:ok, bitstring} | {:error, map} | {:error, :einval}
def read_szl(pid, id, index) do
GenServer.call(pid, {:read_szl, id, index})
end
@doc """
Reads the directory of the partial list
"""
@spec read_szl_list(GenServer.server()) :: {:ok, list} | {:error, map} | {:error, :einval}
def read_szl_list(pid) do
GenServer.call(pid, :read_szl_list)
end
@doc """
Gets CPU order code and version info.
"""
@spec get_order_code(GenServer.server()) :: {:ok, list} | {:error, map} | {:error, :einval}
def get_order_code(pid) do
GenServer.call(pid, :get_order_code)
end
@doc """
Gets CPU module name, serial number and other info.
"""
@spec get_cpu_info(GenServer.server()) :: {:ok, list} | {:error, map} | {:error, :einval}
def get_cpu_info(pid) do
GenServer.call(pid, :get_cpu_info)
end
@doc """
Gets CP (communication processor) info.
"""
@spec get_cp_info(GenServer.server()) :: {:ok, list} | {:error, map} | {:error, :einval}
def get_cp_info(pid) do
GenServer.call(pid, :get_cp_info)
end
# PLC control functions
@doc """
Puts the CPU in RUN mode performing a HOT START.
"""
@spec plc_hot_start(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def plc_hot_start(pid) do
GenServer.call(pid, :plc_hot_start)
end
@doc """
Puts the CPU in RUN mode performing a COLD START.
"""
@spec plc_cold_start(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def plc_cold_start(pid) do
GenServer.call(pid, :plc_cold_start)
end
@doc """
Puts the CPU in STOP mode.
"""
@spec plc_stop(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def plc_stop(pid) do
GenServer.call(pid, :plc_stop)
end
@doc """
Performs the copy ram to rom action. (CPU must be in STOP mode)
"""
@spec copy_ram_to_rom(GenServer.server(), integer) :: :ok | {:error, map} | {:error, :einval}
def copy_ram_to_rom(pid, timeout \\ 1000) do
GenServer.call(pid, {:copy_ram_to_rom, timeout})
end
@doc """
Performs the memory compress action (not all CPUs support this function, and the CPU must be in STOP mode).
"""
@spec compress(GenServer.server(), integer) :: :ok | {:error, map} | {:error, :einval}
def compress(pid, timeout \\ 1000) do
GenServer.call(pid, {:compress, timeout})
end
@doc """
Returns the CPU status (running/stopped).
"""
@spec get_plc_status(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def get_plc_status(pid) do
GenServer.call(pid, :get_plc_status)
end
# Security functions
@doc """
Sends the password (an 8-character string) to the PLC to meet its security level.
"""
@spec set_session_password(GenServer.server(), bitstring()) ::
:ok | {:error, map} | {:error, :einval}
def set_session_password(pid, password) do
GenServer.call(pid, {:set_session_password, password})
end
@doc """
Clears the password set for the current session (logout).
"""
@spec clear_session_password(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def clear_session_password(pid) do
GenServer.call(pid, :clear_session_password)
end
@doc """
Gets the CPU protection level info.
"""
@spec get_protection(GenServer.server()) :: :ok | {:error, map} | {:error, :einval}
def get_protection(pid) do
GenServer.call(pid, :get_protection)
end
# Low level functions
@doc """
Exchanges a given S7 PDU (protocol data unit) with the CPU.
"""
@spec iso_exchange_buffer(GenServer.server(), bitstring) ::
:ok | {:error, map} | {:error, :einval}
def iso_exchange_buffer(pid, buffer) do
GenServer.call(pid, {:iso_exchange_buffer, buffer})
end
# Miscellaneous functions
@doc """
Returns the last job execution time in milliseconds.
"""
@spec get_exec_time(GenServer.server()) :: {:ok, integer} | {:error, map} | {:error, :einval}
def get_exec_time(pid) do
GenServer.call(pid, :get_exec_time)
end
@doc """
Returns the last job result.
"""
@spec get_last_error(GenServer.server()) :: {:ok, map} | {:error, map} | {:error, :einval}
def get_last_error(pid) do
GenServer.call(pid, :get_last_error)
end
@doc """
Returns info about the PDU length.
"""
@spec get_pdu_length(GenServer.server()) :: {:ok, list} | {:error, map} | {:error, :einval}
def get_pdu_length(pid) do
GenServer.call(pid, :get_pdu_length)
end
@doc """
Returns the connection status.
"""
@spec get_connected(GenServer.server()) :: {:ok, boolean} | {:error, map} | {:error, :einval}
def get_connected(pid) do
GenServer.call(pid, :get_connected)
end
@doc """
This function can execute any desired function as a request.
The `request` can be a tuple (the first element is an atom according to the desired function to be executed,
and the following elements are the args of the desired function) or an atom (when the desired function
has no arguments), for example:
request = {:connect_to , [ip: "192.168.1.100", rack: 0, slot: 0]},
request = :get_connected
"""
@spec command(GenServer.server(), term) :: :ok | {:ok, term} | {:error, map} | {:error, :einval}
def command(pid, request) do
GenServer.call(pid, request)
end
@spec init([]) :: {:ok, Snapex7.Client.State.t()}
def init([]) do
snap7_dir = :code.priv_dir(:snapex7) |> List.to_string()
System.put_env("LD_LIBRARY_PATH", snap7_dir)
System.put_env("DYLD_LIBRARY_PATH", snap7_dir)
executable = :code.priv_dir(:snapex7) ++ '/s7_client.o'
port =
Port.open({:spawn_executable, executable}, [
{:args, []},
{:packet, 2},
:use_stdio,
:binary,
:exit_status
])
state = %State{port: port}
{:ok, state}
end
# Administrative functions
def handle_call({:connect_to, opts}, {from_pid, _}, state) do
ip = Keyword.fetch!(opts, :ip)
rack = Keyword.get(opts, :rack, 0)
slot = Keyword.get(opts, :slot, 0)
active = Keyword.get(opts, :active, false)
response = call_port(state, :connect_to, {ip, rack, slot})
new_state =
case response do
:ok ->
%State{
state
| state: :connected,
ip: ip,
rack: rack,
slot: slot,
is_active: active,
controlling_process: from_pid
}
{:error, _x} ->
%State{state | state: :idle}
end
{:reply, response, new_state}
end
def handle_call({:set_connection_type, connection_type}, _from, state) do
connection_type = Keyword.fetch!(@connection_types, connection_type)
response = call_port(state, :set_connection_type, connection_type)
{:reply, response, state}
end
def handle_call({:set_connection_params, opts}, _from, state) do
ip = Keyword.fetch!(opts, :ip)
local_tsap = Keyword.get(opts, :local_tsap, 0)
remote_tsap = Keyword.get(opts, :remote_tsap, 0)
response = call_port(state, :set_connection_params, {ip, local_tsap, remote_tsap})
{:reply, response, state}
end
def handle_call(:connect, _from, state) do
response = call_port(state, :connect, nil)
new_state =
case response do
:ok ->
%{state | state: :connected}
{:error, _x} ->
%State{state | state: :idle}
end
{:reply, response, new_state}
end
def handle_call(:disconnect, {_from, _}, state) do
response = call_port(state, :disconnect, nil)
new_state = %State{state | state: :idle}
{:reply, response, new_state}
end
def handle_call({:get_params, param_number}, {_from, _}, state) do
response = call_port(state, :get_params, param_number)
{:reply, response, state}
end
def handle_call({:set_params, param_number, value}, {_from, _}, state) do
response = call_port(state, :set_params, {param_number, value})
{:reply, response, state}
end
# Data I/O functions
def handle_call({:read_area, opts}, _from, state) do
area_key = Keyword.fetch!(opts, :area)
word_len_key = Keyword.get(opts, :word_len, :byte)
db_number = Keyword.get(opts, :db_number, 0)
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
area_type = Keyword.fetch!(@area_types, area_key)
word_type = Keyword.fetch!(@word_types, word_len_key)
response = call_port(state, :read_area, {area_type, db_number, start, amount, word_type})
{:reply, response, state}
end
def handle_call({:write_area, opts}, _from, state) do
area_key = Keyword.fetch!(opts, :area)
word_len_key = Keyword.get(opts, :word_len, :byte)
db_number = Keyword.get(opts, :db_number, 0)
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
area_type = Keyword.fetch!(@area_types, area_key)
word_type = Keyword.fetch!(@word_types, word_len_key)
response =
call_port(state, :write_area, {area_type, db_number, start, amount, word_type, data})
{:reply, response, state}
end
def handle_call({:db_read, opts}, _from, state) do
db_number = Keyword.get(opts, :db_number, 0)
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :db_read, {db_number, start, amount})
{:reply, response, state}
end
def handle_call({:db_write, opts}, _from, state) do
db_number = Keyword.get(opts, :db_number, 0)
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :db_write, {db_number, start, amount, data})
{:reply, response, state}
end
def handle_call({:ab_read, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :ab_read, {start, amount})
{:reply, response, state}
end
def handle_call({:ab_write, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :ab_write, {start, amount, data})
{:reply, response, state}
end
def handle_call({:eb_read, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :eb_read, {start, amount})
{:reply, response, state}
end
def handle_call({:eb_write, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :eb_write, {start, amount, data})
{:reply, response, state}
end
def handle_call({:mb_read, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :mb_read, {start, amount})
{:reply, response, state}
end
def handle_call({:mb_write, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :mb_write, {start, amount, data})
{:reply, response, state}
end
def handle_call({:tm_read, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :tm_read, {start, amount})
{:reply, response, state}
end
def handle_call({:tm_write, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :tm_write, {start, amount, data})
{:reply, response, state}
end
def handle_call({:ct_read, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
amount = Keyword.get(opts, :amount, 0)
response = call_port(state, :ct_read, {start, amount})
{:reply, response, state}
end
def handle_call({:ct_write, opts}, _from, state) do
start = Keyword.get(opts, :start, 0)
data = Keyword.fetch!(opts, :data)
amount = Keyword.get(opts, :amount, byte_size(data))
response = call_port(state, :ct_write, {start, amount, data})
{:reply, response, state}
end
def handle_call({:read_multi_vars, opts}, _from, state) do
data = Keyword.fetch!(opts, :data) |> Enum.map(&key2value/1)
size = length(data)
response = call_port(state, :read_multi_vars, {size, data})
{:reply, response, state}
end
def handle_call({:write_multi_vars, opts}, _from, state) do
data = Keyword.fetch!(opts, :data) |> Enum.map(&key2value/1)
size = length(data)
response = call_port(state, :write_multi_vars, {size, data})
{:reply, response, state}
end
# Directory functions
def handle_call(:list_blocks, _from, state) do
response = call_port(state, :list_blocks, nil)
{:reply, response, state}
end
def handle_call({:list_blocks_of_type, block_type, n_items}, _from, state) do
block_value = Keyword.fetch!(@block_types, block_type)
response = call_port(state, :list_blocks_of_type, {block_value, n_items})
{:reply, response, state}
end
def handle_call({:get_ag_block_info, block_type, block_num}, _from, state) do
block_value = Keyword.fetch!(@block_types, block_type)
response = call_port(state, :get_ag_block_info, {block_value, block_num})
{:reply, response, state}
end
def handle_call({:get_pg_block_info, buffer}, _from, state) do
b_size = byte_size(buffer)
response = call_port(state, :get_pg_block_info, {b_size, buffer})
{:reply, response, state}
end
# Block Oriented functions
def handle_call({:full_upload, block_type, block_num, bytes2read}, _from, state) do
block_value = Keyword.fetch!(@block_types, block_type)
response = call_port(state, :full_upload, {block_value, block_num, bytes2read})
{:reply, response, state}
end
def handle_call({:upload, block_type, block_num, bytes2read}, _from, state) do
block_value = Keyword.fetch!(@block_types, block_type)
response = call_port(state, :upload, {block_value, block_num, bytes2read})
{:reply, response, state}
end
def handle_call({:download, block_num, buffer}, _from, state) do
b_size = byte_size(buffer)
response = call_port(state, :download, {block_num, b_size, buffer})
{:reply, response, state}
end
def handle_call({:delete, block_type, block_num}, _from, state) do
block_value = Keyword.fetch!(@block_types, block_type)
response = call_port(state, :delete, {block_value, block_num})
{:reply, response, state}
end
def handle_call({:db_get, db_number, size}, _from, state) do
response = call_port(state, :db_get, {db_number, size})
{:reply, response, state}
end
def handle_call({:db_fill, db_number, fill_char}, _from, state) do
response = call_port(state, :db_fill, {db_number, fill_char})
{:reply, response, state}
end
# Date/Time functions
def handle_call(:get_plc_date_time, _from, state) do
response =
case call_port(state, :get_plc_date_time, nil) do
{:ok, tm} ->
{:ok, time} = Time.new(tm.tm_hour, tm.tm_min, tm.tm_sec)
{:ok, date} = Date.new(tm.tm_year, tm.tm_mon, tm.tm_mday)
{:ok, date, time}
x ->
x
end
{:reply, response, state}
end
def handle_call({:set_plc_date_time, opt}, _from, state) do
sec = Keyword.get(opt, :sec, 0)
min = Keyword.get(opt, :min, 0)
hour = Keyword.get(opt, :hour, 1)
mday = Keyword.get(opt, :mday, 1)
mon = Keyword.get(opt, :mon, 1)
year = Keyword.get(opt, :year, 1900)
wday = Keyword.get(opt, :wday, 0)
yday = Keyword.get(opt, :yday, 0)
isdst = Keyword.get(opt, :isdst, 1)
response =
call_port(state, :set_plc_date_time, {sec, min, hour, mday, mon, year, wday, yday, isdst})
{:reply, response, state}
end
def handle_call(:set_plc_system_date_time, _from, state) do
response = call_port(state, :set_plc_system_date_time, nil)
{:reply, response, state}
end
# System info functions
def handle_call({:read_szl, id, index}, _from, state) do
response = call_port(state, :read_szl, {id, index})
{:reply, response, state}
end
def handle_call(:read_szl_list, _from, state) do
response = call_port(state, :read_szl_list, nil)
{:reply, response, state}
end
def handle_call(:get_order_code, _from, state) do
response = call_port(state, :get_order_code, nil)
{:reply, response, state}
end
def handle_call(:get_cpu_info, _from, state) do
response = call_port(state, :get_cpu_info, nil)
{:reply, response, state}
end
def handle_call(:get_cp_info, _from, state) do
response = call_port(state, :get_cp_info, nil)
{:reply, response, state}
end
# PLC control functions
def handle_call(:plc_hot_start, _from, state) do
response = call_port(state, :plc_hot_start, nil)
{:reply, response, state}
end
def handle_call(:plc_cold_start, _from, state) do
response = call_port(state, :plc_cold_start, nil)
{:reply, response, state}
end
def handle_call(:plc_stop, _from, state) do
response = call_port(state, :plc_stop, nil)
{:reply, response, state}
end
def handle_call({:copy_ram_to_rom, timeout}, _from, state) do
response = call_port(state, :copy_ram_to_rom, timeout)
{:reply, response, state}
end
def handle_call({:compress, timeout}, _from, state) do
response = call_port(state, :compress, timeout)
{:reply, response, state}
end
def handle_call(:get_plc_status, _from, state) do
response = call_port(state, :get_plc_status, nil)
{:reply, response, state}
end
# Security functions
def handle_call({:set_session_password, password}, _from, state) do
response = call_port(state, :set_session_password, password)
{:reply, response, state}
end
def handle_call(:clear_session_password, _from, state) do
response = call_port(state, :clear_session_password, nil)
{:reply, response, state}
end
def handle_call(:get_protection, _from, state) do
response = call_port(state, :get_protection, nil)
{:reply, response, state}
end
# Low Level functions
def handle_call({:iso_exchange_buffer, buffer}, _from, state) do
b_size = byte_size(buffer)
response = call_port(state, :iso_exchange_buffer, {b_size, buffer})
{:reply, response, state}
end
# Miscellaneous functions
def handle_call(:get_exec_time, _from, state) do
response = call_port(state, :get_exec_time, nil)
{:reply, response, state}
end
def handle_call(:get_last_error, _from, state) do
response = call_port(state, :get_last_error, nil)
{:reply, response, state}
end
def handle_call(:get_pdu_length, _from, state) do
response = call_port(state, :get_pdu_length, nil)
{:reply, response, state}
end
def handle_call(:get_connected, _from, state) do
response = call_port(state, :get_connected, nil)
{:reply, response, state}
end
def handle_call(request, _from, state) do
Logger.error("(#{__MODULE__}) Invalid request: #{inspect(request)}")
response = {:error, :einval}
{:reply, response, state}
end
defp call_port(state, command, arguments, timeout \\ @c_timeout) do
msg = {command, arguments}
send(state.port, {self(), {:command, :erlang.term_to_binary(msg)}})
# Block until the response comes back since the C side
# doesn't want to handle any queuing of requests. REVISIT
receive do
{_, {:data, <<?r, response::binary>>}} ->
:erlang.binary_to_term(response)
after
timeout ->
# Not sure how this can be recovered
exit(:port_timed_out)
end
end
defp key2value(map) do
area_key = Map.fetch!(map, :area)
area_value = Keyword.fetch!(@area_types, area_key)
map = Map.put(map, :area, area_value)
word_len_key = Map.get(map, :word_len, :byte)
word_len_value = Keyword.get(@word_types, word_len_key)
map = Map.put(map, :word_len, word_len_value)
map
end
end
|
lib/snap7/client.ex
|
defmodule ProteinTranslation do
@moduledoc """
Provides functions to translate an RNA codon sequence into the corresponding protein sequence:
- `of_codon/1` translates a given codon into the corresponding protein
- `of_rna/1` translates a given strand of codons into the corresponding protein sequence
"""
@doc """
Given an RNA string, return a list of proteins specified by codons, in order.
## Parameters
- rna : String that represents the rna strand
## Examples
iex> ProteinTranslation.of_rna("AUGUUUUAA")
{:ok, ["Methionine", "Phenylalanine"]}
"""
@spec of_rna(String.t()) :: {atom, list(String.t())}
def of_rna(rna) do
raw_translation =
String.graphemes(rna)
|> Enum.chunk_every(3)
|> Enum.map(&Enum.join(&1, ""))
|> Enum.map(&of_codon/1)
if Enum.any?(raw_translation, fn val -> elem(val, 0) == :error end) do
{:error, "invalid RNA"}
else
{:ok,
Enum.map(raw_translation, &elem(&1, 1))
|> Enum.take_while(fn protein -> protein != "STOP" end)}
end
end
@doc """
Given a codon, return a tuple with the corresponding protein if the codon is correct, a tuple with an error message otherwise.
The mapping between codon and the corresponding protein is the following :
- UGU -> Cysteine
- UGC -> Cysteine
- UUA -> Leucine
- UUG -> Leucine
- AUG -> Methionine
- UUU -> Phenylalanine
- UUC -> Phenylalanine
- UCU -> Serine
- UCC -> Serine
- UCA -> Serine
- UCG -> Serine
- UGG -> Tryptophan
- UAU -> Tyrosine
- UAC -> Tyrosine
- UAA -> STOP
- UAG -> STOP
- UGA -> STOP
## Parameters
## Examples
iex> ProteinTranslation.of_codon("UGC")
{:ok, "Cysteine"}
iex> ProteinTranslation.of_codon("UAA")
{:ok, "STOP"}
iex> ProteinTranslation.of_codon("AAA")
{:error, "invalid codon"}
"""
@spec of_codon(String.t()) :: {atom, String.t()}
def of_codon("UGU"), do: {:ok, "Cysteine"}
def of_codon("UGC"), do: {:ok, "Cysteine"}
def of_codon("UUA"), do: {:ok, "Leucine"}
def of_codon("UUG"), do: {:ok, "Leucine"}
def of_codon("AUG"), do: {:ok, "Methionine"}
def of_codon("UUU"), do: {:ok, "Phenylalanine"}
def of_codon("UUC"), do: {:ok, "Phenylalanine"}
def of_codon("UCU"), do: {:ok, "Serine"}
def of_codon("UCC"), do: {:ok, "Serine"}
def of_codon("UCA"), do: {:ok, "Serine"}
def of_codon("UCG"), do: {:ok, "Serine"}
def of_codon("UGG"), do: {:ok, "Tryptophan"}
def of_codon("UAU"), do: {:ok, "Tyrosine"}
def of_codon("UAC"), do: {:ok, "Tyrosine"}
def of_codon("UAA"), do: {:ok, "STOP"}
def of_codon("UAG"), do: {:ok, "STOP"}
def of_codon("UGA"), do: {:ok, "STOP"}
def of_codon(_), do: {:error, "invalid codon"}
end
|
exercism/elixir/protein-translation/lib/protein_translation.ex
|
defmodule BubbleLib.DslStruct do
@moduledoc """
A "DSL Struct" is a struct which can be exposed in Bubblescript.
Most notable are intent, message, attachment, location, event.
"""
defmacro __using__(struct) do
str_fields = struct |> Keyword.keys() |> Enum.map(&to_string/1)
quote do
defstruct unquote(struct)
@str_fields unquote(str_fields)
@behaviour Access
@impl Access
def fetch(term, key) when key in unquote(str_fields) do
fetch(term, String.to_atom(key))
end
def fetch(term, key) do
Map.fetch(term, key)
end
defoverridable fetch: 2
@impl Access
defdelegate get_and_update(a, b, c), to: Map
@impl Access
defdelegate pop(a, b), to: Map
def __jason_encode__(struct, opts, only) do
struct
|> Map.keys()
|> Enum.reject(&(&1 != "__struct__" && (is_list(only) && &1 in only)))
|> Enum.map(fn k -> {to_string(k), Map.get(struct, k)} end)
|> Map.new()
|> Jason.Encode.map(opts)
end
end
end
defmacro jason_derive(mod, only \\ nil) do
quote do
defimpl Jason.Encoder, for: unquote(mod) do
def encode(struct, opts) do
unquote(mod).__jason_encode__(struct, opts, unquote(only))
end
end
end
end
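  # Usage sketch (hypothetical struct), assuming Jason is available:
  #
  #   defmodule MyApp.Location do
  #     use BubbleLib.DslStruct, lat: nil, lon: nil
  #   end
  #
  #   require BubbleLib.DslStruct
  #   BubbleLib.DslStruct.jason_derive(MyApp.Location)
  #
  #   loc = %MyApp.Location{lat: 52.1, lon: 4.3}
  #   loc["lat"]  #=> 52.1 (string keys resolve via the Access callbacks above)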
def instantiate_structs(%{"__struct__" => mod} = struct) do
mod = String.to_atom(mod)
orig = apply(mod, :__struct__, [])
fields = apply(mod, :__struct__, []) |> Map.keys()
fields
|> Enum.map(fn k -> {k, Map.get(struct, to_string(k)) || Map.get(orig, k)} end)
|> Map.new()
|> Map.put(:__struct__, mod)
end
def instantiate_structs(%{__struct__: _} = struct) do
struct
end
def instantiate_structs(%{} = map) do
map
|> Enum.map(fn {k, v} ->
{k, instantiate_structs(v)}
end)
|> Map.new()
end
def instantiate_structs(list) when is_list(list) do
Enum.map(list, &instantiate_structs/1)
end
def instantiate_structs(value), do: value
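  # Usage sketch (hypothetical map), rebuilding structs from maps that carry a
  # "__struct__" string key (e.g. decoded JSON produced by __jason_encode__/3):
  #
  #   BubbleLib.DslStruct.instantiate_structs(%{
  #     "__struct__" => "Elixir.MyApp.Location",
  #     "lat" => 52.1
  #   })
  #   #=> %MyApp.Location{lat: 52.1, lon: nil}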
def struct_from_map(struct, input) do
Enum.reduce(Map.to_list(struct), struct, fn {k, _}, acc ->
case Map.fetch(input, Atom.to_string(k)) do
{:ok, v} -> %{acc | k => v}
:error -> acc
end
end)
end
end
|
lib/bubble_lib/dsl_struct.ex
|
defmodule Sea.Signal do
@moduledoc """
Defines a signal (a.k.a. event) with a payload that will get emitted to defined observers.
"""
@doc """
Build the signal struct from arbitrary input (or return it if already a signal struct).
Sea provides the default implementation that will simply return the signal struct if it's provided
as argument. Specific signal module may define further variants of `build` capable of taking any
input and converting it to signal payload.
"""
@callback build(any()) :: struct()
@doc """
Emit the signal from arbitrary input (converted to signal struct if necessary).
Sea provides its implementation of `emit` that will call `build` with whatever is passed to it in
order to normalize the input into the signal struct and then it'll call `Sea.Signal.emit/1` with
that in order to actually call defined observers.
"""
@callback emit(any()) :: :ok
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__), only: :macros
defmodule Behaviour do
@moduledoc false
@callback build(any()) :: struct()
@callback emit(any()) :: :ok
end
@behaviour __MODULE__.Behaviour
Module.register_attribute(__MODULE__, :observers_rev, accumulate: true)
@before_compile unquote(__MODULE__)
@after_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(_env) do
quote do
@observers Enum.reverse(@observers_rev)
@doc false
def __observers__ do
@observers
end
@doc """
Build the signal struct from arbitrary input (or return it if already a signal struct).
"""
def build(%__MODULE__{} = signal) do
signal
end
@doc """
Emit the signal from arbitrary input (converted to signal struct if necessary).
"""
def emit(input) do
input
|> build()
|> unquote(__MODULE__).emit()
:ok
end
end
end
defmacro __after_compile__(_env, _bytecode) do
quote do
functions = __MODULE__.__info__(:functions)
unless Keyword.has_key?(functions, :__struct__) do
raise(
CompileError,
description: "defstruct missing in #{inspect(__MODULE__)} signal",
file: __ENV__.file,
line: __ENV__.line
)
end
end
end
@doc """
Adds observer module(s) that signal will be emitted to.
It may be invoked multiple times, but specific observer may be added only once.
## Example
Emit to single observer:
defmodule MainApp.Accounts.UserRegisteredSignal do
use Sea.Signal
emit_to AnalyticsApp.Observer
emit_to CustomerExperienceApp.Observer
end
Emit to multiple observers specified as an array:
defmodule MainApp.Accounts.UserRegisteredSignal do
use Sea.Signal
emit_to [
MainApp.Mailing.UserRegisteredObserver,
MainApp.Sales.UserRegisteredObserver
]
end
Emit to multiple observers specified with the multi-alias syntax:
defmodule MainApp.Accounts.UserRegisteredSignal do
use Sea.Signal
emit_to CustomerExperienceApp.{Mailing, Sales}
end
Examples above present multiple approaches to organizing observers. Please refer to *Organizing
observers* guide for complete explanation and examples on how to approach this problem.
"""
defmacro emit_to(observer_mod_or_mods)
defmacro emit_to({{:., _, [base_alias = {:__aliases__, _, _}, :{}]}, _, sub_aliases}) do
base_mod = Macro.expand(base_alias, __CALLER__)
nested_mods_names = Enum.map(sub_aliases, &elem(&1, 2))
observer_mods =
Enum.map(nested_mods_names, fn nested_mod_names ->
:"#{base_mod}.#{Enum.join(nested_mod_names, ".")}"
end)
quote do
emit_to(unquote(observer_mods))
end
end
defmacro emit_to(observer_mods) when is_list(observer_mods) do
Enum.map(observer_mods, fn observer_mod ->
quote do
emit_to(unquote(observer_mod))
end
end)
end
defmacro emit_to(observer_mod) do
quote do
if unquote(observer_mod) in @observers_rev do
raise(
CompileError,
description: "observer #{inspect(unquote(observer_mod))} already added",
file: __ENV__.file,
line: __ENV__.line
)
end
@observers_rev unquote(observer_mod)
end
end
@doc """
Emits passed signal struct to observers defined in the struct module.
"""
def emit(%{__struct__: signal_mod} = signal) do
observers = signal_mod.__observers__()
Enum.each(observers, fn observer ->
observer.handle_signal(signal)
end)
end
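  # Usage sketch (hypothetical modules):
  #
  #   defmodule MyApp.Mailing.UserRegisteredObserver do
  #     def handle_signal(%MyApp.UserRegisteredSignal{email: email}) do
  #       # e.g. send a welcome email to `email`
  #       :ok
  #     end
  #   end
  #
  #   defmodule MyApp.UserRegisteredSignal do
  #     use Sea.Signal
  #     emit_to MyApp.Mailing.UserRegisteredObserver
  #     defstruct [:email]
  #   end
  #
  #   MyApp.UserRegisteredSignal.emit(%MyApp.UserRegisteredSignal{email: "jane@example.com"})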
end
|
lib/sea/signal.ex
|
defmodule Extreme do
@moduledoc """
The Extreme module is the main communication point with the EventStore over a tcp connection. Extreme is implemented
using GenServer and is OTP compatible. If the client is disconnected from the server we do not try to reconnect;
instead you should rely on your supervisor. For example:
defmodule MyApp.Supervisor do
use Supervisor
def start_link,
do: Supervisor.start_link __MODULE__, :ok
@event_store MyApp.EventStore
def init(:ok) do
event_store_settings = Application.get_env :my_app, :event_store
children = [
worker(Extreme, [event_store_settings, [name: @event_store]]),
# ... other workers / supervisors
]
supervise children, strategy: :one_for_one
end
end
You can also start the adapter manually (as you can see in the test file):
{:ok, server} = Application.get_env(:extreme, :event_store) |> Extreme.start_link
From now on, the `server` pid is used for further communication. Since we rely on the supervisor to reconnect,
it is wise to name the `server` as we did in the example above.
"""
use GenServer
alias Extreme.Request
require Logger
alias Extreme.Response
## Client API
@doc """
Starts connection to EventStore using `connection_settings` and optional `opts`.
Extreme can connect to single ES node or to cluster specified with node IPs and ports.
Example for connecting to single node:
config :extreme, :event_store,
db_type: :node,
host: "localhost",
port: 1113,
username: "admin",
password: "<PASSWORD>",
reconnect_delay: 2_000,
connection_name: :my_app,
max_attempts: :infinity
* `db_type` - defaults to :node, thus it can be omitted
* `host` - check EXT IP setting of your EventStore
* `port` - check EXT TCP PORT setting of your EventStore
* `reconnect_delay` - in ms. Defaults to 1_000. If tcp connection fails this is how long it will wait for reconnection.
* `connection_name` - Optional param introduced in EventStore 4. Connection can be identified by this name on ES UI
* `max_attempts` - Defaults to :infinity. Specifies how many times we'll try to connect to EventStore
Example for connecting to cluster:
config :extreme, :event_store,
db_type: :cluster,
gossip_timeout: 300,
nodes: [
%{host: "10.10.10.29", port: 2113},
%{host: "10.10.10.28", port: 2113},
%{host: "10.10.10.30", port: 2113}
],
connection_name: :my_app,
username: "admin",
password: "<PASSWORD>"
* `gossip_timeout` - in ms. Defaults to 1_000. We iterate through the `nodes` list, asking for cluster member details.
This setting is the timeout for a gossip response before we ask the next node from the `nodes` list for cluster details.
* `nodes` - Mandatory for cluster connection. Represents list of nodes in the cluster as we know it
* `host` - should be EXT IP setting of your EventStore node
* `port` - should be EXT HTTP PORT setting of your EventStore node
Example of connection to cluster via DNS lookup
config :extreme, :event_store,
db_type: :cluster_dns,
gossip_timeout: 300,
host: "es-cluster.example.com", # accepts a charlist too; this would be a multi A record host entry in your nameserver
port: 2113, # the external gossip port
connection_name: :my_app,
username: "admin",
password: "<PASSWORD>",
max_attempts: :infinity
When `cluster` mode is used, the adapter goes through the `nodes` list and tries to gossip with one node after another
until it gets a response about the nodes. Based on the node information from that response it ranks their statuses and chooses
the best candidate to connect to. For the way ranking is done, take a look at `lib/cluster_connection.ex`:
defp rank_state("Master"), do: 1
defp rank_state("PreMaster"), do: 2
defp rank_state("Slave"), do: 3
defp rank_state("Clone"), do: 4
defp rank_state("CatchingUp"), do: 5
defp rank_state("PreReplica"), do: 6
defp rank_state("Unknown"), do: 7
defp rank_state("Initializing"), do: 8
Note that the same procedure applies with `cluster_dns` mode turned on, since internally it resolves the host to ip addresses to which the same connection procedure is applied.
Once the client is disconnected from the EventStore, the supervisor should respawn it and the connection starts over again.
"""
def start_link(connection_settings, opts \\ []),
do: GenServer.start_link(__MODULE__, connection_settings, opts)
@doc """
Executes protobuf `message` against `server`. Returns:
- {:ok, protobuf_message} on success.
- {:error, :not_authenticated} on wrong credentials.
- {:error, error_reason, protobuf_message} on failure.
EventStore uses ProtoBuf for taking requests and sending responses back.
We are using [exprotobuf](https://github.com/bitwalker/exprotobuf) to deal with them.
List and specification of supported protobuf messages can be found in `include/event_store.proto` file.
Instead of wrapping each and every request in elixir function, we are using `execute/2` function that takes server pid and request message:
{:ok, response} = Extreme.execute server, write_events()
where `write_events` can be helper function like:
alias Extreme.Msg, as: ExMsg
defp write_events(stream \\ "people", events \\ [%PersonCreated{name: "<NAME>"}, %PersonChangedName{name: "Zika"}]) do
proto_events = Enum.map(events, fn event ->
ExMsg.NewEvent.new(
event_id: Extreme.Tools.gen_uuid(),
event_type: to_string(event.__struct__),
data_content_type: 0,
metadata_content_type: 0,
data: :erlang.term_to_binary(event),
meta: ""
) end)
ExMsg.WriteEvents.new(
event_stream_id: stream,
expected_version: -2,
events: proto_events,
require_master: false
)
end
This way you can fine tune your requests, i.e. choose your serialization. We are using erlang serialization in this case
`data: :erlang.term_to_binary(event)`, but you can do whatever suites you.
For more information about protobuf messages EventStore uses,
take a look at their [documentation](http://docs.geteventstore.com) or for common use cases
you can check `test/extreme_test.exs` file.
"""
def execute(server, message),
do: GenServer.call(server, {:execute, message})
@doc """
Reads events specified in `read_events`, sends them to `subscriber`
and leaves `subscriber` subscribed per `subscribe` message.
`subscriber` is process that will keep receiving {:on_event, event} messages.
`read_events` :: Extreme.Msg.ReadStreamEvents
`subscribe` :: Extreme.Msg.SubscribeToStream
Returns {:ok, subscription} when subscription is success.
If `stream` is hard deleted `subscriber` will receive message {:extreme, :error, :stream_hard_deleted, stream}
If `stream` is soft deleted `subscriber` will receive message {:extreme, :warn, :stream_soft_deleted, stream}.
In the case of a soft deleted stream, a new event will recreate the stream and it will be sent to `subscriber` as described above.
Hard deleted streams can't be recreated, so the suggestion is not to handle this message but rather to crash when it happens.
## Examples:
defmodule MyApp.StreamSubscriber
use GenServer
def start_link(extreme, last_processed_event),
do: GenServer.start_link __MODULE__, {extreme, last_processed_event}
def init({extreme, last_processed_event}) do
stream = "people"
state = %{ event_store: extreme, stream: stream, last_event: last_processed_event }
GenServer.cast self(), :subscribe
{:ok, state}
end
def handle_cast(:subscribe, state) do
# read only unprocessed events and stay subscribed
{:ok, subscription} = Extreme.read_and_stay_subscribed state.event_store, self(), state.stream, state.last_event + 1
# we want to monitor when subscription is crashed so we can resubscribe
ref = Process.monitor subscription
{:noreply, %{state|subscription_ref: ref}}
end
def handle_info({:DOWN, ref, :process, _pid, _reason}, %{subscription_ref: ref} = state) do
GenServer.cast self(), :subscribe
{:noreply, state}
end
def handle_info({:on_event, push}, state) do
push.event.data
|> :erlang.binary_to_term
|> process_event
event_number = push.link.event_number
:ok = update_last_event state.stream, event_number
{:noreply, %{state|last_event: event_number}}
end
def handle_info(_msg, state), do: {:noreply, state}
defp process_event(event), do: IO.puts("Do something with event: " <> inspect(event))
defp update_last_event(_stream, _event_number), do: IO.puts("Persist last processed event_number for stream")
end
This way unprocessed events will be sent by Extreme using `{:on_event, push}` messages.
After all persisted messages are sent, new messages will be sent the same way as they arrive on the stream.
Since there's a lot of boilerplate code here, you can use `Extreme.Listener` to reduce it and focus only
on business part of code.
"""
def read_and_stay_subscribed(
server,
subscriber,
stream,
from_event_number \\ 0,
per_page \\ 4096,
resolve_link_tos \\ true,
require_master \\ false
) do
GenServer.call(
server,
{:read_and_stay_subscribed, subscriber,
{stream, from_event_number, per_page, resolve_link_tos, require_master}}
)
end
@doc """
Subscribes `subscriber` to `stream` using `server`.
`subscriber` is a process that will keep receiving {:on_event, event} messages.
Returns {:ok, subscription} when subscription is successful.
```NOTE: If `stream` is hard deleted, `subscriber` will NOT receive any message!```
## Example:
def subscribe(server, stream \\ "people"), do: Extreme.subscribe_to(server, self(), stream)
def handle_info({:on_event, event}, state) do
Logger.debug "New event added to stream 'people': " <> inspect(event)
{:noreply, state}
end
As `Extreme.read_and_stay_subscribed/7` has its abstraction in `Extreme.Listener`, there's an abstraction for this function
as well in `Extreme.FanoutListener` behaviour.
"""
def subscribe_to(server, subscriber, stream, resolve_link_tos \\ true),
do: GenServer.call(server, {:subscribe_to, subscriber, stream, resolve_link_tos})
@doc """
Connects the `subscriber` to an existing persistent subscription named `subscription` on `stream`.
`subscriber` is a process that will keep receiving {:on_event, event} messages.
Returns {:ok, subscription} when subscription is successful.
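## Example:
A minimal sketch, assuming a persistent subscription named "people_readers" was already created on stream "people" (the names and buffer size are placeholders):
    {:ok, subscription} =
      Extreme.connect_to_persistent_subscription(server, self(), "people_readers", "people", 10)
    # the subscriber (here `self()`) then receives {:on_event, event} messages for delivered events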
"""
def connect_to_persistent_subscription(
server,
subscriber,
subscription,
stream,
buffer_size \\ 1
),
do:
GenServer.call(
server,
{:connect_to_persistent_subscription, subscriber, {subscription, stream, buffer_size}}
)
## Server Callbacks
def init(connection_settings) do
user = Keyword.fetch!(connection_settings, :username)
pass = Keyword.fetch!(connection_settings, :password)
GenServer.cast(self(), {:connect, connection_settings, 1})
{:ok, subscriptions_sup} = Extreme.SubscriptionsSupervisor.start_link(self())
{:ok, persistent_subscriptions_sup} =
Extreme.PersistentSubscriptionsSupervisor.start_link(connection_settings)
state = %{
socket: nil,
pending_responses: %{},
subscriptions: %{},
subscriptions_sup: subscriptions_sup,
persistent_subscriptions_sup: persistent_subscriptions_sup,
credentials: %{user: user, pass: pass},
received_data: <<>>,
should_receive: nil
}
{:ok, state}
end
def handle_cast({:connect, connection_settings, attempt}, state) do
db_type =
Keyword.get(connection_settings, :db_type, :node)
|> cast_to_atom
case connect(db_type, connection_settings, attempt) do
{:ok, socket} -> {:noreply, %{state | socket: socket}}
error -> {:stop, error, state}
end
end
defp connect(:cluster, connection_settings, attempt) do
{:ok, host, port} = Extreme.ClusterConnection.get_node(connection_settings)
connect(host, port, connection_settings, attempt)
end
defp connect(:node, connection_settings, attempt) do
host = Keyword.fetch!(connection_settings, :host)
port = Extreme.Tools.normalize_port(Keyword.fetch!(connection_settings, :port))
connect(host, port, connection_settings, attempt)
end
defp connect(:cluster_dns, connection_settings, attempt) do
{:ok, host, port} = Extreme.ClusterConnection.get_node(:cluster_dns, connection_settings)
connect(host, port, connection_settings, attempt)
end
defp connect(host, port, connection_settings, attempt) do
Logger.info(fn -> "Connecting Extreme to #{host}:#{port}" end)
opts = [:binary, active: :once]
case :gen_tcp.connect(String.to_charlist(host), port, opts) do
{:ok, socket} ->
on_connect(
socket,
Application.get_env(:extreme, :protocol_version, 3),
connection_settings[:connection_name]
)
_other ->
max_attempts = Keyword.get(connection_settings, :max_attempts, :infinity)
reconnect =
case max_attempts do
:infinity -> true
max when attempt <= max -> true
_any -> false
end
if reconnect do
reconnect_delay = Keyword.get(connection_settings, :reconnect_delay, 1_000)
Logger.warn(fn ->
"Error connecting to EventStore @ #{host}:#{port}. Will retry in #{reconnect_delay} ms."
end)
:timer.sleep(reconnect_delay)
db_type =
Keyword.get(connection_settings, :db_type, :node)
|> cast_to_atom
connect(db_type, connection_settings, attempt + 1)
else
{:error, :max_attempt_exceeded}
end
end
end
defp on_connect(socket, protocol_version, connection_name) when protocol_version >= 4 do
Logger.info(fn ->
"Successfully connected to EventStore using protocol version #{protocol_version}"
end)
send(self(), {:identify_client, 1, to_string(connection_name)})
{:ok, socket}
end
defp on_connect(socket, protocol_version, _) do
Logger.info(fn ->
"Successfully connected to EventStore using protocol version #{protocol_version}"
end)
:timer.send_after(1_000, :send_ping)
{:ok, socket}
end
def handle_call({:execute, protobuf_msg}, from, state) do
{message, correlation_id} = Request.prepare(protobuf_msg, state.credentials)
# Logger.debug "Will execute #{inspect protobuf_msg}"
:ok = :gen_tcp.send(state.socket, message)
state =
put_in(state.pending_responses, Map.put(state.pending_responses, correlation_id, from))
{:noreply, state}
end
def handle_call({:read_and_stay_subscribed, subscriber, params}, _from, state) do
{:ok, subscription} =
Extreme.SubscriptionsSupervisor.start_subscription(
state.subscriptions_sup,
subscriber,
params
)
# Logger.debug "Subscription is: #{inspect subscription}"
{:reply, {:ok, subscription}, state}
end
def handle_call({:subscribe_to, subscriber, stream, resolve_link_tos}, _from, state) do
{:ok, subscription} =
Extreme.SubscriptionsSupervisor.start_subscription(
state.subscriptions_sup,
subscriber,
stream,
resolve_link_tos
)
# Logger.debug "Subscription is: #{inspect subscription}"
{:reply, {:ok, subscription}, state}
end
def handle_call({:connect_to_persistent_subscription, subscriber, params}, _from, state) do
{:ok, persistent_subscription} =
Extreme.PersistentSubscriptionsSupervisor.start_persistent_subscription(
state.persistent_subscriptions_sup,
subscriber,
params
)
{:reply, {:ok, persistent_subscription}, state}
end
def handle_call({:subscribe, subscriber, msg}, from, state) do
# Logger.debug "Subscribing #{inspect subscriber} with: #{inspect msg}"
{message, correlation_id} = Request.prepare(msg, state.credentials)
:ok = :gen_tcp.send(state.socket, message)
state =
put_in(state.pending_responses, Map.put(state.pending_responses, correlation_id, from))
state = put_in(state.subscriptions, Map.put(state.subscriptions, correlation_id, subscriber))
{:noreply, state}
end
def handle_call({:ack, protobuf_msg, correlation_id}, _from, state) do
{message, _correlation_id} = Request.prepare(protobuf_msg, state.credentials, correlation_id)
# Logger.debug(fn -> "Ack received event: #{inspect protobuf_msg}" end)
:ok = :gen_tcp.send(state.socket, message)
{:reply, :ok, state}
end
def handle_call({:nack, protobuf_msg, correlation_id}, _from, state) do
{message, _correlation_id} = Request.prepare(protobuf_msg, state.credentials, correlation_id)
Logger.debug(fn -> "Nack received event: #{inspect(protobuf_msg)}" end)
:ok = :gen_tcp.send(state.socket, message)
{:reply, :ok, state}
end
def handle_info({:identify_client, version, connection_name}, state) do
Logger.debug(fn -> "Identifying client with EventStore" end)
protobuf_msg =
Extreme.Msg.IdentifyClient.new(
version: version,
connection_name: connection_name
)
{message, _correlation_id} = Request.prepare(protobuf_msg, state.credentials)
:ok = :gen_tcp.send(state.socket, message)
{:noreply, state}
end
def handle_info(:send_ping, state) do
message = Request.prepare(:ping)
:ok = :gen_tcp.send(state.socket, message)
{:noreply, state}
end
def handle_info({:tcp, socket, pkg}, state = %{received_data: received_data}) do
state = process_package(state, received_data <> pkg)
:inet.setopts(socket, active: :once)
{:noreply, state}
end
def handle_info({:tcp_closed, _port}, state) do
{:stop, :tcp_closed, state}
end
defp process_package(
state,
<<message_length::32-unsigned-little-integer, content::binary-size(message_length),
rest::binary>>
) do
# Handle binary data containing zero, one or many messages
# All messages start with a 32 bit unsigned little endian integer of the content length + a binary body of that size
state
|> process_message(content)
|> process_package(rest)
end
# No full message left, keep state in GenServer to reprocess once more data arrives
defp process_package(state, incomplete_package),
do: %{state | received_data: incomplete_package}
defp process_message(state, message) do
# Logger.debug(fn -> "Received tcp message: #{inspect Response.parse(message)}" end)
Response.parse(message)
|> respond(state)
end
defp respond({:client_identified, _correlation_id}, state) do
Logger.info(fn -> "Successfully connected to EventStore >= 4" end)
:timer.send_after(1_000, :send_ping)
state
end
defp respond({:pong, _correlation_id}, state) do
# Logger.debug "#{inspect self()} got :pong"
:timer.send_after(1_000, :send_ping)
state
end
defp respond({:heartbeat_request, correlation_id}, state) do
# Logger.debug "#{inspect self()} Tick-Tack"
message = Request.prepare(:heartbeat_response, correlation_id)
:ok = :gen_tcp.send(state.socket, message)
%{state | pending_responses: state.pending_responses}
end
defp respond({:error, :not_authenticated, correlation_id}, state) do
{:error, :not_authenticated}
|> respond_with(correlation_id, state)
end
defp respond({_auth, correlation_id, response}, state) do
response
|> respond_with(correlation_id, state)
end
defp respond_with(response, correlation_id, state) do
# Logger.debug "Responding with response: #{inspect response}"
case Map.get(state.pending_responses, correlation_id) do
nil ->
respond_to_subscription(response, correlation_id, state.subscriptions)
state
from ->
:ok = GenServer.reply(from, Response.reply(response, correlation_id))
pending_responses = Map.delete(state.pending_responses, correlation_id)
%{state | pending_responses: pending_responses}
end
end
defp respond_to_subscription(response, correlation_id, subscriptions) do
# Logger.debug "Attempting to respond to subscription with response: #{inspect response}"
case Map.get(subscriptions, correlation_id) do
# Logger.error "Can't find correlation_id #{inspect correlation_id} for response #{inspect response}"
nil ->
:ok
subscription ->
GenServer.cast(subscription, Response.reply(response, correlation_id))
end
end
@doc """
Cast the provided value to an atom if appropriate.
If the provided value is a string, convert it to an atom, otherwise return it as-is.
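For example (mirroring the two clauses below):
    iex> Extreme.cast_to_atom("cluster_dns")
    :cluster_dns
    iex> Extreme.cast_to_atom(:node)
    :node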
"""
def cast_to_atom(value) when is_binary(value),
do: String.to_atom(value)
def cast_to_atom(value),
do: value
end
|
lib/extreme.ex
| 0.847274
| 0.428413
|
extreme.ex
|
starcoder
|
defmodule Snitch.Domain.Inventory do
@moduledoc """
Interface for handling inventory related business logic
"""
alias Snitch.Data.Model.StockItem, as: StockModel
alias Snitch.Data.Model.StockLocation, as: SLModel
alias Snitch.Data.Schema.StockItem, as: StockSchema
alias Snitch.Data.Schema.Product
alias Snitch.Data.Model.Product, as: ProductModel
alias Snitch.Core.Tools.MultiTenancy.Repo
use Snitch.Domain
@doc """
Updates the stock with stock fields passed for a product
If the stock item is not present for a particular product and stock location,
it is created and then updated with the stock item params.
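A usage sketch (assuming `product` is a loaded `Product` struct; the param keys mirror the stock item fields, and `count_on_hand` is assumed to be one of them):
    {:ok, stock_item} =
      Snitch.Domain.Inventory.add_stock(product, %{
        "stock_location_id" => stock_location_id,
        "count_on_hand" => 10
      })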
"""
@spec add_stock(Product.t(), map) :: {:ok, StockSchema.t()} | {:error, Ecto.Changeset.t()}
def add_stock(product, stock_params) do
with {:ok, stock} <- check_stock(product.id, stock_params["stock_location_id"]),
{:ok, updated_stock} <- StockModel.update(stock_params, stock) do
{:ok, updated_stock}
end
end
@doc """
Decreases the stock count for a product at a particular stock location by the amount passed.
This function takes into consideration the inventory tracking level that is
applied to the product when reducing the stock.
`none`
When the inventory tracking for the product is `none`, we don't reduce the stock
for the product.
`product`
When we track inventory by product, we always reduce the stock of the product.
> Note: You can pass either the variant or the product id to reduce the stock.
`variant`
When we track product by variant, the variant product stock is decreased.
> Note: Always pass the product id of the variant (product) to reduce the stock.
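A usage sketch (the ids are placeholders for existing records):
    {:ok, stock_item} =
      Snitch.Domain.Inventory.reduce_stock(product_id, stock_location_id, 2)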
"""
@spec reduce_stock(integer, integer, integer) ::
{:ok, StockSchema.t()} | {:error, Ecto.Changeset.t() | :variant_not_found}
def reduce_stock(product_id, stock_location_id, reduce_count) do
with product <- ProductModel.get(product_id),
product_with_inventory <- ProductModel.product_with_inventory_tracking(product),
stock_location <- SLModel.get(stock_location_id) do
perform_stock_reduce(product, product_with_inventory, stock_location, reduce_count)
end
end
defp perform_stock_reduce(actual_product, product_with_tracking, stock_location, count) do
case product_with_tracking.inventory_tracking do
:none ->
check_stock(product_with_tracking.id, stock_location.id)
:product ->
{:ok, stock} = check_stock(product_with_tracking.id, stock_location.id)
do_reduce_stock(stock, count)
:variant ->
case ProductModel.is_child_product(actual_product) do
true ->
{:ok, stock} = check_stock(actual_product.id, stock_location.id)
do_reduce_stock(stock, count)
_ ->
{:error, :variant_not_found}
end
end
end
defp do_reduce_stock(stock_item, reduce_count) do
new_stock_count = stock_item.count_on_hand - reduce_count
StockModel.update(%{count_on_hand: new_stock_count}, stock_item)
end
def set_inventory_tracking(product, inventory_tracking, %{"stock" => stock_params})
when inventory_tracking in ["product", :product] do
{:ok, stock_item} = check_stock(product.id, stock_params["stock_location_id"])
Ecto.Multi.new()
|> Ecto.Multi.run(:inventory_tracking, fn _ ->
ProductModel.update(product, %{inventory_tracking: inventory_tracking})
end)
|> Ecto.Multi.run(:stock, fn _ -> StockModel.update(stock_params, stock_item) end)
|> Repo.transaction()
|> case do
{:ok, multi_result} ->
{:ok, multi_result.inventory_tracking}
{:error, _, error, _} ->
{:error, error}
end
end
def set_inventory_tracking(product, inventory_tracking, _params) do
ProductModel.update(product, %{inventory_tracking: inventory_tracking})
end
defp check_stock(product_id, location_id) do
query_fields = %{product_id: product_id, stock_location_id: location_id}
case StockModel.get(query_fields) do
%StockSchema{} = stock_item -> {:ok, stock_item}
nil -> StockModel.create(product_id, location_id, 0, false)
end
end
end
|
apps/snitch_core/lib/core/domain/stock/inventory.ex
| 0.833833
| 0.482795
|
inventory.ex
|
starcoder
|
defmodule Geometry.MultiLineString do
@moduledoc """
A set of line-strings from type `Geometry.LineString`
`MultiLineString` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> MultiLineString.new([
...> LineString.new([
...> Point.new(1, 2),
...> Point.new(3, 4)
...> ]),
...> LineString.new([
...> Point.new(1, 2),
...> Point.new(11, 12),
...> Point.new(13, 14)
...> ])
...> ]),
...> fn line_string -> length line_string end
...> )
[2, 3]
iex> Enum.into(
...> [LineString.new([Point.new(1, 2), Point.new(5, 6)])],
...> MultiLineString.new())
%MultiLineString{
line_strings:
MapSet.new([
[[1, 2], [5, 6]]
])
}
"""
alias Geometry.{GeoJson, LineString, MultiLineString, Point, WKB, WKT}
defstruct line_strings: MapSet.new()
@type t :: %MultiLineString{line_strings: MapSet.t(Geometry.coordinates())}
@doc """
Creates an empty `MultiLineString`.
## Examples
iex> MultiLineString.new()
%MultiLineString{line_strings: MapSet.new()}
"""
@spec new :: t()
def new, do: %MultiLineString{}
@doc """
Creates a `MultiLineString` from the given `Geometry.LineString`s.
## Examples
iex> MultiLineString.new([
...> LineString.new([
...> Point.new(1, 2),
...> Point.new(2, 3),
...> Point.new(3, 4)
...> ]),
...> LineString.new([
...> Point.new(10, 20),
...> Point.new(30, 40)
...> ]),
...> LineString.new([
...> Point.new(10, 20),
...> Point.new(30, 40)
...> ])
...> ])
%Geometry.MultiLineString{
line_strings:
MapSet.new([
[[1, 2], [2, 3], [3, 4]],
[[10, 20], [30, 40]]
])
}
iex> MultiLineString.new([])
%MultiLineString{line_strings: MapSet.new()}
"""
@spec new([LineString.t()]) :: t()
def new([]), do: %MultiLineString{}
def new(line_strings) do
%MultiLineString{
line_strings:
Enum.into(line_strings, MapSet.new(), fn line_string -> line_string.points end)
}
end
@doc """
Returns `true` if the given `MultiLineString` is empty.
## Examples
iex> MultiLineString.empty?(MultiLineString.new())
true
iex> MultiLineString.empty?(
...> MultiLineString.new([
...> LineString.new([Point.new(1, 2), Point.new(3, 4)])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%MultiLineString{} = multi_line_string),
do: Enum.empty?(multi_line_string.line_strings)
@doc """
Creates a `MultiLineString` from the given coordinates.
## Examples
iex> MultiLineString.from_coordinates([
...> [[-1, 1], [2, 2], [-3, 3]],
...> [[-10, 10], [-20, 20]]
...> ])
%MultiLineString{
line_strings:
MapSet.new([
[[-1, 1], [2, 2], [-3, 3]],
[[-10, 10], [-20, 20]]
])
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(coordinates) do
%MultiLineString{line_strings: MapSet.new(coordinates)}
end
@doc """
Returns an `:ok` tuple with the `MultiLineString` from the given GeoJSON
term. Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "MultiLineString",
...> "coordinates": [
...> [[-1, 1], [2, 2], [-3, 3]],
...> [[-10, 10], [-20, 20]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> MultiLineString.from_geo_json()
{:ok,
%Geometry.MultiLineString{
line_strings:
MapSet.new([
[[-10, 10], [-20, 20]],
[[-1, 1], [2, 2], [-3, 3]]
])
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_multi_line_string(json, MultiLineString)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_multi_line_string(json, MultiLineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `MultiLineString`.
There are no guarantees about the order of line-strings in the returned
`coordinates`.
## Examples
```elixir
[
[[-1, 1], [2, 2], [-3, 3]],
[[-10, 10], [-20, 20]]
]
|> MultiLineString.from_coordinates()
MultiLineString.to_geo_json(
MultiLineString.new([
LineString.new([
Point.new(-1, 1),
Point.new(2, 2),
Point.new(-3, 3)
]),
LineString.new([
Point.new(-10, 10),
Point.new(-20, 20)
])
])
)
# =>
# %{
# "type" => "MultiLineString",
# "coordinates" => [
# [[-1, 1], [2, 2], [-3, 3]],
# [[-10, 10], [-20, 20]]
# ]
# }
```
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%MultiLineString{line_strings: line_strings}) do
%{
"type" => "MultiLineString",
"coordinates" => MapSet.to_list(line_strings)
}
end
@doc """
Returns an `:ok` tuple with the `MultiLineString` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> MultiLineString.from_wkt("
...> SRID=1234;MultiLineString (
...> (10 20, 20 10, 20 40),
...> (40 30, 30 30)
...> )
...> ")
{:ok, {
%MultiLineString{
line_strings:
MapSet.new([
[[10, 20], [20, 10], [20, 40]],
[[40, 30], [30, 30]]
])
},
1234
}}
iex> MultiLineString.from_wkt("MultiLineString EMPTY")
{:ok, %MultiLineString{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, MultiLineString)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, MultiLineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `MultiLineString`. With option `:srid`
an EWKT representation with the SRID is returned.
There are no guarantees about the order of line-strings in the returned
WKT-string.
## Examples
```elixir
MultiLineString.to_wkt(MultiLineString.new())
# => "MultiLineString EMPTY"
MultiLineString.to_wkt(
MultiLineString.new([
LineString.new(
[Point.new(7.1, 8.1), Point.new(9.2, 5.2)]
),
LineString.new(
[Point.new(5.5, 9.2), Point.new(1.2, 3.2)]
)
])
)
# Returns a string without any \\n or extra spaces (formatted just for readability):
# MultiLineString (
# (5.5 9.2, 1.2 3.2),
# (7.1 8.1, 9.2 5.2)
# )
MultiLineString.to_wkt(
MultiLineString.new([
LineString.new(
[Point.new(7.1, 8.1), Point.new(9.2, 5.2)]
),
LineString.new(
[Point.new(5.5, 9.2), Point.new(1.2, 3.2)]
)
]),
srid: 555
)
# Returns a string without any \\n or extra spaces (formatted just for readability):
# SRID=555;MultiLineString (
# (5.5 9.2, 1.2 3.2),
# (7.1 8.1, 9.2 5.2)
# )
```
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%MultiLineString{line_strings: line_strings}, opts \\ []) do
WKT.to_ewkt(
<<
"MultiLineString ",
line_strings |> MapSet.to_list() |> to_wkt_line_strings()::binary()
>>,
opts
)
end
@doc """
Returns the WKB representation for a `MultiLineString`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.to_wkb/1` function.
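A call sketch using the options above (the SRID value is arbitrary, and the resulting binary/hex is elided since it depends on the encoded coordinates):
```elixir
MultiLineString.to_wkb(
  MultiLineString.new([
    LineString.new([Point.new(7.1, 8.1), Point.new(9.2, 5.2)])
  ]),
  endian: :ndr,
  mode: :hex,
  srid: 4326
)
```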
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%MultiLineString{} = multi_line_string, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(multi_line_string, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `MultiLineString` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, MultiLineString)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, MultiLineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `MultiLineString`.
## Examples
iex> MultiLineString.size(
...> MultiLineString.new([
...> LineString.new([
...> Point.new(11, 12),
...> Point.new(21, 22)
...> ]),
...> LineString.new([
...> Point.new(31, 32),
...> Point.new(41, 42)
...> ])
...> ])
...> )
2
"""
@spec size(t()) :: non_neg_integer()
def size(%MultiLineString{line_strings: line_strings}), do: MapSet.size(line_strings)
@doc """
Checks if `MultiLineString` contains `line_string`.
## Examples
iex> MultiLineString.member?(
...> MultiLineString.new([
...> LineString.new([
...> Point.new(11, 12),
...> Point.new(21, 22)
...> ]),
...> LineString.new([
...> Point.new(31, 32),
...> Point.new(41, 42)
...> ])
...> ]),
...> LineString.new([
...> Point.new(31, 32),
...> Point.new(41, 42)
...> ])
...> )
true
iex> MultiLineString.member?(
...> MultiLineString.new([
...> LineString.new([
...> Point.new(11, 12),
...> Point.new(21, 22)
...> ]),
...> LineString.new([
...> Point.new(31, 32),
...> Point.new(41, 42)
...> ])
...> ]),
...> LineString.new([
...> Point.new(11, 12),
...> Point.new(41, 42)
...> ])
...> )
false
"""
@spec member?(t(), LineString.t()) :: boolean()
def member?(%MultiLineString{line_strings: line_strings}, %LineString{points: points}) do
MapSet.member?(line_strings, points)
end
@doc """
Converts `MultiLineString` to a list.
"""
@spec to_list(t()) :: [Point.t()]
def to_list(%MultiLineString{line_strings: line_strings}), do: MapSet.to_list(line_strings)
@compile {:inline, to_wkt_line_strings: 1}
defp to_wkt_line_strings([]), do: "EMPTY"
defp to_wkt_line_strings([line_string | line_strings]) do
<<"(",
Enum.reduce(line_strings, LineString.to_wkt_points(line_string), fn line_string, acc ->
<<acc::binary(), ", ", LineString.to_wkt_points(line_string)::binary()>>
end)::binary(), ")">>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(t(), srid, endian, mode) :: wkb
when srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(%MultiLineString{line_strings: line_strings}, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_line_strings(line_strings, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_line_strings: 3}
defp to_wkb_line_strings(line_strings, endian, mode) do
Enum.reduce(line_strings, WKB.length(line_strings, endian, mode), fn line_string, acc ->
<<acc::binary(), LineString.to_wkb(line_string, nil, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "00000005"
{:ndr, false} -> "05000000"
{:xdr, true} -> "20000005"
{:ndr, true} -> "05000020"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x00000005::big-integer-size(32)>>
{:ndr, false} -> <<0x00000005::little-integer-size(32)>>
{:xdr, true} -> <<0x20000005::big-integer-size(32)>>
{:ndr, true} -> <<0x20000005::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(multi_line_string) do
{:ok, MultiLineString.size(multi_line_string)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(multi_line_string, val) do
{:ok, MultiLineString.member?(multi_line_string, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(multi_line_string) do
size = MultiLineString.size(multi_line_string)
{:ok, size,
&Enumerable.List.slice(MultiLineString.to_list(multi_line_string), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(multi_line_string, acc, fun) do
Enumerable.List.reduce(MultiLineString.to_list(multi_line_string), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%MultiLineString{line_strings: line_strings}) do
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
map =
Map.merge(
line_strings.map,
Enum.into(list, %{}, fn {line_string, []} -> {line_string.points, []} end)
)
%MultiLineString{line_strings: %{line_strings | map: map}}
_list, :halt ->
:ok
end
{[], fun}
end
end
end
|
lib/geometry/multi_line_string.ex
| 0.939789
| 0.557785
|
multi_line_string.ex
|
starcoder
|
defmodule SmartTracer do
@moduledoc """
A simple wrapper for recon_trace.
## Usage
When connected to a live remote console, issue the `trace/2` passing the function reference and rate limit.
### Tracing a global function
```
iex> SmartTracer.trace(&FakeModule.hello/1, 5)
1
iex> FakeModule.hello("Vince")
Elixir.SmartTracer.Support.FakeModule.hello/1 is being called with:
["Vince"]
```
### Tracing a local function
```
iex> SmartTracer.trace(&FakeModule.get_name/1, 5, scope: :local)
1
iex> FakeModule.hello("Vince")
Elixir.SmartTracer.Support.FakeModule.get_name/1 is being called with:
["Vince"]
```
### Tracing a function and getting its return value (also possible for local tracing)
```
iex> SmartTracer.trace(&FakeModule.hello/1, 5, return: true)
1
iex> FakeModule.hello("Vince")
Elixir.SmartTracer.Support.FakeModule.hello/1 is being called with:
["Vince"]
Elixir.SmartTracer.Support.FakeModule.hello/1 returns:
"Hello, my name is NAME-Vince"
```
### Tracing a function and recording calls and returns
```
iex> SmartTracer.trace(&FakeModule.hello/1, 5, return: true, record: true)
1
```
To playback all the recordings, use `playback/0`
```
iex> SmartTracer.playback()
[%SmartTracer.Utils.Recorder.Call{
args: ["Vince"],
arity: 1,
function: :hello,
module: SmartTracer.Support.FakeModule,
type: :call
},
%SmartTracer.Utils.Recorder.Return{
arity: 1,
function: :hello,
module: SmartTracer.Support.FakeModule,
return_value: "Hello, my name is NAME-Vince",
type: :return
}]
```
"""
alias SmartTracer.Core
alias SmartTracer.Utils.Recorder
require Logger
@default_formatter Application.get_env(:smart_tracer, :default_formatter)
@doc """
Traces calls for the specified function.
## Options
* `:return` - display return value of the specified function, defaults to `false`
* `:scope` - determines whether to trace local calls as well
* `:global` (default) - trace only public functions
* `:local` - trace private function calls as well
"""
@spec trace(function :: fun(), calls_count :: integer(), opts :: keyword()) :: :ok | :no_matches
def trace(function, calls_count, opts \\ []) when is_list(opts) and is_integer(calls_count) do
Core.trace(function, calls_count, @default_formatter, opts)
end
@doc """
Stops tracing any function calls.
"""
@spec stop() :: :ok
def stop() do
Core.stop()
end
@doc """
Returns a list of all the traces.
"""
@spec playback() ::
[call: {module(), atom(), [String.t()]}]
| [return: {module(), atom(), integer(), String.t()}]
def playback(), do: Recorder.playback()
@doc false
def handle(:call, {module, func_name, args}) do
Logger.info("#{module}.#{func_name}/#{length(args)} is being called with: #{inspect(args)}")
end
@doc false
def handle(:return, {module, func_name, arity, return_value}) do
Logger.info("#{module}.#{func_name}/#{arity} returns: #{inspect(return_value)}")
end
end
|
lib/smart_tracer.ex
| 0.825871
| 0.712932
|
smart_tracer.ex
|
starcoder
|
defmodule Fastfwd.Sender do
@moduledoc """
Functions for a generic Fastfwd sender, such as an adapter frontend, forwarder or facade.
This module is a quick way to convert a module into a Fastfwd sender, acting as frontend to various tagged receiver
modules.
## Example
```
defmodule Icecream do
use Fastfwd.Sender
def mix(type, tubs) do
fwd(type, :mix, tubs)
end
def icecream_types() do
fwd_tags()
end
def icecream_processors() do
fwd_modules()
end
end
```
## Usage
Include this module's functions in your module by `use`ing it. Various options can be set to configure it.
## Options
### :namespace
Specifies the namespace to search for suitable receiver modules. Modules with names that start with the specified namespace
will be checked for compatibility.
In Fastfwd a "namespace" is a fragment of the module naming hierarchy. For instance, setting a namespace of "Icecream" will cause all Elixir modules with
names beginning with "Icecream." to be selected. Note the "." - Specifying "NetworkAdapter" as a namespace will
include "NetworkAdapter.Wifi" but will *not* include "NetworkAdapterSettings"
Specifying Elixir as the namespace will cause all modules to be checked for compatibility.
Receiver modules must also have a suitable behaviour.
The default namespace is the module name of the sender.
#### Examples
use Fastfwd.Sender
use Fastfwd.Sender, namespace: Turtles
use Fastfwd.Sender, namespace: Elixir
### :behaviour
Specifies the behaviour module that is used by receivers for this sender.
Only modules that implement this behaviour will be detected by the sender.
The default behaviour is `Fastfwd.Behaviours.Receiver`
#### Examples
use Fastfwd.Sender
use Fastfwd.Sender, behaviour: MyApp.Behaviours.UserType
### :cache
If enabled, the cache option will cache the module search and routing map for the sender. This makes forwarded
calls to receiver functions much faster.
You should usually leave this set to true.
The default value is true.
#### Examples
use Fastfwd.Sender, cache: true
### :default
This option defines the default module, returned if searching for a tag returns no value.
It can be used to implement a generic fallback module, or to trigger errors.
By default nil is returned if a tag cannot be found.
#### Examples
use Fastfwd.Sender, default: MyApp.User.GenericUser
use Fastfwd.Sender, default: MyApp.User.TypeError
### :autoload
Enables or disables autoloading of modules. If enabled then the module will attempt to find and load modules before
searching for suitable receivers. This can cause a delay.
Autoload will only occur once as the result is cached (even if caching is disabled)
The default value is true.
#### Examples
use Fastfwd.Sender, autoload: true
### :load_apps
If `autoload` is enabled then this module will attempt to load modules before searching them. This option specifies
which applications should be searched for modules.
The default setting is to load modules from all applications.
#### Examples
use Fastfwd.Sender, load_apps: [:my_app, :an_extension_library]
## Functions
The following functions will be added to your module:
* fwd/3
* fwd_tags/0
* fwd_modules/0
* fwd_routes/0
See `Fastfwd.Behaviours.Sender` for more information
"""
defmacro __using__(opts \\ []) do
this_module = __CALLER__.module
namespace = Keyword.get(opts, :namespace, this_module)
behaviour = Keyword.get(opts, :behaviour, nil)
cache = Keyword.get(opts, :cache, true)
default = Keyword.get(opts, :default, nil)
autoload = Keyword.get(opts, :autoload, true)
load_apps = Keyword.get(opts, :load_apps, :all)
quote do
@behaviour Fastfwd.Behaviours.Sender
@fwd_modcache :"fastfwd/modcache/#{unquote(namespace)}/#{unquote(behaviour)}"
@fwd_mapcache :"fastfwd/mapcache/#{unquote(namespace)}/#{unquote(behaviour)}"
@fwd_apploadcache :"fastfwd/apploadcache/#{unquote(this_module)}"
@fwd_default_module unquote(default)
@doc """
Forward a call to a receiver module selected by tag. This function is provided by `Fastfwd.Sender`.
Returns whatever the receiver method returns.
## Examples
iex> Icecream.fwd(:chocolate, :eat, [8])
"Eating 8 of Icecream.DoubleChocolate"
"""
@impl Fastfwd.Behaviours.Sender
@spec fwd(atom(), atom(), list()) :: any()
def fwd(tag, function_name, params) do
fwd_routes()
|> Map.get(tag, @fwd_default_module)
|> apply(function_name, params)
end
@doc """
List all tags provided by this module. This function is provided by `Fastfwd.Sender`.
Returns a list of tags as atoms
## Examples
Icecream.DoubleChocolate.fwd_tags()
[:chocolate, :double_chocolate]
"""
@impl Fastfwd.Behaviours.Sender
@spec fwd_tags() :: [atom(), ...]
def fwd_tags do
fwd_modules()
|> Fastfwd.Modules.tags()
end
@doc """
List all receiver modules used by this module. This function is provided by `Fastfwd.Sender`.
Returns a list of module names as atoms/modules
## Examples
Icecream.fwd_modules()
[Icecream.Pistachio, Icecream.Chocolate, Icecream.ShavedIce, Icecream.Strawberry, Icecream.DoubleChocolate]
"""
@impl Fastfwd.Behaviours.Sender
@spec fwd_modules() :: [module(), ...]
def fwd_modules() do
if unquote(cache) do
cached_mods = FastGlobal.get(@fwd_modcache)
if is_nil(cached_mods) do
if unquote(autoload), do: {:ok, _} = fwd_cached_app_autoloader()
mods = Fastfwd.modules(unquote(namespace), unquote(behaviour))
:ok = FastGlobal.put(@fwd_modcache, mods)
mods
else
cached_mods
end
else
if unquote(autoload), do: {:ok, _} = fwd_cached_app_autoloader()
Fastfwd.modules(unquote(namespace), unquote(behaviour))
end
end
@doc """
Returns a map of tags and modules. This function is provided by `Fastfwd.Sender`.
If more than one module has a particular tag then module sort order will determine which one gets included as the active
mapping of tag to module.
## Examples
iex> Icecream.fwd_routes()
%{chocolate: Icecream.DoubleChocolate, double_chocolate: Icecream.DoubleChocolate, pistachio: Icecream.Pistachio, strawberry: Icecream.Strawberry}
"""
@impl Fastfwd.Behaviours.Sender
@spec fwd_routes() :: map()
def fwd_routes() do
if unquote(cache) do
cached_map = FastGlobal.get(@fwd_mapcache)
if is_nil(cached_map) do
map = fwd_modules()
|> Fastfwd.routes()
:ok = FastGlobal.put(@fwd_mapcache, map)
map
else
cached_map
end
else
fwd_modules()
|> Fastfwd.routes()
end
end
## Scan and load all modules in advance, and cache this to save a little time.
## The first run is still rather slow
defp fwd_cached_app_autoloader() do
case FastGlobal.get(@fwd_apploadcache) do
nil -> cached_appload = Fastfwd.Loader.run(unquote(load_apps))
FastGlobal.put(@fwd_apploadcache, cached_appload)
cached_appload
stored -> stored
end
end
end
end
end
|
lib/fastfwd/sender.ex
| 0.896795
| 0.717723
|
sender.ex
|
starcoder
|
defmodule Dust.Parsers do
@moduledoc """
Parsers API provides abstractions to
get relevant assets from the DOM.
"""
alias Dust.Parsers
alias Dust.Parsers.{CSS, Image, Favicon, JS}
@type sources() :: list(String.t())
@type document() :: Floki.html_tag() | Floki.html_tree()
@doc """
Parses a raw HTML document and extracts all links
to CSS, JS and images; for images it also extracts
`url(...)` values directly embedded via the `style` attribute.
Returns:
```elixir
[
css: ["some/relative/url.css", "http://absolute.url/app.css"],
js: ["some/relative/url.js", "http://absolute.url/app.js"],
image: ["some/relative/url.jpg", "http://absolute.url.png"]
]
```
"""
@spec parse(String.t()) :: list()
def parse(document) do
with {:ok, dom} <- Floki.parse_document(document) do
[
Task.async(fn -> {:css, css(dom)} end),
Task.async(fn -> {:js, js(dom)} end),
Task.async(fn ->
{
:image,
dom
|> favicon()
|> Dust.List.merge(image(dom))
|> Dust.List.merge(parse_urls(document))
}
end)
]
|> Enum.map(&Task.await(&1))
end
end
@doc """
Parses raw HTML document and extracts CSS
`url(...)` values directly embedded via `style` attribute.
"""
@spec parse_urls(String.t()) :: sources()
def parse_urls(document) do
Parsers.URI.parse(document)
end
@doc """
Parses raw HTML document and extracts CSS urls
"""
@spec css(document()) :: sources()
def css(document), do: CSS.parse(document)
@doc """
Parses raw HTML document and extracts JavaScript urls
"""
@spec js(document()) :: sources()
def js(document), do: JS.parse(document)
@doc """
Parses raw HTML document and extracts image urls
"""
@spec image(document()) :: sources()
def image(document), do: Image.parse(document)
@doc """
Parses raw HTML document and extracts favicon urls
"""
@spec favicon(document()) :: sources()
def favicon(document), do: Favicon.parse(document)
end
|
lib/dust/parsers.ex
| 0.835953
| 0.521227
|
parsers.ex
|
starcoder
|
defmodule Nerves.Network do
require Logger
alias Nerves.Network.Types
@moduledoc """
The Nerves.Network application handles the low level details of connecting
to networks. To quickly get started, create a new Nerves project and add
the following line someplace early on in your program:
Nerves.Network.setup "wlan0", ssid: "myssid", key_mgmt: :"WPA-PSK", psk: "secretsecret"
When you boot your Nerves image, Nerves.Network monitors for an interface
called "wlan0" to be created. This occurs when you plug in a USB WiFi dongle.
If you plug in more than one WiFi dongle, each one will be given a name like
"wlan1", etc. Those may be setup as well.
When not connected, Nerves.Network continually scans
for the desired access point. Once found, it associates and runs DHCP to
acquire an IP address.
"""
@typedoc "Settings to `setup/2`"
@type setup_setting ::
{:ipv4_address_method, :dhcp | :static | :linklocal}
| {:ipv4_address, Types.ip_address()}
| {:ipv4_subnet_mask, Types.ip_address()}
| {:domain, String.t()}
| {:nameservers, [Types.ip_address()]}
| {atom, any()}
@typedoc "Keyword List settings to `setup/2`"
@type setup_settings :: [setup_setting]
@doc """
Configure the specified interface. Settings contains one or more of the
following:
* `:ipv4_address_method` - `:dhcp`, `:static`, or `:linklocal`
* `:ipv4_address` - e.g., "192.168.1.5" (specify when :ipv4_address_method = :static)
* `:ipv4_subnet_mask` - e.g., "255.255.255.0" (specify when :ipv4_address_method = :static)
* `:domain` - e.g., "mycompany.com" (specify when :ipv4_address_method = :static)
* `:nameservers` - e.g., ["8.8.8.8", "8.8.4.4"] (specify when :ipv4_address_method = :static)
* `:ssid` - "My WiFi AP" (specify if this is a wireless interface)
* `:key_mgmt` - e.g., `:"WPA-PSK"` or `:NONE`
* `:psk` - e.g., "my-secret-wlan-key"
See `t:#{__MODULE__}.setup_setting/0` for more info.
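For example, a static configuration sketch (the interface name and addresses are placeholders):
    Nerves.Network.setup "eth0",
      ipv4_address_method: :static,
      ipv4_address: "192.168.1.5",
      ipv4_subnet_mask: "255.255.255.0",
      nameservers: ["8.8.8.8", "8.8.4.4"]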
"""
@spec setup(Types.ifname(), setup_settings) :: :ok
def setup(ifname, settings \\ []) do
Logger.debug("#{__MODULE__} setup(#{ifname})")
{:ok, {_new, _old}} = Nerves.Network.Config.put(ifname, settings)
:ok
end
@doc """
Stop all control of `ifname`
"""
@spec teardown(Types.ifname()) :: :ok
def teardown(ifname) do
Logger.debug("#{__MODULE__} teardown(#{ifname})")
{:ok, {_new, _old}} = Nerves.Network.Config.drop(ifname)
:ok
end
@doc """
Convenience function for returning the current status of a network interface
from SystemRegistry.
"""
@spec status(Types.ifname()) :: Nerves.NetworkInterface.Worker.status() | nil
def status(ifname) do
SystemRegistry.match(:_)
|> get_in([:state, :network_interface, ifname])
end
@doc """
If `ifname` is a wireless LAN, scan for access points.
"""
@spec scan(Types.ifname()) :: [String.t()] | {:error, any}
def scan(ifname) do
Nerves.Network.IFSupervisor.scan(ifname)
end
@doc """
Change the regulatory domain for wireless operations. This must be set to the
two character `alpha2` code for the country where this device is operating.
See [the kernel database](http://git.kernel.org/cgit/linux/kernel/git/sforshee/wireless-regdb.git/tree/db.txt)
for the latest database and the frequencies allowed per country.
The default is to use the world regulatory domain (00).
You may also configure the regulatory domain in your app's `config/config.exs`:
config :nerves_network,
regulatory_domain: "US"
"""
@spec set_regulatory_domain(String.t()) :: :ok
def set_regulatory_domain(country) do
Logger.warn("Regulatory domain currently can only be updated on WiFi device addition.")
Application.put_env(:nerves_network, :regulatory_domain, country)
end
end
|
lib/nerves_network.ex
| 0.784195
| 0.407834
|
nerves_network.ex
|
starcoder
|
defmodule Elixoids.Space do
@moduledoc """
Define the play area (world) in which the Game is played.
All units are in metres.
"""
alias Elixoids.World.Point
import Elixoids.Const
# The ratio of the play area
@ratio world_ratio()
@width world_width_m()
@height @width / @ratio
@half_width @width / 2.0
@border @width / 100
@min_x -@border
@max_x @width + @border
@min_y -@border
@max_y @height + @border
@doc """
Wrap point p so that its coordinates remain inside the world.
"""
def wrap(p), do: p |> wrap_x |> wrap_y
defp wrap_x(p) do
cond do
p.x < @min_x -> %{p | x: p.x + @width + @border + @border}
p.x > @max_x -> %{p | x: p.x - @width - @border - @border}
true -> p
end
end
defp wrap_y(p) do
cond do
p.y < @min_y -> %{p | y: p.y + @height + @border + @border}
p.y > @max_y -> %{p | y: p.y - @height - @border - @border}
true -> p
end
end
def random_point do
%Point{x: :rand.uniform() * @width, y: :rand.uniform() * @height}
end
def random_point_on_vertical_edge do
%Point{x: 0.0, y: :rand.uniform() * @height}
end
def random_point_on_border do
if :rand.uniform() * @ratio < 1.0 do
%Point{x: :rand.uniform() * @width, y: 0.0}
else
random_point_on_vertical_edge()
end
end
def dimensions, do: [@width, @height]
# Points on a grid
@grid_points 8
defp rand_grid_position(size_px, grid_count) do
grid_size = size_px / grid_count
p = :rand.uniform(grid_count - 1)
x = grid_size * p
perturb = :rand.normal() * grid_size / @grid_points
x + perturb
end
def random_grid_point do
x = rand_grid_position(@width, @grid_points)
y = rand_grid_position(@height, @grid_points - 2)
%Point{x: x, y: y}
end
@doc """
Return the x ordinate as a fraction -1..1 of the screen width
"""
def frac_x(x) do
cond do
x <= 0.0 -> -1.0
x >= @width -> 1.0
true -> (x - @half_width) / @half_width
end
|> Float.round(2)
end
end
|
lib/elixoids/space.ex
| 0.824285
| 0.41745
|
space.ex
|
starcoder
|
defmodule Scipio.Utils.Pipeline do
require Logger
def is_valid(pipelines) when is_list(pipelines), do: pipelines
def is_valid(tasks), do: tasks
def parse(pipeline, :config) do
String.trim(pipeline)
|> String.split("\n")
|> Enum.map(fn item -> String.trim(item) end)
end
@doc """
pipelines
[
add_1 > add_2 > add_3,
add_1 > add_2 > add_3
]
tasks add_1 > add_2 > add_3
"""
def parse(pipelines) when is_list(pipelines) do
tasks_map = pipelines
|> Enum.map(fn pipeline -> parse(pipeline) end)
|> Enum.reduce(%{}, fn item, acc ->
merge_pipes(acc, item)
end)
entry_tasks = Enum.filter(Map.keys(tasks_map), fn item -> item not in List.flatten(Map.values(tasks_map)) end)
tasks_map |> Map.put(:entry_, entry_tasks)
end
def parse(tasks) do
import Scipio.Utils.Operator
import Kernel, except: [>: 2, <: 2]
tasks
|> Code.string_to_quoted()
|> parse_quoted_form()
end
@doc """
add_1 > [add_2,add_3] > [add_4,add_5] > add_6
to
%{
_entry: :add_1,
add_1: [:add_2, :add_3],
add_2: [:add_4, :add_5],
add_3: [:add_4, :add_5],
add_4: :add_6,
add_5: :add_6
}
"""
def parse_quoted_form(quoted_result) do
import Scipio.Utils.Operator
import Kernel, except: [>: 2, <: 2]
case quoted_result do
{:ok, quoted_form} ->
operator_list = quoted_form |> lookup_ast() |> List.flatten()
params = Enum.zip(operator_list, operator_list)
{ eval_result, _} = quoted_form
|> Code.eval_quoted(params, __ENV__)
eval_result
|> Enum.chunk_every(
2, 1, :discard
)
|> Enum.map(fn [first, last] ->
cond do
is_list(first) ->
for i <- first, do: {i, last}
true ->
{first, last}
end
end)
|> List.flatten()
|> Map.new()
{:error, {_line, error, _token}} -> Logger.error(error |> inspect())
end
end
def lookup_ast({node, _, next}) when is_nil(next), do: node
def lookup_ast({_, _, next}), do: lookup_ast(next)
def lookup_ast(next) when is_list(next) do
next |> Enum.map(fn n -> lookup_ast(n) end)
end
def merge_pipes(pipe1, pipe2) when is_map(pipe1) and is_map(pipe2) do
# IO.inspect([pipe1, pipe2])
Map.merge(pipe1, pipe2, fn _k, v1, v2 ->
cond do
is_list(v1) and is_list(v2) -> v1 ++ v2
not is_list(v1) and not is_list(v2) -> [v1] ++ [v2]
is_list(v1) and not is_list(v2) -> v1 ++ [v2]
not is_list(v1) and is_list(v2) -> [v1] ++ v2
end |> Enum.uniq()
end)
end
def load_file(path) do
import Mix.Config
import_config(path)
end
end
|
lib/utils/pipeline.ex
| 0.618665
| 0.459137
|
pipeline.ex
|
starcoder
|
defmodule AkinML.Tangram.Name do
@moduledoc """
1. [Install](https://www.tangram.dev/docs/install) the Tangram CLI
1. Training data is in `names.csv` in same directory as this module
1. Train the model: `tangram train --file names.csv --target outcome`
1. Model is `metrics_for_training.tangram`
1. Run the Tangram app: `tangram app`
1. Access at http://localhost:8080/ to interact with the model, features, predictions, etc.
"""
@doc """
Run the predictions using the model
"""
def predict() do
model_path = "lib/tangram/metrics_for_training.tangram"
# Load the model
model = Tangram.load_model_from_path(model_path)
# Build inputs from data ready for predictions
# File.stream!("lib/tangram/metrics_for_predicting.csv")
File.stream!("lib/tangram/mini_metrics_for_predicting.csv")
|> Stream.map(&String.trim(&1))
|> Enum.to_list()
|> Enum.each(fn row ->
[bag_distance, substring_set, dice_sorensen, metaphone, double_metaphone,
substring_double_metaphone, jaccard, jaro_winkler, levenshtein, ngram, overlap,
substring_sort, tversky, initials, name, _match] = String.split(row, "\t")
input = %{
:bag_distance => bag_distance,
:substring_set => substring_set,
:dice_sorensen => dice_sorensen,
:metaphone => metaphone,
:double_metaphone => double_metaphone,
:substring_double_metaphone => substring_double_metaphone,
:jaccard => jaccard,
:jaro_winkler => jaro_winkler,
:levenshtein => levenshtein,
:ngram => ngram,
:overlap => overlap,
:substring_sort => substring_sort,
:tversky => tversky,
:initials => initials
}
# Make the prediction!
output = Tangram.predict(model, input)
# id = make_ref()
# |> :erlang.ref_to_list()
# |> List.to_string()
# identifier = "#{id}: #{name}"
identifier = "#{name}"
log_predictions = %Tangram.LogPredictionArgs{
:identifier => identifier,
:input => input,
:options => nil,
:output => output
}
Tangram.log_prediction(model, log_predictions)
# Print the output.
IO.write("Prediction Identifier: ")
IO.inspect(identifier)
IO.write("Output: ")
IO.inspect(output)
end)
end
@doc """
Log true values
"""
def truth() do
model_path = "lib/tangram/metrics_for_training.tangram"
# Load the model
model = Tangram.load_model_from_path(model_path)
File.stream!("lib/tangram/mini_metrics_for_predicting.csv")
|> Stream.map(&String.trim(&1))
|> Enum.to_list()
|> Enum.each(fn row ->
[_, _, _, _, _, _, _, _, _, _, _, _, _, _, name, match] = String.split(row, "\t")
identifier = "#{name}"
true_value = %Tangram.LogTrueValueArgs{
:identifier => identifier,
:true_value => to_string(match)
}
Tangram.log_true_value(model, true_value)
end)
end
@spec predict(binary(), list() | map(), binary()) :: list() | %Tangram.BinaryClassificationPredictOutput{}
@doc """
Run the predictions using the model
"""
def predict(model_path, inputs, identifier) when is_list(inputs) do
Enum.map(inputs, fn input ->
predict(model_path, input, identifier)
end)
end
def predict(model_path, input, identifier) do
model = Tangram.load_model_from_path(model_path)
Tangram.predict(model, input)
|> log_prediction(model, input, identifier)
end
defp log_prediction(output, model, input, identifier) do
log = %Tangram.LogPredictionArgs{
:identifier => "#{identifier}",
:input => input,
:options => nil,
:output => output
}
Tangram.log_prediction(model, log)
output
end
end
|
lib/tangram/name.ex
| 0.822759
| 0.532729
|
name.ex
|
starcoder
|
defmodule Sanbase.Signal.Evaluator do
@moduledoc ~s"""
A module that takes a list of triggers and returns the ones that are triggered.
The evaluation can be executed or the values can be taken from a cache. Taking
data from the cache respects the last triggered datetimes, the cooldown value and
all relevant trigger settings. Some of the fields, such as the distribution channel
(email or telegram), the name and description of the signal, etc., are ignored.
"""
alias Sanbase.Signal.Evaluator.Cache
alias Sanbase.Signal.{UserTrigger, Trigger}
require Logger
@doc ~s"""
Takes a list of triggers and returns a list of those triggers that are
triggered at the current time and that the user should be notified about.
"""
@spec run(list(), String.t() | nil) :: list()
def run(user_triggers, type \\ nil)
def run([], _), do: []
def run(user_triggers, type) do
Logger.info("Start evaluating #{length(user_triggers)} signals of type #{type}")
user_triggers
|> Sanbase.Parallel.map(
&evaluate/1,
ordered: false,
max_concurrency: 8,
timeout: 90_000,
on_timeout: :kill_task
)
|> filter_triggered(type)
|> populate_payload()
end
defp evaluate(%UserTrigger{trigger: trigger} = user_trigger) do
%{cooldown: cd, last_triggered: lt} = trigger
# Along with the trigger settings (the `cache_key`) take into account also
# the last triggered datetime and cooldown. This is done because a signal
# can only be fired if it did not fire in the past `cooldown` interval of time
evaluated_trigger =
Cache.get_or_store(
{Trigger.cache_key(trigger), {lt, cd}},
fn -> Trigger.evaluate(trigger) end
)
# Take only `template_kv` and `triggered?` from the cache. Each `put` is done
# by a separate `put_in` invocation
user_trigger
|> put_in(
[Access.key!(:trigger), Access.key!(:settings), Access.key!(:template_kv)],
evaluated_trigger.settings.template_kv
)
|> put_in(
[Access.key!(:trigger), Access.key!(:settings), Access.key!(:triggered?)],
evaluated_trigger.settings.triggered?
)
end
defp filter_triggered(triggers, type) do
triggers
|> Enum.filter(fn
%UserTrigger{trigger: trigger} ->
Trigger.triggered?(trigger)
{:exit, :timeout} ->
Logger.info("A trigger of type #{type} has timed out and has been killed.")
false
_ ->
false
end)
end
defp populate_payload(triggers) do
triggers
|> Enum.map(fn %UserTrigger{} = user_trigger ->
template_kv = user_trigger.trigger.settings.template_kv
payload =
Enum.into(template_kv, %{}, fn {identifier, {template, kv}} ->
{identifier, Trigger.payload_to_string({template, kv})}
end)
user_trigger
|> put_in(
[Access.key!(:trigger), Access.key!(:settings), Access.key!(:payload)],
payload
)
end)
end
end
|
lib/sanbase/signals/evaluator/evaluator.ex
| 0.786131
| 0.493348
|
evaluator.ex
|
starcoder
|
defmodule KafkaEx.New.KafkaExAPI do
@moduledoc """
This module interfaces Kafka through the New.Client implementation
This is intended to become the future KafkaEx API
Most functions here take a client pid as the first argument.
```
{:ok, client} = KafkaEx.New.Client.start_link()
KafkaEx.New.KafkaExAPI.latest_offset(client, "some_topic", 0)
```
"""
alias KafkaEx.New.Client
alias KafkaEx.New.ClusterMetadata
alias KafkaEx.New.Topic
alias KafkaEx.New.NodeSelector
@type node_id :: non_neg_integer
@type topic_name :: binary
@type partition_id :: non_neg_integer
@type consumer_group_name :: binary
@type offset :: non_neg_integer
@type error_atom :: atom
@type client :: GenServer.server()
@type correlation_id :: non_neg_integer
@doc """
Fetch the latest offset for a given partition
"""
@spec latest_offset(client, topic_name, partition_id) ::
{:error, error_atom} | {:ok, offset}
def latest_offset(client, topic, partition) do
request = %Kayrock.ListOffsets.V1.Request{
replica_id: -1,
topics: [
%{topic: topic, partitions: [%{partition: partition, timestamp: -1}]}
]
}
{:ok, resp} =
Client.send_request(
client,
request,
NodeSelector.topic_partition(topic, partition)
)
[topic_resp] = resp.responses
[%{error_code: error_code, offset: offset}] = topic_resp.partition_responses
case error_code do
0 -> {:ok, offset}
_ -> {:error, Kayrock.ErrorCode.code_to_atom(error_code)}
end
end
@doc """
Get topic metadata for the given topics
Always calls out to the broker to get the most up-to-date metadata (and
subsequently updates the client's state with the updated metadata). Set
allow_topic_creation to true to allow the topics to be created if they
don't exist
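A usage sketch (the topic name is a placeholder):
    {:ok, topics} = KafkaEx.New.KafkaExAPI.topics_metadata(client, ["some_topic"])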
"""
@spec topics_metadata(client, [topic_name], boolean) :: {:ok, [Topic.t()]}
def topics_metadata(client, topics, allow_topic_creation \\ false) do
GenServer.call(client, {:topic_metadata, topics, allow_topic_creation})
end
@doc """
Returns the cluster metadata from the given client
"""
@spec cluster_metadata(client) :: {:ok, ClusterMetadata.t()}
def cluster_metadata(client) do
GenServer.call(client, :cluster_metadata)
end
@doc """
Returns the current correlation id for the given client
"""
@spec correlation_id(client) :: {:ok, correlation_id}
def correlation_id(client) do
GenServer.call(client, :correlation_id)
end
@doc """
Set the consumer group name that will be used by the given client for
autocommit
NOTE this function will not be supported after the legacy API is removed
"""
@spec set_consumer_group_for_auto_commit(client, consumer_group_name) ::
:ok | {:error, :invalid_consumer_group}
def set_consumer_group_for_auto_commit(client, consumer_group) do
GenServer.call(
client,
{:set_consumer_group_for_auto_commit, consumer_group}
)
end
end
|
lib/kafka_ex/new/kafka_ex_api.ex
| 0.836388
| 0.644582
|
kafka_ex_api.ex
|
starcoder
|
defmodule Gradient do
@moduledoc """
Documentation for `Gradient`.
Options:
- `app_path` - Path to the app that contains file with code (for umbrella apps).
- `code_path` - Path to a file with code (e.g. when beam was compiled without project).
- `no_gradualizer_check` - Skip Gradualizer checks if true.
- `no_ex_check` - Skip Elixir checks if true.
- `no_specify` - Skip AST specifying if true.
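A minimal usage sketch (the `.beam` path and app path below are hypothetical):
    Gradient.type_check_file(
      "_build/dev/lib/my_app/ebin/Elixir.MyApp.beam",
      app_path: "apps/my_app"
    )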
"""
alias Gradient.ElixirFileUtils
alias Gradient.ElixirFmt
alias Gradient.AstSpecifier
alias Gradient.ElixirChecker
require Logger
@type options() :: [{:app_path, String.t()}, {:code_path, String.t()}]
@spec type_check_file(String.t(), options()) :: :ok | :error
def type_check_file(file, opts \\ []) do
opts = Keyword.put(opts, :return_errors, true)
with {:ok, forms} <- ElixirFileUtils.get_forms(file),
{:elixir, _} <- wrap_language_name(forms) do
forms = maybe_specify_forms(forms, opts)
case maybe_gradient_check(forms, opts) ++ maybe_gradualizer_check(forms, opts) do
[] ->
:ok
errors ->
opts = Keyword.put(opts, :forms, forms)
ElixirFmt.print_errors(errors, opts)
:error
end
else
{:erlang, forms} ->
opts = Keyword.put(opts, :return_errors, false)
case maybe_gradualizer_check(forms, opts) do
:nok -> :error
_ -> :ok
end
error ->
Logger.error("Can't load file - #{inspect(error)}")
:error
end
end
defp maybe_gradualizer_check(forms, opts) do
unless opts[:no_gradualizer_check] do
try do
:gradualizer.type_check_forms(forms, opts)
catch
err ->
{:attribute, _, :file, {path, _}} = hd(forms)
[{path, err}]
end
else
[]
end
end
defp maybe_gradient_check(forms, opts) do
unless opts[:no_ex_check] do
ElixirChecker.check(forms, opts)
else
[]
end
end
defp maybe_specify_forms(forms, opts) do
unless opts[:no_specify] do
forms
|> put_code_path(opts)
|> AstSpecifier.specify()
else
forms
end
end
defp wrap_language_name([{:attribute, _, :file, {file_name, _}} | _] = forms) do
if :string.str(file_name, '.erl') > 0 do
{:erlang, forms}
else
{:elixir, forms}
end
end
defp put_code_path(forms, opts) do
case opts[:code_path] do
nil ->
case opts[:app_path] do
nil ->
forms
app_path ->
{:attribute, anno, :file, {path, line}} = hd(forms)
[
{:attribute, anno, :file, {String.to_charlist(app_path) ++ '/' ++ path, line}}
| tl(forms)
]
end
path ->
[{:attribute, 1, :file, {path, 1}} | tl(forms)]
end
end
end
|
lib/gradient.ex
| 0.738386
| 0.546859
|
gradient.ex
|
starcoder
|
defmodule Exq.Redis.JobStat do
@moduledoc """
The JobStat module encapsulates storing system-wide stats on top of Redis.
It aims to be compatible with the Sidekiq stats format.
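For illustration, recording a processed job and reading the aggregate counter back
(the `"exq"` namespace and the `job` value are placeholders):
```elixir
{:ok, _count} = Exq.Redis.JobStat.record_processed("exq", job)
Exq.Redis.JobStat.get_count("exq", "processed")
```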
"""
require Logger
alias Exq.Support.{Binary, Process, Job, Time}
alias Exq.Redis.{Connection, JobQueue}
def record_processed_commands(namespace, _job, current_date \\ DateTime.utc_now()) do
{time, date} = Time.format_current_date(current_date)
[
["INCR", JobQueue.full_key(namespace, "stat:processed")],
["INCR", JobQueue.full_key(namespace, "stat:processed_rt:#{time}")],
["EXPIRE", JobQueue.full_key(namespace, "stat:processed_rt:#{time}"), 120],
["INCR", JobQueue.full_key(namespace, "stat:processed:#{date}")]
]
end
def record_processed(namespace, job, current_date \\ DateTime.utc_now()) do
instr = record_processed_commands(namespace, job, current_date)
{:ok, [count, _, _, _]} = Connection.qp(instr)
{:ok, count}
end
def record_failure_commands(namespace, _error, _job, current_date \\ DateTime.utc_now()) do
{time, date} = Time.format_current_date(current_date)
[
["INCR", JobQueue.full_key(namespace, "stat:failed")],
["INCR", JobQueue.full_key(namespace, "stat:failed_rt:#{time}")],
["EXPIRE", JobQueue.full_key(namespace, "stat:failed_rt:#{time}"), 120],
["INCR", JobQueue.full_key(namespace, "stat:failed:#{date}")]
]
end
def record_failure(namespace, error, job, current_date \\ DateTime.utc_now()) do
instr = record_failure_commands(namespace, error, job, current_date)
{:ok, [count, _, _, _]} = Connection.qp(instr)
{:ok, count}
end
def add_process_commands(namespace, process_info, serialized_process \\ nil) do
serialized = serialized_process || Exq.Support.Process.encode(process_info)
[["SADD", JobQueue.full_key(namespace, "processes"), serialized]]
end
def add_process(namespace, process_info, serialized_process \\ nil) do
instr = add_process_commands(namespace, process_info, serialized_process)
Connection.qmn!(instr)
:ok
end
def remove_process_commands(namespace, process_info, serialized_process \\ nil) do
serialized = serialized_process || Exq.Support.Process.encode(process_info)
[["SREM", JobQueue.full_key(namespace, "processes"), serialized]]
end
def remove_process(namespace, process_info, serialized_process \\ nil) do
instr = remove_process_commands(namespace, process_info, serialized_process)
Connection.qmn!(instr)
:ok
end
def cleanup_processes(namespace, host) do
Connection.smembers!(JobQueue.full_key(namespace, "processes"))
|> Enum.map(fn serialized -> {Process.decode(serialized), serialized} end)
|> Enum.filter(fn {process, _} -> process.host == host end)
|> Enum.each(fn {process, serialized} ->
remove_process(namespace, process, serialized)
end)
:ok
end
def busy(namespace) do
Connection.scard!(JobQueue.full_key(namespace, "processes"))
end
def processes(namespace) do
list = Connection.smembers!(JobQueue.full_key(namespace, "processes")) || []
Enum.map(list, &Process.decode/1)
end
def find_failed(namespace, jid) do
Connection.zrange!(JobQueue.full_key(namespace, "dead"), 0, -1)
|> JobQueue.search_jobs(jid)
end
def remove_queue(namespace, queue) do
Connection.qp([
["SREM", JobQueue.full_key(namespace, "queues"), queue],
["DEL", JobQueue.queue_key(namespace, queue)]
])
end
def remove_failed(namespace, jid) do
{:ok, failure} = find_failed(namespace, jid)
Connection.qp([
["DECR", JobQueue.full_key(namespace, "stat:failed")],
["ZREM", JobQueue.full_key(namespace, "dead"), Job.encode(failure)]
])
end
def clear_failed(namespace) do
Connection.qp([
["SET", JobQueue.full_key(namespace, "stat:failed"), 0],
["DEL", JobQueue.full_key(namespace, "dead")]
])
end
def clear_processes(namespace) do
Connection.del!(JobQueue.full_key(namespace, "processes"))
end
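# Fetches the per-second "failed_rt"/"processed_rt" counter keys and returns their
# values zipped with the time suffix of each key.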
def realtime_stats(namespace) do
[ok: failure_keys, ok: success_keys] =
Connection.qp([
["KEYS", JobQueue.full_key(namespace, "stat:failed_rt:*")],
["KEYS", JobQueue.full_key(namespace, "stat:processed_rt:*")]
])
formatter = realtime_stats_formatter(namespace)
failures = formatter.(failure_keys, "stat:failed_rt:")
successes = formatter.(success_keys, "stat:processed_rt:")
{:ok, failures, successes}
end
defp realtime_stats_formatter(namespace) do
fn keys, ns ->
if Enum.empty?(keys) do
[]
else
res = Connection.qp(Enum.map(keys, &["GET", &1]))
counts = Enum.map(res, &elem(&1, 1))
Enum.map(keys, &Binary.take_prefix(&1, JobQueue.full_key(namespace, ns)))
|> Enum.zip(counts)
end
end
end
def get_count(namespace, key) do
case Connection.get!(JobQueue.full_key(namespace, "stat:#{key}")) do
:undefined ->
0
nil ->
0
count when is_integer(count) ->
count
count ->
{val, _} = Integer.parse(count)
val
end
end
end
|
lib/exq/redis/job_stat.ex
| 0.667256
| 0.52409
|
job_stat.ex
|
starcoder
|
defmodule DryValidation do
@moduledoc """
Used to create a schema to validate input data against.
## Example
```
iex> alias DryValidation.Types
...>
iex> schema = DryValidation.schema do
...> required :name, Types.String
...> optional :age, Types.Integer
...> end
...>
iex> input_data = %{"name" => "John", "age" => "15"}
iex> {:ok, output_data} = DryValidation.Validator.validate(schema, input_data)
iex> assert output_data == %{"name" => "John", "age" => 15}
...>
iex> input_data = %{"name" => 15, "age" => "nonsense"}
iex> {:error, error} = DryValidation.Validator.validate(schema, input_data)
iex> assert error == %{
...> "name" => "15 is not a valid type; Expected type is DryValidation.Types.String",
...> "age" => ~s("nonsense" is not a valid type; Expected type is DryValidation.Types.Integer)
...> }
```
Complex schemas can be crafted using the methods - `required`, `optional`, `map` and `map_list`.
With the use of the provided `DryValidation.Types`, requirements can be set and values can also be cast when possible.
## Available Types
Type | Description
------------- | -------------
`DryValidation.Types.String` | Expects a string type `"some text"`. Will try to cast the value into a string (`1001` = `"1001"`).
`DryValidation.Types.Bool` | Expects a boolean type `[true/false]`. Will cast the strings "true"/"false" to real booleans.
`DryValidation.Types.Float` | Expects a float type `[15.51]`. Will try to cast a string to a float (`"15.5"` = `15.5`).
`DryValidation.Types.Integer` | Expects an integer type `[101]`. Will try to cast a string to an integer (`"100"` = `100`). It'll fail the cast if the string is a float.
`DryValidation.Types.Func` | Custom rules can be built using this; see the module docs. An example is the `DryValidation.Types.Integer.greater_than(5)` rule.
`DryValidation.Types.List` | Expects a list. Can have the list type set to one of the above, including a `Func`.
`DryValidation.Types.Any` | Accepts any value and will do no casting. Usually not used as the type can just be omitted when using `optional` and `required`
## Advanced example
```
schema = DryValidation.schema do
required :name, Types.String
required :age, Types.Integer.greater_than(18)
required :type, Types.Func.equal("users")
optional :pets, Types.Func.member_of(["dog", "cat", "bird"])
optional :favourite_numbers, Types.List.type(Types.Integer)
map_list :cars do
required :make, Types.String
required :cc, Types.Integer
end
map :house, optional: true do
required :address, Types.String
end
end
input_data = %{
"name" => "<NAME>",
"age" => 42,
"type" => "users",
"pet" => "dog",
"favourite_numbers" => [],
"cars" => [
%{"make" => "AUDI", "cc" => 3000},
%{"make" => "BMW", "cc" => 2000},
],
"house" => %{
"address" => "Church Road"
}
}
{:ok, _output_data} = DryValidation.Validator.validate(schema, input_data)
```
"""
@doc """
Creates a validation schema.
"""
defmacro schema(do: block) do
quote do
import DryValidation
{:ok, var!(buffer, __MODULE__)} = start_buffer([])
unquote(block)
result = render(var!(buffer, __MODULE__))
:ok = stop_buffer(var!(buffer, __MODULE__))
result
end
end
@doc false
def start_buffer(state), do: Agent.start_link(fn -> state end)
@doc false
def stop_buffer(buff), do: Agent.stop(buff)
@doc false
def put_buffer(buff, content), do: Agent.update(buff, &[content | &1])
@doc false
def render(buff), do: Agent.get(buff, & &1) |> Enum.reverse() |> DryValidation.construct([])
@doc false
def last_start_block_id(buff) do
Agent.get(buff, & &1)
|> Enum.filter(fn x -> Map.get(x, :block) end)
|> Enum.map(fn x -> Map.get(x, :id) end)
|> Enum.max(&>=/2, fn -> 0 end)
end
@doc """
Defines a list of maps. Can be made optional.
```
schema = DryValidation.schema do
map_list :users do
required(:name, Types.String)
end
end
input_data = %{"users" => [%{"name" => "John"}, %{"name" => "Bob"}]}
{:ok, output_data} = DryValidation.Validator.validate(schema, input_data)
```
"""
defmacro map_list(name, opts \\ [], do: inner) do
quote do
map(unquote(name), unquote(Keyword.put(opts, :rule, :map_list)), do: unquote(inner))
end
end
@doc """
Defines a map. Can be made optional.
```
schema = DryValidation.schema do
map :user do
required(:name, Types.String)
end
map :car, optional: true do
required(:name, Types.String)
end
end
input_data = %{"user" => %{"name" => "John"}}
{:ok, output_data} = DryValidation.Validator.validate(schema, input_data)
```
"""
defmacro map(name, opts \\ [], do: inner) do
quote do
optional = unquote(Keyword.get(opts, :optional, false))
rule = unquote(Keyword.get(opts, :rule, :map))
new_id = last_start_block_id(var!(buffer, __MODULE__)) + 1
put_buffer(
var!(buffer, __MODULE__),
%{rule: rule, block: :start, name: unquote(name), optional: optional, id: new_id}
)
unquote(inner)
put_buffer(
var!(buffer, __MODULE__),
%{rule: rule, block: :end, name: unquote(name), optional: optional, id: new_id}
)
end
end
@doc """
Defines an optional attribute in the schema.
First argument is the name of the attribute, second argument is optional and defines the type.
```
schema = DryValidation.schema do
required(:name)
optional(:age)
end
input_data = %{"name" => "Jon"}
{:ok, output_data} = DryValidation.Validator.validate(schema, input_data)
output_data == %{"name" => "Jon"}
```
"""
defmacro optional(name, type \\ nil) do
quote do
tag(:optional, unquote(name), unquote(type))
end
end
@doc """
Defines a mandatory attribute in the schema.
First argument is the name of the attribute, second argument is optional and defines the type.
```
schema = DryValidation.schema do
required(:name, Types.String)
optional(:age)
end
input_data = %{"age" => 21}
{:error, errors} = DryValidation.Validator.validate(schema, input_data)
errors == %{"name" => "Is missing"}
```
"""
defmacro required(name, type \\ nil) do
quote do
tag(:required, unquote(name), unquote(type))
end
end
@doc false
defmacro tag(tag, name, type \\ nil) do
quote do
put_buffer(
var!(buffer, __MODULE__),
%{rule: unquote(tag), name: unquote(to_string(name)), type: unquote(type)}
)
%{name: unquote(name)}
end
end
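# construct/2 turns the flat buffer of rules into a nested structure: a :start marker
# opens a map/map_list and everything up to the matching :end (same id) is built
# recursively into that entry's :inner list.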
@doc false
def construct([%{block: :end} | tail], result) do
construct(tail, result)
end
@doc false
def construct(
[%{name: name, block: :start, rule: rule, optional: optional, id: id} | tail],
result
) do
{to_end, rest} =
Enum.split_while(tail, fn el ->
!(Map.get(el, :block) == :end && Map.get(el, :id) == id)
end)
inner = construct(to_end, [])
result =
result ++
[%{name: to_string(name), inner: inner, rule: rule, optional: optional}]
construct(rest, result)
end
@doc false
def construct([head | tail], result) do
construct(tail, result ++ [head])
end
def construct([], result) do
result
end
end
|
lib/dry_validation.ex
| 0.93108
| 0.828766
|
dry_validation.ex
|
starcoder
|
defmodule AshGraphql.Resource do
alias Ash.Dsl.Extension
alias Ash.Query.Aggregate
alias AshGraphql.Resource
alias AshGraphql.Resource.{Mutation, Query}
@get %Ash.Dsl.Entity{
name: :get,
args: [:name, :action],
describe: "A query to fetch a record by primary key",
examples: [
"get :get_post, :default"
],
schema: Query.get_schema(),
target: Query,
auto_set_fields: [
type: :get
]
}
@list %Ash.Dsl.Entity{
name: :list,
schema: Query.list_schema(),
args: [:name, :action],
describe: "A query to fetch a list of records",
examples: [
"list :list_posts, :default"
],
target: Query,
auto_set_fields: [
type: :list
]
}
@create %Ash.Dsl.Entity{
name: :create,
schema: Mutation.create_schema(),
args: [:name, :action],
describe: "A mutation to create a record",
examples: [
"create :create_post, :default"
],
target: Mutation,
auto_set_fields: [
type: :create
]
}
@update %Ash.Dsl.Entity{
name: :update,
schema: Mutation.update_schema(),
args: [:name, :action],
describe: "A mutation to update a record",
examples: [
"update :update_post, :default"
],
target: Mutation,
auto_set_fields: [
type: :update
]
}
@destroy %Ash.Dsl.Entity{
name: :destroy,
schema: Mutation.destroy_schema(),
args: [:name, :action],
describe: "A mutation to destroy a record",
examples: [
"destroy :destroy_post, :default"
],
target: Mutation,
auto_set_fields: [
type: :destroy
]
}
@queries %Ash.Dsl.Section{
name: :queries,
describe: """
Queries (read actions) to expose for the resource.
""",
examples: [
"""
queries do
get :get_post, :default
list :list_posts, :default
end
"""
],
entities: [
@get,
@list
]
}
@mutations %Ash.Dsl.Section{
name: :mutations,
describe: """
Mutations (create/update/destroy actions) to expose for the resource.
""",
examples: [
"""
mutations do
create :create_post, :default
update :update_post, :default
destroy :destroy_post, :default
end
"""
],
entities: [
@create,
@update,
@destroy
]
}
@graphql %Ash.Dsl.Section{
name: :graphql,
describe: """
Configuration for a given resource in graphql
""",
examples: [
"""
graphql do
type :post
queries do
get :get_post, :default
list :list_posts, :default
end
mutations do
create :create_post, :default
update :update_post, :default
destroy :destroy_post, :default
end
end
"""
],
schema: [
type: [
type: :atom,
required: true,
doc: "The type to use for this entity in the graphql schema"
]
],
sections: [
@queries,
@mutations
]
}
@transformers [
AshGraphql.Resource.Transformers.RequireIdPkey
]
@sections [@graphql]
@moduledoc """
This Ash resource extension adds configuration for exposing a resource in a GraphQL API.
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Extension, sections: @sections, transformers: @transformers
def queries(resource) do
Extension.get_entities(resource, [:graphql, :queries])
end
def mutations(resource) do
Extension.get_entities(resource, [:graphql, :mutations])
end
def type(resource) do
Extension.get_opt(resource, [:graphql], :type, nil)
end
@doc false
def queries(api, resource, schema) do
type = Resource.type(resource)
if type do
resource
|> queries()
|> Enum.map(fn query ->
query_action = Ash.Resource.action(resource, query.action, :read)
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: args(query.type, resource, query_action, query.identity),
identifier: query.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve}, {api, resource, query}}
],
module: schema,
name: to_string(query.name),
type: query_type(query.type, query_action, type)
}
end)
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
@doc false
def mutations(api, resource, schema) do
resource
|> mutations()
|> Enum.map(fn
%{type: :destroy} = mutation ->
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: mutation_args(mutation, resource, schema),
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result")
}
%{type: :create} = mutation ->
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments: [
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :input,
module: schema,
name: "input",
placement: :argument_definition,
type: String.to_atom("#{mutation.name}_input")
}
],
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result")
}
mutation ->
%Absinthe.Blueprint.Schema.FieldDefinition{
arguments:
mutation_args(mutation, resource, schema) ++
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :input,
module: schema,
name: "input",
placement: :argument_definition,
type: String.to_atom("#{mutation.name}_input")
}
],
identifier: mutation.name,
middleware: [
{{AshGraphql.Graphql.Resolver, :mutate}, {api, resource, mutation}}
],
module: schema,
name: to_string(mutation.name),
type: String.to_atom("#{mutation.name}_result")
}
end)
end
defp mutation_args(%{identity: identity}, resource, _schema) when not is_nil(identity) do
resource
|> Ash.Resource.identities()
|> Enum.find(&(&1.name == identity))
|> Map.get(:keys)
|> Enum.map(fn key ->
attribute = Ash.Resource.attribute(resource, key)
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: to_string(key),
identifier: key,
type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(attribute.type, attribute, resource)
},
description: attribute.description || ""
}
end)
end
defp mutation_args(_, _, schema) do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
identifier: :id,
module: schema,
name: "id",
placement: :argument_definition,
type: :id
}
]
end
@doc false
# sobelow_skip ["DOS.StringToAtom"]
def mutation_types(resource, schema) do
resource
|> mutations()
|> Enum.flat_map(fn mutation ->
mutation = %{
mutation
| action: Ash.Resource.action(resource, mutation.action, mutation.type)
}
description =
if mutation.type == :destroy do
"The record that was successfully deleted"
else
"The successful result of the mutation"
end
result = %Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: "The result of the #{inspect(mutation.name)} mutation",
fields: [
%Absinthe.Blueprint.Schema.FieldDefinition{
description: description,
identifier: :result,
module: schema,
name: "result",
type: Resource.type(resource)
},
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "Any errors generated, if the mutation failed",
identifier: :errors,
module: schema,
name: "errors",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: :mutation_error
}
}
],
identifier: String.to_atom("#{mutation.name}_result"),
module: schema,
name: Macro.camelize("#{mutation.name}_result")
}
if mutation.type == :destroy do
[result]
else
input = %Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: mutation_fields(resource, schema, mutation),
identifier: String.to_atom("#{mutation.name}_input"),
module: schema,
name: Macro.camelize("#{mutation.name}_input")
}
[input, result]
end
end)
end
defp mutation_fields(resource, schema, mutation) do
attribute_fields =
resource
|> Ash.Resource.public_attributes()
|> Enum.filter(fn attribute ->
is_nil(mutation.action.accept) || attribute.name in mutation.action.accept
end)
|> Enum.filter(& &1.writable?)
|> Enum.map(fn attribute ->
type = field_type(attribute.type, attribute, resource)
field_type =
if attribute.allow_nil? || attribute.default || mutation.type == :update do
type
else
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
end
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type
}
end)
relationship_fields =
resource
|> Ash.Resource.public_relationships()
|> Enum.filter(fn relationship ->
Resource in Ash.Resource.extensions(relationship.destination)
end)
|> Enum.map(fn
%{cardinality: :one} = relationship ->
type =
if relationship.type == :belongs_to and relationship.required? do
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: :id
}
else
:id
end
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
type: type
}
%{cardinality: :many} = relationship ->
case mutation.type do
:update ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
type: :relationship_change
}
:create ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
type: %Absinthe.Blueprint.TypeReference.List{
of_type: :id
}
}
end
end)
argument_fields =
Enum.map(mutation.action.arguments, fn argument ->
type =
if argument.allow_nil? do
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(argument.type, argument, resource)
}
else
field_type(argument.type, argument, resource)
end
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: argument.name,
module: schema,
name: to_string(argument.name),
type: type
}
end)
attribute_fields ++ relationship_fields ++ argument_fields
end
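# :get queries resolve to the bare resource type; :list queries resolve to the
# page_of_* object when the action supports pagination, otherwise to a non-null
# list of non-null records.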
defp query_type(:get, _, type), do: type
# sobelow_skip ["DOS.StringToAtom"]
defp query_type(:list, action, type) do
if action.pagination do
String.to_atom("page_of_#{type}")
else
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
}
end
end
defp args(action_type, resource, action, identity \\ nil)
defp args(:get, _resource, _action, nil) do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "id",
identifier: :id,
type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id},
description: "The id of the record"
}
]
end
defp args(:get, resource, _action, identity) do
resource
|> Ash.Resource.identities()
|> Enum.find(&(&1.name == identity))
|> Map.get(:keys)
|> Enum.map(fn key ->
attribute = Ash.Resource.attribute(resource, key)
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: to_string(key),
identifier: key,
type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(attribute.type, attribute, resource)
},
description: attribute.description || ""
}
end)
end
defp args(:list, resource, action, _) do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "filter",
identifier: :filter,
type: resource_filter_type(resource),
description: "A filter to limit the results"
},
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "sort",
identifier: :sort,
type: %Absinthe.Blueprint.TypeReference.List{
of_type: resource_sort_type(resource)
},
description: "How to sort the records in the response"
}
] ++
pagination_args(action)
end
defp pagination_args(action) do
if action.pagination do
max_message =
if action.pagination.max_page_size do
" Maximum #{action.pagination.max_page_size}"
else
""
end
limit_type =
if action.pagination.required? && is_nil(action.pagination.default_limit) do
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: :integer
}
else
:integer
end
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "limit",
identifier: :limit,
type: limit_type,
default_value: action.pagination.default_limit,
description: "The number of records to return." <> max_message
}
] ++ keyset_pagination_args(action) ++ offset_pagination_args(action)
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_sort_type(resource) do
String.to_atom(to_string(AshGraphql.Resource.type(resource)) <> "_sort_input")
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_filter_type(resource) do
String.to_atom(to_string(AshGraphql.Resource.type(resource)) <> "_filter_input")
end
# sobelow_skip ["DOS.StringToAtom"]
defp attribute_filter_field_type(resource, attribute) do
String.to_atom(
to_string(AshGraphql.Resource.type(resource)) <> "_filter_" <> to_string(attribute.name)
)
end
defp keyset_pagination_args(action) do
if action.pagination.keyset? do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "before",
identifier: :before,
type: :string,
description: "Show records before the specified keyset."
},
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "after",
identifier: :after,
type: :string,
description: "Show records after the specified keyset."
}
]
else
[]
end
end
defp offset_pagination_args(action) do
if action.pagination.offset? do
[
%Absinthe.Blueprint.Schema.InputValueDefinition{
name: "offset",
identifier: :offset,
type: :integer,
description: "The number of records to skip."
}
]
else
[]
end
end
@doc false
def type_definitions(resource, api, schema) do
[
type_definition(resource, api, schema),
sort_input(resource, schema),
filter_input(resource, schema)
] ++
filter_field_types(resource, schema) ++
List.wrap(page_of(resource, schema)) ++ enum_definitions(resource, schema)
end
defp filter_field_types(resource, schema) do
filter_attribute_types(resource, schema) ++ filter_aggregate_types(resource, schema)
end
defp filter_attribute_types(resource, schema) do
resource
|> Ash.Resource.public_attributes()
|> Enum.flat_map(&filter_type(&1, resource, schema))
end
defp filter_aggregate_types(resource, schema) do
resource
|> Ash.Resource.public_aggregates()
|> Enum.flat_map(&filter_type(&1, resource, schema))
end
defp attribute_or_aggregate_type(%Ash.Resource.Attribute{type: type}, _resource), do: type
defp attribute_or_aggregate_type(%Ash.Resource.Aggregate{kind: kind, field: field}, resource) do
field_type =
if field do
Ash.Resource.attribute(resource, field).type
end
{:ok, aggregate_type} = Ash.Query.Aggregate.kind_to_type(kind, field_type)
aggregate_type
end
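# Builds the "<type>_filter_<field>" input object for an attribute or aggregate, with one
# field per built-in operator whose argument types can be expressed for that field's type;
# returns [] when no operator applies.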
defp filter_type(attribute_or_aggregate, resource, schema) do
type = attribute_or_aggregate_type(attribute_or_aggregate, resource)
fields =
Enum.flat_map(Ash.Filter.builtin_operators(), fn operator ->
expressable_types =
Enum.filter(operator.types(), fn
[_, {:ref, _}] ->
false
[{:ref, :any}, _] ->
true
[{:ref, type}, _] ->
type in [type, Ash.Type.storage_type(type)]
:any_same_or_ref ->
true
end)
if Enum.any?(expressable_types, &(&1 == :any_same_or_ref)) do
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: operator.name(),
module: schema,
name: to_string(operator.name()),
type: field_type(type, attribute_or_aggregate, resource)
}
]
else
type =
case Enum.at(expressable_types, 0) do
[_, {:array, :any}] ->
{:array, Ash.Type.String}
[_, {:array, :same}] ->
{:array, type}
[_, :same] ->
type
[_, :any] ->
Ash.Type.String
[_, type] when is_atom(type) ->
case Ash.Type.get_type(type) do
nil ->
nil
type ->
type
end
_ ->
nil
end
if type do
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: operator.name(),
module: schema,
name: to_string(operator.name()),
type: field_type(type, attribute_or_aggregate, resource)
}
]
else
[]
end
end
end)
if fields == [] do
[]
else
identifier = attribute_filter_field_type(resource, attribute_or_aggregate)
[
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
identifier: identifier,
fields: fields,
module: schema,
name: identifier |> to_string() |> Macro.camelize()
}
]
end
end
defp sort_input(resource, schema) do
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
fields: [
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :order,
module: schema,
name: "order",
default_value: :asc,
type: :sort_order
},
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :field,
module: schema,
name: "field",
type: resource_sort_field_type(resource)
}
],
identifier: resource_sort_type(resource),
module: schema,
name: resource |> resource_sort_type() |> to_string() |> Macro.camelize()
}
end
defp filter_input(resource, schema) do
%Absinthe.Blueprint.Schema.InputObjectTypeDefinition{
identifier: resource_filter_type(resource),
module: schema,
name: resource |> resource_filter_type() |> to_string() |> Macro.camelize(),
fields: resource_filter_fields(resource, schema)
}
end
defp resource_filter_fields(resource, schema) do
boolean_filter_fields(resource, schema) ++
attribute_filter_fields(resource, schema) ++
relationship_filter_fields(resource, schema) ++ aggregate_filter_fields(resource, schema)
end
defp attribute_filter_fields(resource, schema) do
resource
|> Ash.Resource.public_attributes()
|> Enum.flat_map(fn attribute ->
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: attribute_filter_field_type(resource, attribute)
}
]
end)
end
defp aggregate_filter_fields(resource, schema) do
resource
|> Ash.Resource.public_aggregates()
|> Enum.flat_map(fn aggregate ->
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: aggregate.name,
module: schema,
name: to_string(aggregate.name),
type: attribute_filter_field_type(resource, aggregate)
}
]
end)
end
defp relationship_filter_fields(resource, schema) do
resource
|> Ash.Resource.public_relationships()
|> Enum.filter(fn relationship ->
AshGraphql.Resource.type(relationship.destination)
end)
|> Enum.map(fn relationship ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
type: resource_filter_type(relationship.destination)
}
end)
end
defp boolean_filter_fields(resource, schema) do
if Ash.DataLayer.can?(:boolean_filter, resource) do
[
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :and,
module: schema,
name: "and",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: resource_filter_type(resource)
}
}
},
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: :or,
module: schema,
name: "or",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: resource_filter_type(resource)
}
}
}
]
else
[]
end
end
# sobelow_skip ["DOS.StringToAtom"]
defp resource_sort_field_type(resource) do
type = AshGraphql.Resource.type(resource)
String.to_atom(to_string(type) <> "_sort_field")
end
defp enum_definitions(resource, schema) do
atom_enums =
resource
|> Ash.Resource.public_attributes()
|> Enum.filter(&(&1.type == Ash.Type.Atom))
|> Enum.filter(&is_list(&1.constraints[:one_of]))
|> Enum.map(fn attribute ->
type_name = atom_enum_type(resource, attribute.name)
%Absinthe.Blueprint.Schema.EnumTypeDefinition{
module: schema,
name: type_name |> to_string() |> Macro.camelize(),
values:
Enum.map(attribute.constraints[:one_of], fn value ->
%Absinthe.Blueprint.Schema.EnumValueDefinition{
module: schema,
identifier: value,
name: String.upcase(to_string(value)),
value: value
}
end),
identifier: type_name
}
end)
attribute_sort_values = Enum.map(Ash.Resource.attributes(resource), & &1.name)
aggregate_sort_values = Enum.map(Ash.Resource.aggregates(resource), & &1.name)
sort_values = attribute_sort_values ++ aggregate_sort_values
sort_order = %Absinthe.Blueprint.Schema.EnumTypeDefinition{
module: schema,
name: resource |> resource_sort_field_type() |> to_string() |> Macro.camelize(),
identifier: resource_sort_field_type(resource),
values:
Enum.map(sort_values, fn sort_value ->
%Absinthe.Blueprint.Schema.EnumValueDefinition{
module: schema,
identifier: sort_value,
name: String.upcase(to_string(sort_value)),
value: sort_value
}
end)
}
[sort_order | atom_enums]
end
# sobelow_skip ["DOS.StringToAtom"]
defp page_of(resource, schema) do
type = Resource.type(resource)
paginatable? =
resource
|> Ash.Resource.actions()
|> Enum.any?(fn action ->
action.type == :read && action.pagination
end)
if paginatable? do
%Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: "A page of #{inspect(type)}",
fields: [
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "The records contained in the page",
identifier: :results,
module: schema,
name: "results",
type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
},
%Absinthe.Blueprint.Schema.FieldDefinition{
description: "The count of records",
identifier: :count,
module: schema,
name: "count",
type: :integer
}
],
identifier: String.to_atom("page_of_#{type}"),
module: schema,
name: Macro.camelize("page_of_#{type}")
}
else
nil
end
end
defp type_definition(resource, api, schema) do
type = Resource.type(resource)
%Absinthe.Blueprint.Schema.ObjectTypeDefinition{
description: Ash.Resource.description(resource),
fields: fields(resource, api, schema),
identifier: type,
module: schema,
name: Macro.camelize(to_string(type))
}
end
defp fields(resource, api, schema) do
attributes(resource, schema) ++
relationships(resource, api, schema) ++
aggregates(resource, schema) ++
calculations(resource, schema)
end
defp attributes(resource, schema) do
resource
|> Ash.Resource.public_attributes()
|> Enum.map(fn
%{name: :id} = attribute ->
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: :id,
module: schema,
name: "id",
type: %Absinthe.Blueprint.TypeReference.NonNull{of_type: :id}
}
attribute ->
field_type = field_type(attribute.type, attribute, resource)
field_type =
if attribute.allow_nil? do
field_type
else
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type
}
end
%Absinthe.Blueprint.Schema.FieldDefinition{
description: attribute.description,
identifier: attribute.name,
module: schema,
name: to_string(attribute.name),
type: field_type
}
end)
end
# sobelow_skip ["DOS.StringToAtom"]
defp relationships(resource, api, schema) do
resource
|> Ash.Resource.public_relationships()
|> Enum.filter(fn relationship ->
Resource in Ash.Resource.extensions(relationship.destination)
end)
|> Enum.map(fn
%{cardinality: :one} = relationship ->
type =
if relationship.type == :belongs_to && relationship.required? do
%Absinthe.Blueprint.TypeReference.NonNull{
of_type: Resource.type(relationship.destination)
}
else
Resource.type(relationship.destination)
end
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_assoc}, {api, relationship}}
],
arguments: [],
type: type
}
%{cardinality: :many} = relationship ->
read_action = Ash.Resource.primary_action!(relationship.destination, :read)
type = Resource.type(relationship.destination)
query_type = %Absinthe.Blueprint.TypeReference.NonNull{
of_type: %Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: type
}
}
}
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: relationship.name,
module: schema,
name: to_string(relationship.name),
middleware: [
{{AshGraphql.Graphql.Resolver, :resolve_assoc}, {api, relationship}}
],
arguments: args(:list, relationship.destination, read_action),
type: query_type
}
end)
end
defp aggregates(resource, schema) do
resource
|> Ash.Resource.public_aggregates()
|> Enum.map(fn aggregate ->
field_type =
if aggregate.field do
Ash.Resource.attribute(resource, aggregate.field).type
end
{:ok, type} = Aggregate.kind_to_type(aggregate.kind, field_type)
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: aggregate.name,
module: schema,
name: to_string(aggregate.name),
type: field_type(type, nil, resource)
}
end)
end
defp calculations(resource, schema) do
resource
|> Ash.Resource.public_calculations()
|> Enum.map(fn calculation ->
%Absinthe.Blueprint.Schema.FieldDefinition{
identifier: calculation.name,
module: schema,
name: to_string(calculation.name),
type: field_type(calculation.type, nil, resource)
}
end)
end
defp field_type({:array, type}, %Ash.Resource.Aggregate{} = aggregate, resource) do
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type(type, aggregate, resource)
}
end
defp field_type({:array, type}, attribute, resource) do
new_constraints = attribute.constraints[:items] || []
new_attribute = %{attribute | constraints: new_constraints, type: type}
if attribute.constraints[:nil_items?] do
%Absinthe.Blueprint.TypeReference.List{
of_type: field_type(type, new_attribute, resource)
}
else
%Absinthe.Blueprint.TypeReference.List{
of_type: %Absinthe.Blueprint.TypeReference.NonNull{
of_type: field_type(type, new_attribute, resource)
}
}
end
end
defp field_type(type, attribute, resource) do
if Ash.Type.builtin?(type) do
do_field_type(type, attribute, resource)
else
type.graphql_type(attribute, resource)
end
end
defp do_field_type(Ash.Type.Atom, %{constraints: constraints, name: name}, resource) do
if is_list(constraints[:one_of]) do
atom_enum_type(resource, name)
else
:string
end
end
defp do_field_type(Ash.Type.Map, _, _), do: :json
defp do_field_type(Ash.Type.Term, _, _), do: :string
defp do_field_type(Ash.Type.String, _, _), do: :string
defp do_field_type(Ash.Type.Integer, _, _), do: :integer
defp do_field_type(Ash.Type.Boolean, _, _), do: :boolean
defp do_field_type(Ash.Type.UUID, _, _), do: :string
defp do_field_type(Ash.Type.Date, _, _), do: :date
defp do_field_type(Ash.Type.UtcDatetime, _, _), do: :naive_datetime
# sobelow_skip ["DOS.StringToAtom"]
defp atom_enum_type(resource, attribute_name) do
resource
|> AshGraphql.Resource.type()
|> to_string()
|> Kernel.<>("_")
|> Kernel.<>(to_string(attribute_name))
|> String.to_atom()
end
end
|
lib/resource/resource.ex
| 0.745398
| 0.491273
|
resource.ex
|
starcoder
|
defmodule Paginator.Ecto.Query do
@moduledoc false
import Ecto.Query
alias Paginator.Config
alias Paginator.Ecto.Query.DynamicFilterBuilder
def paginate(queryable, config \\ [])
def paginate(queryable, %Config{} = config) do
queryable
|> maybe_where(config)
|> limit(^query_limit(config))
end
def paginate(queryable, opts) do
config = Config.new(opts)
paginate(queryable, config)
end
# This clause is responsible for transforming legacy list cursors into map cursors
defp filter_values(query, fields, values, cursor_direction) when is_list(values) do
new_values =
fields
|> Enum.map(&elem(&1, 0))
|> Enum.zip(values)
|> Map.new()
filter_values(query, fields, new_values, cursor_direction)
end
defp filter_values(query, fields, values, cursor_direction) when is_map(values) do
filters = build_where_expression(query, fields, values, cursor_direction)
where(query, [{q, 0}], ^filters)
end
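# Each cursor field wraps the dynamic built for the remaining fields (passed as
# :next_filters), so the last field yields the innermost comparison and the whole
# expression forms a keyset-style lexicographic filter.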
defp build_where_expression(query, [{column, order}], values, cursor_direction) do
value = Map.get(values, column)
{q_position, q_binding} = column_position(query, column)
DynamicFilterBuilder.build!(%{
sort_order: order,
direction: cursor_direction,
value: value,
entity_position: q_position,
column: q_binding,
next_filters: true
})
end
defp build_where_expression(query, [{column, order} | fields], values, cursor_direction) do
value = Map.get(values, column)
{q_position, q_binding} = column_position(query, column)
filters = build_where_expression(query, fields, values, cursor_direction)
DynamicFilterBuilder.build!(%{
sort_order: order,
direction: cursor_direction,
value: value,
entity_position: q_position,
column: q_binding,
next_filters: filters
})
end
defp maybe_where(query, %Config{
after: nil,
before: nil
}) do
query
end
defp maybe_where(query, %Config{
after_values: after_values,
before: nil,
cursor_fields: cursor_fields
}) do
query
|> filter_values(cursor_fields, after_values, :after)
end
defp maybe_where(query, %Config{
after: nil,
before_values: before_values,
cursor_fields: cursor_fields
}) do
query
|> filter_values(cursor_fields, before_values, :before)
|> reverse_order_bys()
end
defp maybe_where(query, %Config{
after_values: after_values,
before_values: before_values,
cursor_fields: cursor_fields
}) do
query
|> filter_values(cursor_fields, after_values, :after)
|> filter_values(cursor_fields, before_values, :before)
end
# Lookup position of binding in query aliases
defp column_position(query, {binding_name, column}) do
case Map.fetch(query.aliases, binding_name) do
{:ok, position} ->
{position, column}
_ ->
raise(
ArgumentError,
"Could not find binding `#{binding_name}` in query aliases: #{inspect(query.aliases)}"
)
end
end
# Without named binding we assume position of binding is 0
defp column_position(_query, column), do: {0, column}
# In order to return the correct pagination cursors, we need to fetch one more
# record than we actually want to return.
defp query_limit(%Config{limit: limit}) do
limit + 1
end
# This code was taken from https://github.com/elixir-ecto/ecto/blob/v2.1.4/lib/ecto/query.ex#L1212-L1226
defp reverse_order_bys(query) do
update_in(query.order_bys, fn
[] ->
[]
order_bys ->
for %{expr: expr} = order_by <- order_bys do
%{
order_by
| expr:
Enum.map(expr, fn
{:desc, ast} -> {:asc, ast}
{:desc_nulls_first, ast} -> {:asc_nulls_last, ast}
{:desc_nulls_last, ast} -> {:asc_nulls_first, ast}
{:asc, ast} -> {:desc, ast}
{:asc_nulls_last, ast} -> {:desc_nulls_first, ast}
{:asc_nulls_first, ast} -> {:desc_nulls_last, ast}
end)
}
end
end)
end
end
|
lib/paginator/ecto/query.ex
| 0.760384
| 0.469338
|
query.ex
|
starcoder
|
defmodule RZstd do
@moduledoc """
`RZstd` API.
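For illustration, a round trip might look like this (compression level `3` is an
arbitrary example value):
```elixir
{:ok, compressed} = RZstd.compress("hello", 3)
{:ok, "hello"} = RZstd.decompress(compressed)
```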
"""
alias RZstd.{Compressor, Decompressor, Handler, Native}
use Unsafe.Generator, docs: true, handler: {Handler, :unwrap}
@type compression_error ::
:binary_decode_failed
| :integer_decode_failed
| :compression_failed
| :binary_alloc_failed
| :binary_write_failed
@type compression_with_compressor_error :: compression_error | :compressor_decode_failed
@type decompression_error ::
:binary_decode_failed
| :decompression_failed
| :binary_alloc_failed
| :binary_write_failed
@type decompression_with_decompressor_error :: decompression_error | :decompressor_decode_failed
@unsafe [
{:compress, [2, 3]},
{:compress_dirty, [2, 3]},
{:decompress, [1, 2]},
{:decompress_dirty, [1, 2]}
]
@spec compress(binary, integer) :: {:ok, binary} | {:error, compression_error}
def compress(data, level), do: Native.compress(data, level)
@spec compress(binary, integer, Compressor.t()) ::
{:ok, binary} | {:error, compression_with_compressor_error}
def compress(data, level, compressor),
do: Native.compress_with_compressor(data, level, compressor)
@spec compress_dirty(binary, integer) :: {:ok, binary} | {:error, compression_error}
def compress_dirty(data, level), do: Native.compress_dirty(data, level)
@spec compress_dirty(binary, integer, Compressor.t()) ::
{:ok, binary} | {:error, compression_with_compressor_error}
def compress_dirty(data, level, compressor),
do: Native.compress_with_compressor_dirty(data, level, compressor)
@spec decompress(binary) :: {:ok, binary} | {:error, decompression_error}
def decompress(data), do: Native.decompress(data)
@spec decompress(binary, Decompressor.t()) ::
{:ok, binary} | {:error, decompression_with_decompressor_error}
def decompress(data, decompressor), do: Native.decompress_with_decompressor(data, decompressor)
@spec decompress_dirty(binary) :: {:ok, binary} | {:error, decompression_error}
def decompress_dirty(data), do: Native.decompress_dirty(data)
@spec decompress_dirty(binary, Decompressor.t()) ::
{:ok, binary} | {:error, decompression_with_decompressor_error}
def decompress_dirty(data, decompressor),
do: Native.decompress_with_decompressor_dirty(data, decompressor)
end
|
lib/r_zstd.ex
| 0.84599
| 0.511351
|
r_zstd.ex
|
starcoder
|
defmodule Wavexfront do
@moduledoc """
This guide provides what is necessary to configure and use Wavexfront and start monitoring your application.
## Configuration
The ```:wavexfront``` application needs to be configured properly in order to work.
This configuration can be done, for example, in ```config/config.exs```:
```elixir
config :wavexfront,
enabled: true,
histogram_1m: [
enabled: true,
host: "myhost.com"
]
```
This would enable Wavexfront and start a connection pool for 1-minute histogram support. It will also
send the metrics to "myhost.com" on the default port 40001.
* ```:enabled``` - Enables sending metrics to the Wavefront proxy. If set to ```false```, no attempt is made to send them.
If set to ```:log```, metrics are written to the logs. If set to ```true```, metrics are forwarded to
the correct connection pool and proxy configured for the type of metric sent. The default value is ```false```.
* One or more pool configurations, one per pool.
This library is quite configurable, so you can decide where to send the metrics and how
much concurrency you want for it (using connection pools).
There are currently 4 connection pools supported:
* ```histogram_1m```: for Minute histograms metrics
* ```histogram_1h```: for Hourly histograms metrics
* ```histogram_1d```: for daily histograms metrics
* ```counter_and_gauge```: for counter and gauge metrics
Each connection pool has its own set of configuration. The following options are available
* ```:enabled``` - Whether to enable this connection pool or not. When false, the connection pool will not be started.
* ```:host``` - The host to send the metrics to. Defaults to **localhost**.
* ```:port``` - The port to connect to. Defaults are:
* for histogram_1m: 40001
* for histogram_1h: 40002
* for histogram_1d: 40003
* for counter_and_gauge: 2878
* ```:timeout``` - Connection timeout
* ```:pool_size``` - Number of connections in the pool
## Sending metrics
A simple API exists for sending metrics. Currently we do not support storing metrics locally and then sending the data to the proxy.
We expect the proxy to store histogram or counter values and take care of them, which means you need to talk to the same proxy.
For counters, you will need to store the counter yourself (in a process, in state, or in an :ets table) if you decide to use them.
Here is how you would send a histogram value:
```elixir
Wavexfront.send_histogram("my_histo", value: 2, source: "my_host", labels: [name: :value])
```
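Gauges and counters follow the same shape; the metric names, values and labels below are
illustrative only:
```elixir
Wavexfront.send_gauge("queue_size", value: 10, source: "my_host", labels: [name: :value])
Wavexfront.send_counter("jobs_done", value: 1, source: "my_host", labels: [name: :value])
```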
"""
use Application
def start(_type, _args) do
children = [{Wavexfront.Client, client_config()}]
children =
case wavexfront_enabled?() do
true ->
children
|> start_histogram_pool(:histogram_1m)
|> start_histogram_pool(:histogram_1h)
|> start_histogram_pool(:histogram_1d)
|> start_counter_and_gauge_pool
_ ->
children
end
Supervisor.start_link(children, strategy: :one_for_one)
end
def send_metric(name, details, client \\ Wavexfront.Client) do
final_details = details |> Keyword.put(:name, name)
item = Wavexfront.Item.new(final_details)
client.emit(item)
end
def send_histogram(name, details, client \\ Wavexfront.Client) do
{interval, final_details} = Keyword.pop(details, :interval, :one_minute)
send_metric(name, final_details |> Keyword.put(:type, histogram_type_for(interval)), client)
end
def histogram_type_for(:one_minute = _interval), do: :histogram_1m
def histogram_type_for(:one_hour = _interval), do: :histogram_1h
def histogram_type_for(:one_day = _interval), do: :histogram_1d
def send_gauge(name, details, client \\ Wavexfront.Client) do
send_metric(name, details |> Keyword.put(:type, :gauge), client)
end
def send_counter(name, details, client \\ Wavexfront.Client) do
send_metric(name, details |> Keyword.put(:type, :counter), client)
end
def send_delta_counter(name, details, client \\ Wavexfront.Client) do
final_details =
details
|> Keyword.put(:type, :counter)
|> Keyword.put(:delta, true)
send_metric(name, final_details, client)
end
def wavexfront_enabled?() do
Application.get_env(:wavexfront, :enabled, false)
end
def histogram_enabled?(timerange) do
case Application.get_env(:wavexfront, timerange) do
nil ->
false
config ->
Keyword.get(config, :enabled, false)
end
end
def counter_and_gauge_enabled? do
case Application.get_env(:wavexfront, :counter_and_gauge) do
nil ->
false
config ->
Keyword.get(config, :enabled, false)
end
end
defp start_histogram_pool(children, timerange) do
if histogram_enabled?(timerange) do
children ++ connection_pool_spec(timerange)
else
children
end
end
defp start_counter_and_gauge_pool(children) do
if counter_and_gauge_enabled?() do
children ++ connection_pool_spec(:counter_and_gauge)
else
children
end
end
def connection_pool_spec(name) do
[
:poolboy.child_spec(
name,
poolboy_config(name),
worker_config(name)
)
]
end
def client_config() do
env = Application.get_all_env(:wavexfront)
env
|> Keyword.take([:enabled])
end
defp poolboy_config(name) do
[
{:name, {:local, name}},
{:worker_module, Wavexfront.Proxy.Worker},
{:size, pool_size_config(name)},
{:max_overflow, 1}
]
end
def pool_size_config(name) do
case Application.get_env(:wavexfront, name) do
nil ->
2
config ->
Keyword.get(config, :pool_size, 2)
end
end
def worker_config(name) do
default_config_parse(name)
end
def default_config_parse(name) do
config = Application.get_env(:wavexfront, name, [])
[
host: Keyword.get(config, :host, "localhost"),
port: Keyword.get(config, :port, default_port(name)),
timeout: Keyword.get(config, :timeout, 5000)
]
end
def default_port(:histogram_1m = _name), do: 40_001
def default_port(:histogram_1h = _name), do: 40_002
def default_port(:histogram_1d = _name), do: 40_003
def default_port(:counter_and_gauge = _name), do: 2878
end
|
lib/wavexfront.ex
| 0.843863
| 0.938857
|
wavexfront.ex
|
starcoder
|
defmodule ExActor.Operations do
@moduledoc """
Macros that can be used for simpler definition of `gen_server` operations
such as casts or calls.
"""
@doc """
Defines the initializer callback.
Examples:
# ignoring the input argument
definit do: HashSet.new
# using the input argument
definit x do
x + 1
end
# pattern matching
definit x, when: ..., do: ...
"""
defmacro definit(arg \\ quote(do: _), opts), do: do_definit([{:arg, arg} | opts])
defp do_definit(opts) do
quote bind_quoted: [opts: Macro.escape(opts, unquote: true)] do
if (opts[:when]) do
def init(unquote_splicing([opts[:arg]]))
when unquote(opts[:when]),
do: unquote(opts[:do])
else
def init(unquote_splicing([opts[:arg]])),
do: unquote(opts[:do])
end
end
end
@doc """
Defines the cast callback clause and a corresponding interface fun.
Examples:
defcast operation, do: noreply
defcast inc(x), state: state, do: new_state(state + x)
# omitting interface fun
defcast operation, export: false, do: ...
# pattern matching
defcast a(1), do: ...
defcast a(2), do: ...
defcast a(x), state: 1, do: ...
defcast a(x), when: x > 1, do: ...
defcast a(x), state: state, when: state > 1, do: ...
defcast a(_), do: ...
"""
defmacro defcast(req_def, options \\ [], body) do
generate_funs(:defcast, req_def, options ++ body)
end
@doc """
Defines the call callback clause and a corresponding interface fun.
Examples:
defcall operation, do: reply(response)
defcall get, state: state, do: reply(state)
defcall inc, state: state, do: set_and_reply(state + 1, response)
# timeout option
defcall long_call, state: state, timeout: :timer.seconds(10), do: ...
# omitting interface fun
defcall operation, export: false, do: ...
# pattern matching
defcall a(1), do: ...
defcall a(2), do: ...
defcall a(x), state: 1, do: ...
defcall a(x), when: x > 1, do: ...
defcall a(x), state: state, when: state > 1, do: ...
defcall a(_), do: ...
"""
defmacro defcall(req_def, options \\ [], body) do
generate_funs(:defcall, req_def, options ++ body)
end
@doc """
Defines the info callback clause. Responses work just like with casts.
Examples:
definfo :some_message, do: ...
definfo :another_message, state: ..., do:
"""
defmacro definfo(msg, opts \\ [], body) do
impl_definfo(msg, opts ++ body)
end
# Generation of call/cast functions. Essentially, this is just
# deferred to be evaluated in the module context.
defp generate_funs(type, req_def, options) do
quote bind_quoted: [
type: type,
req_def: Macro.escape(req_def, unquote: true),
options: Macro.escape(options, unquote: true)
] do
options = Keyword.merge(
options,
Module.get_attribute(__MODULE__, :exactor_global_options)
)
ExActor.Operations.def_request(type, req_def, options)
|> ExActor.Helper.inject_to_module(__MODULE__, __ENV__)
end
end
@doc false
def def_request(type, req_def, options) do
{req_name, args} = parse_req_def(req_def)
quote do
unquote(define_interface(type, req_name, args, options))
unquote(implement_handler(type, options, msg_payload(req_name, args)))
end
end
defp parse_req_def(req_name) when is_atom(req_name), do: {req_name, []}
defp parse_req_def({req_name, _, args}), do: {req_name, args || []}
defp msg_payload(req_name, nil), do: req_name
defp msg_payload(req_name, []), do: req_name
defp msg_payload(req_name, args), do: quote(do: {unquote_splicing([req_name | args])})
# Defines the interface function to call/cast
defp define_interface(type, req_name, args, options) do
passthrough_args = stub_args(args)
unless options[:export] == false do
quote bind_quoted: [
type: type,
req_name: req_name,
server_fun: server_fun(type),
arity: interface_arity(length(args), options[:export]),
interface_args: Macro.escape(interface_args(passthrough_args, options), unquote: true),
gen_server_args: Macro.escape(gen_server_args(options, type, msg_payload(req_name, passthrough_args)), unquote: true)
] do
unless HashSet.member?(@exported, {req_name, arity}) do
def unquote(req_name)(unquote_splicing(interface_args)) do
GenServer.unquote(server_fun)(unquote_splicing(gen_server_args))
end
@exported HashSet.put(@exported, {req_name, arity})
end
end
end
end
defp server_fun(:defcast), do: :cast
defp server_fun(:defcall), do: :call
defp interface_arity(args_num, nil), do: args_num + 1
defp interface_arity(args_num, true), do: args_num + 1
defp interface_arity(args_num, _), do: args_num
defp interface_args(passthrough_args, options) do
case options[:export] do
nil -> [quote(do: server) | passthrough_args]
true -> [quote(do: server) | passthrough_args]
_registered -> passthrough_args
end
end
defp stub_args([]), do: []
defp stub_args(args) do
for index <- 1..length(args) do
{:"arg#{index}", [], __MODULE__}
end
end
defp gen_server_args(options, type, msg) do
[server_ref(options), msg] ++ timeout_arg(options, type)
end
defp server_ref(options) do
case options[:export] do
default when default in [nil, false, true] -> quote(do: server)
local when is_atom(local) -> local
{:local, local} -> local
{:global, _} = global -> global
{:{}, _, [:via, _, _]} = via -> via
end
end
defp timeout_arg(options, type) do
case {type, options[:timeout]} do
{:defcall, timeout} when timeout != nil ->
[timeout]
_ -> []
end
end
@doc false
# Implements the handler function (handle_call, handle_cast, handle_info)
def implement_handler(type, options, msg) do
state_arg = get_state_identifier(options[:state])
{handler_name, handler_args} = handler_sig(type, options, msg, state_arg)
quote bind_quoted: [
type: type,
handler_name: handler_name,
handler_args: Macro.escape(handler_args, unquote: true),
guard: Macro.escape(options[:when], unquote: true),
body: Macro.escape(options[:do], unquote: true)
] do
if guard do
def unquote(handler_name)(unquote_splicing(handler_args))
when unquote(guard),
do: unquote(body)
else
def unquote(handler_name)(unquote_splicing(handler_args)),
do: unquote(body)
end
end
end
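# The :state option (or a wildcard when it is omitted) is matched against a
# var!(___generated_state) binding injected into the handler head.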
defp get_state_identifier(nil), do: get_state_identifier(quote(do: _))
defp get_state_identifier(any),
do: quote(do: unquote(any) = var!(___generated_state))
defp handler_sig(:defcall, options, msg, state_arg),
do: {:handle_call, [msg, options[:from] || quote(do: _from), state_arg]}
defp handler_sig(:defcast, _, msg, state_arg),
do: {:handle_cast, [msg, state_arg]}
defp handler_sig(:definfo, _, msg, state_arg),
do: {:handle_info, [msg, state_arg]}
# Implements handle_info
defp impl_definfo(msg, options) do
quote bind_quoted: [
msg: Macro.escape(msg, unquote: true),
options: Macro.escape(options, unquote: true)
] do
options = Keyword.merge(
options,
Module.get_attribute(__MODULE__, :exactor_global_options)
)
ExActor.Operations.implement_handler(:definfo, options, msg)
|> ExActor.Helper.inject_to_module(__MODULE__, __ENV__)
end
end
end
|
lib/exactor/operations.ex
| 0.670716
| 0.461927
|
operations.ex
|
starcoder
|
defmodule EWallet.Web.SearchParser do
@moduledoc """
This module allows parsing of arbitrary attributes into a search query.
It takes in a request's attributes, parses only the attributes needed for searching,
then builds those attributes into a search query on top of the given `Ecto.Queryable`.
"""
import Ecto.Query
@doc """
Parses search attributes and appends the resulting queries into the given queryable.
To search for one term in all fields, use:
%{"search_term" => "term"}
To search for different terms in specific fields, use the following format:
%{"search_terms" => %{ "field_name_1" => "term", "field_name_2" => "term2" }}
Where "field_name" is in the list of available search fields.
"""
@spec to_query(Ecto.Queryable.t(), map(), [atom()]) :: Ecto.Queryable.t()
@spec to_query(Ecto.Queryable.t(), map(), [atom()], map()) :: Ecto.Queryable.t()
def to_query(queryable, terms, fields, mapping \\ %{})
def to_query(queryable, %{"search_terms" => terms}, fields, mapping) when terms != nil do
terms
|> Enum.reduce(false, fn {field, value}, dynamic ->
field =
field
|> map_field(mapping)
|> allowed?(fields)
build_and_search_query(dynamic, field, value)
end)
|> handle_dynamic_return(queryable)
end
def to_query(queryable, %{"search_term" => term}, fields, _mapping) when term != nil do
fields
|> Enum.reduce(false, fn field, dynamic ->
build_or_search_query(dynamic, field, term)
end)
|> handle_dynamic_return(queryable)
end
def to_query(queryable, _, _, _), do: queryable
defp handle_dynamic_return(false, queryable), do: queryable
defp handle_dynamic_return(dynamic, queryable) do
from(queryable, where: ^dynamic)
end
defp map_field(original, mapping) do
case mapping[original] do
nil -> original
mapped -> mapped
end
end
defp allowed?(field, allowed_fields) when is_binary(field) do
field
|> String.to_existing_atom()
|> allowed?(allowed_fields)
rescue
_ in ArgumentError -> nil
end
defp allowed?(field, allowed_fields) do
cond do
Enum.member?(allowed_fields, {field, :uuid}) -> {field, :uuid}
Enum.member?(allowed_fields, field) -> field
true -> nil
end
end
defp build_or_search_query(dynamic, _field, nil), do: dynamic
defp build_or_search_query(dynamic, nil, _value), do: dynamic
defp build_or_search_query(false, {field, :uuid}, term) do
dynamic([q], ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%"))
end
defp build_or_search_query(dynamic, {field, :uuid}, term) do
dynamic([q], ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%") or ^dynamic)
end
defp build_or_search_query(false, field, term) do
dynamic([q], ilike(field(q, ^field), ^"%#{term}%"))
end
defp build_or_search_query(dynamic, field, term) do
dynamic([q], ilike(field(q, ^field), ^"%#{term}%") or ^dynamic)
end
defp build_and_search_query(dynamic, _field, nil), do: dynamic
defp build_and_search_query(dynamic, nil, _value), do: dynamic
defp build_and_search_query(false, {field, :uuid}, term) do
dynamic([q], ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%"))
end
defp build_and_search_query(dynamic, {field, :uuid}, term) do
dynamic([q], ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%") and ^dynamic)
end
defp build_and_search_query(false, field, term) do
dynamic([q], ilike(field(q, ^field), ^"%#{term}%"))
end
defp build_and_search_query(dynamic, field, term) do
dynamic([q], ilike(field(q, ^field), ^"%#{term}%") and ^dynamic)
end
end
|
apps/ewallet/lib/ewallet/web/search_parser.ex
|
defmodule Memento.Capture.Handler do
@moduledoc """
The `Memento.Capture.Handler` behaviour can be used
to implement a handler that knows how to authorize
and fetch data from a specific source.
A handler has an implicit lifecycle: authorization,
refresh and processing of new data.
This lifecycle is implemented by the `Memento.Capture.Feed` state machine.
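For illustration, a minimal handler might look roughly like the sketch below
(the module name, the data shape and the `:example` entry type are hypothetical,
not part of this library):
```elixir
defmodule MyApp.Capture.StaticHandler do
  @behaviour Memento.Capture.Handler

  @impl true
  def initial_data, do: %{url: "https://example.com/feed"}

  # This hypothetical source needs no real authorization.
  @impl true
  def authorize(data), do: {:ok, data}

  @impl true
  def refresh(data) do
    # A real handler would call the source here; we return a fixed entry.
    entries = [%{id: "1", title: "hello", saved_at: DateTime.utc_now()}]
    {:ok, entries, data}
  end

  # Assumes :example were a valid Memento.Schema.Entry.Type.t() value.
  @impl true
  def entry_type, do: :example

  @impl true
  def get_saved_at(%{saved_at: saved_at}), do: saved_at
end
```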
"""
alias Memento.Schema.Entry
@typedoc """
A handler's data is a map with freeform structure. This map is passed to
the handler at different stages of its lifecycle.
"""
@type data :: map()
@type content_list :: [map()]
@doc """
Returns the initial data needed by the handler to authenticate against the
data source and fetch information. This should include any
authorization credential or query params.
"""
@callback initial_data :: data
@doc """
Given initial data, perform the authorization step and return new data
with relevant information (e.g. an api token).
For data sources that don't require authorization, it's enough to just return
`{:ok, original_data}`.
"""
@callback authorize(data) :: {:ok, data} | {:error, term()}
@doc """
Given data that can include authorization tokens and/or query params,
fetch the source data and process it into a list of maps that can be saved
as content for database entries (see the docs for `Memento.Schema.Entry` for
more details).
Note that the function has to return (if needed) updated data, with updated params
(e.g. pagination). This is needed to provide a way to incrementally fetch changes
from the target source.
"""
@callback refresh(data) :: {:ok, content_list, data} | {:error, term()}
@doc """
A valid `Memento.Schema.Entry.Type.t` value.
"""
@callback entry_type :: Entry.Type.t()
@doc """
Given a piece of content (one of the elements returned in the list from
`refresh/1`), how to extract a `DateTime.t` that represents the point
in time when that very piece of content was saved.
"""
@callback get_saved_at(map()) :: DateTime.t()
end
|
lib/memento/capture/handler.ex
|
import Ecto.Query, only: [from: 2, join: 4, distinct: 3, select: 3]
defmodule Ecto.Associations do
@moduledoc """
Documents the functions required for associations to implement
in order to work with Ecto query mechanism.
This module contains documentation for those interested in
understanding how Ecto associations work internally. If you are
interested in an overview about associations in Ecto, you should
look into the documentation for `Ecto` and `Ecto.Schema`
modules.
## Associations
Associations work in Ecto via behaviours. Anyone can add new
associations to Ecto as long as they implement the callbacks
specified in this module.
Note though that, since the associations API is in development,
existing callback signatures may change and new callbacks may be added
in upcoming Ecto releases.
"""
@type t :: %{__struct__: atom, cardinality: :one | :many,
field: atom, owner_key: atom, owner: atom}
use Behaviour
@doc """
Builds the association struct.
The struct must be defined in the module that implements the
callback and it must contain at least the following keys:
* `:cardinality` - tells if the association is one to one
or one/many to many
* `:field` - tells the field in the owner struct where the
association should be stored
* `:owner` - the owner module of the association
* `:owner_key` - the key in the owner with the association value
"""
defcallback struct(module, field :: atom, opts :: Keyword.t) :: t
@doc """
Builds a model for the given association.
The struct to build from is given as argument in case default values
should be set in the struct.
Invoked by `Ecto.Model.build/2`.
"""
defcallback build(t, Ecto.Model.t) :: Ecto.Model.t
@doc """
Returns an association join query.
This callback receives the association struct and it must return
a query that retrieves all associated objects using joins up to
the owner association.
For example, a `has_many :comments` inside a `Post` module would
return:
from c in Comment, join: p in Post, on: c.post_id == p.id
Note all the logic must be expressed inside joins, as fields like
`where` and `order_by` won't be used by the caller.
This callback is invoked when `join: assoc(p, :comments)` is used
inside queries.
"""
defcallback joins_query(t) :: Ecto.Query.t
@doc """
Returns the association query.
This callback receives the association struct and it must return
a query that retrieves all associated objects with the given
values for the owner key.
This callback is used by `Ecto.Model.assoc/2`.
"""
defcallback assoc_query(t, values :: [term]) :: Ecto.Query.t
@doc """
Returns information used by the preloader.
"""
defcallback preload_info(t) ::
{:assoc, t, atom} | {:through, t, [atom]}
@doc """
Retrieves the association from the given model.
"""
def association_from_model!(model, assoc) do
model.__schema__(:association, assoc) ||
raise ArgumentError, "model #{inspect model} does not have association #{inspect assoc}"
end
@doc """
Returns the association key for the given module with the given prefix.
## Examples
iex> Ecto.Associations.association_key(Hello.World, :id)
:world_id
iex> Ecto.Associations.association_key(Hello.HTTP, :id)
:http_id
iex> Ecto.Associations.association_key(Hello.HTTPServer, :id)
:http_server_id
"""
def association_key(module, suffix) do
prefix = module |> Module.split |> List.last |> underscore
:"#{prefix}_#{suffix}"
end
defp underscore(""), do: ""
defp underscore(<<h, t :: binary>>) do
<<to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<h, t, rest :: binary>>, _) when h in ?A..?Z and not t in ?A..?Z do
<<?_, to_lower_char(h), t>> <> do_underscore(rest, t)
end
defp do_underscore(<<h, t :: binary>>, prev) when h in ?A..?Z and not prev in ?A..?Z do
<<?_, to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<?-, t :: binary>>, _) do
<<?_>> <> do_underscore(t, ?-)
end
defp do_underscore(<< "..", t :: binary>>, _) do
<<"..">> <> underscore(t)
end
defp do_underscore(<<?.>>, _), do: <<?.>>
defp do_underscore(<<?., t :: binary>>, _) do
<<?/>> <> underscore(t)
end
defp do_underscore(<<h, t :: binary>>, _) do
<<to_lower_char(h)>> <> do_underscore(t, h)
end
defp do_underscore(<<>>, _) do
<<>>
end
defp to_lower_char(char) when char in ?A..?Z, do: char + 32
defp to_lower_char(char), do: char
end
defmodule Ecto.Associations.NotLoaded do
@moduledoc """
Struct returned by one to one associations when they are not loaded.
The fields are:
* `__field__` - the association field in `__owner__`
* `__owner__` - the model that owns the association
"""
defstruct [:__field__, :__owner__]
defimpl Inspect do
def inspect(not_loaded, _opts) do
msg = "association #{inspect not_loaded.__field__} is not loaded"
~s(#Ecto.Associations.NotLoaded<#{msg}>)
end
end
end
defmodule Ecto.Associations.Has do
@moduledoc """
The association struct for `has_one` and `has_many` associations.
Its fields are:
* `cardinality` - The association cardinality
* `field` - The name of the association field on the model
* `owner` - The model where the association was defined
* `assoc` - The model that is associated
* `owner_key` - The key on the `owner` model used for the association
* `assoc_key` - The key on the `associated` model used for the association
"""
@behaviour Ecto.Associations
defstruct [:cardinality, :field, :owner, :assoc, :owner_key, :assoc_key]
@doc false
def struct(module, name, opts) do
ref =
cond do
ref = opts[:references] ->
ref
primary_key = Module.get_attribute(module, :primary_key) ->
elem(primary_key, 0)
true ->
raise ArgumentError, "need to set :references option for " <>
"association #{inspect name} when model has no primary key"
end
unless Module.get_attribute(module, :ecto_fields)[ref] do
raise ArgumentError, "model does not have the field #{inspect ref} used by " <>
"association #{inspect name}, please set the :references option accordingly"
end
assoc = Keyword.fetch!(opts, :queryable)
unless is_atom(assoc) do
raise ArgumentError, "association queryable must be a model, got: #{inspect assoc}"
end
if opts[:through] do
raise ArgumentError, "invalid association #{inspect name}. When using the :through " <>
"option, the model should not be passed as second argument"
end
%__MODULE__{
field: name,
cardinality: Keyword.fetch!(opts, :cardinality),
owner: module,
assoc: assoc,
owner_key: ref,
assoc_key: opts[:foreign_key] || Ecto.Associations.association_key(module, ref)
}
end
@doc false
def build(%{assoc: assoc, owner_key: owner_key, assoc_key: assoc_key}, struct) do
Map.put apply(assoc, :__struct__, []), assoc_key, Map.get(struct, owner_key)
end
@doc false
def joins_query(refl) do
from o in refl.owner,
join: q in ^refl.assoc,
on: field(q, ^refl.assoc_key) == field(o, ^refl.owner_key)
end
@doc false
def assoc_query(refl, values) do
from x in refl.assoc,
where: field(x, ^refl.assoc_key) in ^values
end
@doc false
def preload_info(refl) do
{:assoc, refl, refl.assoc_key}
end
end
defmodule Ecto.Associations.HasThrough do
@moduledoc """
The association struct for `has_one` and `has_many` through associations.
Its fields are:
* `cardinality` - The association cardinality
* `field` - The name of the association field on the model
* `owner` - The model where the association was defined
* `owner_key` - The key on the `owner` model used for the association
* `through` - The through associations
"""
@behaviour Ecto.Associations
defstruct [:cardinality, :field, :owner, :owner_key, :through]
@doc false
def struct(module, name, opts) do
through = Keyword.fetch!(opts, :through)
refl =
case through do
[h,_|_] ->
Module.get_attribute(module, :ecto_assocs)[h]
_ ->
raise ArgumentError, ":through expects a list with at least two entries: " <>
"the association in the current module and one step through, got: #{inspect through}"
end
unless refl do
raise ArgumentError, "model does not have the association #{inspect hd(through)} " <>
"used by association #{inspect name}, please ensure the association exists and " <>
"is defined before the :through one"
end
%__MODULE__{
field: name,
cardinality: Keyword.fetch!(opts, :cardinality),
through: through,
owner: module,
owner_key: refl.owner_key,
}
end
@doc false
def build(%{field: name}, %{__struct__: struct}) do
raise ArgumentError,
"cannot build through association #{inspect name} for #{inspect struct}. " <>
"Instead build the intermediate steps explicitly."
end
@doc false
def preload_info(refl) do
{:through, refl, refl.through}
end
@doc false
def joins_query(%{owner: owner, through: through}) do
joins_query(owner, through)
end
defp joins_query(query, through) do
Enum.reduce(through, {query, 0}, fn current, {acc, counter} ->
{join(acc, :inner, [x: counter], assoc(x, ^current)), counter + 1}
end) |> elem(0)
end
@doc false
def assoc_query(%{owner: owner, through: [h|t]}, values) do
refl = owner.__schema__(:association, h)
query =
refl.__struct__.assoc_query(refl, values)
|> joins_query(t)
|> Ecto.Query.Planner.prepare_sources()
{joins, {mapping, last}} = rewrite_joins(query)
wheres = rewrite_many(query.wheres, mapping)
from = last.source
[_|joins] = Enum.reverse([%{last | source: query.from}|joins])
%{query | from: from, joins: joins, wheres: wheres, sources: nil}
|> distinct([x], x)
|> select([x], x)
end
alias Ecto.Query.JoinExpr
defp rewrite_joins(query) do
count = length(query.joins)
Enum.map_reduce(query.joins, {%{0 => count}, nil}, fn
%JoinExpr{ix: ix, on: on} = join, {acc, _} ->
acc = Map.put(acc, ix, count - Map.size(acc))
join = %{join | ix: nil, on: rewrite_expr(on, acc)}
{join, {acc, join}}
end)
end
defp rewrite_expr(%{expr: expr, params: params} = part, mapping) do
expr =
Macro.prewalk expr, fn
{:&, meta, [ix]} ->
{:&, meta, [Map.fetch!(mapping, ix)]}
other ->
other
end
params =
Enum.reduce params, params, fn
{key, {val, {composite, {ix, field}}}}, acc when is_integer(ix) ->
Map.put(acc, key, {val, {composite, {Map.fetch!(mapping, ix), field}}})
{key, {val, {ix, field}}}, acc when is_integer(ix) ->
Map.put(acc, key, {val, {Map.fetch!(mapping, ix), field}})
{_, _}, acc ->
acc
end
%{part | expr: expr, params: params}
end
defp rewrite_many(exprs, acc) do
Enum.map(exprs, &rewrite_expr(&1, acc))
end
end
defmodule Ecto.Associations.BelongsTo do
@moduledoc """
The association struct for a `belongs_to` association.
Its fields are:
* `cardinality` - The association cardinality
* `field` - The name of the association field on the model
* `owner` - The model where the association was defined
* `assoc` - The model that is associated
* `owner_key` - The key on the `owner` model used for the association
* `assoc_key` - The key on the `assoc` model used for the association
"""
@behaviour Ecto.Associations
defstruct [:cardinality, :field, :owner, :assoc, :owner_key, :assoc_key]
@doc false
def struct(module, name, opts) do
ref =
cond do
ref = opts[:references] ->
ref
primary_key = Module.get_attribute(module, :primary_key) ->
elem(primary_key, 0)
true ->
raise ArgumentError, "need to set :references option for " <>
"association #{inspect name} when model has no primary key"
end
assoc = Keyword.fetch!(opts, :queryable)
unless is_atom(assoc) do
raise ArgumentError, "association queryable must be a model, got: #{inspect assoc}"
end
%__MODULE__{
field: name,
cardinality: :one,
owner: module,
assoc: assoc,
owner_key: Keyword.fetch!(opts, :foreign_key),
assoc_key: ref
}
end
@doc false
def build(%{assoc: assoc}, _struct) do
apply(assoc, :__struct__, [])
end
@doc false
def joins_query(refl) do
from o in refl.owner,
join: q in ^refl.assoc,
on: field(q, ^refl.assoc_key) == field(o, ^refl.owner_key)
end
@doc false
def assoc_query(refl, values) do
from x in refl.assoc,
where: field(x, ^refl.assoc_key) in ^values
end
@doc false
def preload_info(refl) do
{:assoc, refl, refl.assoc_key}
end
end
|
lib/ecto/associations.ex
|
defmodule AMQP.Basic do
@moduledoc """
Functions to publish, consume and acknowledge messages.
"""
import AMQP.Core
alias AMQP.{Channel, Utils}
@type error :: {:error, reason :: :blocked | :closing}
@doc """
Publishes a message to an Exchange.
This method publishes a message to a specific exchange. The message will be routed
to queues as defined by the exchange configuration and distributed to any subscribers.
The parameter `exchange` specifies the name of the exchange to publish to. If set to
empty string, it publishes to the default exchange.
The `routing_key` parameter specifies the routing key for the message.
The `payload` parameter specifies the message content as a binary.
In addition to the previous parameters, the following options can be used:
# Options
* `:mandatory` - If set, returns an error if the broker can't route the message to a queue (default `false`);
* `:immediate` - If set, returns an error if the broker can't deliver the message to a consumer immediately (default `false`);
* `:content_type` - MIME Content type;
* `:content_encoding` - MIME Content encoding;
* `:headers` - Message headers. Can be used with headers Exchanges;
* `:persistent` - If set, uses persistent delivery mode. Messages marked as `persistent` that are delivered to `durable` \
queues will be logged to disk;
* `:correlation_id` - application correlation identifier;
* `:priority` - message priority, ranging from 0 to 9;
* `:reply_to` - name of the reply queue;
* `:expiration` - how long the message is valid (in milliseconds);
* `:message_id` - message identifier;
* `:timestamp` - timestamp associated with this message (epoch time);
* `:type` - message type as a string;
* `:user_id` - creating user ID. RabbitMQ will validate this against the active connection user;
* `:app_id` - publishing application ID.
## Examples
iex> AMQP.Basic.publish chan, \"my_exchange\", \"my_routing_key\", \"Hello World!\", persistent: true
:ok
"""
@spec publish(Channel.t, String.t, String.t, String.t, keyword) :: :ok | error
def publish(%Channel{pid: pid}, exchange, routing_key, payload, options \\ []) do
basic_publish =
basic_publish(exchange: exchange,
routing_key: routing_key,
mandatory: Keyword.get(options, :mandatory, false),
immediate: Keyword.get(options, :immediate, false))
p_basic =
p_basic(content_type: Keyword.get(options, :content_type, :undefined),
content_encoding: Keyword.get(options, :content_encoding, :undefined),
headers: Keyword.get(options, :headers, :undefined) |> Utils.to_type_tuple,
delivery_mode: if(options[:persistent], do: 2, else: 1),
priority: Keyword.get(options, :priority, :undefined),
correlation_id: Keyword.get(options, :correlation_id, :undefined),
reply_to: Keyword.get(options, :reply_to, :undefined),
expiration: Keyword.get(options, :expiration, :undefined),
message_id: Keyword.get(options, :message_id, :undefined),
timestamp: Keyword.get(options, :timestamp, :undefined),
type: Keyword.get(options, :type, :undefined),
user_id: Keyword.get(options, :user_id, :undefined),
app_id: Keyword.get(options, :app_id, :undefined),
cluster_id: Keyword.get(options, :cluster_id, :undefined))
case :amqp_channel.call(pid, basic_publish, amqp_msg(props: p_basic, payload: payload)) do
:ok -> :ok
error -> {:error, error}
end
end
@doc """
Sets the message prefetch count or prefetch size (in bytes). If `global` is set to `true` this
applies to the entire Connection, otherwise it applies only to the specified Channel.
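For example (a sketch, assuming `chan` is an open channel):
```elixir
:ok = AMQP.Basic.qos(chan, prefetch_count: 10)
```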
"""
@spec qos(Channel.t, keyword) :: :ok | error
def qos(%Channel{pid: pid}, options \\ []) do
basic_qos = basic_qos(prefetch_size: Keyword.get(options, :prefetch_size, 0),
prefetch_count: Keyword.get(options, :prefetch_count, 0),
global: Keyword.get(options, :global, false))
case :amqp_channel.call(pid, basic_qos) do
basic_qos_ok() -> :ok
error -> {:error, error}
end
end
@doc """
Acknowledges one or more messages. If `multiple` is set to `true`, all messages up to the one
specified by `delivery_tag` are considered acknowledged by the server.
"""
@spec ack(Channel.t, String.t, keyword) :: :ok | error
def ack(%Channel{pid: pid}, delivery_tag, options \\ []) do
basic_ack = basic_ack(delivery_tag: delivery_tag,
multiple: Keyword.get(options, :multiple, false))
case :amqp_channel.call(pid, basic_ack) do
:ok -> :ok
error -> {:error, error}
end
end
@doc """
Rejects (and, optionally, requeues) a message.
"""
@spec reject(Channel.t, String.t, keyword) :: :ok | error
def reject(%Channel{pid: pid}, delivery_tag, options \\ []) do
basic_reject = basic_reject(delivery_tag: delivery_tag,
requeue: Keyword.get(options, :requeue, true))
case :amqp_channel.call(pid, basic_reject) do
:ok -> :ok
error -> {:error, error}
end
end
@doc """
Negative acknowledge of one or more messages. If `multiple` is set to `true`, all messages up to the
one specified by `delivery_tag` are considered as not acknowledged by the server. If `requeue` is set
to `true`, the message will be returned to the queue and redelivered to the next available consumer.
This is a RabbitMQ specific extension to AMQP 0.9.1. It is equivalent to reject, but allows rejecting
multiple messages using the `multiple` option.
"""
@spec nack(Channel.t, String.t, keyword) :: :ok | error
def nack(%Channel{pid: pid}, delivery_tag, options \\ []) do
basic_nack = basic_nack(delivery_tag: delivery_tag,
multiple: Keyword.get(options, :multiple, false),
requeue: Keyword.get(options, :requeue, true))
case :amqp_channel.call(pid, basic_nack) do
:ok -> :ok
error -> {:error, error}
end
end
@doc """
Polls a queue for an existing message.
Returns the tuple `{:empty, meta}` if the queue is empty or the tuple {:ok, payload, meta} if at least
one message exists in the queue. The returned meta map includes the entry `message_count` with the
current number of messages in the queue.
Receiving messages by polling a queue is not as efficient as subscribing a consumer to a queue,
so consideration should be taken when receiving large volumes of messages.
Setting the `no_ack` option to true will tell the broker that the receiver will not send an acknowledgement of
the message. Once the broker believes it has delivered a message, it is free to assume that the consuming application
has taken responsibility for it. In general, most applications will not want these semantics; rather, they
will want to explicitly acknowledge the receipt of a message and keep `no_ack` at its default value of false.
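For example (a sketch, assuming `chan` is an open channel and the queue exists):
```elixir
case AMQP.Basic.get(chan, "my_queue") do
  {:ok, payload, meta} ->
    # Process the payload, then acknowledge it.
    AMQP.Basic.ack(chan, meta.delivery_tag)
  {:empty, _meta} ->
    :no_message
end
```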
"""
@spec get(Channel.t, String.t, keyword) :: {:ok, String.t, map} | {:empty, map} | error
def get(%Channel{pid: pid}, queue, options \\ []) do
case :amqp_channel.call pid, basic_get(queue: queue, no_ack: Keyword.get(options, :no_ack, false)) do
{basic_get_ok(delivery_tag: delivery_tag,
redelivered: redelivered,
exchange: exchange,
routing_key: routing_key,
message_count: message_count),
amqp_msg(props: p_basic(content_type: content_type,
content_encoding: content_encoding,
headers: headers,
delivery_mode: delivery_mode,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id), payload: payload)} ->
{:ok, payload, %{delivery_tag: delivery_tag,
redelivered: redelivered,
exchange: exchange,
routing_key: routing_key,
message_count: message_count,
content_type: content_type,
content_encoding: content_encoding,
headers: headers,
persistent: delivery_mode == 2,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id}}
basic_get_empty(cluster_id: cluster_id) ->
{:empty, %{cluster_id: cluster_id}}
error -> {:error, error}
end
end
@doc """
Asks the server to redeliver all unacknowledged messages on a specified channel.
If `requeue` is set to `true` the server will attempt to requeue the message,
potentially delivering it to another subscriber. Otherwise it will be redelivered
to the original recipient.
"""
@spec recover(Channel.t, keyword) :: :ok | error
def recover(%Channel{pid: pid}, options \\ []) do
basic_recover = basic_recover(requeue: Keyword.get(options, :requeue, false))
case :amqp_channel.call(pid, basic_recover) do
basic_recover_ok() -> :ok
error -> {:error, error}
end
end
@doc """
Registers a queue consumer process. The `pid` of the process can be set using
the `consumer_pid` argument and defaults to the calling process.
The consumer process will receive the following data structures:
* `{:basic_deliver, payload, meta}` - This is sent for each message consumed, where \
`payload` contains the message content and `meta` contains all the metadata set when \
sending with Basic.publish or additional info set by the broker;
* `{:basic_consume_ok, %{consumer_tag: consumer_tag}}` - Sent when the consumer \
process is registered with Basic.consume. The caller receives the same information \
as the return of Basic.consume;
* `{:basic_cancel, %{consumer_tag: consumer_tag, no_wait: no_wait}}` - Sent by the \
broker when the consumer is unexpectedly cancelled (such as after a queue deletion)
* `{:basic_cancel_ok, %{consumer_tag: consumer_tag}}` - Sent to the consumer process after a call to Basic.cancel
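A minimal consuming process might look roughly like this (a sketch, assuming `chan` is an open channel and the queue exists):
```elixir
{:ok, _consumer_tag} = AMQP.Basic.consume(chan, "my_queue")
receive do
  {:basic_consume_ok, %{consumer_tag: _tag}} -> :ok
end
receive do
  {:basic_deliver, payload, meta} ->
    IO.inspect(payload)
    AMQP.Basic.ack(chan, meta.delivery_tag)
end
```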
"""
@spec consume(Channel.t, String.t, pid | nil, keyword) :: {:ok, String.t} | error
def consume(%Channel{} = chan, queue, consumer_pid \\ nil, options \\ []) do
basic_consume =
basic_consume(queue: queue,
consumer_tag: Keyword.get(options, :consumer_tag, ""),
no_local: Keyword.get(options, :no_local, false),
no_ack: Keyword.get(options, :no_ack, false),
exclusive: Keyword.get(options, :exclusive, false),
nowait: Keyword.get(options, :no_wait, false),
arguments: Keyword.get(options, :arguments, []))
consumer_pid = consumer_pid || self()
adapter_pid = spawn fn ->
Process.flag(:trap_exit, true)
Process.monitor(consumer_pid)
Process.monitor(chan.pid)
do_start_consumer(chan, consumer_pid)
end
case :amqp_channel.subscribe(chan.pid, basic_consume, adapter_pid) do
basic_consume_ok(consumer_tag: consumer_tag) -> {:ok, consumer_tag}
error -> {:error, error}
end
end
defp do_start_consumer(chan, consumer_pid) do
receive do
basic_consume_ok(consumer_tag: consumer_tag) ->
send consumer_pid, {:basic_consume_ok, %{consumer_tag: consumer_tag}}
do_consume(chan, consumer_pid, consumer_tag)
error ->
send consumer_pid, error
end
end
defp do_consume(chan, consumer_pid, consumer_tag) do
receive do
{basic_deliver(consumer_tag: consumer_tag,
delivery_tag: delivery_tag,
redelivered: redelivered,
exchange: exchange,
routing_key: routing_key),
amqp_msg(props: p_basic(content_type: content_type,
content_encoding: content_encoding,
headers: headers,
delivery_mode: delivery_mode,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id), payload: payload)} ->
send consumer_pid, {:basic_deliver, payload, %{consumer_tag: consumer_tag,
delivery_tag: delivery_tag,
redelivered: redelivered,
exchange: exchange,
routing_key: routing_key,
content_type: content_type,
content_encoding: content_encoding,
headers: headers,
persistent: delivery_mode == 2,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id}}
do_consume(chan, consumer_pid, consumer_tag)
basic_consume_ok(consumer_tag: consumer_tag) ->
send consumer_pid, {:basic_consume_ok, %{consumer_tag: consumer_tag}}
do_consume(chan, consumer_pid, consumer_tag)
basic_cancel_ok(consumer_tag: consumer_tag) ->
send consumer_pid, {:basic_cancel_ok, %{consumer_tag: consumer_tag}}
basic_cancel(consumer_tag: consumer_tag, nowait: no_wait) ->
send consumer_pid, {:basic_cancel, %{consumer_tag: consumer_tag, no_wait: no_wait}}
{:DOWN, _ref, :process, ^consumer_pid, reason} ->
cancel(chan, consumer_tag)
exit(reason)
{:DOWN, _ref, :process, _pid, reason} ->
exit(reason)
end
end
@doc """
End a queue consumer.
This method cancels a consumer. This does not affect already delivered messages, but it does
mean the server will not send any more messages for that consumer. The client may receive an
arbitrary number of messages in between sending the cancel method and receiving the reply.
"""
@spec cancel(Channel.t, String.t, keyword) :: {:ok, String.t} | error
def cancel(%Channel{pid: pid}, consumer_tag, options \\ []) do
basic_cancel = basic_cancel(consumer_tag: consumer_tag, nowait: Keyword.get(options, :no_wait, false))
case :amqp_channel.call(pid, basic_cancel) do
basic_cancel_ok(consumer_tag: consumer_tag) -> {:ok, consumer_tag}
error -> {:error, error}
end
end
@doc """
Registers a handler to deal with returned messages. The registered
process will receive `{:basic_return, payload, meta}` data structures.
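For example (a sketch, assuming `chan` is an open channel):
```elixir
:ok = AMQP.Basic.return(chan, self())
receive do
  {:basic_return, payload, meta} ->
    IO.inspect({payload, meta.reply_text})
end
```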
"""
@spec return(Channel.t, pid) :: :ok
def return(%Channel{pid: pid}, return_handler_pid) do
adapter_pid = spawn fn ->
Process.flag(:trap_exit, true)
Process.monitor(return_handler_pid)
Process.monitor(pid)
handle_return_messages(pid, return_handler_pid)
end
:amqp_channel.register_return_handler(pid, adapter_pid)
end
@doc """
Removes the return handler, if it exists. Does nothing if there is no
such handler.
"""
@spec cancel_return(Channel.t) :: :ok
def cancel_return(%Channel{pid: pid}) do
:amqp_channel.unregister_return_handler(pid)
end
defp handle_return_messages(chan_pid, return_handler_pid) do
receive do
{basic_return(reply_code: reply_code,
reply_text: reply_text,
exchange: exchange,
routing_key: routing_key),
amqp_msg(props: p_basic(content_type: content_type,
content_encoding: content_encoding,
headers: headers,
delivery_mode: delivery_mode,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id), payload: payload)} ->
send return_handler_pid, {:basic_return, payload, %{reply_code: reply_code,
reply_text: reply_text,
exchange: exchange,
routing_key: routing_key,
content_type: content_type,
content_encoding: content_encoding,
headers: headers,
persistent: delivery_mode == 2,
priority: priority,
correlation_id: correlation_id,
reply_to: reply_to,
expiration: expiration,
message_id: message_id,
timestamp: timestamp,
type: type,
user_id: user_id,
app_id: app_id,
cluster_id: cluster_id}}
handle_return_messages(chan_pid, return_handler_pid)
{:DOWN, _ref, :process, _pid, reason} ->
exit(reason)
end
end
end
|
lib/amqp/basic.ex
|
defmodule Ecto.Adapters.Postgres do
@moduledoc """
Adapter module for PostgreSQL.
It uses `postgrex` for communicating to the database
and a connection pool, such as `poolboy`.
## Features
* Full query support (including joins, preloads and associations)
* Support for transactions
* Support for data migrations
* Support for ecto.create and ecto.drop operations
* Support for transactional tests via `Ecto.Adapters.SQL`
## Options
Postgres options are split into different categories, described
below. All options should be given via the repository
configuration. These options are also passed to the module
specified in the `:pool` option, so check that module's
documentation for more options.
### Compile time options
Those options should be set in the config file and require
recompilation in order to take effect.
* `:adapter` - The adapter name, in this case, `Ecto.Adapters.Postgres`
* `:name`- The name of the Repo supervisor process
* `:pool` - The connection pool module, defaults to `DBConnection.Poolboy`
* `:pool_timeout` - The default timeout to use on pool calls, defaults to `5000`
* `:timeout` - The default timeout to use on queries, defaults to `15000`
### Connection options
* `:hostname` - Server hostname
* `:port` - Server port (default: 5432)
* `:username` - Username
* `:password` - User password
* `:ssl` - Set to true if ssl should be used (default: false)
* `:ssl_opts` - A list of ssl options, see Erlang's `ssl` docs
* `:parameters` - Keyword list of connection parameters
* `:connect_timeout` - The timeout for establishing new connections (default: 5000)
* `:socket_options` - Specifies socket configuration
The `:socket_options` are particularly useful when configuring the size
of both send and receive buffers. For example, when Ecto starts with a
pool of 20 connections, the memory usage may quickly grow from 20MB to
50MB based on the operating system default values for TCP buffers. It is
advised to stick with the operating system defaults but they can be
tweaked if desired:
socket_options: [recbuf: 8192, sndbuf: 8192]
We also recommend developers to consult the
[Postgrex documentation](https://hexdocs.pm/postgrex/Postgrex.html#start_link/1)
for a complete listing of all supported options.
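For example, a repository using these connection options might be configured like this (a sketch with hypothetical values):
```elixir
config :my_app, MyApp.Repo,
  adapter: Ecto.Adapters.Postgres,
  username: "postgres",
  password: "postgres",
  database: "my_app_dev",
  hostname: "localhost"
```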
### Storage options
* `:encoding` - the database encoding (default: "UTF8")
* `:template` - the template to create the database from
* `:lc_collate` - the collation order
* `:lc_ctype` - the character classification
* `:dump_path` - where to place dumped structures
## Extensions
Both PostgreSQL and its adapter for Elixir, Postgrex, support an
extension system. If you want to use custom extensions for Postgrex
alongside Ecto, you must define a type module with your extensions.
Create a new file anywhere in your application with the following:
Postgrex.Types.define(MyApp.PostgresTypes,
[MyExtension.Foo, MyExtensionBar] ++ Ecto.Adapters.Postgres.extensions(),
json: Poison)
Once your type module is defined, you can configure the repository to use it:
config :my_app, MyApp.Repo, types: MyApp.PostgresTypes
"""
# Inherit all behaviour from Ecto.Adapters.SQL
use Ecto.Adapters.SQL, :postgrex
# And provide a custom storage implementation
@behaviour Ecto.Adapter.Storage
@behaviour Ecto.Adapter.Structure
@doc """
All Ecto extensions for Postgrex.
"""
def extensions do
[Ecto.Adapters.Postgres.Date, Ecto.Adapters.Postgres.Time,
Ecto.Adapters.Postgres.Timestamp, Ecto.Adapters.Postgres.TimestampTZ]
end
# Support arrays in place of IN
@doc false
def dumpers({:embed, _} = type, _), do: [&Ecto.Adapters.SQL.dump_embed(type, &1)]
def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}]
def dumpers(:binary_id, type), do: [type, Ecto.UUID]
def dumpers(_, type), do: [type]
## Storage API
@doc false
def storage_up(opts) do
database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration"
encoding = opts[:encoding] || "UTF8"
opts = Keyword.put(opts, :database, "template1")
command =
~s(CREATE DATABASE "#{database}" ENCODING '#{encoding}')
|> concat_if(opts[:template], &"TEMPLATE=#{&1}")
|> concat_if(opts[:lc_ctype], &"LC_CTYPE='#{&1}'")
|> concat_if(opts[:lc_collate], &"LC_COLLATE='#{&1}'")
case run_query(command, opts) do
{:ok, _} ->
:ok
{:error, %{postgres: %{code: :duplicate_database}}} ->
{:error, :already_up}
{:error, error} ->
{:error, Exception.message(error)}
end
end
defp concat_if(content, nil, _fun), do: content
defp concat_if(content, value, fun), do: content <> " " <> fun.(value)
@doc false
def storage_down(opts) do
database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration"
command = "DROP DATABASE \"#{database}\""
opts = Keyword.put(opts, :database, "template1")
case run_query(command, opts) do
{:ok, _} ->
:ok
{:error, %{postgres: %{code: :invalid_catalog_name}}} ->
{:error, :already_down}
{:error, error} ->
{:error, Exception.message(error)}
end
end
@doc false
def supports_ddl_transaction? do
true
end
@doc false
def structure_dump(default, config) do
table = config[:migration_source] || "schema_migrations"
with {:ok, versions} <- select_versions(table, config),
{:ok, path} <- pg_dump(default, config),
do: append_versions(table, versions, path)
end
defp select_versions(table, config) do
case run_query(~s[SELECT version FROM "#{table}" ORDER BY version], config) do
{:ok, %{rows: rows}} -> {:ok, Enum.map(rows, &hd/1)}
{:error, %{postgres: %{code: :undefined_table}}} -> {:ok, []}
{:error, _} = error -> error
end
end
defp pg_dump(default, config) do
path = config[:dump_path] || Path.join(default, "structure.sql")
File.mkdir_p!(Path.dirname(path))
case run_with_cmd("pg_dump", config, ["--file", path, "--schema-only", "--no-acl",
"--no-owner", config[:database]]) do
{_output, 0} ->
{:ok, path}
{output, _} ->
{:error, output}
end
end
defp append_versions(_table, [], path) do
{:ok, path}
end
defp append_versions(table, versions, path) do
sql =
~s[INSERT INTO "#{table}" (version) VALUES ] <>
Enum.map_join(versions, ", ", &"(#{&1})") <>
~s[;\n\n]
File.open!(path, [:append], fn file ->
IO.write(file, sql)
end)
{:ok, path}
end
@doc false
def structure_load(default, config) do
path = config[:dump_path] || Path.join(default, "structure.sql")
case run_with_cmd("psql", config, ["--quiet", "--file", path, config[:database]]) do
{_output, 0} -> {:ok, path}
{output, _} -> {:error, output}
end
end
## Helpers
defp run_query(sql, opts) do
{:ok, _} = Application.ensure_all_started(:postgrex)
opts =
opts
|> Keyword.drop([:name, :log])
|> Keyword.put(:pool, DBConnection.Connection)
|> Keyword.put(:backoff_type, :stop)
{:ok, pid} = Task.Supervisor.start_link
task = Task.Supervisor.async_nolink(pid, fn ->
{:ok, conn} = Postgrex.start_link(opts)
value = Ecto.Adapters.Postgres.Connection.execute(conn, sql, [], opts)
GenServer.stop(conn)
value
end)
timeout = Keyword.get(opts, :timeout, 15_000)
case Task.yield(task, timeout) || Task.shutdown(task) do
{:ok, {:ok, result}} ->
{:ok, result}
{:ok, {:error, error}} ->
{:error, error}
{:exit, {%{__struct__: struct} = error, _}}
when struct in [Postgrex.Error, DBConnection.Error] ->
{:error, error}
{:exit, reason} ->
{:error, RuntimeError.exception(Exception.format_exit(reason))}
nil ->
{:error, RuntimeError.exception("command timed out")}
end
end
defp run_with_cmd(cmd, opts, opt_args) do
unless System.find_executable(cmd) do
raise "could not find executable `#{cmd}` in path, " <>
"please guarantee it is available before running ecto commands"
end
env =
[{"PGCONNECT_TIMEOUT", "10"}]
env =
if password = opts[:password] do
[{"PGPASSWORD", password}|env]
else
env
end
args =
[]
args =
if username = opts[:username], do: ["-U", username|args], else: args
args =
if port = opts[:port], do: ["-p", to_string(port)|args], else: args
host = opts[:hostname] || System.get_env("PGHOST") || "localhost"
args = ["--host", host|args]
args = args ++ opt_args
System.cmd(cmd, args, env: env, stderr_to_stdout: true)
end
end
|
deps/ecto/lib/ecto/adapters/postgres.ex
|
defmodule Chopperbot.Split.OptionTransformer do
@percentage_pattern ~r/^(\+|-)(\d+|\d+[.]\d+)(%)$/
@doc """
Transform options to a multiplier.
## Examples
iex> transform(["+vat"])
{:ok, 1.07}
iex> transform(["+s", "+v"])
{:ok, 1.177}
iex> transform(["-20%"])
{:ok, 0.8}
iex> transform(["+service", "+vat", "-25%"])
{:ok, 0.88275}
iex> transform([])
{:ok, 1.0}
iex> transform(["+v", "-10!", "-invalid", "-5%", "-ten%"])
{:error, :invalid_option, ["-10!", "-invalid", "-ten%"]}
"""
@spec transform([String.t()]) :: {:ok, float()} | {:error, :invalid_option, [String.t()]}
def transform(options) do
case transform_to_multipliers(options) do
{multipliers, []} ->
{:ok, accumulate_multipliers(multipliers)}
{_, invalid_options} ->
{:error, :invalid_option, invalid_options}
end
end
defp transform_to_multipliers(options, multipliers \\ [], invalid_options \\ [])
defp transform_to_multipliers([option | rest_options], multipliers, invalid_options) do
case get_multiplier_from_option(option) do
{:ok, multiplier} ->
transform_to_multipliers(rest_options, [multiplier | multipliers], invalid_options)
:error ->
transform_to_multipliers(rest_options, multipliers, [option | invalid_options])
end
end
defp transform_to_multipliers([], multipliers, invalid_options) do
{multipliers, Enum.reverse(invalid_options)}
end
defp get_multiplier_from_option(option) when option in ["+service", "+s"] do
get_multiplier_from_option("+10%")
end
defp get_multiplier_from_option(option) when option in ["+vat", "+v"] do
get_multiplier_from_option("+7%")
end
defp get_multiplier_from_option(option) do
case Regex.run(@percentage_pattern, option) do
[^option, operator, number, "%"] ->
{float_number, ""} = Float.parse(number)
multiplier =
Kernel
|> apply(String.to_existing_atom(operator), [100, float_number])
|> Kernel./(100)
{:ok, multiplier}
_ ->
:error
end
end
defp accumulate_multipliers(multipliers) do
multipliers
|> Enum.reduce(1.0, &(&1 * &2))
|> Float.round(15)
end
end
|
lib/chopperbot/split/parser/option_transformer.ex
|
defmodule Galena.Consumer do
@moduledoc """
**Galena.Consumer** is a customized `GenStage` consumer which is able to receive _some_ messages
from _some_ producers or producer-consumers. The consumer can be subscribed
to the chosen topics from the chosen producers.
### Definition
```elixir
defmodule MyConsumer do
use Galena.Consumer
def handle_consume(topic, message) do
IO.puts(topic <> ": " <> message)
end
end
```
### Start up
Define the `args` of your Consumer. It has to be a Keyword list which has to contain a `producers_info` field which
will have a list of tuples of two parameters, where the first one will be a list of topics and the second one
the producer or producer-consumer:
```elixir
args = [
producers_info: [
{["topic_1", "topic_2", "topic_3"], :producer1},
{["topic_A"], :producer2},
{["topic_a", "topic_b"], :producer3},
{[], :producer4}
]
]
```
When the list of topics is empty, your consumer will receive all the information published by the producer.
```elixir
{:ok, consumer} = MyConsumer.start_link(args, [name: :consumer])
```
"""
@type topic :: any
@type message :: any
@doc """
It will be executed when a message is received by the consumer.
The first argument will be the subscribed topic and the second one the received message.
"""
@callback handle_consume(topic, message) :: any
defmacro __using__(_) do
quote do
@behaviour Galena.Consumer
use GenStage
alias Galena.Common.ConsumerFunctions
@init_time 1
def start_link(args, opts) do
GenStage.start_link(__MODULE__, args[:producers_info], opts)
end
def init(producers_info) do
Process.send_after(self(), {:init, producers_info}, @init_time)
{:consumer, :ok}
end
def handle_info({:init, producers_info}, state) do
ConsumerFunctions.subscription(self(), producers_info)
{:noreply, [], state}
end
def handle_events(events, _from, state) do
Enum.each(events, fn {topic, message} -> handle_consume(topic, message) end)
{:noreply, [], state}
end
end
end
end
|
lib/galena/consumer.ex
|
defmodule Holidays.DateCalculator.DateMath do
@doc """
Adds the given number of `days` to the given `date`.
## Examples
iex> Holidays.DateCalculator.DateMath.add_days({2015, 12, 31}, 1)
{2016, 1, 1}
iex> Holidays.DateCalculator.DateMath.add_days({2016, 1, 6}, -12)
{2015, 12, 25}
"""
@spec add_days(:calendar.date, integer) :: :calendar.date
def add_days(date, days) do
:calendar.gregorian_days_to_date(:calendar.date_to_gregorian_days(date) + days)
end
@offset %{:first => 1,
:second => 8,
:third => 15,
:fourth => 22}
@doc """
Returns the date for the `week`th `weekday` for the given `year` and `month`.
`week` may be one of :first, :second, :third, :fourth, :last
`weekday` may be a number between 1 and 7, which is the way Erlang
represents Monday through Sunday. Or use one of the atoms
:monday, :tuesday, :wednesday, :thursday, :friday, :saturday, :sunday
## Examples
# The second Tuesday of June, 2013
iex> Holidays.DateCalculator.DateMath.get_weekth_day(2013, 6, :second, :tuesday)
{2013, 6, 11}
# The third Friday of December, 2013
iex> Holidays.DateCalculator.DateMath.get_weekth_day(2013, 12, :third, :friday)
{2013, 12, 20}
# The last Saturday of January, 2013
iex> Holidays.DateCalculator.DateMath.get_weekth_day(2013, 1, :last, :saturday)
{2013, 1, 26}
"""
@spec get_weekth_day(pos_integer, pos_integer, Holidays.week, Holidays.weekday | pos_integer) :: :calendar.date
def get_weekth_day(year, month, :last, weekday) do
offset = :calendar.last_day_of_the_month(year, month) - 6
do_get_weekth_day(year, month, offset, weekday)
end
def get_weekth_day(year, month, week, weekday) do
do_get_weekth_day(year, month, @offset[week], weekday)
end
@daynum %{:monday => 1,
:tuesday => 2,
:wednesday => 3,
:thursday => 4,
:friday => 5,
:saturday => 6,
:sunday => 7}
@spec do_get_weekth_day(pos_integer, pos_integer, pos_integer, Holidays.weekday | pos_integer) :: :calendar.date
defp do_get_weekth_day(year, month, offset, weekday) when not is_integer(weekday) do
do_get_weekth_day(year, month, offset, @daynum[weekday])
end
defp do_get_weekth_day(year, month, offset, weekday) do
day = weekday - :calendar.day_of_the_week(year, month, offset) + offset
correct_offset(year, month, offset, day)
end
@spec correct_offset(pos_integer, pos_integer, pos_integer, integer) :: :calendar.date
defp correct_offset(year, month, offset, day) when day < offset do
{year, month, day + 7}
end
defp correct_offset(year, month, _offset, day), do: {year, month, day}
@dayname %{1 => :monday,
2 => :tuesday,
3 => :wednesday,
4 => :thursday,
5 => :friday,
6 => :saturday,
7 => :sunday}
@doc """
Returns a list of tuples with week and day atoms.
The list will contain a single item, except when the day falls in both the `:fourth`
and the `:last` week.
## Examples
iex> Holidays.DateCalculator.DateMath.get_week_and_weekday({2016,1,29})
[{:last, :friday}]
iex> Holidays.DateCalculator.DateMath.get_week_and_weekday({2016,1,25})
[{:fourth, :monday}, {:last, :monday}]
iex> Holidays.DateCalculator.DateMath.get_week_and_weekday({2016,1,5})
[{:first, :tuesday}]
"""
@spec get_week_and_weekday(:calendar.date) :: [{Holidays.week, Holidays.weekday}]
def get_week_and_weekday({year, month, day} = date) do
day_name = @dayname[:calendar.day_of_the_week(date)]
week_name(div(day - 1, 7), day_name) ++
check_last_week(:calendar.last_day_of_the_month(year, month) - day,
day_name)
end
defp week_name(0, day_name), do: [{:first, day_name}]
defp week_name(1, day_name), do: [{:second, day_name}]
defp week_name(2, day_name), do: [{:third, day_name}]
defp week_name(3, day_name), do: [{:fourth, day_name}]
defp week_name(_, _), do: []
defp check_last_week(daysleft, day_name) when daysleft < 7 do
[{:last, day_name}]
end
defp check_last_week(_, _), do: []
end
|
lib/holidays/date_calculator/date_math.ex
|
defmodule Ockam.Workers.RemoteForwarder do
@moduledoc """
Ockam worker to handle forwarding from the Ockam Hub forwarding service `Ockam.Hub.Service.Forwarding`
On start creates a hub forwarder in the forwarding service.
Forwards messages from the hub forwarder to the configured route.
Options:
`service_route` - a route to the forwarding service
`forward_to` - a route to forward messages from the hub forwarder to
`register_payload` - (defaults to "register") payload to use when registering a forwarder
Usage:
Create a forwarder:
{:ok, forwarder} = RemoteForwarder.create(
service_route: route_to_service,
forward_to: local_route
)
Get the forwarding address local to the Hub:
forwarder_address = RemoteForwarder.forwarder_address(forwarder)
Send messages from another node:
Ockam.Router.route(%{onward_route: hub_route ++ [forwarder_address], ...})
Messages will be delivered through the hub forwarder
to the remote forwarder on the first node
to the configured `local_route`
"""
use Ockam.Worker
alias Ockam.Message
alias Ockam.Router
@doc """
Get the remote forwarder address to send messages to this worker
"""
def forwarder_address(server) when is_binary(server) do
forwarder_address(Ockam.Node.whereis(server))
end
def forwarder_address(server) when is_pid(server) do
GenServer.call(server, :forwarder_address)
end
@impl true
def setup(options, state) do
service_route = Keyword.fetch!(options, :service_route)
forward_to = Keyword.fetch!(options, :forward_to)
register_payload = Keyword.get(options, :register_payload, "register")
case register(service_route, state.address, register_payload) do
{:ok, forwarder_address} ->
{:ok, Map.merge(state, %{forward_to: forward_to, forwarder_address: forwarder_address})}
{:error, err} ->
{:error, err}
end
end
@impl true
def handle_message(message, state) do
[_me | onward_route] = Message.onward_route(message)
forward_to = Map.get(state, :forward_to)
Router.route(Message.forward(message, forward_to ++ onward_route))
{:ok, state}
end
@impl true
def handle_call(:forwarder_address, _from, state) do
{:reply, Map.get(state, :forwarder_address), state}
end
def register(service_route, self_address, register_payload, timeout \\ 60_000) do
# Send 'register' message to forwarding service with the own address in the return_route
Router.route(%{
onward_route: service_route,
return_route: [self_address],
payload: register_payload
})
# Route to remote forwarder is the return_route of the reply
receive do
%{onward_route: [^self_address], return_route: forwarder_route, payload: ^register_payload} ->
{:ok, List.last(forwarder_route)}
after
timeout ->
{:error, :timeout}
end
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/workers/remote_forwarder.ex
|
defmodule Wallaby.Feature do
@moduledoc """
Helpers for writing features.
You can `use` or `import` this module.
## use Wallaby.Feature
Calling this module with `use` will automatically call `use Wallaby.DSL`.
When called with `use` and you are using Ecto, please configure your `otp_app`.
```
config :wallaby, otp_app: :your_app
```
"""
defmacro __using__(_) do
quote do
ExUnit.Case.register_attribute(__MODULE__, :sessions)
use Wallaby.DSL
import Wallaby.Feature
setup context do
metadata = unquote(__MODULE__).Utils.maybe_checkout_repos(context[:async])
start_session_opts =
[metadata: metadata]
|> unquote(__MODULE__).Utils.put_create_session_fn(context[:create_session_fn])
get_in(context, [:registered, :sessions])
|> unquote(__MODULE__).Utils.sessions_iterable()
|> Enum.map(fn
opts when is_list(opts) ->
unquote(__MODULE__).Utils.start_session(opts, start_session_opts)
i when is_number(i) ->
unquote(__MODULE__).Utils.start_session([], start_session_opts)
end)
|> unquote(__MODULE__).Utils.build_setup_return()
end
end
end
@doc """
Defines a feature with a message.
Adding `import Wallaby.Feature` to your test module will import the `Wallaby.Feature.feature/3` macro. This is a drop in replacement for the `ExUnit.Case.test/3` macro that you normally use.
Adding `use Wallaby.Feature` to your test module will act the same as `import Wallaby.Feature`, as well as configure your Ecto repos properly and pass a `Wallaby.Session` into the test context.
## Sessions
When called with `use`, the `Wallaby.Feature.feature/3` macro will automatically start a single session using the currently configured capabilities, which is passed to the feature via the `:session` key in the context.
```
feature "test with a single session", %{session: session} do
# ...
end
```
If you would like to start multiple sessions, assign the `@sessions` attribute to the number of sessions that the feature should start, and they will be passed to the feature via the `:sessions` key in the context.
```
@sessions 2
feature "test with a two sessions", %{sessions: [session_1, sessions_2]} do
# ...
end
```
If you need to change the capabilities sent to the session for a specific feature, you can assign `@sessions` to a list of keyword lists of the options to be passed to `Wallaby.start_session/1`. This will start the number of sessions equal to the size of the list.
```
@sessions [
[capabilities: %{}]
]
feature "test with different capabilities", %{session: session} do
# ...
end
```
If you don't wish to `use Wallaby.Feature` in your test module, you can add the following code to configure Ecto and create a session.
```
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(YourApp.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(YourApp.Repo, {:shared, self()})
end
metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(YourApp.Repo, self())
{:ok, session} = Wallaby.start_session(metadata: metadata)
{:ok, session: session}
end
```
## Screenshots
If you have configured `screenshot_on_failure` to be true, any exceptions raised during the feature will trigger a screenshot to be taken.
"""
defmacro feature(message, context \\ quote(do: _), contents) do
contents =
quote do
try do
unquote(contents)
:ok
rescue
e ->
if Wallaby.screenshot_on_failure?() do
unquote(__MODULE__).Utils.take_screenshots_for_sessions(self(), unquote(message))
end
reraise(e, __STACKTRACE__)
end
end
context = Macro.escape(context)
contents = Macro.escape(contents, unquote: true)
quote bind_quoted: [context: context, contents: contents, message: message] do
name = ExUnit.Case.register_test(__ENV__, :feature, message, [:feature])
def unquote(name)(unquote(context)), do: unquote(contents)
end
end
defmodule Utils do
@includes_ecto Code.ensure_loaded?(Ecto.Adapters.SQL.Sandbox) &&
Code.ensure_loaded?(Phoenix.Ecto.SQL.Sandbox)
@moduledoc false
def build_setup_return([session]) do
[session: session]
end
def build_setup_return(sessions) do
[sessions: sessions]
end
def sessions_iterable(nil), do: 1..1
def sessions_iterable(count) when is_number(count), do: 1..count
def sessions_iterable(capabilities) when is_list(capabilities), do: capabilities
def start_session(more_opts, start_session_opts) when is_list(more_opts) do
{:ok, session} =
start_session_opts
|> Keyword.merge(more_opts)
|> Wallaby.start_session()
session
end
def put_create_session_fn(opts, nil), do: opts
def put_create_session_fn(opts, func), do: Keyword.put(opts, :create_session_fn, func)
if @includes_ecto do
def maybe_checkout_repos(async?) do
otp_app()
|> ecto_repos()
|> Enum.map(&checkout_ecto_repos(&1, async?))
|> metadata_for_ecto_repos()
end
defp otp_app(), do: Application.get_env(:wallaby, :otp_app)
defp ecto_repos(nil), do: []
defp ecto_repos(otp_app), do: Application.get_env(otp_app, :ecto_repos, [])
defp checkout_ecto_repos(repo, async) do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(repo)
unless async, do: Ecto.Adapters.SQL.Sandbox.mode(repo, {:shared, self()})
repo
end
defp metadata_for_ecto_repos([]), do: Map.new()
defp metadata_for_ecto_repos(repos) do
Phoenix.Ecto.SQL.Sandbox.metadata_for(repos, self())
end
else
def maybe_checkout_repos(_) do
""
end
end
def take_screenshots_for_sessions(pid, test_name) do
time = :erlang.system_time(:second) |> to_string()
test_name = String.replace(test_name, " ", "_")
screenshot_paths =
Wallaby.SessionStore.list_sessions_for(pid)
|> Enum.with_index(1)
|> Enum.flat_map(fn {s, i} ->
filename = time <> "_" <> test_name <> "(#{i})"
Wallaby.Browser.take_screenshot(s, name: filename).screenshots
end)
|> Enum.map_join("\n- ", &Wallaby.Browser.build_file_url/1)
IO.write("\n- #{screenshot_paths}")
end
end
end
|
lib/wallaby/feature.ex
|
defmodule DataQuacker.Adapter do
@moduledoc ~S"""
Specifies the behaviour to which adapters must conform.
An adapter must implement these functions: `parse_source/2`, `get_headers/1`, `get_rows/1`, `get_row/1`.
The first one takes a source (e.g. a file path) and a keyword list of options,
and returns a tuple of `{:ok, any()}` or `{:error, any()}`.
In case of success the second element of the tuple
will be the value given to the other two function.
The second one takes the result of `parse_source/2`
and returns `{:ok, list(any())} | {:error, any()}`.
In case of success the second element of the tuple
will be the value used to determine the indexes
of sources described in the schema.
The third one takes the result of `parse_source/2`
and returns `{:ok, list(any())} | {:error, any()}`.
In case of success each subsequent element of the resulting list
will be passed to the get row function.
The last one takes an element of the list
which is the result of `get_rows/1`
and returns `{:ok, list(any())} | {:error, any()}`.
In case of success the resulting list will be treated
as the list of columns in a row of the source.
> Note: The resulting list in the `get_row/1` function must be of the same length as the resulting list in the `get_headers/1` function.
For an example implementation take a look at the built-in adapters.
> The rationale behind this API for adapters is that, depending on the source, potential errors may occur at different stages of parsing the source. For example, the CSV library used by the default CSV adapter returns a tuple with `:ok` or `:error` as the first element for each row. However, some external APIs, like Google Sheets, return a list of rows without specifying for each whether it is valid or not. Therefore adapters must be able to report validity per row, without being required to eagerly iterate over all of the rows and wrap them in a tuple with `:ok`.
"""
@callback parse_source(any(), Keyword.t()) :: {:ok, any()} | {:error, any()}
@callback get_headers(any()) :: {:ok, list(any())} | {:error, any()}
@callback get_rows(any()) :: {:ok, list(any())} | {:error, any()}
@callback get_row(any()) :: {:ok, list(any())} | {:error, any()}
end
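# A minimal sketch of a hypothetical adapter for an in-memory source given as
# `[headers | rows]`. It only illustrates the callbacks described above; see the
# built-in adapters for real implementations.
defmodule DataQuacker.Adapter.InMemoryExample do
  @moduledoc false
  @behaviour DataQuacker.Adapter

  @impl true
  def parse_source([headers | rows], _opts) when is_list(headers),
    do: {:ok, %{headers: headers, rows: rows}}

  def parse_source(_source, _opts), do: {:error, :invalid_source}

  @impl true
  def get_headers(%{headers: headers}), do: {:ok, headers}

  @impl true
  def get_rows(%{rows: rows}), do: {:ok, rows}

  @impl true
  def get_row(row) when is_list(row), do: {:ok, row}
  def get_row(_row), do: {:error, :invalid_row}
end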
|
lib/data_quacker/adapter.ex
| 0.893783
| 0.863507
|
adapter.ex
|
starcoder
|
defmodule Tipalti.IFrame.SetupProcess do
@moduledoc """
Generate URLs for the Tipalti Setup Process iFrame.
"""
import Tipalti.IFrame, only: [build_url: 3]
@type flaggable_fields ::
:country
| :first
| :middle
| :last
| :company
| :street1
| :street2
| :city
| :zip
| :state
| :email
@typedoc """
These are the fields that are "forcable", meaning that a value supplied for them will override any value that may
already exist for the account.
"""
@type forcable_fields :: flaggable_fields
@typedoc """
These are the fields that can be marked as "read-only"; they will appear in the setup form but will not be changeable.
"""
@type read_onlyable_fields :: flaggable_fields
@type option :: {:force, [forcable_fields]} | {:read_only, [read_onlyable_fields]}
@type options :: [option]
@typedoc """
Struct used to represent params to the Tipalti Setup Process iFrame.
## Fields:
* `:idap` - Payee ID
* `:country` - ISO 3166 2-letter country code
* `:first` - Name of payee
* `:middle` - Name of payee
* `:last` - Name of payee
* `:company` - Company name
* `:uiculture` - Language code; one of (ar, zh-CHS, en, fr, de, it, ja, ko, nl, pt-BR, ru, es, vi)
* `:street1` - The payee contact address details
* `:street2` - The payee contact address details
* `:city` - The payee contact address details
* `:zip` - The payee contact address details
* `:state` - The payee contact address details
* `:alias` - An alternate name for the payee, if applicable
* `:email` - The payee email address
* `:force` - A list of fields you'd like to force (override the value even if a value already exists for the account)
* `:read_only` - A list of fields you'd like to make read-only
"""
@type t :: %__MODULE__{
idap: Tipalti.idap(),
country: String.t() | nil,
first: String.t() | nil,
middle: String.t() | nil,
last: String.t() | nil,
company: String.t() | nil,
uiculture: String.t() | nil,
street1: String.t() | nil,
street2: String.t() | nil,
city: String.t() | nil,
zip: String.t() | nil,
state: String.t() | nil,
alias: String.t() | nil,
email: String.t() | nil,
preferred_payer_entity: String.t() | nil
}
@enforce_keys [:idap]
defstruct idap: nil,
country: nil,
first: nil,
middle: nil,
last: nil,
company: nil,
uiculture: nil,
street1: nil,
street2: nil,
city: nil,
zip: nil,
state: nil,
alias: nil,
email: nil,
preferred_payer_entity: nil
@url %{
sandbox: URI.parse("https://ui2.sandbox.tipalti.com/PayeeDashboard/Home"),
production: URI.parse("https://ui2.tipalti.com/PayeeDashboard/Home")
}
@doc """
Generates a Setup Process iFrame URL for the given struct of parameters.
## Examples
iex> params = %Tipalti.IFrame.SetupProcess{idap: "mypayee", preferred_payer_entity: "Foo"}
...> url(params)
%URI{
authority: "ui2.sandbox.tipalti.com",
fragment: nil,
host: "ui2.sandbox.tipalti.com",
path: "/PayeeDashboard/Home",
port: 443,
query: "idap=mypayee&payer=MyPayer&preferredPayerEntity=Foo&ts=1521234048&hashkey=899314ff57da786a9cda58f3296b844cd4fbeac75dbfaec13cf8a04aca3d99db",
scheme: "https",
userinfo: nil
}
iex> params = %Tipalti.IFrame.SetupProcess{idap: "mypayee", company: "My Company", first: "Joe"}
...> url(params, force: [:company], read_only: [:first]) |> URI.to_string()
"https://ui2.sandbox.tipalti.com/PayeeDashboard/Home?first=Joe&firstSetReadOnly=TRUE&forceCompany=My+Company&idap=mypayee&payer=MyPayer&ts=1521234048&hashkey=<KEY>"
"""
@spec url(t(), options()) :: URI.t()
def url(struct, opts \\ []) do
params = Map.from_struct(struct)
build_url(@url, params, opts)
end
end
|
lib/tipalti/iframe/setup_process.ex
| 0.738763
| 0.467271
|
setup_process.ex
|
starcoder
|
defmodule Joken.Signer do
@moduledoc """
Interface between Joken and JOSE for signing and verifying tokens.
In the future we plan to keep this interface but make it pluggable for other crypto
implementations like using only standard `:crypto` and `:public_key` modules. So,
**avoid** depending on the inner structure of this module.
"""
alias JOSE.{JWK, JWS, JWT}
@hs_algorithms ["HS256", "HS384", "HS512"]
@rs_algorithms ["RS256", "RS384", "RS512"]
@es_algorithms ["ES256", "ES384", "ES512", "ES256K"]
@ps_algorithms ["PS256", "PS384", "PS512"]
@eddsa_algorithms ["Ed25519", "Ed25519ph", "Ed448", "Ed448ph"]
@map_key_algorithms @rs_algorithms ++ @es_algorithms ++ @ps_algorithms ++ @eddsa_algorithms
@algorithms @hs_algorithms ++ @map_key_algorithms
@typedoc "A key may be an octet or a map with parameters according to JWK (JSON Web Key)"
@type key :: binary() | map()
@typedoc """
A `Joken.Signer` instance is a JWS (JSON Web Signature) and JWK (JSON Web Key) struct.
It also contains an `alg` field for performance reasons.
"""
@type t :: %__MODULE__{
jwk: JWK.t() | nil,
jws: JWS.t() | nil,
alg: binary() | nil
}
defstruct jwk: nil, jws: nil, alg: nil
@doc """
All supported algorithms.
"""
@spec algorithms() :: [binary()]
def algorithms, do: @algorithms
@doc """
Map key algorithms.
"""
@spec map_key_algorithms() :: [binary()]
def map_key_algorithms, do: @map_key_algorithms
@doc """
Creates a new Joken.Signer struct. Can accept either a binary for HS*** algorithms
or a map with arguments for the other kinds of keys. Also, accepts an optional map
that will be passed as extra header arguments for generated JWT tokens.
## Example
iex> Joken.Signer.create("HS256", "s3cret")
%Joken.Signer{
alg: "HS256",
jwk: %JOSE.JWK{
fields: %{},
keys: :undefined,
kty: {:jose_jwk_kty_oct, "s3cret"}
},
jws: %JOSE.JWS{
alg: {:jose_jws_alg_hmac, :HS256},
b64: :undefined,
fields: %{"typ" => "JWT"}
}
}
"""
@spec create(binary(), key(), %{binary() => term()}) :: __MODULE__.t()
def create(alg, key, jose_extra_headers \\ %{})
def create(alg, secret, headers) when is_binary(secret) and alg in @hs_algorithms do
raw_create(
alg,
headers |> Map.merge(%{"alg" => alg, "typ" => "JWT"}) |> JWS.from_map(),
JWK.from_oct(secret)
)
end
def create(alg, %{"pem" => pem}, headers) when alg in @map_key_algorithms do
raw_create(
alg,
headers |> Map.merge(%{"alg" => alg, "typ" => "JWT"}) |> JWS.from_map(),
JWK.from_pem(pem)
)
end
def create(alg, key, headers) when is_map(key) and alg in @map_key_algorithms do
raw_create(
alg,
headers |> Map.merge(%{"alg" => alg, "typ" => "JWT"}) |> JWS.from_map(),
JWK.from_map(key)
)
end
def create(alg, _key, _headers) when alg in @map_key_algorithms,
do: raise(Joken.Error, :algorithm_needs_key)
def create(_, _, _), do: raise(Joken.Error, :unrecognized_algorithm)
defp raw_create(alg, jws, jwk) do
%__MODULE__{
jws: jws,
jwk: jwk,
alg: alg
}
end
@doc """
Signs a map of claims with the given Joken.Signer.
## Examples
iex> Joken.Signer.sign(%{"name" => "<NAME>"}, Joken.Signer.create("HS256", "secret"))
{:ok, "<KEY>"}
iex> Joken.Signer.sign(%{"name" => "<NAME>"}, Joken.Signer.parse_config(:rs256))
{:ok, "<KEY>"}
"""
@spec sign(Joken.claims(), __MODULE__.t()) ::
{:ok, Joken.bearer_token()} | {:error, Joken.error_reason()}
def sign(claims, %__MODULE__{alg: _, jwk: jwk, jws: %JWS{alg: {alg, _}} = jws})
when is_map(claims) do
with result = {%{alg: ^alg}, _} <- JWT.sign(jwk, jws, claims),
{_, compacted_token} <- JWS.compact(result) do
{:ok, compacted_token}
end
end
@doc """
Verifies the given token's signature with the given `Joken.Signer`.
## Examples
iex> Joken.Signer.verify("<KEY>", Joken.Signer.create("HS256", "secret"))
{:ok, %{"name" => "<NAME>"}}
iex> Joken.Signer.verify("eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiSm9obiBEb2UifQ.e3hyn_oaaA2lxMlqH1UPo8STN-a_sszl8B2_s6tY9aT_YBAmfd7BXJOPsOMl7x2wXeKMQaNBVjna2tA0UiO_m3SpwiYgoTcU65D6OgkzugmLD_DhjDK1YCOKlm7So1uhbkb_QCuo4Ij5scsQqwv7hkxo4IximGBeH9LAvPhPTaGmYJMI7_tWIld2TlY6tNUQP4n0qctXsI3hjvGzdvuQW-tRnzAQCC4TYe-mJgFa033NSHeiX-sZB-SuYlWi7DJqDTiwlb_beVdqWpxxtFDA005Iw6FZTpH9Rs1LVwJU5t3RN5iWB-z4ZI-kKsGUGLNrAZ7btV6Ow2FMAdj9TXmNpQ", Joken.Signer.parse_config(:rs256))
{:ok, %{"name" => "<NAME>"}}
"""
@spec verify(Joken.bearer_token(), __MODULE__.t()) ::
{:ok, Joken.claims()} | {:error, Joken.error_reason()}
def verify(token, %__MODULE__{alg: alg, jwk: jwk}) when is_binary(token) do
case JWT.verify_strict(jwk, [alg], token) do
{true, %JWT{fields: claims}, _} -> {:ok, claims}
_ -> {:error, :signature_error}
end
end
@doc """
Generates a `Joken.Signer` from Joken's application configuration.
A `Joken.Signer` has an algorithm (one of #{inspect(@algorithms)}) and a key.
There are several types of keys used by JWTs algorithms:
- RSA
- Elliptic Curve
- Octet (binary)
- And so on
Also, they can be encoded in several ways:
- Raw (map of parameters)
- PEM (Privacy Enhanced Mail format)
- OpenSSH encoding
- And so on
To ease configuring these types of keys used by JWTs algorithms, Joken accepts a few
parameters in its configuration:
- `:signer_alg` : one of #{inspect(@algorithms)}.
- `:key_pem` : a binary containing a key in PEM encoding format.
- `:key_openssh` : a binary containing a key in Open SSH encoding format.
- `:key_map` : a map with the raw parameters.
- `:key_octet` : a binary used as the password for HS algorithms only.
## Examples
config :joken,
hs256: [
signer_alg: "HS256",
key_octet: "test"
]
config :joken,
rs256: [
signer_alg: "RS256",
key_pem: \"\"\"
-----BEGIN RSA PRIVATE KEY-----
<KEY>KBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4ic7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXBwkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXkXs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDtjEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2XIpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWiz+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxrE9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==
-----END RSA PRIVATE KEY-----
\"\"\"
]
"""
@spec parse_config(atom()) :: __MODULE__.t() | nil
def parse_config(key \\ :default_key) do
case Application.get_env(:joken, key) do
key_config when is_binary(key_config) ->
create("HS256", key_config)
key_config when is_list(key_config) ->
parse_list_config(key_config)
_ ->
nil
end
end
defp parse_list_config(config) do
signer_alg = config[:signer_alg] || "HS256"
headers = config[:jose_extra_headers] || %{}
key_pem = config[:key_pem]
key_map = config[:key_map]
key_openssh = config[:key_openssh]
key_octet = config[:key_octet]
key_config =
[
{&JWK.from_pem/1, key_pem},
{&JWK.from_map/1, key_map},
{&JWK.from_openssh_key/1, key_openssh},
{&JWK.from_oct/1, key_octet}
]
|> Enum.filter(fn {_, val} -> not is_nil(val) end)
unless Enum.count(key_config) == 1, do: raise(Joken.Error, :wrong_key_parameters)
{jwk_function, value} = List.first(key_config)
if signer_alg in @algorithms do
do_parse_signer(jwk_function.(value), signer_alg, headers)
else
raise Joken.Error, :unrecognized_algorithm
end
end
defp do_parse_signer(jwk, signer_alg, headers) do
raw_create(
signer_alg,
headers |> Map.merge(%{"alg" => signer_alg, "typ" => "JWT"}) |> JWS.from_map(),
jwk
)
end
end
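# A minimal sketch of a sign/verify round trip using the functions defined
# above; the secret and claims are illustrative.
#
#     signer = Joken.Signer.create("HS256", "s3cret")
#     {:ok, token} = Joken.Signer.sign(%{"name" => "Jane Doe"}, signer)
#     {:ok, %{"name" => "Jane Doe"}} = Joken.Signer.verify(token, signer)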
|
lib/joken/signer.ex
| 0.866683
| 0.57075
|
signer.ex
|
starcoder
|
defmodule Margaret.Helpers.Connection do
@moduledoc """
Helper functions for working with pagination connections.
See https://facebook.github.io/relay/graphql/connections.html
for more information.
"""
alias Absinthe.Relay
alias Margaret.{Repo, Helpers}
@doc """
Returns a pagination connection from a query.
## Options
* `:total_count` - when true, inserts the `total_count`
key into the connection.
It gets the count from the query provided.
Defaults to `true`.
## Examples
iex> from_query(query, args, total_count: false)
{:ok, connection}
"""
@spec from_query(Ecto.Queryable.t(), map(), Keyword.t()) :: {:ok, map()} | {:error, any()}
def from_query(query, args, opts \\ []) do
total_count = maybe_get_total_count(opts, query)
case Relay.Connection.from_query(query, &Repo.all/1, args) do
{:ok, connection} ->
connection
|> maybe_put_total_count(total_count)
|> transform_edges()
|> Helpers.ok()
error ->
error
end
end
@spec maybe_get_total_count(Keyword.t(), Ecto.Queryable.t()) :: non_neg_integer() | nil
defp maybe_get_total_count(opts, query) do
if Keyword.get(opts, :total_count, true) do
Repo.count(query)
else
nil
end
end
@spec maybe_put_total_count(any, non_neg_integer() | nil) :: any()
defp maybe_put_total_count(connection, total_count) when not is_nil(total_count),
do: Map.put(connection, :total_count, total_count)
defp maybe_put_total_count(connection, nil), do: connection
@spec transform_edges(map()) :: map()
defp transform_edges(connection) do
Map.update!(connection, :edges, &Enum.map(&1, fn edge -> put_edge_fields(edge) end))
end
@spec put_edge_fields(map()) :: map()
defp put_edge_fields(%{node: {nodes, fields}} = edge) when is_list(nodes) do
node = Enum.find(nodes, &(not is_nil(&1)))
do_put_edge_fields(edge, node, fields)
end
defp put_edge_fields(%{node: {node, fields}} = edge), do: do_put_edge_fields(edge, node, fields)
defp put_edge_fields(edge), do: edge
@spec do_put_edge_fields(map(), map(), map()) :: map()
defp do_put_edge_fields(edge, node, fields) do
edge
|> Map.merge(fields)
|> Map.put(:node, node)
end
end
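# A minimal sketch of calling `from_query/3` from an Absinthe resolver; `Story`
# and the argument names are hypothetical.
#
#     def stories(_parent, args, _resolution) do
#       Story
#       |> Margaret.Helpers.Connection.from_query(args, total_count: true)
#     end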
|
projects/api/lib/margaret/helpers/connection.ex
| 0.833833
| 0.403508
|
connection.ex
|
starcoder
|
defmodule Kino.Control do
@moduledoc """
Various widgets for user interactions.
Each widget is a UI control element that the user interacts
with, consequently producing an event stream.
Those widgets are often useful when paired with `Kino.Frame` for
presenting content that changes upon user interactions.
## Examples
First, create a control and make sure it is rendered,
either by placing it at the end of a code cell or by
explicitly rendering it with `Kino.render/1`.
button = Kino.Control.button("Hello")
Next, to receive events from the control, a process needs to
subscribe to it and pick a name to distinguish the
events.
Kino.Control.subscribe(button, :hello)
As the user interacts with the button, the subscribed process
receives corresponding events.
IEx.Helpers.flush()
#=> {:hello, %{origin: #PID<10895.9854.0>}}
#=> {:hello, %{origin: #PID<10895.9854.0>}}
"""
defstruct [:attrs]
@type t :: %__MODULE__{attrs: Kino.Output.control_attrs()}
defp new(attrs) do
ref = Kino.Output.random_ref()
subscription_manager = Kino.SubscriptionManager.cross_node_name()
attrs = Map.merge(attrs, %{ref: ref, destination: subscription_manager})
Kino.Bridge.reference_object(ref, self())
Kino.Bridge.monitor_object(ref, subscription_manager, {:clear_topic, ref})
%__MODULE__{attrs: attrs}
end
@doc """
Creates a new button.
"""
@spec button(String.t()) :: t()
def button(label) when is_binary(label) do
new(%{type: :button, label: label})
end
@doc """
Creates a new keyboard control.
This widget is represented as a button that toggles interception
mode, in which the given keyboard events are captured.
## Event info
In addition to standard properties, all events include additional
properties.
### Key events
* `:type` - either `:keyup` or `:keydown`
* `:key` - the value matching the browser [KeyboardEvent.key](https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key)
### Status event
* `:type` - always `:status`
* `:enabled` - whether the keyboard is activated
## Examples
Create the widget:
keyboard = Kino.Control.keyboard([:keyup, :keydown])
Subscribe to events:
Kino.Control.subscribe(keyboard, :keyboard)
As the user types, events are streamed:
IEx.Helpers.flush()
#=> {:keyboard, %{key: "o", origin: #PID<10895.9854.0>, type: :keydown}}
#=> {:keyboard, %{key: "k", origin: #PID<10895.9854.0>, type: :keydown}}
#=> {:keyboard, %{key: "o", origin: #PID<10895.9854.0>, type: :keyup}}
#=> {:keyboard, %{key: "k", origin: #PID<10895.9854.0>, type: :keyup}}
"""
@spec keyboard(list(:keyup | :keydown | :status)) :: t()
def keyboard(events) when is_list(events) do
if events == [] do
raise ArgumentError, "expected at least one event, got: []"
end
for event <- events do
unless event in [:keyup, :keydown, :status] do
raise ArgumentError,
"expected event to be either :keyup, :keydown or :status, got: #{inspect(event)}"
end
end
new(%{type: :keyboard, events: events})
end
@doc """
Creates a new form.
A form is composed of regular inputs from the `Kino.Input` module,
however, in a form, input values are not synchronized between users.
Consequently, the form is another control for producing user-specific
events.
Either `:submit` or `:report_changes` must be specified.
## Options
* `:submit` - specifies the label to use for the submit button
and enables submit events
* `:report_changes` - whether to send new form value whenever any
of the input changes. Defaults to `false`
* `:reset_on_submit` - a list of fields to revert to their default
values once the form is submitted. Use `true` to indicate all
fields. Defaults to `[]`
## Event info
In addition to standard properties, all events include additional
properties.
* `:type` - either `:submit` or `:change`
* `:data` - a map with field values, matching the field list
## Examples
Create a form out of inputs:
form =
Kino.Control.form(
[
name: Kino.Input.text("Name"),
message: Kino.Input.textarea("Message")
],
submit: "Send"
)
Subscribe to events:
Kino.Control.subscribe(form, :chat_form)
As users submit the form, the payload is sent:
IEx.Helpers.flush()
#=> {:chat_form,
#=> %{
#=> data: %{message: "Hola", name: "Amy"},
#=> origin: #PID<10905.5195.0>,
#=> type: :submit
#=> }}
#=> {:chat_form,
#=> %{
#=> data: %{message: "Hey!", name: "Jake"},
#=> origin: #PID<10905.5186.0>,
#=> type: :submit
#=> }}
"""
@spec form(list({atom(), Kino.Input.t()}), keyword()) :: t()
def form(fields, opts \\ []) when is_list(fields) do
if fields == [] do
raise ArgumentError, "expected at least one field, got: []"
end
for {field, input} <- fields do
unless is_atom(field) do
raise ArgumentError,
"expected each field key to be an atom, got: #{inspect(field)}"
end
unless is_struct(input, Kino.Input) do
raise ArgumentError,
"expected each field to be a Kino.Input widget, got: #{inspect(input)} for #{inspect(field)}"
end
end
unless opts[:submit] || opts[:report_changes] do
raise ArgumentError, "expected either :submit or :report_changes option to be enabled"
end
fields =
Enum.map(fields, fn {field, input} ->
# Make sure we use this input only in the form and nowhere else
input = Kino.Input.duplicate(input)
{field, input.attrs}
end)
submit = Keyword.get(opts, :submit, nil)
report_changes =
if Keyword.get(opts, :report_changes, false) do
Map.new(fields, fn {field, _} -> {field, true} end)
else
%{}
end
reset_on_submit =
case Keyword.get(opts, :reset_on_submit, []) do
true -> Keyword.keys(fields)
false -> []
fields -> fields
end
new(%{
type: :form,
fields: fields,
submit: submit,
report_changes: report_changes,
reset_on_submit: reset_on_submit
})
end
@doc """
Subscribes the calling process to control events.
The events are sent as `{tag, info}`, where info is a map with
event details. In particular, it always includes `:origin`, which
is an opaque identifier of the client that triggered the event.
"""
@spec subscribe(t(), term()) :: :ok
def subscribe(%Kino.Control{} = control, tag) do
Kino.SubscriptionManager.subscribe(control.attrs.ref, self(), tag)
end
@doc """
Unsubscribes the calling process from control events.
"""
@spec unsubscribe(t()) :: :ok
def unsubscribe(%Kino.Control{} = control) do
Kino.SubscriptionManager.unsubscribe(control.attrs.ref, self())
end
end
|
lib/kino/control.ex
| 0.932894
| 0.521288
|
control.ex
|
starcoder
|
defmodule SPARQL.Algebra.Construct do
defstruct [:template, :query]
alias SPARQL.Algebra.Expression
alias RDF.BlankNode
def result(%SPARQL.Query.Result{results: results}, template, generator, prefixes) do
template_bnodes = template_bnodes(template)
prefixes = if Enum.empty?(prefixes), do: nil, else: prefixes
Enum.reduce results, RDF.Graph.new(prefixes: prefixes), fn result, graph ->
template_for_solution =
template_bnodes
|> create_solution_bnodes(generator)
|> set_template_solution_bnodes(template)
RDF.Graph.add(graph, solve_patterns(template_for_solution, result, generator))
end
end
defp solve_patterns(template, solutions, generator) do
template
|> Stream.map(&(solve_pattern(&1, solutions, generator)))
|> Enum.filter(&RDF.Triple.valid?/1)
end
defp solve_pattern({variable, predicate, object}, solutions, generator) when is_binary(variable) do
if subject = solutions[variable] do
{replace_solved_bnode(subject, solutions, generator), predicate, object}
|> solve_pattern(solutions, generator)
end
end
defp solve_pattern({subject, variable, object}, solutions, generator) when is_binary(variable) do
if predicate = solutions[variable] do
{subject, replace_solved_bnode(predicate, solutions, generator), object}
|> solve_pattern(solutions, generator)
end
end
defp solve_pattern({subject, predicate, variable}, solutions, generator) when is_binary(variable) do
if object = solutions[variable] do
{subject, predicate, replace_solved_bnode(object, solutions, generator)}
|> solve_pattern(solutions, generator)
end
end
defp solve_pattern(pattern, _, _), do: pattern
defp template_bnodes(template) do
Enum.reduce template, MapSet.new, fn pattern, bnodes ->
MapSet.union(bnodes, pattern_bnodes(pattern))
end
end
defp pattern_bnodes(pattern, acc \\ MapSet.new)
defp pattern_bnodes({%BlankNode{} = bnode, p, o}, acc), do: pattern_bnodes({nil, p, o}, MapSet.put(acc, bnode))
defp pattern_bnodes({s, %BlankNode{} = bnode, o}, acc), do: pattern_bnodes({s, nil, o}, MapSet.put(acc, bnode))
defp pattern_bnodes({s, p, %BlankNode{} = bnode}, acc), do: pattern_bnodes({s, p, nil}, MapSet.put(acc, bnode))
defp pattern_bnodes(_, acc), do: acc
defp create_solution_bnodes(bnodes, generator) do
Enum.reduce bnodes, %{}, fn bnode, solution_bnodes ->
Map.put(solution_bnodes, bnode, BlankNode.Generator.generate(generator))
end
end
defp set_template_solution_bnodes(bnodes, template) when map_size(bnodes) == 0, do: template
defp set_template_solution_bnodes(bnodes, template) do
Enum.map template, &(set_solution_bnodes(&1, bnodes))
end
defp set_solution_bnodes({s, p, o}, bnodes) do
{set_solution_bnode(s, bnodes), set_solution_bnode(p, bnodes), set_solution_bnode(o, bnodes)}
end
defp set_solution_bnode(%BlankNode{} = bnode, solution_bnodes), do: solution_bnodes[bnode]
defp set_solution_bnode(node, _), do: node
defp replace_solved_bnode(%BlankNode{} = bnode, %{__id__: _solution_id}, generator) do
BlankNode.Generator.generate_for(generator, {:construct, bnode})
end
defp replace_solved_bnode(node, _, _), do: node
defimpl Expression do
def evaluate(construct, data, execution) do
Expression.evaluate(construct.query, data, execution)
|> SPARQL.Algebra.Construct.result(construct.template, execution.bnode_generator, execution.prefixes)
end
def variables(construct) do
Expression.variables(construct.query)
end
end
end
|
lib/sparql/algebra/expression/construct.ex
| 0.514644
| 0.590248
|
construct.ex
|
starcoder
|
defmodule Bolt.Cogs.ForceNick do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.Events.Handler
alias Bolt.Schema.Infraction
alias Bolt.{Converters, ErrorFormatters}
alias Bolt.{Helpers, ModLog, Parsers, Repo}
alias Nosedrum.Predicates
alias Nostrum.Api
alias Nostrum.Struct.User
require Logger
@impl true
def usage, do: ["forcenick <user:member> <duration:duration> <nick:str...>"]
@impl true
def description,
do: """
Apply the given nickname on the given member.
If the member attempts to change the nickname to anything else while the forced nick is active, Bolt will revert it.
**Example**:
```rs
// Apply the nick "<NAME>" to Dude#0007 for 2 days.
.forcenick @Dude#0007 2d <NAME>
```
"""
@impl true
def predicates,
do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_nicknames)]
@impl true
def command(msg, [user, duration | nick]) when nick != [] do
nickname = Enum.join(nick, " ")
response =
with {:ok, expiry} <- Parsers.human_future_date(duration),
{:ok, member} <- Converters.to_member(msg.guild_id, user),
nil <-
Repo.get_by(Infraction,
type: "forced_nick",
guild_id: msg.guild_id,
user_id: member.user.id,
active: true
),
{:ok} <- Api.modify_guild_member(msg.guild_id, member.user.id, nick: nickname) do
infraction_map = %{
type: "forced_nick",
guild_id: msg.guild_id,
user_id: member.user.id,
actor_id: msg.author.id,
expires_at: expiry,
data: %{
"nick" => nickname
}
}
case Handler.create(infraction_map) do
{:ok, _infraction} ->
ModLog.emit(
msg.guild_id,
"INFRACTION_CREATE",
"#{User.full_name(msg.author)} has forced the nickname `#{nickname}` on " <>
"#{User.full_name(member.user)} (`#{member.user.id}`) until #{Helpers.datetime_to_human(expiry)}"
)
"👌 user #{User.full_name(member.user)} will have nickname `#{nickname}` for #{duration}"
error ->
Logger.error(fn ->
"Error trying to create `forced_nick` infraction: #{inspect(error)}"
end)
"❌ unknown error encountered trying to create infraction, maybe retry"
end
else
%Infraction{expires_at: expiry} ->
"🚫 there is already an active `forced_nick` infraction for that member expiring at #{Helpers.datetime_to_human(expiry)}"
error ->
ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, _args) do
response = "ℹ️ usage: `#{List.first(usage())}`"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
|
lib/bolt/cogs/forcenick.ex
| 0.79049
| 0.472927
|
forcenick.ex
|
starcoder
|
defmodule Beamchmark.Formatters.HTML do
@moduledoc """
The module formats `#{inspect(Beamchmark.Suite)}` and outputs it to an HTML file.
"""
@behaviour Beamchmark.Formatter
alias __MODULE__.Templates
alias Beamchmark.Suite
alias Beamchmark.Utils
@default_output_path "index.html"
@default_auto_open true
@default_inline_assets false
@typedoc """
Configuration for `#{inspect(__MODULE__)}`.
* `output_path` – path to the file, where the report will be saved. Defaults to `#{inspect(@default_output_path)}`.
* `auto_open?` – if `true`, opens the report in system's default browser. Defaults to `#{inspect(@default_auto_open)}`.
* `inline_assets?` – if `true`, pastes contents of `.css` and `.js` assets directly into HTML. Defaults to `#{inspect(@default_inline_assets)}`.
"""
@type options_t() :: [
output_path: Path.t(),
auto_open?: boolean(),
inline_assets?: boolean()
]
@impl true
def format(%Suite{} = suite, options) do
Templates.index(suite, nil, Keyword.get(options, :inline_assets?, @default_inline_assets))
end
@impl true
def format(%Suite{} = new_suite, %Suite{} = base_suite, options) do
Templates.index(
new_suite,
base_suite,
Keyword.get(options, :inline_assets?, @default_inline_assets)
)
end
@impl true
def write(content, options) do
output_path =
options |> Keyword.get(:output_path, @default_output_path) |> Path.expand() |> format_path()
auto_open? = Keyword.get(options, :auto_open?, @default_auto_open)
dirname = Path.dirname(output_path)
unless File.exists?(dirname) do
File.mkdir_p!(dirname)
end
File.write!(output_path, content)
Mix.shell().info("The HTML file was successfully saved under #{output_path}!")
maybe_open_report(output_path, auto_open?)
end
defp maybe_open_report(_path_to_html, false), do: :ok
defp maybe_open_report(path_to_html, true) do
browser = get_browser()
{_, exit_code} = System.cmd(browser, [path_to_html])
if exit_code > 0 do
Mix.shell().error("Failed to open report using \"#{browser}\".")
else
Mix.shell().info("Opened report using \"#{browser}\".")
end
end
defp get_browser() do
case Utils.get_os_name() do
:macOS -> "open"
:Windows -> "explorer"
:Linux -> "xdg-open"
os_name -> raise RuntimeError, message: "Beamchmark not supported for #{os_name}"
end
end
defp format_path(path) do
case Utils.get_os_name() do
:Windows -> String.replace(path, "/", "\\")
_os -> path
end
end
end
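# A minimal sketch of formatting and writing a report by hand, assuming `suite`
# is a finished `Beamchmark.Suite`; the options and path are illustrative.
#
#     opts = [output_path: "reports/index.html", auto_open?: false, inline_assets?: true]
#     suite
#     |> Beamchmark.Formatters.HTML.format(opts)
#     |> Beamchmark.Formatters.HTML.write(opts)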
|
lib/beamchmark/formatters/html.ex
| 0.784732
| 0.407805
|
html.ex
|
starcoder
|
defmodule Axon.Recurrent do
@moduledoc """
Functional implementations of common recurrent neural network
routines.
Recurrent Neural Networks are commonly used for working with
sequences of data where there is some level of dependence between
outputs at different timesteps.
This module contains 3 RNN Cell functions and methods to "unroll"
cells over an entire sequence. Each cell function returns a tuple:
{new_carry, output}
Where `new_carry` is an updated carry state and `output` is the output
for a singular timestep. In order to apply an RNN across multiple timesteps,
you need to use either `static_unroll` or `dynamic_unroll`, defined below.
Unrolling an RNN is equivalent to a `map_reduce` or `scan` starting
from an initial carry state and ending with a final carry state and
an output sequence.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
import Nx.Defn
import Axon.Layers
import Axon.Activations
@doc """
GRU Cell.
"""
defn gru_cell(
input,
carry,
input_kernel,
hidden_kernel,
bias,
gate_fn \\ &sigmoid/1,
activation_fn \\ &tanh/1
) do
{hidden} = carry
{wir, wiz, win} = input_kernel
{whr, whz, whn} = hidden_kernel
{br, bz, bin, bhn} = bias
r = gate_fn.(dense(input, wir, br) + dense(hidden, whr, 0))
z = gate_fn.(dense(input, wiz, bz) + dense(hidden, whz, 0))
n = activation_fn.(dense(input, win, bin) + r * dense(hidden, whn, bhn))
new_h = (1.0 - z) * n + z * hidden
{{new_h}, new_h}
end
@doc """
LSTM Cell.
"""
defn lstm_cell(
input,
carry,
input_kernel,
hidden_kernel,
bias,
gate_fn \\ &sigmoid/1,
activation_fn \\ &tanh/1
) do
{cell, hidden} = carry
{wii, wif, wig, wio} = input_kernel
{whi, whf, whg, who} = hidden_kernel
{bi, bf, bg, bo} = bias
i = gate_fn.(dense(input, wii, bi) + dense(hidden, whi, 0))
f = gate_fn.(dense(input, wif, bf) + dense(hidden, whf, 0))
g = activation_fn.(dense(input, wig, bg) + dense(hidden, whg, 0))
o = gate_fn.(dense(input, wio, bo) + dense(hidden, who, 0))
new_c = f * cell + i * g
new_h = o * activation_fn.(new_c)
{{new_c, new_h}, new_h}
end
defnp rank_down(rnn_data) do
transform(rnn_data, fn {{cell, hidden}, input} ->
[cell, hidden, input] =
for tensor <- [cell, hidden, input] do
Nx.squeeze(tensor, axes: [1])
end
{{cell, hidden}, input}
end)
end
defnp rank_up(rnn_data) do
transform(rnn_data, fn {{cell, hidden}, input} ->
[cell, hidden, input] =
for tensor <- [cell, hidden, input] do
new_shape =
Nx.shape(tensor)
|> Tuple.insert_at(1, 1)
Nx.reshape(tensor, new_shape)
end
{{cell, hidden}, input}
end)
end
@doc """
ConvLSTM Cell.
"""
defn conv_lstm_cell(input, carry, input_kernel, hidden_kernel, bias, opts \\ []) do
opts = keyword!(opts, strides: 1, padding: :same)
{ih} = input_kernel
{hh} = hidden_kernel
{bi} = bias
{{cell, hidden}, input} = rank_down({carry, input})
gates =
Nx.add(
conv(input, ih, bi, strides: opts[:strides], padding: opts[:padding]),
conv(hidden, hh, 0, strides: opts[:strides], padding: opts[:padding])
)
{i, g, f, o} = split_gates(gates)
f = sigmoid(f + 1)
new_c = f * cell + sigmoid(i) * tanh(g)
new_h = sigmoid(o) * tanh(new_c)
rank_up({{new_c, new_h}, new_h})
end
defnp split_gates(gates) do
transform(gates, fn gates ->
channels = elem(Nx.shape(gates), 1)
split_every = div(channels, 4)
split_dims =
for i <- 0..3 do
{i * split_every, split_every}
end
split_dims
|> Enum.map(fn {start, len} -> Nx.slice_along_axis(gates, start, len, axis: 1) end)
|> List.to_tuple()
end)
end
@doc """
Dynamically unrolls an RNN.
"""
defn dynamic_unroll(cell_fn, input_sequence, carry, input_kernel, recurrent_kernel, bias) do
time_steps = transform(Nx.shape(input_sequence), &elem(&1, 1))
feature_dims = transform(Nx.rank(input_sequence), &List.duplicate(0, &1 - 2))
initial_shape =
transform({Nx.shape(input_sequence), Nx.shape(elem(input_kernel, 0))}, fn {shape, kernel} ->
put_elem(shape, 2, elem(kernel, 1))
end)
init_sequence = Nx.broadcast(0.0, initial_shape)
i = Nx.tensor(0)
{_, carry, output, _, _, _, _} =
while {i, carry, init_sequence, input_sequence, input_kernel, recurrent_kernel, bias},
Nx.less(i, time_steps) do
sequence = Nx.slice_along_axis(input_sequence, i, 1, axis: 1)
indices = transform({feature_dims, i}, fn {feature_dims, i} -> [0, i] ++ feature_dims end)
{carry, output} = cell_fn.(sequence, carry, input_kernel, recurrent_kernel, bias)
update_sequence = Nx.put_slice(init_sequence, indices, output)
{i + 1, carry, update_sequence, input_sequence, input_kernel, recurrent_kernel, bias}
end
{carry, output}
end
@doc """
Statically unrolls an RNN.
"""
defn static_unroll(cell_fn, input_sequence, carry, input_kernel, recurrent_kernel, bias) do
transform(
{cell_fn, input_sequence, carry, input_kernel, recurrent_kernel, bias},
fn {cell_fn, input_sequence, carry, input_kernel, recurrent_kernel, bias} ->
time_steps = elem(Nx.shape(input_sequence), 1)
{carry, outputs} =
for t <- 0..(time_steps - 1), reduce: {carry, []} do
{carry, outputs} ->
input = Nx.slice_along_axis(input_sequence, t, 1, axis: 1)
{carry, output} = cell_fn.(input, carry, input_kernel, recurrent_kernel, bias)
{carry, [output | outputs]}
end
{carry, Nx.concatenate(Enum.reverse(outputs), axis: 1)}
end
)
end
end
|
lib/axon/recurrent.ex
| 0.907916
| 0.829146
|
recurrent.ex
|
starcoder
|
defmodule KVX.Bucket do
@moduledoc """
Defines a Bucket.
A bucket maps to an underlying data store, controlled by the
adapter. For example, `KVX` ships with a `KVX.Bucket.ExShards`
adapter that stores data into `ExShards` distributed memory
storage – [ExShards](https://github.com/cabol/ex_shards).
For example, the bucket:
defmodule MyModule do
use KVX.Bucket
end
Could be configured with:
config :kvx,
adapter: KVX.Bucket.ExShards,
ttl: 10
Most of the configuration that goes into the `config` is specific to
the adapter, so check `KVX.Bucket.ExShards` documentation for more
information. However, some configuration is shared across
all adapters, they are:
* `:ttl` - The time in seconds to wait until the `key` expires.
Value `:infinity` will wait indefinitely (default: 3600)
Check adapters documentation for more information.
"""
@type bucket :: atom
@type key :: term
@type value :: term
@type ttl :: integer | :infinity
@doc false
defmacro __using__(_opts) do
quote do
@behaviour KVX.Bucket
@adapter (Application.get_env(:kvx, :adapter, KVX.Bucket.ExShards))
@default_ttl (Application.get_env(:kvx, :ttl, :infinity))
def __adapter__ do
@adapter
end
def __ttl__ do
@default_ttl
end
def new(bucket, opts \\ []) do
@adapter.new(bucket, opts)
end
def add(bucket, key, value, ttl \\ @default_ttl) do
@adapter.add(bucket, key, value, ttl)
end
def set(bucket, key, value, ttl \\ @default_ttl) do
@adapter.set(bucket, key, value, ttl)
end
def mset(bucket, kv_pairs, ttl \\ @default_ttl) when is_list(kv_pairs) do
@adapter.mset(bucket, kv_pairs, ttl)
end
def get(bucket, key) do
@adapter.get(bucket, key)
end
def mget(bucket, keys) when is_list(keys) do
@adapter.mget(bucket, keys)
end
def find_all(bucket, query \\ nil) do
@adapter.find_all(bucket, query)
end
def delete(bucket, key) do
@adapter.delete(bucket, key)
end
def delete(bucket) do
@adapter.delete(bucket)
end
def flush(bucket) do
@adapter.flush(bucket)
end
end
end
## Setup Commands
@doc """
Creates a new bucket if it doesn't exist. If the bucket already exist,
nothing happens – it works as an idempotent operation.
## Example
MyBucket.new(:mybucket)
"""
@callback new(bucket, [term]) :: bucket
## Storage Commands
@doc """
Store this data, only if it does not already exist. If the item already
exists, the add fails with a `KVX.ConflictError` exception.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.add(:mybucket, "hello", "world")
"""
@callback add(bucket, key, value, ttl) :: bucket | KVX.ConflictError
@doc """
Most common command. Store this data, possibly overwriting any existing data.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.set(:mybucket, "hello", "world")
"""
@callback set(bucket, key, value, ttl) :: bucket
@doc """
Store this bulk data, possibly overwriting any existing data.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.mset(:mybucket, [{"a": 1}, {"b", "2"}])
"""
@callback mset(bucket, [{key, value}], ttl) :: bucket
## Retrieval Commands
@doc """
Get the value of `key`. If the key does not exist the special value `nil`
is returned.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.get(:mybucket, "hello")
"""
@callback get(bucket, key) :: value | nil
@doc """
Returns the values of all specified keys. For every key that does not hold
a string value or does not exist, the special value `nil` is returned.
Because of this, the operation never fails.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.mget(:mybucket, ["hello", "world"])
"""
@callback mget(bucket, [key]) :: [value | nil]
@doc """
Returns all objects/tuples `{key, value}` that matches with the specified
`query`. The `query` type/spec depends on each adapter implementation –
`:ets.match_spec` in case of `KVX.Bucket.ExShards`.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.find_all(bucket, Ex2ms.fun do object -> object end)
"""
@callback find_all(bucket, query :: term) :: [{key, value}]
## Cleanup functions
@doc """
Removes an item from the bucket, if it exists.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.delete(:mybucket, "hello")
"""
@callback delete(bucket, key) :: bucket
@doc """
Deletes an entire bucket, if it exists.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.delete(:mybucket)
"""
@callback delete(bucket) :: bucket
@doc """
Invalidate all existing cache items.
If `bucket` doesn't exist, it will raise an argument error.
## Example
MyBucket.flush(:mybucket)
"""
@callback flush(bucket) :: bucket
end
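# A minimal usage sketch matching the callback examples above, assuming the
# default `KVX.Bucket.ExShards` adapter is configured.
defmodule MyBucket do
  @moduledoc false
  use KVX.Bucket
end

# MyBucket.new(:mybucket)
# MyBucket.set(:mybucket, "hello", "world")
# MyBucket.get(:mybucket, "hello")
# #=> "world"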
|
lib/kvx/bucket.ex
| 0.92058
| 0.512693
|
bucket.ex
|
starcoder
|
defmodule Maxwell.Adapter.Util do
@moduledoc """
Utils for Adapter
"""
@chunk_size 4 * 1024 * 1024
alias Maxwell.{Conn, Multipart, Query}
@doc """
Append path and query string to url
* `url` - `conn.url`
* `path` - `conn.path`
* `query` - `conn.query`
* `type` - `:char_list` or `:string`, default is :string
### Examples
iex> url_serialize("http://example.com", "/home", %{"name" => "foo"})
"http://example.com/home?name=foo"
"""
def url_serialize(url, path, query_string, type \\ :string) do
url = url |> append_query_string(path, query_string)
case type do
:string -> url
:char_list -> url |> to_charlist
end
end
@doc """
Converts the headers map to a list of tuples.
* `headers` - `Map.t`, for example: `%{"content-type" => "application/json"}`
### Examples
iex> header_serialize(%{"content-type" => "application/json"})
[{"content-type", "application/json"}]
"""
def header_serialize(headers) do
Enum.into(headers, [])
end
@doc """
Add `content-type` to the headers if it is missing;
add `content-length` to the headers if the request is not chunked.
* `chunked` - `boolean`, is chunked mode
* `conn` - `Maxwell.Conn`
"""
def file_header_transform(chunked, conn) do
%Conn{req_body: {:file, filepath}, req_headers: req_headers} = conn
req_headers =
case Map.has_key?(req_headers, "content-type") do
true ->
req_headers
false ->
content_type =
filepath
|> Path.extname()
|> String.trim_leading(".")
|> MIME.type()
conn
|> Conn.put_req_header("content-type", content_type)
|> Map.get(:req_headers)
end
case chunked or Map.has_key?(req_headers, "content-length") do
true ->
req_headers
false ->
len = :filelib.file_size(filepath)
conn
|> Conn.put_req_header("content-length", len)
|> Map.get(:req_headers)
end
end
@doc """
Check req_headers has transfer-encoding: chunked.
* `conn` - `Maxwell.Conn`
"""
def chunked?(conn) do
case Conn.get_req_header(conn, "transfer-encoding") do
nil -> false
type -> "chunked" == String.downcase(type)
end
end
@doc """
Encode multipart form.
* `conn` - `Maxwell.Conn`
* `multiparts` - see `Maxwell.Multipart.encode_form/2`
"""
def multipart_encode(conn, multiparts) do
boundary = Multipart.new_boundary()
body = {&multipart_body/1, {:start, boundary, multiparts}}
len = Multipart.len_mp_stream(boundary, multiparts)
headers =
conn
|> Conn.put_req_header("content-type", "multipart/form-data; boundary=#{boundary}")
|> Conn.put_req_header("content-length", len)
|> Map.get(:req_headers)
{headers, body}
end
@doc """
Fetches the first element from a stream.
"""
def stream_iterate(filepath) when is_binary(filepath) do
filepath
|> File.stream!([], @chunk_size)
|> stream_iterate
end
def stream_iterate(next_stream_fun) when is_function(next_stream_fun, 1) do
case next_stream_fun.({:cont, nil}) do
{:suspended, item, next_stream_fun} -> {:ok, item, next_stream_fun}
{:halted, _} -> :eof
{:done, _} -> :eof
end
end
def stream_iterate(stream) do
case Enumerable.reduce(stream, {:cont, nil}, fn item, nil -> {:suspend, item} end) do
{:suspended, item, next_stream} -> {:ok, item, next_stream}
{:done, _} -> :eof
{:halted, _} -> :eof
end
end
defp multipart_body({:start, boundary, multiparts}) do
{body, _size} = Multipart.encode_form(boundary, multiparts)
{:ok, body, :end}
end
defp multipart_body(:end), do: :eof
defp append_query_string(url, path, query) when query == %{}, do: url <> path
defp append_query_string(url, path, query) do
query_string = Query.encode(query)
url <> path <> "?" <> query_string
end
end
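# A minimal sketch of iterating a file chunk by chunk with `stream_iterate/1`;
# the path is hypothetical.
#
#     {:ok, chunk, next} = Maxwell.Adapter.Util.stream_iterate("priv/upload.bin")
#     {:ok, chunk2, next2} = Maxwell.Adapter.Util.stream_iterate(next)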
|
lib/maxwell/adapter/util.ex
| 0.810704
| 0.413122
|
util.ex
|
starcoder
|