## File: lib/fe/result.ex
defmodule FE.Result do
@moduledoc """
`FE.Result` is a data type for representing the output of a computation that either succeeded or failed.
"""
@type t(a, b) :: {:ok, a} | {:error, b}
@type t(a) :: t(a, any)
alias FE.{Maybe, Review}
defmodule Error do
defexception [:message]
end
@doc """
Creates a `FE.Result` representing a successful output of a computation.
"""
@spec ok(a) :: t(a) when a: var
def ok(value), do: {:ok, value}
@doc """
Creates a `FE.Result` representing an erroneous output of a computation.
"""
@spec error(a) :: t(any, a) when a: var
def error(value), do: {:error, value}
@doc """
Transforms a success value in a `FE.Result` using a provided function.
## Examples
iex> FE.Result.map(FE.Result.error("foo"), &String.length/1)
FE.Result.error("foo")
iex> FE.Result.map(FE.Result.ok("foo"), &String.length/1)
FE.Result.ok(3)
"""
@spec map(t(a, b), (a -> c)) :: t(c, b) when a: var, b: var, c: var
def map(result, f)
def map({:error, _} = error, _), do: error
def map({:ok, value}, f), do: {:ok, f.(value)}
@doc """
Transforms an erroneous value in a `FE.Result` using a provided function.
## Examples
iex> FE.Result.map_error(FE.Result.ok("foo"), &String.length/1)
FE.Result.ok("foo")
iex> FE.Result.map_error(FE.Result.error("foo"), &String.length/1)
FE.Result.error(3)
"""
@spec map_error(t(a, b), (b -> c)) :: t(a, c) when a: var, b: var, c: var
def map_error(result, f)
def map_error({:ok, _} = ok, _), do: ok
def map_error({:error, value}, f), do: {:error, f.(value)}
@doc """
Returns the success value stored in a `FE.Result` or a provided default value if an error is passed.
## Examples
iex> FE.Result.unwrap_or(FE.Result.error("foo"), "default")
"default"
iex> FE.Result.unwrap_or(FE.Result.ok("bar"), "default")
"bar"
"""
@spec unwrap_or(t(a), a) :: a when a: var
def unwrap_or(result, default)
def unwrap_or({:error, _}, default), do: default
def unwrap_or({:ok, value}, _), do: value
@doc """
Returns the success value stored in a `FE.Result`, raises an `FE.Result.Error` if an error is passed.
"""
@spec unwrap!(t(a)) :: a | no_return() when a: var
def unwrap!(result)
def unwrap!({:ok, value}), do: value
def unwrap!({:error, error}) do
raise(Error, "unwrapping Result with an error: #{inspect(error)}")
end
@doc """
Runs the first function on a success value, or the second function on an
error value, returning the result.
## Examples
iex> FE.Result.ok(1) |> FE.Result.unwrap_with(&inspect/1, &("error: "<> inspect(&1)))
"1"
iex> FE.Result.error("db down") |> FE.Result.unwrap_with(&inspect/1, &("error: "<> &1))
"error: db down"
"""
@spec unwrap_with(t(a, b), (a -> c), (b -> d)) :: c | d when a: var, b: var, c: var, d: var
def unwrap_with(result, on_ok, on_error)
def unwrap_with({:ok, value}, f, _) when is_function(f, 1), do: f.(value)
def unwrap_with({:error, error}, _, f) when is_function(f, 1), do: f.(error)
@doc """
Applies the success value of a `FE.Result` to the provided function and returns
its return value, which should be a `FE.Result`.
Useful for chaining together a computation consisting of multiple steps, each of
which takes a success value wrapped in a `FE.Result` as an argument and returns
a `FE.Result`.
## Examples
iex> FE.Result.error("foo") |> FE.Result.and_then(&FE.Result.ok(String.length(&1)))
FE.Result.error("foo")
iex> FE.Result.ok("bar") |> FE.Result.and_then(&FE.Result.ok(String.length(&1)))
FE.Result.ok(3)
iex> FE.Result.ok("bar") |> FE.Result.and_then(fn _ -> FE.Result.error(:baz) end)
FE.Result.error(:baz)
"""
@spec and_then(t(a, b), (a -> t(c, b))) :: t(c, b) when a: var, b: var, c: var
def and_then(result, f)
def and_then({:error, _} = error, _), do: error
def and_then({:ok, value}, f), do: f.(value)
@doc """
Folds over the provided list of elements, applying each element and the
current accumulator to the provided function.
The provided function returns a new accumulator, which should be a `FE.Result`.
The provided `FE.Result` is the initial accumulator.
Returns the last value returned by the function.
Stops and returns an error if at any point the function returns an error.
## Examples
iex> FE.Result.fold(FE.Result.error(:error), [], &FE.Result.ok(&1 + &2))
FE.Result.error(:error)
iex> FE.Result.fold(FE.Result.ok(5), [], &FE.Result.ok(&1 + &2))
FE.Result.ok(5)
iex> FE.Result.fold(FE.Result.error(:foo), [1, 2], &FE.Result.ok(&1 + &2))
FE.Result.error(:foo)
iex> FE.Result.fold(FE.Result.ok(5), [1, 2, 3], &FE.Result.ok(&1 * &2))
FE.Result.ok(30)
iex> FE.Result.fold(FE.Result.ok(5), [1, 2, 3], fn
...> _, 10 -> FE.Result.error("it's a ten!")
...> x, y -> FE.Result.ok(x * y)
...> end)
FE.Result.error("it's a ten!")
"""
@spec fold(t(a, b), [c], (c, a -> t(a, b))) :: t(a, b) when a: var, b: var, c: var
def fold(result, elems, f) do
Enum.reduce_while(elems, result, fn elem, acc ->
case and_then(acc, fn value -> f.(elem, value) end) do
{:ok, _} = ok -> {:cont, ok}
{:error, _} = error -> {:halt, error}
end
end)
end
@doc """
Works like `fold/3`, except that the first element of the provided list is removed
from it, converted to a success `FE.Result` and treated as the initial accumulator.
Then, fold is executed over the remainder of the provided list.
## Examples
iex> FE.Result.fold([1], fn _, _ -> FE.Result.error(:one) end)
FE.Result.ok(1)
iex> FE.Result.fold([1, 2, 3], &(FE.Result.ok(&1 + &2)))
FE.Result.ok(6)
iex> FE.Result.fold([1, 2, 3], fn
...> _, 3 -> FE.Result.error(:three)
...> x, y -> FE.Result.ok(x + y)
...> end)
FE.Result.error(:three)
"""
@spec fold([c], (c, a -> t(a, b))) :: t(a, b) when a: var, b: var, c: var
def fold(elems, f)
def fold([], _), do: raise(Enum.EmptyError)
def fold([head | tail], f), do: fold(ok(head), tail, f)
@doc """
Returns the `FE.Result.ok` values from a list of `FE.Result`s.
## Examples
iex> FE.Result.oks([FE.Result.ok(:good), FE.Result.error(:bad), FE.Result.ok(:better)])
[:good, :better]
"""
@spec oks([t(a, any)]) :: [a] when a: var
def oks(e) do
Enum.reduce(e, [], fn
{:ok, val}, acc -> [val | acc]
{:error, _}, acc -> acc
end)
|> Enum.reverse()
end
@doc """
If a list of `FE.Result`s is all `FE.Result.ok`s, returns a `FE.Result.ok`
where the value is a list of the unwrapped values.
Otherwise, returns `FE.Result.error` with the first erroneous value.
## Examples
iex> FE.Result.all_ok([FE.Result.ok(:a), FE.Result.ok(:b), FE.Result.ok(:c)])
FE.Result.ok([:a, :b, :c])
iex> FE.Result.all_ok([FE.Result.ok(:a), FE.Result.error("BAD APPLE"), FE.Result.ok(:c)])
FE.Result.error("BAD APPLE")
"""
@spec all_ok([t(a, any)]) :: t([a], any) when a: var
def all_ok(list), do: all_ok0(list, [])
defp all_ok0([], res) when is_list(res), do: Enum.reverse(res) |> ok()
defp all_ok0([{:ok, v} | tail], res), do: all_ok0(tail, [v | res])
defp all_ok0([{:error, e} | _], _), do: {:error, e}
@doc """
Transforms `FE.Result` to a `FE.Maybe`.
A `FE.Result` with a successful value becomes a `FE.Maybe` with the same value.
An erroneous `FE.Result` becomes a `FE.Maybe` without a value.
## Examples
iex> FE.Result.to_maybe(FE.Result.ok(13))
FE.Maybe.just(13)
iex> FE.Result.to_maybe(FE.Result.error("something went wrong"))
FE.Maybe.nothing()
"""
@spec to_maybe(t(a, any)) :: Maybe.t(a) when a: var
def to_maybe(result)
def to_maybe({:ok, value}), do: Maybe.just(value)
def to_maybe({:error, _}), do: Maybe.nothing()
@doc """
Transforms `FE.Result` to a `FE.Review`.
A `FE.Result` with a successful value becomes an accepted `FE.Review` with
the same value.
An erroneous `FE.Result` whose error output is a list becomes a rejected
`FE.Review` with exactly that list as its issues.
An erroneous `FE.Result` with any other error term becomes a rejected
`FE.Review` with that term as its single issue.
## Examples
iex> FE.Result.to_review(FE.Result.ok(23))
FE.Review.accepted(23)
iex> FE.Result.to_review(FE.Result.error(["wrong", "bad", "very bad"]))
FE.Review.rejected(["wrong", "bad", "very bad"])
iex> FE.Result.to_review(FE.Result.error("error"))
FE.Review.rejected(["error"])
"""
@spec to_review(t(a, b) | t(a, [b])) :: Review.t(a, b) when a: var, b: var
def to_review(result)
def to_review({:ok, value}), do: Review.accepted(value)
def to_review({:error, values}) when is_list(values), do: Review.rejected(values)
def to_review({:error, value}), do: Review.rejected([value])
end
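# A minimal usage sketch (this example module is not part of the library):
# two fallible steps chained with `and_then/2`, with the final success value
# transformed by `map/2`.
defmodule FE.ResultExample do
  def parse_and_double(string) do
    FE.Result.ok(string)
    |> FE.Result.and_then(&parse_integer/1)
    |> FE.Result.map(&(&1 * 2))
  end

  # Wraps Integer.parse/1 so that it returns a `FE.Result`.
  defp parse_integer(string) do
    case Integer.parse(string) do
      {int, ""} -> FE.Result.ok(int)
      _ -> FE.Result.error({:not_an_integer, string})
    end
  end
end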
## File: lib/rtypes/extractor.ex
defmodule RTypes.Extractor do
@doc """
Recursively extract and instantiate AST representation of a type.
## Arguments
* `mod` - a module name or a tuple of a module name and object code
* `type_name` - type name
* `type_args` - arguments, if any, for the type. The arguments should be
represented as AST, for example, `{:type, 0, :list, []}`
## Usage
```
iex> t = RTypes.Extractor.extract_type(:inet, :port_number, [])
iex> match?({:type, _, :range, [{:integer, _, 0}, {:integer, _, 65535}]}, t)
true
iex> t = RTypes.Extractor.extract_type(Keyword, :t, [{:type, 0, :list, []}])
iex> match?({:type, _, :list, [{:type, _, :tuple, [{:type, _, :atom, []}, {:type, _, :list, []}]}]}, t)
true
```
"""
@type type :: {:type, line :: integer(), atom(), [type | value]}
@type value :: {value_tag :: atom(), line :: integer(), term()}
@type unfolded_type :: {:type, line :: integer(), atom(), [type | value]}
@spec extract_type(what, atom(), [type | value]) :: unfolded_type()
when what: module() | {module(), binary()}
def extract_type(mod, type_name, type_args) do
{typ, mod_types} = find_type(mod, type_name, Enum.count(type_args))
unfold_type(bind_type_vars(typ, type_args), mod_types)
end
defp find_type(what, type_name, arity) do
mod = ensure_module_loaded(what)
mod_types =
case Code.Typespec.fetch_types(mod) do
{:ok, types} ->
Enum.map(types, fn {_, {local_name, typ, params}} ->
{{local_name, Enum.count(params)}, {typ, params}}
end)
|> Enum.into(%{})
:error ->
raise "can not extract types from module #{mod}"
end
case Map.fetch(mod_types, {type_name, arity}) do
{:ok, typ} ->
{typ, mod_types}
:error ->
raise "can not find type #{type_name}/#{arity} in module #{mod}"
end
end
defp unfold_type({:type, line, type_name, type_args}, local_types) do
{:type, line, type_name, unfold_type_args(type_args, local_types)}
end
defp unfold_type({:user_type, _linum, user_type, args}, local_types) do
case Map.fetch(local_types, {user_type, Enum.count(args)}) do
{:ok, typ} ->
unfold_type(bind_type_vars(typ, args), local_types)
:error ->
raise "user type #{user_type} not found"
end
end
defp unfold_type(
{:remote_type, _, [{:atom, _, mod}, {:atom, _, type_name}, args]},
local_types
) do
extract_type(mod, type_name, unfold_type_args(args, local_types))
end
## concrete value
defp unfold_type({_kind, _line, _val} = value, _) do
value
end
defp unfold_type_args(:any, _), do: :any
defp unfold_type_args(args, local_types) do
Enum.map(args, &unfold_type(&1, local_types))
end
defp bind_parameters(typ, parameters, type_vars, type_args) do
vars =
Enum.zip(type_vars, type_args)
|> Enum.map(fn {{:var, _, var_name}, var_value} ->
{var_name, var_value}
end)
|> Enum.into(%{})
case parameters do
:any ->
:any
xs when is_list(xs) ->
Enum.map(parameters, fn
{:var, _, var_name} ->
case Map.fetch(vars, var_name) do
{:ok, var_value} -> var_value
:error -> raise "can not bind type variable #{var_name} for type #{inspect(typ)}"
end
{:type, line, type_name, parameters1} ->
bound_parameters = bind_parameters(type_name, parameters1, type_vars, type_args)
{:type, line, type_name, bound_parameters}
{:user_type, line, type_name, parameters1} ->
bound_parameters = bind_parameters(type_name, parameters1, type_vars, type_args)
{:user_type, line, type_name, bound_parameters}
# TODO: {:remote_type ...} -> ?
{_kind, _line, _val} = value ->
value
end)
end
end
defp bind_type_vars({{:type, line, type_name, :any}, _type_vars}, _type_args) do
{:type, line, type_name, :any}
end
defp bind_type_vars({{:type, line, type_name, parameters}, type_vars}, type_args) do
bound_parameters =
bind_parameters(
type_name,
Enum.map(parameters, fn typ ->
bind_type_vars({typ, type_vars}, type_args)
end),
type_vars,
type_args
)
{:type, line, type_name, bound_parameters}
end
defp bind_type_vars({{:user_type, line, type_name, parameters}, type_vars}, type_args) do
bound_parameters =
bind_parameters(
type_name,
Enum.map(parameters, fn typ ->
bind_type_vars({typ, type_vars}, type_args)
end),
type_vars,
type_args
)
{:user_type, line, type_name, bound_parameters}
end
defp bind_type_vars({{:remote_type, line, [mod, type_name, parameters]}, type_vars}, type_args) do
bound_parameters =
bind_parameters(
type_name,
Enum.map(parameters, fn typ ->
bind_type_vars({typ, type_vars}, type_args)
end),
type_vars,
type_args
)
{:remote_type, line, [mod, type_name, bound_parameters]}
end
defp bind_type_vars({{:var, _line, _value} = var, [type_var]}, [type_arg]) do
[typ] = bind_parameters(:var, [var], [type_var], [type_arg])
typ
end
## operations on literal integers
defp bind_type_vars({{:op, _line, :-, {:integer, line, val}}, []}, []) do
{:integer, line, -1 * val}
end
defp bind_type_vars({{:op, _line, :+, {:integer, line, val}}, []}, []) do
{:integer, line, val}
end
use Bitwise
defp bind_type_vars({{:op, _line, :bnot, {:integer, line, val}}, []}, []) do
{:integer, line, bnot(val)}
end
defp bind_type_vars({{:op, line, :band, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, band(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :bxor, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, bxor(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :bor, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, bor(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :bsl, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, bsl(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :bsr, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, bsr(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :+, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, lhs + rhs}
end
defp bind_type_vars({{:op, line, :-, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, lhs - rhs}
end
defp bind_type_vars({{:op, line, :*, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, lhs * rhs}
end
defp bind_type_vars({{:op, line, :div, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, div(lhs, rhs)}
end
defp bind_type_vars({{:op, line, :rem, {:integer, _, lhs}, {:integer, _, rhs}}, []}, []) do
{:integer, line, rem(lhs, rhs)}
end
# literals
defp bind_type_vars({{_kind, _line, _value} = val, _}, _), do: val
defp ensure_module_loaded(mod) when is_atom(mod) do
case :code.ensure_loaded(mod) do
{:module, ^mod} ->
case :code.is_loaded(mod) do
{:file, path} when is_list(path) ->
# `path` is something like `/foo/blah/Elixir.MyModule.beam`
# Add the directory, just in case
true = :code.add_path(String.to_charlist(Path.dirname(path)))
mod
{:file, _path} ->
# when `path` is not a list, do nothing
mod
false ->
raise "Module #{mod} can not be loaded"
end
{:error, reason} ->
raise "Module #{mod} can not be loaded, reason #{inspect(reason)}"
end
end
defp ensure_module_loaded({mod_name, obj_code}) do
case :code.load_binary(mod_name, 'rtypes', obj_code) do
{:module, ^mod_name} ->
obj_code
{:error, reason} ->
raise "Module #{mod_name} can not be loaded, reason #{inspect(reason)}"
end
end
end
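# A hedged usage sketch (this example module is not part of the library):
# mirrors the doctest above, extracting `:inet.port_number/0` and pattern
# matching on the unfolded AST node.
defmodule RTypes.ExtractorExample do
  def port_number_range do
    case RTypes.Extractor.extract_type(:inet, :port_number, []) do
      {:type, _, :range, [{:integer, _, lo}, {:integer, _, hi}]} -> {lo, hi}
      other -> {:error, {:unexpected_type, other}}
    end
  end
end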
## File: lib/mix/tasks/expo.msgmft.ex
defmodule Mix.Tasks.Expo.Msgfmt do
@shortdoc "Generate binary message catalog from textual translation description."
@moduledoc """
Generate binary message catalog from textual translation description.
mix expo.msgfmt [PO_FILE] [OPTIONS]
## Options
* `--use-fuzzy` - use fuzzy entries in output
* `--endianness=BYTEORDER` - write out 32-bit numbers in the given byte
order (big or little, default depends on platform)
* `--output-file=FILE` - write output to specified file
* `--statistics` - print statistics about translations
"""
use Mix.Task
alias Expo.Mo
alias Expo.Po
@switches [
use_fuzzy: :boolean,
endianness: :string,
output_file: :string,
statistics: :boolean
]
@default_options [use_fuzzy: false, endianness: "little", statistics: false]
@impl Mix.Task
def run(args) do
Application.ensure_all_started(:expo)
{opts, argv} = OptionParser.parse!(args, switches: @switches)
opts = Keyword.merge(@default_options, opts)
opts = Keyword.update!(opts, :endianness, &parse_endianness/1)
{output_file, mo_compose_opts} = Keyword.pop(opts, :output_file)
source_file =
case argv do
[] ->
Mix.raise("""
mix expo.msgfmt failed due to missing po file path argument
""")
[_file_one, _file_two | _other_files] ->
Mix.raise("""
mix expo.msgfmt failed due to multiple po file path arguments
Only one is currently supported
""")
[file] ->
file
end
translations = Po.parse_file!(source_file)
output = Mo.compose(translations, mo_compose_opts)
case output_file do
nil -> IO.binwrite(:standard_io, IO.iodata_to_binary(output))
file -> File.write!(file, output)
end
if Keyword.fetch!(mo_compose_opts, :statistics) do
receive do
{Mo, :translation_count, count} ->
# Not using Mix.shell().info/1 since that will print into stdout and not stderr
IO.puts(:standard_error, "#{count} translated messages.")
end
end
end
defp parse_endianness(endianness)
defp parse_endianness("little"), do: :little
defp parse_endianness("big"), do: :big
defp parse_endianness(other),
do:
Mix.raise("""
mix expo.msgfmt failed due to invalid endianness option
Expected: "little" or "big"
Received: #{inspect(other)}
""")
end
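# A hedged invocation sketch (paths are illustrative). From a shell in a Mix
# project, this reads the PO file and writes the binary catalog:
#
#     mix expo.msgfmt priv/gettext/default.po --output-file=priv/gettext/default.mo --statistics
#
# Without `--output-file` the catalog is written to standard output, as
# implemented in `run/1` above.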
## File: apps/transform_wkt/lib/transform/wkt/point.ex
defmodule Transform.Wkt.Point do
@moduledoc """
`Transform.Step.t()` impl for transformation of latitude/longitude into a
well-known text (WKT) point object.
## Init options
* `longitude` - String or list of strings as path to `Dictionary.Type.Longitude` field.
* `latitude` - String or list of strings as path to `Dictionary.Type.Latitude` field.
* `to` - String or list of strings as path to transformed value field.
"""
use Definition, schema: Transform.Wkt.Point.V1
use JsonSerde, alias: "transform_wkt_point"
@type t :: %__MODULE__{
longitude: String.t(),
latitude: String.t(),
to: String.t()
}
defstruct [:longitude, :latitude, :to]
defimpl Transform.Step, for: __MODULE__ do
import Dictionary.Access, only: [to_access_path: 1]
def transform_dictionary(%{longitude: longitude, latitude: latitude, to: to}, dictionary) do
longitude_path = to_access_path(longitude)
latitude_path = to_access_path(latitude)
to_path = to_access_path(to)
new_name = List.wrap(to) |> List.last()
with :ok <-
Dictionary.validate_field(dictionary, longitude_path, Dictionary.Type.Longitude),
:ok <- Dictionary.validate_field(dictionary, latitude_path, Dictionary.Type.Latitude) do
{:ok, new_field} = Dictionary.Type.Wkt.Point.new(name: new_name)
put_in(dictionary, to_path, new_field)
|> Ok.ok()
end
end
def create_function(step, _dictionary) do
longitude_path = to_access_path(step.longitude)
latitude_path = to_access_path(step.latitude)
to_path = to_access_path(step.to)
fn value ->
longitude = get_in(value, longitude_path)
latitude = get_in(value, latitude_path)
point = %Geo.Point{coordinates: {longitude, latitude}}
{:ok, wkt} = Geo.WKT.encode(point)
put_in(value, to_path, wkt)
|> Ok.ok()
end
|> Ok.ok()
end
end
end
defmodule Transform.Wkt.Point.V1 do
@moduledoc false
use Definition.Schema
def s do
schema(%Transform.Wkt.Point{
longitude: access_path(),
latitude: access_path(),
to: access_path()
})
end
end
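# A hedged usage sketch (field names and output are illustrative; the
# dictionary argument is ignored by `create_function/2` above, so `nil`
# suffices here):
#
#     step = %Transform.Wkt.Point{longitude: "long", latitude: "lat", to: "wkt"}
#     {:ok, transform} = Transform.Step.create_function(step, nil)
#     {:ok, record} = transform.(%{"long" => -87.6, "lat" => 41.8})
#     record["wkt"] #=> "POINT(-87.6 41.8)"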
## File: lib/level_web/schema/mutations.ex
defmodule LevelWeb.Schema.Mutations do
@moduledoc false
use Absinthe.Schema.Notation
@desc "Interface for payloads containing validation data."
interface :validatable do
field :success, non_null(:boolean)
field :errors, list_of(:error)
resolve_type fn _, _ -> nil end
end
@desc "A validation error."
object :error do
@desc "The name of the invalid attribute."
field :attribute, non_null(:string)
@desc "A human-friendly error message."
field :message, non_null(:string)
end
@desc "The response to updating a user."
object :update_user_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :user, :user
interface :validatable
end
@desc "The response to creating a space."
object :create_space_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :space, :space
interface :validatable
end
@desc "The response to updating a space."
object :update_space_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :space, :space
interface :validatable
end
@desc "The response to completing a setup step."
object :complete_setup_step_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc """
The next state.
"""
field :state, :space_setup_state
end
@desc "The response to creating a group."
object :create_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
interface :validatable
end
@desc "The response to updating a group."
object :update_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
interface :validatable
end
@desc "The response to bulk creating groups."
object :bulk_create_groups_payload do
@desc "A list of result payloads for each group."
field :payloads, non_null(list_of(:bulk_create_group_payload))
end
@desc "The payload for an individual group in a bulk create payload."
object :bulk_create_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
@desc "The original arguments for this particular object."
field :args, non_null(:bulk_create_group_args)
interface :validatable
end
@desc "The arguments for an individual bulk-created group."
object :bulk_create_group_args do
@desc "The name of the group."
field :name, non_null(:string)
end
@desc "The response to updating a group."
object :update_group_membership_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated membership. If the mutation was not successful,
this field may be null.
"""
field :membership, :group_membership
@desc """
The group. If the mutation was not successful,
this field may be null.
"""
field :group, :group
interface :validatable
end
@desc "The payload for an updating group bookmark state."
object :bookmark_group_payload do
@desc "The current bookmark status."
field :is_bookmarked, non_null(:boolean)
@desc "The group."
field :group, non_null(:group)
end
@desc "The response to posting a message to a group."
object :create_post_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :post, :post
interface :validatable
end
@desc "The response to replying to a post."
object :create_reply_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :reply, :reply
interface :validatable
end
@desc "The response to recording a post view."
object :record_post_view_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
interface :validatable
end
@desc "The response to dismissing a mention."
object :dismiss_mentions_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The posts for which mentions were dismissed. If the mutation was not successful,
this field may be null.
"""
field :posts, list_of(:post)
interface :validatable
end
@desc "The response to dismissing posts."
object :dismiss_posts_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The posts that were dismissed. If the mutation was not successful,
this field may be null.
"""
field :posts, list_of(:post)
interface :validatable
end
@desc "The response to registering a push subscription."
object :register_push_subscription_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
interface :validatable
end
end
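# A hedged client-side sketch (the mutation field and its arguments are
# assumptions; this module only defines the payload objects). A client
# selecting the :validatable fields of :create_post_payload might issue:
#
#     mutation CreatePost($body: String!) {
#       createPost(body: $body) {
#         success
#         errors { attribute message }
#         post { body }
#       }
#     }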
## File: lib/much_data.ex
defmodule MuchData do
use MuchData.Types
import ExAequo.KeywordParams, only: [tuple_from_params: 3]
@moduledoc """
Documentation for `MuchData`.
## MuchData
Reassemble and merge map-like data from many sources and many formats.
- Sources supported in this version: files
- Formats supported in this version: YAML
All the examples here are using the fixtures found in [fixtures](https://github.com/RobertDober/much_data/test/fixtures)
### Parsing a single file
By default, the file's name (without extension) is added as a prefix key
iex(0)> parse_file("test/fixtures/top1.yml")["top1"]["a"]
42
A convenience function is provided to access nested string keys
iex(1)> parse_file("test/fixtures/top1.yml") |> dig("top1.level1.a")
{:ok, %{"x" => 1, "y" => 2}}
The prefix key for the filename can either be removed
iex(2)> parse_file("test/fixtures/top1.yml", remove_filename: true)
...(2)> |> dig!("level1.a.x")
1
Alternatively the whole path of the file can be used as composed key
iex(3)> parse_file("test/fixtures/top1.yml", expand_path: true)
%{"test" => %{"fixtures" => %{"top1.yml" => %{"a" => 42, "level1" => %{"a" => %{"x" => 1, "y" => 2}}}}}}
"""
alias __MODULE__.Error
@spec dig(map(), binary()) :: NestedMap.result_t
def dig(map, compound_string_key) do
keys = String.split(compound_string_key, ".")
NestedMap.fetch(map, keys)
end
@doc false
@spec dig!(map(), binary()) :: maybe_error(NestedMap.result_t)
def dig!(map, compound_string_key) do
case dig(map, compound_string_key) do
{:ok, value} -> value
:error -> raise Error, "compound string key #{compound_string_key} not found"
end
end
@default_options %{ remove_filename: false, expand_path: false }
@spec parse_file(binary(), Keyword.t) :: maybe_error(map())
def parse_file(filename, options \\ [])
def parse_file(filename, options) do
%{remove_filename: remove_filename, expand_path: expand_path} = Map.merge(@default_options, options |> Enum.into(%{}))
if remove_filename && expand_path do
raise Error, "must not specify remove_filename and expand_path"
end
result = _parse_file(filename)
cond do
remove_filename -> result
expand_path -> _make_prefix_map(filename, result)
true -> %{Path.basename(filename, Path.extname(filename)) => result}
end
end
@spec parse_tree(binary(), Keyword.t) :: map()
def parse_tree(path, options \\ [])
def parse_tree(path, options) do
{include_name, split_path} = tuple_from_params([include_name: true, split_path: false], options, [:include_name, :split_path])
cond do
split_path -> NestedMap.make_nested_map(Path.split(path), _tree_hash(path, options))
include_name -> %{Path.basename(path) => _tree_hash(path, options)}
true -> _tree_hash(path, options)
end
end
@doc """
Used by the `xtra` mix task to generate the latest version in the docs, but
also handy for client applications for quick exploration in `iex`.
"""
@spec version() :: binary()
def version() do
with {:ok, version} = :application.get_key(:much_data, :vsn),
do: to_string(version)
end
@spec _add_maps(binaries(), map(), map()) :: map()
defp _add_maps(prefix, data, parsed)
defp _add_maps([], data, _parsed), do: data
defp _add_maps([pfx], data, parsed) do
Map.put(data, pfx, parsed)
end
defp _add_maps([h | t], data, parsed) do
data1 = Map.put_new(data, h, %{})
result = _add_maps(t, data1[h], parsed)
%{data1 | h => result}
end
@spec _make_prefix_map(binary(), map()) :: map()
defp _make_prefix_map(filename, result) do
(Path.extname(filename) |> Regex.escape) <> "\\z"
|> Regex.compile!
|> Regex.replace(filename, "")
|> Path.split
|> Enum.reverse
|> Enum.reduce(result, fn key, acc -> %{key => acc} end)
end
@spec _parse_file(binary()) :: maybe_error(map())
defp _parse_file(filename) do
case YamlElixir.read_from_file(filename) do
{:ok, result} -> result
{:error, message} -> raise message
end
end
@spec _parse(prefixed(), map()) :: map()
defp _parse({file, prefix}, data) do
parsed = _parse_yml(file)
# IO.inspect(file, label: :file)
_add_maps(prefix, data, parsed) # |> IO.inspect(label: "Added maps")
end
@spec _parse_yml(String.t) :: map()
defp _parse_yml(file, options \\ []) do
result = YamlElixir.read_from_file!(file)
if Keyword.get(options, :include_filename) do
Map.put(result, :file, file)
else
result
end
end
@spec _tree_hash(binary(), Keyword.t) :: map()
defp _tree_hash(path, _options) do
MuchData.FileWalker.walk(path, ".yml", &_parse/2, %{})
end
end
## File: robot-simulator/lib/robot_simulator.ex
defmodule RobotSimulator do
@directions [:north, :east, :south, :west]
@command ["A", "L", "R"]
@doc """
Create a Robot Simulator given an initial direction and position.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
def create(), do: {:north, {0, 0}}
def create({x, y}), do: {:north, {x, y}}
def create(direction) when is_atom(direction) and direction in @directions,
do: {direction, {0, 0}}
# @spec create(direction :: atom, position :: {integer, integer}) :: {Atom, Tuple}
def create(direction, {x, y})
when direction in @directions and is_integer(x) and is_integer(y),
do: {direction, {x, y}}
def create(direction, _position)
when direction not in @directions,
do: {:error, "invalid direction"}
@spec create(direction :: atom, position :: {integer, integer}) :: any
def create(_direction, _position), do: {:error, "invalid position"}
@doc """
Simulate the robot's movement given a string of instructions.
Valid instructions are: "R" (turn right), "L" (turn left), and "A" (advance)
"""
@spec simulate(robot :: any, instructions :: String.t()) :: any
def simulate(robot, instructions) do
valid =
String.codepoints(instructions)
|> Enum.all?(fn x -> x in @command end)
if valid do
execute(robot, String.codepoints(instructions))
else
{:error, "invalid instruction"}
end
end
defp execute(robot, ["R" | []]), do: {rotate("R", direction(robot)), position(robot)}
defp execute(robot, ["L" | []]), do: {rotate("L", direction(robot)), position(robot)}
defp execute(robot, ["A" | []]), do: move(robot)
defp execute(robot, ["R" | tail]),
do: {rotate("R", direction(robot)), position(robot)} |> execute(tail)
defp execute(robot, ["L" | tail]),
do: {rotate("L", direction(robot)), position(robot)} |> execute(tail)
defp execute(robot, ["A" | tail]), do: move(robot) |> execute(tail)
defp move({:north, {x, y}}), do: {:north, {x, y + 1}}
defp move({:east, {x, y}}), do: {:east, {x + 1, y}}
defp move({:south, {x, y}}), do: {:south, {x, y - 1}}
defp move({:west, {x, y}}), do: {:west, {x - 1, y}}
# source modified by lucafuelbier's solution
defp rotate("R", :north), do: :east
defp rotate("R", :east), do: :south
defp rotate("R", :south), do: :west
defp rotate("R", :west), do: :north
defp rotate("L", :north), do: :west
defp rotate("L", :east), do: :north
defp rotate("L", :south), do: :east
defp rotate("L", :west), do: :south
@doc """
Return the robot's direction.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec direction(robot :: any) :: atom
def direction({direction, _position}), do: direction
@doc """
Return the robot's position.
"""
@spec position(robot :: any) :: {integer, integer}
def position({_direction, position}), do: position
end
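# A minimal usage sketch (this example module is not part of the exercise
# solution):
defmodule RobotSimulatorExample do
  # Creates a robot facing north at {0, 0} and runs "RAA": turn right to
  # :east, then advance twice, ending at {2, 0}.
  def demo do
    robot =
      RobotSimulator.create(:north, {0, 0})
      |> RobotSimulator.simulate("RAA")

    {RobotSimulator.direction(robot), RobotSimulator.position(robot)}
  end
end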
## File: apps/ello_core/lib/ello_core/discovery/editorial.ex
defmodule Ello.Core.Discovery.Editorial do
use Ecto.Schema
alias Ello.Core.Content.Post
alias Ello.Core.Image
@type t :: %__MODULE__{}
schema "editorials" do
field :published_position, :integer
field :preview_position, :integer
field :kind, :string
field :content, :map
field :one_by_one_image, :string
field :one_by_two_image, :string
field :two_by_one_image, :string
field :two_by_two_image, :string
field :one_by_one_image_metadata, :map
field :one_by_two_image_metadata, :map
field :two_by_one_image_metadata, :map
field :two_by_two_image_metadata, :map
field :one_by_one_image_struct, :map, virtual: true
field :one_by_two_image_struct, :map, virtual: true
field :two_by_one_image_struct, :map, virtual: true
field :two_by_two_image_struct, :map, virtual: true
field :created_at, :utc_datetime
field :updated_at, :utc_datetime
# Holds curated posts preloads
field :curated_posts, {:array, :map}, default: [], virtual: true
belongs_to :post, Post
end
@doc """
Converts image filename and metadata into a struct for serialization
"""
@spec build_images(editorial :: t) :: t
def build_images(editorial) do
editorial
|> Map.put(:one_by_one_image_struct, image_struct(editorial, :one_by_one_image))
|> Map.put(:one_by_two_image_struct, image_struct(editorial, :one_by_two_image))
|> Map.put(:two_by_one_image_struct, image_struct(editorial, :two_by_one_image))
|> Map.put(:two_by_two_image_struct, image_struct(editorial, :two_by_two_image))
end
def image_struct(editorial, field) do
field_string = Atom.to_string(field)
metadata = Map.get(editorial, String.to_atom("#{field_string}_metadata"))
filename = Map.get(editorial, field)
case filename do
nil -> nil
"" -> nil
_ ->
%Image{
filename: filename,
path: "/uploads/editorial/#{field_string}/#{editorial.id}",
versions: Image.Version.from_metadata(metadata, filename),
}
end
end
end
## File: lib/data_types/tlog.ex
defmodule Jylis.TLOG do
@moduledoc """
A timestamped log.
<sup>[[link](https://jemc.github.io/jylis/docs/types/tlog/)]</sup>
"""
@doc """
Get the latest `value`/`timestamp` entries from the log at `key`.
Returns `{:ok, [{value, timestamp}, ...]}` on success.
"""
def get(connection, key, count \\ nil) do
query = ["TLOG", "GET", key]
query =
case count do
nil -> query
_ -> query ++ [count]
end
result = connection |> Jylis.query(query)
case result do
{:ok, items} ->
items = items |> Enum.map(fn(item) -> List.to_tuple(item) end)
{:ok, items}
error -> error
end
end
@doc """
Insert a `value`/`timestamp` entry into the log at `key`.
`timestamp` - An integer Unix timestamp or an ISO 8601 formatted string.
"""
def ins(connection, key, value, timestamp) when is_integer(timestamp) do
connection |> Jylis.query(["TLOG", "INS", key, value, timestamp])
end
def ins(connection, key, value, timestamp) when is_binary(timestamp) do
{:ok, date_time, _} = timestamp |> DateTime.from_iso8601
timestamp = date_time |> DateTime.to_unix
ins(connection, key, value, timestamp)
end
@doc """
Return the number of entries in the log at `key` as an integer.
"""
def size(connection, key) do
connection |> Jylis.query(["TLOG", "SIZE", key])
end
@doc """
Return the current cutoff timestamp of the log at `key` as an integer.
"""
def cutoff(connection, key) do
connection |> Jylis.query(["TLOG", "CUTOFF", key])
end
@doc """
Raise the cutoff timestamp of the log, causing any entries to be discarded
whose timestamp is earlier than the newly given `timestamp`.
`timestamp` - An integer Unix timestamp or an ISO 8601 formatted string.
"""
def trimat(connection, key, timestamp) when is_integer(timestamp) do
connection |> Jylis.query(["TLOG", "TRIMAT", key, timestamp])
end
def trimat(connection, key, timestamp) when is_binary(timestamp) do
{:ok, date_time, _} = timestamp |> DateTime.from_iso8601
timestamp = date_time |> DateTime.to_unix
trimat(connection, key, timestamp)
end
@doc """
Raise the cutoff timestamp of the log to retain at least `count` entries, by
setting the cutoff timestamp to the timestamp of the entry at index `count - 1`
in the log. Any entries with an earlier timestamp than the entry at that index
will be discarded. If `count` is zero, this is the same as calling `clr/2`.
"""
def trim(connection, key, count) do
connection |> Jylis.query(["TLOG", "TRIM", key, count])
end
@doc """
Raise the cutoff timestamp to be the timestamp of the latest entry plus
one, such that all local entries in the log will be discarded due to having
timestamps earlier than the cutoff timestamp.
"""
def clr(connection, key) do
connection |> Jylis.query(["TLOG", "CLR", key])
end
end
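# A hedged usage sketch (assumes `connection` is an established Jylis
# connection; the returned value/timestamp shapes are illustrative):
#
#     {:ok, _} = Jylis.TLOG.ins(connection, "temperature", 72.1, "2018-05-04T16:00:00Z")
#     {:ok, entries} = Jylis.TLOG.get(connection, "temperature", 5)
#     entries #=> [{"72.1", 1525449600}, ...]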
## File: lib/text_delta/difference.ex
defmodule TextDelta.Difference do
@moduledoc """
Document diffing.
Given valid document states A and B, generate a delta that when applied to A
will result in B.
"""
alias TextDelta.{Operation, Attributes, ConfigurableString}
@typedoc """
Reason for an error.
"""
@type error_reason :: :bad_document
@typedoc """
Result of getting a diff.
An ok/error tuple. Represents either a successful diffing in form of
`{:ok, delta}` or an error in form of `{:error, reason}`.
"""
@type result ::
{:ok, TextDelta.t()}
| {:error, error_reason}
@doc """
Calculates a difference between two documents in form of new delta.
## Examples
successful application:
iex> doc_a =
iex> TextDelta.new()
iex> |> TextDelta.insert("hello")
iex> doc_b =
iex> TextDelta.new()
iex> |> TextDelta.insert("goodbye")
iex> TextDelta.diff(doc_a, doc_b)
{:ok, %TextDelta{ops: [
%{insert: "g"},
%{delete: 4},
%{retain: 1},
%{insert: "odbye"}]}}
error handling:
iex> doc = TextDelta.retain(TextDelta.new(), 3)
iex> TextDelta.diff(doc, doc)
{:error, :bad_document}
"""
@spec diff(TextDelta.state(), TextDelta.state()) :: result
def diff(first, second) do
case valid_document?(first) and valid_document?(second) do
true ->
fst_ops = TextDelta.operations(first)
snd_ops = TextDelta.operations(second)
result =
fst_ops
|> string_from_ops()
|> String.myers_difference(string_from_ops(snd_ops))
|> mdiff_to_delta(fst_ops, snd_ops, TextDelta.new())
|> TextDelta.trim()
{:ok, result}
false ->
{:error, :bad_document}
end
end
@doc """
Calculates a difference between two documents in form of new delta.
Equivalent to `&TextDelta.Difference.diff/2`, but instead of returning
ok/error tuples raises a `RuntimeError`.
"""
@spec diff!(TextDelta.state(), TextDelta.state()) :: TextDelta.t() | no_return
def diff!(first, second) do
case diff(first, second) do
{:ok, delta} ->
delta
{:error, reason} ->
raise "Can not diff documents: #{Atom.to_string(reason)}"
end
end
defp string_from_ops(ops) do
ops
|> Enum.map(&string_from_op/1)
|> Enum.join()
end
defp string_from_op(%{insert: str}) when is_bitstring(str), do: str
defp string_from_op(%{insert: _}), do: List.to_string([0])
defp mdiff_to_delta([], _, _, delta), do: delta
defp mdiff_to_delta([{_, ""} | rest], fst, snd, delta) do
mdiff_to_delta(rest, fst, snd, delta)
end
defp mdiff_to_delta([{type, str} | rest], fst, snd, delta) do
str_len = ConfigurableString.length(str)
case type do
:ins ->
{op, new_snd} = next_op_no_longer_than(snd, str_len)
op_len = Operation.length(op)
{_, substr} = ConfigurableString.split_at(str, op_len)
new_delta = TextDelta.append(delta, op)
mdiff_to_delta([{:ins, substr} | rest], fst, new_snd, new_delta)
:del ->
{op, new_fst} = next_op_no_longer_than(fst, str_len)
op_len = Operation.length(op)
{_, substr} = ConfigurableString.split_at(str, op_len)
new_delta = TextDelta.append(delta, Operation.delete(op_len))
mdiff_to_delta([{:del, substr} | rest], new_fst, snd, new_delta)
:eq ->
{{op1, new_fst}, {op2, new_snd}} =
next_op_no_longer_than(fst, snd, str_len)
op_len = Operation.length(op1)
{_, substr} = ConfigurableString.split_at(str, op_len)
if op1.insert == op2.insert do
attrs =
op1
|> Map.get(:attributes, %{})
|> Attributes.diff(Map.get(op2, :attributes, %{}))
new_delta = TextDelta.retain(delta, op_len, attrs)
mdiff_to_delta([{:eq, substr} | rest], new_fst, new_snd, new_delta)
else
new_delta =
delta
|> TextDelta.append(op2)
|> TextDelta.append(Operation.delete(op_len))
mdiff_to_delta([{:eq, substr} | rest], new_fst, new_snd, new_delta)
end
end
end
defp next_op_no_longer_than([op | rest], max_len) do
op_len = Operation.length(op)
if op_len <= max_len do
{op, rest}
else
{op1, op2} = Operation.slice(op, max_len)
{op1, [op2 | rest]}
end
end
defp next_op_no_longer_than([op1 | rest1], [op2 | rest2], max_len) do
len = Enum.min([Operation.length(op1), Operation.length(op2), max_len])
{next_op_no_longer_than([op1 | rest1], len),
next_op_no_longer_than([op2 | rest2], len)}
end
defp valid_document?(document) do
TextDelta.length(document) == TextDelta.length(document, [:insert])
end
end
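# A hedged sketch of `diff!/2` (mirrors the `diff/2` doctest above; returns
# the delta directly or raises on invalid documents):
#
#     doc_a = TextDelta.insert(TextDelta.new(), "hello")
#     doc_b = TextDelta.insert(TextDelta.new(), "goodbye")
#     TextDelta.Difference.diff!(doc_a, doc_b)
#     #=> %TextDelta{ops: [%{insert: "g"}, %{delete: 4}, %{retain: 1}, %{insert: "odbye"}]}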
defmodule Money.Subscription do
@moduledoc """
Provides functions to create, upgrade and downgrade subscriptions
from one plan to another.
Since moving from one plan to another may require
prorating the payment stream at the point of transition,
this module is introduced to provide a single point of
calculation of the proration in order to give clear focus
to the issues of calculating the carry-over amount or
the carry-over period at the point of plan change.
### Defining a subscription
A subscription records the current state and history of
all plans assigned over time to a subscriber. The definition
is deliberately minimal to simplify integration into applications
that have a specific implementation of a subscription.
A new subscription is created with `Money.Subscription.new/3`
which has the following attributes:
* `plan` which defines the initial plan for the subscription.
This option is required.
* `effective_date` which determines the effective date of
the initial plan. This option is required.
* `options` which include `:created_at` and `:id` with which
a subscription may be annotated
### Changing a subscription plan
Changing a subscription plan requires the following
information be provided:
* A Subscription or the definition of the current plan
* The definition of the new plan
* The strategy for changing the plan which is either:
* to have the effective date of the new plan be after
the current interval of the current plan
* To change the plan immediately in which case there will
be a credit on the current plan which needs to be applied
to the new plan.
See `Money.Subscription.change_plan/3`
### When the new plan is effective at the end of the current billing period
The first strategy simply finishes the current billing period before
the new plan is introduced and therefore no proration is required.
This is the default strategy.
### When the new plan is effective immediately
If the new plan is to be effective immediately then any credit
balance remaining on the old plan needs to be applied to the
new plan. There are two options of applying the credit:
1. Reduce the billing amount of the first period of the new plan
by the amount of the credit left on the old plan. This means
that the billing amount for the first period of the new plan
will be different (less) than the billing amount for subsequent
periods on the new plan.
2. Extend the first period of the new plan by the interval amount
that can be funded by the credit amount left on the old plan. In
the situation where the credit amount does not fully fund an integral
interval the additional interval can be truncated or rounded up to the next
integral period.
### Plan definition
This module, and `Money` in general, does not provide a full
billing or subscription solution - its focus is to support a reliable
means of calculating the accounting outcome of a plan change only.
Therefore the plan definition required by `Money.Subscription` can be
any `Map.t` that includes the following fields:
* `interval` which defines the time interval for a plan. The value
can be one of `day`, `week`, `month` or `year`.
* `interval_count` which defines the number of `interval`s for the
current plan interval. This must be a positive integer.
* `price` which is a `Money.t` representing the price of the plan
to be paid each interval count.
### Billing in advance
This module calculates all subscription changes on the basis
that billing is done in advance. This primarily affects the
calculation of plan credit when a plan changes. The assumption
is that the period from the start of the current interval to
the point of change has been consumed and therefore the credit
is based upon that period of the plan that has not yet been
consumed.
If the calculation was based upon "payment in arrears" then
the credit would actually be a debit since the part of the
current period consumed has not yet been paid.
"""
alias Money.Subscription
alias Money.Subscription.{Change, Plan}
@typedoc "An id that uniquely identifies a subscription"
@type id :: term()
@typedoc "A Money.Subscription type"
@type t :: %__MODULE__{id: id(), plans: list({Change.t(), Plan.t()}), created_at: DateTime.t()}
@doc """
A `struct` defining a subscription
* `:id` any term that uniquely identifies this subscription
* `:plans` is a list of `{change, plan}` tuples that record the history
of plans assigned to this subscription
* `:created_at` records the `DateTime.t` when the subscription was created
"""
defstruct id: nil,
plans: [],
created_at: nil
@doc """
Creates a new subscription.
## Arguments
* `plan` is any `Money.Subscription.Plan.t` that defines the initial plan
* `effective_date` is a `Date.t` that represents the effective
date of the initial plan. This defines the start of the first interval
* `options` is a keyword list of options
## Options
* `:id` is any term that an application can use to uniquely identify
this subscription. It is not used in any function in this module.
* `:created_at` is a `DateTime.t` that records the timestamp when
the subscription was created. The default is `DateTime.utc_now/0`
## Returns
* `{:ok, Money.Subscription.t}` or
* `{:error, {exception, message}}`
"""
# @doc since: "2.3.0"
@spec new(plan :: Plan.t(), effective_date :: Date.t(), Keyword.t()) ::
{:ok, Subscription.t()} | {:error, {module(), String.t()}}
def new(plan, effective_date, options \\ [])
def new(
%{price: _price, interval: _interval} = plan,
%{year: _year, month: _month, day: _day, calendar: _calendar} = effective_date,
options
) do
options =
default_subscription_options()
|> Keyword.merge(options)
next_interval_starts = next_interval_starts(plan, effective_date, options)
first_billing_amount = plan.price
changes = %Change{
first_interval_starts: effective_date,
next_interval_starts: next_interval_starts,
first_billing_amount: first_billing_amount,
credit_amount_applied: Money.zero(first_billing_amount),
credit_amount: Money.zero(first_billing_amount),
carry_forward: Money.zero(first_billing_amount)
}
subscription =
struct(__MODULE__, options)
|> Map.put(:plans, [{changes, plan}])
{:ok, subscription}
end
def new(%{price: _price, interval: _interval}, effective_date, _options) do
{:error, {Subscription.DateError, "The effective date #{inspect(effective_date)} is invalid."}}
end
def new(plan, %{year: _, month: _, day: _, calendar: _}, _options) do
{:error, {Subscription.PlanError, "The plan #{inspect(plan)} is invalid."}}
end
@doc """
Creates a new subscription or raises an exception.
## Arguments
* `plan` is any `Money.Subscription.Plan.t` that defines the initial plan
* `effective_date` is a `Date.t` that represents the effective
date of the initial plan. This defines the start of the first interval
* `:options` is a keyword list of options
## Options
* `:id` is any term that an application can use to uniquely identify
this subscription. It is not used in any function in this module.
* `:created_at` is a `DateTime.t` that records the timestamp when
the subscription was created. The default is `DateTime.utc_now/0`
## Returns
* A `Money.Subscription.t` or
* raises an exception
"""
@spec new!(plan :: Plan.t(), effective_date :: Date.t(), Keyword.t()) ::
Subscription.t() | no_return()
def new!(plan, effective_date, options \\ []) do
case new(plan, effective_date, options) do
{:ok, subscription} -> subscription
{:error, {exception, message}} -> raise exception, message
end
end
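# A hedged iex sketch of `new/2` (the plan and date are illustrative;
# `Plan.new!/3` is used the same way in the `change_plan/3` examples below):
#
#     iex> plan = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 1)
#     iex> {:ok, subscription} = Money.Subscription.new(plan, ~D[2018-01-01])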
defp default_subscription_options do
[
created_at: DateTime.utc_now()
]
end
@doc """
Retrieve the plan that is currently in effect.
The plan in effect is not necessarily the first
plan in the list, since a plan change may have
been scheduled to take effect at a later time.
## Arguments
* `subscription` is a `Money.Subscription.t` or any
map that provides the field `:plans`
## Returns
* The `Money.Subscription.Plan.t` that is the plan currently in effect or
`nil`
"""
# @doc since: "2.3.0"
@spec current_plan(Subscription.t() | map, Keyword.t()) :: Plan.t() | nil
def current_plan(subscription, options \\ [])
def current_plan(%{plans: []}, _options) do
nil
end
def current_plan(%{plans: [h | t]}, options) do
if current_plan?(h, options) do
h
else
current_plan(%{plans: t}, options)
end
end
# Because we walk the list from most recent to oldest, the first
# plan that has a start date less than or equal to the current
# date is the one we want
@spec current_plan?({Change.t(), Plan.t()}, Keyword.t) :: boolean
defp current_plan?({%Change{first_interval_starts: start_date}, _}, options) do
today = Keyword.get(options, :today, Date.utc_today())
Date.compare(start_date, today) in [:lt, :eq]
end
@doc """
Returns a `boolean` indicating if there is a pending plan.
A pending plan is one where the subscription has changed
plans but the plan is not yet in effect. There can only
be one pending plan.
## Arguments
* `:subscription` is any `Money.Subscription.t`
* `:options` is a keyword list of options
## Options
* `:today` is a `Date.t` that represents the effective
date used to determine whether there is a pending plan.
The default is `Date.utc_today/0`.
## Returns
* Either `true` or `false`
"""
# @doc since: "2.3.0"
@spec plan_pending?(Subscription.t(), Keyword.t()) :: boolean()
def plan_pending?(%{plans: [{changes, _plan} | _t]}, options \\ []) do
today = options[:today] || Date.utc_today()
Date.compare(changes.first_interval_starts, today) == :gt
end
@doc """
Cancel a subscription's pending plan.
A pending plan arises when a `Subscription.change_plan/3` has
been executed but the effective date is in the future. Only
one plan may be pending at any one time, so if
`Subscription.change_plan/3` is attempted a second time an
error tuple will be returned.
`Subscription.cancel_pending_plan/2`
can be used to roll back the pending plan change.
## Arguments
* `:subscription` is any `Money.Subscription.t`
* `:options` is a `Keyword.t`
## Options
* `:today` is a `Date.t` that represents today.
The default is `Date.utc_today`
## Returns
* An updated `Money.Subscription.t` which may or may not
have had a pending plan. If it did have a pending plan
that plan is deleted. If there was no pending plan then
the subscription is returned unchanged.
"""
# @doc since: "2.3.0"
@spec cancel_pending_plan(Subscription.t(), Keyword.t()) :: Subscription.t()
def cancel_pending_plan(%{plans: [_plan | other_plans]} = subscription, options \\ []) do
if plan_pending?(subscription, options) do
%{subscription | plans: other_plans}
else
subscription
end
end
@doc """
Returns the start date of the current plan.
## Arguments
* `subscription` is a `Money.Subscription.t` or any
map that provides the field `:plans`
## Returns
* The start `Date.t` of the current plan
"""
# @doc since: "2.3.0"
@spec current_plan_start_date(Subscription.t()) :: Date.t() | nil
@dialyzer {:nowarn_function, current_plan_start_date: 1}
def current_plan_start_date(%{plans: _plans} = subscription) do
case current_plan(subscription) do
{changes, _plan} -> changes.first_interval_starts
nil -> nil
end
end
@doc """
Returns the first date of the current interval of a plan.
## Arguments
* `:subscription_or_changeset` is any`Money.Subscription.t` or
a `{Change.t, Plan.t}` tuple
* `:options` is a keyword list of options
## Options
* `:today` is a `Date.t` that represents today.
The default is `Date.utc_today`
## Returns
* The `Date.t` that is the first date of the current interval
"""
# @doc since: "2.3.0"
@spec current_interval_start_date(Subscription.t() | {Change.t(), Plan.t()} | map(), Keyword.t()) ::
Date.t()
@dialyzer {:nowarn_function, current_interval_start_date: 2}
def current_interval_start_date(subscription_or_changeset, options \\ [])
def current_interval_start_date(%{plans: _plans} = subscription, options) do
case current_plan(subscription, options) do
{changes, plan} ->
current_interval_start_date({changes, plan}, options)
_ ->
{:error,
{Money.Subscription.NoCurrentPlan, "There is no current plan for the subscription"}}
end
end
def current_interval_start_date({%Change{first_interval_starts: start_date}, plan}, options) do
next_interval_starts = next_interval_starts(plan, start_date)
options = Keyword.put_new(options, :today, Date.utc_today())
case compare_range(options[:today], start_date, next_interval_starts) do
:between ->
start_date
:less ->
current_interval_start_date(
{%Change{first_interval_starts: next_interval_starts}, plan},
options
)
:greater ->
{:error,
{Money.Subscription.NoCurrentPlan, "The plan is not current for #{inspect(start_date)}"}}
end
end
defp compare_range(date, current, next) do
cond do
Date.compare(date, current) in [:gt, :eq] and Date.compare(date, next) == :lt ->
:between
Date.compare(current, date) == :lt ->
:less
Date.compare(next, date) == :gt ->
:greater
end
end
@doc """
Returns the latest plan for a subscription.
The latest plan may not be in effect since
its start date may be in the future.
## Arguments
* `subscription` is a `Money.Subscription.t` or any
map that provides the field `:plans`
## Returns
* The `Money.Subscription.Plan.t` that is the most recent
plan - whether or not it is the currently active plan.
"""
# @doc since: "2.3.0"
@spec latest_plan(Subscription.t() | map()) :: {Change.t(), Plan.t()}
def latest_plan(%{plans: [h | _t]}) do
h
end
@doc """
Change plan from the current plan to a new plan.
## Arguments
* `subscription_or_plan` is either a `Money.Subscription.t` or `Money.Subscription.Plan.t`
or a map with the same fields
* `new_plan` is a `Money.Subscription.Plan.t` or a map with at least the fields
`interval`, `interval_count` and `price`
* `current_interval_started` is a `Date.t` or other map with the fields `year`, `month`,
`day` and `calendar`
* `options` is a keyword list of options the define how the change is to be made
## Options
* `:effective` defines when the new plan comes into effect. The values are `:immediately`,
a `Date.t` or `:next_period`. The default is `:next_period`. Note that the date
applied in the case of `:immediately` is the date returned by `Date.utc_today`.
* `:prorate` which determines how to prorate the current plan into the new plan. The
options are `:price` which will reduce the price of the first period of the new plan
by the credit amount left on the old plan (this is the default). Or `:period` in which
case the first period of the new plan is extended by the `interval` amount of the new
plan that the credit on the old plan will fund.
* `:round` determines whether when prorating the `:period` it is truncated or rounded up
to the next nearest full `interval_count`. Valid values are `:down`, `:half_up`,
`:half_even`, `:ceiling`, `:floor`, `:half_down`, `:up`. The default is `:up`.
* `:first_interval_started` determines the anchor day for monthly billing. For
example if a monthly plan starts on January 31st then the next period will start
on February 28th (or 29th). The period following that should, however, be March 31st.
If `subscription_or_plan` is a `Money.Subscription.t` then the `:first_interval_started`
is automatically populated from the subscription. If `:first_interval_started` is
`nil` then the date defined by `:effective` is used.
## Returns
A `Money.Subscription.Change.t` with the following elements:
* `:first_interval_starts` which is the start date of the first interval for the new
plan
* `:first_billing_amount` is the amount to be billed, net of any credit, at
the `:first_interval_starts`
* `:next_interval_starts` is the start date of the next interval after the
  first interval, including any `credit_days_applied`
* `:credit_amount` is the amount of unconsumed credit of the current plan
* `:credit_amount_applied` is the amount of credit applied to the new plan. If
the `:prorate` option is `:price` (the default) then `:first_billing_amount`
is the plan `:price` reduced by the `:credit_amount_applied`. If the `:prorate`
option is `:period` then the `:first_billing_amount` is the plan `price` and
the `:next_interval_starts` is extended by the `:credit_days_applied`
instead.
* `:credit_days_applied` is the number of days credit applied to the first
interval by adding days to the `:first_interval_starts` date.
* `:credit_period_ends` is the date on which any applied credit is consumed or `nil`
* `:carry_forward` is any amount of credit carried forward to a subsequent period.
If non-zero, this amount is a negative `Money.t`. It is non-zero when the credit
amount for the current plan is greater than the `:price` of the new plan. In
this case the `:first_billing_amount` is zero.
## Returns
* `{:ok, updated_subscription}` or
* `{:error, {exception, message}}`
## Examples
# Change at end of the current period so no proration
iex> current = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 1)
iex> new = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 3)
iex> Money.Subscription.change_plan current, new, current_interval_started: ~D[2018-01-01]
{:ok, %Money.Subscription.Change{
carry_forward: Money.zero(:USD),
credit_amount: Money.zero(:USD),
credit_amount_applied: Money.zero(:USD),
credit_days_applied: 0,
credit_period_ends: nil,
next_interval_starts: ~D[2018-05-01],
first_billing_amount: Money.new(:USD, 10),
first_interval_starts: ~D[2018-02-01]
}}
# Change during the current plan generates a credit amount
iex> current = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 1)
iex> new = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 3)
iex> Money.Subscription.change_plan current, new, current_interval_started: ~D[2018-01-01], effective: ~D[2018-01-15]
{:ok, %Money.Subscription.Change{
carry_forward: Money.zero(:USD),
credit_amount: Money.new(:USD, "5.49"),
credit_amount_applied: Money.new(:USD, "5.49"),
credit_days_applied: 0,
credit_period_ends: nil,
next_interval_starts: ~D[2018-04-15],
first_billing_amount: Money.new(:USD, "4.51"),
first_interval_starts: ~D[2018-01-15]
}}
# Change during the current plan generates a credit period
iex> current = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 1)
iex> new = Money.Subscription.Plan.new!(Money.new(:USD, 10), :month, 3)
iex> Money.Subscription.change_plan current, new, current_interval_started: ~D[2018-01-01], effective: ~D[2018-01-15], prorate: :period
{:ok, %Money.Subscription.Change{
carry_forward: Money.zero(:USD),
credit_amount: Money.new(:USD, "5.49"),
credit_amount_applied: Money.zero(:USD),
credit_days_applied: 50,
credit_period_ends: ~D[2018-03-05],
next_interval_starts: ~D[2018-06-04],
first_billing_amount: Money.new(:USD, 10),
first_interval_starts: ~D[2018-01-15]
}}
"""
# @doc since: "2.3.0"
@spec change_plan(
subscription_or_plan :: Subscription.t() | Plan.t(),
new_plan :: Plan.t(),
options :: Keyword.t()
) :: {:ok, Change.t() | Subscription.t()} | {:error, {module(), String.t()}}
@dialyzer {:nowarn_function, change_plan: 3}
def change_plan(subscription_or_plan, new_plan, options \\ [])
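# When called with a subscription, refuse to change plans while another
# change is already pending; otherwise compute the change against the
# current plan and push {changes, new_plan} onto the subscription's plan list.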
def change_plan(
%{plans: [{changes, %{price: %Money{currency: currency}} = current_plan} | _] = plans} =
subscription,
%{price: %Money{currency: currency}} = new_plan,
options
) do
options =
options
|> Keyword.put(:first_interval_started, changes.first_interval_starts)
|> Keyword.put(:current_interval_started, current_interval_start_date(subscription, options))
|> change_plan_options_from(default_options())
|> Keyword.new()
if plan_pending?(subscription, options) do
{:error,
{Money.Subscription.PlanPending, "Can't change plan when a new plan is already pending"}}
else
{:ok, changes} = change_plan(current_plan, new_plan, options)
updated_subscription = %{subscription | plans: [{changes, new_plan} | plans]}
{:ok, updated_subscription}
end
end
def change_plan(
%{price: %Money{currency: currency}} = current_plan,
%{price: %Money{currency: currency}} = new_plan,
options
) do
options = change_plan_options_from(options, default_options())
change_plan(current_plan, new_plan, options[:effective], options)
end
@doc """
Change plan from the current plan to a new plan.
Returns the change or raises an exception on error.
See `Money.Subscription.change_plan/3` for the description
of arguments, options and return.
"""
# @doc since: "2.3.0"
@spec change_plan!(
subscription_or_plan :: t() | Plan.t(),
new_plan :: Plan.t(),
options :: Keyword.t()
) :: Change.t() | no_return()
def change_plan!(subscription_or_plan, new_plan, options \\ []) do
case change_plan(subscription_or_plan, new_plan, options) do
{:ok, changeset} -> changeset
{:error, {exception, message}} -> raise exception, message
end
end
# Change the plan at the end of the current plan interval. This requires
# no proration and is therefore the easiest to calculate.
defp change_plan(current_plan, new_plan, :next_period, options) do
price = Map.get(new_plan, :price)
first_interval_starts =
next_interval_starts(current_plan, options[:current_interval_started], options)
zero = Money.zero(price.currency)
{:ok,
%Change{
first_billing_amount: price,
first_interval_starts: first_interval_starts,
next_interval_starts: next_interval_starts(new_plan, first_interval_starts, options),
credit_amount_applied: zero,
credit_amount: zero,
credit_days_applied: 0,
credit_period_ends: nil,
carry_forward: zero
}}
end
defp change_plan(current_plan, new_plan, :immediately, options) do
change_plan(current_plan, new_plan, options[:today], options)
end
defp change_plan(current_plan, new_plan, effective_date, options) do
credit = plan_credit(current_plan, effective_date, options)
{:ok, prorate(new_plan, credit, effective_date, options[:prorate], options)}
end
# Reduce the price of the first interval of the new plan by the
# credit amount on the current plan
defp prorate(plan, credit_amount, effective_date, :price, options) do
prorate_price =
Map.get(plan, :price)
|> Money.sub!(credit_amount)
|> Money.round(rounding_mode: options[:round])
zero = zero(plan)
{first_billing_amount, carry_forward} =
if Money.cmp(prorate_price, zero) == :lt do
{zero, prorate_price}
else
{prorate_price, zero}
end
%Change{
first_interval_starts: effective_date,
first_billing_amount: first_billing_amount,
next_interval_starts: next_interval_starts(plan, effective_date, options),
credit_amount: credit_amount,
credit_amount_applied: Money.add!(credit_amount, carry_forward),
credit_days_applied: 0,
credit_period_ends: nil,
carry_forward: carry_forward
}
end
# Extend the first interval of the new plan by the amount of credit
# on the current plan
defp prorate(plan, credit_amount, effective_date, :period, options) do
{next_interval_starts, days_credit} =
extend_period(plan, credit_amount, effective_date, options)
first_billing_amount = Map.get(plan, :price)
credit_period_ends = Date.add(effective_date, days_credit - 1)
%Change{
first_interval_starts: effective_date,
first_billing_amount: first_billing_amount,
next_interval_starts: next_interval_starts,
credit_amount: credit_amount,
credit_amount_applied: zero(plan),
credit_days_applied: days_credit,
credit_period_ends: credit_period_ends,
carry_forward: zero(plan)
}
end
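# The credit is the value of the unused portion of the current plan:
# (price / days in the interval) * days remaining at the effective date,
# rounded with the configured rounding mode.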
defp plan_credit(%{price: price} = plan, effective_date, options) do
plan_days = plan_days(plan, effective_date, options)
price_per_day = Decimal.div(price.amount, Decimal.new(plan_days))
days_remaining =
days_remaining(plan, options[:current_interval_started], effective_date, options)
price_per_day
|> Decimal.mult(Decimal.new(days_remaining))
|> Money.new(price.currency)
|> Money.round(rounding_mode: options[:round])
end
# Extend the interval by the amount that
# credit will fund on the new plan in days.
defp extend_period(plan, credit, effective_date, options) do
price = Map.get(plan, :price)
plan_days = plan_days(plan, effective_date, options)
price_per_day = Decimal.div(price.amount, Decimal.new(plan_days))
credit_days_applied =
credit.amount
|> Decimal.div(price_per_day)
|> Decimal.round(0, options[:round])
|> Decimal.to_integer()
next_interval_starts =
next_interval_starts(plan, effective_date, options)
|> Date.add(credit_days_applied)
{next_interval_starts, credit_days_applied}
end
@doc """
Returns number of days in a plan interval.
## Arguments
* `plan` is any `Money.Subscription.Plan.t`
* `current_interval_started` is any `Date.t`
## Returns
The number of days in a plan interval.
## Examples
iex> plan = Money.Subscription.Plan.new! Money.new!(:USD, 100), :month, 1
iex> Money.Subscription.plan_days plan, ~D[2018-01-01]
31
iex> Money.Subscription.plan_days plan, ~D[2018-02-01]
28
iex> Money.Subscription.plan_days plan, ~D[2018-04-01]
30
"""
# @doc since: "2.3.0"
@spec plan_days(Plan.t(), Date.t(), Keyword.t()) :: integer()
def plan_days(plan, current_interval_started, options \\ []) do
plan
|> next_interval_starts(current_interval_started, options)
|> Date.diff(current_interval_started)
end
@doc """
Returns number of days remaining in a plan interval.
## Arguments
* `plan` is any `Money.Subscription.Plan.t`
* `current_interval_started` is a `Date.t`
* `effective_date` is a `Date.t` after the
`current_interval_started` and before the end of
the `plan_days`
## Returns
The number of days remaining in a plan interval
## Examples
iex> plan = Money.Subscription.Plan.new! Money.new!(:USD, 100), :month, 1
iex> Money.Subscription.days_remaining plan, ~D[2018-01-01], ~D[2018-01-02]
30
iex> Money.Subscription.days_remaining plan, ~D[2018-02-01], ~D[2018-02-02]
27
"""
# @doc since: "2.3.0"
@spec days_remaining(Plan.t(), Date.t(), Date.t(), Keyword.t()) :: integer
def days_remaining(plan, current_interval_started, effective_date, options \\ []) do
plan
|> next_interval_starts(current_interval_started, options)
|> Date.diff(effective_date)
end
@doc """
Returns the next interval start date for a plan.
## Arguments
* `plan` is any `Money.Subscription.Plan.t`
* `:current_interval_started` is the `Date.t` that
represents the start of the current interval
## Returns
The next interval start date as a `Date.t`.
## Example
iex> plan = Money.Subscription.Plan.new!(Money.new!(:USD, 100), :month)
iex> Money.Subscription.next_interval_starts(plan, ~D[2018-03-01])
~D[2018-04-01]
iex> plan = Money.Subscription.Plan.new!(Money.new!(:USD, 100), :day, 30)
iex> Money.Subscription.next_interval_starts(plan, ~D[2018-02-01])
~D[2018-03-03]
"""
# @doc since: "2.3.0"
@spec next_interval_starts(Plan.t(), Date.t(), Keyword.t()) :: Date.t()
def next_interval_starts(plan, current_interval_started, options \\ [])
def next_interval_starts(
%{interval: :day, interval_count: count},
%{
year: _year,
month: _month,
day: _day,
calendar: _calendar
} = current_interval_started,
_options
) do
Date.add(current_interval_started, count)
end
def next_interval_starts(
%{interval: :week, interval_count: count},
current_interval_started,
options
) do
next_interval_starts(
%Plan{interval: :day, interval_count: count * 7},
current_interval_started,
options
)
end
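# Months that roll past the end of the year recurse into the next year with
# the remaining interval count. The day is clamped to the length of the
# target month, preferring the anchor day from `:first_interval_started`.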
def next_interval_starts(
%{interval: :month, interval_count: count} = plan,
%{year: year, month: month, day: day, calendar: calendar} = current_interval_started,
options
) do
months_in_this_year = months_in_year(current_interval_started)
{year, month} =
if count + month <= months_in_this_year do
{year, month + count}
else
months_left_this_year = months_in_this_year - month
plan = %{plan | interval_count: count - months_left_this_year - 1}
current_interval_started = %{current_interval_started | year: year + 1, month: 1, day: day}
date = next_interval_starts(plan, current_interval_started, options)
{Map.get(date, :year), Map.get(date, :month)}
end
day =
year
|> calendar.days_in_month(month)
|> min(max(day, preferred_day(options)))
{:ok, next_interval_starts} = Date.new(year, month, day, calendar)
next_interval_starts
end
def next_interval_starts(
%{interval: :year, interval_count: count},
%{year: year} = current_interval_started,
_options
) do
%{current_interval_started | year: year + count}
end
## Helpers
@default_months_in_year 12
defp months_in_year(%{year: year, calendar: calendar}) do
if function_exported?(calendar, :months_in_year, 1) do
calendar.months_in_year(year)
else
@default_months_in_year
end
end
defp change_plan_options_from(options, default_options) do
options =
default_options
|> Keyword.merge(options)
require_options!(options, [:effective, :current_interval_started])
Keyword.put_new(options, :first_interval_started, options[:current_interval_started])
end
defp default_options do
[effective: :next_period, prorate: :price, round: :up, today: Date.utc_today()]
end
defp zero(plan) do
plan
|> Map.get(:price)
|> Map.get(:currency)
|> Money.zero()
end
defp require_options!(options, [h | []]) do
unless options[h] do
raise_change_plan_options_error(h)
end
end
defp require_options!(options, [h | t]) do
if options[h] do
require_options!(options, t)
else
raise_change_plan_options_error(h)
end
end
defp raise_change_plan_options_error(opt) do
raise ArgumentError, "change_plan requires the option #{inspect(opt)}"
end
defp preferred_day(options) do
case Keyword.get(options, :first_interval_started) do
%{day: day} -> day
_ -> -1
end
end
end
# Source file: lib/money/subscription.ex
defmodule Patch.Mock.Code do
@moduledoc """
Patch mocks out modules by generating mock modules and recompiling them for a `target` module.
Patch's approach to mocking a module provides some powerful affordances.
- Private functions can be mocked.
- Internal function calls are affected by mocks regardless of the function's visibility without
  having to change the way code is written.
- Private functions can be optionally exposed in the facade to make it possible to test a
private function directly without changing its visibility in code.
# Mocking Strategy
There are 4 logical modules and 1 GenServer that are involved when mocking a module.
The 4 logical modules:
- `target` - The module to be mocked.
- `facade` - The `target` module is replaced by a `facade` module that intercepts all external
calls and redirects them to the `delegate` module.
- `original` - The `target` module is preserved as the `original` module, with the important
transformation that all local calls are redirected to the `delegate` module.
- `delegate` - This module is responsible for checking with the `server` to see if a call is
mocked and should be intercepted. If so, the mock value is returned, otherwise the `original`
function is called.
Each `target` module has an associated GenServer, a `Patch.Mock.Server` that keeps state
about the history of function calls and holds the mock data to be returned on interception. See
`Patch.Mock.Server` for more information.
## Example Target Module
To better understand how Patch works, consider the following example module.
```elixir
defmodule Example do
def public_function(argument_1, argument_2) do
{:public, private_function(argument_1, argument_2)}
end
defp private_function(argument_1, argument_2) do
{:private, argument_1, argument_2}
end
end
```
### `facade` module
The `facade` module is automatically generated based off the exports of the `target` module.
It takes on the name of the `target` module.
For each exported function, a function is generated in the `facade` module that calls the
`delegate` module.
```elixir
defmodule Example do
def public_function(argument_1, argument_2) do
Patch.Mock.Delegate.For.Example.public_function(argument_1, argument_2)
end
end
```
### `delegate` module
The `delegate` module is automatically generated based off all the functions of the `target`
module. It takes on a name based off the `target` module, see `Patch.Mock.Naming.delegate/1`.
For each function, a function is generated in the `delegate` module that calls
`Patch.Mock.Server.delegate/3` delegating to the server named for the `target` module, see
`Patch.Mock.Naming.server/1`.
```elixir
defmodule Patch.Mock.Delegate.For.Example do
def public_function(argument_1, argument_2) do
Patch.Mock.Server.delegate(
Patch.Mock.Server.For.Example,
:public_function,
[argument_1, argument_2]
)
end
def private_function(argument_1, argument_2) do
Patch.Mock.Server.delegate(
Patch.Mock.Server.For.Example,
:private_function,
[argument_1, argument_2]
)
end
end
```
### `original` module
The `original` module takes on a name based off the `target` module, see
`Patch.Mock.Naming.original/1`.
The code is transformed in the following ways.
- All local calls are converted into remote calls to the `delegate` module.
- All functions are exported
```elixir
defmodule Patch.Mock.Original.For.Example do
def public_function(argument_1, argument_2) do
{:public, Patch.Mock.Delegate.For.Example.private_function(argument_1, argument_2)}
end
def private_function(argument_1, argument_2) do
{:private, argument_1, argument_2}
end
end
```
## External Function Calls
First, let's examine how calls from outside the module are treated.
### Public Function Calls
Code calling `Example.public_function/2` has the following call flow.
```text
[Caller] -> facade -> delegate -> server -> mocked? -> yes (Intercepted)
[Mock Value] <----------------------------|----'
-> no -> original (Run Original Code)
[Original Value] <--------------------------------------'
```
Calling a public function will either return the mocked value if it exists, or fall back to
calling the original function.
### Private Function Calls
Code calling `Example.private_function/2` has the following call flow.
```text
[Caller] --------------------------> facade
[UndefinedFunctionError] <-----'
```
Calling a private function continues to be an error from the external caller's point of view.
The `exposes` option does allow the facade to expose private functions; in those cases the call
flow just follows the public call flow.
## Internal Calls
Next, let's examine how calls from inside the module are treated.
### Public Function Calls
Code in the `original` module calling public functions have their code transformed to call the
`delegate` module.
```text
original -> delegate -> server -> mocked? -> yes (Intercepted)
[Mock Value] <------------------|----'
-> no -> original (Run Original Code)
[Original Value] <----------------------------'
```
Since the call is redirected to the `delegate`, calling a public function will either return the
mocked value if it exists, or fall back to calling the original function.
### Private Function Call Flow
Code in the `original` module calling private functions have their code transformed to call the
`delegate` module
```text
original -> delegate -> server -> mocked? -> yes (Intercepted)
[Mock Value] <------------------|----'
-> no -> original (Run Original Code)
[Original Value] <----------------------------'
```
Since the call is redirected to the `delegate`, calling a private function will either return the
mocked value if it exists, or fall back to calling the original function.
## Code Generation
For additional details on how Code Generation works, see the `Patch.Mock.Code.Generate` module.
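## Example Usage

A minimal, hypothetical invocation of the code-generation entry point
(assuming the `Example` module above is compiled and loaded):

```elixir
{:ok, unit} = Patch.Mock.Code.module(Example, exposes: [private_function: 2])
unit.module
#=> Example
```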
"""
alias Patch.Mock
alias Patch.Mock.Code.Generate
alias Patch.Mock.Code.Query
alias Patch.Mock.Code.Unit
@type binary_error ::
:badfile
| :nofile
| :not_purged
| :on_load_failure
| :sticky_directory
@type chunk_error ::
:chunk_too_big
| :file_error
| :invalid_beam_file
| :key_missing_or_invalid
| :missing_backend
| :missing_chunk
| :not_a_beam_file
| :unknown_chunk
@type load_error ::
:embedded
| :badfile
| :nofile
| :on_load_failure
@type compiler_option :: term()
@type form :: term()
@type export_classification :: :builtin | :generated | :normal
@type exports :: Keyword.t(arity())
@typedoc """
Sum-type of all valid options
"""
@type option :: Mock.exposes_option()
@doc """
Extracts the abstract_forms from a module
"""
@spec abstract_forms(module :: module) ::
{:ok, [form()]}
| {:error, :abstract_forms_unavailable}
| {:error, chunk_error()}
| {:error, load_error()}
def abstract_forms(module) do
with :ok <- ensure_loaded(module),
{:ok, binary} <- binary(module) do
case :beam_lib.chunks(binary, [:abstract_code]) do
{:ok, {_, [abstract_code: {:raw_abstract_v1, abstract_forms}]}} ->
{:ok, abstract_forms}
{:error, :beam_lib, details} ->
reason = elem(details, 0)
{:error, reason}
_ ->
{:error, :abstract_forms_unavailable}
end
end
end
@doc """
Extracts the attributes from a module
"""
@spec attributes(module :: module()) ::
{:ok, Keyword.t()}
| {:error, :attributes_unavailable}
def attributes(module) do
  with :ok <- ensure_loaded(module) do
    try do
      # Wrap in an :ok tuple to match the declared spec.
      {:ok, Keyword.get(module.module_info(), :attributes, [])}
    catch
      _, _ ->
        {:error, :attributes_unavailable}
    end
  end
end
@doc """
Classifies an exported mfa into one of the following classifications
- :builtin - Export is a BIF.
- :generated - Export is a generated function.
- :normal - Export is a user defined function.
"""
@spec classify_export(module :: module(), function :: atom(), arity :: arity()) :: export_classification()
def classify_export(_, :module_info, 0), do: :generated
def classify_export(_, :module_info, 1), do: :generated
def classify_export(module, function, arity) do
if :erlang.is_builtin(module, function, arity) do
:builtin
else
:normal
end
end
@doc """
Compiles the provided abstract_forms with the given compiler_options.
In addition to compiling, the module will be loaded.
"""
@spec compile(abstract_forms :: [form()], compiler_options :: [compiler_option()]) ::
:ok
| {:error, binary_error()}
| {:error, {:abstract_forms_invalid, [form()], term()}}
def compile(abstract_forms, compiler_options \\ []) do
case :compile.forms(abstract_forms, [:return_errors | compiler_options]) do
{:ok, module, binary} ->
load_binary(module, binary)
{:ok, module, binary, _} ->
load_binary(module, binary)
errors ->
{:error, {:abstract_forms_invalid, abstract_forms, errors}}
end
end
@doc """
Extracts the compiler options from a module.
"""
@spec compiler_options(module :: module()) ::
{:ok, [compiler_option()]}
| {:error, :compiler_options_unavailable}
| {:error, chunk_error()}
| {:error, load_error()}
def compiler_options(module) do
with :ok <- ensure_loaded(module),
{:ok, binary} <- binary(module) do
case :beam_lib.chunks(binary, [:compile_info]) do
{:ok, {_, [compile_info: info]}} ->
filtered_options =
case Keyword.fetch(info, :options) do
{:ok, options} ->
filter_compiler_options(options)
:error ->
[]
end
{:ok, filtered_options}
{:error, :beam_lib, details} ->
reason = elem(details, 0)
{:error, reason}
_ ->
{:error, :compiler_options_unavailable}
end
end
end
@doc """
Extracts the exports from the provided abstract_forms for the module.
The exports returned can be controlled by the exposes argument.
"""
@spec exports(abstract_forms :: [form()], module :: module(), exposes :: Mock.exposes()) :: exports()
def exports(abstract_forms, module, :public) do
exports = Query.exports(abstract_forms)
filter_exports(module, exports, :normal)
end
def exports(abstract_forms, module, :all) do
exports = Query.functions(abstract_forms)
filter_exports(module, exports, :normal)
end
def exports(abstract_forms, module, exposes) do
exports = exposes ++ Query.exports(abstract_forms)
filter_exports(module, exports, :normal)
end
@doc """
Given a module and a list of exports filters the list of exports to those that
have the given classification.
See `classify_export/3` for information about export classification
"""
@spec filter_exports(module :: module, exports :: exports(), classification :: export_classification()) :: exports()
def filter_exports(module, exports, classification) do
Enum.filter(exports, fn {name, arity} ->
classify_export(module, name, arity) == classification
end)
end
@doc """
Freezes a module by generating a copy of it under a frozen name with all remote calls to the
`target` module re-routed to the frozen module.
"""
@spec freeze(module :: module) :: :ok | {:error, term}
def freeze(module) do
with {:ok, compiler_options} <- compiler_options(module),
{:ok, _} <- unstick_module(module),
{:ok, abstract_forms} <- abstract_forms(module),
frozen_forms = Generate.frozen(abstract_forms, module),
:ok <- compile(frozen_forms, compiler_options) do
:ok
end
end
@doc """
Mocks a module by generating a set of modules based on the `target` module.
The `target` module's Unit is returned on success.
"""
@spec module(module :: module(), options :: [option()]) :: {:ok, Unit.t()} | {:error, term}
def module(module, options \\ []) do
exposes = options[:exposes] || :public
with {:ok, compiler_options} <- compiler_options(module),
{:ok, sticky?} <- unstick_module(module),
{:ok, abstract_forms} <- abstract_forms(module),
local_exports = exports(abstract_forms, module, :all),
remote_exports = exports(abstract_forms, module, exposes),
delegate_forms = Generate.delegate(abstract_forms, module, local_exports),
facade_forms = Generate.facade(abstract_forms, module, remote_exports),
original_forms = Generate.original(abstract_forms, module, local_exports),
:ok <- compile(delegate_forms),
:ok <- compile(original_forms, compiler_options),
:ok <- compile(facade_forms) do
unit = %Unit{
abstract_forms: abstract_forms,
compiler_options: compiler_options,
module: module,
sticky?: sticky?
}
{:ok, unit}
end
end
@doc """
Purges a module from the code server
"""
@spec purge(module :: module()) :: boolean()
def purge(module) do
:code.purge(module)
:code.delete(module)
end
@doc """
Marks a module as sticky
"""
@spec stick_module(module :: module()) :: :ok | {:error, load_error()}
def stick_module(module) do
:code.stick_mod(module)
ensure_loaded(module)
end
@doc """
Unsticks a module
Returns `{:ok, was_sticky?}` on success, `{:error, reason}` otherwise
"""
@spec unstick_module(module :: module()) ::
{:ok, boolean()}
| {:error, load_error()}
def unstick_module(module) do
with :ok <- ensure_loaded(module) do
if :code.is_sticky(module) do
{:ok, :code.unstick_mod(module)}
else
{:ok, false}
end
end
end
## Private
@spec binary(module :: module()) :: {:ok, binary()} | {:error, :binary_unavailable}
defp binary(module) do
case :code.get_object_code(module) do
{^module, binary, _} ->
{:ok, binary}
:error ->
{:error, :binary_unavailable}
end
end
@spec ensure_loaded(module :: module()) :: :ok | {:error, load_error()}
defp ensure_loaded(module) do
with {:module, ^module} <- Code.ensure_loaded(module) do
:ok
end
end
@spec filter_compiler_options(options :: [term()]) :: [term()]
defp filter_compiler_options(options) do
Enum.filter(options, fn
{:parse_transform, _} ->
false
:makedeps_side_effects ->
false
:from_core ->
false
_ ->
true
end)
end
@spec load_binary(module :: module(), binary :: binary()) ::
:ok
| {:error, binary_error()}
defp load_binary(module, binary) do
with {:module, ^module} <- :code.load_binary(module, '', binary) do
:ok
end
end
end
# Source file: lib/patch/mock/code.ex
defmodule Legion.Identity.Information.AddressBook do
@moduledoc """
Functions for using the address book.
## Shared options
Functions `create_address/5` and `update_address/5` are subject to options below.
- `:type`: The new type of the address.
- `:name`: The new name of the address.
- `:country_name`: The name of the country.
- `:description`: The description of the address entry.
- `:state`: The new name of the state.
- `:city`: The new name of the city.
- `:neighborhood`: The new name of the neighborhood.
- `:zip_code`: The new zip code of the location.
- `:location`: The new coordinates of the location.
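## Example

A hypothetical call, assuming a user with id `1` exists and `:home` is a
valid `Address.address_type`:

    {:ok, _address} =
      Legion.Identity.Information.AddressBook.create_address(
        1,
        :home,
        "Home",
        "Turkey",
        city: "Istanbul"
      )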
"""
use Legion.Stereotype, :service
require Logger
alias Legion.Identity.Information.AddressBook.Address
alias Legion.Identity.Information.Registration, as: User
@env Application.get_env(:legion, Legion.Identity.Information.AddressBook)
@default_page_size Keyword.fetch!(@env, :listing_default_page_size)
@doc """
Adds an address entry to the user.
To get more information about the fields, see `Legion.Identity.Information.AddressBook.Address`.
"""
@spec create_address(
User.id(),
Address.address_type(),
String.t(),
String.t(),
Keyword.t()
) ::
{:ok, Address}
| {:error, Ecto.Changeset.t()}
def create_address(user_id, type, name, country_name, opts \\ [])
when is_integer(user_id) do
description = Keyword.get(opts, :description)
state = Keyword.get(opts, :state)
city = Keyword.get(opts, :city)
neighborhood = Keyword.get(opts, :neighborhood)
zip_code = Keyword.get(opts, :zip_code)
location = Keyword.get(opts, :location)
changeset =
Address.changeset(
%Address{},
%{
user_id: user_id,
type: type,
name: name,
description: description,
state: state,
city: city,
neighborhood: neighborhood,
zip_code: zip_code,
location: location,
country_name: country_name
}
)
Repo.insert(changeset)
end
@doc """
Updates address with given attributes.
## Options
See the "Shared options" section at the module documentation.
"""
@spec update_address(Address.id(), Address.address_type(), String.t(), String.t(), Keyword.t()) ::
{:ok, Address}
| {:error, Ecto.Changeset.t()}
| {:error, :not_found}
def update_address(address_id, type, name, country_name, opts \\ [])
when is_integer(address_id) do
if address = Repo.get_by(Address, id: address_id) do
description = Keyword.get(opts, :description)
state = Keyword.get(opts, :state)
city = Keyword.get(opts, :city)
neighborhood = Keyword.get(opts, :neighborhood)
zip_code = Keyword.get(opts, :zip_code)
location = Keyword.get(opts, :location)
changeset =
Address.changeset(
address,
%{
type: type,
name: name,
description: description,
state: state,
city: city,
neighborhood: neighborhood,
zip_code: zip_code,
location: location,
country_name: country_name
}
)
Repo.update(changeset)
else
{:error, :not_found}
end
end
@doc """
Lists addresses of the user.
## Options
- `:limit`: Limits the number of entities in result. Defaults to #{@default_page_size}.
- `:offset`: Skips given number of entities in result.
"""
@spec list_addresses_of_user(User.id(), Keyword.t()) ::
[Address]
def list_addresses_of_user(user_id, opts \\ [])
when is_integer(user_id) do
offset = Keyword.get(opts, :offset, 0)
given_limit = Keyword.get(opts, :limit, @default_page_size)
limit =
[given_limit, @default_page_size]
|> Enum.min()
unless given_limit <= @default_page_size do
  Logger.warn(fn ->
    "Paging violation: Expected page size was #{given_limit}, " <>
      "fenced to its default value #{@default_page_size}."
  end)
end
query =
from a in Address,
where: a.user_id == ^user_id,
offset: ^offset,
limit: ^limit,
order_by: [asc: :id],
select: a
Repo.all(query)
end
@doc """
Deletes an address with given identifier.
"""
@spec delete_address!(Address.id()) ::
Address
| no_return()
def delete_address!(address_id) do
address = Repo.get_by!(Address, id: address_id)
Repo.delete!(address)
end
end
# Source file: apps/legion/lib/identity/information/address_book/address_book.ex
defmodule Zipper do
@type t :: %Zipper{root: BinTree.t(), focus: BinTree.t(), path: [{BinTree.t(), :left | :right}] }
defstruct [:root, :path, :focus]
@doc """
Get a zipper focused on the root node.
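## Example

A sketch, assuming `BinTree` is a struct with `:value`, `:left` and
`:right` fields:

    tree = %BinTree{value: 1, left: %BinTree{value: 2}, right: nil}
    zipper = Zipper.from_tree(tree)
    Zipper.value(zipper)
    #=> 1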
"""
@spec from_tree(BinTree.t()) :: Zipper.t()
def from_tree(bin_tree) do
%Zipper{root: bin_tree, focus: bin_tree, path: []}
end
@doc """
Get the complete tree from a zipper.
"""
@spec to_tree(Zipper.t()) :: BinTree.t()
def to_tree(zipper) do
update_zipper(zipper).root
end
@doc """
Get the value of the focus node.
"""
@spec value(Zipper.t()) :: any
def value(zipper) do
zipper.focus.value
end
@doc """
Get the left child of the focus node, if any.
"""
@spec left(Zipper.t()) :: Zipper.t() | nil
def left(zipper) do
  if zipper.focus.left do
    %{zipper | path: [{zipper.focus, :left} | zipper.path], focus: zipper.focus.left}
  end
end
@doc """
Get the right child of the focus node, if any.
"""
@spec right(Zipper.t()) :: Zipper.t() | nil
def right(zipper) do
  if zipper.focus.right do
    %{zipper | path: [{zipper.focus, :right} | zipper.path], focus: zipper.focus.right}
  end
end
@doc """
Get the parent of the focus node, if any.
"""
@spec up(Zipper.t()) :: Zipper.t() | nil
def up(zipper) do
  if zipper.path != [] do
    [{parent, direction} | ancestors] = zipper.path
    new_parent = Map.put(parent, direction, zipper.focus)

    if ancestors == [] do
      %{zipper | focus: new_parent, path: [], root: new_parent}
    else
      %{zipper | focus: new_parent, path: ancestors}
    end
  end
end
defp update_zipper(zipper) do
  %{path: new_path, current: new_root} =
    List.foldl(zipper.path, %{path: [], current: zipper.focus}, fn {node, direction}, acc ->
      new_node = Map.put(node, direction, acc.current)
      %{acc | path: [{new_node, direction} | acc.path], current: new_node}
    end)

  %{zipper | path: Enum.reverse(new_path), root: new_root}
end
@doc """
Set the value of the focus node.
"""
@spec set_value(Zipper.t(), any) :: Zipper.t()
def set_value(zipper, value) do
%{zipper | focus: %{ zipper.focus | value: value}}
end
@doc """
Replace the left child tree of the focus node.
"""
@spec set_left(Zipper.t(), BinTree.t() | nil) :: Zipper.t()
def set_left(zipper, left) do
%{zipper | focus: %{ zipper.focus | left: left}}
end
@doc """
Replace the right child tree of the focus node.
"""
@spec set_right(Zipper.t(), BinTree.t() | nil) :: Zipper.t()
def set_right(zipper, right) do
%{zipper | focus: %{ zipper.focus | right: right}}
end
end
# Source file: zipper/lib/zipper.ex
defmodule TimeAgo do
@moduledoc """
Module for getting the amount of days/hours/minutes/seconds since past a specific date.
This module is inspired of functionality in many social networks
"""
@doc """
Returns a tuple whose first element is the unit as an atom and whose
second element is the amount of days/hours/minutes/seconds.
The second argument defaults to the current time in the UTC timezone.
## Examples
# returns original date if amount of days are more than 7
iex> TimeAgo.from_date ~N[2019-12-10 23:00:00], ~N[2019-12-21 22:30:00]
{:date, ~N[2019-12-10 23:00:00]}
# returns days if amount of days are more than 0
iex> TimeAgo.from_date ~N[2019-12-18 12:00:00], ~N[2019-12-21 22:30:00]
{:days, 3}
# returns hours if amount of hours are more than 0
iex> TimeAgo.from_date ~N[2019-12-21 20:00:00], ~N[2019-12-21 22:30:00]
{:hours, 2}
# returns minutes if amount of minutes are more than 0
iex> TimeAgo.from_date ~N[2019-12-21 18:00:00], ~N[2019-12-21 18:30:00]
{:minutes, 30}
# returns seconds if amount of seconds are more than 0
iex> TimeAgo.from_date ~N[2019-12-21 13:00:00], ~N[2019-12-21 13:00:55]
{:seconds, 55}
"""
@spec from_date(Calendar.naive_datetime(), Calendar.naive_datetime()) ::
        {atom, non_neg_integer} | {:date, Calendar.naive_datetime()}
def from_date(first, last \\ DateTime.utc_now) do
%{days: days, hours: hours, minutes: minutes, seconds: seconds} = calc_diff first, last
cond do
  days > 7 -> {:date, first}
  days > 0 -> {:days, days}
  hours > 0 -> {:hours, hours}
  minutes > 0 -> {:minutes, minutes}
  seconds > 0 -> {:seconds, seconds}
  # Avoid a CondClauseError for identical (or out-of-order) timestamps.
  true -> {:seconds, 0}
end
end
@spec calc_diff(Calendar.naive_datetime(), Calendar.naive_datetime()) :: map
defp calc_diff(first, last) do
  # Work from the total difference in seconds so that month and year
  # boundaries are handled correctly; subtracting fields one by one
  # breaks down across them.
  diff_seconds = NaiveDateTime.diff(last, first)

  %{
    days: div(diff_seconds, 86_400),
    hours: div(rem(diff_seconds, 86_400), 3_600),
    minutes: div(rem(diff_seconds, 3_600), 60),
    seconds: rem(diff_seconds, 60)
  }
end
end
# Source file: lib/time_ago.ex
defmodule Ornia.Core.Driver do
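  @moduledoc """
  A transient GenServer modelling a driver's lifecycle:
  `:online -> :available -> :dispatched -> :riding -> :available`.
  The process registers itself on the `Grid` at its current coordinates
  and keeps references to its active `Pickup`, `Passenger` and `Ride`.
  """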
use GenServer, restart: :transient
alias Ornia.Core.{
Grid,
RideSupervisor,
Passenger,
Pickup,
Ride,
}
def start_link([user, coordinates]) do
state = %{
user: user,
coordinates: coordinates,
pickup: nil,
passenger: nil,
ride: nil,
status: :online,
}
GenServer.start_link(__MODULE__, state, [])
end
def init(state) do
Grid.join(self(), state.coordinates, [:driver])
{:ok, state}
end
def handle_call(:available, _from, state=%{status: status}) when status != :riding and status != :dispatched,
  do: {:reply, :ok, %{state | status: :available}}
def handle_call(:unavailable, _from, state=%{status: :available}),
do: {:reply, :ok, %{state | status: :online}}
def handle_call(:offline, _from, state=%{status: status}) when status == :available or status == :online,
do: {:stop, :normal, :ok, state}
def handle_call({:dispatched, pickup, passenger}, _from, state) do
{:reply, :ok, %{state | pickup: pickup, passenger: passenger, status: :dispatched}}
end
def handle_call(:arrived, _from, state=%{pickup: pickup, passenger: passenger, status: :dispatched}) when not is_nil(pickup) do
Pickup.complete(pickup)
{:ok, ride} = RideSupervisor.start_child(passenger, self(), state.coordinates)
Passenger.depart(passenger, ride)
{:reply, :ok, %{state | ride: ride, pickup: nil, status: :riding}}
end
def handle_call(:dropoff, _from, state=%{status: :riding, ride: ride}) when not is_nil(ride) do
Ride.complete(ride)
{:reply, :ok, %{state | ride: nil, passenger: nil, status: :available}}
end
def handle_call({:move, coordinates}, _from, state) do
Grid.update(self(), state.coordinates, coordinates)
if state.pickup, do: Pickup.move(state.pickup, coordinates)
if state.ride, do: Ride.move(state.ride, coordinates)
{:reply, :ok, %{state | coordinates: coordinates}}
end
def offline(pid),
do: GenServer.call(pid, :offline)
def available(pid),
do: GenServer.call(pid, :available)
def unavailable(pid),
do: GenServer.call(pid, :unavailable)
def dispatch(pid, pickup, passenger),
do: GenServer.call(pid, {:dispatched, pickup, passenger})
def arrive(pid),
do: GenServer.call(pid, :arrived)
def dropoff(pid),
do: GenServer.call(pid, :dropoff)
def move(pid, coordinates),
do: GenServer.call(pid, {:move, coordinates})
end
# Source file: apps/core/lib/core/driver.ex
defmodule Sagax.Test.Builder do
alias __MODULE__
alias Sagax.Test.Log
import ExUnit.Assertions
import Sagax.Test.Assertions
defstruct log: nil, args: nil, context: nil
def new_builder(opts), do: struct!(Builder, opts)
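# Builds an effect function for a saga test. The returned function asserts
# the received results/opts/args/context against `outer_opts`, optionally
# sleeps to simulate latency, logs `value`, and returns an `{:ok, ...}`
# (optionally tagged) result.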
def effect(builder, value, outer_opts \\ []) do
fn results, args, context, opts ->
if Keyword.has_key?(outer_opts, :results) do
assert_saga_results results, Keyword.get(outer_opts, :results, [])
end
if Keyword.has_key?(outer_opts, :opts) do
assert Enum.all?(Keyword.get(outer_opts, :opts), fn {k, v} ->
Keyword.get(opts, k) === v
end),
message: "Expected opts to match",
left: opts,
right: Keyword.get(outer_opts, :opts)
end
if Keyword.get(outer_opts, :delay, true) do
Process.sleep(:rand.uniform(250))
end
assert args == Keyword.get(outer_opts, :args, Map.get(builder, :args))
assert context == Keyword.get(outer_opts, :context, Map.get(builder, :context))
if Keyword.has_key?(outer_opts, :tag) do
{:ok, Log.log(builder.log, value), Keyword.get(outer_opts, :tag)}
else
{:ok, Log.log(builder.log, value)}
end
end
end
def effect_error(builder, value, outer_opts \\ []) do
fn results, args, context, opts ->
if Keyword.has_key?(outer_opts, :results) do
assert_saga_results results, Keyword.get(outer_opts, :results, [])
end
if Keyword.has_key?(outer_opts, :opts) do
assert Enum.all?(Keyword.get(outer_opts, :opts), fn {k, v} ->
Keyword.get(opts, k) === v
end),
message: "Expected opts to match",
left: opts,
right: Keyword.get(outer_opts, :opts)
end
if Keyword.get(outer_opts, :delay, true) do
Process.sleep(:rand.uniform(250))
end
assert args == Keyword.get(outer_opts, :args, Map.get(builder, :args))
assert context == Keyword.get(outer_opts, :context, Map.get(builder, :context))
{:error, Log.log(builder.log, value)}
end
end
def compensation(builder, value, outer_opts \\ []) do
fn result, results, args, context, opts ->
assert result == value,
message: "Expected the result of the effect to compensate to match",
left: result,
right: value
if Keyword.has_key?(outer_opts, :results) do
assert_saga_results results, Keyword.get(outer_opts, :results, [])
end
if Keyword.has_key?(outer_opts, :opts) do
assert Enum.all?(Keyword.get(outer_opts, :opts), fn {k, v} ->
Keyword.get(opts, k) === v
end),
message: "Expected opts to match",
left: opts,
right: Keyword.get(outer_opts, :opts)
end
if Keyword.get(outer_opts, :delay, true) do
Process.sleep(:rand.uniform(250))
end
assert args == Keyword.get(outer_opts, :args, Map.get(builder, :args))
assert context == Keyword.get(outer_opts, :context, Map.get(builder, :context))
Log.log(builder.log, "#{value}.comp")
Keyword.get(outer_opts, :result, :ok)
end
end
end
# Source file: test/support/builder.ex
defmodule Chex.Move.SmithParser do
@moduledoc false
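  # NOTE: the clauses below are mechanical, combinator-style parser code
  # (it appears to be generated, e.g. by NimbleParsec) and are not intended
  # to be edited by hand.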
@doc """
Parses the given `binary` as move.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the move (start position) as `{line, column_on_line}`.
## Options
* `:byte_offset` - the byte offset for the whole binary, defaults to 0
* `:line` - the line and the byte offset into that line, defaults to `{1, byte_offset}`
* `:context` - the initial context value. It will be converted to a map
"""
@spec move(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def move(binary, opts \\ []) when is_binary(binary) do
context = Map.new(Keyword.get(opts, :context, []))
byte_offset = Keyword.get(opts, :byte_offset, 0)
line =
case(Keyword.get(opts, :line, 1)) do
{_, _} = line ->
line
line ->
{line, byte_offset}
end
case(move__0(binary, [], [], context, line, byte_offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
@compile {:inline,
[
move__19: 6,
move__0: 6,
move__18: 6,
move__15: 6,
move__14: 6,
move__9: 6,
move__13: 6,
move__10: 6,
move__12: 6,
move__8: 6,
move__7: 6,
move__2: 6,
move__6: 6,
move__3: 6,
move__5: 6
]}
defp move__0(rest, acc, stack, context, line, offset) do
move__16(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp move__2(rest, acc, stack, context, line, offset) do
move__3(rest, [], [acc | stack], context, line, offset)
end
defp move__3(rest, acc, stack, context, line, offset) do
move__4(rest, [], [acc | stack], context, line, offset)
end
defp move__4(<<"e1c1c", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
move__5(rest, acc, stack, context, comb__line, comb__offset + 5)
end
defp move__4(<<"e8c8c", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
move__5(rest, acc, stack, context, comb__line, comb__offset + 5)
end
defp move__4(rest, _acc, _stack, context, line, offset) do
{:error,
"expected square, followed by square, followed by utf8 codepoint equal to 'c' or string \"e1g1c\" or string \"e8g8c\" or string \"e1c1c\" or string \"e8c8c\"",
rest, context, line, offset}
end
defp move__5(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
move__6(rest, [:queenside] ++ acc, stack, context, line, offset)
end
defp move__6(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
move__7(
rest,
[
castle:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp move__7(rest, acc, [_, previous_acc | stack], context, line, offset) do
move__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp move__8(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
move__2(rest, [], stack, context, line, offset)
end
defp move__9(rest, acc, stack, context, line, offset) do
move__10(rest, [], [acc | stack], context, line, offset)
end
defp move__10(rest, acc, stack, context, line, offset) do
move__11(rest, [], [acc | stack], context, line, offset)
end
defp move__11(<<"e1g1c", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
move__12(rest, acc, stack, context, comb__line, comb__offset + 5)
end
defp move__11(<<"e8g8c", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
move__12(rest, acc, stack, context, comb__line, comb__offset + 5)
end
defp move__11(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
move__8(rest, acc, stack, context, line, offset)
end
defp move__12(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
move__13(rest, [:kingside] ++ acc, stack, context, line, offset)
end
defp move__13(rest, user_acc, [acc | stack], context, line, offset) do
_ = user_acc
move__14(
rest,
[
castle:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp move__14(rest, acc, [_, previous_acc | stack], context, line, offset) do
move__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp move__15(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
move__9(rest, [], stack, context, line, offset)
end
defp move__16(
<<x0::utf8, x1::utf8, x2::utf8, x3::utf8, rest::binary>>,
acc,
stack,
context,
comb__line,
comb__offset
)
when x0 >= 97 and x0 <= 104 and (x1 >= 49 and x1 <= 56) and (x2 >= 97 and x2 <= 104) and
(x3 >= 49 and x3 <= 56) do
move__17(
rest,
[destination: [x2, x3], origin: [x0, x1]] ++ acc,
stack,
context,
comb__line,
comb__offset + byte_size(<<x0::utf8>>) + byte_size(<<x1::utf8>>) + byte_size(<<x2::utf8>>) +
byte_size(<<x3::utf8>>)
)
end
defp move__16(rest, acc, stack, context, line, offset) do
move__15(rest, acc, stack, context, line, offset)
end
defp move__17(<<x0::utf8, _::binary>> = rest, acc, stack, context, line, offset)
when x0 === 99 do
move__15(rest, acc, stack, context, line, offset)
end
defp move__17(rest, acc, stack, context, line, offset) do
move__18(rest, acc, stack, context, line, offset)
end
defp move__18(rest, acc, [_, previous_acc | stack], context, line, offset) do
move__1(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp move__1(<<x0::utf8, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 114 or x0 === 110 or x0 === 98 or x0 === 113 or x0 === 107 do
move__19(
rest,
[promote: [x0]] ++ acc,
stack,
context,
comb__line,
comb__offset + byte_size(<<x0::utf8>>)
)
end
defp move__1(<<rest::binary>>, acc, stack, context, comb__line, comb__offset) do
move__19(rest, [] ++ acc, stack, context, comb__line, comb__offset)
end
defp move__19(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
end
# Source file: lib/chex/move/smith/smith_parser.ex
defmodule Float do
@moduledoc """
Functions for working with floating point numbers.
"""
@doc """
Parses a binary into a float.
If successful, returns a tuple of the form `{ float, remainder_of_binary }`.
Otherwise `:error`.
## Examples
iex> Float.parse("34")
{34.0,""}
iex> Float.parse("34.25")
{34.25,""}
iex> Float.parse("56.5xyz")
{56.5,"xyz"}
iex> Float.parse("pi")
:error
"""
@spec parse(binary) :: { float, binary } | :error
def parse(binary) when is_binary(binary) do
case Integer.parse binary do
:error -> :error
{ integer_part, after_integer } -> parse after_integer, integer_part
end
end
# Dot followed by digit is required afterwards or we are done
defp parse(<< ?., char, rest :: binary >>, int) when char in ?0..?9 do
parse(rest, char - ?0, 1, int)
end
defp parse(rest, int) do
{ :erlang.float(int), rest }
end
# Handle decimal points
defp parse(<< char, rest :: binary >>, float, decimal, int) when char in ?0..?9 do
parse rest, 10 * float + (char - ?0), decimal + 1, int
end
defp parse(<< ?e, after_e :: binary >>, float, decimal, int) do
case Integer.parse after_e do
:error ->
# Note we rebuild the binary here instead of breaking it apart at
# the function clause because the current approach copies a binary
# just on this branch. If we broke it apart in the function clause,
# the copy would happen when calling Integer.parse/1.
{ floatify(int, float, decimal), << ?e, after_e :: binary >> }
{ exponential, after_exponential } ->
{ floatify(int, float, decimal, exponential), after_exponential }
end
end
defp parse(bitstring, float, decimal, int) do
{ floatify(int, float, decimal), bitstring }
end
defp floatify(int, float, decimal, exponential \\ 0) do
multiplier = if int < 0, do: -1.0, else: 1.0
# Try to ensure the minimum amount of rounding errors
result = multiplier * (abs(int) * :math.pow(10, decimal) + float) * :math.pow(10, exponential - decimal)
# Try avoiding stuff like this:
# iex(1)> 0.0001 * 75
# 0.007500000000000001
# Due to IEEE 754 floating point standard
# http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
final_decimal_places = decimal - exponential
if final_decimal_places > 0 do
decimal_power_round = :math.pow(10, final_decimal_places)
trunc(result * decimal_power_round) / decimal_power_round
else
result
end
end
@doc """
Round a float to the largest integer less than or equal to `num`
## Examples
iex> Float.floor(34)
34
iex> Float.floor(34.25)
34
iex> Float.floor(-56.5)
-57
"""
@spec floor(float | integer) :: integer
def floor(num) when is_integer(num), do: num
def floor(num) when is_float(num) do
truncated = :erlang.trunc(num)
case :erlang.abs(num - truncated) do
x when x > 0 and num < 0 -> truncated - 1
_ -> truncated
end
end
@doc """
Round a float to the smallest integer greater than or equal to `num`
## Examples
iex> Float.ceil(34)
34
iex> Float.ceil(34.25)
35
iex> Float.ceil(-56.5)
-56
"""
@spec ceil(float | integer) :: integer
def ceil(num) when is_integer(num), do: num
def ceil(num) when is_float(num) do
truncated = :erlang.trunc(num)
case :erlang.abs(num - truncated) do
x when x > 0 and num > 0 -> truncated + 1
_ -> truncated
end
end
@doc """
Rounds a floating point value to an arbitrary number of fractional digits
(between 0 and 15) with an optional midpoint rounding mode (:up or :down,
defaults to :up).
## Examples
iex> Float.round(5.5675, 3)
5.568
iex> Float.round(5.5675, 3, :down)
5.567
iex> Float.round(-5.5675, 3)
-5.567
iex> Float.round(-5.5675, 3, :down)
-5.568
"""
@spec round(float, integer, atom | nil) :: float
def round(number, precision, midpoint_rounding \\ :up) when is_float(number) and is_integer(precision) and precision in 0..15 do
# Default to :up if anything but :down is provided for midpoint rounding mode
case midpoint_rounding do
:down -> Kernel.round(Float.floor(number * :math.pow(10, precision))) / :math.pow(10, precision)
_ -> Kernel.round(Float.ceil(number * :math.pow(10, precision))) / :math.pow(10, precision)
end
end
end
# Source file: lib/elixir/lib/float.ex
defmodule OpenStax.Swift.Ecto.Model do
@moduledoc """
This module simplifies using OpenStax Swift storage with Ecto Models.
It exposes several functions that allow to easily upload/download files
that should be logically bound to certain record.
It automatically performs MIME type checks and ensures that uploaded
files have right MIME type in the storage.
By design, only one file per record is allowed.
If either `file_type` or `file_size`, `file_etag`, `file_name` fields will
be present in the model it will be automatically updated. Names of these
fields can be overriden by overriding `swift_file_size_field`,
`swift_file_type_field`, `swift_file_etag_field` and `swift_file_name_field`
functions. Override this to functions that return `nil` to disable that feature.
An example model that uses `OpenStax.Swift.Ecto.Model`:
defmodule MyApp.MyModel do
use Ecto.Model
use OpenStax.Swift.Ecto.Model
@required_fields ~w()
@optional_fields ~w(file_size file_type file_etag file_name)
schema "mymodel" do
field :file_size, :integer
field :file_type, :string
field :file_etag, :string
field :file_name, :string
timestamps
end
def swift_endpoint_id(_record), do: :myendpoint
def swift_container(_record), do: :somecontainer
def swift_object_id(record), do: "something_" <> record.id
def changeset(model, params \\ :empty) do
model
|> cast(params, @required_fields, @optional_fields)
end
end
An example usage:
defmodule MyApp.MyLogic do
def attach_file_to_record(record_id, path) do
record = MyApp.Repo.get!(MyApp.MyModel, record_id)
case OpenStax.Swift.Ecto.Model.upload(MyApp.Repo, record, {:file, path}) do
{:ok, record} ->
IO.puts "OK " <> OpenStax.Swift.Ecto.Model.temp_url(record)
{:error, reason} ->
IO.puts "ERROR " <> inspect(reason)
end
end
end
"""
@doc """
Returns string that contains Swift Object ID for given record.
Default implementation takes passed struct type and adds value of the `id`
field of the struct, concatenated using underscore.
"""
@callback swift_object_id(map) :: String.t
@doc """
Returns OpenStax Swift endpoint ID as atom for given record.
Endpoint has to be previously configures, please refer to OpenStax Swift
documentation to see how to do it.
"""
@callback swift_endpoint_id(map) :: atom
@doc """
Returns container name as atom or string for given record.
"""
@callback swift_container(map) :: atom | String.t
@doc """
Returns field name that should contain file size for given record.
Return nil if you want to disable this feature.
"""
@callback swift_file_size_field(map) :: atom | nil
@doc """
Returns field name that should contain file type for given record.
Return nil if you want to disable this feature.
"""
@callback swift_file_type_field(map) :: atom | nil
@doc """
Returns field name that should contain file type for given record.
Return nil if you want to disable this feature.
"""
@callback swift_file_etag_field(map) :: atom | nil
@doc """
Returns field name that should contain file name for given record.
Return nil if you want to disable this feature.
"""
@callback swift_file_name_field(map) :: atom | nil
defmacro __using__(_) do
quote location: :keep do
@behaviour OpenStax.Swift.Ecto.Model
# Default implementations
@doc false
def swift_object_id(record) do
# When we serialize __struct__, "Elixir." prefix is added,
# we want to remove it.
prefix = to_string(record.__struct__) |> String.split(".", parts: 2) |> List.last
suffix = to_string(record.id)
prefix <> "_" <> suffix
end
@doc false
def swift_file_size_field(_record), do: :file_size
@doc false
def swift_file_type_field(_record), do: :file_type
@doc false
def swift_file_etag_field(_record), do: :file_etag
@doc false
def swift_file_name_field(_record), do: :file_name
defoverridable [
swift_object_id: 1,
swift_file_type_field: 1,
swift_file_size_field: 1,
swift_file_etag_field: 1,
swift_file_name_field: 1,
]
end
end
@doc """
Synchronously uploads file from given path to the storage and associates
it with given record using given repo.
If any of the `file_size`, `file_type`, `file_etag` or `file_name` fields are
present in the model, they will be updated automatically. The names of these
fields can be overridden via the `swift_file_size_field`,
`swift_file_type_field`, `swift_file_etag_field` and `swift_file_name_field`
functions. Override them to return `nil` to disable this feature.
It creates single Object in the storage.
First argument is a repo to use while updating the record.
Second argument is a record that is supposed to be "an owner" of the file.
Third argument is a file contents passed as string (not recommended)
or path to the file, represented as `{:file, path}`.
Fourth argument is a list of options containing `{:key, value}` pairs.
Supported keys are:
* :mime_type - if present, use given MIME type instead of performing guess.
On success it returns `{:ok, record}`.
On failure to communicate with the storage it returns
`{:error, {:storage, reason}}`.
On failure to update the record it returns `{:error, {:update, changeset}}`.
"""
@spec upload(Ecto.Repo.t, map, String.t | {:file, String.t}, Keyword.t) :: {:ok, map} | {:error, any}
def upload(repo, record, body, options \\ [])
def upload(repo, record, body, options) when is_binary(body) and is_map(record) do
{:ok, tempfile_fd, tempfile_path} = Temp.open(to_string(__MODULE__))
IO.binwrite(tempfile_fd, body)
result = do_upload(repo, record, tempfile_path, nil, options)
File.close(tempfile_fd)
File.rm!(tempfile_path)
result
end
def upload(repo, record, {:file, path}, options) when is_binary(path) and is_map(record) do
do_upload(repo, record, path, Path.basename(path), options)
end
@doc """
Does the same as `upload/4` but raises an error in case of failure.
"""
@spec upload!(Ecto.Repo.t, map, String.t | {:file, String.t}) :: map
def upload!(repo, record, body) when is_map(record) do
case upload(repo, record, body) do
{:ok, record} -> record
{:error, reason} -> raise "upload failed: #{inspect(reason)}"
end
end
@doc """
Generates temporary URL for given record with specified expiry time.
"""
@spec temp_url(map, non_neg_integer) :: String.t
def temp_url(record, expires \\ 3600) do
OpenStax.Swift.Middleware.TempURL.generate(
record.__struct__.swift_endpoint_id(record),
record.__struct__.swift_container(record),
record.__struct__.swift_object_id(record),
expires)
end
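# A sketch of generating a short-lived download link (assumes `record` is a
# struct whose module implements the callbacks above; the 60-second expiry is
# illustrative):
#
#   url = OpenStax.Swift.Ecto.Model.temp_url(record, 60)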
defp do_upload(repo, record, path, file_name, options) do
# Get MIME type if not passed
file_type = case options |> List.keyfind(:mime_type, 0) do
nil ->
mime_result = FileInfo.get_info(path)[path]
%FileInfo.Mime{subtype: mime_subtype, type: mime_type} = mime_result
mime_type <> "/" <> mime_subtype
{_, mime_type} ->
mime_type
end
# Get file size
%File.Stat{size: file_size} = File.stat!(path)
# Upload the file
object_id = record.__struct__.swift_object_id(record)
endpoint_id = record.__struct__.swift_endpoint_id(record)
container = record.__struct__.swift_container(record)
case OpenStax.Swift.API.Object.create(endpoint_id, container, object_id, {:file, path}, file_type, "attachment", file_name) do
{:ok, %{etag: file_etag}} ->
# Update record
file_type_field = record.__struct__.swift_file_type_field(record)
file_size_field = record.__struct__.swift_file_size_field(record)
file_etag_field = record.__struct__.swift_file_etag_field(record)
file_name_field = record.__struct__.swift_file_name_field(record)
changeset = record.__struct__.changeset(record, %{})
changeset = if Map.has_key?(record, file_type_field) do
changeset |> Ecto.Changeset.put_change(file_type_field, file_type)
else
changeset
end
changeset = if Map.has_key?(record, file_size_field) do
changeset |> Ecto.Changeset.put_change(file_size_field, file_size)
else
changeset
end
changeset = if Map.has_key?(record, file_etag_field) do
changeset |> Ecto.Changeset.put_change(file_etag_field, file_etag)
else
changeset
end
changeset = if Map.has_key?(record, file_name_field) do
changeset |> Ecto.Changeset.put_change(file_name_field, file_name)
else
changeset
end
case repo.update(changeset) do
{:ok, record} -> {:ok, record}
{:error, changeset} -> {:error, {:update, changeset}}
end
{:error, reason} ->
{:error, {:storage, reason}}
end
end
end
# file: lib/openstax_swift_ecto/model.ex
defmodule Caylir.Graph.Config do
@moduledoc """
## How To Configure
Configuration values can be stored in two locations:
- hardcoded inline
- application environment
If you combine inline configuration with an `:otp_app` setting the
inline defaults will be overwritten by and/or merged with the
application environment values when the configuration is accessed.
Additionally you can use your application supervision tree to
trigger an initializer module ("dynamic configuration") that is able to
configure your application environment during startup.
### Inline Configuration
For some use cases (e.g. testing) it may be sufficient to define hardcoded
configuration defaults outside of your application environment:
defmodule MyGraph do
use Caylir.Graph,
config: [
host: "localhost",
port: 64210
]
end
### Environment Configuration
When not using hardcoded configuration values you can use the
application environment for configuration:
defmodule MyGraph do
use Caylir.Graph,
otp_app: :my_app
end
config :my_app, MyGraph,
host: "localhost",
port: 64210
### Dynamic Configuration
If you cannot, or do not want to, use a static application config you can
configure an initializer module that will be called every time your graph
is started (or restarted) in your supervision tree:
# {mod, fun}
[
init: {MyInitModule, :my_init_mf}
]
# {mod, fun, args}
[
init: {MyInitModule, :my_init_mfargs, [:foo, :bar]}
]
# the configured initializer module
defmodule MyInitModule do
@spec my_init_mf(module) :: :ok
def my_init_mf(graph), do: my_init_mfargs(graph, :foo, :bar)
@spec my_init_mfargs(module, atom, atom) :: :ok
def my_init_mfargs(graph, :foo, :bar) do
config =
Keyword.merge(
graph.config(),
host: "localhost",
port: 64210
)
Application.put_env(:my_app, graph, config)
end
end
When the graph is started the function will be called with the graph module
as the first parameter, followed by any extra arguments from your
`{m, f, args}` configuration. This will be done before the graph is available
for use.
The initializer function is expected to always return `:ok`.
## What To Configure
As every configuration value can be set both inline and using the
application environment, the documentation uses plain keyword lists.
Several configuration entries will be set to default values
if they are not set manually:
[
host: "localhost",
json_decoder: {Jason, :decode!, [[keys: :atoms]]},
json_encoder: {Jason, :encode!, []},
language: :gizmo,
port: 64210,
scheme: "http"
]
### Query Language Configuration
By default all queries are expected to use the Gizmo query language.
Using the configuration key `:language` you can switch the default
language endpoint to use:
[
language: :graphql
]
The HTTP URLs used to send the queries (for both `:query` and `:shape` calls)
are constructed as `/api/v1/\#{call}/\#{language}`. Depending on your
choice and the Cayley version in use, the `:shape` endpoint might not be available.
### Query Limit Configuration
You can define a default query limit by adding it to your graph config:
[
limit: -1
]
### Query Timeout Configuration
Using all default values and no specific parameters each query is allowed
to take up to 5000 milliseconds (`GenServer.call/2` timeout) to complete.
That may be too long or not long enough in some cases.
To change that timeout you can configure your graph:
# lowering timeout to 500 ms
[
query_timeout: 500
]
or pass an individual timeout for a single query:
MyGraph.query(query, timeout: 250)
A per-query or graph-wide timeout configuration overrides any
`:recv_timeout` of your `:hackney` (HTTP client) configuration.
This does not apply to write requests. They are currently only affected by
configured `:recv_timeout` values. Setting a graph timeout enables you to
have a different timeout for read and write requests.
### JSON Decoder/Encoder Configuration
By default the library used for encoding/decoding JSON is `:jason`.
For the time being `:caylir` directly depends on it to ensure it is available.
If you want to use another library you can switch it:
[
json_decoder: MyJSONLibrary,
json_encoder: MyJSONLibrary
]
[
json_decoder: {MyJSONLibrary, :decoder_argless},
json_encoder: {MyJSONLibrary, :encoder_argless}
]
[
json_decoder: {MyJSONLibrary, :decode_it, [[keys: :atoms]]},
json_encoder: {MyJSONLibrary, :encode_it, []}
]
If you configure only a module name it will be called
as `module.decode!(binary)` and `module.encode!(map)`. When using
a more complete `{m, f}` or `{m, f, args}` configuration the data
to decode/encode will passed as the first argument with your
configured extra arguments following.
"""
@doc """
Retrieves the graph configuration for `graph` in `otp_app`.
"""
@spec config(atom, module, Keyword.t()) :: Keyword.t()
def config(nil, _, defaults), do: defaults
def config(otp_app, graph, defaults) do
defaults
|> Keyword.merge(Application.get_env(otp_app, graph, []))
|> Keyword.put(:otp_app, otp_app)
end
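# For example (hypothetical values): with inline defaults of `[port: 64210]`
# and an application environment of `[host: "cayley.local"]` stored for
# `MyGraph`, the merge above yields:
#
#   config(:my_app, MyGraph, port: 64210)
#   #=> [otp_app: :my_app, port: 64210, host: "cayley.local"]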
end
# file: lib/caylir/graph/config.ex
alias ICouch.Document
defmodule ICouch.Collection do
@moduledoc """
Collection of documents useful for batch tasks.
This module helps collecting documents together for batch uploading. It keeps
track of the document byte sizes to allow a size cap and supports replacing a
document already in the collection. Furthermore it is possible to associate
meta data with documents (e.g. a sequence number).
There are implementations for the `Enumerable` and `Collectable` protocol for
your convenience.
All functions with the `_meta` suffix return the document and meta data as
2-tuple, unless otherwise specified.
"""
@behaviour Access
defstruct [
contents: %{},
byte_size: 0
]
@type doc_id :: String.t
@type doc_rev :: String.t | nil
@type meta :: term
@type t :: %__MODULE__{
contents: %{optional(doc_id) => {Document.t, meta}},
byte_size: integer
}
@doc """
Initialize a collection struct.
"""
@spec new() :: t
def new(), do: %__MODULE__{}
@doc """
Returns whether the collection is empty.
"""
@spec empty?(coll :: t) :: boolean
def empty?(%__MODULE__{contents: contents}),
do: map_size(contents) == 0
@doc """
Returns the number of documents in the collection.
"""
@spec count(coll :: t) :: integer
def count(%__MODULE__{contents: contents}),
do: map_size(contents)
@doc """
Returns the byte size of all documents in the collection as if they were
encoded entirely in JSON (including attachments in Base64).
"""
@spec byte_size(coll :: t) :: integer
def byte_size(%__MODULE__{byte_size: byte_size}),
do: byte_size
@doc """
Returns all document IDs as list.
"""
def doc_ids(%__MODULE__{contents: contents}),
do: Map.keys(contents)
@doc """
Returns a list of tuples pairing document IDs and their rev values.
"""
@spec doc_revs(coll :: t) :: [{doc_id, doc_rev}]
def doc_revs(%__MODULE__{contents: contents}),
do: Enum.map(contents, fn {doc_id, {%Document{rev: rev}, _}} -> {doc_id, rev} end)
@doc """
Returns a list of tuples with document IDs, their rev values and metadata.
"""
@spec doc_revs_meta(coll :: t) :: [{doc_id, doc_rev, meta}]
def doc_revs_meta(%__MODULE__{contents: contents}) do
for {doc_id, {%Document{rev: rev}, meta}} <- contents, do: {doc_id, rev, meta}
end
@doc """
Returns all documents as list.
"""
@spec to_list(coll :: t) :: [Document.t]
def to_list(%__MODULE__{contents: contents}) do
for {doc, _} <- Map.values(contents), do: doc
end
@doc """
Returns all documents and meta data as list of tuples.
"""
@spec to_list_meta(coll :: t) :: [{Document.t, meta}]
def to_list_meta(%__MODULE__{contents: contents}),
do: Map.values(contents)
@doc """
Puts a document into the collection, replacing an existing document if needed.
Also accepts a 2-tuple of a document and meta data; meta data is set to `nil`
if not given.
"""
@spec put(coll :: t, Document.t | {Document.t, meta}) :: t
def put(coll, %Document{} = doc),
do: put(coll, doc, nil)
def put(coll, {%Document{} = doc, meta}),
do: put(coll, doc, meta)
@doc """
Puts a document with meta data into the collection, replacing an existing
document and meta data if needed.
"""
@spec put(coll :: t, Document.t, meta) :: t
def put(%__MODULE__{contents: contents, byte_size: bs} = coll, %Document{id: doc_id} = doc, meta) do
bs = Document.full_json_byte_size(doc) + case Map.fetch(contents, doc_id) do
:error -> bs
{:ok, {old_doc, _}} -> bs - Document.full_json_byte_size(old_doc)
end
%{coll | contents: Map.put(contents, doc_id, {doc, meta}), byte_size: bs}
end
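# Replacing a document keeps `byte_size` consistent: the previous document's
# JSON size is subtracted before the new one is added. With hypothetical
# `doc_v1` and `doc_v2` sharing the same id:
#
#   coll = ICouch.Collection.new() |> ICouch.Collection.put(doc_v1)
#   coll = ICouch.Collection.put(coll, doc_v2)
#   # ICouch.Collection.byte_size(coll) now reflects only doc_v2, not the sum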
@doc """
Replaces meta data of the document specified by `doc_id` in the collection.
Warning: if the document does not exist, returns `coll` unchanged.
"""
@spec set_meta(coll :: t, doc_id, meta) :: t
def set_meta(%__MODULE__{contents: contents} = coll, doc_id, meta) do
case Map.fetch(contents, doc_id) do
:error -> coll
{:ok, {doc, _}} -> %{coll | contents: Map.put(contents, doc_id, {doc, meta})}
end
end
@doc """
Fetches the document of a specific `doc_id` in the given `coll`.
If `coll` contains the document with the given `doc_id`, then `{:ok, doc}` is
returned. If `coll` doesn't contain the document, `:error` is returned.
Part of the Access behavior.
"""
@spec fetch(coll :: t, doc_id) :: {:ok, Document.t} | :error
def fetch(%__MODULE__{contents: contents}, doc_id) do
case Map.fetch(contents, doc_id) do
{:ok, {doc, _}} -> {:ok, doc}
:error -> :error
end
end
@doc """
Fetches the document and its meta data for a specific `doc_id` in the given
`coll`.
If `coll` contains the document with the given `doc_id`, then
`{:ok, {doc, meta}}` is returned. If `coll` doesn't contain the document,
`:error` is returned.
"""
@spec fetch_meta(coll :: t, doc_id) :: {:ok, {Document.t, meta}} | :error
def fetch_meta(%__MODULE__{contents: contents}, doc_id),
do: Map.fetch(contents, doc_id)
@doc """
Gets the document for a specific `doc_id` in `coll`.
If `coll` contains the document with the given `doc_id`, then the document is
returned. Otherwise, `default` is returned (which is `nil` unless
specified otherwise).
Part of the Access behavior.
"""
@spec get(coll :: t, doc_id, default :: any) :: Document.t | any
def get(%__MODULE__{contents: contents}, doc_id, default \\ nil) do
case Map.fetch(contents, doc_id) do
{:ok, {doc, _}} -> doc
:error -> default
end
end
@doc """
Gets the document and its meta data for a specific `doc_id` in `coll`.
If `coll` contains the document with the given `doc_id`, then the document and
its meta data is returned. Otherwise, `default` is returned (which is `nil`
unless specified otherwise).
"""
@spec get_meta(coll :: t, doc_id, default) :: {Document.t, meta} | default when default: term
def get_meta(%__MODULE__{contents: contents}, doc_id, default \\ nil) do
case Map.fetch(contents, doc_id) do
{:ok, doc_meta} -> doc_meta
:error -> default
end
end
@doc """
Gets the document for a specific `doc_id` and updates it, all in one pass.
`fun` is called with the current document under `doc_id` in `coll` (or `nil`
if the document is not present in `coll`) and must return a two-element tuple:
the "get" document (the retrieved document, which can be operated on before
being returned) and the new document to be stored under `doc_id` in the
resulting new collection. `fun` may also return `:pop`, which means the
document shall be removed from `coll` and returned (making this function
behave like `Collection.pop(coll, doc_id)`).
The returned value is a tuple with the "get" document returned by
`fun` and a new collection with the updated document under `doc_id`.
Note that it is possible to return a document plus meta data from `fun`.
Part of the Access behavior.
"""
@spec get_and_update(coll :: t, doc_id, (Document.t | nil -> {get, Document.t | {Document.t, meta}} | :pop)) :: {get, t} when get: term
def get_and_update(coll, doc_id, fun) when is_function(fun, 1) do
current = get(coll, doc_id)
case fun.(current) do
{get, update} ->
{get, put(coll, update)}
:pop ->
{current, delete(coll, doc_id)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
@doc """
Gets the document and its meta data for a specific `doc_id` and updates it,
all in one pass.
`fun` is called with the current document and meta data under `doc_id` in
`coll` (or `nil` if the document is not present in `coll`) and must return a
two-element tuple: the "get" meta data (the retrieved meta data, which can be operated on before
being returned) and the new meta data to be stored under `doc_id` in the
resulting new collection. `fun` may also return `:pop`, which means the
document shall be removed from `coll` and its meta data returned (making this
function behave like `Collection.pop_meta(coll, doc_id)`).
The returned value is a tuple with the "get" document and meta data returned
by `fun` and a new collection with the updated document and meta data under
`doc_id`.
"""
@spec get_and_update_meta(coll :: t, doc_id, ({Document.t, meta} | nil -> {get, Document.t | {Document.t, meta}} | :pop)) :: {get, t} when get: term
def get_and_update_meta(coll, doc_id, fun) when is_function(fun, 1) do
current = get_meta(coll, doc_id)
case fun.(current) do
{get, update} ->
{get, put(coll, update)}
:pop ->
{current, delete(coll, doc_id)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
@doc """
Returns and removes a document from `coll`, referenced by its ID or itself.
If the document is present in `coll`, `{doc, new_coll}` is returned where
`new_coll` is the result of removing the document from `coll`. If the document
is not present in `coll`, `{default, coll}` is returned.
"""
@spec pop(coll :: t, key :: doc_id | Document.t, default) :: {Document.t | default, t} when default: term
def pop(coll, key, default \\ nil)
def pop(coll, %Document{id: doc_id}, default),
do: pop(coll, doc_id, default)
def pop(%__MODULE__{contents: contents, byte_size: bs} = coll, doc_id, default) when is_binary(doc_id) do
case Map.pop(contents, doc_id) do
{nil, _} ->
{default, coll}
{{doc, _}, contents} ->
{doc, %{coll | contents: contents, byte_size: bs - Document.full_json_byte_size(doc)}}
end
end
@doc """
Returns and removes a document and its meta data from `coll`, referenced by
its ID or itself.
If the document is present in `coll`, `{{doc, meta}, new_coll}` is returned
where `new_coll` is the result of removing the document from `coll`. If the
document is not present in `coll`, `{default, coll}` is returned.
"""
@spec pop_meta(coll :: t, key :: doc_id | Document.t, default) :: {{Document.t, meta} | default, t} when default: term
def pop_meta(coll, key, default \\ nil)
def pop_meta(coll, %Document{id: doc_id}, default),
do: pop_meta(coll, doc_id, default)
def pop_meta(%__MODULE__{contents: contents, byte_size: bs} = coll, doc_id, default) when is_binary(doc_id) do
case Map.pop(contents, doc_id) do
{nil, _} ->
{default, coll}
{{doc, _} = doc_meta, contents} ->
{doc_meta, %{coll | contents: contents, byte_size: bs - Document.full_json_byte_size(doc)}}
end
end
@doc """
Deletes a document in `coll`, referenced by its ID or itself.
If the document does not exist, returns `coll` unchanged.
"""
@spec delete(coll :: t, key :: doc_id | Document.t) :: t
def delete(coll, key),
do: pop(coll, key) |> elem(1)
@doc """
Checks if a document exists within the collection.
If a string is given as parameter, checks if a document with that ID exists.
If a 2-tuple of two strings is given, the first element is the document ID and
the second element the revision to check. Otherwise a document or a 2-tuple of
document and meta data can be given.
Documents are tested using `ICouch.Document.equal?/2` if they exist. Meta
data is compared using `==`.
"""
@spec member?(coll :: t, doc_id | Document.t | {doc_id, doc_rev}) :: boolean
def member?(%__MODULE__{contents: contents}, %Document{id: doc_id} = doc1) do
case Map.fetch(contents, doc_id) do
{:ok, {doc2, _}} -> Document.equal?(doc1, doc2)
_ -> false
end
end
def member?(%__MODULE__{contents: contents}, {%Document{id: doc_id} = doc1, meta1}) do
case Map.fetch(contents, doc_id) do
{:ok, {doc2, meta2}} -> Document.equal?(doc1, doc2) and meta1 == meta2
_ -> false
end
end
def member?(%__MODULE__{contents: contents}, {doc_id, doc_rev}) when is_binary(doc_id),
do: match?({:ok, %{rev: ^doc_rev}}, Map.fetch(contents, doc_id))
def member?(%__MODULE__{contents: contents}, doc_id) when is_binary(doc_id),
do: Map.has_key?(contents, doc_id)
def member?(_coll, _other),
do: false
end
defimpl Enumerable, for: ICouch.Collection do
def count(%ICouch.Collection{contents: contents}),
do: {:ok, map_size(contents)}
def member?(coll, key),
do: {:ok, ICouch.Collection.member?(coll, key)}
# Fall back to reduce/3 so enumeration consistently yields documents
def slice(_coll),
do: {:error, __MODULE__}
def reduce(%ICouch.Collection{contents: contents}, acc, fun),
do: reduce_contents(Map.to_list(contents), acc, fun)
defp reduce_contents(_, {:halt, acc}, _fun),
do: {:halted, acc}
defp reduce_contents(contents, {:suspend, acc}, fun),
do: {:suspended, acc, &reduce_contents(contents, &1, fun)}
defp reduce_contents([], {:cont, acc}, _fun),
do: {:done, acc}
defp reduce_contents([{_id, {doc, _meta}} | t], {:cont, acc}, fun),
do: reduce_contents(t, fun.(doc, acc), fun)
end
defimpl Collectable, for: ICouch.Collection do
def into(%ICouch.Collection{contents: start_contents, byte_size: start_size}) do
{{start_contents, start_size}, fn
{contents, size}, {:cont, element} ->
{%Document{id: doc_id} = doc, meta} = case element do
%Document{} = doc -> {doc, nil}
other -> other
end
size = Document.full_json_byte_size(doc) + case Map.fetch(contents, doc_id) do
:error -> size
{:ok, {old_doc, _}} -> size - Document.full_json_byte_size(old_doc)
end
{Map.put(contents, doc_id, {doc, meta}), size}
{contents, size}, :done ->
%ICouch.Collection{contents: contents, byte_size: size}
_, :halt ->
:ok
end}
end
end
# file: lib/icouch/collection.ex
defmodule ParamMap do
@moduledoc """
A subset of the `Map` module that operates on string-keyed maps using
atom key arguments.
## Examples
iex> params = %{"color" => "red", "size" => "large", "age" => 100}
iex> ParamMap.get(params, :color)
"red"
iex> ParamMap.delete(params, :size)
%{"color" => "red", "age" => 100}
iex> ParamMap.take(params, [:color, :size])
%{color: "red", size: "large"}
iex> ParamMap.pop(params, :color)
{"red", %{"size" => "large", "age" => 100}}
Still works with maps that have atom keys, in case you don't
know ahead of time whether the keys are strings or atoms.
iex> params = %{:color => "red", "age" => 100}
iex> ParamMap.get(params, :color)
"red"
The params may have a key both as a string and an atom.
`get/3`, `pop/3`, and `take/2` prioritize the atom key.
`delete/2` and `pop/3` remove both the atom and string versions of the key.
iex> params = %{:color => "red", "color" => "blue"}
iex> ParamMap.get(params, :color)
"red"
iex> params = %{:color => "red", "color" => "blue", "age" => 100}
iex> ParamMap.delete(params, :color)
%{"age" => 100}
iex> params = %{:color => "red", "color" => "blue", "age" => 100}
iex> ParamMap.pop(params, :color)
{"red", %{"age" => 100}}
"""
def get(params, atom_key, default \\ nil) do
case fetch(params, atom_key) do
{:ok, value} -> value
:error -> default
end
end
def delete(params, atom_key) do
string_key = to_string(atom_key)
params
|> Map.delete(atom_key)
|> Map.delete(string_key)
end
def fetch(params, atom_key) do
string_key = to_string(atom_key)
case params do
%{^atom_key => value} -> {:ok, value}
%{^string_key => value} -> {:ok, value}
_ -> :error
end
end
def pop(params, atom_key, default \\ nil) do
value = get(params, atom_key, default)
params = delete(params, atom_key)
{value, params}
end
def take(params, keys), do: take(params, keys, %{})
def take(_params, [], map), do: map
def take(params, [key | keys], map) do
case fetch(params, key) do
{:ok, value} -> Map.put(take(params, keys, map), key, value)
:error -> take(params, keys, map)
end
end
end
# file: lib/param_map.ex
defmodule Snipe do
@moduledoc """
Functions for transferring and managing files through SFTP
"""
alias Snipe.Sftp.{Access, Management, Transfer, Stream}
alias Snipe.Conn, as: Conn
@default_opts [
user_interaction: false,
silently_accept_hosts: true,
rekey_limit: 1_000_000_000_000,
port: 22
]
@doc """
Download a file given the connection and remote_path
Returns {:ok, data}, {:error, reason}
"""
@spec download(Conn.t(), Path.t()) :: {:ok | :error, any()}
def download(connection, remote_path) do
Transfer.download(connection, remote_path)
end
@doc """
Upload a local file to a remote path
Returns :ok, or {:error, reason}
"""
@spec upload(Conn.t(), Path.t(), Path.t()) :: :ok | {:error, any()}
def upload(connection, remote_path, file_handle) do
Transfer.upload(connection, remote_path, file_handle)
end
@doc """
Creates a Connection struct if the connection is successful,
else will return {:error, reason}
A connection struct will contain the
channel_pid = pid()
connection_pid = pid()
host = string()
port = integer()
opts = [{Option, Value}]
Default values are set for the following options:
user_interaction: false,
silently_accept_hosts: true,
rekey_limit: 1000000000000,
port: 22
***NOTE: The only required option is ':host'
The rekey_limit value is set at a large amount because the Erlang library creates
an exception when the server is negotiating a rekey. Setting the value at a high number
of bytes will avoid a rekey event occurring.
Other available options can be found at http://erlang.org/doc/man/ssh.html#connect-3
"""
@spec connect(Keyword.t()) :: {:ok, Conn.t()} | {:error, any()}
def connect(opts) do
opts = @default_opts |> Keyword.merge(opts)
own_keys = [:host, :port]
ssh_opts = opts |> Enum.filter(fn {k, _} -> not (k in own_keys) end)
Conn.connect(opts[:host], opts[:port], ssh_opts)
end
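# A minimal connection sketch (host and user are placeholders; option formats
# follow :ssh.connect/3 as linked above):
#
#   {:ok, conn} = Snipe.connect(host: 'sftp.example.com', user: 'deploy')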
@doc """
Open a connection.
If the connection does not succeed, raises an error.
"""
@spec connect!(Keyword.t()) :: Conn.t()
def connect!(opts) do
case connect(opts) do
{:ok, conn} -> conn
{:error, reason} -> raise "connection failed: #{inspect(reason)}"
end
end
@doc """
Creates an SFTP stream by opening an SFTP connection and opening a file
in read or write mode.
Below is an example of reading a file from a server.
stream = Snipe.stream!(connection,"/home/path/filename.txt")
|> Stream.into(File.stream!("filename.txt"))
|> Stream.run
An example of writing a file to a server is the following.
stream = File.stream!("filename.txt")
|> Stream.into(Snipe.stream!(connection,"/home/path/filename.txt"))
|> Stream.run
This follows the same pattern as Elixir IO streams so a file can be transferred
from one server to another via SFTP as follows.
stream = Snipe.stream!(connection,"/home/path/filename.txt")
|> Stream.into(Snipe.stream!(connection2,"/home/path/filename.txt"))
|> Stream.run
"""
@spec stream!(Conn.t(), Path.t(), non_neg_integer()) :: Stream.t()
def stream!(connection, remote_path, byte_size \\ 32768) do
%Stream{
connection: connection,
path: remote_path,
byte_length: byte_size
}
end
@doc """
Opens a file or directory given a connection and remote_path
"""
@spec open(Conn.t(), Path.t()) :: {:ok | :error, any()}
def open(connection, remote_path) do
Access.open(connection, remote_path, :read)
end
@doc """
Lists the contents of a directory given a connection and a handle or remote path
"""
@spec ls(Conn.t(), Path.t()) :: {:ok, [Path.t()]} | {:error, any()}
def ls(connection, remote_path) do
Management.list_files(connection, remote_path)
end
@doc """
Creates a directory at the given remote path
"""
@spec mkdir(Conn.t(), Path.t()) :: :ok | {:error, any()}
def mkdir(connection, remote_path) do
Management.make_directory(connection, remote_path)
end
@doc """
Stat a file.
"""
def lstat(connection, remote_path) do
Access.file_info(connection, remote_path)
end
@doc """
Determine the size of a file
"""
def size(connection, remote_path) do
case Access.file_info(connection, remote_path) do
{:error, reason} -> {:error, reason}
{:ok, info} -> info.size
end
end
@doc """
Gets the type given a remote path.
"""
def get_type(connection, remote_path) do
case Access.file_info(connection, remote_path) do
{:error, reason} -> {:error, reason}
{:ok, info} -> info.type
end
end
@doc """
Stops the SFTP channel and closes the SSH connection
"""
def disconnect(connection) do
Conn.disconnect(connection)
end
@doc """
Removes a file from the server.
"""
def rm(connection, file) do
Management.remove_file(connection, file)
end
@doc """
Removes a directory and all files within it
"""
def rm_dir(connection, remote_path) do
Management.remove_directory(connection, remote_path)
end
@doc """
Renames a file or directory
"""
def rename(connection, old_name, new_name) do
Management.rename(connection, old_name, new_name)
end
end
# file: lib/snipe.ex
defmodule Confex.Transforms.Key do
@moduledoc """
A behaviour module for defining a key transformer for a pipeline.
Key transformers are used by a pipeline to alter the requested key before
passing it on to its underlying source.
Using this module automatically adopts the behaviour, defines a struct, and
derives an implementation of the `Confex.KeyTransformable` protocol. Any
options supplied to the `use` macro will be passed to the `defstruct` macro,
so they should be a list of keywords.
## Examples
One use case for key transformers is to be able to specify configuration keys
in a consistent format but retrieve them from sources that may have a
different scheme for naming keys. For example, say we want to look up
configuration values from the environment using keys like `"example.key"` but
the deployment environment provides the values in variables named like
`EXAMPLE__KEY`. We can use a key transformer in a pipeline to achieve this:
defmodule EnvironmentKeyTransformer do
use Confex.Transforms.Key
def new(), do: %__MODULE__{}
def transform_key(_, key) do
key
|> String.split(".")
|> Enum.map(&String.upcase/1)
|> Enum.join("__")
end
end
env = Confex.Sources.Environment.new
pipeline = Confex.Pipeline.new(env, key_transforms: [EnvironmentKeyTransformer.new])
System.put_env("EXAMPLE__KEY", "value")
IO.puts Confex.ConfigSourceable.get(pipeline, "example.key")
# will print "value"
As the `__using__` macro definition for key transformers defines a struct
with the given fields, we can also provide data to be used when transforming
keys. For example, a transformer that prefixes all requested keys with a
value:
defmodule PrefixKeyTransformer do
use Confex.Transforms.Key, prefix: ""
def new(prefix), do: %__MODULE__{prefix: prefix}
def transform_key(%{prefix: prefix}, key) do
"\#{prefix}_key"
end
end
source = Confex.Sources.Map.new(%{transformed_key: "value"})
key = PrefixKeyTransformer.new("transformed")
pipeline = Confex.Pipeline.new(source, key_transforms: [key])
IO.puts Confex.ConfigSourceable.get(pipeline, "key")
# will print "value"
"""
defmacro __using__(opts) do
quote do
@behaviour Confex.Transforms.Key
@derive Confex.KeyTransformable
defstruct unquote(opts)
end
end
@type transformer :: struct
@callback transform_key(transformer, String.t) :: String.t
end
# file: lib/confex/transforms/key.ex
defmodule Membrane.WAV.Parser do
@moduledoc """
Element responsible for parsing WAV files.
It requires WAV file in uncompressed, PCM format on the input (otherwise error is raised) and
provides raw audio on the output. WAV header is parsed to extract metadata for creating caps.
Then it is dropped and only samples are sent to the next element.
The element has one option - `frames_per_buffer`. User can specify number of frames sent in one
buffer when demand unit on the output is `:buffers`. One frame contains `bits per sample` x
`number of channels` bits.
## WAV Header
```
0 4 8 12 16
_________________________________________________________________________________
0 | | | | |
| "RIFF" | file length | "WAVE" | "fmt " |
| | | | |
|___________________|___________________|___________________|___________________|
16 | | | | | |
| format chunk | format |number of| sample | data transmission |
| length |(1 - PCM)|channels | rate | rate |
|___________________|_________|_________|___________________|___________________|
32 | block | bits | | | |
| align | per | "fact" | fact chunk | samples per |
| unit | sample | | length | channel |
|_________|_________|___________________|___________________|___________________|
48 | | | |
| "data" | data length | DATA |
| | in bytes | |
|___________________|___________________|_______________________________________|
```
Header may contain additional bytes between `bits per sample` and `"fact"` in case of `format`
different from 1 (1 represents PCM / uncompressed format). Length of block from `format` until
`"fact"` is present in `format chunk length` (it is 16 for PCM).
Blocks from byte 36 to 48 are optional. There can be additional bytes after `samples per
channel` if `fact chunk length` contains a number bigger than 4.
## Parsing
Stages of parsing:
- `:init` - Parser waits for the first 22 bytes. After getting them, it parses these bytes
to ensure that it is a WAV file. The parser knows `format chunk length` and `format`, so it
is able to raise an error in case of a `format` other than 1 (PCM) or a chunk
length other than 16 (as required for PCM). After parsing, the stage is set to `:format`.
- `:format` - Parser waits for the next 22 bytes - `fmt` chunk (bytes 20 - 35) without
`format` and either `"fact"` and `fact chunk length` or `"data"` and `data length in bytes`.
Then it parses them and creates a `Membrane.Caps.Audio.Raw` struct with the audio format to send it
as caps to the next element. Stage is set to `:fact` or `:data` depending on last 8 bytes.
- `:fact` - Parser waits for `8 + fact chunk length` bytes. It parses them only to check if
the header is correct, but does not use that data in any way. After parsing, the stage is
set to `:data`.
- `:data` - header is already fully parsed. All new data from the input is sent to the output.
"""
use Membrane.Filter
alias Membrane.Buffer
alias Membrane.Caps.Audio.Raw, as: Caps
alias Membrane.Caps.Audio.Raw.Format
require Membrane.Logger
@pcm_format_size 16
@init_stage_size 22
@format_stage_size 22
@fact_stage_base_size 8
def_options frames_per_buffer: [
type: :integer,
spec: pos_integer(),
description: """
Assumed number of raw audio frames in each buffer.
Used when converting demand from buffers into bytes.
""",
default: 2048
]
def_output_pad :output,
mode: :pull,
availability: :always,
caps: Caps
def_input_pad :input,
mode: :pull,
availability: :always,
demand_unit: :bytes,
caps: :any
@impl true
def handle_init(options) do
state =
options
|> Map.from_struct()
|> Map.put(:stage, :init)
{:ok, state}
end
@impl true
def handle_prepared_to_playing(_context, state) do
demand = {:input, @init_stage_size}
{{:ok, demand: demand}, state}
end
@impl true
def handle_demand(:output, size, :bytes, _context, %{stage: :data} = state) do
{{:ok, demand: {:input, size}}, state}
end
def handle_demand(
:output,
buffers_count,
:buffers,
_context,
%{stage: :data, frames_per_buffer: frames, caps: caps} = state
) do
demand_size = Caps.frames_to_bytes(frames, caps) * buffers_count
{{:ok, demand: {:input, demand_size}}, state}
end
def handle_demand(:output, _size, _unit, _context, state) do
{:ok, state}
end
@impl true
def handle_process(:input, buffer, _context, %{stage: :data} = state) do
{{:ok, buffer: {:output, buffer}, redemand: :output}, state}
end
def handle_process(
:input,
%Buffer{payload: payload} = _buffer,
_context,
%{stage: :init} = state
) do
<<
"RIFF",
_file_size::32-little,
"WAVE",
"fmt ",
format_chunk_size::32-little,
format::16-little
>> = payload
check_format(format, format_chunk_size)
demand = {:input, @format_stage_size}
state = %{state | stage: :format}
{{:ok, demand: demand}, state}
end
def handle_process(
:input,
%Buffer{payload: payload} = _buffer,
_context,
%{stage: :format} = state
) do
<<
channels::16-little,
sample_rate::32-little,
_data_transmission_rate::32,
_block_alignment_unit::16,
bits_per_sample::16-little,
next_chunk_type::4-bytes,
next_chunk_size::32-little
>> = payload
caps = %Caps{
channels: channels,
sample_rate: sample_rate,
format: Format.from_tuple({:s, bits_per_sample, :le})
}
state = Map.merge(state, %{caps: caps})
case next_chunk_type do
"fact" ->
state = %{state | stage: :fact}
demand = {:input, @fact_stage_base_size + next_chunk_size}
{{:ok, caps: {:output, caps}, demand: demand}, state}
"data" ->
state = %{state | stage: :data}
{{:ok, caps: {:output, caps}, redemand: :output}, state}
end
end
def handle_process(
:input,
%Buffer{payload: payload} = _buffer,
_context,
%{stage: :fact} = state
) do
fact_chunk_size = 8 * (byte_size(payload) - @fact_stage_base_size)
<<
_fact_chunk::size(fact_chunk_size),
"data",
_data_length::32
>> = payload
state = %{state | stage: :data}
{{:ok, redemand: :output}, state}
end
defp check_format(format, format_chunk_size) do
cond do
format != 1 ->
raise(
RuntimeError,
"formats different than PCM are not supported; expected 1, given #{format}; format chunk size: #{format_chunk_size}"
)
format_chunk_size != @pcm_format_size ->
raise(
RuntimeError,
"format chunk size different than supported; expected 16, given #{format_chunk_size}"
)
true ->
:ok
end
end
end
# file: lib/membrane_wav/parser.ex
defmodule Rambla.Http do
@moduledoc """
Default connection implementation for 🕸️ HTTP.
It expects a message to be a map, containing the following fields:
`:method`, `:path`, `:query`, `:body` _and_ the optional `:type`
that otherwise would be inferred from the body type.
For instance, this call would send a POST request with a JSON specified as body.
```elixir
Rambla.publish(
Rambla.Http,
%{method: :post, body: %{message: "I ❤ HTTP"}}
}
```
If the second argument `message` is `binary()` it’s treated as a URL _and_
`:get` is implied.
---
List of all possible options might be found in
[`:httpc.request/4`](http://erlang.org/doc/man/httpc.html#request-4), names are preserved.
"""
@behaviour Rambla.Connection
@conn_params ~w|host port|a
@impl Rambla.Connection
def connect(params) when is_list(params) do
if is_nil(params[:host]),
do:
raise(Rambla.Exceptions.Connection,
value: params,
expected: "🕸️ configuration with :host key"
)
[defaults, opts] =
params
|> Keyword.split(@conn_params)
|> Tuple.to_list()
|> Enum.map(&Map.new/1)
%Rambla.Connection{
conn: %Rambla.Connection.Config{conn: params[:host], opts: opts, defaults: defaults},
conn_type: __MODULE__,
conn_pid: self(),
conn_params: params,
errors: []
}
end
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{} = conn, message) when is_binary(message),
do:
publish(conn, %{
method: :get,
host: "",
port: "",
path: message
})
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{} = conn, message) when is_list(message),
do: publish(conn, Map.new(message))
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{opts: opts, defaults: defaults}, message)
when is_map(opts) and is_map(message) do
{method, message} = Map.pop(message, :method, :get)
{host, message} = Map.pop(message, :host, Map.get(defaults, :host))
{port, message} = Map.pop(message, :port, Map.get(defaults, :port))
{headers, message} = Map.pop(message, :headers, Map.get(defaults, :headers, []))
{path, message} = Map.pop(message, :path, Map.get(opts, :path, Map.get(defaults, :path, "")))
{http_options, message} = Map.pop(message, :http_options, Map.get(opts, :http_options, []))
{options, message} = Map.pop(message, :options, Map.get(opts, :options, []))
{%{} = query, message} = Map.pop(message, :query, Map.get(opts, :query, %{}))
{body, _message} = Map.pop(message, :body, Map.get(opts, :body, %{}))
host_port =
[host, port]
|> Enum.reject(&(to_string(&1) == ""))
|> Enum.join(":")
path_query =
[path, Plug.Conn.Query.encode(query)]
|> Enum.map(&String.trim(&1, "/"))
|> Enum.reject(&(&1 == ""))
|> Enum.join("?")
url =
[host_port, path_query]
|> Enum.reject(&(&1 == ""))
|> Enum.join("/")
headers = for {k, v} <- headers, do: {:erlang.binary_to_list(k), :erlang.binary_to_list(v)}
request(method, url, headers, body, http_options, options)
end
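# URL assembly sketch (hypothetical values): with defaults of
# %{host: "example.com", port: 8080} and a message of
# %{method: :get, path: "v1/ping", query: %{q: "ok"}}, the pieces above
# combine into "example.com:8080/v1/ping?q=ok" before being handed to :httpc.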
@typep method :: :head | :get | :put | :post | :trace | :options | :delete | :patch
@typep url :: binary()
@typep header :: {binary(), binary()}
@typep headers :: [header()]
@typep body :: charlist() | binary()
@typep option ::
{:sync, boolean()}
| {:stream, any()}
| {:body_format, any()}
| {:full_result, boolean()}
| {:headers_as_is, boolean()}
| {:socket_opts, any()}
| {:receiver, any()}
| {:ipv6_host_with_brackets, boolean()}
@typep options :: [option()]
@typep http_option ::
{:timeout, timeout()}
| {:connect_timeout, timeout()}
| {:ssl, any()}
| {:essl, any()}
| {:autoredirect, boolean()}
| {:proxy_auth, {charlist(), charlist()}}
| {:version, charlist()}
| {:relaxed, boolean()}
@typep http_options :: [http_option()]
@typep content_type :: charlist()
@typep status_line :: {charlist(), integer(), charlist()}
@spec request(
method :: method(),
url :: url(),
headers :: headers(),
body :: body(),
http_options :: http_options(),
options :: options(),
content_type :: content_type()
) :: {:ok, {status_line(), list()}} | {:error, any()}
defp request(
method,
url,
headers,
body \\ "",
http_options \\ [],
options \\ [],
content_type \\ 'application/json'
)
Enum.each([:post, :put], fn m ->
defp request(unquote(m), url, headers, body, http_options, options, content_type) do
:httpc.request(
unquote(m),
{:erlang.binary_to_list(url), headers, content_type,
body |> Jason.encode!() |> :erlang.binary_to_list()},
http_options,
options
)
end
end)
Enum.each([:get, :head, :options, :delete], fn m ->
defp request(unquote(m), url, headers, _body, http_options, options, _content_type),
do:
:httpc.request(unquote(m), {:erlang.binary_to_list(url), headers}, http_options, options)
end)
end
# file: lib/rambla/connections/http.ex
defmodule Griffin.Model.GraphQL do
@moduledoc """
Module for converting models to graphql.
"""
@doc """
Converts a list of models into a plug that can be used to serve GraphQL
with GraphiQL.
"""
def plugify(conn, schema) do
opts = Absinthe.Plug.init(schema: schema)
Absinthe.Plug.call(conn, opts)
end
@doc """
Runs a GraphQL query string against a given schema module
"""
def run(query, schema) do
Absinthe.run(query, schema)
end
@doc """
Converts a list of model modules into a single Absinthe schema module that
can be sent to `Absinthe.run`.
"""
def schemaify(models) do
id = UUID.uuid4(:hex)
model = List.first(models)
code = """
defmodule Griffin.Model.Runtime.Types#{id} do
use Absinthe.Schema.Notation
#{model_to_types(model)}
end
defmodule Griffin.Model.Runtime.Schema#{id} do
use Absinthe.Schema
import_types Griffin.Model.Runtime.Types#{id}
query do
#{model_to_field(model, :read)}
#{model_to_field(model, :list)}
end
mutation do
#{model_to_field(model, :create)}
#{model_to_field(model, :update)}
#{model_to_field(model, :delete)}
end
end
"""
Code.compile_string(code)
Module.concat(["Griffin.Model.Runtime.Schema#{id}"])
end
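# Hypothetical round trip (MyApp.UserModel stands in for a module using the
# Griffin model DSL). Note that the implementation above only generates
# fields for the first model in the list:
#
#   schema = Griffin.Model.GraphQL.schemaify([MyApp.UserModel])
#   {:ok, result} = Griffin.Model.GraphQL.run("{ user { name } }", schema)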
defp model_to_field(model, crud_op) do
{singular, plural} = Griffin.Model.Module.namespaces(model)
field_name =
case crud_op do
:create -> "create_#{singular}"
:read -> singular
:update -> "update_#{singular}"
:delete -> "delete_#{singular}"
:list -> plural
end
type_name =
if crud_op == :list do
"list_of(:#{singular})"
else
":#{singular}"
end
"""
field :#{field_name}, #{type_name} do
#{model_to_args(model, crud_op)}
resolve fn (args, _) ->
model = #{inspect(model)}
{:ok, Griffin.Model.Module.resolve(model, :#{crud_op}, args).res}
end
end
"""
end
defp model_to_args(model, crud_op) do
{name, _} = Griffin.Model.Module.namespaces(model)
dsl = Griffin.Model.DSL.for_crud_op(model.fields, crud_op)
fields =
for {attr, [type | _]} <- dsl do
type =
case type do
:map -> "#{name}_input_#{attr}"
:int -> :integer
_ -> type
end
"arg :#{attr}, :#{type}"
end
Enum.join(fields, "\n")
end
defp model_to_types(model) do
{singular, _} = Griffin.Model.Module.namespaces(model)
dsl = Griffin.Model.DSL.for_crud_op(model.fields, :read)
dsl_to_objects(singular, dsl) <> dsl_to_objects(singular, dsl, true)
end
defp dsl_to_objects(name, dsl, is_input \\ false) do
prefix = if is_input, do: "input_object", else: "object"
suffix =
if is_input and not already_namespaced(name) do
"#{name}_input"
else
name
end
"""
#{prefix} :#{suffix} do
#{to_fields(name, dsl, is_input)}
end
#{extract_map_objs(name, dsl, is_input)}
"""
end
defp already_namespaced(name) do
String.contains?(to_string(name), "_input")
end
defp object_name(name, attr, is_input) do
if is_input and not already_namespaced(name) do
"#{name}_input_#{attr}"
else
"#{name}_#{attr}"
end
end
defp to_fields(name, dsl, is_input) do
fields =
for {attr, [type | _]} <- dsl do
type =
case type do
:map -> object_name(name, attr, is_input)
:int -> :integer
_ -> type
end
"field :#{attr}, :#{type}"
end
Enum.join(fields, "\n ")
end
defp extract_map_objs(name, dsl, is_input) do
maps_dsl = Enum.filter(dsl, fn {_, [type | _]} -> type == :map end)
for {attr, [_ | rules]} <- maps_dsl do
{_, dsl} =
rules
|> Enum.filter(fn {rule_name, _} -> rule_name == :of end)
|> List.first()
dsl_to_objects(object_name(name, attr, is_input), dsl, is_input)
end
end
end
# file: lib/griffin/model/graphql.ex
defmodule Galaxy.Host do
@moduledoc """
This topologying strategy relies on Erlang's built-in distribution protocol by
using a `.hosts.erlang` file (as used by the `:net_adm` module).
Please see [the net_adm docs](http://erlang.org/doc/man/net_adm.html) for more details.
In short, the following is the gist of how it works:
> File `.hosts.erlang` consists of a number of host names written as Erlang terms. It is looked for in the current work
> directory, the user's home directory, and $OTP_ROOT (the root directory of Erlang/OTP), in that order.
This looks a bit like the following in practice:
```erlang
'super.eua.ericsson.se'.
'renat.eua.ericsson.se'.
'grouse.eua.ericsson.se'.
'gauffin1.eua.ericsson.se'.
```
"""
use GenServer
require Logger
@default_initial_delay 0
@default_polling_interval 5000
def start_link(options) do
{sup_opts, opts} = Keyword.split(options, [:name])
GenServer.start_link(__MODULE__, opts, sup_opts)
end
@impl true
def init(options) do
case :net_adm.host_file() do
{:error, _} ->
:ignore
_ ->
unless topology = options[:topology] do
raise ArgumentError, "expected :topology option to be given"
end
initial_delay = Keyword.get(options, :initial_delay, @default_initial_delay)
polling_interval = Keyword.get(options, :polling_interval, @default_polling_interval)
state = %{
topology: topology,
polling_interval: polling_interval
}
Process.send_after(self(), :poll, initial_delay)
{:ok, state}
end
end
@impl true
def handle_info(:poll, state) do
known_hosts = state.topology.members()
registered_hosts = :net_adm.world()
unconnected_hosts = registered_hosts -- known_hosts
{_, bad_nodes} = state.topology.connect_nodes(unconnected_hosts)
Enum.each(bad_nodes, &Logger.debug(["Failed to connect to node ", to_string(&1)]))
Process.send_after(self(), :poll, state.polling_interval)
{:noreply, state}
end
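# A supervision sketch (MyApp.Topology is a placeholder module providing
# members/0 and connect_nodes/1):
#
#   children = [
#     {Galaxy.Host, topology: MyApp.Topology, polling_interval: 10_000}
#   ]
#   Supervisor.start_link(children, strategy: :one_for_one)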
end
# file: lib/galaxy/host.ex
defmodule FloUI.Tab do
alias FloUI.Util.FontMetricsHelper
@moduledoc ~S"""
## Usage in SnapFramework
Tab should be passed into the Tabs module as follows.
``` elixir
<%= graph font_size: 20 %>
<%= component FloUI.Tabs, {@active_tab, @tabs}, id: :tabs do %>
<%= component FloUI.Grid, %{
start_xy: {0, 0},
max_xy: {@module.get_tabs_width(@tabs), 40}
} do %>
<%= for {{label, cmp}, i} <- Enum.with_index(@tabs) do %>
<%= component FloUI.Tab,
{label, cmp},
selected?: if(i == 0, do: true, else: false),
id: :"#{label}",
width: @module.get_tab_width(label),
height: 40
%>
<% end %>
<% end %>
<% end %>
```
"""
use SnapFramework.Component,
name: :tab,
template: "lib/tabs/tab.eex",
controller: FloUI.TabController,
assigns: [
label: nil,
cmp: nil,
id: nil,
selected?: false,
hovered?: false
],
opts: []
defcomponent(:tab, :tuple)
watch([:hovered?])
use_effect([assigns: [selected?: :any]],
run: [:on_selected_change]
)
def setup(%{assigns: %{data: {label, cmp}, hovered?: hovered, opts: opts}} = scene) do
# request_input(scene, [:cursor_pos])
if opts[:selected?], do: send_parent(scene, {:tab_pid, self()})
scene
|> assign(
label: label,
width: FontMetricsHelper.get_text_width(label, 20),
cmp: cmp,
id: opts[:id] || "",
selected?: opts[:selected?] || false,
hovered?: hovered
)
end
def bounds({label, _cmp}, _opts) do
{0.0, 0.0, FontMetricsHelper.get_text_width(label, 20), 40}
end
def process_input({:cursor_pos, _}, :bg, scene) do
capture_input(scene, [:cursor_pos])
{:noreply, assign(scene, hovered?: true)}
end
def process_input({:cursor_pos, _}, _, scene) do
release_input(scene)
{:noreply, assign(scene, hovered?: false)}
end
def process_input({:cursor_button, {:btn_left, 1, _, _}}, _, scene) do
send_parent_event(scene, {:select_tab, scene.assigns.cmp})
{:noreply, assign(scene, selected?: true)}
end
def process_input({:cursor_button, {:btn_left, 0, _, _}}, _, scene) do
{:noreply, scene}
end
def process_call({:put, value}, _, scene) do
{:reply, :ok, assign(scene, selected?: value)}
end
def process_call(_, _, scene) do
{:noreply, scene}
end
end
# file: lib/tabs/tab.ex
defmodule Eval do
@moduledoc """
Contains the evaluation functions used with lisir.
"""
@doc """
Evaluates the given tree in an environment. Returns a 2 element tuple,
the first element is the result, the second is the new environment.
"""
def eval([:+ | l], env) do
res = get_bindings(l, env) |> Enum.reduce(0, fn(x, acc) -> x + acc end)
{res, env}
end
def eval([:- | l], env) do
[h | t] = get_bindings(l, env)
{Enum.reduce(t, h, fn(x, acc) -> acc - x end), env}
end
def eval([:* | l], env) do
res = get_bindings(l, env) |> Enum.reduce(1, fn(x, acc) -> x * acc end)
{res, env}
end
def eval([:/ | l], env) do
[h | t] = get_bindings(l, env)
{Enum.reduce(t, h, fn(x, acc) -> acc / x end), env}
end
def eval([:= | l], env) do
[h | t] = get_bindings(l, env)
{Enum.all?(t, fn(x) -> x === h end), env}
end
def eval([:> | l], env) do
[h | t] = get_bindings(l, env)
Enum.reduce(t, h, fn(x, last) ->
if last > x, do: x, else: throw({:gt, false})
end)
{true, env}
catch
{:gt, false} -> {false, env}
end
def eval([:>= | l], env) do
[h | t] = get_bindings(l, env)
Enum.reduce(t, h, fn(x, last) ->
if last >= x, do: x, else: throw({:gt, false})
end)
{true, env}
catch
{:gt, false} -> {false, env}
end
def eval([:< | l], env) do
[h | t] = get_bindings(l, env)
Enum.reduce(t, h, fn(x, last) ->
if last < x, do: x, else: throw({:gt, false})
end)
{true, env}
catch
{:gt, false} -> {false, env}
end
def eval([:<= | l], env) do
[h | t] = get_bindings(l, env)
Enum.reduce(t, h, fn(x, last) ->
if last <= x, do: x, else: throw({:gt, false})
end)
{true, env}
catch
{:gt, false} -> {false, env}
end
def eval([:quote, exp], env), do: {exp, env}
def eval([:define, var, exp], env) do
{r, _} = eval(exp, env)
{nil, env_put(env, var, r)}
end
def eval([:set!, var, exp], env) do
env_get!(env, var)
{r, _} = eval(exp, env)
{nil, env_put(env, var, r)}
end
def eval([:if, test, ts, fs], env) do
case eval(test, env) do
{true, _} -> eval(ts, env)
{false, _} -> eval(fs, env)
end
end
# Creates an elixir anonymous function that will evaluate `exps`. A new
# environment is created with the existing `env` as a parent.
def eval([:lambda, args, exps], env) do
{fn(params) -> eval(exps, new_env(args, params, env)) end, env}
end
def eval([:begin, exp], env) do
eval(exp, env)
end
def eval([:begin, exp | rest], env) do
{_, e} = eval(exp, env)
eval([:begin | rest], e)
end
def eval([], _) do
raise "expected a procedure"
end
# (proc exp) - lambdas basically
def eval(exps, env) when is_list(exps) do
[{fun, _} | params] = Enum.map(exps, fn(exp) -> eval(exp, env) end)
params = Enum.map(params, fn({p,_}) -> p end)
if is_function(fun) do
{res, _} = fun.(params)
{res, env}
else
raise ~s/"#{fun}" is not a procedure/
end
end
# variable reference
def eval(a, env) when is_atom(a), do: {env_get!(env, a), env}
# constant
def eval(other, env), do: {other, env}
# Replace variables with their repective values
defp get_bindings(l, e) do
Enum.reduce(l, [], fn
x, acc when is_atom(x) -> [env_get!(e, x) | acc]
x, acc -> [x | acc]
end)
|> Enum.reverse
end
# Create a new environment, used on lambdas, an optional parent can be given
# as an argument.
defp new_env(keys, vals, parent) when is_list(keys) do
kl = length(keys)
vl = length(vals)
if kl === vl do
{Enum.zip(keys, vals), parent}
else
raise "expected #{kl} arguments, got #{vl}"
end
end
defp new_env(key, val, parent) when is_atom(key) do
vl = length(val)
if vl === 1 do
{[{key, hd(val)}], parent}
else
raise "expected 1 argument, got #{vl}"
end
end
# Get the value of key `k` in the environment, if the nothing is found
# an exception is raised.
defp env_get!({e, p}, k) do
case e[k] do
nil ->
case p do
[] -> raise ~s/"#{k}" undefined/
_ -> env_get!(p, k)
end
val ->
val
end
end
# Create a copy of environment `e` with a new pair `k: v`.
defp env_put({e, p}, k, v) do
{Keyword.put(e, k, v), p}
end
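# Illustrative environment shape: environments are `{bindings, parent}` pairs,
# so lookups walk outward until a binding is found (both helpers are private):
#
#   child = {[x: 1], {[y: 2], []}}
#   # env_get!(child, :y) walks to the parent and returns 2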
end
# file: lib/eval.ex
defmodule Ymlr.Encoder do
@moduledoc """
Encodes data into YAML strings.
"""
# credo:disable-for-this-file Credo.Check.Refactor.CyclomaticComplexity
@type data :: map() | [data] | atom() | binary() | number()
@quote_when_first [
"!", # tag
"&", # anchor
"*", # alias
"{", "}", # flow mapping
"[", "]", # flow sequence
",", # flow collection entry separator
"#", # comment
"|", ">", # block scalar
"@", "`", # reserved characters
"\"", "'", # double and single quotes
]
@quote_when_last [
":", # colon
]
@doc """
Encodes the given data as YAML string. Raises if it cannot be encoded.
## Examples
iex> Ymlr.Encoder.to_s!(%{})
"{}"
iex> Ymlr.Encoder.to_s!(%{a: 1, b: 2})
"a: 1\\nb: 2"
iex> Ymlr.Encoder.to_s!(%{"a" => "a", "b" => :b, "c" => "true", "d" => "100"})
"a: a\\nb: b\\nc: 'true'\\nd: '100'"
iex> Ymlr.Encoder.to_s!({"a", "b"})
** (ArgumentError) The given data {\"a\", \"b\"} cannot be converted to YAML.
"""
@spec to_s!(data) :: binary()
def to_s!(data) do
data
|> encode_as_io_list()
|> IO.iodata_to_binary()
end
@doc """
Encodes the given data as YAML string.
## Examples
iex> Ymlr.Encoder.to_s(%{a: 1, b: 2})
{:ok, "a: 1\\nb: 2"}
iex> Ymlr.Encoder.to_s({"a", "b"})
{:error, "The given data {\\"a\\", \\"b\\"} cannot be converted to YAML."}
"""
@spec to_s(data) :: {:ok, binary()} | {:error, binary()}
def to_s(data) do
yml = to_s!(data)
{:ok, yml}
rescue
e in ArgumentError -> {:error, e.message}
end
defp encode_as_io_list(data, level \\ 0)
defp encode_as_io_list(data, _level) when data == %{} do
"{}"
end
defp encode_as_io_list(%Date{} = data, _), do: Calendar.strftime(data, "%Y-%m-%d")
defp encode_as_io_list(%DateTime{} = data, _) do
data |> DateTime.shift_zone!("Etc/UTC") |> Calendar.strftime("%Y-%m-%d %H:%M:%S.000000000 Z")
end
defp encode_as_io_list(data, level) when is_map(data) do
indentation = indent(level)
data
|> Enum.map(fn
{key, nil} -> "#{key}:"
{key, value} when value == [] -> "#{key}: []"
{key, value} when value == %{} -> "#{key}: {}"
{key, value} when is_map(value) -> ["#{key}:" | [indentation | [" " | encode_as_io_list(value, level + 1)]]]
{key, value} when is_list(value) -> ["#{key}:" | [indentation | [" " | encode_as_io_list(value, level + 1)]]]
{key, value} -> ["#{key}: " | encode_as_io_list(value, level + 1)]
end)
|> Enum.intersperse(indentation)
end
defp encode_as_io_list(data, level) when is_list(data) do
indentation = indent(level)
data
|> Enum.map(fn
nil -> "-"
"" -> ~s(- "")
value -> ["- " | encode_as_io_list(value, level + 1)]
end)
|> Enum.intersperse(indentation)
end
defp encode_as_io_list(data, _) when is_atom(data), do: Atom.to_string(data)
defp encode_as_io_list(data, level) when is_binary(data) do
cond do
data == "" -> ~S('')
data == "null" -> ~S('null')
data == "yes" -> ~S('yes')
data == "no" -> ~S('no')
data == "true" -> ~S('true')
data == "false" -> ~S('false')
data == "True" -> ~S('True')
data == "False" -> ~S('False')
String.contains?(data, "\n") -> multiline(data, level)
String.at(data, 0) in @quote_when_first -> with_quotes(data)
String.at(data, -1) in @quote_when_last -> with_quotes(data)
String.starts_with?(data, "- ") -> with_quotes(data)
String.starts_with?(data, ": ") -> with_quotes(data)
String.starts_with?(data, "? ") -> with_quotes(data)
String.contains?(data, " #") -> with_quotes(data)
String.contains?(data, ": ") -> with_quotes(data)
String.starts_with?(data, "0b") -> with_quotes(data)
String.starts_with?(data, "0o") -> with_quotes(data)
String.starts_with?(data, "0x") -> with_quotes(data)
is_numeric(data) -> with_quotes(data)
true -> data
end
end
defp encode_as_io_list(data, _) when is_number(data), do: "#{data}"
defp encode_as_io_list(data, _), do: raise(ArgumentError, message: "The given data #{inspect(data)} cannot be converted to YAML.")
defp numeric?(string) do
case Float.parse(string) do
{_, ""} -> true
_ -> false
end
rescue
_ -> false
end
defp with_quotes(data) do
if String.contains?(data, "'") do
~s("#{escape(data)}")
else
~s('#{data}')
end
end
defp escape(data) do
data |> String.replace("\\", "\\\\") |> String.replace(~s("), ~s(\\"))
end
# see https://yaml-multiline.info/
defp multiline(data, level) do
indentation = indent(level)
block = data |> String.trim_trailing("\n") |> String.replace("\n", IO.iodata_to_binary(indentation))
[block_chomping_indicator(data) | [indentation | block]]
end
defp block_chomping_indicator(data) do
if String.ends_with?(data, "\n"), do: "|", else: "|-"
end
defp indent(level) do
["\n" | List.duplicate(" ", level)]
end
end
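# A usage sketch (not part of the original module): a value containing a
# newline takes the multiline/block-scalar path, while a numeric-looking
# string is single-quoted so it round-trips as a string, e.g.
#
#     Ymlr.Encoder.to_s!(%{note: "line1\nline2", port: "8080"})
#     # note: |-        (indented block scalar follows)
#     # port: '8080'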
|
lib/ymlr/encoder.ex
| 0.810854
| 0.487307
|
encoder.ex
|
starcoder
|
defmodule FuzzDist.Telemetry do
@moduledoc """
Telemetry integration.
Match `:ok` on all `:telemetry` calls, as working telemetry is expected as part of the test.
`FuzzDist` executes the following events:
* `[:fuzz_dist, :beam]` - Executed on receipt of BEAM monitored message/signal.
#### Measurements
* `%{}`
#### Metadata:
* `:event` - The BEAM event
* `[:fuzz_dist, :client]` - Executed on connection of Jepsen client ws connection
#### Measurements
* `%{}`
#### Metadata:
* `:antidote_conn` - AntidoteDB connection that client will use for all transactions
* `[:fuzz_dist, :g_set_read, :start]` - Executed on receipt of Jepsen :read operation.
#### Measurements
* `:system_time` - The system time
#### Metadata:
* `%{}`
* `[:fuzz_dist, :g_set_read, :stop]` - Executed on completion of read before return to Jepsen.
#### Measurements
* `:duration` - Duration of the read.
#### Metadata:
* :value - Value returned by read.
* `[:fuzz_dist, :g_set_add, :start]` - Executed on receipt of Jepsen :add operation.
#### Measurements
* `:system_time` - The system time
#### Metadata:
* `value`: - The value to be added
* `[:fuzz_dist, :g_set_add, :stop]` - Executed on completion of add before return to Jepsen.
#### Measurements
* `:duration` - Duration of the add.
#### Metadata:
* `%{}`
* `[:fuzz_dist, :setup_primary, :start]` - Executed on receipt of Jepsen :setup_primary operation.
#### Measurements
* `:system_time` - The system time
#### Metadata:
* `nodes` - List of (long) node names
* `[:fuzz_dist, :setup_primary, :stop]` - Executed on completion of `setup_primary/1` before return to Jepsen.
#### Measurements
* `:duration` - Duration of the cluster configuration
#### Metadata:
* `%{}`
TODO: add Prometheus collector.
"""
use GenServer
require Logger
def start_link(opts) do
{:ok, _pid} = GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@impl true
def init(_args) do
# Blocking calls; crashing here is intentional in init/1.
:ok = :net_kernel.monitor_nodes(true)
monitor_ref = :erlang.monitor(:time_offset, :clock_service)
:ok =
:telemetry.attach_many(
:fuzz_dist,
[
[:fuzz_dist, :beam],
[:fuzz_dist, :g_set_add, :start],
[:fuzz_dist, :g_set_add, :stop],
[:fuzz_dist, :g_set_read, :start],
[:fuzz_dist, :g_set_read, :stop],
[:fuzz_dist, :setup_primary, :start],
[:fuzz_dist, :setup_primary, :stop]
],
&FuzzDist.Telemetry.log_handler/4,
nil
)
{:ok, %{monitor_ref: monitor_ref}}
end
@impl true
def handle_info({node_state, _node} = message, state)
when node_state == :nodeup or node_state == :nodedown do
:ok = event(:beam, %{}, %{event: message})
{:noreply, state}
end
@impl true
def handle_info({:CHANGE, monitor_ref, _type, _item, _new_time_offset} = message, state)
when monitor_ref == state.monitor_ref do
:ok = event(:beam, %{}, %{event: message})
{:noreply, state}
end
@doc false
# Used for reporting generic events
def event(event, measurements, meta) do
:ok = :telemetry.execute([:fuzz_dist, event], measurements, meta)
end
@doc false
# Emits a `start` telemetry event and returns the start time.
def start(event, meta \\ %{}, extra_measurements \\ %{}) do
start_time = System.monotonic_time()
:ok =
:telemetry.execute(
[:fuzz_dist, event, :start],
Map.merge(extra_measurements, %{system_time: System.system_time()}),
meta
)
start_time
end
@doc false
# Emits a stop event.
def stop(event, start_time, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
:ok =
:telemetry.execute(
[:fuzz_dist, event, :stop],
measurements,
meta
)
end
@doc false
def exception(event, start_time, kind, reason, stack, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
meta =
meta
|> Map.put(:kind, kind)
|> Map.put(:error, reason)
|> Map.put(:stacktrace, stack)
:ok = :telemetry.execute([:fuzz_dist, event, :exception], measurements, meta)
end
def log_handler(event, measurements, meta, _config) do
Logger.debug("Telemetry: [#{inspect(event)}, #{inspect(measurements)}, #{inspect(meta)}]")
end
end
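# A hedged usage sketch (`do_read/0` is a hypothetical work function):
# pairing the start/stop helpers above emits matching telemetry events.
#
#     start_time = FuzzDist.Telemetry.start(:g_set_read)
#     value = do_read()
#     FuzzDist.Telemetry.stop(:g_set_read, start_time, %{value: value})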
|
beam.fuzz_dist/lib/fuzz_dist/telemetry.ex
| 0.735926
| 0.694704
|
telemetry.ex
|
starcoder
|
defmodule Distributed.Scaler.Node do
@moduledoc """
The functions in the `Distributed.Scaler.Node` module help scale projects by processing every event on the next node, in order.
**Note**: Since this module is only a wrapper around the `Node` module, there is no need for detailed documentation here.
Please check the documentation of the `Node` module; you can think of these functions as their `Node` counterparts running on the
next node without you having to specify the node, replying with the result of the process.
You can use this module mostly for read operations, loop actions or background tasks. It is suitable when you do not need replication
of events.
"""
use GenServer
@doc false
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__.process_id())
end
@doc false
def init(_opts \\ []) do
{:ok, %{}}
end
@doc false
def process_id() do
Distributed.Scaler.Node
end
@doc """
Returns the PID of a new process started by the application of `fun` on the next node.
See `Node.spawn/2` and `Node.spawn/3`.
"""
@spec spawn(fun :: (() -> any), opts :: [any]) :: pid | {pid, reference}
def spawn(fun, opts \\ [])
when is_function(fun)
do
spawn_opts = Keyword.get(opts, :spawn_opts, [])
Node.spawn(Distributed.Node.Iterator.next(), fun, spawn_opts)
end
@doc """
Returns the PID of a new process started by the application of `module.fun(args)` on the next node.
See `Node.spawn/4` and `Node.spawn/5`.
"""
@spec spawn(module :: module, fun :: atom, args :: [any], opts :: [any]) :: pid | {pid, reference}
def spawn(module, fun, args, opts \\ [])
when is_atom(module) and is_atom(fun)
do
spawn_opts = Keyword.get(opts, :spawn_opts, [])
Node.spawn(Distributed.Node.Iterator.next(), module, fun, args, spawn_opts)
end
@doc """
Returns the PID of a new linked process started by the application of `fun` on the next node.
See `Node.spawn_link/2`.
"""
@spec spawn_link((() -> any)) :: pid
def spawn_link(fun)
when is_function(fun)
do
Node.spawn_link(Distributed.Node.Iterator.next(), fun)
end
@doc """
Returns the PID of a new linked process started by the application of `module.function(args)` on the next node.
See `Node.spawn_link/4`.
"""
@spec spawn_link(module :: module, fun :: atom, args :: [any]) :: pid
def spawn_link(module, fun, args)
when is_atom(module) and is_atom(fun)
do
Node.spawn_link(Distributed.Node.Iterator.next(), module, fun, args)
end
end
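# A hedged usage sketch (module/function names are illustrative; assumes a
# configured Distributed cluster so Distributed.Node.Iterator.next/0 works):
#
#     Distributed.Scaler.Node.spawn(fn -> rebuild_cache() end)
#     Distributed.Scaler.Node.spawn_link(MyApp.Reports, :generate, [:daily])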
|
lib/distributed/scaler/node.ex
| 0.761982
| 0.611034
|
node.ex
|
starcoder
|
defmodule Jeeves.Named do
@moduledoc """
Implement a singleton (global) named service.
### Usage
To create the service:
* Create a module that implements the API you want. This API will be
expressed as a set of public functions. Each function will automatically
receive the current state in a variable (by default named `state`). There is
no need to declare this as a
parameter.[<small><small>[why?]</small></small>](background.html#why-magic-state).
If a function wants to change the state, it must end with a call to the
`Jeeves.Common.update_state/2` function (which will have been
imported into your module automatically).
For this example, we'll call the module `NamedService`.
* Add the line `use Jeeves.Named` to the top of this module.
To consume the service:
* Create an instance of the service with `NamedJeeves.run()`. You can pass
initial state to the service as an optional parameter. This call returns
a handle to this service instance, but you shouldn't use it.
* Call the API functions in the service.
### Example
defmodule KV do
use Jeeves.Named, state: %{}
def get(name), do: state[name]
def put(name, value) do
update_state(Map.put(state, name, value)) do
value
end
end
end
KV.run(%{ name: "Elixir" })
KV.put(:type, "language")
KV.get(:name) # => "Elixir"
KV.get(:type) # => "language"
### Options
You can pass a keyword list to `use Jeeves.Named:`
* `state:` _value_
* `state_name:` _atom_
The default name for the state variable is (unimaginatively) `state`.
Use `state_name` to override this. For example, you could change the
previous example to use `store` for the state with:
defmodule KV do
use Jeeves.Named, state: %{}, state_name: :store
def get(name), do: store[name]
def put(name, value) do
update_state(Map.put(store, name, value)) do
value
end
end
end
* `service_name:` _atom_
The default name for the service is the name of the module that defines it.
Use `service_name:` to change this.
* `showcode:` _boolean_
If truthy, dump a representation of the generated code to STDOUT during
compilation.
"""
require Jeeves.Common
@doc false
defmacro __using__(opts \\ []) do
Jeeves.Common.generate_common_code(
__CALLER__.module,
__MODULE__,
opts,
service_name(opts))
end
@doc false
defmacro generate_code_callback(_) do
Jeeves.Common.generate_code(__CALLER__.module, __MODULE__)
end
@doc false
def generate_api_call(options, {call, _body}) do
quote do
def(unquote(call), do: unquote(api_body(options, call)))
end
end
@doc false
defp api_body(options, call) do
request = call_signature(call)
quote do
GenServer.call(unquote(service_name(options)), unquote(request))
end
end
@doc false
def generate_handle_call(options, {call, _body}) do
request = call_signature(call)
api_call = api_signature(options, call)
state_var = { state_name(options), [], nil }
quote do
def handle_call(unquote(request), _, unquote(state_var)) do
__MODULE__.Implementation.unquote(api_call)
|> Jeeves.Common.create_genserver_response(unquote(state_var))
end
end
end
@doc false
def generate_implementation(options, {call, body}) do
quote do
def(unquote(api_signature(options, call)), unquote(body))
end
end
# only used for pools
@doc false
def generate_delegator(_options, {_call, _body}), do: nil
# given def fred(a, b) return { :fred, a, b }
@doc false
def call_signature({ name, _, args }) do
{ :{}, [], [ name | Enum.map(args, fn a -> var!(a) end) ] }
end
# given def fred(a, b) return def fred(«state name», a, b)
@doc false
def api_signature(options, { name, context, args }) do
{ name, context, [ { state_name(options), [], nil } | args ] }
end
@doc false
def service_name(options) do
options[:service_name] || quote(do: __MODULE__)
end
@doc false
def state_name(options) do
check_state_name(options[:state_name])
end
defp check_state_name(nil), do: :state
defp check_state_name(name) when is_atom(name), do: name
defp check_state_name({name, _, _}) do
raise CompileError, description: "state_name: “#{name}” should be an atom, not a variable"
end
defp check_state_name(name) do
raise CompileError, description: "state_name: “#{inspect name}” should be an atom"
end
end
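# A hedged sketch of the `service_name:` option (`Counter` is illustrative),
# using the documented `update_state/2` form:
#
#     defmodule Counter do
#       use Jeeves.Named, state: 0, service_name: :counter
#       def value(), do: state
#       def bump() do
#         update_state(state + 1) do
#           state + 1
#         end
#       end
#     end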
|
lib/jeeves/named.ex
| 0.814643
| 0.676693
|
named.ex
|
starcoder
|
defmodule Xtree.Algorithms do
alias Xtree
@type xtree() :: Xtree.t()
@type tree() :: xtree() | %{children: list(tree)}
@type fn_traverse() ::
(node :: xtree(), accumulator :: any() ->
{:ok, accumulator :: any()} | {:halt, accumulator :: any()})
@doc """
Builds a hash map based on `tMD` of each XTree node
"""
def build_hash_map(tree) do
{:ok, hash_map} =
dft_traverse(tree, %{}, fn %{tMD: tMD}, tMD_map ->
tMD_map =
case Map.get(tMD_map, tMD, nil) do
nil -> Map.put(tMD_map, tMD, 1)
value -> Map.put(tMD_map, tMD, value + 1)
end
{:ok, tMD_map}
end)
hash_map
end
@doc """
Builds 3 maps out of an X-Tree:
- hash_map - A map with key being the node hash and value being the node
- id_map - A map with key being the n_id of node and value being the node
- op_map - A map with key being the n_id of node and value being an operation
"""
def build_tree_maps(tree) do
{:ok, {hash_map, id_map, op_map}} =
dft_traverse(tree, {%{}, %{}, %{}}, fn %{n_id: n_id, tMD: tMD} = node,
{tMD_map, id_map, op_map} ->
tMD_map =
case Map.get(tMD_map, tMD, nil) do
nil -> Map.put(tMD_map, tMD, 1)
value -> Map.put(tMD_map, tMD, value + 1)
end
id_map = Map.put(id_map, n_id, node)
# Put into the OpMap `node.n_id` as key and `{operation, ptrNode}` as value
op_map = Map.put(op_map, n_id, nil)
{:ok, {tMD_map, id_map, op_map}}
end)
{hash_map, id_map, op_map}
end
@doc """
Depth-First pre-order Traverse
"""
@spec dft_traverse(list(tree()) | tree(), accumulator :: any(), fn_visit :: fn_traverse()) ::
{:ok, accumulator :: any()} | {:halt, accumulator :: any()}
def dft_traverse([], acc, _fn_visit) do
{:ok, acc}
end
def dft_traverse([node | nodes], acc, fn_visit) do
case dft_traverse(node, acc, fn_visit) do
{:ok, acc} -> dft_traverse(nodes, acc, fn_visit)
{:halt, acc} -> {:halt, acc}
end
end
def dft_traverse(%{children: children} = node, acc, fn_visit) do
case fn_visit.(node, acc) do
{:halt, acc} ->
{:halt, acc}
{:ok, acc} ->
dft_traverse(children, acc, fn_visit)
acc ->
# Same as {:ok, acc}
dft_traverse(children, acc, fn_visit)
end
end
def dft_traverse(_, acc, _) do
{:ok, acc}
end
@doc """
Breadth-First pre-order Traverse
"""
@spec bft_traverse(list(tree()) | tree(), accumulator :: any(), fn_visit :: fn_traverse()) ::
{:ok, accumulator :: any()} | {:halt, accumulator :: any()}
def bft_traverse([], [], acc, _fn_visit) do
{:ok, acc}
end
def bft_traverse([], children, acc, fn_visit) do
bft_traverse(children, [], acc, fn_visit)
end
def bft_traverse([%{children: node_children} = node | nodes], children, acc, fn_visit) do
case fn_visit.(node, acc) do
{:halt, acc} ->
{:halt, acc}
{:skip, acc} ->
bft_traverse(nodes, children, acc, fn_visit)
{:ok, acc} ->
bft_traverse(nodes, Enum.concat(children, node_children), acc, fn_visit)
acc ->
# Same as {:ok, acc}
bft_traverse(nodes, Enum.concat(children, node_children), acc, fn_visit)
end
end
def bft_traverse(%{children: _} = node, acc, fn_visit) do
bft_traverse([node], [], acc, fn_visit)
end
def bft_traverse(_, acc, _) do
{:ok, acc}
end
# Not implemented yet: a depth-first post-order traversal would visit children
# before their parent. For now this catch-all returns the accumulator unchanged.
def df_post_order_traverse(_, acc, _) do
acc
end
@spec walk(node :: tree() | list(tree()), map_func :: (tree() -> any())) :: any()
def walk([], _fn_walk) do
[]
end
def walk([node | nodes], fn_walk) do
ret = walk(node, fn_walk)
[ret | walk(nodes, fn_walk)]
end
def walk(%{children: _} = node, fn_walk) do
case fn_walk.(node) do
%{children: children} = ret ->
children = walk(children, fn_walk)
Map.put(ret, :children, children)
ret ->
ret
end
end
def walk(ret, _) do
ret
end
end
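# A usage sketch: counting nodes in a tree of `%{children: [...]}` maps with
# the depth-first traversal above.
#
#     tree = %{children: [%{children: []}, %{children: []}]}
#     {:ok, 3} = Xtree.Algorithms.dft_traverse(tree, 0, fn _node, acc -> {:ok, acc + 1} end)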
|
lib/xtree/algorithms.ex
| 0.80406
| 0.510619
|
algorithms.ex
|
starcoder
|
defmodule Day3 do
@moduledoc """
--- Day 3: Toboggan Trajectory ---
Part1 -> how many trees would you encounter, while traversing the map, with (3, 1) slope.
Part2 -> What do you get if you multiply together the number of
trees encountered on each of the listed slopes?
"""
@num_of_rows 323
@row_length 31
def solve_part1 do
airport_map_list = generate_airport_map_list()
slope1 = %{x: 1, y: 3}
slope1_trees = find_num_trees(airport_map_list, slope1.x, slope1.y, slope1)
slope1_trees
end
def solve_part2 do
airport_map_list = generate_airport_map_list()
slope1 = %{x: 1, y: 1}
slope1_trees = find_num_trees(airport_map_list, slope1.x, slope1.y, slope1)
slope2 = %{x: 1, y: 3}
slope2_trees = find_num_trees(airport_map_list, slope2.x, slope2.y, slope2)
slope3 = %{x: 1, y: 5}
slope3_trees = find_num_trees(airport_map_list, slope3.x, slope3.y, slope3)
slope4 = %{x: 1, y: 7}
slope4_trees = find_num_trees(airport_map_list, slope4.x, slope4.y, slope4)
slope5 = %{x: 2, y: 1}
slope5_trees = find_num_trees(airport_map_list, slope5.x, slope5.y, slope5)
slope1_trees * slope2_trees * slope3_trees * slope4_trees * slope5_trees
end
defp generate_airport_map_list do
File.stream!("airport_map.txt")
|> Enum.map(&String.trim/1)
end
defp find_num_trees(airport_map_list, row_index, col_index, slope, num_trees \\ 0)
defp find_num_trees(_airport_map_list, row_index, _col_index, _slope, num_trees)
when row_index >= @num_of_rows, do: num_trees
defp find_num_trees(airport_map_list, row_index, col_index, slope, num_trees) do
ground_point = Enum.at(airport_map_list, row_index)
|> String.at(rem(col_index, @row_length))
case ground_point do
"#" -> find_num_trees(airport_map_list, row_index + slope.x, col_index + slope.y, slope, num_trees + 1)
"." -> find_num_trees(airport_map_list, row_index + slope.x, col_index + slope.y, slope, num_trees)
end
end
end
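# Usage sketch (requires an `airport_map.txt` whose dimensions match
# @num_of_rows and @row_length):
#
#     Day3.solve_part1()   # trees hit on the (right 3, down 1) slope
#     Day3.solve_part2()   # product of tree counts over all five slopes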
|
day3.ex
| 0.807385
| 0.710427
|
day3.ex
|
starcoder
|
defmodule JSONC.Parser do
@moduledoc false
import JSONC.Tokenizer
def parse!(content) when is_binary(content) do
case parse(content) do
{:ok, result} ->
result
{:error, reason} ->
raise reason
end
end
def parse(content) when is_binary(content) do
parse_value({content, cursor: {1, 1}, token: nil}, :root)
end
defp parse_value(state, context \\ :other) do
case parse_comments(state) do
{:error, reason} ->
{:error, reason}
{comments, state} when is_list(comments) ->
{current, state} = next(state)
case parse_comments(state) do
{:error, reason} ->
{:error, reason}
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
{value, state} =
case {current, state} do
{{{:delimiter, {:brace, :open}}, line, column}, state} ->
{node, state} = parse_object(state, {line, column})
case {node, state} do
{:error, reason} ->
{:error, reason}
_ ->
{{node, comments}, state}
end
{{{:delimiter, {:bracket, :open}}, line, column}, state} ->
{node, state} = parse_array(state, {line, column})
case {node, state} do
{:error, reason} ->
{:error, reason}
_ ->
{{node, comments}, state}
end
{{{:string, {subtype, value}}, line, column}, state} ->
{{%{
type: :string,
subtype: subtype,
value: value,
place: {line, column}
}, comments}, state}
{{{:number, {subtype, value}}, line, column}, state} ->
{{%{
type: :number,
subtype: subtype,
value: value,
place: {line, column}
}, comments}, state}
{{{:boolean, value}, line, column}, state} ->
{{%{type: :boolean, value: value, place: {line, column}}, comments}, state}
{{nil, line, column}, state} ->
{{%{type: nil, place: {line, column}}, comments}, state}
{:error, reason} ->
{:error, reason}
{{token, line, column}, _} ->
{:error,
"unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
{:done, _} ->
{:error, "unexpected end of input"}
end
case value do
:error ->
{:error, state}
{value, _} = node ->
case context do
:root ->
case peek(state) do
:done ->
{:ok, %{type: :root, value: value, comments: comments}}
{token, line, column} ->
{:error,
"unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
end
_ ->
{node, state}
end
end
end
end
end
defp parse_object(state, start, map \\ %{}, comments \\ [])
when is_map(map) and is_list(comments) do
case peek(state) do
{{:delimiter, {:brace, :close}}, _, _} ->
{_, state} = next(state)
case parse_comments(state) do
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
{%{type: :object, value: map, place: start, comments: comments}, state}
{:error, reason} ->
{:error, reason}
end
{{:delimiter, :comma} = token, line, column} when map == %{} ->
{:error, "unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
{{:delimiter, :comma}, _, _} ->
{_, state} = next(state)
case parse_comments(state) do
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
parse_object(state, start, map, comments)
{:error, reason} ->
{:error, reason}
end
_ ->
{current, state} = next(state)
case parse_comments(state) do
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
case current do
{{:string, {subtype, key}}, _, _} when subtype in [:single, :free] ->
case peek(state) do
{{:delimiter, :colon}, _, _} ->
{_, state} = next(state)
case parse_comments(state) do
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
case parse_value(state) do
{:error, reason} ->
{:error, reason}
{{current, value_comments}, state} ->
map = map |> Map.put(key, current)
parse_object(state, start, map, comments ++ value_comments)
end
{:error, reason} ->
{:error, reason}
end
{token, line, column} ->
{:error,
"unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
end
{token, line, column} ->
{:error,
"unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
end
{:error, reason} ->
{:error, reason}
end
end
end
defp parse_array(state, start, list \\ [], comments \\ [])
when is_list(list) and is_list(comments) do
case peek(state) do
{{:delimiter, {:bracket, :close}}, _, _} ->
{_, state} = next(state)
case parse_comments(state) do
{:error, reason} ->
{:error, reason}
{new_comments, state} when is_list(new_comments) ->
{%{type: :array, value: list, place: start, comments: comments ++ new_comments},
state}
end
{{:delimiter, :comma} = token, line, column} when list == [] ->
{:error, "unexpected token `#{token |> inspect()}` at line #{line} column #{column}"}
{{:delimiter, :comma}, _, _} ->
{_, state} = next(state)
case parse_comments(state) do
{:error, reason} ->
{:error, reason}
{new_comments, state} when is_list(new_comments) ->
comments = comments ++ new_comments
parse_array(state, start, list, comments)
end
_ ->
case parse_value(state) do
{:error, reason} ->
{:error, reason}
{{current, value_comments}, state} ->
list = list ++ [current]
parse_array(state, start, list, comments ++ value_comments)
end
end
end
defp parse_comments(state, comments \\ []) when is_list(comments) do
case peek(state) do
{{:comment, {subtype, value}}, line, column} ->
{_, state} = next(state)
parse_comments(
state,
comments ++ [%{type: :comment, subtype: subtype, value: value, place: {line, column}}]
)
{:error, reason} ->
{:error, reason}
_ ->
{comments, state}
end
end
end
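# A hedged usage sketch (assumes the tokenizer emits `//` line comments as
# :comment tokens):
#
#     {:ok, root} = JSONC.Parser.parse("// the answer\n42")
#     root.type   #=> :root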
|
lib/parser.ex
| 0.709623
| 0.501404
|
parser.ex
|
starcoder
|
defmodule Topo.Contains do
@moduledoc false
import Topo.Intersects
alias Topo.PointRing
alias Topo.LineLine
alias Topo.LineRing
alias Topo.RingRing
@type geo_struct ::
%Geo.Point{}
| %Geo.MultiPoint{}
| %Geo.LineString{}
| %Geo.MultiLineString{}
| %Geo.Polygon{}
| %Geo.MultiPolygon{}
@spec contains?(geo_struct, geo_struct) :: boolean
def contains?(%Geo.Point{} = a, %Geo.Point{} = b), do: a == b
def contains?(%Geo.Point{} = a, %Geo.MultiPoint{} = b), do: contains_all?(a, b, Geo.Point)
def contains?(%Geo.Point{}, %Geo.LineString{}), do: false
def contains?(%Geo.Point{}, %Geo.MultiLineString{}), do: false
def contains?(%Geo.Point{}, %Geo.Polygon{}), do: false
def contains?(%Geo.Point{}, %Geo.MultiPolygon{}), do: false
def contains?(%Geo.MultiPoint{} = a, %Geo.Point{} = b), do: any_contain?(a, b, Geo.Point)
def contains?(%Geo.MultiPoint{} = a, %Geo.MultiPoint{} = b), do: contains_all?(a, b, Geo.Point)
def contains?(%Geo.MultiPoint{}, %Geo.LineString{}), do: false
def contains?(%Geo.MultiPoint{}, %Geo.MultiLineString{}), do: false
def contains?(%Geo.MultiPoint{}, %Geo.Polygon{}), do: false
def contains?(%Geo.MultiPoint{}, %Geo.MultiPolygon{}), do: false
def contains?(%Geo.LineString{} = a, %Geo.Point{} = b) do
cond do
List.first(a.coordinates) == List.last(a.coordinates) ->
intersects?(a, b)
intersects?(a, b) ->
b.coordinates != List.first(a.coordinates) && b.coordinates != List.last(a.coordinates)
true ->
false
end
end
def contains?(%Geo.LineString{} = a, %Geo.MultiPoint{} = b),
do: intersects_all?(a, b, Geo.Point) && contains_any?(a, b, Geo.Point)
def contains?(%Geo.LineString{coordinates: a}, %Geo.LineString{coordinates: b}) do
LineLine.contains?(a, b)
end
def contains?(%Geo.LineString{} = a, %Geo.MultiLineString{} = b),
do: contains_all?(a, b, Geo.LineString)
def contains?(%Geo.LineString{}, %Geo.Polygon{}), do: false
def contains?(%Geo.LineString{}, %Geo.MultiPolygon{}), do: false
def contains?(%Geo.MultiLineString{} = a, %Geo.Point{} = b),
do: any_contain?(a, b, Geo.LineString)
def contains?(%Geo.MultiLineString{} = a, %Geo.MultiPoint{} = b),
do: intersects_all?(a, b, Geo.Point) && contains_any?(a, b, Geo.Point)
def contains?(%Geo.MultiLineString{} = a, %Geo.LineString{} = b),
do: any_contain?(a, b, Geo.LineString)
def contains?(%Geo.MultiLineString{} = a, %Geo.MultiLineString{} = b),
do: contains_all?(a, b, Geo.LineString)
def contains?(%Geo.MultiLineString{}, %Geo.Polygon{}), do: false
def contains?(%Geo.MultiLineString{}, %Geo.MultiPolygon{}), do: false
def contains?(%Geo.Polygon{coordinates: [exterior | holes]}, %Geo.Point{coordinates: point}) do
PointRing.relate(exterior, point) === :interior &&
PointRing.relate_multi(holes, point) === :disjoint
end
def contains?(%Geo.Polygon{} = a, %Geo.MultiPoint{} = b) do
contains_any?(a, b, Geo.Point) && intersects_all?(a, b, Geo.Point)
end
def contains?(%Geo.Polygon{coordinates: [a_exterior | a_holes]}, %Geo.LineString{} = b) do
LineRing.contains?(a_exterior, b.coordinates) &&
LineRing.line_exterior_to_all?(a_holes, b.coordinates)
end
def contains?(%Geo.Polygon{} = a, %Geo.MultiLineString{} = b),
do: contains_all?(a, b, Geo.LineString)
def contains?(%Geo.Polygon{coordinates: [a_exterior | a_holes]}, %Geo.Polygon{
coordinates: [b_exterior | b_holes]
}) do
RingRing.contains?(a_exterior, b_exterior) && holes_contained?(a_holes, b_exterior, b_holes)
end
def contains?(%Geo.Polygon{} = a, %Geo.MultiPolygon{} = b), do: contains_all?(a, b, Geo.Polygon)
def contains?(%Geo.MultiPolygon{} = a, %Geo.Point{} = b), do: any_contain?(a, b, Geo.Polygon)
def contains?(%Geo.MultiPolygon{} = a, %Geo.MultiPoint{} = b),
do: intersects_all?(a, b, Geo.Point) && contains_any?(a, b, Geo.Point)
def contains?(%Geo.MultiPolygon{} = a, %Geo.LineString{} = b),
do: any_contain?(a, b, Geo.Polygon)
def contains?(%Geo.MultiPolygon{} = a, %Geo.MultiLineString{} = b),
do: contains_all?(a, b, Geo.LineString)
def contains?(%Geo.MultiPolygon{} = a, %Geo.Polygon{} = b), do: any_contain?(a, b, Geo.Polygon)
def contains?(%Geo.MultiPolygon{} = a, %Geo.MultiPolygon{} = b),
do: contains_all?(a, b, Geo.Polygon)
defp contains_all?(a, b, component_struct) do
Enum.all?(b.coordinates, fn b_comp ->
contains?(a, struct(component_struct, %{coordinates: b_comp}))
end)
end
defp contains_any?(a, b, component_struct) do
Enum.any?(b.coordinates, fn b_comp ->
contains?(a, struct(component_struct, %{coordinates: b_comp}))
end)
end
defp any_contain?(a, b, component_struct) do
Enum.any?(a.coordinates, fn a_comp ->
contains?(struct(component_struct, %{coordinates: a_comp}), b)
end)
end
defp intersects_all?(a, b, component_struct) do
Enum.all?(b.coordinates, fn b_comp ->
intersects?(a, struct(component_struct, %{coordinates: b_comp}))
end)
end
defp holes_contained?(a_holes, b_exterior, []) do
!Enum.any?(a_holes, fn hole ->
RingRing.overlaps?(hole, b_exterior) || RingRing.contains?(hole, b_exterior)
end)
end
defp holes_contained?(a_holes, b_exterior, b_holes) do
holes_intersecting_b = Enum.filter(a_holes, &RingRing.overlaps?(&1, b_exterior))
Enum.all?(holes_intersecting_b, fn a_hole ->
RingRing.contains?(b_exterior, a_hole) &&
Enum.any?(b_holes, &RingRing.contains?(&1, a_hole))
end)
end
end
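# A hedged usage sketch (this module is private; `Topo.contains?/2` is the
# public entry point): a point strictly inside a polygon's exterior ring.
#
#     square = %Geo.Polygon{coordinates: [[{0, 0}, {0, 10}, {10, 10}, {10, 0}, {0, 0}]]}
#     Topo.Contains.contains?(square, %Geo.Point{coordinates: {5, 5}})   #=> true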
|
lib/topo/contains.ex
| 0.777046
| 0.40645
|
contains.ex
|
starcoder
|
defmodule ExWire.Adapter.UDP do
@moduledoc """
Starts a UDP server to handle incoming and outgoing
peer to peer messages according to RLPx.
"""
use GenServer
@doc """
When starting a UDP server, we'll store a network to use for all
message handling.
"""
@spec start_link(atom(), {module(), term()}, integer()) :: GenServer.on_start()
def start_link(name, {module, args}, port) do
GenServer.start_link(
__MODULE__,
%{network: module, network_args: args, port: port},
name: name
)
end
@doc """
Initialize by opening up a `gen_udp` server on a given port.
"""
@impl true
def init(state = %{port: port}) do
{:ok, socket} = :gen_udp.open(port, [:binary])
{:ok, Map.put(state, :socket, socket)}
end
@doc """
Handle info will handle when we have communication from a peer node.
We'll offload the effort to our `ExWire.Network` and `ExWire.Handler` modules.
Note: all responses will be asynchronous.
"""
@impl true
def handle_info(
{:udp, _socket, ip, port, data},
state = %{network: network, network_args: network_args}
) do
Exth.trace(fn ->
"Got UDP message from #{inspect(ip)}:#{to_string(port)} with #{byte_size(data)} bytes, handling with {#{
Atom.to_string(network)
}, #{inspect(network_args)}}"
end)
:ok = handle_inbound_message(ip, port, data, network, network_args)
{:noreply, state}
end
@spec handle_inbound_message(:inet.ip_address(), non_neg_integer(), binary(), module(), term()) ::
:ok
defp handle_inbound_message(ip, port, data, network, network_args) do
ip = Tuple.to_list(ip)
inbound_message = %ExWire.Network.InboundMessage{
data: data,
server_pid: self(),
remote_host: %ExWire.Struct.Endpoint{
ip: ip,
udp_port: port
},
timestamp: ExWire.Util.Timestamp.soon()
}
apply(network, :receive, [inbound_message, network_args])
:ok
end
@doc """
For cast, we'll respond back to a given peer with a given message package. This represents
all outbound messages we'll ever send.
"""
@impl true
def handle_cast(
{:send, %{to: %{ip: ip, udp_port: udp_port}, data: data}},
state = %{socket: socket}
)
when not is_nil(udp_port) do
Exth.trace(fn ->
"Sending UDP message to #{inspect(ip)}:#{to_string(udp_port)} with #{byte_size(data)} bytes"
end)
tuple_ip = List.to_tuple(ip)
:ok = :gen_udp.send(socket, tuple_ip, udp_port, data)
{:noreply, state}
end
end
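# A hedged usage sketch (`:discovery` and `payload` are illustrative):
# outbound sends go through handle_cast, with the IP given as a list.
#
#     GenServer.cast(:discovery, {:send, %{to: %{ip: [127, 0, 0, 1], udp_port: 30303}, data: payload}})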
|
apps/ex_wire/lib/ex_wire/adapter/udp.ex
| 0.837686
| 0.469885
|
udp.ex
|
starcoder
|
defmodule Bincode.Structs do
@moduledoc """
Module defining macros related to structs and enums.
"""
@doc """
Declares a new struct. This macro generates a struct with serialization and
deserialization methods according to the given fields.
## Options
* `absolute` - When set to true, the given struct name is interpreted as the absolute module name.
When set to false, the given struct name is appended to the caller's module. Defaults to false.
## Example
defmodule MyStructs do
import Bincode.Structs
declare_struct(Person,
first_name: :string,
last_name: :string,
age: :u8
)
end
alias MyStructs.Person
person = %Person{first_name: "John", last_name: "Doe", age: 44}
{:ok, <<4, 0, 0, 0, 0, 0, 0, 0, 74, 111, 104, 110, 3, 0, 0, 0, 0, 0, 0, 0, 68, 111, 101, 44>>} = Bincode.serialize(person, Person)
It's also possible to call `serialize` and `deserialize` from the struct module directly.
{:ok, {%Person{age: 44, first_name: "John", last_name: "Doe"}, ""}} = Person.deserialize(<<4, 0, 0, 0, 0, 0, 0, 0, 74, 111, 104, 110, 3, 0, 0, 0, 0, 0, 0, 0, 68, 111, 101, 44>>)
Structs and enums can be nested. In this case the type is the fully qualified module. For example:
defmodule MyStructs do
import Bincode.Structs
declare_struct(Person,
first_name: :string,
last_name: :string,
age: :u8
)
declare_struct(Employee,
employee_number: :u64,
person: MyStructs.Person,
job_title: :string,
)
end
"""
defmacro declare_struct(struct, fields, options \\ []) when is_list(fields) do
%Macro.Env{module: caller_module} = __CALLER__
struct_module =
if Keyword.get(options, :absolute, false) do
Macro.expand(struct, __CALLER__)
else
Module.concat([caller_module, Macro.expand(struct, __CALLER__)])
end
struct_data = for {field_name, _} <- fields, do: {field_name, nil}
field_names = for {field_name, _} <- fields, do: field_name
field_types = for {_, field_type} <- fields, do: field_type
types =
for type <- field_types do
case type do
# This field is a struct
{:__aliases__, _, _} -> Macro.expand(type, __CALLER__)
_ -> type
end
end
value_variables =
for {field_name, _} <- fields do
quote do: var!(struct).unquote(Macro.var(field_name, nil))
end
prefix = Keyword.get(options, :prefix, {nil, nil})
quoted_prefix_serialization =
case prefix do
{nil, nil} ->
{:ok, <<>>}
{prefix_value, prefix_type} ->
quote do: Bincode.serialize(unquote(prefix_value), unquote(prefix_type), var!(opts))
end
quoted_prefix_deserialization =
case prefix do
{nil, nil} ->
quote do: {:ok, {<<>>, var!(rest)}}
{prefix_value, prefix_type} ->
quote do: Bincode.deserialize(var!(rest), unquote(prefix_type), var!(opts))
end
quote do
defmodule unquote(struct_module) do
defstruct unquote(struct_data)
def serialize(struct, opts \\ [])
def serialize(%__MODULE__{} = var!(struct), var!(opts)) do
with {:ok, serialized_prefix} = unquote(quoted_prefix_serialization) do
serialized_fields =
Enum.reduce_while(
Enum.zip([unquote_splicing(value_variables)], [unquote_splicing(types)]),
[serialized_prefix],
fn {value_var, type}, result ->
case Bincode.serialize(value_var, type, var!(opts)) do
{:ok, serialized} -> {:cont, [result, serialized]}
{:error, msg} -> {:halt, {:error, msg}}
end
end
)
case serialized_fields do
{:error, msg} ->
{:error, msg}
_ ->
{:ok, IO.iodata_to_binary(serialized_fields)}
end
end
end
def serialize(value, _opts) do
{:error,
"Cannot serialize value #{inspect(value)} into struct #{unquote(struct_module)}"}
end
def serialize!(value, opts) do
case serialize(value, opts) do
{:ok, result} -> result
{:error, message} -> raise ArgumentError, message: message
end
end
def deserialize(data, opts \\ [])
def deserialize(<<var!(rest)::binary>>, var!(opts)) do
with {:ok, {deserialized_variant, rest}} <- unquote(quoted_prefix_deserialization) do
deserialized_fields =
Enum.reduce_while(
Enum.zip([unquote_splicing(field_names)], [unquote_splicing(types)]),
{[], rest},
fn {field_name, type}, {fields, rest} ->
case Bincode.deserialize(rest, type, var!(opts)) do
{:ok, {deserialized, rest}} ->
{:cont, {[{field_name, deserialized} | fields], rest}}
{:error, msg} ->
{:halt, {:error, msg}}
end
end
)
case deserialized_fields do
{:error, msg} ->
{:error, msg}
{fields, rest} ->
struct = struct!(unquote(struct_module), fields)
{:ok, {struct, rest}}
end
end
end
def deserialize(data, _opts) do
{:error,
"Cannot deserialize value #{inspect(data)} into struct #{unquote(struct_module)}"}
end
def deserialize!(data, opts) do
case deserialize(data, opts) do
{:ok, result} -> result
{:error, message} -> raise ArgumentError, message: message
end
end
end
defimpl Bincode.Serializer, for: unquote(struct_module) do
def serialize(term, opts) do
unquote(struct_module).serialize(term, opts)
end
end
end
end
@doc """
Declares a new enum. This macro generates a module for the enum, plus a struct for each variant
with serialization and deserialization methods according to the given fields.
## Options
* `absolute` - When set to true, the given struct name is interpreted as the absolute module name.
When set to false, the given struct name is appended to the caller's module. Defaults to false.
## Example
defmodule MyEnums do
import Bincode.Structs
declare_enum(IpAddr,
V4: [tuple: {:u8, :u8, :u8, :u8}],
V6: [addr: :string]
)
end
alias MyEnums.IpAddr
ip_v4 = %IpAddr.V4{tuple: {127, 0, 0, 1}}
{:ok, <<0, 0, 0, 0, 127, 0, 0, 1>>} = Bincode.serialize(ip_v4, IpAddr)
ip_v6 = %IpAddr.V6{addr: "::1"}
{:ok, <<1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 58, 58, 49>>} = Bincode.serialize(ip_v6, IpAddr)
It's also possible to call `serialize` and `deserialize` from the struct module directly.
{:ok, {%IpAddr.V4{tuple: {127, 0, 0, 1}}, ""}} = IpAddr.deserialize(<<0, 0, 0, 0, 127, 0, 0, 1>>)
{:ok, {%IpAddr.V6{addr: "::1"}, ""}} = IpAddr.deserialize(<<1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 58, 58, 49>>)
Enums can be nested and contain structs. See `Bincode.Structs.declare_struct/3`.
"""
defmacro declare_enum(enum, variants, options \\ []) when is_list(variants) do
%Macro.Env{module: caller_module} = __CALLER__
enum_module =
if Keyword.get(options, :absolute, false) do
Macro.expand(enum, __CALLER__)
else
Module.concat([caller_module, Macro.expand(enum, __CALLER__)])
end
quote do
defmodule unquote(enum_module) do
unquote do
variants_definition =
for {{variant, fields}, i} <- Enum.with_index(variants) do
variant_module = Module.concat([enum_module, Macro.expand(variant, __CALLER__)])
quote do
Bincode.Structs.declare_struct(
unquote(variant),
unquote(fields),
prefix: {unquote(i), :u32}
)
def serialize(%unquote(variant_module){} = variant, opts) do
unquote(variant_module).serialize(variant, opts)
end
defp deserialize(unquote(i), <<data::binary>>, opts) do
unquote(variant_module).deserialize(data, opts)
end
end
end
quote do
unquote(variants_definition)
def serialize(value, _opts) do
{:error,
"Cannot serialize variant #{inspect(value)} into enum #{unquote(enum_module)}"}
end
def serialize!(value, opts) do
case serialize(value, opts) do
{:ok, result} -> result
{:error, message} -> raise ArgumentError, message: message
end
end
def deserialize(<<data::binary>>, opts) do
case Bincode.deserialize(data, :u32, opts) do
{:ok, {deserialized_variant, _}} ->
deserialize(deserialized_variant, data, opts)
{:error, _} ->
{:error,
"Cannot serialize variant #{inspect(data)} into enum #{unquote(enum_module)}"}
end
end
def deserialize(data, _opts) do
{:error,
"Cannot deserialize #{inspect(data)} into enum #{unquote(enum_module)} variant"}
end
defp deserialize(_unknown_variant, data, _opts) do
{:error,
"Cannot deserialize #{inspect(data)} into enum #{unquote(enum_module)} variant"}
end
def deserialize!(data, opts) do
case deserialize(data, opts) do
{:ok, result} -> result
{:error, message} -> raise ArgumentError, message: message
end
end
end
end
end
end
end
end
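# A hedged sketch of the generated bang variants, which raise on failure
# (reusing the Person struct from the moduledoc example):
#
#     person = %MyStructs.Person{first_name: "John", last_name: "Doe", age: 44}
#     bin = MyStructs.Person.serialize!(person, [])
#     {^person, ""} = MyStructs.Person.deserialize!(bin, [])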
|
lib/bincode/structs.ex
| 0.880155
| 0.461259
|
structs.ex
|
starcoder
|
defmodule Tensorflow.FunctionDefLibrary do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
function: [Tensorflow.FunctionDef.t()],
gradient: [Tensorflow.GradientDef.t()]
}
defstruct [:function, :gradient]
field(:function, 1, repeated: true, type: Tensorflow.FunctionDef)
field(:gradient, 2, repeated: true, type: Tensorflow.GradientDef)
end
defmodule Tensorflow.FunctionDef.AttrEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: Tensorflow.AttrValue.t() | nil
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: Tensorflow.AttrValue)
end
defmodule Tensorflow.FunctionDef.ArgAttrs.AttrEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: Tensorflow.AttrValue.t() | nil
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: Tensorflow.AttrValue)
end
defmodule Tensorflow.FunctionDef.ArgAttrs do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
attr: %{String.t() => Tensorflow.AttrValue.t() | nil}
}
defstruct [:attr]
field(:attr, 1,
repeated: true,
type: Tensorflow.FunctionDef.ArgAttrs.AttrEntry,
map: true
)
end
defmodule Tensorflow.FunctionDef.ArgAttrEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: non_neg_integer,
value: Tensorflow.FunctionDef.ArgAttrs.t() | nil
}
defstruct [:key, :value]
field(:key, 1, type: :uint32)
field(:value, 2, type: Tensorflow.FunctionDef.ArgAttrs)
end
defmodule Tensorflow.FunctionDef.ResourceArgUniqueIdEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: non_neg_integer,
value: non_neg_integer
}
defstruct [:key, :value]
field(:key, 1, type: :uint32)
field(:value, 2, type: :uint32)
end
defmodule Tensorflow.FunctionDef.RetEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: :string)
end
defmodule Tensorflow.FunctionDef.ControlRetEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: :string)
end
defmodule Tensorflow.FunctionDef do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
signature: Tensorflow.OpDef.t() | nil,
attr: %{String.t() => Tensorflow.AttrValue.t() | nil},
arg_attr: %{
non_neg_integer => Tensorflow.FunctionDef.ArgAttrs.t() | nil
},
resource_arg_unique_id: %{non_neg_integer => non_neg_integer},
node_def: [Tensorflow.NodeDef.t()],
ret: %{String.t() => String.t()},
control_ret: %{String.t() => String.t()}
}
defstruct [
:signature,
:attr,
:arg_attr,
:resource_arg_unique_id,
:node_def,
:ret,
:control_ret
]
field(:signature, 1, type: Tensorflow.OpDef)
field(:attr, 5,
repeated: true,
type: Tensorflow.FunctionDef.AttrEntry,
map: true
)
field(:arg_attr, 7,
repeated: true,
type: Tensorflow.FunctionDef.ArgAttrEntry,
map: true
)
field(:resource_arg_unique_id, 8,
repeated: true,
type: Tensorflow.FunctionDef.ResourceArgUniqueIdEntry,
map: true
)
field(:node_def, 3, repeated: true, type: Tensorflow.NodeDef)
field(:ret, 4,
repeated: true,
type: Tensorflow.FunctionDef.RetEntry,
map: true
)
field(:control_ret, 6,
repeated: true,
type: Tensorflow.FunctionDef.ControlRetEntry,
map: true
)
end
defmodule Tensorflow.GradientDef do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
function_name: String.t(),
gradient_func: String.t()
}
defstruct [:function_name, :gradient_func]
field(:function_name, 1, type: :string)
field(:gradient_func, 2, type: :string)
end
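# A hedged round-trip sketch (new/1, encode/1 and decode/1 are assumed to be
# generated by `use Protobuf`):
#
#     msg = Tensorflow.GradientDef.new(function_name: "loss", gradient_func: "loss_grad")
#     bin = Tensorflow.GradientDef.encode(msg)
#     ^msg = Tensorflow.GradientDef.decode(bin)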
|
lib/tensorflow/core/framework/function.pb.ex
| 0.80567
| 0.661951
|
function.pb.ex
|
starcoder
|
defmodule ScChecksumHelper do
@moduledoc """
This module contains functions which generate payment requests to the SafeCharge servers.
"""
# This will enable us to test the private functions.
# Note that testing private functions is bad, as it tests implementation details rather than behaviour!
@compile if Mix.env == :test, do: :export_all
@doc """
This function generates a request to the SafeCharge servers. It raises an exception if the parameters are not valid.
"""
@type sc_request :: %ScRequest{secret_key: String.t(), algorithm: atom(), charset: atom(), server_url: String.t()}
@spec get_link!(sc_request, map(), atom()) :: String.t()
def get_link!(sc_request, params, version) do
validate_request!(sc_request, params)
case version do
:v4 -> get_v4_link(sc_request, params)
:v3 -> get_v3_link(sc_request, params)
_ -> raise "Unknown request version"
end
end
defp validate_request!(sc_request, params) do
# add validation
if (sc_request == nil or params == nil), do: raise "Missing required parameters!"
end
defp get_v4_link(sc_request, params) do
params = params
|> Map.put(:version, "4.0.0")
|> Map.put_new(:timestamp, get_current_timestamp())
checksum = calculate_v4_checksum(sc_request, params)
build_url(sc_request, params, checksum)
end
defp get_v3_link(sc_request, params) do
params = params
|> Map.put(:version, "3.0.0")
|> Map.put_new(:timestamp, get_current_timestamp())
checksum = calculate_v3_checksum(sc_request, params)
build_url(sc_request, params, checksum)
end
defp calculate_v4_checksum(sc_request, params) do
params
|> Enum.sort
|> Enum.map(fn {k, v} -> "#{k}#{v}" end)
|> Enum.concat([sc_request.secret_key])
|> Enum.join
|> (&:crypto.hash(sc_request.algorithm, &1)).()
|> Base.encode16(case: :lower)
end
defp calculate_v3_checksum(_sc_request, _params) do
# Stub: the v3 checksum algorithm is not implemented yet; generated v3 links carry this placeholder.
"Implement me!"
end
defp build_url(sc_request, params, checksum) do
sc_request.server_url <> "?" <> URI.encode_query(params) <> "&checksum=" <> checksum
end
defp get_current_timestamp() do
Timex.now
|> Timex.format!("{YYYY}-{0M}-{0D}.{h24}:{0m}:{0s}")
# Maybe we should use :strftime, as the default formatter syntax is a Timex-specific DSL...
end
end
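# A hedged usage sketch (all field values are invented; %ScRequest{} is
# defined elsewhere in the project):
#
#     request = %ScRequest{secret_key: "s3cret", algorithm: :sha256,
#                          charset: :utf8, server_url: "https://example.test/pay"}
#     ScChecksumHelper.get_link!(request, %{merchant_id: "42", total_amount: "10.00"}, :v4)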
|
lib/sc_checksum_helper.ex
| 0.729038
| 0.470858
|
sc_checksum_helper.ex
|
starcoder
|
defmodule Pathfinding do
@moduledoc """
This module is the entry point to access the more important `Pathfinding.Grid` and provides the search methods used against a Grid struct.
"""
alias Pathfinding.{
Coord,
Grid,
Node,
Search
}
@doc """
Returns the path from one coordinate to another.
A `cost_threshold` can be provided if the search should terminate early, such as being restricted by movement distance.
"""
@spec find_path(Grid.t(), number, number, number, number, number | nil) :: [Coord.t()] | nil
def find_path(_, start_x, start_y, end_x, end_y, cost_threshold \\ nil)
def find_path(_, start_x, start_y, end_x, end_y, _) when start_x == end_x and start_y == end_y,
do: []
def find_path(grid, start_x, start_y, end_x, end_y, cost_threshold) do
case Grid.is_coord_stoppable?(grid, end_x, end_y) do
false ->
nil
true ->
search = Search.new(start_x, start_y, end_x, end_y, cost_threshold)
start_node =
search
|> coordinate_to_node(nil, start_x, start_y, 0)
search =
search
|> Search.push(start_node)
|> calculate(grid)
case Search.pop(search) do
{nil, _} ->
nil
{node, _} ->
node
|> Node.format_path()
end
end
end
@doc """
Returns the 'walkable' coordinates within the grid from a specified coordinate. If a list of coordinate is provided, a search will be executed starting from each coordinate.
A `cost_threshold` can be provided if the search should terminate early, such as being restricted by movement distance.
"""
@spec find_walkable(Grid.t(), %{x: number, y: number} | [%{x: number, y: number}, ...], number | nil) :: [%{x: number, y: number}]
def find_walkable(_, _, cost_threshold \\ nil)
def find_walkable(grid, %{x: _, y: _} = coord, cost_threshold) do
find_walkable(grid, [coord], cost_threshold)
end
def find_walkable(grid, coords, cost_threshold) when is_list(coords) do
%{x: x, y: y} = List.first(coords)
search = Search.new(x, y, cost_threshold)
nodes =
coords
|> Enum.map(fn %{x: x, y: y} ->
coordinate_to_node(search, nil, x, y, 0)
end)
search =
nodes
|> Enum.reduce(search, &Search.push(&2, &1))
|> calculate(grid)
search
|> Search.traversed_nodes()
|> Enum.filter(&Grid.is_coord_walkable?(grid, &1.x, &1.y))
|> Enum.map(&%{x: &1.x, y: &1.y})
end
defp calculate(%Search{} = search, %Grid{} = grid) do
case Search.size(search) do
0 ->
search
_ ->
case reached_destination(search, Search.peek(search)) do
true ->
search
false ->
{node, search} = Search.pop(search)
node = node |> Map.put(:visited, true)
search = search |> Search.cache(node)
# :cardinal
search =
case Grid.in_grid?(grid, node.x, node.y - 1) do
false -> search
true -> check_adjacent_node(search, grid, node, 0, -1)
end
# :hex & :intercardinal
search =
case !Grid.is_cardinal?(grid) && Grid.in_grid?(grid, node.x + 1, node.y - 1) do
false -> search
true -> check_adjacent_node(search, grid, node, 1, -1)
end
# :cardinal
search =
case Grid.in_grid?(grid, node.x + 1, node.y) do
false -> search
true -> check_adjacent_node(search, grid, node, 1, 0)
end
# :intercardinal
search =
case Grid.is_intercardinal?(grid) && Grid.in_grid?(grid, node.x + 1, node.y + 1) do
false -> search
true -> check_adjacent_node(search, grid, node, 1, 1)
end
# :cardinal
search =
case Grid.in_grid?(grid, node.x, node.y + 1) do
false -> search
true -> check_adjacent_node(search, grid, node, 0, 1)
end
# :hex & :intercardinal
search =
case !Grid.is_cardinal?(grid) && Grid.in_grid?(grid, node.x - 1, node.y + 1) do
false -> search
true -> check_adjacent_node(search, grid, node, -1, 1)
end
# :cardinal
search =
case Grid.in_grid?(grid, node.x - 1, node.y) do
false -> search
true -> check_adjacent_node(search, grid, node, -1, 0)
end
# :intercardinal
search =
case Grid.is_intercardinal?(grid) && Grid.in_grid?(grid, node.x - 1, node.y - 1) do
false -> search
true -> check_adjacent_node(search, grid, node, -1, -1)
end
calculate(search, grid)
end
end
end
defp reached_destination(%{end_x: end_x, end_y: end_y}, %{x: x, y: y}) do
end_x == x && end_y == y
end
defp check_adjacent_node(
%Search{} = search,
%Grid{} = grid,
source_node,
x,
y
) do
adjacent_x = source_node.x + x
adjacent_y = source_node.y + y
adjacent_cost = Grid.get_coord_cost(grid, adjacent_x, adjacent_y)
case Grid.is_coord_walkable?(grid, adjacent_x, adjacent_y) &&
can_afford(source_node, adjacent_cost, search.cost_threshold) do
false ->
search
true ->
adjacent_node =
search
|> coordinate_to_node(
source_node,
adjacent_x,
adjacent_y,
adjacent_cost
)
search =
search
|> Search.cache(adjacent_node)
case adjacent_node.visited do
false ->
search
|> Search.push(adjacent_node)
true ->
case source_node.cost + adjacent_cost < adjacent_node.cost do
false ->
search
true ->
adjacent_node =
adjacent_node
|> Map.put(:cost, source_node.cost + adjacent_cost)
|> Map.put(:parent, source_node)
search
|> Search.update(adjacent_node)
end
end
end
end
defp can_afford(_, _, nil), do: true
defp can_afford(source_node, cost, cost_threshold) do
source_node.cost + cost <= cost_threshold
end
defp coordinate_to_node(
search,
parent,
x,
y,
cost
) do
case Search.get_node(search, x, y) do
%Node{} = node ->
node
nil ->
distance =
case is_nil(search.end_x) && is_nil(search.end_y) do
true -> 1
false -> get_distance(x, y, search.end_x, search.end_y)
end
%Node{
parent: parent,
x: x,
y: y,
cost:
case Map.get(parent || %{}, :cost) do
nil -> cost
parent_cost -> parent_cost + cost
end,
distance: distance
}
end
end
defp get_distance(x1, y1, x2, y2) do
dx = abs(x1 - x2)
dy = abs(y1 - y2)
dx + dy
end
end
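# A hedged usage sketch (constructing the grid depends on Pathfinding.Grid,
# not shown here):
#
#     path = Pathfinding.find_path(grid, 0, 0, 3, 2)
#     # a list of coordinates from start to goal, or nil when unreachable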
|
lib/pathfinding.ex
| 0.918151
| 0.761428
|
pathfinding.ex
|
starcoder
|
defmodule Ecto.Changeset do
@moduledoc ~S"""
Changesets allow filtering, casting, validation and
definition of constraints when manipulating structs.
There is an example of working with changesets in the introductory
documentation in the `Ecto` module. The functions `cast/4` and
`change/2` are the usual entry points for creating changesets.
The first one is used to cast and validate external parameters,
such as parameters sent through a form, API, command line, etc.
The second one is used to change data directly from your application.
The remaining functions in this module, such as validations,
constraints, association handling, are about manipulating
changesets. Let's discuss some of this extra functionality.
## External vs internal data
Changesets allow working with both kinds of data:
* internal to the application - for example programmatically generated,
or coming from other subsystems. This use case is primarily covered
by the `change/2` and `put_change/3` functions.
* external to the application - for example data provided by the user in
a form that needs to be type-converted and properly validated. This
use case is primarily covered by the `cast/4` function.
## Validations and constraints
Ecto changesets provide both validations and constraints which
are ultimately turned into errors in case something goes wrong.
The difference between them is that most validations can be
executed without a need to interact with the database and, therefore,
are always executed before attempting to insert or update the entry
in the database. Some validations may happen against the database but
they are inherently unsafe. Those validations start with an `unsafe_`
prefix, such as `unsafe_validate_unique/3`.
On the other hand, constraints rely on the database and are always safe.
As a consequence, validations are always checked before constraints.
Constraints won't even be checked in case validations failed.
Let's see an example:
defmodule User do
use Ecto.Schema
import Ecto.Changeset
schema "users" do
field :name
field :email
field :age, :integer
end
def changeset(user, params \\ %{}) do
user
|> cast(params, [:name, :email, :age])
|> validate_required([:name, :email])
|> validate_format(:email, ~r/@/)
|> validate_inclusion(:age, 18..100)
|> unique_constraint(:email)
end
end
In the `changeset/2` function above, we define three validations.
They check that `name` and `email` fields are present in the
changeset, the e-mail is of the specified format, and the age is
between 18 and 100 - as well as a unique constraint in the email
field.
Let's suppose the e-mail is given but the age is invalid. The
changeset would have the following errors:
changeset = User.changeset(%User{}, %{age: 0, email: "<EMAIL>"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [age: {"is invalid", []}, name: {"can't be blank", []}]
In this case, we haven't checked the unique constraint in the
e-mail field because the data did not validate. Let's fix the
age and the name, and assume that the e-mail already exists in the
database:
changeset = User.changeset(%User{}, %{age: 42, name: "Mary", email: "<EMAIL>"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [email: {"has already been taken", []}]
Validations and constraints define an explicit boundary when the check
happens. By moving constraints to the database, we also provide a safe,
correct and data-race free means of checking the user input.
### Deferred constraints
Some databases support deferred constraints, i.e., constraints which are
checked at the end of the transaction rather than at the end of each statement.
Changesets do not support this type of constraints. When working with deferred
constraints, a violation while invoking `Repo.insert/2` or `Repo.update/2` won't
return `{:error, changeset}`, but rather raise an error at the end of the
transaction.
## Empty values
Many times, the data given on cast needs to be further pruned, specially
regarding empty values. For example, if you are gathering data to be
cast from the command line or through an HTML form or any other text-based
format, it is likely those means cannot express nil values. For
those reasons, changesets include the concept of empty values, which are
values that will be automatically converted to the field's default value
on `cast/4`. Those values are stored in the changeset `empty_values` field
and default to `[""]`. You can also pass the `:empty_values` option to
`cast/4` in case you want to change how a particular `cast/4` works.
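For example, to also treat the string `"NULL"` as an empty value (a sketch
using the `:empty_values` option):

    Ecto.Changeset.cast(user, params, [:name], empty_values: ["", "NULL"])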
## Associations, embeds and on replace
Using changesets you can work with associations as well as with embedded
structs. There are two primary APIs:
* `cast_assoc/3` and `cast_embed/3` - those functions are used when
working with external data. In particular, they allow you to change
associations and embeds alongside the parent struct, all at once.
* `put_assoc/4` and `put_embed/4` - it allows you to replace the
association or embed as a whole. This can be used to move associated
data from one entry to another, to completely remove or replace
existing entries.
See the documentation for those functions for more information.
### The `:on_replace` option
When using any of those APIs, you may run into situations where Ecto sees
data is being replaced. For example, imagine a Post has many Comments where
the comments have IDs 1, 2 and 3. If you call `cast_assoc/3` passing only
the IDs 1 and 2, Ecto will consider 3 to be "replaced" and it will raise
by default. Such behaviour can be changed when defining the relation by
setting `:on_replace` option when defining your association/embed according
to the values below:
* `:raise` (default) - do not allow removing association or embedded
data via parent changesets
* `:mark_as_invalid` - if attempting to remove the association or
embedded data via parent changeset - an error will be added to the parent
changeset, and it will be marked as invalid
* `:nilify` - sets owner reference column to `nil` (available only for
associations). Use this on a `belongs_to` column to allow the association
to be cleared out so that it can be set to a new value. Will set `action`
on associated changesets to `:replace`
* `:update` - updates the association, available only for `has_one` and `belongs_to`.
This option will update all the fields given to the changeset including the id
for the association
* `:delete` - removes the association or related data from the database.
This option has to be used carefully (see below). Will set `action` on associated
changesets to `:replace`
The `:delete` option in particular must be used carefully as it would allow
users to delete any associated data by simply not sending any data for a given
field. If you need deletion, it is often preferred to manually mark the changeset
for deletion if a `delete` field is set in the params, as in the example below:
defmodule Comment do
use Ecto.Schema
import Ecto.Changeset
schema "comments" do
field :body, :string
end
def changeset(comment, %{"delete" => "true"}) do
%{Ecto.Changeset.change(comment) | action: :delete}
end
def changeset(comment, params) do
cast(comment, params, [:body])
end
end
## Schemaless changesets
In the changeset examples so far, we have always used changesets to validate
and cast data contained in a struct defined by an Ecto schema, such as the `%User{}`
struct defined by the `User` module.
However, changesets can also be used with "regular" structs, by passing a
tuple with the data and its types:
user = %User{}
types = %{first_name: :string, last_name: :string, email: :string}
changeset =
{user, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
where the user struct refers to the definition in the following module:
defmodule User do
  defstruct [:first_name, :last_name, :email]
end
Changesets can also be used with data in a plain map, by following the same API:
data = %{}
types = %{name: :string}
params = %{name: "Callum"}
changeset =
{data, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
Such functionality makes Ecto extremely useful for casting, validating and
pruning data even when that data is not meant to be persisted to the database.
### Changeset actions
Changesets have an action field which is usually set by `Ecto.Repo`
whenever one of the operations such as `insert` or `update` is called:
changeset = User.changeset(%User{}, %{age: 42, email: "<EMAIL>"})
{:error, changeset} = Repo.insert(changeset)
changeset.action
#=> :insert
This means that when working with changesets that are not meant to be
persisted to the database, such as schemaless changesets, you may need
to explicitly set the action to one specific value. Frameworks such as
Phoenix use the action value to define how HTML forms should act.
Instead of setting the action manually, you may use `apply_action/2` that
emulates operations such as `Repo.insert`. `apply_action/2` will return
`{:ok, data}` with the changes applied if the changeset is valid or
`{:error, changeset}`, with the given `action` set in the changeset in
case of errors.
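As a sketch, a schemaless changeset can emulate an insert. The types and
params below are illustrative:

    types = %{name: :string}

    changeset =
      {%{}, types}
      |> Ecto.Changeset.cast(%{"name" => "Callum"}, Map.keys(types))
      |> Ecto.Changeset.validate_required([:name])

    # emulates Repo.insert without touching the database
    {:ok, %{name: "Callum"}} = Ecto.Changeset.apply_action(changeset, :insert)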
## The Ecto.Changeset struct
The public fields are:
* `valid?` - Stores whether the changeset is valid
* `data` - The changeset source data, for example, a struct
* `params` - The parameters as given on changeset creation
* `changes` - The `changes` from parameters that were approved in casting
* `errors` - All errors from validations
* `required` - All required fields as a list of atoms
* `action` - The action to be performed with the changeset
* `types` - Cache of the data's field types
* `empty_values` - A list of values to be considered empty
* `repo` - The repository applying the changeset (only set after a Repo function is called)
* `repo_opts` - A keyword list of options given to the underlying repository operation
The following fields are private and must not be accessed directly.
* `validations`
* `constraints`
* `filters`
* `prepare`
### Redacting fields in inspect
To hide a field's value from the inspect protocol of `Ecto.Changeset`, mark
the field as `redact: true` in the schema, and it will be displayed with the
value `**redacted**`.
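A minimal sketch, assuming a hypothetical users schema:

    schema "users" do
      # inspect(changeset) will show **redacted** for this field
      field :password, :string, redact: true
    end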
"""
require Ecto.Query
alias __MODULE__
alias Ecto.Changeset.Relation
@empty_values [""]
# If a new field is added here, def merge must be adapted
defstruct valid?: false, data: nil, params: nil, changes: %{},
errors: [], validations: [], required: [], prepare: [],
constraints: [], filters: %{}, action: nil, types: nil,
empty_values: @empty_values, repo: nil, repo_opts: []
@type t(data_type) :: %Changeset{valid?: boolean(),
repo: atom | nil,
repo_opts: Keyword.t,
data: data_type,
params: %{optional(String.t) => term} | nil,
changes: %{optional(atom) => term},
required: [atom],
prepare: [(t -> t)],
errors: [{atom, error}],
constraints: [constraint],
validations: [{atom, term}],
filters: %{optional(atom) => term},
action: action,
types: nil | %{atom => Ecto.Type.t}}
@type t :: t(Ecto.Schema.t | map | nil)
@type error :: {String.t, Keyword.t}
@type action :: nil | :insert | :update | :delete | :replace | :ignore | atom
@type constraint :: %{type: :check | :exclusion | :foreign_key | :unique,
constraint: String.t, match: :exact | :suffix | :prefix,
field: atom, error_message: String.t, error_type: atom}
@type data :: map()
@type types :: map()
@number_validators %{
less_than: {&</2, "must be less than %{number}"},
greater_than: {&>/2, "must be greater than %{number}"},
less_than_or_equal_to: {&<=/2, "must be less than or equal to %{number}"},
greater_than_or_equal_to: {&>=/2, "must be greater than or equal to %{number}"},
equal_to: {&==/2, "must be equal to %{number}"},
not_equal_to: {&!=/2, "must be not equal to %{number}"}
}
@relations [:embed, :assoc]
@match_types [:exact, :suffix, :prefix]
@doc """
Wraps the given data in a changeset or adds changes to a changeset.
`changes` is a map or keyword list where the key is an atom representing a
field, association or embed and the value is a term. Note the `value` is
directly stored in the changeset with no validation whatsoever. For this
reason, this function is meant for working with data internal to the
application.
When changing embeds and associations, see `put_assoc/4` for a complete
reference on the accepted values.
This function is useful for:
* wrapping a struct inside a changeset
* directly changing a struct without performing castings nor validations
* directly bulk-adding changes to a changeset
Changed attributes will only be added if the change does not have the
same value as the field in the data.
When a changeset is passed as the first argument, the changes passed as the
second argument are merged over the changes already in the changeset if they
differ from the values in the struct.
When a `{data, types}` is passed as the first argument, a changeset is
created with the given data and types and marked as valid.
See `cast/4` if you'd prefer to cast and validate external parameters.
## Examples
iex> changeset = change(%Post{})
%Ecto.Changeset{...}
iex> changeset.valid?
true
iex> changeset.changes
%{}
iex> changeset = change(%Post{author: "bar"}, title: "title")
iex> changeset.changes
%{title: "title"}
iex> changeset = change(%Post{title: "title"}, title: "title")
iex> changeset.changes
%{}
iex> changeset = change(changeset, %{title: "new title", body: "body"})
iex> changeset.changes.title
"new title"
iex> changeset.changes.body
"body"
"""
@spec change(Ecto.Schema.t | t | {data, types}, %{atom => term} | Keyword.t) :: t
def change(data, changes \\ %{})
def change({data, types}, changes) when is_map(data) do
change(%Changeset{data: data, types: Enum.into(types, %{}), valid?: true}, changes)
end
def change(%Changeset{types: nil}, _changes) do
raise ArgumentError, "changeset does not have types information"
end
def change(%Changeset{changes: changes, types: types} = changeset, new_changes)
when is_map(new_changes) or is_list(new_changes) do
{changes, errors, valid?} =
get_changed(changeset.data, types, changes, new_changes,
changeset.errors, changeset.valid?)
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
def change(%{__struct__: struct} = data, changes) when is_map(changes) or is_list(changes) do
types = struct.__changeset__()
{changes, errors, valid?} = get_changed(data, types, %{}, changes, [], true)
%Changeset{valid?: valid?, data: data, changes: changes,
errors: errors, types: types}
end
defp get_changed(data, types, old_changes, new_changes, errors, valid?) do
Enum.reduce(new_changes, {old_changes, errors, valid?}, fn
{key, value}, {changes, errors, valid?} ->
put_change(data, changes, errors, valid?, key, value, Map.get(types, key))
end)
end
@doc """
Applies the given `params` as changes for the given `data` according to
the given set of `permitted` keys. Returns a changeset.
The given `data` may be either a changeset, a schema struct or a `{data, types}`
tuple. The second argument is a map of `params` that are cast according
to the type information from `data`. `params` is a map with string keys
or a map with atom keys containing potentially invalid data.
During casting, all `permitted` parameters whose values match the specified
type information will have their key name converted to an atom and stored
together with the value as a change in the `:changes` field of the changeset.
All parameters that are not explicitly permitted are ignored.
If casting of all fields is successful, the changeset is returned as valid.
Note that `cast/4` validates the types in the `params`, but not in the given
`data`.
## Options
* `:empty_values` - a list of values to be considered as empty when casting.
All empty values are discarded on cast. Defaults to `[""]`
## Examples
iex> changeset = cast(post, params, [:title])
iex> if changeset.valid? do
...> Repo.update!(changeset)
...> end
Passing a changeset as the first argument:
iex> changeset = cast(post, %{title: "Hello"}, [:title])
iex> new_changeset = cast(changeset, %{title: "Foo", body: "World"}, [:body])
iex> new_changeset.params
%{"title" => "Hello", "body" => "World"}
Or creating a changeset from a simple map with types:
iex> data = %{title: "hello"}
iex> types = %{title: :string}
iex> changeset = cast({data, types}, %{title: "world"}, [:title])
iex> apply_changes(changeset)
%{title: "world"}
## Composing casts
`cast/4` also accepts a changeset as its first argument. In such cases, all
the effects caused by the call to `cast/4` (additional errors and changes)
are simply added to the ones already present in the argument changeset.
Parameters are merged (**not deep-merged**) and the ones passed to `cast/4`
take precedence over the ones already in the changeset.
"""
@spec cast(Ecto.Schema.t | t | {data, types},
%{binary => term} | %{atom => term} | :invalid,
[atom],
Keyword.t) :: t
def cast(data, params, permitted, opts \\ [])
def cast(_data, %{__struct__: _} = params, _permitted, _opts) do
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a :map, got: `#{inspect(params)}`"
end
def cast({data, types}, params, permitted, opts) when is_map(data) do
cast(data, types, %{}, params, permitted, opts)
end
def cast(%Changeset{types: nil}, _params, _permitted, _opts) do
raise ArgumentError, "changeset does not have types information"
end
def cast(%Changeset{changes: changes, data: data, types: types, empty_values: empty_values} = changeset,
params, permitted, opts) do
opts = Keyword.put_new(opts, :empty_values, empty_values)
new_changeset = cast(data, types, changes, params, permitted, opts)
cast_merge(changeset, new_changeset)
end
def cast(%{__struct__: module} = data, params, permitted, opts) do
cast(data, module.__changeset__(), %{}, params, permitted, opts)
end
defp cast(%{} = data, %{} = types, %{} = changes, :invalid, permitted, _opts) when is_list(permitted) do
_ = Enum.each(permitted, &cast_key/1)
%Changeset{params: nil, data: data, valid?: false, errors: [],
changes: changes, types: types}
end
defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) when is_list(permitted) do
empty_values = Keyword.get(opts, :empty_values, @empty_values)
params = convert_params(params)
defaults = case data do
%{__struct__: struct} -> struct.__struct__()
%{} -> %{}
end
{changes, errors, valid?} =
Enum.reduce(permitted, {changes, [], true},
&process_param(&1, params, types, data, empty_values, defaults, &2))
%Changeset{params: params, data: data, valid?: valid?,
errors: Enum.reverse(errors), changes: changes, types: types}
end
defp cast(%{}, %{}, %{}, params, permitted, _opts) when is_list(permitted) do
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a :map, got: `#{inspect params}`"
end
defp process_param(key, params, types, data, empty_values, defaults, {changes, errors, valid?}) do
{key, param_key} = cast_key(key)
type = cast_type!(types, key)
current =
case changes do
%{^key => value} -> value
_ -> Map.get(data, key)
end
case cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
{:ok, value, valid?} ->
{Map.put(changes, key, value), errors, valid?}
:missing ->
{changes, errors, valid?}
{:invalid, custom_errors} ->
{message, new_errors} =
custom_errors
|> Keyword.put_new(:validation, :cast)
|> Keyword.put(:type, type)
|> Keyword.pop(:message, "is invalid")
{changes, [{key, {message, new_errors}} | errors], false}
end
end
defp cast_type!(types, key) do
case types do
%{^key => {tag, _}} when tag in @relations ->
raise "casting #{tag}s with cast/4 for #{inspect key} field is not supported, use cast_#{tag}/3 instead"
%{^key => type} ->
type
_ ->
known_fields = types |> Map.keys() |> Enum.map_join(", ", &inspect/1)
raise ArgumentError,
"unknown field `#{inspect(key)}` given to cast. Either the field does not exist or it is a " <>
":through association (which are read-only). The known fields are: #{known_fields}"
end
end
defp cast_key(key) when is_atom(key),
do: {key, Atom.to_string(key)}
defp cast_key(key),
do: raise ArgumentError, "cast/3 expects a list of atom keys, got: `#{inspect key}`"
defp cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
case params do
%{^param_key => value} ->
value = if value in empty_values, do: Map.get(defaults, key), else: value
case Ecto.Type.cast(type, value) do
{:ok, value} ->
if Ecto.Type.equal?(type, current, value) do
:missing
else
{:ok, value, valid?}
end
:error ->
{:invalid, []}
{:error, custom_errors} when is_list(custom_errors) ->
{:invalid, custom_errors}
end
_ ->
:missing
end
end
# TODO: Remove branch when we require Elixir v1.10+.
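# Normalizes params to string keys: atom-keyed maps are converted to
# string-keyed maps, string-keyed maps pass through as-is, and maps
# with mixed key types raise Ecto.CastError.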
if Code.ensure_loaded?(:maps) and function_exported?(:maps, :iterator, 1) do
defp convert_params(params) do
case :maps.next(:maps.iterator(params)) do
{key, _, _} when is_atom(key) ->
for {key, value} <- params, into: %{} do
if is_atom(key) do
{Atom.to_string(key), value}
else
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a map with atoms or string keys, " <>
"got a map with mixed keys: #{inspect params}"
end
end
_ ->
params
end
end
else
defp convert_params(params) do
params
|> Enum.reduce(nil, fn
{key, _value}, nil when is_binary(key) ->
nil
{key, _value}, _ when is_binary(key) ->
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a map with atoms or string keys, " <>
"got a map with mixed keys: #{inspect params}"
{key, value}, nil when is_atom(key) ->
[{Atom.to_string(key), value}]
{key, value}, acc when is_atom(key) ->
[{Atom.to_string(key), value} | acc]
end)
|> case do
nil -> params
list -> :maps.from_list(list)
end
end
end
## Casting related
@doc """
Casts the given association with the changeset parameters.
This function should be used when working with the entire association at
once (and not a single element of a many-style association) and receiving
data external to the application.
`cast_assoc/3` works by matching the records extracted from the database
and comparing them with the parameters received from an external source.
Therefore, it is expected that the data in the changeset has explicitly
preloaded the association being cast and that all of the IDs exist and
are unique.
For example, imagine a user has-many addresses relationship, where the
post data is sent as follows
%{"name" => "<NAME>", "addresses" => [
%{"street" => "somewhere", "country" => "brazil", "id" => 1},
%{"street" => "elsewhere", "country" => "poland"},
]}
and then
User
|> Repo.get!(id)
|> Repo.preload(:addresses) # Only required when updating data
|> Ecto.Changeset.cast(params, [])
|> Ecto.Changeset.cast_assoc(:addresses, with: &MyApp.Address.changeset/2)
The parameters for the given association will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_assoc/3` will match those
parameters with the associations already in the changeset record.
Once `cast_assoc/3` is called, Ecto will compare each parameter
with the user's already preloaded addresses and act as follows:
* If the parameter does not contain an ID, the parameter data
will be passed to `MyApp.Address.changeset/2` with a new struct
and become an insert operation
* If the parameter contains an ID and there is no associated child
with such ID, the parameter data will be passed to
`MyApp.Address.changeset/2` with a new struct and become an insert
operation
* If the parameter contains an ID and there is an associated child
with such ID, the parameter data will be passed to
`MyApp.Address.changeset/2` with the existing struct and become an
update operation
* If there is an associated child with an ID and its ID is not given
as parameter, the `:on_replace` callback for that association will
be invoked (see the `:on_replace` section in the module documentation)
Every time the `MyApp.Address.changeset/2` function is invoked, it must
return a changeset. Once the parent changeset is given to an `Ecto.Repo`
function, all entries will be inserted/updated/deleted within the same
transaction.
Note developers are allowed to explicitly set the `:action` field of a
changeset to instruct Ecto how to act in certain situations. Let's suppose
that, if one of the associations has only empty fields, you want to ignore
the entry altogether instead of showing an error. The changeset function could
be written like this:
def changeset(struct, params) do
struct
|> cast(params, [:title, :body])
|> validate_required([:title, :body])
|> case do
%{valid?: false, changes: changes} = changeset when changes == %{} ->
# If the changeset is invalid and has no changes, it is
# because all required fields are missing, so we ignore it.
%{changeset | action: :ignore}
changeset ->
changeset
end
end
## Partial changes for many-style associations
By preloading an association using a custom query you can confine the behavior
of `cast_assoc/3`. This opens up the possibility of working on a subset of the
data, instead of all associations in the database.
Taking the initial example of users having addresses, imagine those addresses
are set up to belong to a country. If you want to allow users to bulk edit all
addresses that belong to a single country, you can do so by changing the preload
query:
query = from MyApp.Address, where: [country: ^edit_country]
User
|> Repo.get!(id)
|> Repo.preload(addresses: query)
|> Ecto.Changeset.cast(params, [])
|> Ecto.Changeset.cast_assoc(:addresses)
This will allow you to cast and update only the association for the given country.
The important point for partial changes is that any addresses which were
not preloaded won't be changed.
## Options
* `:required` - if the association is a required field
* `:required_message` - the message on failure, defaults to "can't be blank"
* `:invalid_message` - the message on failure, defaults to "is invalid"
* `:force_update_on_change` - force the parent record to be updated in the
repository if there is a change, defaults to `true`
* `:with` - the function to build the changeset from params. Defaults to the
`changeset/2` function of the associated module. It can be changed by passing
an anonymous function or an MFA tuple. If using an MFA, the default changeset
and parameters arguments will be prepended to the given args. For example,
using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
`Author.special_changeset(changeset, params, "hello")`
"""
def cast_assoc(changeset, name, opts \\ []) when is_atom(name) do
cast_relation(:assoc, changeset, name, opts)
end
@doc """
Casts the given embed with the changeset parameters.
The parameters for the given embed will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_embed/3` will match those
parameters with the embeds already in the changeset record.
See `cast_assoc/3` for an example of working with casts and
associations which would also apply for embeds.
The changeset must have been previously `cast` using
`cast/4` before this function is invoked.
## Options
* `:required` - if the embed is a required field
* `:required_message` - the message on failure, defaults to "can't be blank"
* `:invalid_message` - the message on failure, defaults to "is invalid"
* `:force_update_on_change` - force the parent record to be updated in the
repository if there is a change, defaults to `true`
* `:with` - the function to build the changeset from params. Defaults to the
`changeset/2` function of the embedded module. It can be changed by passing
an anonymous function or an MFA tuple. If using an MFA, the default changeset
and parameters arguments will be prepended to the given args. For example,
using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
`Author.special_changeset(changeset, params, "hello")`
"""
def cast_embed(changeset, name, opts \\ []) when is_atom(name) do
cast_relation(:embed, changeset, name, opts)
end
defp cast_relation(type, %Changeset{data: data, types: types}, _name, _opts)
when data == nil or types == nil do
raise ArgumentError, "cast_#{type}/3 expects the changeset to be cast. " <>
"Please call cast/4 before calling cast_#{type}/3"
end
defp cast_relation(type, %Changeset{} = changeset, key, opts) do
{key, param_key} = cast_key(key)
%{data: data, types: types, params: params, changes: changes} = changeset
%{related: related} = relation = relation!(:cast, type, key, Map.get(types, key))
params = params || %{}
{changeset, required?} =
if opts[:required] do
{update_in(changeset.required, &[key|&1]), true}
else
{changeset, false}
end
on_cast = Keyword.get_lazy(opts, :with, fn -> on_cast_default(type, related) end)
original = Map.get(data, key)
changeset =
case Map.fetch(params, param_key) do
{:ok, value} ->
current = Relation.load!(data, original)
case Relation.cast(relation, data, value, current, on_cast) do
{:ok, change, relation_valid?} when change != original ->
valid? = changeset.valid? and relation_valid?
changes = Map.put(changes, key, change)
changeset = %{force_update(changeset, opts) | changes: changes, valid?: valid?}
missing_relation(changeset, key, current, required?, relation, opts)
{:error, {message, meta}} ->
meta = [validation: type] ++ meta
error = {key, {message(opts, :invalid_message, message), meta}}
%{changeset | errors: [error | changeset.errors], valid?: false}
# ignore or ok with change == original
_ ->
missing_relation(changeset, key, current, required?, relation, opts)
end
:error ->
missing_relation(changeset, key, original, required?, relation, opts)
end
update_in changeset.types[key], fn {type, relation} ->
{type, %{relation | on_cast: on_cast}}
end
end
defp on_cast_default(type, module) do
fn struct, params ->
try do
module.changeset(struct, params)
rescue
e in UndefinedFunctionError ->
case __STACKTRACE__ do
[{^module, :changeset, args_or_arity, _}] when args_or_arity == 2
when length(args_or_arity) == 2 ->
raise ArgumentError, """
the module #{inspect module} does not define a changeset/2 function,
which is used by cast_#{type}/3. You need to either:
1. implement the #{type}.changeset/2 function
2. pass the :with option to cast_#{type}/3 with an anonymous
function that expects 2 args or an MFA tuple
When using an inline embed, the :with option must be given
"""
stacktrace ->
reraise e, stacktrace
end
end
end
end
defp missing_relation(%{changes: changes, errors: errors} = changeset,
name, current, required?, relation, opts) do
current_changes = Map.get(changes, name, current)
if required? and Relation.empty?(relation, current_changes) do
errors = [{name, {message(opts, :required_message, "can't be blank"), [validation: :required]}} | errors]
%{changeset | errors: errors, valid?: false}
else
changeset
end
end
defp relation!(_op, type, _name, {type, relation}),
do: relation
defp relation!(op, :assoc, name, nil),
do: raise(ArgumentError, "cannot #{op} assoc `#{name}`, assoc `#{name}` not found. Make sure it is spelled correctly and that the association type is not read-only")
defp relation!(op, type, name, nil),
do: raise(ArgumentError, "cannot #{op} #{type} `#{name}`, #{type} `#{name}` not found. Make sure that it exists and is spelled correctly")
defp relation!(op, type, name, {other, _}) when other in @relations,
do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{other}`")
defp relation!(op, type, name, schema_type),
do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{inspect schema_type}`")
defp force_update(changeset, opts) do
if Keyword.get(opts, :force_update_on_change, true) do
put_in(changeset.repo_opts[:force], true)
else
changeset
end
end
## Working with changesets
@doc """
Merges two changesets.
This function merges two changesets provided they have been applied to the
same data (their `:data` field is equal); if the data differs, an
`ArgumentError` exception is raised. If one of the changesets has a `:repo`
field which is not `nil`, then the value of that field is used as the `:repo`
field of the resulting changeset; if both changesets have a non-`nil` and
different `:repo` field, an `ArgumentError` exception is raised.
The other fields are merged with the following criteria:
* `params` - params are merged (not deep-merged) giving precedence to the
params of `changeset2` in case of a conflict. If both changesets have their
`:params` fields set to `nil`, the resulting changeset will have its params
set to `nil` too.
* `changes` - changes are merged giving precedence to the `changeset2`
changes.
* `errors` and `validations` - they are simply concatenated.
* `required` - required fields are merged; all the fields that appear
in the required list of both changesets are moved to the required
list of the resulting changeset.
## Examples
iex> changeset1 = cast(%Post{}, %{title: "Title"}, [:title])
iex> changeset2 = cast(%Post{}, %{title: "New title", body: "Body"}, [:title, :body])
iex> changeset = merge(changeset1, changeset2)
iex> changeset.changes
%{body: "Body", title: "New title"}
iex> changeset1 = cast(%Post{body: "Body"}, %{title: "Title"}, [:title])
iex> changeset2 = cast(%Post{}, %{title: "New title"}, [:title])
iex> merge(changeset1, changeset2)
** (ArgumentError) different :data when merging changesets
"""
@spec merge(t, t) :: t
def merge(changeset1, changeset2)
def merge(%Changeset{data: data} = cs1, %Changeset{data: data} = cs2) do
new_repo = merge_identical(cs1.repo, cs2.repo, "repos")
new_repo_opts = Keyword.merge(cs1.repo_opts, cs2.repo_opts)
new_action = merge_identical(cs1.action, cs2.action, "actions")
new_filters = Map.merge(cs1.filters, cs2.filters)
new_validations = cs1.validations ++ cs2.validations
new_constraints = cs1.constraints ++ cs2.constraints
cast_merge %{cs1 | repo: new_repo, repo_opts: new_repo_opts, filters: new_filters,
action: new_action, validations: new_validations,
constraints: new_constraints}, cs2
end
def merge(%Changeset{}, %Changeset{}) do
raise ArgumentError, message: "different :data when merging changesets"
end
defp cast_merge(cs1, cs2) do
new_params = (cs1.params || cs2.params) && Map.merge(cs1.params || %{}, cs2.params || %{})
new_changes = Map.merge(cs1.changes, cs2.changes)
new_errors = Enum.uniq(cs1.errors ++ cs2.errors)
new_required = Enum.uniq(cs1.required ++ cs2.required)
new_types = cs1.types || cs2.types
new_valid? = cs1.valid? and cs2.valid?
%{cs1 | params: new_params, valid?: new_valid?, errors: new_errors, types: new_types,
changes: new_changes, required: new_required}
end
defp merge_identical(object, nil, _thing), do: object
defp merge_identical(nil, object, _thing), do: object
defp merge_identical(object, object, _thing), do: object
defp merge_identical(lhs, rhs, thing) do
raise ArgumentError, "different #{thing} (`#{inspect lhs}` and " <>
"`#{inspect rhs}`) when merging changesets"
end
@doc """
Fetches the given field from changes or from the data.
While `fetch_change/2` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `:error` if
no value is available.
For relations, these functions will return the changeset's original
data with changes applied. To retrieve raw changesets,
please use `fetch_change/2`.
## Examples
iex> post = %Post{title: "Foo", body: "Bar baz bong"}
iex> changeset = change(post, %{title: "New title"})
iex> fetch_field(changeset, :title)
{:changes, "New title"}
iex> fetch_field(changeset, :body)
{:data, "Bar baz bong"}
iex> fetch_field(changeset, :not_a_field)
:error
"""
@spec fetch_field(t, atom) :: {:changes, term} | {:data, term} | :error
def fetch_field(%Changeset{changes: changes, data: data, types: types}, key) when is_atom(key) do
case Map.fetch(changes, key) do
{:ok, value} ->
{:changes, change_as_field(types, key, value)}
:error ->
case Map.fetch(data, key) do
{:ok, value} -> {:data, data_as_field(data, types, key, value)}
:error -> :error
end
end
end
@doc """
Same as `fetch_field/2` but returns the value or raises if the given key was not found.
## Examples
iex> post = %Post{title: "Foo", body: "Bar baz bong"}
iex> changeset = change(post, %{title: "New title"})
iex> fetch_field!(changeset, :title)
"New title"
iex> fetch_field!(changeset, :other)
** (KeyError) key :other not found in: %Post{...}
"""
@spec fetch_field!(t, atom) :: term
def fetch_field!(changeset, key) do
case fetch_field(changeset, key) do
{_, value} ->
value
:error ->
raise KeyError, key: key, term: changeset.data
end
end
@doc """
Gets a field from changes or from the data.
While `get_change/3` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `default` if
no value is available.
For relations, these functions will return the changeset data
with changes applied. To retrieve raw changesets, please use `get_change/3`.
## Examples
iex> post = %Post{title: "A title", body: "My body is a cage"}
iex> changeset = change(post, %{title: "A new title"})
iex> get_field(changeset, :title)
"A new title"
iex> get_field(changeset, :not_a_field, "Told you, not a field!")
"Told you, not a field!"
"""
@spec get_field(t, atom, term) :: term
def get_field(%Changeset{changes: changes, data: data, types: types}, key, default \\ nil) do
case Map.fetch(changes, key) do
{:ok, value} ->
change_as_field(types, key, value)
:error ->
case Map.fetch(data, key) do
{:ok, value} -> data_as_field(data, types, key, value)
:error -> default
end
end
end
defp change_as_field(types, key, value) do
case Map.get(types, key) do
{tag, relation} when tag in @relations ->
Relation.apply_changes(relation, value)
_other ->
value
end
end
defp data_as_field(data, types, key, value) do
case Map.get(types, key) do
{tag, _relation} when tag in @relations ->
Relation.load!(data, value)
_other ->
value
end
end
@doc """
Fetches a change from the given changeset.
This function only looks at the `:changes` field of the given `changeset` and
returns `{:ok, value}` if the change is present or `:error` if it's not.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> fetch_change(changeset, :title)
{:ok, "bar"}
iex> fetch_change(changeset, :body)
:error
"""
@spec fetch_change(t, atom) :: {:ok, term} | :error
def fetch_change(%Changeset{changes: changes} = _changeset, key) when is_atom(key) do
Map.fetch(changes, key)
end
@doc """
Same as `fetch_change/2` but returns the value or raises if the given key was not found.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> fetch_change!(changeset, :title)
"bar"
iex> fetch_change!(changeset, :body)
** (KeyError) key :body not found in: %{title: "bar"}
"""
@spec fetch_change!(t, atom) :: term
def fetch_change!(changeset, key) do
case fetch_change(changeset, key) do
{:ok, value} ->
value
:error ->
raise KeyError, key: key, term: changeset.changes
end
end
@doc """
Gets a change or returns a default value.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> get_change(changeset, :title)
"bar"
iex> get_change(changeset, :body)
nil
"""
@spec get_change(t, atom, term) :: term
def get_change(%Changeset{changes: changes} = _changeset, key, default \\ nil) when is_atom(key) do
Map.get(changes, key, default)
end
@doc """
Updates a change.
The given `function` is invoked with the change value only if there
is a change for the given `key`. Note that the value of the change
can still be `nil` (unless the field was marked as required on `validate_required/3`).
## Examples
iex> changeset = change(%Post{}, %{impressions: 1})
iex> changeset = update_change(changeset, :impressions, &(&1 + 1))
iex> changeset.changes.impressions
2
"""
@spec update_change(t, atom, (term -> term)) :: t
def update_change(%Changeset{changes: changes} = changeset, key, function) when is_atom(key) do
case Map.fetch(changes, key) do
{:ok, value} ->
put_change(changeset, key, function.(value))
:error ->
changeset
end
end
@doc """
Puts a change on the given `key` with `value`.
`key` is an atom that represents any field, embed or
association in the changeset. Note the `value` is directly
stored in the changeset with no validation whatsoever.
For this reason, this function is meant for working with
data internal to the application.
If the change is already present, it is overridden with
the new value. If the change has the same value as in the
changeset data, it is not added to the list of changes.
When changing embeds and associations, see `put_assoc/4`
for a complete reference on the accepted values.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> changeset = put_change(changeset, :title, "bar")
iex> changeset.changes
%{title: "bar"}
iex> changeset = put_change(changeset, :author, "bar")
iex> changeset.changes
%{title: "bar"}
"""
@spec put_change(t, atom, term) :: t
def put_change(%Changeset{types: nil}, _key, _value) do
raise ArgumentError, "changeset does not have types information"
end
def put_change(%Changeset{data: data, types: types} = changeset, key, value) do
type = Map.get(types, key)
{changes, errors, valid?} =
put_change(data, changeset.changes, changeset.errors, changeset.valid?, key, value, type)
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
defp put_change(data, changes, errors, valid?, key, value, {tag, relation})
when tag in @relations do
original = Map.get(data, key)
current = Relation.load!(data, original)
case Relation.change(relation, value, current) do
{:ok, change, relation_valid?} when change != original ->
{Map.put(changes, key, change), errors, valid? and relation_valid?}
{:error, error} ->
{changes, [{key, error} | errors], false}
# ignore or ok with change == original
_ ->
{Map.delete(changes, key), errors, valid?}
end
end
defp put_change(data, _changes, _errors, _valid?, key, _value, nil) when is_atom(key) do
raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(data)}"
end
defp put_change(_data, _changes, _errors, _valid?, key, _value, nil) when not is_atom(key) do
raise ArgumentError, "field names given to change/put_change must be atoms, got: `#{inspect(key)}`"
end
defp put_change(data, changes, errors, valid?, key, value, type) do
if not Ecto.Type.equal?(type, Map.get(data, key), value) do
{Map.put(changes, key, value), errors, valid?}
else
{Map.delete(changes, key), errors, valid?}
end
end
@doc """
Puts the given association entry or entries as a change in the changeset.
This function is used to work with associations as a whole. For example,
if a Post has many Comments, it allows you to add, remove or change all
comments at once. If your goal is to simply add a new comment to a post,
then it is preferred to do so manually, as we will describe later in the
"Example: Adding a comment to a post" section.
This function requires the associated data to have been preloaded, except
when the parent changeset has been newly built and not yet persisted.
Missing data will invoke the `:on_replace` behaviour defined on the
association.
For associations with cardinality one, `nil` can be used to remove the existing
entry. For associations with many entries, an empty list may be given instead.
If the association has no changes, it will be skipped. If the association is
invalid, the changeset will be marked as invalid. If the given value is not any
of the values below, it will raise.
The associated data may be given in different formats:
* a map or a keyword list representing changes to be applied to the
associated data. A map or keyword list can be given to update the
associated data as long as they have matching primary keys.
For example, `put_assoc(changeset, :comments, [%{id: 1, title: "changed"}])`
will locate the comment with `:id` of 1 and update its title.
If no comment with such id exists, one is created on the fly.
Since only a single comment was given, any other associated comment
will be replaced. In all cases, the keys are expected to be atoms.
This API is mostly used in scripts and tests, to make it
straightforward to create schemas with associations at once, such as:
Ecto.Changeset.change(
%Post{},
title: "foo",
comments: [
%{body: "first"},
%{body: "second"}
]
)
* changesets or structs - when a changeset or struct is given, they
are treated as the canonical data and the associated data currently
stored in the association is ignored. For instance, the operation
`put_assoc(changeset, :comments, [%Comment{id: 1, title: "changed"}])`
will send the `Comment` as is to the database, ignoring any comment
currently associated, even if a matching ID is found. If the comment
is already persisted to the database, then `put_assoc/4` only takes
care of guaranteeing that the comments and the parent data are associated.
This is extremely useful when associating existing data, as we will see
in the "Example: Adding tags to a post" section.
Once the parent changeset is given to an `Ecto.Repo` function, all entries
will be inserted/updated/deleted within the same transaction.
## Example: Adding a comment to a post
Imagine a relationship where Post has many comments and you want to add a
new comment to an existing post. While it is possible to use `put_assoc/4`
for this, it would be unnecessarily complex. Let's see an example.
First, let's fetch the post with all existing comments:
post = Post |> Repo.get!(1) |> Repo.preload(:comments)
The following approach is **wrong**:
post
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "bad example!"}])
|> Repo.update!()
The reason why the example above is wrong is because `put_assoc/4` always
works with the **full data**. So the example above will effectively **erase
all previous comments** and only keep the comment you are currently adding.
Instead, you could try:
post
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "so-so example!"} | post.comments])
|> Repo.update!()
In this example, we prepend the new comment to the list of existing comments.
Ecto will diff the list of comments currently in `post` with the list of comments
given, and correctly insert the new comment to the database. Note, however,
Ecto is doing a lot of work just to figure out something we knew since the
beginning, which is that there is only one new comment.
In cases like above, when you want to work only on a single entry, it is
much easier to simply work on the associated entry directly. For example, we
could instead set the `post` association in the comment:
%Comment{body: "better example"}
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:post, post)
|> Repo.insert!()
Alternatively, we can make sure that when we create a comment, it is already
associated to the post:
Ecto.build_assoc(post, :comments)
|> Ecto.Changeset.change(body: "great example!")
|> Repo.insert!()
Or we can simply set the post_id in the comment itself:
%Comment{body: "better example", post_id: post.id}
|> Repo.insert!()
In other words, when you find yourself wanting to work only with a subset
of the data, then using `put_assoc/4` is most likely unnecessary. Instead,
you want to work on the other side of the association.
Let's see an example where using `put_assoc/4` is a good fit.
## Example: Adding tags to a post
Imagine you are receiving a set of tags you want to associate to a post.
Let's imagine that those tags exist upfront and are all persisted to the
database. Imagine we get the data in this format:
params = %{"title" => "new post", "tags" => ["learner"]}
Now, since the tags already exist, we will bring all of them from the
database and put them directly in the post:
tags = Repo.all(from t in Tag, where: t.name in ^params["tags"])
post
|> Repo.preload(:tags)
|> Ecto.Changeset.cast(params, [:title]) # No need to allow :tags as we put them directly
|> Ecto.Changeset.put_assoc(:tags, tags) # Explicitly set the tags
Since in this case we always require the user to pass all tags
directly, using `put_assoc/4` is a great fit. It will automatically
remove any tag not given and properly associate all of the given
tags with the post.
Furthermore, since the tag information is given as structs read directly
from the database, Ecto will treat the data as correct and only do the
minimum necessary to guarantee that posts and tags are associated,
without trying to update or diff any of the fields in the tag struct.
Although it accepts an `opts` argument, there are no options currently
supported by `put_assoc/4`.
"""
def put_assoc(%Changeset{} = changeset, name, value, opts \\ []) do
put_relation(:assoc, changeset, name, value, opts)
end
@doc """
Puts the given embed entry or entries as a change in the changeset.
This function is used to work with embeds as a whole. For embeds with
cardinality one, `nil` can be used to remove the existing entry. For
embeds with many entries, an empty list may be given instead.
If the embed has no changes, it will be skipped. If the embed is
invalid, the changeset will be marked as invalid.
The list of supported values and their behaviour is described in
`put_assoc/4`. If the given value is not any of values listed there,
it will raise.
Although this function accepts an `opts` argument, there are no options
currently supported by `put_embed/4`.
"""
def put_embed(%Changeset{} = changeset, name, value, opts \\ []) do
put_relation(:embed, changeset, name, value, opts)
end
defp put_relation(_tag, %{types: nil}, _name, _value, _opts) do
raise ArgumentError, "changeset does not have types information"
end
defp put_relation(tag, changeset, name, value, _opts) do
%{data: data, types: types, changes: changes, errors: errors, valid?: valid?} = changeset
relation = relation!(:put, tag, name, Map.get(types, name))
{changes, errors, valid?} =
put_change(data, changes, errors, valid?, name, value, {tag, relation})
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
@doc """
Forces a change on the given `key` with `value`.
If the change is already present, it is overridden with
the new value.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> changeset = force_change(changeset, :title, "bar")
iex> changeset.changes
%{title: "bar"}
iex> changeset = force_change(changeset, :author, "bar")
iex> changeset.changes
%{title: "bar", author: "bar"}
"""
@spec force_change(t, atom, term) :: t
def force_change(%Changeset{types: nil}, _key, _value) do
raise ArgumentError, "changeset does not have types information"
end
def force_change(%Changeset{types: types} = changeset, key, value) do
case Map.get(types, key) do
{tag, _} when tag in @relations ->
raise "changing #{tag}s with force_change/3 is not supported, " <>
"please use put_#{tag}/4 instead"
nil ->
raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(changeset.data)}"
_ ->
put_in changeset.changes[key], value
end
end
@doc """
Deletes a change with the given key.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = delete_change(changeset, :title)
iex> get_change(changeset, :title)
nil
"""
@spec delete_change(t, atom) :: t
def delete_change(%Changeset{} = changeset, key) when is_atom(key) do
update_in changeset.changes, &Map.delete(&1, key)
end
@doc """
Applies the changeset changes to the changeset data.
This operation will return the underlying data with changes
regardless if the changeset is valid or not.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> apply_changes(changeset)
%Post{author: "bar", title: "foo"}
"""
@spec apply_changes(t) :: Ecto.Schema.t | data
def apply_changes(%Changeset{changes: changes, data: data}) when changes == %{} do
data
end
def apply_changes(%Changeset{changes: changes, data: data, types: types}) do
Enum.reduce(changes, data, fn {key, value}, acc ->
case Map.fetch(types, key) do
{:ok, {tag, relation}} when tag in @relations ->
Map.put(acc, key, Relation.apply_changes(relation, value))
{:ok, _} ->
Map.put(acc, key, value)
:error ->
acc
end
end)
end
@doc """
Applies the changeset action only if the changes are valid.
If the changes are valid, all changes are applied to the changeset data.
If the changes are invalid, no changes are applied, and an error tuple
is returned with the changeset containing the action that was attempted
to be applied.
The action may be any atom.
## Examples
iex> {:ok, data} = apply_action(changeset, :update)
iex> {:error, changeset} = apply_action(changeset, :update)
%Ecto.Changeset{action: :update}
"""
@spec apply_action(t, atom) :: {:ok, Ecto.Schema.t() | data} | {:error, t}
def apply_action(%Changeset{} = changeset, action) when is_atom(action) do
if changeset.valid? do
{:ok, apply_changes(changeset)}
else
{:error, %Changeset{changeset | action: action}}
end
end
def apply_action(%Changeset{}, action) do
raise ArgumentError, "expected action to be an atom, got: #{inspect action}"
end
@doc """
Applies the changeset action if the changes are valid or raises an error.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> apply_action!(changeset, :update)
%Post{author: "bar", title: "foo"}
iex> changeset = change(%Post{author: "bar"}, %{title: :bad})
iex> apply_action!(changeset, :update)
** (Ecto.InvalidChangesetError) could not perform update because changeset is invalid.
See `apply_action/2` for more information.
"""
@spec apply_action!(t, atom) :: Ecto.Schema.t() | data
def apply_action!(%Changeset{} = changeset, action) do
case apply_action(changeset, action) do
{:ok, data} ->
data
{:error, changeset} ->
raise Ecto.InvalidChangesetError, action: action, changeset: changeset
end
end
## Validations
@doc ~S"""
Returns a keyword list of the validations for this changeset.
The keys in the list are the names of fields, and the values are a
validation associated with the field. A field may occur multiple
times in the list.
## Example
%Post{}
|> change()
|> validate_format(:title, ~r/^\w+:\s/, message: "must start with a topic")
|> validate_length(:title, max: 100)
|> validations()
#=> [
title: {:length, [ max: 100 ]},
title: {:format, ~r/^\w+:\s/}
]
The following validations may be included in the result. The list is
not necessarily exhaustive. For example, custom validations written
by the developer will also appear in our return value.
This first group contains validations that take a keyword list of validators,
where the validators are shown immediately following the validation type.
This list may also include a `message:` key.
* `{:length, [option]}`
* `min: n`
* `max: n`
* `is: n`
* `count: :graphemes | :codepoints`
* `{:number, [option]}`
* `equal_to: n`
* `greater_than: n`
* `greater_than_or_equal_to: n`
* `less_than: n`
* `less_than_or_equal_to: n`
The other validators simply take a value:
* `{:exclusion, Enum.t}`
* `{:format, ~r/pattern/}`
* `{:inclusion, Enum.t}`
* `{:subset, Enum.t}`
"""
@spec validations(t) :: [{atom, term}]
def validations(%Changeset{validations: validations}) do
validations
end
@doc """
Adds an error to the changeset.
An additional keyword list `keys` can be passed to provide additional
contextual information for the error. This is useful when using
`traverse_errors/2`.
## Examples
iex> changeset = change(%Post{}, %{title: ""})
iex> changeset = add_error(changeset, :title, "empty")
iex> changeset.errors
[title: {"empty", []}]
iex> changeset.valid?
false
iex> changeset = change(%Post{}, %{title: ""})
iex> changeset = add_error(changeset, :title, "empty", additional: "info")
iex> changeset.errors
[title: {"empty", [additional: "info"]}]
iex> changeset.valid?
false
"""
@spec add_error(t, atom, String.t, Keyword.t) :: t
def add_error(%Changeset{errors: errors} = changeset, key, message, keys \\ []) when is_binary(message) do
%{changeset | errors: [{key, {message, keys}}|errors], valid?: false}
end
@doc """
Validates the given `field` change.
It invokes the `validator` function to perform the validation
only if a change for the given `field` exists and the change
value is not `nil`. The function must return a list of errors
(with an empty list meaning no errors).
In case there's at least one error, the list of errors will be appended to the
`:errors` field of the changeset and the `:valid?` flag will be set to
`false`.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = validate_change changeset, :title, fn :title, title ->
...> # Value must not be "foo"!
...> if title == "foo" do
...> [title: "cannot be foo"]
...> else
...> []
...> end
...> end
iex> changeset.errors
[title: {"cannot be foo", []}]
"""
@spec validate_change(t, atom, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{} = changeset, field, validator) when is_atom(field) do
%{changes: changes, errors: errors} = changeset
ensure_field_exists!(changeset, field)
value = Map.get(changes, field)
new = if is_nil(value), do: [], else: validator.(field, value)
new =
Enum.map(new, fn
{key, val} when is_atom(key) and is_binary(val) ->
{key, {val, []}}
{key, {val, opts}} when is_atom(key) and is_binary(val) and is_list(opts) ->
{key, {val, opts}}
end)
case new do
[] -> changeset
[_|_] -> %{changeset | errors: new ++ errors, valid?: false}
end
end
@doc """
Stores the validation `metadata` and validates the given `field` change.
Similar to `validate_change/3` but stores the validation metadata
into the changeset validators. The validator metadata is often used
as a reflection mechanism, to automatically generate code based on
the available validations.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = validate_change changeset, :title, :useless_validator, fn
...> _, _ -> []
...> end
iex> changeset.validations
[title: :useless_validator]
"""
@spec validate_change(t, atom, term, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{validations: validations} = changeset,
field, metadata, validator) do
changeset = %{changeset | validations: [{field, metadata}|validations]}
validate_change(changeset, field, validator)
end
@doc """
Validates that one or more fields are present in the changeset.
You can pass a single field name or a list of field names that
are required.
If the value of a field is `nil` or a string made only of whitespace,
the changeset is marked as invalid, the field is removed from the
changeset's changes, and an error is added. An error won't be added if
the field already has an error.
If a field is given to `validate_required/3` but it has not been passed
as parameter during `cast/4` (i.e. it has not been changed), then
`validate_required/3` will check for its current value in the data.
If the data contains a non-empty value for the field, then no error is
added. This allows developers to use `validate_required/3` to perform
partial updates. For example, on `insert` all fields would be required,
because their default values on the data are all `nil`, but on `update`,
if you don't want to change a field that has been previously set,
you are not required to pass it as a parameter, since `validate_required/3`
won't add an error for missing changes as long as the value in the
data given to the `changeset` is not empty.
Do not use this function to validate associations are required,
instead pass the `:required` option to `cast_assoc/3`.
Opposite to other validations, calling this function does not store
the validation under the `changeset.validations` key. Instead, it
stores all required fields under `changeset.required`.
## Options
* `:message` - the message on failure, defaults to "can't be blank"
* `:trim` - a boolean that sets whether whitespaces are removed before
running the validation on binaries/strings, defaults to true
## Examples
validate_required(changeset, :title)
validate_required(changeset, [:title, :body])
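As a sketch of the partial-update behaviour described above, where the
`Post` struct is illustrative:

    # :title is already set in the data, so omitting it from the
    # params does not add a "can't be blank" error
    changeset =
      %Post{title: "existing title"}
      |> cast(%{}, [:title])
      |> validate_required([:title])

    changeset.valid?
    #=> true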
"""
@spec validate_required(t, list | atom, Keyword.t) :: t
def validate_required(%Changeset{} = changeset, fields, opts \\ []) when not is_nil(fields) do
%{required: required, errors: errors, changes: changes} = changeset
trim = Keyword.get(opts, :trim, true)
fields = List.wrap(fields)
fields_with_errors =
for field <- fields,
missing?(changeset, field, trim),
ensure_field_exists!(changeset, field),
is_nil(errors[field]),
do: field
case fields_with_errors do
[] ->
%{changeset | required: fields ++ required}
_ ->
message = message(opts, "can't be blank")
new_errors = Enum.map(fields_with_errors, &{&1, {message, [validation: :required]}})
changes = Map.drop(changes, fields_with_errors)
%{changeset | changes: changes, required: fields ++ required, errors: new_errors ++ errors, valid?: false}
end
end
@doc """
Validates that no existing record with a different primary key
has the same values for these fields.
This function exists to provide quick feedback to users of your
application. It should not be relied on for any data guarantee as it
has race conditions and is inherently unsafe. For example, if this
check happens twice in the same time interval (because the user
submitted a form twice), both checks may pass and you may end up with
duplicate entries in the database. Therefore, a `unique_constraint/3`
should also be used to ensure your data won't get corrupted.
However, because constraints are only checked if all validations
succeed, this function can be used as an early check to provide
early feedback to users, since most conflicting data will have been
inserted prior to the current validation phase.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "has already been taken".
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint whose name ends with the name
of this changeset constraint.
* `:error_key` - the key to which changeset error will be added when
check fails, defaults to the first field name of the given list of
fields.
* `:prefix` - the prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). See `Ecto.Repo` documentation
for more information.
* `:repo_opts` - the options to pass to the `Ecto.Repo` call.
## Examples
unsafe_validate_unique(changeset, :city_name, repo)
unsafe_validate_unique(changeset, [:city_name, :state_name], repo)
unsafe_validate_unique(changeset, [:city_name, :state_name], repo, message: "city must be unique within state")
unsafe_validate_unique(changeset, [:city_name, :state_name], repo, prefix: "public")
"""
@spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t, Keyword.t) :: t
def unsafe_validate_unique(changeset, fields, repo, opts \\ []) when is_list(opts) do
fields = List.wrap(fields)
{repo_opts, opts} = Keyword.pop(opts, :repo_opts, [])
{validations, schema} =
case changeset do
%Ecto.Changeset{validations: validations, data: %schema{}} ->
{validations, schema}
%Ecto.Changeset{} ->
raise ArgumentError, "unsafe_validate_unique/4 does not work with schemaless changesets"
end
changeset = %{changeset | validations: [{:unsafe_unique, fields} | validations]}
where_clause = for field <- fields do
{field, get_field(changeset, field)}
end
# No need to query if there is a prior error for the fields
any_prior_errors_for_fields? = Enum.any?(changeset.errors, &(elem(&1, 0) in fields))
# No need to query if we haven't changed any of the fields in question
unrelated_changes? = Enum.all?(fields, &not Map.has_key?(changeset.changes, &1))
# If we don't have values for all fields, we can't query for uniqueness
any_nil_values_for_fields? = Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil()))
if unrelated_changes? || any_nil_values_for_fields? || any_prior_errors_for_fields? do
changeset
else
query =
schema
|> maybe_exclude_itself(changeset)
|> Ecto.Query.where(^where_clause)
|> Ecto.Query.select(true)
|> Ecto.Query.limit(1)
query =
if prefix = opts[:prefix] do
Ecto.Query.put_query_prefix(query, prefix)
else
query
end
if repo.one(query, repo_opts) do
error_key = Keyword.get(opts, :error_key, hd(fields))
add_error(changeset, error_key, message(opts, "has already been taken"),
validation: :unsafe_unique, fields: fields)
else
changeset
end
end
end
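# Excludes the changeset's own record from the uniqueness query by its
# primary key(s). If any primary key value is nil, the record is not
# persisted yet, so no exclusion is applied.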
defp maybe_exclude_itself(schema, changeset) do
:primary_key
|> schema.__schema__()
|> Enum.reduce_while(schema, fn field, query ->
case get_field(changeset, field) do
nil ->
{:halt, schema}
value ->
{:cont, Ecto.Query.or_where(query, [q], field(q, ^field) != ^value)}
end
end)
end
defp ensure_field_exists!(%Changeset{types: types, data: data}, field) do
unless Map.has_key?(types, field) do
raise ArgumentError, "unknown field #{inspect(field)} in #{inspect(data)}"
end
true
end
defp missing?(changeset, field, trim) when is_atom(field) do
case get_field(changeset, field) do
%{__struct__: Ecto.Association.NotLoaded} ->
raise ArgumentError, "attempting to validate association `#{field}` " <>
"that was not loaded. Please preload your associations " <>
"before calling validate_required/3 or pass the :required " <>
"option to Ecto.Changeset.cast_assoc/3"
value when is_binary(value) and trim -> String.trim_leading(value) == ""
value when is_binary(value) -> value == ""
nil -> true
_ -> false
end
end
defp missing?(_changeset, field, _trim) do
raise ArgumentError, "validate_required/3 expects field names to be atoms, got: `#{inspect field}`"
end
@doc """
Validates a change has the given format.
The format has to be expressed as a regular expression.
## Options
* `:message` - the message on failure, defaults to "has invalid format"
## Examples
validate_format(changeset, :email, ~r/@/)
"""
@spec validate_format(t, atom, Regex.t, Keyword.t) :: t
def validate_format(changeset, field, format, opts \\ []) do
validate_change changeset, field, {:format, format}, fn _, value ->
if value =~ format, do: [], else: [{field, {message(opts, "has invalid format"), [validation: :format]}}]
end
end
@doc """
Validates a change is included in the given enumerable.
## Options
* `:message` - the message on failure, defaults to "is invalid"
## Examples
validate_inclusion(changeset, :gender, ["man", "woman", "other", "prefer not to say"])
validate_inclusion(changeset, :age, 0..99)
"""
@spec validate_inclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_inclusion(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:inclusion, data}, fn _, value ->
if value in data,
do: [],
else: [{field, {message(opts, "is invalid"), [validation: :inclusion, enum: data]}}]
end
end
@doc ~S"""
Validates that a change, given as an enumerable, is a subset of the given
enumerable. Like `validate_inclusion/4`, but for lists.
## Options
* `:message` - the message on failure, defaults to "has an invalid entry"
## Examples
validate_subset(changeset, :pets, ["cat", "dog", "parrot"])
validate_subset(changeset, :lottery_numbers, 0..99)
"""
@spec validate_subset(t, atom, Enum.t, Keyword.t) :: t
def validate_subset(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:subset, data}, fn _, value ->
case Enum.any?(value, fn(x) -> not(x in data) end) do
true -> [{field, {message(opts, "has an invalid entry"), [validation: :subset, enum: data]}}]
false -> []
end
end
end
@doc """
Validates a change is not included in the given enumerable.
## Options
* `:message` - the message on failure, defaults to "is reserved"
## Examples
validate_exclusion(changeset, :name, ~w(admin superadmin))
"""
@spec validate_exclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_exclusion(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:exclusion, data}, fn _, value ->
if value in data, do:
[{field, {message(opts, "is reserved"), [validation: :exclusion, enum: data]}}], else: []
end
end
@doc """
Validates a change is a string or list of the given length.
Note that the length of a string is counted in graphemes by default. If
you are using this validation to match a character limit of a database
backend, the limit most likely ignores graphemes and instead counts
Unicode codepoints or bytes. In that case, use the `:count` option to
count codepoints (`:codepoints`) or bytes (`:bytes`).
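For example (illustrative), the string "e" <> "\u0301" ("é" built from a
combining accent) counts as 1 grapheme, 2 codepoints, and 3 bytes.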
## Options
* `:is` - the length must be exactly this value
* `:min` - the length must be greater than or equal to this value
* `:max` - the length must be less than or equal to this value
* `:count` - what length to count for string, `:graphemes` (default), `:codepoints` or `:bytes`
* `:message` - the message on failure, depending on the validation, is one of:
* for strings:
* "should be %{count} character(s)"
* "should be at least %{count} character(s)"
* "should be at most %{count} character(s)"
* for binary:
* "should be %{count} byte(s)"
* "should be at least %{count} byte(s)"
* "should be at most %{count} byte(s)"
* for lists:
* "should have %{count} item(s)"
* "should have at least %{count} item(s)"
* "should have at most %{count} item(s)"
## Examples
validate_length(changeset, :title, min: 3)
validate_length(changeset, :title, max: 100)
validate_length(changeset, :title, min: 3, max: 100)
validate_length(changeset, :code, is: 9)
validate_length(changeset, :topics, is: 2)
validate_length(changeset, :icon, count: :bytes, max: 1024 * 16)
"""
@spec validate_length(t, atom, Keyword.t) :: t
def validate_length(changeset, field, opts) when is_list(opts) do
validate_change changeset, field, {:length, opts}, fn
_, value ->
count_type = opts[:count] || :graphemes
{type, length} = case {value, count_type} do
{value, :codepoints} when is_binary(value) ->
{:string, codepoints_length(value, 0)}
{value, :graphemes} when is_binary(value) ->
{:string, String.length(value)}
{value, :bytes} when is_binary(value) ->
{:binary, byte_size(value)}
{value, _} when is_list(value) ->
{:list, list_length(changeset, field, value)}
end
error = ((is = opts[:is]) && wrong_length(type, length, is, opts)) ||
((min = opts[:min]) && too_short(type, length, min, opts)) ||
((max = opts[:max]) && too_long(type, length, max, opts))
if error, do: [{field, error}], else: []
end
end
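# Counts UTF-8 codepoints; a byte that does not start a valid UTF-8 sequence
# is counted as one codepoint, so malformed binaries never raise.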
defp codepoints_length(<<_::utf8, rest::binary>>, acc), do: codepoints_length(rest, acc + 1)
defp codepoints_length(<<_, rest::binary>>, acc), do: codepoints_length(rest, acc + 1)
defp codepoints_length(<<>>, acc), do: acc
defp list_length(%{types: types}, field, value) do
case Map.fetch(types, field) do
{:ok, {tag, _association}} when tag in [:embed, :assoc] ->
length(Relation.filter_empty(value))
_ ->
length(value)
end
end
defp wrong_length(_type, value, value, _opts), do: nil
defp wrong_length(:string, _length, value, opts), do:
{message(opts, "should be %{count} character(s)"), count: value, validation: :length, kind: :is, type: :string}
defp wrong_length(:binary, _length, value, opts), do:
{message(opts, "should be %{count} byte(s)"), count: value, validation: :length, kind: :is, type: :binary}
defp wrong_length(:list, _length, value, opts), do:
{message(opts, "should have %{count} item(s)"), count: value, validation: :length, kind: :is, type: :list}
defp too_short(_type, length, value, _opts) when length >= value, do: nil
defp too_short(:string, _length, value, opts), do:
{message(opts, "should be at least %{count} character(s)"), count: value, validation: :length, kind: :min, type: :string}
defp too_short(:binary, _length, value, opts), do:
{message(opts, "should be at least %{count} byte(s)"), count: value, validation: :length, kind: :min, type: :binary}
defp too_short(:list, _length, value, opts), do:
{message(opts, "should have at least %{count} item(s)"), count: value, validation: :length, kind: :min, type: :list}
defp too_long(_type, length, value, _opts) when length <= value, do: nil
defp too_long(:string, _length, value, opts), do:
{message(opts, "should be at most %{count} character(s)"), count: value, validation: :length, kind: :max, type: :string}
defp too_long(:binary, _length, value, opts), do:
{message(opts, "should be at most %{count} byte(s)"), count: value, validation: :length, kind: :max, type: :binary}
defp too_long(:list, _length, value, opts), do:
{message(opts, "should have at most %{count} item(s)"), count: value, validation: :length, kind: :max, type: :list}
@doc """
Validates the properties of a number.
## Options
* `:less_than`
* `:greater_than`
* `:less_than_or_equal_to`
* `:greater_than_or_equal_to`
* `:equal_to`
* `:not_equal_to`
* `:message` - the message on failure, defaults to one of:
* "must be less than %{number}"
* "must be greater than %{number}"
* "must be less than or equal to %{number}"
* "must be greater than or equal to %{number}"
* "must be equal to %{number}"
* "must be not equal to %{number}"
## Examples
validate_number(changeset, :count, less_than: 3)
validate_number(changeset, :pi, greater_than: 3, less_than: 4)
validate_number(changeset, :the_answer_to_life_the_universe_and_everything, equal_to: 42)
"""
@spec validate_number(t, atom, Keyword.t) :: t
def validate_number(changeset, field, opts) do
validate_change changeset, field, {:number, opts}, fn
field, value ->
{message, opts} = Keyword.pop(opts, :message)
Enum.find_value opts, [], fn {spec_key, target_value} ->
case Map.fetch(@number_validators, spec_key) do
{:ok, {spec_function, default_message}} ->
validate_number(field, value, message || default_message,
spec_key, spec_function, target_value)
:error ->
supported_options = @number_validators |> Map.keys() |> Enum.map_join("\n", &" * #{inspect(&1)}")
raise ArgumentError, """
unknown option #{inspect spec_key} given to validate_number/3
The supported options are:
#{supported_options}
"""
end
end
end
end
defp validate_number(field, %Decimal{} = value, message, spec_key, _spec_function, target_value) do
result = Decimal.cmp(value, decimal_new(target_value))
case decimal_compare(result, spec_key) do
true -> nil
false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
end
end
defp validate_number(field, value, message, spec_key, spec_function, target_value) do
case apply(spec_function, [value, target_value]) do
true -> nil
false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
end
end
defp decimal_new(term) when is_float(term), do: Decimal.from_float(term)
defp decimal_new(term), do: Decimal.new(term)
defp decimal_compare(:lt, spec), do: spec in [:less_than, :less_than_or_equal_to, :not_equal_to]
defp decimal_compare(:gt, spec), do: spec in [:greater_than, :greater_than_or_equal_to, :not_equal_to]
defp decimal_compare(:eq, spec), do: spec in [:equal_to, :less_than_or_equal_to, :greater_than_or_equal_to]
@doc """
Validates that the given parameter matches its confirmation.
By calling `validate_confirmation(changeset, :email)`, this
validation will check if both "email" and "email_confirmation"
in the parameter map match. Note this validation only looks
at the parameters themselves, never the fields in the schema.
As such, the "email_confirmation" field does not need to be
added as a virtual field in your schema.
Note that if the confirmation field is nil or missing, this does
not add a validation error. You can specify that the confirmation
parameter is required in the options (see below).
## Options
* `:message` - the message on failure, defaults to "does not match confirmation"
* `:required` - boolean, sets whether existence of confirmation parameter
is required for addition of error. Defaults to false
## Examples
validate_confirmation(changeset, :email)
validate_confirmation(changeset, :password, message: "does not match password")
cast(data, params, [:password])
|> validate_confirmation(:password, message: "does not match password")
"""
@spec validate_confirmation(t, atom, Keyword.t) :: t
def validate_confirmation(changeset, field, opts \\ [])
def validate_confirmation(%{params: params} = changeset, field, opts) when is_map(params) do
param = Atom.to_string(field)
error_param = "#{param}_confirmation"
error_field = String.to_atom(error_param)
value = Map.get(params, param)
errors =
case Map.fetch(params, error_param) do
{:ok, ^value} ->
[]
{:ok, _} ->
[{error_field,
{message(opts, "does not match confirmation"), [validation: :confirmation]}}]
:error ->
confirmation_missing(opts, error_field)
end
%{changeset | validations: [{field, {:confirmation, opts}} | changeset.validations],
errors: errors ++ changeset.errors,
valid?: changeset.valid? and errors == []}
end
def validate_confirmation(%{params: nil} = changeset, _, _) do
changeset
end
defp confirmation_missing(opts, error_field) do
required = Keyword.get(opts, :required, false)
if required, do: [{error_field, {message(opts, "can't be blank"), [validation: :required]}}], else: []
end
defp message(opts, key \\ :message, default) do
Keyword.get(opts, key, default)
end
@doc """
Validates the given parameter is true.
Note this validation only checks the parameter itself is true, never
the field in the schema. That's because acceptance parameters do not need
to be persisted, as by definition they would always be stored as `true`.
## Options
* `:message` - the message on failure, defaults to "must be accepted"
## Examples
validate_acceptance(changeset, :terms_of_service)
validate_acceptance(changeset, :rules, message: "please accept rules")
"""
@spec validate_acceptance(t, atom, Keyword.t) :: t
def validate_acceptance(changeset, field, opts \\ [])
def validate_acceptance(%{params: params} = changeset, field, opts) do
errors = validate_acceptance_errors(params, field, opts)
%{changeset | validations: [{field, {:acceptance, opts}} | changeset.validations],
errors: errors ++ changeset.errors,
valid?: changeset.valid? and errors == []}
end
defp validate_acceptance_errors(nil, _field, _opts), do: []
defp validate_acceptance_errors(params, field, opts) do
param = Atom.to_string(field)
value = Map.get(params, param)
case Ecto.Type.cast(:boolean, value) do
{:ok, true} -> []
_ -> [{field, {message(opts, "must be accepted"), validation: :acceptance}}]
end
end
## Optimistic lock
@doc ~S"""
Applies optimistic locking to the changeset.
[Optimistic
locking](http://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or
*optimistic concurrency control*) is a technique that allows concurrent edits
on a single record. While pessimistic locking works by locking a resource for
an entire transaction, optimistic locking only checks if the resource changed
before updating it.
This is done by regularly fetching the record from the database, then checking
whether another user has made changes to the record *only when updating the
record*. This behaviour is ideal in situations where the chances of concurrent
updates to the same record are low; if they're not, pessimistic locking or
other concurrency patterns may be more suited.
## Usage
Optimistic locking works by keeping a "version" counter for each record; this
counter gets incremented each time a modification is made to a record. Hence,
in order to use optimistic locking, a field must exist in your schema for
versioning purposes. Such a field is usually an integer, but other types
are supported.
## Examples
Assuming we have a `Post` schema (stored in the `posts` table), the first step
is to add a version column to the `posts` table:
alter table(:posts) do
add :lock_version, :integer, default: 1
end
The column name is arbitrary and doesn't need to be `:lock_version`. Now add
a field to the schema too:
defmodule Post do
use Ecto.Schema
schema "posts" do
field :title, :string
field :lock_version, :integer, default: 1
end
def changeset(:update, struct, params \\ %{}) do
struct
|> Ecto.Changeset.cast(params, [:title])
|> Ecto.Changeset.optimistic_lock(:lock_version)
end
end
Now let's take optimistic locking for a spin:
iex> post = Repo.insert!(%Post{title: "foo"})
%Post{id: 1, title: "foo", lock_version: 1}
iex> valid_change = Post.changeset(:update, post, %{title: "bar"})
iex> stale_change = Post.changeset(:update, post, %{title: "baz"})
iex> Repo.update!(valid_change)
%Post{id: 1, title: "bar", lock_version: 2}
iex> Repo.update!(stale_change)
** (Ecto.StaleEntryError) attempted to update a stale entry:
%Post{id: 1, title: "baz", lock_version: 1}
When a conflict happens (a record which has been previously fetched is
being updated, but that same record has been modified since it was
fetched), an `Ecto.StaleEntryError` exception is raised.
Optimistic locking also works with delete operations. Just call the
`optimistic_lock/3` function with the data before delete:
iex> changeset = Ecto.Changeset.optimistic_lock(post, :lock_version)
iex> Repo.delete(changeset)
`optimistic_lock/3` by default assumes the field
being used as a lock is an integer. If you want to use another type,
you need to pass the third argument customizing how the next value
is generated:
iex> Ecto.Changeset.optimistic_lock(post, :lock_uuid, fn _ -> Ecto.UUID.generate end)
"""
@spec optimistic_lock(Ecto.Schema.t | t, atom, (term -> term)) :: t
def optimistic_lock(data_or_changeset, field, incrementer \\ &increment_with_rollover/1) do
changeset = change(data_or_changeset, %{})
current = get_field(changeset, field)
# Apply these changes only inside the repo because we
# don't want to permanently track the lock change.
changeset = prepare_changes(changeset, fn changeset ->
put_in(changeset.changes[field], incrementer.(current))
end)
changeset = put_in(changeset.filters[field], current)
changeset
end
# increment_with_rollover expects to be used with lock_version set as :integer in the db schema
# 2_147_483_647 is the upper limit for a signed integer in both PostgreSQL and MySQL
defp increment_with_rollover(val) when val >= 2_147_483_647 do
1
end
defp increment_with_rollover(val) when is_integer(val) do
val + 1
end
@doc """
Provides a function executed by the repository on insert/update/delete.
If the changeset given to the repository is valid, the function given to
`prepare_changes/2` will be called with the changeset and must return a
changeset, allowing developers to do final adjustments to the changeset or
to issue data consistency commands. The repository itself can be accessed
inside the function under the `repo` field in the changeset. If the
changeset given to the repository is invalid, the function will not be
invoked.
The given function is guaranteed to run inside the same transaction
as the changeset operation for databases that do support transactions.
## Example
A common use case is updating a counter cache, in this case updating a post's
comment count when a comment is created:
def create_comment(comment, params) do
comment
|> cast(params, [:body, :post_id])
|> prepare_changes(fn changeset ->
if post_id = get_change(changeset, :post_id) do
query = from Post, where: [id: ^post_id]
changeset.repo.update_all(query, inc: [comment_count: 1])
end
changeset
end)
end
We retrieve the repo from the comment changeset itself and use
update_all to update the counter cache in one query. Finally, the original
changeset must be returned.
"""
@spec prepare_changes(t, (t -> t)) :: t
def prepare_changes(%Changeset{prepare: prepare} = changeset, function) when is_function(function, 1) do
%{changeset | prepare: [function | prepare]}
end
## Constraints
@doc """
Returns all constraints in a changeset.
A constraint is a map with the following fields:
* `:type` - the type of the constraint that will be checked in the database,
such as `:check`, `:unique`, etc
* `:constraint` - the database constraint name as a string
* `:match` - the type of match Ecto will perform on a violated constraint
against the `:constraint` value. It is `:exact`, `:suffix` or `:prefix`
* `:field` - the field a violated constraint will apply the error to
* `:error_message` - the error message in case of violated constraints
* `:error_type` - the type of error that identifies the error message
"""
@spec constraints(t) :: [constraint]
def constraints(%Changeset{constraints: constraints}) do
constraints
end
@doc """
Checks for a check constraint in the given field.
The check constraint works by relying on the database to check
if the check constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
In order to use the check constraint, the first step is
to define the check constraint in a migration:
create constraint("users", :price_must_be_positive, check: "price > 0")
Now that a constraint exists, when modifying users, we could
annotate the changeset with a check constraint so Ecto knows
how to convert it into an error message:
cast(user, params, [:price])
|> check_constraint(:price, name: :price_must_be_positive)
Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
price is not positive, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.
## Options
* `:message` - the message in case the constraint check fails.
Defaults to "is invalid"
* `:name` - the name of the constraint. Required.
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
`:prefix` matches any repo constraint which `starts_with?` `:name`
to this changeset constraint.
"""
def check_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || raise ArgumentError, "must supply the name of the constraint"
message = message(opts, "is invalid")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :check, to_string(constraint), match_type, field, message)
end
@doc """
Checks for a unique constraint in the given field or list of fields.
The unique constraint works by relying on the database to check
if the unique constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
In order to use the uniqueness constraint, the first step is
to define the unique index in a migration:
create unique_index(:users, [:email])
Now that a constraint exists, when modifying users, we could
annotate the changeset with a unique constraint so Ecto knows
how to convert it into an error message:
cast(user, params, [:email])
|> unique_constraint(:email)
Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
email already exists, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "has already been taken"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field(s). May be required
explicitly for complex cases
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
## Complex constraints
Because the constraint logic is in the database, we can leverage
all the database functionality when defining them. For example,
let's suppose the e-mails are scoped by company id:
# In migration
create unique_index(:users, [:email, :company_id])
# In the changeset function
cast(user, params, [:email])
|> unique_constraint([:email, :company_id])
The first field name, `:email` in this case, will be used as the error
key to the changeset errors keyword list. For example, the above
`unique_constraint/3` would generate something like:
Repo.insert!(%User{email: "<EMAIL>", company_id: 1})
changeset = User.changeset(%User{}, %{email: "<EMAIL>", company_id: 1})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [email: {"has already been taken", []}]
In complex cases, instead of relying on name inference, it may be best
to set the constraint name explicitly:
# In the migration
create unique_index(:users, [:email, :company_id], name: :users_email_company_id_index)
# In the changeset function
cast(user, params, [:email])
|> unique_constraint(:email, name: :users_email_company_id_index)
### Partitioning
If your table is partitioned, then your unique index might look different
per partition, e.g. Postgres adds p<number> to the middle of your key, like:
users_p0_email_key
users_p1_email_key
...
users_p99_email_key
In this case you can use the name and suffix options together to match on
these dynamic indexes, like:
cast(user, params, [:email])
|> unique_constraint(:email, name: :email_key, match: :suffix)
## Case sensitivity
Unfortunately, different databases provide different guarantees
when it comes to case-sensitiveness. For example, in MySQL, comparisons
are case-insensitive by default. In Postgres, users can define a
case-insensitive column by using the `:citext` type/extension. In your migration:
execute "CREATE EXTENSION IF NOT EXISTS citext"
create table(:users) do
...
add :email, :citext
...
end
If for some reason your database does not support case insensitive columns,
you can explicitly downcase values before inserting/updating them:
cast(data, params, [:email])
|> update_change(:email, &String.downcase/1)
|> unique_constraint(:email)
"""
@spec unique_constraint(t, atom | [atom, ...], Keyword.t) :: t
def unique_constraint(changeset, field_or_fields, opts \\ [])
def unique_constraint(changeset, field, opts) when is_atom(field) do
unique_constraint(changeset, [field], opts)
end
def unique_constraint(changeset, [first_field | _] = fields, opts) do
constraint = opts[:name] || unique_index_name(changeset, fields)
message = message(opts, "has already been taken")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :unique, to_string(constraint), match_type, first_field, message)
end
defp unique_index_name(changeset, fields) do
field_names = Enum.map(fields, &get_field_source(changeset, &1))
Enum.join([get_source(changeset)] ++ field_names ++ ["index"], "_")
end
@doc """
Checks for foreign key constraint in the given field.
The foreign key constraint works by relying on the database to
check if the associated data exists or not. This is useful to
guarantee that a child will only be created if the parent exists
in the database too.
In order to use the foreign key constraint the first step is
to define the foreign key in a migration. This is often done
with references. For example, imagine you are creating a
comments table that belongs to posts. One would have:
create table(:comments) do
add :post_id, references(:posts)
end
By default, Ecto will generate a foreign key constraint with
name "comments_post_id_fkey" (the name is configurable).
Now that a constraint exists, when creating comments, we could
annotate the changeset with foreign key constraint so Ecto knows
how to convert it into an error message:
cast(comment, params, [:post_id])
|> foreign_key_constraint(:post_id)
Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
associated post does not exist, it will be converted into an
error and `{:error, changeset}` returned by the repository.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "does not exist"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field. May be required
explicitly for complex cases
"""
@spec foreign_key_constraint(t, atom, Keyword.t) :: t
def foreign_key_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_fkey"
message = message(opts, "does not exist")
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, field, message, :foreign)
end
@doc """
Checks the associated field exists.
This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that a child will only be created if the parent exists
in the database too. Therefore, it only applies to `belongs_to`
associations.
As the name says, a constraint is required in the database for
this function to work. Such constraint is often added as a
reference to the child table:
create table(:comments) do
add :post_id, references(:posts)
end
Now, when inserting a comment, it is possible to forbid any
comment to be added if the associated post does not exist:
comment
|> Ecto.Changeset.cast(params, [:post_id])
|> Ecto.Changeset.assoc_constraint(:post)
|> Repo.insert
## Options
* `:message` - the message in case the constraint check fails,
defaults to "does not exist"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + association field.
May be required explicitly for complex cases
"""
@spec assoc_constraint(t, atom, Keyword.t) :: t
def assoc_constraint(changeset, assoc, opts \\ []) do
constraint = opts[:name] ||
case get_assoc(changeset, assoc) do
%Ecto.Association.BelongsTo{owner_key: owner_key} ->
"#{get_source(changeset)}_#{owner_key}_fkey"
other ->
raise ArgumentError,
"assoc_constraint can only be added to belongs to associations, got: #{inspect other}"
end
message = message(opts, "does not exist")
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :assoc)
end
@doc """
Checks the associated field does not exist.
This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that parent can only be deleted (or have its primary
key changed) if no child exists in the database. Therefore, it only
applies to `has_*` associations.
As the name says, a constraint is required in the database for
this function to work. Such constraint is often added as a
reference to the child table:
create table(:comments) do
add :post_id, references(:posts)
end
Now, when deleting the post, it is possible to forbid the post from
being deleted if it still has comments attached to it:
post
|> Ecto.Changeset.change
|> Ecto.Changeset.no_assoc_constraint(:comments)
|> Repo.delete
## Options
* `:message` - the message in case the constraint check fails,
defaults to "is still associated with this entry" (for `has_one`)
and "are still associated with this entry" (for `has_many`)
* `:name` - the constraint name. By default, the constraint
name is inferred from the association table + association
field. May be required explicitly for complex cases
"""
@spec no_assoc_constraint(t, atom, Keyword.t) :: t
def no_assoc_constraint(changeset, assoc, opts \\ []) do
{constraint, message} =
case get_assoc(changeset, assoc) do
%Ecto.Association.Has{cardinality: cardinality,
related_key: related_key, related: related} ->
{opts[:name] || "#{related.__schema__(:source)}_#{related_key}_fkey",
message(opts, no_assoc_message(cardinality))}
other ->
raise ArgumentError,
"no_assoc_constraint can only be added to has one/many associations, got: #{inspect other}"
end
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :no_assoc)
end
@doc """
Checks for an exclusion constraint in the given field.
The exclusion constraint works by relying on the database to check
if the exclusion constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "violates an exclusion constraint"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field. May be required
explicitly for complex cases
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
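## Examples
Assuming a Postgres exclusion constraint on overlapping bookings (the
constraint name below is illustrative):
cast(booking, params, [:from, :until])
|> exclusion_constraint(:room, name: :bookings_overlap_excl)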
"""
def exclusion_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_exclusion"
message = message(opts, "violates an exclusion constraint")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :exclusion, to_string(constraint), match_type, field, message, :exclusion)
end
defp no_assoc_message(:one), do: "is still associated with this entry"
defp no_assoc_message(:many), do: "are still associated with this entry"
defp add_constraint(changeset, type, constraint, match, field, message) do
add_constraint(changeset, type, constraint, match, field, message, type)
end
defp add_constraint(%Changeset{constraints: constraints} = changeset,
type, constraint, match, field, error_message, error_type)
when is_binary(constraint) and is_atom(field) and is_binary(error_message) do
unless match in @match_types do
raise ArgumentError, "invalid match type: #{inspect match}. Allowed match types: #{inspect @match_types}"
end
constraint = %{
constraint: constraint,
error_message: error_message,
error_type: error_type,
field: field,
match: match,
type: type
}
%{changeset | constraints: [constraint | constraints]}
end
defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source),
do: source
defp get_source(%{data: data}), do:
raise ArgumentError, "cannot add constraint to changeset because it does not have a source, got: #{inspect data}"
defp get_source(item), do:
raise ArgumentError, "cannot add constraint because a changeset was not supplied, got: #{inspect item}"
defp get_assoc(%{types: types}, assoc) do
case Map.fetch(types, assoc) do
{:ok, {:assoc, association}} ->
association
_ ->
raise_invalid_assoc(types, assoc)
end
end
defp raise_invalid_assoc(types, assoc) do
associations = for {_key, {:assoc, %{field: field}}} <- types, do: field
raise ArgumentError, "cannot add constraint to changeset because association `#{assoc}` does not exist. " <>
"Did you mean one of `#{Enum.join(associations, "`, `")}`?"
end
defp get_field_source(%{data: %{__struct__: schema}}, field) when is_atom(schema),
do: schema.__schema__(:field_source, field) || field
defp get_field_source(%{}, field),
do: field
@doc ~S"""
Traverses changeset errors and applies the given function to error messages.
This function is particularly useful when associations and embeds
are cast in the changeset as it will traverse all associations and
embeds and place all errors in a series of nested maps.
A changeset is supplied along with a function to apply to each
error message as the changeset is traversed. The error message
function receives an error tuple `{msg, opts}`, for example:
{"should be at least %{count} characters", [count: 3, validation: :length, min: 3]}
## Examples
iex> traverse_errors(changeset, fn {msg, opts} ->
...> Enum.reduce(opts, msg, fn {key, value}, acc ->
...> String.replace(acc, "%{#{key}}", to_string(value))
...> end)
...> end)
%{title: ["should be at least 3 characters"]}
Optionally, the function can accept three arguments: `changeset`, `field`,
and the error tuple `{msg, opts}`. This is useful whenever you want to
extract validation rules from `changeset.validations` to build a detailed
error description.
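A sketch of the three-argument form, attaching the validation name from the
error options to each message:
iex> traverse_errors(changeset, fn _changeset, _field, {msg, opts} ->
...> {msg, opts[:validation]}
...> end)
%{title: [{"should be at least %{count} characters", :length}]}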
"""
@spec traverse_errors(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [String.t | map]}
def traverse_errors(%Changeset{errors: errors, changes: changes, types: types} = changeset, msg_func)
when is_function(msg_func, 1) or is_function(msg_func, 3) do
errors
|> Enum.reverse()
|> merge_error_keys(msg_func, changeset)
|> merge_related_keys(changes, types, msg_func)
end
defp merge_error_keys(errors, msg_func, _) when is_function(msg_func, 1) do
Enum.reduce(errors, %{}, fn({key, val}, acc) ->
val = msg_func.(val)
Map.update(acc, key, [val], &[val|&1])
end)
end
defp merge_error_keys(errors, msg_func, changeset) when is_function(msg_func, 3) do
Enum.reduce(errors, %{}, fn({key, val}, acc) ->
val = msg_func.(changeset, key, val)
Map.update(acc, key, [val], &[val|&1])
end)
end
defp merge_related_keys(_, _, nil, _) do
raise ArgumentError, "changeset does not have types information"
end
defp merge_related_keys(map, changes, types, msg_func) do
Enum.reduce types, map, fn
{field, {tag, %{cardinality: :many}}}, acc when tag in @relations ->
if changesets = Map.get(changes, field) do
{errors, all_empty?} =
Enum.map_reduce(changesets, true, fn changeset, all_empty? ->
errors = traverse_errors(changeset, msg_func)
{errors, all_empty? and errors == %{}}
end)
case all_empty? do
true -> acc
false -> Map.put(acc, field, errors)
end
else
acc
end
{field, {tag, %{cardinality: :one}}}, acc when tag in @relations ->
if changeset = Map.get(changes, field) do
case traverse_errors(changeset, msg_func) do
errors when errors == %{} -> acc
errors -> Map.put(acc, field, errors)
end
else
acc
end
{_, _}, acc ->
acc
end
end
end
defimpl Inspect, for: Ecto.Changeset do
import Inspect.Algebra
def inspect(%Ecto.Changeset{data: data} = changeset, opts) do
list = for attr <- [:action, :changes, :errors, :data, :valid?] do
{attr, Map.get(changeset, attr)}
end
redacted_fields = case data do
%type{__meta__: _} -> type.__schema__(:redact_fields)
_ -> []
end
container_doc("#Ecto.Changeset<", list, ">", opts, fn
{:action, action}, opts -> concat("action: ", to_doc(action, opts))
{:changes, changes}, opts -> concat("changes: ", changes |> filter(redacted_fields) |> to_doc(opts))
{:data, data}, _opts -> concat("data: ", to_struct(data, opts))
{:errors, errors}, opts -> concat("errors: ", to_doc(errors, opts))
{:valid?, valid?}, opts -> concat("valid?: ", to_doc(valid?, opts))
end)
end
defp to_struct(%{__struct__: struct}, _opts), do: "#" <> Kernel.inspect(struct) <> "<>"
defp to_struct(other, opts), do: to_doc(other, opts)
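# Replaces the values of redacted fields in the changes map with a
# placeholder so they do not leak when the changeset is inspected.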
defp filter(changes, redacted_fields) do
Enum.reduce(redacted_fields, changes, fn redacted_field, changes ->
if Map.has_key?(changes, redacted_field) do
Map.put(changes, redacted_field, "**redacted**")
else
changes
end
end)
end
end
defmodule Elastix.Mapping do
@moduledoc """
The mapping API is used to define how documents are stored and indexed.
[Elastic documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html)
"""
import Elastix.HTTP, only: [prepare_url: 2]
alias Elastix.{HTTP, JSON}
@doc """
Creates a new mapping.
## Examples
iex> mapping = %{properties: %{user: %{type: "text"}, post_date: %{type: "date"}, message: %{type: "text"}}}
iex> Elastix.Mapping.put("http://localhost:9200", "twitter", "tweet", mapping)
{:ok, %HTTPoison.Response{...}}
"""
@spec put(
elastic_url :: String.t(),
index_names :: String.t() | list,
type_name :: String.t(),
data :: map,
query_params :: Keyword.t()
) :: HTTP.resp()
def put(elastic_url, index_names, type_name, data, query_params \\ [])
def put(elastic_url, index_names, type_name, data, query_params)
when is_list(index_names) do
prepare_url(elastic_url, make_path(index_names, [type_name], query_params))
|> HTTP.put(JSON.encode!(data))
end
def put(elastic_url, index_name, type_name, data, query_params),
do: put(elastic_url, [index_name], type_name, data, query_params)
@doc """
Gets info on one or a list of mappings for one or a list of indices.
## Examples
iex> Elastix.Mapping.get("http://localhost:9200", "twitter", "tweet")
{:ok, %HTTPoison.Response{...}}
"""
@spec get(
elastic_url :: String.t(),
index_names :: String.t() | list,
type_names :: String.t() | list,
query_params :: Keyword.t()
) :: HTTP.resp()
def get(elastic_url, index_names, type_names, query_params \\ [])
def get(elastic_url, index_names, type_names, query_params)
when is_list(type_names) and is_list(index_names) do
prepare_url(elastic_url, make_path(index_names, type_names, query_params))
|> HTTP.get()
end
def get(elastic_url, index_names, type_name, query_params)
when is_list(index_names) do
get(elastic_url, index_names, [type_name], query_params)
end
def get(elastic_url, index_name, type_names, query_params)
when is_list(type_names) do
get(elastic_url, [index_name], type_names, query_params)
end
def get(elastic_url, index_name, type_name, query_params),
do: get(elastic_url, [index_name], [type_name], query_params)
@doc """
Gets info on every mapping.
## Examples
iex> Elastix.Mapping.get_all("http://localhost:9200")
{:ok, %HTTPoison.Response{...}}
"""
@spec get_all(elastic_url :: String.t(), query_params :: Keyword.t()) :: HTTP.resp()
def get_all(elastic_url, query_params \\ []) do
prepare_url(elastic_url, make_all_path(query_params))
|> HTTP.get()
end
@doc """
Gets info on every given mapping.
## Examples
iex> Elastix.Mapping.get_all("http://localhost:9200", ["tweet", "user"])
{:ok, %HTTPoison.Response{...}}
"""
@spec get_all_with_type(
elastic_url :: String.t(),
type_names :: String.t() | list,
query_params :: Keyword.t()
) :: HTTP.resp()
def get_all_with_type(elastic_url, type_names, query_params \\ [])
def get_all_with_type(elastic_url, type_names, query_params)
when is_list(type_names) do
prepare_url(elastic_url, make_all_path(type_names, query_params))
|> HTTP.get()
end
def get_all_with_type(elastic_url, type_name, query_params),
do: get_all_with_type(elastic_url, [type_name], query_params)
@doc false
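# Illustrative (assuming `append_query_string/2` renders a standard query
# string): make_path(["twitter", "news"], ["tweet"], version: 2)
# #=> "/twitter,news/_mapping/tweet?version=2"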
def make_path(index_names, type_names, query_params) do
index_names = Enum.join(index_names, ",")
type_names = Enum.join(type_names, ",")
path = "/#{index_names}/_mapping/#{type_names}"
case query_params do
[] -> path
_ -> HTTP.append_query_string(path, query_params)
end
end
@doc false
def make_all_path(query_params) do
path = "/_mapping"
case query_params do
[] -> path
_ -> HTTP.append_query_string(path, query_params)
end
end
@doc false
def make_all_path(type_names, query_params) do
type_names = Enum.join(type_names, ",")
path = "/_mapping/#{type_names}"
case query_params do
[] -> path
_ -> HTTP.append_query_string(path, query_params)
end
end
end
defmodule ZenMonitor do
@moduledoc """
ZenMonitor provides efficient monitoring of remote processes and controlled dissemination of
any resulting `:DOWN` messages.
This module provides a convenient client interface which aims to be a drop-in
replacement for `Process.monitor/1` and `Process.demonitor/2`.
# Known differences between ZenMonitor and Process
- `ZenMonitor.demonitor/2` has the same signature as Process.demonitor/2 but does not respect
the `:info` option.
- ZenMonitor aims to be efficient over distribution, one of the main strategies for achieving
this is relying mainly on local monitors and then batching up all changes over a time period
to be sent as a single message. This design means that additional latency is added to the
delivery of down messages in pursuit of the goal. Where `Process.monitor/1` on a remote
process will provide a :DOWN message as soon as possible, `ZenMonitor.monitor/1` on a remote
process will actually have a number of batching periods to go through before the message
arrives at the monitoring process, here are all the points that add latency.
1. When the monitor is enqueued it has to wait until the next sweep happens in the
`ZenMonitor.Local.Connector` until it will be delivered to the `ZenMonitor.Proxy`.
1. The monitor arrives at the `ZenMonitor.Proxy`, the process crashes and the ERTS `:DOWN`
message is delivered. This will be translated into a death_certificate and sent to a
`ZenMonitor.Proxy.Batcher` for delivery. It will have to wait until the next sweep
happens for it to be sent back to the `ZenMonitor.Local.Connector` for fan-out.
1. The dead summary including the death_certificate arrives at the
`ZenMonitor.Local.Connector` and a down_dispatch is created for it and enqueued with the
`ZenMonitor.Local`.
1. The down_dispatch waits in a queue until the `ZenMonitor.Local.Dispatcher` generates
more demand.
1. Once demand is generated, `ZenMonitor.Local` will hand off the down_dispatch for actual
delivery by `ZenMonitor.Local.Dispatcher`.
* Steps 1 and 3 employ a strategy of batch sizing to prevent the message from growing too
large. The batch size is controlled by application configuration and is alterable at boot
and runtime. This means though that Steps 1 and 3 can be delayed by N intervals
where `N = ceil(items_ahead_of_event / chunk_size)`
* Step 4 employs a similar batching strategy, a down_dispatch will wait in queue for up to N
intervals where `N = ceil(items_ahead_of_dispatch / chunk_size)`
- `ZenMonitor` decorates the reason of the `:DOWN` message. If a remote process goes down
because of `original_reason`, this will get decorated as `{:zen_monitor, original_reason}`
when delivered by ZenMonitor. This allows the receiver to differentiate `:DOWN` messages
originating from `ZenMonitor.monitor/1` and those originating from `Process.monitor/1`.
This is necessary when operating in mixed mode. It is the responsibility of the receiver to
unwrap this reason if it requires the `original_reason` for some additional handling of the
`:DOWN` message.
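A minimal sketch of unwrapping the decorated reason (`handle_down/1` is a
hypothetical helper):
ref = ZenMonitor.monitor(remote_pid)
receive do
{:DOWN, ^ref, :process, _pid, {:zen_monitor, original_reason}} ->
handle_down(original_reason)
end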
"""
@gen_module GenServer
@typedoc """
`ZenMonitor.destination` are all the types that can be monitored.
- `pid()` either local or remote
- `{name, node}` represents a named process on the given node
- `name :: atom()` is a named process on the local node
"""
@type destination :: pid() | {name :: atom, node :: node()} | name :: atom()
## Delegates
@doc """
Delegate to `ZenMonitor.Local.compatibility/1`
"""
defdelegate compatibility(target), to: ZenMonitor.Local
@doc """
Delegate to `ZenMonitor.Local.compatibility_for_node/1`
"""
defdelegate compatibility_for_node(remote), to: ZenMonitor.Local
@doc """
Delegate to `ZenMonitor.Local.Connector.connect/1`
"""
defdelegate connect(remote), to: ZenMonitor.Local.Connector
@doc """
Delegate to `ZenMonitor.Local.demonitor/2`
"""
defdelegate demonitor(ref, options \\ []), to: ZenMonitor.Local
@doc """
Delegate to `ZenMonitor.Local.monitor/1`
"""
defdelegate monitor(target), to: ZenMonitor.Local
## Client
@doc """
Get the module to use for gen calls from the Application Environment
This module only needs to support `GenServer.call/3` and `GenServer.cast/3` functionality, see
ZenMonitor's `@gen_module` for the default value
This can be controlled at boot and runtime with the `{:zen_monitor, :gen_module}` setting, see
`ZenMonitor.gen_module/1` for runtime convenience functionality.
"""
@spec gen_module() :: atom
def gen_module do
Application.get_env(:zen_monitor, :gen_module, @gen_module)
end
@doc """
Put the module to use for gen calls into the Application Environment
This is a simple convenience function for overwriting the `{:zen_monitor, :gen_module}` setting
at runtime.
"""
@spec gen_module(value :: atom) :: :ok
def gen_module(value) do
Application.put_env(:zen_monitor, :gen_module, value)
end
@doc """
Get the current monotonic time in milliseconds
This is a helper because `System.monotonic_time(:milliseconds)` is long and error-prone to
type in multiple call sites.
See `System.monotonic_time/1` for more information.
"""
@spec now() :: integer
def now do
System.monotonic_time(:milliseconds)
end
@doc """
Find the node for a destination.
"""
@spec find_node(target :: destination) :: node()
def find_node(pid) when is_pid(pid), do: node(pid)
def find_node({_, node}), do: node
def find_node(_), do: Node.self()
end
defmodule GenStateMachine do
@moduledoc """
A behaviour module for implementing a state machine.
The advantage of using this module is that it will have a standard set of
interface functions and include functionality for tracing and error reporting.
It will also fit into a supervision tree.
## Example
The `GenStateMachine` behaviour abstracts the state machine. Developers are only
required to implement the callbacks and functionality they are interested in.
Let's start with a code example and then explore the available callbacks.
Imagine we want a `GenStateMachine` that works like a switch, allowing us to
turn it on and off, as well as see how many times the switch has been turned
on:
defmodule Switch do
use GenStateMachine
# Callbacks
def handle_event(:cast, :flip, :off, data) do
{:next_state, :on, data + 1}
end
def handle_event(:cast, :flip, :on, data) do
{:next_state, :off, data}
end
def handle_event({:call, from}, :get_count, state, data) do
{:next_state, state, data, [{:reply, from, data}]}
end
end
# Start the server
{:ok, pid} = GenStateMachine.start_link(Switch, {:off, 0})
# This is the client
GenStateMachine.cast(pid, :flip)
#=> :ok
GenStateMachine.call(pid, :get_count)
#=> 1
We start our `Switch` by calling `start_link/3`, passing the module with the
server implementation and its initial argument (a tuple where the first element
is the initial state and the second is the initial data). We can primarily
interact with the state machine by sending two types of messages. **call**
messages expect a reply from the server (and are therefore synchronous) while
**cast** messages do not.
Every time you do a `call/3` or a `cast/2`, the message will be handled by
`handle_event/4`.
We can also use the `:state_functions` callback mode instead of the default,
which is `:handle_event_function`:
defmodule Switch do
use GenStateMachine, callback_mode: :state_functions
def off(:cast, :flip, data) do
{:next_state, :on, data + 1}
end
def off(event_type, event_content, data) do
handle_event(event_type, event_content, data)
end
def on(:cast, :flip, data) do
{:next_state, :off, data}
end
def on(event_type, event_content, data) do
handle_event(event_type, event_content, data)
end
def handle_event({:call, from}, :get_count, data) do
{:keep_state_and_data, [{:reply, from, data}]}
end
end
# Start the server
{:ok, pid} = GenStateMachine.start_link(Switch, {:off, 0})
# This is the client
GenStateMachine.cast(pid, :flip)
#=> :ok
GenStateMachine.call(pid, :get_count)
#=> 1
Again, we start our `Switch` by calling `start_link/3`, passing the module
with the server implementation and its initial argument, and interacting with
it via **call** and **cast**.
However, in `:state_functions` callback mode, every time you do a `call/3` or
a `cast/2`, the message will be handled by the `state_name/3` function which
is named the same as the current state.
## Callbacks
In the default `:handle_event_function` callback mode, there are 4 callbacks
required to be implemented. By adding `use GenStateMachine` to your module,
Elixir will automatically define all 4 callbacks for you, leaving it up to you
to implement the ones you want to customize.
In the `:state_functions` callback mode, there are 3 callbacks required to be
implemented. By adding `use GenStateMachine, callback_mode: :state_functions`
to your module, Elixir will automatically define all 3 callbacks for you,
leaving it up to you to implement the ones you want to customize, as well as
`state_name/3` functions named the same as the states you wish to support.
It is important to note that the default implementation of the `code_change/4`
callback results in an `:undefined` error. This is because `code_change/4` is
related to the quite difficult topic of hot upgrades, and if you need it, you
should really be implementing it yourself. In normal use this callback will
not be invoked.
## Name Registration
Both `start_link/3` and `start/3` support registering the `GenStateMachine`
under a given name on start via the `:name` option. Registered names are also
automatically cleaned up on termination. The supported values are:
* an atom - the `GenStateMachine` is registered locally with the given name
using `Process.register/2`.
* `{:global, term}`- the `GenStateMachine` is registered globally with the
given term using the functions in the `:global` module.
* `{:via, module, term}` - the `GenStateMachine` is registered with the given
mechanism and name. The `:via` option expects a module that exports
`register_name/2`, `unregister_name/1`, `whereis_name/1` and `send/2`.
One such example is the `:global` module which uses these functions
for keeping the list of names of processes and their associated PIDs
that are available globally for a network of Erlang nodes.
For example, we could start and register our Switch server locally as follows:
# Start the server and register it locally with name MySwitch
{:ok, _} = GenStateMachine.start_link(Switch, {:off, 0}, name: MySwitch)
# Now messages can be sent directly to MySwitch
GenStateMachine.call(MySwitch, :get_count) #=> 0
Once the server is started, the remaining functions in this module (`call/3`,
`cast/2`, and friends) will also accept an atom, or any `:global` or `:via`
tuples. In general, the following formats are supported:
* a `pid`
* an `atom` if the server is locally registered
* `{atom, node}` if the server is locally registered at another node
* `{:global, term}` if the server is globally registered
* `{:via, module, name}` if the server is registered through an alternative
registry
## Client / Server APIs
Although in the example above we have used `GenStateMachine.start_link/3` and
friends to directly start and communicate with the server, most of the
time we don't call the `GenStateMachine` functions directly. Instead, we wrap
the calls in new functions representing the public API of the server.
Here is a better implementation of our Switch module:
defmodule Switch do
use GenStateMachine
# Client
def start_link() do
GenStateMachine.start_link(Switch, {:off, 0})
end
def flip(pid) do
GenStateMachine.cast(pid, :flip)
end
def get_count(pid) do
GenStateMachine.call(pid, :get_count)
end
# Server (callbacks)
def handle_event(:cast, :flip, :off, data) do
{:next_state, :on, data + 1}
end
def handle_event(:cast, :flip, :on, data) do
{:next_state, :off, data}
end
def handle_event({:call, from}, :get_count, state, data) do
{:next_state, state, data, [{:reply, from, data}]}
end
def handle_event(event_type, event_content, state, data) do
# Call the default implementation from GenStateMachine
super(event_type, event_content, state, data)
end
end
In practice, it is common to have both server and client functions in
the same module. If the server and/or client implementations are growing
complex, you may want to have them in different modules.
## Receiving custom messages
The goal of a `GenStateMachine` is to abstract the "receive" loop for
developers, automatically handling system messages, support code change,
synchronous calls and more. Therefore, you should never call your own
"receive" inside the `GenStateMachine` callbacks as doing so will cause the
`GenStateMachine` to misbehave. If you want to receive custom messages, they
will be delivered to the usual handler for your callback mode with event_type
`:info`.
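For example (a sketch, using the default `:handle_event_function` callback
mode), a plain process message such as `{:tcp, socket, data}` would be
handled as:
def handle_event(:info, {:tcp, _socket, data}, state, acc) do
{:next_state, state, [data | acc]}
end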
## Learn more
If you wish to find out more about gen_statem, the documentation and links
in Erlang can provide extra insight.
* [`:gen_statem` module documentation](http://erlang.org/documentation/doc-8.0-rc1/lib/stdlib-3.0/doc/html/gen_statem.html)
* [gen_statem Behaviour – OTP Design Principles](http://erlang.org/documentation/doc-8.0-rc1/doc/design_principles/statem.html)
"""
@typedoc """
The term representing the current state.
In `:handle_event_function` callback mode, any term.
In `:state_functions` callback mode, an atom.
"""
@type state :: state_name | term
@typedoc """
The atom representing the current state in `:state_functions` callback mode.
"""
@type state_name :: atom
@typedoc """
The persistent data (similar to a GenServer's `state`) for the GenStateMachine.
"""
@type data :: term
@typedoc """
The source of the current event.
`{:call, from}` will be received as a result of a call.
`:cast` will be received as a result of a cast.
`:info` will be received as a result of any regular process messages received.
`:timeout` will be received as a result of a `:timeout` action.
`:state_timeout` will be received as a result of a `:state_timeout` action.
`:internal` will be received as a result of a `:next_event` action.
See the erlang
[documentation](http://erlang.org/documentation/doc-9.0/lib/stdlib-3.4/doc/html/gen_statem.html#type-event_type)
for details.
"""
@type event_type :: :gen_statem.event_type()
@typedoc """
The callback mode for the GenStateMachine.
See the Example section above for more info.
"""
@type callback_mode :: :state_functions | :handle_event_function
@typedoc """
The message content received as the result of an event.
"""
@type event_content :: term
@typedoc """
State transition actions.
They may be invoked by returning them from a state function or init/1.
If present in a list of actions, they are executed in order, and any that set
transition options (postpone, hibernate, and timeout) override any previously
provided options of the same type.
If the state changes, the queue of incoming events is reset to start with the
oldest postponed.
All events added as a result of a `:next_event` action are inserted in the
queue to be processed before all other events. An event of type `:internal`
should be used when you want to reliably distinguish an event inserted this
way from an external event.
See the erlang
[documentation](http://erlang.org/documentation/doc-9.0/lib/stdlib-3.4/doc/html/gen_statem.html#type-action)
for the possible values.
"""
@type action :: :gen_statem.action()
@doc """
Invoked when the server is started. `start_link/3` (or `start/3`) will
block until it returns.
`args` is the argument term (second argument) passed to `start_link/3`.
Returning `{:ok, state, data}` will cause `start_link/3` to return
`{:ok, pid}` and the process to enter its loop.
Returning `{:ok, state, data, actions}` is similar to `{:ok, state, data}`
except the provided actions will be executed.
Returning `:ignore` will cause `start_link/3` to return `:ignore` and the
process will exit normally without entering the loop or calling `terminate/3`.
If used when part of a supervision tree the parent supervisor will not fail
to start nor immediately try to restart the `GenStateMachine`. The remainder
of the supervision tree will be (re)started and so the `GenStateMachine`
should not be required by other processes. It can be started later with
`Supervisor.restart_child/2` as the child specification is saved in the parent
supervisor. The main use cases for this are:
* The `GenStateMachine` is disabled by configuration but might be enabled
later.
* An error occurred and it will be handled by a different mechanism than the
`Supervisor`. Likely this approach involves calling
`Supervisor.restart_child/2` after a delay to attempt a restart.
Returning `{:stop, reason}` will cause `start_link/3` to return
`{:error, reason}` and the process to exit with reason `reason` without
entering the loop or calling `terminate/3`.
This function can optionally throw a result to return it.
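For example, a minimal sketch (the state name and data shape are
hypothetical):

    def init(initial_count) do
      {:ok, :idle, initial_count}
    end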
"""
@callback init(args :: term) :: :gen_statem.init_result(state)
@doc """
Whenever a `GenStateMachine` in callback mode `:state_functions` receives a
call, cast, or normal process message, a state function is called.
This spec exists to document the callback, but in actual use the name of the
function is probably not going to be `state_name`. Instead, there will be at
least one state function named after each state you wish to handle. See the
Examples section above for more info.
These functions can optionally throw a result to return it.
See the erlang [documentation](http://erlang.org/documentation/doc-9.0/lib/stdlib-3.4/doc/html/gen_statem.html#type-event_handler_result)
for a complete reference.
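For example, a machine with hypothetical `:off` and `:on` states might define
(a sketch):

    def off(:cast, :flip, data) do
      {:next_state, :on, data + 1}
    end

    def on(:cast, :flip, data) do
      {:next_state, :off, data}
    end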
"""
@callback state_name(event_type, event_content, data) ::
:gen_statem.event_handler_result(state_name())
@doc """
Whenever a `GenStateMachine` in callback mode `:handle_event_function` (the
default) receives a call, cast, or normal process message, this callback will
be invoked.
This function can optionally throw a result to return it.
See the erlang [documentation](http://erlang.org/documentation/doc-9.0/lib/stdlib-3.4/doc/html/gen_statem.html#type-event_handler_result)
for a complete reference.
"""
@callback handle_event(event_type, event_content, state, data) ::
:gen_statem.event_handler_result(state())
@doc """
Invoked when the server is about to exit. It should do any cleanup required.
`reason` is the exit reason, `state` is the current state, and `data` is the
current data of the `GenStateMachine`. The return value is ignored.
`terminate/3` is called if a callback (except `init/1`) returns a `:stop`
tuple, raises, calls `Kernel.exit/1` or returns an invalid value. It may also
be called if the `GenStateMachine` traps exits using `Process.flag/2` *and*
the parent process sends an exit signal.
If part of a supervision tree a `GenStateMachine`'s `Supervisor` will send an
exit signal when shutting it down. The exit signal is based on the shutdown
strategy in the child's specification. If it is `:brutal_kill` the
`GenStateMachine` is killed and so `terminate/3` is not called. However if it
is a timeout the `Supervisor` will send the exit signal `:shutdown` and the
`GenStateMachine` will have the duration of the timeout to call `terminate/3`;
if the process is still alive after the timeout, it is killed.
If the `GenStateMachine` receives an exit signal (that is not `:normal`) from
any process when it is not trapping exits it will exit abruptly with the same
reason and so will not call `terminate/3`. Note that a process does *NOT* trap
exits by default and an exit signal is sent when a linked process exits or its
node is disconnected.
Therefore it is not guaranteed that `terminate/3` is called when a
`GenStateMachine` exits. For such reasons, we usually recommend that important
clean-up happens in separate processes, either through monitoring or through
the links themselves. For example, if the `GenStateMachine` controls a port
(e.g. a `:gen_tcp` socket) or a `File.io_device`, they will be closed when the
`GenStateMachine`'s exit signal is received and do not need to be closed in
`terminate/3`.
If `reason` is not `:normal`, `:shutdown` nor `{:shutdown, term}` an error is
logged.
This function can optionally throw a result, which is ignored.
"""
@callback terminate(reason :: term, state, data) :: any
@doc """
Invoked to change the state of the `GenStateMachine` when a different version
of a module is loaded (hot code swapping) and the state and/or data's term
structure should be changed.
`old_vsn` is the previous version of the module (defined by the `@vsn`
attribute) when upgrading. When downgrading the previous version is wrapped in
a 2-tuple with first element `:down`. `state` is the current state of the
`GenStateMachine`, `data` is the current data, and `extra` is any extra data
required to change the state.
Returning `{:ok, new_state, new_data}` changes the state to `new_state`, the
data to `new_data`, and the code change is successful.
On OTP versions before 19.1, if you wish to change the callback mode as part
of an upgrade/downgrade, you may return
`{callback_mode, new_state, new_data}`. Note, however, that
for a downgrade you must use the argument `extra`, `{:down, vsn}` from the
argument `old_vsn`, or some other data source to determine what the previous
callback mode was.
Returning `reason` fails the code change with reason `reason`; the state
and data remain the same.
If `code_change/4` raises the code change fails and the loop will continue
with its previous state. Therefore this callback does not usually contain side
effects.
This function can optionally throw a result to return it.
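For example, a sketch migrating the data term from a tuple to a map on
upgrade (the shapes are hypothetical):

    def code_change(_old_vsn, state, {count, name}, _extra) do
      {:ok, state, %{count: count, name: name}}
    end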
"""
@callback code_change(old_vsn :: term | {:down, vsn :: term}, state, data, extra :: term) ::
{:ok, state, data}
| {callback_mode, state, data}
| (reason :: term)
@doc """
Invoked in some cases to retrieve a formatted version of the `GenStateMachine`
status.
This callback can be useful to control the *appearance* of the status of the
`GenStateMachine`. For example, it can be used to return a compact
representation of the `GenStateMachine`'s state/data to avoid having large
terms printed.
It is invoked in the following situations:
* one of `:sys.get_status/1` or `:sys.get_status/2` is invoked to get the
status of the `GenStateMachine`; in such cases, `reason` is `:normal`
* the `GenStateMachine` terminates abnormally and logs an error; in such cases,
`reason` is `:terminate`
`pdict_state_and_data` is a three-element list `[pdict, state, data]` where
`pdict` is a list of `{key, value}` tuples representing the current process
dictionary of the `GenStateMachine`, `state` is the current state of the
`GenStateMachine`, and `data` is the current data.
This function can optionally throw a result to return it.
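For example, a sketch returning a compact term instead of a potentially large
`data` (assumes `data` is a map; the representation is arbitrary):

    def format_status(_reason, [_pdict, state, data]) do
      {state, {:data_size, map_size(data)}}
    end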
"""
@callback format_status(reason :: :normal | :terminate, pdict_state_and_data :: list) :: term
@optional_callbacks state_name: 3, handle_event: 4, format_status: 2
@gen_statem_callback_mode_callback Application.loaded_applications()
|> Enum.find_value(fn {app, _, vsn} ->
app == :stdlib and vsn
end)
|> to_string()
|> String.split(".")
|> (case do
[major] ->
"#{major}.0.0"
[major, minor] ->
"#{major}.#{minor}.0"
[major, minor, patch | _] ->
"#{major}.#{minor}.#{patch}"
end)
|> Version.parse()
|> elem(1)
|> Version.match?(">= 3.1.0")
@doc false
defmacro __using__(args) do
{callback_mode, args} = Keyword.pop(args, :callback_mode, :handle_event_function)
quote location: :keep do
@behaviour GenStateMachine
unless unquote(@gen_statem_callback_mode_callback) do
@before_compile GenStateMachine
end
@gen_statem_callback_mode unquote(callback_mode)
@doc false
def init({state, data}) do
{:ok, state, data}
end
if unquote(@gen_statem_callback_mode_callback) do
def callback_mode do
@gen_statem_callback_mode
end
end
if @gen_statem_callback_mode == :handle_event_function do
@doc false
def handle_event(_event_type, _event_content, _state, _data) do
{:stop, :bad_event}
end
end
@doc false
def terminate(_reason, _state, _data) do
:ok
end
@doc false
def code_change(_old_vsn, _state, _data, _extra) do
:undefined
end
@doc """
Returns a specification to start this module under a supervisor.
See `Supervisor` in Elixir v1.6+.
"""
def child_spec(arg) do
default = %{id: __MODULE__, start: {__MODULE__, :start_link, [arg]}}
Enum.reduce(unquote(args), default, fn
{key, value}, acc when key in [:id, :start, :restart, :shutdown, :type, :modules] ->
Map.put(acc, key, value)
{key, _value}, _acc ->
raise ArgumentError, "unknown key #{inspect(key)} in child specification override"
end)
end
overridable_funcs = [init: 1, terminate: 3, code_change: 4, child_spec: 1]
overridable_funcs =
if @gen_statem_callback_mode == :handle_event_function do
[handle_event: 4] ++ overridable_funcs
else
overridable_funcs
end
defoverridable overridable_funcs
end
end
@doc false
defmacro __before_compile__(_env) do
quote location: :keep do
defoverridable init: 1, code_change: 4
@doc false
def init(args) do
result =
try do
super(args)
catch
thrown -> thrown
end
case result do
{:handle_event_function, _, _} = return -> {:stop, {:bad_return_value, return}}
{:state_functions, _, _} = return -> {:stop, {:bad_return_value, return}}
{:ok, state, data} -> {@gen_statem_callback_mode, state, data}
{:ok, state, data, actions} -> {@gen_statem_callback_mode, state, data, actions}
other -> other
end
end
@doc false
def code_change(old_vsn, state, data, extra) do
result =
try do
super(old_vsn, state, data, extra)
catch
thrown -> thrown
end
case result do
{:handle_event_function, state, data} -> {:handle_event_function, state, data}
{:state_functions, state, data} -> {:state_functions, state, data}
{:ok, state, data} -> {@gen_statem_callback_mode, state, data}
other -> other
end
end
end
end
@doc """
Starts a `GenStateMachine` process linked to the current process.
This is often used to start the `GenStateMachine` as part of a supervision
tree.
Once the server is started, the `init/1` function of the given `module` is
called with `args` as its arguments to initialize the server. To ensure a
synchronized start-up procedure, this function does not return until `init/1`
has returned.
Note that a `GenStateMachine` started with `start_link/3` is linked to the
parent process and will exit in case of crashes from the parent. The
`GenStateMachine` will also exit due to the `:normal` reason in case it is
configured to trap exits in the `init/1` callback.
## Options
* `:name` - used for name registration as described in the "Name
registration" section of the module documentation
* `:timeout` - if present, the server is allowed to spend the given number of
milliseconds initializing or it will be terminated and the start function
will return `{:error, :timeout}`
* `:debug` - if present, the corresponding function in the [`:sys`
module](http://www.erlang.org/doc/man/sys.html) is invoked
* `:spawn_opt` - if present, its value is passed as options to the
underlying process as in `Process.spawn/4`
## Return values
If the server is successfully created and initialized, this function returns
`{:ok, pid}`, where `pid` is the pid of the server. If a process with the
specified server name already exists, this function returns
`{:error, {:already_started, pid}}` with the pid of that process.
If the `init/1` callback fails with `reason`, this function returns
`{:error, reason}`. Otherwise, if it returns `{:stop, reason}`
or `:ignore`, the process is terminated and this function returns
`{:error, reason}` or `:ignore`, respectively.
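## Examples

Assuming a hypothetical `MyMachine` that relies on the default `init/1`
injected by `use GenStateMachine` (which accepts a `{state, data}` tuple):

    {:ok, pid} = GenStateMachine.start_link(MyMachine, {:off, 0}, name: MyMachine)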
"""
@spec start_link(module, any, GenServer.options()) :: GenServer.on_start()
def start_link(module, args, options \\ []) do
{name, options} = Keyword.pop(options, :name)
if name do
name =
if is_atom(name) do
{:local, name}
else
name
end
:gen_statem.start_link(name, module, args, options)
else
:gen_statem.start_link(module, args, options)
end
end
@doc """
Starts a `GenStateMachine` process without links (outside of a supervision
tree).
See `start_link/3` for more information.
"""
@spec start(module, any, GenServer.options()) :: GenServer.on_start()
def start(module, args, options \\ []) do
{name, options} = Keyword.pop(options, :name)
if name do
name =
if is_atom(name) do
{:local, name}
else
name
end
:gen_statem.start(name, module, args, options)
else
:gen_statem.start(module, args, options)
end
end
@doc """
Stops the server with the given `reason`.
The `terminate/3` callback of the given `server` will be invoked before
exiting. This function returns `:ok` if the server terminates with the
given reason; if it terminates with another reason, the call exits.
This function keeps OTP semantics regarding error reporting.
If the reason is any other than `:normal`, `:shutdown` or
`{:shutdown, _}`, an error report is logged.
"""
@spec stop(GenServer.server(), reason :: term, timeout) :: :ok
def stop(server, reason \\ :normal, timeout \\ :infinity) do
:gen_statem.stop(server, reason, timeout)
end
@doc """
Makes a synchronous call to the `server` and waits for its reply.
The client sends the given `request` to the server and waits until a reply
arrives or a timeout occurs. The appropriate state function will be called on
the server to handle the request.
`server` can be any of the values described in the "Name registration"
section of the documentation for this module.
## Timeouts
`timeout` is an integer greater than zero which specifies how many
milliseconds to wait for a reply, or the atom `:infinity` to wait
indefinitely. The default value is `:infinity`. If no reply is received within
the specified time, the function call fails and the caller exits.
If the caller catches an exit, to avoid getting a late reply in the caller's
inbox, this function spawns a proxy process that does the call. A late reply
gets delivered to the dead proxy process, and hence gets discarded. This is
less efficient than using `:infinity` as a timeout.
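## Examples

A minimal sketch, assuming a hypothetical `MyMachine` that handles a
`:get_count` call:

    GenStateMachine.call(MyMachine, :get_count, 5_000)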
"""
@spec call(GenServer.server(), term, timeout) :: term
def call(server, request, timeout \\ :infinity) do
:gen_statem.call(server, request, timeout)
end
@doc """
Sends an asynchronous request to the `server`.
This function always returns `:ok` regardless of whether
the destination `server` (or node) exists. Therefore it
is unknown whether the destination `server` successfully
handled the message.
The appropriate state function will be called on the server to handle
the request.
"""
@spec cast(GenServer.server(), term) :: :ok
def cast(server, request) do
:gen_statem.cast(server, request)
end
@doc """
Sends replies to clients.
Can be used to explicitly send replies to multiple clients.
This function always returns `:ok`.
See `reply/2` for more information.
"""
@spec reply([:gen_statem.reply_action()]) :: :ok
def reply(replies) do
:gen_statem.reply(replies)
end
@doc """
Replies to a client.
This function can be used to explicitly send a reply to a client that called
`call/3` when the reply cannot be specified in the return value of a state
function.
`client` must be the `from` element of the event type accepted by state
functions. `reply` is an arbitrary term which will be given
back to the client as the return value of the call.
Note that `reply/2` can be called from any process, not just the one
that originally received the call (as long as that process communicated the
`from` argument somehow).
This function always returns `:ok`.
## Examples
def handle_event({:call, from}, :reply_in_one_second, _state, _data) do
  Process.send_after(self(), {:reply, from}, 1_000)
  :keep_state_and_data
end
def handle_event(:info, {:reply, from}, _state, _data) do
  GenStateMachine.reply(from, :one_second_has_passed)
  :keep_state_and_data
end
"""
@spec reply(GenServer.from(), term) :: :ok
def reply(client, reply) do
:gen_statem.reply(client, reply)
end
end
|
lib/gen_state_machine.ex
| 0.892064
| 0.793706
|
gen_state_machine.ex
|
starcoder
|
defmodule AWS.Batch do
@moduledoc """
Batch
Using Batch, you can run batch computing workloads on the Amazon Web Services
Cloud.
Batch computing is a common means for developers, scientists, and engineers to
access large amounts of compute resources. Batch uses the advantages of this
computing workload to remove the undifferentiated heavy lifting of configuring
and managing required infrastructure. At the same time, it also adopts a
familiar batch computing software approach. Given these advantages, Batch can
help you to efficiently provision resources in response to jobs submitted, thus
effectively helping you to eliminate capacity constraints, reduce compute costs,
and deliver your results more quickly.
As a fully managed service, Batch can run batch computing workloads of any
scale. Batch automatically provisions compute resources and optimizes workload
distribution based on the quantity and scale of your specific workloads. With
Batch, there's no need to install or manage batch computing software. This means
that you can focus your time and energy on analyzing results and solving your
specific problems.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2016-08-10",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "batch",
global?: false,
protocol: "rest-json",
service_id: "Batch",
signature_version: "v4",
signing_name: "batch",
target_prefix: nil
}
end
@doc """
Cancels a job in a Batch job queue.
Jobs that are in the `SUBMITTED`, `PENDING`, or `RUNNABLE` state are canceled.
Jobs that have progressed to `STARTING` or `RUNNING` aren't canceled, but the
API operation still succeeds, even if no job is canceled. These jobs must be
terminated with the `TerminateJob` operation.
"""
def cancel_job(%Client{} = client, input, options \\ []) do
url_path = "/v1/canceljob"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a Batch compute environment.
You can create `MANAGED` or `UNMANAGED` compute environments. `MANAGED` compute
environments can use Amazon EC2 or Fargate resources. `UNMANAGED` compute
environments can only use EC2 resources.
In a managed compute environment, Batch manages the capacity and instance types
of the compute resources within the environment. This is based on the compute
resource specification that you define or the [launch template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
that you specify when you create the compute environment. You can choose to use
EC2 On-Demand Instances and EC2 Spot Instances, or Fargate and Fargate Spot
capacity, in your managed compute environment. You can
optionally set a maximum price so that Spot Instances only launch when the Spot
Instance price is less than a specified percentage of the On-Demand price.
Multi-node parallel jobs aren't supported on Spot Instances.
In an unmanaged compute environment, you can manage your own EC2 compute
resources and have a lot of flexibility with how you configure your compute
resources. For example, you can use custom AMIs. However, you must verify that
each of your AMIs meets the Amazon ECS container instance AMI specification. For
more information, see [container instance AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html)
in the *Amazon Elastic Container Service Developer Guide*. After you create
your unmanaged compute environment, you can use the
`DescribeComputeEnvironments` operation to find the Amazon ECS cluster that's
associated with it. Then, launch your container instances into that Amazon ECS
cluster. For more information, see [Launching an Amazon ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html)
in the *Amazon Elastic Container Service Developer Guide*.
Batch doesn't upgrade the AMIs in a compute environment after the environment is
created. For example, it doesn't update the AMIs when a newer version of the
Amazon ECS optimized AMI is available. Therefore, you're responsible for
managing the guest operating system (including its updates and security patches)
and any additional application software or utilities that you install on the
compute resources. To use a new AMI for your Batch jobs, complete these steps:
1. Create a new compute environment with the new AMI.
2. Add the compute environment to an existing job queue.
3. Remove the earlier compute environment from your job queue.
4. Delete the earlier compute environment.
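A minimal sketch of creating a managed environment (all names and values are
hypothetical; `client` is a configured `AWS.Client`):

    AWS.Batch.create_compute_environment(client, %{
      "computeEnvironmentName" => "example-env",
      "type" => "MANAGED",
      "serviceRole" => "arn:aws:iam::123456789012:role/BatchServiceRole"
    })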
"""
def create_compute_environment(%Client{} = client, input, options \\ []) do
url_path = "/v1/createcomputeenvironment"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a Batch job queue.
When you create a job queue, you associate one or more compute environments to
the queue and assign an order of preference for the compute environments.
You also set a priority to the job queue that determines the order that the
Batch scheduler places jobs onto its associated compute environments. For
example, if a compute environment is associated with more than one job queue,
the job queue with a higher priority is given preference for scheduling jobs to
that compute environment.
"""
def create_job_queue(%Client{} = client, input, options \\ []) do
url_path = "/v1/createjobqueue"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a Batch scheduling policy.
"""
def create_scheduling_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/createschedulingpolicy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a Batch compute environment.
Before you can delete a compute environment, you must set its state to
`DISABLED` with the `UpdateComputeEnvironment` API operation and disassociate it
from any job queues with the `UpdateJobQueue` API operation. Compute
environments that use Fargate resources must terminate all active jobs on that
compute environment before deleting the compute environment. If this isn't done,
the compute environment enters an invalid state.
"""
def delete_compute_environment(%Client{} = client, input, options \\ []) do
url_path = "/v1/deletecomputeenvironment"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the specified job queue.
You must first disable submissions for a queue with the `UpdateJobQueue`
operation. All jobs in the queue are eventually terminated when you delete a job
queue. The jobs are terminated at a rate of about 16 jobs each second.
It's not necessary to disassociate compute environments from a queue before
submitting a `DeleteJobQueue` request.
"""
def delete_job_queue(%Client{} = client, input, options \\ []) do
url_path = "/v1/deletejobqueue"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the specified scheduling policy.
You can't delete a scheduling policy that's used in any job queues.
"""
def delete_scheduling_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/deleteschedulingpolicy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deregisters a Batch job definition.
Job definitions are permanently deleted after 180 days.
"""
def deregister_job_definition(%Client{} = client, input, options \\ []) do
url_path = "/v1/deregisterjobdefinition"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes one or more of your compute environments.
If you're using an unmanaged compute environment, you can use the
`DescribeComputeEnvironments` operation to determine the `ecsClusterArn` that you
launch your Amazon ECS container instances into.
"""
def describe_compute_environments(%Client{} = client, input, options \\ []) do
url_path = "/v1/describecomputeenvironments"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes a list of job definitions.
You can specify a `status` (such as `ACTIVE`) to only return job definitions
that match that status.
"""
def describe_job_definitions(%Client{} = client, input, options \\ []) do
url_path = "/v1/describejobdefinitions"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes one or more of your job queues.
"""
def describe_job_queues(%Client{} = client, input, options \\ []) do
url_path = "/v1/describejobqueues"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes a list of Batch jobs.
"""
def describe_jobs(%Client{} = client, input, options \\ []) do
url_path = "/v1/describejobs"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Describes one or more of your scheduling policies.
"""
def describe_scheduling_policies(%Client{} = client, input, options \\ []) do
url_path = "/v1/describeschedulingpolicies"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of Batch jobs.
You must specify only one of the following items:
* A job queue ID to return a list of jobs in that job queue
* A multi-node parallel job ID to return a list of nodes for that
job
* An array job ID to return a list of the children for that job
You can filter the results by job status with the `jobStatus` parameter. If you
don't specify a status, only `RUNNING` jobs are returned.
"""
def list_jobs(%Client{} = client, input, options \\ []) do
url_path = "/v1/listjobs"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of Batch scheduling policies.
"""
def list_scheduling_policies(%Client{} = client, input, options \\ []) do
url_path = "/v1/listschedulingpolicies"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Lists the tags for a Batch resource.
Batch resources that support tags are compute environments, jobs, job
definitions, job queues, and scheduling policies. ARNs for child jobs of array
and multi-node parallel (MNP) jobs are not supported.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/v1/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Registers a Batch job definition.
"""
def register_job_definition(%Client{} = client, input, options \\ []) do
url_path = "/v1/registerjobdefinition"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Submits a Batch job from a job definition.
Parameters that are specified during `SubmitJob` override parameters defined in
the job definition. vCPU and memory requirements that are specified in the
`resourceRequirements` objects in the job definition are the exception. They
can't be overridden this way using the `memory` and `vcpus` parameters. Rather,
you must specify updates to job definition parameters in a
`resourceRequirements` object that's included in the `containerOverrides`
parameter.
Job queues with a scheduling policy are limited to 500 active fair share
identifiers at a time.
Jobs that run on Fargate resources can't be guaranteed to run for more than 14
days. This is because, after 14 days, Fargate resources might become
unavailable and the job might be terminated.
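A minimal sketch of a call (the credentials, job name, queue, and definition
are hypothetical, and the exact return shape is assumed):

    client = AWS.Client.create("access_key_id", "secret_access_key", "us-east-1")
    {:ok, result, _http_response} =
      AWS.Batch.submit_job(client, %{
        "jobName" => "example-job",
        "jobQueue" => "default-queue",
        "jobDefinition" => "hello-world:1"
      })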
"""
def submit_job(%Client{} = client, input, options \\ []) do
url_path = "/v1/submitjob"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Associates the specified tags to a resource with the specified `resourceArn`.
If existing tags on a resource aren't specified in the request parameters, they
aren't changed. When a resource is deleted, the tags that are associated with
that resource are deleted as well. Batch resources that support tags are compute
environments, jobs, job definitions, job queues, and scheduling policies. ARNs
for child jobs of array and multi-node parallel (MNP) jobs are not supported.
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/v1/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Terminates a job in a job queue.
Jobs that are in the `STARTING` or `RUNNING` state are terminated, which causes
them to transition to `FAILED`. Jobs that have not progressed to the `STARTING`
state are canceled.
"""
def terminate_job(%Client{} = client, input, options \\ []) do
url_path = "/v1/terminatejob"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes specified tags from a Batch resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/v1/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
{query_params, input} =
[
{"tagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates a Batch compute environment.
"""
def update_compute_environment(%Client{} = client, input, options \\ []) do
url_path = "/v1/updatecomputeenvironment"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates a job queue.
"""
def update_job_queue(%Client{} = client, input, options \\ []) do
url_path = "/v1/updatejobqueue"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates a scheduling policy.
"""
def update_scheduling_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/updateschedulingpolicy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
|
lib/aws/generated/batch.ex
| 0.921298
| 0.616705
|
batch.ex
|
starcoder
|
defmodule Xlsxir.ParseWorksheet do
alias Xlsxir.{ConvertDate, ConvertDateTime, SaxError}
import Xlsxir.ConvertDate, only: [convert_char_number: 1]
require Logger
@moduledoc """
Holds the SAX event instructions for parsing worksheet data via `Xlsxir.SaxParser.parse/2`
"""
defstruct row: %{}, cell_ref: "", data_type: "", num_style: "", value: "", value_type: nil, max_rows: nil, tid: nil
@doc """
Sax event utilized by `Xlsxir.SaxParser.parse/2`. Takes a pattern and the current state of a struct and recursively parses the
worksheet XML file, ultimately saving a list of cell references and their associated values to the ETS process.
## Parameters
- `arg1` - the XML pattern of the event to match upon
- `state` - the state of the `%Xlsxir.ParseWorksheet{}` struct which temporarily holds applicable data of the current row being parsed
## Example
Each entry in the list created consists of a list containing a cell reference string and the associated value (i.e. `[["A1", "string one"], ...]`).
"""
def sax_event_handler(:startDocument, _state, %{max_rows: max_rows}) do
%__MODULE__{tid: GenServer.call(Xlsxir.StateManager, :new_table), max_rows: max_rows}
end
def sax_event_handler({:startElement,_,'row',_,_}, %__MODULE__{tid: tid, max_rows: max_rows}, _excel) do
%__MODULE__{tid: tid, max_rows: max_rows}
end
def sax_event_handler({:startElement,_,'c',_,xml_attr}, state, %{styles: styles_tid}) do
a = Enum.map(xml_attr, fn(attr) ->
case attr do
{:attribute,'r',_,_,ref} -> {:r, ref}
{:attribute,'s',_,_,style} -> {:s, find_styles(styles_tid, List.to_integer(style))}
{:attribute,'t',_,_,type} -> {:t, type}
_ -> raise "Unknown cell attribute"
end
end)
{cell_ref, num_style, data_type} = case a |> Keyword.keys |> Enum.sort do
[:r] -> {a[:r], nil, nil}
[:r, :s] -> {a[:r], a[:s], nil}
[:r, :t] -> {a[:r], nil, a[:t]}
[:r, :s, :t] -> {a[:r], a[:s], a[:t]}
_ -> raise "Invalid attributes: #{a}"
end
%{state | cell_ref: cell_ref, num_style: num_style, data_type: data_type}
end
def sax_event_handler({:startElement,_,'f',_, _}, state, _) do
%{state | value_type: :formula}
end
def sax_event_handler({:startElement,_,'v',_, _}, state, _) do
%{state | value_type: :value}
end
def sax_event_handler({:characters, value}, state, _) do
case state do
nil -> nil
%{value_type: :value} -> %{state | value: value}
_ -> state
end
end
def sax_event_handler({:endElement,_,'c',_}, %__MODULE__{row: row} = state, excel) do
cell_value = format_cell_value(excel, [state.data_type, state.num_style, state.value])
%{state | row: Enum.into(row, [[to_string(state.cell_ref), cell_value]]), cell_ref: "", data_type: "", num_style: "", value: ""}
end
def sax_event_handler({:endElement,_,'row',_}, %__MODULE__{tid: tid, max_rows: max_rows} = state, _excel) do
unless Enum.empty?(state.row) do
[[row]] = ~r/\d+/ |> Regex.scan(state.row |> List.first |> List.first)
row = row |> String.to_integer
value = state.row |> Enum.reverse
:ets.insert(tid, {row, value})
if !is_nil(max_rows) and row == max_rows, do: raise SaxError, state: state
end
state
end
def sax_event_handler(_, state, _), do: state
defp format_cell_value(%{shared_strings: strings_tid}, list) do
case list do
[ _, _, nil] -> nil # Cell with no value attribute
[ _, _, ""] -> nil # Empty cell with assigned attribute
[ 'e', _, e] -> List.to_string(e) # Type error
[ 's', _, i] -> find_string(strings_tid, List.to_integer(i)) # Type string
[ nil, nil, n] -> convert_char_number(n) # Type number
[ 'n', nil, n] -> convert_char_number(n)
[ nil, 'd', d] -> convert_date_or_time(d) # ISO 8601 type date
[ 'n', 'd', d] -> convert_date_or_time(d)
[ 'd', 'd', d] -> convert_iso_date(d)
[ 'str', _, s] -> List.to_string(s) # Type formula w/ string
[ 'b', _, s] -> s == '1' # Type boolean
['inlineStr', _, s] -> List.to_string(s) # Type string
_ -> raise "Unmapped attribute #{Enum.at(list, 0)}. Unable to process" # Unmapped type
end
end
defp convert_iso_date(value) do
value |> List.to_string |> Date.from_iso8601() |> elem(1) |> Date.to_erl()
end
defp convert_date_or_time(value) do
str = List.to_string(value)
if str == "0" || String.match?(str, ~r/\d\.\d+/) do
ConvertDateTime.from_charlist(value)
else
ConvertDate.from_serial(value)
end
end
defp find_styles(nil, _index), do: nil
defp find_styles(tid, index) do
tid
|> :ets.lookup(index)
|> List.first
|> elem(1)
end
defp find_string(nil, _index), do: nil
defp find_string(tid, index) do
tid
|> :ets.lookup(index)
|> List.first
|> elem(1)
end
end
|
lib/xlsxir/parse_worksheet.ex
| 0.788868
| 0.665449
|
parse_worksheet.ex
|
starcoder
|
defmodule Ash.Query.Operator do
@moduledoc """
An operator is a predicate with a `left` and a `right`
For more information on being a predicate, see `Ash.Filter.Predicate`. Most of the complexities
are there. An operator must meet both behaviours.
"""
@doc """
Create a new predicate. There are various return types possible:
* `{:ok, left, right}` - Return the left/right values of the operator
* `{:ok, operator}` - Return the operator itself, this or the one above are acceptable
* `{:known, boolean}` - If the value is already known, e.g. `1 == 1`
* `{:error, error}` - If there was an error creating the operator
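For example, a hypothetical equality operator might short-circuit when both
sides are equal literals (a sketch):

    def new(left, right) when left == right, do: {:known, true}
    def new(left, right), do: {:ok, left, right}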
"""
@callback new(term, term) ::
{:ok, term, term} | {:ok, term} | {:known, boolean} | {:error, Ash.error()}
@doc """
The implementation of the inspect protocol.
If not defined, it will be inferred
"""
@callback to_string(struct, Inspect.Opts.t()) :: term
@doc "Create a new operator. Pass the module and the left and right values"
def new(mod, left, right) do
case mod.new(left, right) do
{:ok, left, right} -> {:ok, struct(mod, left: left, right: right)}
{:ok, %_{} = op} -> {:ok, op}
{:known, result} -> {:ok, result}
{:error, error} -> {:error, error}
end
end
defmacro __using__(opts) do
unless opts[:operator] do
raise "Operator is required!"
end
quote do
defstruct [
:left,
:right,
operator: unquote(opts[:operator]),
embedded?: false,
__operator__?: true,
__predicate__?: unquote(opts[:predicate?] || false)
]
if unquote(opts[:predicate?]) do
@behaviour Ash.Filter.Predicate
end
alias Ash.Query.Ref
def operator, do: unquote(opts[:operator])
def name, do: unquote(opts[:name] || opts[:operator])
if unquote(opts[:predicate?]) do
@dialyzer {:nowarn_function, match?: 1}
def match?(struct) do
evaluate(struct) not in [nil, false]
end
end
def types do
unquote(opts[:types] || [:any_same_or_ref])
end
import Inspect.Algebra
def to_string(%{left: left, right: right, operator: operator}, opts) do
concat([
to_doc(left, opts),
" ",
to_string(operator),
" ",
to_doc(right, opts)
])
end
defoverridable to_string: 2
defimpl Inspect do
def inspect(%mod{} = op, opts) do
mod.to_string(op, opts)
end
end
end
end
end
|
lib/ash/query/operator/operator.ex
| 0.896597
| 0.763263
|
operator.ex
|
starcoder
|
defmodule Day16 do
@moduledoc """
Advent of Code 2019
Day 16: Flawed Frequency Transmission
"""
alias Day16.{Part1, Part2}
def get_signal() do
Path.join(__DIR__, "inputs/day16.txt")
|> File.read!()
|> String.trim()
|> String.graphemes()
|> Enum.map(&String.to_integer/1)
end
def execute() do
signal = get_signal()
IO.puts("Part 1: #{Part1.run(signal)}")
IO.puts("Part 2: #{Part2.run(signal)}")
end
end
defmodule Day16.Part1 do
def run(signal) do
Enum.reduce(1..100, signal, fn _, signal -> run_fft_algo(signal) end)
|> Enum.take(8)
|> Enum.join()
|> String.to_integer()
end
def run_fft_algo(signal) do
Enum.map(1..length(signal), fn row ->
signal
|> Enum.with_index(1)
|> Enum.map(fn {digit, col} -> digit * get_coefficient(row, col) end)
|> Enum.sum()
|> abs()
|> Integer.mod(10)
end)
end
def get_coefficient(row, col) do
case Integer.mod(div(col, row), 4) do
0 -> 0
1 -> 1
2 -> 0
3 -> -1
end
end
end
defmodule Day16.Part2 do
@moduledoc """
It is clearly not feasible to actually brute force the FFT with 10000
repetitions of the input, considering that the calculation is O(n^2).
There are 651 characters in the input. That means our "real" input signal has
6_510_000 digits. The first seven digits of our input, aka our offset, are
5_977_341.
What is notable is that the latter half of the signal can be computed quickly
in reverse via partial sums, because the coefficients are always `1`.
Furthermore, in the latter half, each digit in the output only relies on the
digits after its position in the input. Since the offset is in the second
half of the signal, we can save on some annoying computations.
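As a small worked example, one phase over the suffix `[5, 6, 7, 8]` reverses
it to `[8, 7, 6, 5]`, whose running sums mod 10 are 8, 5, 1, 6; reversing
back gives `[6, 1, 5, 8]`.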
"""
def run(signal) do
full_signal =
signal
|> List.duplicate(10000)
|> List.flatten()
|> Enum.drop(signal |> get_offset())
Enum.reduce(1..100, full_signal, fn _, signal -> rtl_prefix_sums(signal) end)
|> Enum.take(8)
|> Enum.join()
|> String.to_integer()
end
def get_offset(signal) do
signal
|> Enum.take(7)
|> Enum.join()
|> String.to_integer()
end
def rtl_prefix_sums(signal) do
signal
|> Enum.reverse()
|> Enum.reduce({[], 0}, fn digit, {prefixes, sum} ->
new_sum = (digit + sum) |> Integer.mod(10)
{[new_sum | prefixes], new_sum}
end)
|> elem(0)
end
end
|
lib/day16.ex
| 0.805135
| 0.54462
|
day16.ex
|
starcoder
|
defmodule Guardian.DB do
@moduledoc """
Guardian.DB is a simple module that hooks into Guardian to prevent playback of tokens.
In vanilla Guardian, tokens aren't tracked so the main mechanism
that exists to make a token inactive is to set the expiry and wait until it arrives.
Guardian.DB takes an active role and stores each token in the database,
verifying its presence (based on its jti) when Guardian verifies the token.
If the token is not present in the DB, the Guardian token cannot be verified.
Provides a simple database storage and check for Guardian tokens.
- When generating a token, the token is stored in a database.
- When tokens are verified (channel, session or header) the database is checked for an entry that matches. If none is found, verification results in an error.
- On logout, or when revoking a token, the corresponding entry is removed
# Setup
### Config
Add your configuration to your environment files. You need to specify
* `repo`
You may also configure
* `prefix` - The schema prefix to use
* `schema_name` - The name of the schema to use. Default "guardian_tokens"
* `sweep_interval` - The interval between db sweeps to remove old tokens. Default 60 (mins)
### Sweeper
In order to sweep your expired tokens from the db, you'll need to add `Guardian.DB.Token.SweeperServer`
to your supervision tree.
In your supervisor add it as a worker
```elixir
worker(Guardian.DB.Token.SweeperServer, [])
```
# Migration
Guardian.DB requires a table in your database. Create a migration like the following:
```elixir
create table(:guardian_tokens, primary_key: false) do
add(:jti, :string, primary_key: true)
add(:typ, :string)
add(:aud, :string)
add(:iss, :string)
add(:sub, :string)
add(:exp, :bigint)
add(:jwt, :text)
add(:claims, :map)
timestamps()
end
```
# Setup (Guardian >= 1.0)
Guardian.DB works by hooking into the lifecycle of your token module.
You'll need to add it to
* `after_encode_and_sign`
* `on_verify`
* `on_revoke`
For example:
```elixir
defmodule MyApp.AuthTokens do
use Guardian, otp_app: :my_app
# snip...
def after_encode_and_sign(resource, claims, token, _options) do
with {:ok, _} <- Guardian.DB.after_encode_and_sign(resource, claims["typ"], claims, token) do
{:ok, token}
end
end
def on_verify(claims, token, _options) do
with {:ok, _} <- Guardian.DB.on_verify(claims, token) do
{:ok, claims}
end
end
def on_revoke(claims, token, _options) do
with {:ok, _} <- Guardian.DB.on_revoke(claims, token) do
{:ok, claims}
end
end
end
```
# Setup (Guardian < 1.0)
To use `Guardian.DB` with Guardian less than version 1.0, add Guardian.DB as your
hooks module. In the Guardian configuration:
```elixir
config :guardian, Guardian,
hooks: Guardian.DB
```
"""
alias Guardian.DB.Token
config = Application.get_env(:guardian, Guardian.DB, [])
@repo Keyword.get(config, :repo)
if config == [], do: raise("Guardian.DB configuration is required")
if is_nil(@repo), do: raise("Guardian.DB requires a repo")
@doc """
After the JWT is generated, stores the various fields of it in the DB for tracking.
If the token type does not match the configured types to be stored, the claims are
passed through.
"""
def after_encode_and_sign(resource, type, claims, jwt) do
case store_token(type, claims, jwt) do
{:error, _} -> {:error, :token_storage_failure}
_ -> {:ok, {resource, type, claims, jwt}}
end
end
defp store_token(type, claims, jwt) do
if storable_type?(type) do
Token.create(claims, jwt)
else
:ignore
end
end
@doc """
When a token is verified, check to make sure that it is present in the DB.
If the token is found, the verification continues, if not an error is returned.
If the type of the token does not match the configured token storage types,
the claims are passed through.
"""
def on_verify(claims, jwt) do
case find_token(claims) do
nil -> {:error, :token_not_found}
_ -> {:ok, {claims, jwt}}
end
end
defp find_token(%{"typ" => type} = claims) do
if storable_type?(type) do
Token.find_by_claims(claims)
else
:ignore
end
end
@doc """
When logging out, or revoking a token, removes from the database so the token may no longer be used
"""
def on_revoke(claims, jwt) do
claims
|> Token.find_by_claims()
|> destroy_token(claims, jwt)
end
defp destroy_token(nil, claims, jwt), do: {:ok, {claims, jwt}}
defp destroy_token(model, claims, jwt) do
case repo().delete(model) do
{:error, _} -> {:error, :could_not_revoke_token}
nil -> {:error, :could_not_revoke_token}
_ -> {:ok, {claims, jwt}}
end
end
def repo do
:guardian
|> Application.fetch_env!(Guardian.DB)
|> Keyword.fetch!(:repo)
end
defp token_types() do
:guardian
|> Application.fetch_env!(Guardian.DB)
|> Keyword.get(:token_types, [])
end
defp storable_type?(type), do: storable_type?(type, token_types())
defp storable_type?(_, []), do: true # store all types by default
defp storable_type?(type, types), do: type in types
end
|
lib/guardian/db.ex
| 0.810591
| 0.858481
|
db.ex
|
starcoder
|
defmodule Clone.CLI do
@moduledoc """
Handles the command-line interface for the program.
This module contains the entry point, parses the command-line arguments and options, and houses
the main control flow of the program.
"""
alias Clone.Repo
alias Clone.State
require Logger
@doc """
The entry-point for the program.
"""
@spec main([String.t()]) :: no_return
def main(args) do
args
|> parse_arguments
|> run
end
@doc """
Parses the command-line arguments and generates the `Clone.State` struct.
"""
@spec parse_arguments([String.t()]) :: State.t()
def parse_arguments(args) do
args
|> OptionParser.parse(
switches: [
debug: :boolean,
verbose: :boolean
],
aliases: [
d: :debug,
v: :verbose
]
)
|> State.new()
end
@doc """
Executes the main control flow of the program.
"""
@spec run(State.t()) :: no_return
def run(%State{} = state) do
state
|> set_verbosity
|> parse_location
|> parse_repo_dir
|> ensure_parent_directory
|> execute_hub
|> set_exit_status
end
defmacrop log_state(state) do
quote do
Logger.debug(fn -> "Starting #{format_function(__ENV__.function)}" end)
Logger.debug(fn -> "State: #{inspect(unquote(state))}" end)
end
end
defp ensure_directory(directory) do
Logger.debug(fn -> "Ensure `#{directory}` exists" end)
case File.mkdir(directory) do
:ok -> :ok
{:error, :eexist} -> :ok
error -> error
end
end
defp ensure_parent_directory(%{repo_dir: repo_dir} = state) do
log_state(state)
:ok =
repo_dir
|> Path.dirname()
|> ensure_directory
state
end
defp execute_hub(%{location: location, repo_dir: repo_dir} = state) do
log_state(state)
execute_hub(["clone", location, repo_dir])
end
defp execute_hub(list) when is_list(list) do
Logger.info(fn -> "Execute `hub #{Enum.join(list, " ")}`" end)
System.cmd("hub", list)
end
defp format_function({name, arity}), do: "#{name}/#{arity}"
defp parse_location(%{arguments: [head | _]} = state) do
log_state(state)
%State{state | location: head}
end
defp parse_repo_dir(%{location: location} = state) do
log_state(state)
{owner, repo} = Repo.parse_location(location)
%State{state | repo_dir: Path.join([repo_home(), owner, repo])}
end
defp repo_home do
"REPO_HOME"
|> System.get_env()
|> Path.expand()
end
defp set_exit_status({_, status}), do: exit({:shutdown, status})
defp set_verbosity(%{options: %{debug: true}} = state) do
log_state(state)
Logger.configure_backend(:console, level: :debug)
System.put_env("HUB_VERBOSE", "1")
state
end
defp set_verbosity(%{options: %{verbose: true}} = state) do
log_state(state)
Logger.configure_backend(:console, level: :info)
state
end
defp set_verbosity(state), do: state
end
|
lib/cli.ex
| 0.658308
| 0.41256
|
cli.ex
|
starcoder
|
defmodule Ecto.Adapters.Postgres do
@moduledoc """
This is the adapter module for PostgreSQL. It handles and pools the
connections to the postgres database with poolboy.
## Options
The options should be given via `Ecto.Repo.conf/0`.
`:hostname` - Server hostname;
`:port` - Server port (default: 5432);
`:username` - Username;
`:password` - Password;
`:size` - The number of connections to keep in the pool;
`:max_overflow` - The maximum overflow of connections (see poolboy docs);
`:parameters` - Keyword list of connection parameters;
`:ssl` - Set to true if ssl should be used (default: false);
`:ssl_opts` - A list of ssl options, see ssl docs;
`:lazy` - If false all connections will be started immediately on Repo startup (default: true)
"""
@behaviour Ecto.Adapter
@behaviour Ecto.Adapter.Migrations
@behaviour Ecto.Adapter.Storage
@behaviour Ecto.Adapter.Transactions
@behaviour Ecto.Adapter.TestTransactions
@default_port 5432
@timeout 5000
alias Ecto.Adapters.Postgres.SQL
alias Ecto.Adapters.Postgres.Worker
alias Ecto.Associations.Assoc
alias Ecto.Query.Query
alias Ecto.Query.QueryExpr
alias Ecto.Query.Util
alias Postgrex.TypeInfo
## Adapter API
@doc false
defmacro __using__(_opts) do
quote do
def __postgres__(:pool_name) do
__MODULE__.Pool
end
end
end
@doc false
def start_link(repo, opts) do
{ pool_opts, worker_opts } = prepare_start(repo, opts)
:poolboy.start_link(pool_opts, worker_opts)
end
@doc false
def stop(repo) do
pool_name = repo.__postgres__(:pool_name)
:poolboy.stop(pool_name)
end
@doc false
def all(repo, Query[] = query, opts) do
pg_query = Query[] = query.select |> normalize_select |> query.select
Postgrex.Result[rows: rows] = query(repo, SQL.select(pg_query), [], opts)
# Transform each row based on select expression
transformed =
Enum.map(rows, fn row ->
values = tuple_to_list(row)
transform_row(pg_query.select.expr, values, pg_query.sources) |> elem(0)
end)
transformed
|> Ecto.Associations.Assoc.run(query)
|> preload(repo, query)
end
@doc false
def insert(repo, entity, opts) do
module = elem(entity, 0)
returning = module.__entity__(:keywords, entity)
|> Enum.filter(fn { _, val } -> val == nil end)
|> Keyword.keys
case query(repo, SQL.insert(entity, returning), [], opts) do
Postgrex.Result[rows: [values]] ->
Enum.zip(returning, tuple_to_list(values))
_ ->
[]
end
end
@doc false
def update(repo, entity, opts) do
Postgrex.Result[num_rows: nrows] = query(repo, SQL.update(entity), [], opts)
nrows
end
@doc false
def update_all(repo, query, values, opts) do
Postgrex.Result[num_rows: nrows] = query(repo, SQL.update_all(query, values), [], opts)
nrows
end
@doc false
def delete(repo, entity, opts) do
Postgrex.Result[num_rows: nrows] = query(repo, SQL.delete(entity), [], opts)
nrows
end
@doc false
def delete_all(repo, query, opts) do
Postgrex.Result[num_rows: nrows] = query(repo, SQL.delete_all(query), [], opts)
nrows
end
@doc """
Run custom SQL query on given repo.
## Options
`:timeout` - The time in milliseconds to wait for the call to finish,
`:infinity` will wait indefinitely (default: 5000);
## Examples
iex> Postgres.query(MyRepo, "SELECT $1 + $2", [40, 2])
Postgrex.Result[command: :select, columns: ["?column?"], rows: [{42}], num_rows: 1]
"""
def query(repo, sql, params, opts \\ []) do
timeout = opts[:timeout] || @timeout
repo.log({ :query, sql }, fn ->
use_worker(repo, timeout, fn worker ->
Worker.query!(worker, sql, params, timeout)
end)
end)
end
defp prepare_start(repo, opts) do
pool_name = repo.__postgres__(:pool_name)
{ pool_opts, worker_opts } = Dict.split(opts, [:size, :max_overflow])
pool_opts = pool_opts
|> Keyword.update(:size, 5, &binary_to_integer(&1))
|> Keyword.update(:max_overflow, 10, &binary_to_integer(&1))
pool_opts = [
name: { :local, pool_name },
worker_module: Worker ] ++ pool_opts
worker_opts = worker_opts
|> Keyword.put(:decoder, &decoder/4)
|> Keyword.put_new(:port, @default_port)
{ pool_opts, worker_opts }
end
@doc false
def normalize_select(QueryExpr[expr: { :assoc, _, [_, _] } = assoc] = expr) do
normalize_assoc(assoc) |> expr.expr
end
def normalize_select(QueryExpr[expr: _] = expr), do: expr
defp normalize_assoc({ :assoc, _, [_, _] } = assoc) do
{ var, fields } = Assoc.decompose_assoc(assoc)
normalize_assoc(var, fields)
end
defp normalize_assoc(var, fields) do
nested = Enum.map(fields, fn { _field, nested } ->
{ var, fields } = Assoc.decompose_assoc(nested)
normalize_assoc(var, fields)
end)
{ var, nested }
end
## Result set transformation
defp transform_row({ :{}, _, list }, values, sources) do
{ result, values } = transform_row(list, values, sources)
{ list_to_tuple(result), values }
end
defp transform_row({ :&, _, [_] } = var, values, sources) do
entity = Util.find_source(sources, var) |> Util.entity
entity_size = length(entity.__entity__(:field_names))
{ entity_values, values } = Enum.split(values, entity_size)
if Enum.all?(entity_values, &(nil?(&1))) do
{ nil, values }
else
{ entity.__entity__(:allocate, entity_values), values }
end
end
# Skip records
defp transform_row({ first, _ } = tuple, values, sources) when not is_atom(first) do
{ result, values } = transform_row(tuple_to_list(tuple), values, sources)
{ list_to_tuple(result), values }
end
defp transform_row(list, values, sources) when is_list(list) do
{ result, values } = Enum.reduce(list, { [], values }, fn elem, { res, values } ->
{ result, values } = transform_row(elem, values, sources)
{ [result|res], values }
end)
{ Enum.reverse(result), values }
end
defp transform_row(_, values, _entities) do
[value|values] = values
{ value, values }
end
defp preload(results, repo, Query[] = query) do
pos = Util.locate_var(query.select.expr, { :&, [], [0] })
fields = Enum.map(query.preloads, &(&1.expr)) |> Enum.concat
Ecto.Associations.Preloader.run(results, repo, fields, pos)
end
## Postgrex casting
defp decoder(TypeInfo[sender: "interval"], :binary, default, param) do
{ mon, day, sec } = default.(param)
Ecto.Interval[year: 0, month: mon, day: day, hour: 0, min: 0, sec: sec]
end
defp decoder(TypeInfo[sender: sender], :binary, default, param) when sender in ["timestamp", "timestamptz"] do
{ { year, mon, day }, { hour, min, sec } } = default.(param)
Ecto.DateTime[year: year, month: mon, day: day, hour: hour, min: min, sec: sec]
end
defp decoder(TypeInfo[sender: "date"], :binary, default, param) do
{ year, mon, day } = default.(param)
Ecto.Date[year: year, month: mon, day: day]
end
defp decoder(TypeInfo[sender: sender], :binary, default, param) when sender in ["time", "timetz"] do
{ hour, min, sec } = default.(param)
Ecto.Time[hour: hour, min: min, sec: sec]
end
defp decoder(_type, _format, default, param) do
default.(param)
end
## Transaction API
@doc false
def transaction(repo, opts, fun) do
timeout = opts[:timeout] || @timeout
worker = checkout_worker(repo, timeout)
try do
do_begin(repo, worker, timeout)
value = fun.()
do_commit(repo, worker, timeout)
{ :ok, value }
catch
:throw, { :ecto_rollback, value } ->
do_rollback(repo, worker, timeout)
{ :error, value }
type, term ->
do_rollback(repo, worker, timeout)
:erlang.raise(type, term, System.stacktrace)
after
checkin_worker(repo)
end
end
@doc false
def rollback(_repo, value) do
throw { :ecto_rollback, value }
end
defp use_worker(repo, timeout, fun) do
pool = repo.__postgres__(:pool_name)
key = { :ecto_transaction_pid, pool }
{ worker, in_transaction } =
  if value = Process.get(key) do
    { elem(value, 0), true }
  else
    { :poolboy.checkout(pool, true, timeout), false }
  end
try do
fun.(worker)
after
if !in_transaction do
:poolboy.checkin(pool, worker)
end
end
end
defp checkout_worker(repo, timeout) do
pool = repo.__postgres__(:pool_name)
key = { :ecto_transaction_pid, pool }
case Process.get(key) do
{ worker, counter } ->
Process.put(key, { worker, counter + 1 })
worker
nil ->
worker = :poolboy.checkout(pool, timeout)
Worker.monitor_me(worker)
Process.put(key, { worker, 1 })
worker
end
end
defp checkin_worker(repo) do
pool = repo.__postgres__(:pool_name)
key = { :ecto_transaction_pid, pool }
case Process.get(key) do
{ worker, 1 } ->
Worker.demonitor_me(worker)
:poolboy.checkin(pool, worker)
Process.delete(key)
{ worker, counter } ->
Process.put(key, { worker, counter - 1 })
end
:ok
end
defp do_begin(repo, worker, timeout) do
repo.log(:begin, fn ->
Worker.begin!(worker, timeout)
end)
end
defp do_rollback(repo, worker, timeout) do
repo.log(:rollback, fn ->
Worker.rollback!(worker, timeout)
end)
end
defp do_commit(repo, worker, timeout) do
repo.log(:commit, fn ->
Worker.commit!(worker, timeout)
end)
end
## Test transaction API
@doc false
def begin_test_transaction(repo, opts \\ []) do
timeout = opts[:timeout] || @timeout
pool = repo.__postgres__(:pool_name)
:poolboy.transaction(pool, fn worker ->
do_begin(repo, worker, timeout)
end, timeout)
end
@doc false
def rollback_test_transaction(repo, opts \\ []) do
timeout = opts[:timeout] || @timeout
pool = repo.__postgres__(:pool_name)
:poolboy.transaction(pool, fn worker ->
do_rollback(repo, worker, timeout)
end, timeout)
end
## Storage API
@doc false
def storage_up(opts) do
# TODO: allow the user to specify those options either in the Repo or on command line
database_options = ~s(ENCODING='UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8')
output = run_with_psql opts, "CREATE DATABASE #{ opts[:database] } " <> database_options
cond do
String.length(output) == 0 -> :ok
String.contains?(output, "already exists") -> { :error, :already_up }
true -> { :error, output }
end
end
@doc false
def storage_down(opts) do
output = run_with_psql(opts, "DROP DATABASE #{ opts[:database] }")
cond do
String.length(output) == 0 -> :ok
String.contains?(output, "does not exist") -> { :error, :already_down }
true -> { :error, output }
end
end
  defp run_with_psql(database, sql_command) do
    # Bind the password prefix explicitly; a rebinding inside `if` does not
    # leak out, so the PGPASSWORD prefix would otherwise be dropped.
    env_prefix =
      if password = database[:password] do
        ~s(PGPASSWORD=#{ password } )
      else
        ""
      end
    command =
      env_prefix <>
      ~s(psql --quiet -U #{ database[:username] } ) <>
      ~s(--host #{ database[:hostname] } ) <>
      ~s(-c "#{ sql_command };" )
    System.cmd command
  end
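  # For example (illustrative values), a call with username "postgres",
  # hostname "localhost" and password "secret" shells out to:
  #
  #     PGPASSWORD=secret psql --quiet -U postgres --host localhost -c "DROP DATABASE my_db;"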
## Migration API
@doc false
def migrate_up(repo, version, commands) do
case check_migration_version(repo, version) do
Postgrex.Result[num_rows: 0] ->
transaction(repo, [], fn ->
Enum.each(commands, &query(repo, &1, []))
insert_migration_version(repo, version)
end)
:ok
_ ->
:already_up
end
end
@doc false
def migrate_down(repo, version, commands) do
case check_migration_version(repo, version) do
Postgrex.Result[num_rows: 0] ->
:missing_up
_ ->
transaction(repo, [], fn ->
Enum.each(commands, &query(repo, &1, []))
delete_migration_version(repo, version)
end)
:ok
end
end
@doc false
def migrated_versions(repo) do
create_migrations_table(repo)
Postgrex.Result[rows: rows] = query(repo, "SELECT version FROM schema_migrations", [])
Enum.map(rows, &elem(&1, 0))
end
defp create_migrations_table(repo) do
query(repo, "CREATE TABLE IF NOT EXISTS schema_migrations (id serial primary key, version bigint)", [])
end
defp check_migration_version(repo, version) do
create_migrations_table(repo)
query(repo, "SELECT version FROM schema_migrations WHERE version = #{version}", [])
end
defp insert_migration_version(repo, version) do
query(repo, "INSERT INTO schema_migrations(version) VALUES (#{version})", [])
end
defp delete_migration_version(repo, version) do
query(repo, "DELETE FROM schema_migrations WHERE version = #{version}", [])
end
end
| lib/ecto/adapters/postgres.ex | 0.832985 | 0.436202 | postgres.ex | starcoder |
defmodule Cldr.Number.Symbol do
@moduledoc """
Functions to manage the symbol definitions for a locale and
number system.
"""
alias Cldr.Locale
alias Cldr.LanguageTag
alias Cldr.Number.System
defstruct [
:decimal,
:group,
:exponential,
:infinity,
:list,
:minus_sign,
:nan,
:per_mille,
:percent_sign,
:plus_sign,
:superscripting_exponent,
:time_separator
]
@type t :: %__MODULE__{
decimal: String.t(),
group: String.t(),
exponential: String.t(),
infinity: String.t(),
list: String.t(),
minus_sign: String.t(),
nan: String.t(),
per_mille: String.t(),
percent_sign: String.t(),
plus_sign: String.t(),
superscripting_exponent: String.t(),
time_separator: String.t()
}
@doc """
Returns a map of `Cldr.Number.Symbol.t` structs of the number symbols for each
of the number systems of a locale.
## Arguments
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`. The
default is `Cldr.get_locale/1`.
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
  ## Example
iex> Cldr.Number.Symbol.number_symbols_for("th", TestBackend.Cldr)
{:ok, %{
latn: %Cldr.Number.Symbol{
decimal: ".",
exponential: "E",
group: ",",
infinity: "∞",
list: ";",
minus_sign: "-",
nan: "NaN",
per_mille: "‰",
percent_sign: "%",
plus_sign: "+",
superscripting_exponent: "×",
time_separator: ":"
},
thai: %Cldr.Number.Symbol{
decimal: ".",
exponential: "E",
group: ",",
infinity: "∞",
list: ";",
minus_sign: "-",
nan: "NaN",
per_mille: "‰",
percent_sign: "%",
plus_sign: "+",
superscripting_exponent: "×",
time_separator: ":"
}
}}
"""
@spec number_symbols_for(LanguageTag.t() | Locale.locale_name(), Cldr.backend()) ::
{:ok, map()} | {:error, {module(), String.t()}}
def number_symbols_for(locale, backend) do
Module.concat(backend, Number.Symbol).number_symbols_for(locale)
end
@doc """
Returns the number symbols for a specific locale and number system.
## Options
* `locale` is any valid locale name returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`. The
default is `Cldr.get_locale/1`.
* `number_system` is any number system name returned by
`Cldr.known_number_systems/0` or a number system type
returned by `Cldr.known_number_system_types/0`
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
## Example
iex> Cldr.Number.Symbol.number_symbols_for("th", "thai", TestBackend.Cldr)
{:ok, %Cldr.Number.Symbol{
decimal: ".",
exponential: "E",
group: ",",
infinity: "∞",
list: ";",
minus_sign: "-",
nan: "NaN",
per_mille: "‰",
percent_sign: "%",
plus_sign: "+",
superscripting_exponent: "×",
time_separator: ":"
}}
"""
@spec number_symbols_for(
LanguageTag.t() | Locale.locale_name(),
System.system_name(),
Cldr.backend()
) :: {:ok, map()} | {:error, {Cldr.NoNumberSymbols, String.t()}}
def number_symbols_for(%LanguageTag{} = locale, number_system, backend) do
with {:ok, system_name} <-
Cldr.Number.System.system_name_from(number_system, locale, backend),
{:ok, symbols} <- number_symbols_for(locale, backend) do
symbols
|> Map.get(system_name)
|> symbols_return(locale, number_system)
end
end
def number_symbols_for(locale_name, number_system, backend) do
with {:ok, locale} <- Cldr.validate_locale(locale_name, backend) do
number_symbols_for(locale, number_system, backend)
end
end
@doc """
  Returns all decimal symbols defined
  by the locales configured in the given backend, as a list.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
"""
def all_decimal_symbols(backend) do
Module.concat(backend, Number.Symbol).all_decimal_symbols
end
@doc """
  Returns all grouping symbols defined
  by the locales configured in the given backend, as a list.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
"""
def all_grouping_symbols(backend) do
Module.concat(backend, Number.Symbol).all_grouping_symbols
end
@doc """
  Returns all decimal symbols defined
  by the locales configured in the given backend,
  concatenated into a single string.
  This string can be used as a character class
  when building a regular expression.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
"""
def all_decimal_symbols_class(backend) do
Module.concat(backend, Number.Symbol).all_decimal_symbols_class
end
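  # Example (illustrative sketch, assuming `MyApp.Cldr` is a configured
  # backend): embedding the class in a regular expression.
  #
  #     class = Cldr.Number.Symbol.all_decimal_symbols_class(MyApp.Cldr)
  #     Regex.compile!("[" <> class <> "]")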
@doc """
  Returns all grouping symbols defined
  by the locales configured in the given backend,
  concatenated into a single string.
  This string can be used as a character class
  when building a regular expression.
## Arguments
* `backend` is any module that includes `use Cldr` and therefore
is a `Cldr` backend module
"""
def all_grouping_symbols_class(backend) do
Module.concat(backend, Number.Symbol).all_grouping_symbols_class
end
@doc false
def symbols_return(nil, locale, number_system) do
{
:error,
{
Cldr.NoNumberSymbols,
"The locale #{inspect(locale)} does not have " <>
"any symbols for number system #{inspect(number_system)}"
}
}
end
@doc false
def symbols_return(symbols, _locale, _number_system) do
{:ok, symbols}
end
end
| lib/cldr/number/symbol.ex | 0.928805 | 0.618032 | symbol.ex | starcoder |
defmodule Timex.Parse.DateTime.Parsers.ISO8601Extended do
use Combine.Helpers
alias Combine.ParserState
defparser parse(%ParserState{status: :ok, column: col, input: input, results: results} = state) do
case parse_extended(input) do
{:ok, parts, len, remaining} ->
%{state | :column => col + len, :input => remaining, :results => [Enum.reverse(parts)|results]}
{:error, reason, count} ->
%{state | :status => :error, :column => col + count, :error => reason}
end
end
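  # Illustrative sketch of the raw tokenizer output (token names as produced
  # by the clauses below; the list is accumulated most-recent-first and then
  # reversed by `defparser` above):
  #
  #     parse_extended("2014-07-19T14:30:00Z")
  #     #=> {:ok, [zname: "Etc/UTC", sec: 0, min: 30, hour24: 14,
  #     #          day: 19, month: 7, year4: 2014], consumed, ""}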
  def parse_extended(<<>>), do: {:error, "Expected year, but got end of input.", 0}
def parse_extended(input), do: parse_extended(input, :year, [], 0)
def parse_extended(<<y1::utf8,y2::utf8,y3::utf8,y4::utf8,"-",rest::binary>>, :year, acc, count) when
y1 >= ?0 and y1 <= ?9 and
y2 >= ?0 and y2 <= ?9 and
y3 >= ?0 and y3 <= ?9 and
y4 >= ?0 and y4 <= ?9 do
year = String.to_integer(<<y1::utf8,y2::utf8,y3::utf8,y4::utf8>>)
parse_extended(rest, :month, [{:year4, year}|acc], count+4)
end
def parse_extended(<<h::utf8,_::binary>>, :year, _acc, count), do: {:error, "Expected 4 digit year, but got `#{<<h::utf8>>}` instead.", count}
def parse_extended(<<m1::utf8,m2::utf8,"-",rest::binary>>, :month, acc, count) when
m1 >= ?0 and m1 < ?2 and
m2 >= ?0 and m2 <= ?9 do
month = String.to_integer(<<m1::utf8,m2::utf8>>)
cond do
month > 0 and month < 13 ->
parse_extended(rest, :day, [{:month, month}|acc], count+2)
:else ->
{:error, "Expected month between 1-12, but got `#{month}` instead.", count}
end
end
def parse_extended(<<h::utf8,_::binary>>, :month, _acc, count), do: {:error, "Expected 2 digit month, but got `#{<<h::utf8>>}` instead.", count}
def parse_extended(<<d1::utf8,d2::utf8,sep::utf8,rest::binary>>, :day, acc, count) when
d1 >= ?0 and d1 <= ?3 and
d2 >= ?0 and d2 <= ?9 do
cond do
sep in [?T,?\s] ->
day = String.to_integer(<<d1::utf8,d2::utf8>>)
cond do
day > 0 and day < 32 ->
parse_extended(rest, :hour, [{:day, day}|acc], count+3)
:else ->
{:error, "Expected day between 1-31, but got `#{day}` instead.", count}
end
:else ->
{:error, "Expected valid date/time separator (T or space), but got `#{<<sep::utf8>>}` instead.", count+2}
end
end
def parse_extended(<<h::utf8,_::binary>>, :day, _acc, count), do: {:error, "Expected 2 digit day, but got `#{<<h::utf8>>}` instead.", count}
def parse_extended(<<h1::utf8,h2::utf8,rest::binary>>, :hour, acc, count) when
h1 >= ?0 and h1 < ?3 and
h2 >= ?0 and h2 <= ?9 do
hour = String.to_integer(<<h1::utf8,h2::utf8>>)
cond do
hour >= 0 and hour <= 24 ->
case rest do
<<":",rest::binary>> ->
parse_extended(rest, :minute, [{:hour24, hour}|acc], count+3)
_ ->
parse_offset(rest, [{:hour24, hour}|acc], count+2)
end
:else ->
{:error, "Expected hour between 0-24, but got `#{hour}` instead.", count}
end
end
def parse_extended(<<h::utf8,_::binary>>, :hour, _acc, count), do: {:error, "Expected 2 digit hour, but got `#{<<h::utf8>>}` instead.", count}
# Minutes are optional
def parse_extended(<<m1::utf8,m2::utf8,rest::binary>>, :minute, acc, count) when
m1 >= ?0 and m1 < ?6 and
m2 >= ?0 and m2 <= ?9 do
minute = String.to_integer(<<m1::utf8,m2::utf8>>)
cond do
minute >= 0 and minute <= 60 ->
case rest do
<<":",rest::binary>> ->
parse_extended(rest, :second, [{:min, minute}|acc], count+3)
_ ->
parse_offset(rest, [{:min, minute}|acc], count+2)
end
:else ->
{:error, "Expected minute between 0-60, but got `#{minute}` instead.", count}
end
end
def parse_extended(<<h::utf8,_::binary>>, :minute, _acc, count), do: {:error, "Expected 2 digit minute, but got `#{<<h::utf8>>}` instead.", count}
# Seconds are optional
def parse_extended(<<s1::utf8,s2::utf8,".",rest::binary>>, :second, acc, count) when # Has fractional seconds
s1 >= ?0 and s1 < ?6 and
s2 >= ?0 and s2 <= ?9 do
case parse_fractional_seconds(rest, count, <<>>) do
{:ok, fraction, count, rest} ->
seconds = String.to_integer(<<s1::utf8,s2::utf8>>)
precision = byte_size(fraction)
fraction = if precision > 6, do: binary_part(fraction, 0, 6), else: fraction
precision = if precision > 6, do: 6, else: precision
fractional = String.to_integer(fraction)
fractional = fractional * div(1_000_000, trunc(:math.pow(10, precision)))
cond do
seconds >= 0 and seconds <= 60 ->
parse_offset(rest, [{:sec_fractional, {fractional, precision}}, {:sec, seconds}|acc], count+2)
:else ->
{:error, "Expected second between 0-60, but got `#{seconds}` instead.", count}
end
{:error, _reason, _count} = err ->
err
end
end
def parse_extended(<<s1::utf8,s2::utf8,rest::binary>>, :second, acc, count) when # No fractional seconds
s1 >= ?0 and s1 < ?6 and
s2 >= ?0 and s2 <= ?9 do
second = String.to_integer(<<s1::utf8,s2::utf8>>)
cond do
second >= 0 and second <= 60 ->
parse_offset(rest, [{:sec, second}|acc], count+2)
:else ->
{:error, "Expected second between 0-60, but got `#{second}` instead.", count}
end
end
def parse_extended(<<h::utf8,_::binary>>, :second, _acc, count), do: {:error, "Expected valid value for seconds, but got `#{<<h::utf8>>}` instead.", count}
def parse_fractional_seconds(<<digit::utf8,rest::binary>>, count, acc) when
digit >= ?0 and digit <= ?9 do
parse_fractional_seconds(rest, count+1, <<acc::binary,digit::utf8>>)
end
def parse_fractional_seconds(_rest, count, "") do
{:error, "Expected at least one digit after the decimal sign, but found none", count}
end
def parse_fractional_seconds(rest, count, acc) do
{:ok, acc, count, rest}
end
def parse_offset(<<"Z",rest::binary>>, acc, count), do: {:ok, [{:zname, "Etc/UTC"}|acc], count+1, rest}
def parse_offset(<<dir::utf8,rest::binary>>, acc, count) when dir in [?+,?-] do
parse_offset(dir, rest, acc, count+1)
end
def parse_offset("", acc, count), do: {:ok, acc, count, ""}
def parse_offset(str, _acc, count), do: {:error, "Expected either Z or a valid timezone offset, but got `#{str}`", count}
# +/-HH:MM:SS (seconds are currently unhandled in offsets)
def parse_offset(dir, <<h1::utf8,h2::utf8,":",m1::utf8,m2::utf8,":",s1::utf8,s2::utf8,rest::binary>>, acc, count) when
h1 >= ?0 and h1 < ?2 and
h2 >= ?0 and h2 <= ?9 and
m1 >= ?0 and m1 < ?6 and
m2 >= ?0 and m2 <= ?9 and
s1 >= ?0 and s1 < ?6 and
s2 >= ?0 and s2 <= ?9 do
    {:ok, [{:zname, <<dir::utf8,h1::utf8,h2::utf8,":",m1::utf8,m2::utf8>>}|acc], count+8, rest}
end
# +/-HH:MM
def parse_offset(dir, <<h1::utf8,h2::utf8,":",m1::utf8,m2::utf8,rest::binary>>, acc, count) when
h1 >= ?0 and h1 < ?2 and
h2 >= ?0 and h2 <= ?9 and
m1 >= ?0 and m1 < ?6 and
m2 >= ?0 and m2 <= ?9 do
{:ok, [{:zname, <<dir::utf8,h1::utf8,h2::utf8,":",m1::utf8,m2::utf8>>}|acc], count+5, rest}
end
# +/-HHMM
def parse_offset(dir, <<h1::utf8,h2::utf8,m1::utf8,m2::utf8,rest::binary>>, acc, count) when
h1 >= ?0 and h1 < ?2 and
h2 >= ?0 and h2 <= ?9 and
m1 >= ?0 and m1 < ?6 and
m2 >= ?0 and m2 <= ?9 do
    {:ok, [{:zname, <<dir::utf8,h1::utf8,h2::utf8,":",m1::utf8,m2::utf8>>}|acc], count+4, rest}
end
# +/-HH
def parse_offset(dir, <<h1::utf8,h2::utf8,rest::binary>>, acc, count) when
h1 >= ?0 and h1 < ?2 and
h2 >= ?0 and h2 <= ?9 do
{:ok, [{:zname, <<dir::utf8,h1::utf8,h2::utf8,":00">>}|acc], count+2, rest}
end
def parse_offset(_, <<h::utf8,_rest::binary>>, _acc, count), do: {:error, "Expected valid offset, but got `#{<<h::utf8>>}` instead.", count}
end
| lib/parse/datetime/parsers/iso8601_extended.ex | 0.533397 | 0.48499 | iso8601_extended.ex | starcoder |
defmodule LDAPoolex do
@doc """
Calls `:eldap.add/3` using a worker of the `pool_name` pool
"""
def add(pool_name, dn, attributes) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:add, dn, attributes})
end)
end
@doc """
Calls `:eldap.modify/3` using a worker of the `pool_name` pool
"""
def modify(pool_name, dn, modify_ops) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:modify, dn, modify_ops})
end)
end
@doc """
Calls `:eldap.modify_dn/5` using a worker of the `pool_name` pool
"""
def modify_dn(pool_name, dn, new_rdn, delete_old_rdn, new_sup_dn) do
:poolboy.transaction(pool_name, fn worker ->
      Connection.call(worker, {:modify_dn, dn, new_rdn, delete_old_rdn, new_sup_dn})
end)
end
@doc """
Calls `:eldap.modify_password/3` using a worker of the `pool_name` pool
"""
def modify_password(pool_name, dn, new_password) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:modify_password, dn, new_password})
end)
end
@doc """
Calls `:eldap.modify_password/4` using a worker of the `pool_name` pool
"""
def modify_password(pool_name, dn, new_password, old_password) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:modify_password, dn, new_password, old_password})
end)
end
@doc """
Calls `:eldap.search/2` using a worker of the `pool_name` pool
"""
def search(pool_name, search_opts) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:search, search_opts})
end)
end
@doc """
Calls `:eldap.delete/2` using a worker of the `pool_name` pool
"""
def delete(pool_name, dn) do
:poolboy.transaction(pool_name, fn worker ->
Connection.call(worker, {:delete, dn})
end)
end
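  # Example (hypothetical sketch, assuming a running pool named :slapd1):
  #
  #     LDAPoolex.search(:slapd1,
  #       base: 'dc=example,dc=org',
  #       scope: :eldap.wholeSubtree(),
  #       filter: :eldap.equalityMatch('uid', 'jdoe')
  #     )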
def child_spec(opts) do
args = [
name: case opts[:name] do
{_, _} -> # already in Supervisor format
opts[:name]
_ ->
{:local, opts[:name]}
end,
worker_module: LDAPoolex.ConnectionWorker,
size: opts[:size] || 5,
max_overflow: opts[:max_overflow] || 5
]
:poolboy.child_spec(opts[:name], args, opts[:ldap_args])
end
@doc """
Launches a supervised LDAP pool
Options:
- `:name`: the name of the pool (from poolboy). Defaults to `{:local, name}`
- `:size`: the initial size of the pool (from poolboy). Defaults to `5`
- `:max_overflow`: the number of *additional* LDAP connections that can be created under
heavy load. Defaults to `5`, which means that by default the maximum number of connections
is `10`
- `:ldap_args`:
    - `:hosts`: the list of LDAP hosts to connect to. Note that this option must be a **list** of
      **charlists** (see the example below). No default
- `:base`: the base DN to use for search. No default. Mandatory to load schema
- `:load_schema`: `boolean()` that indicates whether the LDAP schema should be loaded.
Defaults to `true`
- `:bind_dn`: the DN to use to authenticate. If not set, the anonymous mode will be used
instead
- `:bind_password`: the password associated to the `:bind_dn`
- `:connection_retry_delay`: connection retry delay when the LDAP connection is lost in
milliseconds. Defaults to `3000`
- `:ldap_open_opts`: will be passed as the second parameter of the `:eldap.open/2` function.
Defaults to `[]`
## Example:
```elixir
LDAPoolex.start_link(:slapd1, [ldap_args: [hosts: ['localhost'], base: 'dc=example,dc=org']])
```
"""
  def start_link(pool_name, opts) do
    # Default the pool name to the first argument so that calls like the
    # example above (which omit `:name` in `opts`) work as documented.
    opts = Keyword.put_new(opts, :name, pool_name)
    children =
if opts[:ldap_args][:load_schema] in [true, nil] do
[
child_spec(opts),
{LDAPoolex.Schema, opts}
]
else
[
child_spec(opts)
]
end
Supervisor.start_link(
children,
[strategy: :one_for_one, name: Module.concat(__MODULE__, pool_name)]
)
end
end
| lib/ldapoolex.ex | 0.852997 | 0.600657 | ldapoolex.ex | starcoder |
defmodule ExOpenAI.Completion do
import ExOpenAI
@moduledoc """
All of the OpenAI API Completion endpoints.
"""
@doc """
The `POST /completions` endpoint for the OpenAI API.
Returns the wrapped response with either an `ok` or `error` tuple along
with the `HTTPoison.Response` as the second element in the tuple.
  engine_id: The ID of the engine (e.g. davinci, babbage) to use for this request.
parameters: A `Keyword` list of parameters that will be passed with the
request body as json.
* `prompt` - The prompt(s) to generate completions for, encoded as a string,
a list of strings, or a list of token lists.
* `max_tokens` - The maximum number of tokens to generate. Requests can use up to 2048 tokens shared between
prompt and completion. (One token is roughly 4 characters for normal English text)
* `temperature` - What sampling temperature to use.
* `top_p` - An alternative to sampling with temperature, called nucleus sampling, where the model
considers the results of the tokens with top_p probability mass.
* `n` - How many completions to generate for each prompt.
* `stream` - Whether to stream back partial progress.
* `logprobs` - Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens.
* `echo` - Echo back the prompt in addition to the completion
* `stop` - Up to 4 sequences where the API will stop generating further tokens. The returned text will not
contain the stop sequence.
* `presence_penalty` - Number between 0 and 1 that penalizes new tokens based on whether they appear in the text so far.
* `frequency_penalty` - Number between 0 and 1 that penalizes new tokens based on their existing frequency in the text so far.
* `best_of` - Generates best_of completions server-side and returns the "best" (the one with the lowest log probability per token).
* `logit_bias` - Modify the likelihood of specified tokens appearing in the completion.
options:
* `api_key` - A binary API key to be used for this request. This will
override any API key specified in the config or as an environment variable.
## Examples
```
{:ok, response} = ExOpenAI.Completion.create(:davinci)
{:error, response} = ExOpenAI.Completion.create(:blah)
```
"""
def create(engine_id, parameters \\ [], options \\ []) do
post("engines/#{engine_id}/completions", parameters, options)
end
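  # Illustrative sketch with request parameters (parameter names follow the
  # docs above; reading the key from the environment is an assumption about
  # your configuration):
  #
  #     {:ok, response} =
  #       ExOpenAI.Completion.create(
  #         :davinci,
  #         [prompt: "Say hello", max_tokens: 5],
  #         api_key: System.get_env("OPENAI_API_KEY")
  #       )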
end
| lib/ex_openai/completion.ex | 0.857872 | 0.859546 | completion.ex | starcoder |
defmodule Fastimage do
@moduledoc """
Fastimage finds the dimensions/size or file type of a remote url,
local image file or a binary object given the url, file path or
binary itself respectively.
It streams the smallest amount of data necessary to ascertain the file size.
Supports ".bmp", ".gif", ".jpeg", ".webp" or ".png" image types only.
"""
alias __MODULE__
alias Fastimage.{Dimensions, Error, Parser, Stream, Utils}
@typedoc """
* `:stream_timeout` - Applies to a url only.
An override for the after `:stream_timeout` field
in the `Fastimage.Stream.Acc` struct which in turn determines the timeout in the
processing of the hackney stream. By default the @default_stream_timeout
is used in `Fastimage.Stream.Acc`.
* `:max_error_retries` - Applies to a url only.
An override for the `:max_error_retries` field
in the `Fastimage.Stream.Acc` struct which in turn determines the maximum number
of retries that will be attempted before giving up and returning an error.
By default the @default_max_error_retries is used in `Fastimage.Stream.Acc`.
* `:max_redirect_retries` - Applies to a url only.
An override for the `:max_redirect_retries` field
in the `Fastimage.Stream.Acc` struct which in turn determines the maximum number
of redirects that will be attempted before giving up and returning an error.
By default the @default_max_redirect_retries is used in `Fastimage.Stream.Acc`.
"""
@type fastimage_opts :: [
stream_timeout: non_neg_integer,
max_error_retries: non_neg_integer,
max_redirect_retries: non_neg_integer
]
@type image_type :: :bmp | :gif | :jpeg | :png | :webp
@type source_type :: :url | :file | :binary
defstruct source: nil,
source_type: nil,
image_type: nil,
dimensions: %Dimensions{}
@type t :: %Fastimage{
source: binary() | nil,
source_type: source_type() | nil,
image_type: image_type() | nil,
dimensions: Dimensions.t()
}
@doc ~S"""
Returns the type of image. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.type("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
{:ok, :jpeg}
"""
@spec type(binary(), fastimage_opts()) :: {:ok, image_type()} | {:error, Error.t()}
def type(source, opts \\ []) when is_binary(source) do
case Utils.get_source_type(source) do
:other ->
{:error, %Error{reason: :invalid_input}}
source_type ->
{:ok, %Stream.Acc{image_type: type, stream_ref: stream_ref}} =
get_acc_with_type(source, source_type, opts)
Utils.close_stream(stream_ref)
{:ok, type}
end
end
@doc ~S"""
Returns the type of image. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.type!("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
:jpeg
"""
@spec type!(binary(), fastimage_opts()) :: image_type() | no_return()
def type!(source, opts \\ []) when is_binary(source) do
case type(source, opts) do
{:ok, type} -> type
{:error, %Error{} = error} -> raise(error)
{:error, reason} -> raise(Error, reason)
end
end
@doc """
Returns a `%Fastimage{}` struct with information such as
type and dimensions. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.info("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
{:ok,
%Fastimage{
dimensions: %Fastimage.Dimensions{height: 142, width: 283},
image_type: :jpeg,
source: "https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg",
source_type: :url
}}
"""
@spec info(binary(), fastimage_opts()) :: {:ok, Fastimage.t()} | {:error, Error.t()}
def info(source, opts \\ []) when is_binary(source) do
case Utils.get_source_type(source) do
:other ->
{:error, %Error{reason: :invalid_input}}
source_type ->
info(source, source_type, opts)
end
end
@doc """
Returns a `%Fastimage{}` struct with information such as
type and dimensions. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.info!("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
%Fastimage{
dimensions: %Fastimage.Dimensions{height: 142, width: 283},
image_type: :jpeg,
source: "https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg",
source_type: :url
}
"""
@spec info!(binary(), fastimage_opts()) :: Fastimage.t() | no_return()
def info!(source, opts \\ []) when is_binary(source) do
case info(source, opts) do
{:ok, info} -> info
{:error, %Error{} = error} -> raise(error)
{:error, reason} -> raise(Error, reason)
end
end
@doc """
Returns the dimensions of the image. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.size("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
{:ok, %Fastimage.Dimensions{height: 142, width: 283}}
"""
@spec size(binary(), fastimage_opts()) :: {:ok, Dimensions.t()} | {:error, Error.t()}
def size(source, opts \\ []) when is_binary(source) do
with {:ok, %Fastimage{dimensions: %Fastimage.Dimensions{} = dimensions}} <- info(source, opts) do
{:ok, dimensions}
end
end
@doc """
Returns the dimensions of the image. Accepts a source as a url, binary
object or file path.
## Example
iex> Fastimage.size!("https://raw.githubusercontent.com/stephenmoloney/fastimage/master/priv/test.jpg")
%Fastimage.Dimensions{height: 142, width: 283}
"""
@spec size!(binary(), fastimage_opts()) :: Dimensions.t() | no_return()
def size!(source, opts \\ []) when is_binary(source) do
case size(source, opts) do
{:ok, dimensions} -> dimensions
{:error, %Error{} = error} -> raise(error)
{:error, reason} -> raise(Error, reason)
end
end
# private
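  # get_acc_with_type/3 streams only as much of the source as is needed to
  # read the magic bytes: the first two bytes of the accumulated data are
  # handed to Parser.type/2, which maps them to one of the supported image
  # types.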
defp get_acc_with_type(source, source_type, opts) do
stream_timeout = Keyword.get(opts, :stream_timeout, false)
max_error_retries = Keyword.get(opts, :max_error_retries, false)
max_redirect_retries = Keyword.get(opts, :max_redirect_retries, false)
acc = %Stream.Acc{
source: source,
source_type: source_type
}
acc =
if source_type == :url do
acc
|> maybe_put_option(:stream_timeout, stream_timeout)
|> maybe_put_option(:max_error_retries, max_error_retries)
|> maybe_put_option(:max_redirect_retries, max_redirect_retries)
else
acc
end
with {:ok, %Stream.Acc{} = updated_acc} <- Stream.stream_data(acc),
bytes <- :erlang.binary_part(updated_acc.acc_data, {0, 2}),
{:ok, image_type} <- Parser.type(bytes, updated_acc) do
{:ok, %{updated_acc | image_type: image_type}}
end
end
defp info(source, source_type, opts) do
with {:ok, %Stream.Acc{image_type: type} = acc} <-
get_acc_with_type(source, source_type, opts),
         {:ok, %Dimensions{} = size} <- Parser.size(type, acc) do
Utils.close_stream(acc.stream_ref)
{:ok,
%Fastimage{
source: source,
source_type: source_type,
image_type: type,
dimensions: size
}}
end
end
defp maybe_put_option(%Stream.Acc{} = acc, _option_key, false) do
acc
end
defp maybe_put_option(%Stream.Acc{} = acc, option_key, option_val) do
Map.put(acc, option_key, option_val)
end
end
| lib/fastimage.ex | 0.938548 | 0.634543 | fastimage.ex | starcoder |
defmodule Yacto.Repo.Helper.Helper do
# this module is copied from Ecto.Repo.Queryable
@moduledoc false
require Ecto.Query
# defp field(ix, field) when is_integer(ix) and is_atom(field) do
# {{:., [], [{:&, [], [ix]}, field]}, [], []}
# end
defp assert_schema!(%{from: {_source, schema}}) when schema != nil, do: schema
defp assert_schema!(query) do
raise Ecto.QueryError,
query: query,
message: "expected a from expression with a schema"
end
def query_for_get(repo, _queryable, nil) do
raise ArgumentError, "cannot perform #{inspect(repo)}.get/2 because the given value is nil"
end
def query_for_get(repo, queryable, id) do
query = Ecto.Queryable.to_query(queryable)
schema = assert_schema!(query)
case schema.__schema__(:primary_key) do
[pk] ->
Ecto.Query.from(x in query, where: field(x, ^pk) == ^id)
pks ->
raise ArgumentError,
"#{inspect(repo)}.get/2 requires the schema #{inspect(schema)} " <>
"to have exactly one primary key, got: #{inspect(pks)}"
end
end
def query_for_get_by(_repo, queryable, clauses) do
Ecto.Query.where(queryable, [], ^Enum.to_list(clauses))
end
end
defmodule Yacto.Repo.Helper do
@moduledoc """
Helper functions for your repo.
```
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
use Yacto.Repo.Helper
end
  {person, _created} = MyApp.Repo.get_by_or_insert_for_update(Person, [name: "foo"], %Person{name: "foo", value: 10})
# person is exclusive locked.
changeset = Person.changeset(person, [value: person.value + 5])
_person = MyApp.Repo.update!(changeset)
```
"""
alias Yacto.Repo.Helper.Helper
defmacro __using__(_) do
quote do
def get_for_update(queryable, id, opts \\ []) do
query = Helper.query_for_get(__MODULE__, queryable, id)
query |> Yacto.Query.for_update() |> __MODULE__.one(opts)
end
def get_for_update!(queryable, id, opts \\ []) do
query = Helper.query_for_get(__MODULE__, queryable, id)
query |> Yacto.Query.for_update() |> __MODULE__.one!(opts)
end
def get_by_for_update(queryable, clauses, opts \\ []) do
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> Yacto.Query.for_update() |> __MODULE__.one(opts)
end
def get_by_for_update!(queryable, clauses, opts \\ []) do
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> Yacto.Query.for_update() |> __MODULE__.one!(opts)
end
def find(queryable, clauses, opts \\ []) do
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> __MODULE__.all(opts)
end
def find_for_update(queryable, clauses, opts \\ []) do
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> Yacto.Query.for_update() |> __MODULE__.all(opts)
end
def delete_by(queryable, clauses, opts \\ []) do
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> __MODULE__.delete_all(opts)
end
def delete_by!(queryable, clauses, opts \\ []) do
case delete_by(queryable, clauses, opts) do
{0, _} ->
raise Ecto.NoResultsError, queryable: queryable
otherwise ->
otherwise
end
end
def count(queryable, clauses, opts \\ []) do
require Ecto.Query
query = Helper.query_for_get_by(__MODULE__, queryable, clauses)
query |> Ecto.Query.select(count("*")) |> __MODULE__.one!(opts)
end
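      # Example (illustrative, assuming a Person schema):
      #
      #     MyApp.Repo.count(Person, name: "foo")
      #     #=> 2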
def get_by_or_new(queryable, clauses, default_struct, opts \\ []) do
case __MODULE__.get_by(queryable, clauses, opts) do
nil ->
{default_struct, true}
record ->
{record, false}
end
end
def get_by_or_insert_for_update(queryable, clauses, default_struct_or_changeset, opts \\ []) do
case __MODULE__.get_by(queryable, clauses, opts) do
nil ->
# insert
            try do
              __MODULE__.insert!(default_struct_or_changeset)
            rescue
              _ in Ecto.ConstraintError ->
                # duplicate key
                record = get_by_for_update!(queryable, clauses, opts)
                {record, false}
            else
              record -> {record, true}
            end
_ ->
# retry SELECT with FOR UPDATE
record = get_by_for_update!(queryable, clauses, opts)
{record, false}
end
end
end
end
end
| lib/yacto/repo.ex | 0.628863 | 0.680905 | repo.ex | starcoder |
defmodule Workflow.Persistence do
@read_event_batch_size 100
@moduledoc """
  Database side effects shared by Aggregate and Process Manager servers. Keeping them in a
  segregated file makes the common code easier to test, debug and share between them.
"""
alias Workflow.Container
alias Workflow.Storage
require Logger
@typedoc "positions -> [first, last]"
@type state :: struct() # the aggregate or process manager data structure
@type events :: [struct()]
@type uuid :: String.t
@type reason :: atom
@type stream :: String.t
@doc "Rebuild if events are found, if not found, return the container state with an empty data structure"
def rebuild_from_events(%Container{} = state), do: rebuild_from_events(state, 1)
def rebuild_from_events(%Container{uuid: uuid, module: module, data: data} = state, start_version) do
case Storage.read_stream_forward(uuid, start_version, @read_event_batch_size) do
{:ok, batch} ->
batch_size = length(batch)
# rebuild the aggregate's state from the batch of events
data = apply_events(module, data, batch)
state = %Container{state |
version: start_version - 1 + batch_size,
data: data
}
case batch_size < @read_event_batch_size do
true ->
# end of event stream for aggregate so return its state
state
false ->
# fetch next batch of events to apply to updated aggregate state
rebuild_from_events(state, start_version + @read_event_batch_size)
end
{:error, _} ->
# data-structure does not exist so return empty state
state
end
end
def persist_events([], _aggregate_uuid, _expected_version), do: :ok
def persist_events(pending_events, uuid, expected_version) do
:ok = Storage.append_to_stream(uuid, expected_version, pending_events)
end
@doc "Receive a module that implements apply function, and rebuild the state from events"
def apply_events(module, state, events), do:
Enum.reduce(events, state, &module.apply(&2, &1))
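  # Illustrative sketch: any module implementing `apply/2` can be folded
  # over a list of events.
  #
  #     defmodule Counter do
  #       def apply(state, {:incremented, n}), do: state + n
  #     end
  #
  #     Workflow.Persistence.apply_events(Counter, 0, [incremented: 1, incremented: 2])
  #     #=> 3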
end
| lib/persistence.ex | 0.60871 | 0.499817 | persistence.ex | starcoder |
defmodule Custodian.Github do
@moduledoc """
The GitHub context provides a boundary into the GitHub API client interface
  and associated domain logic. The top-level module provides a way to process
webhook payloads from GitHub.
"""
import Ecto.Query, warn: false
alias Custodian.Github.Processor
alias Custodian.Tasks
@typedoc """
Pull request identifier as a tuple with the repo and integer ID.
"""
@type pull_request :: {Custodian.Bots.Bot.t(), integer}
@doc """
Calls the processing function appropriate for the `type` and `params`.
## Events
  Currently, the app responds to the following GitHub webhook [events]:
  - [`installation`]: whenever repos are installed or uninstalled
  - [`installation_repositories`]: whenever repos are added to the installation
  - [`pull_request`]: whenever a pull request is opened or updated
  - [`pull_request_review`]: whenever a pull request review is submitted
  - [`repository`]: whenever a repository itself changes
  For any other event, it returns `{:error, :unsupported_event}`.
## Examples
iex> process_event("installation", %{})
{:ok, [%Bot]}
iex> process_event("pull_request", %{})
{:ok, [%Bot]}
[events]: https://developer.github.com/webhooks/#events
[`installation`]: https://developer.github.com/v3/activity/events/types/#installationevent
[`installation_repositories`]: https://developer.github.com/v3/activity/events/types/#installationrepositoriesevent
[`pull_request`]: https://developer.github.com/v3/activity/events/types/#pullrequestevent
  [`pull_request_review`]: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent
  [`repository`]: https://developer.github.com/v3/activity/events/types/#repositoryevent
"""
@spec process_event(String.t(), map) :: :ok | {:error, atom}
def process_event(type, params)
def process_event("installation", params) do
Tasks.process(fn -> Processor.installation(params) end)
:ok
end
def process_event("installation_repositories", params) do
Tasks.process(fn -> Processor.installation(params) end)
:ok
end
def process_event("pull_request", params) do
Tasks.process(fn -> Processor.pr(params) end)
:ok
end
def process_event("pull_request_review", params) do
Tasks.process(fn -> Processor.review(params) end)
:ok
end
def process_event("repository", params) do
Tasks.process(fn -> Processor.repo(params) end)
:ok
end
def process_event(_, _) do
{:error, :unsupported_event}
end
end
| lib/custodian/github/github.ex | 0.619586 | 0.525856 | github.ex | starcoder |
defmodule Towwwer.Websites do
@moduledoc """
The Websites context.
"""
require Logger
import Ecto.Query, warn: false
alias Towwwer.Repo
alias Towwwer.Websites.Site
alias Towwwer.Websites.Monitor
alias Towwwer.Tools.Helpers
alias Towwwer.Websites.Report
@doc """
Returns the list of sites.
## Examples
iex> list_sites()
[%Site{}, ...]
"""
@spec list_sites() :: [%Site{}]
def list_sites do
Repo.all(
from s in Site,
order_by: s.base_url
)
end
@doc """
Same as list_sites/0 but preloads associations.
"""
@spec list_sites_with_preloads() :: [%Site{}]
def list_sites_with_preloads do
Repo.all(
from s in Site,
preload: [monitors: [:reports]]
)
end
@doc """
Same as list_sites_with_preloads/0 but only loads the latest report,
and only includes monitors with path of "/".
"""
@spec list_sites_with_latest_root_report() :: [%Site{}]
def list_sites_with_latest_root_report do
reports_query = from r in Report, distinct: r.monitor_id, order_by: [desc: r.updated_at]
monitors_query =
from m in Monitor,
distinct: m.site_id,
where: m.path == "/",
preload: [reports: ^reports_query]
Repo.all(from s in Site, preload: [monitors: ^monitors_query])
end
@doc """
Gets a single site.
Raises `Ecto.NoResultsError` if the Site does not exist.
## Examples
iex> get_site!(123)
%Site{}
iex> get_site!(456)
** (Ecto.NoResultsError)
"""
@spec get_site!(integer()) :: %Site{}
def get_site!(id) do
Site
|> Repo.get!(id)
|> Repo.preload(monitors: [:reports])
end
@doc """
Creates a site.
## Examples
iex> create_site(%{field: value})
{:ok, %Site{}}
iex> create_site(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
@spec create_site(map()) :: {:ok, %Site{}} | {:error, %Ecto.Changeset{}}
def create_site(attrs \\ %{}) do
changeset =
%Site{}
|> Site.changeset(attrs)
|> Repo.insert()
case changeset do
{:ok, site} ->
Logger.info("Site created, running build task for monitors")
Helpers.run_build_task_for_site_monitors(site)
_ ->
Logger.info("Failed to create site, so no reports are built.")
end
changeset
end
@doc """
Updates a site.
## Examples
iex> update_site(site, %{field: new_value})
{:ok, %Site{}}
iex> update_site(site, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
@spec update_site(%Site{}, map()) :: {:ok, %Site{}} | {:error, %Ecto.Changeset{}}
def update_site(%Site{} = site, attrs) do
changeset =
site
|> Site.changeset(attrs)
|> Repo.update()
case changeset do
{:ok, updated_site} ->
Helpers.run_build_task_for_new_site_monitors(updated_site)
_ ->
Logger.info("Failed to update site, so no reports are built.")
end
changeset
end
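  @doc """
  Bumps a site's `updated_at` timestamp by forcing an otherwise empty update.
  """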
  @spec bump_site_timestamp(%Site{}) :: {:ok, %Site{}} | {:error, %Ecto.Changeset{}}
def bump_site_timestamp(%Site{} = site) do
site
|> Site.changeset(%{})
|> Repo.update(force: true)
end
@doc """
Deletes a Site.
## Examples
iex> delete_site(site)
{:ok, %Site{}}
iex> delete_site(site)
{:error, %Ecto.Changeset{}}
"""
@spec delete_site(%Site{}) :: {:ok, %Site{}}
def delete_site(%Site{} = site) do
Repo.delete(site)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking site changes.
## Examples
iex> change_site(site)
%Ecto.Changeset{source: %Site{}}
"""
@spec change_site(%Site{}) :: %Ecto.Changeset{}
def change_site(%Site{} = site) do
Site.changeset(site, %{})
end
@doc """
Returns the list of monitors.
## Examples
iex> list_monitors()
[%Monitor{}, ...]
"""
def list_monitors do
Repo.all(from m in Monitor, preload: [:reports])
end
@doc """
Gets a single monitor.
Raises `Ecto.NoResultsError` if the Monitor does not exist.
## Examples
iex> get_monitor!(123)
%Monitor{}
iex> get_monitor!(456)
** (Ecto.NoResultsError)
"""
def get_monitor!(id) do
Monitor
|> Repo.get!(id)
|> Repo.preload([:reports])
end
@doc """
Creates a monitor.
## Examples
iex> create_monitor(%{field: value})
{:ok, %Monitor{}}
iex> create_monitor(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_monitor(attrs \\ %{}) do
%Monitor{}
|> Monitor.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a monitor.
## Examples
iex> update_monitor(monitor, %{field: new_value})
{:ok, %Monitor{}}
iex> update_monitor(monitor, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_monitor(%Monitor{} = monitor, attrs) do
monitor
|> Monitor.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Monitor.
## Examples
iex> delete_monitor(monitor)
{:ok, %Monitor{}}
iex> delete_monitor(monitor)
{:error, %Ecto.Changeset{}}
"""
def delete_monitor(%Monitor{} = monitor) do
Repo.delete(monitor)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking monitor changes.
## Examples
iex> change_monitor(monitor)
%Ecto.Changeset{source: %Monitor{}}
"""
def change_monitor(%Monitor{} = monitor) do
Monitor.changeset(monitor, %{})
end
@doc """
Returns the list of reports.
## Examples
iex> list_reports()
[%Report{}, ...]
"""
def list_reports do
Repo.all(Report)
end
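  @doc """
  Returns the list of reports for the given monitor, newest first.
  """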
def list_reports_of_monitor(monitor) do
query =
from r in Report,
where: r.monitor_id == ^monitor.id,
order_by: [desc: :inserted_at]
Repo.all(query)
end
@doc """
Gets a single report.
Raises `Ecto.NoResultsError` if the Report does not exist.
## Examples
iex> get_report!(123)
%Report{}
iex> get_report!(456)
** (Ecto.NoResultsError)
"""
def get_report!(id) do
Report
|> Repo.get!(id)
end
@doc """
Get a single report's scores, both desktop and mobile.
Raises `Ecto.NoResultsError` if the Report does not exist.
## Examples
iex> get_report_scores!(123)
%{
desktop: %{
accessibility: 0.88,
best_practices: 0.77,
performance: 0.97,
pwa: 0.4,
seo: 0.77
},
mobile: %{
accessibility: 0.88,
best_practices: 0.77,
performance: 0.84,
pwa: 0.42,
seo: 0.78
}
}
iex> get_report_scores!(14456)
** (Ecto.NoResultsError)
"""
def get_report_scores!(id) do
query =
from r in Report,
where: r.id == ^id,
select: %{
desktop: %{
performance:
fragment("?->'lighthouseResult'->'categories'->'performance'->'score'", r.data),
pwa: fragment("?->'lighthouseResult'->'categories'->'pwa'->'score'", r.data),
seo: fragment("?->'lighthouseResult'->'categories'->'seo'->'score'", r.data),
best_practices:
fragment("?->'lighthouseResult'->'categories'->'best-practices'->'score'", r.data),
accessibility:
fragment("?->'lighthouseResult'->'categories'->'accessibility'->'score'", r.data)
},
mobile: %{
performance:
fragment(
"?->'lighthouseResult'->'categories'->'performance'->'score'",
r.mobile_data
),
pwa: fragment("?->'lighthouseResult'->'categories'->'pwa'->'score'", r.mobile_data),
seo: fragment("?->'lighthouseResult'->'categories'->'seo'->'score'", r.mobile_data),
best_practices:
fragment(
"?->'lighthouseResult'->'categories'->'best-practices'->'score'",
r.mobile_data
),
accessibility:
fragment(
"?->'lighthouseResult'->'categories'->'accessibility'->'score'",
r.mobile_data
)
}
}
Repo.one!(query)
end
@doc """
Returns the latest Report for a given Monitor.
"""
def get_latest_report_for_monitor(monitor) do
reports_query =
from r in Report,
distinct: r.monitor_id,
order_by: [desc: r.updated_at],
where: r.monitor_id == ^monitor.id
Repo.one(reports_query)
end
@doc """
Creates a report.
## Examples
iex> create_report(%{field: value})
{:ok, %Report{}}
iex> create_report(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_report(attrs \\ %{}) do
%Report{}
|> Report.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a report.
## Examples
iex> update_report(report, %{field: new_value})
{:ok, %Report{}}
iex> update_report(report, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_report(%Report{} = report, attrs) do
report
|> Report.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Report.
## Examples
iex> delete_report(report)
{:ok, %Report{}}
iex> delete_report(report)
{:error, %Ecto.Changeset{}}
"""
def delete_report(%Report{} = report) do
Repo.delete(report)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking report changes.
## Examples
iex> change_report(report)
%Ecto.Changeset{source: %Report{}}
"""
def change_report(%Report{} = report) do
Report.changeset(report, %{})
end
end
| lib/towwwer/websites.ex | 0.835886 | 0.407805 | websites.ex | starcoder |
defmodule Elixirdo.Instance.MonadTrans.Writer do
alias Elixirdo.Instance.MonadTrans.Writer, as: WriterT
use Elixirdo.Base
use Elixirdo.Typeclass.Monad.Trans, import_typeclasses: true
use Elixirdo.Typeclass.Monad.Writer, import_monad_writer: true
use Elixirdo.Typeclass.Monoid, import_typeclasses: true
defstruct [:data]
deftype writer_t(w, m, a) :: %WriterT{data: m({a, w()})}
def new(data) do
%WriterT{data: data}
end
def run(%WriterT{data: data}) do
data
end
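  # A writer_t value wraps, in the inner monad `m`, a computation yielding a
  # result `a` together with an accumulated log `w`; logs are combined with
  # the monoid's `mappend`.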
definstance functor(writer_t(w, m), m: functor) do
def fmap(f, writer_t_a) do
map(
fn functor_a ->
Functor.fmap(fn {a, w} -> {f.(a), w} end, functor_a, m)
end,
writer_t_a
)
end
end
definstance applicative(writer_t(w, m), m: applicative, w: monoid) do
def pure(a) do
new(Applicative.pure({a, Monoid.mempty(w)}, m))
end
def ap(writer_t_f, writer_t_a) do
faw = fn {f, w1}, {a, w2} ->
{f.(a), Monoid.mappend(w1, w2, w)}
end
applicative_fw = run(writer_t_f)
applicative_aw = run(writer_t_a)
new(Applicative.lift_a2(faw, applicative_fw, applicative_aw, m))
end
end
definstance monad(writer_t(w, m), m: monad, w: monoid) do
def bind(writer_t_a, afb) do
new(
monad m do
{a, w1} <- run(writer_t_a)
{b, w2} <- run(afb.(a))
          Monad.return({b, Monoid.mappend(w1, w2, w)}, m)
end
)
end
end
definstance monad_trans(writer_t(w, m), m: monad, w: monoid) do
def lift(monad_a) do
new(Monad.lift_m(fn a -> {a, Monoid.mempty(w)} end, monad_a, m))
end
end
definstance monad_writer(writer_t(_w, m), m: monad, _w: monoid) do
def tell(ws) do
new(Monad.return({:ok, ws}, m))
end
def writer({a, ws}) do
new(Monad.return({a, ws}, m))
end
def listen(writer_t_a) do
map(
fn monad_a ->
Monad.lift_m(fn {a, ws} -> {{a, ws}, ws} end, monad_a, m)
end,
writer_t_a
)
end
def pass(writer_t_af) do
map(
fn monad_af ->
Monad.lift_m(fn {{a, f}, ws} -> {a, f.(ws)} end, monad_af, m)
end,
writer_t_af
)
end
end
def map(f, writer_t_a) do
new(f.(run(writer_t_a)))
end
end
| lib/elixirdo/instance/monad_trans/writer.ex | 0.532911 | 0.406685 | writer.ex | starcoder |
defmodule AWS.WorkSpaces do
@moduledoc """
Amazon WorkSpaces Service
Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft
Windows and Amazon Linux desktops for your users.
"""
@doc """
Associates the specified connection alias with the specified directory to
enable cross-Region redirection. For more information, see [ Cross-Region
Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
<note> Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
</note>
"""
def associate_connection_alias(client, input, options \\ []) do
request(client, "AssociateConnectionAlias", input, options)
end
@doc """
Associates the specified IP access control group with the specified
directory.
"""
def associate_ip_groups(client, input, options \\ []) do
request(client, "AssociateIpGroups", input, options)
end
@doc """
Adds one or more rules to the specified IP access control group.
This action gives users permission to access their WorkSpaces from the CIDR
address ranges specified in the rules.
"""
def authorize_ip_rules(client, input, options \\ []) do
request(client, "AuthorizeIpRules", input, options)
end
@doc """
Copies the specified image from the specified Region to the current Region.
For more information about copying images, see [ Copy a Custom WorkSpaces
Image](https://docs.aws.amazon.com/workspaces/latest/adminguide/copy-custom-image.html).
<important> Before copying a shared image, be sure to verify that it has
been shared from the correct AWS account. To determine if an image has been
shared and to see the AWS account ID that owns an image, use the
[DescribeWorkSpaceImages](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaceImages.html)
and
[DescribeWorkspaceImagePermissions](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaceImagePermissions.html)
API operations.
</important>
"""
def copy_workspace_image(client, input, options \\ []) do
request(client, "CopyWorkspaceImage", input, options)
end
@doc """
Creates the specified connection alias for use with cross-Region
redirection. For more information, see [ Cross-Region Redirection for
Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def create_connection_alias(client, input, options \\ []) do
request(client, "CreateConnectionAlias", input, options)
end
@doc """
Creates an IP access control group.
An IP access control group provides you with the ability to control the IP
addresses from which users are allowed to access their WorkSpaces. To
specify the CIDR address ranges, add rules to your IP access control group
and then associate the group with your directory. You can add rules when
you create the group or at any time using `AuthorizeIpRules`.
There is a default IP access control group associated with your directory.
If you don't associate an IP access control group with your directory, the
default group is used. The default group includes a default rule that
allows users to access their WorkSpaces from anywhere. You cannot modify
the default IP access control group for your directory.
"""
def create_ip_group(client, input, options \\ []) do
request(client, "CreateIpGroup", input, options)
end
@doc """
Creates the specified tags for the specified WorkSpaces resource.
"""
def create_tags(client, input, options \\ []) do
request(client, "CreateTags", input, options)
end
@doc """
Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are
created.
"""
def create_workspaces(client, input, options \\ []) do
request(client, "CreateWorkspaces", input, options)
end
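  # Illustrative sketch (the input shape follows the CreateWorkspaces API;
  # `client` is assumed to be an already-configured AWS client):
  #
  #     AWS.WorkSpaces.create_workspaces(client, %{
  #       "Workspaces" => [
  #         %{"DirectoryId" => "d-906732325d",
  #           "UserName" => "jdoe",
  #           "BundleId" => "wsb-abcdefghij"}
  #       ]
  #     })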
@doc """
Deletes the specified connection alias. For more information, see [
Cross-Region Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
<important> **If you will no longer be using a fully qualified domain name
(FQDN) as the registration code for your WorkSpaces users, you must take
certain precautions to prevent potential security issues.** For more
information, see [ Security Considerations if You Stop Using Cross-Region
Redirection](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html#cross-region-redirection-security-considerations).
</important> <note> To delete a connection alias that has been shared, the
shared account must first disassociate the connection alias from any
directories it has been associated with. Then you must unshare the
connection alias from the account it has been shared with. You can delete a
connection alias only after it is no longer shared with any accounts or
associated with any directories.
</note>
"""
def delete_connection_alias(client, input, options \\ []) do
request(client, "DeleteConnectionAlias", input, options)
end
@doc """
Deletes the specified IP access control group.
You cannot delete an IP access control group that is associated with a
directory.
"""
def delete_ip_group(client, input, options \\ []) do
request(client, "DeleteIpGroup", input, options)
end
@doc """
Deletes the specified tags from the specified WorkSpaces resource.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Deletes the specified image from your account. To delete an image, you must
first delete any bundles that are associated with the image and unshare the
image if it is shared with other accounts.
"""
def delete_workspace_image(client, input, options \\ []) do
request(client, "DeleteWorkspaceImage", input, options)
end
@doc """
Deregisters the specified directory. This operation is asynchronous and
returns before the WorkSpace directory is deregistered. If any WorkSpaces
are registered to this directory, you must remove them before you can
deregister the directory.
"""
def deregister_workspace_directory(client, input, options \\ []) do
request(client, "DeregisterWorkspaceDirectory", input, options)
end
@doc """
Retrieves a list that describes the configuration of Bring Your Own License
(BYOL) for the specified account.
"""
def describe_account(client, input, options \\ []) do
request(client, "DescribeAccount", input, options)
end
@doc """
Retrieves a list that describes modifications to the configuration of Bring
Your Own License (BYOL) for the specified account.
"""
def describe_account_modifications(client, input, options \\ []) do
request(client, "DescribeAccountModifications", input, options)
end
@doc """
Retrieves a list that describes one or more specified Amazon WorkSpaces
clients.
"""
def describe_client_properties(client, input, options \\ []) do
request(client, "DescribeClientProperties", input, options)
end
@doc """
Describes the permissions that the owner of a connection alias has granted
to another AWS account for the specified connection alias. For more
information, see [ Cross-Region Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def describe_connection_alias_permissions(client, input, options \\ []) do
request(client, "DescribeConnectionAliasPermissions", input, options)
end
@doc """
Retrieves a list that describes the connection aliases used for
cross-Region redirection. For more information, see [ Cross-Region
Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
"""
def describe_connection_aliases(client, input, options \\ []) do
request(client, "DescribeConnectionAliases", input, options)
end
@doc """
Describes one or more of your IP access control groups.
"""
def describe_ip_groups(client, input, options \\ []) do
request(client, "DescribeIpGroups", input, options)
end
@doc """
Describes the specified tags for the specified WorkSpaces resource.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Retrieves a list that describes the available WorkSpace bundles.
You can filter the results using either bundle ID or owner, but not both.
"""
def describe_workspace_bundles(client, input, options \\ []) do
request(client, "DescribeWorkspaceBundles", input, options)
end
@doc """
Describes the available directories that are registered with Amazon
WorkSpaces.
"""
def describe_workspace_directories(client, input, options \\ []) do
request(client, "DescribeWorkspaceDirectories", input, options)
end
@doc """
Describes the permissions that the owner of an image has granted to other
AWS accounts for an image.
"""
def describe_workspace_image_permissions(client, input, options \\ []) do
request(client, "DescribeWorkspaceImagePermissions", input, options)
end
@doc """
Retrieves a list that describes one or more specified images, if the image
identifiers are provided. Otherwise, all images in the account are
described.
"""
def describe_workspace_images(client, input, options \\ []) do
request(client, "DescribeWorkspaceImages", input, options)
end
@doc """
Describes the snapshots for the specified WorkSpace.
"""
def describe_workspace_snapshots(client, input, options \\ []) do
request(client, "DescribeWorkspaceSnapshots", input, options)
end
@doc """
Describes the specified WorkSpaces.
You can filter the results by using the bundle identifier, directory
identifier, or owner, but you can specify only one filter at a time.
"""
def describe_workspaces(client, input, options \\ []) do
request(client, "DescribeWorkspaces", input, options)
end
@doc """
Describes the connection status of the specified WorkSpaces.
"""
def describe_workspaces_connection_status(client, input, options \\ []) do
request(client, "DescribeWorkspacesConnectionStatus", input, options)
end
@doc """
Disassociates a connection alias from a directory. Disassociating a
connection alias disables cross-Region redirection between two directories
in different AWS Regions. For more information, see [ Cross-Region
Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
<note> Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
</note>
"""
def disassociate_connection_alias(client, input, options \\ []) do
request(client, "DisassociateConnectionAlias", input, options)
end
@doc """
Disassociates the specified IP access control group from the specified
directory.
"""
def disassociate_ip_groups(client, input, options \\ []) do
request(client, "DisassociateIpGroups", input, options)
end
@doc """
Imports the specified Windows 10 Bring Your Own License (BYOL) image into
Amazon WorkSpaces. The image must be an already licensed Amazon EC2 image
that is in your AWS account, and you must own the image. For more
information about creating BYOL images, see [ Bring Your Own Windows
Desktop
Licenses](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html).
"""
def import_workspace_image(client, input, options \\ []) do
request(client, "ImportWorkspaceImage", input, options)
end
@doc """
Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that
you can use for the network management interface when you enable Bring Your
Own License (BYOL).
This operation can be run only by AWS accounts that are enabled for BYOL.
If your account isn't enabled for BYOL, you'll receive an
`AccessDeniedException` error.
The management network interface is connected to a secure Amazon WorkSpaces
management network. It is used for interactive streaming of the WorkSpace
desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to
manage the WorkSpace.
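  ## Example

  The request takes a `ManagementCidrRangeConstraint` in CIDR notation (the
  value below is illustrative only; `client` is assumed to be a configured
  `AWS.Client` struct):

      list_available_management_cidr_ranges(client, %{
        "ManagementCidrRangeConstraint" => "10.0.0.0/16"
      })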
"""
def list_available_management_cidr_ranges(client, input, options \\ []) do
request(client, "ListAvailableManagementCidrRanges", input, options)
end
@doc """
Migrates a WorkSpace from one operating system or bundle type to another,
while retaining the data on the user volume.
The migration process recreates the WorkSpace by using a new root volume
from the target bundle image and the user volume from the last available
snapshot of the original WorkSpace. During migration, the original
`D:\Users\%USERNAME%` user profile folder is renamed to
`D:\Users\%USERNAME%MMddyyTHHmmss%.NotMigrated`. A new
`D:\Users\%USERNAME%\` folder is generated by the new OS. Certain files in
the old user profile are moved to the new user profile.
For available migration scenarios, details about what happens during
migration, and best practices, see [Migrate a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/migrate-workspaces.html).
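  ## Example

  A minimal request sketch (both IDs are illustrative only):

      migrate_workspace(client, %{
        "SourceWorkspaceId" => "ws-example123",
        "BundleId" => "wsb-example456"
      })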
"""
def migrate_workspace(client, input, options \\ []) do
request(client, "MigrateWorkspace", input, options)
end
@doc """
Modifies the configuration of Bring Your Own License (BYOL) for the
specified account.
"""
def modify_account(client, input, options \\ []) do
request(client, "ModifyAccount", input, options)
end
@doc """
Modifies the properties of the specified Amazon WorkSpaces clients.
"""
def modify_client_properties(client, input, options \\ []) do
request(client, "ModifyClientProperties", input, options)
end
@doc """
Modifies the self-service WorkSpace management capabilities for your users.
For more information, see [Enable Self-Service WorkSpace Management
Capabilities for Your
Users](https://docs.aws.amazon.com/workspaces/latest/adminguide/enable-user-self-service-workspace-management.html).
"""
def modify_selfservice_permissions(client, input, options \\ []) do
request(client, "ModifySelfservicePermissions", input, options)
end
@doc """
Specifies which devices and operating systems users can use to access their
WorkSpaces. For more information, see [ Control Device
Access](https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html#control-device-access).
"""
def modify_workspace_access_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceAccessProperties", input, options)
end
@doc """
  Modifies the default properties used to create WorkSpaces.
"""
def modify_workspace_creation_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceCreationProperties", input, options)
end
@doc """
Modifies the specified WorkSpace properties. For important information
about how to modify the size of the root and user volumes, see [ Modify a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html).
"""
def modify_workspace_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceProperties", input, options)
end
@doc """
Sets the state of the specified WorkSpace.
To maintain a WorkSpace without being interrupted, set the WorkSpace state
to `ADMIN_MAINTENANCE`. WorkSpaces in this state do not respond to requests
to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this
state is not stopped. Users cannot log into a WorkSpace in the
`ADMIN_MAINTENANCE` state.
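  ## Example

  Putting a WorkSpace into maintenance mode (the ID is illustrative only):

      modify_workspace_state(client, %{
        "WorkspaceId" => "ws-example123",
        "WorkspaceState" => "ADMIN_MAINTENANCE"
      })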
"""
def modify_workspace_state(client, input, options \\ []) do
request(client, "ModifyWorkspaceState", input, options)
end
@doc """
Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is `AVAILABLE` or
`UNHEALTHY`.
This operation is asynchronous and returns before the WorkSpaces have
rebooted.
"""
def reboot_workspaces(client, input, options \\ []) do
request(client, "RebootWorkspaces", input, options)
end
@doc """
Rebuilds the specified WorkSpace.
You cannot rebuild a WorkSpace unless its state is `AVAILABLE`, `ERROR`,
`UNHEALTHY`, `STOPPED`, or `REBOOTING`.
Rebuilding a WorkSpace is a potentially destructive action that can result
in the loss of data. For more information, see [Rebuild a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html).
This operation is asynchronous and returns before the WorkSpaces have been
completely rebuilt.
"""
def rebuild_workspaces(client, input, options \\ []) do
request(client, "RebuildWorkspaces", input, options)
end
@doc """
Registers the specified directory. This operation is asynchronous and
returns before the WorkSpace directory is registered. If this is the first
time you are registering a directory, you will need to create the
  `workspaces_DefaultRole` role before you can register a directory. For more
information, see [ Creating the workspaces_DefaultRole
Role](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role).
"""
def register_workspace_directory(client, input, options \\ []) do
request(client, "RegisterWorkspaceDirectory", input, options)
end
@doc """
Restores the specified WorkSpace to its last known healthy state.
  You cannot restore a WorkSpace unless its state is `AVAILABLE`, `ERROR`,
`UNHEALTHY`, or `STOPPED`.
Restoring a WorkSpace is a potentially destructive action that can result
in the loss of data. For more information, see [Restore a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/restore-workspace.html).
This operation is asynchronous and returns before the WorkSpace is
completely restored.
"""
def restore_workspace(client, input, options \\ []) do
request(client, "RestoreWorkspace", input, options)
end
@doc """
Removes one or more rules from the specified IP access control group.
"""
def revoke_ip_rules(client, input, options \\ []) do
request(client, "RevokeIpRules", input, options)
end
@doc """
Starts the specified WorkSpaces.
You cannot start a WorkSpace unless it has a running mode of `AutoStop` and
a state of `STOPPED`.
"""
def start_workspaces(client, input, options \\ []) do
request(client, "StartWorkspaces", input, options)
end
@doc """
Stops the specified WorkSpaces.
You cannot stop a WorkSpace unless it has a running mode of `AutoStop` and
a state of `AVAILABLE`, `IMPAIRED`, `UNHEALTHY`, or `ERROR`.
"""
def stop_workspaces(client, input, options \\ []) do
request(client, "StopWorkspaces", input, options)
end
@doc """
Terminates the specified WorkSpaces.
<important> Terminating a WorkSpace is a permanent action and cannot be
undone. The user's data is destroyed. If you need to archive any user data,
contact AWS Support before terminating the WorkSpace.
</important> You can terminate a WorkSpace that is in any state except
`SUSPENDED`.
This operation is asynchronous and returns before the WorkSpaces have been
completely terminated. After a WorkSpace is terminated, the `TERMINATED`
state is returned only briefly before the WorkSpace directory metadata is
cleaned up, so this state is rarely returned. To confirm that a WorkSpace
is terminated, check for the WorkSpace ID by using [
DescribeWorkSpaces](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaces.html).
If the WorkSpace ID isn't returned, then the WorkSpace has been
successfully terminated.
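  ## Example

  Terminating a WorkSpace and then confirming termination (the ID is
  illustrative; `client` is assumed to be a configured `AWS.Client` struct):

      {:ok, _result, _response} =
        terminate_workspaces(client, %{
          "TerminateWorkspaceRequests" => [%{"WorkspaceId" => "ws-example123"}]
        })

      # If the WorkSpace ID is no longer returned here, termination succeeded.
      describe_workspaces(client, %{"WorkspaceIds" => ["ws-example123"]})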
"""
def terminate_workspaces(client, input, options \\ []) do
request(client, "TerminateWorkspaces", input, options)
end
@doc """
Shares or unshares a connection alias with one account by specifying
whether that account has permission to associate the connection alias with
a directory. If the association permission is granted, the connection alias
is shared with that account. If the association permission is revoked, the
connection alias is unshared with the account. For more information, see [
Cross-Region Redirection for Amazon
WorkSpaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html).
<note> <ul> <li> Before performing this operation, call [
DescribeConnectionAliases](https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html)
to make sure that the current state of the connection alias is `CREATED`.
</li> <li> To delete a connection alias that has been shared, the shared
account must first disassociate the connection alias from any directories
it has been associated with. Then you must unshare the connection alias
from the account it has been shared with. You can delete a connection alias
only after it is no longer shared with any accounts or associated with any
directories.
</li> </ul> </note>
"""
def update_connection_alias_permission(client, input, options \\ []) do
request(client, "UpdateConnectionAliasPermission", input, options)
end
@doc """
Replaces the current rules of the specified IP access control group with
the specified rules.
"""
def update_rules_of_ip_group(client, input, options \\ []) do
request(client, "UpdateRulesOfIpGroup", input, options)
end
@doc """
Shares or unshares an image with one account by specifying whether that
account has permission to copy the image. If the copy image permission is
granted, the image is shared with that account. If the copy image
permission is revoked, the image is unshared with the account. For more
information about sharing images, see [ Share or Unshare a Custom
WorkSpaces
Image](https://docs.aws.amazon.com/workspaces/latest/adminguide/share-custom-image.html).
<note> <ul> <li> To delete an image that has been shared, you must unshare
the image before you delete it.
</li> <li> Sharing Bring Your Own License (BYOL) images across AWS accounts
isn't supported at this time in the AWS GovCloud (US-West) Region. To share
BYOL images across accounts in the AWS GovCloud (US-West) Region, contact
AWS Support.
</li> </ul> </note>
"""
def update_workspace_image_permission(client, input, options \\ []) do
request(client, "UpdateWorkspaceImagePermission", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "workspaces"}
host = build_host("workspaces", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "WorkspacesService.#{action}"}
]
payload = encode!(client, input)
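    # Sign the request with AWS Signature Version 4; this returns the header
    # list with the Authorization and X-Amz-* signature headers added.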
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
  defp build_url(host, %{proto: proto, port: port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/work_spaces.ex
| 0.883532
| 0.458106
|
work_spaces.ex
|
starcoder
|
defmodule State.Schedule do
@moduledoc "State for Schedules"
use State.Server,
indices: [:trip_id, :stop_id],
recordable: Model.Schedule
require Logger
alias Events.Gather
alias Model.Schedule
alias Parse.StopTimes
alias State.Trip
@fetch_stop_times {:fetch, "stop_times.txt"}
@subscriptions [@fetch_stop_times, {:new_state, Trip}]
@type filter_opts :: %{
optional(:routes) => [Model.Route.id()],
optional(:trips) => [Model.Trip.id()],
optional(:direction_id) => Model.Direction.id(),
optional(:stops) => [Model.Stop.id()],
optional(:route_type) => [Model.Route.route_type()],
optional(:stop_sequence) => stop_sequence,
optional(:date) => Date.t(),
optional(:min_time) => non_neg_integer,
optional(:max_time) => non_neg_integer
}
@typep convert_filters :: %{
optional(:routes) => [Model.Route.id()],
optional(:trips) => [Model.Trip.id()],
optional(:direction_id) => Model.Direction.id(),
optional(:stops) => [Model.Stop.id()],
optional(:route_type) => [Model.Route.route_type()],
optional(:date) => Date.t(),
optional(:min_time) => non_neg_integer,
optional(:max_time) => non_neg_integer
}
@typep min_time :: non_neg_integer
@typep max_time :: non_neg_integer | :infinity
@typep search :: %{
index: :stop_id | :trip_id,
matchers: [%{}]
}
@typep stop_sequence_item :: non_neg_integer | :first | :last
@typep stop_sequence :: [stop_sequence_item]
@typep stop_sequence_matcher :: State.Matchers.stop_sequence_matcher()
@doc """
Applies a filtered search on Schedules based on a map of filters values.
The allowed filterable keys are:
:routes
:direction_id
:trips
:stop_sequence
:stops
:min_time
:max_time
:date
  At least one of the following filters must be applied for any schedules to
  be returned:
:routes
:trips
:stops
### Important Behavior Notes
When filtering on both `:routes` and `:trips`, `:routes` has priority for
filtering.
When filtering with `:direction_id`, either `:routes` or `:stops` must also
be applied.
When filtering with `:date`, either `:routes` or `:stops` must also be
applied.
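  ## Example

  Fetching schedules for one route in a single direction within a time
  window (the route ID and times are illustrative only):

      State.Schedule.filter_by(%{
        routes: ["Red"],
        direction_id: 0,
        min_time: 9 * 3600,
        max_time: 10 * 3600
      })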
"""
@spec filter_by(filter_opts) :: [Schedule.t()]
def filter_by(filters) do
converted = convert_filters(filters)
converted
|> build_filter_matchers()
|> do_filtered_search(converted)
|> do_post_search_filter(converted)
end
# Only for tests
@doc false
def reset_gather do
GenServer.call(__MODULE__, :reset_gather)
end
@schedule_relationships_with_schedules [nil, :cancelled, :no_data, :skipped]
@spec schedule_for(Model.Prediction.t()) :: Model.Schedule.t() | nil
def schedule_for(%Model.Prediction{schedule_relationship: relationship} = prediction)
when relationship in @schedule_relationships_with_schedules do
stop_ids =
case State.Stop.siblings(prediction.stop_id) do
[_ | _] = stops -> Enum.map(stops, & &1.id)
[] -> [prediction.stop_id]
end
%{
trips: [prediction.trip_id],
stops: stop_ids,
stop_sequence: [prediction.stop_sequence]
}
|> filter_by
|> List.first()
end
def schedule_for(%Model.Prediction{}) do
nil
end
@spec schedule_for_many([Model.Prediction.t()]) :: map
def schedule_for_many(predictions) do
for prediction <- predictions,
schedule = schedule_for(prediction),
schedule != nil,
into: %{} do
{{prediction.trip_id, prediction.stop_sequence}, schedule}
end
end
@spec build_stop_sequence_matchers(stop_sequence | nil) :: [stop_sequence_matcher]
def build_stop_sequence_matchers(nil), do: [%{}]
def build_stop_sequence_matchers([]), do: [%{}]
def build_stop_sequence_matchers(stop_sequence) do
Enum.map(stop_sequence, &State.Matchers.stop_sequence/1)
end
@impl Events.Server
def handle_event(event, value, _, state) do
state = %{state | data: Gather.update(state.data, event, value)}
{:noreply, state, :hibernate}
end
# Only for tests
@impl GenServer
def handle_call(:reset_gather, _from, state) do
state = %{state | data: Gather.new(@subscriptions, &do_gather/1)}
{:reply, :ok, state}
end
def handle_call(request, from, state), do: super(request, from, state)
@impl GenServer
def init(_) do
{:ok, state, timeout_or_hibernate} = super(nil)
Enum.each(@subscriptions, &subscribe/1)
data = Gather.new(@subscriptions, &do_gather/1)
overridden_state = %{state | data: data}
{:ok, overridden_state, timeout_or_hibernate}
end
# Converts routes and stops into workable ids
@spec convert_filters(filter_opts) :: convert_filters
defp convert_filters(%{routes: _} = filters) do
# Routes have priority for filtering on trip ids
# Modify :trips in the filters with the trip ids based on the route ids
trip_ids =
filters
|> Map.take([:routes, :direction_id, :date])
|> State.Trip.filter_by()
|> Enum.map(& &1.id)
# keep direction_id, as we can use that if there's also a stop filter
filters
|> Map.drop([:routes, :date])
|> Map.put(:trips, trip_ids)
|> convert_filters()
end
defp convert_filters(%{stops: stop_ids} = filters) do
stops = State.Stop.location_type_0_ids_by_parent_ids(stop_ids)
Map.put(filters, :stops, stops)
end
defp convert_filters(filters), do: filters
# Build search criteria
@spec build_filter_matchers(convert_filters) :: search | %{}
defp build_filter_matchers(%{stops: stops, trips: trips} = filters) do
stop_sequence_matchers = build_stop_sequence_matchers(filters[:stop_sequence])
all_trips = State.Trip.by_primary_ids(trips)
routes_from_trips = MapSet.new(all_trips, & &1.route_id)
filtered_routes =
stops
|> State.RoutesPatternsAtStop.routes_by_stops_and_direction(canonical?: false)
|> MapSet.new()
|> MapSet.intersection(routes_from_trips)
filtered_trips =
all_trips
|> Stream.filter(&MapSet.member?(filtered_routes, &1.route_id))
|> Enum.map(& &1.id)
matchers =
for stop_id <- stops,
trip_id <- filtered_trips,
stop_sequence_matcher <- stop_sequence_matchers do
stop_sequence_matcher
|> Map.put(:trip_id, trip_id)
|> Map.put(:stop_id, stop_id)
end
%{index: :trip_id, matchers: matchers}
end
defp build_filter_matchers(%{stops: stops} = filters) do
direction_matcher = State.Matchers.direction_id(filters[:direction_id])
stop_sequence_matchers = build_stop_sequence_matchers(filters[:stop_sequence])
matchers =
for stop_id <- stops,
stop_sequence_matcher <- stop_sequence_matchers do
stop_sequence_matcher
|> Map.put(:stop_id, stop_id)
|> Map.merge(direction_matcher)
end
%{index: :stop_id, matchers: matchers}
end
defp build_filter_matchers(%{trips: trips} = filters) do
stop_sequence_matchers = build_stop_sequence_matchers(filters[:stop_sequence])
matchers =
for trip_id <- trips,
stop_sequence_matcher <- stop_sequence_matchers do
Map.put(stop_sequence_matcher, :trip_id, trip_id)
end
%{index: :trip_id, matchers: matchers}
end
defp build_filter_matchers(_), do: %{}
defp do_gather(%{@fetch_stop_times => blob}) do
    # We parse the blob in the same process as the Schedule server to avoid
    # copying the parsed data to another process. The binary blob itself is
    # sent by reference, so receiving it here involves no copying.
_ = Logger.debug(fn -> "#{__MODULE__} Parsing and writing schedule data..." end)
blob
|> StopTimes.parse(&Trip.by_primary_id/1)
|> handle_new_state()
end
# Performs the search on the given index and matchers
@spec do_filtered_search(search, convert_filters) :: [Schedule.t()]
defp do_filtered_search(
%{index: index, matchers: [_ | _] = matchers},
filters
) do
schedules = select(matchers, index)
do_service_date_filter(schedules, filters)
end
defp do_filtered_search(_, _), do: []
# Filters schedules for a given service date
@spec do_service_date_filter([Schedule.t()], convert_filters) :: [Schedule.t()]
defp do_service_date_filter(schedules, %{date: %Date{} = date}) do
Enum.filter(schedules, &State.ServiceByDate.valid?(&1.service_id, date))
end
defp do_service_date_filter(schedules, _), do: schedules
# Apply any filters after a completed search. Currently, only a time window
# filter is supported
@spec do_post_search_filter([Schedule.t()], convert_filters) :: [Schedule.t()]
defp do_post_search_filter(schedules, %{min_time: min, max_time: max}) do
do_time_filter(schedules, min, max)
end
defp do_post_search_filter(schedules, %{min_time: min}) do
do_time_filter(schedules, min, :infinity)
end
defp do_post_search_filter(schedules, %{max_time: max}) do
do_time_filter(schedules, 0, max)
end
defp do_post_search_filter(schedules, _), do: schedules
# Filters schedules to see if they fit within a time window
@spec do_time_filter([Schedule.t()], min_time, max_time) :: [Schedule.t()]
defp do_time_filter(schedules, min_time, max_time) do
Enum.filter(schedules, &in_time_range?(&1, min_time, max_time))
end
@spec in_time_range?(Schedule.t(), min_time, max_time) :: boolean
defp in_time_range?(schedule, min_time, max_time) do
time = Schedule.time(schedule)
min_time <= time and time <= max_time
end
def filter_by_route_type(schedules, nil), do: schedules
def filter_by_route_type(schedules, []), do: schedules
def filter_by_route_type(schedules, route_types) do
route_ids =
route_types
|> State.Route.by_types()
|> MapSet.new(& &1.id)
Enum.filter(schedules, &(&1.route_id in route_ids))
end
end
|
apps/state/lib/state/schedule.ex
| 0.838151
| 0.409988
|
schedule.ex
|
starcoder
|
defmodule Mint.WebSocket.PerMessageDeflate do
@moduledoc """
A WebSocket extension which compresses each message before sending it across
  the wire.
This extension is defined in
[rfc7692](https://www.rfc-editor.org/rfc/rfc7692.html).
## Options
* `:zlib_level` - (default: `:best_compression`) the compression level to
use for the deflation zstream. See the `:zlib.deflateInit/6` documentation
on the `Level` argument.
* `:zlib_memory_level` - (default: `8`) how much memory to allow for use
during compression. See the `:zlib.deflateInit/6` documentation on the
`MemLevel` argument.
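  ## Example

  A sketch of negotiating this extension when upgrading a connection (the
  option names follow `Mint.WebSocket.upgrade/5` and are best checked
  against your installed `Mint.WebSocket` version):

      Mint.WebSocket.upgrade(:ws, conn, "/ws", [],
        extensions: [{Mint.WebSocket.PerMessageDeflate, zlib_level: :best_speed}]
      )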
"""
require Mint.WebSocket.Frame, as: Frame
alias Mint.WebSocket.Extension
@typedoc false
@type t :: %__MODULE__{
inflate: :zlib.zstream(),
deflate: :zlib.zstream(),
inflate_takeover?: boolean(),
deflate_takeover?: boolean()
}
defstruct [:inflate, :deflate, :inflate_takeover?, :deflate_takeover?]
@behaviour Extension
@doc false
@impl Extension
def name, do: "permessage-deflate"
@doc false
@impl Extension
def init(%Extension{params: params, opts: opts} = this_extension, _other_extensions) do
inflate_window_bits = get_window_bits(params, "server_max_window_bits", 15)
deflate_window_bits = get_window_bits(params, "client_max_window_bits", 15)
inflate_zstream = :zlib.open()
deflate_zstream = :zlib.open()
:ok = :zlib.inflateInit(inflate_zstream, -inflate_window_bits)
:ok =
:zlib.deflateInit(
deflate_zstream,
Keyword.get(opts, :zlib_level, :best_compression),
:deflated,
-deflate_window_bits,
Keyword.get(opts, :zlib_memory_level, 8),
:default
)
state = %__MODULE__{
inflate: inflate_zstream,
deflate: deflate_zstream,
inflate_takeover?: get_takeover(params, "server_no_context_takeover", true),
deflate_takeover?: get_takeover(params, "client_no_context_takeover", true)
}
{:ok, put_in(this_extension.state, state)}
end
@doc false
@impl Extension
def decode(frame, state)
  # RFC 7692, section 6: "Per-Message Compression Extensions operate only on
  # data messages"
for opcode <- [:text, :binary, :continuation] do
def decode(
Frame.unquote(opcode)(
reserved: <<1::size(1), _::bitstring>> = reserved_binary,
data: data
) = frame,
state
) do
<<reserved::size(3)>> = reserved_binary
# Append 4 octets of 0x00 0x00 0xff 0xff to the tail end of the
# payload of the message
data =
state.inflate
|> :zlib.inflate(<<data::binary, 0x00, 0x00, 0xFF, 0xFF>>)
|> IO.iodata_to_binary()
if state.inflate_takeover? == false do
:zlib.inflateReset(state.inflate)
end
frame =
Frame.unquote(opcode)(frame,
reserved: <<:erlang.bxor(reserved, 0b100)::size(3)>>,
data: data
)
{:ok, frame, state}
end
end
def decode(frame, state), do: {:ok, frame, state}
@doc false
@impl Extension
def encode(frame, state)
for opcode <- [:text, :binary, :continuation] do
def encode(
Frame.unquote(opcode)(
reserved: <<0::size(1), _::bitstring>> = reserved_binary,
data: data
) = frame,
state
) do
<<reserved::size(3)>> = reserved_binary
data = deflate_data(state.deflate, data)
if state.deflate_takeover? == false do
:zlib.deflateReset(state.deflate)
end
frame =
Frame.unquote(opcode)(frame,
reserved: <<:erlang.bor(reserved, 0b100)::size(3)>>,
data: data
)
{:ok, frame, state}
end
end
def encode(frame, state), do: {:ok, frame, state}
defp deflate_data(deflate_zstream, data) do
deflated =
deflate_zstream
|> :zlib.deflate(data, :sync)
|> IO.iodata_to_binary()
# "Remove 4 octets (that are 0x00 0x00 0xff 0xff) from the tail end"
data_size = byte_size(deflated) - 4
case deflated do
<<deflated::binary-size(data_size), 0x00, 0x00, 0xFF, 0xFF>> -> deflated
deflated -> deflated
end
end
defp get_window_bits(params, param_name, default) do
with {:ok, value} <- fetch_param(params, param_name),
{bits, _} <- Integer.parse(value) do
bits
else
_ -> default
end
end
defp get_takeover(params, param_name, default) when is_boolean(default) do
with {:ok, value} <- fetch_param(params, param_name),
{:ok, no_takeover?} <- parse_boolean(value) do
not no_takeover?
else
_ -> default
end
end
defp fetch_param(params, param_name) do
with {^param_name, value} <- List.keyfind(params, param_name, 0, :error) do
{:ok, value}
end
end
defp parse_boolean("true"), do: {:ok, true}
defp parse_boolean("false"), do: {:ok, false}
defp parse_boolean(_), do: :error
end
|
lib/mint/web_socket/per_message_deflate.ex
| 0.818193
| 0.443299
|
per_message_deflate.ex
|
starcoder
|
defmodule AshPhoenix.Form.Auto do
@moduledoc """
A (slightly) experimental tool to automatically generate available nested forms based on a resource and action.
To use this, specify `forms: [auto?: true]` when creating the form.
There are two things that this builds forms for:
  1. Attributes/arguments whose type is an embedded resource.
2. Arguments that have a corresponding `change manage_relationship(..)` configured.
For more on relationships see the documentation for `Ash.Changeset.manage_relationship/4`.
When building forms, you can switch on the action type and/or resource of the form, in order to have different
fields depending on the form. For example, if you have a simple relationship called `:comments` with
`on_match: :update` and `on_no_match: :create`, there are two types of forms that can be in `inputs_for(form, :comments)`.
In which case you may have something like this:
```elixir
<%= for comment_form <- inputs_for(f, :comments) do %>
<%= hidden_inputs_for(comment_form) %>
<%= if comment_form.source.type == :create do %>
<%= text_input comment_form, :text %>
<%= text_input comment_form, :on_create_field %>
<% else %>
<%= text_input comment_form, :text %>
  <%= text_input comment_form, :on_update_field %>
  <% end %>
  <button phx-click="remove_form" phx-value-path="<%= comment_form.name %>">Remove Comment</button>
<button phx-click="add_form" phx-value-path="<%= comment_form.name %>">Add Comment</button>
<% end %>
```
This also applies to adding forms of different types manually. For instance, if you had a "search" field
  to allow them to search for a record (e.g. in a liveview), and you had an `on_lookup` read action, you could
render a search form for that read action, and once they've selected a record, you could render the fields
to update that record (in the case of `on_lookup: :relate_and_update` configurations).
## Special Considerations
### `on_lookup: :relate_and_update`
For `on_lookup: :relate_and_update` configurations, the "read" form for that relationship will use the appropriate read action.
However, you may also want to include the relevant fields for the update that would subsequently occur. To that end, a special
nested form called `:_update` is created, that uses an empty instance of that resource as the base of its changeset. This may require
some manual manipulation of that data before rendering the relevant form because it assumes all the default values. To solve for this,
if you are using liveview, you could actually look up the record using the input from the read action, and then use `AshPhoenix.Form.update_form/3`
to set that looked up record as the data of the `_update` form.
  ### Many to Many Relationships
  In the case that a `manage_change` option points to a join relationship, that form is presented via a special nested form called
`_join`. So the first form in `inputs_for(form, :relationship)` would be for the destination, and then inside of that you could say
`inputs_for(nested_form, :_join)`. The parameters are merged together during submission.
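  ## Example

  A minimal sketch of enabling automatic form generation (`MyApp.Post` and
  its `:create` action are hypothetical):

  ```elixir
  form = AshPhoenix.Form.for_create(MyApp.Post, :create, forms: [auto?: true])
  ```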
"""
@dialyzer {:nowarn_function, rel_to_resource: 2}
@auto_opts [
relationship_fetcher: [
type: :any,
doc: """
A two argument function that receives the parent data, the relationship to fetch.
The default simply fetches the relationship value, and if it isn't loaded, it uses `[]` or `nil`.
"""
],
sparse_lists?: [
type: :boolean,
doc:
"Sets all list type forms to `sparse?: true` by default. Has no effect on forms derived for embedded resources.",
default: false
]
]
def auto(resource, action, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @auto_opts)
related(resource, action, opts) ++ embedded(resource, action, opts)
end
def related(resource, action, auto_opts) do
passed_in_action = action
action =
if is_atom(action) do
Ash.Resource.Info.action(resource, action)
else
action
end
if is_nil(action) && is_atom(passed_in_action) do
raise "No such action :#{passed_in_action} for #{inspect(resource)}"
end
action.arguments
|> Enum.reject(& &1.private?)
|> Enum.filter(&(&1.type in [{:array, :map}, :map, Ash.Type.Map, {:array, Ash.Type.Map}]))
|> Enum.flat_map(fn arg ->
case find_manage_change(arg, action) do
nil ->
[]
manage_opts ->
[{arg, manage_opts}]
end
end)
|> Enum.map(fn {arg, manage_opts} ->
relationship = Ash.Resource.Info.relationship(resource, manage_opts[:relationship])
manage_opts = manage_opts[:opts]
defaults =
if manage_opts[:type] do
Ash.Changeset.manage_relationship_opts(manage_opts[:type])
else
[]
end
manage_opts =
Ash.Changeset.ManagedRelationshipHelpers.sanitize_opts(
relationship,
Keyword.merge(defaults, manage_opts)
)
type =
case arg.type do
{:array, _} -> :list
_ -> :single
end
opts = [
type: type,
forms: [],
sparse?: auto_opts[:sparse_lists?],
managed_relationship: {relationship.source, relationship.name},
must_load?: Ash.Changeset.ManagedRelationshipHelpers.must_load?(manage_opts),
updater: fn opts ->
opts =
opts
|> add_create_action(manage_opts, relationship, auto_opts)
|> add_read_action(manage_opts, relationship, auto_opts)
|> add_update_action(manage_opts, relationship, auto_opts)
|> add_destroy_action(manage_opts, relationship, auto_opts)
|> add_nested_forms(auto_opts)
if opts[:read_action] || opts[:update_action] || opts[:destroy_action] do
Keyword.put(
opts,
:data,
relationship_fetcher(relationship, auto_opts[:relationship_fetcher])
)
else
opts
end
end
]
{arg.name, opts}
end)
end
defp add_nested_forms(opts, auto_opts) do
Keyword.update!(opts, :forms, fn forms ->
forms =
if forms[:update_action] do
forms ++ set_for_type(auto(opts[:resource], opts[:update_action], auto_opts), :update)
else
forms
end
forms =
if forms[:create_action] do
forms ++ set_for_type(auto(opts[:resource], opts[:create_action], auto_opts), :create)
else
forms
end
forms =
if forms[:destroy_action] do
forms ++ set_for_type(auto(opts[:resource], opts[:destroy_action], auto_opts), :destroy)
else
forms
end
if forms[:read_action] do
forms ++ set_for_type(auto(opts[:resource], opts[:read_action], auto_opts), :read)
else
forms
end
end)
end
defp set_for_type(forms, type) do
Enum.map(forms, fn {key, value} ->
{key, Keyword.put(value, :for_type, type)}
end)
end
defp add_read_action(opts, manage_opts, relationship, auto_opts) do
manage_opts
|> Ash.Changeset.ManagedRelationshipHelpers.on_lookup_read_action(relationship)
|> case do
nil ->
opts
{source_dest_or_join, action_name} ->
resource = rel_to_resource(source_dest_or_join, relationship)
opts
|> Keyword.put(:read_resource, resource)
|> Keyword.put(:read_action, action_name)
|> Keyword.update!(
:forms,
fn forms ->
case Ash.Changeset.ManagedRelationshipHelpers.on_lookup_update_action(
manage_opts,
relationship
) do
nil ->
forms ++
auto(resource, action_name, auto_opts)
{source_dest_or_join, update_action} ->
resource = rel_to_resource(source_dest_or_join, relationship)
forms ++
auto(resource, action_name, auto_opts) ++
[
{:_update,
[
resource: resource,
managed_relationship: {relationship.source, relationship.name},
type: :single,
data: resource.__struct__(),
update_action: update_action
]}
]
{:join, update_action, _} ->
resource = relationship.through
forms ++
auto(resource, action_name, auto_opts) ++
[
{:_update,
[
resource: resource,
managed_relationship: {relationship.source, relationship.name},
type: :single,
data: resource.__struct__(),
update_action: update_action
]}
]
end
end
)
end
end
defp add_create_action(opts, manage_opts, relationship, auto_opts) do
manage_opts
|> Ash.Changeset.ManagedRelationshipHelpers.on_no_match_destination_actions(relationship)
|> List.wrap()
|> Enum.sort_by(&(elem(&1, 0) == :join))
|> case do
[] ->
opts
[{source_dest_or_join, action_name} | rest] ->
resource = rel_to_resource(source_dest_or_join, relationship)
opts
|> Keyword.put(:create_resource, resource)
|> Keyword.put(:create_action, action_name)
|> Keyword.update!(
:forms,
&(&1 ++
auto(resource, action_name, auto_opts))
)
|> add_join_form(relationship, rest)
end
end
defp add_update_action(opts, manage_opts, relationship, auto_opts) do
manage_opts
|> Ash.Changeset.ManagedRelationshipHelpers.on_match_destination_actions(relationship)
|> List.wrap()
|> Enum.sort_by(&(elem(&1, 0) == :join))
|> case do
[] ->
opts
[{source_dest_or_join, action_name} | rest] ->
resource = rel_to_resource(source_dest_or_join, relationship)
opts
|> Keyword.put(:update_resource, resource)
|> Keyword.put(:update_action, action_name)
|> Keyword.update!(
:forms,
&(&1 ++
auto(resource, action_name, auto_opts))
)
|> add_join_form(relationship, rest)
end
end
defp add_destroy_action(opts, manage_opts, relationship, auto_opts) do
manage_opts
|> Ash.Changeset.ManagedRelationshipHelpers.on_missing_destination_actions(relationship)
|> List.wrap()
|> Enum.sort_by(&(elem(&1, 0) == :join))
|> case do
[] ->
opts
[{source_dest_or_join, action_name} | rest] ->
resource = rel_to_resource(source_dest_or_join, relationship)
opts
|> Keyword.put(:destroy_resource, resource)
|> Keyword.put(:destroy_action, action_name)
|> Keyword.update!(
:forms,
&(&1 ++
auto(resource, action_name, auto_opts))
)
|> add_join_form(relationship, rest)
end
end
defp add_join_form(opts, _relationship, []), do: opts
defp add_join_form(opts, relationship, [{:join, action, _}]) do
action = Ash.Resource.Info.action(relationship.through, action)
case action.type do
:update ->
Keyword.update!(opts, :forms, fn forms ->
Keyword.put(forms, :_join,
resource: relationship.through,
managed_relationship: {relationship.source, relationship.name},
type: :single,
data: &get_join(&1, &2, relationship),
update_action: action.name
)
end)
:create ->
Keyword.update!(opts, :forms, fn forms ->
Keyword.put(forms, :_join,
resource: relationship.through,
managed_relationship: {relationship.source, relationship.name},
create_action: action.name
)
end)
:destroy ->
Keyword.update!(opts, :forms, fn forms ->
Keyword.put(forms, :_join,
resource: relationship.through,
managed_relationship: {relationship.source, relationship.name},
type: :single,
data: &get_join(&1, &2, relationship),
destroy_action: action.name,
merge?: true
)
end)
end
end
defp get_join(parent, prev_path, relationship) do
case Enum.find(prev_path, &(&1.__struct__ == relationship.source)) do
nil ->
nil
root ->
case Map.get(root, relationship.join_relationship) do
value when is_list(value) ->
Enum.find(value, fn join ->
Map.get(join, relationship.destination_field_on_join_table) ==
Map.get(parent, relationship.destination_field)
end)
_ ->
nil
end
end
end
defp relationship_fetcher(relationship, relationship_fetcher) do
fn parent ->
if relationship_fetcher do
relationship_fetcher.(parent, relationship)
else
case Map.get(parent, relationship.name) do
%Ash.NotLoaded{} ->
if relationship.cardinality == :many do
[]
end
value ->
value
end
end
end
end
defp rel_to_resource(source_dest_or_join, relationship) do
case source_dest_or_join do
:source ->
relationship.source
:destination ->
relationship.destination
:join ->
relationship.through
end
end
def embedded(resource, action, auto_opts) do
action =
if is_atom(action) do
Ash.Resource.Info.action(resource, action)
else
action
end
resource
|> accepted_attributes(action)
|> Enum.concat(action.arguments)
|> Enum.filter(&Ash.Type.embedded_type?(&1.type))
|> Enum.reject(&match?({:array, {:array, _}}, &1.type))
|> Enum.map(fn attr ->
type =
case attr.type do
{:array, _} ->
:list
_ ->
:single
end
embed = unwrap_type(attr.type)
data =
case type do
:list ->
fn parent ->
if parent do
Map.get(parent, attr.name) || []
else
[]
end
end
:single ->
fn parent ->
if parent do
Map.get(parent, attr.name)
end
end
end
create_action =
if attr.constraints[:create_action] do
Ash.Resource.Info.action(embed, attr.constraints[:create_action])
else
Ash.Resource.Info.primary_action(embed, :create)
end
update_action =
if attr.constraints[:update_action] do
Ash.Resource.Info.action(embed, attr.constraints[:update_action])
else
Ash.Resource.Info.primary_action(embed, :update)
end
{attr.name,
[
type: type,
resource: embed,
create_action: create_action.name,
update_action: update_action.name,
embed?: true,
data: data,
forms: [],
updater: fn opts ->
Keyword.update!(opts, :forms, fn forms ->
forms ++
embedded(embed, create_action, auto_opts) ++
embedded(embed, update_action, auto_opts)
end)
end
]}
end)
end
defp unwrap_type({:array, type}), do: unwrap_type(type)
defp unwrap_type(type), do: type
@doc false
def accepted_attributes(resource, action) do
resource
|> Ash.Resource.Info.public_attributes()
|> only_accepted(action)
end
defp only_accepted(_attributes, %{type: :read}), do: []
defp only_accepted(attributes, %{accept: nil, reject: reject}) do
Enum.filter(attributes, &(&1.name not in reject || []))
end
defp only_accepted(attributes, %{accept: accept}) do
Enum.filter(attributes, &(&1.name in accept))
end
defp find_manage_change(argument, action) do
Enum.find_value(Map.get(action, :changes, []), fn
%{change: {Ash.Resource.Change.ManageRelationship, opts}} ->
if opts[:argument] == argument.name do
opts
end
_ ->
nil
end)
end
end
|
lib/ash_phoenix/form/auto.ex
| 0.819063
| 0.791459
|
auto.ex
|
starcoder
|
defmodule Farmbot.Firmware.UartHandler.Framing do
@behaviour Nerves.UART.Framing
import Farmbot.Firmware.Gcode.Parser
use Farmbot.Logger
# credo:disable-for-this-file Credo.Check.Refactor.FunctionArity
@moduledoc """
Each message is one line. This framer appends and removes newline sequences
as part of the framing. Buffering is performed internally, so users can get
the complete messages under normal circumstances. Attention should be paid
to the following:
1. Lines must have a fixed max length so that a misbehaving sender can't
  cause unbounded buffer expansion. When the max length is exceeded, a
`{:partial, data}` is reported. The application can decide what to do with
this.
2. The separation character varies depending on the target device. Some
devices require "\\r\\n" sequences, so be sure to specify this. Currently
only one or two character separators are supported.
  3. It may be desirable to set a `:rx_framing_timeout` to prevent
characters received in error from collecting during idle times. When the
receive timer expires, `{:partial, data}` is reported.
4. Line separators must be ASCII characters (0-127) or be valid UTF-8
sequences. If the device only sends ASCII, high characters (128-255)
should work as well. [Note: please report if using extended
characters.]
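  ## Example

  A sketch of opening a UART with this framer (the port name, speed, and
  separator are illustrative; see `Nerves.UART.open/3` for the full option
  list):

      Nerves.UART.open(uart_pid, "ttyACM0",
        speed: 115_200,
        framing: {Farmbot.Firmware.UartHandler.Framing, separator: "\\r\\n"}
      )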
"""
defmodule State do
@moduledoc false
defstruct max_length: nil,
separator: nil,
processed: <<>>,
in_process: <<>>,
log_input: false,
log_output: false
end
def init(args) do
max_length = Keyword.get(args, :max_length, 4096)
separator = Keyword.get(args, :separator, "\n")
log_input =
Farmbot.System.ConfigStorage.get_config_value(
:bool,
"settings",
"firmware_input_log"
)
log_output =
Farmbot.System.ConfigStorage.get_config_value(
:bool,
"settings",
"firmware_output_log"
)
state = %State{
max_length: max_length,
separator: separator,
log_input: log_input,
log_output: log_output
}
{:ok, state}
end
def add_framing(data, state) do
# maybe log output here
if state.log_output do
Logger.debug(3, data)
end
{:ok, data <> state.separator, state}
end
def remove_framing(data, state) do
{new_processed, new_in_process, lines} =
process_data(
state.separator,
byte_size(state.separator),
state.max_length,
state.processed,
state.in_process <> data,
[],
state.log_input
)
new_state = %{state | processed: new_processed, in_process: new_in_process}
rc = if buffer_empty?(new_state), do: :ok, else: :in_frame
{rc, lines, new_state}
end
def frame_timeout(state) do
partial_line = {:partial, state.processed <> state.in_process}
new_state = %{state | processed: <<>>, in_process: <<>>}
{:ok, [partial_line], new_state}
end
def flush(direction, state)
when direction == :receive or direction == :both do
%{state | processed: <<>>, in_process: <<>>}
end
def flush(_direction, state) do
state
end
def buffer_empty?(state) do
state.processed == <<>> and state.in_process == <<>>
end
# Handle not enough data case
defp process_data(
_separator,
sep_length,
_max_length,
processed,
to_process,
lines,
_log_input
)
when byte_size(to_process) < sep_length do
{processed, to_process, lines}
end
# Process data until separator or next char
defp process_data(
separator,
sep_length,
max_length,
processed,
to_process,
lines,
log_input
) do
case to_process do
      # Handle separator
<<^separator::binary-size(sep_length), rest::binary>> ->
new_lines = lines ++ [do_parse_code(processed, log_input)]
process_data(
separator,
sep_length,
max_length,
<<>>,
rest,
new_lines,
log_input
)
# Handle line too long case
to_process when byte_size(processed) == max_length and to_process != <<>> ->
new_lines = lines ++ [{:partial, processed}]
process_data(
separator,
sep_length,
max_length,
<<>>,
to_process,
new_lines,
log_input
)
# Handle next char
<<next_char::binary-size(1), rest::binary>> ->
process_data(
separator,
sep_length,
max_length,
processed <> next_char,
rest,
lines,
log_input
)
end
end
defp do_parse_code(processed, log_input) do
if log_input do
Logger.debug(3, processed)
end
parse_code(processed)
rescue
er ->
Logger.error(1, "Firmware parser error: #{Exception.message(er)}")
{nil, :noop}
end
end
|
lib/farmbot/firmware/uart_handler/framing.ex
| 0.631481
| 0.440529
|
framing.ex
|
starcoder
|
use Croma
defmodule Antikythera.Aws.CloudfrontSignedUrl do
@moduledoc """
  This module provides functions to generate [a signed URL for CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html).
"""
@doc """
Generates a signed URL to access a file via CloudFront.
## Parameters
- `resource_url` (string): CloudFront URL used for accessing the file, including query string parameters, if any.
- `lifetime_in_seconds` (positive integer): Expiration time is determined as the sum of the current time (in seconds) and this value.
- `key_pair_id` (string): ID for an active CloudFront key pair used for generating the signature.
- `private_key` (string): RSA private key for the key pair specified by `key_pair_id`.
- `url_encoded?` (boolean): Whether `resource_url` is encoded or not (optional, default is `false`).
## Return value
A generated signed URL (string).
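  ## Example

  All values below are placeholders only:

      generate_signed_url(
        "https://d111111abcdef8.cloudfront.net/image.jpg",
        3600,
        "KEY_PAIR_ID",
        File.read!("path/to/private_key.pem")
      )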
"""
defun generate_signed_url(
resource_url :: v[String.t()],
lifetime_in_seconds :: v[pos_integer],
key_pair_id :: v[String.t()],
private_key :: v[String.t()],
url_encoded? :: v[boolean] \\ false
) :: String.t() do
encoded_url = if url_encoded?, do: resource_url, else: URI.encode(resource_url)
expires_in_seconds = System.system_time(:second) + lifetime_in_seconds
joiner = if URI.parse(encoded_url) |> Map.get(:query) |> is_nil(), do: "?", else: "&"
encoded_url <>
joiner <>
URI.encode_query(
make_query_params_for_canned_policy(
encoded_url,
expires_in_seconds,
key_pair_id,
private_key
)
)
end
@doc """
Generates a signed URL to access a file via CloudFront using a custom policy.
## Parameters
- `resource_url` (string): CloudFront URL used for accessing the file, including query string parameters, if any.
- `lifetime_in_seconds` (positive integer): Expiration time is determined as the sum of the current time (in seconds) and this value.
- `key_pair_id` (string): ID for an active CloudFront key pair used for generating the signature.
- `private_key` (string): RSA private key for the key pair specified by `key_pair_id`.
- `url_encoded?` (boolean): Whether `resource_url` is encoded or not (optional, default is `false`).
- `optional_policy` (Keyword): Optional policy conditions to be added to a custom policy (default is `[]`). Currently, supports only the following keywords:
- `:date_greater_than`(integer >= 0): Seconds in `AWS:EpochTime`. Specified to `DateGreaterThan`
- `:ip_address` (list of strings): Specified to `IpAddress`.
## Return value
A generated signed URL (string).
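  ## Example

  Restricting access to an IP range in addition to the expiration time (all
  values below are placeholders only):

      generate_signed_url_using_custom_policy(
        "https://d111111abcdef8.cloudfront.net/image.jpg",
        3600,
        "KEY_PAIR_ID",
        File.read!("path/to/private_key.pem"),
        false,
        ip_address: ["192.0.2.0/24"]
      )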
"""
defun generate_signed_url_using_custom_policy(
resource_url :: v[String.t()],
lifetime_in_seconds :: v[pos_integer],
key_pair_id :: v[String.t()],
private_key :: v[String.t()],
url_encoded? :: v[boolean] \\ false,
optional_policy :: Keyword.t() \\ []
) :: String.t() do
encoded_url = if url_encoded?, do: resource_url, else: URI.encode(resource_url)
expires_in_seconds = System.system_time(:second) + lifetime_in_seconds
joiner = if URI.parse(encoded_url) |> Map.get(:query) |> is_nil(), do: "?", else: "&"
encoded_url <>
joiner <>
URI.encode_query(
make_query_params_for_custom_policy(
encoded_url,
expires_in_seconds,
key_pair_id,
private_key,
optional_policy
)
)
end
defunpt make_query_params_for_canned_policy(
encoded_url :: v[String.t()],
expires_in_seconds :: v[pos_integer],
key_pair_id :: v[String.t()],
private_key :: v[String.t()]
) :: [{String.t(), String.t()}] do
policy_statement =
~s/{"Statement":[{"Resource":"#{encoded_url}","Condition":{"DateLessThan":{"AWS:EpochTime":#{
expires_in_seconds
}}}}]}/
signature = create_signature(policy_statement, private_key)
[
{"Expires", expires_in_seconds},
{"Signature", signature},
{"Key-Pair-Id", key_pair_id}
]
end
defunpt generate_ip_address_string(addresses :: v[[String.t()]]) :: v[String.t()] do
case addresses do
[address] ->
~s/"#{String.trim(address)}"/
addresses ->
"[" <>
Enum.map_join(addresses, ",", fn address -> ~s/"#{String.trim(address)}"/ end) <> "]"
end
end
defunpt generate_custom_policy(
encoded_url :: v[String.t()],
expires_in_seconds :: v[pos_integer],
optional_policy :: Keyword.t()
) :: v[String.t()] do
date_greater_than =
case Keyword.fetch(optional_policy, :date_greater_than) do
{:ok, t} when is_integer(t) and t >= 0 -> ~s/,"DateGreaterThan":{"AWS:EpochTime":#{t}}/
_ -> ""
end
ip_address =
case Keyword.fetch(optional_policy, :ip_address) do
{:ok, []} ->
""
{:ok, addresses} when is_list(addresses) ->
~s/,"IpAddress":{"AWS:SourceIp":#{generate_ip_address_string(addresses)}}/
# It is important to restrict a connection using IpAddress, so this raises a RuntimeError instead of just ignoring :ip_address.
{:ok, _} ->
raise "Type of :ip_address is not list"
_ ->
""
end
~s/{"Statement":[{"Resource":"#{encoded_url}","Condition":{"DateLessThan":{"AWS:EpochTime":#{
expires_in_seconds
}}#{date_greater_than}#{ip_address}}}]}/
end
defunpt make_query_params_for_custom_policy(
encoded_url :: v[String.t()],
expires_in_seconds :: v[pos_integer],
key_pair_id :: v[String.t()],
private_key :: v[String.t()],
optional_policy :: Keyword.t()
) :: [{String.t(), String.t()}] do
policy_statement = generate_custom_policy(encoded_url, expires_in_seconds, optional_policy)
signature = create_signature(policy_statement, private_key)
[
{"Policy", encode_for_aws(policy_statement)},
{"Signature", signature},
{"Key-Pair-Id", key_pair_id}
]
end
defunp encode_for_aws(string :: v[String.t()]) :: v[String.t()] do
string
|> Base.encode64()
# Replace characters that are invalid in a URL query string with characters that are valid.
# https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html
|> String.replace("+", "-")
|> String.replace("=", "_")
|> String.replace("/", "~")
end
defunp create_signature(policy_statement :: v[String.t()], private_key :: v[String.t()]) ::
String.t() do
:public_key.sign(policy_statement, :sha, decode_rsa_key(private_key))
|> encode_for_aws()
end
defp decode_rsa_key(rsa_key) do
[pem_entry] = :public_key.pem_decode(rsa_key)
:public_key.pem_entry_decode(pem_entry)
end
end
|
lib/util/aws/cloudfront_signed_url.ex
| 0.856812
| 0.406391
|
cloudfront_signed_url.ex
|
starcoder
|
defmodule Akd.Dsl.Pipeline do
@moduledoc """
Defines an Akd Pipeline.
This modules provides a DSL to interact with Akd in a readable and simple
manner.
The module provides a set of macros for generating hooks that could either
be dispatched to a hook module (native or custom created) or a set of
operations.
## Pipelines and Hooks
  Once a deployment is initiated, it goes through several steps and operations
  which perform tasks like building and publishing a release, transforming
  the deployment struct along the way, and eventually executing the deployment
  (with operations run in the order they were added to the pipeline).
Each of the operations can be added in form of `Akd.Hook`s.
  Once a pipeline is defined, a deployment or another pipeline can be piped
  through it.
## For Example:
```
defmodule DeployApp.Pipeline do
import Akd.Dsl.Pipeline
pipeline :build do
hook SomeModule
hook SomeOtherModule
end
pipeline :publish do
hook PublishModule
end
pipeline :deploy do
pipe_through :build
pipe_through :publish
hook SomeCleanupModule
end
end
```
Please refer to `Nomenclature` for more information about the terms used.
"""
@doc """
Defines a pipeline `name` with a given block.
Also defines a function with name `name` and arity 0.
This can be called only inside a module.
## Examples:
iex> defmodule SomeMod do
...> import Akd.Dsl.Pipeline
...> pipeline :temporary do
...> hook "this hook"
...> end
...> end
iex> SomeMod.temporary
[{"this hook", []}]
iex> defmodule SomeOtherMod do
...> import Akd.Dsl.Pipeline
...> pipeline :temporary do
...> hook "this hook"
...> end
...> pipeline :permanent do
...> pipe_through :temporary
...> hook "another hook", some_option: "some option"
...> end
...> end
iex> SomeOtherMod.permanent
[{"this hook", []}, {"another hook", [some_option: "some option"]}]
"""
defmacro pipeline(name, do: block) do
quote do
def unquote(name)() do
{:ok, var!(hooks, unquote(__MODULE__))} = start_pipe()
unquote(block)
get_pipe(var!(hooks, unquote(__MODULE__)))
end
end
end
@doc """
Adds a hook to a pipeline.
This can be called only inside a pipeline call.
## Examples:
```elixir
pipeline :pipe do
hook Akd.Init.Distillery, run_ensure: false
hook Akd.Build.Distillery
hook Akd.Publish.Distillery
end
```
"""
defmacro hook(hook, opts \\ []) do
quote do
put_pipe(
var!(hooks, unquote(__MODULE__)),
{unquote(hook), unquote(opts)}
)
end
end
@doc """
  Adds a list of hooks to a pipeline. The hooks are the ones
  defined in the pipeline being piped through.
This can be called only inside a pipeline call.
## Examples:
```elixir
pipeline :pipe do
hook Akd.Init.Distillery, run_ensure: false
hook Akd.Build.Distillery
hook Akd.Publish.Distillery
end
  ```

  ```elixir
pipeline :final do
pipe_through :pipe # This adds all the above three hooks to :final
end
```
"""
defmacro pipe_through(pipeline) do
quote do
__MODULE__
|> apply(unquote(pipeline), [])
|> Enum.each(&put_pipe(var!(hooks, unquote(__MODULE__)), &1))
end
end
@doc """
This starts an Agent that keeps track of a pipeline's definition and
hooks added to the pipeline.
"""
def start_pipe(hooks \\ []), do: Agent.start_link(fn -> hooks end)
@doc """
This stops the Agent that keeps track of a pipeline's definition and
hooks added to the pipeline.
"""
def stop_pipe(hooks), do: Agent.stop(hooks)
@doc """
This adds another hook to the Agent keeping track of a pipeline's definition
"""
def put_pipe(hooks, hook), do: Agent.update(hooks, &[hook | &1])
@doc """
This gets the hooks from the Agent keeping track of a pipeline's definition
"""
def get_pipe(hooks), do: hooks |> Agent.get(& &1) |> Enum.reverse()
end
|
lib/akd/dsl/pipeline.ex
| 0.87804
| 0.944074
|
pipeline.ex
|
starcoder
|
defmodule Bunch.Binary do
@moduledoc """
A bunch of helpers for manipulating binaries.
"""
use Bunch
@doc """
Chunks given binary into parts of given size.
Remaining part is cut off.
## Examples
iex> <<1, 2, 3, 4, 5, 6>> |> #{inspect(__MODULE__)}.chunk_every(2)
[<<1, 2>>, <<3, 4>>, <<5, 6>>]
iex> <<1, 2, 3, 4, 5, 6, 7>> |> #{inspect(__MODULE__)}.chunk_every(2)
[<<1, 2>>, <<3, 4>>, <<5, 6>>]
"""
@spec chunk_every(binary, pos_integer) :: [binary]
def chunk_every(binary, chunk_size) do
{result, _} = chunk_every_rem(binary, chunk_size)
result
end
@doc """
Chunks given binary into parts of given size.
Returns list of chunks and remainder.
## Examples
iex> <<1, 2, 3, 4, 5, 6>> |> #{inspect(__MODULE__)}.chunk_every_rem(2)
{[<<1, 2>>, <<3, 4>>, <<5, 6>>], <<>>}
iex> <<1, 2, 3, 4, 5, 6, 7>> |> #{inspect(__MODULE__)}.chunk_every_rem(2)
{[<<1, 2>>, <<3, 4>>, <<5, 6>>], <<7>>}
"""
@spec chunk_every_rem(binary, chunk_size :: pos_integer) :: {[binary], remainder :: binary}
def chunk_every_rem(binary, chunk_size) do
do_chunk_every_rem(binary, chunk_size)
end
defp do_chunk_every_rem(binary, chunk_size, acc \\ []) do
case binary do
<<chunk::binary-size(chunk_size)>> <> rest ->
do_chunk_every_rem(rest, chunk_size, [chunk | acc])
rest ->
{acc |> Enum.reverse(), rest}
end
end
@doc """
Cuts off the smallest possible chunk from the end of `binary`, so that the
size of returned binary is an integer multiple of `i`.
## Examples
iex> import #{inspect(__MODULE__)}
iex> take_int_part(<<1,2,3,4,5,6,7,8>>, 3)
<<1,2,3,4,5,6>>
iex> take_int_part(<<1,2,3,4,5,6,7,8>>, 4)
<<1,2,3,4,5,6,7,8>>
"""
@spec take_int_part(binary, pos_integer) :: binary
def take_int_part(binary, i) do
{b, _} = split_int_part(binary, i)
b
end
@doc """
Returns a 2-tuple, where the first element is the result of `take_int_part(binary, i)`,
and the second is the rest of `binary`.
## Examples
iex> import #{inspect(__MODULE__)}
iex> split_int_part(<<1,2,3,4,5,6,7,8>>, 3)
{<<1,2,3,4,5,6>>, <<7,8>>}
iex> split_int_part(<<1,2,3,4,5,6,7,8>>, 4)
{<<1,2,3,4,5,6,7,8>>, <<>>}
"""
@spec split_int_part(binary, pos_integer) :: {binary, binary}
def split_int_part(binary, i) do
len = Bunch.Math.max_multiple_lte(i, binary |> byte_size())
<<b::binary-size(len), r::binary>> = binary
{b, r}
end
end
|
lib/bunch/binary.ex
| 0.878835
| 0.448789
|
binary.ex
|
starcoder
|
defmodule Kaguya.Channel do
use GenServer
alias Kaguya.ChannelSupervisor, as: ChanSup
alias Kaguya.Util, as: Util
@moduledoc """
Channel GenServer, with a few utility functions for working with
channels. As a GenServer, it can be called in the following ways:
* {:send, message}, where message is the message to be sent to the channel
  * {:set_user, nick_string}, where the nick string is a nick with the mode prefix (+, @, etc.)
* {:get_user, nick}, where nick is the nick of the user to be returned.
The Kaguya.Channel.User struct is returned
* {:del_user, nick}, where nick is the nick of the user to be deleted
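  ## Example

  Using the convenience wrappers defined below (the channel and nick are
  illustrative):

      Kaguya.Channel.join("#elixir")
      Kaguya.Channel.set_user("#elixir", "@some_op")
      Kaguya.Channel.get_user("#elixir", "some_op")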
"""
@max_buffer 10000
defmodule User do
@moduledoc """
Representation of a user in a channel.
"""
defstruct nick: "", mode: :normal
end
@doc """
Starts a channel worker with the given name
and options
"""
def start_link(name, opts \\ []) do
GenServer.start_link(__MODULE__, {name}, opts)
end
def init({name}) do
require Logger
Logger.log :debug, "Started channel #{name}!"
Util.joinChan(name)
:pg2.join(:channels, self())
:ets.insert(:channels, {name, self()})
users = :ets.new(:users, [:set, :protected])
{:ok, {name, users, []}}
end
def handle_call({:send, message}, _from, {name, _users, _buffer} = state) do
Kaguya.Util.sendPM(name, message)
{:reply, :ok, state}
end
def handle_call({:rename_user, {old_nick, new_nick}}, _from, {_name, users, _buffer} = state) do
case :ets.lookup(users, old_nick) do
[{^old_nick, user}] ->
new_user = %{user | nick: new_nick}
:ets.delete(users, old_nick)
:ets.insert(users, {new_nick, new_user})
[] -> :ok
end
{:reply, :ok, state}
end
def handle_call({:set_user, nick_mode}, _from, {_name, users, _buffer} = state) do
mode_sym = String.first(nick_mode)
mode =
case mode_sym do
"~" -> :op
"&" -> :op
"@" -> :op
"%" -> :hop
"+" -> :voice
_ -> :normal
end
nick =
if mode == :normal do
nick_mode
else
String.slice(nick_mode, 1, 1000)
end
user = %Kaguya.Channel.User{nick: nick, mode: mode}
:ets.insert(users, {nick, user})
{:reply, :ok, state}
end
def handle_call({:get_user, nick}, _from, {_name, users, _buffer} = state) do
case :ets.lookup(users, nick) do
[{^nick, user}] -> {:reply, user, state}
[] -> {:reply, nil, state}
end
end
def handle_call(:get_users, _from, {_name, users, _buffer} = state) do
chan_users =
:ets.foldr(fn {_nick, user}, acc ->
[user|acc]
end, [], users)
{:reply, chan_users, state}
end
def handle_call({:del_user, nick}, _from, {_name, users, _buffer} = state) do
:ets.delete(users, nick)
{:reply, :ok, state}
end
def handle_call({:log_message, msg}, _from, {name, users, buffer}) do
new_buffer =
if Enum.count(buffer) > @max_buffer do
[msg|buffer] |> Enum.drop(-1)
else
[msg|buffer]
end
{:reply, :ok, {name, users, new_buffer}}
end
def handle_call({:get_buffer, fun}, _from, {_name, _users, buffer} = state) do
{:reply, fun.(buffer), state}
end
def handle_call(:get_user_count, _from, {_name, users, _buffer} = state) do
count =
users
|> :ets.info(:size)
{:reply, count, state}
end
def handle_call(:part, _from, {name, _users, _buffer}) do
Util.partChan(name)
{:stop, :normal, :ok, nil}
end
def handle_call(:join, _from, {name, _users, _buffer}) do
  {:ok, state} = init({name})
  {:reply, :ok, state}
end
@doc """
Convenience function to join the specified channel.
"""
def join(channel) do
case :ets.lookup(:channels, channel) do
[] -> {:ok, _pid} = Supervisor.start_child(ChanSup, [channel, []])
_ -> nil
end
end
@doc """
Convenience function to part the specified channel.
"""
def part(channel) do
[{^channel, pid}] = :ets.lookup(:channels, channel)
:ets.delete(:channels, channel)
:ok = GenServer.call(pid, :part)
end
@doc """
Convenience function to set a user in a channel from a nick string (with optional mode prefix).
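For example (channel and nick are hypothetical; the "@" prefix marks an op):
    :ok = Kaguya.Channel.set_user("#example", "@alice")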
"""
def set_user(chan, nick) do
[{^chan, pid}] = :ets.lookup(:channels, chan)
:ok = GenServer.call(pid, {:set_user, nick})
end
@doc """
Convenience function to get a user count from a channel.
"""
def get_user_count(chan) do
[{^chan, pid}] = :ets.lookup(:channels, chan)
GenServer.call(pid, :get_user_count)
end
@doc """
Convenience function to get all users from a channel.
"""
def get_users(chan) do
[{^chan, pid}] = :ets.lookup(:channels, chan)
GenServer.call(pid, :get_users)
end
@doc """
Convenience function to get information of a user in a channel.
"""
def get_user(chan, nick) do
[{^chan, pid}] = :ets.lookup(:channels, chan)
GenServer.call(pid, {:get_user, nick})
end
@doc """
Convenience function to remove a nick from a channel.
"""
def del_user(chan, nick) do
case :ets.lookup(:channels, chan) do
[{^chan, pid}] -> :ok = GenServer.call(pid, {:del_user, nick})
_ -> :ok
end
end
@doc """
Convenience function to perform a function on a channel's buffer
and get the result.
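For example, fetching the five most recent buffer entries (the channel name
is hypothetical):
    Kaguya.Channel.get_buffer("#example", &Enum.take(&1, 5))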
"""
def get_buffer(chan, fun) do
[{^chan, pid}] = :ets.lookup(:channels, chan)
GenServer.call(pid, {:get_buffer, fun})
end
end
|
lib/kaguya/channel.ex
| 0.721939
| 0.403302
|
channel.ex
|
starcoder
|
defmodule Sippet do
@moduledoc """
Holds the Sippet stack.
Network transport protocols should be registered during initialization:
def init(_) do
Sippet.register_transport(:udp, false)
...
end
Messages are dispatched to transports by sending the following message:
send(pid, {:send_message, message, host, port, transaction})
Whenever a message is received by a transport, the function
`Sippet.handle_transport_message` is called, which will validate and route
messages through the transaction layer or send directly to the core.
"""
use Supervisor
import Kernel, except: [send: 2]
alias Sippet.{Message, Transactions}
alias Sippet.Message.{RequestLine, StatusLine}
require Logger
@typedoc "A SIP message request"
@type request :: Message.request()
@typedoc "A SIP message response"
@type response :: Message.response()
@typedoc "An network error that occurred while sending a message"
@type reason :: term
@typedoc "A client transaction identifier"
@type client_key :: Transactions.Client.Key.t()
@typedoc "A server transaction identifier"
@type server_key :: Transactions.Server.Key.t()
@typedoc "Sippet identifier"
@type sippet :: atom
@doc """
Handles the sigil `~K`.
It returns a client or server transaction key depending on the number of
parameters passed.
## Examples
iex> import Sippet, only: [sigil_K: 2]
iex> Sippet.Transactions.Client.Key.new("<KEY>", :invite)
~K[z9hG4bK230f2.1|:invite]
iex> ~K[z9hG4bK230f2.1|INVITE]
~K[z9hG4bK230f2.1|:invite]
iex> Sippet.Transactions.Server.Key.new("<KEY>", :invite, {"client.biloxi.example.com", 5060})
~K[z9hG4bK74b21|:invite|client.biloxi.example.com:5060]
iex> ~K[z9hG4bK74b21|INVITE|client.biloxi.example.com:5060]
~K[z9hG4bK74b21|:invite|client.biloxi.example.com:5060]
"""
def sigil_K(string, _) do
case String.split(string, "|") do
[branch, method] ->
Transactions.Client.Key.new(branch, sigil_to_method(method))
[branch, method, sentby] ->
[host, port] = String.split(sentby, ":")
Transactions.Server.Key.new(
branch,
sigil_to_method(method),
{host, String.to_integer(port)}
)
end
end
defp sigil_to_method(method) do
case method do
":" <> rest -> Message.to_method(rest)
other -> Message.to_method(other)
end
end
@doc """
Sends a message (request or response) using transactions if possible.
Requests with method `:ack` are sent directly to the transport layer.
A `Sippet.Transactions.Client` is created for other requests to handle client
retransmissions, when the transport requires it, and to match response
retransmissions, so the `Sippet.Core` doesn't receive retransmissions other than
200 OK for `:invite` requests.
In case of success, returns `:ok`.
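For example, assuming a stack named `:mystack` and a previously built
request (both hypothetical):
    :ok = Sippet.send(:mystack, request)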
"""
@spec send(sippet, request | response) :: :ok | {:error, reason}
def send(sippet, message) when is_atom(sippet) do
unless Message.valid?(message) do
raise ArgumentError, "expected :message argument to be a valid SIP message"
end
do_send(sippet, message)
end
defp do_send(sippet, %Message{start_line: %RequestLine{method: :ack}} = request),
do: Sippet.Router.send_transport_message(sippet, request, nil)
defp do_send(sippet, %Message{start_line: %RequestLine{}} = outgoing_request),
do: Sippet.Router.send_transaction_request(sippet, outgoing_request)
defp do_send(sippet, %Message{start_line: %StatusLine{}} = outgoing_response),
do: Sippet.Router.send_transaction_response(sippet, outgoing_response)
@doc """
Verifies if the transport protocol used to send the given message is
reliable.
"""
@spec reliable?(sippet, Message.t()) :: boolean
def reliable?(sippet, %Message{headers: %{via: [via | _]}})
when is_atom(sippet) do
{_version, protocol, _host_and_port, _params} = via
case Registry.lookup(sippet, {:transport, protocol}) do
[{_, reliable}] ->
reliable
_ ->
raise ArgumentError, message: "protocol not registered"
end
end
@doc """
Registers a transport for a given protocol.
"""
@spec register_transport(sippet, atom, boolean) :: :ok | {:error, :already_registered}
def register_transport(sippet, protocol, reliable)
when is_atom(sippet) and is_atom(protocol) and is_boolean(reliable) do
case Registry.register(sippet, {:transport, protocol}, reliable) do
{:ok, _} ->
:ok
{:error, {:already_registered, _}} ->
{:error, :already_registered}
end
end
@doc """
Registers the stack core.
"""
@spec register_core(sippet, atom) :: :ok
def register_core(sippet, module)
when is_atom(sippet) and is_atom(module) do
Registry.put_meta(sippet, :core, module)
end
@doc """
Terminates a client or server transaction forcefully.
This function is not generally executed by entities; there is a single case
where it is fundamental, which is when a client transaction is in proceeding
state for a long time, and the transaction has to be finished forcibly, or it
will never finish by itself.
If a transaction with such a key does not exist, it will be silently ignored.
"""
@spec terminate(sippet, client_key | server_key) :: :ok
def terminate(sippet, key) do
case Registry.lookup(sippet, {:transaction, key}) do
[] ->
:ok
[{pid, _}] ->
# Send the response through the existing server key.
case key do
%Transactions.Client.Key{} ->
Transactions.Client.terminate(pid)
%Transactions.Server.Key{} ->
Transactions.Server.terminate(pid)
end
end
end
@doc false
def start_link(options) when is_list(options) do
name =
case Keyword.fetch(options, :name) do
{:ok, name} when is_atom(name) ->
name
{:ok, other} ->
raise ArgumentError, "expected :name to be an atom, got: #{inspect(other)}"
:error ->
raise ArgumentError, "expected :name option to be present"
end
Supervisor.start_link(__MODULE__, options, name: :"#{name}_sup")
end
def child_spec(options) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [options]}
}
end
@impl true
def init(options) do
children = [
{Registry, [name: options[:name], keys: :unique, partitions: System.schedulers_online()]}
]
Supervisor.init(children, strategy: :one_for_one)
end
end
|
lib/sippet.ex
| 0.895711
| 0.431584
|
sippet.ex
|
starcoder
|
defmodule Multiverses.Phoenix.PubSub do
@moduledoc """
Implements the `Multiverses` pattern for `Phoenix.PubSub`.
Message topics are sharded by postfixing the topic with a universe id.
Processes in any given universe are then only capable of subscribing to
messages sent within the same universe.
## Usage
```
use Multiverses, with: Phoenix.PubSub
```
and in that module use the `PubSub` alias as if you had the
`alias Phoenix.PubSub` directive.
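For example, a subscriber and a broadcaster in the same universe
transparently agree on the sharded topic (names below are hypothetical):
```
PubSub.subscribe(MyApp.PubSub, "room:lobby")
PubSub.broadcast(MyApp.PubSub, "room:lobby", {:new_message, "hi"})
```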
To use with `Phoenix.Presence`, see:
[Using Multiverses with Phoenix Presence](phoenix-presence.html)
## Warning
This system should not be used in production to achieve sharding of
communications channels.
## Important
This does not shard across phoenix channels, as each channel will presumably
already exist in the context of its own test shard and have requisite
`:"$callers"` implemented by other functionality.
"""
use Multiverses.Clone,
module: Phoenix.PubSub,
except: [
broadcast: 3, broadcast: 4,
broadcast!: 3, broadcast!: 4,
broadcast_from: 4, broadcast_from: 5,
broadcast_from!: 4, broadcast_from!: 5,
direct_broadcast: 4, direct_broadcast: 5,
direct_broadcast!: 4, direct_broadcast!: 5,
local_broadcast: 3, local_broadcast: 4,
local_broadcast_from: 4, local_broadcast_from: 5,
subscribe: 2, subscribe: 3,
unsubscribe: 2,
]
require Multiverses
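@doc """
Postfixes the given topic with a URL-safe encoding of the caller's universe
id, yielding the sharded topic that is actually passed to `Phoenix.PubSub`.
"""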
def universal(message) do
universe_slug = Multiverses.self()
|> :erlang.term_to_binary
|> Base.url_encode64
IO.chardata_to_string([message, "-", universe_slug])
end
def broadcast(pubsub, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.broadcast(pubsub,
universal(topic),
message,
dispatcher)
end
def broadcast!(pubsub, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.broadcast!(pubsub,
universal(topic),
message,
dispatcher)
end
def broadcast_from(pubsub, from, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.broadcast_from(pubsub,
from,
universal(topic),
message,
dispatcher)
end
def broadcast_from!(pubsub, from, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.broadcast_from!(pubsub,
from,
universal(topic),
message,
dispatcher)
end
def direct_broadcast(node_name, pubsub, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.direct_broadcast(node_name,
pubsub,
universal(topic),
message,
dispatcher)
end
def direct_broadcast!(node_name, pubsub, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.direct_broadcast!(node_name,
pubsub,
universal(topic),
message,
dispatcher)
end
def local_broadcast(pubsub, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.local_broadcast(pubsub,
universal(topic),
message,
dispatcher)
end
def local_broadcast_from(pubsub, from, topic, message, dispatcher \\ Phoenix.PubSub) do
Phoenix.PubSub.local_broadcast_from(pubsub,
from,
universal(topic),
message,
dispatcher)
end
def subscribe(pubsub, topic, opts \\ []) do
Phoenix.PubSub.subscribe(pubsub,
universal(topic),
opts)
end
def unsubscribe(pubsub, topic) do
Phoenix.PubSub.unsubscribe(pubsub, universal(topic))
end
end
|
lib/multiverses.phoenix.pubsub.ex
| 0.85408
| 0.833257
|
multiverses.phoenix.pubsub.ex
|
starcoder
|
defmodule NervesHub.Connection do
@moduledoc """
Agent used to keep the simple state of the devices connection
to [nerves-hub.org](https://www.nerves-hub.org).
The state is a tuple where the first element is an atom of `:connected` or
`:disconnected` and the second element is the value of `System.monotonic_time/1`
at the time of setting the new state.
This agent is started as a child when using `NervesHub.Supervisor`. However,
if you are not using the supervisor (i.e. HTTP requests only), you must start
this agent separately:
```elixir
def init(_) do
children =
[
NervesHub.Connection,
MyApp.OtherChild
]
Supervisor.init(children, strategy: :one_for_one)
end
```
In practice, this state is set anytime the device connection to
[nerves-hub.org](https://www.nerves-hub.org) channel changes.
Likewise, it is set after an HTTP request fails or succeeds. This makes it
useful when you want to consider the connection to
[nerves-hub.org](https://www.nerves-hub.org) as part of the overall health
of the device and perform explicit actions based on the result, such as using
the Erlang [:heart](http://erlang.org/doc/man/heart.html) module to force a
reboot if the callback check fails.
```
# Set a callback for heart to check every 5 seconds. If the function returns anything other than
# `:ok`, it will cause reboot.
:heart.set_callback(NervesHub.Connection, :check!)
```
Or, you can use the check as part of a separate function with other health checks as well
```
defmodule MyApp.Checker do
def health_check do
with :ok <- NervesHub.Connection.check(),
     :ok <- MyApp.another_check(),
     :ok <- MyApp.yet_another_check() do
  :ok
else
  err -> err
end
end
end
# Somewhere else in MyApp
:heart.set_callback(MyApp.Checker, :health_check)
```
"""
use Agent
@spec start_link(any()) :: {:error, any()} | {:ok, pid()}
def start_link(_) do
# start in a disconnected state
Agent.start_link(fn -> {:disconnected, current_time()} end, name: __MODULE__)
end
@doc """
A simple check to see if the device is considered ok.
This will still return `:ok` if the device is in a disconnected state,
but within the `:connection_timeout` timeframe to allow for
intermittent connection failures that are recoverable. Once the
disconnection has exceeded the timeout, this check will be considered
unhealthy.
The default connection timeout is 15 minutes (900 seconds), but is configurable:
```
# 60 second timeout
config :nerves_hub, connection_timeout: 60
```
"""
@spec check() :: :ok | {:error, {:disconnected_too_long, integer()}} | {:error, :no_agent}
def check() do
timeout = Application.get_env(:nerves_hub, :connection_timeout, 900)
now = current_time()
Agent.get(__MODULE__, & &1)
|> case do
{:connected, _} -> :ok
{:disconnected, time} when now - time <= timeout -> :ok
{:disconnected, time} -> {:error, {:disconnected_too_long, time}}
end
end
@doc """
Same as `check/0`, but raises `RuntimeError` if the check fails
"""
@spec check!() :: :ok
def check!() do
unless check() == :ok do
raise "too much time has passed since a successful connection to NervesHub"
end
:ok
end
@doc """
Sets the state to `{:connected, System.monotonic_time(:second)}`
"""
@spec connected() :: :ok | {:error, :no_agent}
def connected() do
fun = fn _ -> {:connected, current_time()} end
apply_agent_fun(:update, fun)
end
@doc """
Sets the state to `{:disconnected, System.monotonic_time(:second)}`
"""
@spec disconnected() :: :ok | {:error, :no_agent}
def disconnected() do
# If we are already in a disconnected state, then don't
# overwrite the existing value so we can measure from
# the first point of disconnect
fun = fn state ->
case state do
{:disconnected, _time} = state -> state
_ -> {:disconnected, current_time()}
end
end
apply_agent_fun(:update, fun)
end
@doc """
Reads the state directly without modification.
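For example (the timestamp shown is an arbitrary monotonic value):
```
NervesHub.Connection.read()
#=> {:connected, -576460751}
```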
"""
@spec read() :: {:connected, integer()} | {:disconnected, integer()} | {:error, :no_agent}
def read(), do: apply_agent_fun(:get, & &1)
defp current_time(), do: System.monotonic_time(:second)
defp apply_agent_fun(fun_name, fun) do
case Process.whereis(__MODULE__) do
agent when is_pid(agent) ->
apply(Agent, fun_name, [agent, fun])
_ ->
{:error, :no_agent}
end
end
end
|
lib/nerves_hub/connection.ex
| 0.871338
| 0.944995
|
connection.ex
|
starcoder
|
defmodule Rummage.Ecto.Services.BuildSearchQuery do
@moduledoc """
`Rummage.Ecto.Services.BuildSearchQuery` is a service module which serves the
default search hook, `Rummage.Ecto.Hooks.Search` that comes shipped with `Rummage.Ecto`.
Has a `Module Attribute` called `search_types`:
```elixir
@search_types ~w(like ilike eq gt lt gteq lteq is_nil in nin between)
```
`@search_types` is a collection of all 11 valid `search_types` that come shipped with
`Rummage.Ecto`'s default search hook. The types are:
* `like`: Searches for a `term` in a given `field` of a `queryable`.
* `ilike`: Searches for a `term` in a given `field` of a `queryable`, in a case-insensitive fashion.
* `eq`: Searches for a given `field` of a `queryable` to be equal to a `term`.
* `gt`: Searches for a given `field` of a `queryable` to be greater than a `term`.
* `lt`: Searches for a given `field` of a `queryable` to be less than a `term`.
* `gteq`: Searches for a given `field` of a `queryable` to be greater than or equal to a `term`.
* `lteq`: Searches for a given `field` of a `queryable` to be less than or equal to a `term`.
* `is_nil`: Searches for a given `field` of a `queryable` to be nil (or not nil).
* `in`: Searches for a given `field` of a `queryable` to be a member of a list of terms.
* `nin`: Searches for a given `field` of a `queryable` to not be a member of a list of terms.
* `between`: Searches for a given `field` of a `queryable` to fall between a first and a second `term`.
Feel free to use this module on a custom search hook that you write.
"""
import Ecto.Query
@search_types ~w(like ilike eq gt lt gteq lteq is_nil in nin between)
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field`, `search_type`
and `search_term`.
## Examples
When `field`, `search_type` and `queryable` are passed with `search_type` of `like`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "like", "field_!")
#Ecto.Query<from p in "parents", where: like(p.field_1, ^"field_!")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `ilike`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "ilike", "field_!")
#Ecto.Query<from p in "parents", where: ilike(p.field_1, ^"field_!")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `eq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "eq", "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 == ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "gt", "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 > ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "lt", "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 < ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "gteq", "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 >= ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "lteq", "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 <= ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `is_nil`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "is_nil", "false")
#Ecto.Query<from p in "parents", where: not is_nil(p.field_1)>
When `field`, `search_type` and `queryable` are passed with `search_type` of `in`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "in", ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 in ^["1", "2"]>
When `field`, `search_type` and `queryable` are passed with `search_type` of `nin`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "nin", ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 not in ^["1", "2"]>
When `field`, `search_type` and `queryable` are passed with `search_type` of `between`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "between", ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 >= ^"1", where: p.field_1 <= ^"2">
When `field`, `search_type` and `queryable` are passed with an invalid `search_type`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, "pizza", "field_!")
#Ecto.Query<from p in "parents">
"""
@spec run(Ecto.Query.t(), atom, String.t(), term) :: Ecto.Query.t()
def run(queryable, field, search_type, search_term) do
case Enum.member?(@search_types, search_type) do
true ->
apply(__MODULE__, String.to_atom("handle_" <> search_type), [
queryable,
field,
search_term
])
_ ->
queryable
end
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `like`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_like(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: like(p.field_1, ^"field_!")>
"""
@spec handle_like(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_like(queryable, field, search_term) do
queryable
|> where([..., b], like(field(b, ^field), ^search_term))
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `ilike`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_ilike(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: ilike(p.field_1, ^"field_!")>
"""
@spec handle_ilike(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_ilike(queryable, field, search_term) do
queryable
|> where([..., b], ilike(field(b, ^field), ^search_term))
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `eq`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_eq(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 == ^"field_!">
"""
@spec handle_eq(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_eq(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) == ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `gt`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gt(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 > ^"field_!">
"""
@spec handle_gt(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_gt(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) > ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `lt`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lt(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 < ^"field_!">
"""
@spec handle_lt(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_lt(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) < ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `gteq`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gteq(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 >= ^"field_!">
"""
@spec handle_gteq(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_gteq(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) >= ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `lteq`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lteq(queryable, :field_1, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 <= ^"field_!">
"""
@spec handle_lteq(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_lteq(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) <= ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `is_nil`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, "false")
#Ecto.Query<from p in "parents", where: not is_nil(p.field_1)>
"""
@spec handle_is_nil(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_is_nil(queryable, field, "false") do
queryable
|> where([..., b], not is_nil(field(b, ^field)))
end
def handle_is_nil(queryable, field, _) do
queryable
|> where([..., b], is_nil(field(b, ^field)))
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `in`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_in(queryable, :field_1, ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 in ^["1", "2"]>
"""
@spec handle_in(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_in(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) in ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `nin`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_nin(queryable, :field_1, ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 not in ^["1", "2"]>
"""
@spec handle_nin(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_nin(queryable, field, search_term) do
queryable
|> where([..., b], field(b, ^field) not in ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field` and `search_type`
when the `search_term` is `between`.
## Examples
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_between(queryable, :field_1, ["1", "2"])
#Ecto.Query<from p in "parents", where: p.field_1 >= ^"1", where: p.field_1 <= ^"2">
"""
@spec handle_between(Ecto.Query.t(), atom, term) :: Ecto.Query.t()
def handle_between(queryable, field, search_term) do
[first, last] = search_term
queryable
|> where([..., b], field(b, ^field) >= ^first)
|> where([..., b], field(b, ^field) <= ^last)
end
end
|
lib/rummage_ecto/services/build_search_query.ex
| 0.802013
| 0.884888
|
build_search_query.ex
|
starcoder
|
defmodule AdventOfCode2019.TheNBodyProblem do
@moduledoc """
Day 12 — https://adventofcode.com/2019/day/12
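For example, streaming a puzzle input file (the path is hypothetical):
    "priv/day12.txt"
    |> File.stream!()
    |> AdventOfCode2019.TheNBodyProblem.part1()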
"""
@spec part1(Enumerable.t(), integer) :: integer
def part1(in_stream, steps \\ 1000) do
in_stream
|> Stream.map(&read_moons/1)
|> Enum.map(&start_moon/1)
|> connect_moons([])
|> Stream.iterate(&simulate/1)
|> Stream.drop(steps)
|> Enum.take(1)
|> energy()
end
@spec part2(Enumerable.t()) :: integer
def part2(in_stream) do
in_stream
|> Stream.map(&read_moons/1)
|> Enum.map(&start_moon/1)
|> connect_moons([])
|> find_steps()
|> Enum.reduce(&lcm/2)
end
@spec read_moons(String.t()) :: tuple
defp read_moons(moon) do
String.trim(moon)
|> String.replace_suffix(">", "")
|> String.split(",")
|> Enum.map(&extract_position/1)
|> List.to_tuple()
end
@spec extract_position(String.t()) :: integer
defp extract_position(position) do
String.split(position, "=")
|> Enum.at(1)
|> String.to_integer()
end
@spec start_moon(tuple) :: pid
defp start_moon(position) do
{:ok, moon} = GenServer.start_link(AdventOfCode2019.Moon, position)
moon
end
@spec connect_moons(Enumerable.t(), Enumerable.t()) :: Enumerable.t()
defp connect_moons([], seen), do: seen
defp connect_moons([moon | tail], seen) do
GenServer.call(moon, {:moons, seen ++ tail})
connect_moons(tail, [moon | seen])
end
@spec simulate(Enumerable.t()) :: Enumerable.t()
defp simulate(moons) do
Enum.map(moons, &GenServer.call(&1, :gravitate))
|> Enum.map(&GenServer.call(&1, :move))
end
@spec energy(Enumerable.t()) :: integer
defp energy([moons]) do
Stream.map(moons, &GenServer.call(&1, :energy))
|> Enum.sum()
end
@spec find_steps(Enumerable.t()) :: Enumerable.t()
defp find_steps(moons) do
find_steps([{false, 0}, {false, 0}, {false, 0}], moons)
end
defp find_steps([{true, x_steps}, {true, y_steps}, {true, z_steps}], _moons) do
[x_steps + 1, y_steps + 1, z_steps + 1]
end
defp find_steps(steps, moons) do
simulate(moons)
|> Stream.map(&GenServer.call(&1, :at_ini))
|> Enum.reduce(fn [x, y, z], [ax, ay, az] -> [x and ax, y and ay, z and az] end)
|> Stream.zip(steps)
|> Enum.map(&incr_steps/1)
|> find_steps(moons)
end
@spec incr_steps(tuple) :: tuple
defp incr_steps({_, {true, steps}}), do: {true, steps}
defp incr_steps({at_ini, {_, steps}}), do: {at_ini, steps + 1}
@spec lcm(integer, integer) :: integer
def lcm(0, 0), do: 0
def lcm(a, b), do: div(a * b, gcd(a, b))
@spec gcd(integer, integer) :: integer
def gcd(a, 0), do: a
def gcd(a, b), do: gcd(b, rem(a, b))
end
defmodule AdventOfCode2019.Moon do
@moduledoc """
Day 12 — Moon Generic Server — https://adventofcode.com/2019/day/12
"""
use GenServer
@impl true
def init({x, y, z}) do
{:ok, {[x, y, z], [0, 0, 0], [x, y, z], [0, 0, 0], []}}
end
@impl true
def handle_call({:moons, moons}, _from, {position, velocity, ini_p, ini_v, _moons}) do
{:reply, self(), {position, velocity, ini_p, ini_v, moons}}
end
@impl true
def handle_call(:gravitate, _from, {position, velocity, ini_p, ini_v, moons}) do
{:reply, self(), {position, gravitate(position, velocity, moons), ini_p, ini_v, moons}}
end
@impl true
def handle_call(:position, _from, {position, velocity, ini_p, ini_v, moons}) do
{:reply, position, {position, velocity, ini_p, ini_v, moons}}
end
@impl true
def handle_call(:move, _from, {[x, y, z], [vx, vy, vz], ini_p, ini_v, moons}) do
position = [x + vx, y + vy, z + vz]
{:reply, self(), {position, [vx, vy, vz], ini_p, ini_v, moons}}
end
@impl true
def handle_call(:energy, _from, {[x, y, z], [vx, vy, vz], ini_p, ini_v, moons}) do
energy = (abs(x) + abs(y) + abs(z)) * (abs(vx) + abs(vy) + abs(vz))
{:reply, energy, {[x, y, z], [vx, vy, vz], ini_p, ini_v, moons}}
end
@impl true
def handle_call(:at_ini, _from, {position, velocity, ini_p, ini_v, moons}) do
at_ini_pos =
Stream.zip(position, ini_p)
|> Enum.map(fn {a, b} -> a == b end)
at_ini_vel =
  Stream.zip(velocity, ini_v)
  |> Enum.map(fn {a, b} -> a == b end)
at_ini =
Stream.zip(at_ini_pos, at_ini_vel)
|> Enum.map(fn {a, b} -> a and b end)
{:reply, at_ini, {position, velocity, ini_p, ini_v, moons}}
end
defp gravitate(_position, velocity, []), do: velocity
defp gravitate([x, y, z], [vx, vy, vz], [moon | tail]) do
[x2, y2, z2] = GenServer.call(moon, :position)
gravitate([x, y, z], [gravitate(vx, x, x2), gravitate(vy, y, y2), gravitate(vz, z, z2)], tail)
end
defp gravitate(v, p, p2) when p < p2, do: v + 1
defp gravitate(v, p, p2) when p > p2, do: v - 1
defp gravitate(v, _, _), do: v
end
|
lib/advent_of_code_2019/day12.ex
| 0.855263
| 0.518241
|
day12.ex
|
starcoder
|
defmodule EventStore.Notifications.Listener do
@moduledoc false
# Listener subscribes to event notifications using PostgreSQL's `LISTEN`
# command. Whenever events are appended to storage a `NOTIFY` command is
# executed by a trigger. The notification payload contains the first and last
# event number of the appended events. These events are then read from storage
# and published to interested subscribers.
use GenStage
require Logger
alias EventStore.MonitoredServer
alias EventStore.Notifications.Listener
defstruct [:listen_to, :schema, :ref, demand: 0, queue: :queue.new()]
def start_link(opts) do
{start_opts, listener_opts} =
Keyword.split(opts, [:name, :timeout, :debug, :spawn_opt, :hibernate_after])
listen_to = Keyword.fetch!(listener_opts, :listen_to)
schema = Keyword.fetch!(listener_opts, :schema)
state = %Listener{listen_to: listen_to, schema: schema}
GenStage.start_link(__MODULE__, state, start_opts)
end
def init(%Listener{} = state) do
%Listener{listen_to: listen_to} = state
:ok = MonitoredServer.monitor(listen_to)
{:producer, state}
end
def handle_info({:UP, listen_to, _pid}, %Listener{listen_to: listen_to} = state) do
{:noreply, [], listen_for_events(state)}
end
def handle_info({:DOWN, listen_to, _pid, _reason}, %Listener{listen_to: listen_to} = state) do
{:noreply, [], %Listener{state | ref: nil}}
end
# Ignore notifications when database connection down.
def handle_info(
{:notification, _connection_pid, _ref, _channel, _payload},
%Listener{ref: nil} = state
) do
{:noreply, [], state}
end
# Notification received from PostgreSQL's `NOTIFY`
def handle_info({:notification, _connection_pid, _ref, channel, payload}, %Listener{} = state) do
Logger.debug(
"Listener received notification on channel " <>
inspect(channel) <> " with payload: " <> inspect(payload)
)
# `NOTIFY` payload contains the stream uuid, stream id, and first / last
# stream versions (e.g. "stream-12345,1,1,5")
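# The payload is reversed and split from the right so that any commas
# embedded in the stream uuid itself remain intact in the final chunk.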
[last, first, stream_id, stream_uuid] =
payload
|> String.reverse()
|> String.split(",", parts: 4)
|> Enum.map(&String.reverse/1)
{stream_id, ""} = Integer.parse(stream_id)
{first_stream_version, ""} = Integer.parse(first)
{last_stream_version, ""} = Integer.parse(last)
state = enqueue({stream_uuid, stream_id, first_stream_version, last_stream_version}, state)
dispatch_events([], state)
end
def handle_demand(incoming_demand, %Listener{} = state) do
%Listener{demand: pending_demand} = state
state = %Listener{state | demand: pending_demand + incoming_demand}
dispatch_events([], state)
end
defp listen_for_events(%Listener{} = state) do
%Listener{listen_to: listen_to, schema: schema} = state
channel = schema <> ".events"
{:ok, ref} = Postgrex.Notifications.listen(listen_to, channel)
%Listener{state | ref: ref}
end
defp dispatch_events(events, %Listener{demand: 0} = state) do
{:noreply, Enum.reverse(events), state}
end
defp dispatch_events(events, %Listener{} = state) do
%Listener{demand: demand, queue: queue} = state
case :queue.out(queue) do
{{:value, event}, queue} ->
state = %Listener{state | demand: max(demand - 1, 0), queue: queue}
dispatch_events([event | events], state)
{:empty, _queue} ->
{:noreply, Enum.reverse(events), state}
end
end
defp enqueue(event, %Listener{} = state) do
%Listener{queue: queue} = state
%Listener{state | queue: :queue.in(event, queue)}
end
end
|
lib/event_store/notifications/listener.ex
| 0.779867
| 0.434041
|
listener.ex
|
starcoder
|
defmodule ExVCR.Converter do
@moduledoc """
Provides helpers for adapter converters.
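For example, an adapter converter is defined by using this module and
implementing the overridable callbacks (the module name is hypothetical):
    defmodule MyAdapter.Converter do
      use ExVCR.Converter
      def string_to_response(string), do: ...
      def request_to_string(request), do: ...
      def response_to_string(response), do: ...
    end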
"""
defmacro __using__(_) do
quote do
@doc """
Parse string format into original request / response tuples.
"""
def convert_from_string(%{"request" => request, "response" => response}) do
%{ request: string_to_request(request), response: string_to_response(response) }
end
defoverridable [convert_from_string: 1]
@doc """
Parse request and response tuples into string format.
"""
def convert_to_string(request, response) do
%{ request: request_to_string(request), response: response_to_string(response) }
end
defoverridable [convert_to_string: 2]
def string_to_request(string) do
request = Enum.map(string, fn({x,y}) -> {String.to_atom(x),y} end) |> Enum.into(%{})
struct(ExVCR.Request, request)
end
defoverridable [string_to_request: 1]
def string_to_response(_string), do: raise ExVCR.ImplementationMissingError
defoverridable [string_to_response: 1]
def request_to_string(_request), do: raise ExVCR.ImplementationMissingError
defoverridable [request_to_string: 1]
def response_to_string(_response), do: raise ExVCR.ImplementationMissingError
defoverridable [response_to_string: 1]
def parse_headers(headers) do
do_parse_headers(headers, [])
end
defoverridable [parse_headers: 1]
def do_parse_headers([], acc) do
Enum.reverse(acc) |> Enum.uniq_by(fn {key, _value} -> key end)
end
def do_parse_headers([{key,value}|tail], acc) do
replaced_value = to_string(value) |> ExVCR.Filter.filter_sensitive_data
replaced_value = ExVCR.Filter.filter_request_header(to_string(key), to_string(replaced_value))
do_parse_headers(tail, [{to_string(key), replaced_value}|acc])
end
defoverridable [do_parse_headers: 2]
def parse_options(options) do
do_parse_options(options, [])
end
defoverridable [parse_options: 1]
def do_parse_options([], acc) do
Enum.reverse(acc) |> Enum.uniq_by(fn {key, _value} -> key end)
end
def do_parse_options([{_key, value} | tail], acc) when is_function(value) do
do_parse_options(tail, acc)
end
def do_parse_options([{key,value}|tail], acc) do
replaced_value = atom_to_string(value) |> ExVCR.Filter.filter_sensitive_data
replaced_value = ExVCR.Filter.filter_request_option(to_string(key), atom_to_string(replaced_value))
do_parse_options(tail, [{to_string(key), replaced_value}|acc])
end
defoverridable [do_parse_options: 2]
def parse_url(url) do
to_string(url) |> ExVCR.Filter.filter_url_params
end
defoverridable [parse_url: 1]
def parse_keyword_list(params) do
Enum.map(params, fn({k,v}) -> {k,to_string(v)} end)
end
defoverridable [parse_keyword_list: 1]
def parse_request_body(:error), do: ""
def parse_request_body({:ok, body}) do
parse_request_body(body)
end
def parse_request_body(body) do
body_string = try do
to_string(body)
rescue
_e in Protocol.UndefinedError -> inspect(body)
end
ExVCR.Filter.filter_sensitive_data(body_string)
end
defoverridable [parse_request_body: 1]
defp atom_to_string(atom) do
if is_atom(atom) do
to_string(atom)
else
atom
end
end
end
end
end
|
lib/exvcr/converter.ex
| 0.650356
| 0.46794
|
converter.ex
|
starcoder
|
defmodule Saucexages.Sauce do
@moduledoc """
Functions for working with [SAUCE](http://www.acid.org/info/sauce/sauce.htm).
"""
@sauce_version "00"
@sauce_id "SAUCE"
@comment_id "COMNT"
@comment_line_byte_size 64
@sauce_record_byte_size 128
@max_comment_lines 255
@eof_character 0x1a
@file_size_limit <<255, 255, 255, 255>> |> :binary.decode_unsigned(:little)
@type field_id :: :sauce_id | :version | :title | :author | :group | :date | :file_size | :data_type | :file_type | :t_info_1 | :t_info_2 | :t_info_3 | :t_info_4 | :comment_lines | :t_flags | :t_info_s
#TODO: Make FieldMeta module with well-defined struct? - these may eventually roll into a validation lib via something like vex or ecto if it reduces deps
@field_mappings [
%{field_id: :sauce_id, field_size: 5, required?: true},
%{field_id: :version, field_size: 2, required?: true},
%{field_id: :title, field_size: 35, required?: false},
%{field_id: :author, field_size: 20, required?: false},
%{field_id: :group, field_size: 20, required?: false},
%{field_id: :date, field_size: 8, required?: false},
%{field_id: :file_size, field_size: 4, required?: false},
%{field_id: :data_type, field_size: 1, required?: true},
%{field_id: :file_type, field_size: 1, required?: true},
%{field_id: :t_info_1, field_size: 2, required?: false},
%{field_id: :t_info_2, field_size: 2, required?: false},
%{field_id: :t_info_3, field_size: 2, required?: false},
%{field_id: :t_info_4, field_size: 2, required?: false},
%{field_id: :comment_lines, field_size: 1, required?: false},
%{field_id: :t_flags, field_size: 1, required?: false},
%{field_id: :t_info_s, field_size: 22, required?: false},
]
defguard is_comment_lines(comment_lines) when is_integer(comment_lines) and comment_lines >= 0 and comment_lines <= @max_comment_lines
defguard is_comment_block(comment_lines) when is_integer(comment_lines) and comment_lines > 0 and comment_lines <= @max_comment_lines
@doc """
Returns a list of metadata for each SAUCE record field.
"""
@spec field_mappings() :: [map()]
defmacro field_mappings() do
@field_mappings |> Macro.escape()
end
@doc """
Returns the EOF (end-of-file) character value that should be used when reading or writing a SAUCE.
"""
@spec eof_character() :: integer()
defmacro eof_character() do
@eof_character
end
@doc """
Returns the size of a SAUCE field in bytes. The byte size determines how much fixed space in a SAUCE binary the field occupies.
Useful for building binaries, constructing matches, and avoiding sizing errors when working with SAUCE.
Only matches valid SAUCE fields.
## Examples
iex> Saucexages.Sauce.field_size(:title)
35
iex> Saucexages.Sauce.field_size(:t_info_1)
2
"""
@spec field_size(field_id()) :: pos_integer()
defmacro field_size(field_id)
for %{field_id: field_id, field_size: field_size} <- @field_mappings do
defmacro field_size(unquote(field_id)) do
unquote(field_size)
end
end
@doc """
Returns a list of metadata for each SAUCE record field, including calculated information such as field position.
"""
@spec field_list() :: [map()]
defmacro field_list() do
{fields, _} = Enum.map_reduce(@field_mappings, 0, fn(%{field_size: field_size} = field, acc) -> {Map.put(field, :position, acc), acc + field_size} end)
fields |> Macro.escape()
end
@doc """
Returns the zero-based binary offset within a SAUCE record for a given `field_id`.
Optionally, you may pass a boolean to indicate if the field is offset from the SAUCE `sauce_id` field.
Useful for jumping to the exact start position of a field, building binaries, constructing matches, and avoiding sizing errors when working with SAUCE.
Used with `field_size/1`, it can be helpful for working with SAUCE binaries efficiently.
## Examples
iex> Saucexages.Sauce.field_position(:title)
7
iex> Saucexages.Sauce.field_position(:title, true)
2
iex> Saucexages.Sauce.field_position(:sauce_id)
0
iex> Saucexages.Sauce.field_position(:t_info_s)
106
"""
@spec field_position(field_id(), boolean()) :: non_neg_integer()
defmacro field_position(field_id, offset? \\ false)
with {fields, _} <- Enum.map_reduce(@field_mappings, 0, fn (%{field_id: field_id, field_size: field_size}, acc) -> {%{field_id: field_id, field_size: field_size, position: acc}, acc + field_size} end) do
for %{field_id: field_id, position: position} <- fields do
defmacro field_position(unquote(field_id), false) do
unquote(position)
end
if field_id != :sauce_id do
defmacro field_position(unquote(field_id), true) do
unquote(position) - byte_size(@sauce_id)
end
else
defmacro field_position(unquote(field_id), true) do
unquote(position)
end
end
end
end
@doc """
Returns a list of metadata consisting only of required fields for a SAUCE record. SAUCE binary that lacks these fields should be considered invalid.
"""
@spec required_fields() :: [map()]
defmacro required_fields() do
Enum.filter(@field_mappings, fn(%{required?: required?}) -> required? end) |> Macro.escape()
end
@doc """
Returns a list of field_ids consisting only of required fields for a SAUCE record. SAUCE binary that lacks these fields should be considered invalid.
## Examples
iex> Saucexages.Sauce.required_field_ids()
[:sauce_id, :version, :data_type, :file_type]
"""
@spec required_field_ids() :: [field_id()]
defmacro required_field_ids() do
Enum.flat_map(@field_mappings, fn(%{field_id: field_id, required?: required?}) -> if required?, do: [field_id], else: [] end) |> Macro.escape()
end
@doc """
Default value of the sauce version field.
## Examples
iex> Saucexages.Sauce.sauce_version()
"00"
"""
@spec sauce_version() :: String.t()
defmacro sauce_version() do
@sauce_version
end
@doc """
Value of the sauce ID field.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.sauce_id()
"SAUCE"
"""
@spec sauce_id() :: String.t()
defmacro sauce_id() do
@sauce_id
end
@doc """
Value of the sauce comment ID field.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.comment_id()
"COMNT"
"""
@spec comment_id() :: String.t()
defmacro comment_id() do
@comment_id
end
@doc """
Byte size of the sauce comment ID field.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.comment_id_byte_size()
5
"""
@spec comment_id_byte_size() :: pos_integer()
defmacro comment_id_byte_size() do
byte_size(@comment_id)
end
@doc """
Byte size of a single comment line in a sauce.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.comment_line_byte_size()
64
"""
@spec comment_line_byte_size() :: pos_integer()
defmacro comment_line_byte_size() do
@comment_line_byte_size
end
@doc """
Total byte size of all comment lines when stored.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.comments_byte_size(1)
64
iex> Saucexages.Sauce.comments_byte_size(2)
128
"""
@spec comments_byte_size(non_neg_integer()) :: non_neg_integer()
def comments_byte_size(comment_lines) when is_comment_lines(comment_lines) do
comment_lines * @comment_line_byte_size
end
def comments_byte_size(comment_lines) do
raise ArgumentError, "Comment lines must be an integer greater than or equal to zero and less than or equal to #{inspect @max_comment_lines}, instead got #{inspect comment_lines}."
end
@doc """
Max number of comment lines allowed according to the SAUCE spec.
## Examples
iex> Saucexages.Sauce.max_comment_lines()
255
"""
@spec max_comment_lines() :: pos_integer()
defmacro max_comment_lines() do
@max_comment_lines
end
@doc """
Byte size of just the sauce record fields.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.sauce_record_byte_size()
128
"""
@spec sauce_record_byte_size() :: pos_integer()
defmacro sauce_record_byte_size() do
@sauce_record_byte_size
end
@doc """
Byte size of the sauce record fields, excluding the sauce_id.
Useful for constructing binaries and matching.
## Examples
iex> Saucexages.Sauce.sauce_data_byte_size()
123
"""
@spec sauce_data_byte_size() :: pos_integer()
defmacro sauce_data_byte_size() do
@sauce_record_byte_size - byte_size(@sauce_id)
end
@doc """
Byte size of the sauce ID field.
## Examples
iex> Saucexages.Sauce.sauce_id_byte_size()
5
"""
@spec sauce_id_byte_size() :: pos_integer()
defmacro sauce_id_byte_size() do
byte_size(@sauce_id)
end
@doc """
Total byte size of a sauce including the full comments block.
## Examples
iex> Saucexages.Sauce.sauce_byte_size(1)
197
iex> Saucexages.Sauce.sauce_byte_size(2)
261
"""
@spec sauce_byte_size(non_neg_integer()) :: pos_integer()
def sauce_byte_size(comment_lines)
def sauce_byte_size(0) do
@sauce_record_byte_size
end
def sauce_byte_size(comment_lines) when is_comment_block(comment_lines) do
comment_block_byte_size(comment_lines) + @sauce_record_byte_size
end
def sauce_byte_size(comment_lines) do
raise ArgumentError, "Comment lines must be an integer greater than or equal to zero and less than or equal to #{inspect @max_comment_lines}, instead got #{inspect comment_lines}."
end
@doc """
Total byte size of a sauce comments block, including the comment ID.
## Examples
iex> Saucexages.Sauce.comment_block_byte_size(1)
69
iex> Saucexages.Sauce.comment_block_byte_size(2)
133
iex> Saucexages.Sauce.comment_block_byte_size(0)
0
"""
@spec comment_block_byte_size(non_neg_integer()) :: non_neg_integer()
def comment_block_byte_size(comment_lines)
def comment_block_byte_size(0) do
0
end
def comment_block_byte_size(comment_lines) when is_comment_block(comment_lines) do
comments_byte_size(comment_lines) + comment_id_byte_size()
end
def comment_block_byte_size(comment_lines) do
raise ArgumentError, "Comment lines must be an integer greater than or equal to zero and less than or equal to #{inspect @max_comment_lines}, instead got #{inspect comment_lines}."
end
@doc """
Minimum byte size of a comment block as required by SAUCE.
The minimum requirement for a comment block is that it includes the comment id (COMNT) and enough space for 1 comment line (64 bytes).
## Examples
iex> Saucexages.Sauce.minimum_comment_block_byte_size()
69
"""
@spec minimum_comment_block_byte_size() :: pos_integer()
defmacro minimum_comment_block_byte_size() do
comment_block_byte_size(1)
end
@doc """
Minimum byte size of a SAUCE block that includes at least one comment.
## Examples
iex> Saucexages.Sauce.minimum_commented_sauce_size()
197
"""
@spec minimum_commented_sauce_size() :: pos_integer()
defmacro minimum_commented_sauce_size() do
@sauce_record_byte_size + comment_block_byte_size(1)
end
@doc """
Byte size limit for a file size (32-bit unsigned integer) according to SAUCE. Any file size bigger than this limit is set to zero or can be considered undefined.
## Examples
iex> Saucexages.Sauce.file_size_limit()
4294967295
"""
@spec file_size_limit() :: pos_integer()
defmacro file_size_limit() do
@file_size_limit
end
end
|
lib/saucexages/sauce.ex
| 0.551815
| 0.468851
|
sauce.ex
|
starcoder
|
defmodule Sim.Grid do
alias Sim.Grid
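# A grid is a map of maps indexed as grid[x][y]. A minimal sketch
# (the values here are arbitrary):
#
#     grid = Sim.Grid.create(2, 2, 0)
#     Sim.Grid.get(grid, 0, 1)
#     #=> 0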
def create(width, height, default \\ nil)
def create(width, height, func) when is_function(func) do
0..(width - 1)
|> Map.new(fn x ->
{x,
0..(height - 1)
|> Map.new(fn y ->
{y, func.(x, y)}
end)}
end)
end
def create(width, height, [[_c | _] | _r] = list) do
Grid.create(width, height, fn x, y ->
list |> Enum.at(height - (y + 1)) |> Enum.at(x)
end)
end
def create(width, height, value) do
Grid.create(width, height, fn _x, _y -> value end)
end
def apply_changes(grid, changes) do
Enum.reduce(changes, grid, fn {{x, y}, value}, grid ->
Grid.put(grid, x, y, value)
end)
end
def get(nil, _x, _y), do: {:error, "grid is nil"}
def get(%{0 => columns} = grid, x, y)
when x >= 0 and x < map_size(grid) and y >= 0 and y < map_size(columns) do
get_in(grid, [x, y])
end
def get(grid, x, y) when is_integer(x) and is_integer(y) do
{:error,
"coordinates x: #{x}, y: #{y} outside of grid width: #{width(grid)}, height: #{height(grid)}"}
end
def get(_grid, x, y) do
{:error, "only integers are allowed as coordinates, x: #{x}, y: #{y}"}
end
def put(%{0 => columns} = grid, x, y, value)
when x >= 0 and x < map_size(grid) and y >= 0 and y < map_size(columns) do
put_in(grid, [x, y], value)
end
def put(grid, x, y, _value) when is_integer(x) and is_integer(y) do
{:error,
"coordinates x: #{x}, y: #{y} outside of grid width: #{width(grid)}, height: #{height(grid)}"}
end
def put(_grid, x, y, _value) do
{:error, "only integers are allowed as coordinates, x: #{x}, y: #{y}"}
end
def width(grid) do
map_size(grid)
end
def height(grid) do
map_size(grid[0])
end
# [{x0, y0, value}, {x1, y0, value}, ...]
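# Note: iterating a map is only guaranteed to follow key order for small
# maps on the Erlang VM, so the output order here is an assumption for
# large grids rather than a guarantee.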
def map(grid, func \\ &{&1, &2, &3}) do
Enum.map(grid, fn {x, col} ->
Enum.map(col, fn {y, value} ->
{func.(x, y, value)}
end)
end)
|> Enum.reverse()
|> Enum.zip()
|> Enum.map(&Tuple.to_list/1)
|> List.flatten()
|> Enum.map(fn {i} -> i end)
|> Enum.reverse()
end
def merge_field(grid, x, y, value, func \\ &Map.merge(&1, &2)) do
field = Grid.get(grid, x, y)
Grid.put(grid, x, y, func.(field, value))
end
end
|
apps/sim/lib/sim/object/grid.ex
| 0.667798
| 0.715772
|
grid.ex
|
starcoder
|
defmodule SensorHub.DataReader do
@moduledoc """
Opens the first I2C bus ("i2c-1") and reads from SensorHub device address (0x17).
Fresh sensor data is read from the hub on every `get_data/0` call and kept
as the server's state.
Below is the table of registers documented on the vendor's wiki:
https://wiki.52pi.com/index.php/DockerPi_Sensor_Hub_Development_Board_SKU:_EP-0106
Register Address Function Value
0x01 TEMP_REG Ext. Temperature [Unit:degC]
0x02 LIGHT_REG_L Light Brightness Low 8 Bit [Unit:Lux]
0x03 LIGHT_REG_H Light Brightness High 8 Bit [Unit:Lux]
0x04 STATUS_REG Status Function
0x05 ON_BOARD_TEMP_REG OnBoard Temperature [Unit:degC]
0x06 ON_BOARD_HUMIDITY_REG OnBoard Humidity [Uinit:%]
0x07 ON_BOARD_SENSOR_ERROR 0(OK) - 1(Error)
0x08 BMP280_TEMP_REG P. Temperature [Unit:degC]
0x09 BMP280_PRESSURE_REG_L P. Pressure Low 8 Bit [Unit:Pa]
0x0A BMP280_PRESSURE_REG_M P. Pressure Mid 8 Bit [Unit:Pa]
0x0B BMP280_PRESSURE_REG_H P. Pressure High 8 Bit [Unit:Pa]
0x0C BMP280_STATUS 0(OK) - 1(Error)
0x0D HUMAN_DETECT 0(No Active Body) - 1(Active Body)
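For example (run on a device with the hub attached; the value shown is
illustrative):
    {:ok, data} = SensorHub.DataReader.get_data()
    data.on_board_temp
    #=> 23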
"""
alias Circuits.I2C
@bus_name "i2c-1"
@sensor_hub_addr 0x17
use GenServer
### External API
def start_link(_) do
{:ok, _} = GenServer.start_link(__MODULE__, {}, name: __MODULE__)
end
def get_data() do
GenServer.call(__MODULE__, :data)
end
### GenServer API
def init(_initial) do
{:ok, bus} = I2C.open(@bus_name)
{:ok, data} = get_sensors_data(bus)
{:ok, {bus, data}}
end
def handle_call(:data, _from, {bus, _data}) do
# Keep the unwrapped map in state so it matches the shape stored by init/1.
{:ok, new_data} = get_sensors_data(bus)
{:reply, {:ok, new_data}, {bus, new_data}}
end
### Helper functions
defp get_sensors_data(bus) do
{:ok,
<<0, ext_temp, light_low, light_high, status, on_board_temp, on_board_humidity,
on_board_error, bpm280_temp, bpm280_pressure_low, bpm280_pressure_medium,
bpm280_pressure_high, bpm280_status,
human_detect>>} = I2C.write_read(bus, @sensor_hub_addr, <<0>>, 0x0E)
{:ok,
%{
external_temp: ext_temp,
light_low: light_low,
light_high: light_high,
status: status,
on_board_temp: on_board_temp,
on_board_humidity: on_board_humidity,
on_board_error: on_board_error,
bpm280_temp: bpm280_temp,
bpm280_pressure_low: bpm280_pressure_low,
bpm280_pressure_medium: bpm280_pressure_medium,
bpm280_pressure_high: bpm280_pressure_high,
bpm280_status: bpm280_status,
human_detect: human_detect
}}
end
end
|
lib/sensor_hub/data_reader.ex
| 0.692122
| 0.521776
|
data_reader.ex
|
starcoder
|
defmodule Xlsxir.Unzip do
alias Xlsxir.XmlFile
@moduledoc """
Provides validation of accepted file types for a file path and extracts
required `.xlsx` contents to memory or files.
"""
@filetype_error "Invalid file type (expected xlsx)."
@xml_not_found_error "Invalid File. Required XML files not found."
@worksheet_index_error "Invalid worksheet index."
@doc """
Checks if given path is a valid file type and contains the requested worksheet, returning a tuple.
## Parameters
- `path` - file path of a `.xlsx` file type in `string` format
## Example
iex> path = "./test/test_data/test.xlsx"
iex> Xlsxir.Unzip.validate_path_and_index(path, 0)
{:ok, './test/test_data/test.xlsx'}
iex> path = "./test/test_data/test.validfilebutnotxlsx"
iex> Xlsxir.Unzip.validate_path_and_index(path, 0)
{:ok, './test/test_data/test.validfilebutnotxlsx'}
iex> path = "./test/test_data/test.xlsx"
iex> Xlsxir.Unzip.validate_path_and_index(path, 100)
{:error, "Invalid worksheet index."}
iex> path = "./test/test_data/test.invalidfile"
iex> Xlsxir.Unzip.validate_path_and_index(path, 0)
{:error, "Invalid file type (expected xlsx)."}
"""
def validate_path_and_index(path, index) do
path = String.to_charlist(path)
case valid_extract_request?(path, index) do
:ok -> {:ok, path}
{:error, reason} -> {:error, reason}
end
end
@doc """
Checks if given path is a valid file type, returning a list of available worksheets.
## Parameters
- `path` - file path of a `.xlsx` file type in `string` format
## Example
iex> path = "./test/test_data/test.xlsx"
iex> Xlsxir.Unzip.validate_path_all_indexes(path)
{:ok, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
iex> path = "./test/test_data/test.zip"
iex> Xlsxir.Unzip.validate_path_all_indexes(path)
{:ok, []}
iex> path = "./test/test_data/test.invalidfile"
iex> Xlsxir.Unzip.validate_path_all_indexes(path)
{:error, "Invalid file type (expected xlsx)."}
"""
def validate_path_all_indexes(path) do
path = String.to_charlist(path)
case :zip.list_dir(path) do
{:ok, file_list} ->
indexes = file_list
|> Enum.filter(fn (file) ->
case file do
{:zip_file, filename, _, _, _, _} ->
filename |> to_string |> String.starts_with?("xl/worksheets/sheet")
_ ->
nil
end
end)
|> Enum.map(fn ({:zip_file, filename, _, _, _, _}) ->
index = filename
|> to_string
|> String.replace_prefix("xl/worksheets/sheet", "")
|> String.replace_suffix(".xml", "")
|> String.to_integer
index - 1
end)
|> Enum.sort
{:ok, indexes}
{:error, _reason} -> {:error, @filetype_error}
end
end
defp valid_extract_request?(path, index) do
case :zip.list_dir(path) do
{:ok, file_list} -> search_file_list(file_list, index)
{:error, _reason} -> {:error, @filetype_error}
end
end
defp search_file_list(file_list, index) do
sheet = 'xl/worksheets/sheet#{index + 1}.xml'
results = file_list
|> Enum.map(fn file ->
case file do
{:zip_file, ^sheet, _, _, _, _} -> :ok
_ -> nil
end
end)
if Enum.member?(results, :ok) do
:ok
else
{:error, @worksheet_index_error}
end
end
@doc """
Extracts requested list of files from a `.zip` file to memory or file system
and returns a list of the extracted file paths.
## Parameters
- `file_list` - list containing file paths to be extracted in `char_list` format
- `path` - file path of a `.xlsx` file type in `string` format
- `to` - `:memory` or `{:file, "destination/path"}` option
## Example
An example file named `test.zip` located in './test_data/test' containing a single file named `test.txt`:
iex> path = "./test/test_data/test.zip"
iex> file_list = ['test.txt']
iex> Xlsxir.Unzip.extract_xml(file_list, path, :memory)
{:ok, [%Xlsxir.XmlFile{content: "test_successful", name: "test.txt", path: nil}]}
iex> Xlsxir.Unzip.extract_xml(file_list, path, {:file, "temp/"})
{:ok, [%Xlsxir.XmlFile{content: nil, name: "test.txt", path: "temp/test.txt"}]}
iex> with {:ok, _} <- File.rm_rf("temp"), do: :ok
:ok
"""
def extract_xml(file_list, path, to) do
path
|> to_charlist
|> extract_from_zip(file_list, to)
|> case do
{:error, reason} -> {:error, reason}
{:ok, []} -> {:error, @xml_not_found_error}
{:ok, files_list} -> {:ok, build_xml_files(files_list)}
end
end
defp extract_from_zip(path, file_list, :memory), do: :zip.extract(path, [{:file_list, file_list}, :memory])
defp extract_from_zip(path, file_list, {:file, dest_path}), do: :zip.extract(path, [{:file_list, file_list}, {:cwd, dest_path}])
defp build_xml_files(files_list) do
files_list
|> Enum.map(&build_xml_file/1)
end
# When extracting to memory
defp build_xml_file({name, content}) do
%XmlFile{name: Path.basename(name), content: content}
end
# When extracting to temp file
defp build_xml_file(file_path) do
%XmlFile{name: Path.basename(file_path), path: to_string(file_path)}
end
end
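A short sketch tying the functions above together: validate the workbook and worksheet index, then pull a single worksheet into memory. The path mirrors the doctests; the sheet file name follows the `xl/worksheets/sheetN.xml` layout this module expects.

path = "./test/test_data/test.xlsx"

with {:ok, _charlist_path} <- Xlsxir.Unzip.validate_path_and_index(path, 0),
     {:ok, [%Xlsxir.XmlFile{} = sheet | _]} <-
       Xlsxir.Unzip.extract_xml(['xl/worksheets/sheet1.xml'], path, :memory) do
  {:ok, sheet.name}
end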
lib/xlsxir/unzip.ex
defmodule Day02 do
use Aoc2018
@doc ~S"""
Counts checksum as [specified](https://adventofcode.com/2018/day/2).
## Example
iex> Day02.part_one(~s(abcdef\nbababc\nabbcde\nabcccd\naabcdd\nabcdee\nababab))
12
"""
@spec part_one(binary()) :: number()
def part_one(input) when is_binary(input) do
{twice_count, thrice_count} =
input
|> String.split()
|> Enum.reduce({0, 0}, fn id, {twice_count, thrice_count} ->
counts = get_letter_counts(id)
twice_count = if contains_repeated_twice_letter?(counts), do: twice_count + 1, else: twice_count
thrice_count = if contains_repeated_thrice_letter?(counts), do: thrice_count + 1, else: thrice_count
{twice_count, thrice_count}
end)
twice_count * thrice_count
end
defp get_letter_counts(str) when is_binary(str) do
str
|> String.codepoints()
|> Enum.reduce(%{}, fn letter, counts ->
Map.update(counts, letter, 1, &(&1 + 1))
end)
end
defp contains_repeated_twice_letter?(counts), do: 2 in Map.values(counts)
defp contains_repeated_thrice_letter?(counts), do: 3 in Map.values(counts)
@doc ~S"""
Finds two ids differing only by one letter at the same position. Returns letters that are matching.
## Example
iex> Day02.part_two("abcde\nfghij\nklmno\npqrst\nfguij\naxcye\nwvxyz")
"fgij"
"""
@spec part_two(binary()) :: binary() | nil
def part_two(input) when is_binary(input) do
input
|> String.split()
|> solve()
end
defp solve([first_id | remaining_ids]) when remaining_ids != [] do
answer =
remaining_ids
|> Enum.find_value(&find_matching_letters_if_different_by_one_letter_only(first_id, &1))
if is_nil(answer), do: solve(remaining_ids), else: answer
end
# Base case: no pair differed by exactly one letter, so return nil as the spec allows.
defp solve(_), do: nil
@spec find_matching_letters_if_different_by_one_letter_only(binary(), binary()) :: binary() | nil
defp find_matching_letters_if_different_by_one_letter_only(id1, id2) do
chars1 = String.codepoints(id1)
chars2 = String.codepoints(id2)
if length(chars1) == length(chars2) do
matching =
Enum.zip(chars1, chars2)
|> Enum.filter(fn {x, y} -> x == y end)
|> Enum.map(fn {x, _} -> x end)
if length(matching) == length(chars1) - 1, do: Enum.join(matching), else: nil
else
nil
end
end
end
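A sketch of running both parts against a real puzzle input; `input.txt` is a hypothetical path.

input = File.read!("input.txt")
IO.puts("part one (checksum): #{Day02.part_one(input)}")
IO.puts("part two (common letters): #{Day02.part_two(input)}")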
lib/day02.ex
defmodule CouchGears.Initializer do
@moduledoc """
This module is responsible for starting the CouchGears framework
inside a particular CouchDB node as an independent daemon.
A `CouchGears.Initializer` starts its own base supervisor. Each application (actually also a supervisor)
becomes a part of the base supervisor.
The framework configuration is designed to be as simple as possible. It follows
the CouchDB extension approach.
## Configuration
1. Specify the CouchGears `ebin` path in `couchdb`.
COUCH_GEARS_PA_OPTIONS="-pa /var/www/couch_gears/current/ebin"
ERL_START_OPTIONS="$ERL_OS_MON_OPTIONS -sasl errlog_type error +K true +A 4 $COUCH_GEARS_PA_OPTIONS"
2. Specify the `daemons` entry in `local.ini`
[daemons]
couch_gears={'Elixir-CouchGears-Initializer', start_link, [[{env, <<"prod">>}]]}
Finally, notice that after initialization CouchGears sets both the `httpd_db_handlers` and `httpd_global_handlers`
options, which handle incoming `/db/_gears` or `/_gears` requests.
This is equivalent to:
[httpd_global_handlers]
_gears = {'Elixir-CouchGears-Mochiweb-Handler', handle_global_gears_req}
[httpd_db_handlers]
_gears = {'Elixir-CouchGears-Mochiweb-Handler', handle_db_gears_req}
"""
use Supervisor.Behaviour
@root_path Path.expand "../../..", __FILE__
@httpd_db_handlers "Elixir-CouchGears-Mochiweb-Handler"
@gears_request_prefix "_gears"
@doc """
Starts the supervisor
"""
def start_link(opts) do
:supervisor.start_link({ :local, __MODULE__ }, __MODULE__, opts)
end
@doc """
Restarts the base supervisor through the `couch_secondary_services` functions `terminate_child` and `restart_child`
"""
def restart do
:supervisor.terminate_child(:couch_secondary_services, :couch_gears)
:supervisor.restart_child(:couch_secondary_services, :couch_gears)
end
@doc false
def init(opts) do
configure_gears(opts)
# Adds the Elixir deps to the code path
:erlang.bitstring_to_list(@root_path <> "/deps/elixir/lib/elixir/ebin") |> :code.add_pathz
Code.append_path(@root_path <> "/deps/elixir/lib/mix/ebin")
Code.append_path(@root_path <> "/deps/elixir/lib/iex/ebin")
{ Mix.start, Code.load_file(Path.join([@root_path, "mix.exs"])), Mix.loadpaths }
# Sets up the gears environment
setup_httpd_handlers
# Starts applications
apps = Enum.map initialize_gears, fn(opts) ->
supervisor(__MODULE__, [opts], [id: opts[:app_name], function: :start_app, restart: :permanent])
end
spec = supervise(apps, [strategy: :one_for_one])
CouchGears.Logger.info("CouchGears has started")
spec
end
@doc """
Starts a particular application
"""
def start_app(opts \\ []) do
File.cd(opts[:app_path])
Code.load_file Path.join([opts[:app_path], "config", "application.ex"])
app = Module.concat([Mix.Utils.camelize(opts[:app_name]) <> "Application"])
CouchGears.gears(CouchGears.gears ++ [app])
app.start_link
end
@doc """
Restarts a particular application through the `supervisor` functions `terminate_child` and `restart_child`
"""
def restart_app(name) do
:supervisor.terminate_child(__MODULE__, name)
:supervisor.restart_child(__MODULE__, name)
end
defp setup_httpd_handlers do
:couch_config.set(
"httpd_global_handlers",
"#{@gears_request_prefix}",
binary_to_list("{'#{@httpd_db_handlers}', handle_global_gears_req}"),
false
)
:couch_config.set(
"httpd_db_handlers",
"#{@gears_request_prefix}",
binary_to_list("{'#{@httpd_db_handlers}', handle_db_gears_req}"),
false
)
end
defp initialize_gears do
Enum.map Path.wildcard(CouchGears.root_path <> "/apps/*"), fn(app_path) ->
Code.load_file(Path.join([app_path, "mix.exs"]))
Mix.Tasks.Deps.Loadpaths.run([])
Mix.Tasks.Loadpaths.run([])
[app_name: List.last(Path.split(app_path)), app_path: app_path]
end
end
defp configure_gears(opts) do
CouchGears.gears([])
CouchGears.env(:couch_util.get_value(:env, opts, "dev"))
CouchGears.root_path(@root_path)
end
end
lib/couch_gears/initializer.ex
defmodule Mint.HTTP1 do
@moduledoc """
Processless HTTP client with support for HTTP/1 and HTTP/1.1.
This module provides a data structure that represents an HTTP/1 or HTTP/1.1 connection to
a given server. The connection is represented as an opaque struct `%Mint.HTTP1{}`.
The connection is a data structure and is not backed by a process, and all the
connection handling happens in the process that creates the struct.
This module and data structure work exactly like the ones described in the `Mint`
module, with the exception that `Mint.HTTP1` specifically deals with HTTP/1 and HTTP/1.1 while
`Mint` deals seamlessly with HTTP/1, HTTP/1.1, and HTTP/2. For more information on
how to use the data structure and client architecture, see `Mint`.
"""
import Mint.Core.Util
alias Mint.HTTP1.{Parse, Request, Response}
alias Mint.{HTTPError, TransportError, Types}
require Logger
@behaviour Mint.Core.Conn
@opaque t() :: %__MODULE__{}
@typedoc """
An HTTP/1-specific error reason.
The values can be:
* `:closed` - when you try to make a request or stream a body chunk but the connection
is closed.
* `:request_body_is_streaming` - when you call `request/5` to send a new
request but another request is already streaming.
* `{:unexpected_data, data}` - when unexpected data is received from the server.
* `:invalid_status_line` - when the HTTP/1 status line is invalid.
* `{:invalid_request_target, target}` - when the request target is invalid.
* `:invalid_header` - when headers can't be parsed correctly.
* `{:invalid_header_name, name}` - when a header name is invalid.
* `{:invalid_header_value, name, value}` - when a header value is invalid. `name`
is the name of the header and `value` is the invalid value.
* `:invalid_chunk_size` - when the chunk size is invalid.
* `:missing_crlf_after_chunk` - when the CRLF after a chunk is missing.
* `:invalid_trailer_header` - when trailer headers can't be parsed.
* `:more_than_one_content_length_header` - when more than one `content-length`
header is present in the response.
* `:transfer_encoding_and_content_length` - when both the `content-length` as well
as the `transfer-encoding` headers are in the response.
* `{:invalid_content_length_header, value}` - when the value of the `content-length`
header is invalid, that is, is not a non-negative integer.
* `:empty_token_list` - when a header that is supposed to contain a list of tokens
(such as the `connection` header) doesn't contain any.
* `{:invalid_token_list, string}` - when a header that is supposed to contain a list
of tokens (such as the `connection` header) contains a malformed list of tokens.
"""
@type error_reason() :: term()
defstruct [
:host,
:request,
:socket,
:transport,
requests: :queue.new(),
state: :closed,
buffer: "",
private: %{}
]
@doc """
Same as `Mint.HTTP.connect/4`, but forces an HTTP/1 or HTTP/1.1 connection.
This function doesn't support proxying.
"""
@spec connect(Types.scheme(), String.t(), :inet.port_number(), keyword()) ::
{:ok, t()} | {:error, Types.error()}
def connect(scheme, hostname, port, opts \\ []) do
# TODO: Also ALPN negotiate HTTP1?
transport = scheme_to_transport(scheme)
transport_opts = Keyword.get(opts, :transport_opts, [])
with {:ok, socket} <- transport.connect(hostname, port, transport_opts) do
initiate(transport, socket, hostname, port, opts)
end
end
@doc false
@spec upgrade(
Types.scheme(),
Mint.Types.socket(),
Types.scheme(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def upgrade(old_scheme, socket, new_scheme, hostname, port, opts) do
# TODO: Also ALPN negotiate HTTP1?
transport = scheme_to_transport(new_scheme)
transport_opts = Keyword.get(opts, :transport_opts, [])
with {:ok, socket} <- transport.upgrade(socket, old_scheme, hostname, port, transport_opts) do
initiate(new_scheme, socket, hostname, port, opts)
end
end
@doc false
@impl true
@spec initiate(
Types.scheme(),
Mint.Types.socket(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def initiate(scheme, socket, hostname, _port, _opts) do
transport = scheme_to_transport(scheme)
with :ok <- inet_opts(transport, socket),
:ok <- transport.setopts(socket, active: :once) do
conn = %__MODULE__{
transport: transport,
socket: socket,
host: hostname,
state: :open
}
{:ok, conn}
else
{:error, reason} ->
:ok = transport.close(socket)
{:error, reason}
end
end
@doc """
See `Mint.HTTP.close/1`.
"""
@impl true
@spec close(t()) :: {:ok, t()}
def close(conn)
def close(%__MODULE__{state: :open} = conn) do
conn = internal_close(conn)
{:ok, conn}
end
def close(%__MODULE__{state: :closed} = conn) do
{:ok, conn}
end
@doc """
See `Mint.HTTP.open?/1`.
"""
@impl true
@spec open?(t(), :read | :write | :read_write) :: boolean()
def open?(conn, type \\ :read_write)
def open?(%__MODULE__{state: state}, type) when type in [:read, :write, :read_write] do
state == :open
end
@doc """
See `Mint.HTTP.request/5`.
In HTTP/1 and HTTP/1.1, you can't open a new request if you're streaming the body of
another request. If you try, the error reason `{:error, :request_body_is_streaming}` is
returned.
"""
@impl true
@spec request(
t(),
method :: String.t(),
path :: String.t(),
Types.headers(),
body :: iodata() | nil | :stream
) ::
{:ok, t(), Types.request_ref()}
| {:error, t(), Types.error()}
def request(conn, method, path, headers, body \\ nil)
def request(%__MODULE__{state: :closed} = conn, _method, _path, _headers, _body) do
{:error, conn, wrap_error(:closed)}
end
def request(
%__MODULE__{request: %{state: :stream_request}} = conn,
_method,
_path,
_headers,
_body
) do
{:error, conn, wrap_error(:request_body_is_streaming)}
end
def request(%__MODULE__{} = conn, method, path, headers, body) do
%__MODULE__{host: host, transport: transport, socket: socket} = conn
with {:ok, iodata} <- Request.encode(method, path, host, headers, body),
:ok <- transport.send(socket, iodata) do
request_ref = make_ref()
state = if body == :stream, do: :stream_request, else: :status
request = new_request(request_ref, state, method)
if conn.request == nil do
conn = %__MODULE__{conn | request: request}
{:ok, conn, request_ref}
else
requests = :queue.in(request, conn.requests)
conn = %__MODULE__{conn | requests: requests}
{:ok, conn, request_ref}
end
else
{:error, %TransportError{reason: :closed} = error} ->
{:error, %{conn | state: :closed}, error}
{:error, %HTTPError{} = error} ->
{:error, conn, error}
{:error, reason} ->
{:error, conn, wrap_error(reason)}
end
end
@doc """
See `Mint.HTTP.stream_request_body/3`.
"""
@impl true
@spec stream_request_body(t(), Types.request_ref(), iodata() | :eof) ::
{:ok, t()} | {:error, t(), Types.error()}
def stream_request_body(
%__MODULE__{request: %{state: :stream_request, ref: ref}} = conn,
ref,
:eof
) do
{:ok, put_in(conn.request.state, :status)}
end
def stream_request_body(
%__MODULE__{request: %{state: :stream_request, ref: ref}} = conn,
ref,
body
) do
case conn.transport.send(conn.socket, body) do
:ok ->
{:ok, conn}
{:error, %TransportError{reason: :closed} = error} ->
{:error, %{conn | state: :closed}, error}
{:error, error} ->
{:error, conn, error}
end
end
@doc """
See `Mint.HTTP.stream/2`.
"""
@impl true
@spec stream(t(), term()) ::
{:ok, t(), [Types.response()]}
| {:error, t(), Types.error(), [Types.response()]}
| :unknown
def stream(conn, message)
def stream(%__MODULE__{transport: transport, socket: socket} = conn, {tag, socket, data})
when tag in [:tcp, :ssl] do
result = handle_data(conn, data)
# TODO: handle errors here.
_ = transport.setopts(socket, active: :once)
result
end
def stream(%__MODULE__{socket: socket} = conn, {tag, socket})
when tag in [:tcp_closed, :ssl_closed] do
handle_close(conn)
end
def stream(%__MODULE__{socket: socket} = conn, {tag, socket, reason})
when tag in [:tcp_error, :ssl_error] do
conn = put_in(conn.state, :closed)
error = conn.transport.wrap_error(reason)
{:error, conn, error, []}
end
def stream(%__MODULE__{}, _message) do
:unknown
end
defp handle_data(%__MODULE__{request: nil} = conn, data) do
conn = internal_close(conn)
{:error, conn, wrap_error({:unexpected_data, data}), []}
end
defp handle_data(%__MODULE__{request: request} = conn, data) do
data = conn.buffer <> data
case decode(request.state, conn, data, []) do
{:ok, conn, responses} ->
{:ok, conn, Enum.reverse(responses)}
{:error, conn, reason, responses} ->
conn = put_in(conn.state, :closed)
{:error, conn, reason, responses}
end
end
defp handle_close(%__MODULE__{request: request} = conn) do
conn = put_in(conn.state, :closed)
conn = request_done(conn)
if request && request.body == :until_closed do
conn = put_in(conn.state, :closed)
{:ok, conn, [{:done, request.ref}]}
else
{:error, conn, conn.transport.wrap_error(:closed), []}
end
end
@doc """
See `Mint.HTTP.open_request_count/1`.
In HTTP/1, the number of open requests is the number of pipelined requests.
"""
@impl true
@spec open_request_count(t()) :: non_neg_integer()
def open_request_count(%__MODULE__{} = conn) do
if is_nil(conn.request) do
0
else
1 + :queue.len(conn.requests)
end
end
@doc """
See `Mint.HTTP.put_private/3`.
"""
@impl true
@spec put_private(t(), atom(), term()) :: t()
def put_private(%__MODULE__{private: private} = conn, key, value) when is_atom(key) do
%{conn | private: Map.put(private, key, value)}
end
@doc """
See `Mint.HTTP.get_private/3`.
"""
@impl true
@spec get_private(t(), atom(), term()) :: term()
def get_private(%__MODULE__{private: private} = _conn, key, default \\ nil) when is_atom(key) do
Map.get(private, key, default)
end
@doc """
See `Mint.HTTP.delete_private/2`.
"""
@impl true
@spec delete_private(t(), atom()) :: t()
def delete_private(%__MODULE__{private: private} = conn, key) when is_atom(key) do
%{conn | private: Map.delete(private, key)}
end
@doc """
See `Mint.HTTP.get_socket/1`.
"""
@impl true
@spec get_socket(t()) :: Mint.Types.socket()
def get_socket(%__MODULE__{socket: socket} = _conn) do
socket
end
## Helpers
defp decode(:status, %{request: request} = conn, data, responses) do
case Response.decode_status_line(data) do
{:ok, {version, status, _reason}, rest} ->
request = %{request | version: version, status: status, state: :headers}
conn = %{conn | request: request}
responses = [{:status, request.ref, status} | responses]
decode(:headers, conn, rest, responses)
:more ->
conn = put_in(conn.buffer, data)
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_status_line), responses}
end
end
defp decode(:headers, %{request: request} = conn, data, responses) do
decode_headers(conn, request, data, responses, request.headers_buffer)
end
defp decode(:body, conn, data, responses) do
case message_body(conn.request) do
{:ok, body} ->
conn = put_in(conn.request.body, body)
decode_body(body, conn, data, conn.request.ref, responses)
{:error, reason} ->
{:error, conn, wrap_error(reason), responses}
end
end
defp decode_headers(conn, request, data, responses, headers) do
case Response.decode_header(data) do
{:ok, {name, value}, rest} ->
headers = [{name, value} | headers]
case store_header(request, name, value) do
{:ok, request} -> decode_headers(conn, request, rest, responses, headers)
{:error, reason} -> {:error, conn, wrap_error(reason), responses}
end
{:ok, :eof, rest} ->
responses = [{:headers, request.ref, Enum.reverse(headers)} | responses]
request = %{request | state: :body, headers_buffer: []}
conn = %{conn | buffer: "", request: request}
decode(:body, conn, rest, responses)
:more ->
request = %{request | headers_buffer: headers}
conn = %{conn | buffer: data, request: request}
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_header), responses}
end
end
defp decode_body(:none, conn, data, request_ref, responses) do
conn = put_in(conn.buffer, data)
conn = request_done(conn)
responses = [{:done, request_ref} | responses]
{:ok, conn, responses}
end
defp decode_body(:until_closed, conn, data, _request_ref, responses) do
{conn, responses} = add_body(conn, data, responses)
{:ok, conn, responses}
end
defp decode_body({:content_length, length}, conn, data, request_ref, responses) do
cond do
length > byte_size(data) ->
conn = put_in(conn.request.body, {:content_length, length - byte_size(data)})
{conn, responses} = add_body(conn, data, responses)
{:ok, conn, responses}
length <= byte_size(data) ->
<<body::binary-size(length), rest::binary>> = data
{conn, responses} = add_body(conn, body, responses)
conn = request_done(conn)
responses = [{:done, request_ref} | responses]
next_request(conn, rest, responses)
end
end
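# Chunked transfer-encoding is decoded as a small state machine kept in
# `request.body`:
#
#   * `{:chunked, nil}` - expecting a chunk-size line
#   * `{:chunked, :metadata, size}` - skipping chunk extensions up to CRLF
#     (`size` is `:trailer` after the final zero-sized chunk)
#   * `{:chunked, size}` - consuming `size` bytes of chunk data
#   * `{:chunked, :crlf}` - expecting the CRLF that terminates a chunk
#   * `{:chunked, :trailer}` - parsing optional trailer headers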
defp decode_body({:chunked, nil}, conn, "", _request_ref, responses) do
conn = put_in(conn.buffer, "")
conn = put_in(conn.request.body, {:chunked, nil})
{:ok, conn, responses}
end
defp decode_body({:chunked, nil}, conn, data, request_ref, responses) do
case Integer.parse(data, 16) do
{_size, ""} ->
conn = put_in(conn.buffer, data)
conn = put_in(conn.request.body, {:chunked, nil})
{:ok, conn, responses}
{0, rest} ->
# Here, we manually collapse the body buffer since we're done with the body.
{conn, responses} = collapse_body_buffer(conn, responses)
decode_body({:chunked, :metadata, :trailer}, conn, rest, request_ref, responses)
{size, rest} when size > 0 ->
decode_body({:chunked, :metadata, size}, conn, rest, request_ref, responses)
_other ->
{:error, conn, wrap_error(:invalid_chunk_size), responses}
end
end
defp decode_body({:chunked, :metadata, size}, conn, data, request_ref, responses) do
case Parse.ignore_until_crlf(data) do
{:ok, rest} ->
decode_body({:chunked, size}, conn, rest, request_ref, responses)
:more ->
conn = put_in(conn.buffer, data)
conn = put_in(conn.request.body, {:chunked, :metadata, size})
{:ok, conn, responses}
end
end
defp decode_body({:chunked, :trailer}, conn, data, _request_ref, responses) do
decode_trailer_headers(conn, data, responses, conn.request.headers_buffer)
end
defp decode_body({:chunked, :crlf}, conn, data, request_ref, responses) do
case data do
<<"\r\n", rest::binary>> ->
conn = put_in(conn.request.body, {:chunked, nil})
decode_body({:chunked, nil}, conn, rest, request_ref, responses)
_other when byte_size(data) < 2 ->
conn = put_in(conn.buffer, data)
{:ok, conn, responses}
_other ->
{:error, conn, wrap_error(:missing_crlf_after_chunk), responses}
end
end
defp decode_body({:chunked, length}, conn, data, request_ref, responses) do
cond do
length > byte_size(data) ->
conn = put_in(conn.buffer, "")
conn = put_in(conn.request.body, {:chunked, length - byte_size(data)})
{conn, responses} = add_body(conn, data, responses)
{:ok, conn, responses}
length <= byte_size(data) ->
<<body::binary-size(length), rest::binary>> = data
conn = add_body_to_buffer(conn, body)
conn = put_in(conn.request.body, {:chunked, :crlf})
decode_body({:chunked, :crlf}, conn, rest, request_ref, responses)
end
end
defp decode_trailer_headers(conn, data, responses, headers) do
case Response.decode_header(data) do
{:ok, {name, value}, rest} ->
headers = [{name, value} | headers]
decode_trailer_headers(conn, rest, responses, headers)
{:ok, :eof, rest} ->
responses = [
{:done, conn.request.ref}
| add_trailing_headers(headers, conn.request.ref, responses)
]
conn = request_done(conn)
next_request(conn, rest, responses)
:more ->
request = %{conn.request | body: {:chunked, :trailer}, headers_buffer: headers}
conn = %{conn | buffer: data, request: request}
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_trailer_header), responses}
end
end
defp next_request(%{request: nil} = conn, data, responses) do
# TODO: Figure out if we should keep buffering even though there are no
# requests in flight
{:ok, %{conn | buffer: data}, responses}
end
defp next_request(conn, data, responses) do
decode(:status, %{conn | state: :status}, data, responses)
end
defp add_trailing_headers([], _request_ref, responses), do: responses
defp add_trailing_headers(headers, request_ref, responses),
do: [{:headers, request_ref, Enum.reverse(headers)} | responses]
defp add_body(conn, data, responses) do
conn = add_body_to_buffer(conn, data)
collapse_body_buffer(conn, responses)
end
defp add_body_to_buffer(conn, data) do
update_in(conn.request.data_buffer, &[&1 | data])
end
defp collapse_body_buffer(conn, responses) do
case IO.iodata_to_binary(conn.request.data_buffer) do
"" ->
{conn, responses}
data ->
conn = put_in(conn.request.data_buffer, [])
{conn, [{:data, conn.request.ref, data} | responses]}
end
end
defp store_header(%{content_length: nil} = request, "content-length", value) do
with {:ok, content_length} <- Parse.content_length_header(value),
do: {:ok, %{request | content_length: content_length}}
end
defp store_header(%{connection: connection} = request, "connection", value) do
with {:ok, connection_header} <- Parse.connection_header(value),
do: {:ok, %{request | connection: connection ++ connection_header}}
end
defp store_header(%{transfer_encoding: transfer_encoding} = request, "transfer-encoding", value) do
with {:ok, transfer_encoding_header} <- Parse.transfer_encoding_header(value),
do: {:ok, %{request | transfer_encoding: transfer_encoding ++ transfer_encoding_header}}
end
defp store_header(_request, "content-length", _value) do
{:error, :more_than_one_content_length_header}
end
defp store_header(request, _name, _value) do
{:ok, request}
end
defp request_done(%{request: request} = conn) do
conn = pop_request(conn)
cond do
!request -> conn
"close" in request.connection -> internal_close(conn)
request.version >= {1, 1} -> conn
"keep-alive" in request.connection -> conn
true -> internal_close(conn)
end
end
defp pop_request(conn) do
case :queue.out(conn.requests) do
{{:value, request}, requests} ->
%{conn | request: request, requests: requests}
{:empty, requests} ->
%{conn | request: nil, requests: requests}
end
end
defp internal_close(conn) do
if conn.buffer != "" do
_ = Logger.debug(["Connection closed with data left in the buffer: ", inspect(conn.buffer)])
end
:ok = conn.transport.close(conn.socket)
%{conn | state: :closed}
end
# RFC7230 3.3.3:
# > If a message is received with both a Transfer-Encoding and a
# > Content-Length header field, the Transfer-Encoding overrides the
# > Content-Length. Such a message might indicate an attempt to
# > perform request smuggling (Section 9.5) or response splitting
# > (Section 9.4) and ought to be handled as an error. A sender MUST
# > remove the received Content-Length field prior to forwarding such
# > a message downstream.
defp message_body(%{body: nil, method: method, status: status} = request) do
cond do
method == "HEAD" or status in 100..199 or status in [204, 304] ->
{:ok, :none}
# method == "CONNECT" and status in 200..299 -> nil
request.transfer_encoding != [] && request.content_length ->
{:error, :transfer_encoding_and_content_length}
"chunked" == List.first(request.transfer_encoding) ->
{:ok, {:chunked, nil}}
request.content_length ->
{:ok, {:content_length, request.content_length}}
true ->
{:ok, :until_closed}
end
end
defp message_body(%{body: body}) do
{:ok, body}
end
defp new_request(ref, state, method) do
%{
ref: ref,
state: state,
method: method,
version: nil,
status: nil,
headers_buffer: [],
data_buffer: [],
content_length: nil,
connection: [],
transfer_encoding: [],
body: nil
}
end
defp wrap_error(reason) do
%HTTPError{reason: reason, module: __MODULE__}
end
@doc false
def format_error(reason)
def format_error(:closed) do
"the connection is closed"
end
def format_error(:request_body_is_streaming) do
"a request body is currently streaming, so no new requests can be issued"
end
def format_error({:unexpected_data, data}) do
"received unexpected data: " <> inspect(data)
end
def format_error(:invalid_status_line) do
"invalid status line"
end
def format_error(:invalid_header) do
"invalid header"
end
def format_error({:invalid_request_target, target}) do
"invalid request target: #{inspect(target)}"
end
def format_error({:invalid_header_name, name}) do
"invalid header name: #{inspect(name)}"
end
def format_error({:invalid_header_value, name, value}) do
"invalid value for header #{inspect(name)}: #{inspect(value)}"
end
def format_error(:invalid_chunk_size) do
"invalid chunk size"
end
def format_error(:missing_crlf_after_chunk) do
"missing CRLF after chunk"
end
def format_error(:invalid_trailer_header) do
"invalid trailer header"
end
def format_error(:more_than_one_content_length_header) do
"the response contains two or more Content-Length headers"
end
def format_error(:transfer_encoding_and_content_length) do
"the response contained both a Transfer-Encoding header as well as a Content-Length header"
end
def format_error({:invalid_content_length_header, value}) do
"invalid Content-Length header: #{inspect(value)}"
end
def format_error(:empty_token_list) do
"header should contain a list of values, but it doesn't"
end
def format_error({:invalid_token_list, string}) do
"header contains invalid tokens: #{inspect(string)}"
end
end
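A minimal end-to-end sketch of the API above; the host and path are illustrative, and a real client would keep calling `stream/2` with incoming socket messages until it sees `{:done, ref}`.

{:ok, conn} = Mint.HTTP1.connect(:http, "example.com", 80)
{:ok, conn, request_ref} = Mint.HTTP1.request(conn, "GET", "/", [], nil)

receive do
  message ->
    # Responses arrive as `{:status, ref, status}`, `{:headers, ref, headers}`,
    # zero or more `{:data, ref, binary}`, and finally `{:done, ref}`.
    {:ok, _conn, responses} = Mint.HTTP1.stream(conn, message)
    IO.inspect(responses, label: inspect(request_ref))
end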
deps/mint/lib/mint/http1.ex
defmodule ExTermbox.Constants do
@moduledoc """
Defines constants from the termbox library. These can be used e.g. to set
formatting attributes or to identify keys passed in an event.
"""
use Bitwise, only_operators: true
@type constant :: integer
@type key :: constant
@keys %{
f1: 0xFFFF - 0,
f2: 0xFFFF - 1,
f3: 0xFFFF - 2,
f4: 0xFFFF - 3,
f5: 0xFFFF - 4,
f6: 0xFFFF - 5,
f7: 0xFFFF - 6,
f8: 0xFFFF - 7,
f9: 0xFFFF - 8,
f10: 0xFFFF - 9,
f11: 0xFFFF - 10,
f12: 0xFFFF - 11,
insert: 0xFFFF - 12,
delete: 0xFFFF - 13,
home: 0xFFFF - 14,
end: 0xFFFF - 15,
pgup: 0xFFFF - 16,
pgdn: 0xFFFF - 17,
arrow_up: 0xFFFF - 18,
arrow_down: 0xFFFF - 19,
arrow_left: 0xFFFF - 20,
arrow_right: 0xFFFF - 21,
mouse_left: 0xFFFF - 22,
mouse_right: 0xFFFF - 23,
mouse_middle: 0xFFFF - 24,
mouse_release: 0xFFFF - 25,
mouse_wheel_up: 0xFFFF - 26,
mouse_wheel_down: 0xFFFF - 27,
ctrl_tilde: 0x00,
# clash with 'CTRL_TILDE'
ctrl_2: 0x00,
ctrl_a: 0x01,
ctrl_b: 0x02,
ctrl_c: 0x03,
ctrl_d: 0x04,
ctrl_e: 0x05,
ctrl_f: 0x06,
ctrl_g: 0x07,
backspace: 0x08,
# clash with 'CTRL_BACKSPACE'
ctrl_h: 0x08,
tab: 0x09,
# clash with 'TAB'
ctrl_i: 0x09,
ctrl_j: 0x0A,
ctrl_k: 0x0B,
ctrl_l: 0x0C,
enter: 0x0D,
# clash with 'ENTER'
ctrl_m: 0x0D,
ctrl_n: 0x0E,
ctrl_o: 0x0F,
ctrl_p: 0x10,
ctrl_q: 0x11,
ctrl_r: 0x12,
ctrl_s: 0x13,
ctrl_t: 0x14,
ctrl_u: 0x15,
ctrl_v: 0x16,
ctrl_w: 0x17,
ctrl_x: 0x18,
ctrl_y: 0x19,
ctrl_z: 0x1A,
esc: 0x1B,
# clash with 'ESC'
ctrl_lsq_bracket: 0x1B,
# clash with 'ESC'
ctrl_3: 0x1B,
ctrl_4: 0x1C,
# clash with 'CTRL_4'
ctrl_backslash: 0x1C,
ctrl_5: 0x1D,
# clash with 'CTRL_5'
ctrl_rsq_bracket: 0x1D,
ctrl_6: 0x1E,
ctrl_7: 0x1F,
# clash with 'CTRL_7'
ctrl_slash: 0x1F,
# clash with 'CTRL_7'
ctrl_underscore: 0x1F,
space: 0x20,
backspace2: 0x7F,
# clash with 'BACKSPACE2'
ctrl_8: 0x7F
}
@type color :: constant
@colors %{
default: 0x00,
black: 0x01,
red: 0x02,
green: 0x03,
yellow: 0x04,
blue: 0x05,
magenta: 0x06,
cyan: 0x07,
white: 0x08
}
@type attribute :: constant
@attributes %{
bold: 0x0100,
underline: 0x0200,
reverse: 0x0400
}
@type event_type :: constant
@event_types %{
key: 1,
resize: 2,
mouse: 3
}
@type error_code :: constant
@error_codes %{
unsupported_terminal: -1,
failed_to_open_tty: -2,
pipe_trap_error: -3
}
@type input_mode :: constant
@input_modes %{
current: 0,
esc: 1,
esc_with_mouse: 1 ||| 4,
alt: 2,
alt_with_mouse: 2 ||| 4,
mouse: 4
}
@type output_mode :: constant
@output_modes %{
current: 0,
normal: 1,
term_256: 2,
term_216: 3,
grayscale: 4
}
@type hide_cursor :: constant
@hide_cursor -1
@doc """
Retrieves the mapping of key constants for use with termbox.
These are based on terminfo constants. Note that there's some overlap
of terminfo values. For example, it's not possible to distinguish between
`<backspace>` and `ctrl-h`.
"""
@spec keys() :: %{atom => key}
def keys, do: @keys
@doc """
Retrieves a key constant by name
## Examples
iex> key(:esc)
0x1B
iex> key(:space)
0x20
"""
@spec key(atom) :: key
def key(name), do: Map.fetch!(@keys, name)
@doc """
Retrieves the mapping of color constants.
"""
@spec colors() :: %{atom => color}
def colors, do: @colors
@doc """
Retrieves a color constant by name
## Examples
iex> color(:red)
0x02
iex> color(:blue)
0x05
"""
@spec color(atom) :: color
def color(name), do: Map.fetch!(@colors, name)
@doc """
Retrieves the mapping of attribute constants.
"""
@spec attributes() :: %{atom => attribute}
def attributes, do: @attributes
@doc """
Retrieves an attribute constant by name
## Examples
iex> attribute(:bold)
0x0100
iex> attribute(:underline)
0x0200
"""
@spec attribute(atom) :: attribute
def attribute(name), do: Map.fetch!(@attributes, name)
@doc """
Retrieves the mapping of event type constants.
"""
@spec event_types() :: %{atom => event_type}
def event_types, do: @event_types
@doc """
Retrieves an event type constant by name
## Examples
iex> event_type(:key)
0x01
iex> event_type(:resize)
0x02
iex> event_type(:mouse)
0x03
"""
@spec event_type(atom) :: event_type
def event_type(name), do: Map.fetch!(@event_types, name)
@doc """
Retrieves the mapping of error code constants.
"""
@spec error_codes() :: %{atom => error_code}
def error_codes, do: @error_codes
@doc """
Retrieves an error code constant by name
## Examples
iex> error_code(:unsupported_terminal)
-1
"""
@spec error_code(atom) :: error_code
def error_code(name), do: Map.fetch!(@error_codes, name)
@doc """
Retrieves the mapping of input mode constants.
"""
@spec input_modes() :: %{atom => input_mode}
def input_modes, do: @input_modes
@doc """
Retrieves an input mode constant by name
## Examples
iex> input_mode(:esc)
1
iex> input_mode(:esc_with_mouse)
5
"""
@spec input_mode(atom) :: input_mode
def input_mode(name), do: Map.fetch!(@input_modes, name)
@doc """
Retrieves the mapping of output mode constants.
"""
@spec output_modes() :: %{atom => output_mode}
def output_modes, do: @output_modes
@doc """
Retrieves an output mode constant by name
## Examples
iex> output_mode(:term_256)
2
"""
@spec output_mode(atom) :: output_mode
def output_mode(name), do: Map.fetch!(@output_modes, name)
@doc """
Retrieves the hide cursor constant.
## Examples
iex> hide_cursor()
-1
"""
@spec hide_cursor() :: hide_cursor
def hide_cursor, do: @hide_cursor
end
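These constants are plain integers so they can be combined with Bitwise operators; in termbox, attribute bits are OR-ed onto a color to build a cell's foreground. A small sketch:

import Bitwise
alias ExTermbox.Constants

# Bold red foreground over the default background.
fg = Constants.color(:red) ||| Constants.attribute(:bold)
bg = Constants.color(:default)
{fg, bg}
#=> {258, 0}  # i.e. {0x0102, 0x00}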
lib/ex_termbox/constants.ex
defmodule Cldr.Calendar.Kday do
@moduledoc """
Provide K-Day functions for Dates, DateTimes and NaiveDateTimes.
"""
import Cldr.Calendar,
only: [
date_to_iso_days: 1,
date_from_iso_days: 2,
iso_days_to_day_of_week: 1,
weeks_to_days: 1
]
@doc """
Return the date of the `day_of_week` on or before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Cldr.Calendar.Kday.kday_on_or_before(~D[2016-02-29], 2)
~D[2016-02-23]
iex> Cldr.Calendar.Kday.kday_on_or_before(~D[2017-11-30], 1)
~D[2017-11-27]
iex> Cldr.Calendar.Kday.kday_on_or_before(~D[2017-06-30], 6)
~D[2017-06-24]
"""
@spec kday_on_or_before(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_on_or_before(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> kday_on_or_before(k)
|> date_from_iso_days(calendar)
end
def kday_on_or_before(iso_days, k) when is_integer(iso_days) do
iso_days - iso_days_to_day_of_week(iso_days - k)
end
@doc """
Return the date of the `day_of_week` on or after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Cldr.Calendar.Kday.kday_on_or_after(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Cldr.Calendar.Kday.kday_on_or_after(~D[2017-11-30], 1)
~D[2017-12-04]
iex> Cldr.Calendar.Kday.kday_on_or_after(~D[2017-06-30], 6)
~D[2017-07-01]
"""
@spec kday_on_or_after(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_on_or_after(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> kday_on_or_after(k)
|> date_from_iso_days(calendar)
end
def kday_on_or_after(iso_days, k) when is_integer(iso_days) do
kday_on_or_before(iso_days + 7, k)
end
@doc """
Return the date of the `day_of_week` nearest the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Cldr.Calendar.Kday.kday_nearest(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Cldr.Calendar.Kday.kday_nearest(~D[2017-11-30], 1)
~D[2017-11-27]
iex> Cldr.Calendar.Kday.kday_nearest(~D[2017-06-30], 6)
~D[2017-07-01]
"""
@spec kday_nearest(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_nearest(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> kday_nearest(k)
|> date_from_iso_days(calendar)
end
def kday_nearest(iso_days, k) when is_integer(iso_days) do
kday_on_or_before(iso_days + 3, k)
end
@doc """
Return the date of the `day_of_week` before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Cldr.Calendar.Kday.kday_before(~D[2016-02-29], 2)
~D[2016-02-23]
iex> Cldr.Calendar.Kday.kday_before(~D[2017-11-30], 1)
~D[2017-11-27]
# 6 means Saturday. Use either the integer value or the atom form.
iex> Cldr.Calendar.Kday.kday_before(~D[2017-06-30], 6)
~D[2017-06-24]
"""
@spec kday_before(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_before(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> kday_before(k)
|> date_from_iso_days(calendar)
end
def kday_before(iso_days, k) do
kday_on_or_before(iso_days - 1, k)
end
@doc """
Return the date of the `day_of_week` after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Cldr.Calendar.Kday.kday_after(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Cldr.Calendar.Kday.kday_after(~D[2017-11-30], 1)
~D[2017-12-04]
iex> Cldr.Calendar.Kday.kday_after(~D[2017-06-30], 6)
~D[2017-07-01]
iex> Cldr.Calendar.Kday.kday_after(~D[2021-03-28], 7)
~D[2021-04-04]
"""
@spec kday_after(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_after(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> kday_after(k)
|> date_from_iso_days(calendar)
end
def kday_after(iso_days, k) do
kday_on_or_after(iso_days + 1, k)
end
@doc """
Return the date of the `nth` `day_of_week` on or before/after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `n` is the cardinal number of `k` before (negative `n`) or after
(positive `n`) the specified date
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
# Thanksgiving in the US
iex> Cldr.Calendar.Kday.nth_kday(~D[2017-11-01], 4, 4)
~D[2017-11-23]
# Labor day in the US
iex> Cldr.Calendar.Kday.nth_kday(~D[2017-09-01], 1, 1)
~D[2017-09-04]
# Daylight savings time starts in the US
iex> Cldr.Calendar.Kday.nth_kday(~D[2017-03-01], 2, 7)
~D[2017-03-12]
"""
@spec nth_kday(Calendar.day() | Date.t(), integer(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def nth_kday(%{year: _, month: _, day: _, calendar: calendar} = date, n, k)
when k in 1..7 and is_integer(n) do
date
|> date_to_iso_days
|> nth_kday(n, k)
|> date_from_iso_days(calendar)
end
def nth_kday(iso_days, n, k) when is_integer(iso_days) and n > 0 do
weeks_to_days(n) + kday_before(iso_days, k)
end
def nth_kday(iso_days, n, k) when is_integer(iso_days) do
weeks_to_days(n) + kday_after(iso_days, k)
end
@doc """
Return the date of the first `day_of_week` on or after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
# US election day
iex> Cldr.Calendar.Kday.first_kday(~D[2017-11-02], 2)
~D[2017-11-07]
# US Daylight savings end
iex> Cldr.Calendar.Kday.first_kday(~D[2017-11-01], 7)
~D[2017-11-05]
"""
@spec first_kday(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def first_kday(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> first_kday(k)
|> date_from_iso_days(calendar)
end
def first_kday(iso_days, k) do
nth_kday(iso_days, 1, k)
end
@doc """
Return the date of the last `day_of_week` on or before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Example
# Memorial Day in the US
iex> Cldr.Calendar.Kday.last_kday(~D[2017-05-31], 1)
~D[2017-05-29]
"""
@spec last_kday(Calendar.day() | Date.t(), Cldr.Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def last_kday(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..7 do
date
|> date_to_iso_days
|> last_kday(k)
|> date_from_iso_days(calendar)
end
def last_kday(iso_days, k) do
nth_kday(iso_days, -1, k)
end
end
lib/cldr/calendar/kday.ex
defmodule Acs.Improper do
@moduledoc """
A set of utilities for interacting with improper lists.
"""
@doc """
Converts a proper list into an improper list, popping off the tail of the
list and using it as the improper tail of the new improper list.
The given list must have at least 2 elements.
"""
@spec to_improper(list) :: nonempty_improper_list(term, term) | nil
def to_improper(list) when length(list) >= 2, do: to_improper_impl(list, [])
def to_improper(_list), do: nil
@spec to_improper_impl(list, list) :: nonempty_improper_list(term, term) | nil
defp to_improper_impl(list, acc)
defp to_improper_impl([t | []], acc) when not is_list(t), do: :lists.reverse(acc, t)
defp to_improper_impl([h | t], acc), do: to_improper_impl(t, [h | acc])
defp to_improper_impl(_, _), do: nil
@doc """
Converts an improper list to a list, appending the improper tail onto
the end of the new list.
Given a proper list, this returns the same list.
"""
@spec to_proper(nonempty_maybe_improper_list()) :: [any]
def to_proper([]), do: []
def to_proper([h | t]) when is_list(t), do: [h | to_proper(t)]
def to_proper([h | t]), do: [h | [t]]
@doc """
Gets the specified `index` in the improper `list`. The index of the
improper tail is one greater than the index of the last element
in the proper head of the list.
## Examples
```
# last element of the proper portion of the list
iex> Acs.Improper.improper_get([:a, :b | :c], 1)
:b
# improper tail is 1 greater (i.e. 2)
iex> Acs.Improper.improper_get([:a, :b | :c], 2)
:c
```
"""
@spec improper_get(nonempty_improper_list(term, term) | term, integer) :: term | nil
def improper_get(list, index)
def improper_get([h | _], 0), do: h
def improper_get([_ | t], 1) when not is_list(t), do: t
def improper_get([_ | t], index) when index > 0 and is_list(t),
do: improper_get(t, index - 1)
def improper_get(list, index) when index < 0 do
length = improper_length(list)
if length + index >= 0 do
improper_get(list, length + index)
else
nil
end
end
def improper_get(_, _), do: nil
@doc """
Returns the length of the specified improper `list`.
The calculated length should be the length of the proper head + 1
(for the improper tail).
"""
@spec improper_length(nonempty_improper_list(term, term), non_neg_integer) :: non_neg_integer
def improper_length(list, len \\ 0)
def improper_length([_ | t], len) when is_list(t),
do: improper_length(t, len + 1)
def improper_length(_, len), do: len + 2
@doc """
Returns true if the given `term` is a proper list, and false otherwise.
"""
@spec proper_list?(term) :: boolean
def proper_list?(term)
def proper_list?([]), do: true
def proper_list?([_ | t]) when is_list(t), do: proper_list?(t)
def proper_list?(_), do: false
end
defmodule Acs do
@moduledoc """
`Acs` is shorthand for `Access` and overrides some of the default behaviour
of `get_in/2`, `update_in/3`, `put_in/3`, `get_and_update_in/3`, and
`pop_in/2`.
The main behavioural changes are the following:
* empty paths are supported (as opposed to raising a `FunctionClauseError`)
* atom keys for structs are supported (w/o `Access.key/1`)
* integer keys are supported for lists and tuples (w/o `Access.at/1` or `Access.elem/1`)
* an improper list's tail may be specified as the last element in the 'list'
## Example
By using Acs, you may specify which of these functions to override using
the `:only` option (defaults to overriding all). Or you may forgo the use
of this macro and override manually.
```
defmodule A do
use Acs
end
defmodule B do
use Acs, only: [get_in: 2]
end
defmodule C do
import Kernel, except: [get_in: 2]
import Acs, only: [get_in: 2]
end
```
"""
import Kernel, except: [get_in: 2, update_in: 3, put_in: 3, get_and_update_in: 3, pop_in: 2]
alias Acs.Improper
@doc false
defmacro __using__(opts) do
default_opts = [get_in: 2, update_in: 3, put_in: 3, get_and_update_in: 3, pop_in: 2]
opts =
if Keyword.has_key?(opts, :only) do
{keep, _} =
opts
|> Keyword.get(:only)
|> (&Keyword.split(default_opts, &1)).()
keep
else
default_opts
end
quote do
import Kernel, except: unquote(opts)
import Acs, only: unquote(opts)
end
end
@doc """
Gets a value and updates a nested structure.
See `Kernel.get_and_update_in/3`.
## Examples
Structs, lists, and tuples require less boilerplate to access.
```
iex> data = [{nil, %ArgumentError{message: "foo"}}, nil]
...> lazy_keys = [0, 1, :message]
...> keys = [Access.at(0), Access.elem(1), Access.key(:message)]
...> fun = fn x -> {x, x <> "bar"} end
...> Acs.get_and_update_in(data, lazy_keys, fun) == Kernel.get_and_update_in(data, keys, fun)
true
```
Empty `keys` also return sensible defaults, instead of raising
an exception.
```
iex> Acs.get_and_update_in(%{}, [], fn x -> {x, Map.put(x, :a, 1)} end)
{%{}, %{a: 1}}
```
And the tail of an improper list can be accessed at element
`length(data)` of the list.
```
iex> Acs.get_and_update_in([:a, :b, :c | :d], [3], fn x -> {x, :e} end)
{:d, [:a, :b, :c | :e]}
```
"""
@spec get_and_update_in(Access.t(), keys, (term -> {get_value, update_value} | :pop)) ::
{get_value, Access.t()}
when keys: [term], update_value: term, get_value: term
def get_and_update_in(data, keys, fun)
def get_and_update_in(data, [], fun), do: fun.(data)
def get_and_update_in(data, keys, fun) do
Kernel.get_and_update_in(data, lazify(keys), fun)
rescue
_ -> {nil, data}
end
@doc """
Gets a value from a nested structure.
See `Kernel.get_in/2`.
## Examples
Structs, lists, and tuples require less boilerplate to access.
```
iex> data = [{nil, %ArgumentError{message: "foo"}}, nil]
...> lazy_keys = [0, 1, :message]
...> keys = [Access.at(0), Access.elem(1), Access.key(:message)]
...> Acs.get_in(data, lazy_keys) == Kernel.get_in(data, keys)
true
```
Empty `keys` also return sensible defaults, instead of raising
an exception.
```
iex> Acs.get_in(%{}, [])
%{}
```
And the tail of an improper list can be accessed at element
`length(data)` of the list.
```
iex> Acs.get_in([:a, :b, :c | :d], [3])
:d
```
"""
@spec get_in(Access.t(), keys :: [term]) :: Access.t() | nil
def get_in(data, keys)
def get_in(data, []), do: data
def get_in(data, keys) do
Kernel.get_in(data, lazify(keys))
rescue
_ -> nil
end
@doc """
Updates a key in a nested structure.
See `Kernel.update_in/3`.
## Examples
Structs, lists, and tuples require less boilerplate to access.
```
iex> data = [{nil, %ArgumentError{message: "foo"}}, nil]
...> lazy_keys = [0, 1, :message]
...> keys = [Access.at(0), Access.elem(1), Access.key(:message)]
...> fun = fn x -> x <> "bar" end
...> Acs.update_in(data, lazy_keys, fun) == Kernel.update_in(data, keys, fun)
true
```
Empty `keys` also return sensible defaults, instead of raising
an exception.
```
iex> Acs.update_in(%{}, [], &Map.put(&1, :a, 1))
%{a: 1}
```
And the tail of an improper list can be accessed at element
`length(data)` of the list.
```
iex> Acs.update_in([:a, :b, :c | :d], [3], fn _ -> :e end)
[:a, :b, :c | :e]
```
"""
@spec update_in(Access.t(), keys :: [term], (term -> term)) :: Access.t()
def update_in(data, keys, fun)
def update_in(data, [], fun), do: fun.(data)
def update_in(data, keys, fun) do
Kernel.update_in(data, lazify(keys), fun)
rescue
_ -> data
end
@doc """
Puts a value in a nested structure.
See `Kernel.put_in/3`.
## Examples
Structs, lists, and tuples require less boilerplate to access.
```
iex> data = [{nil, %ArgumentError{message: "foo"}}, nil]
...> lazy_keys = [0, 1, :message]
...> keys = [Access.at(0), Access.elem(1), Access.key(:message)]
...> Acs.put_in(data, lazy_keys, "bar") == Kernel.put_in(data, keys, "bar")
true
```
Empty `keys` also return sensible defaults, instead of raising
an exception.
```
iex> Acs.put_in([], [], :foo)
[:foo]
```
And the tail of an improper list can be accessed at element
`length(data)` of the list.
```
iex> Acs.put_in([:a, :b, :c | :d], [3], :e)
[:a, :b, :c | :e]
```
"""
@spec put_in(Access.t(), keys :: [term], value :: term) :: Access.t()
def put_in(data, keys, value) do
Kernel.put_in(data, lazify(keys), value)
rescue
_ -> data
end
@doc """
Pops a key from the given nested structure.
See `Kernel.pop_in/2`.
## Examples
Structs, lists, and tuples require less boilerplate to access.
```
iex> data = [{nil, %ArgumentError{message: "foo"}}, nil]
...> lazy_keys = [0, 1, :message]
...> keys = [Access.at(0), Access.elem(1), Access.key(:message)]
...> Acs.pop_in(data, lazy_keys) == Kernel.pop_in(data, keys)
true
```
Empty `keys` also return sensible defaults, instead of raising
an exception.
```
iex> Acs.pop_in(%{}, [])
{nil, %{}}
```
And the tail of an improper list can be accessed at element
`length(data)` of the list.
```
iex> Acs.pop_in([:a, :b, :c | :d], [3])
{:d, [:a, :b | :c]}
```
"""
@spec pop_in(Access.t(), keys :: [term]) :: {term, Access.t()}
def pop_in(data, keys) do
Kernel.pop_in(data, lazify(keys))
rescue
_ -> {nil, data}
end
@doc false
@spec lazify(keys) :: keys when keys: [term]
def lazify(keys), do: Enum.map(keys, &lazy_keys/1)
@doc false
@spec lazy_keys(key :: term) :: Access.access_fun(Access.t(), term)
def lazy_keys(key)
def lazy_keys(fun) when is_function(fun, 3) do
fun
end
def lazy_keys(i) when is_integer(i) do
&lazy_accessor(i, &1, &2, &3)
end
def lazy_keys(key) when is_atom(key) do
fn op, data, next ->
Access.key(key).(op, data, next)
end
end
def lazy_keys(key) do
key
end
@spec lazy_accessor(integer, op, data, (term -> term)) ::
{get_value, Access.container()} | :pop
when data: term, get_value: term, op: :get | :get_and_update
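# `length/1` raises on improper lists, and a guard that raises simply fails,
# so the first clause below only matches proper lists; improper lists fall
# through to `improper_accessor/4`.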
defp lazy_accessor(i, op, data, next) when is_list(data) and length(data) >= 0,
do: Access.at(i).(op, data, next)
defp lazy_accessor(i, op, data, next) when is_list(data),
do: improper_accessor(i, op, data, next)
defp lazy_accessor(i, op, data, next) when is_tuple(data), do: Access.elem(i).(op, data, next)
defp lazy_accessor(i, op, data, next), do: Access.key(i).(op, data, next)
@spec improper_accessor(integer, op, data, (term -> term)) ::
{get_value, Access.container()} | :pop
when data: term, get_value: term, op: :get | :get_and_update
defp improper_accessor(i, op, data, next) do
case Access.at(i).(op, Improper.to_proper(data), next) do
{get, container} when length(container) >= 2 ->
{get, Improper.to_improper(container)}
otherwise ->
otherwise
end
end
end
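A quick sketch of the ergonomic win described in the moduledoc: mixed maps, lists, and tuples addressed with bare keys (the data is illustrative).

data = %{users: [{%{name: "ann"}, :admin}]}

Acs.get_in(data, [:users, 0, 0, :name])
#=> "ann"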
lib/acs.ex
defmodule NBT.Parser do
@moduledoc """
Parsing functions that turn an NBT binary into native elixir types.
"""
import NBT.Util, only: [partial_unfold: 3]
@type next :: nil | {:end, binary} | {{atom, binary, term}, binary}
@doc """
Given a partial NBT binary, returns the next TAG and the rest of the binary.
## Example
iex> Parser.take_next(<< 1, 5::2*8, "votes", 26, "more data" >>)
{{:byte, "votes", 26}, "more data"}
"""
@spec take_next(binary) :: next
def take_next(""), do: nil
def take_next(<<0, rest::binary>>), do: {:end, rest}
def take_next(<<type::integer-size(8), name_data_and_rest::binary>>) do
{name, data_and_rest} = take_name(name_data_and_rest)
{data, rest} = take_value(type, data_and_rest)
{{typename(type), name, data}, rest}
end
defp take_name(<<name_length::integer-size(16), name_and_rest::binary>>) do
name = binary_part(name_and_rest, 0, name_length)
rest = String.replace_prefix(name_and_rest, name, "")
{name, rest}
end
defp take_value(1, <<data::integer-size(8), rest::binary>>), do: {data, rest}
defp take_value(2, <<data::integer-size(16), rest::binary>>), do: {data, rest}
defp take_value(3, <<data::integer-size(32), rest::binary>>), do: {data, rest}
defp take_value(4, <<data::integer-size(64), rest::binary>>), do: {data, rest}
defp take_value(5, <<data::float-size(32), rest::binary>>), do: {data, rest}
defp take_value(6, <<data::float-size(64), rest::binary>>), do: {data, rest}
defp take_value(7, <<array_length::integer-size(32), data_and_rest::binary>>) do
data = binary_part(data_and_rest, 0, array_length)
rest = String.replace_prefix(data_and_rest, data, "")
{:binary.bin_to_list(data), rest}
end
defp take_value(8, data_and_rest), do: take_name(data_and_rest)
defp take_value(9, <<
type::integer-size(8),
list_length::integer-size(32),
data_and_rest::binary
>>) do
partial_unfold(
{0, data_and_rest},
fn
{^list_length, _stream} ->
nil
{n, stream} ->
{value, rest} = take_value(type, stream)
{value, {n + 1, rest}}
end,
&elem(&1, 1)
)
end
defp take_value(10, data) do
{children, rest} =
partial_unfold(
{false, data},
fn
{true, _vals} ->
nil
{false, vals} ->
case take_next(vals) do
{:end, rest} -> {:end, {true, rest}}
{value, rest} -> {value, {false, rest}}
end
end,
&elem(&1, 1)
)
data =
children
|> Enum.reduce(%{}, fn
:end, compound -> compound
{_type, key, val}, compound -> Map.put(compound, key, val)
end)
{data, rest}
end
defp take_value(11, <<array_length::integer-size(32), data_and_rest::binary>>) do
data = binary_part(data_and_rest, 0, array_length * 4)
rest = String.replace_prefix(data_and_rest, data, "")
step = fn
"" ->
nil
acc ->
<<next::integer-size(32), rest::binary>> = acc
{next, rest}
end
list =
data
|> Stream.unfold(step)
|> Enum.to_list()
{list, rest}
end
defp take_value(12, <<array_length::integer-size(32), data_and_rest::binary>>) do
data = binary_part(data_and_rest, 0, array_length * 8)
rest = String.replace_prefix(data_and_rest, data, "")
step = fn
"" ->
nil
acc ->
<<next::integer-size(64), rest::binary>> = acc
{next, rest}
end
list =
data
|> Stream.unfold(step)
|> Enum.to_list()
{list, rest}
end
@spec typename(1..12) :: atom()
defp typename(1), do: :byte
defp typename(2), do: :short
defp typename(3), do: :int
defp typename(4), do: :long
defp typename(5), do: :float
defp typename(6), do: :double
defp typename(7), do: :byte_array
defp typename(8), do: :string
defp typename(9), do: :list
defp typename(10), do: :compound
defp typename(11), do: :int_array
defp typename(12), do: :long_array
end
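A worked sketch of decoding a one-entry compound with the parser above, assuming `NBT.Util.partial_unfold/3` behaves as the clauses rely on; the binary is hand-built for illustration.

# TAG_Compound (10) with an empty name, containing TAG_Byte (1) named "byte"
# with value 7, terminated by TAG_End (0).
binary = <<10, 0::16, 1, 4::16, "byte", 7, 0>>

NBT.Parser.take_next(binary)
#=> {{:compound, "", %{"byte" => 7}}, ""}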
lib/nbt/parser.ex
defmodule Cloudinary.Transformation.Effect.Vectorize do
@moduledoc false
defguardp is_colors(colors) when colors in 2..30
defguardp is_detail(detail) when (detail <= 1 and detail >= 0) or detail in 2..1000
defguardp is_despeckle(despeckle) when (despeckle <= 1 and despeckle >= 0) or despeckle in 2..100
defguardp is_paths(paths) when paths <= 100 and paths >= 0
defguardp is_corners(corners) when corners <= 100 and corners >= 0
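# When the given options form a leading run of the canonical order
# (colors, detail, despeckle, paths, corners), the values are rendered
# positionally, e.g. "vectorize:2:0.5:0.3:10"; any other combination falls
# back to the explicit "option:value" form, e.g. "vectorize:colors:2:despeckle:0.3".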
@spec to_url_string(%{
optional(:colors) => 2..30,
optional(:detail) => 0..1000 | float,
optional(:despeckle) => 0..100 | float,
optional(:paths) => 0..100 | float,
optional(:corners) => 0..100 | float
}) :: String.t()
def to_url_string(%{colors: c, detail: t, despeckle: s, paths: p, corners: r})
when is_colors(c) and is_detail(t) and is_despeckle(s) and is_paths(p) and is_corners(r) do
"vectorize:#{c}:#{t}:#{s}:#{p}:#{r}"
end
def to_url_string(%{colors: c, detail: t, despeckle: s, paths: p})
when is_colors(c) and is_detail(t) and is_despeckle(s) and is_paths(p) do
"vectorize:#{c}:#{t}:#{s}:#{p}"
end
def to_url_string(%{colors: c, detail: t, despeckle: s, corners: r})
when is_colors(c) and is_detail(t) and is_despeckle(s) and is_corners(r) do
"vectorize:colors:#{c}:detail:#{t}:despeckle:#{s}:corners:#{r}"
end
def to_url_string(%{colors: c, detail: t, paths: p, corners: r})
when is_colors(c) and is_detail(t) and is_paths(p) and is_corners(r) do
"vectorize:colors:#{c}:detail:#{t}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{colors: c, despeckle: s, paths: p, corners: r})
when is_colors(c) and is_despeckle(s) and is_paths(p) and is_corners(r) do
"vectorize:colors:#{c}:despeckle:#{s}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{detail: t, despeckle: s, paths: p, corners: r})
when is_detail(t) and is_despeckle(s) and is_paths(p) and is_corners(r) do
"vectorize:detail:#{t}:despeckle:#{s}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{colors: c, detail: t, despeckle: s})
when is_colors(c) and is_detail(t) and is_despeckle(s) do
"vectorize:#{c}:#{t}:#{s}"
end
def to_url_string(%{colors: c, detail: t, paths: p})
when is_colors(c) and is_detail(t) and is_paths(p) do
"vectorize:colors:#{c}:detail:#{t}:paths:#{p}"
end
def to_url_string(%{colors: c, detail: t, corners: r})
when is_colors(c) and is_detail(t) and is_corners(r) do
"vectorize:colors:#{c}:detail:#{t}:corners:#{r}"
end
def to_url_string(%{colors: c, despeckle: s, paths: p})
when is_colors(c) and is_despeckle(s) and is_paths(p) do
"vectorize:colors:#{c}:despeckle:#{s}:paths:#{p}"
end
def to_url_string(%{colors: c, despeckle: s, corners: r})
when is_colors(c) and is_despeckle(s) and is_corners(r) do
"vectorize:colors:#{c}:despeckle:#{s}:corners:#{r}"
end
def to_url_string(%{colors: c, paths: p, corners: r})
when is_colors(c) and is_paths(p) and is_corners(r) do
"vectorize:colors:#{c}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{detail: t, despeckle: s, paths: p})
when is_detail(t) and is_despeckle(s) and is_paths(p) do
"vectorize:detail:#{t}:despeckle:#{s}:paths:#{p}"
end
def to_url_string(%{detail: t, despeckle: s, corners: r})
when is_detail(t) and is_despeckle(s) and is_corners(r) do
"vectorize:detail:#{t}:despeckle:#{s}:corners:#{r}"
end
def to_url_string(%{detail: t, paths: p, corners: r})
when is_detail(t) and is_paths(p) and is_corners(r) do
"vectorize:detail:#{t}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{despeckle: s, paths: p, corners: r})
when is_despeckle(s) and is_paths(p) and is_corners(r) do
"vectorize:despeckle:#{s}:paths:#{p}:corners:#{r}"
end
def to_url_string(%{colors: c, detail: t}) when is_colors(c) and is_detail(t) do
"vectorize:#{c}:#{t}"
end
def to_url_string(%{colors: c, despeckle: s}) when is_colors(c) and is_despeckle(s) do
"vectorize:colors:#{c}:despeckle:#{s}"
end
def to_url_string(%{colors: c, paths: p}) when is_colors(c) and is_paths(p) do
"vectorize:colors:#{c}:paths:#{p}"
end
def to_url_string(%{colors: c, corners: r}) when is_colors(c) and is_corners(r) do
"vectorize:colors:#{c}:corners:#{r}"
end
def to_url_string(%{detail: t, despeckle: s}) when is_detail(t) and is_despeckle(s) do
"vectorize:detail:#{t}:despeckle:#{s}"
end
def to_url_string(%{detail: t, paths: p}) when is_detail(t) and is_paths(p) do
"vectorize:detail:#{t}:paths:#{p}"
end
def to_url_string(%{detail: t, corners: r}) when is_detail(t) and is_corners(r) do
"vectorize:detail:#{t}:corners:#{r}"
end
def to_url_string(%{despeckle: s, paths: p}) when is_despeckle(s) and is_paths(p) do
"vectorize:despeckle:#{s}:paths:#{p}"
end
def to_url_string(%{despeckle: s, corners: r}) when is_despeckle(s) and is_corners(r) do
"vectorize:despeckle:#{s}:corners:#{r}"
end
def to_url_string(%{paths: p, corners: r}) when is_paths(p) and is_corners(r) do
"vectorize:paths:#{p}:corners:#{r}"
end
def to_url_string(%{colors: c}) when is_colors(c), do: "vectorize:#{c}"
def to_url_string(%{detail: t}) when is_detail(t), do: "vectorize:detail:#{t}"
def to_url_string(%{despeckle: s}) when is_despeckle(s), do: "vectorize:despeckle:#{s}"
def to_url_string(%{paths: p}) when is_paths(p), do: "vectorize:paths:#{p}"
def to_url_string(%{corners: r}) when is_corners(r), do: "vectorize:corners:#{r}"
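# Usage sketch (hypothetical values, all within the guarded ranges):
#
#   to_url_string(%{colors: 10, detail: 0.5})
#   #=> "vectorize:10:0.5"
#
#   to_url_string(%{detail: 300, corners: 40})
#   #=> "vectorize:detail:300:corners:40"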
end
| lib/cloudinary/transformation/effect/vectorize.ex | 0.73914 | 0.557725 | vectorize.ex | starcoder |
defmodule Flawless.SchemaValidator do
@moduledoc """
Defines a schema to validate that schemas are valid.
"""
import Flawless.Helpers
def schema_schema() do
fn
%Flawless.Spec{} -> spec_schema()
%Flawless.Union{} -> union_schema()
[] -> literal([])
l when is_list(l) -> list_schema()
t when is_tuple(t) -> tuple_schema()
f when is_function(f, 0) -> function(arity: 0)
f when is_function(f, 1) -> function(arity: 1)
%module{} -> struct_schema(module)
%{} -> map_schema()
literal when is_binary(literal) -> string()
literal when is_atom(literal) -> atom()
literal when is_number(literal) -> number()
end
end
defp union_schema() do
structure(%Flawless.Union{
schemas: [schema_schema()]
})
end
defp spec_schema() do
structure(%Flawless.Spec{
checks: checks_schema(),
type: type_schema(),
cast_from: cast_from_schema(),
nil: nil_schema(),
on_error: on_error_schema(),
for: fn
%Flawless.Spec.Value{} -> value_spec_schema()
%Flawless.Spec.List{} -> list_spec_schema()
%Flawless.Spec.Tuple{} -> tuple_spec_schema()
%Flawless.Spec.Literal{} -> literal_spec_schema()
%Flawless.Spec.Struct{module: module} -> struct_spec_schema(module)
end
})
end
defp value_spec_schema() do
structure(%Flawless.Spec.Value{
schema: fn
nil -> nil
_ -> map_schema()
end
})
end
defp struct_spec_schema(module) do
structure(%Flawless.Spec.Struct{
module: atom(),
schema: struct_schema(module)
})
end
defp list_spec_schema() do
structure(%Flawless.Spec.List{
item_type: &schema_schema/0
})
end
defp tuple_spec_schema() do
structure(%Flawless.Spec.Tuple{
elem_types: list(&schema_schema/0, cast_from: :tuple)
})
end
defp literal_spec_schema() do
structure(%Flawless.Spec.Literal{
value: any()
})
end
defp list_schema() do
list(&schema_schema/0,
max_length: 1,
on_error:
"The list shortcut `[item_spec]` should define only one schema that will be the same for all items."
)
end
defp tuple_schema() do
list(&schema_schema/0, cast_from: :tuple)
end
defp map_schema() do
%{any_key() => &schema_schema/0}
end
defp struct_schema(module) do
structure(
%{
any_key() => &schema_schema/0,
__struct__: module
},
nil: true
)
end
defp type_schema() do
atom(in: Flawless.Types.valid_types())
end
defp checks_schema() do
list(check_schema())
end
defp check_schema() do
fn
%_{} ->
structure(%Flawless.Rule{
predicate: predicate_schema(),
message: any()
})
_else ->
predicate_schema()
end
end
defp predicate_schema() do
function(arity: 1, on_error: "Predicates used in checks must be function of arity 1.")
end
defp cast_from_schema() do
fn
l when is_list(l) -> list(&cast_from_schema/0)
{_type, [with: _converter]} -> {type_schema(), list({:with, function()}, length: 1)}
_ -> type_schema()
end
end
defp nil_schema() do
atom(in: [:default, true, false])
end
defp on_error_schema() do
fn
nil -> nil
_ -> string()
end
end
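# A hedged usage sketch: schema_schema/0 can itself be fed to the validator to
# check a user-supplied schema. This assumes a `Flawless.validate(value, schema)`
# entry point taking the value first and the schema second; check the library
# docs before relying on it:
#
#   Flawless.validate(%{name: string()}, schema_schema())
#   # an empty error list would mean the schema is well-formed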
end
| lib/flawless/schema_validator.ex | 0.698844 | 0.461927 | schema_validator.ex | starcoder |
defmodule MapSchema.Examples.CustomTypeLang do
@moduledoc """
MapSchema.Examples.CustomTypeLang
Imagine a query to a database, or any other place where you keep
the list of ISO 639-1 language codes:
https://www.iso.org/iso-639-language-codes.html
https://es.wikipedia.org/wiki/ISO_639-1
A value is valid only if it exists in that list; otherwise the schema
won't be valid. It's that simple. ;)
"""
@behaviour MapSchema.CustomType
@spec name :: atom
def name, do: :language_iso639
def nested?, do: false
@doc """
We want every string to be lowercase, so the cast simply runs
the value through a downcasing function.
## Examples
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.cast("ES")
...> |> CustomTypeLang.is_valid?()
true
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.cast(nil)
:map_schema_type_error
"""
@spec cast(value :: any) :: any | :map_schema_type_error
def cast(value) when is_bitstring(value) do
value
|> String.downcase()
end
def cast(_), do: :map_schema_type_error
@doc """
In this example our "database" is simply the list
["zh", "en", "es"]
## Examples
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.is_valid?("zh")
true
iex> CustomTypeLang.is_valid?("en")
true
iex> CustomTypeLang.is_valid?("es")
true
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.is_valid?("ES")
false
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.is_valid?("Español")
false
"""
@spec is_valid?(any) :: boolean
def is_valid?(value) do
## Imagine a query to a database or any other place where you keep
## the list: https://www.iso.org/iso-639-language-codes.html
## https://es.wikipedia.org/wiki/ISO_639-1
## A value is valid only if it exists in the list.
value in ["zh", "en", "es"]
end
@doc """
But wait, the magic continues. After defining our cast and validation functions,
we can define a doctest generator... Yes, you read that right: TESTS FOR FREE!
Define this function well and you get a fast test of your new datatype. ;)
This method returns a list of tuples [{value_test, expected_value}, ...];
MapSchema picks a random tuple to build the test, so please be careful and
check that every tuple is correct. Thanks.
The values must be in string format so they can be written into the doctest;
please make sure that `mix docs` runs without problems.
## Examples
iex> alias MapSchema.Examples.CustomTypeLang
iex> CustomTypeLang.doctest_values()
[{"\\"zh\\"", "\\"zh\\""}, {"\\"en\\"", "\\"en\\""}, {"\\"es\\"", "\\"es\\""}]
"""
@spec doctest_values :: [{any, any}]
def doctest_values do
["zh", "en", "es"]
|> Enum.map(fn(text) -> {"\"#{text}\"", "\"#{text}\""} end)
end
end
| lib/example/custom_type_lang.ex | 0.774328 | 0.542984 | custom_type_lang.ex | starcoder |
defmodule AnalysisPrep do
@moduledoc """
Analysis preparation for data series.
Incorporates the excellent elixir-statistics with the types of things needed for exploratory
analysis and preparing to use data in machine learning algorithms.
"""
alias AnalysisPrep.Encode
alias AnalysisPrep.Frequency
alias AnalysisPrep.Normalize
alias AnalysisPrep.Probability
alias AnalysisPrep.Scale
alias AnalysisPrep.Summary
import Statistics
use Ratio, override_math: false
@doc """
Calculate the precision from a series of values.
Precision is the inverse of the variance, used in some Bayesian libraries for normal distributions
instead of standard deviation.
## Examples
iex> variance([1,2,3,4])
1.25
iex> precision([1,2,3,4])
1 / 1.25
iex> precision([1,2,3,4])
0.8
iex> precision([])
[]
"""
@spec precision([number]) :: float | []
def precision([]), do: []
def precision(list) do
1 / variance(list)
end
def summary(list, type \\ :continuous), do: Summary.summary(list, type)
def one_hot(list), do: Encode.one_hot(list)
def label(list), do: Encode.label(list)
def frequency(list, base \\ %{}), do: Frequency.frequency(list, base)
def normalize(object, max \\ nil), do: Normalize.normalize(object, max)
def scale(list, opts \\ []), do: Scale.scale(list, opts)
def p(event, space), do: Probability.p(event, space)
def such_that(predicate, space), do: Probability.such_that(predicate, space)
def joint(a, b), do: Probability.joint(a, b)
def cross(a, b), do: Probability.cross(a, b)
def combinations(list, n \\ 2), do: Probability.combinations(list, n)
def sample(list, n \\ 1), do: Probability.sample(list, n)
def choose(n, c), do: Probability.choose(n, c)
@doc """
Sum the values of a map, deferring to `Statistics.sum`.
## Examples
iex> sum_map(%{a: 1, b: 2})
3
"""
def sum_map(map) do
map
|> Map.values
|> Statistics.sum
end
@doc """
Save an array of arrays to a file
## Examples
iex> save("/tmp/foo.csv", [["a","b"],[1,2],[3,4]]) && File.rm("/tmp/foo.csv")
:ok
"""
def save(filename, data) do
file = File.open!(filename, [:write, :utf8])
data |> CSV.encode |> Enum.each(&IO.write(file, &1))
end
@doc """
Test for a range.
A range is implemented as a struct, but behaves differently enough that we
need to detect it explicitly to direct program flow correctly.
## Examples
iex> is_range 1..5
true
"""
def is_range(object) do
# Match on the Range struct itself; comparing Map.keys/1 breaks on
# Elixir >= 1.12 (where Range gained a :step field) and needed a rescue
# for non-map inputs.
match?(%Range{}, object)
end
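# Quick sketch tying the helpers above together (values taken from the
# doctests in this module):
#
#   AnalysisPrep.precision([1, 2, 3, 4])  #=> 0.8
#   AnalysisPrep.sum_map(%{a: 1, b: 2})   #=> 3
#   AnalysisPrep.is_range(1..5)           #=> true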
end
| lib/analysis_prep.ex | 0.900188 | 0.696139 | analysis_prep.ex | starcoder |
defmodule Riak.Pool do
@moduledoc """
Defines a pool of Riak connections.
A pool can be defined as:
defmodule MyPool do
use Riak.Pool,
adapter: Riak.Pool.Poolboy,
hostname: "localhost"
end
Options will be passed to the pool adapter and to the underlying Riak connection.
## Logging
The pool may define a `log/5` function, that will be called by the
driver on every call to the database.
The first argument (the result) can be of the form `:ok`, `{:ok, _}` or `{:error, _}`.
The second element of the tuples should be considered private, and not used.
The fourth argument determines the operation, these can be (listed with the
arguments passed as the fifth argument to the log function):
* `:run_command`, `[query, options]`
* `:fetch_type`, `[bucket, key]`
"""
@type t :: module
@doc false
defmacro __using__(opts) do
adapter = Keyword.fetch!(opts, :adapter)
quote do
# TODO: Customizable timeout
@timeout 5_000
@behaviour unquote(__MODULE__)
@adapter unquote(adapter)
@name __MODULE__
@sup __MODULE__.Sup
def start_link(opts) do
import Supervisor.Spec, warn: false
children = [
worker(@adapter, [@name, opts]),
]
opts = [strategy: :one_for_all, name: @sup]
Supervisor.start_link(children, opts)
end
def stop do
Process.whereis(__MODULE__)
|> Process.exit(:shutdown)
end
def run(fun) do
@adapter.run(@name, fun)
end
def log(return, queue_time, query_time, _fun, _args) do
return
end
defoverridable [log: 5]
end
end
@type time :: integer
@callback run((pid -> return)) :: {queue_time :: time, return} when return: var
@callback log(return, queue_time, query_time, fun :: atom, args :: list) ::
return when return: var, queue_time: time, query_time: time
def run_with_log(pool, log, args, opts, fun) do
{log?, opts} = Keyword.pop(opts, :log, true)
if log? do
{queue_time, {query_time, value}} =
pool.run(&:timer.tc(fun, [&1]))
pool.log(value, queue_time, query_time, log, args ++ [opts])
value
else
{_queue_time, value} = pool.run(fun)
value
end
end
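# Sketch of a pool overriding the default log/5 (hypothetical MyPool module,
# based on the `use` example in the moduledoc; remember to `require Logger`):
#
#   defmodule MyPool do
#     use Riak.Pool,
#       adapter: Riak.Pool.Poolboy,
#       hostname: "localhost"
#
#     def log(return, queue_time, query_time, fun, _args) do
#       Logger.debug("#{fun}: queued #{queue_time}us, ran #{query_time}us")
#       return
#     end
#   end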
end
| lib/riak/pool.ex | 0.601828 | 0.546194 | pool.ex | starcoder |
defmodule Crux.Cache do
@moduledoc """
Behaviour all caches must implement (relevant when writing custom ones).
There are exceptions:
* User cache:
* Implement a `me/1` function setting the client's own user id
* `me/0` and `me!/0` functions getting the client's own user
* Guild cache:
* A bit more is involved; you probably want to take a look at the code of the `Crux.Cache.Guild` module
Custom caches should be put under a `Crux.Cache.Provider`. (Can be combined with default caches)
Also worth a look:
* `Crux.Cache.None` - A dummy `Crux.Cache` and `Crux.Cache.Provider`, not caching anything.
"""
@typedoc """
Default caches are using Discord Snowflakes as identifiers.
"""
@type key :: non_neg_integer()
@doc """
Used to start anything fitting under a supervision tree, like for example a `GenServer`, instructed with handling the cache.
Optional, you maybe want to use external caching, e.g. Redis, not requiring anything like that.
"""
@callback start_link(args :: term()) :: Supervisor.on_start()
@doc """
Inserts data into the cache.
Returns the atomified data allowing the operation to be chained.
For example something like that:
```elixir
id =
raw_data
|> Cache.insert()
|> Map.get(:id)
```
"""
@callback insert(data :: term()) :: term()
@doc """
Inserts data into the cache.
Returns "updated" data including changes by merging.
For example from a message embed update to a full message object
```elixir
content =
partial_message # only contains `:id`, `:channel_id`, and `:embeds`
|> Cache.update()
|> Map.get(:content) # present if the message was cached previously
```
"""
@callback update(data :: term()) :: term()
@doc """
Deletes data from the cache by key.
"""
@callback delete(id :: key()) :: :ok
@doc """
Fetches data from the cache by key.
"""
@callback fetch(id :: key()) :: {:ok, term()} | :error
@doc """
Fetches data from the cache by key, raises if not found.
"""
@callback fetch!(id :: key()) :: term() | no_return()
@optional_callbacks start_link: 1
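# A minimal sketch of a cache implementing this behaviour, backed by an Agent
# (hypothetical module; assumes cached data is a map with an :id key):
#
#   defmodule MyApp.SimpleCache do
#     @behaviour Crux.Cache
#
#     def start_link(_args), do: Agent.start_link(fn -> %{} end, name: __MODULE__)
#
#     def insert(%{id: id} = data) do
#       Agent.update(__MODULE__, &Map.put(&1, id, data))
#       data
#     end
#
#     def update(%{id: id} = data) do
#       Agent.get_and_update(__MODULE__, fn state ->
#         merged = Map.merge(Map.get(state, id, %{}), data)
#         {merged, Map.put(state, id, merged)}
#       end)
#     end
#
#     def delete(id), do: Agent.update(__MODULE__, &Map.delete(&1, id))
#     def fetch(id), do: Agent.get(__MODULE__, &Map.fetch(&1, id))
#     def fetch!(id), do: Agent.get(__MODULE__, &Map.fetch!(&1, id))
#   end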
end
| lib/cache.ex | 0.879315 | 0.785473 | cache.ex | starcoder |
defmodule Errol.Consumer.Server do
@moduledoc """
`GenServer` that creates a `queue`, binds it to a given `routing_key` and
consumes messages from that queue.
The preferred way of spinning up a consumer is through `Errol.Wiring.consume/3`
when declaring your _Wiring_, but you can also use this module to start them on your own.
## Examples
iex> {:ok, connection} = AMQP.Connection.open()
iex> {:ok, _pid} = Errol.Consumer.Server.start_link(name: :queue_consumer, queue: "queue_name", routing_key: "my.routing.key", callback: fn _message -> :ok end, exchange: {"/", :topic}, connection: connection)
iex> Errol.Consumer.Server.unbind(:queue_consumer)
:ok
"""
use GenServer
require Logger
alias Errol.{Setup, Message}
@doc """
Creates a `queue`, binds it to a given `routing_key` and
starts a process that consumes messages from that queue.
It expects the following arguments:
- `name` A name for the process.
- `queue` A name for the queue that will be created.
- `routing_key`: A rabbitmq compatible routing key (e.g. `my.routing.key`).
- `callback`: A function with arity of 1.
- `exchange`: The expected format is `{exchange_path, exchange_type}`. `exchange_type` can be `:topic`, `:fanout` or `:direct`.
- `connection`: You can create a connection through `AMQP.Connection.open/1`.
"""
@spec start_link(options :: keyword()) :: {:ok, pid()} | {:error, reason :: atom()}
def start_link(args) do
GenServer.start_link(__MODULE__, args, name: Keyword.get(args, :name))
end
@doc false
def init(options) do
case Setup.set_consumer(options) do
{:ok,
%{
channel: channel,
queue: queue,
exchange: {exchange, _},
routing_key: routing_key
}} ->
{:ok,
%{
channel: channel,
queue: queue,
exchange: exchange,
routing_key: routing_key,
callback: Keyword.get(options, :callback),
pipe_before: Keyword.get(options, :pipe_before, []),
pipe_after: Keyword.get(options, :pipe_after, []),
pipe_error: Keyword.get(options, :pipe_error, []),
running_messages: %{}
}}
{:error, reason} ->
{:stop, reason}
end
end
defp apply_middlewares(message, data, middlewares) do
Enum.reduce(middlewares, {:ok, message}, fn middleware, {status, message} ->
if status == :ok do
middleware.(message, data)
else
{status, message}
end
end)
end
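# Middleware sketch: each middleware is a 2-arity function taking the message
# plus extra data (the queue name for pipe_before/pipe_after, or {queue, error}
# for pipe_error) and returning a {status, message_or_reason} tuple, where
# :ok continues the chain and anything else (e.g. :error, :reject) halts it:
#
#   log_payload = fn message, queue ->
#     Logger.info("[#{queue}] #{inspect(message.payload)}")
#     {:ok, message}
#   end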
@doc false
def handle_info(
{:basic_deliver, payload, meta},
%{callback: callback, pipe_before: pipe_before, pipe_after: pipe_after, queue: queue} =
state
) do
message = %Message{payload: payload, meta: meta}
monitor_pid = self()
{pid, _ref} =
spawn_monitor(fn ->
result =
case apply_middlewares(message, queue, pipe_before) do
{:ok, message} ->
with message <- callback.(message),
{:ok, message} <- apply_middlewares(message, queue, pipe_after) do
{:ok, message}
else
# Pass through {:error, reason}, {:reject, reason}, etc.
other -> other
end
{status, reason} ->
{status, reason}
end
GenServer.cast(monitor_pid, {:processed, {result, self()}})
end)
{:noreply,
%{state | running_messages: Map.put(state.running_messages, pid, {:running, message})}}
end
@doc false
def handle_info({:DOWN, _, :process, _, :normal}, state), do: {:noreply, state}
@doc false
def handle_info({:DOWN, _, :process, pid, exception}, state) do
GenServer.cast(self(), {:processed, {{:error, exception}, pid}})
{:noreply, state}
end
# Confirmation sent by the broker after registering this process as a consumer
@doc false
def handle_info({:basic_consume_ok, %{consumer_tag: consumer_tag}}, state) do
{:noreply, Map.put(state, :consumer_tag, consumer_tag)}
end
# Sent by the broker when the consumer is unexpectedly cancelled (such as after a queue deletion)
@doc false
def handle_info({:basic_cancel, _payload}, state) do
{:stop, :normal, state}
end
# Confirmation sent by the broker to the consumer process after a Basic.cancel
@doc false
def handle_info({:basic_cancel_ok, _payload}, state) do
{:noreply, state}
end
@doc false
def handle_call(
:unbind,
_from,
%{channel: channel, queue: queue, consumer_tag: consumer_tag, exchange: exchange} = state
) do
AMQP.Queue.unbind(channel, queue, exchange)
AMQP.Basic.cancel(channel, consumer_tag)
{:reply, :ok, state}
end
@doc false
def handle_call(:config, _from, state) do
{:reply, state, state}
end
@doc false
def handle_cast({:processed, {{:ok, %Message{meta: %{delivery_tag: tag}}}, pid}}, state) do
{_, running_messages} = Map.pop(state.running_messages, pid)
:ok = AMQP.Basic.ack(state.channel, tag)
{:noreply, %{state | running_messages: running_messages}}
end
@doc false
def handle_cast({:processed, {{:reject, reason}, pid}}, state) do
{{:running, %Message{meta: %{delivery_tag: tag}} = message}, running_messages} =
Map.pop(state.running_messages, pid)
log_error(:reject, message, state.queue, reason)
:ok = AMQP.Basic.reject(state.channel, tag, requeue: false)
{:noreply, %{state | running_messages: running_messages}}
end
@doc false
def handle_cast({:processed, {{:error, error}, pid}}, state) do
new_state = processing_failed(pid, error, state)
{:noreply, new_state}
end
@doc false
def processing_failed(
pid,
error,
%{pipe_error: pipe_error, queue: queue, channel: channel} = state
) do
{{:running, %Message{meta: %{delivery_tag: tag}} = message},
running_messages} = Map.pop(state.running_messages, pid)
{status, _} = apply_middlewares(message, {queue, error}, pipe_error)
log_error(status, message, queue, error)
:ok = AMQP.Basic.reject(channel, tag, requeue: status == :retry)
%{state | running_messages: running_messages}
end
defp log_error(reason, message, queue, error) do
Logger.error("""
#{error_header(reason)}
* message: #{inspect(message)}
* queue: #{queue}
* error: #{inspect(error)}
""")
end
defp error_header(:retry), do: "Retrying message"
defp error_header(_), do: "Rejecting message"
@doc """
Unbinds the given process from the rabbitmq queue it is subscribed to and
shuts down the process.
"""
@spec unbind(consumer_name :: atom() | pid()) :: :ok
def unbind(consumer_name) do
GenServer.call(consumer_name, :unbind)
end
end
| lib/errol/consumer/server.ex | 0.888904 | 0.418637 | server.ex | starcoder |
defmodule Brando.Revisions do
@moduledoc """
**NOTE**: if you use revisions on a schema that has a relation you are
preloading i.e:
`mutation :update, {Project, preload: [:related_projects]}`
You must pass `use_parent: true` to the field's dataloader to prevent
dataloader from loading the relation from the original entry instead of
from the revision:
```
field :related_projects, list_of(:project),
resolve: dataloader(MyApp.Projects, use_parent: true)
```
"""
@type changeset :: Ecto.Changeset.t()
@type revision :: Brando.Revisions.Revision.t()
@type revision_active :: boolean
@type user :: Brando.Users.User.t()
use Brando.Query
import Ecto.Query
alias Brando.Cache
alias Brando.Datasource
alias Brando.Query
alias Brando.Revisions.Revision
alias Brando.Utils
query :list, Revision do
fn query -> from(q in query) end
end
filters Revision do
fn
{:entry_id, entry_id}, query ->
from q in query, where: q.entry_id == ^entry_id
{:entry_type, entry_type}, query when is_binary(entry_type) ->
entry_type = Module.concat([entry_type]) |> to_string()
from q in query, where: q.entry_type == ^entry_type
{:entry_type, entry_type}, query when is_atom(entry_type) ->
entry_type = to_string(entry_type)
from q in query, where: q.entry_type == ^entry_type
{:revision, revision}, query ->
from q in query, where: q.revision == ^revision
{:active, active}, query ->
from q in query, where: q.active == ^active
end
end
@doc """
Create a new revision from `entry` struct
"""
@spec create_revision(map, user | :system, revision_active) :: {:ok, revision} | {:error, changeset}
def create_revision(%{__struct__: entry_type, id: entry_id} = entry, user, set_active \\ true) do
user_id = if user == :system, do: nil, else: user.id
entry_type_binary = to_string(entry_type)
encoded_entry = Utils.term_to_binary(entry)
revision = %{
active: set_active,
entry_type: entry_type_binary,
entry_id: entry_id,
encoded_entry: encoded_entry,
metadata: %{},
revision: next_revision(entry_type_binary, entry_id),
creator_id: user_id,
protected: false
}
%Revision{}
|> Revision.changeset(revision)
|> Brando.repo().insert()
|> case do
{:ok, revision} ->
if set_active do
deactivate_all_revisions_except(revision)
end
{:ok, revision}
err ->
err
end
end
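# Usage sketch (hypothetical `project` entry and `current_user`):
#
#   {:ok, revision} = Brando.Revisions.create_revision(project, current_user)
#   # store a draft without activating it:
#   {:ok, _draft} = Brando.Revisions.create_revision(project, current_user, false)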
@doc """
Set description for revision
## Example
describe_revision(Project, entry.id, revision_number, "Different heading")
"""
def describe_revision(entry_type, entry_id, revision_number, description) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.revision == ^revision_number,
update: [set: [description: ^description]]
Brando.repo().update_all(query, [])
end
@doc """
Mark revision as protected
## Example
protect_revision(Project, entry.id, revision_number, true)
"""
def protect_revision(entry_type, entry_id, revision_number, protect) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.revision == ^revision_number,
update: [set: [protected: ^protect]]
Brando.repo().update_all(query, [])
end
@doc """
Create a revision based on `base_revision`.
Merges in `entry_params` and stores as a new revision
"""
def create_from_base_revision(entry_schema, base_revision_version, entry_id, entry_params, user) do
{:ok, {_, {_, decoded_entry}}} = get_revision(entry_schema, entry_id, base_revision_version)
decoded_entry
|> Map.merge(entry_params)
|> create_revision(user, false)
end
@doc """
Purge all revisions older than 14 days, which are not protected or active
"""
def purge_revisions do
query =
from r in Revision,
where:
fragment("? < current_timestamp - interval '14 day'", r.inserted_at) and
r.protected == false and
r.active == false
Brando.repo().delete_all(query)
end
@doc """
Purge all inactive and unprotected revisions for `entry_type` and `entry_id`
## Example
purge_revisions(Project, entry_id)
"""
def purge_revisions(entry_type, entry_id) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.protected == false and
r.active == false
Brando.repo().delete_all(query)
end
@doc """
Delete specific revision
## Example
delete_revision(Project, entry_id, revision_number)
"""
def delete_revision(entry_type, entry_id, revision) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.revision == ^revision and
r.active == false
Brando.repo().delete_all(query)
end
@doc """
Get the last revision for `entry_type` and `entry_id`
## Example
get_last_revision(Project, project.id)
"""
def get_last_revision(entry_type, entry_id) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where: r.entry_type == ^entry_type_binary and r.entry_id == ^entry_id,
limit: 1,
order_by: [desc: :revision]
case Brando.repo().all(query) do
[] ->
:error
[revision] ->
decoded_entry = Utils.binary_to_term(revision.encoded_entry)
{:ok, {revision, {revision.revision, decoded_entry}}}
end
end
@doc """
Get active revision for `entry_type` and `entry_id`
## Example
get_active_revision(Project, entry.id)
"""
def get_active_revision(entry_type, entry_id) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.active == true,
limit: 1
case Brando.repo().all(query) do
[] ->
:error
[revision] ->
decoded_entry = Utils.binary_to_term(revision.encoded_entry)
{:ok, {revision, {revision.revision, decoded_entry}}}
end
end
@doc """
Set entry to revision number.
## Example
set_entry_to_revision(Project, project_id, wanted_revision_id, user)
"""
def set_entry_to_revision(entry_schema, entry_id, revision_number, user) do
{:ok, {revision, {_, new_entry}}} = get_revision(entry_schema, entry_id, revision_number)
{:ok, {_, {_, base_entry}}} = get_active_revision(entry_schema, entry_id)
new_params = Utils.map_from_struct(new_entry)
base_entry
|> entry_schema.changeset(new_params, user, skip_villain: true)
|> Query.update()
|> case do
{:ok, new_entry} ->
activate_revision(revision)
deactivate_all_revisions_except(revision)
Datasource.update_datasource(entry_schema, new_entry)
Cache.Query.evict({:ok, new_entry})
err ->
err
end
end
@doc """
Get revision
## Example
get_revision(Project, entry.id, revision_number)
"""
def get_revision(entry_type, entry_id, revision_number) do
entry_type_binary = to_string(entry_type)
query =
from r in Revision,
where:
r.entry_type == ^entry_type_binary and
r.entry_id == ^entry_id and
r.revision == ^revision_number,
limit: 1,
order_by: [desc: :revision]
case Brando.repo().all(query) do
[] ->
:error
[revision] ->
decoded_entry = Utils.binary_to_term(revision.encoded_entry)
{:ok, {revision, {revision.revision, decoded_entry}}}
end
end
defp next_revision(entry_type, entry_id) do
query =
from r in Revision,
select: r.revision,
where: r.entry_type == ^entry_type and r.entry_id == ^entry_id,
order_by: [desc: :revision],
limit: 1
case Brando.repo().all(query) do
[] -> 0
[revision] -> revision + 1
end
end
defp activate_revision(revision) do
query =
from r in Revision,
where:
r.entry_type == ^revision.entry_type and
r.entry_id == ^revision.entry_id and
r.revision == ^revision.revision,
update: [set: [active: true]]
Brando.repo().update_all(query, [])
end
defp deactivate_all_revisions_except(revision) do
query =
from r in Revision,
where:
r.active == true and
r.entry_type == ^revision.entry_type and
r.entry_id == ^revision.entry_id and
r.revision != ^revision.revision,
update: [set: [active: false]]
Brando.repo().update_all(query, [])
end
end
| lib/brando/revisions/revisions.ex | 0.841858 | 0.701777 | revisions.ex | starcoder |