| code (string, 114-1.05M chars) | path (string, 3-312 chars) | quality_prob (float64, 0.5-0.99) | learning_prob (float64, 0.2-1) | filename (string, 3-168 chars) | kind (string, 1 class) |
|---|---|---|---|---|---|
defmodule Mix.Tasks.Compile.Tyx do
# credo:disable-for-this-file Credo.Check.Readability.Specs
require Logger
use Boundary, classify_to: Tyx.Mix
use Mix.Task.Compiler
alias Mix.{Project, Task.Compiler, Utils}
alias Tyx.Mix.Typer
@preferred_cli_env :dev
@manifest_events "tyx_events"
@moduledoc """
Cross-module type validation.
This compiler reports all type violations.
## Usage
Once you `use Tyx` anywhere in your project, you need to include the compiler in `mix.exs`:
```
defmodule MyApp.MixProject do
# ...
def project do
[
compilers: [:tyx | Mix.compilers()],
# ...
]
end
# ...
end
```
When developing a library, it's advised to use this compiler only in `:dev` and `:test`
environments:
```
defmodule MyLib.MixProject do
# ...
def project do
[
compilers: extra_compilers(Mix.env()) ++ Mix.compilers(),
# ...
]
end
# ...
defp extra_compilers(:prod), do: []
defp extra_compilers(_env), do: [:tyx]
end
```
## Warnings
Every invalid type is reported as a compiler warning. Consider the following example:
```
defmodule MyApp.User do
use Tyx
deft auth(name: String, pass: String, OUT: :ok) do
MyApp.Auth.validate(name, pass)
end
end
```
Assuming that `MyApp.Auth.validate/2` might fail, returning an `{:error, _}` tuple,
you'll get the following warning:
```
$ mix compile
warning: type violation in `MyApp.User.auth/2`
(returned value `{:error, _}` is not allowed)
lib/my_app/user.ex:3
```
Since the compiler emits warnings, `mix compile` will still succeed and you can start
your system normally, even if some type checks have failed. The compiler doesn't force you to
immediately fix these type errors, which is a deliberate decision made to avoid disrupting the
development flow.
At the same time, it's worth enforcing types on CI. This can easily be done by providing
the `--warnings-as-errors` option to `mix compile`.
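For example, the CI step might run:
```
$ mix compile --warnings-as-errors
```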
"""
@impl Compiler
def run(argv) do
Typer.start_link()
Compiler.after_compiler(:app, &after_compiler(&1, argv))
tracers = Code.get_compiler_option(:tracers)
Code.put_compiler_option(:tracers, [__MODULE__ | tracers])
{:ok, []}
end
@impl Compiler
@doc false
def manifests, do: [manifest_path(@manifest_events)]
@doc false
@impl Compiler
def clean do
:ok
end
@doc false
def trace({:imported_macro, meta, Tyx, :deft, 2}, env) do
pos = if Keyword.keyword?(meta), do: Keyword.get(meta, :line, env.line)
"⚑⚐⚑"
|> diagnostic(
details: [module: env.module, context: env.context],
position: pos,
file: env.file
)
|> Typer.put()
:ok
end
@doc false
def trace(_event, _env), do: :ok
defp after_compiler({status, diagnostics}, argv) do
if status in [:ok, :noop] do
app_name = app_name()
Application.unload(app_name)
Application.load(app_name)
end
tracers = Enum.reject(Code.get_compiler_option(:tracers), &(&1 == __MODULE__))
Code.put_compiler_option(:tracers, tracers)
tyx_diagnostics = finalize_diagnostics()
write_manifest(@manifest_events, tyx_diagnostics)
Logger.debug(inspect({status, argv, tyx_diagnostics}))
{status, diagnostics ++ tyx_diagnostics}
end
@spec finalize_diagnostics :: [Compiler.Diagnostic.t()]
defp finalize_diagnostics do
# FIXME group by module and do it in bulks
Typer.all()
|> Enum.reduce([], fn diagnostic, acc ->
tyxes = diagnostic.details[:module].__tyx__()
pos = diagnostic.position
{_tyx, result} = Enum.find(tyxes, &match?({%Tyx{env: %Macro.Env{line: ^pos}}, _}, &1))
case result do
:ok ->
acc
{:error, [traversal: traversal]} ->
[
%Compiler.Diagnostic{
diagnostic
| message: "Unknown or not yet implemented issue at ##{pos}: #{inspect(traversal)}"
}
| acc
]
{:error, known} ->
[
%Compiler.Diagnostic{
diagnostic
| message: "Tyx error at ##{pos}: #{inspect(known)}",
severity: :error
}
| acc
]
end
end)
end
@spec app_name :: atom()
defp app_name, do: Keyword.fetch!(Project.config(), :app)
@spec store_config :: :ok | {:error, :manifest_missing}
def store_config, do: @manifest_events |> read_manifest() |> do_store_config()
@spec do_store_config(nil | term()) :: :ok | {:error, any()}
defp do_store_config(nil), do: {:error, :manifest_missing}
defp do_store_config(_manifest) do
:ok
end
@spec manifest_path(binary()) :: binary()
defp manifest_path(name),
do: Project.config() |> Project.manifest_path() |> Path.join("compile.#{name}")
@spec read_manifest(binary()) :: term()
defp read_manifest(name) do
unless Utils.stale?([Project.config_mtime()], [manifest_path(name)]) do
name
|> manifest_path()
|> File.read()
|> case do
{:ok, manifest} -> :erlang.binary_to_term(manifest)
_ -> nil
end
end
end
@spec write_manifest(binary(), term()) :: :ok
defp write_manifest(name, data) do
path = manifest_path(name)
File.mkdir_p!(Path.dirname(path))
File.write!(path, :erlang.term_to_binary(data))
do_store_config(data)
end
# system_apps = ~w/elixir stdlib kernel/a
# system_apps
# |> Stream.each(&Application.load/1)
# |> Stream.flat_map(&Application.spec(&1, :modules))
# |> Enum.each(fn module -> defp system_module?(unquote(module)), do: true end)
# defp system_module?(module), do: :code.which(module) == :preloaded
# defp status([], _), do: :ok
# defp status([_ | _], argv), do: if(warnings_as_errors?(argv), do: :error, else: :ok)
# defp warnings_as_errors?(argv) do
# {parsed, _argv, _errors} = OptionParser.parse(argv, strict: [warnings_as_errors: :boolean])
# Keyword.get(parsed, :warnings_as_errors, false)
# end
# defp print_diagnostic_errors(errors) do
# if errors != [], do: Mix.shell().info("")
# Enum.each(errors, &print_diagnostic_error/1)
# end
# defp print_diagnostic_error(error) do
# Mix.shell().info([severity(error.severity), error.message, location(error)])
# end
# defp location(error) do
# if error.file != nil and error.file != "" do
# pos = if error.position != nil, do: ":#{error.position}", else: ""
# "\n #{error.file}#{pos}\n"
# else
# "\n"
# end
# end
# defp severity(severity), do: [:bright, color(severity), "#{severity}: ", :reset]
# defp color(:error), do: :red
# defp color(:warning), do: :yellow
# defp check(application, entries) do
# []
# |> Stream.map(&to_diagnostic_error/1)
# |> Enum.sort_by(&{&1.file, &1.position})
# rescue
# e in Boundary.Error ->
# [diagnostic(e.message, file: e.file, position: e.line)]
# end
# defp to_diagnostic_error({error, module}),
# do: diagnostic("#{inspect(error)} is error", file: module_source(module))
# defp module_source(module) do
# module.module_info(:compile)
# |> Keyword.fetch!(:source)
# |> to_string()
# |> Path.relative_to_cwd()
# catch
# _, _ -> ""
# end
@spec diagnostic(String.t(), keyword()) :: Compiler.Diagnostic.t()
def diagnostic(message, opts \\ []) do
%Compiler.Diagnostic{
compiler_name: "tyx",
details: nil,
file: "unknown",
message: message,
position: nil,
severity: :information
}
|> Map.merge(Map.new(opts))
end
end
| lib/tyx/mix/tasks/compile/tyx.ex | 0.75037 | 0.690337 | tyx.ex | starcoder |
defmodule Flop.Cursor do
@moduledoc """
Functions for encoding, decoding and extracting cursor values.
"""
@doc """
Encodes a cursor value.
iex> Flop.Cursor.encode(%{name: "Peter", email: "<EMAIL>"})
"g3QAAAACZAAFZW1haWxtAAAACnBldGVyQG1haWxkAARuYW1lbQAAAAVQZXRlcg=="
"""
@doc since: "0.8.0"
@spec encode(map()) :: binary()
def encode(key) do
Base.url_encode64(:erlang.term_to_binary(key))
end
@doc """
Decodes a cursor value.
Returns `:error` if the cursor cannot be decoded or the decoded term is not a
map with atom keys.
iex> Flop.Cursor.decode("g3QAAAABZAACaWRiAAACDg==")
{:ok, %{id: 526}}
iex> Flop.Cursor.decode("AAAH")
:error
iex> f = fn a -> a + 1 end
iex> cursor = Flop.Cursor.encode(%{a: f})
iex> Flop.Cursor.decode(cursor)
:error
iex> cursor = Flop.Cursor.encode(a: "b")
iex> Flop.Cursor.decode(cursor)
:error
iex> cursor = Flop.Cursor.encode(%{"a" => "b"})
iex> Flop.Cursor.decode(cursor)
:error
Trying to decode a cursor that contains non-existent atoms also results in an
error.
iex> Flop.Cursor.decode("g3QAAAABZAAGYmFybmV5ZAAGcnViYmVs")
:error
"""
@doc since: "0.8.0"
@spec decode(binary()) :: {:ok, map()} | :error
def decode(cursor) do
with {:ok, binary} <- Base.url_decode64(cursor),
{:ok, term} <- safe_binary_to_term(binary) do
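# `sanitize/1` is called for its side effect only: it raises a `RuntimeError`
# if the decoded term contains unsafe values such as functions.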
sanitize(term)
if is_map(term) && term |> Map.keys() |> Enum.all?(&is_atom/1),
do: {:ok, term},
else: :error
end
rescue
_e in RuntimeError -> :error
end
@doc """
Same as `Flop.Cursor.decode/1`, but raises an error if the cursor is invalid.
iex> Flop.Cursor.decode!("g3QAAAABZAACaWRiAAACDg==")
%{id: 526}
iex> Flop.Cursor.decode!("AAAH")
** (RuntimeError) invalid cursor
"""
@doc since: "0.9.0"
@spec decode!(binary()) :: map()
def decode!(cursor) do
case decode(cursor) do
{:ok, decoded} -> decoded
:error -> raise "invalid cursor"
end
end
defp safe_binary_to_term(term) do
{:ok, :erlang.binary_to_term(term, [:safe])}
rescue
_e in ArgumentError -> :error
end
defp sanitize(term)
when is_atom(term) or is_number(term) or is_binary(term) do
term
end
defp sanitize([]), do: []
defp sanitize([h | t]), do: [sanitize(h) | sanitize(t)]
defp sanitize(%{} = term) do
:maps.fold(
fn key, value, acc ->
sanitize(key)
sanitize(value)
acc
end,
term,
term
)
end
defp sanitize(term) when is_tuple(term) do
term
|> Tuple.to_list()
|> sanitize()
end
defp sanitize(_) do
raise "invalid cursor value"
end
@doc """
Retrieves the start and end cursors from a query result.
iex> results = [%{name: "Mary"}, %{name: "Paul"}, %{name: "Peter"}]
iex> order_by = [:name]
iex>
iex> {start_cursor, end_cursor} =
...> Flop.Cursor.get_cursors(results, order_by)
{"g3QAAAABZAAEbmFtZW0AAAAETWFyeQ==", "g3QAAAABZAAEbmFtZW0AAAAFUGV0ZXI="}
iex>
iex> Flop.Cursor.decode(start_cursor)
{:ok, %{name: "Mary"}}
iex> Flop.Cursor.decode(end_cursor)
{:ok, %{name: "Peter"}}
If the result set is empty, the cursor values will be `nil`.
iex> Flop.Cursor.get_cursors([], [:id])
{nil, nil}
The default function to retrieve the cursor value from the query result is
`Flop.Cursor.get_cursor_from_node/2`, which expects the query result to be a
map or a 2-tuple. You can set the `cursor_value_func` option to use
another function. Flop also comes with `Flop.Cursor.get_cursor_from_edge/2`.
If the records in the result set are not maps, you can define a custom cursor
value function like this:
iex> results = [{"Mary", 1936}, {"Paul", 1937}, {"Peter", 1938}]
iex> cursor_func = fn {name, year}, order_fields ->
...> Enum.into(order_fields, %{}, fn
...> :name -> {:name, name}
...> :year -> {:year, year}
...> end)
...> end
iex> opts = [cursor_value_func: cursor_func]
iex>
iex> {start_cursor, end_cursor} =
...> Flop.Cursor.get_cursors(results, [:name, :year], opts)
{"g3QAAAACZAAEbmFtZW0AAAAETWFyeWQABHllYXJiAAAHkA==",
"g3QAAAACZAAEbmFtZW0AAAAFUGV0ZXJkAAR5ZWFyYgAAB5I="}
iex>
iex> Flop.Cursor.decode(start_cursor)
{:ok, %{name: "Mary", year: 1936}}
iex> Flop.Cursor.decode(end_cursor)
{:ok, %{name: "Peter", year: 1938}}
"""
@doc since: "0.8.0"
@spec get_cursors([any], [atom], [Flop.option()]) ::
{binary(), binary()} | {nil, nil}
def get_cursors(results, order_by, opts \\ []) do
cursor_value_func = cursor_value_func(opts)
case results do
[] ->
{nil, nil}
[first | _] ->
{
first |> cursor_value_func.(order_by) |> encode(),
results
|> List.last()
|> cursor_value_func.(order_by)
|> encode()
}
end
end
@doc """
Takes a `{node, edge}` tuple and the `order_by` field list and returns the
cursor value derived from the edge map.
If a map is passed instead of a tuple, it retrieves the cursor value from that
map.
This function can be used for the `:cursor_value_func` option. See also
`Flop.Cursor.get_cursor_from_node/2`.
iex> record = %{id: 20, name: "George", age: 62}
iex> edge = %{id: 25, relation: "sibling"}
iex>
iex> Flop.Cursor.get_cursor_from_edge({record, edge}, [:id])
%{id: 25}
iex> Flop.Cursor.get_cursor_from_edge({record, edge}, [:id, :relation])
%{id: 25, relation: "sibling"}
iex> Flop.Cursor.get_cursor_from_edge(record, [:id])
%{id: 20}
If the edge is a struct that derives `Flop.Schema`, join and compound fields
are resolved according to the configuration.
iex> record = %{id: 25, relation: "sibling"}
iex> edge = %Flop.Pet{
...> name: "George",
...> owner: %Flop.Owner{name: "Carl"}
...> }
iex>
iex> Flop.Cursor.get_cursor_from_edge({record, edge}, [:owner_name])
%{owner_name: "Carl"}
iex> Flop.Cursor.get_cursor_from_edge(edge, [:owner_name])
%{owner_name: "Carl"}
iex> record = %{id: 25, relation: "sibling"}
iex> edge = %Flop.Pet{
...> given_name: "George",
...> family_name: "Gooney"
...> }
iex> Flop.Cursor.get_cursor_from_edge({record, edge}, [:full_name])
%{full_name: "<NAME>"}
iex> Flop.Cursor.get_cursor_from_edge(edge, [:full_name])
%{full_name: "<NAME>"}
"""
@doc since: "0.11.0"
@spec get_cursor_from_edge({map, map} | map, [atom]) :: map
def get_cursor_from_edge({_, %{} = item}, order_by) do
Enum.into(order_by, %{}, fn field ->
{field, Flop.Schema.get_field(item, field)}
end)
end
def get_cursor_from_edge(%{} = item, order_by) do
Enum.into(order_by, %{}, fn field ->
{field, Flop.Schema.get_field(item, field)}
end)
end
@doc """
Takes a `{node, edge}` tuple and the `order_by` field list and returns the
cursor value derived from the node map.
If a map is passed instead of a tuple, it retrieves the cursor value from that
map.
This function is used as a default if no `:cursor_value_func` option is
set. See also `Flop.Cursor.get_cursor_from_edge/2`.
iex> record = %{id: 20, name: "George", age: 62}
iex> edge = %{id: 25, relation: "sibling"}
iex>
iex> Flop.Cursor.get_cursor_from_node({record, edge}, [:id])
%{id: 20}
iex> Flop.Cursor.get_cursor_from_node({record, edge}, [:id, :name])
%{id: 20, name: "George"}
iex> Flop.Cursor.get_cursor_from_node(record, [:id])
%{id: 20}
If the node is a struct that derives `Flop.Schema`, join and compound fields
are resolved according to the configuration.
iex> record = %Flop.Pet{
...> name: "George",
...> owner: %Flop.Owner{name: "Carl"}
...> }
iex> edge = %{id: 25, relation: "sibling"}
iex>
iex> Flop.Cursor.get_cursor_from_node({record, edge}, [:owner_name])
%{owner_name: "Carl"}
iex> Flop.Cursor.get_cursor_from_node(record, [:owner_name])
%{owner_name: "Carl"}
iex> record = %Flop.Pet{
...> given_name: "George",
...> family_name: "Gooney"
...> }
iex> edge = %{id: 25, relation: "sibling"}
iex> Flop.Cursor.get_cursor_from_node({record, edge}, [:full_name])
%{full_name: "<NAME>"}
iex> Flop.Cursor.get_cursor_from_node(record, [:full_name])
%{full_name: "<NAME>"}
"""
@doc since: "0.11.0"
@spec get_cursor_from_node({map, map} | map, [atom]) :: map
def get_cursor_from_node({%{} = item, _}, order_by) do
Enum.into(order_by, %{}, fn field ->
{field, Flop.Schema.get_field(item, field)}
end)
end
def get_cursor_from_node(%{} = item, order_by) do
Enum.into(order_by, %{}, fn field ->
{field, Flop.Schema.get_field(item, field)}
end)
end
@doc false
def cursor_value_func(opts \\ []) do
opts[:cursor_value_func] ||
Application.get_env(:flop, :cursor_value_func) ||
(&get_cursor_from_node/2)
end
end
| lib/flop/cursor.ex | 0.871338 | 0.44348 | cursor.ex | starcoder |
defmodule ExVault.Response do
@moduledoc """
Structs for the most common Vault API response formats.
Generally, the data of a response is available in an `ExVault.Response.Logical`
struct wrapped in an `ExVault.Response.Success` struct. Errors are represented
with the `ExVault.Response.Error` struct.
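For example, a raw `Tesla` result can be turned into these structs with
`parse_response/1` (an illustrative sketch; `tesla_result` stands in for any
`Tesla.Env.result()`):

    case ExVault.Response.parse_response(tesla_result) do
      {:ok, %ExVault.Response.Success{logical: logical}} -> logical.data
      {:ok, %ExVault.Response.Error{errors: errors}} -> {:error, errors}
      {:error, reason} -> {:error, reason}
    end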
"""
defmodule Error do
@moduledoc """
Vault API error response. This represents an HTTP 4xx/5xx response.
"""
defstruct [:status, :errors]
@type t :: %__MODULE__{
status: integer,
errors: [String.t()]
}
@doc false
@spec from_resp(Tesla.Env.t()) :: t()
def from_resp(%{status: status, body: %{"errors" => errors}}) do
%__MODULE__{status: status, errors: errors}
end
end
defmodule Success do
@moduledoc """
Vault API success response. This represents an HTTP 2xx response.
Usually, the `logical` field will contain an `ExVault.Response.Logical`
struct.
"""
alias ExVault.Response.Logical
defstruct [:status, :logical, :body]
@type t :: %__MODULE__{
status: integer,
body: %{} | String.t(),
logical: Logical.t() | nil
}
@doc false
@spec from_resp(Tesla.Env.t()) :: t()
def from_resp(%{status: status, body: body}) do
%__MODULE__{
status: status,
body: body,
logical: Logical.from_body(body)
}
end
end
defmodule Logical do
@moduledoc """
Vault API "logical" response. Most Vault APIs return one of these.
This is based on the corresponding [Go struct](https://godoc.org/github.com/hashicorp/vault/logical#HTTPResponse)
in the official Vault client.
"""
defstruct [
:request_id,
:lease_id,
:renewable,
:lease_duration,
:data,
:wrap_info,
:warnings,
:auth
]
@type t :: %__MODULE__{
request_id: String.t(),
lease_id: String.t(),
renewable: boolean,
lease_duration: integer,
data: %{},
# TODO: WrapInfo struct.
wrap_info: nil,
warnings: [String.t()],
# TODO: Auth struct.
auth: nil
}
@doc false
@spec from_body(Tesla.Env.body()) :: t() | nil
def from_body(%{
"request_id" => request_id,
"lease_id" => lease_id,
"renewable" => renewable,
"lease_duration" => lease_duration,
"data" => data,
"wrap_info" => wrap_info,
"warnings" => warnings,
"auth" => auth
}) do
%__MODULE__{
request_id: request_id,
lease_id: lease_id,
renewable: renewable,
lease_duration: lease_duration,
data: data,
wrap_info: wrap_info,
warnings: warnings,
auth: auth
}
end
def from_body(_), do: nil
end
@type t :: {:ok, Error.t() | Success.t()} | {:error, any()}
@doc false
@spec parse_response(Tesla.Env.result()) :: t()
def parse_response({:ok, %{status: status} = resp}) when status >= 400,
do: {:ok, Error.from_resp(resp)}
def parse_response({:ok, resp}), do: {:ok, Success.from_resp(resp)}
def parse_response(not_ok), do: not_ok
end
| lib/exvault/response.ex | 0.751557 | 0.528473 | response.ex | starcoder |
import Kernel, except: [inspect: 1]
import Inspect.Algebra
defprotocol Inspect do
@moduledoc """
The `Inspect` protocol is responsible for converting any Elixir
data structure into an algebra document. This document is then
formatted, either in pretty printing format or a regular one.
The `inspect/2` function receives the entity to be inspected
followed by the inspecting options, represented by the struct
`Inspect.Opts`.
Inspection is done using the functions available in `Inspect.Algebra`.
## Examples
Often, inspecting a structure can be implemented in terms
of existing entities. For example, here is `MapSet`'s `inspect`
implementation:
defimpl Inspect, for: MapSet do
import Inspect.Algebra
def inspect(dict, opts) do
concat ["#MapSet<", to_doc(MapSet.to_list(dict), opts), ">"]
end
end
The `concat/1` function comes from `Inspect.Algebra` and it
concatenates algebra documents together. In the example above,
it is concatenating the string `"#MapSet<"` (all strings are
valid algebra documents that keep their formatting when pretty
printed), the document returned by `Inspect.Algebra.to_doc/2` and the
other string `">"`.
Since regular strings are valid entities in an algebra document,
an implementation of inspect may simply return a string,
although that will deprive it of any pretty-printing.
## Error handling
In case there is an error while your structure is being inspected,
Elixir will raise an `ArgumentError` error and will automatically fall back
to a raw representation for printing the structure.
You can however access the underlying error by invoking the Inspect
implementation directly. For example, to test Inspect.MapSet above,
you can invoke it as:
Inspect.MapSet.inspect(MapSet.new, %Inspect.Opts{})
"""
# Handle structs in Any
@fallback_to_any true
def inspect(term, opts)
end
defimpl Inspect, for: Atom do
require Macro
def inspect(atom, _opts) do
inspect(atom)
end
def inspect(false), do: "false"
def inspect(true), do: "true"
def inspect(nil), do: "nil"
def inspect(:""), do: ":\"\""
def inspect(atom) do
binary = Atom.to_string(atom)
cond do
valid_ref_identifier?(binary) ->
if only_elixir?(binary) do
binary
else
"Elixir." <> rest = binary
rest
end
valid_atom_identifier?(binary) ->
":" <> binary
atom in [:%{}, :{}, :<<>>, :..., :%] ->
":" <> binary
atom in Macro.binary_ops or atom in Macro.unary_ops ->
":" <> binary
true ->
IO.iodata_to_binary [?:, ?", Inspect.BitString.escape(binary, ?"), ?"]
end
end
defp only_elixir?("Elixir." <> rest), do: only_elixir?(rest)
defp only_elixir?("Elixir"), do: true
defp only_elixir?(_), do: false
# Detect if atom is an atom alias (Elixir.Foo.Bar.Baz)
defp valid_ref_identifier?("Elixir" <> rest) do
valid_ref_piece?(rest)
end
defp valid_ref_identifier?(_), do: false
defp valid_ref_piece?(<<?., h, t::binary>>) when h in ?A..?Z do
valid_ref_piece? valid_identifier?(t)
end
defp valid_ref_piece?(<<>>), do: true
defp valid_ref_piece?(_), do: false
# Detect if atom is a plain, unquoted identifier
defp valid_atom_identifier?(<<h, t::binary>>) when h in ?a..?z or h in ?A..?Z or h == ?_ do
valid_atom_piece?(t)
end
defp valid_atom_identifier?(_), do: false
defp valid_atom_piece?(t) do
case valid_identifier?(t) do
<<>> -> true
<<??>> -> true
<<?!>> -> true
<<?@, t::binary>> -> valid_atom_piece?(t)
_ -> false
end
end
defp valid_identifier?(<<h, t::binary>>)
when h in ?a..?z
when h in ?A..?Z
when h in ?0..?9
when h == ?_ do
valid_identifier? t
end
defp valid_identifier?(other), do: other
end
defimpl Inspect, for: BitString do
def inspect(term, %Inspect.Opts{binaries: bins, base: base} = opts) when is_binary(term) do
if base == :decimal and (bins == :as_strings or (bins == :infer and String.printable?(term))) do
IO.iodata_to_binary([?", escape(term, ?"), ?"])
else
inspect_bitstring(term, opts)
end
end
def inspect(term, opts) do
inspect_bitstring(term, opts)
end
## Escaping
@doc false
def escape(other, char) do
escape(other, char, [])
end
defp escape(<<char, t::binary>>, char, acc) do
escape(t, char, [acc | [?\\, char]])
end
defp escape(<<?#, ?{, t::binary>>, char, acc) do
escape(t, char, [acc | '\\\#{'])
end
defp escape(<<?\a, t::binary>>, char, acc) do
escape(t, char, [acc | '\\a'])
end
defp escape(<<?\b, t::binary>>, char, acc) do
escape(t, char, [acc | '\\b'])
end
defp escape(<<?\d, t::binary>>, char, acc) do
escape(t, char, [acc | '\\d'])
end
defp escape(<<?\e, t::binary>>, char, acc) do
escape(t, char, [acc | '\\e'])
end
defp escape(<<?\f, t::binary>>, char, acc) do
escape(t, char, [acc | '\\f'])
end
defp escape(<<?\n, t::binary>>, char, acc) do
escape(t, char, [acc | '\\n'])
end
defp escape(<<?\r, t::binary>>, char, acc) do
escape(t, char, [acc | '\\r'])
end
defp escape(<<?\\, t::binary>>, char, acc) do
escape(t, char, [acc | '\\\\'])
end
defp escape(<<?\t, t::binary>>, char, acc) do
escape(t, char, [acc | '\\t'])
end
defp escape(<<?\v, t::binary>>, char, acc) do
escape(t, char, [acc | '\\v'])
end
defp escape(<<h::utf8, t::binary>>, char, acc)
when h in 0x20..0x7E
when h in 0xA0..0xD7FF
when h in 0xE000..0xFFFD
when h in 0x10000..0x10FFFF do
escape(t, char, [acc | <<h::utf8>>])
end
defp escape(<<h, t::binary>>, char, acc) do
escape(t, char, [acc | escape_char(h)])
end
defp escape(<<>>, _char, acc), do: acc
@doc false
# Also used by Regex
def escape_char(0) do
'\\0'
end
def escape_char(char) when char < 0x100 do
<<a::4, b::4>> = <<char::8>>
['\\x', to_hex(a), to_hex(b)]
end
def escape_char(char) when char < 0x10000 do
<<a::4, b::4, c::4, d::4>> = <<char::16>>
['\\x{', to_hex(a), to_hex(b), to_hex(c), to_hex(d), ?}]
end
def escape_char(char) when char < 0x1000000 do
<<a::4, b::4, c::4, d::4, e::4, f::4>> = <<char::24>>
['\\x{', to_hex(a), to_hex(b), to_hex(c),
to_hex(d), to_hex(e), to_hex(f), ?}]
end
defp to_hex(c) when c in 0..9, do: ?0+c
defp to_hex(c) when c in 10..15, do: ?A+c-10
## Bitstrings
defp inspect_bitstring("", _opts) do
"<<>>"
end
defp inspect_bitstring(bitstring, opts) do
nest surround("<<", each_bit(bitstring, opts.limit, opts), ">>"), 1
end
defp each_bit(_, 0, _) do
"..."
end
defp each_bit(<<>>, _counter, _opts) do
:doc_nil
end
defp each_bit(<<h::8>>, _counter, opts) do
Inspect.Integer.inspect(h, opts)
end
defp each_bit(<<h, t::bitstring>>, counter, opts) do
glue(concat(Inspect.Integer.inspect(h, opts), ","),
each_bit(t, decrement(counter), opts))
end
defp each_bit(bitstring, _counter, opts) do
size = bit_size(bitstring)
<<h::size(size)>> = bitstring
Inspect.Integer.inspect(h, opts) <> "::size(" <> Integer.to_string(size) <> ")"
end
defp decrement(:infinity), do: :infinity
defp decrement(counter), do: counter - 1
end
defimpl Inspect, for: List do
def inspect([], _opts), do: "[]"
# TODO: Deprecate :char_lists and :as_char_lists keys in v1.5
def inspect(term, %Inspect.Opts{charlists: lists, char_lists: lists_deprecated} = opts) do
lists =
if lists == :infer and lists_deprecated != :infer do
case lists_deprecated do
:as_char_lists ->
:as_charlists
_ ->
lists_deprecated
end
else
lists
end
cond do
lists == :as_charlists or (lists == :infer and printable?(term)) ->
IO.iodata_to_binary [?', Inspect.BitString.escape(IO.chardata_to_string(term), ?'), ?']
keyword?(term) ->
surround_many("[", term, "]", opts, &keyword/2)
true ->
surround_many("[", term, "]", opts, &to_doc/2)
end
end
@doc false
def keyword({key, value}, opts) do
concat(
key_to_binary(key) <> ": ",
to_doc(value, opts)
)
end
@doc false
def keyword?([{key, _value} | rest]) when is_atom(key) do
case Atom.to_charlist(key) do
'Elixir.' ++ _ -> false
_ -> keyword?(rest)
end
end
def keyword?([]), do: true
def keyword?(_other), do: false
@doc false
def printable?([char | rest]) when char in 32..126, do: printable?(rest)
def printable?([?\n | rest]), do: printable?(rest)
def printable?([?\r | rest]), do: printable?(rest)
def printable?([?\t | rest]), do: printable?(rest)
def printable?([?\v | rest]), do: printable?(rest)
def printable?([?\b | rest]), do: printable?(rest)
def printable?([?\f | rest]), do: printable?(rest)
def printable?([?\e | rest]), do: printable?(rest)
def printable?([?\a | rest]), do: printable?(rest)
def printable?([]), do: true
def printable?(_), do: false
## Private
defp key_to_binary(key) do
case Inspect.Atom.inspect(key) do
":" <> right -> right
other -> other
end
end
end
defimpl Inspect, for: Tuple do
def inspect({}, _opts), do: "{}"
def inspect(tuple, opts) do
surround_many("{", Tuple.to_list(tuple), "}", opts, &to_doc/2)
end
end
defimpl Inspect, for: Map do
def inspect(map, opts) do
nest inspect(map, "", opts), 1
end
def inspect(map, name, opts) do
map = :maps.to_list(map)
surround_many("%" <> name <> "{", map, "}", opts, traverse_fun(map))
end
defp traverse_fun(list) do
if Inspect.List.keyword?(list) do
&Inspect.List.keyword/2
else
&to_map/2
end
end
defp to_map({key, value}, opts) do
concat(
concat(to_doc(key, opts), " => "),
to_doc(value, opts)
)
end
end
defimpl Inspect, for: Integer do
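# e.g. with `base: :hex`, 255 is rendered as "0xFF"; with `base: :binary`, as "0b11111111"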
def inspect(term, %Inspect.Opts{base: base}) do
Integer.to_string(term, base_to_value(base))
|> prepend_prefix(base)
end
defp base_to_value(base) do
case base do
:binary -> 2
:decimal -> 10
:octal -> 8
:hex -> 16
end
end
defp prepend_prefix(value, :decimal), do: value
defp prepend_prefix(value, base) do
prefix = case base do
:binary -> "0b"
:octal -> "0o"
:hex -> "0x"
end
prefix <> value
end
end
defimpl Inspect, for: Float do
def inspect(term, _opts) do
IO.iodata_to_binary(:io_lib_format.fwrite_g(term))
end
end
defimpl Inspect, for: Regex do
def inspect(regex, _opts) do
IO.iodata_to_binary ['~r/', escape(regex.source, ?/), ?/, regex.opts]
end
defp escape(bin, term),
do: escape(bin, [], term)
defp escape(<<?\\, term>> <> rest, buf, term),
do: escape(rest, [buf | [?\\, term]], term)
defp escape(<<term>> <> rest, buf, term),
do: escape(rest, [buf | [?\\, term]], term)
# The list of characters is from 'String.printable?' implementation
# minus characters treated specially by regex: \s, \d, \b, \e
defp escape(<<?\n>> <> rest, buf, term),
do: escape(rest, [buf | '\\n'], term)
defp escape(<<?\r>> <> rest, buf, term),
do: escape(rest, [buf | '\\r'], term)
defp escape(<<?\t>> <> rest, buf, term),
do: escape(rest, [buf | '\\t'], term)
defp escape(<<?\v>> <> rest, buf, term),
do: escape(rest, [buf | '\\v'], term)
defp escape(<<?\f>> <> rest, buf, term),
do: escape(rest, [buf | '\\f'], term)
defp escape(<<?\a>> <> rest, buf, term),
do: escape(rest, [buf | '\\a'], term)
defp escape(<<char::utf8, rest::binary>>, buf, term)
when char in 0x20..0x7E
when char in 0xA0..0xD7FF
when char in 0xE000..0xFFFD
when char in 0x10000..0x10FFFF,
do: escape(rest, [buf | <<char::utf8>>], term)
defp escape(<<char, rest::binary>>, buf, term),
do: escape(rest, [buf | Inspect.BitString.escape_char(char)], term)
defp escape(<<>>, buf, _), do: buf
end
defimpl Inspect, for: Function do
def inspect(function, _opts) do
fun_info = :erlang.fun_info(function)
mod = fun_info[:module]
if fun_info[:type] == :external and fun_info[:env] == [] do
"&#{Inspect.Atom.inspect(mod)}.#{fun_info[:name]}/#{fun_info[:arity]}"
else
case Atom.to_charlist(mod) do
'elixir_compiler_' ++ _ ->
if function_exported?(mod, :__RELATIVE__, 0) do
"#Function<#{uniq(fun_info)} in file:#{mod.__RELATIVE__}>"
else
default_inspect(mod, fun_info)
end
_ ->
default_inspect(mod, fun_info)
end
end
end
defp default_inspect(mod, fun_info) do
"#Function<#{uniq(fun_info)}/#{fun_info[:arity]} in " <>
"#{Inspect.Atom.inspect(mod)}#{extract_name(fun_info[:name])}>"
end
defp extract_name([]) do
""
end
defp extract_name(name) do
name = Atom.to_string(name)
case :binary.split(name, "-", [:global]) do
["", name | _] -> "." <> name
_ -> "." <> name
end
end
defp uniq(fun_info) do
Integer.to_string(fun_info[:new_index]) <> "." <>
Integer.to_string(fun_info[:uniq])
end
end
defimpl Inspect, for: PID do
def inspect(pid, _opts) do
"#PID" <> IO.iodata_to_binary(:erlang.pid_to_list(pid))
end
end
defimpl Inspect, for: Port do
def inspect(port, _opts) do
IO.iodata_to_binary :erlang.port_to_list(port)
end
end
defimpl Inspect, for: Reference do
def inspect(ref, _opts) do
'#Ref' ++ rest = :erlang.ref_to_list(ref)
"#Reference" <> IO.iodata_to_binary(rest)
end
end
defimpl Inspect, for: Any do
def inspect(%{__struct__: struct} = map, opts) do
try do
struct.__struct__
rescue
_ -> Inspect.Map.inspect(map, opts)
else
dunder ->
if :maps.keys(dunder) == :maps.keys(map) do
pruned = :maps.remove(:__exception__, :maps.remove(:__struct__, map))
Inspect.Map.inspect(pruned, Inspect.Atom.inspect(struct, opts), opts)
else
Inspect.Map.inspect(map, opts)
end
end
end
end
| lib/elixir/lib/inspect.ex | 0.765681 | 0.60133 | inspect.ex | starcoder |
defmodule Absinthe.Phase.Document.Result do
@moduledoc false
# Produces data fit for external encoding from annotated value tree
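# The execution tree is walked top-down: object nodes contribute maps of field
# name to value, list nodes contribute lists, and leaf nodes are serialized
# through their scalar or enum type. Field errors collected along the way are
# deduplicated and formatted at the end.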
alias Absinthe.{Blueprint, Phase, Type}
use Absinthe.Phase
@spec run(Blueprint.t | Phase.Error.t, Keyword.t) :: {:ok, map}
def run(%Blueprint{} = bp, _options \\ []) do
result = Map.merge(bp.result, process(bp))
{:ok, %{bp | result: result}}
end
defp process(blueprint) do
result = case blueprint.execution do
%{validation_errors: [], result: result} ->
{:ok, data(result, [])}
%{validation_errors: errors} ->
{:validation_failed, errors}
end
format_result(result)
end
defp format_result(:execution_failed) do
%{data: nil}
end
defp format_result({:ok, {data, []}}) do
%{data: data}
end
defp format_result({:ok, {data, errors}}) do
errors = errors |> Enum.uniq |> Enum.map(&format_error/1)
%{data: data, errors: errors}
end
defp format_result({:validation_failed, errors}) do
errors = errors |> Enum.uniq |> Enum.map(&format_error/1)
%{errors: errors}
end
defp format_result({:parse_failed, error}) do
%{errors: [format_error(error)]}
end
defp data(%{errors: [_|_] = field_errors}, errors), do: {nil, field_errors ++ errors}
# Leaf
defp data(%{value: nil}, errors), do: {nil, errors}
defp data(%{value: value, emitter: emitter}, errors) do
value =
case Type.unwrap(emitter.schema_node.type) do
%Type.Scalar{} = schema_node ->
Type.Scalar.serialize(schema_node, value)
%Type.Enum{} = schema_node ->
Type.Enum.serialize(schema_node, value)
end
{value, errors}
end
# Object
defp data(%{fields: fields}, errors), do: field_data(fields, errors)
# List
defp data(%{values: values}, errors), do: list_data(values, errors)
defp list_data(fields, errors, acc \\ [])
defp list_data([], errors, acc), do: {:lists.reverse(acc), errors}
defp list_data([%{errors: errs} = field | fields], errors, acc) do
{value, errors} = data(field, errors)
list_data(fields, errs ++ errors, [value | acc])
end
defp field_data(fields, errors, acc \\ [])
defp field_data([], errors, acc), do: {Map.new(acc), errors}
defp field_data([%Absinthe.Resolution{} = res | _], _errors, _acc) do
raise """
Found unresolved resolution struct!
You probably forgot to run the resolution phase again.
#{inspect res}
"""
end
defp field_data([field | fields], errors, acc) do
{value, errors} = data(field, errors)
field_data(fields, errors, [{field_name(field.emitter), value} | acc])
end
defp field_name(%{alias: nil, name: name}), do: name
defp field_name(%{alias: name}), do: name
defp field_name(%{name: name}), do: name
defp format_error(%Phase.Error{locations: []} = error) do
error_object = %{message: error.message}
Map.merge(error.extra, error_object)
end
defp format_error(%Phase.Error{} = error) do
error_object = %{
message: error.message,
locations: Enum.flat_map(error.locations, &format_location/1),
}
error_object = case error.path do
[] -> error_object
path -> Map.put(error_object, :path, path)
end
Map.merge(Map.new(error.extra), error_object)
end
defp format_location(%{line: line, column: col}) do
[%{line: line || 0, column: col || 0}]
end
defp format_location(_), do: []
end
| deps/absinthe/lib/absinthe/phase/document/result.ex | 0.830216 | 0.519826 | result.ex | starcoder |
defmodule Explorer.Shared do
# A collection of **private** helpers shared in Explorer.
@moduledoc false
def backend_from_options!(opts) do
case Keyword.fetch(opts, :backend) do
{:ok, backend} when is_atom(backend) ->
backend
{:ok, other} ->
raise ArgumentError,
":backend must be an atom, got: #{inspect(other)}"
:error ->
nil
end
end
@doc """
Gets the implementation of a dataframe or series.
"""
def impl!(%{data: %struct{}}), do: struct
def impl!([%{data: %first_struct{}} | _] = dfs) when is_list(dfs),
do: Enum.reduce(dfs, first_struct, fn %{data: %struct{}}, acc -> pick_struct(acc, struct) end)
def impl!(%{data: %struct1{}}, %{data: %struct2{}}),
do: pick_struct(struct1, struct2)
@doc """
Gets the implementation of a list that may contain dataframes or series.
"""
def find_impl!(list) do
Enum.reduce(list, fn
%{data: %struct{}}, acc -> pick_struct(struct, acc)
_, acc -> acc
end)
end
defp pick_struct(struct, struct), do: struct
defp pick_struct(struct1, struct2) do
raise "cannot invoke Explorer function because it relies on two incompatible implementations: " <>
"#{inspect(struct1)} and #{inspect(struct2)}. You may need to call Explorer.backend_transfer/1 " <>
"(or Explorer.backend_copy/1) on one or both of them to transfer them to a common implementation"
end
@doc """
Gets the `dtype` of a list.
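For example, a list mixing integers and floats is inferred as `:numeric`:

    check_types([1, 2.0])
    #=> {:ok, :numeric}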
"""
def check_types(list) do
type =
Enum.reduce_while(list, nil, fn el, type ->
new_type = type(el, type) || type
cond do
new_type == :numeric and type in [:float, :integer] ->
{:cont, new_type}
new_type != type and !is_nil(type) ->
{:halt,
{:error,
"cannot make a series from mismatched types - the value #{inspect(el)} does not match inferred dtype #{type}"}}
true ->
{:cont, new_type}
end
end)
case type do
nil -> {:ok, :float}
{:error, _} = error -> error
valid -> {:ok, valid}
end
end
@doc """
Gets the `dtype` of a list or raise error if not possible.
"""
def check_types!(list) do
case check_types(list) do
{:ok, dtype} -> dtype
{:error, error} -> raise ArgumentError, error
end
end
defp type(item, type) when is_integer(item) and type == :float, do: :numeric
defp type(item, type) when is_float(item) and type == :integer, do: :numeric
defp type(item, type) when type == :numeric and (is_integer(item) or is_float(item)),
do: :numeric
defp type(item, _type) when is_integer(item), do: :integer
defp type(item, _type) when is_float(item), do: :float
defp type(item, _type) when is_boolean(item), do: :boolean
defp type(item, _type) when is_binary(item), do: :string
defp type(%Date{} = _item, _type), do: :date
defp type(%NaiveDateTime{} = _item, _type), do: :datetime
defp type(item, _type) when is_nil(item), do: nil
defp type(item, _type), do: raise("Unsupported datatype: #{inspect(item)}")
@doc """
Casts lists of mixed numeric types (float and int) to float, e.g. `[1, 2.0]` becomes `[1.0, 2.0]`.
"""
def cast_numerics(list, type) when type == :numeric do
data =
Enum.map(list, fn
nil -> nil
item -> item / 1
end)
{data, :float}
end
def cast_numerics(list, type), do: {list, type}
end
| lib/explorer/shared.ex | 0.834339 | 0.494812 | shared.ex | starcoder |
defmodule Plug.Router do
@moduledoc ~S"""
A DSL to define a routing algorithm that works with Plug.
It provides a set of macros to generate routes. For example:
defmodule AppRouter do
use Plug.Router
plug :match
plug :dispatch
get "/hello" do
send_resp(conn, 200, "world")
end
match _ do
send_resp(conn, 404, "oops")
end
end
Each route receives a `conn` variable containing a `Plug.Conn`
struct and it needs to return a connection, as per the Plug spec.
It is recommended to define a catch-all `match`, as in the example
above; otherwise routing fails with a function clause error.
The router is itself a plug, which means it can be invoked as:
AppRouter.call(conn, AppRouter.init([]))
Each `Plug.Router` has a plug pipeline, defined by `Plug.Builder`,
and by default it requires two plugs: `:match` and `:dispatch`.
`:match` is responsible for finding a matching route which is
then forwarded to `:dispatch`. This means users can easily hook
into the router mechanism and add behaviour before match, before
dispatch, or after both. All of the options given to `use Plug.Router`
are forwarded to `Plug.Builder`. See the `Plug.Builder` module
for more information on the `plug` macro and on the available options.
## Routes
get "/hello" do
send_resp(conn, 200, "world")
end
In the example above, a request will only match if it is a `GET`
request and the route is "/hello". The supported HTTP methods are
`get`, `post`, `put`, `patch`, `delete` and `options`.
A route can also specify parameters which will then be available
in the function body:
get "/hello/:name" do
send_resp(conn, 200, "hello #{name}")
end
The `:name` parameter will also be available in the function body as
`conn.params["name"]` and `conn.path_params["name"]`.
Routes allow for globbing which will match the remaining parts
of a route and can be available as a parameter in the function
body. Also note that a glob can't be followed by other segments:
get "/hello/*_rest" do
send_resp(conn, 200, "matches all routes starting with /hello")
end
get "/hello/*glob" do
send_resp(conn, 200, "route after /hello: #{inspect glob}")
end
Finally, a general `match` function is also supported:
match "/hello" do
send_resp(conn, 200, "world")
end
A `match` will match any route regardless of the HTTP method.
Check `match/3` for more information on how route compilation
works and a list of supported options.
## Parameter Parsing
Handling request data can be done through the
[`Plug.Parsers`](https://hexdocs.pm/plug/Plug.Parsers.html#content) plug. It
provides support for parsing URL-encoded, form-data, and JSON data as well as
providing a behaviour that other parsers can adopt.
Here is an example of how `Plug.Parsers` can be used in a `Plug.Router` router to
parse the JSON-encoded body of a POST request:
defmodule AppRouter do
use Plug.Router
plug :match
plug Plug.Parsers, parsers: [:json],
pass: ["application/json"],
json_decoder: Jason
plug :dispatch
post "/hello" do
IO.inspect conn.body_params # Prints JSON POST body
send_resp(conn, 200, "Success!")
end
end
It is important that `Plug.Parsers` is placed before the `:dispatch` plug in
the pipeline, otherwise the matched clause route will not receive the parsed
body in its `Plug.Conn` argument when dispatched.
`Plug.Parsers` can also be plugged between `:match` and `:dispatch` (like in
the example above): this means that `Plug.Parsers` will run only if there is a
matching route. This can be useful to perform actions such as authentication
*before* parsing the body, which should only be parsed if a route matches
afterwards.
## Error handling
In case something goes wrong in a request, the router by default
will crash, without returning any response to the client. This
behaviour can be configured in two ways, by using two different
modules:
* `Plug.ErrorHandler` - allows the developer to customize exactly
which page is sent to the client via the `handle_errors/2` function;
* `Plug.Debugger` - automatically shows debugging and request information
about the failure. This module is recommended to be used only in a
development environment.
Here is an example of how both modules could be used in an application:
defmodule AppRouter do
use Plug.Router
if Mix.env == :dev do
use Plug.Debugger
end
use Plug.ErrorHandler
plug :match
plug :dispatch
get "/hello" do
send_resp(conn, 200, "world")
end
defp handle_errors(conn, %{kind: _kind, reason: _reason, stack: _stack}) do
send_resp(conn, conn.status, "Something went wrong")
end
end
## Passing data between routes and plugs
It is also possible to assign data to the `Plug.Conn` that will
be available to any plug invoked after the `:match` plug.
This is very useful if you want a matched route to customize how
later plugs will behave.
You can use `:assigns` (which contains user data) or `:private`
(which contains library/framework data) for this. For example:
get "/hello", assigns: %{an_option: :a_value} do
send_resp(conn, 200, "world")
end
In the example above, `conn.assigns[:an_option]` will be available
to all plugs invoked after `:match`. Such plugs can read from
`conn.assigns` (or `conn.private`) to configure their behaviour
based on the matched route.
## Routes compilation
All routes are compiled to a match function that receives
three arguments: the method, the request path split on `/`
and the connection. Consider this example:
match "/foo/bar", via: :get do
send_resp(conn, 200, "hello world")
end
It is compiled to:
defp match("GET", ["foo", "bar"], conn) do
send_resp(conn, 200, "hello world")
end
This means guards can be given to `match`:
match "/foo/bar/:baz" when size(baz) <= 3, via: :get do
send_resp(conn, 200, "hello world")
end
After a match is found, the block given as `do/end` is stored
as a function in the connection. This function is then retrieved
and invoked in the `dispatch` plug.
## Routes options
Sometimes you may want to customize how a route behaves during dispatch.
This can be done by accessing the `opts` variable inside the route:
defmodule AppRouter do
use Plug.Router
plug :match
plug :dispatch, content: "hello world"
get "/hello" do
send_resp(conn, 200, opts[:content])
end
match _ do
send_resp(conn, 404, "oops")
end
end
This is particularly useful when used with `Plug.Builder.builder_opts/0`.
`builder_opts/0` allows us to pass options received when initializing
`AppRouter` to a specific plug, such as dispatch itself. So if instead of:
plug :dispatch, content: "hello world"
we do:
plug :dispatch, builder_opts()
now the content can be given when starting the router, like this:
Plug.Cowboy.http AppRouter, [content: "hello world"]
Or as part of a pipeline like this:
plug AppRouter, content: "hello world"
In a nutshell, `builder_opts()` allows us to pass the options given
when initializing the router on to the `dispatch` plug.
## Telemetry
The router emits the following telemetry events:
* `[:plug, :router_dispatch, :start]` - dispatched before dispatching to a matched route
* Measurement: `%{system_time: System.system_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, router: module}`
* `[:plug, :router_dispatch, :exception]` - dispatched after exceptions on dispatching a route
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, router: module}`
* `[:plug, :router_dispatch, :stop]` - dispatched after successfully dispatching a matched route
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, router: module}`
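For example, a handler for the `:stop` event could be attached like this (a
minimal sketch; the handler id and log message are arbitrary):
    :telemetry.attach(
      "log-router-dispatch-stop",
      [:plug, :router_dispatch, :stop],
      fn _event, %{duration: duration}, %{route: route}, _config ->
        ms = System.convert_time_unit(duration, :native, :millisecond)
        IO.puts("dispatched #{route} in #{ms}ms")
      end,
      nil
    )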
"""
@doc false
defmacro __using__(opts) do
quote location: :keep do
import Plug.Router
@before_compile Plug.Router
use Plug.Builder, unquote(opts)
@doc false
def match(conn, _opts) do
do_match(conn, conn.method, Plug.Router.Utils.decode_path_info!(conn), conn.host)
end
@doc false
def dispatch(%Plug.Conn{} = conn, opts) do
start = System.monotonic_time()
{path, fun} = Map.fetch!(conn.private, :plug_route)
metadata = %{conn: conn, route: path, router: __MODULE__}
:telemetry.execute(
[:plug, :router_dispatch, :start],
%{system_time: System.system_time()},
metadata
)
try do
fun.(conn, opts)
else
conn ->
duration = System.monotonic_time() - start
metadata = %{metadata | conn: conn}
:telemetry.execute([:plug, :router_dispatch, :stop], %{duration: duration}, metadata)
conn
catch
kind, reason ->
duration = System.monotonic_time() - start
metadata = %{kind: kind, reason: reason, stacktrace: __STACKTRACE__}
:telemetry.execute(
[:plug, :router_dispatch, :exception],
%{duration: duration},
metadata
)
Plug.Conn.WrapperError.reraise(conn, kind, reason, __STACKTRACE__)
end
end
defoverridable match: 2, dispatch: 2
end
end
@doc false
defmacro __before_compile__(env) do
unless Module.defines?(env.module, {:do_match, 4}) do
raise "no routes defined in module #{inspect(env.module)} using Plug.Router"
end
quote do
import Plug.Router, only: []
end
end
@doc """
Returns the path of the route that the request was matched to.
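For example:

    get "/hello/:name" do
      # match_path(conn) returns "/hello/:name" here
      send_resp(conn, 200, "world")
    end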
"""
@spec match_path(Plug.Conn.t()) :: String.t()
def match_path(%Plug.Conn{} = conn) do
{path, _fun} = Map.fetch!(conn.private, :plug_route)
path
end
## Match
@doc """
Main API to define routes.
It accepts an expression representing the path and many options
allowing the match to be configured.
The route can dispatch either to a function body or a Plug module.
## Examples
match "/foo/bar", via: :get do
send_resp(conn, 200, "hello world")
end
match "/baz", to: MyPlug, init_opts: [an_option: :a_value]
## Options
`match/3` and the other route macros accept the following options:
* `:host` - the host which the route should match. Defaults to `nil`,
meaning no host match, but can be a string like "example.com" or a
string ending with ".", like "subdomain." for a subdomain match.
* `:private` - assigns values to `conn.private` for use in the match
* `:assigns` - assigns values to `conn.assigns` for use in the match
* `:via` - matches the route against some specific HTTP method(s) specified
as an atom, like `:get` or `:put`, or a list, like `[:get, :post]`.
* `:do` - contains the implementation to be invoked in case
the route matches.
* `:to` - a Plug that will be called in case the route matches.
* `:init_opts` - the options for the target Plug given by `:to`.
A route should specify only one of `:do` or `:to` options.
"""
defmacro match(path, options, contents \\ []) do
compile(nil, path, options, contents)
end
@doc """
Dispatches to the path only if the request is a GET request.
See `match/3` for more examples.
"""
defmacro get(path, options, contents \\ []) do
compile(:get, path, options, contents)
end
@doc """
Dispatches to the path only if the request is a POST request.
See `match/3` for more examples.
"""
defmacro post(path, options, contents \\ []) do
compile(:post, path, options, contents)
end
@doc """
Dispatches to the path only if the request is a PUT request.
See `match/3` for more examples.
"""
defmacro put(path, options, contents \\ []) do
compile(:put, path, options, contents)
end
@doc """
Dispatches to the path only if the request is a PATCH request.
See `match/3` for more examples.
"""
defmacro patch(path, options, contents \\ []) do
compile(:patch, path, options, contents)
end
@doc """
Dispatches to the path only if the request is a DELETE request.
See `match/3` for more examples.
"""
defmacro delete(path, options, contents \\ []) do
compile(:delete, path, options, contents)
end
@doc """
Dispatches to the path only if the request is an OPTIONS request.
See `match/3` for more examples.
"""
defmacro options(path, options, contents \\ []) do
compile(:options, path, options, contents)
end
@doc """
Forwards requests to another Plug. The `path_info` of the forwarded
connection will exclude the portion of the path specified in the
call to `forward`. If the path contains any parameters, those will
be available in the target Plug in `conn.params` and `conn.path_params`.
## Options
`forward` accepts the following options:
* `:to` - a Plug the requests will be forwarded to.
* `:init_opts` - the options for the target Plug.
* `:host` - a string representing the host or subdomain, exactly like in
`match/3`.
* `:private` - values for `conn.private`, exactly like in `match/3`.
* `:assigns` - values for `conn.assigns`, exactly like in `match/3`.
If `:init_opts` is undefined, then all remaining options are passed
to the target plug.
## Examples
forward "/users", to: UserRouter
Assuming the above code, a request to `/users/sign_in` will be forwarded to
the `UserRouter` plug, which will receive what it will see as a request to
`/sign_in`.
forward "/foo/:bar/qux", to: FooPlug
Here, a request to `/foo/BAZ/qux` will be forwarded to the `FooPlug`
plug, which will receive what it will see as a request to `/`,
and `conn.params["bar"]` will be set to `"BAZ"`.
Some other examples:
forward "/foo/bar", to: :foo_bar_plug, host: "foobar."
forward "/baz", to: BazPlug, init_opts: [plug_specific_option: true]
"""
defmacro forward(path, options) when is_binary(path) do
quote bind_quoted: [path: path, options: options] do
{target, options} = Keyword.pop(options, :to)
{options, plug_options} = Keyword.split(options, [:host, :private, :assigns])
plug_options = Keyword.get(plug_options, :init_opts, plug_options)
if is_nil(target) or !is_atom(target) do
raise ArgumentError, message: "expected :to to be an alias or an atom"
end
{target, target_opts} =
case Atom.to_string(target) do
"Elixir." <> _ -> {target, target.init(plug_options)}
_ -> {{__MODULE__, target}, plug_options}
end
@plug_forward_target target
@plug_forward_opts target_opts
# Delegate the matching to the match/3 macro along with the options
# specified by Keyword.split/2.
match path <> "/*glob", options do
Plug.forward(
var!(conn),
var!(glob),
@plug_forward_target,
@plug_forward_opts
)
end
end
end
## Match Helpers
@doc false
def __route__(method, path, guards, options) do
{method, guards} = build_methods(List.wrap(method || options[:via]), guards)
{vars, match} = Plug.Router.Utils.build_path_match(path)
params_match = Plug.Router.Utils.build_path_params_match(vars)
private = extract_merger(options, :private)
assigns = extract_merger(options, :assigns)
host_match = Plug.Router.Utils.build_host_match(options[:host])
{quote(do: conn), method, match, params_match, host_match, guards, private, assigns}
end
@doc false
def __put_route__(conn, path, fun) do
Plug.Conn.put_private(conn, :plug_route, {append_match_path(conn, path), fun})
end
defp append_match_path(%Plug.Conn{private: %{plug_route: {base_path, _}}}, path) do
base_path <> path
end
defp append_match_path(%Plug.Conn{}, path) do
path
end
# Entry point for both forward and match that is actually
# responsible to compile the route.
defp compile(method, expr, options, contents) do
{body, options} =
cond do
Keyword.has_key?(contents, :do) ->
{contents[:do], options}
Keyword.has_key?(options, :do) ->
Keyword.pop(options, :do)
options[:to] ->
{to, options} = Keyword.pop(options, :to)
{init_opts, options} = Keyword.pop(options, :init_opts, [])
body =
quote do
@plug_router_to.call(var!(conn), @plug_router_init)
end
options =
quote do
to = unquote(to)
@plug_router_to to
@plug_router_init to.init(unquote(init_opts))
unquote(options)
end
{body, options}
true ->
raise ArgumentError, message: "expected one of :to or :do to be given as option"
end
{path, guards} = extract_path_and_guards(expr)
quote bind_quoted: [
method: method,
path: path,
options: options,
guards: Macro.escape(guards, unquote: true),
body: Macro.escape(body, unquote: true)
] do
route = Plug.Router.__route__(method, path, guards, options)
{conn, method, match, params, host, guards, private, assigns} = route
defp do_match(unquote(conn), unquote(method), unquote(match), unquote(host))
when unquote(guards) do
unquote(private)
unquote(assigns)
merge_params = fn
%Plug.Conn.Unfetched{} -> unquote({:%{}, [], params})
fetched -> Map.merge(fetched, unquote({:%{}, [], params}))
end
conn = update_in(unquote(conn).params, merge_params)
conn = update_in(conn.path_params, merge_params)
Plug.Router.__put_route__(conn, unquote(path), fn var!(conn), var!(opts) ->
_ = var!(opts)
unquote(body)
end)
end
end
end
defp extract_merger(options, key) when is_list(options) do
if option = Keyword.get(options, key) do
quote do
conn = update_in(conn.unquote(key), &Map.merge(&1, unquote(Macro.escape(option))))
end
end
end
# Convert the verbs given with `:via` into a variable and guard set that can
# be added to the dispatch clause.
defp build_methods([], guards) do
{quote(do: _), guards}
end
defp build_methods([method], guards) do
{Plug.Router.Utils.normalize_method(method), guards}
end
defp build_methods(methods, guards) do
methods = Enum.map(methods, &Plug.Router.Utils.normalize_method(&1))
var = quote do: method
guards = join_guards(quote(do: unquote(var) in unquote(methods)), guards)
{var, guards}
end
defp join_guards(fst, true), do: fst
defp join_guards(fst, snd), do: quote(do: unquote(fst) and unquote(snd))
# Extract the path and guards from the path.
defp extract_path_and_guards({:when, _, [path, guards]}), do: {extract_path(path), guards}
defp extract_path_and_guards(path), do: {extract_path(path), true}
defp extract_path({:_, _, var}) when is_atom(var), do: "/*_path"
defp extract_path(path), do: path
end
| lib/plug/router.ex | 0.895611 | 0.567757 | router.ex | starcoder |
defmodule BitcoinSimulator.BitcoinCore.Wallet do
defmodule Wallet do
defstruct [
spent_addresses: [],
unspent_addresses: Map.new(),
unspent_balance: 0.0
]
end
defmodule Address do
defstruct [
public_key: nil,
private_Key: nil,
address: nil,
value: 0.0,
outpoint: %{
hash: nil,
index: 0
}
]
end
# APIs
def get_new_wallet, do: %Wallet{}
def get_new_address(wallet) do
{public_key, private_key} = :crypto.generate_key(:ecdh, :secp256k1)
addr = :crypto.hash(:ripemd160, :crypto.hash(:sha256, public_key))
address =
%Address{
public_key: public_key,
private_Key: private_key,
address: addr
}
new_wallet = %{wallet | unspent_addresses: Map.put(wallet.unspent_addresses, address.address, address)}
{address, new_wallet}
end
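# Greedily collects the lowest-value unspent addresses (skipping zero-value
# ones) until their combined value reaches `target_value`; returns the
# selected addresses together with their sum.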
def combine_unspent_addresses(wallet, target_value) do
sorted_addresses = wallet.unspent_addresses |> Map.values() |> sort_addresses_by_value()
combine_address_helper(sorted_addresses, target_value, [], 0.0, 0)
end
def spend_address(wallet, address) do
spent_addr = wallet.unspent_addresses[address]
%{wallet |
spent_addresses: wallet.spent_addresses ++ [spent_addr],
unspent_addresses: Map.delete(wallet.unspent_addresses, address),
unspent_balance: wallet.unspent_balance - spent_addr.value
}
end
def import_address(wallet, address) do
%{wallet |
unspent_addresses: Map.put(wallet.unspent_addresses, address.address, address),
unspent_balance: wallet.unspent_balance + address.value
}
end
# Aux
def update_address_detail(details, wallet) do
Enum.reduce(details |> Map.to_list(), wallet, fn(x, acc) ->
addr = x |> elem(0)
addr_value = x |> elem(1) |> elem(0)
updated = %{acc.unspent_addresses[addr] |
value: addr_value,
outpoint: x |> elem(1) |> elem(1)
}
%{acc |
unspent_addresses: Map.put(acc.unspent_addresses, addr, updated),
unspent_balance: acc.unspent_balance + addr_value
}
end)
end
defp sort_addresses_by_value(addresses), do: Enum.sort(addresses, fn(a, b) -> a.value < b.value end)
defp combine_address_helper(addresses, target_value, result, result_sum, current_index) do
if result_sum >= target_value do
{result, result_sum}
else
current = Enum.at(addresses, current_index)
unless current.value == 0.0 do
combine_address_helper(addresses, target_value, [current | result], result_sum + current.value, current_index + 1)
else
combine_address_helper(addresses, target_value, result, result_sum, current_index + 1)
end
end
end
end
| lib/bitcoin_simulator/bitcoin_core/wallet.ex | 0.661923 | 0.512144 | wallet.ex | starcoder |
defmodule Adoptoposs.Search do
@moduledoc """
The search context provides functions for finding projects by language tag
and name.
"""
import Ecto.Query, warn: false
alias Adoptoposs.Repo
alias Adoptoposs.Submissions.Project
alias Adoptoposs.Tags.Tag
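# Splits the query into terms and returns published projects whose name,
# owner, or description matches every term. Illustrative call:
#
#     find_projects("elixir web", offset: 0, limit: 10)
#     #=> %{results: [%Project{...}, ...], total_count: 2}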
def find_projects(query, offset: offset, limit: limit) when is_binary(query) do
find_projects(query, offset: offset, limit: limit, filters: [])
end
def find_projects(query, offset: offset, limit: limit, filters: filters)
when is_binary(query) do
# Split on whitespace, ".", "-" and "_"; the "-" is escaped so it is not
# read as a character range inside the class.
terms = Regex.split(~r/[\s.\-_]/, String.downcase(query))
base_query = projects_base_query(terms, filters)
results =
base_query
|> offset(^offset)
|> limit(^limit)
|> order_by(desc: :updated_at)
|> order_by([:name, :repo_owner])
|> preload([:user, :language, :interests])
|> Repo.all()
%{results: results, total_count: base_query |> Repo.aggregate(:count)}
end
def find_projects(_query, _opts), do: %{results: [], total_count: 0}
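  # Usage sketch (hypothetical query), assuming a seeded repo:
  #
  #     %{results: projects, total_count: total} =
  #       Adoptoposs.Search.find_projects("phoenix web", offset: 0, limit: 10)
  #     tags = Adoptoposs.Search.find_tags("eli", offset: 0, limit: 5)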
defp projects_base_query(terms, []) do
Project
|> join(:left, [p], t in assoc(p, :language))
|> where(status: ^:published)
|> where_all_terms_match(terms, &matches_project/2)
end
defp projects_base_query(terms, filters) do
Project
|> join(:left, [p], t in assoc(p, :language))
|> where(status: ^:published)
|> where_all_terms_match(terms, &matches_project/2)
|> where([p], p.language_id in ^filters)
end
def find_tags(query, offset: offset, limit: limit) when is_binary(query) do
    terms = Regex.split(~r/[\s._-]/, String.downcase(query))
Tag
|> where(type: ^Tag.Language.type())
|> where_all_terms_match(terms, &matches_tag/2)
|> offset(^offset)
|> limit(^limit)
|> order_by(:name)
|> Repo.all()
end
def find_tags(_query, _opts), do: []
defp where_all_terms_match(query, terms, fun) do
Enum.reduce(terms, query, fun)
end
def matches_project(term, query) do
from [project, tag] in query,
where:
ilike(project.name, ^"%#{term}%") or
ilike(project.repo_owner, ^"%#{term}%") or
ilike(project.description, ^"%#{term}%")
end
def matches_tag(term, query) do
from project in query,
where: ilike(project.name, ^"%#{term}%")
end
end
|
lib/adoptoposs/search.ex
| 0.622918
| 0.429848
|
search.ex
|
starcoder
|
defmodule Pathex.Builder.Setter do
@moduledoc """
Module with common functions for updaters
"""
import Pathex.Common, only: [list_match: 2, pin: 1]
# Helpers
# Non variable
def create_setter({:map, key}, tail) do
pinned = pin(key)
quote do
%{unquote(pinned) => value} = map ->
%{map | unquote(key) => value |> unquote(tail)}
end
end
def create_setter({:list, index}, tail) when is_integer(index) do
x = {:x, [], Elixir}
match = list_match(index, x)
quote do
unquote(match) = list ->
List.replace_at(list, unquote(index), unquote(x) |> unquote(tail))
end
end
def create_setter({:tuple, index}, tail) when is_integer(index) do
quote do
t when is_tuple(t) and tuple_size(t) > unquote(index) ->
val =
elem(t, unquote(index))
|> unquote(tail)
put_elem(t, unquote(index), val)
end
end
def create_setter({:keyword, key}, tail) when is_atom(key) do
quote do
[{_, _} | _] = keyword ->
key = unquote(key)
if Keyword.has_key?(keyword, key) do
Keyword.update!(keyword, key, fn val ->
val |> unquote(tail)
end)
else
throw(:path_not_found)
end
end
end
# Variable
def create_setter({:list, {_, _, _} = index}, tail) do
quote do
l when is_list(l) ->
List.update_at(l, unquote(index), fn x -> x |> unquote(tail) end)
end
end
def create_setter({:tuple, {_, _, _} = index}, tail) do
quote do
t
when is_tuple(t) and is_integer(unquote(index)) and
unquote(index) >= 0 and
tuple_size(t) > unquote(index) ->
val =
elem(t, unquote(index))
|> unquote(tail)
put_elem(t, unquote(index), val)
end
end
def create_setter({:keyword, {_, _, _} = key}, tail) do
quote do
[{_, _} | _] = keyword when is_atom(unquote(key)) ->
Keyword.update!(keyword, unquote(key), fn val ->
val |> unquote(tail)
end)
end
end
def fallback do
quote do
_ -> throw(:path_not_found)
end
end
def wrap_to_code(code, [arg1 | _] = args) do
code =
quote do
try do
{:ok, unquote(arg1) |> unquote(code)}
catch
:path_not_found -> :error
end
end
%Pathex.Builder.Code{code: code, vars: args}
end
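  # Usage sketch (hypothetical quoted fragments), assuming the helpers above:
  #
  #     arg = Macro.var(:input, __MODULE__)
  #     tail = quote(do: Map.put(:a, 1))
  #     %Pathex.Builder.Code{vars: [_]} = Pathex.Builder.Setter.wrap_to_code(tail, [arg])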
end
|
lib/pathex/builder/setter.ex
| 0.589126
| 0.586227
|
setter.ex
|
starcoder
|
defmodule Sanbase.MapUtils do
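  @doc ~s"""
  Replace the value under `key` with the result of `value_fun.()`, but only
  when `key` is already present in `map`. The function is not invoked otherwise.
  #### Examples:
      iex> Sanbase.MapUtils.replace_lazy(%{a: 1}, :a, fn -> 2 end)
      %{a: 2}
      iex> Sanbase.MapUtils.replace_lazy(%{a: 1}, :b, fn -> 2 end)
      %{a: 1}
  """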
  def replace_lazy(map, key, value_fun) do
    if Map.has_key?(map, key) do
      Map.put(map, key, value_fun.())
    else
      map
    end
  end
@doc ~s"""
  Return a subset of the `left` map containing only the keys that are also present in `right`.
#### Examples:
iex> Sanbase.MapUtils.drop_diff_keys(%{}, %{})
%{}
iex> Sanbase.MapUtils.drop_diff_keys(%{a: 1}, %{a: 1})
%{a: 1}
iex> Sanbase.MapUtils.drop_diff_keys(%{a: 1, b: 2}, %{a: 1})
%{a: 1}
iex> Sanbase.MapUtils.drop_diff_keys(%{a: 1}, %{a: "ASDASDASDA"})
%{a: 1}
iex> Sanbase.MapUtils.drop_diff_keys(%{a: 1, d: 555, e: "string"}, %{b: 2, c: 3, f: 19})
%{}
"""
def drop_diff_keys(left, right) do
Map.drop(left, Map.keys(left) -- Map.keys(right))
end
@doc ~s"""
Find where a given name-value pair is located in a deeply nested list-map data
structures.
#### Examples:
iex> %{a: %{b: %{"name" => "ivan"}}} |> Sanbase.MapUtils.find_pair_path("name", "ivan")
[[:a, :b, "name"]]
iex> %{a: %{b: [%{"name" => "ivan"}]}} |> Sanbase.MapUtils.find_pair_path("name", "ivan")
[[:a, :b, {:at, 0}, "name"]]
iex> %{a: [%{b: [%{"name" => "ivan"}]}]} |> Sanbase.MapUtils.find_pair_path("name", "ivan")
[[:a, {:at, 0}, :b, {:at, 0}, "name"]]
iex>%{
...> "foo" => %{"last" => [%{b: [%{"name" => "ivan"}]}]},
...> a: %{"some" => %{a: 2, c: 12}, "key" => [1, 2, 3, 4, 5, 6]}
...> } |> Sanbase.MapUtils.find_pair_path("name", "ivan")
[["foo", "last", {:at, 0}, :b, {:at, 0}, "name"]]
iex> %{a: %{b: [%{"name" => ""}]}} |> Sanbase.MapUtils.find_pair_path("name", "not_existing")
[]
iex> %{a: %{b: [%{"name" => ""}]}} |> Sanbase.MapUtils.find_pair_path("not_existing", "ivan")
[]
"""
def find_pair_path(map, key, value) when is_map(map) do
do_find_pair_path(map, key, value, [])
|> Enum.map(&(&1 |> List.flatten() |> Enum.reverse()))
|> Enum.reject(&(&1 == nil || &1 == []))
end
@doc ~s"""
Atomize the string keys of a map or list of maps.
#### Examples:
iex> %{"a" => %{"b" => %{"name" => "ivan"}}} |> Sanbase.MapUtils.atomize_keys()
%{a: %{b: %{name: "ivan"}}}
iex> [%{"a" => 1}, %{"b" => [%{"c" => %{"d" => 12}}]}] |> Sanbase.MapUtils.atomize_keys()
[%{a: 1}, %{b: [%{c: %{d: 12}}]}]
iex> %{} |> Sanbase.MapUtils.atomize_keys()
%{}
iex> [%{}, %{}] |> Sanbase.MapUtils.atomize_keys()
[%{}, %{}]
iex> %{already: %{atom: :atom}} |> Sanbase.MapUtils.atomize_keys()
%{already: %{atom: :atom}}
"""
def atomize_keys(list) when is_list(list) do
Enum.map(list, fn elem -> atomize_keys(elem) end)
end
def atomize_keys(map) when is_map(map) and not is_struct(map) do
Enum.reduce(map, %{}, fn {key, val}, acc ->
Map.put(acc, atomize(key), atomize_keys(val))
end)
end
def atomize_keys(data), do: data
# Private functions
@compile {:inline, atomize: 1}
defp atomize(value) when is_atom(value) or is_binary(value),
do: value |> Inflex.underscore() |> String.to_existing_atom()
defp do_find_pair_path(map, key, value, path) when is_map(map) do
keys = Map.keys(map)
if key in keys and Map.get(map, key) == value do
[key | path]
else
Enum.map(keys, fn subkey ->
Map.get(map, subkey)
|> do_find_pair_path(key, value, [subkey | path])
end)
end
end
defp do_find_pair_path(list, key, value, path) when is_list(list) do
Enum.with_index(list)
|> Enum.map(fn {elem, index} ->
do_find_pair_path(elem, key, value, [{:at, index} | path])
end)
end
defp do_find_pair_path(_, _, _, _), do: []
end
|
lib/map_utils.ex
| 0.782829
| 0.570391
|
map_utils.ex
|
starcoder
|
defmodule UserCounter.Impl do
@moduledoc "'Private' implementation for UserCounter"
@regions [:north_america, :south_america, :africa, :europe, :asia, :australia]
@default_regions_to_users Map.new(@regions, fn region -> {region, MapSet.new()} end)
defstruct region_to_users: @default_regions_to_users, heartbeats: Map.new()
@doc """
Associates the user with the given region. Removes them from whatever region they were
previously associated with, if any.
"""
def put(%{region_to_users: regions_to_users, heartbeats: heartbeats}, user_id, region)
when region in @regions do
other_regions = Enum.filter(@regions, &(&1 != region))
{_, added_to_new_region} =
regions_to_users
|> remove_from_regions(user_id, other_regions)
|> Map.get_and_update!(region, fn users ->
{users, MapSet.put(users, user_id)}
end)
updated_heartbeats = Map.put(heartbeats, user_id, DateTime.utc_now())
%__MODULE__{region_to_users: added_to_new_region, heartbeats: updated_heartbeats}
end
@doc """
Removes a user from the data set, if they were present at all.
"""
def drop(%{region_to_users: regions_to_users, heartbeats: heartbeats}, user_id) do
updated_regions = remove_from_regions(regions_to_users, user_id, @regions)
updated_heartbeats = Map.delete(heartbeats, user_id)
%__MODULE__{region_to_users: updated_regions, heartbeats: updated_heartbeats}
end
@doc """
Removes all users that were most recently "put" before the specified datetime.
Users without a "heartbeat" expire and are not counted toward future totals.
"""
def drop_older_than(%{region_to_users: regions_to_users, heartbeats: heartbeats}, cutoff_datetime) do
{expired_heartbeats, live_heartbeats} =
Enum.split_with(heartbeats, fn {_user_id, last_heartbeat} ->
DateTime.compare(cutoff_datetime, last_heartbeat) == :gt
end)
old_user_ids = Enum.map(expired_heartbeats, &elem(&1, 0))
updated_regions_to_users =
Enum.reduce(old_user_ids, regions_to_users, fn old_user_id, acc ->
remove_from_regions(acc, old_user_id, @regions)
end)
%__MODULE__{region_to_users: updated_regions_to_users, heartbeats: Map.new(live_heartbeats)}
end
@doc """
The number of users logged in to the current region.
"""
def count(%{region_to_users: regions_to_users}, region) when region in @regions do
MapSet.size(regions_to_users[region])
end
@doc """
The total number of users logged in to any region.
"""
def count(%{heartbeats: heartbeats}) do
map_size(heartbeats)
end
def empty?(%{region_to_users: regions_to_users}, region) when region in @regions do
Enum.empty?(regions_to_users[region])
end
def empty?(%{heartbeats: heartbeats}) do
Enum.empty?(heartbeats)
end
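  # Usage sketch (hypothetical user ids):
  #
  #     counter = %UserCounter.Impl{}
  #     counter = UserCounter.Impl.put(counter, "u1", :europe)
  #     counter = UserCounter.Impl.put(counter, "u2", :asia)
  #     2 = UserCounter.Impl.count(counter)
  #     1 = UserCounter.Impl.count(counter, :asia)
  #     counter = UserCounter.Impl.drop(counter, "u2")
  #     true = UserCounter.Impl.empty?(counter, :asia)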
defp remove_from_regions(regions_to_users, user_id, regions) do
Enum.reduce(regions, regions_to_users, fn remove_from_region, acc ->
{_, updated_region_to_users} =
Map.get_and_update!(acc, remove_from_region, fn users ->
{users, MapSet.delete(users, user_id)}
end)
updated_region_to_users
end)
end
end
|
lib/gen_server_example/user_counter_impl.ex
| 0.764935
| 0.430686
|
user_counter_impl.ex
|
starcoder
|
defmodule EctoSearcher.Sorter do
@moduledoc """
  Module for sorting Ecto queries
## Usage
```elixir
sortable_fields = [:name, :description]
sorted_query = EctoSearcher.Sorter.sort(SomeEctoModel, %{"field" => "name", "order" => "desc"}, sortable_fields)
MySuperApp.Repo.all(sorted_query)
```
"""
@allowed_order_values ["asc", "desc"]
@type sort_params() :: %{String.t() => String.t()}
@type sortable_fields() :: [atom()]
require Ecto.Query
alias Ecto.Query
alias EctoSearcher.Mapping.Default, as: DefaultMapping
alias EctoSearcher.Utils.Field
@doc """
Shortcut for `sort/5`
"""
@spec sort(Ecto.Queryable.t(), Ecto.Schema.t(), sort_params()) :: Ecto.Queryable.t()
def sort(base_query, schema, sort_params) do
sortable_fields = Field.searchable_fields(schema, DefaultMapping)
mapping = DefaultMapping
sort(base_query, schema, sort_params, mapping, sortable_fields)
end
@doc """
Builds sort query
`sort_params` should be a map with "field" and "order" like this:
```elixir
%{
"field" => "name",
"order" => "asc"
}
```
`mapping` should implement `EctoSearcher.Mapping` behavior. `EctoSearcher.Mapping.Default` provides some basics.
  `sortable_fields` is a list of fields (atoms) permitted for sorting. If not provided (or `nil`), the schema's searchable fields (per the `mapping`) are used:
```elixir
[:name, :description]
```
"""
@spec sort(
Ecto.Queryable.t(),
Ecto.Schema.t(),
sort_params(),
module(),
sortable_fields() | nil
) :: Ecto.Queryable.t()
def sort(
base_query,
schema,
sort_params,
mapping,
sortable_fields \\ nil
)
when is_list(sortable_fields) or is_nil(sortable_fields) do
sortable_fields = sortable_fields || Field.searchable_fields(schema, mapping)
case sort_params do
%{"field" => field, "order" => order} ->
sorted_query(base_query, field, order, schema, mapping, sortable_fields)
_ ->
base_query
end
end
defp sorted_query(base_query, field, order, schema, mapping, sortable_fields) do
sortable_field_names = Enum.map(sortable_fields, &to_string/1)
if field in sortable_field_names and order in @allowed_order_values do
field_atom = String.to_existing_atom(field)
field_query = Field.lookup(field_atom, schema, mapping)
order_by =
case order do
"asc" -> [asc: field_query]
"desc" -> [desc: field_query]
end
Query.from(base_query, order_by: ^order_by)
else
base_query
end
end
end
|
lib/ecto_searcher/sorter.ex
| 0.827166
| 0.670554
|
sorter.ex
|
starcoder
|
defmodule Grizzly.Packet.HeaderExtension do
@moduledoc """
Functions for working with the header extension
in a Z/IP Packet.
"""
alias Grizzly.Packet.HeaderExtension.{
ExpectedDelay,
BinaryParser,
InstallationAndMaintenanceGet,
InstallationAndMaintenanceReport,
EncapsulationFormatInfo
}
@type extension :: struct()
@opaque t :: [extension]
@doc """
Given a header extension, get the expected delay in seconds
"""
@spec get_expected_delay(t()) :: {:ok, ExpectedDelay.seconds()} | nil
def get_expected_delay(extensions) do
    extensions
    |> Enum.find(&match?(%ExpectedDelay{}, &1))
    |> maybe_get_expected_delay()
end
@doc """
Make an expected delay from seconds
"""
@spec expected_delay_from_seconds(ExpectedDelay.seconds()) :: ExpectedDelay.t()
def expected_delay_from_seconds(seconds) do
ExpectedDelay.new(seconds)
end
@doc """
Try to parse a binary string into `HeaderExtension.t()`
"""
@spec from_binary(binary()) :: t()
def from_binary(extensions) do
extensions
|> BinaryParser.from_binary()
|> BinaryParser.parse(&parse_extension/1)
end
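  # Parsing sketch (hypothetical binary): a single expected-delay extension
  # encoding 300 seconds, assuming `BinaryParser` yields one extension here.
  #
  #     extensions = Grizzly.Packet.HeaderExtension.from_binary(<<0x01, 0x03, 300::size(3)-unit(8)>>)
  #     {:ok, 300} = Grizzly.Packet.HeaderExtension.get_expected_delay(extensions)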
defp parse_extension(<<0x01, 0x03, seconds::integer-size(3)-unit(8), rest::binary>>) do
{ExpectedDelay.new(seconds), rest}
end
defp parse_extension(<<0x02, 0x00, rest::binary>>),
do: {InstallationAndMaintenanceGet.new(), rest}
defp parse_extension(<<0x03, length, rest::binary>> = report) do
<<_::binary-size(length), rest::binary>> = rest
{InstallationAndMaintenanceReport.from_binary(report), rest}
end
defp parse_extension(<<0x84, 0x02, security_to_security, crc16, rest::binary>>) do
security_to_security =
EncapsulationFormatInfo.security_to_security_from_byte(security_to_security)
crc16 = EncapsulationFormatInfo.crc16_from_byte(crc16)
{EncapsulationFormatInfo.new(security_to_security, crc16), rest}
end
defp parse_extension(<<0x05, 0x00, rest::binary>>) do
{:multicast_addressing, rest}
end
defp maybe_get_expected_delay(nil), do: nil
defp maybe_get_expected_delay(expected_delay) do
{:ok, ExpectedDelay.get_seconds(expected_delay)}
end
end
|
lib/grizzly/packet/header_extension.ex
| 0.832713
| 0.417628
|
header_extension.ex
|
starcoder
|
defmodule CrissCrossDHT.RoutingTable.Distance do
@moduledoc false
require Bitwise
@doc """
TODO
"""
def closest_nodes(nodes, target, n) do
closest_nodes(nodes, target)
|> Enum.slice(0..n)
end
def closest_nodes(nodes, target) do
Enum.sort(nodes, fn x, y ->
xor_cmp(x.hashed_id, y.hashed_id, target, &(&1 < &2))
end)
end
@doc """
This function gets two node ids, a target node id and a lambda function as an
argument. It compares the two node ids according to the XOR metric which is
closer to the target.
## Example
      iex> xor_cmp("A", "a", "F", &(&1 > &2))
false
"""
def xor_cmp("", "", "", func), do: func.(0, 0)
def xor_cmp(node_id_a, node_id_b, target, func) do
<<byte_a::8, rest_a::bitstring>> = node_id_a
<<byte_b::8, rest_b::bitstring>> = node_id_b
<<byte_target::8, rest_target::bitstring>> = target
if byte_a == byte_b do
xor_cmp(rest_a, rest_b, rest_target, func)
else
xor_a = Bitwise.bxor(byte_a, byte_target)
xor_b = Bitwise.bxor(byte_b, byte_target)
func.(xor_a, xor_b)
end
end
@doc """
This function takes two node ids as binary and returns the bucket
number in which the node_id belongs as an integer. It counts the
number of identical bits.
## Example
      iex> find_bucket(<<0b11110000>>, <<0b11111111>>)
4
"""
def find_bucket(node_id_a, node_id_b), do: find_bucket(node_id_a, node_id_b, 0)
def find_bucket("", "", bucket), do: bucket
def find_bucket(node_id_a, node_id_b, bucket) do
<<bit_a::1, rest_a::bitstring>> = node_id_a
<<bit_b::1, rest_b::bitstring>> = node_id_b
if bit_a == bit_b do
find_bucket(rest_a, rest_b, bucket + 1)
else
bucket
end
end
@doc """
  This function takes a number of bits and a node id as arguments and
  generates a new node id. It copies the given number of leading bits from
  the node id and generates the remaining bits randomly.
"""
def gen_node_id(nr_of_bits, node_id) do
nr_rest_bits = 32 * 8 - nr_of_bits
<<bits::size(nr_of_bits), _::size(nr_rest_bits)>> = node_id
<<rest::size(nr_rest_bits), _::size(nr_of_bits)>> = :crypto.strong_rand_bytes(32)
<<bits::size(nr_of_bits), rest::size(nr_rest_bits)>>
end
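  # Sketch (hypothetical id): keep the first 8 bits, randomize the remaining 248.
  #
  #     node_id = :crypto.strong_rand_bytes(32)
  #     <<prefix::8, _::binary>> = node_id
  #     <<^prefix::8, _::bitstring>> = gen_node_id(8, node_id)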
end
|
lib/criss_cross_dht/routing_table/distance.ex
| 0.577614
| 0.514034
|
distance.ex
|
starcoder
|
defmodule Rummage.Ecto do
@moduledoc """
  Rummage.Ecto is a lightweight but powerful framework that can be used to alter Ecto
queries with Search, Sort and Paginate operations.
It accomplishes the above operations by using `Hooks`, which are modules that
implement `Rummage.Ecto.Hook` behavior. Each operation: Search, Sort and Paginate
have their hooks defined in Rummage. By doing this, we have made rummage completely
configurable. For example, if you don't like one of the implementations of Rummage,
but like the other two, you can configure Rummage to not use it.
If you want to check a sample application that uses Rummage, please check
[this link](https://github.com/aditya7iyengar/rummage_ecto_example).
Usage:
```elixir
defmodule Rummage.Ecto.Category do
use Ecto.Schema
use Rummage.Ecto
schema "categories" do
field :name, :string
end
end
```
This allows you to do:
iex> rummage = %{search: %{name: %{assoc: [], search_type: :ilike, search_term: "field_!"}}}
iex> {queryable, rummage} = Rummage.Ecto.Category.rummageq(Rummage.Ecto.Category, rummage)
iex> queryable
#Ecto.Query<from c in subquery(from c in Rummage.Ecto.Category), where: ilike(c.name, ^"%field_!%")>
iex> rummage
%{search: %{name: %{assoc: [], search_expr: :where,
search_term: "field_!", search_type: :ilike}}}
  This also allows you to call `rummage/2` without a `queryable`, which defaults
to the module calling `rummage`, which is `Rummage.Ecto.Category` in this case:
iex> rummage = %{search: %{name: %{assoc: [], search_type: :ilike, search_term: "field_!"}}}
iex> {queryable, rummage} = Rummage.Ecto.Category.rummage(rummage)
iex> queryable
#Ecto.Query<from c in subquery(from c in Rummage.Ecto.Category), where: ilike(c.name, ^"%field_!%")>
iex> rummage
%{search: %{name: %{assoc: [], search_expr: :where,
search_term: "field_!", search_type: :ilike}}}
"""
alias Rummage.Ecto.Config, as: RConfig
@doc """
  This is the function that calls the `Rummage` `hooks`.
It is the entry-point to `Rummage.Ecto`.
This function takes in a `queryable`, a `rummage` map and an `opts` keyword.
Recognized `opts` keys are:
  * `repo`: If you haven't set up a `repo` at the config level or `__using__`
            level, this is a way of passing `repo` to `rummage`. If you have
            already configured your app to use a default `repo` and/or
            specified the `repo` at `__using__` level, this is a way of
            overriding those defaults.
  * `per_page`: If you haven't set up a `per_page` at the config level or `__using__`
            level, this is a way of passing `per_page` to `rummage`. If you have
            already configured your app to use a default `per_page` and/or
            specified the `per_page` at `__using__` level, this is a way of
            overriding those defaults.
  * `search`: If you haven't set up a `search` at the config level or `__using__`
            level, this is a way of passing `search` to `rummage`. If you have
            already configured your app to use a default `search` and/or
            specified the `search` at `__using__` level, this is a way of
            overriding those defaults. This can be used to replace the native
            `Rummage.Ecto.Hook.Search` with a custom hook.
  * `sort`: If you haven't set up a `sort` at the config level or `__using__`
            level, this is a way of passing `sort` to `rummage`. If you have
            already configured your app to use a default `sort` and/or
            specified the `sort` at `__using__` level, this is a way of
            overriding those defaults. This can be used to replace the native
            `Rummage.Ecto.Hook.Sort` with a custom hook.
  * `paginate`: If you haven't set up a `paginate` at the config level or `__using__`
            level, this is a way of passing `paginate` to `rummage`. If you have
            already configured your app to use a default `paginate` and/or
            specified the `paginate` at `__using__` level, this is a way of
            overriding those defaults. This can be used to replace the native
            `Rummage.Ecto.Hook.Paginate` with a custom hook.
## Examples
When no hook params are given, it just returns the queryable and the params:
iex> import Rummage.Ecto
iex> alias Rummage.Ecto.Product
iex> rummage = %{}
iex> {queryable, rummage} = rummage(Product, rummage)
iex> rummage
%{}
iex> queryable
Rummage.Ecto.Product
When `nil` hook module is given, it just returns the queryable and the params:
iex> import Rummage.Ecto
iex> alias Rummage.Ecto.Product
iex> rummage = %{paginate: %{page: 1}}
iex> {queryable, rummage} = rummage(Product, rummage, paginate: nil)
iex> rummage
%{paginate: %{page: 1}}
iex> queryable
Rummage.Ecto.Product
  When a hook param is given along with a hook module, it returns the
  `queryable` and the `params`:
iex> import Rummage.Ecto
iex> alias Rummage.Ecto.Product
iex> rummage = %{paginate: %{page: 1}}
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> opts = [paginate: Rummage.Ecto.Hook.Paginate, repo: repo]
iex> {queryable, rummage} = rummage(Product, rummage, opts)
iex> rummage
%{paginate: %{max_page: 0, page: 1, per_page: 10, total_count: 0}}
iex> queryable
#Ecto.Query<from p in Rummage.Ecto.Product, limit: ^10, offset: ^0>
  When a hook is given with corresponding params, it updates and returns the
`queryable` and the `params` accordingly:
iex> import Rummage.Ecto
iex> alias Rummage.Ecto.Product
iex> rummage = %{paginate: %{per_page: 1, page: 1}}
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Product{name: "name", internal_code: "100"})
iex> repo.insert!(%Product{name: "name2", internal_code: "101"})
iex> opts = [paginate: Rummage.Ecto.Hook.Paginate,
...> repo: repo]
iex> {queryable, rummage} = rummage(Product, rummage, opts)
iex> rummage
%{paginate: %{max_page: 2, page: 1, per_page: 1, total_count: 2}}
iex> queryable
#Ecto.Query<from p in Rummage.Ecto.Product, limit: ^1, offset: ^0>
"""
def rummage(queryable, rummage, opts \\ []) do
hooks = [search: Keyword.get(opts, :search, RConfig.search()),
sort: Keyword.get(opts, :sort, RConfig.sort()),
paginate: Keyword.get(opts, :paginate, RConfig.paginate())]
rummage =
Enum.reduce(hooks, rummage, &format_hook_params(&1, &2, queryable, opts))
{Enum.reduce(hooks, queryable, &run_hook(&1, &2, rummage)), rummage}
end
defp format_hook_params({_, nil}, rummage, _, _), do: rummage
defp format_hook_params({type, hook_mod}, rummage, queryable, opts) do
case Map.get(rummage, type) do
nil -> rummage
params -> Map.put(rummage, type,
apply(hook_mod, :format_params, [queryable, params, opts]))
end
end
defp run_hook({_, nil}, queryable, _), do: queryable
defp run_hook({type, hook_mod}, queryable, rummage) do
case Map.get(rummage, type) do
nil -> queryable
params -> apply(hook_mod, :run, [queryable, params])
end
end
@doc """
This macro allows an `Ecto.Schema` to leverage rummage's features with
ease. This macro defines a function `rummage/2` which can be called on
the Module `using` this which delegates to `Rummage.Ecto.rummage/3`, but
before doing that it resolves the options with default values for `repo`,
`search` hook, `sort` hook and `paginate` hook. If `rummage/2` is called with
those options in form of keys given to the last argument `opts`, then it
sets those keys to what's given else it delegates it to the defaults
specficied by `__using__` macro. If no defaults are specified, then it
further delegates it to configurations.
The function `rummage/2` takes in `rummage params` and `opts` and calls
`Rummage.Ecto.rummage/3` with whatever schema is calling it as the
`queryable`.
This macro also defines a function `rummageq/3` where q implies `queryable`.
Therefore this function can take a `queryable` as the first argument.
In this way this macro makes it very easy to use `Rummage.Ecto`.
## Usage:
### Basic Usage where a default repo is specified as options to the macro.
```elixir
defmodule MyApp.MySchema do
use Ecto.Schema
use Rummage.Ecto, repo: MyApp.Repo, per_page: 10
end
```
  ### Advanced Usage where search and sort hooks are overridden for this module.
```elixir
defmodule MyApp.MySchema do
use Ecto.Schema
use Rummage.Ecto, repo: MyApp.Repo, per_page: 10,
search: CustomSearchModule,
sort: CustomSortModule
  end
  ```
  This allows you to just do `MyApp.MySchema.rummage(rummage_params)` with specific
  `rummage_params` and add `Rummage.Ecto`'s power to your schema.
"""
defmacro __using__(opts) do
quote do
alias Rummage.Ecto.Config, as: RConfig
def rummage(rummage, opts \\ []) do
Rummage.Ecto.rummage(__MODULE__, rummage, uniq_merge(opts, defaults()))
end
def rummageq(queryable, rummage, opts \\ []) do
Rummage.Ecto.rummage(queryable, rummage, uniq_merge(opts, defaults()))
end
defp defaults() do
keys = ~w{repo per_page search sort paginate}a
Enum.map(keys, &get_defs/1)
end
defp get_defs(key) do
app = Application.get_application(__MODULE__)
{key, Keyword.get(unquote(opts), key, apply(RConfig, key, [app]))}
end
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
end
end
end
|
lib/rummage_ecto.ex
| 0.775095
| 0.827689
|
rummage_ecto.ex
|
starcoder
|
defmodule AdventOfCode.Day11 do
@moduledoc false
use AdventOfCode
defmodule Point, do: defstruct(value: nil, coordinates: nil, neighbors: [])
def part1(input), do: preprocess_input(input) |> step(0, 99) |> elem(1)
def part2(input, grid \\ nil, current_step \\ 0) do
grid =
(grid || preprocess_input(input))
|> step(0, 0)
|> elem(0)
    all_zeros = Enum.all?(grid, fn {_, %Point{value: value}} -> value == 0 end)
if all_zeros, do: current_step + 1, else: part2(nil, grid, current_step + 1)
end
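  # Usage sketch, using the small example grid from the puzzle text:
  #
  #     input = "11111\n19991\n19191\n19991\n11111"
  #     total_flashes = AdventOfCode.Day11.part1(input)
  #     first_all_flash_step = AdventOfCode.Day11.part2(input)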
defp step(grid, total_flashes, steps_left) do
# add +1 to all values
grid =
Enum.map(grid, fn {k, %Point{value: value} = point} ->
new_value = value + 1
{k, %{point | value: new_value}}
end)
|> Map.new()
      # propagate flashes: bump the neighbors of each flashing octopus (recursively)
|> find_and_update_neighbors()
# count how many octopuses flashed
count_flashes = Enum.filter(grid, fn {_c, %Point{value: v}} -> v > 9 end) |> Enum.count()
new_total_flashes = total_flashes + count_flashes
# update all flashed octopuses energy to 0
grid =
Enum.map(grid, fn {coordinates, %Point{value: value} = point} ->
if value > 9 do
{coordinates, %{point | value: 0}}
else
{coordinates, point}
end
end)
|> Map.new()
if steps_left == 0,
do: {grid, new_total_flashes},
else: step(grid, new_total_flashes, steps_left - 1)
end
defp find_and_update_neighbors(grid) do
flashing_neighbors =
grid
|> Enum.filter(fn {_c, %Point{value: v}} -> v == 10 end)
|> Enum.map(fn {_, %Point{coordinates: coordinates, value: _value, neighbors: neighbors}} ->
[coordinates | neighbors]
end)
|> List.flatten()
grid =
Enum.map(grid, fn {k, %Point{value: value} = point} ->
# update all 10s by one so we won't count them again
new_value = if value == 10, do: value + 1, else: value
{k, %{point | value: new_value}}
end)
|> Map.new()
if Enum.empty?(flashing_neighbors),
do: grid,
else: update_neighbors(grid, flashing_neighbors)
end
defp update_neighbors(grid, neighbors) do
Enum.reduce(neighbors, grid, fn neighbor_coordinates, acc ->
%Point{value: value} = point = Map.get(acc, neighbor_coordinates)
# don't update 10s as they are not counted as flashed yet
new_value = if value == 10, do: value, else: value + 1
Map.put(acc, neighbor_coordinates, %{point | value: new_value})
end)
|> find_and_update_neighbors()
end
defp preprocess_input(input) do
grid =
input
|> String.trim()
|> String.split("\n")
|> Enum.map(fn line ->
line
|> String.trim()
|> String.split("")
|> Enum.reject(&(&1 == ""))
|> Enum.map(&String.to_integer/1)
end)
|> Enum.with_index()
|> Enum.reduce(%{}, fn {values, y}, acc ->
values
|> Enum.with_index()
|> Enum.map(fn {value, x} ->
{{x, y}, %Point{value: value, coordinates: {x, y}}}
end)
|> Map.new()
|> Map.merge(acc)
end)
Enum.map(grid, fn {coordinates, %Point{} = point} ->
{coordinates, %{point | neighbors: find_neighbors(grid, point)}}
end)
|> Map.new()
end
defp find_neighbors(grid, %Point{coordinates: {x, y}}) do
neighbor_coordinates = [
{x + 1, y},
{x - 1, y},
{x, y + 1},
{x, y - 1},
{x + 1, y + 1},
{x - 1, y + 1},
{x + 1, y - 1},
{x - 1, y - 1}
]
Enum.filter(grid, fn {coordinates, _} -> Enum.member?(neighbor_coordinates, coordinates) end)
|> Enum.map(fn {coordinates, _} -> coordinates end)
end
# defp print_grid(grid, sep \\ "") do
# grid
# |> Enum.group_by(fn {{_x, y}, %Point{} = _point} -> y end)
# |> Enum.map(fn {_y, points} ->
# Enum.map(points, fn {_, %Point{coordinates: {x, _y}, value: value}} -> {x, value} end)
# |> Enum.sort(fn {x1, _}, {x2, _} -> x1 < x2 end)
# |> Enum.map(fn {_x, v} -> v end)
# |> Enum.join(sep)
# end)
# |> Enum.each(fn l -> IO.puts(l) end)
# IO.puts("\n")
# grid
# end
end
|
lib/day11.ex
| 0.642769
| 0.546859
|
day11.ex
|
starcoder
|
defmodule InetTcp_dist do
@moduledoc """
This module replaces the standard `:inet_tcp_dist` from Erlang and introduces a new function call
to replace DNS lookups for Erlang Distribution. The EPMD module is required to have this function
implemented. It is not checked during compilation since the callback is done dynamically.
The EPMD module needs to implement `address_and_port_please(node)`. It should give a tuple
containing IP and port like this: `{ip, port}`.
Most callbacks of this module fall back on Erlang's `:inet_tcp_dist`. For the ones it doesn't it
has an equal implementation.
It only supports `:shortnames` currently, which makes sense since we're not using DNS.
"""
require Record
require Logger
Record.defrecordp :hs_data, Record.extract(:hs_data, from_lib: "kernel/include/dist_util.hrl")
Record.defrecordp :net_address, Record.extract(:net_address, from_lib: "kernel/include/net_address.hrl")
def listen(name) do
:inet_tcp_dist.listen name
end
def select(node) do
:inet_tcp_dist.select node
end
def accept(listen) do
:inet_tcp_dist.accept listen
end
def accept_connection(accept_pid, socket, my_node, allowed, setup_time) do
:inet_tcp_dist.accept_connection accept_pid, socket, my_node, allowed, setup_time
end
# only support :shortnames
def setup(_, _, _, :longnames, _), do: Logger.warn "Longnames not supported with this distribution module"
def setup(node, type, my_node, :shortnames, setup_time) do
:erlang.spawn_opt(__MODULE__, :do_setup, [self(), node, type, my_node, :shortnames, setup_time],[:link, {:priority, :max}])
end
def do_setup(kernel, node, type, my_node, :shortnames, setup_time) do
# get epmd module
mod = :net_kernel.epmd_module()
# epmd module should expose this new function to give address and port
case mod.address_and_port_please(node) do
{{_,_,_,_} = ip, port} when port > 0 ->
# start distribution timer (for timeout etc)
timer = :dist_util.start_timer(setup_time)
# connection options
options = connect_options([{:active, false}, {:packet, 2}])
# start connecting and distribution
:inet_tcp.connect(ip, port, options)
|> case do
{:ok, my_socket} ->
hsdata = create_hs_data(kernel, node, ip, port, type, my_node, my_socket, timer)
:dist_util.handshake_we_started(hsdata)
_ ->
Logger.warn "Connection to other node (#{inspect node}) failed"
:dist_util.shutdown(__MODULE__, 41, node)
end
_ ->
Logger.warn "address_and_port_please/1 (#{inspect node}) failed"
exit(:shutdown)
end
end
def close(listen) do
:inet_tcp_dist.close listen
end
defp create_hs_data(kernel, node, ip, port, type, my_node, socket, timer) do
hs_data(
kernel_pid: kernel,
other_node: node,
this_node: my_node,
socket: socket,
timer: timer,
this_flags: 0,
other_version: 5,
f_send: &:inet_tcp.send/2,
f_recv: &:inet_tcp.recv/3,
f_setopts_pre_nodeup:
fn(s) ->
:inet.setopts(
s,
[{:active, false},
{:packet, 4},
nodelay()])
end,
f_setopts_post_nodeup:
fn(s) ->
:inet.setopts(
s,
[{:active, true},
{:deliver, :port},
{:packet, 4},
nodelay()])
end,
f_getll: &:inet.getll/1,
f_address:
fn(_,_) ->
net_address(
address: {ip, port},
host: get_domain(node),
protocol: :tcp,
family: :inet
)
end,
mf_tick: fn(s) -> :inet_tcp_dist.tick(:inet_tcp, s) end,
mf_getstat: &:inet_tcp_dist.getstat/1,
request_type: type,
mf_setopts: &:inet_tcp_dist.setopts/2,
mf_getopts: &:inet_tcp_dist.getopts/2
)
end
# Rewrote Erlang version to Elixir, source 'inet_tcp_dist'
defp nodelay() do
Application.get_env(:kernel, :dist_nodelay, :undefined)
|> case do
:undefined -> {:nodelay, true}
{:ok, true} -> {:nodelay, true}
{:ok, false} -> {:nodelay, false}
_ -> {:nodelay, true}
end
end
# Rewrote Erlang version to Elixir, source 'inet_tcp_dist'
defp connect_options(opts) do
Application.get_env(:kernel, :inet_dist_connect_options, []) ++ opts
end
defp get_domain(node) do
node
|> to_charlist()
|> Enum.reduce([], fn
(?@, _) -> []
(x, acc) -> [x|acc]
end)
|> Enum.reverse
end
end
|
lib/inet_tcp_dist.ex
| 0.627951
| 0.401746
|
inet_tcp_dist.ex
|
starcoder
|
defmodule AWS.WAF.Regional do
@moduledoc """
This is the *AWS WAF Regional API Reference* for using AWS WAF with Elastic
Load Balancing (ELB) Application Load Balancers. The AWS WAF actions and
data types listed in the reference are available for protecting Application
Load Balancers. You can use these actions and data types by means of the
endpoints listed in [AWS Regions and
Endpoints](http://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region).
This guide is for developers who need detailed information about the AWS
WAF API actions, data types, and errors. For detailed information about AWS
WAF features and an overview of how to use the AWS WAF API, see the [AWS
WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
@doc """
Associates a web ACL with a resource.
"""
def associate_web_a_c_l(client, input, options \\ []) do
request(client, "AssociateWebACL", input, options)
end
@doc """
Creates a `ByteMatchSet`. You then use `UpdateByteMatchSet` to identify the
part of a web request that you want AWS WAF to inspect, such as the values
of the `User-Agent` header or the query string. For example, you can create
a `ByteMatchSet` that matches any requests with `User-Agent` headers that
contain the string `BadBot`. You can then configure AWS WAF to reject those
requests.
To create and configure a `ByteMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateByteMatchSet` request.
</li> <li> Submit a `CreateByteMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
</li> <li> Submit an `UpdateByteMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_byte_match_set(client, input, options \\ []) do
request(client, "CreateByteMatchSet", input, options)
end
@doc """
Creates an `IPSet`, which you use to specify which web requests you want to
allow or block based on the IP addresses that the requests originate from.
For example, if you're receiving a lot of requests from one or more
individual IP addresses or one or more ranges of IP addresses and you want
to block the requests, you can create an `IPSet` that contains those IP
addresses and then configure AWS WAF to block the requests.
To create and configure an `IPSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateIPSet` request.
</li> <li> Submit a `CreateIPSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
</li> <li> Submit an `UpdateIPSet` request to specify the IP addresses that
you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_i_p_set(client, input, options \\ []) do
request(client, "CreateIPSet", input, options)
end
@doc """
Creates a `Rule`, which contains the `IPSet` objects, `ByteMatchSet`
objects, and other predicates that identify the requests that you want to
block. If you add more than one predicate to a `Rule`, a request must match
all of the specifications to be allowed or blocked. For example, suppose
you add the following to a `Rule`:
<ul> <li> An `IPSet` that matches the IP address `192.0.2.44/32`
</li> <li> A `ByteMatchSet` that matches `BadBot` in the `User-Agent`
header
</li> </ul> You then add the `Rule` to a `WebACL` and specify that you want
  to block requests that satisfy the `Rule`. For a request to be blocked, it
must come from the IP address 192.0.2.44 *and* the `User-Agent` header in
the request must contain the value `BadBot`.
To create and configure a `Rule`, perform the following steps:
<ol> <li> Create and update the predicates that you want to include in the
`Rule`. For more information, see `CreateByteMatchSet`, `CreateIPSet`, and
`CreateSqlInjectionMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRule` request.
</li> <li> Submit a `CreateRule` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
</li> <li> Submit an `UpdateRule` request to specify the predicates that
you want to include in the `Rule`.
</li> <li> Create and update a `WebACL` that contains the `Rule`. For more
information, see `CreateWebACL`.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_rule(client, input, options \\ []) do
request(client, "CreateRule", input, options)
end
@doc """
Creates a `SizeConstraintSet`. You then use `UpdateSizeConstraintSet` to
identify the part of a web request that you want AWS WAF to check for
length, such as the length of the `User-Agent` header or the length of the
query string. For example, you can create a `SizeConstraintSet` that
matches any requests that have a query string that is longer than 100
bytes. You can then configure AWS WAF to reject those requests.
To create and configure a `SizeConstraintSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSizeConstraintSet` request.
</li> <li> Submit a `CreateSizeConstraintSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.
</li> <li> Submit an `UpdateSizeConstraintSet` request to specify the part
of the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_size_constraint_set(client, input, options \\ []) do
request(client, "CreateSizeConstraintSet", input, options)
end
@doc """
Creates a `SqlInjectionMatchSet`, which you use to allow, block, or count
requests that contain snippets of SQL code in a specified part of web
requests. AWS WAF searches for character sequences that are likely to be
malicious strings.
To create and configure a `SqlInjectionMatchSet`, perform the following
steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSqlInjectionMatchSet` request.
</li> <li> Submit a `CreateSqlInjectionMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSqlInjectionMatchSet` request.
</li> <li> Submit an `UpdateSqlInjectionMatchSet` request to specify the
parts of web requests in which you want to allow, block, or count malicious
SQL code.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_sql_injection_match_set(client, input, options \\ []) do
request(client, "CreateSqlInjectionMatchSet", input, options)
end
@doc """
Creates a `WebACL`, which contains the `Rules` that identify the CloudFront
web requests that you want to allow, block, or count. AWS WAF evaluates
`Rules` in order based on the value of `Priority` for each `Rule`.
You also specify a default action, either `ALLOW` or `BLOCK`. If a web
request doesn't match any of the `Rules` in a `WebACL`, AWS WAF responds to
the request with the default action.
To create and configure a `WebACL`, perform the following steps:
<ol> <li> Create and update the `ByteMatchSet` objects and other predicates
that you want to include in `Rules`. For more information, see
`CreateByteMatchSet`, `UpdateByteMatchSet`, `CreateIPSet`, `UpdateIPSet`,
`CreateSqlInjectionMatchSet`, and `UpdateSqlInjectionMatchSet`.
</li> <li> Create and update the `Rules` that you want to include in the
`WebACL`. For more information, see `CreateRule` and `UpdateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateWebACL` request.
</li> <li> Submit a `CreateWebACL` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateWebACL` request.
</li> <li> Submit an `UpdateWebACL` request to specify the `Rules` that you
want to include in the `WebACL`, to specify the default action, and to
associate the `WebACL` with a CloudFront distribution.
</li> </ol> For more information about how to use the AWS WAF API, see the
[AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_web_a_c_l(client, input, options \\ []) do
request(client, "CreateWebACL", input, options)
end
@doc """
Creates an `XssMatchSet`, which you use to allow, block, or count requests
that contain cross-site scripting attacks in the specified part of web
requests. AWS WAF searches for character sequences that are likely to be
malicious strings.
To create and configure an `XssMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateXssMatchSet` request.
</li> <li> Submit a `CreateXssMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateXssMatchSet` request.
</li> <li> Submit an `UpdateXssMatchSet` request to specify the parts of
web requests in which you want to allow, block, or count cross-site
scripting attacks.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_xss_match_set(client, input, options \\ []) do
request(client, "CreateXssMatchSet", input, options)
end
@doc """
Permanently deletes a `ByteMatchSet`. You can't delete a `ByteMatchSet` if
it's still used in any `Rules` or if it still includes any `ByteMatchTuple`
objects (any filters).
If you just want to remove a `ByteMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `ByteMatchSet`, perform the following steps:
<ol> <li> Update the `ByteMatchSet` to remove filters, if any. For more
information, see `UpdateByteMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteByteMatchSet` request.
</li> <li> Submit a `DeleteByteMatchSet` request.
</li> </ol>
"""
def delete_byte_match_set(client, input, options \\ []) do
request(client, "DeleteByteMatchSet", input, options)
end
@doc """
Permanently deletes an `IPSet`. You can't delete an `IPSet` if it's still
used in any `Rules` or if it still includes any IP addresses.
If you just want to remove an `IPSet` from a `Rule`, use `UpdateRule`.
To permanently delete an `IPSet` from AWS WAF, perform the following steps:
<ol> <li> Update the `IPSet` to remove IP address ranges, if any. For more
information, see `UpdateIPSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteIPSet` request.
</li> <li> Submit a `DeleteIPSet` request.
</li> </ol>
"""
def delete_i_p_set(client, input, options \\ []) do
request(client, "DeleteIPSet", input, options)
end
@doc """
Permanently deletes a `Rule`. You can't delete a `Rule` if it's still used
in any `WebACL` objects or if it still includes any predicates, such as
`ByteMatchSet` objects.
If you just want to remove a `Rule` from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `Rule` from AWS WAF, perform the following steps:
<ol> <li> Update the `Rule` to remove predicates, if any. For more
information, see `UpdateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRule` request.
</li> <li> Submit a `DeleteRule` request.
</li> </ol>
"""
def delete_rule(client, input, options \\ []) do
request(client, "DeleteRule", input, options)
end
@doc """
Permanently deletes a `SizeConstraintSet`. You can't delete a
`SizeConstraintSet` if it's still used in any `Rules` or if it still
includes any `SizeConstraint` objects (any filters).
If you just want to remove a `SizeConstraintSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SizeConstraintSet`, perform the following steps:
<ol> <li> Update the `SizeConstraintSet` to remove filters, if any. For
more information, see `UpdateSizeConstraintSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSizeConstraintSet` request.
</li> <li> Submit a `DeleteSizeConstraintSet` request.
</li> </ol>
"""
def delete_size_constraint_set(client, input, options \\ []) do
request(client, "DeleteSizeConstraintSet", input, options)
end
@doc """
Permanently deletes a `SqlInjectionMatchSet`. You can't delete a
`SqlInjectionMatchSet` if it's still used in any `Rules` or if it still
contains any `SqlInjectionMatchTuple` objects.
If you just want to remove a `SqlInjectionMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SqlInjectionMatchSet` from AWS WAF, perform the
following steps:
<ol> <li> Update the `SqlInjectionMatchSet` to remove filters, if any. For
more information, see `UpdateSqlInjectionMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSqlInjectionMatchSet` request.
</li> <li> Submit a `DeleteSqlInjectionMatchSet` request.
</li> </ol>
"""
def delete_sql_injection_match_set(client, input, options \\ []) do
request(client, "DeleteSqlInjectionMatchSet", input, options)
end
@doc """
Permanently deletes a `WebACL`. You can't delete a `WebACL` if it still
contains any `Rules`.
To delete a `WebACL`, perform the following steps:
<ol> <li> Update the `WebACL` to remove `Rules`, if any. For more
information, see `UpdateWebACL`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteWebACL` request.
</li> <li> Submit a `DeleteWebACL` request.
</li> </ol>
"""
def delete_web_a_c_l(client, input, options \\ []) do
request(client, "DeleteWebACL", input, options)
end
@doc """
Permanently deletes an `XssMatchSet`. You can't delete an `XssMatchSet` if
it's still used in any `Rules` or if it still contains any `XssMatchTuple`
objects.
If you just want to remove an `XssMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete an `XssMatchSet` from AWS WAF, perform the following
steps:
<ol> <li> Update the `XssMatchSet` to remove filters, if any. For more
information, see `UpdateXssMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteXssMatchSet` request.
</li> <li> Submit a `DeleteXssMatchSet` request.
</li> </ol>
"""
def delete_xss_match_set(client, input, options \\ []) do
request(client, "DeleteXssMatchSet", input, options)
end
@doc """
Removes a web ACL from the specified resource.
"""
def disassociate_web_a_c_l(client, input, options \\ []) do
request(client, "DisassociateWebACL", input, options)
end
@doc """
Returns the `ByteMatchSet` specified by `ByteMatchSetId`.
"""
def get_byte_match_set(client, input, options \\ []) do
request(client, "GetByteMatchSet", input, options)
end
@doc """
When you want to create, update, or delete AWS WAF objects, get a change
token and include the change token in the create, update, or delete
request. Change tokens ensure that your application doesn't submit
conflicting requests to AWS WAF.
Each create, update, or delete request must use a unique change token. If
your application submits a `GetChangeToken` request and then submits a
second `GetChangeToken` request before submitting a create, update, or
delete request, the second `GetChangeToken` request returns the same value
as the first `GetChangeToken` request.
When you use a change token in a create, update, or delete request, the
status of the change token changes to `PENDING`, which indicates that AWS
WAF is propagating the change to all AWS WAF servers. Use
`GetChangeTokenStatus` to determine the status of your change token.
"""
def get_change_token(client, input, options \\ []) do
request(client, "GetChangeToken", input, options)
end
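  # Usage sketch (hypothetical client and response shapes) of the change-token
  # workflow described above:
  #
  #     {:ok, %{"ChangeToken" => token}, _http_response} =
  #       AWS.WAF.Regional.get_change_token(client, %{})
  #     AWS.WAF.Regional.create_i_p_set(client, %{
  #       "ChangeToken" => token,
  #       "Name" => "blocked-ips"
  #     })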
@doc """
Returns the status of a `ChangeToken` that you got by calling
`GetChangeToken`. `ChangeTokenStatus` is one of the following values:
<ul> <li> `PROVISIONED`: You requested the change token by calling
`GetChangeToken`, but you haven't used it yet in a call to create, update,
or delete an AWS WAF object.
</li> <li> `PENDING`: AWS WAF is propagating the create, update, or delete
request to all AWS WAF servers.
</li> <li> `IN_SYNC`: Propagation is complete.
</li> </ul>
"""
def get_change_token_status(client, input, options \\ []) do
request(client, "GetChangeTokenStatus", input, options)
end
@doc """
Returns the `IPSet` that is specified by `IPSetId`.
"""
def get_i_p_set(client, input, options \\ []) do
request(client, "GetIPSet", input, options)
end
@doc """
Returns the `Rule` that is specified by the `RuleId` that you included in
the `GetRule` request.
"""
def get_rule(client, input, options \\ []) do
request(client, "GetRule", input, options)
end
@doc """
Gets detailed information about a specified number of requests--a
sample--that AWS WAF randomly selects from among the first 5,000 requests
that your AWS resource received during a time range that you choose. You
can specify a sample size of up to 500 requests, and you can specify any
time range in the previous three hours.
`GetSampledRequests` returns a time range, which is usually the time range
that you specified. However, if your resource (such as a CloudFront
distribution) received 5,000 requests before the specified time range
elapsed, `GetSampledRequests` returns an updated time range. This new time
range indicates the actual period during which AWS WAF selected the
requests in the sample.
"""
def get_sampled_requests(client, input, options \\ []) do
request(client, "GetSampledRequests", input, options)
end
@doc """
Returns the `SizeConstraintSet` specified by `SizeConstraintSetId`.
"""
def get_size_constraint_set(client, input, options \\ []) do
request(client, "GetSizeConstraintSet", input, options)
end
@doc """
Returns the `SqlInjectionMatchSet` that is specified by
`SqlInjectionMatchSetId`.
"""
def get_sql_injection_match_set(client, input, options \\ []) do
request(client, "GetSqlInjectionMatchSet", input, options)
end
@doc """
Returns the `WebACL` that is specified by `WebACLId`.
"""
def get_web_a_c_l(client, input, options \\ []) do
request(client, "GetWebACL", input, options)
end
@doc """
Returns the web ACL for the specified resource.
"""
def get_web_a_c_l_for_resource(client, input, options \\ []) do
request(client, "GetWebACLForResource", input, options)
end
@doc """
Returns the `XssMatchSet` that is specified by `XssMatchSetId`.
"""
def get_xss_match_set(client, input, options \\ []) do
request(client, "GetXssMatchSet", input, options)
end
@doc """
Returns an array of `ByteMatchSetSummary` objects.
"""
def list_byte_match_sets(client, input, options \\ []) do
request(client, "ListByteMatchSets", input, options)
end
@doc """
Returns an array of `IPSetSummary` objects in the response.
"""
def list_i_p_sets(client, input, options \\ []) do
request(client, "ListIPSets", input, options)
end
@doc """
Returns an array of resources associated with the specified web ACL.
"""
def list_resources_for_web_a_c_l(client, input, options \\ []) do
request(client, "ListResourcesForWebACL", input, options)
end
@doc """
Returns an array of `RuleSummary` objects.
"""
def list_rules(client, input, options \\ []) do
request(client, "ListRules", input, options)
end
@doc """
Returns an array of `SizeConstraintSetSummary` objects.
"""
def list_size_constraint_sets(client, input, options \\ []) do
request(client, "ListSizeConstraintSets", input, options)
end
@doc """
Returns an array of `SqlInjectionMatchSet` objects.
"""
def list_sql_injection_match_sets(client, input, options \\ []) do
request(client, "ListSqlInjectionMatchSets", input, options)
end
@doc """
Returns an array of `WebACLSummary` objects in the response.
"""
def list_web_a_c_ls(client, input, options \\ []) do
request(client, "ListWebACLs", input, options)
end
@doc """
Returns an array of `XssMatchSet` objects.
"""
def list_xss_match_sets(client, input, options \\ []) do
request(client, "ListXssMatchSets", input, options)
end
@doc """
Inserts or deletes `ByteMatchTuple` objects (filters) in a `ByteMatchSet`.
For each `ByteMatchTuple` object, you specify the following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change a `ByteMatchSetUpdate` object, you delete the existing
object and add a new one.
</li> <li> The part of a web request that you want AWS WAF to inspect, such
as a query string or the value of the `User-Agent` header.
</li> <li> The bytes (typically a string that corresponds with ASCII
characters) that you want AWS WAF to look for. For more information,
including how you specify the values for the AWS WAF API and the AWS CLI or
SDKs, see `TargetString` in the `ByteMatchTuple` data type.
</li> <li> Where to look, such as at the beginning or the end of a query
string.
</li> <li> Whether to perform any conversions on the request, such as
converting it to lowercase, before inspecting it for the specified string.
</li> </ul> For example, you can add a `ByteMatchSetUpdate` object that
matches web requests in which `User-Agent` headers contain the string
`BadBot`. You can then configure AWS WAF to block those requests.
To create and configure a `ByteMatchSet`, perform the following steps:
<ol> <li> Create a `ByteMatchSet.` For more information, see
`CreateByteMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
</li> <li> Submit an `UpdateByteMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_byte_match_set(client, input, options \\ []) do
request(client, "UpdateByteMatchSet", input, options)
end
@doc """
Inserts or deletes `IPSetDescriptor` objects in an `IPSet`. For each
`IPSetDescriptor` object, you specify the following values:

* Whether to insert or delete the object from the array. If you want to
  change an `IPSetDescriptor` object, you delete the existing object and add
  a new one.

* The IP address version, `IPv4` or `IPv6`.

* The IP address in CIDR notation, for example, `192.0.2.0/24` (for the
  range of IP addresses from `192.0.2.0` to `192.0.2.255`) or `192.0.2.44/32`
  (for the individual IP address `192.0.2.44`).

AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24,
/32, /48, /56, /64, and /128 for IPv6. For more information about CIDR
notation, see the Wikipedia entry [Classless Inter-Domain
Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).

IPv6 addresses can be represented using any of the following formats:

* fc00:db20:35b:7399::5/128
* fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128
* fdf8:f53e:61e4::18/128

You use an `IPSet` to specify which web requests you want to allow or block
based on the IP addresses that the requests originated from. For example, if
you're receiving a lot of requests from one or a small number of IP addresses
and you want to block the requests, you can create an `IPSet` that specifies
those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an `IPSet`, perform the following steps:

1. Submit a `CreateIPSet` request.

2. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateIPSet` request.

3. Submit an `UpdateIPSet` request to specify the IP addresses that you want
   AWS WAF to watch for.

When you update an `IPSet`, you specify the IP addresses that you want to add
and/or the IP addresses that you want to delete. If you want to change an IP
address, you delete the existing IP address and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
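
## Example

A sketch of blocking a single IPv4 address; `get_change_token/2` and the
input shape are assumptions based on the AWS WAF JSON API:

```
{:ok, %{"ChangeToken" => token}, _response} = get_change_token(client, %{})

input = %{
  "ChangeToken" => token,
  "IPSetId" => ip_set_id,
  "Updates" => [
    %{
      "Action" => "INSERT",
      "IPSetDescriptor" => %{"Type" => "IPV4", "Value" => "192.0.2.44/32"}
    }
  ]
}

{:ok, _result, _response} = update_i_p_set(client, input)
```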
"""
def update_i_p_set(client, input, options \\ []) do
request(client, "UpdateIPSet", input, options)
end
@doc """
Inserts or deletes `Predicate` objects in a `Rule`. Each `Predicate` object
identifies a predicate, such as a `ByteMatchSet` or an `IPSet`, that
specifies the web requests that you want to allow, block, or count. If you
add more than one predicate to a `Rule`, a request must match all of the
specifications to be allowed, blocked, or counted. For example, suppose you
add the following to a `Rule`:

* A `ByteMatchSet` that matches the value `BadBot` in the `User-Agent` header

* An `IPSet` that matches the IP address `192.0.2.44`

You then add the `Rule` to a `WebACL` and specify that you want to block
requests that satisfy the `Rule`. For a request to be blocked, the
`User-Agent` header in the request must contain the value `BadBot` *and* the
request must originate from the IP address 192.0.2.44.

To create and configure a `Rule`, perform the following steps:

1. Create and update the predicates that you want to include in the `Rule`.

2. Create the `Rule`. See `CreateRule`.

3. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateRule` request.

4. Submit an `UpdateRule` request to add predicates to the `Rule`.

5. Create and update a `WebACL` that contains the `Rule`. See `CreateWebACL`.

If you want to replace one `ByteMatchSet` or `IPSet` with another, you delete
the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_rule(client, input, options \\ []) do
request(client, "UpdateRule", input, options)
end
@doc """
Inserts or deletes `SizeConstraint` objects (filters) in a
`SizeConstraintSet`. For each `SizeConstraint` object, you specify the
following values:

* Whether to insert or delete the object from the array. If you want to
  change a `SizeConstraintSetUpdate` object, you delete the existing object
  and add a new one.

* The part of a web request that you want AWS WAF to evaluate, such as the
  length of a query string or the length of the `User-Agent` header.

* Whether to perform any transformations on the request, such as converting
  it to lowercase, before checking its length. Note that transformations of
  the request body are not supported because the AWS resource forwards only
  the first `8192` bytes of your request to AWS WAF.

* A `ComparisonOperator` used for evaluating the selected part of the
  request against the specified `Size`, such as equals, greater than, less
  than, and so on.

* The length, in bytes, that you want AWS WAF to watch for in the selected
  part of the request. The length is computed after applying the
  transformation.

For example, you can add a `SizeConstraintSetUpdate` object that matches web
requests in which the length of the `User-Agent` header is greater than 100
bytes. You can then configure AWS WAF to block those requests.

To create and configure a `SizeConstraintSet`, perform the following steps:

1. Create a `SizeConstraintSet`. For more information, see
   `CreateSizeConstraintSet`.

2. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.

3. Submit an `UpdateSizeConstraintSet` request to specify the part of the
   request that you want AWS WAF to inspect (for example, the header or the
   URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_size_constraint_set(client, input, options \\ []) do
request(client, "UpdateSizeConstraintSet", input, options)
end
@doc """
Inserts or deletes `SqlInjectionMatchTuple` objects (filters) in a
`SqlInjectionMatchSet`. For each `SqlInjectionMatchTuple` object, you specify
the following values:

* `Action`: Whether to insert the object into or delete the object from the
  array. To change a `SqlInjectionMatchTuple`, you delete the existing object
  and add a new one.

* `FieldToMatch`: The part of web requests that you want AWS WAF to inspect
  and, if you want AWS WAF to inspect a header, the name of the header.

* `TextTransformation`: Which text transformation, if any, to perform on the
  web request before inspecting the request for snippets of malicious SQL
  code.

You use `SqlInjectionMatchSet` objects to specify which CloudFront requests
you want to allow, block, or count. For example, if you're receiving requests
that contain snippets of SQL code in the query string and you want to block
the requests, you can create a `SqlInjectionMatchSet` with the applicable
settings, and then configure AWS WAF to block the requests.

To create and configure a `SqlInjectionMatchSet`, perform the following
steps:

1. Submit a `CreateSqlInjectionMatchSet` request.

2. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateSqlInjectionMatchSet` request.

3. Submit an `UpdateSqlInjectionMatchSet` request to specify the parts of
   web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_sql_injection_match_set(client, input, options \\ []) do
request(client, "UpdateSqlInjectionMatchSet", input, options)
end
@doc """
Inserts or deletes `ActivatedRule` objects in a `WebACL`. Each `Rule`
identifies web requests that you want to allow, block, or count. When you
update a `WebACL`, you specify the following values:

* A default action for the `WebACL`, either `ALLOW` or `BLOCK`. AWS WAF
  performs the default action if a request doesn't match the criteria in any
  of the `Rules` in a `WebACL`.

* The `Rules` that you want to add and/or delete. If you want to replace one
  `Rule` with another, you delete the existing `Rule` and add the new one.

* For each `Rule`, whether you want AWS WAF to allow requests, block
  requests, or count requests that match the conditions in the `Rule`.

* The order in which you want AWS WAF to evaluate the `Rules` in a `WebACL`.
  If you add more than one `Rule` to a `WebACL`, AWS WAF evaluates each
  request against the `Rules` in order based on the value of `Priority`.
  (The `Rule` that has the lowest value for `Priority` is evaluated first.)
  When a web request matches all of the predicates (such as `ByteMatchSets`
  and `IPSets`) in a `Rule`, AWS WAF immediately takes the corresponding
  action, allow or block, and doesn't evaluate the request against the
  remaining `Rules` in the `WebACL`, if any.

To create and configure a `WebACL`, perform the following steps:

1. Create and update the predicates that you want to include in `Rules`. For
   more information, see `CreateByteMatchSet`, `UpdateByteMatchSet`,
   `CreateIPSet`, `UpdateIPSet`, `CreateSqlInjectionMatchSet`, and
   `UpdateSqlInjectionMatchSet`.

2. Create and update the `Rules` that you want to include in the `WebACL`.
   For more information, see `CreateRule` and `UpdateRule`.

3. Create a `WebACL`. See `CreateWebACL`.

4. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateWebACL` request.

5. Submit an `UpdateWebACL` request to specify the `Rules` that you want to
   include in the `WebACL`, to specify the default action, and to associate
   the `WebACL` with a CloudFront distribution.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
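
## Example

A sketch of activating a blocking rule; the input shape follows the AWS WAF
`UpdateWebACL` JSON API and is an assumption, not verbatim from this module:

```
input = %{
  "ChangeToken" => change_token,
  "WebACLId" => web_acl_id,
  "DefaultAction" => %{"Type" => "ALLOW"},
  "Updates" => [
    %{
      "Action" => "INSERT",
      "ActivatedRule" => %{
        "RuleId" => rule_id,
        "Priority" => 1,
        "Action" => %{"Type" => "BLOCK"}
      }
    }
  ]
}

{:ok, _result, _response} = update_web_a_c_l(client, input)
```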
"""
def update_web_a_c_l(client, input, options \\ []) do
request(client, "UpdateWebACL", input, options)
end
@doc """
Inserts or deletes `XssMatchTuple` objects (filters) in an `XssMatchSet`.
For each `XssMatchTuple` object, you specify the following values:

* `Action`: Whether to insert the object into or delete the object from the
  array. To change an `XssMatchTuple`, you delete the existing object and
  add a new one.

* `FieldToMatch`: The part of web requests that you want AWS WAF to inspect
  and, if you want AWS WAF to inspect a header, the name of the header.

* `TextTransformation`: Which text transformation, if any, to perform on the
  web request before inspecting the request for cross-site scripting attacks.

You use `XssMatchSet` objects to specify which CloudFront requests you want
to allow, block, or count. For example, if you're receiving requests that
contain cross-site scripting attacks in the request body and you want to
block the requests, you can create an `XssMatchSet` with the applicable
settings, and then configure AWS WAF to block the requests.

To create and configure an `XssMatchSet`, perform the following steps:

1. Submit a `CreateXssMatchSet` request.

2. Use `GetChangeToken` to get the change token that you provide in the
   `ChangeToken` parameter of an `UpdateXssMatchSet` request.

3. Submit an `UpdateXssMatchSet` request to specify the parts of web
   requests that you want AWS WAF to inspect for cross-site scripting
   attacks.

For more information about how to use the AWS WAF API to allow or block HTTP
requests, see the [AWS WAF Developer
Guide](http://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_xss_match_set(client, input, options \\ []) do
request(client, "UpdateXssMatchSet", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "waf-regional"}
host = get_host("waf-regional", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSWAF_Regional_20161128.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# file: lib/aws/waf_regional.ex
defmodule Kuddle.Decoder do
@moduledoc """
Tokenizes and parses KDL documents into kuddle documents.
"""
alias Kuddle.Value
alias Kuddle.Node
import Kuddle.Tokenizer
import Kuddle.Utils
@typedoc """
Parsed tokens from the Tokenizer; these will be processed and converted into the final nodes of
the document.
"""
@type tokens :: Kuddle.Tokenizer.tokens()
@typedoc """
A single node in the Kuddle document
"""
@type document_node :: Node.t()
@typedoc """
A kuddle document is a list of Kuddle Nodes
"""
@type document :: [document_node()]
@doc """
Tokenize and parse a given KDL document.
If successful, it will return `{:ok, document, tokens}`, where document is the list of nodes that
were parsed and tokens are any unparsed tokens.
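
## Example

A minimal sketch (the node shape follows this module's `Kuddle.Node` struct):

```
{:ok, [%Kuddle.Node{name: "node"} | _rest], []} = Kuddle.Decoder.decode("node key=1")
```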
"""
@spec decode(String.t()) ::
{:ok, document(), tokens()}
| {:error, term()}
def decode(blob) when is_binary(blob) do
case tokenize(blob) do
{:ok, tokens, ""} ->
decode(tokens)
{:error, _} = err ->
err
end
end
def decode(tokens) when is_list(tokens) do
parse(tokens, {:default, 0}, [], [])
end
defp parse([], {:default, 0}, [], doc) do
handle_parse_exit([], doc)
end
defp parse([{:annotation, _value} = annotation | tokens], {:default, _} = state, acc, doc) do
parse(tokens, state, [annotation | acc], doc)
end
defp parse([{:slashdash, _} | tokens], {:default, _} = state, acc, doc) do
# add the slashdash to the document accumulator
# when the next parse is done, the slashdash will cause the next item in the accumulator to
# be dropped
parse(tokens, state, acc, [:slashdash | doc])
end
defp parse([{:comment, _} | tokens], {:default, _} = state, acc, doc) do
parse(tokens, state, acc, doc)
end
defp parse([{:fold, _} | tokens], {:default, _} = state, acc, doc) do
parse(unfold_leading_tokens(tokens), state, acc, doc)
end
defp parse([{:sc, _} | tokens], {:default, _} = state, acc, doc) do
# loose semi-colon
parse(tokens, state, acc, doc)
end
defp parse([{:nl, _} | tokens], {:default, _} = state, acc, doc) do
# trim leading newlines
parse(tokens, state, acc, doc)
end
defp parse([{:space, _} | tokens], {:default, _} = state, acc, doc) do
# trim leading space
parse(tokens, state, acc, doc)
end
defp parse([{:term, name} | tokens], {:default, depth}, acc, doc) do
# node
annotations = extract_annotations(acc)
parse(tokens, {:node, depth}, {name, annotations, []}, doc)
end
defp parse([{:dquote_string, name} | tokens], {:default, depth}, acc, doc) do
# double quote initiated node
annotations = extract_annotations(acc)
parse(tokens, {:node, depth}, {name, annotations, []}, doc)
end
defp parse([{:raw_string, name} | tokens], {:default, depth}, acc, doc) do
# raw string node
annotations = extract_annotations(acc)
parse(tokens, {:node, depth}, {name, annotations, []}, doc)
end
defp parse([{:slashdash, _} | tokens], {:node, _} = state, {name, annotations, attrs}, doc) do
parse(tokens, state, {name, annotations, [:slashdash | attrs]}, doc)
end
defp parse([{:comment, _} | tokens], {:node, _} = state, acc, doc) do
# trim comments
parse(tokens, state, acc, doc)
end
defp parse([{:space, _} | tokens], {:node, _} = state, acc, doc) do
# trim leading spaces in node
parse(tokens, state, acc, doc)
end
defp parse([{:fold, _} | tokens], {:node, _} = state, acc, doc) do
parse(unfold_leading_tokens(tokens), state, acc, doc)
end
defp parse([{token_type, _} | tokens], {:node, depth}, {name, node_annotations, attrs}, doc) when token_type in [:nl, :sc] do
node = %Node{
name: name,
annotations: node_annotations,
attributes: resolve_node_attributes(attrs),
children: nil,
}
parse(tokens, {:default, depth}, [], [node | doc])
end
defp parse([{:open_block, _} | tokens], {:node, depth}, {name, node_annotations, attrs}, doc) do
case parse(tokens, {:default, depth + 1}, [], []) do
{:ok, children, tokens} ->
case trim_leading_space(tokens) do
[{:close_block, _} | tokens] ->
node =
case attrs do
[:slashdash | attrs] ->
# discard the children
%Node{
name: name,
annotations: node_annotations,
attributes: resolve_node_attributes(attrs),
children: nil,
}
attrs ->
%Node{
name: name,
annotations: node_annotations,
attributes: resolve_node_attributes(attrs),
children: children,
}
end
parse(tokens, {:default, depth}, [], [node | doc])
end
{:error, _} = err ->
err
end
end
defp parse([{:annotation, _} = annotation | tokens], {:node, _} = state, {name, node_annotations, attrs}, doc) do
attrs = [annotation | attrs]
parse(tokens, state, {name, node_annotations, attrs}, doc)
end
defp parse([token | tokens], {:node, _} = state, {name, node_annotations, attrs}, doc) do
case token_to_value(token) do
{:ok, %Value{} = key} ->
{key_annotations, attrs} =
case attrs do
[{:annotation, annotation} | attrs] ->
{[annotation], attrs}
attrs ->
{[], attrs}
end
key = %{key | annotations: key.annotations ++ key_annotations}
case trim_leading_space(tokens) do
[{:=, _} | tokens] ->
tokens = trim_leading_space(tokens)
{value_annotations, tokens} =
case tokens do
[{:annotation, annotation} | tokens] ->
{[annotation], tokens}
tokens ->
{[], tokens}
end
[token | tokens] = tokens
case token_to_value(token) do
{:ok, %Value{} = value} ->
value = %{value | annotations: value.annotations ++ value_annotations}
parse(tokens, state, {name, node_annotations, [{key, value} | attrs]}, doc)
{:error, _} = err ->
err
end
tokens ->
case key do
%{type: :id} ->
{:error, {:bare_identifier, key}}
_ ->
parse(tokens, state, {name, node_annotations, [key | attrs]}, doc)
end
end
{:error, _} = err ->
err
end
end
defp parse([], {:node, depth}, {name, node_annotations, attrs}, doc) do
node = %Node{
name: name,
annotations: node_annotations,
attributes: resolve_node_attributes(attrs),
children: nil,
}
parse([], {:default, depth}, [], [node | doc])
end
defp parse([{:close_block, _} | _tokens] = tokens, {:default, _depth}, [], doc) do
handle_parse_exit(tokens, doc)
end
defp extract_annotations(items, acc \\ [])
defp extract_annotations([], acc) do
Enum.reverse(acc)
end
defp extract_annotations([{:annotation, value} | rest], acc) do
extract_annotations(rest, [value | acc])
end
defp extract_annotations([_ | rest], acc) do
extract_annotations(rest, acc)
end
defp handle_parse_exit(rest, doc) do
doc = Enum.reverse(doc)
{:ok, handle_slashdashes(doc, []), rest}
end
defp resolve_node_attributes(acc) do
acc
|> Enum.reverse()
|> handle_slashdashes([])
|> Enum.reduce([], fn
{key, value}, acc ->
# deduplicate attributes
acc =
Enum.reject(acc, fn
{key2, _value} -> key2.value == key.value
_ -> false
end)
[{key, value} | acc]
value, acc ->
[value | acc]
end)
|> Enum.reverse()
end
defp unfold_leading_tokens(tokens, remaining \\ 1)
defp unfold_leading_tokens([{:space, _} | tokens], remaining) do
unfold_leading_tokens(tokens, remaining)
end
defp unfold_leading_tokens([{:nl, _} | tokens], remaining) when remaining > 0 do
unfold_leading_tokens(tokens, remaining - 1)
end
defp unfold_leading_tokens([{:comment, _} | tokens], remaining) when remaining > 0 do
unfold_leading_tokens(tokens, remaining - 1)
end
defp unfold_leading_tokens(tokens, 0) do
tokens
end
defp trim_leading_space(tokens, remaining \\ 0)
defp trim_leading_space([{:space, _} | tokens], remaining) do
trim_leading_space(tokens, remaining)
end
defp trim_leading_space([{:nl, _} | tokens], remaining) when remaining > 0 do
trim_leading_space(tokens, remaining - 1)
end
defp trim_leading_space([{:comment, _} | tokens], remaining) when remaining > 0 do
trim_leading_space(tokens, remaining - 1)
end
defp trim_leading_space([{:fold, _} | tokens], remaining) do
trim_leading_space(tokens, remaining + 1)
end
defp trim_leading_space(tokens, 0) do
tokens
end
defp token_to_value({:term, value}) do
decode_term(value)
end
defp token_to_value({:dquote_string, value}) do
{:ok, %Value{value: value, type: :string}}
end
defp token_to_value({:raw_string, value}) do
{:ok, %Value{value: value, type: :string}}
end
defp decode_term("true") do
{:ok, %Value{value: true, type: :boolean}}
end
defp decode_term("false") do
{:ok, %Value{value: false, type: :boolean}}
end
defp decode_term("null") do
{:ok, %Value{type: :null, value: nil}}
end
defp decode_term(<<"0b", rest::binary>>) do
decode_bin_integer(rest)
end
defp decode_term(<<"0o", rest::binary>>) do
decode_oct_integer(rest)
end
defp decode_term(<<"0x", rest::binary>>) do
decode_hex_integer(rest)
end
defp decode_term("") do
{:error, :no_term}
end
defp decode_term(term) do
case decode_dec_integer(term) do
{:ok, value} ->
{:ok, value}
{:error, _} ->
case decode_float(term) do
{:ok, value} ->
{:ok, value}
{:error, _} ->
{:ok, %Value{value: term, type: :id}}
end
end
end
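# For reference, `decode_term/1` maps KDL literals to typed values; a sketch
# based on the constructors below:
#
#   "0b1010" -> %Value{value: 10, type: :integer, format: :bin}
#   "0o17"   -> %Value{value: 15, type: :integer, format: :oct}
#   "0x1A"   -> %Value{value: 26, type: :integer, format: :hex}
#   "1_000"  -> %Value{value: 1000, type: :integer, format: :dec}
#   "1.5"    -> %Value{value: Decimal.new("1.5"), type: :float}
#   "hello"  -> %Value{value: "hello", type: :id}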
defp decode_bin_integer(bin, state \\ :start, acc \\ [])
defp decode_bin_integer(<<>>, :start, _acc) do
{:error, :invalid_bin_integer_format}
end
defp decode_bin_integer(<<"_", rest::binary>>, :body, acc) do
decode_bin_integer(rest, :body, acc)
end
defp decode_bin_integer(<<c::utf8, rest::binary>>, _, acc) when c in [?0, ?1] do
decode_bin_integer(rest, :body, [<<c::utf8>> | acc])
end
defp decode_bin_integer(<<_::utf8, _rest::binary>>, _, _acc) do
{:error, :invalid_bin_integer_format}
end
defp decode_bin_integer(<<>>, :body, acc) do
case decode_integer(acc, 2) do
{:ok, value} ->
{:ok, %{value | format: :bin}}
{:error, _} = err ->
err
end
end
defp decode_oct_integer(bin, state \\ :start, acc \\ [])
defp decode_oct_integer(<<>>, :start, _acc) do
{:error, :invalid_oct_integer_format}
end
defp decode_oct_integer(<<"_", rest::binary>>, :body, acc) do
decode_oct_integer(rest, :body, acc)
end
defp decode_oct_integer(<<c::utf8, rest::binary>>, _, acc) when c in ?0..?7 do
decode_oct_integer(rest, :body, [<<c::utf8>> | acc])
end
defp decode_oct_integer(<<_::utf8, _rest::binary>>, _, _acc) do
{:error, :invalid_oct_integer_format}
end
defp decode_oct_integer(<<>>, :body, acc) do
case decode_integer(acc, 8) do
{:ok, value} ->
{:ok, %{value | format: :oct}}
{:error, _} = err ->
err
end
end
defp decode_dec_integer(bin, state \\ :start, acc \\ [])
defp decode_dec_integer(<<>>, :start, _acc) do
{:error, :invalid_dec_integer_format}
end
defp decode_dec_integer(<<"_", rest::binary>>, :body, acc) do
decode_dec_integer(rest, :body, acc)
end
defp decode_dec_integer(<<c::utf8, rest::binary>>, :start, acc) when c in [?+, ?-] do
decode_dec_integer(rest, :start, [<<c::utf8>> | acc])
end
defp decode_dec_integer(<<c::utf8, rest::binary>>, _, acc) when c in ?0..?9 do
decode_dec_integer(rest, :body, [<<c::utf8>> | acc])
end
defp decode_dec_integer(<<_::utf8, _rest::binary>>, _, _acc) do
{:error, :invalid_dec_integer_format}
end
defp decode_dec_integer(<<>>, :body, acc) do
case decode_integer(acc, 10) do
{:ok, value} ->
{:ok, %{value | format: :dec}}
{:error, _} = err ->
err
end
end
defp decode_hex_integer(bin, state \\ :start, acc \\ [])
defp decode_hex_integer(<<>>, :start, _acc) do
{:error, :invalid_hex_integer_format}
end
defp decode_hex_integer(<<"_", rest::binary>>, :body, acc) do
decode_hex_integer(rest, :body, acc)
end
defp decode_hex_integer(<<c::utf8, rest::binary>>, _, acc) when c in ?0..?9 or
c in ?A..?F or
c in ?a..?f do
decode_hex_integer(rest, :body, [<<c::utf8>> | acc])
end
defp decode_hex_integer(<<_::utf8, _rest::binary>>, _, _acc) do
{:error, :invalid_hex_integer_format}
end
defp decode_hex_integer(<<>>, :body, acc) do
case decode_integer(acc, 16) do
{:ok, value} ->
{:ok, %{value | format: :hex}}
{:error, _} = err ->
err
end
end
defp decode_integer(acc, radix) do
case Integer.parse(IO.iodata_to_binary(Enum.reverse(acc)), radix) do
{int, ""} ->
{:ok, %Value{value: int, type: :integer}}
{_int, _} ->
{:error, :invalid_integer_format}
:error ->
{:error, :invalid_integer_format}
end
end
defp decode_float(value) do
case parse_float_string(value) do
{:ok, value} ->
case Decimal.parse(value) do
{:ok, %Decimal{} = decimal} ->
{:ok, %Value{value: decimal, type: :float}}
{%Decimal{} = decimal, ""} ->
{:ok, %Value{value: decimal, type: :float}}
{%Decimal{}, _} ->
{:error, :invalid_float_format}
:error ->
{:error, :invalid_float_format}
end
{:error, _} = err ->
err
end
end
defp handle_slashdashes([:slashdash, _term | tokens], acc) do
handle_slashdashes(tokens, acc)
end
defp handle_slashdashes([:slashdash], acc) do
handle_slashdashes([], acc)
end
defp handle_slashdashes([term | tokens], acc) do
handle_slashdashes(tokens, [term | acc])
end
defp handle_slashdashes([], acc) do
Enum.reverse(acc)
end
end
# file: lib/kuddle/decoder.ex
import TypeClass
defclass Witchcraft.Monoid do
@moduledoc ~S"""
Monoid extends the semigroup with the concept of an "empty" or "zero" element.
## Type Class
An instance of `Witchcraft.Monoid` must also implement `Witchcraft.Semigroup`,
and define `Witchcraft.Monoid.empty/1`.
Semigroup [append/2]
↓
Monoid [empty/1]
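
## Example

The identity laws in action (a sketch, assuming `use Witchcraft` brings
`append/2` and `empty/1` into scope):

    use Witchcraft

    append(empty([1, 2]), [1, 2])
    #=> [1, 2]

    append("hi", empty("hi"))
    #=> "hi"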
"""
alias __MODULE__
extend Witchcraft.Semigroup, alias: true
use Witchcraft.Internal, deps: [Witchcraft.Semigroup]
@type t :: any()
where do
@doc ~S"""
An "emptied out" or "starting position" of the passed data.
## Example
iex> empty(10)
0
iex> empty [1, 2, 3, 4, 5]
[]
"""
def empty(sample)
end
defalias zero(sample), as: :empty
@doc """
Check if a value is the empty element of that type.
## Examples
iex> empty?([])
true
iex> empty?([1])
false
"""
@spec empty?(Monoid.t()) :: boolean
def empty?(monoid), do: empty(monoid) == monoid
properties do
def left_identity(data) do
a = generate(data)
if is_function(a) do
equal?(Semigroup.append(Monoid.empty(a), a).("foo"), a.("foo"))
else
equal?(Semigroup.append(Monoid.empty(a), a), a)
end
end
def right_identity(data) do
a = generate(data)
if is_function(a) do
Semigroup.append(a, Monoid.empty(a)).("foo") == a.("foo")
else
Semigroup.append(a, Monoid.empty(a)) == a
end
end
end
end
definst Witchcraft.Monoid, for: Function do
def empty(sample) when is_function(sample), do: &Quark.id/1
end
definst Witchcraft.Monoid, for: Integer do
def empty(_), do: 0
end
definst Witchcraft.Monoid, for: Float do
def empty(_), do: 0.0
end
definst Witchcraft.Monoid, for: BitString do
def empty(_), do: ""
end
definst Witchcraft.Monoid, for: List do
def empty(_), do: []
end
definst Witchcraft.Monoid, for: Map do
def empty(_), do: %{}
end
definst Witchcraft.Monoid, for: Tuple do
custom_generator(_) do
{}
end
def empty(sample), do: Witchcraft.Functor.map(sample, &Witchcraft.Monoid.empty/1)
end
definst Witchcraft.Monoid, for: MapSet do
def empty(_), do: MapSet.new()
end
definst Witchcraft.Monoid, for: Witchcraft.Unit do
require Witchcraft.Semigroup
def empty(_), do: %Witchcraft.Unit{}
end
# file: lib/witchcraft/monoid.ex
defmodule Mox.Server do
@moduledoc false
use GenServer
@timeout 30000
# API
def start_link(_options) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
def add_expectation(owner_pid, key, value) do
GenServer.call(__MODULE__, {:add_expectation, owner_pid, key, value}, @timeout)
end
def fetch_fun_to_dispatch(caller_pid, key) do
GenServer.call(__MODULE__, {:fetch_fun_to_dispatch, caller_pid, key}, @timeout)
end
def verify(owner_pid, for) do
GenServer.call(__MODULE__, {:verify, owner_pid, for}, @timeout)
end
def verify_on_exit(pid) do
GenServer.call(__MODULE__, {:verify_on_exit, pid}, @timeout)
end
def allow(mock, owner_pid, pid) do
GenServer.call(__MODULE__, {:allow, mock, owner_pid, pid}, @timeout)
end
def exit(pid) do
GenServer.cast(__MODULE__, {:exit, pid})
end
def set_mode(owner_pid, mode) do
GenServer.call(__MODULE__, {:set_mode, owner_pid, mode})
end
# Callbacks
def init(:ok) do
{:ok, %{expectations: %{}, allowances: %{}, deps: %{}, mode: :private, global_owner_pid: nil}}
end
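  # For reference, a sketch of the state shape (inferred from the handlers
  # below; expectation keys are `{mock, _, _}` tuples):
  #
  #   %{
  #     expectations: %{owner_pid => %{key => {total, pending_calls, stub}}},
  #     allowances: %{allowed_pid => %{mock => owner_pid}},
  #     deps: %{monitored_pid => {:DOWN | :EXIT, [{allowed_pid, mock}]}},
  #     mode: :private | :global,
  #     global_owner_pid: pid() | nil
  #   }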
def handle_call(
{:add_expectation, owner_pid, {mock, _, _} = key, expectation},
_from,
%{mode: :private} = state
) do
if allowance = state.allowances[owner_pid][mock] do
{:reply, {:error, {:currently_allowed, allowance}}, state}
else
state = maybe_add_and_monitor_pid(state, owner_pid)
state =
update_in(state, [:expectations, pid_map(owner_pid)], fn owned_expectations ->
Map.update(owned_expectations, key, expectation, &merge_expectation(&1, expectation))
end)
{:reply, :ok, state}
end
end
def handle_call(
{:add_expectation, owner_pid, {_mock, _, _} = key, expectation},
_from,
%{mode: :global, global_owner_pid: global_owner_pid} = state
) do
if owner_pid != global_owner_pid do
{:reply, {:error, {:not_global_owner, global_owner_pid}}, state}
else
state =
update_in(state, [:expectations, pid_map(owner_pid)], fn owned_expectations ->
Map.update(owned_expectations, key, expectation, &merge_expectation(&1, expectation))
end)
{:reply, :ok, state}
end
end
def handle_call(
{:fetch_fun_to_dispatch, caller_pid, {mock, _, _} = key},
_from,
%{mode: :private} = state
) do
owner_pid = state.allowances[caller_pid][mock] || caller_pid
case state.expectations[owner_pid][key] do
nil ->
{:reply, :no_expectation, state}
{total, [], nil} ->
{:reply, {:out_of_expectations, total}, state}
{_, [], stub} ->
{:reply, {:ok, stub}, state}
{total, [call | calls], stub} ->
new_state = put_in(state.expectations[owner_pid][key], {total, calls, stub})
{:reply, {:ok, call}, new_state}
end
end
def handle_call(
{:fetch_fun_to_dispatch, _caller_pid, {_mock, _, _} = key},
_from,
%{mode: :global} = state
) do
case state.expectations[state.global_owner_pid][key] do
nil ->
{:reply, :no_expectation, state}
{total, [], nil} ->
{:reply, {:out_of_expectations, total}, state}
{_, [], stub} ->
{:reply, {:ok, stub}, state}
{total, [call | calls], stub} ->
new_state = put_in(state.expectations[state.global_owner_pid][key], {total, calls, stub})
{:reply, {:ok, call}, new_state}
end
end
def handle_call({:verify, owner_pid, mock}, _from, state) do
expectations = state.expectations[owner_pid] || %{}
pending =
for {{module, _, _} = key, {count, [_ | _] = calls, _stub}} <- expectations,
module == mock or mock == :all do
{key, count, length(calls)}
end
{:reply, pending, state}
end
def handle_call({:verify_on_exit, pid}, _from, state) do
state = maybe_add_and_monitor_pid(state, pid, :EXIT, fn {_, deps} -> {:EXIT, deps} end)
{:reply, :ok, state}
end
def handle_call({:allow, _, _, _}, _from, %{mode: :global} = state) do
{:reply, {:error, :in_global_mode}, state}
end
def handle_call({:allow, mock, owner_pid, pid}, _from, %{mode: :private} = state) do
%{allowances: allowances, expectations: expectations} = state
owner_pid = state.allowances[owner_pid][mock] || owner_pid
allowance = allowances[pid][mock]
cond do
Map.has_key?(expectations, pid) ->
{:reply, {:error, :expectations_defined}, state}
allowance && allowance != owner_pid ->
{:reply, {:error, {:already_allowed, allowance}}, state}
true ->
state =
maybe_add_and_monitor_pid(state, owner_pid, :DOWN, fn {on, deps} ->
{on, [{pid, mock} | deps]}
end)
state = put_in(state, [:allowances, pid_map(pid), mock], owner_pid)
{:reply, :ok, state}
end
end
def handle_call({:set_mode, owner_pid, :global}, _from, state) do
state = maybe_add_and_monitor_pid(state, owner_pid)
{:reply, :ok, %{state | mode: :global, global_owner_pid: owner_pid}}
end
def handle_call({:set_mode, _owner_pid, :private}, _from, state) do
{:reply, :ok, %{state | mode: :private, global_owner_pid: nil}}
end
def handle_cast({:exit, pid}, state) do
{:noreply, down(state, pid)}
end
def handle_info({:DOWN, _, _, pid, _}, state) do
state =
case state.global_owner_pid do
^pid -> %{state | mode: :private, global_owner_pid: nil}
_ -> state
end
state =
case state.deps do
%{^pid => {:DOWN, _}} -> down(state, pid)
%{} -> state
end
{:noreply, state}
end
# Helper functions
defp down(state, pid) do
{{_, deps}, state} = pop_in(state.deps[pid])
{_, state} = pop_in(state.expectations[pid])
{_, state} = pop_in(state.allowances[pid])
Enum.reduce(deps, state, fn {pid, mock}, acc ->
acc.allowances[pid][mock] |> pop_in() |> elem(1)
end)
end
defp pid_map(pid) do
Access.key(pid, %{})
end
defp maybe_add_and_monitor_pid(state, pid) do
maybe_add_and_monitor_pid(state, pid, :DOWN, nil)
end
defp maybe_add_and_monitor_pid(state, pid, on, fun) do
case state.deps do
%{^pid => entry} ->
if fun do
put_in(state.deps[pid], fun.(entry))
else
state
end
_ ->
Process.monitor(pid)
state = put_in(state.deps[pid], {on, []})
state
end
end
defp merge_expectation({current_n, current_calls, current_stub}, {n, calls, stub}) do
{current_n + n, current_calls ++ calls, stub || current_stub}
end
end
# file: lib/mox/server.ex
defmodule Phoenix.Component do
@moduledoc """
API for function components.
A function component is any function that receives
an assigns map as argument and returns a rendered
struct built with the `~H` sigil.
Here is an example:
defmodule MyComponent do
use Phoenix.Component
# Optionally also bring the HTML helpers
# use Phoenix.HTML
def greet(assigns) do
~H"\""
<p>Hello, <%= assigns.name %></p>
"\""
end
end
The component can be invoked as a regular function:
MyComponent.greet(%{name: "Jane"})
But it is typically invoked using the function component
syntax from the `~H` sigil:
~H"\""
<MyComponent.greet name="Jane" />
"\""
If the `MyComponent` module is imported or if the function
is defined locally, you can skip the module name:
~H"\""
<.greet name="Jane" />
"\""
Learn more about the `~H` sigil [in its documentation](`Phoenix.LiveView.Helpers.sigil_H/2`).
## `use Phoenix.Component`
Modules that define function components should call `use Phoenix.Component`
at the top. Doing so will import the functions from both `Phoenix.LiveView`
and `Phoenix.LiveView.Helpers` modules.
Note it is not necessary to `use Phoenix.Component` inside `Phoenix.LiveView`
and `Phoenix.LiveComponent`.
## Assigns
While inside a function component, it is recommended to use
the functions in `Phoenix.LiveView` to manipulate assigns.
For example, let's imagine a component that receives the first
name and last name and must compute the name assign. One option
would be:
def show_name(assigns) do
assigns = assign(assigns, :name, assigns.first_name <> assigns.last_name)
~H"\""
<p>Your name is: <%= @name %></p>
"\""
end
However, when possible, it may be cleaner to break the logic over function
calls instead of precomputed assigns:
def show_name(assigns) do
~H"\""
<p>Your name is: <%= full_name(@first_name, @last_name) %></p>
"\""
end
defp full_name(first_name, last_name), do: first_name <> last_name
## Blocks
It is also possible to pass HTML blocks to function components,
just as with regular HTML tags. For example, you could create a
button component that is invoked like this:
<.button>
This does <strong>inside</strong> the button!
</.button>
Where the function component would be defined as:
def button(assigns) do
~H"\""
<button class="btn">
<%= render_block(@inner_block) %>
</button>
"\""
end
Where `render_block` is defined at
`Phoenix.LiveView.Helpers.render_block/2`.
"""
@doc false
defmacro __using__(_) do
quote do
import Phoenix.LiveView
import Phoenix.LiveView.Helpers
end
end
end
# file: lib/phoenix_component.ex
defmodule Membrane.RTP.Parser do
@moduledoc """
Identifies RTP/RTCP packets, then tries to parse RTP packets (parsing the header and preparing the
payload) and forwards RTCP packets to the `:rtcp_output` pad unchanged.
## Encrypted packets
In case of SRTP/SRTCP, the parser tries to parse just the header of the RTP packet, as the packet's
payload is encrypted and must be passed as a whole to the decryptor. The whole packet remains
unchanged, but the parsed header gets attached to the `Membrane.Buffer`'s metadata.
SRTCP is treated the same as RTCP, and all such packets get forwarded to the `:rtcp_output` pad.
## Parsed packets
In both cases, encrypted and unencrypted, the parsed header is put into the `Membrane.Buffer`'s
metadata under the `:rtp` key, with the following fields: `:timestamp`, `:sequence_number`, `:ssrc`,
`:csrcs`, `:payload_type`, `:marker`, and `:extension`. See `Membrane.RTP.Header` for their meaning
and specifications.
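
## Metadata example

A sketch of how a downstream element might read the parsed header from a
buffer received on `:output` (field names follow the metadata list above):

    def handle_process(:input, %Membrane.Buffer{metadata: %{rtp: rtp}} = buffer, _ctx, state) do
      %{ssrc: _ssrc, sequence_number: _seq, timestamp: _ts} = rtp
      {{:ok, buffer: {:output, buffer}}, state}
    end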
"""
use Membrane.Filter
alias Membrane.Buffer
alias Membrane.{RTCPEvent, RTP, RemoteStream}
require Membrane.Logger
@metadata_fields [
:timestamp,
:sequence_number,
:ssrc,
:csrcs,
:payload_type,
:marker,
:extension
]
def_options secure?: [
type: :boolean,
default: false,
description: """
Specifies whether Parser should expect packets that are encrypted or not.
Requires adding [srtp](https://github.com/membraneframework/elixir_libsrtp) dependency to work.
"""
]
def_input_pad :input,
caps: {RemoteStream, type: :packetized, content_format: one_of([nil, RTP])},
demand_unit: :buffers
def_output_pad :output, caps: RTP
def_output_pad :rtcp_output, mode: :push, caps: :any, availability: :on_request
@impl true
def handle_init(opts) do
{:ok, %{rtcp_output_pad: nil, secure?: opts.secure?}}
end
@impl true
def handle_caps(:input, _caps, _ctx, state) do
{{:ok, caps: {:output, %RTP{}}}, state}
end
@impl true
def handle_pad_added(Pad.ref(:rtcp_output, _ref) = pad, _ctx, state) do
{:ok, %{state | rtcp_output_pad: pad}}
end
@impl true
def handle_process(:input, %Buffer{payload: payload, metadata: metadata} = buffer, _ctx, state) do
with :rtp <- RTP.Packet.identify(payload),
{:ok,
%{packet: packet, has_padding?: has_padding?, total_header_size: total_header_size}} <-
RTP.Packet.parse(payload, state.secure?) do
%RTP.Packet{payload: payload, header: header} = packet
rtp =
header
|> Map.take(@metadata_fields)
|> Map.merge(%{has_padding?: has_padding?, total_header_size: total_header_size})
metadata = Map.put(metadata, :rtp, rtp)
{{:ok, buffer: {:output, %Buffer{payload: payload, metadata: metadata}}}, state}
else
:rtcp ->
case state.rtcp_output_pad do
nil ->
{{:ok, redemand: :output}, state}
pad ->
{{:ok, buffer: {pad, buffer}, redemand: :output}, state}
end
{:error, reason} ->
Membrane.Logger.warn("""
Couldn't parse rtp packet:
#{inspect(payload, limit: :infinity)}
Reason: #{inspect(reason)}. Ignoring packet.
""")
{{:ok, redemand: :output}, state}
end
end
@impl true
def handle_demand(:output, size, :buffers, _ctx, state) do
{{:ok, demand: {:input, size}}, state}
end
@impl true
def handle_event(:output, %RTCPEvent{} = event, _ctx, state) do
case state.rtcp_output_pad do
nil ->
{:ok, state}
pad ->
{{:ok, event: {pad, event}}, state}
end
end
@impl true
def handle_event(pad, event, ctx, state), do: super(pad, event, ctx, state)
end
# file: lib/membrane/rtp/parser.ex
defmodule RedixPool do
@moduledoc """
This module provides an API for using `Redix` through a pool of workers.
## Overview
`RedixPool` is very simple: it merely wraps `Redix` with a pool of `poolboy`
workers. All function calls get passed through to a `Redix` connection.
Please see the [redix](https://github.com/whatyouhide/redix) library for
more in-depth documentation. Many of the examples in this documentation are
pulled directly from the `Redix` docs.
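
## Configuration

A sketch of the keys read through `RedixPool.Config` in this module
(`:pool_size`, `:pool_max_overflow`, and `:timeout`); the `:redix_pool`
OTP application name is an assumption:

    config :redix_pool,
      pool_size: 10,
      pool_max_overflow: 1,
      timeout: 5_000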
"""
use Application
alias RedixPool.Config
@type command :: [binary]
@pool_name :redix_pool
def start(_type, _args) do
import Supervisor.Spec, warn: false
pool_options = [
name: {:local, @pool_name},
worker_module: RedixPool.Worker,
size: Config.get(:pool_size, 10),
max_overflow: Config.get(:pool_max_overflow, 1)
]
children = [
:poolboy.child_spec(@pool_name, pool_options, [])
]
opts = [strategy: :one_for_one, name: RedixPool.Supervisor]
Supervisor.start_link(children, opts)
end
@doc"""
Wrapper to call `Redix.command/3` inside a poolboy worker.
## Examples
iex> RedixPool.command(["SET", "k", "foo"])
{:ok, "OK"}
iex> RedixPool.command(["GET", "k"])
{:ok, "foo"}
"""
@spec command(command, Keyword.t) ::
{:ok, [Redix.Protocol.redis_value]} | {:error, atom | Redix.Error.t}
def command(args, opts \\ []) do
:poolboy.transaction(
@pool_name,
fn(worker) -> GenServer.call(worker, {:command, args, opts}) end,
Config.get(:timeout, 5000)
)
end
@doc"""
Wrapper to call `Redix.command!/3` inside a poolboy worker, raising if
there's an error.
## Examples
iex> RedixPool.command!(["SET", "k", "foo"])
"OK"
iex> RedixPool.command!(["GET", "k"])
"foo"
"""
@spec command!(command, Keyword.t) :: Redix.Protocol.redis_value | no_return
def command!(args, opts \\ []) do
:poolboy.transaction(
@pool_name,
fn(worker) -> GenServer.call(worker, {:command!, args, opts}) end,
Config.get(:timeout, 5000)
)
end
@doc"""
Wrapper to call `Redix.pipeline/3` inside a poolboy worker.
## Examples
iex> RedixPool.pipeline([["INCR", "mykey"], ["INCR", "mykey"], ["DECR", "mykey"]])
{:ok, [1, 2, 1]}
iex> RedixPool.pipeline([["SET", "k", "foo"], ["INCR", "k"], ["GET", "k"]])
{:ok, ["OK", %Redix.Error{message: "ERR value is not an integer or out of range"}, "foo"]}
"""
@spec pipeline([command], Keyword.t) ::
{:ok, [Redix.Protocol.redis_value]} | {:error, atom}
def pipeline(args, opts \\ []) do
:poolboy.transaction(
@pool_name,
fn(worker) -> GenServer.call(worker, {:pipeline, args, opts}) end,
Config.get(:timeout, 5000)
)
end
@doc"""
Wrapper to call `Redix.pipeline!/3` inside a poolboy worker, raising if there
are errors issuing the commands (but not if the commands are successfully
issued and result in errors).
## Examples
iex> RedixPool.pipeline!([["INCR", "mykey"], ["INCR", "mykey"], ["DECR", "mykey"]])
[1, 2, 1]
iex> RedixPool.pipeline!([["SET", "k", "foo"], ["INCR", "k"], ["GET", "k"]])
["OK", %Redix.Error{message: "ERR value is not an integer or out of range"}, "foo"]
"""
@spec pipeline!([command], Keyword.t) :: [Redix.Protocol.redis_value] | no_return
def pipeline!(args, opts \\ []) do
:poolboy.transaction(
@pool_name,
fn(worker) -> GenServer.call(worker, {:pipeline!, args, opts}) end,
Config.get(:timeout, 5000)
)
end
end
# file: lib/redix_pool.ex
defmodule OMG.Watcher.ExitProcessor.Finalizations do
@moduledoc """
Encapsulates managing and executing the behaviors related to treating exits by the child chain and watchers.
Keeps a state of exits that are in progress, updates it with news from the root chain, compares it to the
state of the ledger (`OMG.State`), and issues notifications as it finds suitable.
Should manage all kinds of exits allowed in the protocol and handle the interactions between them.
This is the functional, zero-side-effect part of the exit processor. Logic should go here:
- orchestrating the persistence of the state
- finding invalid exits, disseminating them as events according to rules
- enabling to challenge invalid exits
- figuring out critical failure of invalid exit challenging (aka `:unchallenged_exit` event)
- MoreVP protocol managing in general
For the imperative shell, see `OMG.Watcher.ExitProcessor`
"""
alias OMG.State.Transaction
alias OMG.Utxo
alias OMG.Watcher.ExitProcessor.Core
alias OMG.Watcher.ExitProcessor.ExitInfo
alias OMG.Watcher.ExitProcessor.InFlightExitInfo
use OMG.Utils.LoggerExt
@doc """
Finalize exits based on Ethereum events, removing from tracked state if valid.
Invalid finalizing exits should continue being tracked as `is_active`, to continue emitting events.
This includes non-`is_active` exits that finalize invalid, which are turned to be `is_active` now.
"""
@spec finalize_exits(Core.t(), validities :: {list(Utxo.Position.t()), list(Utxo.Position.t())}) ::
{Core.t(), list(), list()}
def finalize_exits(%Core{exits: exits} = state, {valid_finalizations, invalid}) do
# handling valid finalizations
exit_event_triggers =
valid_finalizations
|> Enum.map(fn utxo_pos ->
%ExitInfo{owner: owner, currency: currency, amount: amount} = exits[utxo_pos]
%{exit_finalized: %{owner: owner, currency: currency, amount: amount, utxo_pos: utxo_pos}}
end)
new_exits_kv_pairs =
exits
|> Map.take(valid_finalizations)
|> Enum.into(%{}, fn {utxo_pos, exit_info} -> {utxo_pos, %ExitInfo{exit_info | is_active: false}} end)
new_state1 = %{state | exits: Map.merge(exits, new_exits_kv_pairs)}
db_updates = new_exits_kv_pairs |> Enum.map(&ExitInfo.make_db_update/1)
# invalid ones - activating, in case they were inactive, to keep being invalid forever
{new_state2, activating_db_updates} = activate_on_invalid_finalization(new_state1, invalid)
{new_state2, exit_event_triggers, db_updates ++ activating_db_updates}
end
defp activate_on_invalid_finalization(%Core{exits: exits} = state, invalid_finalizations) do
exits_to_activate =
exits
|> Map.take(invalid_finalizations)
|> Enum.map(fn {k, v} -> {k, Map.update!(v, :is_active, fn _ -> true end)} end)
|> Map.new()
activating_db_updates =
exits_to_activate
|> Enum.map(&ExitInfo.make_db_update/1)
state = %{state | exits: Map.merge(exits, exits_to_activate)}
{state, activating_db_updates}
end
@doc """
Returns a tuple of `{:ok, exits_by_ife_id}`, where the map's keys are in-flight exit ids and the values
are `{finalized_input_exits, finalized_output_exits}` tuples.
Both the finalized input exits and the finalized output exits structures fit into `OMG.State.exit_utxos/1`.
When there are invalid finalizations, returns one of the following:
- {:unknown_piggybacks, list of piggybacks that exit processor state is not aware of}
- {:unknown_in_flight_exit, set of in-flight exit ids that exit processor is not aware of}
"""
@spec prepare_utxo_exits_for_in_flight_exit_finalizations(Core.t(), [map()]) ::
{:ok, map()}
| {:unknown_piggybacks, list()}
| {:unknown_in_flight_exit, MapSet.t(non_neg_integer())}
def prepare_utxo_exits_for_in_flight_exit_finalizations(%Core{in_flight_exits: ifes}, finalizations) do
finalizations = finalizations |> Enum.map(&ife_id_to_binary/1)
with {:ok, ifes_by_id} <- get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes),
{:ok, []} <- known_piggybacks?(finalizations, ifes_by_id) do
{exits_by_ife_id, _} =
finalizations
|> Enum.reduce({%{}, ifes_by_id}, &prepare_utxo_exits_for_finalization/2)
{:ok, exits_by_ife_id}
end
end
# converts from int, which is how the contract serves it
defp ife_id_to_binary(finalization),
do: Map.update!(finalization, :in_flight_exit_id, fn id -> <<id::192>> end)
defp get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes) do
finalizations_ids =
finalizations
|> Enum.map(fn %{in_flight_exit_id: id} -> id end)
|> MapSet.new()
by_contract_id =
ifes
|> Enum.map(fn {tx_hash, %InFlightExitInfo{contract_id: id} = ife} -> {id, {tx_hash, ife}} end)
|> Map.new()
known_ifes =
by_contract_id
|> Map.keys()
|> MapSet.new()
unknown_ifes = MapSet.difference(finalizations_ids, known_ifes)
if Enum.empty?(unknown_ifes) do
{:ok, by_contract_id}
else
{:unknown_in_flight_exit, unknown_ifes}
end
end
defp known_piggybacks?(finalizations, ifes_by_id) do
not_piggybacked =
finalizations
|> Enum.filter(fn %{in_flight_exit_id: ife_id, output_index: output} ->
{_, ife} = Map.get(ifes_by_id, ife_id)
not InFlightExitInfo.is_piggybacked?(ife, output)
end)
if Enum.empty?(not_piggybacked) do
{:ok, []}
else
{:unknown_piggybacks, not_piggybacked}
end
end
defp prepare_utxo_exits_for_finalization(
%{in_flight_exit_id: ife_id, output_index: output},
{exits, ifes_by_id}
) do
{tx_hash, ife} = Map.get(ifes_by_id, ife_id)
# a runtime sanity check - if this were false it would mean all piggybacks finalized so contract wouldn't allow that
true = InFlightExitInfo.is_active?(ife, output)
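# piggyback indices below 4 refer to transaction inputs; 4 and above refer to outputs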
{input_exits, output_exits} =
if output >= 4 do
{[], [%{tx_hash: tx_hash, output_index: output}]}
else
%InFlightExitInfo{tx: %Transaction.Signed{raw_tx: tx}} = ife
input_exit = tx |> Transaction.get_inputs() |> Enum.at(output)
{[input_exit], []}
end
{input_exits_acc, output_exits_acc} = Map.get(exits, ife_id, {[], []})
exits = Map.put(exits, ife_id, {input_exits ++ input_exits_acc, output_exits ++ output_exits_acc})
{exits, ifes_by_id}
end
@doc """
Finalizes in-flight exits.
Returns a tuple of {:ok, updated state, database updates}.
When there are invalid finalizations, returns one of the following:
- {:unknown_piggybacks, list of piggybacks that exit processor state is not aware of}
- {:unknown_in_flight_exit, set of in-flight exit ids that exit processor is not aware of}
"""
@spec finalize_in_flight_exits(Core.t(), [map()], map()) ::
{:ok, Core.t(), list()}
| {:unknown_piggybacks, list()}
| {:unknown_in_flight_exit, MapSet.t(non_neg_integer())}
def finalize_in_flight_exits(%Core{in_flight_exits: ifes} = state, finalizations, invalidities_by_ife_id) do
# convert ife_id from int (given by contract) to a binary
finalizations =
finalizations
|> Enum.map(fn %{in_flight_exit_id: id} = map -> Map.replace!(map, :in_flight_exit_id, <<id::192>>) end)
with {:ok, ifes_by_id} <- get_all_finalized_ifes_by_ife_contract_id(finalizations, ifes),
{:ok, []} <- known_piggybacks?(finalizations, ifes_by_id) do
{ifes_by_id, updated_ifes} =
finalizations
|> Enum.reduce({ifes_by_id, MapSet.new()}, &finalize_single_exit/2)
|> activate_on_invalid_utxo_exits(invalidities_by_ife_id)
db_updates =
Map.new(ifes_by_id)
|> Map.take(updated_ifes)
|> Enum.map(fn {_, value} -> value end)
|> Enum.map(&InFlightExitInfo.make_db_update/1)
ifes =
ifes_by_id
|> Enum.map(fn {_, value} -> value end)
|> Map.new()
{:ok, %{state | in_flight_exits: ifes}, db_updates}
end
end
defp finalize_single_exit(
%{in_flight_exit_id: ife_id, output_index: output},
{ifes_by_id, updated_ifes}
) do
{tx_hash, ife} = Map.get(ifes_by_id, ife_id)
if InFlightExitInfo.is_active?(ife, output) do
{:ok, finalized_ife} = InFlightExitInfo.finalize(ife, output)
ifes_by_id = Map.put(ifes_by_id, ife_id, {tx_hash, finalized_ife})
updated_ifes = MapSet.put(updated_ifes, ife_id)
{ifes_by_id, updated_ifes}
else
{ifes_by_id, updated_ifes}
end
end
defp activate_on_invalid_utxo_exits({ifes_by_id, updated_ifes}, invalidities_by_ife_id) do
ifes_to_activate =
invalidities_by_ife_id
|> Enum.filter(fn {_ife_id, invalidities} -> not Enum.empty?(invalidities) end)
|> Enum.map(fn {ife_id, _invalidities} -> ife_id end)
|> MapSet.new()
ifes_by_id = Enum.map(ifes_by_id, &activate_in_flight_exit(&1, ifes_to_activate))
updated_ifes = MapSet.to_list(ifes_to_activate) ++ MapSet.to_list(updated_ifes)
updated_ifes = MapSet.new(updated_ifes)
{ifes_by_id, updated_ifes}
end
defp activate_in_flight_exit({ife_id, {tx_hash, ife}}, ifes_to_activate) do
if MapSet.member?(ifes_to_activate, ife_id) do
activated_ife = InFlightExitInfo.activate(ife)
{ife_id, {tx_hash, activated_ife}}
else
{ife_id, {tx_hash, ife}}
end
end
end
# file: apps/omg_watcher/lib/omg_watcher/exit_processor/finalizations.ex
defmodule AWS.CodeCommit do
@moduledoc """
AWS CodeCommit
This is the *AWS CodeCommit API Reference*.
This reference provides descriptions of the operations and data types for the
AWS CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
* `BatchGetRepositories`, which returns information about one or
more repositories associated with your AWS account.
* `CreateRepository`, which creates an AWS CodeCommit repository.
* `DeleteRepository`, which deletes an AWS CodeCommit repository.
* `GetRepository`, which returns information about a specified
repository.
* `ListRepositories`, which lists all AWS CodeCommit repositories
associated with your AWS account.
* `UpdateRepositoryDescription`, which sets or updates the
description of the repository.
* `UpdateRepositoryName`, which changes the name of the repository.
If you change the name of a repository, no other users of that repository can
access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
* `CreateBranch`, which creates a branch in a specified repository.
* `DeleteBranch`, which deletes the specified branch in a repository
unless it is the default branch.
* `GetBranch`, which returns information about a specified branch.
* `ListBranches`, which lists all branches for a specified
repository.
* `UpdateDefaultBranch`, which changes the default branch for a
repository.
Files, by calling the following:
* `DeleteFile`, which deletes the content of a specified file from a
specified branch.
* `GetBlob`, which returns the base-64 encoded content of an
individual Git blob object in a repository.
* `GetFile`, which returns the base-64 encoded content of a
specified file.
* `GetFolder`, which returns the contents of a specified folder or
directory.
* `PutFile`, which adds or modifies a single file in a specified
repository and branch.
Commits, by calling the following:
* `BatchGetCommits`, which returns information about one or more
commits in a repository.
* `CreateCommit`, which creates a commit for changes to a
repository.
* `GetCommit`, which returns information about a commit, including
commit messages and author and committer information.
* `GetDifferences`, which returns information about the differences
in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other
fully qualified reference).
Merges, by calling the following:
* `BatchDescribeMergeConflicts`, which returns information about
conflicts in a merge between commits in a repository.
* `CreateUnreferencedMergeCommit`, which creates an unreferenced
commit between two branches or commits for the purpose of comparing them and
identifying any potential conflicts.
* `DescribeMergeConflicts`, which returns information about merge
conflicts between the base, source, and destination versions of a file in a
potential merge.
* `GetMergeCommit`, which returns information about the merge
between a source and destination commit.
* `GetMergeConflicts`, which returns information about merge
conflicts between the source and destination branch in a pull request.
* `GetMergeOptions`, which returns information about the available
merge options between two branches or commit specifiers.
* `MergeBranchesByFastForward`, which merges two branches using the
fast-forward merge option.
* `MergeBranchesBySquash`, which merges two branches using the
squash merge option.
* `MergeBranchesByThreeWay`, which merges two branches using the
three-way merge option.
Pull requests, by calling the following:
* `CreatePullRequest`, which creates a pull request in a specified
repository.
* `CreatePullRequestApprovalRule`, which creates an approval rule
for a specified pull request.
* `DeletePullRequestApprovalRule`, which deletes an approval rule
for a specified pull request.
* `DescribePullRequestEvents`, which returns information about one
or more pull request events.
* `EvaluatePullRequestApprovalRules`, which evaluates whether a pull
request has met all the conditions specified in its associated approval rules.
* `GetCommentsForPullRequest`, which returns information about
comments on a specified pull request.
* `GetPullRequest`, which returns information about a specified pull
request.
* `GetPullRequestApprovalStates`, which returns information about
the approval states for a specified pull request.
* `GetPullRequestOverrideState`, which returns information about
whether approval rules have been set aside (overridden) for a pull request, and
if so, the Amazon Resource Name (ARN) of the user or identity that overrode the
rules and their requirements for the pull request.
* `ListPullRequests`, which lists all pull requests for a
repository.
* `MergePullRequestByFastForward`, which merges the source branch of
a pull request into the specified destination branch for that pull request
using the fast-forward merge option.
* `MergePullRequestBySquash`, which merges the source branch of a pull
request into the specified destination branch for that pull request using the
squash merge option.
* `MergePullRequestByThreeWay`, which merges the source branch of a pull
request into the specified destination branch for that pull request using the
three-way merge option.
* `OverridePullRequestApprovalRules`, which sets aside all approval
rule requirements for a pull request.
* `PostCommentForPullRequest`, which posts a comment to a pull
request at the specified line, file, or request.
* `UpdatePullRequestApprovalRuleContent`, which updates the
structure of an approval rule for a pull request.
* `UpdatePullRequestApprovalState`, which updates the state of an
approval on a pull request.
* `UpdatePullRequestDescription`, which updates the description of a
pull request.
* `UpdatePullRequestStatus`, which updates the status of a pull
request.
* `UpdatePullRequestTitle`, which updates the title of a pull
request.
Approval rule templates, by calling the following:
* `AssociateApprovalRuleTemplateWithRepository`, which associates a
template with a specified repository. After the template is associated with a
repository, AWS CodeCommit creates approval rules that match the template
conditions on every pull request created in the specified repository.
* `BatchAssociateApprovalRuleTemplateWithRepositories`, which
associates a template with one or more specified repositories. After the
template is associated with a repository, AWS CodeCommit creates approval rules
that match the template conditions on every pull request created in the
specified repositories.
* `BatchDisassociateApprovalRuleTemplateFromRepositories`, which
removes the association between a template and specified repositories so that
approval rules based on the template are not automatically created when pull
requests are created in those repositories.
* `CreateApprovalRuleTemplate`, which creates a template for
approval rules that can then be associated with one or more repositories in your
AWS account.
* `DeleteApprovalRuleTemplate`, which deletes the specified
template. It does not remove approval rules on pull requests already created
with the template.
* `DisassociateApprovalRuleTemplateFromRepository`, which removes
the association between a template and a repository so that approval rules based
on the template are not automatically created when pull requests are created in
the specified repository.
* `GetApprovalRuleTemplate`, which returns information about an
approval rule template.
* `ListApprovalRuleTemplates`, which lists all approval rule
templates in the AWS Region in your AWS account.
* `ListAssociatedApprovalRuleTemplatesForRepository`, which lists
all approval rule templates that are associated with a specified repository.
* `ListRepositoriesForApprovalRuleTemplate`, which lists all
repositories associated with the specified approval rule template.
* `UpdateApprovalRuleTemplateDescription`, which updates the
description of an approval rule template.
* `UpdateApprovalRuleTemplateName`, which updates the name of an
approval rule template.
* `UpdateApprovalRuleTemplateContent`, which updates the content of
an approval rule template.
Comments in a repository, by calling the following:
* `DeleteCommentContent`, which deletes the content of a comment on
a commit in a repository.
* `GetComment`, which returns information about a comment on a
commit.
* `GetCommentReactions`, which returns information about emoji
reactions to comments.
* `GetCommentsForComparedCommit`, which returns information about
comments on the comparison between two commit specifiers in a repository.
* `PostCommentForComparedCommit`, which creates a comment on the
comparison between two commit specifiers in a repository.
* `PostCommentReply`, which creates a reply to a comment.
* `PutCommentReaction`, which creates or updates an emoji reaction
to a comment.
* `UpdateComment`, which updates the content of a comment on a
commit in a repository.
Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the
following:
* `ListTagsForResource`, which gets information about AWS tags for a
specified Amazon Resource Name (ARN) in AWS CodeCommit.
* `TagResource`, which adds or updates tags for a resource in AWS
CodeCommit.
* `UntagResource`, which removes tags for a resource in AWS
CodeCommit.
Triggers, by calling the following:
* `GetRepositoryTriggers`, which returns information about triggers
configured for a repository.
* `PutRepositoryTriggers`, which replaces all triggers for a
repository and can be used to create or delete triggers.
* `TestRepositoryTriggers`, which tests the functionality of a
repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the [AWS CodeCommit User Guide](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html).
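
## Usage

Every function in this module takes a configured `AWS.Client` struct as its
first argument and a map of request parameters as its second. A minimal,
illustrative call (how the client struct is built depends on your
application; the parameter name follows the CodeCommit JSON API):

    {:ok, result, _http_response} =
      get_repository(client, %{"repositoryName" => "MyDemoRepo"})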
"""
@doc """
Creates an association between an approval rule template and a specified
repository.
Then, the next time a pull request is created in the repository where the
destination reference (if specified) matches the destination reference (branch)
for the pull request, an approval rule that matches the template conditions is
automatically created for that pull request. If no destination references are
specified in the template, an approval rule that matches the template contents
is created for all pull requests in that repository.
"""
def associate_approval_rule_template_with_repository(client, input, options \\ []) do
request(client, "AssociateApprovalRuleTemplateWithRepository", input, options)
end
@doc """
Creates an association between an approval rule template and one or more
specified repositories.
"""
def batch_associate_approval_rule_template_with_repositories(client, input, options \\ []) do
request(client, "BatchAssociateApprovalRuleTemplateWithRepositories", input, options)
end
@doc """
Returns information about one or more merge conflicts in the attempted merge of
two commit specifiers using the squash or three-way merge strategy.
"""
def batch_describe_merge_conflicts(client, input, options \\ []) do
request(client, "BatchDescribeMergeConflicts", input, options)
end
@doc """
Removes the association between an approval rule template and one or more
specified repositories.
"""
def batch_disassociate_approval_rule_template_from_repositories(client, input, options \\ []) do
request(client, "BatchDisassociateApprovalRuleTemplateFromRepositories", input, options)
end
@doc """
Returns information about the contents of one or more commits in a repository.
"""
def batch_get_commits(client, input, options \\ []) do
request(client, "BatchGetCommits", input, options)
end
@doc """
Returns information about one or more repositories.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def batch_get_repositories(client, input, options \\ []) do
request(client, "BatchGetRepositories", input, options)
end
@doc """
Creates a template for approval rules that can then be associated with one or
more repositories in your AWS account.
When you associate a template with a repository, AWS CodeCommit creates an
approval rule that matches the conditions of the template for all pull requests
that meet the conditions of the template. For more information, see
`AssociateApprovalRuleTemplateWithRepository`.
"""
def create_approval_rule_template(client, input, options \\ []) do
request(client, "CreateApprovalRuleTemplate", input, options)
end
@doc """
Creates a branch in a repository and points the branch to a commit.
Calling the create branch operation does not set a repository's default branch.
To do this, call the update default branch operation.
"""
def create_branch(client, input, options \\ []) do
request(client, "CreateBranch", input, options)
end
@doc """
Creates a commit for a repository on the tip of a specified branch.
"""
def create_commit(client, input, options \\ []) do
request(client, "CreateCommit", input, options)
end
@doc """
Creates a pull request in the specified repository.
"""
def create_pull_request(client, input, options \\ []) do
request(client, "CreatePullRequest", input, options)
end
@doc """
Creates an approval rule for a pull request.
"""
def create_pull_request_approval_rule(client, input, options \\ []) do
request(client, "CreatePullRequestApprovalRule", input, options)
end
@doc """
Creates a new, empty repository.
"""
def create_repository(client, input, options \\ []) do
request(client, "CreateRepository", input, options)
end
@doc """
Creates an unreferenced commit that represents the result of merging two
branches using a specified merge strategy.
This can help you determine the outcome of a potential merge. This API cannot be
used with the fast-forward merge strategy because that strategy does not create
a merge commit.
This unreferenced merge commit can only be accessed using the GetCommit API or
through git commands such as git fetch. To retrieve this commit, you must
specify its commit ID or otherwise reference it.
"""
def create_unreferenced_merge_commit(client, input, options \\ []) do
request(client, "CreateUnreferencedMergeCommit", input, options)
end
@doc """
Deletes a specified approval rule template.
Deleting a template does not remove approval rules on pull requests already
created with the template.
"""
def delete_approval_rule_template(client, input, options \\ []) do
request(client, "DeleteApprovalRuleTemplate", input, options)
end
@doc """
Deletes a branch from a repository, unless that branch is the default branch for
the repository.
"""
def delete_branch(client, input, options \\ []) do
request(client, "DeleteBranch", input, options)
end
@doc """
Deletes the content of a comment made on a change, file, or commit in a
repository.
"""
def delete_comment_content(client, input, options \\ []) do
request(client, "DeleteCommentContent", input, options)
end
@doc """
Deletes a specified file from a specified branch.
A commit is created on the branch that contains the revision. The file still
exists in commits made before the commit that contains the deletion.
"""
def delete_file(client, input, options \\ []) do
request(client, "DeleteFile", input, options)
end
@doc """
Deletes an approval rule from a specified pull request.
Approval rules can be deleted from a pull request only if the pull request is
open, and if the approval rule was created specifically for a pull request and
not generated from an approval rule template associated with the repository
where the pull request was created. You cannot delete an approval rule from a
merged or closed pull request.
"""
def delete_pull_request_approval_rule(client, input, options \\ []) do
request(client, "DeletePullRequestApprovalRule", input, options)
end
@doc """
Deletes a repository.
If a specified repository was already deleted, a null repository ID is returned.
Deleting a repository also deletes all associated objects and metadata. After a
repository is deleted, all future push calls to the deleted repository fail.
"""
def delete_repository(client, input, options \\ []) do
request(client, "DeleteRepository", input, options)
end
@doc """
Returns information about one or more merge conflicts in the attempted merge of
two commit specifiers using the squash or three-way merge strategy.
If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE,
an exception is thrown.
"""
def describe_merge_conflicts(client, input, options \\ []) do
request(client, "DescribeMergeConflicts", input, options)
end
@doc """
Returns information about one or more pull request events.
"""
def describe_pull_request_events(client, input, options \\ []) do
request(client, "DescribePullRequestEvents", input, options)
end
@doc """
Removes the association between a template and a repository so that approval
rules based on the template are not automatically created when pull requests are
created in the specified repository.
This does not delete any approval rules previously created for pull requests
through the template association.
"""
def disassociate_approval_rule_template_from_repository(client, input, options \\ []) do
request(client, "DisassociateApprovalRuleTemplateFromRepository", input, options)
end
@doc """
Evaluates whether a pull request has met all the conditions specified in its
associated approval rules.
"""
def evaluate_pull_request_approval_rules(client, input, options \\ []) do
request(client, "EvaluatePullRequestApprovalRules", input, options)
end
@doc """
Returns information about a specified approval rule template.
"""
def get_approval_rule_template(client, input, options \\ []) do
request(client, "GetApprovalRuleTemplate", input, options)
end
@doc """
Returns the base-64 encoded content of an individual blob in a repository.
"""
def get_blob(client, input, options \\ []) do
request(client, "GetBlob", input, options)
end
@doc """
Returns information about a repository branch, including its name and the last
commit ID.
"""
def get_branch(client, input, options \\ []) do
request(client, "GetBranch", input, options)
end
@doc """
Returns the content of a comment made on a change, file, or commit in a
repository.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comment(client, input, options \\ []) do
request(client, "GetComment", input, options)
end
@doc """
Returns information about reactions to a specified comment ID.
Reactions from users who have been deleted will not be included in the count.
"""
def get_comment_reactions(client, input, options \\ []) do
request(client, "GetCommentReactions", input, options)
end
@doc """
Returns information about comments made on the comparison between two commits.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comments_for_compared_commit(client, input, options \\ []) do
request(client, "GetCommentsForComparedCommit", input, options)
end
@doc """
Returns comments made on a pull request.
Reaction counts might include numbers from user identities who were deleted
after the reaction was made. For a count of reactions from active identities,
use GetCommentReactions.
"""
def get_comments_for_pull_request(client, input, options \\ []) do
request(client, "GetCommentsForPullRequest", input, options)
end
@doc """
Returns information about a commit, including commit message and committer
information.
"""
def get_commit(client, input, options \\ []) do
request(client, "GetCommit", input, options)
end
@doc """
Returns information about the differences in a valid commit specifier (such as a
branch, tag, HEAD, commit ID, or other fully qualified reference).
Results can be limited to a specified path.
"""
def get_differences(client, input, options \\ []) do
request(client, "GetDifferences", input, options)
end
@doc """
Returns the base-64 encoded contents of a specified file and its metadata.
"""
def get_file(client, input, options \\ []) do
request(client, "GetFile", input, options)
end
@doc """
Returns the contents of a specified folder in a repository.
"""
def get_folder(client, input, options \\ []) do
request(client, "GetFolder", input, options)
end
@doc """
Returns information about a specified merge commit.
"""
def get_merge_commit(client, input, options \\ []) do
request(client, "GetMergeCommit", input, options)
end
@doc """
Returns information about merge conflicts between the before and after commit
IDs for a pull request in a repository.
"""
def get_merge_conflicts(client, input, options \\ []) do
request(client, "GetMergeConflicts", input, options)
end
@doc """
Returns information about the merge options available for merging two specified
branches.
For details about why a merge option is not available, use GetMergeConflicts or
DescribeMergeConflicts.
"""
def get_merge_options(client, input, options \\ []) do
request(client, "GetMergeOptions", input, options)
end
@doc """
Gets information about a pull request in a specified repository.
"""
def get_pull_request(client, input, options \\ []) do
request(client, "GetPullRequest", input, options)
end
@doc """
Gets information about the approval states for a specified pull request.
Approval states only apply to pull requests that have one or more approval rules
applied to them.
"""
def get_pull_request_approval_states(client, input, options \\ []) do
request(client, "GetPullRequestApprovalStates", input, options)
end
@doc """
Returns information about whether approval rules have been set aside
(overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of
the user or identity that overrode the rules and their requirements for the pull
request.
"""
def get_pull_request_override_state(client, input, options \\ []) do
request(client, "GetPullRequestOverrideState", input, options)
end
@doc """
Returns information about a repository.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def get_repository(client, input, options \\ []) do
request(client, "GetRepository", input, options)
end
@doc """
Gets information about triggers configured for a repository.
"""
def get_repository_triggers(client, input, options \\ []) do
request(client, "GetRepositoryTriggers", input, options)
end
@doc """
Lists all approval rule templates in the specified AWS Region in your AWS
account.
If an AWS Region is not specified, the AWS Region where you are signed in is
used.
"""
def list_approval_rule_templates(client, input, options \\ []) do
request(client, "ListApprovalRuleTemplates", input, options)
end
@doc """
Lists all approval rule templates that are associated with a specified
repository.
"""
def list_associated_approval_rule_templates_for_repository(client, input, options \\ []) do
request(client, "ListAssociatedApprovalRuleTemplatesForRepository", input, options)
end
@doc """
Gets information about one or more branches in a repository.
"""
def list_branches(client, input, options \\ []) do
request(client, "ListBranches", input, options)
end
@doc """
Returns a list of pull requests for a specified repository.
The returned list can be refined by pull request status or pull request
author ARN.
"""
def list_pull_requests(client, input, options \\ []) do
request(client, "ListPullRequests", input, options)
end
@doc """
Gets information about one or more repositories.
"""
def list_repositories(client, input, options \\ []) do
request(client, "ListRepositories", input, options)
end
@doc """
Lists all repositories associated with the specified approval rule template.
"""
def list_repositories_for_approval_rule_template(client, input, options \\ []) do
request(client, "ListRepositoriesForApprovalRuleTemplate", input, options)
end
@doc """
Gets information about AWS tags for a specified Amazon Resource Name (ARN) in
AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
in the *AWS CodeCommit User Guide*.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Merges two branches using the fast-forward merge strategy.
"""
def merge_branches_by_fast_forward(client, input, options \\ []) do
request(client, "MergeBranchesByFastForward", input, options)
end
@doc """
Merges two branches using the squash merge strategy.
"""
def merge_branches_by_squash(client, input, options \\ []) do
request(client, "MergeBranchesBySquash", input, options)
end
@doc """
Merges two specified branches using the three-way merge strategy.
"""
def merge_branches_by_three_way(client, input, options \\ []) do
request(client, "MergeBranchesByThreeWay", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
fast-forward merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_fast_forward(client, input, options \\ []) do
request(client, "MergePullRequestByFastForward", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
squash merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_squash(client, input, options \\ []) do
request(client, "MergePullRequestBySquash", input, options)
end
@doc """
Attempts to merge the source commit of a pull request into the specified
destination branch for that pull request at the specified commit using the
three-way merge strategy.
If the merge is successful, it closes the pull request.
"""
def merge_pull_request_by_three_way(client, input, options \\ []) do
request(client, "MergePullRequestByThreeWay", input, options)
end
@doc """
Sets aside (overrides) all approval rule requirements for a specified pull
request.
"""
def override_pull_request_approval_rules(client, input, options \\ []) do
request(client, "OverridePullRequestApprovalRules", input, options)
end
@doc """
Posts a comment on the comparison between two commits.
"""
def post_comment_for_compared_commit(client, input, options \\ []) do
request(client, "PostCommentForComparedCommit", input, options)
end
@doc """
Posts a comment on a pull request.
"""
def post_comment_for_pull_request(client, input, options \\ []) do
request(client, "PostCommentForPullRequest", input, options)
end
@doc """
Posts a comment in reply to an existing comment on a comparison between commits
or a pull request.
"""
def post_comment_reply(client, input, options \\ []) do
request(client, "PostCommentReply", input, options)
end
@doc """
Adds or updates a reaction to a specified comment for the user whose identity is
used to make the request.
You can only add or update a reaction for yourself. You cannot add, modify, or
delete a reaction for another user.
"""
def put_comment_reaction(client, input, options \\ []) do
request(client, "PutCommentReaction", input, options)
end
@doc """
Adds or updates a file in a branch in an AWS CodeCommit repository, and
generates a commit for the addition in the specified branch.
"""
def put_file(client, input, options \\ []) do
request(client, "PutFile", input, options)
end
@doc """
Replaces all triggers for a repository.
Used to create or delete triggers.
"""
def put_repository_triggers(client, input, options \\ []) do
request(client, "PutRepositoryTriggers", input, options)
end
@doc """
Adds or updates tags for a resource in AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
in the *AWS CodeCommit User Guide*.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Tests the functionality of repository triggers by sending information to the
trigger target.
If real data is available in the repository, the test sends data from the last
commit. If no data is available, sample data is generated.
"""
def test_repository_triggers(client, input, options \\ []) do
request(client, "TestRepositoryTriggers", input, options)
end
@doc """
Removes tags for a resource in AWS CodeCommit.
For a list of valid resources in AWS CodeCommit, see [CodeCommit Resources and Operations](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
in the *AWS CodeCommit User Guide*.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the content of an approval rule template.
You can change the number of required approvals, the membership of the approval
rule, and whether an approval pool is defined.
"""
def update_approval_rule_template_content(client, input, options \\ []) do
request(client, "UpdateApprovalRuleTemplateContent", input, options)
end
@doc """
Updates the description for a specified approval rule template.
"""
def update_approval_rule_template_description(client, input, options \\ []) do
request(client, "UpdateApprovalRuleTemplateDescription", input, options)
end
@doc """
Updates the name of a specified approval rule template.
"""
def update_approval_rule_template_name(client, input, options \\ []) do
request(client, "UpdateApprovalRuleTemplateName", input, options)
end
@doc """
Replaces the contents of a comment.
"""
def update_comment(client, input, options \\ []) do
request(client, "UpdateComment", input, options)
end
@doc """
Sets or changes the default branch name for the specified repository.
If you use this operation to change the default branch name to the current
default branch name, a success message is returned even though the default
branch did not change.
"""
def update_default_branch(client, input, options \\ []) do
request(client, "UpdateDefaultBranch", input, options)
end
@doc """
Updates the structure of an approval rule created specifically for a pull
request.
For example, you can change the number of required approvers and the approval
pool for approvers.
"""
def update_pull_request_approval_rule_content(client, input, options \\ []) do
request(client, "UpdatePullRequestApprovalRuleContent", input, options)
end
@doc """
Updates the state of a user's approval on a pull request.
The user is derived from the signed-in account when the request is made.
"""
def update_pull_request_approval_state(client, input, options \\ []) do
request(client, "UpdatePullRequestApprovalState", input, options)
end
@doc """
Replaces the contents of the description of a pull request.
"""
def update_pull_request_description(client, input, options \\ []) do
request(client, "UpdatePullRequestDescription", input, options)
end
@doc """
Updates the status of a pull request.
"""
def update_pull_request_status(client, input, options \\ []) do
request(client, "UpdatePullRequestStatus", input, options)
end
@doc """
Replaces the title of a pull request.
"""
def update_pull_request_title(client, input, options \\ []) do
request(client, "UpdatePullRequestTitle", input, options)
end
@doc """
Sets or changes the comment or description for a repository.
The description field for a repository accepts all HTML characters and all valid
Unicode characters. Applications that do not HTML-encode the description and
display it in a webpage can expose users to potentially malicious code. Make
sure that you HTML-encode the description field in any application that uses
this API to display the repository description on a webpage.
"""
def update_repository_description(client, input, options \\ []) do
request(client, "UpdateRepositoryDescription", input, options)
end
@doc """
Renames a repository.
The repository name must be unique across the calling AWS account. Repository
names are limited to 100 alphanumeric, dash, and underscore characters, and
cannot include certain characters. The suffix .git is prohibited. For more
information about the limits on repository names, see
[Limits](https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) in
the *AWS CodeCommit User Guide*.
"""
def update_repository_name(client, input, options \\ []) do
request(client, "UpdateRepositoryName", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
  client = %{client | service: "codecommit"}
  host = build_host("codecommit", client)
  url = build_url(host, client)

  # CodeCommit is a JSON API: every operation is a POST to the same URL,
  # routed by the `X-Amz-Target` header.
  headers = [
    {"Host", host},
    {"Content-Type", "application/x-amz-json-1.1"},
    {"X-Amz-Target", "CodeCommit_20150413.#{action}"}
  ]

  payload = encode!(client, input)
  headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
  post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
  case AWS.Client.request(client, :post, url, payload, headers, options) do
    {:ok, %{status_code: 200, body: body} = response} ->
      # Some operations return an empty body; decode JSON only when present.
      body = if body != "", do: decode!(client, body)
      {:ok, body, response}

    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    error = {:error, _reason} ->
      error
  end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# source: lib/aws/generated/code_commit.ex
defmodule DataMiner.Apriori do
@moduledoc """
Implementation of the `Apriori` frequent-itemset mining algorithm.
"""
# Data paths are resolved relative to this source file.
@transactions_file Path.expand("../data/transactions_items.txt", __DIR__)
@frequencies_file Path.expand("../data/items_frequencies.txt", __DIR__)
@result_save_file Path.expand("../results/apriori_frequents.txt", __DIR__)
@doc """
Entry point for running the algorithm with a given minimum support.
The minimum support is expressed as a percentage. When the run finishes,
the frequent itemsets found by `Apriori` are saved to a file.
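
## Examples

A sketch of a run (assumes the data files referenced by the module
attributes exist; `5` means a 5% minimum support):

    DataMiner.Apriori.main(5)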
"""
def main(min_supp) do
transactions = import_transactions()
frequencies = import_frequencies()
start = Time.utc_now()
apriori(frequencies, [], transactions, min_supp, length(transactions))
|> List.flatten()
|> export_frequents()
endt = Time.utc_now()
IO.inspect("total time: #{Time.diff(endt, start)}s")
end
@doc """
Export frequents will export all frequent itemsets to a file.
"""
def export_frequents(frequents) do
{:ok, file} = File.open(@result_save_file, [:write])
Enum.each(frequents, fn {itemset, frequency} ->
Enum.each(itemset, fn item ->
IO.write(file, "#{item} | ")
end)
IO.write(file, "#{frequency}\n")
end)
end
@doc """
Implementation of the Apriori algorithm: level by level, it keeps only the
frequent itemsets, generates new candidates from them, and finally returns
every frequent itemset found as a list of `{itemset, frequency}` tuples.
"""
def apriori([], frequents, _, _, _) do
IO.inspect("ends")
frequents
end
def apriori(frequencies, frequents, transactions, min_supp, transactions_length) do
IO.inspect("apriori!")
supported_itemsets = remove_low_frequencies(transactions_length, frequencies, min_supp)
new_frequencies =
supported_itemsets
|> merge_itemsets([])
|> calculate_itemsets_frequency(transactions)
apriori(
new_frequencies,
[supported_itemsets | frequents],
transactions,
min_supp,
transactions_length
)
end
@doc """
Calculates the frequency of each candidate itemset by counting the
transactions that contain it.
## Examples
iex> DataMiner.Apriori.calculate_itemsets_frequency([["a", "b"], ["a", "c"]] |> Enum.map(&MapSet.new(&1)), [["a", "c", "d"], ["b", "c", "e"], ["a", "b", "c", "e"], ["b", "e"]] |> Enum.map(&MapSet.new(&1)))
[{["a", "b"], 1}, {["a", "c"], 2}]
"""
def calculate_itemsets_frequency(itemsets, transactions) do
IO.inspect("calculating #{length(itemsets)}")
start = Time.utc_now()
result =
itemsets
|> Flow.from_enumerable()
|> Flow.partition()
|> Flow.map(fn itemset ->
frequency =
transactions
|> Enum.reduce(0, fn transaction, acc ->
if MapSet.subset?(itemset, transaction) do
acc + 1
else
acc
end
end)
{MapSet.to_list(itemset), frequency}
end)
|> Enum.to_list()
endt = Time.utc_now()
IO.inspect("time of frequency calculating: #{Time.diff(endt, start)}s")
result
end
@doc """
Merges a list of `{itemset, frequency}` tuples into the next generation of
candidate itemsets: each itemset is merged with every later itemset that
shares its tail (see `merger/2`).
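
An illustrative call (not a doctest: the merge runs through `Flow`, so
element order may vary):

    merge_itemsets([{[1, 2], 5}, {[3, 2], 4}], [])
    #=> [MapSet.new([3, 1, 2])]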
"""
def merge_itemsets([], merged_itemsets), do: merged_itemsets |> List.flatten()
def merge_itemsets([{base_itemset, _} | tail], merged_list) do
merged =
tail
|> Flow.from_enumerable()
|> Flow.partition()
|> Flow.map(fn {itemset, _} ->
MapSet.new(merger(base_itemset, itemset))
end)
|> Flow.partition()
|> Flow.filter(fn itemset -> itemset != MapSet.new() end)
|> Enum.to_list()
merge_itemsets(tail, [merged | merged_list])
end
@doc """
merger merges two itemsets into one candidate itemset when they share the
same tail; it returns `[]` when they cannot be merged.
## Examples
iex> DataMiner.Apriori.merger([1, 2, 3], [4, 2, 3])
[4, 1, 2, 3]
"""
def merger([base_item | tail_base_itemset], [item | tail_itemset]) do
if tail_base_itemset == tail_itemset do
[item | [base_item | tail_base_itemset]]
else
[]
end
end
@doc """
Removes every itemset whose support falls below the minimum support, so
only frequent itemsets survive into the next candidate-generation step.
This enforces the downward-closure (Apriori) property.
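
## Examples

    iex> DataMiner.Apriori.remove_low_frequencies(10, [{[:a], 5}, {[:b], 1}], 30)
    [{[:a], 5}]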
"""
def remove_low_frequencies(transactions_length, frequencies, min_supp) do
frequencies
|> Enum.filter(fn {_item, frequency} ->
support(frequency, transactions_length) >= min_supp
end)
end
@doc """
Calculates the support of an itemset, as a percentage, from its frequency
and the total number of transactions.
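
## Examples

    iex> DataMiner.Apriori.support(3, 12)
    25.0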
"""
def support(item_frequency, transactions_length) do
item_frequency / transactions_length * 100
end
@doc """
Imports the item frequencies file into a map from single-item itemsets
(`[item]`) to integer frequencies.
"""
def import_frequencies do
@frequencies_file
|> import_file()
|> Enum.reduce(%{}, fn [item, freq], acc ->
Map.put(acc, [item], String.to_integer(Atom.to_string(freq)))
end)
end
@doc """
Imports the transactions file as a list of `MapSet`s, one per transaction.
"""
def import_transactions do
@transactions_file
|> import_file()
|> Enum.map(fn transaction -> MapSet.new(transaction) end)
end
@doc """
Imports a `|`-separated data file as a stream of token lists, dropping the
first (header) line.
"""
def import_file(file_address) do
File.stream!(file_address)
|> Stream.map(&String.trim/1)
|> Stream.map(fn line ->
String.split(line, "|")
|> Enum.filter(fn word -> word != "" end)
|> Enum.map(fn item -> String.to_atom(item) end)
end)
|> Stream.drop(1)
end
end
# source: data_miner/lib/apriori.ex
defmodule Bitcoinex.Secp256k1.Point do
@moduledoc """
Contains the x, y, and z of an elliptic curve point.
"""
@type t :: %__MODULE__{
x: integer(),
y: integer(),
z: integer()
}
@enforce_keys [
:x,
:y
]
defstruct [:x, :y, z: 0]
defguard is_point(term)
when is_map(term) and :erlang.map_get(:__struct__, term) == __MODULE__ and
:erlang.is_map_key(:x, term) and :erlang.is_map_key(:y, term) and
:erlang.is_map_key(:z, term)
@doc """
parse_public_key parses a public key from its SEC encoding: uncompressed
(`0x04 <> x <> y`), compressed (`0x02`/`0x03 <> x`), or a hex string of
either form.
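
A round-trip sketch (illustrative; the hex string is the compressed SEC
encoding of the secp256k1 generator point):

    "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"
    |> parse_public_key()
    |> serialize_public_key()
    #=> "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"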
"""
@spec parse_public_key(binary | String.t()) :: t()
def parse_public_key(<<0x04, x::binary-size(32), y::binary-size(32)>>) do
%__MODULE__{x: :binary.decode_unsigned(x), y: :binary.decode_unsigned(y)}
end
# Above matches with uncompressed keys. Below matches with compressed keys
def parse_public_key(<<prefix::binary-size(1), x_bytes::binary-size(32)>>) do
x = :binary.decode_unsigned(x_bytes)
case :binary.decode_unsigned(prefix) do
2 ->
{:ok, y} = Bitcoinex.Secp256k1.get_y(x, false)
%__MODULE__{x: x, y: y}
3 ->
{:ok, y} = Bitcoinex.Secp256k1.get_y(x, true)
%__MODULE__{x: x, y: y}
end
end
# Allow parse_public_key to parse SEC strings
def parse_public_key(key) do
key
|> String.downcase()
|> Base.decode16!(case: :lower)
|> parse_public_key()
end
@doc """
sec serializes a public key into its compressed SEC binary form.
"""
@spec sec(t()) :: binary
def sec(%__MODULE__{x: x, y: y}) do
case rem(y, 2) do
0 ->
<<0x02>> <> Bitcoinex.Utils.pad(:binary.encode_unsigned(x), 32, :leading)
1 ->
<<0x03>> <> Bitcoinex.Utils.pad(:binary.encode_unsigned(x), 32, :leading)
end
end
@doc """
serialize_public_key serializes a public key to its compressed SEC form as
a lowercase hex string.
"""
@spec serialize_public_key(t()) :: String.t()
def serialize_public_key(pubkey) do
pubkey
|> sec()
|> Base.encode16(case: :lower)
end
end
# source: lib/secp256k1/point.ex
defmodule AWS.S3 do
@moduledoc """
Amazon Simple Storage Service (Amazon S3).
"""
@doc """
This operation aborts a multipart upload. After a multipart upload is
aborted, no additional parts can be uploaded using that upload ID. The
storage consumed by any previously uploaded parts will be freed. However,
if any part uploads are currently in progress, those part uploads might or
might not succeed. As a result, it might be necessary to abort a given
multipart upload multiple times in order to completely free all storage
consumed by all parts.
To verify that all parts have been removed, so you don't get charged for
the part storage, you should call the `ListParts` operation and ensure that
the parts list is empty.
For information about permissions required to use the multipart upload API,
see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
The following operations are related to `AbortMultipartUpload`:

  * `CreateMultipartUpload`
  * `UploadPart`
  * `CompleteMultipartUpload`
  * `ListParts`
  * `ListMultipartUploads`
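
A minimal usage sketch (assumes a configured `AWS.Client` struct named
`client`; bucket, key, and upload ID are placeholders):

    abort_multipart_upload(client, "my-bucket", "my-key",
      %{"UploadId" => "example-upload-id"})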
"""
def abort_multipart_upload(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"UploadId", "uploadId"},
]
|> AWS.Request.build_params(input)
case request(client, :delete, path_, query_, headers, input, options, 204) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the
`UploadPart` operation. After successfully uploading all relevant parts of
an upload, you call this operation to complete the upload. Upon receiving
this request, Amazon S3 concatenates all the parts in ascending order by
part number to create a new object. In the Complete Multipart Upload
request, you must provide the parts list. You must ensure that the parts
list is complete. This operation concatenates the parts that you provide in
the list. For each part in the list, you must provide the part number and
the `ETag` value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several
minutes to complete. After Amazon S3 begins processing the request, it
sends an HTTP response header that specifies a 200 OK response. While
processing is in progress, Amazon S3 periodically sends white space
characters to keep the connection from timing out. Because a request could
fail after the initial 200 OK response has been sent, it is important that
you check the response body to determine whether the request succeeded.
Note that if `CompleteMultipartUpload` fails, applications should be
prepared to retry the failed requests. For more information, see [Amazon S3
Error Best
Practices](https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
For more information about multipart uploads, see [Uploading Objects Using
Multipart
Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
For information about permissions required to use the multipart upload API,
see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
`CompleteMultipartUpload` has the following special errors:

  * Error code: `EntityTooSmall`
    * Description: Your proposed upload is smaller than the minimum allowed
      object size. Each part must be at least 5 MB in size, except the last
      part.
    * 400 Bad Request
  * Error code: `InvalidPart`
    * Description: One or more of the specified parts could not be found.
      The part might not have been uploaded, or the specified entity tag
      might not have matched the part's entity tag.
    * 400 Bad Request
  * Error code: `InvalidPartOrder`
    * Description: The list of parts was not in ascending order. The parts
      list must be specified in order by part number.
    * 400 Bad Request
  * Error code: `NoSuchUpload`
    * Description: The specified multipart upload does not exist. The
      upload ID might be invalid, or the multipart upload might have been
      aborted or completed.
    * 404 Not Found

The following operations are related to `CompleteMultipartUpload`:

  * `CreateMultipartUpload`
  * `UploadPart`
  * `AbortMultipartUpload`
  * `ListParts`
  * `ListMultipartUploads`
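
A minimal usage sketch (assumes a configured `AWS.Client` struct named
`client`; the `"MultipartUpload"`/`"Part"` body shape is an assumption
about the XML encoding and is shown only for illustration):

    complete_multipart_upload(client, "my-bucket", "my-key",
      %{
        "UploadId" => "example-upload-id",
        "MultipartUpload" => %{
          "Part" => [%{"PartNumber" => 1, "ETag" => "\"example-etag\""}]
        }
      })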
"""
def complete_multipart_upload(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"UploadId", "uploadId"},
]
|> AWS.Request.build_params(input)
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-expiration", "Expiration"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a
copy of your object up to 5 GB in size in a single atomic operation using
this API. However, to copy an object greater than 5 GB, you must use the
multipart upload Upload Part - Copy API. For more information, see [Copy
Object Using the REST Multipart Upload
API](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
All copy requests must be authenticated. Additionally, you must
have *read* access to the source object and *write* access to the
destination bucket. For more information, see [REST
Authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
Both the Region that you want to copy the object from and the Region that
you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy
request or while Amazon S3 is copying the files. If the error occurs before
the copy operation starts, you receive a standard Amazon S3 error. If the
error occurs during the copy operation, the error response is embedded in
the `200 OK` response. This means that a `200 OK` response can contain
either a success or an error. Design your application to parse the contents
of the response and handle it appropriately.
If the copy is successful, you receive a response with information about
the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it
were not, it would not contain the content-length, and you would need to
read the entire body.
The copy request charge is based on the storage class and Region that you
specify for the destination object. For pricing information, see [Amazon S3
pricing](https://aws.amazon.com/s3/pricing/).
Amazon S3 transfer acceleration does not support cross-Region copies. If
you request a cross-Region copy using a transfer acceleration endpoint, you
get a 400 `Bad Request` error. For more information, see [Transfer
Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).

**Metadata**
When copying an object, you can preserve all metadata (default) or specify
new metadata. However, the ACL is not preserved and is set to private for
the user making the request. To override the default ACL setting, specify a
new ACL when generating a copy request. For more information, see [Using
ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
To specify whether you want the object metadata copied from the source
object or replaced with metadata provided in the request, you can
optionally add the `x-amz-metadata-directive` header. When you grant
permissions, you can use the `s3:x-amz-metadata-directive` condition key to
enforce certain metadata behavior when objects are uploaded. For more
information, see [Specifying Conditions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
in the *Amazon S3 Developer Guide*. For a complete list of Amazon
S3-specific condition keys, see [Actions, Resources, and Condition Keys for
Amazon
S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
**`x-amz-copy-source-if` Headers**

To only copy an object under certain conditions, such as whether the `Etag`
matches or whether the object was modified before or after a specified
date, use the following request parameters:

  * `x-amz-copy-source-if-match`
  * `x-amz-copy-source-if-none-match`
  * `x-amz-copy-source-if-unmodified-since`
  * `x-amz-copy-source-if-modified-since`

If both the `x-amz-copy-source-if-match` and
`x-amz-copy-source-if-unmodified-since` headers are present in the request
and evaluate as follows, Amazon S3 returns `200 OK` and copies the data:

  * `x-amz-copy-source-if-match` condition evaluates to true
  * `x-amz-copy-source-if-unmodified-since` condition evaluates to false

If both the `x-amz-copy-source-if-none-match` and
`x-amz-copy-source-if-modified-since` headers are present in the request
and evaluate as follows, Amazon S3 returns the `412 Precondition Failed`
response code:

  * `x-amz-copy-source-if-none-match` condition evaluates to false
  * `x-amz-copy-source-if-modified-since` condition evaluates to true

All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must
be signed.

**Encryption**
The source object that you are copying can be encrypted or unencrypted. The
source object can be encrypted with server-side encryption using AWS
managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided
encryption key. With server-side encryption, Amazon S3 encrypts your data
as it writes it to disks in its data centers and decrypts the data when you
access it.
You can optionally use the appropriate encryption-related headers to
request server-side encryption for the target object. You have the option
to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of
the form of server-side encryption that was used to encrypt the source
object. You can even request encryption if the source object was not
encrypted. For more information about server-side encryption, see [Using
Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
**Access Control List (ACL)-Specific Request Headers**
When copying an object, you can optionally use headers to grant ACL-based
permissions. By default, all objects are private. Only the owner has full
access control. When adding a new object, you can grant permissions to
individual AWS accounts or to predefined groups defined by Amazon S3. These
permissions are then added to the ACL on the object. For more information,
see [Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
and [Managing ACLs Using the REST
API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
**Storage Class Options**
You can use the `CopyObject` operation to change the storage class of an
object that is already stored in Amazon S3 using the `StorageClass`
parameter. For more information, see [Storage
Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
in the *Amazon S3 Service Developer Guide*.
**Versioning**
By default, `x-amz-copy-source` identifies the current version of an object
to copy. If the current version is a delete marker, Amazon S3 behaves as if
the object was deleted. To copy a different version, use the `versionId`
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique
version ID for the object being copied. This version ID is different from
the version ID of the source object. Amazon S3 returns the version ID of
the copied object in the `x-amz-version-id` response header in the
response.
If you do not enable versioning or suspend it on the target bucket, the
version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of
this object before you can use it as a source object for the copy
operation.
The following operations are related to `CopyObject`:

  * `PutObject`
  * `GetObject`

For more information, see [Copying
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
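
A minimal usage sketch (assumes a configured `AWS.Client` struct named
`client`; bucket and key names are placeholders):

    copy_object(client, "dest-bucket", "dest-key",
      %{"CopySource" => "source-bucket/source-key"})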
"""
def copy_object(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"CopySourceIfModifiedSince", "x-amz-copy-source-if-modified-since"},
{"ContentLanguage", "Content-Language"},
{"Expires", "Expires"},
{"CopySourceSSECustomerKeyMD5", "x-amz-copy-source-server-side-encryption-customer-key-MD5"},
{"ObjectLockLegalHoldStatus", "x-amz-object-lock-legal-hold"},
{"CopySourceSSECustomerKey", "x-amz-copy-source-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
{"ObjectLockRetainUntilDate", "x-amz-object-lock-retain-until-date"},
{"RequestPayer", "x-amz-request-payer"},
{"ContentDisposition", "Content-Disposition"},
{"ContentEncoding", "Content-Encoding"},
{"ServerSideEncryption", "x-amz-server-side-encryption"},
{"Tagging", "x-amz-tagging"},
{"StorageClass", "x-amz-storage-class"},
{"CopySourceIfUnmodifiedSince", "x-amz-copy-source-if-unmodified-since"},
{"ACL", "x-amz-acl"},
{"ContentType", "Content-Type"},
{"CopySource", "x-amz-copy-source"},
{"ObjectLockMode", "x-amz-object-lock-mode"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"SSEKMSKeyId", "x-amz-server-side-encryption-aws-kms-key-id"},
{"CacheControl", "Cache-Control"},
{"TaggingDirective", "x-amz-tagging-directive"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"WebsiteRedirectLocation", "x-amz-website-redirect-location"},
{"GrantRead", "x-amz-grant-read"},
{"CopySourceSSECustomerAlgorithm", "x-amz-copy-source-server-side-encryption-customer-algorithm"},
{"CopySourceIfMatch", "x-amz-copy-source-if-match"},
{"MetadataDirective", "x-amz-metadata-directive"},
{"CopySourceIfNoneMatch", "x-amz-copy-source-if-none-match"},
{"SSEKMSEncryptionContext", "x-amz-server-side-encryption-context"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-copy-source-version-id", "CopySourceVersionId"},
{"x-amz-expiration", "Expiration"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-context", "SSEKMSEncryptionContext"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new bucket. To create a bucket, you must register with Amazon S3
and have a valid AWS Access Key ID to authenticate requests. Anonymous
requests are never allowed to create buckets. By creating the bucket, you
become the bucket owner.
Not every string is an acceptable bucket name. For information on bucket
naming restrictions, see [Working with Amazon S3
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html).
By default, the bucket is created in the US East (N. Virginia) Region. You
can optionally specify a Region in the request body. You might choose a
Region to optimize latency, minimize costs, or address regulatory
requirements. For example, if you reside in Europe, you will probably find
it advantageous to create buckets in the Europe (Ireland) Region. For more
information, see [How to Select a Region for Your
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
If you send your create bucket request to the `s3.amazonaws.com` endpoint,
the request goes to the us-east-1 Region. Accordingly, the signature
calculations in Signature Version 4 must use us-east-1 as the Region, even
if the location constraint in the request specifies another Region where
the bucket is to be created. If you create a bucket in a Region other than
US East (N. Virginia), your application must be able to handle 307
redirect. For more information, see [Virtual Hosting of
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html).
When creating a bucket using this operation, you can optionally specify the
accounts or groups that should be granted specific permissions on the
bucket. There are two ways to grant the appropriate permissions using the
request headers:

  * Specify a canned ACL using the `x-amz-acl` request header. Amazon S3
    supports a set of predefined ACLs, known as *canned ACLs*. Each canned
    ACL has a predefined set of grantees and permissions. For more
    information, see [Canned
    ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
  * Specify access permissions explicitly using the `x-amz-grant-read`,
    `x-amz-grant-write`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`,
    and `x-amz-grant-full-control` headers. These headers map to the set of
    permissions Amazon S3 supports in an ACL. For more information, see
    [Access Control List (ACL)
    Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
    You specify each grantee as a type=value pair, where the type is one of
    the following:
      * `id` – if the value specified is the canonical user ID of an AWS
        account
      * `uri` – if you are granting permissions to a predefined group
      * `emailAddress` – if the value specified is the email address of an
        AWS account. Using email addresses to specify a grantee is only
        supported in the following AWS Regions:
          * US East (N. Virginia)
          * US West (N. California)
          * US West (Oregon)
          * Asia Pacific (Singapore)
          * Asia Pacific (Sydney)
          * Asia Pacific (Tokyo)
          * Europe (Ireland)
          * South America (São Paulo)
        For a list of all the Amazon S3 supported Regions and endpoints,
        see [Regions and
        Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
        in the AWS General Reference.
    For example, the following `x-amz-grant-read` header grants the AWS
    accounts identified by account IDs permissions to read object data and
    its metadata:
    `x-amz-grant-read: id="11112222333", id="444455556666"`

You can use either a canned ACL or specify access permissions explicitly.
You cannot do both.
The following operations are related to `CreateBucket`:

  * `PutObject`
  * `DeleteBucket`
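
A minimal usage sketch (assumes a configured `AWS.Client` struct named
`client`; with an empty input map, the bucket is created in the default US
East (N. Virginia) Region):

    create_bucket(client, "my-new-bucket", %{})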
"""
def create_bucket(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWrite", "x-amz-grant-write"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"ObjectLockEnabledForBucket", "x-amz-bucket-object-lock-enabled"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation initiates a multipart upload and returns an upload ID. This
upload ID is used to associate all of the parts in the specific multipart
upload. You specify this upload ID in each of your subsequent upload part
requests (see `UploadPart`). You also include this upload ID in the final
request to either complete or abort the multipart upload request.
For more information about multipart uploads, see [Multipart Upload
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
If you have configured a lifecycle rule to abort incomplete multipart
uploads, the upload must complete within the number of days specified in
the bucket lifecycle configuration. Otherwise, the incomplete multipart
upload becomes eligible for an abort operation and Amazon S3 aborts the
multipart upload. For more information, see [Aborting Incomplete Multipart
Uploads Using a Bucket Lifecycle
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
For information about the permissions required to use the multipart upload
API, see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
For request signing, multipart upload is just a series of regular requests.
You initiate a multipart upload, send one or more requests to upload parts,
and then complete the multipart upload process. You sign each request
individually. There is nothing special about signing multipart upload
requests. For more information about signing, see [Authenticating Requests
(AWS Signature Version
4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
<note> After you initiate a multipart upload and upload one or more parts,
to stop being charged for storing the uploaded parts, you must either
complete or abort the multipart upload. Amazon S3 frees up the space used
to store the parts and stops charging you for storing them only after you
either complete or abort a multipart upload.
</note> You can optionally request server-side encryption. For server-side
encryption, Amazon S3 encrypts your data as it writes it to disks in its
data centers and decrypts it when you access it. You can provide your own
encryption key, or use AWS Key Management Service (AWS KMS) customer master
keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide
your own encryption key, the request headers you provide in `UploadPart`
and `UploadPartCopy` requests must match the headers you used in the
request to initiate the upload by using `CreateMultipartUpload`.
To perform a multipart upload with encryption using an AWS KMS CMK, the
requester must have permission to the `kms:Encrypt`, `kms:Decrypt`,
`kms:ReEncrypt*`, `kms:GenerateDataKey*`, and `kms:DescribeKey` actions on
the key. These permissions are required because Amazon S3 must decrypt and
read data from the encrypted file parts before it completes the multipart
upload.
If your AWS Identity and Access Management (IAM) user or role is in the
same AWS account as the AWS KMS CMK, then you must have these permissions
on the key policy. If your IAM user or role belongs to a different account
than the key, then you must have the permissions on both the key policy and
your IAM user or role.
For more information, see [Protecting Data Using Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
<dl> <dt>Access Permissions</dt> <dd> When initiating a multipart upload,
you can optionally specify the accounts or groups that should be granted
specific
permissions on the new object. There are two ways to grant the permissions
using the request headers:
<ul> <li> Specify a canned ACL with the `x-amz-acl` request header. For
more information, see [Canned
ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
</li> <li> Specify access permissions explicitly with the
`x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
`x-amz-grant-full-control` headers. These parameters map to the set of
permissions that Amazon S3 supports in an ACL. For more information, see
[Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
</li> </ul> You can use either a canned ACL or specify access permissions
explicitly. You cannot do both.
</dd> <dt>Server-Side-Encryption-Specific Request Headers</dt> <dd> You
can optionally tell Amazon S3 to encrypt data at rest using server-side
encryption. Server-side encryption is for data encryption at rest. Amazon
S3 encrypts your data as it writes it to disks in its data centers and
decrypts it when you access it. The option you use depends on whether you
want to use AWS managed encryption keys or provide your own encryption key.
<ul> <li> Use encryption keys managed by Amazon S3 or customer master keys
(CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to
manage the keys used to encrypt data, specify the following headers in the
request.
<ul> <li> x-amz-server-side-encryption
</li> <li> x-amz-server-side-encryption-aws-kms-key-id
</li> <li> x-amz-server-side-encryption-context
</li> </ul> <note> If you specify `x-amz-server-side-encryption:aws:kms`,
but don't provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3
uses the AWS managed CMK in AWS KMS to protect the data.
</note> <important> All GET and PUT requests for an object protected by AWS
KMS fail if you don't make them with SSL or by using SigV4.
</important> For more information about server-side encryption with CMKs
stored in AWS KMS (SSE-KMS), see [Protecting Data Using Server-Side
Encryption with CMKs stored in AWS
KMS](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
</li> <li> Use customer-provided encryption keys – If you want to manage
your own encryption keys, provide all the following headers in the request.
<ul> <li> x-amz-server-side-encryption-customer-algorithm
</li> <li> x-amz-server-side-encryption-customer-key
</li> <li> x-amz-server-side-encryption-customer-key-MD5
</li> </ul> For more information about server-side encryption with
customer-provided encryption keys (SSE-C), see [Protecting Data Using
Server-Side Encryption with Customer-Provided Encryption
Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
</li> </ul> </dd> <dt>Access-Control-List (ACL)-Specific Request
Headers</dt> <dd> You also can use the following access control–related
headers with this operation. By default, all objects are private. Only the
owner has full access control. When adding a new object, you can grant
permissions to individual AWS accounts or to predefined groups defined by
Amazon S3. These permissions are then added to the access control list
(ACL) on the object. For more information, see [Using
ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
With this operation, you can grant access permissions using one of the
following two methods:
<ul> <li> Specify a canned ACL (`x-amz-acl`) — Amazon S3 supports a set of
predefined ACLs, known as *canned ACLs*. Each canned ACL has a predefined
set of grantees and permissions. For more information, see [Canned
ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
</li> <li> Specify access permissions explicitly — To explicitly grant
access permissions to specific AWS accounts or groups, use the following
headers. Each header maps to specific permissions that Amazon S3 supports
in an ACL. For more information, see [Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
In the header, you specify a list of grantees who get the specific
permission. To grant permissions explicitly, use:
<ul> <li> x-amz-grant-read
</li> <li> x-amz-grant-write
</li> <li> x-amz-grant-read-acp
</li> <li> x-amz-grant-write-acp
</li> <li> x-amz-grant-full-control
</li> </ul> You specify each grantee as a type=value pair, where the type
is one of the following:
<ul> <li> `id` – if the value specified is the canonical user ID of an AWS
account
</li> <li> `uri` – if you are granting permissions to a predefined group
</li> <li> `emailAddress` – if the value specified is the email address of
an AWS account
<note> Using email addresses to specify a grantee is only supported in the
following AWS Regions:
<ul> <li> US East (N. Virginia)
</li> <li> US West (N. California)
</li> <li> US West (Oregon)
</li> <li> Asia Pacific (Singapore)
</li> <li> Asia Pacific (Sydney)
</li> <li> Asia Pacific (Tokyo)
</li> <li> Europe (Ireland)
</li> <li> South America (São Paulo)
</li> </ul> For a list of all the Amazon S3 supported Regions and
endpoints, see [Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
in the AWS General Reference.
</note> </li> </ul> For example, the following `x-amz-grant-read` header
grants the AWS accounts identified by account IDs permissions to read
object data and its metadata:
`x-amz-grant-read: id="111122223333", id="444455556666"`
</li> </ul> </dd> </dl> The following operations are related to
`CreateMultipartUpload`:
<ul> <li> `UploadPart`
</li> <li> `CompleteMultipartUpload`
</li> <li> `AbortMultipartUpload`
</li> <li> `ListParts`
</li> <li> `ListMultipartUploads`
</li> </ul>
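## Example

A hedged sketch of initiating an SSE-KMS encrypted multipart upload; the
`AWS.S3` module name and the `client` struct are assumptions:

```
# Parameter names in `input` follow the header mappings below.
input = %{
  "ContentType" => "application/octet-stream",
  "ServerSideEncryption" => "aws:kms"
}
# The decoded body carries the upload ID to pass to UploadPart and
# CompleteMultipartUpload.
{:ok, _body, _response} =
  AWS.S3.create_multipart_upload(client, "my-bucket", "big/object.bin", input)
```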
"""
def create_multipart_upload(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?uploads"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"CacheControl", "Cache-Control"},
{"ContentDisposition", "Content-Disposition"},
{"ContentEncoding", "Content-Encoding"},
{"ContentLanguage", "Content-Language"},
{"ContentType", "Content-Type"},
{"Expires", "Expires"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"ObjectLockLegalHoldStatus", "x-amz-object-lock-legal-hold"},
{"ObjectLockMode", "x-amz-object-lock-mode"},
{"ObjectLockRetainUntilDate", "x-amz-object-lock-retain-until-date"},
{"RequestPayer", "x-amz-request-payer"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
{"SSEKMSEncryptionContext", "x-amz-server-side-encryption-context"},
{"SSEKMSKeyId", "x-amz-server-side-encryption-aws-kms-key-id"},
{"ServerSideEncryption", "x-amz-server-side-encryption"},
{"StorageClass", "x-amz-storage-class"},
{"Tagging", "x-amz-tagging"},
{"WebsiteRedirectLocation", "x-amz-website-redirect-location"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-abort-date", "AbortDate"},
{"x-amz-abort-rule-id", "AbortRuleId"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-context", "SSEKMSEncryptionContext"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Deletes the bucket. All objects (including all object versions and delete
markers) in the bucket must be deleted before the bucket itself can be
deleted.
<p class="title"> **Related Resources**
<ul> <li>
</li> <li>
</li> </ul>
"""
def delete_bucket(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes an analytics configuration for the bucket (specified by the
analytics configuration ID).
To use this operation, you must have permissions to perform the
`s3:PutAnalyticsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about the Amazon S3 analytics feature, see [Amazon S3
Analytics – Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
The following operations are related to
`DeleteBucketAnalyticsConfiguration`:
<ul> <li> `GetBucketAnalyticsConfiguration`
</li> <li> `ListBucketAnalyticsConfigurations`
</li> <li> `PutBucketAnalyticsConfiguration`
</li> </ul>
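## Example

A brief sketch (the `AWS.S3` module name and `client` are assumptions). The
`"Id"` key in `input` is sent as the `id` query parameter:

```
{:ok, _body, _response} =
  AWS.S3.delete_bucket_analytics_configuration(client, "my-bucket", %{"Id" => "report-1"})
```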
"""
def delete_bucket_analytics_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?analytics"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes the `cors` configuration information set for the bucket.
To use this operation, you must have permission to perform the
`s3:PutBucketCORS` action. The bucket owner has this permission by default
and can grant this permission to others.
For information about `cors`, see [Enabling Cross-Origin Resource
Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the
*Amazon Simple Storage Service Developer Guide*.
<p class="title"> **Related Resources:**
<ul> <li> `PutBucketCors`
</li> <li> `RESTOPTIONSobject`
</li> </ul>
"""
def delete_bucket_cors(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?cors"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This implementation of the DELETE operation removes default encryption from
the bucket. For information about the Amazon S3 default encryption feature,
see [Amazon S3 Default Bucket
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
in the *Amazon Simple Storage Service Developer Guide*.
To use this operation, you must have permissions to perform the
`s3:PutEncryptionConfiguration` action. The bucket owner has this
permission by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the *Amazon Simple Storage Service Developer Guide*.
<p class="title"> **Related Resources**
<ul> <li> `PutBucketEncryption`
</li> <li> `GetBucketEncryption`
</li> </ul>
"""
def delete_bucket_encryption(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?encryption"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes an inventory configuration (identified by the inventory ID) from
the bucket.
To use this operation, you must have permissions to perform the
`s3:PutInventoryConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about the Amazon S3 inventory feature, see [Amazon S3
Inventory](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
Operations related to `DeleteBucketInventoryConfiguration` include:
<ul> <li> `GetBucketInventoryConfiguration`
</li> <li> `PutBucketInventoryConfiguration`
</li> <li> `ListBucketInventoryConfigurations`
</li> </ul>
"""
def delete_bucket_inventory_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?inventory"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes the lifecycle configuration from the specified bucket. Amazon S3
removes all the lifecycle configuration rules in the lifecycle subresource
associated with the bucket. Your objects never expire, and Amazon S3 no
longer automatically deletes any objects on the basis of rules contained in
the deleted lifecycle configuration.
To use this operation, you must have permission to perform the
`s3:PutLifecycleConfiguration` action. By default, the bucket owner has
this permission and the bucket owner can grant this permission to others.
There is usually some time lag before lifecycle configuration deletion is
fully propagated to all the Amazon S3 systems.
For more information about the object expiration, see [Elements to Describe
Lifecycle
Actions](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
Related actions include:
<ul> <li> `PutBucketLifecycleConfiguration`
</li> <li> `GetBucketLifecycleConfiguration`
</li> </ul>
"""
def delete_bucket_lifecycle(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?lifecycle"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes a metrics configuration for the Amazon CloudWatch request metrics
(specified by the metrics configuration ID) from the bucket. Note that this
doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the
`s3:PutMetricsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about CloudWatch request metrics for Amazon S3, see
[Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
The following operations are related to `DeleteBucketMetricsConfiguration`:
<ul> <li> `GetBucketMetricsConfiguration`
</li> <li> `PutBucketMetricsConfiguration`
</li> <li> `ListBucketMetricsConfigurations`
</li> <li> [Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
</li> </ul>
"""
def delete_bucket_metrics_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?metrics"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This implementation of the DELETE operation uses the policy subresource to
delete the policy of a specified bucket. If you are using an identity other
than the root user of the AWS account that owns the bucket, the calling
identity must have the `DeleteBucketPolicy` permissions on the specified
bucket and belong to the bucket owner's account to use this operation.
If you don't have `DeleteBucketPolicy` permissions, Amazon S3 returns a
`403 Access Denied` error. If you have the correct permissions, but you're
not using an identity that belongs to the bucket owner's account, Amazon S3
returns a `405 Method Not Allowed` error.
<important> As a security precaution, the root user of the AWS account that
owns a bucket can always use this operation, even if the policy explicitly
denies the root user the ability to perform this action.
</important> For more information about bucket policies, see [Using Bucket
Policies and User
Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
The following operations are related to `DeleteBucketPolicy`:
<ul> <li> `CreateBucket`
</li> <li> `DeleteObject`
</li> </ul>
"""
def delete_bucket_policy(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?policy"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the
`s3:PutReplicationConfiguration` action. The bucket owner has these
permissions by default and can grant it to others. For more information
about permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
<note> It can take a while for the deletion of a replication configuration
to fully propagate.
</note> For information about replication configuration, see
[Replication](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the
*Amazon S3 Developer Guide*.
The following operations are related to `DeleteBucketReplication`:
<ul> <li> `PutBucketReplication`
</li> <li> `GetBucketReplication`
</li> </ul>
"""
def delete_bucket_replication(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?replication"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes the tags from the bucket.
To use this operation, you must have permission to perform the
`s3:PutBucketTagging` action. By default, the bucket owner has this
permission and can grant this permission to others.
The following operations are related to `DeleteBucketTagging`:
<ul> <li> `GetBucketTagging`
</li> <li> `PutBucketTagging`
</li> </ul>
"""
def delete_bucket_tagging(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?tagging"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This operation removes the website configuration for a bucket. Amazon S3
returns a `200 OK` response upon successfully deleting a website
configuration on the specified bucket. You will get a `200 OK` response if
the website configuration you are trying to delete does not exist on the
bucket. Amazon S3 returns a `404` response if the bucket specified in the
request does not exist.
This DELETE operation requires the `S3:DeleteBucketWebsite` permission. By
default, only the bucket owner can delete the website configuration
attached to a bucket. However, bucket owners can grant other users
permission to delete the website configuration by writing a bucket policy
granting them the `S3:DeleteBucketWebsite` permission.
For more information about hosting websites, see [Hosting Websites on
Amazon
S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
The following operations are related to `DeleteBucketWebsite`:
<ul> <li> `GetBucketWebsite`
</li> <li> `PutBucketWebsite`
</li> </ul>
"""
def delete_bucket_website(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?website"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Removes the null version (if there is one) of an object and inserts a
delete marker, which becomes the latest version of the object. If there
isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use
the `versionId` subresource. Using this subresource permanently deletes the
version. If the object deleted is a delete marker, Amazon S3 sets the
response header, `x-amz-delete-marker`, to true.
If the object you want to delete is in a bucket where the bucket versioning
configuration is MFA Delete enabled, you must include the `x-amz-mfa`
request header in the DELETE `versionId` request. Requests that include
`x-amz-mfa` must use HTTPS.
For more information about MFA Delete, see [Using MFA
Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
To see sample requests that use versioning, see [Sample
Request](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
You can delete objects by explicitly calling the DELETE Object API or
configure its lifecycle (`PutBucketLifecycle`) to enable Amazon S3 to
remove them for you. If you want to block users or accounts from removing
or deleting objects from your bucket, you must deny them the
`s3:DeleteObject`, `s3:DeleteObjectVersion`, and
`s3:PutLifeCycleConfiguration` actions.
The following operation is related to `DeleteObject`:
<ul> <li> `PutObject`
</li> </ul>
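## Example

A sketch of deleting a specific object version; `AWS.S3` and `client` are
assumptions, and the version ID is a placeholder. `"VersionId"` is sent as
the `versionId` query parameter and `"MFA"` as the `x-amz-mfa` header:

```
input = %{"VersionId" => "example-version-id"}
{:ok, _body, _response} =
  AWS.S3.delete_object(client, "my-bucket", "photos/2006/February/sample.jpg", input)
```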
"""
def delete_object(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"BypassGovernanceRetention", "x-amz-bypass-governance-retention"},
{"MFA", "x-amz-mfa"},
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :delete, path_, query_, headers, input, options, 204) do
{:ok, body, response} ->
body =
[
{"x-amz-delete-marker", "DeleteMarker"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Removes the entire tag set from the specified object. For more information
about managing object tags, see [ Object
Tagging](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
To use this operation, you must have permission to perform the
`s3:DeleteObjectTagging` action.
To delete tags of a specific object version, add the `versionId` query
parameter in the request. You will need permission for the
`s3:DeleteObjectVersionTagging` action.
The following operations are related to `DeleteObjectTagging`:
<ul> <li> `PutObjectTagging`
</li> <li> `GetObjectTagging`
</li> </ul>
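## Example

A minimal sketch (`AWS.S3`, `client`, and the version ID are placeholders);
include `"VersionId"` to remove the tags of a specific object version:

```
{:ok, _body, _response} =
  AWS.S3.delete_object_tagging(client, "my-bucket", "photos/sample.jpg",
    %{"VersionId" => "example-version-id"})
```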
"""
def delete_object_tagging(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?tagging"
headers = []
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :delete, path_, query_, headers, input, options, 204) do
{:ok, body, response} ->
body =
[
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation enables you to delete multiple objects from a bucket using a
single HTTP request. If you know the object keys that you want to delete,
then this operation provides a suitable alternative to sending individual
delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete. In
the XML, you provide the object key names, and optionally, version IDs if
you want to delete a specific version of the object from a
versioning-enabled bucket. For each key, Amazon S3 performs a delete
operation and returns the result of that delete, success, or failure, in
the response. Note that if the object specified in the request is not
found, Amazon S3 returns the result as deleted.
The operation supports two modes for the response: verbose and quiet. By
default, the operation uses verbose mode in which the response includes the
result of deletion of each key in your request. In quiet mode the response
includes only keys where the delete operation encountered an error. For a
successful deletion, the operation does not return any information about
the delete in the response body.
When performing this operation on an MFA Delete enabled bucket that
attempts to delete any versioned objects, you must include an MFA token. If
you do not provide one, the entire request will fail, even if there are
non-versioned objects you are trying to delete. If you provide an invalid
token, whether there are versioned keys in the request or not, the entire
Multi-Object Delete request will fail. For information about MFA Delete,
see [ MFA
Delete](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
Finally, the Content-MD5 header is required for all Multi-Object Delete
requests. Amazon S3 uses the header value to ensure that your request body
has not been altered in transit.
The following operations are related to `DeleteObjects`:
<ul> <li> `CreateMultipartUpload`
</li> <li> `UploadPart`
</li> <li> `CompleteMultipartUpload`
</li> <li> `ListParts`
</li> <li> `AbortMultipartUpload`
</li> </ul>
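## Example

A hedged sketch of a quiet-mode batch delete. The `AWS.S3` module name and
`client` are assumptions, and the exact shape expected by this library's XML
encoder may differ, so treat the `input` map as illustrative only:

```
input = %{
  "Delete" => %{
    "Object" => [%{"Key" => "a.txt"}, %{"Key" => "b.txt"}],
    "Quiet" => true
  }
}
{:ok, _body, _response} = AWS.S3.delete_objects(client, "my-bucket", input)
```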
"""
def delete_objects(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?delete"
{headers, input} =
[
{"BypassGovernanceRetention", "x-amz-bypass-governance-retention"},
{"MFA", "x-amz-mfa"},
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Removes the `PublicAccessBlock` configuration for an Amazon S3 bucket. To
use this operation, you must have the `s3:PutBucketPublicAccessBlock`
permission. For more information about permissions, see [Permissions
Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
The following operations are related to `DeletePublicAccessBlock`:
<ul> <li> [Using Amazon S3 Block Public
Access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
</li> <li> `GetPublicAccessBlock`
</li> <li> `PutPublicAccessBlock`
</li> <li> `GetBucketPolicyStatus`
</li> </ul>
"""
def delete_public_access_block(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?publicAccessBlock"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
This implementation of the GET operation uses the `accelerate` subresource
to return the Transfer Acceleration state of a bucket, which is either
`Enabled` or `Suspended`. Amazon S3 Transfer Acceleration is a bucket-level
feature that enables you to perform faster data transfers to and from
Amazon S3.
To use this operation, you must have permission to perform the
`s3:GetAccelerateConfiguration` action. The bucket owner has this
permission by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the *Amazon Simple Storage Service Developer Guide*.
You set the Transfer Acceleration state of an existing bucket to `Enabled`
or `Suspended` by using the `PutBucketAccelerateConfiguration` operation.
A GET `accelerate` request does not return a state value for a bucket that
has no transfer acceleration state. A bucket has no Transfer Acceleration
state if a state has never been set on the bucket.
For more information about transfer acceleration, see [Transfer
Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
in the Amazon Simple Storage Service Developer Guide.
<p class="title"> **Related Resources**
<ul> <li> `PutBucketAccelerateConfiguration`
</li> </ul>
"""
def get_bucket_accelerate_configuration(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?accelerate"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This implementation of the `GET` operation uses the `acl` subresource to
return the access control list (ACL) of a bucket. To use `GET` to return
the ACL of the bucket, you must have `READ_ACP` access to the bucket. If
`READ_ACP` permission is granted to the anonymous user, you can return the
ACL of the bucket without using an authorization header.
<p class="title"> **Related Resources**
<ul> <li>
</li> </ul>
"""
def get_bucket_acl(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?acl"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This implementation of the GET operation returns an analytics configuration
(identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the
`s3:GetAnalyticsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [ Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the *Amazon Simple Storage Service Developer Guide*.
For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics
– Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
in the *Amazon Simple Storage Service Developer Guide*.
<p class="title"> **Related Resources**
<ul> <li>
</li> <li>
</li> <li>
</li> </ul>
"""
def get_bucket_analytics_configuration(client, bucket, id, options \\ []) do
path_ = "/#{URI.encode(bucket)}?analytics"
headers = []
query_ = []
query_ = if !is_nil(id) do
[{"id", id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the `cors` configuration information set for the bucket.
To use this operation, you must have permission to perform the
`s3:GetBucketCORS` action. By default, the bucket owner has this permission
and can grant it to others.
For more information about `cors`, see [Enabling Cross-Origin Resource
Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html).
The following operations are related to `GetBucketCors`:
<ul> <li> `PutBucketCors`
</li> <li> `DeleteBucketCors`
</li> </ul>
"""
def get_bucket_cors(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?cors"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the default encryption configuration for an Amazon S3 bucket. For
information about the Amazon S3 default encryption feature, see [Amazon S3
Default Bucket
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html).
To use this operation, you must have permission to perform the
`s3:GetEncryptionConfiguration` action. The bucket owner has this
permission by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
The following operations are related to `GetBucketEncryption`:
<ul> <li> `PutBucketEncryption`
</li> <li> `DeleteBucketEncryption`
</li> </ul>
"""
def get_bucket_encryption(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?encryption"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns an inventory configuration (identified by the inventory
configuration ID) from the bucket.
To use this operation, you must have permissions to perform the
`s3:GetInventoryConfiguration` action. The bucket owner has this permission
by default and can grant this permission to others. For more information
about permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about the Amazon S3 inventory feature, see [Amazon S3
Inventory](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
The following operations are related to `GetBucketInventoryConfiguration`:
<ul> <li> `DeleteBucketInventoryConfiguration`
</li> <li> `ListBucketInventoryConfigurations`
</li> <li> `PutBucketInventoryConfiguration`
</li> </ul>
"""
def get_bucket_inventory_configuration(client, bucket, id, options \\ []) do
path_ = "/#{URI.encode(bucket)}?inventory"
headers = []
query_ = []
query_ = if !is_nil(id) do
[{"id", id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
<important> For an updated version of this API, see
`GetBucketLifecycleConfiguration`. If you configured a bucket lifecycle
using the `filter` element, you should see the updated version of this
topic. This topic is provided for backward compatibility.
</important> Returns the lifecycle configuration information set on the
bucket. For information about lifecycle configuration, see [Object
Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
To use this operation, you must have permission to perform the
`s3:GetLifecycleConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
`GetBucketLifecycle` has the following special error:
<ul> <li> Error code: `NoSuchLifecycleConfiguration`
<ul> <li> Description: The lifecycle configuration does not exist.
</li> <li> HTTP Status Code: 404 Not Found
</li> <li> SOAP Fault Code Prefix: Client
</li> </ul> </li> </ul> The following operations are related to
`GetBucketLifecycle`:
<ul> <li> `GetBucketLifecycleConfiguration`
</li> <li> `PutBucketLifecycle`
</li> <li> `DeleteBucketLifecycle`
</li> </ul>
"""
def get_bucket_lifecycle(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?lifecycle"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
<note> Bucket lifecycle configuration now supports specifying a lifecycle
rule using an object key name prefix, one or more object tags, or a
combination of both. Accordingly, this section describes the latest API.
The response describes the new filter element that you can use to specify a
filter to select a subset of objects to which the rule applies. If you are
still using the previous version of the lifecycle configuration, it still
works. For
the earlier API description, see `GetBucketLifecycle`.
</note> Returns the lifecycle configuration information set on the bucket.
For information about lifecycle configuration, see [Object Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
To use this operation, you must have permission to perform the
`s3:GetLifecycleConfiguration` action. The bucket owner has this
permission, by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
`GetBucketLifecycleConfiguration` has the following special error:
<ul> <li> Error code: `NoSuchLifecycleConfiguration`
<ul> <li> Description: The lifecycle configuration does not exist.
</li> <li> HTTP Status Code: 404 Not Found
</li> <li> SOAP Fault Code Prefix: Client
</li> </ul> </li> </ul> The following operations are related to
`GetBucketLifecycleConfiguration`:
<ul> <li> `GetBucketLifecycle`
</li> <li> `PutBucketLifecycle`
</li> <li> `DeleteBucketLifecycle`
</li> </ul>
"""
def get_bucket_lifecycle_configuration(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?lifecycle"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the Region the bucket resides in. You set the bucket's Region using
the `LocationConstraint` request parameter in a `CreateBucket` request. For
more information, see `CreateBucket`.
To use this implementation of the operation, you must be the bucket owner.
The following operations are related to `GetBucketLocation`:
<ul> <li> `GetObject`
</li> <li> `CreateBucket`
</li> </ul>
"""
def get_bucket_location(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?location"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the logging status of a bucket and the permissions users have to
view and modify that status. To use GET, you must be the bucket owner.
The following operations are related to `GetBucketLogging`:
<ul> <li> `CreateBucket`
</li> <li> `PutBucketLogging`
</li> </ul>
"""
def get_bucket_logging(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?logging"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a metrics configuration (specified by the metrics configuration ID)
from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the
`s3:GetMetricsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about CloudWatch request metrics for Amazon S3, see
[Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
The following operations are related to `GetBucketMetricsConfiguration`:
<ul> <li> `PutBucketMetricsConfiguration`
</li> <li> `DeleteBucketMetricsConfiguration`
</li> <li> `ListBucketMetricsConfigurations`
</li> <li> [Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
</li> </ul>
"""
def get_bucket_metrics_configuration(client, bucket, id, options \\ []) do
path_ = "/#{URI.encode(bucket)}?metrics"
headers = []
query_ = []
query_ = if !is_nil(id) do
[{"id", id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
No longer used, see `GetBucketNotificationConfiguration`.
"""
def get_bucket_notification(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?notification"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the operation returns an
empty `NotificationConfiguration` element.
By default, you must be the bucket owner to read the notification
configuration of a bucket. However, the bucket owner can use a bucket
policy to grant permission to other users to read this configuration with
the `s3:GetBucketNotification` permission.
For more information about setting and reading the notification
configuration on a bucket, see [Setting Up Notification of Bucket
Events](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
For more information about bucket policies, see [Using Bucket
Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
The following operation is related to `GetBucketNotification`:
<ul> <li> `PutBucketNotification`
</li> </ul>
"""
def get_bucket_notification_configuration(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?notification"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the policy of a specified bucket. If you are using an identity
other than the root user of the AWS account that owns the bucket, the
calling identity must have the `GetBucketPolicy` permissions on the
specified bucket and belong to the bucket owner's account in order to use
this operation.
If you don't have `GetBucketPolicy` permissions, Amazon S3 returns a `403
Access Denied` error. If you have the correct permissions, but you're not
using an identity that belongs to the bucket owner's account, Amazon S3
returns a `405 Method Not Allowed` error.
<important> As a security precaution, the root user of the AWS account that
owns a bucket can always use this operation, even if the policy explicitly
denies the root user the ability to perform this action.
</important> For more information about bucket policies, see [Using Bucket
Policies and User
Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
The following operation is related to `GetBucketPolicy`:
<ul> <li> `GetObject`
</li> </ul>
"""
def get_bucket_policy(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?policy"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves the policy status for an Amazon S3 bucket, indicating whether the
bucket is public. In order to use this operation, you must have the
`s3:GetBucketPolicyStatus` permission. For more information about Amazon S3
permissions, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
For more information about when Amazon S3 considers a bucket public, see
[The Meaning of
"Public"](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
The following operations are related to `GetBucketPolicyStatus`:
<ul> <li> [Using Amazon S3 Block Public
Access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
</li> <li> `GetPublicAccessBlock`
</li> <li> `PutPublicAccessBlock`
</li> <li> `DeletePublicAccessBlock`
</li> </ul>
"""
def get_bucket_policy_status(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?policyStatus"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the replication configuration of a bucket.
<note> It can take a while for a put or delete of a replication
configuration to propagate to all Amazon S3 systems. Therefore, a get
request soon after a put or delete can return an outdated result.
</note> For information about replication configuration, see
[Replication](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
in the *Amazon Simple Storage Service Developer Guide*.
This operation requires permissions for the
`s3:GetReplicationConfiguration` action. For more information about
permissions, see [Using Bucket Policies and User
Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
If you include the `Filter` element in a replication configuration, you
must also include the `DeleteMarkerReplication` and `Priority` elements.
The response also returns those elements.
For information about `GetBucketReplication` errors, see
`ReplicationErrorCodeList`.
The following operations are related to `GetBucketReplication`:
<ul> <li> `PutBucketReplication`
</li> <li> `DeleteBucketReplication`
</li> </ul>
"""
def get_bucket_replication(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?replication"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the request payment configuration of a bucket. To use this version
of the operation, you must be the bucket owner. For more information, see
[Requester Pays
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
The following operations are related to `GetBucketRequestPayment`:
<ul> <li> `ListObjects`
</li> </ul>
"""
def get_bucket_request_payment(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?requestPayment"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the
`s3:GetBucketTagging` action. By default, the bucket owner has this
permission and can grant this permission to others.
`GetBucketTagging` has the following special error:
<ul> <li> Error code: `NoSuchTagSetError`
<ul> <li> Description: There is no tag set associated with the bucket.
</li> </ul> </li> </ul> The following operations are related to
`GetBucketTagging`:
<ul> <li> `PutBucketTagging`
</li> <li> `DeleteBucketTagging`
</li> </ul>
"""
def get_bucket_tagging(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?tagging"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning
state. If the MFA Delete status is `enabled`, the bucket owner must use an
authentication device to change the versioning state of the bucket.
The following operations are related to `GetBucketVersioning`:
<ul> <li> `GetObject`
</li> <li> `PutObject`
</li> <li> `DeleteObject`
</li> </ul>
"""
def get_bucket_versioning(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?versioning"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the website configuration for a bucket. To host a website on Amazon
S3, you can configure a bucket as a website by adding a website
configuration. For more information about hosting websites, see [Hosting
Websites on Amazon
S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
This GET operation requires the `S3:GetBucketWebsite` permission. By
default, only the bucket owner can read the bucket website configuration.
However, bucket owners can allow other users to read the website
configuration by writing a bucket policy granting them the
`S3:GetBucketWebsite` permission.
The following operations are related to `GetBucketWebsite`:
<ul> <li> `DeleteBucketWebsite`
</li> <li> `PutBucketWebsite`
</li> </ul>
"""
def get_bucket_website(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?website"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves objects from Amazon S3. To use `GET`, you must have `READ` access
to the object. If you grant `READ` access to the anonymous user, you can
return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a
typical computer file system. You can, however, create a logical hierarchy
by using object key names that imply a folder structure. For example,
instead of naming an object `sample.jpg`, you can name it
`photos/2006/February/sample.jpg`.
To get an object from such a logical hierarchy, specify the full key name
for the object in the `GET` operation. For a virtual hosted-style request
example, if you have the object `photos/2006/February/sample.jpg`, specify
the resource as `/photos/2006/February/sample.jpg`. For a path-style
request example, if you have the object `photos/2006/February/sample.jpg`
in the bucket named `examplebucket`, specify the resource as
`/examplebucket/photos/2006/February/sample.jpg`. For more information
about request types, see [HTTP Host Header Bucket
Specification](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
To distribute large files to many people, you can save bandwidth costs by
using BitTorrent. For more information, see [Amazon S3
Torrent](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
For more information about returning the ACL of an object, see
`GetObjectAcl`.
If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE
storage classes, before you can retrieve the object you must first restore
a copy using `RestoreObject`. Otherwise, this operation returns an
`InvalidObjectStateError` error. For information about restoring archived
objects, see [Restoring Archived
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
Encryption request headers, like `x-amz-server-side-encryption`, should not
be sent for GET requests if your object uses server-side encryption with
CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon
S3–managed encryption keys (SSE-S3). If your object does use these types of
keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with
customer-provided encryption keys (SSE-C) when you store the object in
Amazon S3, then when you GET the object, you must use the following
headers:
<ul> <li> x-amz-server-side-encryption-customer-algorithm
</li> <li> x-amz-server-side-encryption-customer-key
</li> <li> x-amz-server-side-encryption-customer-key-MD5
</li> </ul> For more information about SSE-C, see [Server-Side Encryption
(Using Customer-Provided Encryption
Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
Assuming you have permission to read object tags (permission for the
`s3:GetObjectVersionTagging` action), the response also returns the
`x-amz-tagging-count` header that provides the number of tags
associated with the object. You can use `GetObjectTagging` to retrieve the
tag set associated with an object.
**Permissions**
You need the `s3:GetObject` permission for this operation. For more
information, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
If the object you request does not exist, the error Amazon S3 returns
depends on whether you also have the `s3:ListBucket` permission.
<ul> <li> If you have the `s3:ListBucket` permission on the bucket, Amazon
S3 will return an HTTP status code 404 ("no such key") error.
</li> <li> If you don’t have the `s3:ListBucket` permission, Amazon S3 will
return an HTTP status code 403 ("access denied") error.
</li> </ul> **Versioning**
By default, the GET operation returns the current version of an object. To
return a different version, use the `versionId` subresource.
<note> If the current version of the object is a delete marker, Amazon S3
behaves as if the object was deleted and includes `x-amz-delete-marker:
true` in the response.
</note> For more information about versioning, see `PutBucketVersioning`.
**Overriding Response Header Values**
There are times when you want to override certain response header values in
a GET response. For example, you might override the Content-Disposition
response header value in your GET request.
You can override values for a set of response headers using the following
query parameters. These response header values are sent only on a
successful request, that is, when status code 200 OK is returned. The set
of headers you can override using these parameters is a subset of the
headers that Amazon S3 accepts when you create an object. The response
headers that you can override for the GET response are `Content-Type`,
`Content-Language`, `Expires`, `Cache-Control`, `Content-Disposition`, and
`Content-Encoding`. To override these header values in the GET response,
you use the following request parameters.
<note> You must sign the request, either using an Authorization header or a
presigned URL, when using these parameters. They cannot be used with an
unsigned (anonymous) request.
</note> <ul> <li> `response-content-type`
</li> <li> `response-content-language`
</li> <li> `response-expires`
</li> <li> `response-cache-control`
</li> <li> `response-content-disposition`
</li> <li> `response-content-encoding`
</li> </ul> **Additional Considerations about Request Headers**
If both the `If-Match` and `If-Unmodified-Since` headers are present in the
request, the `If-Match` condition evaluates to `true`, and the
`If-Unmodified-Since` condition evaluates to `false`, then S3 returns
`200 OK` and the requested data.
If both the `If-None-Match` and `If-Modified-Since` headers are present in
the request, the `If-None-Match` condition evaluates to `false`, and the
`If-Modified-Since` condition evaluates to `true`, then S3 returns a
`304 Not Modified` response code.
For more information about conditional requests, see [RFC
7232](https://tools.ietf.org/html/rfc7232).
The following operations are related to `GetObject`:
<ul> <li> `ListBuckets`
</li> <li> `GetObjectAcl`
</li> </ul>
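**Example**

A minimal, hypothetical sketch of overriding response headers on a
successful GET. The `client`, bucket, and key are assumptions; arguments are
positional, so pass `nil` to keep the defaults for parameters you skip:

```
{:ok, body, _response} =
  get_object(
    client,
    "my-bucket",
    "docs/report.txt",
    nil,          # part_number
    "no-cache",   # response_cache_control
    nil,          # response_content_disposition
    nil,          # response_content_encoding
    nil,          # response_content_language
    "text/plain"  # response_content_type
  )
```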
"""
def get_object(client, bucket, key, part_number \\ nil, response_cache_control \\ nil, response_content_disposition \\ nil, response_content_encoding \\ nil, response_content_language \\ nil, response_content_type \\ nil, response_expires \\ nil, version_id \\ nil, if_match \\ nil, if_modified_since \\ nil, if_none_match \\ nil, if_unmodified_since \\ nil, range \\ nil, request_payer \\ nil, s_s_e_customer_algorithm \\ nil, s_s_e_customer_key \\ nil, s_s_e_customer_key_m_d5 \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
headers = []
headers = if !is_nil(if_match) do
[{"If-Match", if_match} | headers]
else
headers
end
headers = if !is_nil(if_modified_since) do
[{"If-Modified-Since", if_modified_since} | headers]
else
headers
end
headers = if !is_nil(if_none_match) do
[{"If-None-Match", if_none_match} | headers]
else
headers
end
headers = if !is_nil(if_unmodified_since) do
[{"If-Unmodified-Since", if_unmodified_since} | headers]
else
headers
end
headers = if !is_nil(range) do
[{"Range", range} | headers]
else
headers
end
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
headers = if !is_nil(s_s_e_customer_algorithm) do
[{"x-amz-server-side-encryption-customer-algorithm", s_s_e_customer_algorithm} | headers]
else
headers
end
headers = if !is_nil(s_s_e_customer_key) do
[{"x-amz-server-side-encryption-customer-key", s_s_e_customer_key} | headers]
else
headers
end
headers = if !is_nil(s_s_e_customer_key_m_d5) do
[{"x-amz-server-side-encryption-customer-key-MD5", s_s_e_customer_key_m_d5} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(version_id) do
[{"versionId", version_id} | query_]
else
query_
end
query_ = if !is_nil(response_expires) do
[{"response-expires", response_expires} | query_]
else
query_
end
query_ = if !is_nil(response_content_type) do
[{"response-content-type", response_content_type} | query_]
else
query_
end
query_ = if !is_nil(response_content_language) do
[{"response-content-language", response_content_language} | query_]
else
query_
end
query_ = if !is_nil(response_content_encoding) do
[{"response-content-encoding", response_content_encoding} | query_]
else
query_
end
query_ = if !is_nil(response_content_disposition) do
[{"response-content-disposition", response_content_disposition} | query_]
else
query_
end
query_ = if !is_nil(response_cache_control) do
[{"response-cache-control", response_cache_control} | query_]
else
query_
end
query_ = if !is_nil(part_number) do
[{"partNumber", part_number} | query_]
else
query_
end
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"accept-ranges", "AcceptRanges"},
{"Cache-Control", "CacheControl"},
{"Content-Disposition", "ContentDisposition"},
{"Content-Encoding", "ContentEncoding"},
{"Content-Language", "ContentLanguage"},
{"Content-Length", "ContentLength"},
{"Content-Range", "ContentRange"},
{"Content-Type", "ContentType"},
{"x-amz-delete-marker", "DeleteMarker"},
{"ETag", "ETag"},
{"x-amz-expiration", "Expiration"},
{"Expires", "Expires"},
{"Last-Modified", "LastModified"},
{"x-amz-missing-meta", "MissingMeta"},
{"x-amz-object-lock-legal-hold", "ObjectLockLegalHoldStatus"},
{"x-amz-object-lock-mode", "ObjectLockMode"},
{"x-amz-object-lock-retain-until-date", "ObjectLockRetainUntilDate"},
{"x-amz-mp-parts-count", "PartsCount"},
{"x-amz-replication-status", "ReplicationStatus"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-restore", "Restore"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
{"x-amz-storage-class", "StorageClass"},
{"x-amz-tagging-count", "TagCount"},
{"x-amz-version-id", "VersionId"},
{"x-amz-website-redirect-location", "WebsiteRedirectLocation"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Returns the access control list (ACL) of an object. To use this operation,
you must have READ_ACP access to the object.
**Versioning**
By default, GET returns ACL information about the current version of an
object. To return ACL information about a different version, use the
versionId subresource.
The following operations are related to `GetObjectAcl`:
<ul> <li> `GetObject`
</li> <li> `DeleteObject`
</li> <li> `PutObject`
</li> </ul>
"""
def get_object_acl(client, bucket, key, version_id \\ nil, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?acl"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(version_id) do
[{"versionId", version_id} | query_]
else
query_
end
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets an object's current Legal Hold status. For more information, see
[Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
"""
def get_object_legal_hold(client, bucket, key, version_id \\ nil, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?legal-hold"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(version_id) do
[{"versionId", version_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the Object Lock configuration for a bucket. The rule specified in the
Object Lock configuration will be applied by default to every new object
placed in the specified bucket. For more information, see [Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
"""
def get_object_lock_configuration(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?object-lock"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves an object's retention settings. For more information, see
[Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
"""
def get_object_retention(client, bucket, key, version_id \\ nil, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?retention"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(version_id) do
[{"versionId", version_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns the tag-set of an object. You send the GET request against the
tagging subresource associated with the object.
To use this operation, you must have permission to perform the
`s3:GetObjectTagging` action. By default, the GET operation returns
information about the current version of an object. For a versioned bucket, you
can have multiple versions of an object in your bucket. To retrieve tags of
any other version, use the versionId query parameter. You also need
permission for the `s3:GetObjectVersionTagging` action.
By default, the bucket owner has this permission and can grant this
permission to others.
For information about the Amazon S3 object tagging feature, see [Object
Tagging](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
The following operation is related to `GetObjectTagging`:
<ul> <li> `PutObjectTagging`
</li> </ul>
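**Example**

A hypothetical sketch of retrieving the tag set of a specific object version;
`client` and `version_id` are assumed to come from elsewhere (for example, a
prior `ListObjectVersions` response):

```
{:ok, body, _response} =
  get_object_tagging(client, "my-bucket", "docs/report.txt", version_id)
```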
"""
def get_object_tagging(client, bucket, key, version_id \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?tagging"
headers = []
query_ = []
query_ = if !is_nil(version_id) do
[{"versionId", version_id} | query_]
else
query_
end
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Return torrent files from a bucket. BitTorrent can save you bandwidth when
you're distributing large files. For more information about BitTorrent, see
[Amazon S3
Torrent](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
<note> You can get torrent only for objects that are less than 5 GB in size
and that are not encrypted using server-side encryption with a
customer-provided encryption key.
</note> To use GET, you must have READ access to the object.
The following operation is related to `GetObjectTorrent`:
<ul> <li> `GetObject`
</li> </ul>
"""
def get_object_torrent(client, bucket, key, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?torrent"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Retrieves the `PublicAccessBlock` configuration for an Amazon S3 bucket. To
use this operation, you must have the `s3:GetBucketPublicAccessBlock`
permission. For more information about Amazon S3 permissions, see
[Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
<important> When Amazon S3 evaluates the `PublicAccessBlock` configuration
for a bucket or an object, it checks the `PublicAccessBlock` configuration
for both the bucket (or the bucket that contains the object) and the bucket
owner's account. If the `PublicAccessBlock` settings are different between
the bucket and the account, Amazon S3 uses the most restrictive combination
of the bucket-level and account-level settings.
</important> For more information about when Amazon S3 considers a bucket
or an object public, see [The Meaning of
"Public"](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
The following operations are related to `GetPublicAccessBlock`:
<ul> <li> [Using Amazon S3 Block Public
Access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
</li> <li> `PutPublicAccessBlock`
</li> <li> `GetPublicAccessBlock`
</li> <li> `DeletePublicAccessBlock`
</li> </ul>
"""
def get_public_access_block(client, bucket, options \\ []) do
path_ = "/#{URI.encode(bucket)}?publicAccessBlock"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation is useful to determine if a bucket exists and you have
permission to access it. The operation returns a `200 OK` if the bucket
exists and you have permission to access it. Otherwise, the operation might
return responses such as `404 Not Found` and `403 Forbidden`.
To use this operation, you must have permissions to perform the
`s3:ListBucket` action. The bucket owner has this permission by default and
can grant this permission to others. For more information about
permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
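**Example**

A sketch of an accessibility check built on this operation. The helper name
is hypothetical, the unused `input` is passed as an empty map, and `404`/`403`
responses are assumed to surface as error tuples:

```
def bucket_accessible?(client, bucket) do
  match?({:ok, _body, _response}, head_bucket(client, bucket, %{}))
end
```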
"""
def head_bucket(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}"
headers = []
query_ = []
request(client, :head, path_, query_, headers, input, options, nil)
end
@doc """
The HEAD operation retrieves metadata from an object without returning the
object itself. This operation is useful if you're only interested in an
object's metadata. To use HEAD, you must have READ access to the object.
A `HEAD` request has the same options as a `GET` operation on an object.
The response is identical to the `GET` response except that there is no
response body.
If you encrypt an object by using server-side encryption with
customer-provided encryption keys (SSE-C) when you store the object in
Amazon S3, then when you retrieve the metadata from the object, you must
use the following headers:
<ul> <li> x-amz-server-side-encryption-customer-algorithm
</li> <li> x-amz-server-side-encryption-customer-key
</li> <li> x-amz-server-side-encryption-customer-key-MD5
</li> </ul> For more information about SSE-C, see [Server-Side Encryption
(Using Customer-Provided Encryption
Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
<note> Encryption request headers, like `x-amz-server-side-encryption`,
should not be sent for GET requests if your object uses server-side
encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption
with Amazon S3–managed encryption keys (SSE-S3). If your object does use
these types of keys, you’ll get an HTTP 400 BadRequest error.
</note> Request headers are limited to 8 KB in size. For more information,
see [Common Request
Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
Consider the following when using request headers:
<ul> <li> Consideration 1 – If both of the `If-Match` and
`If-Unmodified-Since` headers are present in the request as follows:
<ul> <li> `If-Match` condition evaluates to `true`, and;
</li> <li> `If-Unmodified-Since` condition evaluates to `false`;
</li> </ul> Then Amazon S3 returns `200 OK` and the data requested.
</li> <li> Consideration 2 – If both of the `If-None-Match` and
`If-Modified-Since` headers are present in the request as follows:
<ul> <li> `If-None-Match` condition evaluates to `false`, and;
</li> <li> `If-Modified-Since` condition evaluates to `true`;
</li> </ul> Then Amazon S3 returns the `304 Not Modified` response code.
</li> </ul> For more information about conditional requests, see [RFC
7232](https://tools.ietf.org/html/rfc7232).
**Permissions**
You need the `s3:GetObject` permission for this operation. For more
information, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
If the object you request does not exist, the error Amazon S3 returns
depends on whether you also have the `s3:ListBucket` permission.
<ul> <li> If you have the `s3:ListBucket` permission on the bucket, Amazon
S3 returns an HTTP status code 404 ("no such key") error.
</li> <li> If you don’t have the `s3:ListBucket` permission, Amazon S3
returns an HTTP status code 403 ("access denied") error.
</li> </ul> The following operation is related to `HeadObject`:
<ul> <li> `GetObject`
</li> </ul>
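**Example**

A sketch of a conditional `HEAD`. The `"IfNoneMatch"` input key follows the
parameter mapping used by this function; `client` and `known_etag` are
assumptions:

```
input = %{"IfNoneMatch" => known_etag}

case head_object(client, "my-bucket", "docs/report.txt", input) do
  # The ETag changed, so fresh metadata is returned with 200 OK.
  {:ok, metadata, _response} -> {:changed, metadata}
  # A 304 Not Modified is assumed to surface as an error tuple; its exact
  # shape depends on the underlying HTTP layer.
  other -> other
end
```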
"""
def head_object(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
{"IfModifiedSince", "If-Modified-Since"},
{"IfNoneMatch", "If-None-Match"},
{"IfUnmodifiedSince", "If-Unmodified-Since"},
{"Range", "Range"},
{"RequestPayer", "x-amz-request-payer"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"PartNumber", "partNumber"},
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :head, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"accept-ranges", "AcceptRanges"},
{"Cache-Control", "CacheControl"},
{"Content-Disposition", "ContentDisposition"},
{"Content-Encoding", "ContentEncoding"},
{"Content-Language", "ContentLanguage"},
{"Content-Length", "ContentLength"},
{"Content-Type", "ContentType"},
{"x-amz-delete-marker", "DeleteMarker"},
{"ETag", "ETag"},
{"x-amz-expiration", "Expiration"},
{"Expires", "Expires"},
{"Last-Modified", "LastModified"},
{"x-amz-missing-meta", "MissingMeta"},
{"x-amz-object-lock-legal-hold", "ObjectLockLegalHoldStatus"},
{"x-amz-object-lock-mode", "ObjectLockMode"},
{"x-amz-object-lock-retain-until-date", "ObjectLockRetainUntilDate"},
{"x-amz-mp-parts-count", "PartsCount"},
{"x-amz-replication-status", "ReplicationStatus"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-restore", "Restore"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
{"x-amz-storage-class", "StorageClass"},
{"x-amz-version-id", "VersionId"},
{"x-amz-website-redirect-location", "WebsiteRedirectLocation"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Lists the analytics configurations for the bucket. You can have up to 1,000
analytics configurations per bucket.
This operation supports list pagination and does not return more than 100
configurations at a time. You should always check the `IsTruncated` element
in the response. If there are no more configurations to list, `IsTruncated`
is set to false. If there are more configurations to list, `IsTruncated` is
set to true, and there will be a value in `NextContinuationToken`. You use
the `NextContinuationToken` value to continue the pagination of the list by
passing the value in continuation-token in the request to `GET` the next
page.
To use this operation, you must have permissions to perform the
`s3:GetAnalyticsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics
– Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
The following operations are related to
`ListBucketAnalyticsConfigurations`:
<ul> <li> `GetBucketAnalyticsConfiguration`
</li> <li> `DeleteBucketAnalyticsConfiguration`
</li> <li> `PutBucketAnalyticsConfiguration`
</li> </ul>
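**Example**

A pagination sketch. The `"IsTruncated"` and `"NextContinuationToken"` body
keys are assumptions about how the XML response is decoded in your setup:

```
{:ok, body, _response} =
  list_bucket_analytics_configurations(client, "my-bucket")

case body do
  %{"IsTruncated" => "true", "NextContinuationToken" => token} ->
    # Fetch the next page by passing the token as continuation_token.
    list_bucket_analytics_configurations(client, "my-bucket", token)

  _ ->
    # Nothing more to list.
    body
end
```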
"""
def list_bucket_analytics_configurations(client, bucket, continuation_token \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?analytics"
headers = []
query_ = []
query_ = if !is_nil(continuation_token) do
[{"continuation-token", continuation_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns a list of inventory configurations for the bucket. You can have up
to 1,000 inventory configurations per bucket.
This operation supports list pagination and does not return more than 100
configurations at a time. Always check the `IsTruncated` element in the
response. If there are no more configurations to list, `IsTruncated` is set
to false. If there are more configurations to list, `IsTruncated` is set to
true, and there is a value in `NextContinuationToken`. You use the
`NextContinuationToken` value to continue the pagination of the list by
passing the value in continuation-token in the request to `GET` the next
page.
To use this operation, you must have permissions to perform the
`s3:GetInventoryConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about the Amazon S3 inventory feature, see [Amazon S3
Inventory](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
The following operations are related to
`ListBucketInventoryConfigurations`:
<ul> <li> `GetBucketInventoryConfiguration`
</li> <li> `DeleteBucketInventoryConfiguration`
</li> <li> `PutBucketInventoryConfiguration`
</li> </ul>
"""
def list_bucket_inventory_configurations(client, bucket, continuation_token \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?inventory"
headers = []
query_ = []
query_ = if !is_nil(continuation_token) do
[{"continuation-token", continuation_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists the metrics configurations for the bucket. The metrics configurations
are only for the request metrics of the bucket and do not provide
information on daily storage metrics. You can have up to 1,000
configurations per bucket.
This operation supports list pagination and does not return more than 100
configurations at a time. Always check the `IsTruncated` element in the
response. If there are no more configurations to list, `IsTruncated` is set
to false. If there are more configurations to list, `IsTruncated` is set to
true, and there is a value in `NextContinuationToken`. You use the
`NextContinuationToken` value to continue the pagination of the list by
passing the value in `continuation-token` in the request to `GET` the next
page.
To use this operation, you must have permissions to perform the
`s3:GetMetricsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For more information about metrics configurations and CloudWatch request
metrics, see [Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
The following operations are related to `ListBucketMetricsConfigurations`:
<ul> <li> `PutBucketMetricsConfiguration`
</li> <li> `GetBucketMetricsConfiguration`
</li> <li> `DeleteBucketMetricsConfiguration`
</li> </ul>
"""
def list_bucket_metrics_configurations(client, bucket, continuation_token \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?metrics"
headers = []
query_ = []
query_ = if !is_nil(continuation_token) do
[{"continuation-token", continuation_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns a list of all buckets owned by the authenticated sender of the
request.
"""
def list_buckets(client, options \\ []) do
path_ = "/"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
This operation lists in-progress multipart uploads. An in-progress
multipart upload is a multipart upload that has been initiated using the
Initiate Multipart Upload request, but has not yet been completed or
aborted.
This operation returns at most 1,000 multipart uploads in the response;
1,000 is both the maximum number of uploads a response can include and the
default value. You can further limit the number of uploads in a response by
specifying the `max-uploads` parameter in the request. If additional
multipart uploads satisfy the list criteria, the
response will contain an `IsTruncated` element with the value true. To list
the additional multipart uploads, use the `key-marker` and
`upload-id-marker` request parameters.
In the response, the uploads are sorted by key. If your application has
initiated more than one multipart upload using the same object key, then
uploads in the response are first sorted by key. Additionally, uploads are
sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see [Uploading Objects Using
Multipart
Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
For information on permissions required to use the multipart upload API,
see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
The following operations are related to `ListMultipartUploads`:
<ul> <li> `CreateMultipartUpload`
</li> <li> `UploadPart`
</li> <li> `CompleteMultipartUpload`
</li> <li> `ListParts`
</li> <li> `AbortMultipartUpload`
</li> </ul>
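**Example**

A hypothetical call listing in-progress uploads under a prefix, capped at 50
per page; arguments are positional, so `nil` keeps the defaults:

```
{:ok, body, _response} =
  list_multipart_uploads(
    client,
    "my-bucket",
    nil,        # delimiter
    nil,        # encoding_type
    nil,        # key_marker
    50,         # max_uploads
    "backups/"  # prefix
  )
```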
"""
def list_multipart_uploads(client, bucket, delimiter \\ nil, encoding_type \\ nil, key_marker \\ nil, max_uploads \\ nil, prefix \\ nil, upload_id_marker \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?uploads"
headers = []
query_ = []
query_ = if !is_nil(upload_id_marker) do
[{"upload-id-marker", upload_id_marker} | query_]
else
query_
end
query_ = if !is_nil(prefix) do
[{"prefix", prefix} | query_]
else
query_
end
query_ = if !is_nil(max_uploads) do
[{"max-uploads", max_uploads} | query_]
else
query_
end
query_ = if !is_nil(key_marker) do
[{"key-marker", key_marker} | query_]
else
query_
end
query_ = if !is_nil(encoding_type) do
[{"encoding-type", encoding_type} | query_]
else
query_
end
query_ = if !is_nil(delimiter) do
[{"delimiter", delimiter} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns metadata about all of the versions of objects in a bucket. You can
also use request parameters as selection criteria to return metadata about
a subset of all the object versions.
<note> A 200 OK response can contain valid or invalid XML. Make sure to
design your application to parse the contents of the response and handle it
appropriately.
</note> To use this operation, you must have READ access to the bucket.
The following operations are related to `ListObjectVersions`:
<ul> <li> `ListObjectsV2`
</li> <li> `GetObject`
</li> <li> `PutObject`
</li> <li> `DeleteObject`
</li> </ul>
"""
def list_object_versions(client, bucket, delimiter \\ nil, encoding_type \\ nil, key_marker \\ nil, max_keys \\ nil, prefix \\ nil, version_id_marker \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?versions"
headers = []
query_ = []
query_ = if !is_nil(version_id_marker) do
[{"version-id-marker", version_id_marker} | query_]
else
query_
end
query_ = if !is_nil(prefix) do
[{"prefix", prefix} | query_]
else
query_
end
query_ = if !is_nil(max_keys) do
[{"max-keys", max_keys} | query_]
else
query_
end
query_ = if !is_nil(key_marker) do
[{"key-marker", key_marker} | query_]
else
query_
end
query_ = if !is_nil(encoding_type) do
[{"encoding-type", encoding_type} | query_]
else
query_
end
query_ = if !is_nil(delimiter) do
[{"delimiter", delimiter} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns some or all (up to 1,000) of the objects in a bucket. You can use
the request parameters as selection criteria to return a subset of the
objects in a bucket. A 200 OK response can contain valid or invalid XML. Be
sure to design your application to parse the contents of the response and
handle it appropriately.
<important> This API has been revised. We recommend that you use the newer
version, `ListObjectsV2`, when developing applications. For backward
compatibility, Amazon S3 continues to support `ListObjects`.
</important> The following operations are related to `ListObjects`:
<ul> <li> `ListObjectsV2`
</li> <li> `GetObject`
</li> <li> `PutObject`
</li> <li> `CreateBucket`
</li> <li> `ListBuckets`
</li> </ul>
"""
def list_objects(client, bucket, delimiter \\ nil, encoding_type \\ nil, marker \\ nil, max_keys \\ nil, prefix \\ nil, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(prefix) do
[{"prefix", prefix} | query_]
else
query_
end
query_ = if !is_nil(max_keys) do
[{"max-keys", max_keys} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"marker", marker} | query_]
else
query_
end
query_ = if !is_nil(encoding_type) do
[{"encoding-type", encoding_type} | query_]
else
query_
end
query_ = if !is_nil(delimiter) do
[{"delimiter", delimiter} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns some or all (up to 1,000) of the objects in a bucket. You can use
the request parameters as selection criteria to return a subset of the
objects in a bucket. A `200 OK` response can contain valid or invalid XML.
Make sure to design your application to parse the contents of the response
and handle it appropriately.
To use this operation, you must have READ access to the bucket.
To use this operation in an AWS Identity and Access Management (IAM)
policy, you must have permissions to perform the `s3:ListBucket` action.
The bucket owner has this permission by default and can grant this
permission to others. For more information about permissions, see
[Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
<important> This section describes the latest revision of the API. We
recommend that you use this revised API for application development. For
backward compatibility, Amazon S3 continues to support the prior version of
this API, `ListObjects`.
</important> To get a list of your buckets, see `ListBuckets`.
The following operations are related to `ListObjectsV2`:
<ul> <li> `GetObject`
</li> <li> `PutObject`
</li> <li> `CreateBucket`
</li> </ul>
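**Example**

A sketch of exhaustive pagination. The helper name is hypothetical, and the
`"Contents"`, `"Key"`, `"IsTruncated"`, and `"NextContinuationToken"` keys
are assumptions about the decoded XML body:

```
def list_all_keys(client, bucket, token, acc) do
  {:ok, body, _response} = list_objects_v2(client, bucket, token)
  keys = for entry <- List.wrap(body["Contents"] || []), do: entry["Key"]

  case body do
    %{"IsTruncated" => "true", "NextContinuationToken" => next} ->
      list_all_keys(client, bucket, next, acc ++ keys)

    _ ->
      acc ++ keys
  end
end

# Start with no token and an empty accumulator:
# list_all_keys(client, "my-bucket", nil, [])
```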
"""
def list_objects_v2(client, bucket, continuation_token \\ nil, delimiter \\ nil, encoding_type \\ nil, fetch_owner \\ nil, max_keys \\ nil, prefix \\ nil, start_after \\ nil, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}?list-type=2"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(start_after) do
[{"start-after", start_after} | query_]
else
query_
end
query_ = if !is_nil(prefix) do
[{"prefix", prefix} | query_]
else
query_
end
query_ = if !is_nil(max_keys) do
[{"max-keys", max_keys} | query_]
else
query_
end
query_ = if !is_nil(fetch_owner) do
[{"fetch-owner", fetch_owner} | query_]
else
query_
end
query_ = if !is_nil(encoding_type) do
[{"encoding-type", encoding_type} | query_]
else
query_
end
query_ = if !is_nil(delimiter) do
[{"delimiter", delimiter} | query_]
else
query_
end
query_ = if !is_nil(continuation_token) do
[{"continuation-token", continuation_token} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists the parts that have been uploaded for a specific multipart upload.
This operation must include the upload ID, which you obtain by sending the
initiate multipart upload request (see `CreateMultipartUpload`). This
request returns a maximum of 1,000 uploaded parts. The default number of
parts returned is 1,000 parts. You can restrict the number of parts
returned by specifying the `max-parts` request parameter. If your multipart
upload consists of more than 1,000 parts, the response returns an
`IsTruncated` field with the value of true, and a `NextPartNumberMarker`
element. In subsequent `ListParts` requests you can include the
`part-number-marker` query string parameter and set its value to the
`NextPartNumberMarker` field value from the previous response.
For more information on multipart uploads, see [Uploading Objects Using
Multipart
Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
For information on permissions required to use the multipart upload API,
see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
The following operations are related to `ListParts`:
<ul> <li> `CreateMultipartUpload`
</li> <li> `UploadPart`
</li> <li> `CompleteMultipartUpload`
</li> <li> `AbortMultipartUpload`
</li> <li> `ListMultipartUploads`
</li> </ul>
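**Example**

A sketch of fetching one page of parts and following `NextPartNumberMarker`.
The decoded body keys are assumptions, and `upload_id` is assumed to come
from a prior `CreateMultipartUpload` call:

```
{:ok, body, _response} =
  list_parts(client, "my-bucket", "big-file.bin", upload_id)

case body do
  %{"IsTruncated" => "true", "NextPartNumberMarker" => marker} ->
    # Positional arguments: max_parts, part_number_marker, then upload_id.
    list_parts(client, "my-bucket", "big-file.bin", nil, marker, upload_id)

  _ ->
    body
end
```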
"""
def list_parts(client, bucket, key, max_parts \\ nil, part_number_marker \\ nil, upload_id, request_payer \\ nil, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
headers = []
headers = if !is_nil(request_payer) do
[{"x-amz-request-payer", request_payer} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(upload_id) do
[{"uploadId", upload_id} | query_]
else
query_
end
query_ = if !is_nil(part_number_marker) do
[{"part-number-marker", part_number_marker} | query_]
else
query_
end
query_ = if !is_nil(max_parts) do
[{"max-parts", max_parts} | query_]
else
query_
end
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-abort-date", "AbortDate"},
{"x-amz-abort-rule-id", "AbortRuleId"},
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
Acceleration is a bucket-level feature that enables you to perform faster
data transfers to Amazon S3.
To use this operation, you must have permission to perform the
`s3:PutAccelerateConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
The Transfer Acceleration state of a bucket can be set to one of the
following two values:
<ul> <li> Enabled – Enables accelerated data transfers to the bucket.
</li> <li> Suspended – Disables accelerated data transfers to the bucket.
</li> </ul> The `GetBucketAccelerateConfiguration` operation returns the
transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it
might take up to thirty minutes before the data transfer rates to the
bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant
and must not contain periods (".").
For more information about transfer acceleration, see [Transfer
Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
The following operations are related to `PutBucketAccelerateConfiguration`:
<ul> <li> `GetBucketAccelerateConfiguration`
</li> <li> `CreateBucket`
</li> </ul>
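**Example**

A hypothetical sketch of enabling Transfer Acceleration; the shape of the
`input` map is an assumption about how the accelerate configuration XML
payload is represented before encoding:

```
input = %{"AccelerateConfiguration" => %{"Status" => "Enabled"}}

{:ok, _body, _response} =
  put_bucket_accelerate_configuration(client, "my-bucket", input)
```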
"""
def put_bucket_accelerate_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?accelerate"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the permissions on an existing bucket using access control lists
(ACL). For more information, see [Using
ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
To set the ACL of a bucket, you must have `WRITE_ACP` permission.
You can use one of the following two ways to set a bucket's permissions:
<ul> <li> Specify the ACL in the request body
</li> <li> Specify permissions using request headers
</li> </ul> <note> You cannot specify access permission using both the body
and the request headers.
</note> Depending on your application needs, you may choose to set the ACL
on a bucket using either the request body or the headers. For example, if
you have an existing application that updates a bucket ACL using the
request body, then you can continue to use that approach.
**Access Permissions**
You can set access permissions using one of the following methods:
<ul> <li> Specify a canned ACL with the `x-amz-acl` request header. Amazon
S3 supports a set of predefined ACLs, known as *canned ACLs*. Each canned
ACL has a predefined set of grantees and permissions. Specify the canned
ACL name as the value of `x-amz-acl`. If you use this header, you cannot
use other access control-specific headers in your request. For more
information, see [Canned
ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
</li> <li> Specify access permissions explicitly with the
`x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
`x-amz-grant-full-control` headers. When using these headers, you specify
explicit access permissions and grantees (AWS accounts or Amazon S3 groups)
who will receive the permission. If you use these ACL-specific headers, you
cannot use the `x-amz-acl` header to set a canned ACL. These parameters map
to the set of permissions that Amazon S3 supports in an ACL. For more
information, see [Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
You specify each grantee as a type=value pair, where the type is one of the
following:
<ul> <li> `id` – if the value specified is the canonical user ID of an AWS
account
</li> <li> `uri` – if you are granting permissions to a predefined group
</li> <li> `emailAddress` – if the value specified is the email address of
an AWS account
<note> Using email addresses to specify a grantee is only supported in the
following AWS Regions:
<ul> <li> US East (N. Virginia)
</li> <li> US West (N. California)
</li> <li> US West (Oregon)
</li> <li> Asia Pacific (Singapore)
</li> <li> Asia Pacific (Sydney)
</li> <li> Asia Pacific (Tokyo)
</li> <li> Europe (Ireland)
</li> <li> South America (São Paulo)
</li> </ul> For a list of all the Amazon S3 supported Regions and
endpoints, see [Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
in the AWS General Reference.
</note> </li> </ul> For example, the following `x-amz-grant-write` header
grants create, overwrite, and delete objects permission to the LogDelivery
group predefined by Amazon S3 and to two AWS accounts identified by their
canonical user IDs.
`x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
id="111122223333", id="555566667777" `
</li> </ul> You can use either a canned ACL or specify access permissions
explicitly. You cannot do both.
**Grantee Values**
You can specify the person (grantee) to whom you're assigning access rights
(using request elements) in the following ways:
<ul> <li> By the person's ID:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName>
</Grantee>`
DisplayName is optional and ignored in the request
</li> <li> By URI:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>`
</li> <li> By Email address:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="AmazonCustomerByEmail"><EmailAddress><><EMAIL><></EmailAddress>lt;/Grantee>`
The grantee is resolved to the CanonicalUser and, in a response to a GET
Object acl request, appears as the CanonicalUser.
<note> Using email addresses to specify a grantee is only supported in the
following AWS Regions:
<ul> <li> US East (N. Virginia)
</li> <li> US West (N. California)
</li> <li> US West (Oregon)
</li> <li> Asia Pacific (Singapore)
</li> <li> Asia Pacific (Sydney)
</li> <li> Asia Pacific (Tokyo)
</li> <li> Europe (Ireland)
</li> <li> South America (São Paulo)
</li> </ul> For a list of all the Amazon S3 supported Regions and
endpoints, see [Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
in the AWS General Reference.
</note> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `CreateBucket`
</li> <li> `DeleteBucket`
</li> <li> `GetObjectAcl`
</li> </ul>
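**Example**

A sketch of setting a canned ACL. The `"ACL"` input key maps to the
`x-amz-acl` header per this function's parameter mapping; `client` is an
assumption:

```
input = %{"ACL" => "private"}

{:ok, _body, _response} = put_bucket_acl(client, "my-bucket", input)
```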
"""
def put_bucket_acl(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?acl"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"ContentMD5", "Content-MD5"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWrite", "x-amz-grant-write"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets an analytics configuration for the bucket (specified by the analytics
configuration ID). You can have up to 1,000 analytics configurations per
bucket.
You can choose to have storage class analysis export analysis reports sent
to a comma-separated values (CSV) flat file. See the `DataExport` request
element. Reports are updated daily and are based on the object filters that
you configure. When selecting data export, you specify a destination bucket
and an optional destination prefix where the file is written. You can
export the data to a destination bucket in a different account. However,
the destination bucket must be in the same Region as the bucket on which
you are setting the analytics configuration. For more information, see
[Amazon S3 Analytics – Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
<important> You must create a bucket policy on the destination bucket where
the exported file is written to grant permissions to Amazon S3 to write
objects to the bucket. For an example policy, see [Granting Permissions for
Amazon S3 Inventory and Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
</important> To use this operation, you must have permissions to perform
the `s3:PutAnalyticsConfiguration` action. The bucket owner has this
permission by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
<p class="title"> **Special Errors**
<ul> <li> <ul> <li> *HTTP Error: HTTP 400 Bad Request*
</li> <li> *Code: InvalidArgument*
</li> <li> *Cause: Invalid argument.*
</li> </ul> </li> <li> <ul> <li> *HTTP Error: HTTP 400 Bad Request*
</li> <li> *Code: TooManyConfigurations*
</li> <li> *Cause: You are attempting to create a new configuration but
have already reached the 1,000-configuration limit.*
</li> </ul> </li> <li> <ul> <li> *HTTP Error: HTTP 403 Forbidden*
</li> <li> *Code: AccessDenied*
</li> <li> *Cause: You are not the owner of the specified bucket, or you do
not have the s3:PutAnalyticsConfiguration bucket permission to set the
configuration on the bucket.*
</li> </ul> </li> </ul>
"""
def put_bucket_analytics_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?analytics"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the `cors` configuration for your bucket. If the configuration exists,
Amazon S3 replaces it.
To use this operation, you must be allowed to perform the
`s3:PutBucketCORS` action. By default, the bucket owner has this permission
and can grant it to others.
You set this configuration on a bucket so that the bucket can service
cross-origin requests. For example, you might want to enable a request
whose origin is `http://www.example.com` to access your Amazon S3 bucket at
`my.example.bucket.com` by using the browser's `XMLHttpRequest` capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the
`cors` subresource to the bucket. The `cors` subresource is an XML document
in which you configure rules that identify origins and the HTTP methods
that can be executed on your bucket. The document is limited to 64 KB in
size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS
request) against a bucket, it evaluates the `cors` configuration on the
bucket and uses the first `CORSRule` rule that matches the incoming browser
request to enable a cross-origin request. For a rule to match, the
following conditions must be met:
<ul> <li> The request's `Origin` header must match `AllowedOrigin`
elements.
</li> <li> The request method (for example, GET, PUT, HEAD, and so on) or
the `Access-Control-Request-Method` header in case of a pre-flight
`OPTIONS` request must be one of the `AllowedMethod` elements.
</li> <li> Every header specified in the `Access-Control-Request-Headers`
request header of a pre-flight request must match an `AllowedHeader`
element.
</li> </ul> For more information about CORS, go to [Enabling Cross-Origin
Resource
Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the
*Amazon Simple Storage Service Developer Guide*.
<p class="title"> **Related Resources**
<ul> <li> `GetBucketCors`
</li> <li> `DeleteBucketCors`
</li> <li> `RESTOPTIONSobject`
</li> </ul>
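**Example**

A sketch of one CORS rule matching the example origin above; the nested map
shape is an assumption about how the `cors` XML document is represented
before encoding:

```
input = %{
  "CORSConfiguration" => %{
    "CORSRule" => %{
      "AllowedOrigin" => "http://www.example.com",
      "AllowedMethod" => ["GET", "PUT"],
      "AllowedHeader" => "*",
      "MaxAgeSeconds" => 3000
    }
  }
}

{:ok, _body, _response} = put_bucket_cors(client, "my-bucket", input)
```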
"""
def put_bucket_cors(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?cors"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This implementation of the `PUT` operation uses the `encryption`
subresource to set the default encryption state of an existing bucket.
This implementation of the `PUT` operation sets default encryption for a
bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or
AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the
Amazon S3 default encryption feature, see [Amazon S3 Default Bucket
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html).
<important> This operation requires AWS Signature Version 4. For more
information, see [Authenticating Requests (AWS Signature Version
4)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
</important> To use this operation, you must have permissions to perform
the `s3:PutEncryptionConfiguration` action. The bucket owner has this
permission by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to
Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the Amazon Simple Storage Service Developer Guide.
<p class="title"> **Related Resources**
<ul> <li> `GetBucketEncryption`
</li> <li> `DeleteBucketEncryption`
</li> </ul>
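**Example**

A sketch of defaulting a bucket to SSE-S3 (`AES256`); the nested map shape is
an assumption about how the encryption XML document is represented before
encoding:

```
# The "ContentMD5" input key (mapped to the Content-MD5 header by this
# function) can be added if your setup does not compute it for you.
input = %{
  "ServerSideEncryptionConfiguration" => %{
    "Rule" => %{
      "ApplyServerSideEncryptionByDefault" => %{"SSEAlgorithm" => "AES256"}
    }
  }
}

{:ok, _body, _response} = put_bucket_encryption(client, "my-bucket", input)
```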
"""
def put_bucket_encryption(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?encryption"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
This implementation of the `PUT` operation adds an inventory configuration
(identified by the inventory ID) to the bucket. You can have up to 1,000
inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a
daily or weekly basis, and the results are published to a flat file. The
bucket that is inventoried is called the *source* bucket, and the bucket
where the inventory flat file is stored is called the *destination* bucket.
The *destination* bucket must be in the same AWS Region as the *source*
bucket.
When you configure an inventory for a *source* bucket, you specify the
*destination* bucket where you want the inventory to be stored, and whether
to generate the inventory daily or weekly. You can also configure what
object metadata to include and whether to inventory all object versions or
only current versions. For more information, see [Amazon S3
Inventory](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
in the Amazon Simple Storage Service Developer Guide.
<important> You must create a bucket policy on the *destination* bucket to
grant permissions to Amazon S3 to write objects to the bucket in the
defined location. For an example policy, see [Granting Permissions for
Amazon S3 Inventory and Storage Class
Analysis](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
</important> To use this operation, you must have permissions to perform
the `s3:PutInventoryConfiguration` action. The bucket owner has this
permission by default and can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the Amazon Simple Storage Service Developer Guide.
<p class="title"> **Special Errors**
<ul> <li> <p class="title"> **HTTP 400 Bad Request Error**
<ul> <li> *Code:* InvalidArgument
</li> <li> *Cause:* Invalid Argument
</li> </ul> </li> <li> <p class="title"> **HTTP 400 Bad Request Error**
<ul> <li> *Code:* TooManyConfigurations
</li> <li> *Cause:* You are attempting to create a new configuration but
have already reached the 1,000-configuration limit.
</li> </ul> </li> <li> <p class="title"> **HTTP 403 Forbidden Error**
<ul> <li> *Code:* AccessDenied
</li> <li> *Cause:* You are not the owner of the specified bucket, or you
do not have the `s3:PutInventoryConfiguration` bucket permission to set the
configuration on the bucket.
</li> </ul> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `GetBucketInventoryConfiguration`
</li> <li> `DeleteBucketInventoryConfiguration`
</li> <li> `ListBucketInventoryConfigurations`
</li> </ul>
"""
def put_bucket_inventory_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?inventory"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
<important> For an updated version of this API, see
`PutBucketLifecycleConfiguration`. This version has been deprecated.
Existing lifecycle configurations will work. For new lifecycle
configurations, use the updated API.
</important> Creates a new lifecycle configuration for the bucket or
replaces an existing lifecycle configuration. For information about
lifecycle configuration, see [Object Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
in the *Amazon Simple Storage Service Developer Guide*.
By default, all Amazon S3 resources, including buckets, objects, and
related subresources (for example, lifecycle configuration and website
configuration) are private. Only the resource owner, the AWS account that
created the resource, can access it. The resource owner can optionally
grant access permissions to others by writing an access policy. For this
operation, users must get the `s3:PutLifecycleConfiguration` permission.
You can also explicitly deny permissions. Explicit denial also supersedes
any other permissions. If you want to prevent users or accounts from
removing or deleting objects from your bucket, you must deny them
permissions for the following actions:
<ul> <li> `s3:DeleteObject`
</li> <li> `s3:DeleteObjectVersion`
</li> <li> `s3:PutLifecycleConfiguration`
</li> </ul> For more information about permissions, see [Managing Access
Permissions to your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the *Amazon Simple Storage Service Developer Guide*.
For more examples of transitioning objects to storage classes such as
STANDARD_IA or ONEZONE_IA, see [Examples of Lifecycle
Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
<p class="title"> **Related Resources**
<ul> <li> `GetBucketLifecycle` (Deprecated)
</li> <li> `GetBucketLifecycleConfiguration`
</li> <li> By default, a resource owner (in this case, the bucket owner, which
is the AWS account that created the bucket) can perform any of the
operations. A resource owner can also grant others permission to perform
the operation. For more information, see the following topics in the Amazon
Simple Storage Service Developer Guide:
<ul> <li> [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
</li> <li> [Managing Access Permissions to your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
</li> </ul> </li> </ul>
"""
def put_bucket_lifecycle(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?lifecycle"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Creates a new lifecycle configuration for the bucket or replaces an
existing lifecycle configuration. For information about lifecycle
configuration, see [Object Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
<note> Bucket lifecycle configuration now supports specifying a lifecycle
rule using an object key name prefix, one or more object tags, or a
combination of both. Accordingly, this section describes the latest API.
The previous version of the API supported filtering based only on an object
key name prefix, which is supported for backward compatibility. For the
related API description, see `PutBucketLifecycle`.
</note> **Rules**
You specify the lifecycle configuration in your request body. The lifecycle
configuration is specified as XML consisting of one or more rules. Each
rule consists of the following:
<ul> <li> Filter identifying a subset of objects to which the rule applies.
The filter can be based on a key name prefix, object tags, or a combination
of both.
</li> <li> Status whether the rule is in effect.
</li> <li> One or more lifecycle transition and expiration actions that you
want Amazon S3 to perform on the objects identified by the filter. If the
state of your bucket is versioning-enabled or versioning-suspended, you can
have many versions of the same object (one current version and zero or more
noncurrent versions). Amazon S3 provides predefined actions that you can
specify for current and noncurrent object versions.
</li> </ul> For more information, see [Object Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
and [Lifecycle Configuration
Elements](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
**Permissions**
By default, all Amazon S3 resources are private, including buckets,
objects, and related subresources (for example, lifecycle configuration and
website configuration). Only the resource owner (that is, the AWS account
that created it) can access the resource. The resource owner can optionally
grant access permissions to others by writing an access policy. For this
operation, a user must get the `s3:PutLifecycleConfiguration` permission.
You can also explicitly deny permissions. Explicit deny also supersedes any
other permissions. If you want to block users or accounts from removing or
deleting objects from your bucket, you must deny them permissions for the
following actions:
<ul> <li> `s3:DeleteObject`
</li> <li> `s3:DeleteObjectVersion`
</li> <li> `s3:PutLifecycleConfiguration`
</li> </ul> For more information about permissions, see [Managing Access
Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
The following are related to `PutBucketLifecycleConfiguration`:
<ul> <li> [Examples of Lifecycle
Configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
</li> <li> `GetBucketLifecycleConfiguration`
</li> <li> `DeleteBucketLifecycle`
</li> </ul>
"""
def put_bucket_lifecycle_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?lifecycle"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the logging parameters for a bucket and specifies permissions for who
can view and modify the logging parameters. All logs are saved to buckets
in the same AWS Region as the source bucket. To set the logging status of a
bucket, you must be the bucket owner.
The bucket owner is automatically granted FULL_CONTROL to all logs. You use
the `Grantee` request element to grant access to other people. The
`Permissions` request element specifies the kind of access the grantee has
to the logs.
**Grantee Values**
You can specify the person (grantee) to whom you're assigning access rights
(using request elements) in the following ways:
<ul> <li> By the person's ID:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName>
</Grantee>`
DisplayName is optional and ignored in the request.
</li> <li> By Email address:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="AmazonCustomerByEmail"><EmailAddress><EMAIL></EmailAddress></Grantee>`
The grantee is resolved to the CanonicalUser and, in a response to a GET
Object acl request, appears as the CanonicalUser.
</li> <li> By URI:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
</li> </ul> To enable logging, you use the `LoggingEnabled` element and its
child request elements. To disable logging, you use an empty
`BucketLoggingStatus` request element:
`<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01"
/>`
For more information about server access logging, see [Server Access
Logging](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html).
For more information about creating a bucket, see `CreateBucket`. For more
information about returning the logging status of a bucket, see
`GetBucketLogging`.
The following operations are related to `PutBucketLogging`:
<ul> <li> `PutObject`
</li> <li> `DeleteBucket`
</li> <li> `CreateBucket`
</li> <li> `GetBucketLogging`
</li> </ul>
"""
def put_bucket_logging(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?logging"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets a metrics configuration (specified by the metrics configuration ID)
for the bucket. You can have up to 1,000 metrics configurations per bucket.
If you're updating an existing metrics configuration, note that this is a
full replacement of the existing metrics configuration. If you don't
include the elements you want to keep, they are erased.
To use this operation, you must have permissions to perform the
`s3:PutMetricsConfiguration` action. The bucket owner has this permission
by default. The bucket owner can grant this permission to others. For more
information about permissions, see [Permissions Related to Bucket
Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
For information about CloudWatch request metrics for Amazon S3, see
[Monitoring Metrics with Amazon
CloudWatch](https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
The following operations are related to `PutBucketMetricsConfiguration`:
<ul> <li> `DeleteBucketMetricsConfiguration`
</li> <li> `GetBucketMetricsConfiguration`
</li> <li> `ListBucketMetricsConfigurations`
</li> </ul> `PutBucketMetricsConfiguration` has the following special error:
<ul> <li> Error code: `TooManyConfigurations`
<ul> <li> Description: You are attempting to create a new configuration but
have already reached the 1,000-configuration limit.
</li> <li> HTTP Status Code: HTTP 400 Bad Request
</li> </ul> </li> </ul>
"""
def put_bucket_metrics_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?metrics"
headers = []
{query_, input} =
[
{"Id", "id"},
]
|> AWS.Request.build_params(input)
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
No longer used, see the `PutBucketNotificationConfiguration` operation.
"""
def put_bucket_notification(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?notification"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Enables notifications of specified events for a bucket. For more
information about event notifications, see [Configuring Event
Notifications](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
Using this API, you can replace an existing notification configuration. The
configuration is an XML file that defines the event types that you want
Amazon S3 to publish and the destination where you want Amazon S3 to
publish an event notification when it detects an event of the specified
type.
By default, your bucket has no event notifications configured. That is, the
notification configuration will be an empty `NotificationConfiguration`.
`<NotificationConfiguration>`
`</NotificationConfiguration>`
This operation replaces the existing notification configuration with the
configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon
Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service
(Amazon SQS) destination exists, and that the bucket owner has permission
to publish to it by sending a test notification. In the case of AWS Lambda
destinations, Amazon S3 verifies that the Lambda function permissions grant
Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
more information, see [Configuring Notifications for Amazon S3
Events](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
You can disable notifications by adding the empty NotificationConfiguration
element.
By default, only the bucket owner can configure notifications on a bucket.
However, bucket owners can use a bucket policy to grant permission to other
users to set this configuration with the `s3:PutBucketNotification`
permission.
<note> The PUT notification is an atomic operation. For example, suppose
your notification configuration includes SNS topic, SQS queue, and Lambda
function configurations. When you send a PUT request with this
configuration, Amazon S3 sends test messages to your SNS topic. If the
message fails, the entire PUT operation will fail, and Amazon S3 will not
add the configuration to your bucket.
</note> **Responses**
If the configuration in the request body includes only one
`TopicConfiguration` specifying only the `s3:ReducedRedundancyLostObject`
event type, the response will also include the `x-amz-sns-test-message-id`
header containing the message ID of the test notification sent to the
topic.
The following operation is related to `PutBucketNotificationConfiguration`:
<ul> <li> `GetBucketNotificationConfiguration`
</li> </ul>
"""
def put_bucket_notification_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?notification"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
an identity other than the root user of the AWS account that owns the
bucket, the calling identity must have the `PutBucketPolicy` permissions on
the specified bucket and belong to the bucket owner's account in order to
use this operation.
If you don't have `PutBucketPolicy` permissions, Amazon S3 returns a `403
Access Denied` error. If you have the correct permissions, but you're not
using an identity that belongs to the bucket owner's account, Amazon S3
returns a `405 Method Not Allowed` error.
<important> As a security precaution, the root user of the AWS account that
owns a bucket can always use this operation, even if the policy explicitly
denies the root user the ability to perform this action.
</important> For more information about bucket policies, see [Using Bucket
Policies and User
Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
The following operations are related to `PutBucketPolicy`:
<ul> <li> `CreateBucket`
</li> <li> `DeleteBucket`
</li> </ul>
"""
def put_bucket_policy(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?policy"
{headers, input} =
[
{"ConfirmRemoveSelfBucketAccess", "x-amz-confirm-remove-self-bucket-access"},
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Creates a replication configuration or replaces an existing one. For more
information, see
[Replication](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
in the *Amazon S3 Developer Guide*.
<note> To perform this operation, the user or role performing the operation
must have the
[iam:PassRole](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html)
permission.
</note> Specify the replication configuration in the request body. In the
replication configuration, you provide the name of the destination bucket
where you want Amazon S3 to replicate objects, the IAM role that Amazon S3
can assume to replicate objects on your behalf, and other relevant
information.
A replication configuration must include at least one rule, and can contain
a maximum of 1,000. Each rule identifies a subset of objects to replicate
by filtering the objects in the source bucket. To choose additional subsets
of objects to replicate, add a rule for each subset. All rules must specify
the same destination bucket.
To specify a subset of the objects in the source bucket to apply a
replication rule to, add the Filter element as a child of the Rule element.
You can filter objects based on an object key prefix, one or more object
tags, or both. When you add the Filter element in the configuration, you
must also add the following elements: `DeleteMarkerReplication`, `Status`,
and `Priority`.
For information about enabling versioning on a bucket, see [Using
Versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html).
By default, a resource owner, in this case the AWS account that created the
bucket, can perform this operation. The resource owner can also grant
others permissions to perform the operation. For more information about
permissions, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
**Handling Replication of Encrypted Objects**
By default, Amazon S3 doesn't replicate objects that are stored at rest
using server-side encryption with CMKs stored in AWS KMS. To replicate AWS
KMS-encrypted objects, add the following: `SourceSelectionCriteria`,
`SseKmsEncryptedObjects`, `Status`, `EncryptionConfiguration`, and
`ReplicaKmsKeyID`. For information about replication configuration, see
[Replicating Objects Created with SSE Using CMKs stored in AWS
KMS](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html).
For information on `PutBucketReplication` errors, see
`ReplicationErrorCodeList`.
The following operations are related to `PutBucketReplication`:
<ul> <li> `GetBucketReplication`
</li> <li> `DeleteBucketReplication`
</li> </ul>
"""
def put_bucket_replication(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?replication"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
{"Token", "x-amz-bucket-object-lock-token"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the request payment configuration for a bucket. By default, the bucket
owner pays for downloads from the bucket. This configuration parameter
enables the bucket owner (only) to specify that the person requesting the
download will be charged for the download. For more information, see
[Requester Pays
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
The following operations are related to `PutBucketRequestPayment`:
<ul> <li> `CreateBucket`
</li> <li> `GetBucketRequestPayment`
</li> </ul>
"""
def put_bucket_request_payment(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?requestPayment"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To
do this, sign up to get your AWS account bill with tag key values included.
Then, to see the cost of combined resources, organize your billing
information according to resources with the same tag key values. For
example, you can tag several resources with a specific application name,
and then organize your billing information to see the total cost of that
application across several services. For more information, see [Cost
Allocation and
Tagging](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
<note> Within a bucket, if you add a tag that has the same key as an
existing tag, the new value overwrites the old value. For more information,
see [Using Cost Allocation in Amazon S3 Bucket
Tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html).
</note> To use this operation, you must have permissions to perform the
`s3:PutBucketTagging` action. The bucket owner has this permission by
default and can grant this permission to others. For more information about
permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
`PutBucketTagging` has the following special errors:
<ul> <li> Error code: `InvalidTagError`
<ul> <li> Description: The tag provided was not a valid tag. This error can
occur if the tag did not pass input validation. For information about tag
restrictions, see [User-Defined Tag
Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
and [AWS-Generated Cost Allocation Tag
Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
</li> </ul> </li> <li> Error code: `MalformedXMLError`
<ul> <li> Description: The XML provided does not match the schema.
</li> </ul> </li> <li> Error code: `OperationAbortedError`
<ul> <li> Description: A conflicting conditional operation is currently in
progress against this resource. Please try again.
</li> </ul> </li> <li> Error code: `InternalError`
<ul> <li> Description: The service was unable to apply the provided tag to
the bucket.
</li> </ul> </li> </ul> The following operations are related to
`PutBucketTagging`:
<ul> <li> `GetBucketTagging`
</li> <li> `DeleteBucketTagging`
</li> </ul>
"""
def put_bucket_tagging(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?tagging"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the versioning state of an existing bucket. To set the versioning
state, you must be the bucket owner.
You can set the versioning state with one of the following values:
**Enabled**—Enables versioning for the objects in the bucket. All objects
added to the bucket receive a unique version ID.
**Suspended**—Disables versioning for the objects in the bucket. All
objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no
versioning state; a `GetBucketVersioning` request does not return a
versioning state value.
If the bucket owner enables MFA Delete in the bucket versioning
configuration, the bucket owner must include the `x-amz-mfa` request header
and the `Status` and the `MfaDelete` request elements in a request to set
the versioning state of the bucket.
<important> If you have an object expiration lifecycle policy in your
non-versioned bucket and you want to maintain the same permanent delete
behavior when you enable versioning, you must add a noncurrent expiration
policy. The noncurrent expiration lifecycle policy will manage the deletes
of the noncurrent object versions in the version-enabled bucket. (A
version-enabled bucket maintains one current and zero or more noncurrent
object versions.) For more information, see [Lifecycle and
Versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config).
</important> <p class="title"> **Related Resources**
<ul> <li> `CreateBucket`
</li> <li> `DeleteBucket`
</li> <li> `GetBucketVersioning`
</li> </ul>
"""
def put_bucket_versioning(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?versioning"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
{"MFA", "x-amz-mfa"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Sets the configuration of the website that is specified in the `website`
subresource. To configure a bucket as a website, you can add this
subresource on the bucket with website configuration information such as
the file name of the index document and any redirect rules. For more
information, see [Hosting Websites on Amazon
S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
This PUT operation requires the `S3:PutBucketWebsite` permission. By
default, only the bucket owner can configure the website attached to a
bucket; however, bucket owners can allow other users to set the website
configuration by writing a bucket policy that grants them the
`S3:PutBucketWebsite` permission.
To redirect all website requests sent to the bucket's website endpoint, you
add a website configuration with the following elements. Because all
requests are sent to another website, you don't need to provide an index
document name for the bucket.
<ul> <li> `WebsiteConfiguration`
</li> <li> `RedirectAllRequestsTo`
</li> <li> `HostName`
</li> <li> `Protocol`
</li> </ul> If you want granular control over redirects, you can use the
following elements to add routing rules that describe conditions for
redirecting requests and information about the redirect destination. In
this case, the website configuration must provide an index document for the
bucket, because some requests might not be redirected.
<ul> <li> `WebsiteConfiguration`
</li> <li> `IndexDocument`
</li> <li> `Suffix`
</li> <li> `ErrorDocument`
</li> <li> `Key`
</li> <li> `RoutingRules`
</li> <li> `RoutingRule`
</li> <li> `Condition`
</li> <li> `HttpErrorCodeReturnedEquals`
</li> <li> `KeyPrefixEquals`
</li> <li> `Redirect`
</li> <li> `Protocol`
</li> <li> `HostName`
</li> <li> `ReplaceKeyPrefixWith`
</li> <li> `ReplaceKeyWith`
</li> <li> `HttpRedirectCode`
</li> </ul> Amazon S3 has a limitation of 50 routing rules per website
configuration. If you require more than 50 routing rules, you can use
object redirect. For more information, see [Configuring an Object
Redirect](https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
in the *Amazon Simple Storage Service Developer Guide*.
"""
def put_bucket_website(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?website"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Adds an object to a bucket. You must have WRITE permissions on a bucket to
add an object to it.
Amazon S3 never adds partial objects; if you receive a success response,
Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests
for the same object simultaneously, it overwrites all but the last object
written. Amazon S3 does not provide object locking; if you need this, make
sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the
`Content-MD5` header. When you use this header, Amazon S3 checks the object
against the provided MD5 value and, if they do not match, returns an error.
Additionally, you can calculate the MD5 while putting an object to Amazon
S3 and compare the returned ETag to the calculated MD5 value.
<note> The `Content-MD5` header is required for any request to upload an
object with a retention period configured using Amazon S3 Object Lock. For
more information about Amazon S3 Object Lock, see [Amazon S3 Object Lock
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
in the *Amazon Simple Storage Service Developer Guide*.
</note> **Server-side Encryption**
You can optionally request server-side encryption. With server-side
encryption, Amazon S3 encrypts your data as it writes it to disks in its
data centers and decrypts the data when you access it. You have the option
to provide your own encryption key or use AWS managed encryption keys. For
more information, see [Using Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
**Access Control List (ACL)-Specific Request Headers**
You can use headers to grant ACL-based permissions. By default, all
objects are private. Only the owner has full access control. When adding a
new object, you can grant permissions to individual AWS accounts or to
predefined groups defined by Amazon S3. These permissions are then added to
the ACL on the object. For more information, see [Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
and [Managing ACLs Using the REST
API](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
**Storage Class Options**
By default, Amazon S3 uses the STANDARD storage class to store newly
created objects. The STANDARD storage class provides high durability and
high availability. Depending on performance needs, you can specify a
different storage class. For more information, see [Storage
Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
in the *Amazon Simple Storage Service Developer Guide*.
**Versioning**
If you enable versioning for a bucket, Amazon S3 automatically generates a
unique version ID for the object being stored. Amazon S3 returns this ID in
the response. When you enable versioning for a bucket, if Amazon S3
receives multiple write requests for the same object simultaneously, it
stores all of the objects.
For more information about versioning, see [Adding Objects to Versioning
Enabled
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
For information about returning the versioning state of a bucket, see
`GetBucketVersioning`.
<p class="title"> **Related Resources**
<ul> <li> `CopyObject`
</li> <li> `DeleteObject`
</li> </ul>
"""
def put_object(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"CacheControl", "Cache-Control"},
{"ContentDisposition", "Content-Disposition"},
{"ContentEncoding", "Content-Encoding"},
{"ContentLanguage", "Content-Language"},
{"ContentLength", "Content-Length"},
{"ContentMD5", "Content-MD5"},
{"ContentType", "Content-Type"},
{"Expires", "Expires"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"ObjectLockLegalHoldStatus", "x-amz-object-lock-legal-hold"},
{"ObjectLockMode", "x-amz-object-lock-mode"},
{"ObjectLockRetainUntilDate", "x-amz-object-lock-retain-until-date"},
{"RequestPayer", "x-amz-request-payer"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
{"SSEKMSEncryptionContext", "x-amz-server-side-encryption-context"},
{"SSEKMSKeyId", "x-amz-server-side-encryption-aws-kms-key-id"},
{"ServerSideEncryption", "x-amz-server-side-encryption"},
{"StorageClass", "x-amz-storage-class"},
{"Tagging", "x-amz-tagging"},
{"WebsiteRedirectLocation", "x-amz-website-redirect-location"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"x-amz-expiration", "Expiration"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-context", "SSEKMSEncryptionContext"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Uses the `acl` subresource to set the access control list (ACL) permissions
for an object that already exists in a bucket. You must have `WRITE_ACP`
permission to set the ACL of an object.
Depending on your application needs, you can choose to set the ACL on an
object using either the request body or the headers. For example, if you
have an existing application that updates a bucket ACL using the request
body, you can continue to use that approach. For more information, see
[Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
in the *Amazon S3 Developer Guide*.
**Access Permissions**
You can set access permissions using one of the following methods:
<ul> <li> Specify a canned ACL with the `x-amz-acl` request header. Amazon
S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL
has a predefined set of grantees and permissions. Specify the canned ACL
name as the value of `x-amz-acl`. If you use this header, you cannot use
other access control-specific headers in your request. For more
information, see [Canned
ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
</li> <li> Specify access permissions explicitly with the
`x-amz-grant-read`, `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
`x-amz-grant-full-control` headers. When using these headers, you specify
explicit access permissions and grantees (AWS accounts or Amazon S3 groups)
who will receive the permission. If you use these ACL-specific headers, you
cannot use the `x-amz-acl` header to set a canned ACL. These parameters map to
the set of permissions that Amazon S3 supports in an ACL. For more
information, see [Access Control List (ACL)
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
You specify each grantee as a type=value pair, where the type is one of the
following:
<ul> <li> `id` – if the value specified is the canonical user ID of an AWS
account
</li> <li> `uri` – if you are granting permissions to a predefined group
</li> <li> `emailAddress` – if the value specified is the email address of
an AWS account
<note> Using email addresses to specify a grantee is only supported in the
following AWS Regions:
<ul> <li> US East (N. Virginia)
</li> <li> US West (N. California)
</li> <li> US West (Oregon)
</li> <li> Asia Pacific (Singapore)
</li> <li> Asia Pacific (Sydney)
</li> <li> Asia Pacific (Tokyo)
</li> <li> Europe (Ireland)
</li> <li> South America (São Paulo)
</li> </ul> For a list of all the Amazon S3 supported Regions and
endpoints, see [Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
in the AWS General Reference.
</note> </li> </ul> For example, the following `x-amz-grant-read` header
grants list objects permission to the two AWS accounts identified by their
email addresses.
`x-amz-grant-read: emailAddress="<EMAIL>",
emailAddress="<EMAIL>"`
</li> </ul> You can use either a canned ACL or specify access permissions
explicitly. You cannot do both.
**Grantee Values**
You can specify the person (grantee) to whom you're assigning access rights
(using request elements) in the following ways:
<ul> <li> By the person's ID:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName>
</Grantee>`
DisplayName is optional and ignored in the request.
</li> <li> By URI:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
</li> <li> By Email address:
`<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="AmazonCustomerByEmail"><EmailAddress><EMAIL></EmailAddress></Grantee>`
The grantee is resolved to the CanonicalUser and, in a response to a GET
Object acl request, appears as the CanonicalUser.
<note> Using email addresses to specify a grantee is only supported in the
following AWS Regions:
<ul> <li> US East (N. Virginia)
</li> <li> US West (N. California)
</li> <li> US West (Oregon)
</li> <li> Asia Pacific (Singapore)
</li> <li> Asia Pacific (Sydney)
</li> <li> Asia Pacific (Tokyo)
</li> <li> Europe (Ireland)
</li> <li> South America (São Paulo)
</li> </ul> For a list of all the Amazon S3 supported Regions and
endpoints, see [Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
in the AWS General Reference.
</note> </li> </ul> **Versioning**
The ACL of an object is set at the object version level. By default, PUT
sets the ACL of the current version of an object. To set the ACL of a
different version, use the `versionId` subresource.
<p class="title"> **Related Resources**
<ul> <li> `CopyObject`
</li> <li> `GetObject`
</li> </ul>
"""
def put_object_acl(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?acl"
{headers, input} =
[
{"ACL", "x-amz-acl"},
{"ContentMD5", "Content-MD5"},
{"GrantFullControl", "x-amz-grant-full-control"},
{"GrantRead", "x-amz-grant-read"},
{"GrantReadACP", "x-amz-grant-read-acp"},
{"GrantWrite", "x-amz-grant-write"},
{"GrantWriteACP", "x-amz-grant-write-acp"},
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Applies a Legal Hold configuration to the specified object.
<p class="title"> **Related Resources**
<ul> <li> [Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
</li> </ul>
"""
def put_object_legal_hold(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?legal-hold"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Places an Object Lock configuration on the specified bucket. The rule
specified in the Object Lock configuration will be applied by default to
every new object placed in the specified bucket.
<note> `DefaultRetention` requires either Days or Years. You can't specify
both at the same time.
</note> <p class="title"> **Related Resources**
<ul> <li> [Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
</li> </ul>
"""
def put_object_lock_configuration(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?object-lock"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
{"RequestPayer", "x-amz-request-payer"},
{"Token", "x-amz-bucket-object-lock-token"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Places an Object Retention configuration on an object.
<p class="title"> **Related Resources**
<ul> <li> [Locking
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
</li> </ul>
"""
def put_object_retention(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?retention"
{headers, input} =
[
{"BypassGovernanceRetention", "x-amz-bypass-governance-retention"},
{"ContentMD5", "Content-MD5"},
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending
a PUT request against the tagging subresource that is associated with the
object. You can retrieve tags by sending a GET request. For more
information, see `GetObjectTagging`.
For tagging-related restrictions related to characters and encodings, see
[Tag
Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
Note that Amazon S3 limits the maximum number of tags to 10 per object.
To use this operation, you must have permission to perform the
`s3:PutObjectTagging` action. By default, the bucket owner has this
permission and can grant this permission to others.
To put tags of any other version, use the `versionId` query parameter. You
also need permission for the `s3:PutObjectVersionTagging` action.
For information about the Amazon S3 object tagging feature, see [Object
Tagging](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
<p class="title"> **Special Errors**
<ul> <li> <ul> <li> *Code: InvalidTagError*
</li> <li> *Cause: The tag provided was not a valid tag. This error can
occur if the tag did not pass input validation. For more information, see
[Object
Tagging](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).*
</li> </ul> </li> <li> <ul> <li> *Code: MalformedXMLError*
</li> <li> *Cause: The XML provided does not match the schema.*
</li> </ul> </li> <li> <ul> <li> *Code: OperationAbortedError*
</li> <li> *Cause: A conflicting conditional operation is currently in
progress against this resource. Please try again.*
</li> </ul> </li> <li> <ul> <li> *Code: InternalError*
</li> <li> *Cause: The service was unable to apply the provided tag to the
object.*
</li> </ul> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `GetObjectTagging`
</li> </ul>
"""
def put_object_tagging(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?tagging"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-version-id", "VersionId"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates or modifies the `PublicAccessBlock` configuration for an Amazon S3
bucket. To use this operation, you must have the
`s3:PutBucketPublicAccessBlock` permission. For more information about
Amazon S3 permissions, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
<important> When Amazon S3 evaluates the `PublicAccessBlock` configuration
for a bucket or an object, it checks the `PublicAccessBlock` configuration
for both the bucket (or the bucket that contains the object) and the bucket
owner's account. If the `PublicAccessBlock` configurations are different
between the bucket and the account, Amazon S3 uses the most restrictive
combination of the bucket-level and account-level settings.
</important> For more information about when Amazon S3 considers a bucket
or an object public, see [The Meaning of
"Public"](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status).
<p class="title"> **Related Resources**
<ul> <li> `GetPublicAccessBlock`
</li> <li> `DeletePublicAccessBlock`
</li> <li> `GetBucketPolicyStatus`
</li> <li> [Using Amazon S3 Block Public
Access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
</li> </ul>
"""
def put_public_access_block(client, bucket, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}?publicAccessBlock"
{headers, input} =
[
{"ContentMD5", "Content-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Restores an archived copy of an object back into Amazon S3.
This operation performs the following types of requests:
<ul> <li> `select` - Perform a select query on an archived object
</li> <li> `restore an archive` - Restore an archived object
</li> </ul> To use this operation, you must have permissions to perform the
`s3:RestoreObject` action. The bucket owner has this permission by default
and can grant this permission to others. For more information about
permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3
Resources](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
in the *Amazon Simple Storage Service Developer Guide*.
**Querying Archives with Select Requests**
You use a select type of request to perform SQL queries on archived
objects. The archived objects that are being queried by the select request
must be formatted as uncompressed comma-separated values (CSV) files. You
can run queries and custom analytics on your archived data without having
to restore your data to a hotter Amazon S3 tier. For an overview about
select requests, see [Querying Archived
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
in the *Amazon Simple Storage Service Developer Guide*.
When making a select request, do the following:
<ul> <li> Define an output location for the select query's output. This
must be an Amazon S3 bucket in the same AWS Region as the bucket that
contains the archive object that is being queried. The AWS account that
initiates the job must have permissions to write to the S3 bucket. You can
specify the storage class and encryption for the output objects stored in
the bucket. For more information about output, see [Querying Archived
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
in the *Amazon Simple Storage Service Developer Guide*.
For more information about the `S3` structure in the request body, see the
following:
<ul> <li> `PutObject`
</li> <li> [Managing Access with
ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
in the *Amazon Simple Storage Service Developer Guide*
</li> <li> [Protecting Data Using Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
in the *Amazon Simple Storage Service Developer Guide*
</li> </ul> </li> <li> Define the SQL expression for the `SELECT` type of
restoration for your query in the request body's `SelectParameters`
structure. You can use expressions like the following examples.
<ul> <li> The following expression returns all records from the specified
object.
`SELECT * FROM Object`
</li> <li> Assuming that you are not using any headers for data stored in
the object, you can specify columns with positional headers.
`SELECT s._1, s._2 FROM Object s WHERE s._3 > 100`
</li> <li> If you have headers and you set the `fileHeaderInfo` in the
`CSV` structure in the request body to `USE`, you can specify headers in
the query. (If you set the `fileHeaderInfo` field to `IGNORE`, the first
row is skipped for the query.) You cannot mix ordinal positions with header
column names.
`SELECT s.Id, s.FirstName, s.SSN FROM S3Object s`
</li> </ul> </li> </ul> For more information about using SQL with S3
Glacier Select restore, see [SQL Reference for Amazon S3 Select and S3
Glacier
Select](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
in the *Amazon Simple Storage Service Developer Guide*.
When making a select request, you can also do the following:
<ul> <li> To expedite your queries, specify the `Expedited` tier. For more
information about tiers, see "Restoring Archives," later in this topic.
</li> <li> Specify details about the data serialization format of both the
input object that is being queried and the serialization of the CSV-encoded
query results.
</li> </ul> The following are additional important facts about the select
feature:
<ul> <li> The output results are new Amazon S3 objects. Unlike archive
retrievals, they are stored until explicitly deleted, either manually or
through a lifecycle policy.
</li> <li> You can issue more than one select request on the same Amazon S3
object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate
requests.
</li> <li> Amazon S3 accepts a select request even if the object has
already been restored. A select request doesn't return a `409` error
response.
</li> </ul> **Restoring Archives**
Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To
access an archived object, you must first initiate a restore request. This
restores a temporary copy of the archived object. In a restore request, you
specify the number of days that you want the restored copy to exist. After
the specified period, Amazon S3 deletes the temporary copy, but the object
remains archived in the GLACIER or DEEP_ARCHIVE storage class from which it
was restored.
To restore a specific object version, you can provide a version ID. If you
don't provide a version ID, Amazon S3 restores the current version.
The time it takes restore jobs to finish depends on which storage class the
object is being restored from and which data access tier you specify.
When restoring an archived object (or using a select request), you can
specify one of the following data access tier options in the `Tier` element
of the request body:
<ul> <li> ** `Expedited` ** - Expedited retrievals allow you to quickly
access your data stored in the GLACIER storage class when occasional urgent
requests for a subset of archives are required. For all but the largest
archived objects (250 MB+), data accessed using Expedited retrievals is
typically made available within 1–5 minutes. Provisioned capacity ensures
that retrieval capacity for Expedited retrievals is available when you need
it. Expedited retrievals and provisioned capacity are not available for the
DEEP_ARCHIVE storage class.
</li> <li> ** `Standard` ** - S3 Standard retrievals allow you to access
any of your archived objects within several hours. This is the default
option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not
specify the retrieval option. S3 Standard retrievals typically complete
within 3-5 hours from the GLACIER storage class and typically complete
within 12 hours from the DEEP_ARCHIVE storage class.
</li> <li> ** `Bulk` ** - Bulk retrievals are Amazon S3 Glacier’s
lowest-cost retrieval option, enabling you to retrieve large amounts, even
petabytes, of data inexpensively in a day. Bulk retrievals typically
complete within 5-12 hours from the GLACIER storage class and typically
complete within 48 hours from the DEEP_ARCHIVE storage class.
</li> </ul> For more information about archive retrieval options and
provisioned capacity for `Expedited` data access, see [Restoring Archived
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
in the *Amazon Simple Storage Service Developer Guide*.
You can use Amazon S3 restore speed upgrade to change the restore speed to
a faster speed while it is in progress. You upgrade the speed of an
in-progress restoration by issuing another restore request to the same
object, setting a new `Tier` request element. When issuing a request to
upgrade the restore tier, you must choose a tier that is faster than the
tier that the in-progress restore is using. You must not change any other
parameters, such as the `Days` request element. For more information, see [
Upgrading the Speed of an In-Progress
Restore](https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
in the *Amazon Simple Storage Service Developer Guide*.
To get the status of object restoration, you can send a `HEAD` request.
Operations return the `x-amz-restore` header in the response, which provides
information about the restoration status. You can use Amazon S3 event
notifications to notify you when a restore is initiated or completed. For
more information, see [Configuring Amazon S3 Event
Notifications](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
in the *Amazon Simple Storage Service Developer Guide*.
After restoring an archived object, you can update the restoration period
by reissuing the request with a new period. Amazon S3 updates the
restoration period relative to the current time and charges only for the
request; there are no data transfer charges. You cannot update the
restoration period when Amazon S3 is actively processing your current
restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an
expiration action, the object expiration overrides the life span that you
specify in a restore request. For example, if you restore an object copy
for 10 days, but the object is scheduled to expire in 3 days, Amazon S3
deletes the object in 3 days. For more information about lifecycle
configuration, see `PutBucketLifecycleConfiguration` and [Object Lifecycle
Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
in the *Amazon Simple Storage Service Developer Guide*.
**Responses**
A successful operation returns either the `200 OK` or `202 Accepted` status
code.
<ul> <li> If the object copy is not previously restored, then Amazon S3
returns `202 Accepted` in the response.
</li> <li> If the object copy is previously restored, Amazon S3 returns
`200 OK` in the response.
</li> </ul> <p class="title"> **Special Errors**
<ul> <li> <ul> <li> *Code: RestoreAlreadyInProgress*
</li> <li> *Cause: Object restore is already in progress. (This error does
not apply to SELECT type requests.)*
</li> <li> *HTTP Status Code: 409 Conflict*
</li> <li> *SOAP Fault Code Prefix: Client*
</li> </ul> </li> <li> <ul> <li> *Code: GlacierExpeditedRetrievalNotAvailable*
</li> <li> *Cause: S3 Glacier expedited retrievals are currently not
available. Try again later. (Returned if there is insufficient capacity to
process the Expedited request. This error applies only to Expedited
retrievals and not to S3 Standard or Bulk retrievals.)*
</li> <li> *HTTP Status Code: 503*
</li> <li> *SOAP Fault Code Prefix: N/A*
</li> </ul> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `PutBucketLifecycleConfiguration`
</li> <li> `GetBucketNotificationConfiguration`
</li> <li> [SQL Reference for Amazon S3 Select and S3 Glacier Select
](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
in the *Amazon Simple Storage Service Developer Guide*
</li> </ul>
"""
def restore_object(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?restore"
{headers, input} =
[
{"RequestPayer", "x-amz-request-payer"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"VersionId", "versionId"},
]
|> AWS.Request.build_params(input)
case request(client, :post, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-restore-output-path", "RestoreOutputPath"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
This operation filters the contents of an Amazon S3 object based on a
simple structured query language (SQL) statement. In the request, along
with the SQL expression, you must also specify a data serialization format
(JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to
parse object data into records, and returns only records that match the
specified SQL expression. You must also specify the data serialization
format for the response.
For more information about Amazon S3 Select, see [Selecting Content from
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
in the *Amazon Simple Storage Service Developer Guide*.
For more information about using SQL with Amazon S3 Select, see [ SQL
Reference for Amazon S3 Select and S3 Glacier
Select](https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
in the *Amazon Simple Storage Service Developer Guide*.
<p/> **Permissions**
You must have `s3:GetObject` permission for this operation. Amazon S3
Select does not support anonymous access. For more information about
permissions, see [Specifying Permissions in a
Policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
in the *Amazon Simple Storage Service Developer Guide*.
<p/> **Object Data Formats**
You can use Amazon S3 Select to query objects that have the following
format properties:
<ul> <li> *CSV, JSON, and Parquet* - Objects must be in CSV, JSON, or
Parquet format.
</li> <li> *UTF-8* - UTF-8 is the only encoding type Amazon S3 Select
supports.
</li> <li> *GZIP or BZIP2* - CSV and JSON files can be compressed using
GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon
S3 Select supports for CSV and JSON files. Amazon S3 Select supports
columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select
does not support whole-object compression for Parquet objects.
</li> <li> *Server-side encryption* - Amazon S3 Select supports querying
objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys
(SSE-C), you must use HTTPS, and you must use the headers that are
documented in the `GetObject`. For more information about SSE-C, see
[Server-Side Encryption (Using Customer-Provided Encryption
Keys)](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
in the *Amazon Simple Storage Service Developer Guide*.
For objects that are encrypted with Amazon S3 managed encryption keys
(SSE-S3) and customer master keys (CMKs) stored in AWS Key Management
Service (SSE-KMS), server-side encryption is handled transparently, so you
don't need to specify anything. For more information about server-side
encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using
Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
in the *Amazon Simple Storage Service Developer Guide*.
</li> </ul> **Working with the Response Body**
Given the response size is unknown, Amazon S3 Select streams the response
as a series of messages and includes a `Transfer-Encoding` header with
`chunked` as its value in the response. For more information, see
`RESTSelectObjectAppendix`.
**GetObject Support**
The `SelectObjectContent` operation does not support the following
`GetObject` functionality. For more information, see `GetObject`.
* `Range`: Although you can specify a scan range for an Amazon S3 Select
request (see `SelectObjectContentRequest$ScanRange` in the request
parameters), you cannot specify the range of bytes of an object to return.
* `GLACIER`, `DEEP_ARCHIVE`, and `REDUCED_REDUNDANCY` storage classes: You
cannot specify the `GLACIER`, `DEEP_ARCHIVE`, or `REDUCED_REDUNDANCY`
storage classes. For more information about storage classes, see [Storage
Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
in the *Amazon Simple Storage Service Developer Guide*.
**Special Errors**
For a list of special errors for this operation, see
`SelectObjectContentErrorCodeList`.
**Related Resources**
* `GetObject`
* `GetBucketLifecycleConfiguration`
* `PutBucketLifecycleConfiguration`
"""
def select_object_content(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}?select&select-type=2"
{headers, input} =
[
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
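# A minimal usage sketch (illustrative only; it assumes this module is
# `AWS.S3` and that the client struct fields match those consumed by
# `request/8` below):
#
#     client = %AWS.Client{access_key_id: "AKIA...", secret_access_key: "...",
#       region: "us-east-1", endpoint: "amazonaws.com", proto: "https", port: 443}
#     input = %{
#       "Expression" => "SELECT s.name FROM S3Object s",
#       "ExpressionType" => "SQL",
#       "InputSerialization" => %{"CSV" => %{"FileHeaderInfo" => "USE"}},
#       "OutputSerialization" => %{"CSV" => %{}}
#     }
#     {:ok, body, _response} =
#       AWS.S3.select_object_content(client, "my-bucket", "people.csv", input)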
@doc """
Uploads a part in a multipart upload.
**Note:** In this operation, you provide part data in your request.
However, you have an option to specify your existing Amazon S3 object as a
data source for the part you are uploading. To upload a part from an
existing object, you use the `UploadPartCopy` operation.
You must initiate a multipart upload (see `CreateMultipartUpload`)
before you can upload any part. In response to your initiate request,
Amazon S3 returns an upload ID, a unique identifier, that you must include
in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number
uniquely identifies a part and also defines its position within the object
being created. If you upload a new part using the same part number that was
used with a previous part, the previously uploaded part is overwritten.
Each part must be at least 5 MB in size, except the last part. There is no
size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify
the `Content-MD5` header in the upload part request. Amazon S3 checks the
part data against the provided MD5 value. If they do not match, Amazon S3
returns an error.
**Note:** After you initiate a multipart upload and upload one or more
parts, you must either complete or abort the multipart upload in order to
stop getting charged for storage of the uploaded parts. Amazon S3 frees up
the parts storage and stops charging you for it only after you either
complete or abort the multipart upload.
For more information on multipart uploads, go to [Multipart Upload
Overview](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
in the *Amazon Simple Storage Service Developer Guide*.
For information on the permissions required to use the multipart upload
API, go to [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
in the *Amazon Simple Storage Service Developer Guide*.
You can optionally request server-side encryption where Amazon S3 encrypts
your data as it writes it to disks in its data centers and decrypts it for
you when you access it. You have the option of providing your own
encryption key, or you can use the AWS managed encryption keys. If you
choose to provide your own encryption key, the request headers you provide
in the request must match the headers you used in the request to initiate
the upload by using `CreateMultipartUpload`. For more information, go to
[Using Server-Side
Encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
in the *Amazon Simple Storage Service Developer Guide*.
Server-side encryption is supported by the S3 Multipart Upload actions.
Unless you are using a customer-provided encryption key, you don't need to
specify the encryption parameters in each UploadPart request. Instead, you
only need to specify the server-side encryption parameters in the initial
Initiate Multipart request. For more information, see
`CreateMultipartUpload`.
If you requested server-side encryption using a customer-provided
encryption key in your initiate multipart upload request, you must provide
identical encryption information in each part upload using the following
headers.
* `x-amz-server-side-encryption-customer-algorithm`
* `x-amz-server-side-encryption-customer-key`
* `x-amz-server-side-encryption-customer-key-MD5`
**Special Errors**
<ul> <li> <p class="title"> <b/>
<ul> <li> *Code: NoSuchUpload*
</li> <li> *Cause: The specified multipart upload does not exist. The
upload ID might be invalid, or the multipart upload might have been aborted
or completed.*
</li> <li> * HTTP Status Code: 404 Not Found *
</li> <li> *SOAP Fault Code Prefix: Client*
</li> </ul> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `CreateMultipartUpload`
</li> <li> `CompleteMultipartUpload`
</li> <li> `AbortMultipartUpload`
</li> <li> `ListParts`
</li> <li> `ListMultipartUploads`
</li> </ul>
"""
def upload_part(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"ContentLength", "Content-Length"},
{"ContentMD5", "Content-MD5"},
{"RequestPayer", "x-amz-request-payer"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"PartNumber", "partNumber"},
{"UploadId", "uploadId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
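# A minimal usage sketch (illustrative; the string keys for `PartNumber` and
# `UploadId` are assumptions about how `AWS.Request.build_params/2` reads the
# input map, and `upload_id` comes from a prior `CreateMultipartUpload` call):
#
#     input = %{"Body" => chunk, "PartNumber" => 1, "UploadId" => upload_id}
#     {:ok, body, _response} =
#       AWS.S3.upload_part(client, "my-bucket", "big-file.bin", input)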
@doc """
Uploads a part by copying data from an existing object as data source. You
specify the data source by adding the request header `x-amz-copy-source` in
your request and a byte range by adding the request header
`x-amz-copy-source-range` in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more
information about multipart upload limits, go to [Quick
Facts](https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) in the
*Amazon Simple Storage Service Developer Guide*.
**Note:** Instead of using an existing object as part data, you might use
the `UploadPart` operation and provide data in your request.
You must initiate a multipart upload before you can upload any part. In
response to your initiate request, Amazon S3 returns a unique identifier,
the upload ID, that you must include in your upload part request.
For more information about using the `UploadPartCopy` operation, see the
following:
* For conceptual information about multipart uploads, see [Uploading
Objects Using Multipart
Upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
in the *Amazon Simple Storage Service Developer Guide*.
* For information about permissions required to use the multipart upload
API, see [Multipart Upload API and
Permissions](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
in the *Amazon Simple Storage Service Developer Guide*.
* For information about copying objects using a single atomic operation vs.
the multipart upload, see [Operations on
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
in the *Amazon Simple Storage Service Developer Guide*.
* For information about using server-side encryption with customer-provided
encryption keys with the `UploadPartCopy` operation, see `CopyObject` and
`UploadPart`.
Note the following additional considerations about the request headers
`x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`,
`x-amz-copy-source-if-unmodified-since`, and
`x-amz-copy-source-if-modified-since`:
* **Consideration 1** - If both the `x-amz-copy-source-if-match` and
`x-amz-copy-source-if-unmodified-since` headers are present in the request,
the `x-amz-copy-source-if-match` condition evaluates to `true`, and the
`x-amz-copy-source-if-unmodified-since` condition evaluates to `false`,
then Amazon S3 returns `200 OK` and copies the data.
* **Consideration 2** - If both the `x-amz-copy-source-if-none-match` and
`x-amz-copy-source-if-modified-since` headers are present in the request,
the `x-amz-copy-source-if-none-match` condition evaluates to `false`, and
the `x-amz-copy-source-if-modified-since` condition evaluates to `true`,
then Amazon S3 returns a `412 Precondition Failed` response code.
**Versioning**
If your bucket has versioning enabled, you could have multiple versions of
the same object. By default, `x-amz-copy-source` identifies the current
version of the object to copy. If the current version is a delete marker
and you don't specify a versionId in the `x-amz-copy-source`, Amazon S3
returns a 404 error, because the object does not exist. If you specify
versionId in the `x-amz-copy-source` and the versionId is a delete marker,
Amazon S3 returns an HTTP 400 error, because you are not allowed to specify
a delete marker as a version for the `x-amz-copy-source`.
You can optionally specify a specific version of the source object to copy
by adding the `versionId` subresource as shown in the following example:
`x-amz-copy-source: /bucket/object?versionId=version id`
<p class="title"> **Special Errors**
<ul> <li> <p class="title"> <b/>
<ul> <li> *Code: NoSuchUpload*
</li> <li> *Cause: The specified multipart upload does not exist. The
upload ID might be invalid, or the multipart upload might have been aborted
or completed.*
</li> <li> *HTTP Status Code: 404 Not Found*
</li> </ul> </li> <li> <p class="title"> <b/>
<ul> <li> *Code: InvalidRequest*
</li> <li> *Cause: The specified copy source is not supported as a
byte-range copy source.*
</li> <li> *HTTP Status Code: 400 Bad Request*
</li> </ul> </li> </ul> <p class="title"> **Related Resources**
<ul> <li> `CreateMultipartUpload`
</li> <li> `UploadPart`
</li> <li> `CompleteMultipartUpload`
</li> <li> `AbortMultipartUpload`
</li> <li> `ListParts`
</li> <li> `ListMultipartUploads`
</li> </ul>
"""
def upload_part_copy(client, bucket, key, input, options \\ []) do
path_ = "/#{URI.encode(bucket)}/#{AWS.Util.encode_uri(key, true)}"
{headers, input} =
[
{"CopySource", "x-amz-copy-source"},
{"CopySourceIfMatch", "x-amz-copy-source-if-match"},
{"CopySourceIfModifiedSince", "x-amz-copy-source-if-modified-since"},
{"CopySourceIfNoneMatch", "x-amz-copy-source-if-none-match"},
{"CopySourceIfUnmodifiedSince", "x-amz-copy-source-if-unmodified-since"},
{"CopySourceRange", "x-amz-copy-source-range"},
{"CopySourceSSECustomerAlgorithm", "x-amz-copy-source-server-side-encryption-customer-algorithm"},
{"CopySourceSSECustomerKey", "x-amz-copy-source-server-side-encryption-customer-key"},
{"CopySourceSSECustomerKeyMD5", "x-amz-copy-source-server-side-encryption-customer-key-MD5"},
{"RequestPayer", "x-amz-request-payer"},
{"SSECustomerAlgorithm", "x-amz-server-side-encryption-customer-algorithm"},
{"SSECustomerKey", "x-amz-server-side-encryption-customer-key"},
{"SSECustomerKeyMD5", "x-amz-server-side-encryption-customer-key-MD5"},
]
|> AWS.Request.build_params(input)
{query_, input} =
[
{"PartNumber", "partNumber"},
{"UploadId", "uploadId"},
]
|> AWS.Request.build_params(input)
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"x-amz-copy-source-version-id", "CopySourceVersionId"},
{"x-amz-request-charged", "RequestCharged"},
{"x-amz-server-side-encryption-customer-algorithm", "SSECustomerAlgorithm"},
{"x-amz-server-side-encryption-customer-key-MD5", "SSECustomerKeyMD5"},
{"x-amz-server-side-encryption-aws-kms-key-id", "SSEKMSKeyId"},
{"x-amz-server-side-encryption", "ServerSideEncryption"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
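# A minimal usage sketch (illustrative; the input key names mirror the header
# mapping above and are assumptions):
#
#     input = %{
#       "CopySource" => "/source-bucket/source-key",
#       "CopySourceRange" => "bytes=0-5242879",
#       "PartNumber" => 1,
#       "UploadId" => upload_id
#     }
#     {:ok, body, _response} =
#       AWS.S3.upload_part_copy(client, "dest-bucket", "dest-key", input)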
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
{:ok, Poison.Parser.t(), Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "s3"}
host = build_host("s3", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "text/xml"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
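# An empty 200 body is returned as a 2-tuple here, unlike the 3-tuples in
# the clauses below; callers matching on {:ok, body, response} should note this.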
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: AWS.Util.encode_xml(input), else: ""
end
end
|
lib/aws/s3.ex
| 0.79732
| 0.48438
|
s3.ex
|
starcoder
|
defmodule Rondo.State do
defstruct [:descriptor, :partial, :children, :cache, :root]
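# A Pointer marks a spot in the descriptor tree whose value is later looked
# up from the cache when the tree is re-resolved (see insert/4 below).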
defmodule Pointer do
defstruct [:path]
end
def init(nil, descriptor, component_path, store) do
init(%__MODULE__{cache: %{}}, descriptor, component_path, store)
end
def init(%{descriptor: descriptor} = state, descriptor, component_path, store) do
resolve(state, component_path, store)
end
def init(prev, descriptor, component_path, store) do
{partial, children} = traverse(descriptor, component_path)
%{prev | descriptor: descriptor, partial: partial, children: children, root: partial}
|> resolve(component_path, store)
end
defp resolve(%{cache: cache, children: children, root: root} = state, component_path, store) do
case lookup(children, store) do
{^cache, store} ->
state = %{state | root: Enum.reduce(cache, root, fn({path, value}, root) ->
put_in(root, path, value)
end)}
{state, store}
{cache, store} ->
insert(state, component_path, cache, store)
end
end
defp traverse(nil, _) do
{nil, %{}}
end
defp traverse(descriptor, component_path) do
Rondo.Traverser.postwalk(descriptor, [], %{}, fn
(%Rondo.Store{component_path: nil} = store, path, acc) ->
store = %{store | component_path: component_path, state_path: path}
acc = Map.put(acc, path, store)
{%Pointer{path: path}, acc}
(%Rondo.Store{} = store, path, acc) ->
acc = Map.put(acc, path, store)
{%Pointer{path: path}, acc}
(%Rondo.Store.Reference{} = ref, path, acc) ->
acc = Map.put(acc, path, ref)
{ref, acc}
(%Rondo.Stream{component_path: nil} = stream, path, acc) ->
id = :erlang.phash2({component_path, path})
stream = %{stream | component_path: component_path, state_path: path, id: id}
acc = Map.put(acc, path, stream)
sub = %Rondo.Stream.Subscription{id: id}
{sub, acc}
(%Rondo.Stream{id: id} = stream, path, acc) ->
acc = Map.put(acc, path, stream)
sub = %Rondo.Stream.Subscription{id: id}
{sub, acc}
(node, _, acc) ->
{node, acc}
end)
end
defp lookup(nil, store) do
{nil, store}
end
defp lookup(children, store) do
Enum.reduce(children, {%{}, store}, fn
({_, %Rondo.Stream{}}, acc) ->
acc
({path, descriptor}, {cache, store}) ->
{state, store} = Rondo.State.Store.mount(store, descriptor)
cache = Map.put(cache, path, state)
{cache, store}
end)
end
defp insert(%{partial: partial} = state, component_path, cache, store) do
{root, _} = Rondo.Traverser.postwalk(partial, [], nil, fn
(%Rondo.Store.Reference{} = ref, _, acc) ->
case Rondo.Store.Reference.resolve(ref, cache, :value) do
{:ok, value} ->
{value, acc}
:error ->
raise Rondo.Store.Reference.Error, reference: ref, component_path: component_path
end
(%Pointer{path: path}, _, acc) ->
value = Map.get(cache, path)
{value, acc}
(node, _, acc) ->
{node, acc}
end)
{%{state | root: root, cache: cache}, store}
end
end
|
lib/rondo/state.ex
| 0.622
| 0.609088
|
state.ex
|
starcoder
|
defmodule Adventofcode.Day22SporificaVirus do
use Adventofcode
@enforce_keys [:grid, :width, :height, :burst]
defstruct grid: MapSet.new(),
width: 0,
height: 0,
position: {0, 0},
direction: {0, -1},
burst: {0, nil},
infections: 0
def bursts_infected_count(input, bursts) do
input
|> new(bursts)
|> burst_repeatedly()
|> Map.get(:infections)
end
def new(input, bursts) do
lines = String.split(input, "\n")
width = lines |> Enum.map(&String.length/1) |> Enum.max()
height = length(lines)
chars = Enum.flat_map(lines, &String.codepoints/1)
%__MODULE__{
grid: build_grid(chars, width),
width: width,
height: height,
burst: {0, bursts}
}
end
def burst_repeatedly(state, options \\ [])
def burst_repeatedly(%{burst: {burst, burst}} = state, _) do
state
end
def burst_repeatedly(state, options) do
state
|> turn_left_or_right()
|> toggle_infected()
|> forward()
|> print_and_sleep(options)
|> burst_repeatedly(options)
end
defp forward(%{burst: {burst, last_burst}} = state) do
position = next_pos(state.position, state.direction)
%{state | position: position, burst: {burst + 1, last_burst}}
end
defp next_pos({x, y}, {dir_x, dir_y}) do
{x + dir_x, y + dir_y}
end
@directions [{1, 0}, {0, 1}, {-1, 0}, {0, -1}]
defp turn_left_or_right(state) do
index = Enum.find_index(@directions, &(&1 == state.direction))
diff = if position_infected?(state), do: 1, else: -1
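# rem/2 yields -1 when turning left from index 0; Enum.at/2 treats a negative
# index as counting from the end of the list, so the lookup below still wraps.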
next_index = rem(index + diff, length(@directions))
direction = Enum.at(@directions, next_index)
%{state | direction: direction}
end
defp toggle_infected(%{position: position, infections: infections} = state) do
if position_infected?(state) do
%{state | grid: MapSet.delete(state.grid, position)}
else
%{state | grid: MapSet.put(state.grid, position), infections: infections + 1}
end
end
defp position_infected?(state), do: position_infected?(state, state.position)
defp position_infected?(state, position) do
MapSet.member?(state.grid, position)
end
defp build_grid(chars, width) do
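# Center the grid on {0, 0}. The puzzle input is assumed to be square, which
# is why width is used for both the x and y offsets.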
x_offset = div(width, 2)
y_offset = div(width, 2)
Enum.reduce(Enum.with_index(chars), MapSet.new(), fn
{".", _index}, acc ->
acc
{"#", index}, acc ->
x = rem(index, width) - x_offset
y = div(index, width) - y_offset
MapSet.put(acc, {x, y})
end)
end
defp print_and_sleep(state, options) do
if Keyword.get(options, :print), do: print_pretty_grid(state)
if sleep = Keyword.get(options, :sleep), do: :timer.sleep(sleep)
state
end
def print_pretty_grid(state) do
IO.puts(IO.ANSI.reset() <> IO.ANSI.home() <> IO.ANSI.clear() <> pretty_grid(state))
end
def pretty_grid(%{position: position} = state, range \\ -20..20) do
print_cell = fn {x, y} ->
if position_infected?(state, {x, y}), do: "#", else: "."
end
Enum.map_join(range, "\n", fn y ->
Enum.map_join(range, "", fn x ->
cond do
Enum.min(range) == x -> ""
position == {x, y} -> "["
position == {x - 1, y} -> "]"
true -> " "
end <> print_cell.({x, y})
end)
end) <> "\n"
end
end
|
lib/day_22_sporifica_virus.ex
| 0.645343
| 0.614105
|
day_22_sporifica_virus.ex
|
starcoder
|
defmodule Quantum.Scheduler do
@moduledoc """
Defines a quantum Scheduler.
When used, the quantum scheduler expects the `:otp_app` as option.
The `:otp_app` should point to an OTP application that has
the quantum runner configuration. For example, the quantum scheduler:
defmodule MyApp.Scheduler do
use Quantum.Scheduler, otp_app: :my_app
end
Could be configured with:
config :my_app, MyApp.Scheduler,
jobs: [
{"@daily", {Backup, :backup, []},
]
## Configuration:
* `:timeout` - Sometimes you may come across `GenServer` timeout errors,
especially when you have many jobs or high load. The default
`GenServer.call/3` timeout is `5_000`.
* `:jobs` - list of cron jobs to execute
* `:global` - When you have a cluster of nodes, you may not
want the same jobs to be generated on every single node, e.g.
jobs involving db changes.
In this case, you may choose to run Quantum as a global process,
thus preventing the same job from being run multiple times because of
it being generated on multiple nodes. With the configuration shown in
the example below, Quantum will be run as a globally unique
process across the cluster.
* `:schedule` - Default schedule of new Job
* `:run_strategy` - Default Run Strategy of new Job
* `:overlap` - Default overlap of new Job
* `:timezone` - Default timezone of new Job
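For example, the following configuration (values here are illustrative)
runs the scheduler globally with a larger call timeout:
config :my_app, MyApp.Scheduler,
timeout: 10_000,
global: true,
jobs: [
{"@daily", {Backup, :backup, []}}
]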
"""
alias Quantum.Job
@opaque t :: module
defmacro __using__(opts) do
quote bind_quoted: [opts: opts, moduledoc: @moduledoc] do
@moduledoc moduledoc
|> String.replace(~r/MyApp\.Scheduler/, Enum.join(Module.split(__MODULE__), "."))
|> String.replace(~r/:my_app/, Atom.to_string(Keyword.fetch!(opts, :otp_app)))
@behaviour Quantum.Scheduler
@otp_app Keyword.fetch!(opts, :otp_app)
def config(custom \\ []) do
Quantum.scheduler_config(__MODULE__, @otp_app, custom)
end
defp __job_broadcaster__, do: Keyword.fetch!(config(), :job_broadcaster)
defp __timeout__, do: Keyword.fetch!(config(), :timeout)
def start_link(opts \\ [name: __MODULE__]) do
Quantum.Supervisor.start_link(__MODULE__, @otp_app, opts)
end
def stop(server \\ __MODULE__, timeout \\ 5000) do
Supervisor.stop(server, :normal, timeout)
end
def add_job(server \\ __job_broadcaster__(), job)
def add_job(server, %Job{} = job) do
GenStage.cast(server, {:add, job})
end
def add_job(server, {%Crontab.CronExpression{} = schedule, task})
when is_tuple(task) or is_function(task, 0) do
job =
new_job()
|> Job.set_schedule(schedule)
|> Job.set_task(task)
add_job(server, job)
end
def new_job(config \\ config()), do: Job.new(config)
def deactivate_job(server \\ __job_broadcaster__(), name)
when is_atom(name) or is_reference(name) do
GenStage.cast(server, {:change_state, name, :inactive})
end
def activate_job(server \\ __job_broadcaster__(), name)
when is_atom(name) or is_reference(name) do
GenStage.cast(server, {:change_state, name, :active})
end
def find_job(server \\ __job_broadcaster__(), name)
when is_atom(name) or is_reference(name) do
GenStage.call(server, {:find_job, name}, __timeout__())
end
def delete_job(server \\ __job_broadcaster__(), name)
when is_atom(name) or is_reference(name) do
GenStage.cast(server, {:delete, name})
end
def delete_all_jobs(server \\ __job_broadcaster__()) do
GenStage.cast(server, :delete_all)
end
def jobs(server \\ __job_broadcaster__()) do
GenStage.call(server, :jobs, __timeout__())
end
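# Example usage from the host application (illustrative; assumes the
# `MyApp.Scheduler` module from the moduledoc):
#
#     MyApp.Scheduler.new_job()
#     |> Quantum.Job.set_schedule(Crontab.CronExpression.Parser.parse!("*/5 * * * *"))
#     |> Quantum.Job.set_task(fn -> IO.puts("tick") end)
#     |> MyApp.Scheduler.add_job()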
spec = [
id: opts[:id] || __MODULE__,
start: Macro.escape(opts[:start]) || quote(do: {__MODULE__, :start_link, [opts]}),
restart: opts[:restart] || :permanent,
type: :worker
]
@doc false
@spec child_spec(Keyword.t()) :: Supervisor.child_spec()
def child_spec(opts) do
%{unquote_splicing(spec)}
end
defoverridable child_spec: 1
end
end
@optional_callbacks init: 1
@doc """
Returns the configuration stored in the `:otp_app` environment.
"""
@callback config(Keyword.t()) :: Keyword.t()
@doc """
Starts supervision and return `{:ok, pid}`
or just `:ok` if nothing needs to be done.
Returns `{:error, {:already_started, pid}}` if the scheduler is already
started or `{:error, term}` in case anything else goes wrong.
## Options
See the configuration in the moduledoc for options.
"""
@callback start_link(opts :: Keyword.t()) ::
{:ok, pid}
| {:error, {:already_started, pid}}
| {:error, term}
@doc """
A callback executed when the quantum starts.
It takes the quantum configuration that is stored in the application
environment, and may change it to suit the application business.
It must return the updated configuration keyword list.
"""
@callback init(config :: Keyword.t()) :: Keyword.t()
@doc """
Shuts down the quantum represented by the given pid.
"""
@callback stop(server :: GenServer.server(), timeout) :: :ok
@doc """
Creates a new Job. The job can be added by calling `add_job/1`.
"""
@callback new_job(Keyword.t()) :: Quantum.Job.t()
@doc """
Adds a new job
"""
@callback add_job(GenStage.stage(), Quantum.Job.t() | {Crontab.CronExpression.t(), Job.task()}) ::
:ok
@doc """
Deactivates a job by name
"""
@callback deactivate_job(GenStage.stage(), atom) :: :ok
@doc """
Activates a job by name
"""
@callback activate_job(GenStage.stage(), atom) :: :ok
@doc """
Resolves a job by name
"""
@callback find_job(GenStage.stage(), atom) :: Quantum.Job.t() | nil
@doc """
Deletes a job by name
"""
@callback delete_job(GenStage.stage(), atom) :: :ok
@doc """
Deletes all jobs
"""
@callback delete_all_jobs(GenStage.stage()) :: :ok
@doc """
Returns the list of currently defined jobs
"""
@callback jobs(GenStage.stage()) :: [Quantum.Job.t()]
end
|
lib/quantum/scheduler.ex
| 0.879529
| 0.470007
|
scheduler.ex
|
starcoder
|
defmodule Timex.Timezone.Dst do
@moduledoc """
Rules for determining if a datetime falls within a daylight savings period.
"""
alias Timex.Date, as: Date
alias Timex.DateTime, as: DateTime
alias Timex.TimezoneInfo, as: TimezoneInfo
@doc """
Check if the provided datetime is in daylight savings time
"""
@spec is_dst?(DateTime.t) :: true | false | :ambiguous_time | :doesnt_exist
def is_dst?(%DateTime{:timezone => %TimezoneInfo{:dst_start_day => :undef}}), do: false
def is_dst?(%DateTime{:year => year, :month => month, :day => day, :hour => hour, :minute => min, :second => sec, :timezone => tz}) do
%TimezoneInfo{
:gmt_offset_dst => dst_shift,
:dst_start_day => dst_start_rule, :dst_start_time => dst_start_time,
:dst_end_day => dst_end_rule, :dst_end_time => dst_end_time
} = tz
dst_start_day = get_dst_day_of_year(dst_start_rule, year)
dst_end_day = get_dst_day_of_year(dst_end_rule, year)
current_day = get_day_of_year({year, month, day})
case is_dst_date(dst_start_day, dst_end_day, current_day) do
:equal_to_start ->
is_dst_start_time(time_to_minutes({hour, min, sec}), time_to_minutes(dst_start_time), dst_shift)
:equal_to_end ->
is_dst_end_time(time_to_minutes({hour, min, sec}), time_to_minutes(dst_end_time), dst_shift)
result ->
cond do
# If DST ends the next day at 00:00, then after 23:00 the day before, we're ambiguous
dst_end_day - 1 === current_day and time_to_minutes(dst_end_time) == 0 ->
cond do
time_to_minutes({hour, min, sec}) >= (23 * 60) -> :ambiguous_time
true -> result
end
# Likewise, if DST started the night before at midnight (24:00), then before 01:00 of the current day, the time doesn't exist
dst_start_day + 1 === current_day and time_to_minutes(dst_start_time) == (24 * 60) ->
cond do
time_to_minutes({hour, min, sec}) < 60 -> :doesnt_exist
true -> result
end
true ->
result
end
end
end
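# Example (illustrative; `cdt_tzinfo` stands in for a %TimezoneInfo{} that
# carries DST rules, e.g. US Central time):
#
#     iex> is_dst?(%DateTime{year: 2014, month: 7, day: 1, hour: 12,
#     ...>   minute: 0, second: 0, timezone: cdt_tzinfo})
#     true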
defp is_dst_start_time(current_time, start_time, _shift) when current_time < start_time, do: false
defp is_dst_start_time(current_time, start_time, shift) do
case (start_time + shift) do
# When start time is late, say 2400, it rolls over in to the next day with the shift
shifted when shifted < start_time and current_time < start_time ->
(current_time + shift) >= shifted
# When it doesn't roll over, normal checks apply
shifted when shifted > start_time and current_time >= shifted ->
true
shifted when shifted > start_time and current_time < shifted and current_time >= start_time ->
:doesnt_exist
_ ->
:doesnt_exist
end
end
defp is_dst_end_time(current_time, end_time, shift) do
# Ambiguous for the hour before it ends
case (end_time - shift) do
# When the end is at 0:00 and the current is >= 0:00, we're out of DST
shifted when shifted < 0 and current_time >= 0 -> false
# When the end is at any other point in the day, and the current time is between the shifted and end time, ambiguous
shifted when current_time >= shifted and current_time < end_time ->
:ambiguous_time
# When the end is at any other point in the day, and we're less than the shifted time, we're in dst
shifted when current_time < shifted ->
true
# When the end is at any other point in the day, and we're greater than the end_time, we're out of dst
_ ->
false
end
end
defp is_dst_date(start_day, _, current_day) when current_day == start_day, do: :equal_to_start
defp is_dst_date(_, end_day, current_day) when current_day == end_day, do: :equal_to_end
defp is_dst_date(start_day, end_day, current_day) when start_day < end_day and (current_day > start_day and current_day < end_day), do: true
defp is_dst_date(start_day, end_day, current_day) when start_day < end_day and (current_day < start_day or current_day > end_day), do: false
defp is_dst_date(start_day, end_day, current_day) when start_day > end_day and (current_day < start_day and current_day > end_day), do: false
defp is_dst_date(start_day, end_day, current_day) when start_day > end_day and (current_day > start_day or current_day < end_day), do: true
defp get_dst_day_of_year({weekday, day, month}, year) when (weekday == :last) or (weekday == 5) do
month_num = Date.month_to_num(month)
day_num = Date.day_to_num(day)
get_last_dst(day_num, month_num, year)
end
defp get_dst_day_of_year({weekday, day, month}, year) when (weekday > 0) and (weekday <= 4) do
month_num = Date.month_to_num(month)
day_num = Date.day_to_num(day)
dst_days = get_day_of_year({year, month_num, 1})
dst_day = :calendar.day_of_the_week({year, month_num, 1})
case (dst_day === day_num) and (weekday === 1) do
true -> dst_days
false ->
adjusted_dst_days = case day_num >= dst_day do
true ->
dst_days + (day_num - dst_day)
false ->
dst_days + (7 - dst_day) + day_num
end
adjusted_dst_days + (weekday - 1) * 7
end
end
defp get_dst_day_of_year(_, _), do: raise(ArgumentError, "Invalid weekday")
defp get_last_dst(day_num, month_num, year) do
month_last_days = :calendar.date_to_gregorian_days(year, month_num, 1) + :calendar.last_day_of_the_month(year, month_num)
month_last_date = :calendar.gregorian_days_to_date(month_last_days)
month_last_dayofweek = :calendar.day_of_the_week(month_last_date)
case month_last_dayofweek > day_num do
true ->
month_last_days - (month_last_dayofweek - day_num) - :calendar.date_to_gregorian_days(if year == 0 do 0 else year - 1 end, 12, 31)
false ->
month_last_days - month_last_dayofweek - (7 - day_num) - :calendar.date_to_gregorian_days(if year == 0 do 0 else year - 1 end, 12, 31)
end
end
defp get_day_of_year({year, _, _} = date) do
:calendar.date_to_gregorian_days(date) - :calendar.date_to_gregorian_days(if year == 0 do 0 else year - 1 end, 12, 31)
end
defp time_to_minutes({hours, minutes}), do: (hours * 60) + minutes
defp time_to_minutes({hours, minutes, _}), do: (hours * 60) + minutes
end
|
lib/timezone/timezone_dst.ex
| 0.787727
| 0.686413
|
timezone_dst.ex
|
starcoder
|
defmodule GitOps.Commit do
@moduledoc """
Manages the structure, parsing, and formatting of commits.
Using `parse/1` you can parse a commit struct out of a commit message
Using `format/1` you can format a commit struct in the way that the
changelog expects.
"""
import NimbleParsec
defstruct [:type, :scope, :message, :body, :footer, :breaking?]
@type t :: %__MODULE__{}
# credo:disable-for-lines:27 Credo.Check.Refactor.PipeChainStart
whitespace = ignore(ascii_string([9, 32], min: 1))
# 40/41 are `(` and `)`, but syntax highlighters don't like ?( and ?)
type =
optional(whitespace)
|> optional(whitespace)
|> tag(ascii_string([not: ?:, not: ?!, not: 40, not: 41, not: 10, not: 32], min: 1), :type)
|> optional(whitespace)
scope =
optional(whitespace)
|> ignore(ascii_char([40]))
|> tag(utf8_string([not: 40, not: 41], min: 1), :scope)
|> ignore(ascii_char([41]))
|> optional(whitespace)
breaking_change_indicator = tag(ascii_char([?!]), :breaking?)
message = tag(optional(whitespace), ascii_string([not: ?\n], min: 1), :message)
commit =
type
|> concat(optional(scope))
|> concat(optional(breaking_change_indicator))
|> ignore(ascii_char([?:]))
|> concat(message)
|> concat(optional(whitespace))
|> concat(optional(ignore(ascii_string([10], min: 1))))
body =
[commit, eos()]
|> choice()
|> lookahead_not()
|> utf8_char([])
|> repeat()
|> reduce({List, :to_string, []})
|> tag(:body)
defparsecp(
:commits,
commit
|> concat(body)
|> tag(:commit)
|> repeat(),
inline: true
)
def format(commit) do
%{
scope: scopes,
message: message,
body: body,
footer: footer,
breaking?: breaking?
} = commit
scope = Enum.join(scopes || [], ",")
body_text =
if breaking? && String.starts_with?(body || "", "BREAKING CHANGE:") do
"\n\n" <> body
else
""
end
footer_text =
if breaking? && String.starts_with?(footer || "", "BREAKING CHANGE:") do
"\n\n" <> footer
else
""
end
scope_text =
if String.trim(scope) != "" do
"#{scope}: "
else
""
end
"* #{scope_text}#{message}#{body_text}#{footer_text}"
end
def parse(text) do
case commits(text) do
{:ok, [], _, _, _, _} ->
:error
{:ok, results, _remaining, _state, _dunno, _also_dunno} ->
commits =
Enum.map(results, fn {:commit, result} ->
remaining_lines =
result[:body]
|> Enum.map(&String.trim/1)
|> Enum.join("\n")
|> String.split("\n")
|> Enum.map(&String.trim/1)
|> Enum.reject(&Kernel.==(&1, ""))
body = Enum.at(remaining_lines, 0)
footer = Enum.at(remaining_lines, 1)
%__MODULE__{
type: Enum.at(result[:type], 0),
scope: scopes(result[:scope]),
message: Enum.at(result[:message], 0),
body: body,
footer: footer,
breaking?: is_breaking?(result[:breaking?], body, footer)
}
end)
{:ok, commits}
{:error, _message, _remaining, _state, _dunno, _also_dunno} ->
:error
end
rescue
_ ->
:error
end
def breaking?(%GitOps.Commit{breaking?: breaking?}), do: breaking?
def feature?(%GitOps.Commit{type: type}) do
String.downcase(type) == "feat"
end
def fix?(%GitOps.Commit{type: type}) do
String.downcase(type) == "fix" || String.downcase(type) == "improvement"
end
defp scopes([value]) when is_bitstring(value), do: String.split(value, ",")
defp scopes(_), do: nil
defp is_breaking?(breaking, _, _) when not is_nil(breaking), do: true
defp is_breaking?(_, "BREAKING CHANGE:" <> _, _), do: true
defp is_breaking?(_, _, "BREAKING CHANGE:" <> _), do: true
defp is_breaking?(_, _, _), do: false
end
|
lib/git_ops/commit.ex
| 0.658527
| 0.405243
|
commit.ex
|
starcoder
|
defmodule Surface.Compiler do
@moduledoc """
Defines a behaviour that must be implemented by all HTML/Surface node translators.
This module also contains the main logic to translate Surface code.
"""
alias Surface.Compiler.Parser
alias Surface.IOHelper
alias Surface.AST
alias Surface.Compiler.Helpers
@stateful_component_types [
Surface.LiveComponent
]
@tag_directive_handlers [
Surface.Directive.Events,
Surface.Directive.Show,
Surface.Directive.If,
Surface.Directive.For,
Surface.Directive.Debug
]
@component_directive_handlers [
Surface.Directive.If,
Surface.Directive.For,
Surface.Directive.Debug
]
@meta_component_directive_handlers [
Surface.Directive.If,
Surface.Directive.For,
Surface.Directive.Debug
]
@template_directive_handlers [Surface.Directive.Let]
@boolean_tag_attributes [
:allowfullscreen,
:allowpaymentrequest,
:async,
:autofocus,
:autoplay,
:checked,
:controls,
:default,
:defer,
:disabled,
:formnovalidate,
:hidden,
:ismap,
:itemscope,
:loop,
:multiple,
:muted,
:nomodule,
:novalidate,
:open,
:readonly,
:required,
:reversed,
:selected,
:typemustmatch
]
@void_elements [
"area",
"base",
"br",
"col",
"hr",
"img",
"input",
"link",
"meta",
"param",
"command",
"keygen",
"source"
]
defmodule ParseError do
defexception file: "", line: 0, message: "error parsing HTML/Surface"
@impl true
def message(exception) do
"#{Path.relative_to_cwd(exception.file)}:#{exception.line}: #{exception.message}"
end
end
defmodule CompileMeta do
defstruct [:line_offset, :file, :caller]
@type t :: %__MODULE__{
line_offset: non_neg_integer(),
file: binary(),
caller: Macro.Env.t()
}
end
@doc """
This function compiles a string into the Surface AST. This is used by `~H` and `Surface.Renderer` to parse and compile templates.
A special note for line_offset: This is considered the line number for the first line in the string. If the first line of the
string is also the first line of the file, then this should be 1. If this is being called within a macro (say to process a heredoc
passed to ~H), this should be __CALLER__.line + 1.
"""
@spec compile(binary, non_neg_integer(), Macro.Env.t(), binary()) :: [Surface.AST.t()]
def compile(string, line_offset, caller, file \\ "nofile") do
compile_meta = %CompileMeta{
line_offset: line_offset,
file: file,
caller: caller
}
string
|> Parser.parse()
|> case do
{:ok, nodes} ->
nodes
{:error, message, line} ->
raise %ParseError{line: line + line_offset - 1, file: file, message: message}
end
|> to_ast(compile_meta)
|> validate_component_structure(compile_meta, caller.module)
end
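# Example (illustrative; normally invoked by the `~H` sigil or
# `Surface.Renderer` with the caller's environment):
#
#     Surface.Compiler.compile("<div>Hello</div>", __ENV__.line + 1, __ENV__, __ENV__.file)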
def to_live_struct(nodes, opts \\ []) do
Surface.Compiler.EExEngine.translate(nodes, opts)
end
def validate_component_structure(ast, meta, module) do
if is_stateful_component(module) do
validate_stateful_component(ast, meta)
end
ast
end
defp is_stateful_component(module) do
if Module.open?(module) do
Module.get_attribute(module, :component_type, Surface.BaseComponent) in @stateful_component_types
else
function_exported?(module, :component_type, 0) and
module.component_type() in @stateful_component_types
end
end
defp validate_stateful_component(ast, %CompileMeta{line_offset: offset, caller: caller}) do
num_tags =
ast
|> Enum.filter(fn
%AST.Tag{} -> true
%AST.VoidTag{} -> true
%AST.Component{} -> true
_ -> false
end)
|> Enum.count()
cond do
num_tags == 0 ->
IOHelper.warn(
"stateful live components must have a HTML root element",
caller,
fn _ -> offset end
)
num_tags > 1 ->
IOHelper.warn(
"stateful live components must have a single HTML root element",
caller,
fn _ -> offset end
)
true ->
:noop
end
end
defp to_ast(nodes, compile_meta) do
for node <- nodes do
case convert_node_to_ast(node_type(node), node, compile_meta) do
{:ok, ast} ->
process_directives(ast)
{:error, {message, line}, meta} ->
IOHelper.warn(message, compile_meta.caller, fn _ -> line end)
%AST.Error{message: message, meta: meta}
end
end
end
defp node_type({"#" <> _, _, _, _}), do: :macro_component
defp node_type({<<first, _::binary>>, _, _, _}) when first in ?A..?Z, do: :component
defp node_type({"template", _, _, _}), do: :template
defp node_type({"slot", _, _, _}), do: :slot
defp node_type({name, _, _, _}) when name in @void_elements, do: :void_tag
defp node_type({_, _, _, _}), do: :tag
defp node_type({:interpolation, _, _}), do: :interpolation
defp node_type(_), do: :text
defp process_directives(%{directives: directives} = node) do
directives
|> Enum.filter(fn %AST.Directive{module: mod} -> function_exported?(mod, :process, 2) end)
|> Enum.reduce(node, fn %AST.Directive{module: mod} = directive, node ->
mod.process(directive, node)
end)
end
defp process_directives(node), do: node
defp convert_node_to_ast(:text, text, _),
do: {:ok, %AST.Text{value: text}}
defp convert_node_to_ast(:interpolation, {_, text, node_meta}, compile_meta) do
meta = Helpers.to_meta(node_meta, compile_meta)
{:ok,
%AST.Interpolation{
original: text,
value: Helpers.interpolation_to_quoted!(text, meta),
meta: meta
}}
end
defp convert_node_to_ast(:template, {_, attributes, children, node_meta}, compile_meta) do
meta = Helpers.to_meta(node_meta, compile_meta)
with {:ok, directives, attributes} <-
collect_directives(@template_directive_handlers, attributes, meta),
slot <- attribute_value(attributes, "slot", :default) do
{:ok,
%AST.Template{
name: slot,
children: to_ast(children, compile_meta),
let: template_props(directives, meta),
meta: meta
}}
else
_ -> {:error, {"failed to parse template", meta.line}, meta}
end
end
defp convert_node_to_ast(:slot, {_, attributes, children, node_meta}, compile_meta) do
meta = Helpers.to_meta(node_meta, compile_meta)
with name when not is_nil(name) and is_atom(name) <-
attribute_value(attributes, "name", :default),
{:ok, props, _attributes} <-
collect_directives([Surface.Directive.SlotProps], attributes, meta) do
props =
case props do
[expr] ->
expr
_ ->
%AST.Directive{
module: Surface.Directive.SlotProps,
name: :props,
value: %AST.AttributeExpr{
original: "",
value: [],
meta: meta
},
meta: meta
}
end
Module.put_attribute(meta.caller.module, :used_slot, %{name: name, line: meta.line})
{:ok,
%AST.Slot{
name: name,
default: to_ast(children, compile_meta),
props: props,
meta: meta
}}
else
_ -> {:error, {"failed to parse slot", meta.line}, meta}
end
end
defp convert_node_to_ast(:tag, {name, attributes, children, node_meta}, compile_meta) do
meta = Helpers.to_meta(node_meta, compile_meta)
with {:ok, directives, attributes} <-
collect_directives(@tag_directive_handlers, attributes, meta),
attributes <- process_attributes(nil, attributes, meta),
children <- to_ast(children, compile_meta),
:ok <- validate_tag_children(children) do
{:ok,
%AST.Tag{
element: name,
attributes: attributes,
directives: directives,
children: children,
meta: meta
}}
else
{:error, message} ->
message = "cannot render <#{name}> (#{message})"
{:error, message}
_ ->
{:error, {"cannot render <#{name}>", meta.line}, meta}
end
end
defp convert_node_to_ast(:void_tag, {name, attributes, children, node_meta}, compile_meta) do
meta = Helpers.to_meta(node_meta, compile_meta)
with {:ok, directives, attributes} <-
collect_directives(@tag_directive_handlers, attributes, meta),
attributes <- process_attributes(nil, attributes, meta),
# a void element containing content is an error
[] <- to_ast(children, compile_meta) do
{:ok,
%AST.VoidTag{
element: name,
attributes: attributes,
directives: directives,
meta: meta
}}
else
{:error, message} ->
message = "cannot render <#{name}> (#{message})"
{:error, message}
_ ->
{:error, {"cannot render <#{name}>", meta.line}, meta}
end
end
defp convert_node_to_ast(:component, {name, attributes, children, node_meta}, compile_meta) do
# TODO: validate live views vs live components ?
meta = Helpers.to_meta(node_meta, compile_meta)
with {:ok, mod} <- Helpers.module_name(name, meta.caller),
true <- function_exported?(mod, :component_type, 0),
component_type <- mod.component_type(),
meta <- Map.merge(meta, %{module: mod, node_alias: name}),
# Passing in and modifying attributes here because :let on the parent is used
# to indicate the props for the :default slot's template
{:ok, templates, attributes} <-
collect_templates(mod, attributes, children, meta),
:ok <- validate_templates(mod, templates, meta),
# This is a little bit hacky. :let will only be extracted for the default
# template if `mod` doesn't export __slot_name__ (i.e. if it isn't a slotable component)
# We have to extract that here as it should not be considered an attribute
{:ok, template_directives, attributes} <-
maybe_collect_template_directives(mod, attributes, meta),
{:ok, directives, attributes} <-
collect_directives(@component_directive_handlers, attributes, meta),
attributes <- process_attributes(mod, attributes, meta),
:ok <- validate_properties(mod, attributes, meta) do
result =
if component_slotable?(mod) do
%AST.SlotableComponent{
module: mod,
slot: mod.__slot_name__(),
type: component_type,
let: template_props(template_directives, meta),
props: attributes,
directives: directives,
templates: templates,
meta: meta
}
else
%AST.Component{
module: mod,
type: component_type,
props: attributes,
directives: directives,
templates: templates,
meta: meta
}
end
{:ok, result}
else
{:error, message} ->
{:error, {"cannot render <#{name}> (#{message})", meta.line}, meta}
_ ->
{:error, {"cannot render <#{name}>", meta.line}, meta}
end
end
defp convert_node_to_ast(
:macro_component,
{"#" <> name, attributes, children, node_meta},
compile_meta
) do
meta = Helpers.to_meta(node_meta, compile_meta)
with {:ok, mod} <- Helpers.module_name(name, meta.caller),
meta <- Map.merge(meta, %{module: mod, node_alias: name}),
true <- function_exported?(mod, :expand, 3),
{:ok, directives, attributes} <-
collect_directives(@meta_component_directive_handlers, attributes, meta),
attributes <- process_attributes(mod, attributes, meta),
:ok <- validate_properties(mod, attributes, meta) do
expanded = mod.expand(attributes, children, meta)
{:ok,
%AST.Container{
children: List.wrap(expanded),
directives: directives,
meta: meta
}}
else
false ->
{:error,
{"cannot render <#{name}> (MacroComponents must export an expand/3 function)",
meta.line}, meta}
{:error, message} ->
{:error, {"cannot render <#{name}> (#{message})", meta.line}, meta}
_ ->
{:error, {"cannot render <#{name}>", meta.line}, meta}
end
end
defp attribute_value(attributes, attr_name, default) do
Enum.find_value(attributes, default, fn {name, value, _} ->
if name == attr_name do
List.to_atom(value)
end
end)
end
defp template_props([], meta),
do: %AST.Directive{
module: Surface.Directive.Let,
name: :let,
value: %AST.AttributeExpr{
value: [],
original: "",
meta: meta
},
meta: meta
}
defp template_props([%AST.Directive{module: Surface.Directive.Let} = props | _], _meta),
do: props
defp template_props([_ | directives], meta), do: template_props(directives, meta)
defp component_slotable?(mod), do: function_exported?(mod, :__slot_name__, 0)
defp maybe_collect_template_directives(mod, attributes, meta) do
if component_slotable?(mod) do
collect_directives(@template_directive_handlers, attributes, meta)
else
{:ok, [], attributes}
end
end
defp process_attributes(_module, [], _meta), do: []
defp process_attributes(
mod,
[{name, {:attribute_expr, [value], expr_meta}, attr_meta} | attrs],
meta
) do
name = String.to_atom(name)
expr_meta = Helpers.to_meta(expr_meta, meta)
attr_meta = Helpers.to_meta(attr_meta, meta)
type = determine_attribute_type(mod, name, attr_meta)
[
%AST.Attribute{
type: type,
name: name,
value: [expr_node(name, value, expr_meta, type)],
meta: attr_meta
}
| process_attributes(mod, attrs, meta)
]
end
defp process_attributes(mod, [{name, [], attr_meta} | attrs], meta) do
name = String.to_atom(name)
attr_meta = Helpers.to_meta(attr_meta, meta)
type = determine_attribute_type(mod, name, attr_meta)
attr_value =
case type do
type when type in [:string, :css_class, :any] ->
%AST.Text{
value: ""
}
:event ->
%AST.AttributeExpr{
original: "",
value: nil,
meta: attr_meta
}
:boolean ->
%AST.Text{value: true}
type ->
message =
"invalid property value for #{name}, expected #{type}, but got an empty string"
IOHelper.compile_error(message, meta.file, meta.line)
end
[
%AST.Attribute{
type: type,
name: name,
value: [attr_value],
meta: attr_meta
}
| process_attributes(mod, attrs, meta)
]
end
defp process_attributes(mod, [{name, value, attr_meta} | attrs], meta)
when is_bitstring(value) or is_binary(value) do
name = String.to_atom(name)
attr_meta = Helpers.to_meta(attr_meta, meta)
type = determine_attribute_type(mod, name, attr_meta)
[
%AST.Attribute{
type: type,
name: name,
value: [attr_value(name, type, value, meta)],
meta: attr_meta
}
| process_attributes(mod, attrs, meta)
]
end
defp process_attributes(mod, [{name, values, attr_meta} | attrs], meta)
when is_list(values) do
name = String.to_atom(name)
attr_meta = Helpers.to_meta(attr_meta, meta)
type = determine_attribute_type(mod, name, attr_meta)
values = collect_attr_values(name, meta, values, type)
[
%AST.Attribute{
type: type,
name: name,
value: values,
meta: attr_meta
}
| process_attributes(mod, attrs, meta)
]
end
defp process_attributes(mod, [{name, value, attr_meta} | attrs], meta)
when is_boolean(value) do
name = String.to_atom(name)
attr_meta = Helpers.to_meta(attr_meta, meta)
type = determine_attribute_type(mod, name, attr_meta)
[
%AST.Attribute{
type: type,
name: name,
value: [attr_value(name, type, value, meta)],
meta: attr_meta
}
| process_attributes(mod, attrs, meta)
]
end
defp determine_attribute_type(nil, :class, _meta), do: :css_class
defp determine_attribute_type(nil, name, _meta) when name in @boolean_tag_attributes,
do: :boolean
defp determine_attribute_type(nil, _name, _meta), do: :string
defp determine_attribute_type(module, name, meta) do
with true <- function_exported?(module, :__get_prop__, 1),
prop when not is_nil(prop) <- module.__get_prop__(name) do
prop.type
else
_ ->
IOHelper.warn(
"Unknown property \"#{to_string(name)}\" for component <#{meta.node_alias}>",
meta.caller,
fn _ ->
meta.line
end
)
:string
end
end
defp collect_attr_values(attribute_name, meta, values, type, accumulators \\ {[], []})
defp collect_attr_values(_attribute_name, _meta, [], _type, {[], acc}), do: Enum.reverse(acc)
defp collect_attr_values(attribute_name, meta, [], type, {codepoints, acc}) do
collect_attr_values(
attribute_name,
meta,
[],
type,
{[],
[
attr_value(attribute_name, type, codepoints |> Enum.reverse() |> List.to_string(), meta)
| acc
]}
)
end
defp collect_attr_values(
attribute_name,
meta,
[{:attribute_expr, [value], expr_meta} | values],
type,
{[], acc}
) do
collect_attr_values(
attribute_name,
meta,
values,
type,
{[], [expr_node(attribute_name, value, Helpers.to_meta(expr_meta, meta), type) | acc]}
)
end
defp collect_attr_values(
attribute_name,
meta,
[{:attribute_expr, [value], expr_meta} | values],
type,
{codepoints, acc}
) do
text_node =
attr_value(attribute_name, type, codepoints |> Enum.reverse() |> List.to_string(), meta)
acc = [text_node | acc]
collect_attr_values(
attribute_name,
meta,
values,
type,
{[], [expr_node(attribute_name, value, Helpers.to_meta(expr_meta, meta), type) | acc]}
)
end
defp collect_attr_values(attribute_name, meta, [codepoint | values], type, {codepoint_acc, acc}) do
collect_attr_values(attribute_name, meta, values, type, {[codepoint | codepoint_acc], acc})
end
defp attr_value(name, :event, value, meta) do
%AST.AttributeExpr{
original: value,
value: Helpers.attribute_expr_to_quoted!(Macro.to_string(value), name, :event, meta),
meta: meta
}
end
defp attr_value(_name, _type, value, _meta) do
%AST.Text{value: value}
end
defp expr_node(attribute_name, value, meta, type) do
# This is required as nimble_parsec appears to generate bitstrings that elixir doesn't
# want to interpret as actual strings.
# The exact example is " \"héllo\" " which generates <<32, 34, 104, 233, 108, 108, 111, 34, 32>>.
# When that sequence is passed to Code.string_to_quoted(), it results in:
# ** (UnicodeConversionError) invalid encoding starting at <<233, 108, 108, 111, 34, 32>>
# (elixir 1.10.4) lib/string.ex:2251: String.to_charlist/1
# (elixir 1.10.4) lib/code.ex:834: Code.string_to_quoted/2
binary = List.to_string(for <<c <- value>>, into: [], do: c)
%AST.AttributeExpr{
original: binary,
value: Helpers.attribute_expr_to_quoted!(binary, attribute_name, type, meta),
meta: meta
}
end
defp validate_tag_children([]), do: :ok
defp validate_tag_children([%AST.Template{name: name} | _]) do
{:error,
"templates are only allowed as children elements of components, but found template for #{
name
}"}
end
defp validate_tag_children([_ | nodes]), do: validate_tag_children(nodes)
defp collect_templates(mod, attributes, nodes, meta) do
# Don't extract the template directives if this module is slotable
{:ok, default_props, attributes} =
if component_slotable?(mod) do
{:ok, [], attributes}
else
collect_directives(@template_directive_handlers, attributes, meta)
end
templates =
nodes
|> to_ast(meta)
|> Enum.group_by(fn
%AST.Template{name: name} -> name
%AST.SlotableComponent{slot: name} -> name
_ -> :default
end)
{already_wrapped, default_children} =
templates
|> Map.get(:default, [])
|> Enum.split_with(fn
%AST.Template{} -> true
_ -> false
end)
if Enum.all?(default_children, &Helpers.is_blank_or_empty/1) do
{:ok, Map.put(templates, :default, already_wrapped), attributes}
else
wrapped = %AST.Template{
name: :default,
children: default_children,
let: template_props(default_props, meta),
meta: meta
}
{:ok, Map.put(templates, :default, [wrapped | already_wrapped]), attributes}
end
end
defp collect_directives(handlers, attributes, meta)
defp collect_directives(_, [], _), do: {:ok, [], []}
defp collect_directives(handlers, [attr | attributes], meta) do
{:ok, dirs, attrs} = collect_directives(handlers, attributes, meta)
directives =
handlers
|> Enum.map(fn handler -> handler.extract(attr, meta) end)
|> List.flatten()
attributes =
if Enum.empty?(directives) do
[attr | attrs]
else
attrs
end
directives =
Enum.sort_by(directives ++ dirs, fn %{module: mod} ->
Enum.find_index(handlers, fn handler -> handler == mod end)
end)
{:ok, directives, attributes}
end
defp validate_properties(module, props, meta) do
if function_exported?(module, :__props__, 0) do
existing_props = Enum.map(props, fn %{name: name} -> name end)
required_props =
for p <- module.__props__(), Keyword.get(p.opts, :required, false), do: p.name
missing_props = required_props -- existing_props
for prop <- missing_props do
message = "Missing required property \"#{prop}\" for component <#{meta.node_alias}>"
IOHelper.warn(message, meta.caller, fn _ -> meta.line end)
end
end
:ok
end
defp validate_templates(mod, templates, meta) do
names =
templates
|> Map.keys()
|> Enum.reject(fn name -> name == :default end)
if !function_exported?(mod, :__slots__, 0) and not Enum.empty?(names) do
message = """
parent component `#{inspect(mod)}` does not define any slots. \
Found the following templates: #{inspect(names)}
"""
IOHelper.compile_error(message, meta.file, meta.line)
end
for name <- mod.__required_slots_names__(),
!Map.has_key?(templates, name) or
Enum.all?(Map.get(templates, name, []), &Helpers.is_blank_or_empty/1) do
message = "missing required slot \"#{name}\" for component <#{meta.node_alias}>"
IOHelper.warn(message, meta.caller, fn _ -> meta.line end)
end
for slot_name <- names,
mod.__get_slot__(slot_name) == nil do
missing_slot(mod, slot_name, meta)
end
for slot_name <- Map.keys(templates),
template <- Map.get(templates, slot_name) do
slot = mod.__get_slot__(slot_name)
{props, prop_meta} =
case template.let do
%AST.Directive{
value: %AST.AttributeExpr{
value: value,
meta: meta
}
} ->
{Keyword.keys(value), meta}
_ ->
{[], meta}
end
if slot == nil and not Enum.empty?(props) do
message = """
there's no `#{slot_name}` slot defined in `#{inspect(mod)}`.
Directive :let can only be used on explicitly defined slots.
Hint: You can define a `#{slot_name}` slot and its props using: \
`slot #{slot_name}, props: #{inspect(props)}\
"""
IOHelper.compile_error(message, meta.file, meta.line)
end
case slot do
%{opts: opts} ->
non_generator_args = Enum.map(opts[:props] || [], &Map.get(&1, :name))
undefined_keys = props -- non_generator_args
if not Enum.empty?(undefined_keys) do
[prop | _] = undefined_keys
message = """
undefined prop `#{inspect(prop)}` for slot `#{slot_name}` in `#{inspect(mod)}`.
Available props: #{inspect(non_generator_args)}.
Hint: You can define a new slot prop using the `props` option: \
`slot #{slot_name}, props: [..., #{inspect(prop)}]`\
"""
IOHelper.compile_error(message, prop_meta.file, prop_meta.line)
end
_ ->
:ok
end
end
:ok
end
defp missing_slot(mod, slot_name, meta) do
parent_slots = mod.__slots__() |> Enum.map(& &1.name)
similar_slot_message =
case Helpers.did_you_mean(slot_name, parent_slots) do
{similar, score} when score > 0.8 ->
"\n\n Did you mean #{inspect(to_string(similar))}?"
_ ->
""
end
existing_slots_message =
if parent_slots == [] do
""
else
slots = Enum.map(parent_slots, &to_string/1)
available = Helpers.list_to_string("slot:", "slots:", slots)
"\n\n Available #{available}"
end
message = """
no slot "#{slot_name}" defined in parent component <#{meta.node_alias}>\
#{similar_slot_message}\
#{existing_slots_message}\
"""
IOHelper.warn(message, meta.caller, fn _ -> meta.line + 1 end)
end
end
|
lib/surface/compiler.ex
| 0.781497
| 0.429908
|
compiler.ex
|
starcoder
|
defmodule Shopix.Schema.Order do
use Ecto.Schema
alias Shopix.Schema.{Order, LineItem}
schema "orders" do
has_many :line_items, LineItem, on_replace: :delete
field :email, :string
field :first_name, :string
field :last_name, :string
field :company_name, :string
field :address_1, :string
field :address_2, :string
field :zip_code, :string
field :city, :string
field :country_state, :string
field :country_code, :string
field :phone, :string
field :vat_percentage, :decimal
field :completed_at, :naive_datetime
field :shipping_cost_amount, Money.Ecto.Type
field :total_quantity, :integer, virtual: true
field :price_items, Money.Ecto.Type, virtual: true
field :total_price, Money.Ecto.Type, virtual: true
timestamps()
end
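  # The virtual fields above are populated by `compute_properties/2` below.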
def compute_properties(%Order{} = order, config \\ %{}) do
order
|> compute_line_items
|> compute_totals(config)
end
def compute_line_items(%Order{} = order) do
%{order | line_items: order.line_items |> Enum.map(&LineItem.compute_properties(&1))}
end
def compute_totals(%Order{} = order, config) do
%{
order
| total_quantity: total_quantity(order),
price_items: price_items(order),
total_price: total_price(order, config)
}
end
def shipping_cost_amount(
%Order{shipping_cost_amount: nil} = order,
shipping_cost_default_amount
) do
Shopix.ShippingCostCalculator.shipping_cost_for(order, shipping_cost_default_amount)
end
def shipping_cost_amount(%Order{shipping_cost_amount: shipping_cost_amount}, _),
do: shipping_cost_amount
def total_quantity(%Order{} = order) do
Enum.reduce(order.line_items, 0, fn line_item, acc -> acc + line_item.quantity end)
end
def price_items(%Order{} = order) do
Enum.reduce(order.line_items, Money.new(0), fn line_item, acc ->
Money.add(acc, line_item.total_price)
end)
end
def total_price(%Order{} = order, config) do
order
|> price_items()
|> Money.add(shipping_cost_amount(order, Map.get(config, :shipping_cost_default_amount)))
end
end
|
lib/shopix/schema/order.ex
| 0.502197
| 0.41478
|
order.ex
|
starcoder
|
defmodule OliWeb.Common.Hierarchy.HierarchyPicker do
@moduledoc """
Hierarchy Picker Component
  A general-purpose curriculum location picker. When a new location is selected,
  this component triggers a "HierarchyPicker.update_selection" event to the parent
  LiveView with the new selection.
  ### Multi-Pub Mode
  In multi-pub mode, a user can select items from multiple publications. The active hierarchy shown
  is still dictated by the hierarchy and active parameters, but this hierarchy is expected to change,
  specifically when the "HierarchyPicker.select_publication" event is triggered. The LiveView using
  this component should handle this event and update the hierarchy accordingly.
  ## Required Parameters:
  id: Unique identifier for the hierarchy picker
  hierarchy: Hierarchy to select from
  active: Currently active node. Also represents the current selection in container
    selection mode.
  selection: List of current selections in the form of {publication_id, resource_id} tuples.
    (Only used in multi-select mode)
  preselected: List of preselected items which are already selected and cannot be changed. Like
    selection, the list is also expected to be {publication_id, resource_id} tuples.
## Optional Parameters:
select_mode: Which selection mode to operate in. This can be set to :single, :multiple or
:container. Defaults to :single
filter_items_fn: Filter function applied to items shown. Default is no filter.
sort_items_fn: Sorting function applied to items shown. Default is to sort containers first.
publications: The list of publications that items can be selected from (used in multi-pub mode)
selected_publication: The currently selected publication (used in multi-pub mode)
## Events:
"HierarchyPicker.update_active", %{"uuid" => uuid}
"HierarchyPicker.select", %{"uuid" => uuid}
"HierarchyPicker.select_publication", %{"id" => id}
"HierarchyPicker.clear_publication", %{"id" => id}
"""
use Phoenix.LiveComponent
use Phoenix.HTML
alias Oli.Resources.Numbering
alias OliWeb.Common.Breadcrumb
alias Oli.Delivery.Hierarchy.HierarchyNode
alias Oli.Publishing.Publication
alias Oli.Authoring.Course.Project
def render(
%{
id: id,
hierarchy: %HierarchyNode{},
active: %HierarchyNode{children: children}
} = assigns
) do
~L"""
<div id="<%= id %>" class="hierarchy-picker">
<div class="hierarchy-navigation">
<%= render_breadcrumb assigns %>
</div>
<div class="hierarchy">
<%# filter out the item being moved from the options, sort all containers first %>
<%= for child <- children |> filter_items(assigns) |> sort_items(assigns) do %>
<%= render_child(assigns, child) %>
<% end %>
</div>
</div>
"""
end
def render(
%{
id: id,
hierarchy: nil,
active: nil,
publications: publications
} = assigns
) do
~L"""
<div id="<%= id %>" class="hierarchy-picker">
<div class="hierarchy-navigation">
<%= render_breadcrumb assigns %>
</div>
<div class="hierarchy">
<%= for pub <- publications do %>
<div id="hierarchy_item_<%= pub.id %>">
<button class="btn btn-link ml-1 mr-1 entry-title" phx-click="HierarchyPicker.select_publication" phx-value-id="<%= pub.id %>">
<%= pub.project.title %>
</button>
</div>
<% end %>
</div>
</div>
"""
end
def render_child(
%{
select_mode: :single,
selection: selection
} = assigns,
%{uuid: uuid, revision: revision} = child
) do
~L"""
<div id="hierarchy_item_<%= uuid %>" phx-click="HierarchyPicker.select" phx-value-uuid="<%= uuid %>">
<div class="flex-1 mx-2">
<span class="align-middle">
<input type="checkbox" <%= maybe_checked(selection, uuid) %>></input>
<%= OliWeb.Curriculum.EntryLive.icon(%{child: revision}) %>
</span>
<%= resource_link assigns, child %>
</div>
</div>
"""
end
def render_child(
%{
select_mode: :multiple,
selection: selection,
preselected: preselected,
selected_publication: pub
} = assigns,
%{uuid: uuid, revision: revision} = child
) do
click_handler =
if {pub.id, revision.resource_id} in preselected do
""
else
"phx-click=HierarchyPicker.select phx-value-uuid=#{uuid}"
end
~L"""
<div id="hierarchy_item_<%= uuid %>" <%= click_handler %>>
<div class="flex-1 mx-2">
<span class="align-middle">
<input type="checkbox" <%= maybe_checked(selection, pub.id, revision.resource_id) %> <%= maybe_preselected(preselected, pub.id, revision.resource_id) %>></input>
<%= OliWeb.Curriculum.EntryLive.icon(%{child: revision}) %>
</span>
<%= resource_link assigns, child %>
</div>
</div>
"""
end
def render_child(assigns, child) do
~L"""
<div id="hierarchy_item_<%= child.uuid %>">
<div class="flex-1 mx-2">
<span class="align-middle">
<%= OliWeb.Curriculum.EntryLive.icon(%{child: child.revision}) %>
</span>
<%= resource_link assigns, child %>
</div>
</div>
"""
end
def render_breadcrumb(%{hierarchy: nil, active: nil} = assigns) do
~L"""
<ol class="breadcrumb custom-breadcrumb p-1 px-2">
<div>
<button class="btn btn-sm btn-link" disabled><i class="las la-book"></i> Select a Publication</button>
</div>
</ol>
"""
end
def render_breadcrumb(%{hierarchy: hierarchy, active: active} = assigns) do
breadcrumbs = Breadcrumb.breadcrumb_trail_to(hierarchy, active)
~L"""
<ol class="breadcrumb custom-breadcrumb p-1 px-2">
<%= case assigns[:selected_publication] do %>
<% nil -> %>
<% selected_publication -> %>
<div class="border-right border-light">
<button class="btn btn-sm btn-link mr-2" phx-click="HierarchyPicker.clear_publication"><i class="las la-book"></i> <%= publication_title(selected_publication) %></button>
</div>
<% end %>
<button class="btn btn-sm btn-link" <%= maybe_disabled(breadcrumbs) %> phx-click="HierarchyPicker.update_active" phx-value-uuid="<%= previous_uuid(breadcrumbs) %>"><i class="las la-arrow-left"></i></button>
<%= for {breadcrumb, index} <- Enum.with_index(breadcrumbs) do %>
<%= render_breadcrumb_item Enum.into(%{
breadcrumb: breadcrumb,
show_short: length(breadcrumbs) > 3,
is_last: length(breadcrumbs) - 1 == index,
}, assigns) %>
<% end %>
</ol>
"""
end
defp render_breadcrumb_item(
%{breadcrumb: %Breadcrumb{} = breadcrumb, show_short: show_short, is_last: is_last} =
assigns
) do
~L"""
<li class="breadcrumb-item align-self-center pl-2">
<button class="btn btn-xs btn-link px-0" <%= if is_last, do: "disabled" %> phx-click="HierarchyPicker.update_active" phx-value-uuid="<%= breadcrumb.slug %>">
<%= get_title(breadcrumb, show_short) %>
</button>
</li>
"""
end
defp maybe_checked(selection, pub_id, resource_id) do
if {pub_id, resource_id} in selection do
"checked"
else
""
end
end
defp maybe_checked(selection, uuid) do
if uuid == selection do
"checked"
else
""
end
end
defp maybe_preselected(preselected, pub_id, resource_id) do
if {pub_id, resource_id} in preselected do
"checked disabled"
else
""
end
end
defp maybe_disabled(breadcrumbs) do
if Enum.count(breadcrumbs) < 2, do: "disabled", else: ""
end
defp get_title(breadcrumb, true = _show_short), do: breadcrumb.short_title
defp get_title(breadcrumb, false = _show_short), do: breadcrumb.full_title
defp publication_title(%Publication{project: %Project{title: title}}) do
if String.length(title) > 16 do
String.slice(title, 0, 16) <> "..."
else
title
end
end
defp resource_link(assigns, %HierarchyNode{
uuid: uuid,
revision: revision,
numbering: numbering
}) do
with resource_type <- Oli.Resources.ResourceType.get_type_by_id(revision.resource_type_id) do
case resource_type do
"container" ->
title =
if numbering do
Numbering.prefix(numbering) <> ": " <> revision.title
else
revision.title
end
~L"""
<button class="btn btn-link entry-title px-0" phx-click="HierarchyPicker.update_active" phx-value-uuid="<%= uuid %>">
<%= title %>
</button>
"""
_ ->
~L"""
<button class="btn btn-link entry-title px-0" disabled><%= revision.title %></button>
"""
end
end
end
defp filter_items(children, assigns) do
case assigns do
%{filter_items_fn: filter_items_fn} when filter_items_fn != nil ->
filter_items_fn.(children)
_ ->
# no filter
children
end
end
defp sort_items(children, assigns) do
case assigns do
%{sort_items_fn: sort_items_fn} when sort_items_fn != nil ->
sort_items_fn.(children)
_ ->
# default sort by resource type, containers first
Enum.sort(children, &sort_containers_first/2)
end
end
defp sort_containers_first(%HierarchyNode{revision: a}, %HierarchyNode{revision: b}) do
case {
Oli.Resources.ResourceType.get_type_by_id(a.resource_type_id),
Oli.Resources.ResourceType.get_type_by_id(b.resource_type_id)
} do
{"container", _} -> true
{type_a, type_b} when type_a == type_b -> true
_ -> false
end
end
defp previous_uuid(breadcrumbs) do
previous = Enum.at(breadcrumbs, length(breadcrumbs) - 2)
previous.slug
end
end
|
lib/oli_web/live/common/hierarchy/hierarchy_picker.ex
| 0.69285
| 0.410018
|
hierarchy_picker.ex
|
starcoder
|
defmodule Xfile do
@moduledoc """
`Xfile` contains augmentations of the built-in `File` module, including the
support of streams, the recursive listing of files, counting lines, grep, and
programmatic filtering.
"""
@doc """
Like the venerable command-line utility, `grep` searches lines in the given file
using the given pattern, returning only the matching lines as a stream.
The given pattern can be one of the following:
- an arity 1 function which returns a boolean; `true` indicates a match.
- a string
- a list of strings
- a regular expression
See `String.contains?/2` for viable inputs.
> #### Stream {: .info}
>
> `Xfile.grep/2` returns its result as a `Stream`, so you must remember to convert
> it to a list via `Enum.to_list/1` if you are not lazily evaluating its result.
## Examples
iex> Xfile.grep(~r/needle/, "path/to/file")
#Function<59.58486609/2 in Stream.transform/3>
iex> Xfile.grep("dir", ".gitignore") |> Enum.to_list()
["# The directory Mix will write compiled artifacts to.\\n",
"# The directory Mix downloads your dependencies sources to.\\n"]
Using a function to evaluate file lines:
      iex> f = fn line ->
      ...>   [serial_number | _] = String.split(line, ",")
      ...>   String.to_integer(serial_number) > 214
      ...> end
iex> Xfile.grep(f, "store/products.csv") |> Enum.to_list()
["215,Sprocket,9.99\\n", "216,Gear,5.00\\n", ...]
"""
@doc since: "0.2.0"
@spec grep(pattern :: String.pattern() | (String.t() -> boolean()), file :: Path.t()) ::
Enumerable.t()
def grep(pattern, file) when is_function(pattern, 1) do
file
|> File.stream!()
|> Stream.filter(fn line -> pattern.(line) end)
end
def grep(%Regex{} = pattern, file) do
file
|> File.stream!()
|> Stream.filter(fn line -> Regex.match?(pattern, line) end)
end
def grep(pattern, file) do
file
|> File.stream!()
|> Stream.filter(fn line -> String.contains?(line, pattern) end)
end
@doc """
This function mimics the functionality of `grep -rl`: it recursively searches
all files in the given path, returning only a list of file names (i.e. paths)
whose contents have one or more lines that match the pattern.
## Pattern
The given pattern can be one of the following:
- an arity 1 function which returns a boolean; `true` indicates a match.
- a string
- a list of strings
- a regular expression
Internally, this relies on `grep/2`.
> #### Stream {: .info}
>
> `Xfile.grep_rl/3` returns its result as a `Stream`, so you must
> remember to convert it to a list via `Enum.to_list/1` if you are not lazily
> evaluating its result.
## Options
Options are the same as those supported by `ls/2`. Use them to control which files
are subjected to the pattern matching.
## Examples
iex> Xfile.grep_rl("[error]", "tmp/logs", recursive: false) |> Enum.to_list()
[
"tmp/logs/server.1.log",
"tmp/logs/cache.log",
"tmp/logs/server.2.log"
]
## See Also
- `grep/2` for searching a single file and returning the matching lines
- `ls/2` using the `:filter` option to evaluate only the _names_ of the files.
"""
@doc since: "0.2.0"
@spec grep_rl(pattern :: String.pattern(), path :: Path.t(), opts :: Keyword.t()) ::
Enumerable.t()
def grep_rl(pattern, path, opts \\ []) do
path
|> ls!(opts)
|> Stream.filter(fn file ->
pattern
|> grep(file)
|> Enum.count()
|> Kernel.>(0)
end)
end
@doc """
Displays first `n` lines of the file, returned as an enumerable stream.
## Examples
iex> Xfile.head(".gitignore", 3) |> Enum.to_list()
[
"# The directory Mix will write compiled artifacts to.\\n",
"/_build/\\n",
"\\n"
]
"""
@doc since: "0.3.0"
@spec head(file :: Path.t(), n :: non_neg_integer()) :: Enumerable.t()
def head(file, n) when is_binary(file) and is_integer(n) and n > 0 do
file
|> File.stream!()
|> Stream.transform(0, fn line, acc ->
if acc < n, do: {[line], acc + 1}, else: {:halt, acc}
end)
end
@doc """
Counts the number of lines in the given file, offering functionality similar to `wc -l`.
Directories are not allowed. This is just some sugar around `File.stream!/1`.
> #### Newlines {: .info}
>
> This function technically counts new lines, which may result in "off-by-one"
> errors when the last line of a file is not terminated with a newline.
## Examples
iex> Xfile.line_count(".gitignore")
{:ok, 27}
iex> Xfile.line_count("/tmp"}
{:error, "Invalid input"}
"""
@doc since: "0.2.0"
  @spec line_count(file :: Path.t()) :: {:ok, non_neg_integer()} | {:error, any()}
def line_count(file) when is_binary(file) do
file
|> File.dir?()
|> case do
true ->
{:error, "Invalid input"}
false ->
{:ok,
file
|> File.stream!()
|> Enum.count()}
end
end
@doc """
As `Xfile.line_count/1`, but returns raw results on success or raises on `:error`.
## Examples
iex> Xfile.line_count!(".gitignore")
27
"""
@doc since: "0.2.0"
@spec line_count!(file :: Path.t()) :: non_neg_integer() | none()
def line_count!(file) when is_binary(file) do
file
|> File.stream!()
|> Enum.count()
end
@doc """
Returns the list of _files_ in the given directory with the ability to control
listing files recursively and filtering results programmatically.
> #### Stream {: .info}
>
> Unlike `File.ls/1`, `Xfile.ls/2` returns its result as a `Stream`, so you must
> remember to convert it to a list via `Enum.to_list/1` if you are not lazily
> evaluating its result.
  ## Differences from `File.ls/1`
- `Xfile.ls/2` returns results as a `Stream`
- `Xfile.ls/2` returns full paths (relative or absolute) instead of just basenames.
## Options
- `:recursive` indicates whether the directory and its subdirectories should be
recursively searched. This can be expressed either as a simple boolean or as a
positive integer indicating the maximum depth (where `false` is equivalent to `0`
and would list only the contents of the given directory). Default: `true`
- `:filter` can be either a regular expression to be used with `String.match?/2`,
a string or a list of strings to be used with `String.contains?/2`, OR an
arity 1 function that receives the full file path and returns a boolean value.
If the filter operation returns `true`, the file will be included in the
output. Any other output will cause the file to be filtered from the output. Optional.
- `:show_dirs?` boolean. When listing the contents of a directory that contains
sub-directories _and_ `:recursive` option is not `true`, this boolean controls
whether or not the sub-directories should be included in the output (provided
they pass any defined `:filter`). This option is ignored when `:recursive` is
`true`. Setting this option to `true` will yield results closer to what `File.ls/1`
returns. Default: `false`.
## Examples
Use a regular expression to return only `.txt` files:
iex> {:ok, stream} = Xfile.ls("path/to/files", filter: ~r/\\.txt$/)
{:ok, #Function<59.58486609/2 in Stream.transform/3>}
iex> Enum.to_list(stream)
[
"path/to/files/a.txt",
"path/to/files/b.txt",
"path/to/files/subdir/c.txt"
]
Use a function to apply more complex logic to filter the results:
iex> {:ok, stream} = Xfile.ls("mydir", filter: fn x ->
stat = File.stat!(x)
stat.size > 1024
end)
{:ok, #Function<59.58486609/2 in Stream.transform/3>}
iex> Enum.to_list(stream)
[
"mydir/big-file",
"mydir/big-file2",
# ...
]
Limit the depth of the recursion to the given directory and its subdirectories,
but no further:
iex> {:ok, stream} = Xfile.ls("top/dir", recursive: 1)
{:ok, #Function<59.58486609/2 in Stream.transform/3>}
iex> Enum.to_list(stream)
[
"top/dir/a",
"top/dir/b",
# ...
"top/dir/sub1/x",
"top/dir/sub1/y"
]
"""
@spec ls(directory :: Path.t(), opts :: Keyword.t()) :: {:ok, Enumerable.t()} | {:error, any()}
def ls(directory, opts \\ []) when is_binary(directory) do
max_depth =
opts
|> Keyword.get(:recursive, true)
|> case do
false -> 0
other -> other
end
case File.dir?(directory) do
true -> {:ok, directory |> File.ls() |> traverse(directory, opts, 0, max_depth)}
false -> {:error, "#{directory} is not a directory"}
end
end
@doc """
As `Xfile.ls/2`, but returns raw results on success or raises on `:error`.
"""
@spec ls!(directory :: Path.t(), opts :: Keyword.t()) :: Enumerable.t() | none()
def ls!(directory, opts \\ []) when is_binary(directory) do
case ls(directory, opts) do
{:ok, results} -> results
{:error, error} -> raise error
end
end
@doc """
Displays the last `n` lines of the file, returned as an enumerable stream.
## Examples
iex> Xfile.tail(".gitignore", 3) |> Enum.to_list()
[
"\\n",
"# Temporary files for e.g. tests\\n",
"/tmp\\n"
]
"""
@doc since: "0.3.0"
@spec tail(file :: Path.t(), n :: non_neg_integer()) :: Enumerable.t()
def tail(file, n) when is_binary(file) and is_integer(n) and n > 0 do
start_line = line_count!(file) - n
file
|> File.stream!()
|> Stream.transform(0, fn line, acc ->
if acc >= start_line, do: {[line], acc + 1}, else: {[], acc + 1}
end)
end
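  # Note: `tail/2` makes two passes over the file: one to count its lines and
  # a second to emit the final `n`.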
  # `traverse/5` receives the result of `File.ls/1`, which doubles as a
  # directory check: if the result is `:ok`, the path is a directory and we
  # proceed deeper into it; if the result is an `:error`, the path being
  # evaluated is accumulated as a file.
defp traverse({:ok, files}, path, opts, current_depth, max_depth)
when max_depth == true or current_depth < max_depth do
files
|> Stream.flat_map(fn f ->
"#{path}/#{f}" |> File.ls() |> traverse("#{path}/#{f}", opts, current_depth + 1, max_depth)
end)
end
# at max depth
defp traverse({:ok, files}, path, opts, _current_depth, _max_depth) do
show_dirs? = Keyword.get(opts, :show_dirs?, false)
filter = Keyword.get(opts, :filter)
files
|> Stream.flat_map(fn f ->
cond do
!File.dir?("#{path}/#{f}") && filter_file("#{path}/#{f}", filter) ->
["#{path}/#{f}"]
File.dir?("#{path}/#{f}") && show_dirs? && filter_file("#{path}/#{f}", filter) ->
["#{path}/#{f}"]
true ->
[]
end
end)
end
defp traverse({:error, _}, file, opts, _, _) do
case filter_file(file, Keyword.get(opts, :filter)) do
true -> [file]
_ -> []
end
end
defp filter_file(_, nil), do: true
defp filter_file(file, function) when is_function(function, 1), do: function.(file)
defp filter_file(file, %Regex{} = regex), do: String.match?(file, regex)
defp filter_file(file, pattern), do: String.contains?(file, pattern)
end
|
lib/xfile.ex
| 0.865437
| 0.566258
|
xfile.ex
|
starcoder
|
defmodule Tub.Absinthe.Schema do
@moduledoc """
Generate Absinthe Schema
Usage:
```elixir
name = "q1"
doc = "hello world"
params = [
{:f1, :string, nullable: false, doc: "arg1"},
{:f1, :string, nullable: true, doc: "arg2"},
]
return = :list_blocks
meta = notation: "OcapApi.GQL.Notation.Bitcoin", resolver: "OcapApi.GQL.Bitcoin.Resolver"
Tub.Absinthe.Schema.gen(mod_name, [name, doc, params, return], meta)
```
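  For the example above, the emitted query block is roughly equivalent to
  writing the following by hand (a sketch; the resolver is dispatched via
  `apply/3` on the aliased resolver module):
  ```elixir
  query do
    @desc "hello world"
    field :q1, :list_blocks do
      @desc "arg1"
      arg(:f1, non_null(:string))
      @desc "arg2"
      arg(:f2, :string)
      resolve(fn parent, args, resolution ->
        apply(OcapApi.GQL.Bitcoin.Resolver, :list_blocks, [parent, args, resolution])
      end)
    end
  end
  ```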
"""
alias Tub.DynamicModule
require DynamicModule
def gen(mod_name, queries, meta, opts \\ []) do
notation = name_to_module(meta[:notation])
resolver = name_to_module(meta[:resolver])
IO.puts("Module: #{mod_name}, notation: #{notation}, resolver: #{resolver}")
preamble =
quote do
use Absinthe.Schema
import_types(unquote(notation))
alias unquote(resolver)
end
data = transform(queries)
contents =
quote do
query do
unquote(data)
end
end
DynamicModule.gen(mod_name, preamble, contents, opts)
end
defp name_to_module(name), do: String.to_atom("Elixir.#{name}")
defp transform(queries) do
queries
|> Enum.map(fn data ->
      {field_name, field_doc, params, return} = data
IO.puts(
"Creating #{field_name}: please make sure you define resolver function **#{return}** in your resolver."
)
quote do
        @desc unquote(field_doc)
field unquote(field_name), unquote(return) do
unquote(get_params(params))
resolve(fn parent, args, resolution ->
apply(Resolver, unquote(return), [parent, args, resolution])
end)
end
end
end)
end
defp get_params(params) do
Enum.map(params, fn {name, type, meta} ->
case meta[:nullable] do
false ->
quote do
@desc unquote(meta[:doc])
arg(unquote(name), non_null(unquote(type)))
end
_ ->
quote do
@desc unquote(meta[:doc])
arg(unquote(name), unquote(type))
end
end
end)
end
end
|
lib/gen/absinthe/schema.ex
| 0.702836
| 0.67875
|
schema.ex
|
starcoder
|
defmodule RDF.XSD.Numeric do
@moduledoc """
Collection of functions for numeric literals.
"""
@type t :: module
alias RDF.{XSD, Literal}
alias Elixir.Decimal, as: D
import Kernel, except: [abs: 1, floor: 1, ceil: 1]
defdelegate datatype?(value), to: Literal.Datatype.Registry, as: :numeric_datatype?
@doc !"""
Tests for numeric value equality of two numeric XSD datatyped literals.
see:
- <https://www.w3.org/TR/sparql11-query/#OperatorMapping>
- <https://www.w3.org/TR/xpath-functions/#func-numeric-equal>
"""
@spec do_equal_value?(t() | any, t() | any) :: boolean
def do_equal_value?(left, right)
def do_equal_value?(%left_datatype{value: left}, %right_datatype{value: right}) do
cond do
XSD.Decimal.datatype?(left_datatype) or XSD.Decimal.datatype?(right_datatype) ->
equal_decimal_value?(left, right)
datatype?(left_datatype) and datatype?(right_datatype) ->
left != :nan and right != :nan and left == right
true ->
nil
end
end
def do_equal_value?(_, _), do: nil
defp equal_decimal_value?(%D{} = left, %D{} = right), do: D.equal?(left, right)
defp equal_decimal_value?(%D{} = left, right),
do: equal_decimal_value?(left, new_decimal(right))
defp equal_decimal_value?(left, %D{} = right),
do: equal_decimal_value?(new_decimal(left), right)
defp equal_decimal_value?(_, _), do: false
defp new_decimal(value) when is_float(value), do: D.from_float(value)
defp new_decimal(value), do: D.new(value)
@doc !"""
Compares two numeric XSD literals.
Returns `:gt` if first literal is greater than the second and `:lt` for vice
versa. If the two literals are equal `:eq` is returned.
Returns `nil` when the given arguments are not comparable datatypes.
"""
@spec do_compare(t, t) :: Literal.Datatype.comparison_result() | nil
def do_compare(left, right)
def do_compare(%left_datatype{value: left}, %right_datatype{value: right}) do
if datatype?(left_datatype) and datatype?(right_datatype) do
cond do
XSD.Decimal.datatype?(left_datatype) or XSD.Decimal.datatype?(right_datatype) ->
compare_decimal_value(left, right)
left < right -> :lt
left > right -> :gt
true -> :eq
end
end
end
def do_compare(_, _), do: nil
defp compare_decimal_value(%D{} = left, %D{} = right), do: D.cmp(left, right)
defp compare_decimal_value(%D{} = left, right),
do: compare_decimal_value(left, new_decimal(right))
defp compare_decimal_value(left, %D{} = right),
do: compare_decimal_value(new_decimal(left), right)
defp compare_decimal_value(_, _), do: nil
@spec zero?(any) :: boolean
def zero?(%Literal{literal: literal}), do: zero?(literal)
def zero?(%{value: value}), do: zero_value?(value)
defp zero_value?(zero) when zero == 0, do: true
defp zero_value?(%D{coef: 0}), do: true
defp zero_value?(_), do: false
@spec negative_zero?(any) :: boolean
def negative_zero?(%Literal{literal: literal}), do: negative_zero?(literal)
def negative_zero?(%{value: zero, uncanonical_lexical: "-" <> _}) when zero == 0, do: true
def negative_zero?(%{value: %D{sign: -1, coef: 0}}), do: true
def negative_zero?(_), do: false
@doc """
Adds two numeric literals.
For `xsd:float` or `xsd:double` values, if one of the operands is a zero or a
finite number and the other is INF or -INF, INF or -INF is returned. If both
operands are INF, INF is returned. If both operands are -INF, -INF is returned.
If one of the operands is INF and the other is -INF, NaN is returned.
If one of the given arguments is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-numeric-add>
"""
def add(arg1, arg2) do
arithmetic_operation(:+, arg1, arg2, fn
:positive_infinity, :negative_infinity, _ -> :nan
:negative_infinity, :positive_infinity, _ -> :nan
:positive_infinity, _, _ -> :positive_infinity
_, :positive_infinity, _ -> :positive_infinity
:negative_infinity, _, _ -> :negative_infinity
_, :negative_infinity, _ -> :negative_infinity
%D{} = arg1, %D{} = arg2, _ -> D.add(arg1, arg2)
arg1, arg2, _ -> arg1 + arg2
end)
end
@doc """
Subtracts two numeric literals.
For `xsd:float` or `xsd:double` values, if one of the operands is a zero or a
finite number and the other is INF or -INF, an infinity of the appropriate sign
is returned. If both operands are INF or -INF, NaN is returned. If one of the
operands is INF and the other is -INF, an infinity of the appropriate sign is
returned.
If one of the given arguments is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-numeric-subtract>
"""
def subtract(arg1, arg2) do
arithmetic_operation(:-, arg1, arg2, fn
:positive_infinity, :positive_infinity, _ -> :nan
:negative_infinity, :negative_infinity, _ -> :nan
:positive_infinity, :negative_infinity, _ -> :positive_infinity
:negative_infinity, :positive_infinity, _ -> :negative_infinity
:positive_infinity, _, _ -> :positive_infinity
_, :positive_infinity, _ -> :negative_infinity
:negative_infinity, _, _ -> :negative_infinity
_, :negative_infinity, _ -> :positive_infinity
%D{} = arg1, %D{} = arg2, _ -> D.sub(arg1, arg2)
arg1, arg2, _ -> arg1 - arg2
end)
end
@doc """
Multiplies two numeric literals.
For `xsd:float` or `xsd:double` values, if one of the operands is a zero and
the other is an infinity, NaN is returned. If one of the operands is a non-zero
number and the other is an infinity, an infinity with the appropriate sign is
returned.
If one of the given arguments is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-numeric-multiply>
"""
def multiply(arg1, arg2) do
arithmetic_operation(:*, arg1, arg2, fn
:positive_infinity, :negative_infinity, _ -> :nan
:negative_infinity, :positive_infinity, _ -> :nan
inf, zero, _ when inf in [:positive_infinity, :negative_infinity] and zero == 0 -> :nan
zero, inf, _ when inf in [:positive_infinity, :negative_infinity] and zero == 0 -> :nan
:positive_infinity, number, _ when number < 0 -> :negative_infinity
number, :positive_infinity, _ when number < 0 -> :negative_infinity
:positive_infinity, _, _ -> :positive_infinity
_, :positive_infinity, _ -> :positive_infinity
:negative_infinity, number, _ when number < 0 -> :positive_infinity
number, :negative_infinity, _ when number < 0 -> :positive_infinity
:negative_infinity, _, _ -> :negative_infinity
_, :negative_infinity, _ -> :negative_infinity
%D{} = arg1, %D{} = arg2, _ -> D.mult(arg1, arg2)
arg1, arg2, _ -> arg1 * arg2
end)
end
@doc """
Divides two numeric literals.
For `xsd:float` and `xsd:double` operands, floating point division is performed
as specified in [IEEE 754-2008]. A positive number divided by positive zero
returns INF. A negative number divided by positive zero returns -INF. Division
by negative zero returns -INF and INF, respectively. Positive or negative zero
divided by positive or negative zero returns NaN. Also, INF or -INF divided by
INF or -INF returns NaN.
If one of the given arguments is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
`nil` is also returned for `xsd:decimal` and `xsd:integer` operands, if the
divisor is (positive or negative) zero.
see <http://www.w3.org/TR/xpath-functions/#func-numeric-divide>
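  For example (a sketch; the arguments are coerced to XSD literals and the
  exact literal structs are elided):
      divide(1, 0)     # => nil, since integer/decimal division by zero is an error
      divide(1.0, 0.0) # => the xsd:double literal for positive INF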
"""
def divide(arg1, arg2) do
negative_zero = negative_zero?(arg2)
arithmetic_operation(:/, arg1, arg2, fn
inf1, inf2, _
when inf1 in [:positive_infinity, :negative_infinity] and
inf2 in [:positive_infinity, :negative_infinity] ->
:nan
%D{} = arg1, %D{coef: coef} = arg2, _ ->
unless coef == 0, do: D.div(arg1, arg2)
arg1, arg2, result_type ->
if zero_value?(arg2) do
cond do
result_type not in [XSD.Double, XSD.Float] -> nil
zero_value?(arg1) -> :nan
negative_zero and arg1 < 0 -> :positive_infinity
negative_zero -> :negative_infinity
arg1 < 0 -> :negative_infinity
true -> :positive_infinity
end
else
arg1 / arg2
end
end)
end
@doc """
Returns the absolute value of a numeric literal.
If the given argument is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-abs>
"""
def abs(literal)
def abs(%Literal{literal: literal}), do: abs(literal)
def abs(nil), do: nil
def abs(value) do
cond do
datatype?(value) ->
if Literal.Datatype.valid?(value) do
%datatype{} = value
case value.value do
:nan ->
literal(value)
:positive_infinity ->
literal(value)
:negative_infinity ->
datatype.base_primitive().new(:positive_infinity)
%D{} = value ->
value
|> D.abs()
|> datatype.base_primitive().new()
value ->
target_datatype = if XSD.Float.datatype?(datatype),
do: XSD.Float, else: datatype.base_primitive()
value
|> Kernel.abs()
|> target_datatype.new()
end
end
# non-numeric datatypes
Literal.datatype?(value) ->
nil
true ->
value
|> Literal.coerce()
|> abs()
end
end
@doc """
Rounds a value to a specified number of decimal places, rounding upwards if two such values are equally near.
The function returns the nearest (that is, numerically closest) value to the
given literal value that is a multiple of ten to the power of minus `precision`.
If two such values are equally near (for example, if the fractional part in the
literal value is exactly .5), the function returns the one that is closest to
positive infinity.
If the given argument is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-round>
"""
def round(literal, precision \\ 0)
def round(%Literal{literal: literal}, precision), do: round(literal, precision)
def round(nil, _), do: nil
def round(value, precision) do
cond do
datatype?(value) ->
if Literal.Datatype.valid?(value) do
%datatype{value: literal_value} = value
cond do
XSD.Integer.datatype?(datatype) ->
if precision < 0 do
literal_value
|> new_decimal()
|> xpath_round(precision)
|> D.to_integer()
|> XSD.Integer.new()
else
literal(value)
end
XSD.Decimal.datatype?(datatype) ->
literal_value
|> xpath_round(precision)
|> to_string()
|> XSD.Decimal.new()
(float_datatype = XSD.Float.datatype?(datatype)) or
XSD.Double.datatype?(datatype) ->
if literal_value in ~w[nan positive_infinity negative_infinity]a do
literal(value)
else
target_datatype = if float_datatype, do: XSD.Float, else: XSD.Double
literal_value
|> new_decimal()
|> xpath_round(precision)
|> D.to_float()
|> target_datatype.new()
end
end
end
# non-numeric datatypes
Literal.datatype?(value) ->
nil
true ->
value
|> Literal.coerce()
|> round(precision)
end
end
defp xpath_round(%D{sign: -1} = decimal, precision),
do: D.round(decimal, precision, :half_down)
defp xpath_round(decimal, precision),
do: D.round(decimal, precision)
@doc """
Rounds a numeric literal upwards to a whole number literal.
If the given argument is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-ceil>
"""
def ceil(literal)
def ceil(%Literal{literal: literal}), do: ceil(literal)
def ceil(nil), do: nil
def ceil(value) do
cond do
datatype?(value) ->
if Literal.Datatype.valid?(value) do
%datatype{value: literal_value} = value
cond do
XSD.Integer.datatype?(datatype) ->
literal(value)
XSD.Decimal.datatype?(datatype) ->
literal_value
|> D.round(0, if(literal_value.sign == -1, do: :down, else: :up))
|> D.to_string()
|> XSD.Decimal.new()
(float_datatype = XSD.Float.datatype?(datatype)) or
XSD.Double.datatype?(datatype) ->
if literal_value in ~w[nan positive_infinity negative_infinity]a do
literal(value)
else
target_datatype = if float_datatype, do: XSD.Float, else: XSD.Double
literal_value
|> Float.ceil()
|> trunc()
|> to_string()
|> target_datatype.new()
end
end
end
# non-numeric datatypes
Literal.datatype?(value) ->
nil
true ->
value
|> Literal.coerce()
|> ceil()
end
end
@doc """
Rounds a numeric literal downwards to a whole number literal.
If the given argument is not a numeric literal or a value which
can be coerced into a numeric literal, `nil` is returned.
see <http://www.w3.org/TR/xpath-functions/#func-floor>
"""
def floor(literal)
def floor(%Literal{literal: literal}), do: floor(literal)
def floor(nil), do: nil
def floor(value) do
cond do
datatype?(value) ->
if Literal.Datatype.valid?(value) do
%datatype{value: literal_value} = value
cond do
XSD.Integer.datatype?(datatype) ->
literal(value)
XSD.Decimal.datatype?(datatype) ->
literal_value
|> D.round(0, if(literal_value.sign == -1, do: :up, else: :down))
|> D.to_string()
|> XSD.Decimal.new()
(float_datatype = XSD.Float.datatype?(datatype)) or
XSD.Double.datatype?(datatype) ->
if literal_value in ~w[nan positive_infinity negative_infinity]a do
literal(value)
else
target_datatype = if float_datatype, do: XSD.Float, else: XSD.Double
literal_value
|> Float.floor()
|> trunc()
|> to_string()
|> target_datatype.new()
end
end
end
# non-numeric datatypes
Literal.datatype?(value) ->
nil
true ->
value
|> Literal.coerce()
|> floor()
end
end
  defp arithmetic_operation(op, %Literal{literal: literal1}, literal2, fun),
    do: arithmetic_operation(op, literal1, literal2, fun)
  defp arithmetic_operation(op, literal1, %Literal{literal: literal2}, fun),
    do: arithmetic_operation(op, literal1, literal2, fun)
defp arithmetic_operation(op, %datatype1{} = literal1, %datatype2{} = literal2, fun) do
if datatype?(datatype1) and datatype?(datatype2) and
Literal.Datatype.valid?(literal1) and Literal.Datatype.valid?(literal2) do
result_type = result_type(op, datatype1, datatype2)
{arg1, arg2} = type_conversion(literal1, literal2, result_type)
result = fun.(arg1.value, arg2.value, result_type)
unless is_nil(result), do: result_type.new(result)
end
end
defp arithmetic_operation(op, left, right, fun) do
cond do
is_nil(left) -> nil
is_nil(right) -> nil
not Literal.datatype?(left) -> arithmetic_operation(op, Literal.coerce(left), right, fun)
not Literal.datatype?(right) -> arithmetic_operation(op, left, Literal.coerce(right), fun)
true -> false
end
end
defp type_conversion(left, right, XSD.Decimal) do
{
if XSD.Decimal.datatype?(left) do
left
else
XSD.Decimal.new(left.value).literal
end,
if XSD.Decimal.datatype?(right) do
right
else
XSD.Decimal.new(right.value).literal
end
}
end
defp type_conversion(left, right, datatype) when datatype in [XSD.Double, XSD.Float] do
{
if XSD.Decimal.datatype?(left) do
(left.value |> D.to_float() |> XSD.Double.new()).literal
else
left
end,
if XSD.Decimal.datatype?(right) do
(right.value |> D.to_float() |> XSD.Double.new()).literal
else
right
end
}
end
defp type_conversion(left, right, _), do: {left, right}
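  # Numeric type promotion: double dominates float, float dominates decimal,
  # and decimal dominates integer; dividing two integers promotes the result
  # to xsd:decimal, following the XPath operator mapping.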
@doc false
def result_type(op, left, right), do: do_result_type(op, base_primitive(left), base_primitive(right))
defp do_result_type(_, XSD.Double, _), do: XSD.Double
defp do_result_type(_, _, XSD.Double), do: XSD.Double
defp do_result_type(_, XSD.Float, _), do: XSD.Float
defp do_result_type(_, _, XSD.Float), do: XSD.Float
defp do_result_type(_, XSD.Decimal, _), do: XSD.Decimal
defp do_result_type(_, _, XSD.Decimal), do: XSD.Decimal
defp do_result_type(:/, _, _), do: XSD.Decimal
defp do_result_type(_, _, _), do: XSD.Integer
defp base_primitive(datatype) do
primitive = datatype.base_primitive()
if primitive == XSD.Double and XSD.Float.datatype?(datatype),
do: XSD.Float,
else: primitive
end
defp literal(value), do: %Literal{literal: value}
end
|
lib/rdf/xsd/datatypes/numeric.ex
| 0.928733
| 0.789356
|
numeric.ex
|
starcoder
|
defmodule Resx.Producers.Data do
@moduledoc """
A producer to handle data URIs.
Resx.Producers.Data.open("data:text/plain;base64,SGVsbG8sIFdvcmxkIQ%3D%3D")
### Media Types
If an error is being returned when attempting to open a data URI due to
`{ :invalid_reference, "invalid media type: \#{type}" }`, the MIME type
will need to be added to the config.
### Attributes
Data URI attributes will be able to be accessed as resource attributes
`Resx.Resource.attributes/1`
"""
use Resx.Producer
alias Resx.Resource
alias Resx.Resource.Content
alias Resx.Resource.Reference
alias Resx.Resource.Reference.Integrity
defp to_data(%Reference{ repository: repo }), do: { :ok, repo }
defp to_data(%URI{ scheme: "data", path: path }) do
with [tokens, data] <- String.split(path, ",", parts: 2),
[type|tokens] <- String.split(tokens, ";") do
decode(type, tokens, data)
else
_ -> { :error, { :invalid_reference, "invalid data URI format" } }
end
end
defp to_data(uri) when is_binary(uri), do: URI.decode(uri) |> URI.parse |> to_data
defp to_data(_), do: { :error, { :invalid_reference, "not a data reference" } }
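  # `tokens` holds the `;`-separated parameters between the media type and the
  # data: a `base64` token switches the decoder, and anything else is treated
  # as a `key=value` attribute.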
defp decode("", [], data), do: decode("text/plain", ["charset=US-ASCII"], data)
defp decode("", tokens, data), do: decode("text/plain", tokens, data)
defp decode(type, tokens, data) do
if MIME.valid?(type) do
{ attributes, decoder } = Enum.reduce(tokens, { [], &({ :ok, &1 }) }, fn
"base64", { attributes, _ } ->
decoder = fn data ->
case Base.decode64(data) do
{ :ok, data } -> { :ok, data }
_ -> { :error, { :invalid_reference, "data is not base64" } }
end
end
{ attributes, decoder }
params, { attributes, decoder } ->
[key, value] = String.split(params, "=")
{ [{ key, value }|attributes], decoder }
end)
case decoder.(data) do
{ :ok, data } -> { :ok, { type, Map.new(attributes), data } }
error -> error
end
else
{ :error, { :invalid_reference, "invalid media type: #{type}" } }
end
end
@impl Resx.Producer
def schemes(), do: ["data"]
@impl Resx.Producer
def open(reference, _ \\ []) do
case to_data(reference) do
{ :ok, { type, attributes, data } } -> { :ok, new(data, type, attributes) }
error -> error
end
end
@impl Resx.Producer
def exists?(reference) do
case to_data(reference) do
{ :ok, _ } -> { :ok, true }
error -> error
end
end
@impl Resx.Producer
def alike?(a, b) do
with { :a, { :ok, data } } <- { :a, to_data(a) },
{ :b, { :ok, ^data } } <- { :b, to_data(b) } do
true
else
_ -> false
end
end
@impl Resx.Producer
def source(reference) do
case to_data(reference) do
{ :ok, _ } -> { :ok, nil }
error -> error
end
end
@impl Resx.Producer
def resource_uri(reference) do
case to_data(reference) do
{ :ok, { type, attributes, data } } ->
uri =
[
"data:",
type,
";",
Enum.reduce(attributes, [], fn { k, v }, acc -> [[k, "=", v, ";"]|acc] end),
"base64,",
Base.encode64(data)
]
|> IO.iodata_to_binary
|> URI.encode
{ :ok, uri }
error -> error
end
end
@impl Resx.Producer
def resource_attributes(reference) do
case to_data(reference) do
{ :ok, { _, attributes, _ } } -> { :ok, attributes }
error -> error
end
end
@doc """
Manually create a data resource.
Converts the static resource state or a binary into a data resource.
The type defaults to an `"application/octet-stream"` or the parent type
of the existing resource. This can be overridden by explicitly passing
a type to the `:type` option.
No attributes are attached to the data resource by default. This can be
overridden by passing the attributes to the `:attributes` option.
iex> { :ok, resource } = Resx.Producers.Data.new("hello")
...> resource.content
%Resx.Resource.Content{ data: "hello", type: ["application/octet-stream"] }
iex> { :ok, resource } = Resx.Producers.Data.new("hello", type: "text/plain")
...> resource.content.type
["text/plain"]
iex> { :ok, resource } = Resx.Producers.Data.new("hello")
...> Resx.Resource.attribute(resource, "charset")
{ :error, { :unknown_key, "charset" } }
iex> { :ok, resource } = Resx.Producers.Data.new("hello", attributes: %{ "charset" => "US-ASCII" })
...> Resx.Resource.attribute(resource, "charset")
{ :ok, "US-ASCII" }
"""
@spec new(Resource.t | binary, [type: String.t, attributes: %{ optional(Resource.attribute_key) => any }]) :: { :ok, Resource.t } | Resx.error(Resx.resource_error | Resx.reference_error)
def new(data, opts \\ [])
def new(%Resource{ content: %{ type: type, data: data } }, opts) do
type = case type do
[type|_] -> type
type -> type
end
{ :ok, new(data, opts[:type] || type, opts[:attributes] || %{}) }
end
def new(data, opts), do: { :ok, new(data, opts[:type] || "application/octet-stream", opts[:attributes] || %{}) }
@spec new(binary, String.t, %{ optional(Resource.attribute_key) => any }) :: Resource.t
defp new(data, type, attributes) do
content = %Content{
type: case type do
type when is_list(type) -> type
type -> [type]
end,
data: data
}
%Resource{
reference: %Reference{
adapter: __MODULE__,
repository: { type, attributes, data },
integrity: %Integrity{
timestamp: DateTime.utc_now
}
},
content: content
}
end
end
|
lib/resx/producers/data.ex
| 0.835114
| 0.470919
|
data.ex
|
starcoder
|
defmodule OMG.Utxo.Position do
@moduledoc """
Representation of a UTXO position in the child chain, providing encoding/decoding to/from formats digestible in `Eth`
and in the `OMG.DB`
"""
  # this offset constant is driven by the constants from the RootChain.sol contract
@input_pointer_output_type 1
alias OMG.Utxo
require Utxo
@type t() :: {
:utxo_position,
# blknum
non_neg_integer(),
# txindex
non_neg_integer(),
# oindex
non_neg_integer()
}
@type db_t() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}
@type input_db_key_t() :: {:input_pointer, pos_integer(), db_t()}
defguardp is_position(blknum, txindex, oindex)
when is_integer(blknum) and blknum >= 0 and
is_integer(txindex) and txindex >= 0 and
is_integer(oindex) and oindex >= 0
@doc """
Encode an input utxo position into an integer value.
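  The resulting integer is `blknum * 1_000_000_000 + txindex * 10_000 + oindex`,
  matching the offsets used by the RootChain.sol contract.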
## Examples
iex> utxo_pos = {:utxo_position, 4, 5, 1}
iex> OMG.Utxo.Position.encode(utxo_pos)
4_000_050_001
"""
@spec encode(t()) :: non_neg_integer()
def encode(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex),
do: ExPlasma.Utxo.pos(%{blknum: blknum, txindex: txindex, oindex: oindex})
@doc """
Decode an integer or binary into a utxo position tuple.
## Examples
# Decodes an integer encoded utxo position.
iex> OMG.Utxo.Position.decode!(4_000_050_001)
{:utxo_position, 4, 5, 1}
# Decode a binary encoded utxo position.
iex> encoded_pos = <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 107, 235, 81>>
iex> OMG.Utxo.Position.decode!(encoded_pos)
{:utxo_position, 4, 5, 1}
"""
@spec decode!(binary()) :: t()
def decode!(encoded) do
{:ok, decoded} = decode(encoded)
decoded
end
@doc """
Decode an integer or binary into a utxo position tuple.
## Examples
# Decode an integer encoded utxo position.
iex> OMG.Utxo.Position.decode(4_000_050_001)
{:ok, {:utxo_position, 4, 5, 1}}
# Returns an error if the value is too low.
iex> OMG.Utxo.Position.decode(0)
{:error, :encoded_utxo_position_too_low}
iex> OMG.Utxo.Position.decode(-1)
{:error, :encoded_utxo_position_too_low}
# Decode a binary encoded utxo position.
iex> encoded_pos = <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 107, 235, 81>>
iex> OMG.Utxo.Position.decode(encoded_pos)
{:ok, {:utxo_position, 4, 5, 1}}
"""
@spec decode(binary()) :: {:ok, t()} | {:error, :encoded_utxo_position_too_low | {:blknum, :exceeds_maximum}}
def decode(encoded) when is_number(encoded) and encoded <= 0, do: {:error, :encoded_utxo_position_too_low}
def decode(encoded) when is_integer(encoded) and encoded > 0, do: do_decode(encoded)
def decode(encoded) when is_binary(encoded) and byte_size(encoded) == 32, do: do_decode(encoded)
# TODO(achiurizo)
# Refactor to_input_db_key/1 and to_db_key/1. Doing this because
# this was merged from a previous module where one code path still wants the 3 item tuple.
@doc """
Convert a utxo position into the input db key tuple.
## Examples
iex> utxo_pos = {:utxo_position, 1, 2, 3}
iex> OMG.Utxo.Position.to_input_db_key(utxo_pos)
{:input_pointer, 1, {1, 2, 3}}
"""
@spec to_input_db_key(t()) :: {:input_pointer, unquote(@input_pointer_output_type), db_t()}
def to_input_db_key(Utxo.position(blknum, txindex, oindex)) when is_position(blknum, txindex, oindex),
do: {:input_pointer, @input_pointer_output_type, {blknum, txindex, oindex}}
@doc """
Convert a utxo position into the db key tuple. (legacy?)
## Examples
iex> utxo_pos = {:utxo_position, 1, 2, 3}
iex> OMG.Utxo.Position.to_db_key(utxo_pos)
{1, 2, 3}
"""
@spec to_db_key(t()) :: db_t()
def to_db_key(Utxo.position(blknum, txindex, oindex)), do: {blknum, txindex, oindex}
# TODO(achiurizo)
# Refactor so we only have one db key type.
@doc """
Convert an input db key tuple into a utxo position.
## Examples
# Convert an input db key tuple into a utxo position.
iex> input_db_key = {:input_pointer, 1, {1, 2, 3}}
iex> OMG.Utxo.Position.from_db_key(input_db_key)
{:utxo_position, 1, 2, 3}
# Convert a 'legacy' db key tuple into a utxo position
iex> legacy_input_db_key = {1, 2, 3}
iex> OMG.Utxo.Position.from_db_key(legacy_input_db_key)
{:utxo_position, 1, 2, 3}
"""
@spec from_db_key(db_t() | input_db_key_t()) :: t()
def from_db_key({:input_pointer, _output_type, db_value}), do: from_db_key(db_value)
def from_db_key({blknum, txindex, oindex}) when is_position(blknum, txindex, oindex),
do: Utxo.position(blknum, txindex, oindex)
# TODO(achiurizo)
# better name for this function, like to_rlp/1.
@doc """
Returns the rlp-encodable data for the given utxo position.
## Examples
iex> utxo_pos = {:utxo_position, 1, 2, 3}
iex> OMG.Utxo.Position.get_data_for_rlp(utxo_pos)
<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 59, 155, 24, 35>>
"""
@spec get_data_for_rlp(t()) :: binary()
def get_data_for_rlp(Utxo.position(blknum, txindex, oindex)) do
utxo = %ExPlasma.Utxo{blknum: blknum, txindex: txindex, oindex: oindex}
ExPlasma.Utxo.to_rlp(utxo)
end
defp do_decode(encoded) do
with {:ok, utxo} <- ExPlasma.Utxo.new(encoded),
do: {:ok, Utxo.position(utxo.blknum, utxo.txindex, utxo.oindex)}
end
end
|
apps/omg/lib/omg/utxo/position.ex
| 0.855384
| 0.480966
|
position.ex
|
starcoder
|
defmodule DateTimeParser.Serial do
@moduledoc false
def parse(string) do
if String.contains?(string, ".") do
with {float, _} <- Float.parse(string) do
{:ok, [serial: float], nil, nil, nil, nil}
end
else
with {integer, _} <- Integer.parse(string) do
{:ok, [serial: integer], nil, nil, nil, nil}
end
end
end
def from_tokens(tokens, opts) do
with {:ok, date_or_datetime} <- from_tokens(tokens[:serial]) do
assume_time(date_or_datetime, opts[:assume_time])
end
end
def from_tokens(nil), do: :error
def from_tokens(float) when is_float(float) do
{serial_date, serial_time} = split_float(float)
erl_time = time_from_serial(serial_time)
erl_date = date_from_serial(serial_date)
NaiveDateTime.from_erl({erl_date, erl_time})
end
def from_tokens(integer) when is_integer(integer) do
erl_date = date_from_serial(integer)
Date.from_erl(erl_date)
end
defp assume_time(datetime, true), do: assume_time(datetime, ~T[00:00:00])
defp assume_time(%Date{} = date, %Time{} = time) do
NaiveDateTime.new(
date.year,
date.month,
date.day,
time.hour,
time.minute,
time.second,
time.microsecond
)
end
defp assume_time(datetime, _), do: {:ok, datetime}
def time_from_serial(0.0), do: {0, 0, 0}
def time_from_serial(serial_time) do
{hours, min_fraction} = split_float(serial_time * 24)
{minutes, sec_fraction} = split_float(min_fraction * 60)
{seconds, _microseconds} = split_float(sec_fraction * 60)
{hours, minutes, seconds}
end
def date_from_serial(nil), do: :error
  def date_from_serial(serial_date) do
    # the Lotus adjustment must apply to the serial itself (serial > 59, i.e.
    # after the phantom 1900-02-29), not to the running gregorian-days total
    {1899, 12, 31}
    |> :calendar.date_to_gregorian_days()
    |> Kernel.+(adjust_for_lotus_bug(serial_date))
    |> :calendar.gregorian_days_to_date()
  end
def split_float(integer) when is_integer(integer), do: split_float(integer / 1)
def split_float(float) when float >= 0 do
whole = float |> Float.floor() |> round()
{whole, float - whole}
end
def split_float(float) when float < 0 do
whole = abs(float) |> Float.floor() |> round()
fraction = 1 - (abs(float) - whole)
fraction = if fraction == 1.0, do: 0.0, else: fraction
{whole * -1, fraction}
end
# https://en.wikipedia.org/wiki/Leap_year_bug
# Microsoft Excel has, since its earliest versions, incorrectly considered 1900 to be a leap year,
# and therefore that February 29, 1900 comes between February 28 and March 1 of that year. The bug
# originated from Lotus 1-2-3, and was purposely implemented in Excel for the purpose of backward
# compatibility. Microsoft has written an article about this bug, explaining the reasons for
# treating 1900 as a leap year. This bug has been promoted into a requirement in the Ecma Office
# Open XML (OOXML) specification.
defp adjust_for_lotus_bug(day) when day > 59, do: day - 1
defp adjust_for_lotus_bug(day), do: day
end
|
lib/serial.ex
| 0.790207
| 0.422654
|
serial.ex
|
starcoder
|
if Code.ensure_loaded?(:pbkdf2) do
defmodule Cloak.Fields.PBKDF2 do
@moduledoc """
A custom `Ecto.Type` for deriving a key for fields using
[PBKDF2](https://en.wikipedia.org/wiki/PBKDF2).
PBKDF2 is **more secure** than `Cloak.Fields.HMAC` and
`Cloak.Fields.SHA256` because it uses [key
stretching](https://en.wikipedia.org/wiki/Key_stretching) to increase the
amount of time to compute hashes. This slows down brute-force attacks.
## Why
If you store a hash of a field's value, you can then query on it as a
proxy for an encrypted field. This works because PBKDF2 is deterministic
and always results in the same value, while secure encryption does not.
Be warned, however, that hashing will expose which fields have the same
value, because they will contain the same hash.
## Dependency
To use this field type, you must install the `:pbkdf2` library in your
`mix.exs` file.
{:pbkdf2, "~> 2.0"}
## Configuration
Create a `PBKDF2` field in your project:
defmodule MyApp.Hashed.PBKDF2 do
use Cloak.Fields.PBKDF2, otp_app: :my_app
end
Then, configure it with a `:secret`, an `:algorithm`, the maximum `:size`
of the stored key (in bytes), and a number of `:iterations`, either using
mix configuration:
config :my_app, MyApp.Hashed.PBKDF2,
algorithm: :sha256,
iterations: 10_000,
secret: "secret"
Or using the `init/1` callback to fetch configuration at runtime:
defmodule MyApp.Hashed.PBKDF2 do
use Cloak.Fields.PBKDF2, otp_app: :my_app
@impl Cloak.Fields.PBKDF2
def init(config) do
config = Keyword.merge(config, [
algorithm: :sha256,
iterations: 10_000,
secret: System.get_env("PBKDF2_SECRET")
])
{:ok, config}
end
end
## Usage
Create the hash field with the type `:binary`. Add it to your schema
definition like this:
schema "table" do
field :field_name, MyApp.Encrypted.Binary
field :field_name_hash, MyApp.Hashed.PBKDF2
end
Ensure that the hash is updated whenever the target field changes with the
`put_change/3` function:
def changeset(struct, attrs \\\\ %{}) do
struct
|> cast(attrs, [:field_name, :field_name_hash])
|> put_hashed_fields()
end
defp put_hashed_fields(changeset) do
changeset
|> put_change(:field_name_hash, get_field(changeset, :field_name))
end
Query the Repo using the `:field_name_hash` in any place you would typically
query by `:field_name`.
user = Repo.get_by(User, email_hash: "<EMAIL>")
"""
@typedoc "Digest algorithms supported by Cloak.Field.PBKDF2"
@type algorithms :: :md4 | :md5 | :ripemd160 | :sha | :sha224 | :sha256 | :sha384 | :sha512
@doc """
Configures the `PBKDF2` field using runtime information.
## Example
@impl Cloak.Fields.PBKDF2
def init(config) do
config = Keyword.merge(config, [
algorithm: :sha256,
secret: System.get_env("PBKDF2_SECRET")
])
{:ok, config}
end
"""
@callback init(config :: Keyword.t()) :: {:ok, Keyword.t()} | {:error, any}
@doc false
defmacro __using__(opts) do
otp_app = Keyword.fetch!(opts, :otp_app)
quote do
@behaviour Cloak.Fields.PBKDF2
@behaviour Ecto.Type
@algorithms ~w[
md4
md5
ripemd160
sha
sha224
sha256
sha384
sha512
]a
@impl Cloak.Fields.PBKDF2
def init(config) do
defaults = [algorithm: :sha256, iterations: 10_000, size: 32]
{:ok, defaults |> Keyword.merge(config)}
end
@impl Ecto.Type
def type, do: :binary
@impl Ecto.Type
def cast(nil), do: {:ok, nil}
def cast(value) when is_binary(value), do: {:ok, value}
def cast(_value), do: :error
@impl Ecto.Type
def dump(nil), do: {:ok, nil}
def dump(value) when is_binary(value) do
config = build_config()
          # derive the key with the configured algorithm, secret, iteration
          # count, and key size (iterations are validated in validate_config/1)
          :pbkdf2.pbkdf2(
            {:hmac, config[:algorithm]},
            value,
            config[:secret],
            config[:iterations],
            config[:size]
          )
end
def dump(_value), do: :error
@impl Ecto.Type
def load(value), do: {:ok, value}
defoverridable init: 1, type: 0, cast: 1, dump: 1, load: 1
defp build_config do
{:ok, config} =
unquote(otp_app)
|> Application.get_env(__MODULE__, [])
|> init()
validate_config(config)
end
defp validate_config(config) do
unless is_binary(config[:secret]) do
secret = inspect(config[:secret])
raise Cloak.InvalidConfig, "#{secret} is an invalid secret for #{inspect(__MODULE__)}"
end
unless config[:algorithm] in @algorithms do
algo = inspect(config[:algorithm])
raise Cloak.InvalidConfig,
"#{algo} is an invalid hash algorithm for #{inspect(__MODULE__)}"
end
unless is_integer(config[:iterations]) && config[:iterations] > 0 do
iterations = inspect(config[:iterations])
raise Cloak.InvalidConfig,
"#{iterations} must be a positive integer for #{inspect(__MODULE__)}"
end
unless is_integer(config[:size]) && config[:size] > 0 do
size = inspect(config[:size])
raise Cloak.InvalidConfig,
"#{size} should be a positive integer for #{inspect(__MODULE__)}"
end
config
end
end
end
end
end
|
lib/cloak/fields/pbkdf2.ex
| 0.861494
| 0.567337
|
pbkdf2.ex
|
starcoder
|
defmodule Type.Opaque do
@moduledoc """
a wrapper for opaqueness.
"""
@enforce_keys [:module, :name, :params, :type]
defstruct @enforce_keys
@type t :: %__MODULE__{
module: module,
name: atom,
params: [Type.t],
type: Type.t
}
import Type, only: [builtin: 1]
defimpl Type.Properties do
import Type, only: :macros
import Type.Helpers
alias Type.{Message, Opaque}
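    # Opaques order by module, then name, then arity, and finally pairwise by
    # their type params.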
def compare(left = %{type: this}, right = %Opaque{type: this}) do
left_arity = length(left.params)
right_arity = length(right.params)
cond do
left.module > right.module -> :gt
left.module < right.module -> :lt
left.name > right.name -> :gt
left.name < right.name -> :lt
left_arity > right_arity -> :gt
left_arity < right_arity -> :lt
true ->
left.params
|> Enum.zip(right.params)
|> find_cmp
|> Kernel.||(:eq)
end
end
def compare(this, other) do
case Type.compare(this.type, other) do
:eq -> :lt
cmp -> cmp
end
end
    defp find_cmp(leftrightlist) do
      Enum.find_value(leftrightlist, fn {l, r} ->
        # assign first, then compare: `cmp = Type.compare(l, r) != :eq` would
        # bind a boolean to `cmp` due to operator precedence
        cmp = Type.compare(l, r)
        if cmp != :eq, do: cmp
      end)
    end
def typegroup(%{type: opaque}) do
Type.Properties.typegroup(opaque)
end
usable_as do
def usable_as(challenge = %Opaque{}, target, meta) do
case Type.usable_as(challenge.type, target, meta) do
:ok ->
# TODO: add opaqueness message here.
{:warn, [Message.make(challenge, target, meta)]}
any -> any
end
end
end
intersection do
def intersection(%Opaque{}, _non_opaque) do
builtin(:none)
end
end
subtype do
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(opaque, opts) do
params = opaque.params
|> Enum.map(&to_doc(&1, opts))
|> Enum.intersperse(", ")
concat(["#{inspect opaque.module}.#{opaque.name}("] ++ params ++ [")"])
end
end
end
|
lib/type/opaque.ex
| 0.541773
| 0.567128
|
opaque.ex
|
starcoder
|
defmodule Mix.Tasks.Ggity.Visual.Geom.Text do
@shortdoc "Launch a browser and draw sample text geom plots."
@moduledoc @shortdoc
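# Illustrative usage (the task name follows from the module name):
#
#     mix ggity.visual.geom.text            # default browser ("firefox")
#     mix ggity.visual.geom.text chromium   # any browser command on the PATH
#     mix ggity.visual.geom.text --wsl      # Windows Firefox from within WSL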
use Mix.Task
alias GGity.{Examples, Plot}
@default_browser "firefox"
@doc false
@spec run(list(any)) :: any()
def run([]), do: run([@default_browser])
def run(argv) do
plots =
Enum.join(
[
basic(),
bar_labels(),
bar_stack(),
col_stack(),
bar_dodge(),
col_dodge()
],
"\n"
)
test_file = "test/visual/visual_test.html"
browser =
case argv do
["--wsl"] ->
"/mnt/c/Program Files/Mozilla Firefox/firefox.exe"
[browser] ->
browser
end
File.write!(test_file, "<html><body #{grid_style()}>\n#{plots}\n</body></html>")
open_html_file(browser, test_file)
Process.sleep(1000)
File.rm(test_file)
end
defp open_html_file(browser, file) do
System.cmd(browser, [file])
end
defp grid_style do
"style='display: grid;grid-template-columns: repeat(3, 1fr)'"
end
defp basic do
Examples.mtcars()
|> Enum.filter(fn record ->
String.contains?(record[:model], "Merc")
end)
|> Plot.new(%{x: :wt, y: :mpg, label: :model})
|> Plot.geom_point()
|> Plot.geom_text(%{alpha: :gear}, nudge_x: 5, hjust: :left, size: 8)
|> Plot.scale_alpha_discrete(guide: :legend)
|> Plot.xlab("Weight (tons)")
|> Plot.ylab("Miles Per Gallon")
|> Plot.plot()
end
defp bar_labels do
Examples.mpg()
|> Enum.filter(fn record ->
record["manufacturer"] in ["chevrolet", "audi", "ford", "nissan", "subaru"]
end)
|> Plot.new(%{x: "manufacturer"})
|> Plot.geom_bar()
|> Plot.geom_text(%{label: :count},
position: :dodge,
family: "Courier New",
fontface: "bold",
color: "cornflowerblue",
stat: :count,
size: 8,
nudge_y: -5
)
|> Plot.plot()
end
defp bar_stack do
Examples.mpg()
|> Enum.filter(fn record ->
record["manufacturer"] in ["chevrolet", "audi", "ford", "nissan", "subaru"]
end)
|> Plot.new(%{x: "manufacturer", label: :count, group: "class"})
|> Plot.geom_bar(%{fill: "class"}, position: :stack)
|> Plot.geom_text(
color: "grey",
stat: :count,
position: :stack,
position_vjust: 0.5,
fontface: "bold",
size: 6
)
|> Plot.scale_fill_viridis(option: :inferno)
|> Plot.plot()
end
defp col_stack do
simple_bar_data()
|> Plot.new(%{x: :week, y: :units, label: :units, group: :salesperson})
|> Plot.geom_col(%{fill: :salesperson}, position: :stack)
|> Plot.geom_text(
color: "#BAAC6F",
position: :stack,
position_vjust: 0.5,
fontface: "bold",
size: 6
)
|> Plot.scale_fill_viridis(option: :cividis)
|> Plot.plot()
end
defp bar_dodge do
Examples.mpg()
|> Enum.filter(fn record ->
record["manufacturer"] in ["chevrolet", "audi", "ford", "nissan", "subaru"]
end)
|> Plot.new(%{x: "manufacturer", label: :count, group: "class"})
|> Plot.geom_bar(%{fill: "class"}, position: :dodge)
|> Plot.geom_text(%{y: :count},
color: "grey",
stat: :count,
position: :dodge,
position_vjust: 0.5,
fontface: "bold",
size: 6
)
|> Plot.scale_fill_viridis(option: :inferno)
|> Plot.plot()
end
defp col_dodge do
simple_bar_data()
|> Plot.new(%{x: :week, y: :units, label: :units, group: :salesperson})
|> Plot.geom_col(%{fill: :salesperson}, position: :dodge)
|> Plot.geom_text(
color: "#BAAC6F",
position: :dodge,
fontface: "bold",
position_vjust: 0.5,
size: 6
)
|> Plot.scale_fill_viridis(option: :cividis)
|> Plot.plot()
end
defp simple_bar_data do
[
%{salesperson: "Joe", week: "Week 1", units: 10},
%{salesperson: "Jane", week: "Week 1", units: 15},
%{salesperson: "Paul", week: "Week 1", units: 5},
%{salesperson: "Joe", week: "Week 2", units: 4},
%{salesperson: "Jane", week: "Week 2", units: 10},
%{salesperson: "Paul", week: "Week 2", units: 8},
%{salesperson: "Joe", week: "Week 3", units: 14},
%{salesperson: "Paul", week: "Week 3", units: 8},
%{salesperson: "Jane", week: "Week 3", units: 9},
%{salesperson: "Joe", week: "Week 4", units: 14},
%{salesperson: "Jane", week: "Week 4", units: 9}
]
end
end
|
lib/mix/tasks/ggity_visual_geom_text.ex
| 0.879367
| 0.400661
|
ggity_visual_geom_text.ex
|
starcoder
|
defmodule Upvest.API do
@moduledoc """
Shared utilities for interacting with the Upvest API.
It contains shared implementations of endpoint methods for
creating, listing, retrieving and deleting resources. Where possible,
transforms the raw result from the Upvest API into a final struct. This is achieved
through the use of `Upvest.Utils.to_struct/2`.
Intended for internal use by Upvest endpoint modules.
An Upvest endpoint module is usually mapped to an Upvest resource,
containing logic for interacting with the associated resource.
To implement this behaviour, simply add `use Upvest.API, [list_of_methods]` to the top of
the entity module and make sure it defines a struct mapped to the Upvest resource.
The parameter to the `use Upvest.API` construct is a list of the
endpoint functions you want to expose in the module:
* create/2 - create a new resource
* retrieve/2 - retrieve a resource
* update/3 - update a resource
* delete/2 - delete a resource
* all/1 - list all resources
* list/2 - list all resources, capped to specified limit
"""
defmacro __using__(opts) do
quote do
import Upvest, only: [request: 4]
import Upvest.Utils, only: [to_struct: 2, sprintf: 2]
alias Upvest.Client
alias __MODULE__
@type t :: %__MODULE__{}
@page_size 100
if :create in unquote(opts) do
@doc """
Create a(n) #{__MODULE__ |> Module.split() |> List.last()}
"""
def create(client, data) do
request(:post, endpoint(), data, client)
end
end
if :retrieve in unquote(opts) do
@doc """
Retrieve a(n) #{__MODULE__ |> Module.split() |> List.last()} by its ID
"""
@spec retrieve(Client.t(), binary()) :: {:ok, __MODULE__.t()} | {:error, Upvest.error()}
def retrieve(client, id) when is_bitstring(id) do
resource_url = Path.join(endpoint(), id)
with {:ok, resp} <- request(:get, resource_url, %{}, client) do
{:ok, to_struct(resp, __MODULE__)}
end
end
end
if :update in unquote(opts) do
@doc """
Update a(n) #{__MODULE__ |> Module.split() |> List.last()}
"""
@spec update(Client.t(), binary(), map()) :: {:ok, __MODULE__.t()} | {:error, Upvest.error()}
def update(client, id, data) when is_bitstring(id) do
resource_url = Path.join(endpoint(), id)
request(:patch, resource_url, data, client)
end
end
if :list in unquote(opts) do
@doc """
List specific number of #{__MODULE__ |> Module.split() |> List.last()}
"""
@spec list(Client.t(), non_neg_integer()) ::
{:ok, [__MODULE__.t()]} | {:error, Upvest.error()}
def list(client, count) do
do_list(endpoint(), count, client, [])
end
defp do_list(url, count, client, acc) do
{:ok, resp} = request(:get, url, %{}, client)
next = Map.get(resp, "next")
acc = acc ++ resp["results"]
case is_nil(next) or length(acc) == count do
true ->
{:ok, to_struct(acc, __MODULE__)}
_ ->
uri = URI.parse(next)
params = Map.put(URI.decode_query(uri.query), :page_size, @page_size)
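# drop the leading API version segment of the absolute path (assumed to
# be 4 characters, e.g. "/1.0") so the client re-prepends its base URL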
next_url = URI.parse(next).path |> String.slice(4..-1)
next_url = "#{next_url}?#{URI.encode_query(params)}"
do_list(next_url, count, client, acc)
end
end
end
if :all in unquote(opts) do
@doc """
List all #{__MODULE__ |> Module.split() |> List.last()}
"""
@spec all(Client.t()) :: {:ok, [__MODULE__.t()]} | {:error, Upvest.error()}
def all(client) do
do_all(endpoint(), client, [])
end
defp do_all(url, client, acc) do
with {:ok, resp} <- request(:get, url, %{page_size: @page_size}, client) do
next = Map.get(resp, "next")
acc = acc ++ resp["results"]
case is_nil(next) do
true ->
{:ok, to_struct(acc, __MODULE__)}
_ ->
uri = URI.parse(next)
params = Map.put(URI.decode_query(uri.query), :page_size, @page_size)
next_url = URI.parse(next).path |> String.slice(4..-1)
next_url = "#{next_url}?#{URI.encode_query(params)}"
do_all(next_url, client, acc)
end
end
end
end
if :delete in unquote(opts) do
@doc """
Delete a(n) #{__MODULE__ |> Module.split() |> List.last()}
"""
@spec delete(Client.t(), binary()) :: {:ok, nil} | {:error, Upvest.error()}
def delete(client, id) when is_bitstring(id) do
resource_url = Path.join(endpoint(), id)
request(:delete, resource_url, %{}, client)
end
end
end
end
end
|
lib/upvest/api.ex
| 0.849691
| 0.437343
|
api.ex
|
starcoder
|
defmodule IEx.History do
@moduledoc false
alias IEx.History
defstruct queue: :queue.new(), size: 0, start: 1
@doc """
Initializes IEx history state.
"""
def init(), do: %History{}
@doc """
Appends one entry to the history.
"""
def append(%History{} = state, entry, limit) do
{collect?, state} =
state
|> append(entry)
|> prune(limit)
if collect?, do: collect_garbage()
state
end
@doc """
Enumerates over all items in the history starting from the oldest one and
applies `fun` to each one in turn.
"""
def each(%History{} = state, fun) do
state
|> to_list()
|> Enum.each(fun)
end
@doc """
Gets the nth item from the history.
If `n` < 0, the count starts from the most recent item and goes back in time.
"""
# Traverses the queue front-to-back if the index is positive.
def nth(%History{queue: q, size: size, start: start}, n)
when n - start >= 0 and n - start < size do
get_nth(q, n - start)
end
# Traverses the queue back-to-front if the index is negative.
def nth(%History{queue: q, size: size, start: start}, n)
when n < 0 and size + n >= start - 1 do
get_nth(:queue.reverse(q), abs(n) - 1)
end
def nth(%History{}, n) do
raise "v(#{n}) is out of bounds"
end
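# Example: with entries [a, b, c] and start == 1, nth(state, 1) returns a
# (the oldest entry) and nth(state, -1) returns c (the most recent).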
defp get_nth(q, 0), do: :queue.head(q)
defp get_nth(q, n) when n > 0, do: get_nth(:queue.tail(q), n - 1)
defp append(%{queue: q, size: size} = state, item) do
%{state | queue: :queue.in(item, q), size: size + 1}
end
defp to_list(%{queue: q}), do: :queue.to_list(q)
# Based on https://github.com/erlang/otp/blob/7dcccee4371477e983f026db9e243cb66900b1ef/lib/stdlib/src/shell.erl#L1401
defp collect_garbage() do
collect_proc_garbage(Process.whereis(:user))
collect_proc_garbage(Process.group_leader())
:erlang.garbage_collect()
end
defp collect_proc_garbage(process) do
try do
:erlang.garbage_collect(process)
catch
_, _ -> nil
end
end
defp prune(%{start: start} = state, limit) do
prune(state, start, limit, false)
end
defp prune(state, _, limit, _) when limit < 0 do
{false, state}
end
defp prune(%{size: size} = state, counter, limit, collect?) when size - counter < limit do
{collect?, %{state | start: counter}}
end
defp prune(%{queue: q} = state, counter, limit, collect?) do
{{:value, entry}, q} = :queue.out(q)
collect? = collect? || has_binary(entry)
prune(%{state | queue: q}, counter + 1, limit, collect?)
end
# Checks val and each of its elements (if it is a list or a tuple)
# recursively to see if it has any large binaries (outside of the heap).
defp has_binary(val) do
try do
has_bin(val)
catch
:throw, :found -> true
end
end
defp has_bin(val) when is_tuple(val), do: has_bin(val, tuple_size(val) - 1)
defp has_bin([head | tail]) do
has_bin(head)
has_bin(tail)
end
defp has_bin(val) when byte_size(val) > 64, do: throw(:found)
defp has_bin(_), do: false
defp has_bin(_, -1), do: false
defp has_bin(tuple, index) do
has_bin(elem(tuple, index))
has_bin(tuple, index - 1)
end
end
|
lib/iex/lib/iex/history.ex
| 0.781747
| 0.448124
|
history.ex
|
starcoder
|
defmodule NorwegianIdNumber do
@moduledoc """
Useful information extracted from Norwegian national identification number.
From 2017, the checksum is included in the personal number and is no longer validated.
"""
@type id_type :: :fh_number | :d_number | :h_number | :birth_number
defstruct [:id_type, :birth_day, :birth_month, :birth_year, :personal_number, :raw]
@doc """
Extract useful information from Norwegian national identification number
"""
@spec parse(String.t) :: {:ok, %NorwegianIdNumber{}} | {:error, atom()}
def parse(number) do
NorwegianIdNumber.Parser.execute(number)
end
@doc """
Render a Norwegian national identification number in the given mode.
"""
@spec render(String.t, :pretty | :birthdate | :raw) :: String.t
def render(number, render_mode \\ :pretty) do
{:ok, parsed_number} = parse(number)
case render_mode do
:pretty ->
NorwegianIdNumber.Formatter.pretty(parsed_number)
:birthdate ->
NorwegianIdNumber.Formatter.birthdate(parsed_number)
_ ->
NorwegianIdNumber.Formatter.default(parsed_number)
end
end
@doc """
Checks if Norwegian national identification number is valid
"""
@spec is_valid?(String.t) :: boolean()
def is_valid?(number) when is_binary(number) do
case NorwegianIdNumber.Parser.execute(number) do
{:ok, _} ->
checksum_verification(number)
_ ->
false
end
end
def is_valid?(_), do: false
@spec checksum_verification(String.t) :: boolean()
defp checksum_verification(number) do
first_check_digit = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1]
second_check_digit = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
int_array =
String.split(number, "")
|> Enum.slice(1, 11)
|> Enum.map(&String.to_integer/1)
is_valid_check_digit?(first_check_digit, Enum.slice(int_array, 0..9))
&& is_valid_check_digit?(second_check_digit, int_array)
end
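# For example, with weights [w1, ..., w10] and digits [d1, ..., d10], the first
# check passes when rem(w1*d1 + ... + w10*d10, 11) == 0, i.e.
# rem(3*d1 + 7*d2 + 6*d3 + 1*d4 + 8*d5 + 9*d6 + 4*d7 + 5*d8 + 2*d9 + 1*d10, 11) == 0.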
@spec is_valid_check_digit?([integer()], [integer()]) :: boolean()
defp is_valid_check_digit?(sequence, int_array) do
product =
List.zip([sequence, int_array])
|> Enum.reduce(0, fn({value, check}, acc) ->
acc + (value * check)
end)
|> Kernel.rem(11)
product === 0
end
end
|
lib/norwegian_id_number.ex
| 0.791459
| 0.550305
|
norwegian_id_number.ex
|
starcoder
|
defmodule Validix.Pipeline do
alias Validix.Source
alias Validix.Stage
alias Validix.Type
@protocol_fun_arity 5
## Generate a static pipeline from the app config
pipeline = Application.get_env(:validix, :pipeline, [])
@spec pipeline() :: [Stage.t]
def pipeline() do
unquote(pipeline)
end
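# The pipeline is read once at compile time from the app environment; a
# hypothetical config (stage module names are placeholders) would be:
#
#     config :validix, :pipeline, [MyApp.CoerceStage, MyApp.ValidateStage]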
@spec run(Source.t, field :: term, Type.key, value :: term, opts :: Keyword.t)
:: {Source.t, value :: term}
def run(source, field, type, value, opts) do
run(source, field, type, value, pipeline(), opts)
end
@spec run(Source.t, field :: term, Type.key, value :: term, [Stage.t], opts :: Keyword.t)
:: {Source.t, value :: term}
def run(source, _, _, value, [], _), do: {source, value}
def run(source, field, type, value, [stage | pipeline], opts) do
stage_type = try do
Protocol.assert_protocol!(stage)
:protocol
rescue
ArgumentError -> :runnable
end
case run_stage(field, type, value, stage, stage_type, opts) do
{:ok, value} ->
run(source, field, type, value, pipeline, opts)
{:error, reason} ->
{Source.handle_error(source, reason), value}
end
end
defp run_stage(field, type, value, stage, :runnable, opts) do
stage.run(field, type, value, opts)
end
defp run_stage(field, type, value, stage, :protocol, opts) do
functions = stage.__protocol__(:functions)
|> Enum.filter(&(elem(&1, 1) == @protocol_fun_arity))
|> Enum.map(&elem(&1, 0))
|> Enum.filter(&Keyword.has_key?(opts, &1))
run_protocol_fun(field, type, value, stage, functions, opts)
end
defp run_protocol_fun(_, _, value, _, [], _), do: {:ok, value}
defp run_protocol_fun(field, type, value, stage, [fun | rest], opts) do
case apply_protocol_fun(field, type, value, stage, fun, opts[fun]) do
{:ok, value} -> run_protocol_fun(field, type, value, stage, rest, opts)
{:error, _} = error -> error
end
end
defp apply_protocol_fun(field, type, value, stage, fun, args) do
type_struct = Type.type_module(type) |> struct()
with :parent <- apply(stage, fun, [type_struct, field, type, value, args]),
do: apply_protocol_fun(field, Type.parent_type(type), value, stage, fun, args)
end
end
|
lib/validix/pipeline.ex
| 0.586523
| 0.461017
|
pipeline.ex
|
starcoder
|
defmodule Metalove.MediaParser do
alias Metalove.MediaParser.ID3
@moduledoc false
def extract_id3_metadata(filename) do
bytes = File.read!(filename)
ID3.parse(bytes)
end
end
defmodule Metalove.MediaParser.ID3 do
@moduledoc """
ID3 parser for podcast relevant metadata.
"""
@doc """
Parse the ID3 header of the binary provided, if any. Useful, for example, for deciding how much of the URL/file to read in order to parse the complete tag.
"""
def parse_header(binary)
def parse_header(
<<"ID3", version::size(8), revision::size(8), a::size(1), b::size(1), c::size(1),
d::size(1), _ignore::size(4), rest::binary>>
) do
case parse_syncsafe_integer(rest) do
{:ok, tag_size, rest} ->
flags =
[a: a, b: b, c: c, d: d]
|> Enum.reduce([], fn
{:a, 1}, acc -> [:unsync | acc]
{:b, 1}, acc -> [:ext_header | acc]
{:c, 1}, acc -> [:experimental | acc]
{:d, 1}, acc -> [:footer | acc]
_, acc -> acc
end)
case byte_size(rest) do
size when size >= tag_size ->
{:ok, tag_size, version, revision, flags, rest}
_ ->
{:content_to_short, tag_size + 10}
end
_ ->
:not_id3
end
end
def parse_header(_), do: :not_id3
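# Illustrative sketch ("episode.mp3" is a placeholder): probe only the
# 10-byte ID3 header to learn how many bytes the complete tag needs.
#
#     {:ok, probe} = File.open("episode.mp3", [:read, :binary], &IO.binread(&1, 10))
#     case parse_header(probe) do
#       {:content_to_short, needed} -> needed # read `needed` bytes, then call parse/1
#       {:ok, _size, _v, _rev, _flags, _rest} -> :ok
#       :not_id3 -> :no_tag
#     end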
@format_iso 0
@format_utf16 1
require Logger
@doc """
Parse the complete ID3 metadata from the binary provided.
"""
def parse(content) when is_binary(content) do
case parse_header(content) do
{:ok, tag_size, version, revision, flags, content} ->
result_map = %{
version: "#{version}.#{revision}",
flags: flags,
tag_size: tag_size,
tags: []
}
Logger.debug(
"ID3 Header – size:#{tag_size} v:#{result_map.version} flags:#{inspect(flags)}"
)
# truncate to payload
<<content::bytes-size(tag_size), _rest::binary>> = content
# remove unsync if necessary
{content, tag_size} =
if Enum.member?(flags, :unsync) do
remove_unsync(content, tag_size)
else
{content, tag_size}
end
case version do
# https://mutagen-specs.readthedocs.io/en/latest/id3/id3v2.3.html
3 ->
%{result_map | tags: parse_frames(content, tag_size)}
# https://mutagen-specs.readthedocs.io/en/latest/id3/id3v2.2.html
2 ->
%{result_map | tags: parse_v220_frames(content, tag_size)}
_ ->
Logger.debug("ID3v2.#{result_map.version} not supported (yet)")
result_map
end
result ->
result
end
end
# According to the spec, the unsynchronization scheme tries to generally avoid sequences of
# 0b1111_1111 0b_111x_xxxx because decoders try to sync around those, and with the metadata that would be wrong.
# This is done by injecting a zero byte into all those sequences. Decoding is just replacing all sequences of <<0xff,0x00>> back to just <<0xff>>.
# For decoding to yield the right results, this also means that when encoding, all occurrences of <<0xff,0x00>> need the extra zero byte added too.
# See spec: http://id3.org/id3v2.3.0#The_unsynchronisation_scheme
defp remove_unsync(binary, length) do
bytesize = byte_size(binary)
adjusted_binary =
binary
|> :binary.replace(<<0xFF, 0>>, <<0xFF>>, [:global])
new_bytesize = byte_size(adjusted_binary)
{adjusted_binary, length - (bytesize - new_bytesize)}
end
defp parse_frames(content) when is_binary(content) do
parse_frames(content, byte_size(content))
end
defp parse_frames(content, remaining_size) do
parse_frames(content, remaining_size, [])
end
# Allow for padding
defp parse_frames(<<0::size(8), _rest::binary>>, _remaining_size, acc),
do: parse_frames(<<>>, 0, acc)
defp parse_frames(<<frame_id::bytes-4, rest::binary>> = _begin, remaining_size, acc)
when remaining_size > 10 and frame_id != <<0, 0, 0, 0>> do
# IO.inspect(binary_part(begin, 0, 10), label: "Frame Header:")
# IO.inspect("#{frame_id}", binaries: :as_strings)
# This would be for ID3v2.4.0
# {:ok, frame_size, rest} = parse_syncsafe_integer(rest)
<<frame_size::32, rest::binary>> = rest
<<frame_flags::bytes-2, rest::binary>> = rest
# IO.puts("#{frame_id} - remaining: #{remaining_size} - frame: #{frame_size}")
<<_::1, a::1, b::1, c::1, _::1, _::1, _::1, _::1, _::1, h::1, _::1, _::1, k::1, m::1, n::1,
p::1>> = frame_flags
parsed_flags =
[a: a, b: b, c: c, h: h, k: k, m: m, n: n, p: p]
|> Enum.reduce([], fn
{:a, 1}, acc -> [:tag_alter_discard | acc]
{:b, 1}, acc -> [:file_alter_discard | acc]
{:c, 1}, acc -> [:read_only | acc]
{:h, 1}, acc -> [:group_id | acc]
{:k, 1}, acc -> [:zlib | acc]
{:m, 1}, acc -> [:encrypted | acc]
{:n, 1}, acc -> [:unsync | acc]
{:p, 1}, acc -> [:has_data_length | acc]
_, acc -> acc
end)
# |> IO.inspect(label: "Frame Flags: ")
remaining_size = remaining_size - 10 - frame_size
<<frame_content::binary-size(frame_size), rest::binary>> = rest
acc = [parse_frame(frame_id, parsed_flags, frame_content) | acc]
parse_frames(rest, remaining_size, acc)
end
defp parse_frames(_, remaining_size, acc) when remaining_size <= 10, do: Enum.reverse(acc)
# Text information frames
defp parse_frame(<<"T", _::bytes-3>> = frame_id, _parsed_flags, content) do
{String.to_atom(frame_id), parse_text_frame_content(content)}
end
# User defined URL link frame
defp parse_frame("WXXX", _parsed_flags, content) do
{format, content} = take_text_format(content)
{title, content} = take_zero_terminated_text(content, format)
{link, _} = take_zero_terminated_text(content, @format_iso)
{:WXXX, %{link: link, title: title}}
end
# Attached Picture
defp parse_frame("APIC", _parsed_flags, content) do
{format, content} = take_text_format(content)
{mime_type, content} = take_zero_terminated_text(content, format)
<<picture_type::8, content::binary>> = content
{description, image_data} = take_zero_terminated_text(content, format)
# debug_write(image_data, mime_type)
{:APIC,
%{
mime_type: sanitized_image_type(mime_type),
picture_type: picture_type,
image_data: image_data,
description: description
}}
end
# Chapters: http://id3.org/id3v2-chapters-1.0
defp parse_frame("CHAP", _parsed_flags, content) do
{element_id, content} = take_zero_terminated(content)
<<start_time::32, end_time::32, start_offset::32, end_offset::32, rest::binary>> = content
{:CHAP,
%{
element_id: element_id,
start_time: start_time,
end_time: end_time,
start_offset: start_offset,
end_offset: end_offset,
sub_frames: parse_frames(rest)
}}
end
defp parse_frame("CTOC", _parsed_flags, content) do
{element_id, content} = take_zero_terminated(content)
# <<flags, count, _::binary>> = content
<<_::6, top_level::1, ordered::1, content::binary>> = content
<<entry_count::8, content::binary>> = content
{children, content} =
case entry_count do
# This is not supposed to be allowed; however, Hindenburg produced these for a while until Version 1.81 Build 2256, so let's be gracious for now (especially because we do have an upper bound based on the frame content anyway)
0 ->
parse_ctoc_entries(content)
count ->
1..count
|> Enum.reduce({[], content}, fn _, {acc, content} ->
{element_id, rest} = take_zero_terminated(content)
{[element_id | acc], rest}
end)
end
descriptive = content
{:CTOC,
%{
element_id: element_id,
children: Enum.reverse(children),
top_level: top_level != 0,
ordered: ordered != 0,
descriptive_data: descriptive
}}
end
defp parse_frame(frame_id, parsed_flags, _content), do: {frame_id, parsed_flags}
defp parse_ctoc_entries(binary) do
parse_ctoc_entries(binary, [])
end
defp parse_ctoc_entries(<<>>, acc), do: {acc, <<>>}
defp parse_ctoc_entries(<<"TIT2", rest::binary>>, acc), do: {acc, rest}
defp parse_ctoc_entries(binary, acc) do
{element_id, rest} = take_zero_terminated(binary)
parse_ctoc_entries(rest, [element_id | acc])
end
defp parse_v220_frames(content, remaining_size),
do: parse_v220_frames_p(content, remaining_size, [])
defp parse_v220_frames_p(<<>>, 0, acc), do: Enum.reverse(acc)
# Allow for 0 padding
defp parse_v220_frames_p(<<0, 0, 0, _::binary>>, _, acc), do: parse_v220_frames_p(<<>>, 0, acc)
defp parse_v220_frames_p(
<<frame_id::bytes-3, frame_size::24, rest::binary>>,
remaining_size,
acc
) do
case remaining_size - 6 - frame_size do
too_small when too_small <= 6 ->
parse_v220_frames_p(<<>>, 0, acc)
new_remaining_size ->
<<frame_content::bytes-size(frame_size), new_content::binary>> = rest
parse_v220_frames_p(new_content, new_remaining_size, [
parse_v220_frame(frame_id, frame_content) | acc
])
end
end
# Text frames
defp parse_v220_frame(<<"T", _::binary>> = frame_id, frame_content) do
{String.to_atom(frame_id), parse_text_frame_content(frame_content)}
end
defp parse_v220_frame("COM", <<format, language::bytes-3, twostrings::binary>>) do
{description, text} = take_zero_terminated(twostrings)
{:COM,
%{
description: description |> text_to_utf8(format),
text: text |> text_to_utf8(format) |> String.trim_trailing(<<0>>),
language: language
}}
end
defp parse_v220_frame("PIC", content) do
{format, content} = take_text_format(content)
<<extension::bytes-3, content::binary>> = content
mime_type = :mimerl.extension(String.downcase(extension))
<<picture_type::8, content::binary>> = content
{description, image_data} = take_zero_terminated_text(content, format)
# debug_write(image_data, mime_type)
{:PIC,
%{
extension: extension,
mime_type: mime_type,
picture_type: picture_type,
image_data: image_data,
description: description
}}
end
defp parse_v220_frame(frame_id, content), do: {frame_id, content}
@spec text_to_utf8(binary(), non_neg_integer()) :: String.t()
defp text_to_utf8(text, format)
# Encoding 1 == utf16
defp text_to_utf8(<<0xFF, 0xFE, utf16_text::binary>>, @format_utf16),
do: :unicode.characters_to_binary(utf16_text, {:utf16, :little})
defp text_to_utf8(<<0xFE, 0xFF, utf16_text::binary>>, @format_utf16),
do: :unicode.characters_to_binary(utf16_text, {:utf16, :big})
# This is not supposed to be done this way, but it happens so lets be permissive
defp text_to_utf8("", @format_utf16), do: ""
# Encoding 0 == ISO-8859-1
defp text_to_utf8(text, @format_iso) do
text
|> :unicode.characters_to_binary(:latin1)
end
defp parse_syncsafe_integer(
<<0::1, size_1::unsigned-7, 0::1, size_2::unsigned-7, 0::size(1), size_3::size(7),
0::size(1), size_4::size(7), rest::binary>>
) do
value =
[size_1, size_2, size_3, size_4]
|> Enum.reduce(fn e, acc ->
acc * 0b1000_0000 + e
end)
{:ok, value, rest}
end
defp parse_syncsafe_integer(rest), do: {:error, binary_part(rest, 0, 4)}
# Encoding 1 == utf16
defp parse_text_frame_content(<<text_format::8, content::binary>>) do
content
|> truncate_zero_termination(text_format)
|> text_to_utf8(text_format)
end
defp truncate_zero_termination(binary, format) do
binary
|> zero_split(format)
|> hd()
end
defp do_utf16_zero_split(<<>>, acc), do: [acc]
defp do_utf16_zero_split(<<0::16, rest::binary>>, acc), do: [acc, rest]
defp do_utf16_zero_split(<<utf16_point::16, rest::binary>>, acc) do
do_utf16_zero_split(rest, <<acc::binary, utf16_point::16>>)
end
defp zero_split(binary, format) do
case format do
@format_iso ->
:binary.split(
binary,
<<0>>
)
_ ->
do_utf16_zero_split(binary, <<>>)
end
end
defp take_zero_terminated(binary, format \\ @format_iso) when is_binary(binary) do
binary
|> zero_split(format)
|> case do
[a, b] -> {a, b}
[b] -> {b, ""}
end
end
@spec take_zero_terminated_text(binary(), non_neg_integer()) :: {String.t(), binary()}
defp take_zero_terminated_text(binary, format) do
{a, b} = take_zero_terminated(binary, format)
{text_to_utf8(a, format), b}
end
defp take_text_format(<<text_format::8, rest::binary>>) do
{text_format, rest}
end
@doc false
# Internal debugging helpers
def debug_write(bytes, mime_type) do
extension =
case :mimerl.mime_to_exts(mime_type) do
[""] -> "png"
[first_ext | _] -> first_ext
end
name =
"#{Time.utc_now()}"
|> String.replace(":", "-")
|> String.replace(".", "_")
File.write!(Path.join(["/tmp", "Temp_#{name}.#{extension}"]), bytes)
end
defp sanitized_image_type("image/jpg"), do: "image/jpeg"
defp sanitized_image_type(""), do: "image/png"
defp sanitized_image_type(type), do: type
end
|
lib/metalove/media_parser.ex
| 0.719482
| 0.443179
|
media_parser.ex
|
starcoder
|
defprotocol Workex.Aggregate do
@moduledoc """
Specifies the protocol used by `Workex` behaviour to aggregate incoming messages.
"""
@doc "Value that contains aggregated messages which are passed to the worker process."
@type value :: any
@doc "Adds the new item to the aggregate."
@spec add(t, any) :: {:ok, t} | {:error, any}
def add(aggregate, message)
@doc """
Produces an aggregated value from all collected items.
The returned tuple contains aggregated items, and the new instance that doesn't
contain those items.
"""
@spec value(t) :: {value, t}
def value(aggregate)
@doc """
Returns the number of aggregated items.
This function is invoked frequently, so be sure to make the implementation fast.
"""
@spec size(t) :: non_neg_integer
def size(aggregate)
@doc """
Removes the oldest item from the collection.
Sometimes it doesn't make sense to implement this function, for example when the
aggregation doesn't guarantee or preserve ordering. In such cases, just raise from
the implementation, and document that the implementation can't be used with the
`replace_oldest` option.
"""
@spec remove_oldest(t) :: t
def remove_oldest(aggregate)
end
defmodule Workex.Stack do
@moduledoc """
Aggregates messages in a stack-like fashion. The aggregated value will contain
newer messages first.
"""
defstruct items: :queue.new, size: 0
@doc false
def add(%__MODULE__{items: items, size: size} = stack, message) do
{:ok, %__MODULE__{stack | items: :queue.in_r(message, items), size: size + 1}}
end
@doc false
def value(%__MODULE__{items: items}) do
{:queue.to_list(items), %__MODULE__{}}
end
@doc false
def size(%__MODULE__{size: size}), do: size
@doc false
def remove_oldest(%__MODULE__{items: items, size: size} = stack) do
{_, items} = :queue.out_r(items)
%__MODULE__{stack | items: items, size: size - 1}
end
defimpl Workex.Aggregate do
defdelegate add(aggregate, message), to: Workex.Stack
defdelegate value(aggregate), to: Workex.Stack
defdelegate size(aggregate), to: Workex.Stack
defdelegate remove_oldest(aggregate), to: Workex.Stack
end
end
defmodule Workex.Queue do
@moduledoc """
Aggregates messages in a queue-like fashion. The aggregated value will be a list
that preserves the order of messages.
"""
defstruct items: :queue.new, size: 0
@doc false
def add(%__MODULE__{items: items, size: size} = queue, message) do
{:ok, %__MODULE__{queue | items: :queue.in(message, items), size: size + 1}}
end
@doc false
def value(%__MODULE__{items: items}) do
{:queue.to_list(items), %__MODULE__{}}
end
@doc false
def size(%__MODULE__{size: size}), do: size
@doc false
def remove_oldest(%__MODULE__{items: items, size: size} = queue) do
{_, items} = :queue.out(items)
%__MODULE__{queue | items: items, size: size - 1}
end
defimpl Workex.Aggregate do
defdelegate add(aggregate, message), to: Workex.Queue
defdelegate value(aggregate), to: Workex.Queue
defdelegate size(aggregate), to: Workex.Queue
defdelegate remove_oldest(aggregate), to: Workex.Queue
end
end
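# Illustrative sketch (not part of the library): driving the protocol by hand.
#
#     {:ok, agg} = Workex.Aggregate.add(%Workex.Queue{}, :a)
#     {:ok, agg} = Workex.Aggregate.add(agg, :b)
#     Workex.Aggregate.size(agg)
#     #=> 2
#     {items, _empty} = Workex.Aggregate.value(agg)
#     #=> {[:a, :b], %Workex.Queue{}}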
defmodule Workex.Dict do
@moduledoc """
Assumes that messages are key-value pairs. The new message will overwrite the
existing one of the same key. The aggregated value is a list of key-value tuples.
Ordering is not preserved.
"""
defstruct items: HashDict.new
@doc false
def add(%__MODULE__{items: items} = dict, {key, value}) do
{:ok, %__MODULE__{dict | items: HashDict.put(items, key, value)}}
end
@doc false
def value(%__MODULE__{items: items}) do
{HashDict.to_list(items), %__MODULE__{}}
end
@doc false
def size(%__MODULE__{items: items}), do: HashDict.size(items)
defimpl Workex.Aggregate do
defdelegate add(aggregate, message), to: Workex.Dict
defdelegate value(aggregate), to: Workex.Dict
defdelegate size(aggregate), to: Workex.Dict
def remove_oldest(_), do: raise("not implemented")
end
end
|
lib/workex/aggregate.ex
| 0.905424
| 0.599632
|
aggregate.ex
|
starcoder
|
defmodule Absinthe.Language.IDL do
@moduledoc false
alias Absinthe.{Schema, Language, Type}
@spec to_idl_ast(atom) :: Language.Document.t
def to_idl_ast(schema) do
%Language.Document{
definitions: Enum.map(Enum.reject(Absinthe.Schema.types(schema), &Absinthe.Type.built_in?/1), &to_idl_ast(&1, schema))
}
end
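# Illustrative sketch (`MySchema` is a placeholder): render a schema as IDL text.
#
#     MySchema
#     |> Absinthe.Language.IDL.to_idl_ast()
#     |> Absinthe.Language.IDL.to_idl_iodata()
#     |> IO.iodata_to_binary()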
@spec to_idl_ast(Absinthe.Type.t, Absinthe.Schema.t) :: Language.t
def to_idl_ast(%Type.Object{} = node, schema) do
%Language.ObjectDefinition{
name: node.name,
fields: Enum.map(Map.values(node.fields), &to_idl_ast(node, &1, schema)),
interfaces: Enum.map(node.interfaces, &to_idl_named_type_ast(&1, schema))
}
end
def to_idl_ast(%Type.InputObject{} = node, schema) do
%Language.InputObjectDefinition{
name: node.name,
fields: Enum.map(Map.values(node.fields), &to_idl_ast(node, &1, schema))
}
end
def to_idl_ast(%Type.Interface{} = node, schema) do
%Language.InterfaceDefinition{
name: node.name,
fields: Enum.map(Map.values(node.fields), &to_idl_ast(node, &1, schema))
}
end
def to_idl_ast(%Type.Enum{} = node, _schema) do
%Language.EnumTypeDefinition{
name: node.name,
values: Enum.map(Map.values(node.values), &Map.get(&1, :name))
}
end
def to_idl_ast(%Type.Union{} = node, schema) do
%Language.UnionTypeDefinition{
name: node.name,
types: Enum.map(node.types, &to_idl_named_type_ast(&1, schema))
}
end
def to_idl_ast(%Type.Scalar{} = node, _schema) do
%Language.ScalarTypeDefinition{
name: node.name
}
end
def to_idl_ast(%Type.Argument{} = node, schema) do
%Language.InputValueDefinition{
name: node.name,
type: to_idl_ast(node.type, schema)
}
end
def to_idl_ast(%Type.List{of_type: type}, schema) do
%Language.ListType{
type: to_idl_ast(type, schema)
}
end
def to_idl_ast(%Type.NonNull{of_type: type}, schema) do
%Language.NonNullType{
type: to_idl_ast(type, schema)
}
end
def to_idl_ast(node, schema) when is_atom(node) do
%Language.NamedType{name: schema.__absinthe_type__(node).name}
end
@spec to_idl_ast(Type.t, Type.t, Schema.t) :: Language.t
defp to_idl_ast(%Type.InputObject{}, %Type.Field{} = node, schema) do
%Language.InputValueDefinition{
name: node.name,
default_value: to_idl_default_value_ast(Schema.lookup_type(schema, node.type, unwrap: false), node.default_value, schema),
type: to_idl_ast(node.type, schema)
}
end
defp to_idl_ast(%{__struct__: str}, %Type.Field{} = node, schema) when str in [Type.Object, Type.Interface] do
%Language.FieldDefinition{
name: node.name,
arguments: Enum.map(Map.values(node.args), &to_idl_ast(&1, schema)),
type: to_idl_ast(node.type, schema)
}
end
defp to_idl_named_type_ast(identifier, schema) do
name = schema.__absinthe_type__(identifier).name
%Language.NamedType{name: name}
end
defp to_idl_default_value_ast(_, nil, _), do: nil
defp to_idl_default_value_ast(%Type.Scalar{name: "Boolean"}, value, _schema) do
%Language.BooleanValue{value: value}
end
defp to_idl_default_value_ast(%Type.Scalar{name: "Int"}, value, _schema) do
%Language.IntValue{value: value}
end
defp to_idl_default_value_ast(%Type.Scalar{name: "String"}, value, _schema) do
%Language.StringValue{value: value}
end
defp to_idl_default_value_ast(%Type.Scalar{name: "ID"}, value, _schema) do
%Language.StringValue{value: value}
end
defp to_idl_default_value_ast(%Type.Scalar{name: "Float"}, value, _schema) do
%Language.FloatValue{value: value}
end
defp to_idl_default_value_ast(%Type.List{of_type: type}, value, schema) do
internal_type = Schema.lookup_type(schema, type, unwrap: false)
%Language.ListValue{
values: Enum.map(value, &to_idl_default_value_ast(internal_type, &1, schema))
}
end
@spec to_idl_iodata(Language.t) :: iodata
def to_idl_iodata(%Language.Document{} = doc) do
doc.definitions
|> Enum.map(&to_idl_iodata/1)
end
def to_idl_iodata(%Language.ObjectDefinition{} = node) do
[
"type ",
node.name,
implements_iodata(node.interfaces),
" {\n",
indented(2, node.fields),
"}\n"
]
end
def to_idl_iodata(%Language.InterfaceDefinition{} = node) do
[
"interface ",
node.name,
" {\n",
indented(2, node.fields),
"}\n"
]
end
def to_idl_iodata(%Language.InputObjectDefinition{} = node) do
[
"input ",
node.name,
" {\n",
indented(2, node.fields),
"}\n"
]
end
def to_idl_iodata(%Language.FieldDefinition{} = node) do
[
node.name,
arguments_idl_iodata(node.arguments),
": ",
to_idl_iodata(node.type),
]
end
def to_idl_iodata(%Language.InputValueDefinition{} = node) do
[
node.name,
": ",
to_idl_iodata(node.type),
default_idl_iodata(node.default_value),
]
end
def to_idl_iodata(%Language.EnumTypeDefinition{} = node) do
[
"enum ",
node.name,
" {\n",
indented(2, node.values),
"}\n"
]
end
def to_idl_iodata(%Language.UnionTypeDefinition{} = node) do
[
"union ",
node.name,
" = ",
Enum.map(node.types, &Map.get(&1, :name))
|> Enum.join(" | ")
]
end
def to_idl_iodata(%Language.ScalarTypeDefinition{} = node) do
[
"scalar ",
node.name,
"\n"
]
end
def to_idl_iodata(%Language.NamedType{} = node) do
node.name
end
def to_idl_iodata(%Language.NonNullType{} = node) do
[
to_idl_iodata(node.type),
"!"
]
end
def to_idl_iodata(%Language.ListType{} = node) do
[
"[",
to_idl_iodata(node.type),
"]"
]
end
def to_idl_iodata(value) when is_binary(value) do
value
end
defp implements_iodata([]) do
[]
end
defp implements_iodata(interfaces) do
[
" implements ",
interfaces
|> Enum.map(&Map.get(&1, :name))
|> Enum.join(", ")
]
end
defp default_idl_iodata(nil) do
""
end
defp default_idl_iodata(node) do
[
" = ",
do_default_idl_iodata(node)
]
end
defp do_default_idl_iodata(%Language.StringValue{} = node) do
node.value
|> inspect
end
defp do_default_idl_iodata(%Language.IntValue{} = node) do
node.value
|> Integer.to_string
end
defp do_default_idl_iodata(%Language.FloatValue{} = node) do
node.value
|> Float.to_string
end
defp do_default_idl_iodata(%Language.BooleanValue{} = node) do
node.value
|> to_string
end
defp do_default_idl_iodata(%Language.ListValue{} = node) do
[
"[",
Enum.map(node.values, &do_default_idl_iodata/1),
"]"
]
end
defp do_default_idl_iodata(%Language.ObjectValue{} = node) do
[
"{",
Enum.map(node.fields, &do_default_idl_iodata/1),
"}"
]
end
defp do_default_idl_iodata(%Language.ObjectField{} = node) do
[
node.name,
": ",
do_default_idl_iodata(node.value)
]
end
defp arguments_idl_iodata([]) do
[]
end
defp arguments_idl_iodata(arguments) do
[
"(",
Enum.intersperse(Enum.map(arguments, &to_idl_iodata/1), ", "),
")"
]
end
defp indented(amount, collection) do
indent = 1..amount |> Enum.map(fn _ -> " " end)
Enum.map(collection, fn
member ->
[indent, to_idl_iodata(member), "\n"]
end)
end
end
|
lib/absinthe/language/idl.ex
| 0.54819
| 0.437463
|
idl.ex
|
starcoder
|
defmodule Exvalidate.Rules.Between do
@moduledoc """
The field under validation must have a size between the given min and max.
- Strings: length is between those values.
- Numerics: value is between those values.
- Lists: length of array is between those values.
- Tuple: length of tuple is between those values.
### Examples with string
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {"6", "10"}}, "Vegeta")
{:error, :between_rule_wrong}
```
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, "Vegeta")
{:ok, "Vegeta"}
```
### Examples with numerics
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, 7)
{:ok, 7}
```
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, 35)
{:error, :not_between_min_max}
```
### Examples with lists
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, ["Vegeta", "Goku", "Picolo", "Krilin"])
{:ok, ["Vegeta", "Goku", "Picolo", "Krilin"]}
```
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {3, 5}}, ["Vegeta", "Krilin"])
{:error, :not_between_min_max}
```
### Examples with tuple
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, {"Vegeta", "Goku", "Picolo", "Krilin"})
{:ok, {"Vegeta", "Goku", "Picolo", "Krilin"}}
```
```
iex(3)> Exvalidate.Rules.Between.validating({:between, {4, 20}}, {"Vegeta", "Krilin"})
{:error, :not_between_min_max}
```
For more examples, see the tests: test/rules/between_test.exs
"""
use Exvalidate.Rules.IRules
def validating({:between, {min, max}}, value)
when is_number(min) and is_number(max) do
case is_between(min, max, value) do
{:ok, true} ->
{:ok, value}
{:ok, false} ->
{:error, :not_between_min_max}
error ->
error
end
end
def validating(_, _), do: {:error, :between_rule_wrong}
defp is_between(min, max, value)
when is_number(value) do
{:ok, value >= min and value <= max}
end
defp is_between(min, max, value)
when is_binary(value) and byte_size(value) > 0 do
{:ok, String.length(value) >= min and String.length(value) <= max}
end
defp is_between(min, max, value)
when is_list(value) do
{:ok, Enum.count(value) >= min and Enum.count(value) <= max}
end
defp is_between(min, max, value)
when is_tuple(value) do
{:ok, tuple_size(value) >= min and tuple_size(value) <= max}
end
defp is_between(_, _, _), do: {:error, :between_value_invalid}
end
|
lib/workflow/rules/between.ex
| 0.903651
| 0.972467
|
between.ex
|
starcoder
|
defmodule Phoenix.View do
alias Phoenix.Html
alias Phoenix.Naming
@moduledoc """
Serves as the base view for an entire Phoenix application view layer
Users define `App.Views` and `use Phoenix.View`. The main view:
* Serves as a base presentation layer for all views and templates
* Wires up the Template.Compiler and template path for all other views
* Expects the base view to define a `__using__` macro for other view modules
## Examples
defmodule App.Views do
defmacro __using__(_options) do
quote do
use Phoenix.View, templates_root: unquote(Path.join([__DIR__, "templates"]))
import unquote(__MODULE__)
# This block is expanded within all views for aliases, imports, etc
def title, do: "Welcome to Phoenix!"
end
end
# Functions defined here are available to all other views/templates
end
defmodule App.PageView do
use App.Views
def display(something) do
String.upcase(something)
end
end
"""
defmacro __using__(options \\ []) do
templates_root = Dict.get(options, :templates_root, default_templates_root)
quote do
import unquote(__MODULE__), except: [render: 3]
import Phoenix.View.Helpers
import Phoenix.Html, only: [safe: 1, unsafe: 1]
path = template_path_from_view_module(__MODULE__, unquote(templates_root))
use Phoenix.Template.Compiler, path: path
end
end
@doc """
Renders template to String
* module - The View module, ie, MyView
* template - The String template, ie, "index.html"
* assigns - The Dictionary of assigns, ie, [title: "Hello!"]
## Examples
iex> View.render(MyView, "index.html", title: "Hello!")
"<h1>Hello!</h1>"
## Layouts
Template can be rendered within other templates using the `within` option.
`within` accepts a Tuple, of the form `{LayoutModule, "template.extension"}`
When the sub template is rendered, the layout template will have an `@inner`
assign containing the rendered contents of the sub-template. For html
templates, `@inner` will be passed through `Html.safe/1` automatically.
### Examples
iex> View.render(MyView, "index.html", within: {LayoutView, "app.html"})
"<html><h1>Hello!</h1></html>"
"""
def render(module, template, assigns) do
assigns
|> Dict.get(:within)
|> render_within(module, template, assigns)
end
defp render_within({layout_mod, layout_tpl}, inner_mod, template, assigns) do
template
|> inner_mod.render(assigns)
|> render_layout(layout_mod, layout_tpl, assigns)
|> unwrap_rendered_content(Path.extname(template))
end
defp render_within(nil, module, template, assigns) do
template
|> module.render(assigns)
|> unwrap_rendered_content(Path.extname(template))
end
defp render_layout(inner_content, layout_mod, layout_tpl, assigns) do
layout_assigns = Dict.merge(assigns, inner: inner_content)
layout_mod.render(layout_tpl, layout_assigns)
end
@doc """
Unwraps rendered String content within extension specific structure
## Examples
iex> View.unwrap_rendered_content({:safe, "<h1>Hello!</h1>"}, ".html")
"<h1>Hello!</h1>"
iex> View.unwrap_rendered_content("Hello!", ".txt")
"Hello!"
"""
def unwrap_rendered_content(content, ".html"), do: Html.unsafe(content)
def unwrap_rendered_content(content, _ext), do: content
@doc """
Finds the template path given view module and template root path
## Examples
iex> Phoenix.View.template_path_from_view_module(MyApp.UserView, "web/templates")
"web/templates/user"
"""
def template_path_from_view_module(view_module, templates_root) do
submodule_path = view_module
|> Module.split
|> tl
|> Enum.map(&Naming.underscore/1)
|> Path.join
|> String.replace(~r/^(.*)(_view)$/, "\\1")
Path.join(templates_root, submodule_path)
end
@doc """
Returns the default String template root path for current mix project
"""
def default_templates_root do
Path.join([File.cwd!, "web/templates"])
end
end
|
lib/phoenix/view.ex
| 0.87068
| 0.427516
|
view.ex
|
starcoder
|
defmodule Membrane.AAC.Parser.Helper do
@moduledoc false
# Resources:
# https://wiki.multimedia.cx/index.php/ADTS
use Bunch
alias Membrane.{AAC, Buffer, Time}
@header_size 7
@crc_size 2
@spec parse_adts(binary, AAC.t(), AAC.Parser.timestamp_t(), %{
samples_per_frame: AAC.samples_per_frame_t(),
encapsulation: AAC.encapsulation_t()
}) ::
{:ok, {[{:caps, AAC.t()} | {:buffer, Buffer.t()}], binary, AAC.Parser.timestamp_t()}}
| {:error, :adts_header}
def parse_adts(data, caps, timestamp, options) do
with {:ok, {output, {rest, timestamp}}} <-
Bunch.List.try_unfoldr({data, caps, timestamp}, &do_parse_adts(&1, options)) do
{:ok, {List.flatten(output), rest, timestamp}}
end
end
defp do_parse_adts({data, caps, timestamp}, options)
when byte_size(data) > @header_size + @crc_size do
withl header: {:ok, frame_caps, header, crc, frame_length} <- parse_header(data, options),
header: :ok <- verify_header(header, crc),
do: adts_size = byte_size(header) + byte_size(crc),
payload: {:frame, frame, rest} <- extract_frame(data, adts_size, frame_length, options) do
caps = if caps == frame_caps, do: [], else: [caps: frame_caps]
buffer = [buffer: %Buffer{pts: timestamp, payload: frame}]
{:ok, {:cont, caps ++ buffer, {rest, frame_caps, next_timestamp(timestamp, frame_caps)}}}
else
header: :error -> {:error, :adts_header}
payload: :no_frame -> {:ok, {:halt, {data, timestamp}}}
end
end
defp do_parse_adts({data, _caps, timestamp}, _options), do: {:ok, {:halt, {data, timestamp}}}
defp parse_header(
<<0xFFF::12, _version::1, 0::2, protection_absent::1, profile_id::2,
sampling_frequency_id::4, _priv_bit::1, channel_config_id::3, _::4, frame_length::13,
_buffer_fullness::11, aac_frames_cnt::2, rest::binary>> = data,
options
)
when sampling_frequency_id <= 12 do
<<header::binary-size(@header_size), ^rest::binary>> = data
crc =
if protection_absent == 1 do
<<>>
else
<<crc::16, _rest::binary>> = rest
crc
end
caps = %AAC{
profile: AAC.aot_id_to_profile(profile_id + 1),
sample_rate: AAC.sampling_frequency_id_to_sample_rate(sampling_frequency_id),
channels: AAC.channel_config_id_to_channels(channel_config_id),
frames_per_buffer: aac_frames_cnt + 1,
samples_per_frame: options.samples_per_frame,
encapsulation: options.out_encapsulation
}
{:ok, caps, header, crc, frame_length}
end
defp parse_header(_payload, _options), do: :error
defp verify_header(_header, <<>>), do: :ok
defp verify_header(header, crc) do
if crc == CRC.crc_16(header), do: :ok, else: :error
end
defp extract_frame(data, _adts_size, size, %{out_encapsulation: :ADTS}) do
case data do
<<frame::binary-size(size), rest::binary>> -> {:frame, frame, rest}
_other -> :no_frame
end
end
defp extract_frame(data, adts_size, size, %{out_encapsulation: :none}) do
frame_size = size - adts_size
case data do
<<_adts::binary-size(adts_size), frame::binary-size(frame_size), rest::binary>> ->
{:frame, frame, rest}
_other ->
:no_frame
end
end
@spec next_timestamp(any(), AAC.t()) :: AAC.Parser.timestamp_t()
def next_timestamp(timestamp, caps) do
use Ratio
timestamp +
Ratio.new(caps.samples_per_frame * caps.frames_per_buffer * Time.second(), caps.sample_rate)
end
@spec payload_to_adts(binary(), AAC.t()) :: binary()
def payload_to_adts(payload, %AAC{} = caps) do
frame_length = 7 + byte_size(payload)
freq_index = caps.sample_rate |> AAC.sample_rate_to_sampling_frequency_id()
channel_config = caps.channels |> AAC.channels_to_channel_config_id()
profile = AAC.profile_to_aot_id(caps.profile) - 1
header = <<
# sync
0xFFF::12,
# id
0::1,
# layer
0::2,
# protection_absent
1::1,
# profile
profile::2,
# sampling frequency index
freq_index::4,
# private_bit
0::1,
# channel configuration
channel_config::3,
# original_copy
0::1,
# home
0::1,
# copyright identification bit
1::1,
# copyright identification start
1::1,
# aac frame length
frame_length::13,
# adts buffer fullness (signalling VBR - most decoders don't care anyway)
0x7FF::11,
# number of raw data blocks in frame - 1
0::2
>>
header <> payload
end
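# Illustrative sketch (caps values assumed): re-encapsulating a raw AAC frame.
#
#     caps = %Membrane.AAC{profile: :LC, sample_rate: 44_100, channels: 2}
#     adts_frame = payload_to_adts(raw_frame, caps)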
@spec parse_audio_specific_config!(binary()) :: AAC.t()
def parse_audio_specific_config!(
<<profile::5, sr_index::4, channel_configuration::4, frame_length_flag::1, _rest::bits>>
),
do: %AAC{
profile: AAC.aot_id_to_profile(profile),
mpeg_version: 4,
sample_rate: AAC.sampling_frequency_id_to_sample_rate(sr_index),
channels: AAC.channel_config_id_to_channels(channel_configuration),
encapsulation: :none,
samples_per_frame: if(frame_length_flag == 0, do: 1024, else: 960)
}
end
|
lib/membrane/aac/parser/helper.ex
| 0.79158
| 0.441613
|
helper.ex
|
starcoder
|
defmodule Requiem do
@moduledoc """
## Description
This is an Elixir framework for running a QuicTransport (WebTransport over QUIC) server.
- https://w3c.github.io/webtransport/
- https://tools.ietf.org/html/draft-vvv-webtransport-quic-02
This library depends on [cloudflare/quiche](https://github.com/cloudflare/quiche).
**quiche** is written in **Rust**, so you need a Rust compiler to build this library.
ReQUIem requires [Rustler](https://github.com/rusterlium/rustler) to bridge between Elixir and Rust.
## Note
This library is currently in an experimental phase.
We plan to ensure its stability by conducting sufficient interoperability and performance tests in the future.
## Getting Started
### Resource preparation
Prepare a server and set up DNS so that you can access the server with your domain name.
Also, prepare the certificate chain and private key pem file to be used for that domain name.
You can follow the same procedure as when dealing with TLS on a typical web server.
Also, decide the port to use this time, and set the firewall etc. so that you can access the server via that port.
## Define your own handler
First of all, let's define your own handler.
Write the `use Requiem` line as follows.
`lib/my_app/my_handler.ex`
```elixir
defmodule MyApp.MyHandler do
use Requiem, otp_app: :my_app
end
```
### Configuration
Prepare the config file.
In `config/config.exs` or `config/releases.exs`,
Write as follows.
Make sure that the certificate can be specified via an environment variable.
```elixir
import Config
config :my_app, MyApp.MyHandler,
host: "0.0,0.0",
port: 443,
cert_chain: System.get_env("CERT_FILE"),
priv_key: System.get_env("PRIV_KEY"),
initial_max_data: 10_000_000,
max_udp_payload_size: 1350,
initial_max_stream_data_bidi_local: 1_000_000,
initial_max_stream_data_bidi_remote: 1_000_000,
initial_max_stream_data_uni: 1_000_000,
initial_max_streams_uni: 10,
initial_max_streams_bidi: 10,
disable_active_migration: true,
enable_early_data: true
```
Set it like this. There are many more parameters for config, but I won't explain them here. See [Configuration](https://github.com/xflagstudio/requiem/wiki/Configuration) for details.
### Put your handler into your application supervisor
When you start the application, include the handler module that you just created in the child_spec definition of Supervisor.
`lib/my_app/application.ex`
```elixir
defmodule MyApp do
use Application
def start(_type, _args) do
[
# ...,
MyApp.MyHandler
]
|> Supervisor.start_link([
strategy: :one_for_one,
name: MyApp.Supervisor
])
end
end
```
Now let's launch the application.
```shell
CERT_FILE=/path/to/cert PRIV_KEY=/path/to/priv_key mix run --no-halt
```
### Handler callbacks
If there are no problems with the config and other settings, this will start the application, but it is of no use at this point.
The reason is that no callback is written in the Handler.
Let's try to implement just printing the sent data to the standard output.
`lib/my_app/my_handler.ex`
```elixir
defmodule MyApp.MyHandler do
use Requiem, otp_app: :my_app
@impl Requiem
def handle_stream(_stream_id, data, conn, state) do
IO.puts(data)
{:ok, conn, state}
end
end
```
If you want to create an echo server that sends data directly back to the recipient, you can write the following
```elixir
defmodule MyApp.MyHandler do
use Requiem, otp_app: :my_app
@impl Requiem
def handle_stream(stream_id, data, conn, state) do
stream_send(stream_id, data, false)
{:ok, conn, state}
end
end
```
However, this implementation may fail depending on the value of stream_id. See [Stream](https://github.com/xflagstudio/requiem/wiki/Stream) for details.
Let's add a few more things.
```elixir
defmodule MyApp.MyHandler do
use Requiem, otp_app: :my_app
@impl Requiem
def init(conn, client) do
{:ok, conn, %{}}
end
@impl Requiem
def handle_stream(stream_id, data, conn, state) do
stream_send(stream_id, data, false)
{:ok, conn, state}
end
@impl Requiem
def handle_info(request, conn, state) do
{:noreply, conn, state}
end
@impl Requiem
def handle_cast(request, conn, state) do
{:noreply, conn, state}
end
@impl Requiem
def handle_call(request, from, conn, state) do
{:reply, :ok, conn, state}
end
@impl Requiem
def terminate(_reason, _conn, _state) do
:ok
end
end
```
If you are familiar with GenServer, you will see familiar names in the list. There are some parameters that you may not have seen before, such as `conn` and `client`, but other than that, you can probably guess how it behaves.
You can hook initialization and termination processes with `init/2` and `terminate/3`, and receive inter-process messages with `handle_info/3`, `handle_cast/3`, and `handle_call/4`.
In addition, `handle_dgram/3` can handle received datagrams. To send a datagram, use `dgram_send/1`.
```elixir
defmodule MyApp.MyHandler do
use Requiem, otp_app: :my_app
@impl Requiem
def init(conn, client) do
{:ok, conn, %{}}
end
@impl Requiem
def handle_stream(stream_id, data, conn, state) do
stream_send(stream_id, data, false)
{:ok, conn, state}
end
@impl Requiem
def handle_dgram(data, conn, state) do
dgram_send(data)
{:ok, conn, state}
end
@impl Requiem
def handle_info(request, conn, state) do
{:noreply, conn, state}
end
@impl Requiem
def handle_cast(request, conn, state) do
{:noreply, conn, state}
end
@impl Requiem
def handle_call(request, from, conn, state) do
{:reply, :ok, conn, state}
end
@impl Requiem
def terminate(_reason, _conn, _state) do
:ok
end
end
```
To use datagrams, you need to set the **enable_dgram** config to true.
```elixir
config :my_app, MyApp.MyHandler,
host: "0.0,0.0",
port: 443,
cert_chain: System.get_env("CERT"),
priv_key: System.get_env("PRIV_KEY"),
max_idle_timeout: 50000,
initial_max_data: 10_000_000,
max_udp_payload_size: 1350,
initial_max_stream_data_bidi_local: 1_000_000,
initial_max_stream_data_bidi_remote: 1_000_000,
initial_max_stream_data_uni: 1_000_000,
initial_max_streams_uni: 10,
initial_max_streams_bidi: 10,
disable_active_migration: true,
enable_early_data: true,
enable_dgram: true
```
Once you have done this, you can open the [WebTransport example page](https://googlechrome.github.io/samples/webtransport/client.html) in Google Chrome and try to interact with it.
For more information on the various callbacks and the various functions that can be called from here, see [Handler](https://github.com/xflagstudio/requiem/wiki/Handler).
## Examples
This repository contains an example project that can be used as a reference.
Check inside the `examples` directory.
## Handler
https://github.com/xflagstudio/requiem/wiki/Handler
## Configuration
https://github.com/xflagstudio/requiem/wiki/Configuration
"""
@type terminate_reason :: :normal | :shutdown | {:shutdown, term} | term
@callback init(conn :: Requiem.ConnectionState.t(), client :: Requiem.ClientIndication.t()) ::
{:ok, Requiem.ConnectionState.t(), any}
| {:ok, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback handle_call(
request :: term,
from :: pid,
conn :: Requiem.ConnectionState.t(),
state :: any
) ::
{:noreply, Requiem.ConnectionState.t(), any}
| {:noreply, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:reply, any, Requiem.ConnectionState.t(), any}
| {:reply, any, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback handle_info(
request :: term,
conn :: Requiem.ConnectionState.t(),
state :: any
) ::
{:noreply, Requiem.ConnectionState.t(), any}
| {:noreply, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback handle_cast(
request :: term,
conn :: Requiem.ConnectionState.t(),
state :: any
) ::
{:noreply, Requiem.ConnectionState.t(), any}
| {:noreply, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback handle_stream(
stream_id :: non_neg_integer,
data :: binary,
conn :: Requiem.ConnectionState.t(),
state :: any
) ::
{:ok, Requiem.ConnectionState.t(), any}
| {:ok, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback handle_dgram(
data :: binary,
conn :: Requiem.ConnectionState.t(),
state :: any
) ::
{:ok, Requiem.ConnectionState.t(), any}
| {:ok, Requiem.ConnectionState.t(), any, timeout | :hibernate}
| {:stop, non_neg_integer, atom}
@callback terminate(
reason :: terminate_reason,
conn :: Requiem.ConnectionState.t(),
state :: any
) :: any
defmacro __using__(opts \\ []) do
quote location: :keep, bind_quoted: [opts: opts] do
@behaviour Requiem
# Logger.error/1 is a macro, so the using module must require Logger
require Logger
import Requiem.ConnectionState, only: [trap_exit: 2]
@spec close() :: no_return
def close(), do: send(self(), {:__close__, false, :no_error, :shutdown})
@spec close(non_neg_integer, atom) :: no_return
def close(code, reason), do: send(self(), {:__close__, true, code, reason})
@spec stream_send(non_neg_integer, binary, boolean) :: no_return
def stream_send(stream_id, data, fin) do
if Requiem.StreamId.is_writable?(stream_id) do
send(self(), {:__stream_send__, stream_id, data, fin})
else
Logger.error(
"<Requiem.Connection> You can't send data on this stream[stream_id: #{stream_id}]. This stream is not writable."
)
end
end
@spec dgram_send(binary) :: no_return
def dgram_send(data),
do: send(self(), {:__dgram_send__, data})
@otp_app Keyword.fetch!(opts, :otp_app)
@impl Requiem
      def init(conn, _client), do: {:ok, conn, %{}}
@impl Requiem
def handle_info(_event, conn, state), do: {:noreply, conn, state}
@impl Requiem
def handle_cast(_event, conn, state), do: {:noreply, conn, state}
@impl Requiem
def handle_call(_event, _from, conn, state), do: {:reply, :ok, conn, state}
@impl Requiem
def handle_stream(_stream_id, _data, conn, state), do: {:ok, conn, state}
@impl Requiem
def handle_dgram(_data, conn, state), do: {:ok, conn, state}
@impl Requiem
def terminate(_reason, _conn, _state), do: :ok
defoverridable init: 2,
handle_info: 3,
handle_cast: 3,
handle_call: 4,
handle_stream: 4,
handle_dgram: 3,
terminate: 3
@spec child_spec(any) :: Supervisor.child_spec()
def child_spec(_opts) do
Requiem.Supervisor.child_spec(__MODULE__, @otp_app)
end
end
end
end
|
lib/requiem.ex
| 0.799833
| 0.806243
|
requiem.ex
|
starcoder
|
defmodule Tracer do
@moduledoc """
  **Tracer** is a tracing framework for Elixir featuring an easy-to-use, high-level interface, extensibility, and safety for use in production.
To run a tool use the `run` command. Tracing only happens when the tool is running.
All tools accept the following parameters:
* `node: node_name` - Option to run the tool remotely.
  * `max_tracing_time: time` - Maximum time to run the tool (default: 30 sec).
  * `max_message_count: count` - Maximum number of events (default: 1000).
  * `max_queue_size: size` - Maximum message queue size (default: 1000).
  * `process: pid` - Process to trace; also accepts registered names,
    and :all, :existing, :new.
  * `forward_pid: pid` - Forward results as messages instead of printing
    to the display.
## Examples
```
iex> run Count, process: self(), match: global String.split(string, pattern)
:ok
iex> String.split("Hello World", " ")
["Hello", "World"]
iex> String.split("Hello World", " ")
["Hello", "World"]
  iex> String.split("Hello World", "o")
["Hell", " W", "rld"]
iex> String.split("Hello", "o")
["Hell", ""]
iex> stop
:ok
1 [string:"Hello World", pattern:"o"]
1 [string:"Hello" , pattern:"o" ]
2 [string:"Hello World", pattern:" "]
```
"""
alias Tracer.{Server, Probe, Tool}
import Tracer.Macros
defmacro __using__(_opts) do
quote do
import Tracer
import Tracer.Matcher
alias Tracer.{Tool, Probe, Clause}
alias Tracer.Tool.{Display, Count, CallSeq, Duration, FlameGraph}
:ok
end
end
delegate :start_server, to: Server, as: :start
delegate :stop_server, to: Server, as: :stop
delegate :stop, to: Server, as: :stop_tool
delegate_1 :set_tool, to: Server, as: :set_tool
def probe(params) do
Probe.new(params)
end
def probe(type, params) do
Probe.new([type: type] ++ params)
end
def tool(type, params) do
Tool.new(type, params)
end
@doc """
Runs a tool. Tracing only happens when the tool is running.
  * `tool_name` - The name of the tool you want to run.
* `node: node_name` - Option to run the tool remotely.
  * `max_tracing_time: time` - Maximum time to run the tool (default: 30 sec).
  * `max_message_count: count` - Maximum number of events (default: 1000).
  * `max_queue_size: size` - Maximum message queue size (default: 1000).
  * `process: pid` - Process to trace; also accepts registered names,
    and :all, :existing, :new.
  * `forward_pid: pid` - Forward results as messages instead of printing
    to the display.
## Examples
```
iex> run Count, process: self(), match: global String.split(string, pattern)
:ok
iex> String.split("Hello World", " ")
["Hello", "World"]
iex> String.split("Hello World", " ")
["Hello", "World"]
iex> String.split("Hello World", "o")
["Hell", " W", "rld"]
iex> String.split("Hello", "o")
["Hell", ""]
iex> stop
:ok
1 [string:"Hello World", pattern:"o"]
1 [string:"Hello" , pattern:"o" ]
2 [string:"Hello World", pattern:" "]
```
"""
def run(%{"__tool__": _} = tool) do
Server.start_tool(tool)
end
def run(tool_name, params) do
Server.start_tool(tool(tool_name, params))
end
end
|
lib/tracer.ex
| 0.9026
| 0.798187
|
tracer.ex
|
starcoder
|
defmodule Infer.Ecto.Query.Builder do
@moduledoc """
Internal data structure to keep track of all context needed to translate complex Infer
rules to Ecto queries.
## Context switches
### Evaluate rule on other subject
- Can not access existing aliases
- Reset path
- Keep only next alias index
### Subquery (EXISTS)
- Can access existing aliases & path
- Mark existing aliases & path entries as :parent
- Add alias & path entry
### Join
- Can access existing aliases & path
- Add alias & path entry
"""
use TypedStruct
@type mapped_alias() :: {atom(), module(), %{atom() => mapped_alias()}}
typedstruct do
field(:query, Ecto.Query.t(), required: true)
field(:root_query, Ecto.Query.t())
field(:aliases, mapped_alias())
field(:path, list(atom()), default: [])
field(:types, list(atom()), default: [])
field(:next_alias_index, non_neg_integer(), default: 0)
field(:negate?, boolean(), default: false)
field(:in_subquery?, boolean(), default: false)
field(:eval, Infer.Evaluation.t())
end
alias __MODULE__, as: Builder
import Ecto.Query, only: [dynamic: 1, from: 2, join: 5]
def init(query, eval) do
%Builder{root_query: query, eval: eval}
|> set_root_alias()
end
defp set_root_alias(
%{
root_query: %Ecto.Query{
from: %Ecto.Query.FromExpr{
as: root_alias,
source: {_, type}
}
}
} = builder
)
when not is_nil(root_alias),
do: %{builder | aliases: {root_alias, type, %{}}}
defp set_root_alias(builder), do: builder |> do_alias() |> elem(0) |> set_root_alias()
defp get_type(%Ecto.Query{from: %{source: {_, type}}}), do: type
def root_alias(%Builder{aliases: {root_alias, _, _}}), do: root_alias
def root_type(%Builder{aliases: {_, type, _}}), do: type
def field(builder, key, maybe_parent? \\ false)
def field(%{path: [{:parent, as} | _]}, key, _), do: dynamic(field(parent_as(^as), ^key))
def field(%{path: [as | _], in_subquery?: true}, key, true),
do: dynamic(field(parent_as(^as), ^key))
def field(%{path: [as | _]}, key, _), do: dynamic(field(as(^as), ^key))
def field(%{path: [], aliases: {as, _, _}}, key, _), do: dynamic(field(as(^as), ^key))
def current_alias(%{path: [as | _]}), do: as
def current_alias(%{path: [], aliases: {as, _, _}}), do: as
def current_type(%{types: [type | _]}), do: type
def current_type(%{types: [], aliases: {_, type, _}}), do: type
def negate(builder, fun) do
builder
|> do_negate()
|> fun.()
|> case do
{nested, result} -> {Map.put(nested, :negate?, builder.negate?), result}
:error -> :error
end
end
defp do_negate(%{negate?: prev} = builder), do: %{builder | negate?: not prev}
defp merge(old, new) do
%{
old
| root_query: new.root_query,
aliases: new.aliases,
next_alias_index: new.next_alias_index
}
end
defp merge_query(%{query: nil} = old, new) do
%{
old
| root_query: new.root_query,
aliases: new.aliases,
next_alias_index: new.next_alias_index
}
end
defp merge_query(old, new) do
%{
old
| query: new.query,
aliases: new.aliases,
next_alias_index: new.next_alias_index
}
end
def step_into(builder, _key, subquery, fun) do
%{builder | query: subquery, in_subquery?: true, negate?: false}
|> add_aliased()
|> fun.()
|> case do
{nested, condition} ->
where =
nested.query
|> from(select: fragment("*"), where: ^condition)
|> exists(builder)
{merge(builder, nested), where}
:error ->
:error
end
end
defp exists(subquery, %{negate?: false}), do: dynamic(exists(subquery))
defp exists(subquery, %{negate?: true}), do: dynamic(not exists(subquery))
def from_root(builder, fun) do
%{builder | path: [], types: [], query: nil}
|> fun.()
|> case do
{nested, result} -> {merge(builder, nested), result}
:error -> :error
end
end
def with_join(builder, key, fun) do
builder
|> add_aliased_join(key)
|> fun.()
|> case do
{nested, result} -> {merge_query(builder, nested), result}
:error -> :error
end
end
defp update_query(%{query: nil} = builder, fun), do: Map.update!(builder, :root_query, fun)
defp update_query(builder, fun), do: Map.update!(builder, :query, fun)
def add_aliased(builder) do
{builder, as} = do_alias(builder)
type = get_type(builder.query)
parent_path = Enum.map(builder.path, &{:parent, &1})
%{builder | path: [as | parent_path], types: [type | builder.types]}
end
defp do_alias(builder) do
{builder, as} = next_alias(builder)
builder = update_query(builder, &aliased_from(&1, as))
{builder, as}
end
def add_aliased_join(builder, key) do
{builder, as} = next_alias(builder)
left = current_alias(builder)
type =
case Infer.Util.Ecto.association_details(current_type(builder), key) do
%_{related: type} -> type
end
builder = update_query(builder, &aliased_join(&1, left, key, as))
%{builder | path: [as | builder.path], types: [type | builder.types]}
end
defp next_alias(%{next_alias_index: i} = builder) do
as = "a#{i}" |> String.to_existing_atom()
builder = %{builder | next_alias_index: i + 1}
{builder, as}
end
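  # Ecto's `:as` option in `from/2` and `join/5` must be a compile-time atom
  # here, so the clauses below are unrolled once per supported alias instead
  # of interpolating the atom at runtime.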
defp aliased_from(queryable, :a0), do: from(q in queryable, as: :a0)
defp aliased_from(queryable, :a1), do: from(q in queryable, as: :a1)
defp aliased_from(queryable, :a2), do: from(q in queryable, as: :a2)
defp aliased_from(queryable, :a3), do: from(q in queryable, as: :a3)
defp aliased_from(queryable, :a4), do: from(q in queryable, as: :a4)
defp aliased_from(queryable, :a5), do: from(q in queryable, as: :a5)
defp aliased_from(queryable, :a6), do: from(q in queryable, as: :a6)
defp aliased_from(queryable, :a7), do: from(q in queryable, as: :a7)
defp aliased_from(queryable, :a8), do: from(q in queryable, as: :a8)
defp aliased_from(queryable, :a9), do: from(q in queryable, as: :a9)
defp aliased_from(queryable, :a10), do: from(q in queryable, as: :a10)
defp aliased_from(queryable, :a11), do: from(q in queryable, as: :a11)
defp aliased_from(queryable, :a12), do: from(q in queryable, as: :a12)
defp aliased_from(queryable, :a13), do: from(q in queryable, as: :a13)
defp aliased_from(queryable, :a14), do: from(q in queryable, as: :a14)
defp aliased_from(queryable, :a15), do: from(q in queryable, as: :a15)
defp aliased_from(queryable, :a16), do: from(q in queryable, as: :a16)
defp aliased_from(queryable, :a17), do: from(q in queryable, as: :a17)
defp aliased_from(queryable, :a18), do: from(q in queryable, as: :a18)
defp aliased_from(queryable, :a19), do: from(q in queryable, as: :a19)
defp aliased_from(queryable, :a20), do: from(q in queryable, as: :a20)
defp aliased_from(queryable, :a21), do: from(q in queryable, as: :a21)
defp aliased_from(queryable, :a22), do: from(q in queryable, as: :a22)
defp aliased_from(queryable, :a23), do: from(q in queryable, as: :a23)
defp aliased_from(queryable, :a24), do: from(q in queryable, as: :a24)
defp aliased_from(queryable, :a25), do: from(q in queryable, as: :a25)
defp aliased_from(queryable, :a26), do: from(q in queryable, as: :a26)
defp aliased_from(queryable, :a27), do: from(q in queryable, as: :a27)
defp aliased_from(queryable, :a28), do: from(q in queryable, as: :a28)
defp aliased_from(queryable, :a29), do: from(q in queryable, as: :a29)
defp aliased_from(queryable, :a30), do: from(q in queryable, as: :a30)
defp aliased_from(queryable, :a31), do: from(q in queryable, as: :a31)
defp aliased_from(queryable, :a32), do: from(q in queryable, as: :a32)
defp aliased_from(queryable, :a33), do: from(q in queryable, as: :a33)
defp aliased_from(queryable, :a34), do: from(q in queryable, as: :a34)
defp aliased_from(queryable, :a35), do: from(q in queryable, as: :a35)
defp aliased_from(queryable, :a36), do: from(q in queryable, as: :a36)
defp aliased_from(queryable, :a37), do: from(q in queryable, as: :a37)
defp aliased_from(queryable, :a38), do: from(q in queryable, as: :a38)
defp aliased_from(queryable, :a39), do: from(q in queryable, as: :a39)
defp aliased_join(queryable, left, key, :a0),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a0)
defp aliased_join(queryable, left, key, :a1),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a1)
defp aliased_join(queryable, left, key, :a2),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a2)
defp aliased_join(queryable, left, key, :a3),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a3)
defp aliased_join(queryable, left, key, :a4),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a4)
defp aliased_join(queryable, left, key, :a5),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a5)
defp aliased_join(queryable, left, key, :a6),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a6)
defp aliased_join(queryable, left, key, :a7),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a7)
defp aliased_join(queryable, left, key, :a8),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a8)
defp aliased_join(queryable, left, key, :a9),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a9)
defp aliased_join(queryable, left, key, :a10),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a10)
defp aliased_join(queryable, left, key, :a11),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a11)
defp aliased_join(queryable, left, key, :a12),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a12)
defp aliased_join(queryable, left, key, :a13),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a13)
defp aliased_join(queryable, left, key, :a14),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a14)
defp aliased_join(queryable, left, key, :a15),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a15)
defp aliased_join(queryable, left, key, :a16),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a16)
defp aliased_join(queryable, left, key, :a17),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a17)
defp aliased_join(queryable, left, key, :a18),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a18)
defp aliased_join(queryable, left, key, :a19),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a19)
defp aliased_join(queryable, left, key, :a20),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a20)
defp aliased_join(queryable, left, key, :a21),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a21)
defp aliased_join(queryable, left, key, :a22),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a22)
defp aliased_join(queryable, left, key, :a23),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a23)
defp aliased_join(queryable, left, key, :a24),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a24)
defp aliased_join(queryable, left, key, :a25),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a25)
defp aliased_join(queryable, left, key, :a26),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a26)
defp aliased_join(queryable, left, key, :a27),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a27)
defp aliased_join(queryable, left, key, :a28),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a28)
defp aliased_join(queryable, left, key, :a29),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a29)
defp aliased_join(queryable, left, key, :a30),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a30)
defp aliased_join(queryable, left, key, :a31),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a31)
defp aliased_join(queryable, left, key, :a32),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a32)
defp aliased_join(queryable, left, key, :a33),
do: join(queryable, :inner, [{^left, l}], assoc(l, ^key), as: :a33)
end
|
lib/infer/ecto/query/builder.ex
| 0.776284
| 0.437283
|
builder.ex
|
starcoder
|
defmodule VintageNetMobile.Modem.UbloxTOBYL2 do
@behaviour VintageNetMobile.Modem
@moduledoc """
# u-blox TOBY-L2 support
The u-blox TOBY-L2 is a series of LTE Cat 4 modules with HSPA+ and/or 2G
fallback. Here's an example configuration:
```elixir
VintageNet.configure(
"ppp0",
%{
type: VintageNetMobile,
vintage_net_mobile: %{
modem: VintageNetMobile.Modem.UbloxTOBYL2,
service_providers: [
%{apn: "lte-apn", usage: :eps_bearer},
%{apn: "old-apn", usage: :pdp}
]
}
}
)
```
This implementation currently requires APNs to be annotated for whether
they are to be used on LTE (`:eps_bearer`) or on UMTS/GPRS (`:pdp`).
## Required Linux kernel options
* CONFIG_USB_SERIAL=m
* CONFIG_USB_SERIAL_WWAN=m
* CONFIG_USB_SERIAL_OPTION=m
## Required modem preparation
The Toby L2 is a composite USB device that can be configured to expose
various different interfaces. By default, it has one CDC ACM interface. This
implementation requires two, so you have to send it the following over a
tty interface (via `Circuits.UART` or externally):
```
AT+UUSBCONF=2
```
  That command is saved in NVRAM and only needs to be sent once. See section
"19.17 USB profiles configuration +UUSBCONF" in the [u-blox AT commands
manual](https://www.u-blox.com/en/docs/UBX-13002752)
"""
# Useful references:
# * AT commands - https://www.u-blox.com/en/docs/UBX-13002752
alias VintageNetMobile.{ExChat, SignalMonitor, PPPDConfig, Chatscript}
alias VintageNet.Interface.RawConfig
@impl VintageNetMobile.Modem
def normalize(config) do
config
|> require_service_providers()
end
defp require_service_providers(%{type: VintageNetMobile, vintage_net_mobile: mobile} = config) do
providers = Map.get(mobile, :service_providers, [])
if eps_bearer(providers) == nil or pdp(providers) == nil do
raise ArgumentError,
"Must provide at least two service_providers and annotate APNs with their usage (:eps_bearer and :pdp)"
end
config
end
@impl VintageNetMobile.Modem
def add_raw_config(raw_config, %{vintage_net_mobile: mobile} = _config, opts) do
ifname = raw_config.ifname
files = [{Chatscript.path(ifname, opts), chatscript(mobile.service_providers)}]
child_specs = [
{ExChat, [tty: "ttyACM1", speed: 115_200]},
{SignalMonitor, [ifname: ifname, tty: "ttyACM1"]}
]
%RawConfig{
raw_config
| files: files,
child_specs: child_specs
}
|> PPPDConfig.add_child_spec("ttyACM2", 115_200, opts)
end
defp chatscript(service_providers) do
lte_provider = eps_bearer(service_providers)
other_provider = pdp(service_providers)
[
Chatscript.prologue(120),
"""
# Enter airplane mode
OK AT+CFUN=4
# Delete existing contexts
OK AT+CGDEL
# Define PDP context
OK AT+UCGDFLT=1,"IP","#{lte_provider.apn}"
OK AT+CGDCONT=1,"IP","#{other_provider.apn}"
OK AT+CFUN=1
""",
Chatscript.connect()
]
|> IO.iodata_to_binary()
end
defp find_by_usage(service_providers, what) do
Enum.find(service_providers, &(Map.get(&1, :usage) == what))
end
defp eps_bearer(service_providers) do
find_by_usage(service_providers, :eps_bearer)
end
defp pdp(service_providers) do
find_by_usage(service_providers, :pdp)
end
end
|
lib/vintage_net_mobile/modem/ublox_TOBY_L2.ex
| 0.819713
| 0.730866
|
ublox_TOBY_L2.ex
|
starcoder
|
defmodule XmlJson.SaxHandler do
@moduledoc """
A generic Sax handler that creates a basic JSON version out of an XML document
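  ## Example
  A minimal usage sketch (the XML input is illustrative; the shape of the
  returned element map follows the handler callbacks below):
  ```elixir
  {:ok, element} = XmlJson.SaxHandler.parse_string(~s(<greeting lang="en">hello</greeting>))
  # element => %{name: "greeting", attributes: [{"lang", "en"}], text: "hello"}
  ```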
"""
@behaviour Saxy.Handler
def parse_string(xml) do
case Saxy.parse_string(xml, __MODULE__, []) do
{:ok, _} = ok ->
ok
{:halt, state, rest} ->
{:error,
"Deserialization failed while walking XML. Failed with state of #{inspect(state)} and remaining XML of #{
inspect(rest)
}"}
{:error, _} = error ->
error
end
end
def encode(simple_form) do
xml = Saxy.encode!(simple_form)
{:ok, xml}
rescue
e ->
{:error, e}
end
def handle_event(:start_document, _prolog, _state) do
{:ok, [%{attributes: []}]}
end
def handle_event(:end_document, _data, [%{children: [root]}]) do
{:ok, root}
end
def handle_event(:start_element, {name, attributes}, state) do
[parent | _rest] = state
parent_ns = extract_ns_attributes(parent.attributes)
current_element = %{
name: name,
attributes: attributes ++ parent_ns
}
{:ok, [current_element | state]}
end
def handle_event(:end_element, _name, state) do
[current_element, parent | rest] = state
parent =
Map.update(parent, :children, [current_element], fn children ->
children ++ [current_element]
end)
{:ok, [parent | rest]}
end
def handle_event(:characters, chars, state) do
[current_element | rest] = state
{:ok, [maybe_add_text(current_element, chars) | rest]}
end
defp maybe_add_text(%{children: _} = element, _chars), do: element
defp maybe_add_text(element, chars), do: Map.put(element, :text, try_parse(chars))
defp extract_ns_attributes(attrs) do
Enum.filter(attrs, &is_ns_attr?/1)
end
defp try_parse(text) do
with :error <- integer_parse(text),
:error <- float_parse(text),
:error <- boolean_parse(String.downcase(text)) do
String.trim(text, " ")
else
parsed -> parsed
end
end
defp boolean_parse("true"), do: true
defp boolean_parse("false"), do: false
defp boolean_parse(_), do: :error
defp integer_parse(value) do
case Integer.parse(value) do
{parsed, ""} -> parsed
_ -> :error
end
rescue
_ -> :error
end
defp float_parse(value) do
case Float.parse(value) do
{parsed, ""} -> parsed
_ -> :error
end
rescue
_ -> :error
end
defp is_ns_attr?({"xmlns", _v}), do: true
defp is_ns_attr?({"xmlns:" <> _rest, _v}), do: true
defp is_ns_attr?(_), do: false
end
|
lib/xml_json/sax_handler.ex
| 0.751739
| 0.414336
|
sax_handler.ex
|
starcoder
|
defmodule Jeff.Command.LedSettings do
@moduledoc """
Reader LED control command
OSDP v2.2 Specification Reference: 6.10
Temporary Control Code Values
  | Code | Description |
  |------|-------------|
| 0x00 | NOP – do not alter this LED's temporary settings. The remaining values of the temporary settings record are ignored. |
| 0x01 | Cancel any temporary operation and display this LED's permanent state immediately. |
| 0x02 | Set the temporary state as given and start timer immediately. |
Permanent Control Code Values
  | Code | Description |
  |------|-------------|
  | 0x00 | NOP – do not alter this LED's permanent settings. The remaining values of the permanent settings record are ignored. |
| 0x01 | Set the permanent state as given. |
Color Values
  | Value | Description |
  |-------|-------------|
| 0 | Black (off/unlit) |
| 1 | Red |
| 2 | Green |
| 3 | Amber |
| 4 | Blue |
| 5 | Magenta |
| 6 | Cyan |
| 7 | White |
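  Example
  A hypothetical encode call (field values are illustrative); note that
  `temp_timer` occupies two little-endian bytes in the resulting binary:
      Jeff.Command.LedSettings.encode(temp_mode: 0x02, temp_on_color: 0x02, temp_timer: 30)
      # => <<0, 0, 2, 0, 0, 2, 0, 30, 0, 0, 0, 0, 0, 0>>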
"""
defstruct reader: 0x0,
led: 0x0,
temp_mode: 0x00,
temp_on_time: 0x00,
temp_off_time: 0x00,
temp_on_color: 0x00,
temp_off_color: 0x00,
temp_timer: 0x00,
perm_mode: 0x00,
perm_on_time: 0x00,
perm_off_time: 0x00,
perm_on_color: 0x00,
perm_off_color: 0x00
@type t :: %__MODULE__{
reader: integer(),
led: integer(),
temp_mode: 0x00 | 0x01 | 0x02,
temp_on_time: integer(),
temp_off_time: integer(),
temp_on_color: 0x00..0x07,
temp_off_color: 0x00..0x07,
temp_timer: integer(),
perm_mode: 0x00 | 0x01,
perm_on_time: integer(),
perm_off_time: integer(),
perm_on_color: 0x00..0x07,
perm_off_color: 0x00..0x07
}
@type param() ::
{:reader, integer()}
| {:led, integer()}
| {:temp_mode, 0x00 | 0x01 | 0x02}
| {:temp_on_time, integer()}
| {:temp_off_time, integer()}
| {:temp_on_color, 0x00..0x07}
| {:temp_off_color, 0x00..0x07}
| {:temp_timer, integer()}
| {:perm_mode, 0x00 | 0x01}
| {:perm_on_time, integer()}
| {:perm_off_time, integer()}
| {:perm_on_color, 0x00..0x07}
| {:perm_off_color, 0x00..0x07}
@type params() :: t() | [param()]
@spec new(params()) :: t()
def new(params) do
struct(__MODULE__, params)
end
@spec encode(params()) :: binary()
def encode(params) do
settings = new(params)
<<
settings.reader,
settings.led,
settings.temp_mode,
settings.temp_on_time,
settings.temp_off_time,
settings.temp_on_color,
settings.temp_off_color,
settings.temp_timer::size(2)-unit(8)-little,
settings.perm_mode,
settings.perm_on_time,
settings.perm_off_time,
settings.perm_on_color,
settings.perm_off_color
>>
end
end
|
lib/jeff/command/led_settings.ex
| 0.786541
| 0.568895
|
led_settings.ex
|
starcoder
|
defmodule Codenamex.Game.Dictionary do
@moduledoc """
This module manages the game dictionary.
  We can fetch words from this module with a call to `fetch/1`.
  """
"""
@words [
"Hollywood",
"Screen",
"Play",
"Marble",
"Dinosaur",
"Cat",
"Pitch",
"Bond",
"Greece",
"Deck",
"Spike",
"Center",
"Vacuum",
"Unicorn",
"Undertaker",
"Sock",
"Loch Ness",
"Horse",
"Berlin",
"Platypus",
"Port",
"Chest",
"Box",
"Compound",
"Ship",
"Watch",
"Space",
"Flute",
"Tower",
"Death",
"Well",
"Fair",
"Tooth",
"Staff",
"Bill",
"Shot",
"King",
"Pan",
"Square",
"Buffalo",
"Scientist",
"Chick",
"Atlantis",
"Spy",
"Mail",
"Nut",
"Log",
"Pirate",
"Face",
"Stick",
"Disease",
"Yard",
"Mount",
"Slug",
"Dice",
"Lead",
"Hook",
"Carrot",
"Poison",
"Stock",
"Foot",
"Torch",
"Arm",
"Figure",
"Mine",
"Suit",
"Crane",
"Beijing",
"Mass",
"Microscope",
"Engine",
"China",
"Straw",
"Pants",
"Europe",
"Boot",
"Princess",
"Link",
"Luck",
"Olive",
"Palm",
"Teacher",
"Thumb",
"Octopus",
"Hood",
"Tie",
"Doctor",
"Wake",
"Cricket",
"Millionaire",
"New York",
"State",
"Bermuda",
"Park",
"Turkey",
"Chocolate",
"Trip",
"Racket",
"Bat",
"Jet",
"Shakespeare",
"Bolt",
"Switch",
"Wall",
"Soul",
"Ghost",
"Time",
"Dance",
"Amazon",
"Grace",
"Moscow",
"Pumpkin",
"Antarctica",
"Whip",
"Heart",
"Table",
"Ball",
"Fighter",
"Cold",
"Day",
"Spring",
"Match",
"Diamond",
"Centaur",
"March",
"Roulette",
"Dog",
"Cross",
"Wave",
"Duck",
"Wind",
"Spot",
"Skyscraper",
"Paper",
"Apple",
"Oil",
"Cook",
"Fly",
"Cast",
"Bear",
"Pin",
"Thief",
"Trunk",
"America",
"Novel",
"Cell",
"Bow",
"Model",
"Knife",
"Knight",
"Court",
"Iron",
"Whale",
"Shadow",
"Contract",
"Mercury",
"Conductor",
"Seal",
"Car",
"Ring",
"Kid",
"Piano",
"Laser",
"Sound",
"Pole",
"Superhero",
"Revolution",
"Pit",
"Gas",
"Glass",
"Washington",
"Bark",
"Snow",
"Ivory",
"Pipe",
"Cover",
"Degree",
"Tokyo",
"Church",
"Pie",
"Tube",
"Block",
"Comic",
"Fish",
"Bridge",
"Moon",
"Part",
"Aztec",
"Smuggler",
"Train",
"Embassy",
"Pupil",
"Scuba Diver",
"Ice",
"Tap",
"Code",
"Shoe",
"Server",
"Club",
"Row",
"Pyramid",
"Bug",
"Penguin",
"Pound",
"Himalayas",
"Czech",
"Rome",
"Eye",
"Board",
"Bed",
"Point",
"France",
"Mammoth",
"Cotton",
"Robin",
"Net",
"Bugle",
"Maple",
"England",
"Field",
"Robot",
"Plot",
"Africa",
"Tag",
"Mouth",
"Kiwi",
"Mole",
"School",
"Sink",
"Pistol",
"Opera",
"Mint",
"Root",
"Sub",
"Crown",
"Back",
"Plane",
"Mexico",
"Cloak",
"Circle",
"Tablet",
"Australia",
"Green",
"Egypt",
"Line",
"Lawyer",
"Witch",
"Parachute",
"Crash",
"Gold",
"Note",
"Lion",
"Plastic",
"Web",
"Ambulance",
"Hospital",
"Spell",
"Lock",
"Water",
"London",
"Casino",
"Cycle",
"Bar",
"Cliff",
"Round",
"Bomb",
"Giant",
"Hand",
"Ninja",
"Rose",
"Slip",
"Limousine",
"Pass",
"Theater",
"Plate",
"Satellite",
"Ketchup",
"Hotel",
"Tail",
"Tick",
"Ground",
"Police",
"Dwarf",
"Fan",
"Dress",
"Saturn",
"Grass",
"Brush",
"Chair",
"Rock",
"Pilot",
"Telescope",
"File",
"Lab",
"India",
"Ruler",
"Nail",
"Swing",
"Olympus",
"Change",
"Date",
"Stream",
"Missile",
"Scale",
"Band",
"Angel",
"Press",
"Berry",
"Card",
"Check",
"Draft",
"Head",
"Lap",
"Orange",
"Ice Cream",
"Film",
"Washer",
"Pool",
"Shark",
"Van",
"String",
"Calf",
"Hawk",
"Eagle",
"Needle",
"Forest",
"Dragon",
"Key",
"Belt",
"Cap",
"Drill",
"Glove",
"Paste",
"Fall",
"Fire",
"Spider",
"Spine",
"Soldier",
"Horn",
"Queen",
"Ham",
"Litter",
"Life",
"Temple",
"Rabbit",
"Button",
"Game",
"Star",
"Jupiter",
"Vet",
"Night",
"Air",
"Battery",
"Genius",
"Shop",
"Bottle",
"Stadium",
"Alien",
"Light",
"Triangle",
"Lemon",
"Nurse",
"Drop",
"Track",
"Bank",
"Germany",
"Worm",
"Ray",
"Capital",
"Strike",
"War",
"Concert",
"Honey",
"Canada",
"Buck",
"Snowman",
"Beat",
"Jam",
"Copper",
"Beach",
"Bell",
"Leprechaun",
"Phoenix",
"Force",
"Boom",
"Fork",
"Alps",
"Post",
"Fence",
"Kangaroo",
"Mouse",
"Mug",
"Horseshoe",
"Scorpion",
"Agent",
"Helicopter",
"Hole",
"Organ",
"Jack",
"Charge"
]
def fetch(amount) do
@words |> Enum.shuffle |> Enum.take(amount)
end
end
|
lib/codenamex/game/dictionary.ex
| 0.589835
| 0.604545
|
dictionary.ex
|
starcoder
|
defmodule LindaEx do
@moduledoc """
GenServer implementation of Linda-style tuple spaces.
Uses an ETS table internally.
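  ## Example
  A minimal sketch (the space name and tuple contents are illustrative):
      {:ok, _pid} = LindaEx.start_link(:my_space)
      LindaEx.write(:my_space, {:job, 1, "payload"})
      LindaEx.read(:my_space, {:job, :"$int", :"$binary"})
      # => {:job, 1, "payload"}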
"""
@type space :: :ets.tab
@type template :: tuple | :ets.match_spec | :'_'
use GenServer
@spec start_link(atom) :: GenServer.on_start
def start_link(name) do
GenServer.start_link __MODULE__, name, name: name
end
@doc """
If a tuple matching `template` exists in `ts`, return that tuple. If no
match is found and `mode` is `:block`, block until such a tuple is written
to the space. If no match is found and `mode` is `:noblock`, return nil.
Read tuples are _not_ removed from the space.
"""
@spec read(space, template, :block) :: tuple
@spec read(space, template, :noblock) :: tuple | nil
def read(ts, template, mode \\ :block) do
GenServer.call ts, {:read, translate_template(template), mode}, :infinity
end
@doc """
Return a list of all tuples in `ts` that matches `template`. Return the
empty list if no tuples match.
Read tuples are _not_ removed from the space.
"""
@spec read_all(space, template) :: [tuple]
def read_all(ts, template) do
GenServer.call ts, {:read_all, translate_template(template)}
end
@doc """
If a tuple matching `template` exists in `ts`, return that tuple. If no
match is found and `mode` is `:block`, block until such a tuple is written
to the space. If no match is found and `mode` is `:noblock`, return nil.
Taken tuples _are removed_ from the space.
"""
@spec take(space, template, :block) :: tuple
@spec take(space, template, :noblock) :: tuple | nil
def take(ts, template, mode \\ :block) do
GenServer.call ts, {:take, translate_template(template), mode}, :infinity
end
@doc """
Return a list of all tuples in `ts` that matches `template`. Return an
empty list if no tuples match.
Taken tuples _are removed_ from the space.
"""
@spec take_all(space, template) :: [tuple]
def take_all(ts, template) do
GenServer.call ts, {:take_all, translate_template(template)}
end
@doc """
Write a tuple to `ts`. Duplicates and empty tuples are not allowed.
"""
@spec write(space, tuple) :: :ok
def write(ts, tuple) do
GenServer.cast ts, {:write, tuple}
end
@doc """
Update a tuple in the space. `update_op` will be called with the matched
tuple and is expected to return an updated tuple.
"""
@spec update(space, template, (tuple -> tuple)) :: :ok
def update(ts, template, update_op) do
tuple = take ts, template
write ts, update_op.(tuple)
end
@doc """
Return the number of tuples in `ts`.
"""
@spec count(space) :: non_neg_integer
def count(ts) do
GenServer.call ts, :count
end
defmodule State do
defstruct space: nil, waiting_read: %{}, waiting_take: %{}
def add_waiting(state, from = {pid, _ref}, template, action) do
monitor_ref = Process.monitor pid
case action do
:read ->
          %{state | waiting_read: Map.put(state.waiting_read, monitor_ref, {from, template})}
:take ->
          %{state | waiting_take: Map.put(state.waiting_take, monitor_ref, {from, template})}
end
end
end
def init(name) do
space = :ets.new name, [:bag]
{:ok, %State{space: space}}
end
def handle_call({:read, template, mode}, from, state) do
read_or_take :read, template, mode, from, state
end
def handle_call({:take, template, mode}, from, state) do
read_or_take :take, template, mode, from, state
end
def handle_call({:read_all, template}, _from, state) do
{:reply, :ets.select(state.space, template), state}
end
def handle_call({:take_all, template}, _from, state) do
tuples = :ets.select state.space, template
:ets.select_delete state.space, [put_elem(hd(template), 2, [true])]
{:reply, tuples, state}
end
def handle_call(:count, _from, state) do
{:reply, :ets.info(state.space, :size), state}
end
def handle_info({:DOWN, monitor_ref, :process, _pid, _info}, state) do
    waiting_read = Map.delete(state.waiting_read, monitor_ref)
    waiting_take = Map.delete(state.waiting_take, monitor_ref)
{:noreply, %{state | waiting_read: waiting_read, waiting_take: waiting_take}}
end
defp read_or_take(action, template, mode, from, state) do
case :ets.select(state.space, template, 1) do
:"$end_of_table" ->
case mode do
:block ->
{:noreply, State.add_waiting(state, from, template, action)}
:noblock ->
{:reply, nil, state}
end
{[tuple], _cont} ->
if action == :take do
:ets.delete_object state.space, tuple
end
{:reply, tuple, state}
end
end
def handle_cast({:write, tuple}, state) do
{tuple_taken, state} = notify_waiting(state, tuple)
unless tuple_taken, do: :ets.insert(state.space, tuple)
{:noreply, state}
end
defp notify_waiting(state, tuple) do
    notified_readers =
      state.waiting_read
      |> Enum.filter(fn {_monitor_ref, {reader, template}} ->
        case :ets.test_ms(tuple, template) do
          {:ok, false} ->
            false
          {:ok, _} ->
            GenServer.reply(reader, tuple)
            true
        end
      end)
      |> Enum.map(fn {monitor_ref, _} -> monitor_ref end)
    waiting_read = Map.drop(state.waiting_read, notified_readers)
taker = Enum.find state.waiting_take, fn({_monitor_ref, {taker, template}}) ->
case :ets.test_ms(tuple, template) do
{:ok, false} ->
false
{:ok, _} ->
GenServer.reply taker, tuple
true
end
end
waiting_take = case taker do
{monitor_ref, _} ->
Process.demonitor monitor_ref, [:flush]
        Map.delete(state.waiting_take, monitor_ref)
nil ->
state.waiting_take
end
{taker, %{state | waiting_read: waiting_read, waiting_take: waiting_take}}
end
defp translate_template(:"_"), do: [{:"_", [], [:"$_"]}]
defp translate_template(match_spec) when is_list(match_spec), do: match_spec
defp translate_template(template) when is_tuple(template) do
{template, guards, _} = template
|> Tuple.to_list
|> Enum.reduce({[], [], 1}, fn(elem, {template, guards, num}) ->
case make_guard(elem, num) do
nil ->
{[elem | template], guards, num}
{var, guard} ->
{[var | template], [guard | guards], num+1}
end
end)
[{List.to_tuple(Enum.reverse(template)), Enum.reverse(guards), [:"$_"]}]
end
defp make_guard(elem, num) do
case atom_to_guard(elem) do
nil ->
nil
:"==" ->
var = :"$#{num}"
{var, {:"==", var, {:const, elem}}}
guard ->
var = :"$#{num}"
{var, {guard, var}}
end
end
defp atom_to_guard(:"$atom"), do: :is_atom
defp atom_to_guard(:"$binary"), do: :is_binary
defp atom_to_guard(:"$string"), do: :is_binary
defp atom_to_guard(:"$float"), do: :is_float
defp atom_to_guard(:"$function"), do: :is_function
defp atom_to_guard(:"$int"), do: :is_integer
defp atom_to_guard(:"$integer"), do: :is_integer
defp atom_to_guard(:"$list"), do: :is_list
defp atom_to_guard(:"$number"), do: :is_number
defp atom_to_guard(:"$pid"), do: :is_pid
defp atom_to_guard(:"$port"), do: :is_port
defp atom_to_guard(:"$reference"), do: :is_reference
defp atom_to_guard(:"$tuple"), do: :is_tuple
defp atom_to_guard(atom) when is_atom(atom) do
case to_string(atom) =~ ~r/^\$\d+/ do
true -> :"=="
false -> nil
end
end
defp atom_to_guard(_), do: nil
end
|
lib/lindaex/tuple_space.ex
| 0.79653
| 0.612194
|
tuple_space.ex
|
starcoder
|
defmodule Jsonpatch do
@moduledoc """
  An implementation of [RFC 6902](https://tools.ietf.org/html/rfc6902) in pure Elixir.
  A patch can be a single change or a list of changes, so either a single JSON patch
  or a list of patches can be provided. Every patch belongs to a certain operation,
  which influences how it is applied.
  According to [RFC 6901](https://tools.ietf.org/html/rfc6901), escaping of `/` and `~` is done
  by using `~1` for `/` and `~0` for `~`.
"""
alias Jsonpatch.FlatMap
alias Jsonpatch.Operation
alias Jsonpatch.Operation.Add
alias Jsonpatch.Operation.Copy
alias Jsonpatch.Operation.Move
alias Jsonpatch.Operation.Remove
alias Jsonpatch.Operation.Replace
alias Jsonpatch.Operation.Test
@typedoc """
A valid Jsonpatch operation by RFC 6902
"""
@type t :: Add.t() | Remove.t() | Replace.t() | Copy.t() | Move.t() | Test.t()
@typedoc """
  Describes an error that occurred while patching.
"""
@type error :: {:error, :invalid_path | :invalid_index | :test_failed, bitstring()}
@doc """
  Apply a Jsonpatch to a map or struct. The whole patch will not be applied
  when any path is invalid or any other error occurs.
## Examples
iex> patch = [
...> %Jsonpatch.Operation.Add{path: "/age", value: 33},
...> %Jsonpatch.Operation.Replace{path: "/hobbies/0", value: "Elixir!"},
...> %Jsonpatch.Operation.Replace{path: "/married", value: true},
...> %Jsonpatch.Operation.Remove{path: "/hobbies/1"},
...> %Jsonpatch.Operation.Remove{path: "/hobbies/2"},
...> %Jsonpatch.Operation.Copy{from: "/name", path: "/surname"},
...> %Jsonpatch.Operation.Move{from: "/home", path: "/work"},
...> %Jsonpatch.Operation.Test{path: "/name", value: "Bob"}
...> ]
iex> target = %{"name" => "Bob", "married" => false, "hobbies" => ["Sport", "Elixir", "Football"], "home" => "Berlin"}
iex> Jsonpatch.apply_patch(patch, target)
{:ok, %{"name" => "Bob", "married" => true, "hobbies" => ["Elixir!"], "age" => 33, "surname" => "Bob", "work" => "Berlin"}}
iex> # Patch will not be applied if test fails. The target will not be changed.
iex> patch = [
...> %Jsonpatch.Operation.Add{path: "/age", value: 33},
...> %Jsonpatch.Operation.Test{path: "/name", value: "Alice"}
...> ]
iex> target = %{"name" => "Bob", "married" => false, "hobbies" => ["Sport", "Elixir", "Football"], "home" => "Berlin"}
iex> Jsonpatch.apply_patch(patch, target)
{:error, :test_failed, "Expected value 'Alice' at '/name'"}
"""
@spec apply_patch(Jsonpatch.t() | list(Jsonpatch.t()), map()) ::
{:ok, map()} | Jsonpatch.error()
def apply_patch(json_patch, target)
def apply_patch(json_patch, %{} = target) when is_list(json_patch) do
    # Operations MUST be sorted before being applied because a remove operation for path "/foo/2"
    # must be done before the remove operation for path "/foo/1". Without this ordering, the wrong
    # value could be removed, or only one value could be removed instead of two.
result =
json_patch
|> Enum.map(&create_sort_value/1)
|> Enum.sort(fn {sort_value_1, _}, {sort_value_2, _} -> sort_value_1 >= sort_value_2 end)
|> Enum.map(fn {_, patch} -> patch end)
|> Enum.reduce(target, &Jsonpatch.Operation.apply_op/2)
case result do
{:error, _, _} = error -> error
ok_result -> {:ok, ok_result}
end
end
def apply_patch(json_patch, %{} = target) do
result = Operation.apply_op(json_patch, target)
case result do
{:error, _, _} = error -> error
ok_result -> {:ok, ok_result}
end
end
@doc """
Apply a Jsonpatch to a map or struct. In case of an error
it will raise an exception.
(See Jsonpatch.apply_patch/2 for more details)
"""
@spec apply_patch!(Jsonpatch.t() | list(Jsonpatch.t()), map()) :: map()
def apply_patch!(json_patch, target)
def apply_patch!(json_patch, target) do
case apply_patch(json_patch, target) do
{:ok, patched} -> patched
{:error, _, _} = error -> raise JsonpatchException, error
end
end
@doc """
Creates a patch from the difference of a source map to a target map.
## Examples
iex> source = %{"name" => "Bob", "married" => false, "hobbies" => ["Elixir", "Sport", "Football"]}
iex> destination = %{"name" => "Bob", "married" => true, "hobbies" => ["Elixir!"], "age" => 33}
iex> Jsonpatch.diff(source, destination)
[
%Add{path: "/age", value: 33},
%Replace{path: "/hobbies/0", value: "Elixir!"},
%Replace{path: "/married", value: true},
%Remove{path: "/hobbies/1"},
%Remove{path: "/hobbies/2"}
]
"""
@spec diff(map, map) :: list(Jsonpatch.t())
def diff(source, destination)
def diff(%{} = source, %{} = destination) do
source = FlatMap.parse(source)
destination = FlatMap.parse(destination)
[]
|> create_additions(source, destination)
|> create_replaces(source, destination)
|> create_removes(source, destination)
end
@doc """
Creates "add"-operations by using the keys of the destination and check their existence in the
source map. Source and destination has to be parsed to a flat map.
"""
@spec create_additions(list(Jsonpatch.t()), map, map) :: list(Jsonpatch.t())
def create_additions(accumulator \\ [], source, destination)
def create_additions(accumulator, %{} = source, %{} = destination) do
additions =
Map.keys(destination)
|> Enum.filter(fn key -> not Map.has_key?(source, key) end)
|> Enum.map(fn key ->
%Add{path: key, value: Map.get(destination, key)}
end)
accumulator ++ additions
end
@doc """
Creates "remove"-operations by using the keys of the destination and check their existence in the
source map. Source and destination has to be parsed to a flat map.
"""
@spec create_removes(list(Jsonpatch.t()), map, map) :: list(Jsonpatch.t())
def create_removes(accumulator \\ [], source, destination)
def create_removes(accumulator, %{} = source, %{} = destination) do
removes =
Map.keys(source)
|> Enum.filter(fn key -> not Map.has_key?(destination, key) end)
|> Enum.map(fn key -> %Remove{path: key} end)
accumulator ++ removes
end
@doc """
Creates "replace"-operations by comparing keys and values of source and destination. The source and
destination map have to be flat maps.
"""
@spec create_replaces(list(Jsonpatch.t()), map, map) :: list(Jsonpatch.t())
def create_replaces(accumulator \\ [], source, destination)
def create_replaces(accumulator, source, destination) do
replaces =
Map.keys(destination)
|> Enum.filter(fn key -> Map.has_key?(source, key) end)
|> Enum.filter(fn key -> Map.get(source, key) != Map.get(destination, key) end)
|> Enum.map(fn key ->
%Replace{path: key, value: Map.get(destination, key)}
end)
accumulator ++ replaces
end
# ===== ===== PRIVATE ===== =====
  # Creates an easily sortable value for an operation (computed once per patch)
defp create_sort_value(%{path: path} = operation) do
fragments = String.split(path, "/")
x = Jsonpatch.PathUtil.operation_sort_value?(operation) * 1_000_000 * 100_000_000
y = length(fragments) * 100_000_000
z =
case List.last(fragments) |> Integer.parse() do
:error -> 0
{int, _} -> int
end
    # Structure of the sort value:
    # x = kind of operation (via PathUtil)
    # y = number of path fragments (how deep does the path go?)
    # z = position within a list (if any)
    # xxxxyyyyyyzzzzzzzz
{x + y + z, operation}
end
end
|
lib/jsonpatch.ex
| 0.912292
| 0.510374
|
jsonpatch.ex
|
starcoder
|
defmodule Noizu.UserSettings.Setting do
@type t :: %__MODULE__{
setting: atom,
    stack: map()
}
defstruct [
setting: nil,
stack: %{}
]
#--------------------------------------------
# append/4
#--------------------------------------------
def append(%__MODULE__{} = a, nil, _path_prefix, _weight_offset), do: a
def append(nil, %__MODULE__{} = b, path_prefix, weight_offset) do
stack = if weight_offset != 0 do
Enum.reduce(b.stack, %{}, fn({k,v}, acc) ->
m_k = path_prefix ++ k
m_e = Enum.map(v, fn(e) -> update_in(e, [:weight], &((&1 || 0) + weight_offset)) end)
put_in(acc, [m_k], m_e)
end)
else
Enum.reduce(b.stack, %{}, fn({k,v}, acc) ->
m_k = path_prefix ++ k
put_in(acc, [m_k], v)
end)
end
%__MODULE__{setting: b.setting, stack: stack}
end
def append(%__MODULE__{} = a, %__MODULE__{} = b, path_prefix, weight_offset) do
m_b = append(nil, b, path_prefix, weight_offset)
merge(a, m_b)
end
#--------------------------------------------
# merge/2
#--------------------------------------------
def merge(%__MODULE__{} = a, nil), do: a
def merge(nil, %__MODULE__{} = b), do: b
def merge(%__MODULE__{} = a, %__MODULE__{} = b) do
stack = (Map.keys(a.stack) ++ Map.keys(b.stack))
|> Enum.reduce(%{},
fn(path, acc) ->
put_in(acc, [path], ((a.stack[path] || []) ++ (b.stack[path] || [])))
end)
%__MODULE__{a| stack: stack}
end
#--------------------------------------------
  # new/4
#--------------------------------------------
@doc """
Return new Setting struct.
"""
def new(setting, value, path, weight), do: %__MODULE__{setting: setting, stack: %{path => [%{weight: weight, value: value}]}}
#--------------------------------------------
# insert/5
#--------------------------------------------
@doc """
Insert a new entry at given path with specified weight.
"""
def insert(this, setting, value, path, weight \\ :auto)
def insert(nil, setting, value, path, weight), do: new(setting, value, path, weight)
def insert(%__MODULE__{} = this, _setting, value, path, weight), do: update_in(this, [Access.key(:stack), path], fn(entry) -> (entry || []) ++ [%{weight: weight, value: value}] end)
#--------------------------------------------
# effective/2
#--------------------------------------------
@doc """
  Determine the effective setting for the given nesting path. Pull all entries at the given
  level or lower, order them by weight, and return the value of the entry with the highest weight.
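  ## Example
  A minimal sketch (paths and weights are illustrative); the most specific path
  element comes first, followed by its parents:
      setting = Noizu.UserSettings.Setting.new(:theme, "light", [:org], 5)
      setting = Noizu.UserSettings.Setting.insert(setting, :theme, "dark", [:team, :org], 10)
      Noizu.UserSettings.Setting.effective(setting, [:team, :org])
      # => "dark"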
"""
def effective(nil, _path), do: {:error, :no_entry}
def effective(%__MODULE__{} = this, path) do
case pull(this, path) do
[] -> {:error, :no_entry}
stack when is_list(stack) -> (stack |> Enum.sort(&(&1.weight > &2.weight)) |> List.first()).value
end
end
#--------------------------------------------
# effective_for/2
#--------------------------------------------
@doc """
  Determine the effective setting for the given nesting paths. Pull all entries at the given
  level or lower for all paths, order them by weight, and return the value of the entry with
  the highest weight.
"""
def effective_for(nil, _paths), do: {:error, :no_entry}
def effective_for(%__MODULE__{} = this, paths) do
entries = Enum.map(paths, fn(path) -> pull(this, path) end) |> List.flatten()
case entries do
[] -> {:error, :no_entry}
stack when is_list(stack) -> (stack |> Enum.sort(&(&1.weight > &2.weight)) |> List.first()).value
end
end
#--------------------------------------------
# pull/3
#--------------------------------------------
@doc """
  Grab all entries for a path and its parents: [path, parent, parent's parent].
"""
def pull(this, path, acc \\ [])
def pull(%__MODULE__{} = this, [], acc), do: acc ++ (this.stack[[]] || [])
def pull(%__MODULE__{} = this, [_h|t] = path, acc), do: pull(this, t, acc ++ (this.stack[path] || []))
end
#-----------------------------------------------------------------------------
# Inspect Protocol
#-----------------------------------------------------------------------------
defimpl Inspect, for: Noizu.UserSettings.Setting do
import Inspect.Algebra
def inspect(entity, opts) do
cond do
opts.limit == :infinity ->
concat ["#Setting(", to_doc(entity.setting, opts), ")<", to_doc(entity.stack, opts), ">"]
opts.limit >= 499 ->
stack = Enum.reduce(entity.stack, %{}, fn({k, v}, acc) ->
put_in(acc, [k], %{effective: Noizu.UserSettings.Setting.effective(entity, k), entries: length(v)})
end)
concat ["#Setting(", to_doc(entity.setting, opts), ")<", to_doc(stack, opts), ">"]
opts.limit >= 99 ->
stack = Enum.reduce(entity.stack, %{}, fn({k, v}, acc) ->
put_in(acc, [k], length(v))
end)
concat ["#Setting(", to_doc(entity.setting, opts), ")<", to_doc(stack, opts), ">"]
true ->
stack = length(Map.keys(entity.stack))
concat ["#Setting(", to_doc(entity.setting, opts), ")<", to_doc(stack, opts), ">"]
end
end
end # end Inspect
|
lib/user_settings/setting.ex
| 0.677047
| 0.486027
|
setting.ex
|
starcoder
|
defmodule ScrapyCloudEx.Endpoints.Storage.JobQ do
@moduledoc """
Wraps the [JobQ](https://doc.scrapinghub.com/api/jobq.html) endpoint.
The JobQ API allows you to retrieve finished jobs from the queue.
"""
import ScrapyCloudEx.Endpoints.Guards
alias ScrapyCloudEx.Endpoints
alias ScrapyCloudEx.Endpoints.Helpers
alias ScrapyCloudEx.HttpAdapter.RequestConfig
@base_url "https://storage.scrapinghub.com/jobq"
@default_format :json
@param_aliases [
{:start_ts, :startts},
{:end_ts, :endts}
]
@valid_params [:spider, :state, :startts, :endts, :has_tag, :lacks_tag]
@doc """
Counts the jobs for the specified project.
The following parameters are supported in the `params` argument:
* `:spider` - the spider name.
* `:state` - return jobs with specified state. Supported values: `"pending"`, `"running"`,
`"finished"`, `"deleted"`.
* `:startts` - UNIX timestamp at which to begin results, in milliseconds.
* `:endts` - UNIX timestamp at which to end results, in milliseconds.
* `:count` - limit results by a given number of jobs.
* `:has_tag` - return jobs with specified tag. May be given multiple times, and will behave
as a logical `OR` operation among the values.
* `:lacks_tag` - return jobs that lack specified tag. May be given multiple times, and will
behave as a logical `AND` operation among the values.
The `opts` value is documented [here](ScrapyCloudEx.Endpoints.html#module-options).
See docs [here](https://doc.scrapinghub.com/api/jobq.html#jobq-project-id-count).
## Example
```
ScrapyCloudEx.Endpoints.Storage.JobQ.count("API_KEY", "14", state: "running", has_tag: "sometag")
# {:ok, 4}
```
"""
@spec count(String.t(), String.t(), Keyword.t(), Keyword.t()) :: ScrapyCloudEx.result(integer())
def count(api_key, project_id, params \\ [], opts \\ [])
when is_api_key(api_key)
when is_binary(project_id) and project_id != ""
when is_list(params)
when is_list(opts) do
make_request(api_key, project_id, params, opts, @valid_params, "count")
end
@doc """
Lists the jobs for the specified project, in order from most recent to last.
The following parameters are supported in the `params` argument:
* `:format` - the [format](ScrapyCloudEx.Endpoints.Storage.html#module-format) to be used for
returning results. Can be `:json` or `:jl`. Defaults to `:json`.
* `:pagination` - the `:count` [pagination parameter](ScrapyCloudEx.Endpoints.Storage.html#module-pagination)
is supported.
* `:spider` - the spider name.
* `:state` - return jobs with specified state. Supported values: `"pending"`, `"running"`,
`"finished"`, `"deleted"`.
* `:startts` - UNIX timestamp at which to begin results, in milliseconds.
* `:endts` - UNIX timestamp at which to end results, in milliseconds.
* `:start` - offset of initial jobs to skip in returned results.
  * `:stop` - job key at which to stop showing results.
* `:key` - job key for which to get job data. May be given multiple times.
* `:has_tag` - return jobs with specified tag. May be given multiple times, and will behave
as a logical `OR` operation among the values.
* `:lacks_tag` - return jobs that lack specified tag. May be given multiple times, and will
behave as a logical `AND` operation among the values.
The `opts` value is documented [here](ScrapyCloudEx.Endpoints.html#module-options).
See docs [here](https://doc.scrapinghub.com/api/jobq.html#jobq-project-id-list).
## List jobs finished between two timestamps
If you pass the startts and endts parameters, the API will return only the jobs finished between them.
```
ScrapyCloudEx.Endpoints.Storage.JobQ.list("API_KEY", 53, startts: 1359774955431, endts: 1359774955440)
```
## Retrieve jobs finished after some job
JobQ returns the list of jobs, with the most recently finished first. It is recommended to associate
the key of the most recently finished job with the downloaded data. When you want to update your data
later on, you can list the jobs and stop at the previously downloaded job, through the `:stop` parameter.
```
ScrapyCloudEx.Endpoints.Storage.JobQ.list("API_KEY", 53, stop: "53/7/81")
```
## Example return value
```
{:ok, [
%{
"close_reason" => "cancelled",
"elapsed" => 485061225,
"errors" => 1,
"finished_time" => 1540745154657,
"items" => 2783,
"key" => "345675/1/26",
"logs" => 20,
"pages" => 2888,
"pending_time" => 1540744974169,
"running_time" => 1540744974190,
"spider" => "sixbid.com",
"state" => "finished",
"ts" => 1540745141316,
"version" => "5ef2169-master"
}
]}
```
"""
@spec list(String.t(), String.t() | integer, Keyword.t(), Keyword.t()) ::
ScrapyCloudEx.result([map()])
def list(api_key, project_id, params \\ [], opts \\ [])
when is_api_key(api_key)
when is_binary(project_id) and project_id != ""
when is_list(params)
when is_list(opts) do
valid_params = @valid_params ++ [:format, :count, :start, :stop, :key, :pagination]
params =
params
|> set_default_format()
|> Endpoints.scope_params(:pagination, [:count])
|> Endpoints.merge_scope(:pagination)
make_request(api_key, project_id, params, opts, valid_params, "list")
end
@spec make_request(
String.t(),
String.t() | integer,
Keyword.t(),
Keyword.t(),
[atom, ...],
String.t()
) :: ScrapyCloudEx.result(any)
defp make_request(api_key, project_id, params, opts, valid_params, endpoint) do
params = params |> Helpers.canonicalize_params(@param_aliases)
with :ok <- Helpers.validate_params(params, valid_params) do
base_url = [@base_url, project_id, endpoint] |> Enum.join("/")
query_string = URI.encode_query(params)
RequestConfig.new()
|> RequestConfig.put(:api_key, api_key)
|> RequestConfig.put(:url, "#{base_url}?#{query_string}")
|> RequestConfig.put(:headers, Keyword.get(opts, :headers, []))
|> RequestConfig.put(:opts, opts)
|> Helpers.make_request()
else
error -> {:error, error}
end
end
@spec set_default_format(Keyword.t()) :: Keyword.t()
defp set_default_format(params), do: Keyword.put_new(params, :format, @default_format)
end
|
lib/endpoints/storage/job_q.ex
| 0.912456
| 0.829871
|
job_q.ex
|
starcoder
|
defmodule Snmp.Agent do
@moduledoc """
Use this module to generate an Agent module you can insert in your supervision
tree.
## DSL
See `Snmp.Agent.DSL`.
## Configuration
  When using this module, you need to provide the `:otp_app` option. The agent
  environment is fetched with `Application.get_env(<otp_app>, <agent_module>)`.
  * `versions` (optional, default: `[:v3]`): a list of versions to enable for
    this agent among `:v1`, `:v2` and `:v3`.
  * `transports` (optional, default: `["127.0.0.1", "::1"]`): a list of possible
    transport definitions. See `t:Snmp.Transport.agent_transport/0`.
* `security`: defines a list of users. See `t:Snmp.Mib.UserBasedSm.user/0` for format.
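  For example, a `config.exs` entry for the agent module might look like this
  (a sketch; the `:my_app` and `MyApp.Agent` names are assumptions):
  ```
  config :my_app, MyApp.Agent,
    versions: [:v1, :v2, :v3],
    transports: ["127.0.0.1", "::1"]
  ```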
## Example
```
defmodule Agent do
use Snmp.Agent, otp_app: :my_app
# Mandatory MIBs
mib MyApp.Mib.Standard
mib MyApp.Mib.Framework
# Application MIBs
mib MyMib
# VACM model
view :public do
include [1, 3, 6, 1, 2, 1]
end
view :private do
include [1, 3, 6]
end
access :public,
versions: [:v1, :v2c, :usm],
level: :noAuthNoPriv,
read_view: :public
access :secure,
versions: [:usm],
level: :authPriv,
read_view: :private,
write_view: :private,
notify_view: :private
end
```
"""
use GenServer
require Logger
alias Snmp.Agent.Config
defstruct handler: nil, errors: [], config: %{}, overwrite: true, otp_app: nil
@type handler :: module()
@doc """
Generates an SNMP agent module
"""
defmacro __using__(args) do
quote do
use Snmp.Agent.Handler, unquote(args)
end
end
@doc """
Starts SNMP agent
"""
@spec start_link(handler) :: GenServer.on_start()
def start_link(args) do
GenServer.start_link(__MODULE__, args, name: __MODULE__)
end
@impl GenServer
def init(handler) do
case Config.build(handler) do
{:ok, config} ->
_ = :snmp.start_agent(:normal)
{:ok, %__MODULE__{config: config}}
{:error, errors} ->
{:stop, errors}
end
end
end
|
lib/snmp/agent.ex
| 0.868702
| 0.692278
|
agent.ex
|
starcoder
|
defmodule Tai.Venues.Boot do
@moduledoc """
Coordinates the asynchronous hydration of a venue:
- products
- asset balances
- fees
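  A sketch of a typical invocation (the `adapter` value is illustrative); failures
  are returned as `{:error, {adapter, reasons}}`:
      {:ok, adapter} = Tai.Venues.Boot.run(adapter)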
"""
alias Tai.Venues.Boot
@type adapter :: Tai.Venues.Adapter.t()
@spec run(adapter :: adapter) :: {:ok, adapter} | {:error, {adapter, [reason :: term]}}
def run(%Tai.Venues.Adapter{} = adapter) do
adapter
|> hydrate_products_and_balances
|> wait_for_products
|> hydrate_fees_and_positions_and_start_streams
|> wait_for_balances_and_fees
end
defp hydrate_products_and_balances(adapter) do
t_products = Task.async(Boot.Products, :hydrate, [adapter])
t_balances = Task.async(Boot.AssetBalances, :hydrate, [adapter])
{adapter, t_products, t_balances}
end
defp wait_for_products({adapter, t_products, t_balances}) do
working_tasks = [asset_balances: t_balances]
case Task.await(t_products, adapter.timeout) do
{:ok, products} ->
{:ok, adapter, working_tasks, products}
{:error, reason} ->
err_reasons = [products: reason]
{:error, adapter, working_tasks, err_reasons}
end
end
defp hydrate_fees_and_positions_and_start_streams({:ok, adapter, working_tasks, products}) do
t_fees = Task.async(Boot.Fees, :hydrate, [adapter, products])
t_positions = Task.async(Boot.Positions, :hydrate, [adapter])
t_stream = Task.async(Boot.Stream, :start, [adapter, products])
new_working_tasks = [{:fees, t_fees} | working_tasks]
new_working_tasks = [{:positions, t_positions} | new_working_tasks]
new_working_tasks = [{:streams, t_stream} | new_working_tasks]
{:ok, adapter, new_working_tasks}
end
defp hydrate_fees_and_positions_and_start_streams({:error, _, _, _} = error), do: error
defp wait_for_balances_and_fees({:ok, adapter, working_tasks}) do
adapter
|> collect_remaining_errors(working_tasks, [])
end
defp wait_for_balances_and_fees({:error, adapter, working_tasks, err_reasons}) do
adapter
|> collect_remaining_errors(working_tasks, err_reasons)
end
defp collect_remaining_errors(adapter, [], err_reasons) do
if Enum.empty?(err_reasons) do
{:ok, adapter}
else
{:error, {adapter, err_reasons}}
end
end
defp collect_remaining_errors(adapter, [{name, working} | tasks], err_reasons) do
case Task.await(working, adapter.timeout) do
{:error, reason} ->
adapter |> collect_remaining_errors(tasks, [{name, reason} | err_reasons])
_ ->
adapter |> collect_remaining_errors(tasks, err_reasons)
end
end
end
|
apps/tai/lib/tai/venues/boot.ex
| 0.723505
| 0.438545
|
boot.ex
|
starcoder
|
defmodule Meca do
use GenServer
@moduledoc """
Module for communicating and controlling a Mecademic Robot over TCP.
## Example
{:ok, pid} = Meca.start(%{host: '127.0.0.1', port: 10000})
Meca.activate_robot(pid)
Meca.home(pid)
Meca.set_blending(pid, 0)
Meca.set_joint_vel(pid, 100)
Meca.move_joints(pid, 0, 0, 0, 170, 115, 175)
Meca.move_joints(pid, 0, 0, 0, -170, -115, -175)
Meca.move_joints(pid, 0, -70, 70, 0, 0, 0)
Meca.move_joints(pid, 0, 90, -135, 0, 0, 0)
Meca.gripper_close(pid)
"""
@initial_state %{
socket: nil,
host: nil,
port: nil,
eob: 1,
eom: 1,
user_eom: nil,
error_mode: false,
queueing: false
}
@eom_commands [
"MoveJoints",
"MoveLin",
"MoveLinRelTRF",
"MoveLinRelWRF",
"MovePose",
"SetCartAcc",
"SetJointAcc",
"SetTRF",
"SetWRF"
]
@robot_status_keys [
:activated,
:homing,
:simulation,
:error,
:paused,
:eob,
:eom
]
@gripper_status_keys [
:gripper_enabled,
:homing_state,
:holding_part,
:limit_reached,
:error_state,
:force_overload
]
@float_response_codes [
2026,
2027
]
@integer_response_codes [
2029,
2007,
2079
]
@reset_error_responses [
"The error was reset",
"There was no error to reset"
]
@socket_options [
:binary,
packet: :line,
line_delimiter: 0,
buffer: 1024,
active: false
]
@typedoc """
A response from executing a command.
"""
@type command_response :: :error_mode | :queueing | String.t() | list(integer()) | list(float())
@spec run_command(pid(), String.t(), list(integer() | float()) | nil) :: command_response()
@doc """
Sends a command to the Mecademic Robot and receives a decoded response.
"""
def run_command(pid, command, args) do
GenServer.call(pid, {:command, command, args})
end
@spec run_command(pid(), String.t()) :: command_response()
@doc """
Sends a command to the Mecademic Robot and receives a decoded response.
"""
def run_command(pid, command), do: run_command(pid, command, [])
@spec run_script(pid(), String.t()) :: :ok
@doc """
Takes a string with one command per line and sends the commands sequentially to
the Mecademic Robot.
## Example
{:ok, pid} = Meca.start(%{host: '127.0.0.1', port: 10000})
script = \"\"\"
SetBlending(0)
SetJointVel(100)
MoveJoints(0,0,0,170,115,175)
MoveJoints(0,0,0,-170,-115,-175)
MoveJoints(0,0,0,170,115,175)
\"\"\"
Meca.run_script(pid, script)
"""
def run_script(pid, script) do
script
|> String.split("\n")
|> Enum.map(&String.trim/1)
# drop blank lines so a trailing newline doesn't send an empty command
|> Enum.reject(&(&1 == ""))
|> Enum.each(fn command ->
pid |> run_command(command, nil)
end)
end
@doc """
Creates a new connection to a Mecademic Robot with the provided connection options.
Expects a map with a `host` and a `port`.
"""
def start_link(opts \\ %{}) do
GenServer.start_link(__MODULE__, Map.merge(@initial_state, opts))
end
@doc """
Creates a new connection to a Mecademic Robot with the provided connection options.
Expects a map with a `host` and a `port`.
"""
def start(opts \\ %{}) do
GenServer.start(__MODULE__, Map.merge(@initial_state, opts))
end
@doc false
def init(state) do
with {:ok, socket} <- :gen_tcp.connect(state[:host], state[:port], @socket_options, 100),
{:ok, resp} <- :gen_tcp.recv(socket, 0, 10_000),
{code, _body} <- parse_response(resp) do
case code do
3000 ->
# connection confirmation
{:ok, %{state | socket: socket}}
3001 ->
# another session is connected to the robot
{:stop, :existing_connection}
_ ->
# unexpected response
{:stop, {:unexpected_code, code}}
end
else
{:error, err} ->
{:stop, err}
err ->
{:stop, err}
end
end
@doc false
def handle_call(
{:command, command, args},
_,
%{socket: socket, error_mode: error_mode, queueing: queueing} = state
) do
if error_mode do
{:reply, :error_mode, state}
else
:ok = :gen_tcp.send(socket, build_command(command, args) |> encode())
if queueing do
# skip receiving responses if queueing enabled
{:reply, :queueing, state}
else
{:ok, resp} = :gen_tcp.recv(socket, 0)
{code, body} = parse_response(resp)
decoded_body = decode_response_body(code, body)
{:reply, decoded_body, %{state | error_mode: error_code?(code)}}
end
end
end
@spec encode(String.t()) :: String.t()
@doc """
Encodes a message before sending to the Mecademic Robot.
"""
def encode(msg) do
"#{msg}\0"
end
@spec decode(String.t()) :: String.t()
@doc """
Decodes response from the Mecademic Robot into useful information that can be manipulated.
"""
def decode(msg) do
msg
end
@spec activate_robot(pid()) :: command_response()
@doc """
Activates the Mecademic Robot.
"""
def activate_robot(pid), do: pid |> run_command("ActivateRobot")
@spec deactivate_robot(pid()) :: command_response()
@doc """
Deactivates the Mecademic Robot.
"""
def deactivate_robot(pid), do: pid |> run_command("DeactivateRobot")
@spec activate_sim(pid()) :: command_response()
@doc """
Activates the Mecademic Robot simulation mode.
"""
def activate_sim(pid), do: pid |> run_command("ActivateSim")
@spec deactivate_sim(pid()) :: command_response()
@doc """
Deactivates the Mecademic Robot simulation mode.
"""
def deactivate_sim(pid), do: pid |> run_command("DeactivateSim")
@spec switch_to_ethercat(pid()) :: command_response()
@doc """
Places the Mecademic Robot in EtherCAT mode.
"""
def switch_to_ethercat(pid), do: pid |> run_command("SwitchToEtherCAT")
@spec get_conf(pid()) :: command_response()
@doc """
Retrieves the current inverse kinematic configuration.
"""
def get_conf(pid), do: pid |> run_command("GetConf")
@spec get_joints(pid()) :: command_response()
@doc """
Retrieves the Mecademic Robot joint angles in degrees.
"""
def get_joints(pid), do: pid |> run_command("GetJoints")
@spec get_pose(pid()) :: command_response()
@doc """
Retrieves the current pose of the Mecademic Robot TRF with respect to the WRF.
"""
def get_pose(pid), do: pid |> run_command("GetPose")
@spec pause_motion(pid()) :: command_response()
@doc """
Stops the robot movement and holds until ResumeMotion.
"""
def pause_motion(pid), do: pid |> run_command("PauseMotion")
@spec resume_motion(pid()) :: command_response()
@doc """
Resumes the robot movement after being Paused from PauseMotion or ClearMotion.
"""
def resume_motion(pid), do: pid |> run_command("ResumeMotion")
@spec clear_motion(pid()) :: command_response()
@doc """
Stops the robot movement and deletes the rest of the robot's trajectory.
Holds until a ResumeMotion.
"""
def clear_motion(pid), do: pid |> run_command("ClearMotion")
@spec brakes_on(pid()) :: command_response()
@doc """
Enables the brakes of joints 1, 2 and 3, if and only if the robot is powered but deactivated.
"""
def brakes_on(pid), do: pid |> run_command("BrakesOn")
@spec brakes_off(pid()) :: command_response()
@doc """
Disables the brakes of joints 1, 2 and 3, if and only if the robot is powered but deactivated.
"""
def brakes_off(pid), do: pid |> run_command("BrakesOff")
@spec home(pid()) :: command_response()
@doc """
Homes the Mecademic Robot.
"""
def home(pid), do: pid |> run_command("Home")
@spec gripper_open(pid()) :: command_response()
@doc """
Opens the gripper of the end-effector.
"""
def gripper_open(pid), do: pid |> run_command("GripperOpen")
@spec gripper_close(pid()) :: command_response()
@doc """
Closes the gripper of the end-effector.
"""
def gripper_close(pid), do: pid |> run_command("GripperClose")
@spec get_status_robot(pid()) :: %{
:activated => integer(),
:homing => integer(),
:simulation => integer(),
:error => integer(),
:paused => integer(),
:eob => integer(),
:eom => integer()
}
@doc """
Retrieves the robot status of the Mecademic Robot.
"""
def get_status_robot(pid) do
pid
|> run_command("GetStatusRobot")
|> then(&Enum.zip(@robot_status_keys, &1))
|> Enum.into(%{})
end
@spec get_status_gripper(pid()) :: %{
:gripper_enabled => integer(),
:homing_state => integer(),
:holding_part => integer(),
:limit_reached => integer(),
:error_state => integer(),
:force_overload => integer()
}
@doc """
Retrieves the gripper status of the Mecademic Robot.
"""
def get_status_gripper(pid) do
pid
|> run_command("GetStatusGripper")
|> then(&Enum.zip(@gripper_status_keys, &1))
|> Enum.into(%{})
end
@spec in_error_mode?(pid()) :: boolean()
@doc """
Status method that checks whether the Mecademic Robot is in error mode.
"""
def in_error_mode?(pid) do
:sys.get_state(pid) |> Map.get(:error_mode)
end
@spec set_eob(pid(), integer()) :: command_response()
@doc """
Sets End of Block answer active or inactive in the Mecademic Robot. Parameter `e`
enables (1) EOB or disables (0) EOB.
"""
def set_eob(pid, e) do
:sys.replace_state(pid, fn state -> %{state | eob: e} end)
pid |> run_command("SetEOB", [e])
end
@spec set_eom(pid(), integer()) :: command_response()
@doc """
Sets End of Movement answer active or inactive in the Mecademic Robot. Parameter `e`
enables (1) EOM or disables (0) EOM.
"""
def set_eom(pid, e) do
:sys.replace_state(pid, fn state -> %{state | eom: e} end)
pid |> run_command("SetEOM", [e])
end
@spec reset_error(pid()) :: command_response()
@doc """
Resets the error in the Mecademic Robot.
"""
def reset_error(pid) do
pid
|> run_command("ResetError")
|> tap(fn response ->
# set the error_mode state to false if the response suggests the error was reset
:sys.replace_state(pid, fn state ->
%{state | error_mode: !Enum.member?(@reset_error_responses, response)}
end)
end)
end
@spec set_queue(pid(), integer()) :: boolean()
@doc """
Enables the queueing of move commands for blending. Parameter `e` enables (1) queueing
or disables (0) queueing.
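
For example, to queue two blended moves and then restore normal response
handling (a usage sketch; `pid` is assumed to be a connected robot):

Meca.set_queue(pid, 1)
Meca.move_joints(pid, 0, 0, 0, 170, 115, 175)
Meca.move_joints(pid, 0, 0, 0, -170, -115, -175)
Meca.set_queue(pid, 0)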
"""
def set_queue(pid, e) do
if e == 1 do
eom = :sys.get_state(pid) |> Map.get(:eom)
:sys.replace_state(pid, fn state ->
%{state | queueing: true, user_eom: eom}
end)
set_eom(pid, 0)
else
user_eom = :sys.get_state(pid) |> Map.get(:user_eom)
:sys.replace_state(pid, fn state ->
%{state | queueing: false}
end)
set_eom(pid, user_eom)
end
:sys.get_state(pid) |> Map.get(:queueing)
end
@spec delay(pid(), float()) :: command_response()
@doc """
Gives the Mecademic Robot a wait time before performing another action.
"""
def delay(pid, t) do
pid |> run_command("Delay", [t])
end
@spec move_joints(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Moves the joints of the Mecademic Robot to the desired angles. Each theta argument corresponds
to a joint number.
"""
def move_joints(pid, theta_1, theta_2, theta_3, theta_4, theta_5, theta_6) do
pid |> run_command("MoveJoints", [theta_1, theta_2, theta_3, theta_4, theta_5, theta_6])
end
@spec move_lin(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Moves the Mecademic Robot tool reference in a straight line to final
point with specified direction.
"""
def move_lin(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("MoveLin", [x, y, z, alpha, beta, gamma])
end
@spec move_lin_rel_trf(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Moves the Mecademic Robot tool reference frame to specified coordinates and heading.
"""
def move_lin_rel_trf(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("MoveLinRelTRF", [x, y, z, alpha, beta, gamma])
end
@spec move_lin_rel_wrf(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Moves the Mecademic Robot world reference frame to specified coordinates and heading.
"""
def move_lin_rel_wrf(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("MoveLinRelWRF", [x, y, z, alpha, beta, gamma])
end
@spec move_pose(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Moves the Mecademic Robot joints to have the TRF at (x, y, z) with heading (alpha, beta, gamma).
"""
def move_pose(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("MovePose", [x, y, z, alpha, beta, gamma])
end
@spec set_blending(pid(), float()) :: command_response()
@doc """
Sets the blending of the Mecademic Robot. Parameter `p` enables blending with a
value between `1` and `100` and disables it at `0`.
"""
def set_blending(pid, p) do
pid |> run_command("SetBlending", [p])
end
@spec set_auto_conf(pid(), integer()) :: command_response()
@doc """
Enables or disables the automatic robot configuration selection, which has effect only on
the MovePose command. Parameter `e` enables (1) or disables (0) the automatic selection.
"""
def set_auto_conf(pid, e) do
pid |> run_command("SetAutoConf", [e])
end
@spec set_cart_acc(pid(), float()) :: command_response()
@doc """
Sets the cartesian accelerations of the linear and angular movements of the
Mecademic Robot end effector. Parameter `p` should be between `1` and `100`.
"""
def set_cart_acc(pid, p) do
pid |> run_command("SetCartAcc", [p])
end
@spec set_cart_ang_vel(pid(), float()) :: command_response()
@doc """
Sets the cartesian angular velocity of the Mecademic Robot TRF with respect to its WRF.
Parameter `w` should be between `0.001` and `180`.
"""
def set_cart_ang_vel(pid, w) do
pid |> run_command("SetCartAngVel", [w])
end
@spec set_cart_lin_vel(pid(), float()) :: command_response()
@doc """
Sets the cartesian linear velocity of the Mecademic Robot's TRF relative to its WRF.
Parameter `v` should be between `0.001` and `500`.
"""
def set_cart_lin_vel(pid, v) do
pid |> run_command("SetCartLinVel", [v])
end
@spec set_conf(pid(), integer(), integer(), integer()) :: command_response()
@doc """
Sets the desired Mecademic Robot inverse kinematic configuration to be observed in
the MovePose command. Parameters `c1`, `c3`, and `c5` should be either `1` or `-1`.
"""
def set_conf(pid, c1, c3, c5) do
pid |> run_command("SetConf", [c1, c3, c5])
end
@spec set_gripper_force(pid(), float()) :: command_response()
@doc """
Sets the Gripper's grip force. Parameter `p` should be between `1` and `100`.
"""
def set_gripper_force(pid, p) do
pid |> run_command("SetGripperForce", [p])
end
@spec set_gripper_vel(pid(), float()) :: command_response()
@doc """
Sets the Gripper fingers' velocity with respect to the gripper. Parameter `p` should be
between `1` and `100`.
"""
def set_gripper_vel(pid, p) do
pid |> run_command("SetGripperVel", [p])
end
@spec set_joint_acc(pid(), float()) :: command_response()
@doc """
Sets the acceleration of the joints. Parameter `p` should be between `1` and `100`.
"""
def set_joint_acc(pid, p) do
pid |> run_command("SetJointAcc", [p])
end
@spec set_joint_vel(pid(), float()) :: command_response()
@doc """
Sets the angular velocities of the Mecademic Robot's joints. `velocity` should be
between `1` and `100`.
"""
def set_joint_vel(pid, velocity) do
pid |> run_command("SetJointVel", [velocity])
end
@spec set_trf(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Sets the Mecademic Robot TRF at (x, y, z) and heading (alpha, beta, gamma) with respect
to the FRF.
"""
def set_trf(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("SetTRF", [x, y, z, alpha, beta, gamma])
end
@spec set_wrf(pid(), float(), float(), float(), float(), float(), float()) ::
command_response()
@doc """
Sets the Mecademic Robot WRF at (x, y, z) and heading (alpha, beta, gamma) with respect
to the BRF.
"""
def set_wrf(pid, x, y, z, alpha, beta, gamma) do
pid |> run_command("SetWRF", [x, y, z, alpha, beta, gamma])
end
@spec build_command(String.t(), list(integer() | float() | String.t()) | nil) ::
String.t()
@doc """
Builds the command string to send to the Mecademic Robot from the function name and
arguments the command needs.
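
For example:

iex> Meca.build_command("MoveJoints", [0, 0, 0, 170, 115, 175])
"MoveJoints(0,0,0,170,115,175)"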
"""
def build_command(command, nil), do: command
def build_command(command, args) do
case Enum.count(args) do
0 ->
command
_ ->
args_list = Enum.join(args, ",")
"#{command}(#{args_list})"
end
end
@spec parse_response(String.t()) :: {integer(), String.t()}
@doc """
Parses the raw response into the response code and body.
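
Assuming raw responses follow the `[code][body]` framing implied by the
slicing below:

iex> Meca.parse_response("[2042][Motion paused.]")
{2042, "Motion paused."}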
"""
def parse_response(resp) do
trimmed = resp |> String.trim("\0")
{parse_response_code(trimmed), parse_response_body(trimmed)}
end
@spec parse_response_code(String.t()) :: integer()
defp parse_response_code(resp) do
resp |> String.slice(1..4) |> String.to_integer()
end
@spec parse_response_body(String.t()) :: String.t()
defp parse_response_body(resp) do
resp |> String.slice(7..-2)
end
@spec decode_response_body(integer(), String.t()) ::
list(float()) | list(integer()) | String.t()
@doc """
Decodes the response body into float values, integer values, or a string depending
on the response code.
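
For example, code `2026` (a joint angles response) decodes into floats:

iex> Meca.decode_response_body(2026, "0.0,0.0,0.0,170.0,115.0,175.0")
[0.0, 0.0, 0.0, 170.0, 115.0, 175.0]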
"""
def decode_response_body(code, body) do
cond do
Enum.member?(@float_response_codes, code) ->
body
|> String.split(",")
|> Enum.map(&String.to_float/1)
Enum.member?(@integer_response_codes, code) ->
body
|> String.split(",")
|> Enum.map(&String.to_integer/1)
true ->
body
end
end
@spec answer_codes(String.t(), %{optional(:eob) => integer(), optional(:eom) => integer()}) ::
list(integer())
@doc """
Returns the list of possible answer codes for the given command and EOM/EOB enablement status.
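
For example, with both EOB and EOM enabled, a move command may answer with the
end-of-movement (3004) and end-of-block (3012) codes:

iex> Meca.answer_codes("MoveJoints", %{eob: 1, eom: 1})
[3004, 3012]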
"""
def answer_codes(command, state)
def answer_codes("ActivateRobot", _), do: [2000, 2001]
def answer_codes("ActivateSim", _), do: [2045]
def answer_codes("ClearMotion", _), do: [2044]
def answer_codes("DeactivateRobot", _), do: [2004]
def answer_codes("BrakesOn", _), do: [2010]
def answer_codes("BrakesOff", _), do: [2008]
def answer_codes("GetConf", _), do: [2029]
def answer_codes("GetJoints", _), do: [2026]
def answer_codes("GetStatusRobot", _), do: [2007]
def answer_codes("GetStatusGripper", _), do: [2079]
def answer_codes("GetPose", _), do: [2027]
def answer_codes("Home", _), do: [2002, 2003]
def answer_codes("PauseMotion", %{eom: 1}), do: [2042, 3004]
def answer_codes("PauseMotion", _), do: [2042]
def answer_codes("ResetError", _), do: [2005, 2006]
def answer_codes("ResumeMotion", _), do: [2043]
def answer_codes("SetEOB", _), do: [2054, 2055]
def answer_codes("SetEOM", _), do: [2052, 2053]
def answer_codes(command, %{eob: eob, eom: eom}) do
[]
|> append_eob_answer_code(eob)
|> append_eom_answer_code(command, eom)
end
def answer_codes(_, _), do: []
@spec append_eob_answer_code(list(integer()), integer()) :: list(integer())
defp append_eob_answer_code(list, 1), do: [3012 | list]
defp append_eob_answer_code(list, _), do: list
@spec append_eom_answer_code(list(integer()), String.t(), integer()) :: list(integer())
defp append_eom_answer_code(list, command, 1) do
if Enum.member?(@eom_commands, command) do
[3004 | list]
else
list
end
end
defp append_eom_answer_code(list, _, _), do: list
@spec error_code?(integer()) :: boolean()
@doc """
Returns whether or not the response code is an error code.
This encompasses the 1000-1999 command errors, and general errors in the 3000 range.
"""
def error_code?(code) when code in 1000..1999, do: true
def error_code?(code) when code in [3001, 3003, 3005, 3009, 3014, 3026], do: true
def error_code?(_), do: false
end
|
lib/meca.ex
| 0.811265
| 0.556159
|
meca.ex
|
starcoder
|
defmodule Membrane.MP4.MovieBox.TrackBox do
@moduledoc """
A module containing a set of utilities for assembling an MPEG-4 track box.
The track box (`trak` atom) describes a single track of a presentation. This description includes
information like its timescale, duration, volume, media-specific data (media handlers, sample
descriptions) as well as a sample table, which allows media players to find and interpret
track's data in the media data box.
For more information about the track box, refer to [ISO/IEC 14496-12](https://www.iso.org/standard/74428.html).
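
## Example

A minimal sketch, assuming `track` is a `Membrane.MP4.Track` struct with its
id, timescale, duration, dimensions, content and sample table filled in:

[trak: %{children: _children, fields: %{}}] = TrackBox.assemble(track)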
"""
alias Membrane.MP4.{Container, Helper, Track}
alias Membrane.MP4.Payload.{AAC, AVC1}
defguardp is_audio(track) when {track.height, track.width} == {0, 0}
@spec assemble(Track.t()) :: Container.t()
def assemble(track) do
dref =
{:dref,
%{
children: [url: %{children: [], fields: %{flags: 1, version: 0}}],
fields: %{entry_count: 1, flags: 0, version: 0}
}}
dinf = [dinf: %{children: [dref], fields: %{}}]
[
trak: %{
children:
track_header(track) ++
[
mdia: %{
children:
media_handler_header(track) ++
handler(track) ++
[
minf: %{
children:
media_header(track) ++
dinf ++ sample_table(track),
fields: %{}
}
],
fields: %{}
}
],
fields: %{}
}
]
end
defp track_header(track) do
[
tkhd: %{
children: [],
fields: %{
alternate_group: 0,
creation_time: 0,
duration: track.duration,
flags: 3,
height: {track.height, 0},
layer: 0,
matrix_value_A: {1, 0},
matrix_value_B: {0, 0},
matrix_value_C: {0, 0},
matrix_value_D: {1, 0},
matrix_value_U: {0, 0},
matrix_value_V: {0, 0},
matrix_value_W: {1, 0},
matrix_value_X: {0, 0},
matrix_value_Y: {0, 0},
modification_time: 0,
track_id: track.id,
version: 0,
volume:
if is_audio(track) do
{1, 0}
else
{0, 0}
end,
width: {track.width, 0}
}
}
]
end
defp media_handler_header(track) do
[
mdhd: %{
children: [],
fields: %{
creation_time: 0,
duration: track.duration,
flags: 0,
language: 21_956,
modification_time: 0,
timescale: track.timescale,
version: 0
}
}
]
end
defp handler(track) when is_audio(track) do
[
hdlr: %{
children: [],
fields: %{
flags: 0,
handler_type: "soun",
name: "SoundHandler",
version: 0
}
}
]
end
defp handler(_track) do
[
hdlr: %{
children: [],
fields: %{
flags: 0,
handler_type: "vide",
name: "VideoHandler",
version: 0
}
}
]
end
defp media_header(track) when is_audio(track) do
[
smhd: %{
children: [],
fields: %{
balance: {0, 0},
flags: 0,
version: 0
}
}
]
end
defp media_header(_track) do
[
vmhd: %{
children: [],
fields: %{
flags: 1,
graphics_mode: 0,
opcolor: 0,
version: 0
}
}
]
end
defp sample_table(track) do
sample_description = sample_description(track)
sample_deltas = sample_deltas(track)
maybe_sample_sync = maybe_sample_sync(track)
sample_to_chunk = sample_to_chunk(track)
sample_sizes = sample_sizes(track)
chunk_offsets = chunk_offsets(track)
[
stbl: %{
children:
[
stsd: %{
children: sample_description,
fields: %{
entry_count: length(sample_description),
flags: 0,
version: 0
}
},
stts: %{
fields: %{
version: 0,
flags: 0,
entry_count: length(sample_deltas),
entry_list: sample_deltas
}
}
] ++
maybe_sample_sync ++
[
stsc: %{
fields: %{
version: 0,
flags: 0,
entry_count: length(sample_to_chunk),
entry_list: sample_to_chunk
}
},
stsz: %{
fields: %{
version: 0,
flags: 0,
sample_size: 0,
sample_count: track.sample_table.sample_count,
entry_list: sample_sizes
}
},
stco: %{
fields: %{
version: 0,
flags: 0,
entry_count: length(chunk_offsets),
entry_list: chunk_offsets
}
}
],
fields: %{}
}
]
end
defp sample_description(%{content: %AVC1{} = avc1} = track) do
[
avc1: %{
children: [
avcC: %{
content: avc1.avcc
},
pasp: %{
children: [],
fields: %{h_spacing: 1, v_spacing: 1}
}
],
fields: %{
compressor_name: <<0::size(32)-unit(8)>>,
depth: 24,
flags: 0,
frame_count: 1,
height: track.height,
horizresolution: {0, 0},
num_of_entries: 1,
version: 0,
vertresolution: {0, 0},
width: track.width
}
}
]
end
defp sample_description(%{content: %AAC{} = aac}) do
[
mp4a: %{
children: [
esds: %{
fields: %{
elementary_stream_descriptor: aac.esds,
flags: 0,
version: 0
}
}
],
fields: %{
channel_count: aac.channels,
compression_id: 0,
data_reference_index: 1,
encoding_revision: 0,
encoding_vendor: 0,
encoding_version: 0,
packet_size: 0,
sample_size: 16,
sample_rate: {aac.sample_rate, 0}
}
}
]
end
defp sample_deltas(%{timescale: timescale, sample_table: %{decoding_deltas: decoding_deltas}}),
do:
Enum.map(decoding_deltas, fn %{sample_count: count, sample_delta: delta} ->
%{sample_count: count, sample_delta: Helper.timescalify(delta, timescale)}
end)
defp maybe_sample_sync(%{sample_table: %{sync_samples: []}}), do: []
defp maybe_sample_sync(%{sample_table: %{sync_samples: sync_samples}}) do
sync_samples
|> Enum.map(&%{sample_number: &1})
|> then(
&[
stss: %{
fields: %{
version: 0,
flags: 0,
entry_count: length(&1),
entry_list: &1
}
}
]
)
end
defp sample_to_chunk(%{sample_table: %{samples_per_chunk: samples_per_chunk}}),
do:
Enum.map(
samples_per_chunk,
&%{
first_chunk: &1.first_chunk,
samples_per_chunk: &1.sample_count,
sample_description_index: 1
}
)
defp sample_sizes(%{sample_table: %{sample_sizes: sample_sizes}}),
do: Enum.map(sample_sizes, &%{entry_size: &1})
defp chunk_offsets(%{sample_table: %{chunk_offsets: chunk_offsets}}),
do: Enum.map(chunk_offsets, &%{chunk_offset: &1})
end
|
lib/membrane_mp4/movie_box/track_box.ex
| 0.896863
| 0.513059
|
track_box.ex
|
starcoder
|
defmodule Linkify.Parser do
@moduledoc """
Module to handle parsing the input string.
"""
alias Linkify.Builder
@invalid_url ~r/(\.\.+)|(^(\d+\.){1,2}\d+$)/
@match_url ~r{^(?:\W*)?(?<url>(?:https?:\/\/)?[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~%:\/?#[\]@!\$&'\(\)\*\+,;=.]+$)}u
@get_scheme_host ~r{^\W*(?<scheme>https?:\/\/)?(?:[^@\n]+\\w@)?(?<host>[^:#~\/\n?]+)}u
@match_hashtag ~r/^(?<tag>\#[[:word:]_]*[[:alpha:]_·][[:word:]_·\p{M}]*)/u
@match_skipped_tag ~r/^(?<tag>(a|code|pre)).*>*/
@match_phone ~r"(?<phone>(?:x\d{2,7})|(?:(?:\+?1\s?(?:[.-]\s?)?)?(?:\(\s?(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s?\)|(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\s?(?:[.-]\s?)?)(?:[2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s?(?:[.-]\s?)?(?:[0-9]{4}))"
@delimiters ~r/[,.;:>?!]*$/
@en_apostrophes [
"'",
"'s",
"'ll",
"'d"
]
@prefix_extra [
"magnet:?",
"dweb://",
"dat://",
"gopher://",
"ipfs://",
"ipns://",
"irc://",
"ircs://",
"irc6://",
"mumble://",
"ssb://"
]
@tlds "./priv/tlds.txt"
|> File.read!()
|> String.split("\n", trim: true)
|> Enum.concat(["onion"])
|> MapSet.new()
@default_opts %{
url: true,
validate_tld: true
}
@doc """
Parse the given string, identifying items to link.
Parses the string, replacing the matching urls with an html link.
## Examples
iex> Linkify.Parser.parse("Check out google.com")
~s{Check out <a href="http://google.com">google.com</a>}
"""
@types [:url, :hashtag, :extra, :mention, :email, :phone]
def parse(input, opts \\ %{})
def parse(input, opts) when is_binary(input), do: {input, %{}} |> parse(opts) |> elem(0)
def parse(input, list) when is_list(list), do: parse(input, Enum.into(list, %{}))
def parse(input, opts) do
opts = Map.merge(@default_opts, opts)
{buffer, user_acc} = do_parse(input, opts, {"", [], :parsing})
if opts[:iodata] do
{buffer, user_acc}
else
{IO.iodata_to_binary(buffer), user_acc}
end
end
defp accumulate(acc, buffer),
do: [buffer | acc]
defp accumulate(acc, buffer, trailing),
do: [trailing, buffer | acc]
defp do_parse({"", user_acc}, _opts, {"", acc, _}),
do: {Enum.reverse(acc), user_acc}
defp do_parse(
{"<" <> text, user_acc},
%{hashtag: true} = opts,
{"#" <> _ = buffer, acc, :parsing}
) do
{buffer, user_acc} = link(buffer, opts, user_acc)
case Regex.run(@match_skipped_tag, text, capture: [:tag]) do
[tag] ->
text = String.trim_leading(text, tag)
do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<#{tag}"), :skip})
nil ->
do_parse({text, user_acc}, opts, {"<", acc, {:open, 1}})
end
end
defp do_parse({"<br" <> text, user_acc}, opts, {buffer, acc, :parsing}) do
{buffer, user_acc} = link(buffer, opts, user_acc)
do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<br"), {:open, 1}})
end
defp do_parse({"<a" <> text, user_acc}, opts, {buffer, acc, :parsing}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<a"), :skip})
defp do_parse({"<pre" <> text, user_acc}, opts, {buffer, acc, :parsing}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<pre"), :skip})
defp do_parse({"<code" <> text, user_acc}, opts, {buffer, acc, :parsing}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<code"), :skip})
defp do_parse({"</a>" <> text, user_acc}, opts, {buffer, acc, :skip}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "</a>"), :parsing})
defp do_parse({"</pre>" <> text, user_acc}, opts, {buffer, acc, :skip}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "</pre>"), :parsing})
defp do_parse({"</code>" <> text, user_acc}, opts, {buffer, acc, :skip}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "</code>"), :parsing})
defp do_parse({"<" <> text, user_acc}, opts, {"", acc, :parsing}),
do: do_parse({text, user_acc}, opts, {"<", acc, {:open, 1}})
defp do_parse({"<" <> text, user_acc}, opts, {buffer, acc, :parsing}) do
{buffer, user_acc} = link(buffer, opts, user_acc)
do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, "<"), {:open, 1}})
end
defp do_parse({">" <> text, user_acc}, opts, {buffer, acc, {:attrs, _level}}),
do: do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer, ">"), :parsing})
defp do_parse({<<ch::8>> <> text, user_acc}, opts, {"", acc, {:attrs, level}}) do
do_parse({text, user_acc}, opts, {"", accumulate(acc, <<ch::8>>), {:attrs, level}})
end
defp do_parse({text, user_acc}, opts, {buffer, acc, {:open, level}}) do
do_parse({text, user_acc}, opts, {"", accumulate(acc, buffer), {:attrs, level}})
end
defp do_parse(
{<<char::bytes-size(1), text::binary>>, user_acc},
opts,
{buffer, acc, state}
)
when char in [" ", "\r", "\n"] do
{buffer, user_acc} = link(buffer, opts, user_acc)
do_parse(
{text, user_acc},
opts,
{"", accumulate(acc, buffer, char), state}
)
end
defp do_parse({<<ch::8>>, user_acc}, opts, {buffer, acc, state}) do
{buffer, user_acc} = link(buffer <> <<ch::8>>, opts, user_acc)
do_parse(
{"", user_acc},
opts,
{"", accumulate(acc, buffer), state}
)
end
defp do_parse({<<ch::8>> <> text, user_acc}, opts, {buffer, acc, state}),
do: do_parse({text, user_acc}, opts, {buffer <> <<ch::8>>, acc, state})
def check_and_link(:url, buffer, opts, _user_acc) do
if url?(buffer, opts) do
case @match_url |> Regex.run(buffer, capture: [:url]) |> hd() do
^buffer ->
link_url(buffer, opts)
url ->
link = link_url(url, opts)
restore_stripped_symbols(buffer, url, link)
end
else
:nomatch
end
end
def check_and_link(:email, buffer, opts, _user_acc) do
if email?(buffer, opts), do: link_email(buffer, opts), else: :nomatch
end
def check_and_link(:mention, buffer, opts, user_acc) do
buffer
|> match_mention
|> link_mention(buffer, opts, user_acc)
end
def check_and_link(:hashtag, buffer, opts, user_acc) do
buffer
|> match_hashtag
|> link_hashtag(buffer, opts, user_acc)
end
def check_and_link(:extra, "xmpp:" <> handle = buffer, opts, _user_acc) do
if email?(handle, opts), do: link_extra(buffer, opts), else: :nomatch
end
def check_and_link(:extra, buffer, opts, _user_acc) do
if String.starts_with?(buffer, @prefix_extra), do: link_extra(buffer, opts), else: :nomatch
end
def check_and_link(:phone, buffer, opts, _user_acc) do
case Regex.run(@match_phone, buffer, capture: [:phone]) do
[_phone] -> link_phone(buffer, opts)
nil -> :nomatch
end
end
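# Heuristically decides whether parentheses around a candidate link are part of
# the URL or just surrounding punctuation; e.g. the trailing ")" is kept for
# URLs whose path contains balanced parentheses (Wikipedia-style links).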
defp maybe_strip_parens(buffer) do
trimmed = trim_leading_paren(buffer)
with :next <- parens_check_trailing(buffer),
:next <- parens_found_email(trimmed),
:next <- parens_found_url(trimmed),
%{path: path, query: query} = URI.parse(trimmed),
:next <- parens_in_query(query),
:next <- parens_found_path_separator(path),
:next <- parens_path_has_open_paren(path),
:next <- parens_check_balanced(trimmed) do
buffer |> trim_leading_paren |> trim_trailing_paren
else
:both -> buffer |> trim_leading_paren |> trim_trailing_paren
:leading_only -> buffer |> trim_leading_paren
:noop -> buffer
_ -> buffer
end
end
defp parens_check_trailing(buffer), do: (String.ends_with?(buffer, ")") && :next) || :noop
defp parens_found_email(trimmed),
do: (trim_trailing_paren(trimmed) |> email?(nil) && :both) || :next
defp parens_found_url(trimmed),
do: (trim_trailing_paren(trimmed) |> url?(nil) && :next) || :noop
defp parens_in_query(query), do: (is_nil(query) && :next) || :both
defp parens_found_path_separator(path), do: (String.contains?(path, "/") && :next) || :both
defp parens_path_has_open_paren(path), do: (String.contains?(path, "(") && :next) || :both
defp parens_check_balanced(trimmed) do
graphemes = String.graphemes(trimmed)
opencnt = graphemes |> Enum.count(fn x -> x == "(" end)
closecnt = graphemes |> Enum.count(fn x -> x == ")" end)
if opencnt == closecnt do
:leading_only
else
:next
end
end
defp trim_leading_paren(buffer) do
case buffer do
"(" <> buffer -> buffer
buffer -> buffer
end
end
defp trim_trailing_paren(buffer),
do:
(String.ends_with?(buffer, ")") && String.slice(buffer, 0, String.length(buffer) - 1)) ||
buffer
defp strip_punctuation(buffer), do: String.replace(buffer, @delimiters, "")
defp strip_en_apostrophes(buffer) do
Enum.reduce(@en_apostrophes, buffer, fn abbrev, buf ->
String.replace_suffix(buf, abbrev, "")
end)
end
def url?(buffer, opts) do
valid_url?(buffer) && Regex.match?(@match_url, buffer) && valid_tld?(buffer, opts)
end
def email?(buffer, opts) do
# Note: In reality the local part can only be checked by the remote server
case Regex.run(~r/^(?<user>.*)@(?<host>[^@]+)$/, buffer, capture: [:user, :host]) do
[_user, hostname] -> valid_hostname?(hostname) && valid_tld?(hostname, opts)
_ -> false
end
end
defp valid_url?(url), do: !Regex.match?(@invalid_url, url)
@doc """
Validates a URL's TLD. Returns a boolean.
Returns `true` if the `:validate_tld` option is set to `false`.
Skips validation and returns `true` if `:validate_tld` is set to `:no_scheme` and the url has a scheme.
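## Examples
iex> Linkify.Parser.valid_tld?("https://example.com", [])
true
iex> Linkify.Parser.valid_tld?("example.onion", [])
true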
"""
def valid_tld?(url, opts) do
[scheme, host] = Regex.run(@get_scheme_host, url, capture: [:scheme, :host])
cond do
opts[:validate_tld] == false ->
true
scheme != "" && ip?(host) ->
true
# don't validate if scheme is present
opts[:validate_tld] == :no_scheme and scheme != "" ->
true
true ->
tld = host |> strip_punctuation() |> String.split(".") |> List.last()
MapSet.member?(@tlds, tld)
end
end
def safe_to_integer(string, base \\ 10) do
String.to_integer(string, base)
rescue
_ ->
nil
end
def ip?(buffer) do
case :inet.parse_strict_address(to_charlist(buffer)) do
{:error, _} -> false
{:ok, _} -> true
end
end
# IDN-compatible, ported from musl-libc's is_valid_hostname()
def valid_hostname?(hostname) do
hostname
|> String.to_charlist()
|> Enum.any?(fn s ->
!(s >= 0x80 || s in 0x30..0x39 || s in 0x41..0x5A || s in 0x61..0x7A || s in '.-')
end)
|> Kernel.!()
end
def match_mention(buffer) do
case Regex.run(~r/^@(?<user>[a-zA-Z\d_-]+)(@(?<host>[^@]+))?$/, buffer,
capture: [:user, :host]
) do
[user, ""] ->
"@" <> user
[user, hostname] ->
if valid_hostname?(hostname) && valid_tld?(hostname, []),
do: "@" <> user <> "@" <> hostname,
else: nil
_ ->
nil
end
end
def match_hashtag(buffer) do
case Regex.run(@match_hashtag, buffer, capture: [:tag]) do
[hashtag] -> hashtag
_ -> nil
end
end
def link_hashtag(nil, _buffer, _, _user_acc), do: :nomatch
def link_hashtag(hashtag, buffer, %{hashtag_handler: hashtag_handler} = opts, user_acc) do
hashtag
|> hashtag_handler.(buffer, opts, user_acc)
|> maybe_update_buffer(hashtag, buffer)
end
def link_hashtag(hashtag, buffer, opts, _user_acc) do
hashtag
|> Builder.create_hashtag_link(buffer, opts)
|> maybe_update_buffer(hashtag, buffer)
end
def link_mention(nil, _buffer, _, _user_acc), do: :nomatch
def link_mention(mention, buffer, %{mention_handler: mention_handler} = opts, user_acc) do
mention
|> mention_handler.(buffer, opts, user_acc)
|> maybe_update_buffer(mention, buffer)
end
def link_mention(mention, buffer, opts, _user_acc) do
mention
|> Builder.create_mention_link(buffer, opts)
|> maybe_update_buffer(mention, buffer)
end
defp maybe_update_buffer(out, match, buffer) when is_binary(out) do
maybe_update_buffer({out, nil}, match, buffer)
end
defp maybe_update_buffer({out, user_acc}, match, buffer)
when match != buffer and out != buffer do
out = String.replace(buffer, match, out)
{out, user_acc}
end
defp maybe_update_buffer(out, _match, _buffer), do: out
@doc false
def link_url(buffer, opts) do
Builder.create_link(buffer, opts)
end
@doc false
def link_email(buffer, opts) do
Builder.create_email_link(buffer, opts)
end
def link_extra(buffer, opts) do
Builder.create_extra_link(buffer, opts)
end
@doc false
def link_phone(buffer, opts) do
Builder.create_phone_link(buffer, opts)
end
defp link(buffer, opts, user_acc) do
Enum.reduce_while(@types, {buffer, user_acc}, fn type, _ ->
if opts[type] == true do
check_and_link_reducer(type, buffer, opts, user_acc)
else
{:cont, {buffer, user_acc}}
end
end)
end
defp check_and_link_reducer(type, buffer, opts, user_acc) do
str =
buffer
|> String.split("<")
|> List.first()
|> strip_en_apostrophes()
|> strip_punctuation()
|> maybe_strip_parens()
case check_and_link(type, str, opts, user_acc) do
:nomatch ->
{:cont, {buffer, user_acc}}
{link, user_acc} ->
{:halt, {restore_stripped_symbols(buffer, str, link), user_acc}}
link ->
{:halt, {restore_stripped_symbols(buffer, str, link), user_acc}}
end
end
defp restore_stripped_symbols(buffer, buffer, link), do: link
defp restore_stripped_symbols(buffer, stripped_buffer, link) do
buffer
|> String.split(stripped_buffer)
|> Enum.intersperse(link)
end
end
|
lib/linkify/parser.ex
| 0.601828
| 0.427576
|
parser.ex
|
starcoder
|
defmodule BankAccount do
@moduledoc """
An example bank account aggregate root.
It demonstrates returning either an `{:ok, aggregate}` or `{:error, reason}` tuple from the public API functions on success or failure.
Following this approach allows strict pattern matching on success and failures.
An error indicates a domain business rule violation, such as attempting to open an account with a negative initial balance.
You cannot use the pipeline operator (`|>`) to chain the functions.
Use the `with` special form instead, as shown in the example below.
## Example usage
with account <- BankAccount.new("123"),
{:ok, account} <- BankAccount.open_account(account, "ACC123", 100),
{:ok, account} <- BankAccount.deposit(account, 50),
do: account
"""
use EventSourced.AggregateRoot, fields: [account_number: nil, balance: nil]
defmodule Events do
defmodule BankAccountOpened do
defstruct account_number: nil, initial_balance: nil
end
defmodule MoneyDeposited do
defstruct amount: nil, balance: nil
end
defmodule MoneyWithdrawn do
defstruct amount: nil, balance: nil
end
end
alias Events.{BankAccountOpened, MoneyDeposited, MoneyWithdrawn}
def open_account(%BankAccount{} = _account, _account_number, initial_balance) when initial_balance <= 0 do
{:error, :initial_balance_must_be_above_zero}
end
def open_account(%BankAccount{} = account, account_number, initial_balance) when initial_balance > 0 do
{:ok, update(account, %BankAccountOpened{account_number: account_number, initial_balance: initial_balance})}
end
def deposit(%BankAccount{} = account, amount) do
balance = account.state.balance + amount
{:ok, update(account, %MoneyDeposited{amount: amount, balance: balance})}
end
def withdraw(%BankAccount{state: %{balance: balance}}, amount) when amount > balance do
{:error, :not_enough_funds}
end
def withdraw(%BankAccount{} = account, amount) do
balance = account.state.balance - amount
{:ok, update(account, %MoneyWithdrawn{amount: amount, balance: balance})}
end
# state mutators
def apply(%BankAccount.State{} = state, %BankAccountOpened{} = account_opened) do
%BankAccount.State{state |
account_number: account_opened.account_number,
balance: account_opened.initial_balance
}
end
def apply(%BankAccount.State{} = state, %MoneyDeposited{} = money_deposited) do
%BankAccount.State{state |
balance: money_deposited.balance
}
end
def apply(%BankAccount.State{} = state, %MoneyWithdrawn{} = money_withdrawn) do
%BankAccount.State{state |
balance: money_withdrawn.balance
}
end
end
|
test/example/bank_account.ex
| 0.879276
| 0.816699
|
bank_account.ex
|
starcoder
|
defmodule ExPixBRCode.Payments.Models.DynamicImmediatePixPayment do
@moduledoc """
A dynamic immediate Pix payment.
This payment structure is the result of loading it from a Pix endpoint.
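
## Example

A usage sketch, assuming `params` is the JSON payload returned by the
endpoint, decoded into a map:

{:ok, payment} =
params
|> DynamicImmediatePixPayment.changeset()
|> Ecto.Changeset.apply_action(:insert)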
"""
use ExPixBRCode.ValueObject
alias ExPixBRCode.Changesets
@required [:revisao, :chave, :txid, :status]
@optional [:solicitacaoPagador]
@calendario_required [:criacao, :apresentacao]
@calendario_optional [:expiracao]
@valor_required [:original]
@valor_optional []
embedded_schema do
field :revisao, :integer
field :chave, :string
field :txid, :string
field :status, Ecto.Enum,
values: ~w(ATIVA CONCLUIDA REMOVIDA_PELO_USUARIO_RECEBEDOR REMOVIDA_PELO_PSP)a
field :solicitacaoPagador, :string
embeds_one :calendario, Calendario, primary_key: false do
field :criacao, :utc_datetime
field :apresentacao, :utc_datetime
field :expiracao, :integer, default: 86_400
end
embeds_one :devedor, Devedor, primary_key: false do
field :cpf, :string
field :cnpj, :string
field :nome, :string
end
embeds_one :valor, Valor, primary_key: false do
field :original, :decimal
end
embeds_many :infoAdicionais, InfoAdicionais, primary_key: false do
field :nome, :string
field :valor, :string
end
end
@doc false
def changeset(model \\ %__MODULE__{}, params) do
model
|> cast(coalesce_params(params), @required ++ @optional)
|> validate_required(@required)
|> cast_embed(:calendario, with: &calendario_changeset/2, required: true)
|> cast_embed(:devedor, with: &devedor_changeset/2)
|> cast_embed(:valor, with: &valor_changeset/2, required: true)
|> cast_embed(:infoAdicionais, with: &info_adicionais_changeset/2)
|> validate_number(:revisao, greater_than_or_equal_to: 0)
|> validate_length(:txid, max: 35)
|> validate_length(:solicitacaoPagador, max: 140)
end
defp coalesce_params(%{"infoAdicionais" => nil} = params),
do: Map.put(params, "infoAdicionais", [])
defp coalesce_params(%{infoAdicionais: nil} = params), do: Map.put(params, :infoAdicionais, [])
defp coalesce_params(params), do: params
defp calendario_changeset(model, params) do
model
|> cast(params, @calendario_required ++ @calendario_optional)
|> validate_required(@calendario_required)
end
defp info_adicionais_changeset(model, params) do
model
|> cast(params, [:nome, :valor])
|> validate_required([:nome, :valor])
end
defp valor_changeset(model, params) do
model
|> cast(params, @valor_required ++ @valor_optional)
|> validate_required(@valor_required)
|> validate_number(:original, greater_than: 0)
end
defp devedor_changeset(model, params) do
model
|> cast(params, [:nome, :cpf, :cnpj])
|> validate_either_cpf_or_cnpj()
end
defp validate_either_cpf_or_cnpj(%{valid?: false} = c), do: c
defp validate_either_cpf_or_cnpj(changeset) do
cpf = get_field(changeset, :cpf)
cnpj = get_field(changeset, :cnpj)
name = get_field(changeset, :nome)
cond do
not is_nil(cpf) and not is_nil(cnpj) ->
add_error(changeset, :devedor, "only one of cpf or cnpj must be present")
(not is_nil(cpf) or not is_nil(cnpj)) and is_nil(name) ->
add_error(changeset, :devedor, "when either cpf or cnpj is present so must be 'nome'")
not is_nil(cpf) ->
Changesets.validate_document(changeset, :cpf)
true ->
Changesets.validate_document(changeset, :cnpj)
end
end
end
|
lib/ex_pix_brcode/payments/models/dynamic_immediate_pix_payment.ex
| 0.731251
| 0.441673
|
dynamic_immediate_pix_payment.ex
|
starcoder
|
defprotocol Sanbase.Alert.Settings do
@moduledoc ~s"""
A protocol that must be implemented by all trigger settings.
Every trigger has settings that define how it is evaluated, how it's cached
and how to check if the evaluated alert is triggered.
After creating the module, 3 things should be done in order to run the alert:
- Add a map between the StructMapTransformation
"""
def evaluate(trigger_settings, trigger)
@spec triggered?(struct()) :: boolean()
def triggered?(trigger)
@spec cache_key(struct()) :: String.t() | :nocache
def cache_key(trigger)
end
defprotocol Sanbase.Alert.History do
@spec historical_trigger_points(struct(), String.t()) :: {:ok, list(any())} | {:error, any()}
def historical_trigger_points(trigger, cooldown)
end
defmodule Sanbase.Alert.Trigger do
@moduledoc ~s"""
Module that represents an embedded schema that is used in UserTrigger`s `jsonb`
column. It represents a trigger, providing some common fields:
- `is_public` - boolean, indicating if other people can see that trigger
- `last_triggered` - the last datetime when it was triggered
- `cooldown` - how long before the trigger can fire and be sent again.
- `settings` field is a map that gets converted to one of the available
TriggerSettings modules. They implement a protocol that allows the evaluator
to easily process them.
"""
use Ecto.Schema
use Vex.Struct
import Ecto.Changeset
alias __MODULE__
alias Sanbase.DateTimeUtils
embedded_schema do
field(:settings, :map)
field(:is_frozen, :boolean, default: false)
field(:title, :string)
field(:description, :string)
field(:is_public, :boolean, default: false)
field(:last_triggered, :map, default: %{})
field(:cooldown, :string, default: "24h")
field(:icon_url, :string)
field(:is_active, :boolean, default: true)
field(:is_repeating, :boolean, default: true)
end
@type t :: %__MODULE__{
settings: map() | struct(),
is_public: boolean(),
cooldown: String.t(),
last_triggered: map(),
title: String.t(),
description: String.t(),
icon_url: String.t(),
is_active: boolean(),
is_repeating: boolean()
}
@doc false
@fields [
:settings,
:is_public,
:is_frozen,
:cooldown,
:last_triggered,
:title,
:description,
:icon_url,
:is_active,
:is_repeating
]
def create_changeset(%__MODULE__{} = trigger, args \\ %{}) do
trigger
|> cast(args, @fields)
|> validate_required([:settings, :title])
|> validate_change(:icon_url, &validate_url/2)
end
def update_changeset(%__MODULE__{} = trigger, args \\ %{}) do
trigger
|> cast(args, @fields)
|> validate_change(:icon_url, &validate_url/2)
end
defp validate_url(_changeset, url) do
case Sanbase.Validation.valid_url?(url) do
:ok -> []
{:error, reason} -> [icon_url: reason]
end
end
def payload_to_string(%Trigger{settings: %{payload: {template, kv}}}) do
Sanbase.TemplateEngine.run(template, kv)
end
def payload_to_string({template, kv}) do
Sanbase.TemplateEngine.run(template, kv)
end
def evaluate(%Trigger{settings: %{target: target} = trigger_settings} = trigger) do
filtered_target = remove_targets_on_cooldown(target, trigger)
trigger_settings =
%{trigger_settings | filtered_target: filtered_target}
|> Sanbase.Alert.Settings.evaluate(trigger)
%Trigger{trigger | settings: trigger_settings}
end
@spec historical_trigger_points(Sanbase.Alert.Trigger.t()) :: {:error, any} | {:ok, [any]}
def historical_trigger_points(%Trigger{settings: trigger_settings, cooldown: cooldown}) do
Sanbase.Alert.History.historical_trigger_points(trigger_settings, cooldown)
end
def triggered?(%Trigger{settings: trigger_settings}) do
Sanbase.Alert.Settings.triggered?(trigger_settings)
end
def cache_key(%Trigger{settings: trigger_settings}) do
Sanbase.Alert.Settings.cache_key(trigger_settings)
end
def last_triggered(%Trigger{last_triggered: lt}, _target) when map_size(lt) == 0, do: nil
def last_triggered(%Trigger{last_triggered: lt}, target) do
case Map.get(lt, target) do
nil -> nil
last_triggered -> last_triggered |> DateTimeUtils.from_iso8601!()
end
end
def has_cooldown?(%Trigger{} = trigger, target) do
case last_triggered(trigger, target) do
nil ->
false
%DateTime{} = target_last_triggered ->
DateTime.compare(
DateTimeUtils.after_interval(trigger.cooldown, target_last_triggered),
Timex.now()
) == :gt
end
end
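@doc """
Converts a snake_case settings type into a human readable title.

iex> Sanbase.Alert.Trigger.human_readable_settings_type("daily_active_addresses")
"Daily Active Addresses"
"""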
def human_readable_settings_type(type) do
type
|> String.replace("_", " ")
|> String.split()
|> Enum.map(&String.capitalize/1)
|> Enum.join(" ")
end
defp remove_targets_on_cooldown(%{user_list: user_list_id}, trigger) do
remove_targets_on_cooldown(%{watchlist_id: user_list_id}, trigger)
end
defp remove_targets_on_cooldown(%{watchlist_id: watchlist_id}, trigger) do
case Sanbase.UserList.by_id(watchlist_id, []) do
{:error, _} ->
%{list: [], type: :slug}
{:ok, watchlist} ->
case Sanbase.UserList.get_projects(watchlist) do
{:ok, %{projects: projects}} ->
projects
|> Enum.map(& &1.slug)
|> remove_targets_on_cooldown(trigger, :slug)
{:error, _error} ->
%{list: [], type: :slug}
end
end
end
defp remove_targets_on_cooldown(%{market_segments: market_segments} = target, trigger) do
combinator = Map.get(target, :market_segments_combinator, "and")
projects =
case combinator do
"and" -> Sanbase.Model.Project.List.by_market_segment_all_of(market_segments)
"or" -> Sanbase.Model.Project.List.by_market_segment_any_of(market_segments)
end
Enum.map(projects, & &1.slug)
|> remove_targets_on_cooldown(trigger, :slug)
end
defp remove_targets_on_cooldown(%{slug: slug}, trigger)
when is_binary(slug) or is_list(slug) do
slug
|> List.wrap()
|> remove_targets_on_cooldown(trigger, :slug)
end
defp remove_targets_on_cooldown(%{word: slug}, trigger)
when is_binary(slug) or is_list(slug) do
slug
|> List.wrap()
|> remove_targets_on_cooldown(trigger, :word)
end
defp remove_targets_on_cooldown(%{text: text}, trigger)
when is_binary(text) do
text
|> List.wrap()
|> remove_targets_on_cooldown(trigger, :text)
end
defp remove_targets_on_cooldown(%{eth_address: address}, trigger)
when is_binary(address) or is_list(address) do
address
|> List.wrap()
|> Enum.map(&Sanbase.BlockchainAddress.to_internal_format/1)
|> remove_targets_on_cooldown(trigger, :eth_address)
end
defp remove_targets_on_cooldown(%{address: address}, trigger)
when is_binary(address) or is_list(address) do
address
|> List.wrap()
|> Enum.map(&Sanbase.BlockchainAddress.to_internal_format/1)
|> remove_targets_on_cooldown(trigger, :address)
end
defp remove_targets_on_cooldown(target, trigger) do
target
|> List.wrap()
|> remove_targets_on_cooldown(trigger, :slug)
end
defp remove_targets_on_cooldown(target_list, trigger, type) when is_list(target_list) do
target_list =
target_list
|> Enum.reject(&Sanbase.Alert.Trigger.has_cooldown?(trigger, &1))
%{list: target_list, type: type}
end
end
|
lib/sanbase/alerts/trigger/trigger.ex
| 0.884458
| 0.456289
|
trigger.ex
|
starcoder
|
defmodule AWS.ECS do
@moduledoc """
Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast,
container management service that makes it easy to run, stop, and manage
Docker containers on a cluster of EC2 instances. Amazon ECS lets you launch
and stop container-enabled applications with simple API calls, allows you
to get the state of your cluster from a centralized service, and gives you
access to many familiar Amazon EC2 features like security groups, Amazon
EBS volumes, and IAM roles.
You can use Amazon ECS to schedule the placement of containers across your
cluster based on your resource needs, isolation policies, and availability
requirements. Amazon EC2 Container Service eliminates the need for you to
operate your own cluster management and configuration management systems or
worry about scaling your management infrastructure.
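
## Example

A hypothetical sketch; `client` is assumed to be a configured `AWS.Client`
struct (credentials and region), and the response shape depends on the
`request/4` helper (not shown here):

AWS.ECS.list_clusters(client, %{})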
"""
@doc """
Creates a new Amazon ECS cluster. By default, your account receives a
`default` cluster when you launch your first container instance. However,
you can create your own cluster with a unique name with the `CreateCluster`
action.
"""
def create_cluster(client, input, options \\ []) do
request(client, "CreateCluster", input, options)
end
@doc """
Runs and maintains a desired number of tasks from a specified task
definition. If the number of tasks running in a service drops below
`desiredCount`, Amazon ECS spawns another copy of the task in the specified
cluster. To update an existing service, see `UpdateService`.
In addition to maintaining the desired count of tasks in your service, you
can optionally run your service behind a load balancer. The load balancer
distributes traffic across the tasks that are associated with the service.
For more information, see [Service Load
Balancing](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html)
in the *Amazon EC2 Container Service Developer Guide*.
You can optionally specify a deployment configuration for your service.
During a deployment (which is triggered by changing the task definition or
the desired count of a service with an `UpdateService` operation), the
service scheduler uses the `minimumHealthyPercent` and `maximumPercent`
parameters to determine the deployment strategy.
The `minimumHealthyPercent` represents a lower limit on the number of your
service's tasks that must remain in the `RUNNING` state during a
deployment, as a percentage of the `desiredCount` (rounded up to the
nearest integer). This parameter enables you to deploy without using
additional cluster capacity. For example, if `desiredCount` is four tasks
and the minimum is 50%, the scheduler can stop two existing tasks to free
up cluster capacity before starting two new tasks. Tasks for services that
do not use a load balancer are considered healthy if they are in the
`RUNNING` state. Tasks for services that use a load balancer are considered
healthy if they are in the `RUNNING` state and the container instance they
are hosted on is reported as healthy by the load balancer. The default
value is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the
APIs.
The `maximumPercent` parameter represents an upper limit on the number of
your service's tasks that are allowed in the `RUNNING` or `PENDING` state
during a deployment, as a percentage of the `desiredCount` (rounded down to
the nearest integer). This parameter enables you to define the deployment
batch size. For example, if `desiredCount` is four tasks and the maximum is
200%, the scheduler can start four new tasks before stopping the four older
tasks (provided that the cluster resources required to do this are
available). The default value is 200%.
When the service scheduler launches new tasks, it determines task placement
in your cluster using the following logic:
* Determine which of the container instances in your cluster can support
your service's task definition (for example, they have the required CPU,
memory, ports, and container instance attributes).
* By default, the service scheduler attempts to balance tasks across
Availability Zones in this manner (although you can choose a different
placement strategy):
  * Sort the valid container instances by the fewest number of running
  tasks for this service in the same Availability Zone as the instance. For
  example, if zone A has one running service task and zones B and C each
  have zero, valid container instances in either zone B or C are considered
  optimal for placement.
  * Place the new service task on a valid container instance in an optimal
  Availability Zone (based on the previous steps), favoring container
  instances with the fewest number of running tasks for this service.
"""
def create_service(client, input, options \\ []) do
request(client, "CreateService", input, options)
end
@doc """
Deletes one or more custom attributes from an Amazon ECS resource.
"""
def delete_attributes(client, input, options \\ []) do
request(client, "DeleteAttributes", input, options)
end
@doc """
Deletes the specified cluster. You must deregister all container instances
from this cluster before you may delete it. You can list the container
instances in a cluster with `ListContainerInstances` and deregister them
with `DeregisterContainerInstance`.
"""
def delete_cluster(client, input, options \\ []) do
request(client, "DeleteCluster", input, options)
end
@doc """
Deletes a specified service within a cluster. You can delete a service if
you have no running tasks in it and the desired task count is zero. If the
service is actively maintaining tasks, you cannot delete it, and you must
update the service to a desired task count of zero. For more information,
see `UpdateService`.
Note: when you delete a service, if there are still running tasks that
require cleanup, the service status moves from `ACTIVE` to `DRAINING`, and
the service is no longer visible in the console or in `ListServices` API
operations. After the tasks have stopped, the service status moves from
`DRAINING` to `INACTIVE`. Services in the `DRAINING` or `INACTIVE` status
can still be viewed with `DescribeServices` API operations; however, in the
future, `INACTIVE` services may be cleaned up and purged from Amazon ECS
record keeping, and `DescribeServices` API operations on those services
will return a `ServiceNotFoundException` error.
"""
def delete_service(client, input, options \\ []) do
request(client, "DeleteService", input, options)
end
@doc """
Deregisters an Amazon ECS container instance from the specified cluster.
This instance is no longer available to run tasks.
If you intend to use the container instance for some other purpose after
deregistration, you should stop all of the tasks running on the container
instance before deregistration to avoid any orphaned tasks from consuming
resources.
Deregistering a container instance removes the instance from a cluster, but
it does not terminate the EC2 instance; if you are finished using the
instance, be sure to terminate it in the Amazon EC2 console to stop
billing.
Note: if you terminate a running container instance, Amazon ECS
automatically deregisters the instance from your cluster (stopped container
instances or instances with disconnected agents are not automatically
deregistered when terminated).
"""
def deregister_container_instance(client, input, options \\ []) do
request(client, "DeregisterContainerInstance", input, options)
end
@doc """
Deregisters the specified task definition by family and revision. Upon
deregistration, the task definition is marked as `INACTIVE`. Existing tasks
and services that reference an `INACTIVE` task definition continue to run
without disruption. Existing services that reference an `INACTIVE` task
definition can still scale up or down by modifying the service's desired
count.
You cannot use an `INACTIVE` task definition to run new tasks or create new
services, and you cannot update an existing service to reference an
`INACTIVE` task definition (although there may be up to a 10 minute window
following deregistration where these restrictions have not yet taken
effect).
"""
def deregister_task_definition(client, input, options \\ []) do
request(client, "DeregisterTaskDefinition", input, options)
end
@doc """
Describes one or more of your clusters.
"""
def describe_clusters(client, input, options \\ []) do
request(client, "DescribeClusters", input, options)
end
@doc """
Describes Amazon EC2 Container Service container instances. Returns
metadata about registered and remaining resources on each container
instance requested.
"""
def describe_container_instances(client, input, options \\ []) do
request(client, "DescribeContainerInstances", input, options)
end
@doc """
Describes the specified services running in your cluster.
"""
def describe_services(client, input, options \\ []) do
request(client, "DescribeServices", input, options)
end
@doc """
Describes a task definition. You can specify a `family` and `revision` to
find information about a specific task definition, or you can simply
specify the family to find the latest `ACTIVE` revision in that family.
<note> You can only describe `INACTIVE` task definitions while an active
task or service references them.
</note>
"""
def describe_task_definition(client, input, options \\ []) do
request(client, "DescribeTaskDefinition", input, options)
end
@doc """
Describes a specified task or tasks.
"""
def describe_tasks(client, input, options \\ []) do
request(client, "DescribeTasks", input, options)
end
@doc """
<note> This action is only used by the Amazon EC2 Container Service agent,
and it is not intended for use outside of the agent.
</note> Returns an endpoint for the Amazon EC2 Container Service agent to
poll for updates.
"""
def discover_poll_endpoint(client, input, options \\ []) do
request(client, "DiscoverPollEndpoint", input, options)
end
@doc """
Lists the attributes for Amazon ECS resources within a specified target
type and cluster. When you specify a target type and cluster,
`ListAttributes` returns a list of attribute objects, one for each attribute
on each resource. You can filter the list of results to a single attribute
name to only return results that have that name. You can also filter the
results by attribute name and value, for example, to see which container
instances in a cluster are running a Linux AMI (`ecs.os-type=linux`).
"""
def list_attributes(client, input, options \\ []) do
request(client, "ListAttributes", input, options)
end
@doc """
Returns a list of existing clusters.
"""
def list_clusters(client, input, options \\ []) do
request(client, "ListClusters", input, options)
end
@doc """
Returns a list of container instances in a specified cluster. You can
filter the results of a `ListContainerInstances` operation with cluster
query language statements inside the `filter` parameter. For more
information, see [Cluster Query
Language](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
in the *Amazon EC2 Container Service Developer Guide*.
"""
def list_container_instances(client, input, options \\ []) do
request(client, "ListContainerInstances", input, options)
end
@doc """
Lists the services that are running in a specified cluster.
"""
def list_services(client, input, options \\ []) do
request(client, "ListServices", input, options)
end
@doc """
Returns a list of task definition families that are registered to your
account (which may include task definition families that no longer have any
`ACTIVE` task definition revisions).
You can filter out task definition families that do not contain any
`ACTIVE` task definition revisions by setting the `status` parameter to
`ACTIVE`. You can also filter the results with the `familyPrefix`
parameter.
"""
def list_task_definition_families(client, input, options \\ []) do
request(client, "ListTaskDefinitionFamilies", input, options)
end
@doc """
Returns a list of task definitions that are registered to your account. You
can filter the results by family name with the `familyPrefix` parameter or
by status with the `status` parameter.
"""
def list_task_definitions(client, input, options \\ []) do
request(client, "ListTaskDefinitions", input, options)
end
@doc """
Returns a list of tasks for a specified cluster. You can filter the results
by family name, by a particular container instance, or by the desired
status of the task with the `family`, `containerInstance`, and
`desiredStatus` parameters.
Recently-stopped tasks might appear in the returned results. Currently,
stopped tasks appear in the returned results for at least one hour.
"""
def list_tasks(client, input, options \\ []) do
request(client, "ListTasks", input, options)
end
@doc """
Create or update an attribute on an Amazon ECS resource. If the attribute
does not exist, it is created. If the attribute exists, its value is
replaced with the specified value. To delete an attribute, use
`DeleteAttributes`. For more information, see
[Attributes](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes)
in the *Amazon EC2 Container Service Developer Guide*.
"""
def put_attributes(client, input, options \\ []) do
request(client, "PutAttributes", input, options)
end
@doc """
<note> This action is only used by the Amazon EC2 Container Service agent,
and it is not intended for use outside of the agent.
</note> Registers an EC2 instance into the specified cluster. This instance
becomes available to place containers on.
"""
def register_container_instance(client, input, options \\ []) do
request(client, "RegisterContainerInstance", input, options)
end
@doc """
Registers a new task definition from the supplied `family` and
`containerDefinitions`. Optionally, you can add data volumes to your
containers with the `volumes` parameter. For more information about task
definition parameters and defaults, see [Amazon ECS Task
Definitions](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html)
in the *Amazon EC2 Container Service Developer Guide*.
You can specify an IAM role for your task with the `taskRoleArn` parameter.
When you specify an IAM role for a task, its containers can then use the
latest versions of the AWS CLI or SDKs to make API requests to the AWS
services that are specified in the IAM policy associated with the role. For
more information, see [IAM Roles for
Tasks](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
in the *Amazon EC2 Container Service Developer Guide*.
You can specify a Docker networking mode for the containers in your task
definition with the `networkMode` parameter. The available network modes
correspond to those described in [Network
settings](https://docs.docker.com/engine/reference/run/#/network-settings)
in the Docker run reference.
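As a sketch, a minimal `input` map might look as follows (field names
follow the `RegisterTaskDefinition` API; the family and image names are
only illustrative):
input = %{
"family" => "hello-world",
"networkMode" => "bridge",
"containerDefinitions" => [
%{"name" => "app", "image" => "nginx:latest", "memory" => 128, "essential" => true}
]
}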
"""
def register_task_definition(client, input, options \\ []) do
request(client, "RegisterTaskDefinition", input, options)
end
@doc """
Starts a new task using the specified task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how
Amazon ECS places tasks using placement constraints and placement
strategies. For more information, see [Scheduling
Tasks](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html)
in the *Amazon EC2 Container Service Developer Guide*.
Alternatively, you can use `StartTask` to use your own scheduler or place
tasks manually on specific container instances.
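As a sketch, assuming a registered `hello-world` task definition exists:
input = %{"cluster" => "default", "taskDefinition" => "hello-world:1", "count" => 1}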
"""
def run_task(client, input, options \\ []) do
request(client, "RunTask", input, options)
end
@doc """
Starts a new task from the specified task definition on the specified
container instance or instances.
Alternatively, you can use `RunTask` to place tasks for you. For more
information, see [Scheduling
Tasks](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html)
in the *Amazon EC2 Container Service Developer Guide*.
"""
def start_task(client, input, options \\ []) do
request(client, "StartTask", input, options)
end
@doc """
Stops a running task.
When `StopTask` is called on a task, the equivalent of `docker stop` is
issued to the containers running in the task. This results in a `SIGTERM`
and a 30-second timeout, after which `SIGKILL` is sent and the containers
are forcibly stopped. If the container handles the `SIGTERM` gracefully and
exits within 30 seconds from receiving it, no `SIGKILL` is sent.
"""
def stop_task(client, input, options \\ []) do
request(client, "StopTask", input, options)
end
@doc """
<note> This action is only used by the Amazon EC2 Container Service agent,
and it is not intended for use outside of the agent.
</note> Sent to acknowledge that a container changed states.
"""
def submit_container_state_change(client, input, options \\ []) do
request(client, "SubmitContainerStateChange", input, options)
end
@doc """
<note> This action is only used by the Amazon EC2 Container Service agent,
and it is not intended for use outside of the agent.
</note> Sent to acknowledge that a task changed states.
"""
def submit_task_state_change(client, input, options \\ []) do
request(client, "SubmitTaskStateChange", input, options)
end
@doc """
Updates the Amazon ECS container agent on a specified container instance.
Updating the Amazon ECS container agent does not interrupt running tasks or
services on the container instance. The process for updating the agent
differs depending on whether your container instance was launched with the
Amazon ECS-optimized AMI or another operating system.
`UpdateContainerAgent` requires the Amazon ECS-optimized AMI or Amazon
Linux with the `ecs-init` service installed and running. For help updating
the Amazon ECS container agent on other operating systems, see [Manually
Updating the Amazon ECS Container
Agent](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent)
in the *Amazon EC2 Container Service Developer Guide*.
"""
def update_container_agent(client, input, options \\ []) do
request(client, "UpdateContainerAgent", input, options)
end
@doc """
Modifies the status of an Amazon ECS container instance.
You can change the status of a container instance to `DRAINING` to manually
remove an instance from a cluster, for example to perform system updates,
update the Docker daemon, or scale down the cluster size.
When you set a container instance to `DRAINING`, Amazon ECS prevents new
tasks from being scheduled for placement on the container instance and
replacement service tasks are started on other container instances in the
cluster if the resources are available. Service tasks on the container
instance that are in the `PENDING` state are stopped immediately.
Service tasks on the container instance that are in the `RUNNING` state are
stopped and replaced according to the service's deployment configuration
parameters, `minimumHealthyPercent` and `maximumPercent`. Note that you can
change the deployment configuration of your service using `UpdateService`.
<ul> <li> If `minimumHealthyPercent` is below 100%, the scheduler can
ignore `desiredCount` temporarily during task replacement. For example,
if `desiredCount` is four tasks, a minimum of 50% allows the scheduler to stop
two existing tasks before starting two new tasks. If the minimum is 100%,
the service scheduler can't remove existing tasks until the replacement
tasks are considered healthy. Tasks for services that do not use a load
balancer are considered healthy if they are in the `RUNNING` state. Tasks
for services that use a load balancer are considered healthy if they are in
the `RUNNING` state and the container instance they are hosted on is
reported as healthy by the load balancer.
</li> <li> The `maximumPercent` parameter represents an upper limit on the
number of running tasks during task replacement, which enables you to
define the replacement batch size. For example, with a `desiredCount` of four
tasks, a maximum of 200% starts four new tasks before stopping the four
tasks to be drained (provided that the cluster resources required to do
this are available). If the maximum is 100%, then replacement tasks can't
start until the draining tasks have stopped.
</li> </ul> Any `PENDING` or `RUNNING` tasks that do not belong to a
service are not affected; you must wait for them to finish or stop them
manually.
A container instance has completed draining when it has no more `RUNNING`
tasks. You can verify this using `ListTasks`.
When you set a container instance to `ACTIVE`, the Amazon ECS scheduler can
begin scheduling tasks on the instance again.
"""
def update_container_instances_state(client, input, options \\ []) do
request(client, "UpdateContainerInstancesState", input, options)
end
@doc """
Modifies the desired count, deployment configuration, or task definition
used in a service.
You can add to or subtract from the number of instantiations of a task
definition in a service by specifying the cluster that the service is
running in and a new `desiredCount` parameter.
You can use `UpdateService` to modify your task definition and deploy a new
version of your service.
You can also update the deployment configuration of a service. When a
deployment is triggered by updating the task definition of a service, the
service scheduler uses the deployment configuration parameters,
`minimumHealthyPercent` and `maximumPercent`, to determine the deployment
strategy.
<ul> <li> If `minimumHealthyPercent` is below 100%, the scheduler can
ignore `desiredCount` temporarily during a deployment. For example, if
`desiredCount` is four tasks, a minimum of 50% allows the scheduler to stop
two existing tasks before starting two new tasks. Tasks for services that
do not use a load balancer are considered healthy if they are in the
`RUNNING` state. Tasks for services that use a load balancer are considered
healthy if they are in the `RUNNING` state and the container instance they
are hosted on is reported as healthy by the load balancer.
</li> <li> The `maximumPercent` parameter represents an upper limit on the
number of running tasks during a deployment, which enables you to define
the deployment batch size. For example, if `desiredCount` is four tasks, a
maximum of 200% starts four new tasks before stopping the four older tasks
(provided that the cluster resources required to do this are available).
</li> </ul> When `UpdateService` stops a task during a deployment, the
equivalent of `docker stop` is issued to the containers running in the
task. This results in a `SIGTERM` and a 30-second timeout, after which
`SIGKILL` is sent and the containers are forcibly stopped. If the container
handles the `SIGTERM` gracefully and exits within 30 seconds from receiving
it, no `SIGKILL` is sent.
When the service scheduler launches new tasks, it determines task placement
in your cluster with the following logic:
<ul> <li> Determine which of the container instances in your cluster can
support your service's task definition (for example, they have the required
CPU, memory, ports, and container instance attributes).
</li> <li> By default, the service scheduler attempts to balance tasks
across Availability Zones in this manner (although you can choose a
different placement strategy):
<ul> <li> Sort the valid container instances by the fewest number of
running tasks for this service in the same Availability Zone as the
instance. For example, if zone A has one running service task and zones B
and C each have zero, valid container instances in either zone B or C are
considered optimal for placement.
</li> <li> Place the new service task on a valid container instance in an
optimal Availability Zone (based on the previous steps), favoring container
instances with the fewest number of running tasks for this service.
</li> </ul> </li> </ul> When the service scheduler stops running tasks, it
attempts to maintain balance across the Availability Zones in your cluster
using the following logic:
<ul> <li> Sort the container instances by the largest number of running
tasks for this service in the same Availability Zone as the instance. For
example, if zone A has one running service task and zones B and C each have
two, container instances in either zone B or C are considered optimal for
termination.
</li> <li> Stop the task on a container instance in an optimal Availability
Zone (based on the previous steps), favoring container instances with the
largest number of running tasks for this service.
</li> </ul>
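As a sketch, the following `input` lowers a service to two tasks and sets
the deployment batch size discussed above (cluster and service names are
only illustrative):
input = %{
"cluster" => "default",
"service" => "web",
"desiredCount" => 2,
"deploymentConfiguration" => %{
"minimumHealthyPercent" => 50,
"maximumPercent" => 200
}
}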
"""
def update_service(client, input, options \\ []) do
request(client, "UpdateService", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "ecs"}
host = get_host("ecs", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonEC2ContainerServiceV20141113.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
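# Usage sketch: the client map below is an assumption based only on the
# keys this module reads (`region`, `endpoint`, `proto`, `port`) plus
# whatever credentials `AWS.Request.sign_v4/5` expects; adapt it to the
# real client struct defined elsewhere in this library.
#
# client = %{region: "us-east-1", endpoint: "amazonaws.com",
# proto: "https", port: 443,
# access_key_id: "AKIA...", secret_access_key: "..."}
# {:ok, clusters, _response} = list_clusters(client, %{})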
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/ecs.ex
| 0.911979
| 0.620593
|
ecs.ex
|
starcoder
|
defmodule Geo.JSON.Decoder do
@moduledoc false
alias Geo.{
Point,
PointZ,
LineString,
Polygon,
MultiPoint,
MultiLineString,
MultiPolygon,
MultiPolygonZ,
GeometryCollection
}
defmodule DecodeError do
@type t :: %__MODULE__{message: String.t(), value: any}
defexception [:message, :value]
def message(%{message: nil, value: value}) do
"unable to decode value: #{inspect(value)}"
end
def message(%{message: message}) do
message
end
end
@doc """
Takes a map representing GeoJSON and returns a Geometry
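For example, a bare point decodes to a `Geo.Point` struct:
decode!(%{"type" => "Point", "coordinates" => [1.0, 2.0]})
#=> %Geo.Point{coordinates: {1.0, 2.0}, srid: nil, properties: %{}}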
"""
@spec decode!(map()) :: Geo.geometry() | no_return
def decode!(geo_json) do
cond do
Map.has_key?(geo_json, "geometries") ->
crs = Map.get(geo_json, "crs")
geometries =
Enum.map(Map.get(geo_json, "geometries"), fn x ->
do_decode(
Map.get(x, "type"),
Map.get(x, "coordinates"),
Map.get(x, "properties", %{}),
crs
)
end)
%GeometryCollection{
geometries: geometries,
properties: Map.get(geo_json, "properties", %{})
}
Map.has_key?(geo_json, "coordinates") ->
crs = Map.get(geo_json, "crs")
do_decode(
Map.get(geo_json, "type"),
Map.get(geo_json, "coordinates"),
Map.get(geo_json, "properties", %{}),
crs
)
Map.get(geo_json, "type") == "Feature" ->
do_decode(
"Feature",
Map.get(geo_json, "geometry"),
Map.get(geo_json, "properties", %{}),
Map.get(geo_json, "id", "")
)
Map.get(geo_json, "type") == "FeatureCollection" ->
geometries =
Enum.map(Map.get(geo_json, "features"), fn x ->
do_decode(
Map.get(x, "type"),
Map.get(x, "geometry"),
Map.get(x, "properties", %{}),
Map.get(x, "id", "")
)
end)
%GeometryCollection{
geometries: geometries,
properties: %{}
}
true ->
raise DecodeError, value: geo_json
end
end
@doc """
Takes a map representing GeoJSON and returns `{:ok, geometry}`, or
`{:error, %DecodeError{}}` if decoding fails.
"""
@spec decode(map()) :: {:ok, Geo.geometry()} | {:error, DecodeError.t()}
def decode(geo_json) do
{:ok, decode!(geo_json)}
rescue
exception in [DecodeError] ->
{:error, exception}
end
defp do_decode("Point", [x, y, z], properties, crs) do
%PointZ{coordinates: {x, y, z}, srid: get_srid(crs), properties: properties}
end
defp do_decode("Point", [x, y], properties, crs) do
%Point{coordinates: {x, y}, srid: get_srid(crs), properties: properties}
end
defp do_decode("LineString", coordinates, properties, crs) do
coordinates = Enum.map(coordinates, &list_to_tuple(&1))
%LineString{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("Polygon", coordinates, properties, crs) do
coordinates =
Enum.map(coordinates, fn sub_coordinates ->
Enum.map(sub_coordinates, &list_to_tuple(&1))
end)
%Polygon{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("MultiPoint", coordinates, properties, crs) do
coordinates = Enum.map(coordinates, &list_to_tuple(&1))
%MultiPoint{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("MultiLineString", coordinates, properties, crs) do
coordinates =
Enum.map(coordinates, fn sub_coordinates ->
Enum.map(sub_coordinates, &list_to_tuple(&1))
end)
%MultiLineString{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("MultiPolygon", coordinates, properties, crs) do
coordinates =
Enum.map(coordinates, fn sub_coordinates ->
Enum.map(sub_coordinates, fn third_sub_coordinates ->
Enum.map(third_sub_coordinates, &list_to_tuple(&1))
end)
end)
%MultiPolygon{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("MultiPolygonZ", coordinates, properties, crs) do
coordinates =
Enum.map(coordinates, fn sub_coordinates ->
Enum.map(sub_coordinates, fn third_sub_coordinates ->
Enum.map(third_sub_coordinates, &list_to_tuple(&1))
end)
end)
%MultiPolygonZ{coordinates: coordinates, srid: get_srid(crs), properties: properties}
end
defp do_decode("Feature", geometry, properties, _id) do
do_decode(Map.get(geometry, "type"), Map.get(geometry, "coordinates"), properties, nil)
end
defp do_decode(type, [x, y, _z], properties, crs) do
do_decode(type, [x, y], properties, crs)
end
defp do_decode(type, _, _, _) do
raise DecodeError, message: "#{type} is not a valid type"
end
defp list_to_tuple([x, y, _z]), do: {x, y}
defp list_to_tuple([x, y]), do: {x, y}
defp get_srid(%{"type" => "name", "properties" => %{"name" => "EPSG:" <> srid}}) do
{srid, _} = Integer.parse(srid)
srid
end
defp get_srid(%{"type" => "name", "properties" => %{"name" => srid}}) do
srid
end
defp get_srid(nil) do
nil
end
end
|
lib/geo/json/decoder.ex
| 0.858852
| 0.63583
|
decoder.ex
|
starcoder
|
defmodule PlugAttack.Rule do
@moduledoc """
Defines various rules that can be used inside the `PlugAttack.rule/2` macro.
"""
@doc """
The simplest allow rule.
If `value` is truthy the request is allowed; otherwise the next rules are
evaluated.
"""
@spec allow(term) :: PlugAttack.rule()
def allow(value) do
if value do
{:allow, value}
else
nil
end
end
@doc """
The simplest block rule.
If `value` is truthy the request is blocked; otherwise the next rules are
evaluated.
"""
@spec block(term) :: PlugAttack.rule()
def block(value) do
if value do
{:block, value}
else
nil
end
end
@doc """
Implements a request throttling algorithm.
The `key` differentiates throttles: you can use, for example,
`conn.remote_ip` for per-IP throttling, or an email address to limit login
attempts. If the `key` is falsey the throttling is not performed and the
next rules are evaluated.
Be careful not to use the same `key` for different rules that use the same
storage.
Passes `{:throttle, data}` as the data in both allow and block tuples,
where `data` is a keyword list containing: `:period`, `:limit`,
`:expires_at` - when the current limit expires, as unix time in
milliseconds - and `:remaining` - the remaining limit. This can be useful
for adding "X-RateLimit-*" headers.
## Options
* `:storage` - required, a tuple of `PlugAttack.Storage` implementation
and storage options.
* `:limit` - required, how many requests in a period are allowed.
* `:period` - required, how long, in ms, is the period.
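## Example
A sketch using the ETS-backed storage shipped with this library (the
`MyApp.AttackStorage` name is illustrative and must match a started
`PlugAttack.Storage.Ets` process):
throttle(conn.remote_ip,
storage: {PlugAttack.Storage.Ets, MyApp.AttackStorage},
limit: 10,
period: 60_000)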
"""
@spec throttle(term, Keyword.t()) :: PlugAttack.rule()
def throttle(key, opts) do
if key do
do_throttle(key, opts)
else
nil
end
end
defp do_throttle(key, opts) do
storage = Keyword.fetch!(opts, :storage)
limit = Keyword.fetch!(opts, :limit)
period = Keyword.fetch!(opts, :period)
now = System.system_time(:millisecond)
expires_at = expires_at(now, period)
count = do_throttle(storage, key, now, period, expires_at)
rem = limit - count
data = [period: period, expires_at: expires_at, limit: limit, remaining: max(rem, 0)]
{if(rem >= 0, do: :allow, else: :block), {:throttle, data}}
end
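# End of the current fixed window, e.g. expires_at(1_500, 1_000) == 2_000.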
defp expires_at(now, period), do: (div(now, period) + 1) * period
defp do_throttle({mod, opts}, key, now, period, expires_at) do
full_key = {:throttle, key, div(now, period)}
mod.increment(opts, full_key, 1, expires_at)
end
@doc """
Implements an algorithm inspired by fail2ban.
This is intended to catch misbehaving clients early and block them for
longer periods of time. The `key` differentiates clients: you can use, for
example, `conn.remote_ip` for per-IP tracking. If the `key` is falsey the
rule is skipped and the next rules are evaluated.
Be careful not to use the same `key` for different rules that use the same
storage.
Passes `{:fail2ban, :banned, key}` as the data in the block tuple once a
client is banned, and `{:fail2ban, :counting, key}` in allow tuples while
still counting. Abusive requests are tracked for `:period` time. If more
than `:limit` abusive requests are detected within the `:period`, the
client is banned for `:ban_for`.
## Options
* `:storage` - required, a tuple of `PlugAttack.Storage` implementation
and storage options.
* `:period` - required, how long to store abusive requests for counting
towards `:limit` exhaustion.
* `:limit` - required, max abusive requests allowed before the ban.
* `:ban_for` - required, length of the ban in milliseconds.
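## Example
A sketch mirroring the throttle example (the storage name is illustrative):
fail2ban(conn.remote_ip,
storage: {PlugAttack.Storage.Ets, MyApp.AttackStorage},
limit: 10,
period: 60_000,
ban_for: 3_600_000)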
"""
@spec fail2ban(term, Keyword.t()) :: PlugAttack.rule()
def fail2ban(key, opts) do
if key do
do_fail2ban(key, opts)
else
nil
end
end
defp do_fail2ban(key, opts) do
storage = Keyword.fetch!(opts, :storage)
limit = Keyword.fetch!(opts, :limit)
period = Keyword.fetch!(opts, :period)
ban_for = Keyword.fetch!(opts, :ban_for)
now = System.system_time(:millisecond)
if banned?(key, storage, now) do
{:block, {:fail2ban, :banned, key}}
else
track_fail2ban(key, storage, limit, period, ban_for, now)
end
end
defp banned?(key, {mod, opts}, now) do
mod.read(opts, {:fail2ban_banned, key}, now) == {:ok, true}
end
defp track_fail2ban(key, {mod, opts}, limit, period, ban_for, now) do
mod.write_sliding_counter(opts, {:fail2ban, key}, now, now + period)
if mod.read_sliding_counter(opts, {:fail2ban, key}, now) >= limit do
mod.write(opts, {:fail2ban_banned, key}, true, now + ban_for)
end
{:allow, {:fail2ban, :counting, key}}
end
end
|
lib/rule.ex
| 0.884377
| 0.628194
|
rule.ex
|
starcoder
|
defmodule Commanded.EventStore do
@moduledoc """
Defines the behaviour to be implemented by an event store adapter to be used by Commanded.
"""
alias Commanded.EventStore.{EventData, RecordedEvent, SnapshotData}
@type stream_uuid :: String.t()
@type start_from :: :origin | :current | integer
@type expected_version :: :any_version | :no_stream | :stream_exists | non_neg_integer
@type subscription_name :: String.t()
@type subscription :: any
@type subscriber :: pid
@type source_uuid :: String.t()
@type snapshot :: SnapshotData.t()
@type error :: term
@doc """
Return a child spec defining all processes required by the event store.
"""
@callback child_spec() :: [:supervisor.child_spec()]
@doc """
Append one or more events to a stream atomically.
"""
@callback append_to_stream(
stream_uuid,
expected_version,
events :: list(EventData.t())
) ::
:ok
| {:error, :wrong_expected_version}
| {:error, error}
@doc """
Streams events from the given stream, in the order in which they were
originally written.
"""
@callback stream_forward(
stream_uuid,
start_version :: non_neg_integer,
read_batch_size :: non_neg_integer
) ::
Enumerable.t()
| {:error, :stream_not_found}
| {:error, error}
@doc """
Create a transient subscription to a single event stream.
The event store will publish any events appended to the given stream to the
`subscriber` process as an `{:events, events}` message.
The subscriber does not need to acknowledge receipt of the events.
"""
@callback subscribe(stream_uuid | :all) :: :ok | {:error, error}
@doc """
Create a persistent subscription to an event stream.
To subscribe to all events appended to any stream use `:all` as the stream
when subscribing.
The event store will remember the subscriber's last acknowledged event.
Restarting the named subscription will resume from the next event following
the last acknowledged one.
Once subscribed, the subscriber process should be sent a
`{:subscribed, subscription}` message to allow it to defer initialisation
until the subscription has started.
The subscriber process will be sent all events persisted to the subscribed
stream, or to any stream when subscribed to `:all`. It will receive a
`{:events, events}` message for each batch of events persisted for a single
aggregate.
The subscriber must acknowledge each received and successfully processed
event using `Commanded.EventStore.ack_event/2`.
## Examples
Subscribe to all streams:
{:ok, subscription} = Commanded.EventStore.subscribe_to(:all, "Example", self(), :origin)
Subscribe to a single stream:
{:ok, subscription} = Commanded.EventStore.subscribe_to("stream1234", "Example", self())
"""
@callback subscribe_to(stream_uuid | :all, subscription_name, subscriber, start_from) ::
{:ok, subscription}
| {:error, :subscription_already_exists}
| {:error, error}
@doc """
Acknowledge receipt and successful processing of the given event received from
a subscription to an event stream.
"""
@callback ack_event(pid, RecordedEvent.t()) :: :ok
@doc """
Unsubscribe an existing subscriber from event notifications.
"""
@callback unsubscribe(subscription) :: :ok
@doc """
Read a snapshot, if available, for a given source.
"""
@callback read_snapshot(source_uuid) :: {:ok, snapshot} | {:error, :snapshot_not_found}
@doc """
Record a snapshot of the data and metadata for a given source.
"""
@callback record_snapshot(snapshot) :: :ok | {:error, error}
@doc """
Delete a previously recorded snapshot for a given source.
"""
@callback delete_snapshot(source_uuid) :: :ok | {:error, error}
@spec child_spec() :: [:supervisor.child_spec()]
def child_spec do
event_store_adapter().child_spec()
end
@doc """
Append one or more events to a stream atomically.
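## Example
A sketch, assuming `events` is a non-empty list of
`Commanded.EventStore.EventData` structs and the stream does not exist yet:
:ok = Commanded.EventStore.append_to_stream("stream-1234", :no_stream, events)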
"""
@spec append_to_stream(
stream_uuid,
expected_version,
events :: list(EventData.t())
) :: :ok | {:error, :wrong_expected_version} | {:error, error}
def append_to_stream(stream_uuid, expected_version, events)
when is_binary(stream_uuid) and
(is_integer(expected_version) or
expected_version in [:any_version, :no_stream, :stream_exists]) and
is_list(events) do
event_store_adapter().append_to_stream(stream_uuid, expected_version, events)
end
@doc """
Streams events from the given stream, in the order in which they were
originally written.
"""
@spec stream_forward(
stream_uuid,
start_version :: non_neg_integer,
read_batch_size :: non_neg_integer
) :: Enumerable.t() | {:error, :stream_not_found} | {:error, error}
def stream_forward(stream_uuid, start_version \\ 0, read_batch_size \\ 1_000)
def stream_forward(stream_uuid, start_version, read_batch_size)
when is_binary(stream_uuid) and is_integer(start_version) and is_integer(read_batch_size) do
event_store_adapter().stream_forward(stream_uuid, start_version, read_batch_size)
end
@doc """
Create a transient subscription to a single event stream.
"""
@spec subscribe(stream_uuid | :all) :: :ok | {:error, error}
def subscribe(stream_uuid) when stream_uuid == :all or is_binary(stream_uuid) do
event_store_adapter().subscribe(stream_uuid)
end
@doc """
Create a persistent subscription to an event stream.
"""
@spec subscribe_to(stream_uuid | :all, subscription_name, subscriber, start_from) ::
{:ok, subscription}
| {:error, :subscription_already_exists}
| {:error, error}
def subscribe_to(stream_uuid, subscription_name, subscriber, start_from)
when is_binary(subscription_name) and is_pid(subscriber) do
event_store_adapter().subscribe_to(stream_uuid, subscription_name, subscriber, start_from)
end
@doc """
Acknowledge receipt and successful processing of the given event received from
a subscription to an event stream.
"""
@spec ack_event(pid, RecordedEvent.t()) :: :ok
def ack_event(pid, %RecordedEvent{} = event) when is_pid(pid) do
event_store_adapter().ack_event(pid, event)
end
@doc """
Unsubscribe an existing subscriber from all event notifications.
"""
@spec unsubscribe(subscription) :: :ok
def unsubscribe(subscription) do
event_store_adapter().unsubscribe(subscription)
end
@doc """
Read a snapshot, if available, for a given source.
"""
@spec read_snapshot(source_uuid) :: {:ok, snapshot} | {:error, :snapshot_not_found}
def read_snapshot(source_uuid) when is_binary(source_uuid) do
event_store_adapter().read_snapshot(source_uuid)
end
@doc """
Record a snapshot of the data and metadata for a given source.
"""
@spec record_snapshot(snapshot) :: :ok | {:error, error}
def record_snapshot(%SnapshotData{} = snapshot) do
event_store_adapter().record_snapshot(snapshot)
end
@doc """
Delete a previously recorded snapshot for a given source.
"""
@spec delete_snapshot(source_uuid) :: :ok | {:error, error}
def delete_snapshot(source_uuid) when is_binary(source_uuid) do
event_store_adapter().delete_snapshot(source_uuid)
end
@doc """
Get the configured event store adapter.
"""
def event_store_adapter do
Application.get_env(:commanded, :event_store_adapter) ||
raise ArgumentError,
"Commanded expects `:event_store_adapter` to be configured in environment"
end
end
|
lib/commanded/event_store/event_store.ex
| 0.916335
| 0.513425
|
event_store.ex
|
starcoder
|
defmodule Cldr.Timezone do
@moduledoc """
Functions to map between the CLDR short time zone code and the
IANA timezone names.
The Unicode [locale](https://unicode.org/reports/tr35/#Locale)
[extension U](https://unicode.org/reports/tr35/#u_Extension)
allows the specification of the time zone requested for the provided locale.
These short time zone codes never change, even if the IANA names change
over time, so they are stable across CLDR releases.
"""
@timezones_file "cldr/timezones.json"
@timezones Path.join(:code.priv_dir(Cldr.Config.app_name()), @timezones_file)
|> File.read!()
|> Cldr.Config.json_library().decode!
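# Decoded once at compile time: a map from short codes such as "ausyd"
# to lists of IANA time zone names.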
@doc """
Returns a mapping of CLDR short zone codes to
IANA timezone names.
"""
@spec timezones() :: map()
def timezones do
@timezones
end
@doc """
Returns a list of IANA time zone names for
a given CLDR short zone code, or a default (`nil` if not provided).
### Examples
iex> Cldr.Timezone.get("ausyd")
["Australia/Sydney", "Australia/ACT", "Australia/Canberra", "Australia/NSW"]
iex> Cldr.Timezone.get("nope")
nil
"""
@spec get(String.t(), String.t() | nil) :: [String.t()] | nil
def get(short_zone, default \\ nil) do
Map.get(timezones(), short_zone, default)
end
@doc """
Returns `{:ok, list}` where `list` is a
list of IANA time zone names for
a given CLDR short zone code. If no such
short code exists, `:error` is returned.
### Examples
iex> Cldr.Timezone.fetch("ausyd")
{:ok,
["Australia/Sydney", "Australia/ACT", "Australia/Canberra", "Australia/NSW"]}
iex> Cldr.Timezone.fetch("nope")
:error
"""
@spec fetch(String.t()) :: {:ok, [String.t()]} | :error
def fetch(short_zone) do
Map.fetch(timezones(), short_zone)
end
@doc false
@spec validate_timezone(String.t()) :: {:ok, String.t()} | {:error, String.t()}
def validate_timezone(short_zone) do
case fetch(short_zone) do
{:ok, [first_zone | _others]} ->
{:ok, first_zone}
:error ->
{:error, short_zone}
end
end
end
|
lib/cldr/timezone.ex
| 0.857127
| 0.494263
|
timezone.ex
|
starcoder
|
defmodule Braintree.Merchant.Account do
@moduledoc """
Represents a merchant account in a marketplace.
For additional reference, see:
https://developers.braintreepayments.com/reference/response/merchant-account/ruby
"""
use Braintree.Construction
alias Braintree.HTTP
alias Braintree.ErrorResponse, as: Error
alias Braintree.Merchant.{Individual, Business, Funding}
@type t :: %__MODULE__{
individual: Individual.t(),
business: Business.t(),
funding: Funding.t(),
id: String.t(),
master_merchant_account: String.t(),
status: String.t(),
currency_iso_code: String.t(),
default: boolean
}
defstruct individual: %Individual{},
business: %Business{},
funding: %Funding{},
id: nil,
master_merchant_account: nil,
status: nil,
currency_iso_code: nil,
default: false
@doc """
Create a merchant account, or return an error response after failed validation.
## Example
{:ok, merchant} = Braintree.Merchant.Account.create(%{
tos_accepted: true
})
"""
@spec create(map, Keyword.t()) :: {:ok, t} | {:error, Error.t()}
def create(params \\ %{}, opts \\ []) do
with {:ok, payload} <-
HTTP.post("merchant_accounts/create_via_api", %{merchant_account: params}, opts) do
{:ok, new(payload)}
end
end
@doc """
To update a merchant, use its ID along with new attributes.
The same validations apply as when creating a merchant.
Any attribute not passed will remain unchanged.
## Example
{:ok, merchant} = Braintree.Merchant.Account.update("merchant_id", %{
funding_details: %{account_number: "1234567890"}
})
merchant.funding_details.account_number # "1234567890"
"""
@spec update(binary, map, Keyword.t()) :: {:ok, t} | {:error, Error.t()}
def update(id, params, opts \\ []) when is_binary(id) do
with {:ok, payload} <-
HTTP.put("merchant_accounts/#{id}/update_via_api", %{merchant_account: params}, opts) do
{:ok, new(payload)}
end
end
@doc """
To look up a single merchant account by ID, use the find method.
## Example
{:ok, merchant} = Braintree.Merchant.Account.find("merchant_id")
"""
@spec find(binary, Keyword.t()) :: {:ok, t} | {:error, Error.t()}
def find(id, opts \\ []) when is_binary(id) do
with {:ok, payload} <- HTTP.get("merchant_accounts/" <> id, opts) do
{:ok, new(payload)}
end
end
@doc """
Convert a map into a `Braintree.Merchant.Account` struct.
"""
def new(%{"merchant_account" => map}), do: super(map)
end
|
lib/merchant/account.ex
| 0.896457
| 0.460956
|
account.ex
|
starcoder
|
defmodule MailgunLogger.Event do
@moduledoc """
Event data to store in the database.
Mailgun API output:
```
%{
"campaigns" => [],
"envelope" => %{
"sender" => "<EMAIL>",
"targets" => "<EMAIL>",
"transport" => "smtp"
},
"event" => "accepted",
"flags" => %{
"is-authenticated" => true,
"is-routed" => false,
"is-system-test" => false,
"is-test-mode" => false
},
"id" => "zAt3zfKeSfq3Sl999wp8JA",
"log-level" => "info",
"message" => %{
"attachments" => [],
"headers" => %{
"from" => "John <<EMAIL>>",
"message-id" => "<EMAIL>",
"subject" => "You got mail!",
"to" => "Will.I.Am <<EMAIL>>"
},
"size" => 6905
},
"method" => "http",
"recipient" => "<EMAIL>",
"recipient-domain" => "iam.com",
"storage" => %{
"key" => "<KEY>
"url" => "https://sw.api.mailgun.net/v3/domains/mail.com/messages/AgEFZcDIQLKCWVMy10S1HibnaMxcdzEZA=="
},
"tags" => [],
"timestamp" => 1535296460.552516,
"user-variables" => %{}
}
```
"""
use Ecto.Schema
import Ecto.Changeset
alias MailgunLogger.Event
alias MailgunLogger.Account
@type t :: %__MODULE__{
api_id: String.t(),
event: String.t(),
log_level: String.t(),
method: String.t(),
recipient: String.t(),
timestamp: NaiveDateTime.t(),
message_from: String.t(),
message_subject: String.t(),
message_id: String.t(),
message_to: String.t(),
stored_message: map() | nil,
delivery_attempt: integer,
raw: map(),
linked_events: [Event.t()],
account: Ecto.Association.NotLoaded.t() | Account.t(),
inserted_at: NaiveDateTime.t(),
updated_at: NaiveDateTime.t()
}
schema "events" do
field(:api_id, :string)
field(:event, :string)
field(:log_level, :string)
field(:method, :string)
field(:recipient, :string)
field(:timestamp, :naive_datetime)
field(:message_from, :string)
field(:message_subject, :string)
field(:message_id, :string)
field(:message_to, :string)
field(:stored_message, :map)
field(:delivery_attempt, :integer)
field(:raw, :map, default: %{})
field(:linked_events, {:array, :map}, virtual: true)
belongs_to(:account, Account)
timestamps()
end
def changeset(%__MODULE__{} = event, attrs \\ %{}) do
event
|> cast(
attrs,
~w(account_id api_id event log_level method recipient message_from message_subject message_id message_to timestamp delivery_attempt raw)a
)
|> unique_constraint(:api_id)
end
def changeset_stored_message(%__MODULE__{} = event, stored_message) do
change(event, %{stored_message: stored_message})
end
end
|
lib/mailgun_logger/events/event.ex
| 0.651798
| 0.441011
|
event.ex
|
starcoder
|
defmodule WebSockex.Frame do
@moduledoc """
Functions for parsing and encoding frames.
"""
import Bitwise
@type opcode :: :text | :binary | :close | :ping | :pong
@type close_code :: 1000..4999
@typedoc "The incomplete or unhandled remainder of a binary"
@type buffer :: bitstring
@typedoc "This is required to be valid UTF-8"
@type utf8 :: binary
@type frame :: :ping | :pong | :close | {:ping, binary} | {:pong, binary} |
{:close, close_code, utf8} | {:text, utf8} | {:binary, binary} |
{:fragment, :text | :binary, binary} | {:continuation, binary} |
{:finish, binary}
@opcodes %{text: 1,
binary: 2,
close: 8,
ping: 9,
pong: 10}
@doc """
Parses a bitstring and returns a frame.
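A sketch with an unmasked, final text frame carrying "hi" (fin 1, rsv 0,
opcode 1, mask bit 0, 7-bit length 2):
parse_frame(<<1::1, 0::3, 1::4, 0::1, 2::7, "hi">>)
#=> {:ok, {:text, "hi"}, ""}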
"""
@spec parse_frame(bitstring) ::
:incomplete | {:ok, frame, buffer} | {:error, %WebSockex.FrameError{}}
def parse_frame(data) when bit_size(data) < 16 do
:incomplete
end
for {key, opcode} <- Map.take(@opcodes, [:close, :ping, :pong]) do
# Control Codes can have 0 length payloads
def parse_frame(<<1::1, 0::3, unquote(opcode)::4, 0::1, 0::7, buffer::bitstring>>) do
{:ok, unquote(key), buffer}
end
# Large Control Frames
def parse_frame(<<1::1, 0::3, unquote(opcode)::4, 0::1, 126::7, _::bitstring>> = buffer) do
{:error, %WebSockex.FrameError{reason: :control_frame_too_large,
opcode: unquote(key),
buffer: buffer}}
end
def parse_frame(<<1::1, 0::3, unquote(opcode)::4, 0::1, 127::7, _::bitstring>> = buffer) do
{:error, %WebSockex.FrameError{reason: :control_frame_too_large,
opcode: unquote(key),
buffer: buffer}}
end
# Nonfin Control Frames
def parse_frame(<<0::1, 0::3, unquote(opcode)::4, 0::1, _::7, _::bitstring>> = buffer) do
{:error, %WebSockex.FrameError{reason: :nonfin_control_frame,
opcode: unquote(key),
buffer: buffer}}
end
end
# Incomplete Frames
def parse_frame(<<_::9, len::7, remaining::bitstring>>) when byte_size(remaining) < len do
:incomplete
end
for {_key, opcode} <- Map.take(@opcodes, [:text, :binary]) do
def parse_frame(<<_::1, 0::3, unquote(opcode)::4, 0::1, 126::7, len::16, remaining::bitstring>>)
when byte_size(remaining) < len do
:incomplete
end
def parse_frame(<<_::1, 0::3, unquote(opcode)::4, 0::1, 127::7, len::64, remaining::bitstring>>)
when byte_size(remaining) < len do
:incomplete
end
end
# Close Frame with Single Byte
def parse_frame(<<1::1, 0::3, 8::4, 0::1, 1::7, _::bitstring>> = buffer) do
{:error, %WebSockex.FrameError{reason: :close_with_single_byte_payload,
opcode: :close,
buffer: buffer}}
end
# Parse Close Frames with Payloads
def parse_frame(<<1::1, 0::3, 8::4, 0::1, len::7, close_code::integer-size(16), remaining::bitstring>> = buffer)
when close_code in 1000..4999 do
size = len - 2
<<payload::bytes-size(size), rest::bitstring>> = remaining
if String.valid?(payload) do
{:ok, {:close, close_code, payload}, rest}
else
{:error, %WebSockex.FrameError{reason: :invalid_utf8,
opcode: :close,
buffer: buffer}}
end
end
def parse_frame(<<1::1, 0::3, 8::4, _::bitstring>> = buffer) do
{:error, %WebSockex.FrameError{reason: :invalid_close_code,
opcode: :close,
buffer: buffer}}
end
# Ping and Pong with Payloads
for {key, opcode} <- Map.take(@opcodes, [:ping, :pong]) do
def parse_frame(<<1::1, 0::3, unquote(opcode)::4, 0::1, len::7, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {unquote(key), payload}, rest}
end
end
# Text Frames (Check Valid UTF-8 Payloads)
def parse_frame(<<1::1, 0::3, 1::4, 0::1, 126::7, len::16, remaining::bitstring>> = buffer) do
parse_text_payload(len, remaining, buffer)
end
def parse_frame(<<1::1, 0::3, 1::4, 0::1, 127::7, len::64, remaining::bitstring>> = buffer) do
parse_text_payload(len, remaining, buffer)
end
def parse_frame(<<1::1, 0::3, 1::4, 0::1, len::7, remaining::bitstring>> = buffer) do
parse_text_payload(len, remaining, buffer)
end
# Binary Frames
def parse_frame(<<1::1, 0::3, 2::4, 0::1, 126::7, len::16, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:binary, payload}, rest}
end
def parse_frame(<<1::1, 0::3, 2::4, 0::1, 127::7, len::64, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:binary, payload}, rest}
end
def parse_frame(<<1::1, 0::3, 2::4, 0::1, len::7, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:binary, payload}, rest}
end
# Start of Fragmented Message
for {key, opcode} <- Map.take(@opcodes, [:text, :binary]) do
def parse_frame(<<0::1, 0::3, unquote(opcode)::4, 0::1, 126::7, len::16, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:fragment, unquote(key), payload}, rest}
end
def parse_frame(<<0::1, 0::3, unquote(opcode)::4, 0::1, 127::7, len::64, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:fragment, unquote(key), payload}, rest}
end
def parse_frame(<<0::1, 0::3, unquote(opcode)::4, 0::1, len::7, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:fragment, unquote(key), payload}, rest}
end
end
# Parse Fragmentation Continuation Frames
def parse_frame(<<0::1, 0::3, 0::4, 0::1, 126::7, len::16, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:continuation, payload}, rest}
end
def parse_frame(<<0::1, 0::3, 0::4, 0::1, 127::7, len::64, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:continuation, payload}, rest}
end
def parse_frame(<<0::1, 0::3, 0::4, 0::1, len::7, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:continuation, payload}, rest}
end
# Parse Fragmentation Finish Frames
def parse_frame(<<1::1, 0::3, 0::4, 0::1, 126::7, len::16, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:finish, payload}, rest}
end
def parse_frame(<<1::1, 0::3, 0::4, 0::1, 127::7, len::64, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:finish, payload}, rest}
end
def parse_frame(<<1::1, 0::3, 0::4, 0::1, len::7, remaining::bitstring>>) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
{:ok, {:finish, payload}, rest}
end
@doc """
Parses and combines two frames in a fragmented segment.
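For example:
parse_fragment({:fragment, :text, "He"}, {:finish, "llo"})
#=> {:ok, {:text, "Hello"}}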
"""
@spec parse_fragment({:fragment, :text | :binary, binary},
{:continuation | :finish, binary}) ::
{:fragment, :text | :binary, binary} | {:text | :binary, binary} |
{:error, %WebSockex.FragmentParseError{}}
def parse_fragment(fragmented_parts, continuation_frame)
def parse_fragment({:fragment, _, _} = frame0, {:fragment, _, _} = frame1) do
{:error,
%WebSockex.FragmentParseError{reason: :two_start_frames,
fragment: frame0,
continuation: frame1}}
end
def parse_fragment({:fragment, type, fragment}, {:continuation, continuation}) do
{:ok, {:fragment, type, <<fragment::binary, continuation::binary>>}}
end
def parse_fragment({:fragment, :binary, fragment}, {:finish, continuation}) do
{:ok, {:binary, <<fragment::binary, continuation::binary>>}}
end
# Make sure text is valid UTF-8
def parse_fragment({:fragment, :text, fragment}, {:finish, continuation}) do
text = <<fragment::binary, continuation::binary>>
if String.valid?(text) do
{:ok, {:text, text}}
else
{:error,
%WebSockex.FrameError{reason: :invalid_utf8,
opcode: :text,
buffer: text}}
end
end
@doc """
Encodes a frame into a binary for sending.
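Client frames are masked with a fresh random key, so only the header bits
are deterministic:
{:ok, <<1::1, 0::3, 1::4, 1::1, 2::7, _mask::bytes-size(4), _masked::bytes-size(2)>>} =
encode_frame({:text, "hi"})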
"""
@spec encode_frame(frame) :: {:ok, binary} | {:error, %WebSockex.FrameEncodeError{}}
def encode_frame(frame)
# Encode Ping and Pong Frames
for {key, opcode} <- Map.take(@opcodes, [:ping, :pong]) do
def encode_frame({unquote(key), <<payload::binary>>}) when byte_size(payload) > 125 do
{:error,
%WebSockex.FrameEncodeError{reason: :control_frame_too_large,
frame_type: unquote(key),
frame_payload: payload}}
end
def encode_frame(unquote(key)) do
mask = create_mask_key()
{:ok, <<1::1, 0::3, unquote(opcode)::4, 1::1, 0::7, mask::bytes-size(4)>>}
end
def encode_frame({unquote(key), <<payload::binary>>}) do
mask = create_mask_key()
len = byte_size(payload)
masked_payload = mask(mask, payload)
{:ok, <<1::1, 0::3, unquote(opcode)::4, 1::1, len::7, mask::bytes-size(4), masked_payload::binary-size(len)>>}
end
end
# Encode Close Frames
def encode_frame({:close, close_code, <<payload::binary>>})
when close_code not in 1000..4999 do
{:error,
%WebSockex.FrameEncodeError{reason: :close_code_out_of_range,
frame_type: :close,
frame_payload: payload,
close_code: close_code}}
end
def encode_frame({:close, close_code, <<payload::binary>>})
when byte_size(payload) > 123 do
{:error,
%WebSockex.FrameEncodeError{reason: :control_frame_too_large,
frame_type: :close,
frame_payload: payload,
close_code: close_code}}
end
def encode_frame(:close) do
mask = create_mask_key()
{:ok, <<1::1, 0::3, 8::4, 1::1, 0::7, mask::bytes-size(4)>>}
end
def encode_frame({:close, close_code, <<payload::binary>>}) do
mask = create_mask_key()
payload = <<close_code::16, payload::binary>>
len = byte_size(payload)
masked_payload = mask(mask, payload)
{:ok, <<1::1, 0::3, 8::4, 1::1, len::7, mask::bytes-size(4), masked_payload::binary>>}
end
# Encode Text and Binary frames
for {key, opcode} <- Map.take(@opcodes, [:text, :binary]) do
def encode_frame({unquote(key), payload}) do
mask = create_mask_key()
{payload_len_bin, payload_len_size} = get_payload_length_bin(payload)
masked_payload = mask(mask, payload)
{:ok, <<1::1, 0::3, unquote(opcode)::4, 1::1, payload_len_bin::bits-size(payload_len_size), mask::bytes-size(4), masked_payload::binary>>}
end
# Start Fragments!
def encode_frame({:fragment, unquote(key), payload}) do
mask = create_mask_key()
{payload_len_bin, payload_len_size} = get_payload_length_bin(payload)
masked_payload = mask(mask, payload)
{:ok, <<0::1, 0::3, unquote(opcode)::4, 1::1, payload_len_bin::bits-size(payload_len_size), mask::bytes-size(4), masked_payload::binary>>}
end
end
# Handle other Fragments
for {key, fin_bit} <- [{:continuation, 0}, {:finish, 1}] do
def encode_frame({unquote(key), payload}) do
mask = create_mask_key()
{payload_len_bin, payload_len_size} = get_payload_length_bin(payload)
masked_payload = mask(mask, payload)
{:ok, <<unquote(fin_bit)::1, 0::3, 0::4, 1::1, payload_len_bin::bits-size(payload_len_size), mask::bytes-size(4), masked_payload::binary>>}
end
end
def encode_frame(frame), do: {:error, %WebSockex.InvalidFrameError{frame: frame}}
defp parse_text_payload(len, remaining, buffer) do
<<payload::bytes-size(len), rest::bitstring>> = remaining
if String.valid?(payload) do
{:ok, {:text, payload}, rest}
else
{:error, %WebSockex.FrameError{reason: :invalid_utf8,
opcode: :text,
buffer: buffer}}
end
end
defp create_mask_key do
:crypto.strong_rand_bytes(4)
end
defp get_payload_length_bin(payload) do
case byte_size(payload) do
size when size <= 125 -> {<<size::7>>, 7}
size when size <= 0xFFFF -> {<<126::7, size::16>>, 16+7}
size when size <= 0x7FFFFFFFFFFFFFFF -> {<<127::7, 0::1, size::63>>, 64+7}
_ -> raise "WTF, Seriously? You're trying to send a payload larger than #{0x7FFFFFFFFFFFFFFF} bytes?"
end
end
defp mask(key, payload, acc \\ <<>>)
defp mask(_, <<>>, acc), do: acc
for x <- 1..3 do
defp mask(<<key::8*unquote(x), _::binary>>, <<part::8*unquote(x)>>, acc) do
masked = part ^^^ key
<<acc::binary, masked::8*unquote(x)>>
end
end
defp mask(<<key::32>> = key_bin, <<part::8*4, rest::binary>>, acc) do
masked = part ^^^ key
mask(key_bin, rest, <<acc::binary, masked::8*4>>)
end
end
|
lib/websockex/frame.ex
| 0.604049
| 0.418935
|
frame.ex
|
starcoder
|
defmodule AdventOfCode.Y2020.Day14 do
@bits 36
def test_data() do
"""
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
"""
|> String.split("\n", trim: true)
end
def test_data2() do
"""
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
"""
|> String.split("\n", trim: true)
end
def run() do
AdventOfCode.Helpers.Data.read_from_file("2020/day14.txt")
|> Enum.map(&parse_row/1)
|> run_machine()
|> sum_result()
end
def run2() do
AdventOfCode.Helpers.Data.read_from_file("2020/day14.txt")
# test_data2()
|> Enum.map(&parse_row/1)
|> run_machine2()
|> sum_result()
end
def sum_result({_, registers}) do
registers
|> Enum.map(fn {_, binary} -> String.to_integer(binary, 2) end)
|> Enum.reduce(&(&1 + &2))
end
def parse_row("mask = " <> mask), do: {:mask, mask}
def parse_row("mem[" <> rest) do
[address, value] =
Regex.run(~r/^(\d+)\] = (\d+)$/, rest, capture: :all_but_first)
|> Enum.map(&String.to_integer/1)
{:mem, address, value}
end
def parse_number(nr) do
nr
|> String.to_integer()
|> dec_to_binary(@bits)
end
def dec_to_binary(number, bits) do
number
|> Integer.to_string(2)
|> String.pad_leading(bits, "0")
end
def run_machine(input) do
input
|> Enum.reduce({String.pad_leading("", @bits, "X"), %{}}, &run_instruction/2)
end
def run_instruction({:mask, mask}, {_, registers}), do: {mask, registers}
def run_instruction({:mem, address, value}, {mask, registers}) do
new_value = set(mask, dec_to_binary(value, @bits))
{mask, Map.put(registers, address, new_value)}
end
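# Part 1 mask semantics: "X" leaves the value bit unchanged, while "0" and
# "1" overwrite it. With the mask in test_data/0, value 11 (...0001011)
# becomes 73 (...1001001): bit 1 is forced to 0 and bit 6 is forced to 1.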
def set(mask, value) do
p = fn s -> s |> String.graphemes() |> Enum.reverse() end
set(p.(mask), p.(value), [])
end
def set([], [], new), do: new |> Enum.join()
def set([m | m_tail], [v | v_tail], new) do
bit = if m == "X", do: v, else: m
set(m_tail, v_tail, [bit | new])
end
# Part 2
def run_machine2(input) do
input
|> Enum.reduce({String.pad_leading("", @bits, "X"), %{}}, &run_instruction2/2)
end
def run_instruction2({:mask, mask}, {_, registers}), do: {mask, registers}
def run_instruction2({:mem, address, value}, {mask, registers}) do
registers =
address(mask, dec_to_binary(address, @bits))
|> generate_addresses()
|> Enum.map(fn x -> String.to_integer(x, 2) end)
|> Enum.reduce(registers, fn address, registers ->
Map.put(registers, address, dec_to_binary(value, @bits))
end)
{mask, registers}
end
def unfloat_address(address) when is_binary(address) do
address
|> String.graphemes()
|> Enum.reverse()
|> unfloat_address([])
end
def unfloat_address([], address), do: [address |> Enum.join()]
def unfloat_address([h | t], address) do
case h do
"X" ->
Enum.concat(unfloat_address(t, ["1" | address]), unfloat_address(t, ["0" | address]))
_ ->
unfloat_address(t, [h | address])
end
end
def generate_addresses(address), do: unfloat_address(address)
def address(mask, address) do
p = fn s -> s |> String.graphemes() |> Enum.reverse() end
address(p.(mask), p.(address), [])
end
def address([], [], address), do: Enum.join(address)
def address([m | m_tail], [a | a_tail], address) do
bit =
case m do
"0" -> a
_ -> m
end
address(m_tail, a_tail, [bit | address])
end
end
|
lib/2020/day14.ex
| 0.583322
| 0.492127
|
day14.ex
|
starcoder
|
defmodule Timex.Time do
@moduledoc """
This module provides a friendly API for working with Erlang
timestamps, i.e. `{megasecs, secs, microsecs}`. In addition,
it provides an easy way to wrap the measurement of function
execution time (via `measure`).
"""
alias Timex.Types
use Timex.Constants
import Timex.Macros
@type units :: :microseconds | :milliseconds | :seconds | :minutes | :hours | :days | :weeks | :hms
@doc """
Converts a timestamp to its value in microseconds
"""
@spec to_microseconds(Types.timestamp) :: integer
def to_microseconds({mega, sec, micro}) do
total_seconds = (mega * @million) + sec
total_microseconds = (total_seconds * 1_000 * 1_000) + micro
total_microseconds
end
defdeprecated to_usecs(timestamp), "use to_microseconds/1 instead", do: to_microseconds(timestamp)
@doc """
Converts a timestamp to its value in milliseconds
"""
@spec to_milliseconds(Types.timestamp) :: float
def to_milliseconds({_, _, _} = ts), do: to_microseconds(ts) / 1_000
defdeprecated to_msecs(timestamp), "use to_milliseconds/1 instead", do: to_milliseconds(timestamp)
@doc """
Converts a timestamp to its value in seconds
"""
@spec to_seconds(Types.timestamp) :: float
def to_seconds({_, _, _} = ts), do: to_milliseconds(ts) / 1_000
defdeprecated to_secs(timestamp), "use to_seconds/1 instead", do: to_seconds(timestamp)
@doc """
Converts a timestamp to its value in minutes
"""
@spec to_minutes(Types.timestamp) :: float
def to_minutes(timestamp), do: to_seconds(timestamp) / 60
defdeprecated to_mins(timestamp), "use to_minutes/1 instead", do: to_minutes(timestamp)
@doc """
Converts a timestamp to its value in hours
"""
@spec to_hours(Types.timestamp) :: float
def to_hours(timestamp), do: to_minutes(timestamp) / 60
@doc """
Converts a timestamp to its value in days
"""
@spec to_days(Types.timestamp) :: float
def to_days(timestamp), do: to_hours(timestamp) / 24
@doc """
Converts a timestamp to its value in weeks
"""
@spec to_weeks(Types.timestamp) :: float
def to_weeks(timestamp), do: to_days(timestamp) / 7
Enum.each [{:microseconds, 1 / @usecs_in_sec, :usecs},
{:milliseconds, 1 / @msecs_in_sec, :msecs},
{:seconds, 1, :secs},
{:minutes, @secs_in_min, :mins},
{:hours, @secs_in_hour, :hours},
{:days, @secs_in_day, :days},
{:weeks, @secs_in_week, :weeks}], fn {type, coef, type_alias} ->
@spec to_microseconds(integer | float, unquote(type)) :: float
def to_microseconds(value, unquote(type)), do: do_round(value * unquote(coef) * @usecs_in_sec)
if type not in [:hours, :days, :weeks] do
def to_microseconds(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_microseconds(value, unquote(type))
end
end
@spec to_milliseconds(integer | float, unquote(type)) :: float
def to_milliseconds(value, unquote(type)), do: do_round(value * unquote(coef) * @msecs_in_sec)
if type not in [:hours, :days, :weeks] do
def to_milliseconds(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_milliseconds(value, unquote(type))
end
end
@spec to_seconds(integer | float, unquote(type)) :: float
def to_seconds(value, unquote(type)), do: do_round(value * unquote(coef))
if type not in [:hours, :days, :weeks] do
def to_seconds(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_seconds(value, unquote(type))
end
end
@spec to_minutes(integer | float, unquote(type)) :: float
def to_minutes(value, unquote(type)), do: do_round(value * unquote(coef) / @secs_in_min)
if type not in [:hours, :days, :weeks] do
def to_minutes(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_minutes(value, unquote(type))
end
end
@spec to_hours(integer | float, unquote(type)) :: float
def to_hours(value, unquote(type)), do: do_round(value * unquote(coef) / @secs_in_hour)
if type not in [:hours, :days, :weeks] do
def to_hours(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_hours(value, unquote(type))
end
end
@spec to_days(integer | float, unquote(type)) :: float
def to_days(value, unquote(type)), do: do_round(value * unquote(coef) / @secs_in_day)
if type not in [:hours, :days, :weeks] do
def to_days(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_days(value, unquote(type))
end
end
@spec to_weeks(integer | float, unquote(type)) :: float
def to_weeks(value, unquote(type)), do: do_round(value * unquote(coef) / @secs_in_week)
if type not in [:hours, :days, :weeks] do
def to_weeks(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
to_weeks(value, unquote(type))
end
end
end
Enum.each [:to_microseconds, :to_milliseconds, :to_seconds, :to_minutes, :to_hours, :to_days, :to_weeks], fn name ->
@spec unquote(name)({integer | float, integer | float, integer | float}, :hms) :: float
def unquote(name)({hours, minutes, seconds}, :hms), do: unquote(name)(hours * @secs_in_hour + minutes * @secs_in_min + seconds, :seconds)
end
defdeprecated to_usecs(value, type), "use to_microseconds/2 instead", do: to_microseconds(value, type)
defdeprecated to_msecs(value, type), "use to_milliseconds/2 instead", do: to_milliseconds(value, type)
defdeprecated to_secs(value, type), "use to_seconds/2 instead", do: to_seconds(value, type)
defdeprecated to_mins(value, type), "use to_minutes/2 instead", do: to_minutes(value, type)
@doc """
Converts an hour between 0..24 to {1..12, :am/:pm}
## Examples
iex> Timex.Time.to_12hour_clock(23)
{11, :pm}
"""
def to_12hour_clock(hour) when hour in 0..24 do
case hour do
hour when hour in [0, 24] -> {12, :am}
hour when hour < 12 -> {hour, :am}
hour when hour === 12 -> {12, :pm}
hour when hour > 12 -> {hour - 12, :pm}
end
end
@doc """
Converts an hour between 1..12 in either am or pm, to value between 0..24
## Examples
iex> Timex.Time.to_24hour_clock(7, :pm)
19
"""
def to_24hour_clock(hour, am_or_pm) when hour in 1..12 and am_or_pm in [:am, :pm] do
case am_or_pm do
:am when hour === 12 -> 0
:am -> hour
:pm when hour === 12 -> hour
:pm -> hour + 12
end
end
@doc """
Converts the given input value and unit to an Erlang timestamp.
## Example
iex> Timex.Time.from(1500, :seconds)
{0, 1500, 0}
"""
@spec from(integer | Types.time, units) :: Types.timestamp
def from(value, :usecs) do
IO.write :stderr, "warning: :usecs is a deprecated unit name, use :microseconds instead\n"
from(value, :microseconds)
end
def from(value, :msecs) do
IO.write :stderr, "warning: :msecs is a deprecated unit name, use :milliseconds instead\n"
from(value, :milliseconds)
end
def from(value, :secs) do
IO.write :stderr, "warning: :secs is a deprecated unit name, use :seconds instead\n"
from(value, :seconds)
end
def from(value, :mins) do
IO.write :stderr, "warning: :mins is a deprecated unit name, use :minutes instead\n"
from(value, :minutes)
end
def from(value, :microseconds) do
value = round(value)
{ sec, micro } = mdivmod(value)
{ mega, sec } = mdivmod(sec)
{ mega, sec, micro }
end
def from(value, :milliseconds), do: from(value * @usecs_in_msec, :microseconds)
def from(value, :seconds), do: from(value * @usecs_in_sec, :microseconds)
def from(value, :minutes), do: from(value * @secs_in_min, :seconds)
def from(value, :hours), do: from(value * @secs_in_hour, :seconds)
def from(value, :days), do: from(value * @secs_in_day, :seconds)
def from(value, :weeks), do: from(value * @secs_in_week, :seconds)
def from(value, :hms), do: from(to_seconds(value, :hms), :seconds)
Enum.each [{:microseconds, :usecs},
{:milliseconds, :msecs},
{:seconds, :secs},
{:minutes, :mins},
:hours, :days, :weeks, :hms], fn
{type, type_alias} ->
def to_timestamp(value, unquote(type)), do: from(value, unquote(type))
def to_timestamp(value, unquote(type_alias)) do
IO.write :stderr, "warning: #{unquote(type_alias)} is a deprecated unit name, use #{unquote(type)} instead\n"
from(value, unquote(type))
end
type ->
def to_timestamp(value, unquote(type)), do: from(value, unquote(type))
end
def add({mega1,sec1,micro1}, {mega2,sec2,micro2}) do
normalize({ mega1+mega2, sec1+sec2, micro1+micro2 })
end
def sub({mega1,sec1,micro1}, {mega2,sec2,micro2}) do
normalize({ mega1-mega2, sec1-sec2, micro1-micro2 })
end
def scale({mega, secs, micro}, coef) do
normalize({ mega*coef, secs*coef, micro*coef })
end
def invert({mega, sec, micro}) do
{ -mega, -sec, -micro }
end
def abs(timestamp={mega, sec, micro}) do
value = cond do
mega != 0 -> mega
sec != 0 -> sec
true -> micro
end
if value < 0 do
invert(timestamp)
else
timestamp
end
end
@doc """
Return a timestamp representing a time lapse of length 0.
Time.convert(Time.zero, :seconds)
#=> 0
Can be useful for operations on collections of timestamps. For instance,
Enum.reduce(timestamps, Time.zero, &Time.add/2)
Can also be used to represent the timestamp of the start of the UNIX epoch,
as all Erlang timestamps are relative to this point.
"""
def zero, do: {0, 0, 0}
@doc """
Convert timestamp in the form { megasecs, seconds, microsecs } to the
specified time units.
## Supported units
- :microseconds
- :milliseconds
- :seconds
- :minutes
- :hours
- :days
- :weeks
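## Example
Time.convert({0, 90, 0}, :minutes)
#=> 1.5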
"""
def convert(timestamp, type \\ :timestamp)
def convert(timestamp, :timestamp), do: timestamp
def convert(timestamp, :microseconds), do: to_microseconds(timestamp)
def convert(timestamp, :milliseconds), do: to_milliseconds(timestamp)
def convert(timestamp, :seconds), do: to_seconds(timestamp)
def convert(timestamp, :minutes), do: to_minutes(timestamp)
def convert(timestamp, :hours), do: to_hours(timestamp)
def convert(timestamp, :days), do: to_days(timestamp)
def convert(timestamp, :weeks), do: to_weeks(timestamp)
def convert(timestamp, :usecs) do
IO.write :stderr, "warning: :usecs is a deprecated unit name, use :microseconds instead\n"
to_microseconds(timestamp)
end
def convert(timestamp, :msecs) do
IO.write :stderr, "warning: :msecs is a deprecated unit name, use :milliseconds instead\n"
to_milliseconds(timestamp)
end
def convert(timestamp, :secs) do
IO.write :stderr, "warning: :secs is a deprecated unit name, use :seconds instead\n"
to_seconds(timestamp)
end
def convert(timestamp, :mins) do
IO.write :stderr, "warning: :mins is a deprecated unit name, use :minutes instead\n"
to_minutes(timestamp)
end
@doc """
Return the time interval from the first day of year 0 to the UNIX epoch.
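## Example
Time.epoch(:seconds)
#=> 62167219200.0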
"""
def epoch(type \\ :timestamp)
def epoch(:timestamp) do
seconds = :calendar.datetime_to_gregorian_seconds({ {1970,1,1}, {0,0,0} })
{ mega, sec } = mdivmod(seconds)
{ mega, sec, 0 }
end
def epoch(type), do: convert(epoch(), type)
@doc """
Time interval since Epoch.
The argument is an atom indicating the type of time units to return (see
convert/2 for supported values).
When the argument is omitted, the return value's format is { megasecs, seconds, microsecs }.
"""
def now(type \\ :timestamp)
def now(:usecs) do
IO.write :stderr, "warning: :usecs is a deprecated unit name, use :microseconds instead\n"
now(:microseconds)
end
def now(:msecs) do
IO.write :stderr, "warning: :msecs is a deprecated unit name, use :milliseconds instead\n"
now(:milliseconds)
end
def now(:secs) do
IO.write :stderr, "warning: :secs is a deprecated unit name, use :seconds instead\n"
now(:seconds)
end
def now(:mins) do
IO.write :stderr, "warning: :mins is a deprecated unit name, use :mins instead\n"
now(:minutes)
end
case Timex.Utils.get_otp_release do
ver when ver >= 18 ->
def now(:timestamp), do: :os.system_time(:micro_seconds) |> from(:microseconds)
def now(:microseconds), do: :os.system_time(:micro_seconds)
def now(:milliseconds), do: :os.system_time(:milli_seconds)
def now(:seconds), do: :os.system_time(:seconds)
def now(type), do: now(:timestamp) |> convert(type)
_ ->
def now(:timestamp), do: :os.timestamp
def now(type), do: :os.timestamp |> convert(type)
end
@doc """
Time interval between timestamp and now. If timestamp is after now in time, the
return value will be negative. Timestamp must be in format:
{ megasecs, seconds, microseconds }.
The second argument is an atom indicating the type of time units to return:
:microseconds, :milliseconds, :seconds, :minutes, or :hours.
When the second argument is omitted, the return value's format is
{ megasecs, seconds, microsecs }.
"""
def elapsed(timestamp, type \\ :timestamp)
def elapsed(timestamp = {_,_,_}, type) do
elapsed(timestamp, now(), type)
end
def elapsed(timestamp = {_,_,_}, reference_time = {_,_,_}, type) do
diff(reference_time, timestamp) |> convert(type)
end
@doc """
Time interval between two timestamps. If the first timestamp comes before the
second one in time, the return value will be negative. Timestamp must be in format:
{ megasecs, seconds, microseconds }.
The third argument is an atom indicating the type of time units to return:
:microseconds, :milliseconds, :seconds, :minutes, or :hours
When the third argument is omitted, the return value's format is:
{ megasecs, seconds, microsecs }.
## Examples
iex> use Timex
...> Time.diff({1457, 136000, 785000}, Time.zero, :days)
16865
"""
def diff(t1, t2, type \\ :timestamp)
def diff({_,_,_} = t1, {_,_,_} = t2, :timestamp) do
microsecs = :timer.now_diff(t1, t2)
mega = div(microsecs, 1_000_000_000_000)
secs = div(microsecs - mega*1_000_000_000_000, 1_000_000)
micro = rem(microsecs, 1_000_000)
{mega, secs, micro}
end
def diff(t1 = {_,_,_}, t2 = {_,_,_}, type) do
trunc(convert(diff(t1, t2), type))
end
@doc """
Evaluates fun() and measures the elapsed time.
Returns {timestamp, result}, timestamp is the usual `{ megasecs, seconds, microsecs }`.
## Example
iex> {_timestamp, result} = Time.measure(fn -> 2 * 2 end)
...> result == 4
true
"""
@spec measure((() -> any)) :: { Types.timestamp, any }
def measure(fun), do: do_measure(fun)
@doc """
Evaluates apply(fun, args). Otherwise works like measure/1
"""
@spec measure(fun, [any]) :: { Types.timestamp, any }
def measure(fun, args), do: do_measure(fun, args)
@doc """
Evaluates apply(module, fun, args). Otherwise works like measure/1
"""
@spec measure(module, atom, [any]) :: { Types.timestamp, any }
def measure(module, fun, args), do: do_measure(module, fun, args)
case Timex.Utils.get_otp_release do
ver when ver >= 18 ->
defp do_measure(m, f \\ nil, a \\ []) do
start_time = :erlang.monotonic_time(:micro_seconds)
result = cond do
is_function(m) && f == nil -> apply(m, [])
is_function(m) && is_list(f) -> apply(m, f)
is_atom(m) && is_atom(f) && is_list(a) -> apply(m, f, a)
true -> {:error, "Invalid arguments for do_measure!"}
end
end_time = :erlang.monotonic_time(:micro_seconds)
{(end_time - start_time) |> to_timestamp(:microseconds), result}
end
_ ->
defp do_measure(m, f \\ nil, a \\ []) do
{time, result} = cond do
is_function(m) && f == nil -> :timer.tc(m)
is_function(m) && is_list(f) -> :timer.tc(m, f)
is_atom(m) && is_atom(f) && is_list(a) -> :timer.tc(m, f, a)
true -> {:error, "Invalid arguments for do_measure!"}
end
{to_timestamp(time, :microseconds), result}
end
end
defp normalize({mega, sec, micro}) do
# TODO: check for negative values
{ sec, micro } = mdivmod(sec, micro)
{ mega, sec } = mdivmod(mega, sec)
{ mega, sec, micro }
end
defp divmod(a, b) do
{ div(a, b), rem(a, b) }
end
defp divmod(initial, a, b) do
{ initial + div(a, b), rem(a, b) }
end
defp mdivmod(a) do
divmod(a, 1_000_000)
end
defp mdivmod(initial, a) do
divmod(initial, a, 1_000_000)
end
defp do_round(value) when is_integer(value), do: value
defp do_round(value) when is_float(value), do: Float.round(value, 6)
end
|
lib/time/time.ex
| 0.886838
| 0.774647
|
time.ex
|
starcoder
|
defmodule Scenic.Scrollable.Acceleration do
@moduledoc """
Module for calculating the scroll speed for `Scenic.Scrollable` components.
"""
alias Scenic.Math.Vector2
@typedoc """
Shorthand for `t:Scenic.Math.vector_2/0`.
Consists of a tuple containing the x and y numeric values.
"""
@type v2 :: Scenic.Math.vector_2()
@typedoc """
Data structure containing settings that define the scroll speed and acceleration behaviour of `Scenic.Scrollable` components. Note that the `Scenic.Scrollable` content may not be able to move when the acceleration is set too low, or when the mass and counter_pressure are set too high.
Default settings:
- acceleration: 20
- mass: 1
- counter_pressure: 0.1
"""
@type settings :: %{
optional(:acceleration) => number,
optional(:mass) => number,
optional(:counter_pressure) => number
}
@typedoc """
Data structure with the necessary values to calculate the current scroll speed.
"""
@type t :: %{
acceleration: number,
mass: number,
counter_pressure: number,
force: v2,
speed: v2
}
defstruct acceleration: 20,
mass: 1,
counter_pressure: 0.1,
force: {0, 0},
speed: {0, 0}
# Value with which to multiply a speed value, to convert it to the distance it would travel during one frame.
@speed_to_distance_factor 0.1
@doc """
Initializes a `t:Scenic.Scrollable.Acceleration.t/0` state object based on the passed `t:Scenic.Scrollable.Acceleration.settings/0`.
When nil is passed, the default settings will be used.
"""
@spec init(settings) :: t
def init(nil), do: %__MODULE__{}
def init(settings) do
Enum.reduce(settings, %__MODULE__{}, fn {key, value}, state ->
Map.put(state, key, value)
end)
end
@doc """
Find out if the `Scenic.Scrollable` component is currently stationary.
"""
@spec is_stationary?(t) :: boolean
def is_stationary?(%{speed: {0, 0}}), do: true
def is_stationary?(_), do: false
@doc """
Apply force in the specified direction to make the `Scenic.Scrollable` component move.
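For example (values illustrative, assuming the default settings):
state = Acceleration.init(nil)
state = Acceleration.apply_force(state, {1, 0})
# state.speed is now roughly {20.0, 0.0} (force * acceleration / mass)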
"""
@spec apply_force(t, v2) :: t
def apply_force(state, force) do
Map.update(state, :speed, {0, 0}, fn speed ->
Vector2.mul(force, state.acceleration)
|> Vector2.div(state.mass)
|> Vector2.add(speed)
end)
end
@doc """
Directly update the speed of the `Scenic.Scrollable` component's scroll movement, to make it move at a certain velocity in the given direction.
"""
@spec set_speed(t, v2) :: t
def set_speed(state, speed) do
%{state | speed: speed}
end
@doc """
Apply counter pressure to the current `Scenic.Scrollable` component's movement.
The counter pressure's strength is calculated based on the `Scenic.Scrollable` component's current speed, and on the mass and counter pressure values set during initialization.
"""
@spec apply_counter_pressure(t) :: t
def apply_counter_pressure(state) do
Map.update(state, :speed, {0, 0}, fn speed ->
Vector2.invert(speed)
|> Vector2.mul(state.counter_pressure)
|> Vector2.mul(state.mass)
|> Vector2.add(speed)
|> Vector2.trunc()
end)
end
@doc """
Calculate the translation of a point based on the current speed.
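For example (values illustrative; exact float formatting depends on `Scenic.Math.Vector2`):
translate(%{speed: {10, 0}}, {5, 5})
#=> {6.0, 5.0}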
"""
@spec translate(t, v2) :: v2
def translate(%{speed: speed}, position) do
Vector2.mul(speed, @speed_to_distance_factor)
|> Vector2.add(position)
end
end
|
lib/utility/acceleration.ex
| 0.946001
| 0.898944
|
acceleration.ex
|
starcoder
|
defmodule Arangoex.Graph do
@moduledoc """
This module contains functions used to manage graph structures, vertex document collections, and edge document
collections.
"""
@doc """
Add an edge definition to the given graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph to
which the edge definition is added. The `edges` parameter is a map containing the edge definition.
## Endpoint
POST /_api/gharial/{graph_name}/edge
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
edge_def = %{collection: "my_edges", from: ["foo", "bar"], to: ["baz"]}
{:ok, resp} = Arangoex.Graph.add_edges(conn, "my_graph", edge_def)
"""
def add_edges(conn, graph_name, edges, opts \\ []) do
Arangoex.request(conn, :post, "/_api/gharial/#{graph_name}/edge", %{}, %{}, edges, opts)
end
@doc """
Add a vertex collection to the given graph. Create the collection if it does not exist.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph to
which the vertex collection is added. The `vertices` parameter is a map containing a `:collection` property with the
string name of the collection.
## Endpoint
POST /_api/gharial/{graph_name}/vertex
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.add_vertices(conn, "my_graph", %{collection: "my_vertices"})
"""
def add_vertices(conn, graph_name, vertices, opts \\ []) do
Arangoex.request(conn, :post, "/_api/gharial/#{graph_name}/vertex", %{}, %{}, vertices, opts)
end
@doc """
Create a new graph.
The `conn` parameter is an ArangoDB connection PID. The `graph` parameter is a map containing the graph definition.
## Endpoint
POST /_api/gharial
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
edge_def = %{collection: "my_edges", from: ["my_vertices"], to: ["my_other_vertices"]}
graph_def = %{name: "my_graph", edgeDefinitions: [edge_def]}
{:ok, resp} = Arangoex.Graph.create(conn, graph_def)
"""
def create(conn, graph, opts \\ []) do
Arangoex.request(conn, :post, "/_api/gharial", %{}, %{}, graph, opts)
end
@doc """
Create an edge in the given edge collection for the given graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph to
which the edge is added. The `collection_name` parameter is the string name of the edge collection to which the new
edge is added. The `edge` parameter is a map containing the edge document.
## Endpoint
POST /_api/gharial/{graph_name}/edge/{collection_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.create_edge(conn, "my_graph", "my_edges", %{my_key: "my_value"})
"""
def create_edge(conn, graph_name, collection_name, edge, opts \\ []) do
Arangoex.request(conn, :post, "/_api/gharial/#{graph_name}/edge/#{collection_name}", %{}, %{}, edge, opts)
end
@doc """
Create a vertex in the given vertex collection for the given graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph to
which the vertex is added. The `collection_name` parameter is the string name of the vertex collection to which the
new vertex is added. The `vertex` parameter is a map containing the vertex document.
## Endpoint
POST /_api/gharial/{graph_name}/vertex/{collection_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.create_vertex(conn, "my_graph", "my_vertices", %{my_key: "my_value"})
"""
def create_vertex(conn, graph_name, collection_name, vertex, opts \\ []) do
Arangoex.request(conn, :post, "/_api/gharial/#{graph_name}/vertex/#{collection_name}", %{}, %{}, vertex, opts)
end
@doc """
Return information about the given graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the requested
graph.
## Endpoint
GET /_api/gharial/{graph_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.get(conn, "foo")
"""
def get(conn, graph_name, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial/#{graph_name}", %{}, %{}, nil, opts)
end
@doc """
Return the given edge.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge. The `document_handle` parameter is the string `_id` of the edge document.
## Endpoint
GET /_api/gharial/{graph_name}/edge/{collection_name}/{edge_key}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.get_edge(conn, "my_graph", "my_edges/edge_key")
"""
def get_edge(conn, graph_name, document_handle, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial/#{graph_name}/edge/#{document_handle}", %{}, %{}, nil, opts)
end
@doc """
Return the given vertex.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex. The `document_handle` parameter is the string `_id` of the vertex document.
## Endpoint
GET /_api/gharial/{graph_name}/vertex/{collection_name}/{vertex_key}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.get_vertex(conn, "my_graph", "my_vertices/vertex_key")
"""
def get_vertex(conn, graph_name, document_handle, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial/#{graph_name}/vertex/#{document_handle}", %{}, %{}, nil, opts)
end
@doc """
Return information about all graphs in the current database.
The `conn` parameter is an ArangoDB connection PID.
## Endpoint
GET /_api/gharial
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.list(conn)
"""
def list(conn, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial", %{}, %{}, nil, opts)
end
@doc """
Return a list of edge collections in a graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge collections.
## Endpoint
GET /_api/gharial/{graph_name}/edge
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.list_edges(conn, "my_graph")
"""
def list_edges(conn, graph_name, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial/#{graph_name}/edge", %{}, %{}, nil, opts)
end
@doc """
Return a list of vertex collections in a graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex collections.
## Endpoint
GET /_api/gharial/{graph_name}/vertex
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.list_vertices(conn, "my_graph")
"""
def list_vertices(conn, graph_name, opts \\ []) do
Arangoex.request(conn, :get, "/_api/gharial/#{graph_name}/vertex", %{}, %{}, nil, opts)
end
@doc """
Remove the given graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph to be
removed.
## Endpoint
DELETE /_api/gharial/{graph_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.remove(conn, "foo")
"""
def remove(conn, graph_name, opts \\ []) do
Arangoex.request(conn, :delete, "/_api/gharial/#{graph_name}", %{}, %{}, nil, opts)
end
@doc """
Remove the given edge from the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge to be removed. The `document_handle` parameter is the string `_id` of the edge document.
## Endpoint
DELETE /_api/gharial/{graph_name}/edge/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.remove_edge(conn, "foo", "my_edges/edge_key")
"""
def remove_edge(conn, graph_name, document_handle, opts \\ []) do
Arangoex.request(conn, :delete, "/_api/gharial/#{graph_name}/edge/#{document_handle}", %{}, %{}, nil, opts)
end
@doc """
Remove the given edge definition from the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge definition to be removed. The `definition_name` parameter is the string name of the edge
definition.
## Endpoint
DELETE /_api/gharial/{graph_name}/edge/{definition_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.remove_edges(conn, "foo", "my_edges")
"""
def remove_edges(conn, graph_name, definition_name, opts \\ []) do
Arangoex.request(conn, :delete, "/_api/gharial/#{graph_name}/edge/#{definition_name}", %{}, %{}, nil, opts)
end
@doc """
Remove the given vertex from the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex to be removed. The `document_handle` parameter is the string `_id` of the vertex document.
## Endpoint
DELETE /_api/gharial/{graph_name}/vertex/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.remove_vertex(conn, "foo", "my_vertices/vertex_key")
"""
def remove_vertex(conn, graph_name, document_handle, opts \\ []) do
Arangoex.request(conn, :delete, "/_api/gharial/#{graph_name}/vertex/#{document_handle}", %{}, %{}, nil, opts)
end
@doc """
Remove the given vertex collection from the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex collection to be removed. The `collection_name` parameter is the string name of the vertex
collection.
## Endpoint
DELETE /_api/gharial/{graph_name}/vertex/{collection_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.remove_vertices(conn, "foo", "my_vertices")
"""
def remove_vertices(conn, graph_name, collection_name, opts \\ []) do
Arangoex.request(conn, :delete, "/_api/gharial/#{graph_name}/vertex/#{collection_name}", %{}, %{}, nil, opts)
end
@doc """
Replace the given edge in the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge to be replaced. The `document_handle` parameter is the string `_id` of the edge document.
## Endpoint
PUT /_api/gharial/{graph_name}/edge/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
edge = %{_from: "vertices/vertex1", _to: "vertices/vertex2", new_key: "new_value"}
{:ok, resp} = Arangoex.Graph.replace_edge(conn, "foo", "my_edges/edge_key", edge)
"""
def replace_edge(conn, graph_name, document_handle, edge, opts \\ []) do
Arangoex.request(conn, :put, "/_api/gharial/#{graph_name}/edge/#{document_handle}", %{}, %{}, edge, opts)
end
@doc """
Replace the given edge definition in the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge definition to be replaced. The `definition_name` parameter is the string name of the edge
definition.
## Endpoint
PUT /_api/gharial/{graph_name}/edge/{definition_name}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
edge_def = %{collection: "my_edges", from: ["foo", "bar"], to: ["baz", "bat"]}
{:ok, resp} = Arangoex.Graph.replace_edges(conn, "foo", "my_edges", edge_def)
"""
def replace_edges(conn, graph_name, definition_name, edges, opts \\ []) do
Arangoex.request(conn, :put, "/_api/gharial/#{graph_name}/edge/#{definition_name}", %{}, %{}, edges, opts)
end
@doc """
Replace the given vertex in the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex to be replaced. The `document_handle` parameter is the string `_id` of the vertex document.
## Endpoint
PUT /_api/gharial/{graph_name}/vertex/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.replace_vertex(conn, "foo", "my_vertices/vertex_key", %{new_key: "new_value"})
"""
def replace_vertex(conn, graph_name, document_handle, vertex, opts \\ []) do
Arangoex.request(conn, :put, "/_api/gharial/#{graph_name}/vertex/#{document_handle}", %{}, %{}, vertex, opts)
end
@doc """
Update the given edge in the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the edge to be updated. The `document_handle` parameter is the string `_id` of the edge document.
## Endpoint
PATCH /_api/gharial/{graph_name}/edge/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.update_edge(conn, "foo", "my_edges/edge_key", %{new_key: "new_value"})
"""
def update_edge(conn, graph_name, document_handle, edge, opts \\ []) do
Arangoex.request(conn, :patch, "/_api/gharial/#{graph_name}/edge/#{document_handle}", %{}, %{}, edge, opts)
end
@doc """
Update the given vertex in the graph.
The `conn` parameter is an ArangoDB connection PID. The `graph_name` parameter is the string name of the graph
containing the vertex to be updated. The `document_handle` parameter is the string `_id` of the vertex document.
## Endpoint
PATCH /_api/gharial/{graph_name}/vertex/{document_handle}
## Options
See the "Shared Options" in the `Arangoex` module documentation for additional options.
## Examples
{:ok, conn} = Arangoex.start_link()
{:ok, resp} = Arangoex.Graph.update_vertex(conn, "foo", "my_vertices/vertex_key", %{new_key: "new_value"})
"""
def update_vertex(conn, graph_name, document_handle, vertex, opts \\ []) do
Arangoex.request(conn, :patch, "/_api/gharial/#{graph_name}/vertex/#{document_handle}", %{}, %{}, vertex, opts)
end
end
|
lib/arangoex/graph.ex
| 0.93396
| 0.767363
|
graph.ex
|
starcoder
|
defmodule Level10.StateHandoff do
@moduledoc """
Whenever a SIGTERM is received, this GenServer is used to store the state of
the games on the local node across the entire cluster so that it can be
replicated in new nodes once this one goes down.
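## Example
A typical flow (join code and game are illustrative):
# on the node shutting down
StateHandoff.handoff("ABCD", game)
# on a node taking over the game
game = StateHandoff.pickup("ABCD")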
"""
use GenServer
require Logger
alias Level10.StateHandoff.Crdt
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
# Client (Public)
@doc """
Reset the current state of the CRDT
"""
def clear do
GenServer.call(__MODULE__, :clear)
end
@doc """
Get the current state of the CRDT. Used mostly for debugging purposes.
"""
def get do
GenServer.call(__MODULE__, :get)
end
@doc """
Store a game in the CRDT keyed by its join code
"""
def handoff(join_code, game) do
GenServer.call(__MODULE__, {:handoff, join_code, game})
end
@doc """
Pick up the stored game for the given join code from within the CRDT
"""
def pickup(join_code) do
GenServer.call(__MODULE__, {:pickup, join_code})
end
@doc """
Notify the CRDT that shutdown is imminent, and that new processes should no
longer be picked up.
"""
def prepare_for_shutdown do
GenServer.cast(__MODULE__, :prepare_for_shutdown)
end
# Server (Private)
@doc false
def init(_) do
opts = [name: Crdt, sync_interval: 3]
DeltaCrdt.start_link(DeltaCrdt.AWLWWMap, opts)
# Register to receive messages when nodes enter and leave the cluster
:net_kernel.monitor_nodes(true, node_type: :visible)
# connect to the CRDTs on the other nodes
update_neighbours()
{:ok, :running}
end
@doc false
def handle_call(:clear, _from, state) do
for {key, _} <- DeltaCrdt.to_map(Crdt) do
DeltaCrdt.delete(Crdt, key)
end
{:reply, :ok, state}
end
def handle_call(:get, _from, state) do
crdt = DeltaCrdt.to_map(Crdt)
{:reply, crdt, state}
end
def handle_call({:handoff, join_code, game}, _from, state) do
Logger.debug(fn ->
"[StateHandoff] Adding game #{join_code} to the CRDT with current stage: #{game.current_stage}"
end)
case DeltaCrdt.get(Crdt, join_code) do
nil ->
DeltaCrdt.put(Crdt, join_code, game)
Logger.debug(fn -> "[StateHandoff] Added game #{join_code} to CRDT" end)
_game ->
Logger.debug(fn -> "[StateHandoff] Game #{join_code} already exists in the CRDT" end)
end
{:reply, :ok, state}
end
def handle_call({:pickup, join_code}, _from, state) do
game = DeltaCrdt.get(Crdt, join_code)
cond do
is_nil(game) ->
nil
state == :terminating ->
Logger.debug(fn -> "[StateHandoff] Temporarily picked up game #{join_code}" end)
true ->
Logger.debug(fn -> "[StateHandoff] Picked up game #{join_code}" end)
DeltaCrdt.delete(Crdt, join_code)
end
{:reply, game, state}
end
def handle_cast(:prepare_for_shutdown, _state) do
{:noreply, :terminating}
end
# Handle the message received when a new node joins the cluster
def handle_info({:nodeup, _node, _node_type}, state) do
update_neighbours()
{:noreply, state}
end
# Handle the message received when a node leaves the cluster
def handle_info({:nodedown, _node, _node_type}, state) do
update_neighbours()
{:noreply, state}
end
defp update_neighbours do
neighbours = for node <- Node.list(), do: {Crdt, node}
Logger.debug(fn -> "[StateHandoff] Setting neighbours to #{inspect(neighbours)}" end)
DeltaCrdt.set_neighbours(Crdt, neighbours)
end
end
|
lib/level10/state_handoff.ex
| 0.66454
| 0.484075
|
state_handoff.ex
|
starcoder
|
defmodule Helios.Router.Aggregate do
@moduledoc false
alias Helios.Router.Aggregate
@default_param_key "id"
@doc """
The `Helios.Router.Aggregate` struct. It stores:
* `:path` - the path as string (not normalized)
* `:commands` - the commands to which this aggregate should respond
* `:param` - the param to be used in routes (not normalized)
* `:route` - the context for aggregate routes
* `:aggregate` - the aggregate as an atom
* `:singleton` - whether only a single instance of the aggregate should ever be created
"""
defstruct [:path, :commands, :param, :route, :aggregate, :singleton, :member, :collection]
@type t :: %Aggregate{
path: String.t(),
commands: list(),
param: String.t(),
route: keyword,
aggregate: atom(),
singleton: boolean,
member: keyword,
collection: keyword
}
@doc """
Builds an aggregate struct.
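For example (path, module, and param are illustrative):
Aggregate.build("/users", MyApp.UserAggregate, param: "user_id")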
"""
def build(path, aggregate, options) when is_atom(aggregate) and is_list(options) do
path = Helios.Router.Scope.validate_path(path)
alias = Keyword.get(options, :alias)
param = Keyword.get(options, :param, @default_param_key)
name = Keyword.get(options, :name, Helios.Naming.process_name(aggregate, "Aggregate"))
as = Keyword.get(options, :as, name)
private = Keyword.get(options, :private, %{helios_plug_key: param})
assigns = Keyword.get(options, :assigns, %{})
# TODO: this is not used currently but should work when set to true and
# a distributed `IdentityServer` is implemented
singleton = Keyword.get(options, :singleton, false)
commands = extract_commands(options, singleton)
route = [as: as, private: private, assigns: assigns]
collection = [path: path, as: as, private: private, assigns: assigns]
member_path = if singleton, do: path, else: Path.join(path, ":#{param}")
member = [path: member_path, as: as, alias: alias, private: private, assigns: assigns]
%Aggregate{
path: path,
commands: commands,
param: param,
route: route,
aggregate: aggregate,
singleton: singleton,
member: member,
collection: collection
}
end
defp extract_commands(opts, _singleton?) do
only = Keyword.get(opts, :only)
except = Keyword.get(opts, :except)
cond do
only -> only
except -> except
true -> [:_]
end
end
end
|
lib/helios/router/aggregate.ex
| 0.688049
| 0.406391
|
aggregate.ex
|
starcoder
|
defmodule Predicator.Machine do
@moduledoc """
A Machine struct is composed of the instruction set, the current stack, the instruction pointer, and the context.
iex> %Predicator.Machine{}
%Predicator.Machine{instructions: [], stack: [], instruction_pointer: 0, context: %{}, opts: []}
"""
alias Predicator.{
ValueError,
InstructionError,
InstructionNotCompleteError
}
defstruct instructions: [],
stack: [],
instruction_pointer: 0,
context: %{},
opts: []
@type t :: %__MODULE__{
instructions: [] | [...],
stack: [] | [...],
instruction_pointer: non_neg_integer(),
context: struct() | map(),
opts: [{atom, atom}, ...] | [{atom, [...]}, ...]
}
def new(instructions, context \\ %{}, opts \\ []) do
do_new(instructions, context, opts)
end
defp do_new(instructions, %{__struct__: _} = context, opts) do
do_new(instructions, Map.from_struct(context), opts)
end
defp do_new(instructions, context, opts) do
context =
context
|> Enum.map(fn
{k, v} when is_atom(k) ->
{Atom.to_string(k), v}
other ->
other
end)
|> Map.new()
%__MODULE__{instructions: instructions, context: context, opts: opts}
end
def complete?(%__MODULE__{} = machine) do
case next_instruction(machine) do
nil -> true
_ -> false
end
end
def peek(%__MODULE__{stack: []}), do: nil
def peek(%__MODULE__{stack: [head | _tail]}) do
head
end
def step(%__MODULE__{} = machine) do
next_instruction = next_instruction(machine)
accept_instruction(machine, next_instruction)
end
def put_instruction(%__MODULE__{} = machine, instruction, opts \\ []) do
pointer =
if Keyword.get(opts, :increment, true) do
machine.instruction_pointer + 1
else
machine.instruction_pointer
end
%__MODULE__{machine | stack: [instruction | machine.stack], instruction_pointer: pointer}
end
def next_instruction(%__MODULE__{} = machine) do
if machine.instruction_pointer < Enum.count(machine.instructions) do
Enum.at(machine.instructions, machine.instruction_pointer)
end
end
def increment_pointer(%__MODULE__{} = machine, amount) do
%__MODULE__{machine | instruction_pointer: machine.instruction_pointer + amount}
end
def replace_stack(%__MODULE__{stack: [_head | tail]} = machine, value) do
%__MODULE__{
machine
| stack: [value | tail],
instruction_pointer: machine.instruction_pointer + 1
}
end
def pop_instruction(%__MODULE__{} = machine) do
%__MODULE__{
machine
| stack: tl(machine.stack),
instruction_pointer: machine.instruction_pointer + 1
}
end
def load!(%__MODULE__{} = machine, key) when is_atom(key) do
load!(machine, Atom.to_string(key))
end
def load!(%__MODULE__{context: context} = machine, key) when is_binary(key) do
if has_variable?(machine, key) do
Map.get(context, key)
else
ValueError.value_error(machine)
end
end
def has_variable?(%__MODULE__{context: context}, key) when is_binary(key) do
Map.has_key?(context, key)
end
def has_variable?(%__MODULE__{context: context}, key) when is_atom(key) do
Map.has_key?(context, Atom.to_string(key))
end
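# accept_instruction/2 pattern matches on the next instruction and the current
# stack, returning an updated machine. A `nil` instruction means the program is
# complete, so the final boolean left on the stack is returned.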
def accept_instruction(m = %__MODULE__{stack: [first | _]}, nil)
when not is_boolean(first),
do: InstructionNotCompleteError.inst_not_complete_error(m)
def accept_instruction(machine, nil), do: hd(machine.stack)
def accept_instruction(machine = %__MODULE__{}, ["array" | [val | _]]) do
put_instruction(machine, val)
end
def accept_instruction(machine = %__MODULE__{}, ["lit" | [val | _]]) do
put_instruction(machine, val)
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, ["not" | _]) do
put_instruction(machine, !val)
end
# Conversion Predicates
def accept_instruction(machine = %__MODULE__{stack: ["false" | _rest_of_stack]}, ["to_bool" | _]) do
replace_stack(machine, false)
end
def accept_instruction(machine = %__MODULE__{stack: ["true" | _rest_of_stack]}, ["to_bool" | _]) do
replace_stack(machine, true)
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, [
"to_bool" | _
])
when is_boolean(val) do
replace_stack(machine, val)
end
def accept_instruction(machine = %__MODULE__{}, ["to_bool" | _]),
do: ValueError.value_error(machine)
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, ["to_str" | _])
when is_nil(val) do
replace_stack(machine, "nil")
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, ["to_str" | _]) do
replace_stack(machine, to_string(val))
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, ["to_int" | _])
when is_binary(val) do
case Integer.parse(val) do
{integer, _} ->
put_instruction(machine, integer)
:error ->
ValueError.value_error(machine)
end
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack]}, ["to_int" | _])
when is_integer(val) do
put_instruction(machine, val)
end
def accept_instruction(machine = %__MODULE__{}, inst = ["to_date" | _]),
do: Predicator.Evaluator.Date._execute(inst, machine)
def accept_instruction(machine = %__MODULE__{}, inst = ["date_ago" | _]),
do: Predicator.Evaluator.Date._execute(inst, machine)
def accept_instruction(machine = %__MODULE__{}, inst = ["date_from_now" | _]),
do: Predicator.Evaluator.Date._execute(inst, machine)
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack], opts: opts}, [
"blank"
]) do
val = Enum.member?(opts[:nil_values], val)
put_instruction(machine, val)
end
def accept_instruction(machine = %__MODULE__{stack: [val | _rest_of_stack], opts: opts}, [
"present"
]) do
val = !Enum.member?(opts[:nil_values], val)
put_instruction(machine, val)
end
def accept_instruction(machine = %__MODULE__{stack: [left | [right | _rest_of_stack]]}, [
"compare" | ["EQ" | _]
]) do
put_instruction(machine, left == right)
end
def accept_instruction(machine, ["compare" | ["EQ" | _]]) do
put_instruction(machine, false, increment: false)
end
def accept_instruction(machine = %__MODULE__{stack: [left | [right | _rest_of_stack]]}, [
"compare" | ["IN" | _]
]) do
val = Enum.member?(left, right)
put_instruction(machine, val)
end
def accept_instruction(machine, ["compare" | ["IN" | _]]) do
put_instruction(machine, false, increment: false)
end
def accept_instruction(machine = %__MODULE__{stack: [left | [right | _rest_of_stack]]}, [
"compare" | ["NOTIN" | _]
]) do
val = !Enum.member?(left, right)
put_instruction(machine, val)
end
def accept_instruction(machine, ["compare" | ["NOTIN" | _]]) do
put_instruction(machine, false, increment: false)
end
def accept_instruction(machine = %__MODULE__{stack: [second | [first | _rest_of_stack]]}, [
"compare" | ["GT" | _]
]) do
put_instruction(machine, first > second)
end
def accept_instruction(machine, ["compare" | ["GT" | _]]) do
put_instruction(machine, false, increment: false)
end
def accept_instruction(machine = %__MODULE__{stack: [second | [first | _rest_of_stack]]}, [
"compare" | ["LT" | _]
]) do
put_instruction(machine, first < second)
end
def accept_instruction(machine, ["compare" | ["LT" | _]]) do
put_instruction(machine, false, increment: false)
end
def accept_instruction(
machine = %__MODULE__{
stack: [max = %DateTime{} | [min = %DateTime{} | [val = %DateTime{} | _rest_of_stack]]]
},
["compare" | ["BETWEEN" | _]]
) do
is_between =
with :gt <- DateTime.compare(max, val),
:lt <- DateTime.compare(min, val) do
true
else
_ -> false
end
put_instruction(machine, is_between)
end
def accept_instruction(machine = %__MODULE__{stack: [max | [min | [val | _rest_of_stack]]]}, [
"compare" | ["BETWEEN" | _]
]) do
put_instruction(machine, val in min..max)
end
def accept_instruction(machine = %__MODULE__{stack: [match | [stack_val | _rest_of_stack]]}, [
"compare" | ["STARTSWITH" | _]
]) do
put_instruction(machine, String.starts_with?(stack_val, match))
end
def accept_instruction(
machine = %__MODULE__{stack: [end_match | [stack_val | _rest_of_stack]]},
["compare" | ["ENDSWITH" | _]]
) do
put_instruction(machine, String.ends_with?(stack_val, end_match))
end
def accept_instruction(machine = %__MODULE__{}, ["load" | [val | _]]) do
if has_variable?(machine, val) do
user_key = load!(machine, val)
put_instruction(machine, user_key)
else
ValueError.value_error(machine)
end
end
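# "jfalse"/"jtrue" implement short-circuit jumps: when the top of the stack
# matches the jump condition, the instruction pointer skips ahead by `offset`;
# otherwise the value is popped and execution continues with the next
# instruction.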
def accept_instruction(machine = %__MODULE__{}, ["jfalse" | [offset | _]]) do
case hd(machine.stack) do
false ->
increment_pointer(machine, offset)
_ ->
pop_instruction(machine)
end
end
def accept_instruction(machine = %__MODULE__{}, ["jtrue" | [offset | _]]) do
case hd(machine.stack) do
true ->
increment_pointer(machine, offset)
_ ->
pop_instruction(machine)
end
end
def accept_instruction(machine = %__MODULE__{}, [non_recognized_predicate | _]),
do: InstructionError.instruction_error(machine, non_recognized_predicate)
end
|
lib/predicator/machine.ex
| 0.739046
| 0.541894
|
machine.ex
|
starcoder
|
defmodule Surface.LiveView do
@moduledoc """
A wrapper component around `Phoenix.LiveView`.
Since this module is just a wrapper around `Phoenix.LiveView`, you
cannot define custom properties for it. Only `:id` and `:session`
are available. However, built-in directives like `:for` and `:if`
can be used normally.
## Example
defmodule Example do
use Surface.LiveView
def render(assigns) do
~H"\""
<Dialog title="Alert" id="dialog">
This <b>Dialog</b> is a stateful component. Cool!
</Dialog>
<Button click="show_dialog">Click to open the dialog</Button>
"\""
end
def handle_event("show_dialog", _, socket) do
Dialog.show("dialog")
{:noreply, socket}
end
end
"""
defmacro __using__(_) do
quote do
use Surface.BaseComponent
use Surface.EventValidator
import Phoenix.HTML
property id, :integer
property session, :map
def translator do
Surface.Translator.LiveViewTranslator
end
use Phoenix.LiveView
end
end
@doc """
The same as `Phoenix.LiveView.mount/2`.
"""
@callback mount(session :: map, socket :: Socket.t()) ::
{:ok, Socket.t()} | {:ok, Socket.t(), keyword()}
@doc """
The same as `Phoenix.LiveView.render/1`.
"""
@callback render(assigns :: Socket.assigns()) :: Phoenix.LiveView.Rendered.t()
@doc """
The same as `Phoenix.LiveView.terminate/2`.
"""
@callback terminate(reason, socket :: Socket.t()) :: term
when reason: :normal | :shutdown | {:shutdown, :left | :closed | term}
@doc """
The same as `Phoenix.LiveView.handle_params/3`.
"""
@callback handle_params(Socket.unsigned_params(), uri :: String.t(), socket :: Socket.t()) ::
{:noreply, Socket.t()} | {:stop, Socket.t()}
@doc """
The same as `Phoenix.LiveView.handle_event/3`.
"""
@callback handle_event(event :: binary, Socket.unsigned_params(), socket :: Socket.t()) ::
{:noreply, Socket.t()} | {:stop, Socket.t()}
@doc """
The same as `Phoenix.LiveView.handle_call/3`.
"""
@callback handle_call(msg :: term, {pid, reference}, socket :: Socket.t()) ::
{:noreply, Socket.t()} | {:reply, term, Socket.t()} | {:stop, Socket.t()}
@doc """
The same as `Phoenix.LiveView.handle_info/2`.
"""
@callback handle_info(msg :: term, socket :: Socket.t()) ::
{:noreply, Socket.t()} | {:stop, Socket.t()}
@optional_callbacks mount: 2,
terminate: 2,
handle_params: 3,
handle_event: 3,
handle_call: 3,
handle_info: 2
end
|
lib/surface/live_view.ex
| 0.909782
| 0.409988
|
live_view.ex
|
starcoder
|
defmodule ExOAPI.EctoTypes.TypedEnum do
defmacro __before_compile__(_env) do
# these are inserted in the before_compile hook to give opportunity to the
# implementing module to define additional variations
quote do
def cast(_), do: :error
def dump(_), do: :error
defp get_term(data), do: data
end
end
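# Example usage (module name and values are illustrative):
#
#     defmodule MyApp.Status do
#       use ExOAPI.EctoTypes.TypedEnum, values: [:active, :inactive]
#     end
#
#     MyApp.Status.cast("active")  #=> {:ok, :active}
#     MyApp.Status.dump(:inactive) #=> {:ok, "inactive"}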
defmacro __using__(opts) do
values = Keyword.fetch!(opts, :values)
mod = __CALLER__.module
quote bind_quoted: [atoms: values, mod: mod] do
@before_compile ExOAPI.EctoTypes.TypedEnum
strings = Enum.map(atoms, fn entry -> Atom.to_string(entry) end)
mapped = Enum.zip(strings, atoms) |> Enum.into(%{})
@behaviour Ecto.Type
@impl Ecto.Type
def type, do: :string
Module.put_attribute(mod, :valid_atoms, atoms)
Module.put_attribute(mod, :valid_strings, strings)
Module.put_attribute(mod, :validation_mappings, mapped)
@type t() :: unquote(Enum.reduce(Enum.reverse(atoms), &{:|, [], [&1, &2]}))
@spec values(:atoms | :strings) :: list(t()) | list(String.t())
def values(type \\ :atoms)
def values(:atoms), do: unquote(atoms)
def values(:strings), do: unquote(strings)
@impl Ecto.Type
def load(data), do: cast(data)
@impl Ecto.Type
@doc false
def cast(data) when is_atom(data) and data in unquote(atoms), do: {:ok, data}
def cast(data) when is_binary(data) and data in unquote(strings),
do: {:ok, String.to_atom(data)}
@impl Ecto.Type
@doc false
def dump(data) when is_atom(data) and data in unquote(atoms),
do: {:ok, Atom.to_string(data)}
def dump(data) when is_binary(data) and data in unquote(strings),
do: {:ok, data}
@doc false
def dump!(data) do
case dump(data) do
{:ok, value} ->
value
_ ->
raise Ecto.CastError,
message: "Unable to dump:: #{inspect(data)} ::into:: #{inspect(unquote(mod))}",
type: unquote(mod),
value: data
end
end
@impl Ecto.Type
@doc false
def embed_as(_), do: :dump
@impl Ecto.Type
@doc false
def equal?(term_1, term_1), do: true
def equal?(term_1, term_2), do: get_term(term_1) == get_term(term_2)
defp get_term(data) when is_atom(data) and data in unquote(atoms),
do: data
defp get_term(data) when is_binary(data) and data in unquote(strings),
do: @validation_mappings[data]
end
end
end
|
lib/ex_oapi/parser/ecto_types/typed_enum.ex
| 0.738386
| 0.543711
|
typed_enum.ex
|
starcoder
|
defmodule MetarMap.Timeline do
defstruct transitions: [], latest_value: nil, interpolate_fun: nil
defmodule Transition do
defstruct [:start_at, :start_value, :end_at, :end_value]
end
def init(initial_value, interpolate_fun) do
%__MODULE__{latest_value: initial_value, interpolate_fun: interpolate_fun}
end
defp now_ms, do: :erlang.monotonic_time(:millisecond)
@doc """
Enqueues a value transition.
Will always begin after the last scheduled transition or after `min_delay_ms`,
whichever comes later.
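For example (durations and values illustrative):
timeline
|> Timeline.append(1_000, 255)
|> Timeline.append(500, 0, min_delay_ms: 250)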
"""
def append(timeline, duration_ms, value, opts \\ []) do
min_delay_ms = Keyword.get(opts, :min_delay_ms, 0)
start_at = find_start_at(timeline, now_ms(), min_delay_ms)
start_value = timeline.latest_value
end_at = start_at + duration_ms
end_value = value
transition = %Transition{
start_at: start_at,
start_value: start_value,
end_at: end_at,
end_value: end_value
}
%{timeline | transitions: timeline.transitions ++ [transition], latest_value: end_value}
end
@doc """
Immediately stops the timeline and freezes it to the latest interpolated value.
"""
def abort(timeline) do
{value, timeline} = evaluate(timeline)
%{timeline | transitions: [], latest_value: value}
end
def empty?(%{transitions: []}), do: true
def empty?(_), do: false
# Returns the earliest time a transition could begin
defp find_start_at(timeline, now, min_delay_ms) do
earliest_start = now + min_delay_ms
if Enum.empty?(timeline.transitions) do
earliest_start
else
latest_start = List.last(timeline.transitions).end_at
max(earliest_start, latest_start)
end
end
@doc """
Determines the current value of the pixel.
Returns a tuple containing the value and the updated timeline.
"""
def evaluate(timeline) do
evaluate(timeline, now_ms())
end
def evaluate(%{transitions: [], latest_value: value} = timeline, _now),
do: {value, timeline}
def evaluate(timeline, now) do
# The empty-transitions case is handled by the clause above, so at this point
# we may be in a period where no transition has yet begun; assume we are
# leading UP to the first transition and use its starting value.
initial_value = hd(timeline.transitions).start_value
initial_acc = {initial_value, []}
{value, next_transitions} =
Enum.reduce(timeline.transitions, initial_acc, fn transition, {value, transitions} ->
cond do
transition.end_at < now ->
# The transition has passed - set the end value and discard it
{transition.end_value, transitions}
transition.start_at > now ->
# The transition has not yet begun - keep it
{value, transitions ++ [transition]}
true ->
value =
do_apply(timeline.interpolate_fun, [
transition.start_value,
transition.end_value,
transition.start_at..transition.end_at,
now
])
{value, transitions ++ [transition]}
end
end)
{value, %{timeline | transitions: next_transitions}}
end
defp do_apply(fun, args) when is_function(fun), do: apply(fun, args)
defp do_apply({module, fun}, args) when is_atom(module) and is_atom(fun),
do: apply(module, fun, args)
end
|
lib/metar_map/timeline.ex
| 0.847527
| 0.624737
|
timeline.ex
|
starcoder
|
defmodule Benchee.Benchmark.Runner do
@moduledoc """
Internal module "running" a scenario, measuring all defined measurements.
"""
# This module actually runs our benchmark scenarios, adding information about
# run time and memory usage to each scenario.
alias Benchee.{Benchmark, Configuration, Scenario, Utility.Parallel}
alias Benchmark.{
Collect,
FunctionCallOverhead,
Hooks,
RepeatedMeasurement,
RunOnce,
ScenarioContext
}
@doc """
Executes the benchmarks defined before by first running the defined functions
for `warmup` time without gathering results and then running them for `time`
gathering their run times.
This means the total run time of a single benchmarking scenario is warmup +
time.
Warmup is usually important for run times with JIT but it seems to have some
effect on the BEAM as well.
There will be `parallel` processes spawned executing the benchmark job in
parallel.
"""
@spec run_scenarios([Scenario.t()], ScenarioContext.t()) :: [Scenario.t()]
def run_scenarios(scenarios, scenario_context) do
if scenario_context.config.pre_check do
Enum.each(scenarios, fn scenario -> pre_check(scenario, scenario_context) end)
end
function_call_overhead =
if scenario_context.config.measure_function_call_overhead do
measure_and_report_function_call_overhead(scenario_context.printer)
else
0
end
scenario_context = %ScenarioContext{
scenario_context
| function_call_overhead: function_call_overhead
}
Enum.map(scenarios, fn scenario -> parallel_benchmark(scenario, scenario_context) end)
end
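# For orientation, this runner sits behind the public `Benchee.run/2` API;
# a configuration like the sketch below (standard Benchee options) results in
# roughly warmup + time seconds of measuring per scenario, executed across
# `parallel` processes:
#
#   Benchee.run(
#     %{"flat_map" => fn -> Enum.flat_map(1..100, &[&1, &1]) end},
#     warmup: 2, time: 5, parallel: 2
#   )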
# This will run the given scenario exactly once, including the before and
# after hooks, to ensure the function can execute without raising an error.
defp pre_check(scenario, scenario_context) do
RunOnce.run(scenario, scenario_context, Collect.Time)
end
def measure_and_report_function_call_overhead(printer) do
overhead = FunctionCallOverhead.measure()
printer.function_call_overhead(overhead)
overhead
end
defp parallel_benchmark(
scenario = %Scenario{job_name: job_name, input_name: input_name},
scenario_context = %ScenarioContext{
printer: printer,
config: config
}
) do
printer.benchmarking(job_name, input_name, config)
config
|> measure_scenario_parallel(scenario, scenario_context)
|> add_measurements_to_scenario(scenario)
end
defp measure_scenario_parallel(config, scenario, scenario_context) do
Parallel.map(1..config.parallel, fn _ -> measure_scenario(scenario, scenario_context) end)
end
defp add_measurements_to_scenario(measurements, scenario) do
run_times = Enum.flat_map(measurements, fn {run_times, _, _} -> run_times end)
memory_usages = Enum.flat_map(measurements, fn {_, memory_usages, _} -> memory_usages end)
reductions = Enum.flat_map(measurements, fn {_, _, reductions} -> reductions end)
%{
scenario
| run_time_data: %{scenario.run_time_data | samples: run_times},
memory_usage_data: %{scenario.memory_usage_data | samples: memory_usages},
reductions_data: %{scenario.reductions_data | samples: reductions}
}
end
@spec measure_scenario(Scenario.t(), ScenarioContext.t()) :: {[number], [number], [number]}
defp measure_scenario(scenario, scenario_context) do
scenario_input = Hooks.run_before_scenario(scenario, scenario_context)
scenario_context = %ScenarioContext{scenario_context | scenario_input: scenario_input}
_ = run_warmup(scenario, scenario_context)
run_times =
scenario
|> run_runtime_benchmark(scenario_context)
|> deduct_function_call_overhead(scenario_context.function_call_overhead)
memory_usages = run_memory_benchmark(scenario, scenario_context)
reductions =
scenario
|> run_reductions_benchmark(scenario_context)
|> deduct_reduction_overhead()
Hooks.run_after_scenario(scenario, scenario_context)
{run_times, memory_usages, reductions}
end
defp run_warmup(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{warmup: warmup}
}
) do
measure_runtimes(scenario, scenario_context, warmup, false)
end
defp run_runtime_benchmark(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{
time: run_time,
print: %{fast_warning: fast_warning}
}
}
) do
measure_runtimes(scenario, scenario_context, run_time, fast_warning)
end
defp deduct_function_call_overhead(run_times, 0) do
run_times
end
defp deduct_function_call_overhead(run_times, overhead) do
Enum.map(run_times, fn time ->
max(time - overhead, 0)
end)
end
defp deduct_reduction_overhead([]), do: []
defp deduct_reduction_overhead(reductions) do
me = self()
ref = make_ref()
spawn(fn ->
{offset, _} = Collect.Reductions.collect(fn -> nil end)
send(me, {ref, offset})
end)
offset =
receive do
{^ref, offset} -> offset
end
Enum.map(reductions, &(&1 - offset))
end
defp run_reductions_benchmark(_, %ScenarioContext{config: %{reduction_time: 0.0}}) do
[]
end
defp run_reductions_benchmark(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{
reduction_time: reduction_time
}
}
) do
end_time = current_time() + reduction_time
new_context = %ScenarioContext{
scenario_context
| current_time: current_time(),
end_time: end_time
}
do_benchmark(scenario, new_context, Collect.Reductions, [])
end
defp run_memory_benchmark(_, %ScenarioContext{config: %{memory_time: 0.0}}) do
[]
end
defp run_memory_benchmark(
scenario,
scenario_context = %ScenarioContext{
config: %Configuration{
memory_time: memory_time
}
}
) do
end_time = current_time() + memory_time
new_context = %ScenarioContext{
scenario_context
| current_time: current_time(),
end_time: end_time
}
do_benchmark(scenario, new_context, Collect.Memory, [])
end
@spec measure_runtimes(Scenario.t(), ScenarioContext.t(), number, boolean) :: [number]
defp measure_runtimes(scenario, context, run_time, fast_warning)
defp measure_runtimes(_, _, 0.0, _), do: []
defp measure_runtimes(scenario, scenario_context, run_time, fast_warning) do
end_time = current_time() + run_time
:erlang.garbage_collect()
{num_iterations, initial_run_time} =
RepeatedMeasurement.determine_n_times(scenario, scenario_context, fast_warning)
new_context = %ScenarioContext{
scenario_context
| current_time: current_time(),
end_time: end_time,
num_iterations: num_iterations
}
do_benchmark(scenario, new_context, Collect.Time, [initial_run_time])
end
defp current_time, do: :erlang.system_time(:nano_seconds)
# `run_times` is kept separately from the `Scenario` so that for the
# `parallel` execution case we can easily concatenate and flatten the results
# of all processes. That's why we add them to the scenario once after
# measuring has finished. `scenario` is still needed in general for the
# benchmarking function, hooks etc.
defp do_benchmark(
_scenario,
%ScenarioContext{
current_time: current_time,
end_time: end_time
},
_collector,
measurements
)
when current_time > end_time and measurements != [] do
# restore correct order - important for graphing
Enum.reverse(measurements)
end
defp do_benchmark(scenario, scenario_context, collector, measurements) do
measurement = collect(scenario, scenario_context, collector)
updated_context = %ScenarioContext{scenario_context | current_time: current_time()}
do_benchmark(
scenario,
updated_context,
collector,
updated_measurements(measurement, measurements)
)
end
# Collectors return `nil` when a measurement fails (e.g. memory), so keep the list unchanged
@spec updated_measurements(number | nil, [number]) :: [number]
defp updated_measurements(nil, measurements), do: measurements
defp updated_measurements(measurement, measurements), do: [measurement | measurements]
@doc """
Takes one measure with the given collector.
Correctly dispatches based on the number of iterations to perform.
"""
def collect(
scenario = %Scenario{function: function},
scenario_context = %ScenarioContext{
num_iterations: 1
},
collector
) do
new_input = Hooks.run_before_each(scenario, scenario_context)
function = main_function(function, new_input)
{measurement, return_value} = invoke_collector(collector, function)
Hooks.run_after_each(return_value, scenario, scenario_context)
measurement
end
def collect(
scenario,
scenario_context = %ScenarioContext{
num_iterations: iterations
},
collector
)
when iterations > 1 do
RepeatedMeasurement.collect(scenario, scenario_context, collector)
end
@no_input Benchmark.no_input()
def main_function(function, @no_input), do: function
def main_function(function, input), do: fn -> function.(input) end
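# Sketch of the wrapping above: with a real input, the benchmark function is
# closed over it so collectors can invoke a zero-arity function (the
# `@no_input` marker passes the function through unchanged):
#
#   fun = main_function(fn input -> Enum.sum(input) end, [1, 2, 3])
#   fun.() #=> 6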
defp invoke_collector({collector, collector_opts}, function),
do: collector.collect(function, collector_opts)
defp invoke_collector(collector, function), do: collector.collect(function)
end
lib/benchee/benchmark/runner.ex
defmodule AWS.Evidently do
@moduledoc """
You can use Amazon CloudWatch Evidently to safely validate new features by
serving them to a specified percentage of your users while you roll out the
feature.
You can monitor the performance of the new feature to help you decide when to
ramp up traffic to your users. This helps you reduce risk and identify
unintended consequences before you fully launch the feature.
You can also conduct A/B experiments to make feature design decisions based on
evidence and data. An experiment can test as many as five variations at once.
Evidently collects experiment data and analyzes it using statistical methods. It
also provides clear recommendations about which variations perform better. You
can test both user-facing features and backend features.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2021-02-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "evidently",
global?: false,
protocol: "rest-json",
service_id: "Evidently",
signature_version: "v4",
signing_name: "evidently",
target_prefix: nil
}
end
@doc """
This operation assigns feature variations to user sessions.
For each user session, you pass in an `entityID` that represents the user.
Evidently then checks the evaluation rules and assigns the variation.
The first rules that are evaluated are the override rules. If the user's
`entityID` matches an override rule, the user is served the variation specified
by that rule.
Next, if there is a launch of the feature, the user might be assigned to a
variation in the launch. The chance of this depends on the percentage of users
that are allocated to that launch. If the user is enrolled in the launch, the
variation they are served depends on the allocation of the various feature
variations used for the launch.
If the user is not assigned to a launch, and there is an ongoing experiment for
this feature, the user might be assigned to a variation in the experiment. The
chance of this depends on the percentage of users that are allocated to that
experiment. If the user is enrolled in the experiment, the variation they are
served depends on the allocation of the various feature variations used for the
experiment.
If the user is not assigned to a launch or experiment, they are served the
default variation.
"""
def batch_evaluate_feature(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/evaluations"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates an Evidently *experiment*.
Before you create an experiment, you must create the feature to use for the
experiment.
An experiment helps you make feature design decisions based on evidence and
data. An experiment can test as many as five variations at once. Evidently
collects experiment data and analyzes it by statistical methods, and provides
clear recommendations about which variations perform better.
Don't use this operation to update an existing experiment. Instead, use
[UpdateExperiment](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateExperiment.html).
"""
def create_experiment(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/experiments"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates an Evidently *feature* that you want to launch or test.
You can define up to five variations of a feature, and use these variations in
your launches and experiments. A feature must be created in a project. For
information about creating a project, see
[CreateProject](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateProject.html). Don't use this operation to update an existing feature. Instead, use
[UpdateFeature](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateFeature.html).
"""
def create_feature(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/features"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a *launch* of a given feature.
Before you create a launch, you must create the feature to use for the launch.
You can use a launch to safely validate new features by serving them to a
specified percentage of your users while you roll out the feature. You can
monitor the performance of the new feature to help you decide when to ramp up
traffic to more users. This helps you reduce risk and identify unintended
consequences before you fully launch the feature.
Don't use this operation to update an existing launch. Instead, use
[UpdateLaunch](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateLaunch.html).
"""
def create_launch(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/launches"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Creates a project, which is the logical object in Evidently that can contain
features, launches, and experiments.
Use projects to group similar features together.
To update an existing project, use
[UpdateProject](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateProject.html).
"""
def create_project(%Client{} = client, input, options \\ []) do
url_path = "/projects"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes an Evidently experiment.
The feature used for the experiment is not deleted.
To stop an experiment without deleting it, use
[StopExperiment](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_StopExperiment.html).
"""
def delete_experiment(%Client{} = client, experiment, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes an Evidently feature.
"""
def delete_feature(%Client{} = client, feature, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/features/#{AWS.Util.encode_uri(feature)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes an Evidently launch.
The feature used for the launch is not deleted.
To stop a launch without deleting it, use
[StopLaunch](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_StopLaunch.html).
"""
def delete_launch(%Client{} = client, launch, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/launches/#{AWS.Util.encode_uri(launch)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Deletes an Evidently project.
Before you can delete a project, you must delete all the features that the
project contains. To delete a feature, use
[DeleteFeature](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_DeleteFeature.html).
"""
def delete_project(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
This operation assigns a feature variation to one given user session.
You pass in an `entityID` that represents the user. Evidently then checks the
evaluation rules and assigns the variation.
The first rules that are evaluated are the override rules. If the user's
`entityID` matches an override rule, the user is served the variation specified
by that rule.
Next, if there is a launch of the feature, the user might be assigned to a
variation in the launch. The chance of this depends on the percentage of users
that are allocated to that launch. If the user is enrolled in the launch, the
variation they are served depends on the allocation of the various feature
variations used for the launch.
If the user is not assigned to a launch, and there is an ongoing experiment for
this feature, the user might be assigned to a variation in the experiment. The
chance of this depends on the percentage of users that are allocated to that
experiment. If the user is enrolled in the experiment, the variation they are
served depends on the allocation of the various feature variations used for the
experiment.
If the user is not assigned to a launch or experiment, they are served the
default variation.
"""
def evaluate_feature(%Client{} = client, feature, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/evaluations/#{AWS.Util.encode_uri(feature)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
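# Hypothetical usage sketch (the credentials, feature/project names, and
# entityId are made up; client construction and the exact return shape depend
# on your aws-elixir setup and version):
#
#   client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#   {:ok, result, _http_response} =
#     AWS.Evidently.evaluate_feature(client, "checkout-redesign", "my-project", %{
#       "entityId" => "user-123"
#     })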
@doc """
Returns the details about one experiment.
You must already know the experiment name. To retrieve a list of experiments in
your account, use
[ListExperiments](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListExperiments.html).
"""
def get_experiment(%Client{} = client, experiment, project, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves the results of a running or completed experiment.
No results are available until there have been 100 events for each variation and
at least 10 minutes have passed since the start of the experiment.
Experiment results are available up to 63 days after the start of the
experiment. They are not available after that because of CloudWatch data
retention policies.
"""
def get_experiment_results(%Client{} = client, experiment, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}/results"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Returns the details about one feature.
You must already know the feature name. To retrieve a list of features in your
account, use
[ListFeatures](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListFeatures.html).
"""
def get_feature(%Client{} = client, feature, project, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/features/#{AWS.Util.encode_uri(feature)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the details about one launch.
You must already know the launch name. To retrieve a list of launches in your
account, use
[ListLaunches](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListLaunches.html).
"""
def get_launch(%Client{} = client, launch, project, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/launches/#{AWS.Util.encode_uri(launch)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns the details about one project.
You must already know the project name. To retrieve a list of projects in your
account, use
[ListProjects](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListProjects.html).
"""
def get_project(%Client{} = client, project, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns configuration details about all the experiments in the specified
project.
"""
def list_experiments(
%Client{} = client,
project,
max_results \\ nil,
next_token \\ nil,
status \\ nil,
options \\ []
) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/experiments"
headers = []
query_params = []
query_params =
if !is_nil(status) do
[{"status", status} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns configuration details about all the features in the specified project.
"""
def list_features(
%Client{} = client,
project,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/features"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns configuration details about all the launches in the specified project.
"""
def list_launches(
%Client{} = client,
project,
max_results \\ nil,
next_token \\ nil,
status \\ nil,
options \\ []
) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/launches"
headers = []
query_params = []
query_params =
if !is_nil(status) do
[{"status", status} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns configuration details about all the projects in the current Region in
your account.
"""
def list_projects(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/projects"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Displays the tags associated with an Evidently resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Sends performance events to Evidently.
These events can be used to evaluate a launch or an experiment.
"""
def put_project_events(%Client{} = client, project, input, options \\ []) do
url_path = "/events/projects/#{AWS.Util.encode_uri(project)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Starts an existing experiment.
To create an experiment, use
[CreateExperiment](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateExperiment.html).
"""
def start_experiment(%Client{} = client, experiment, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}/start"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Starts an existing launch.
To create a launch, use
[CreateLaunch](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateLaunch.html).
"""
def start_launch(%Client{} = client, launch, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/launches/#{AWS.Util.encode_uri(launch)}/start"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Stops an experiment that is currently running.
If you stop an experiment, you can't resume it or restart it.
"""
def stop_experiment(%Client{} = client, experiment, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}/cancel"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Stops a launch that is currently running.
After you stop a launch, you will not be able to resume it or restart it. Also,
it will not be evaluated as a rule for traffic allocation, and the traffic that
was allocated to the launch will instead be available to the feature's
experiment, if there is one. Otherwise, all traffic will be served the default
variation after the launch is stopped.
"""
def stop_launch(%Client{} = client, launch, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/launches/#{AWS.Util.encode_uri(launch)}/cancel"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Assigns one or more tags (key-value pairs) to the specified CloudWatch Evidently
resource.
Projects, features, launches, and experiments can be tagged.
Tags can help you organize and categorize your resources. You can also use them
to scope user permissions by granting a user permission to access or change only
resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted
strictly as strings of characters.
You can use the `TagResource` action with a resource that already has tags. If
you specify a new tag key for the resource, this tag is appended to the list of
tags associated with the resource. If you specify a tag key that is already
associated with the resource, the new tag value that you specify replaces the
previous value for that tag.
You can associate as many as 50 tags with a resource.
For more information, see [Tagging Amazon Web Services resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).
"""
def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Removes one or more tags from the specified resource.
"""
def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
url_path = "/tags/#{AWS.Util.encode_uri(resource_arn)}"
headers = []
{query_params, input} =
[
{"tagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates an Evidently experiment.
Don't use this operation to update an experiment's tag. Instead, use
[TagResource](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html).
"""
def update_experiment(%Client{} = client, experiment, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/experiments/#{AWS.Util.encode_uri(experiment)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates an existing feature.
You can't use this operation to update the tags of an existing feature. Instead,
use
[TagResource](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html).
"""
def update_feature(%Client{} = client, feature, project, input, options \\ []) do
url_path =
"/projects/#{AWS.Util.encode_uri(project)}/features/#{AWS.Util.encode_uri(feature)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates a launch of a given feature.
Don't use this operation to update the tags of an existing launch. Instead, use
[TagResource](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html).
"""
def update_launch(%Client{} = client, launch, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/launches/#{AWS.Util.encode_uri(launch)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the description of an existing project.
To create a new project, use
[CreateProject](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateProject.html). Don't use this operation to update the data storage options of a project.
Instead, use
[UpdateProjectDataDelivery](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateProjectDataDelivery.html).
Don't use this operation to update the tags of a project. Instead, use
[TagResource](https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html).
"""
def update_project(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the data storage options for this project.
If you store evaluation events, you can keep them and analyze them on your own.
If you choose not to store evaluation events, Evidently deletes them after using
them to produce metrics and other experiment results that you can view.
You can't specify both `cloudWatchLogs` and `s3Destination` in the same
operation.
"""
def update_project_data_delivery(%Client{} = client, project, input, options \\ []) do
url_path = "/projects/#{AWS.Util.encode_uri(project)}/data-delivery"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:patch,
url_path,
query_params,
headers,
input,
options,
200
)
end
end
lib/aws/generated/evidently.ex
defmodule StrawHat.Review.Reactions do
@moduledoc """
Interactor module that defines all the functionality for Reactions management.
"""
use StrawHat.Review.Interactor
alias StrawHat.Review.Reaction
@doc """
Gets the list of reactions.
"""
@spec get_reactions(Scrivener.Config.t() | keyword()) :: Scrivener.Page.t()
def get_reactions(pagination \\ []), do: Repo.paginate(Reaction, pagination)
@doc """
Creates reaction.
"""
@spec create_reaction(Reaction.reaction_attrs()) ::
{:ok, Reaction.t()} | {:error, Ecto.Changeset.t()}
def create_reaction(reaction_attrs) do
%Reaction{}
|> Reaction.changeset(reaction_attrs)
|> Repo.insert()
end
@doc """
Updates reaction.
"""
@spec update_reaction(Reaction.t(), Reaction.reaction_attrs()) ::
{:ok, Reaction.t()} | {:error, Ecto.Changeset.t()}
def update_reaction(%Reaction{} = reaction, reaction_attrs) do
reaction
|> Reaction.changeset(reaction_attrs)
|> Repo.update()
end
@doc """
Destroys reaction.
"""
@spec destroy_reaction(Reaction.t()) :: {:ok, Reaction.t()} | {:error, Ecto.Changeset.t()}
def destroy_reaction(%Reaction{} = reaction), do: Repo.delete(reaction)
@doc """
Finds reaction by `id`.
"""
@spec find_reaction(integer()) :: {:ok, Reaction.t()} | {:error, Error.t()}
def find_reaction(reaction_id) do
reaction_id
|> get_reaction()
|> Response.from_value(
Error.new(
"straw_hat_review.reaction.not_found",
metadata: [reaction_id: reaction_id]
)
)
end
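# Illustrative call (hypothetical id; assumes such a record may or may not
# exist in the repo):
#
#   case find_reaction(42) do
#     {:ok, %Reaction{} = reaction} -> reaction
#     {:error, %Error{}} -> nil
#   end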
@doc """
Gets reaction by `id`.
"""
@spec get_reaction(integer()) :: Reaction.t() | nil | no_return
def get_reaction(reaction_id), do: Repo.get(Reaction, reaction_id)
@doc """
Gets list of reaction by ids.
"""
@spec get_reaction_by_ids([Integer.t()]) :: [Reaction.t()] | no_return
def get_reaction_by_ids(reaction_ids) do
query = from(reaction in Reaction, where: reaction.id in ^reaction_ids)
Repo.all(query)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking reaction changes.
"""
@spec change_reaction(Reaction.t()) :: Ecto.Changeset.t()
def change_reaction(%Reaction{} = reaction) do
Reaction.changeset(reaction, %{})
end
end
lib/straw_hat_review/reactions/reactions.ex
defmodule Trigger do
@moduledoc """
A simple way to sync between processes.
"""
@enforce_keys [:ref, :receiver]
defstruct [:ref, :receiver]
@typedoc """
A Trigger data type.
"""
@opaque t() :: %Trigger{
ref: reference(), # unique reference for the trigger
receiver: Process.dest(), # process awaiting on the trigger
}
@doc """
Creates a new Trigger.
## Options
* `receiver` - configure an event receiver, defaults to `self()`
## Examples
iex> Trigger.new()
%Trigger{...}
"""
@spec new(options) :: t
when options: [
receiver: Process.dest(),
]
def new(opts \\ []) do
receiver = Keyword.get(opts, :receiver, self())
ref = make_ref()
%Trigger{ref: ref, receiver: receiver}
end
@doc """
Sends a new event with arbitrary data to the receiver.
## Examples
iex> trigger = Trigger.new()
%Trigger{...}
iex> Trigger.fire(trigger)
:ok
iex> Trigger.wait(trigger)
{#PID<0.257.0>, nil}
"""
@spec fire(t, term) :: :ok
def fire(%Trigger{receiver: receiver, ref: ref}, data \\ nil) do
send(receiver, {ref, self(), data})
:ok
end
@doc """
Sends a new event with arbitrary data to the receiver and waits for a reply.
## Examples
iex> trigger = Trigger.new()
%Trigger{...}
iex> pid = spawn(fn ->
...> "Hello, world" = Trigger.fire_wait(trigger, "world")
...> end)
#PID<0.262.0>
iex> {sender, name} = Trigger.wait(trigger)
{#PID<0.262.0>, "world"}
iex> Process.alive?(pid)
true
iex> Trigger.reply(trigger, sender, "Hello, \#{name}")
:ok
iex> Process.alive?(pid)
false
"""
@spec fire_wait(t, term, timeout) :: term | no_return
def fire_wait(%Trigger{ref: ref} = trigger, data \\ nil, timeout \\ :infinity) do
fire(trigger, data)
receive do
{^ref, data} -> data
after
timeout -> exit({:timeout, {__MODULE__, :fire_wait, [trigger, data, timeout]}})
end
end
@doc """
Waits for an event.
For examples see `fire/2` or `fire_wait/3`.
"""
@spec wait(t, timeout) :: {pid, term} | no_return
def wait(%Trigger{ref: ref} = trigger, timeout \\ :infinity) do
receive do
{^ref, from, data} -> {from, data}
after
timeout -> exit({:timeout, {__MODULE__, :wait, [trigger, timeout]}})
end
end
@doc """
Sends a reply back to the process which sent the event.
"""
@spec reply(t, pid, term) :: :ok
def reply(%Trigger{ref: ref}, to, reply \\ nil) do
send(to, {ref, reply})
:ok
end
@doc """
Waits for an event and sends a reply back right away.
This is useful for two-way synchronization between processes, so the event
sender knows that the receiver has received the event.
## Examples
iex> trigger = Trigger.new()
%Trigger{...}
iex> test = fn -> IO.inspect({self(), :erlang.unique_integer([:positive, :monotonic])}); :ok end
#Function<...>
iex> test.()
{#PID<0.247.0>, 1}
iex> spawn(fn ->
...> test.()
...> "pong" = Trigger.fire_wait(trigger, "ping")
...> test.()
...> end)
{#PID<0.258.0>, 2}
#PID<0.258.0>
iex> "ping" = Trigger.wait_reply(trigger, "pong"); test.()
{#PID<0.247.0>, 3}
{#PID<0.258.0>, 4}
"""
@spec wait_reply(t, term, timeout) :: term | no_return
def wait_reply(%Trigger{} = trigger, data \\ nil, timeout \\ :infinity) do
{from, data_in} = wait(trigger, timeout)
reply(trigger, from, data)
data_in
end
end
lib/trigger.ex
defmodule State.Stop do
@moduledoc """
State for Stops. Supervises a cache as well as workers
for the R* tree for geo lookups.
"""
use Supervisor
alias Model.{Stop, WGS84}
alias State.{Route, RoutesPatternsAtStop, ServiceByDate, StopsOnRoute}
@worker_count 5
@type filter_opts :: %{
optional(:ids) => [Stop.id()],
optional(:routes) => [Model.Route.id()],
optional(:direction_id) => Model.Direction.id(),
optional(:date) => Date.t(),
optional(:longitude) => WGS84.longitude(),
optional(:latitude) => WGS84.latitude(),
optional(:radius) => State.Stop.List.radius(),
optional(:route_types) => [Model.Route.route_type()],
optional(:location_types) => [Stop.location_type()],
optional(:services) => [Model.Service.id()]
}
@type post_search_filter_opts :: %{
optional(:route_types) => [Model.Route.route_type()]
}
@type stop_search :: (() -> [Stop.t()])
def start_link do
Supervisor.start_link(__MODULE__, nil, name: __MODULE__)
end
def stop do
Supervisor.stop(__MODULE__)
end
def new_state(list_of_stops) do
:ok = State.Stop.Cache.new_state(list_of_stops)
for worker_id <- worker_range() do
:ok = State.Stop.Worker.new_state(worker_id, list_of_stops)
end
Events.publish({:new_state, State.Stop}, size())
:ok
end
def all do
State.Stop.Cache.all()
end
def size do
State.Stop.Cache.size()
end
def by_id(id) do
State.Stop.Cache.by_id(id)
end
def by_ids(ids) do
State.Stop.Cache.by_ids(ids)
end
def by_family_ids(ids) do
ids
|> by_ids
|> Enum.flat_map(&family/1)
|> Enum.uniq()
end
def by_parent_station(id) when is_binary(id) do
State.Stop.Cache.by_parent_station(id)
end
def by_parent_station(nil) do
[]
end
def by_location_type(location_types) do
State.Stop.Cache.by_location_types(location_types)
end
@spec by_parent_station_and_location_type(Stop.id(), Stop.location_type()) :: [Stop.t()]
def by_parent_station_and_location_type(id, type) do
State.Stop.Cache.match(%{location_type: type, parent_station: id}, :parent_station)
end
def siblings(id) when is_binary(id) do
case by_id(id) do
%{parent_station: station_id} ->
by_parent_station(station_id)
nil ->
[]
end
end
@doc """
Return the location_type 0 stop IDs given their ID or a parent's ID.
Useful for querying schedules or predictions, where the stop ID can only be location_type 0.
"""
def location_type_0_ids_by_parent_ids(ids) do
ids
|> by_ids()
|> Enum.flat_map(fn
%{id: id, location_type: 0} ->
[id]
%{id: parent_id, location_type: 1} ->
parent_id
|> by_parent_station()
|> Enum.flat_map(fn
%{id: id, location_type: 0} -> [id]
_ -> []
end)
_ ->
[]
end)
end
@spec around(WGS84.latitude(), WGS84.longitude()) :: [Model.Stop.t()]
@spec around(WGS84.latitude(), WGS84.longitude(), State.Stop.List.radius()) :: [Model.Stop.t()]
def around(latitude, longitude, radius \\ 0.01) do
random_worker()
|> State.Stop.Worker.around(latitude, longitude, radius)
|> by_ids
end
defp random_worker do
Enum.random(worker_range())
end
defp worker_range do
1..@worker_count
end
def family(%Stop{parent_station: nil, location_type: 1} = s) do
# find the children and include them
[s | State.Stop.Cache.by_parent_station(s.id)]
end
def family(%Stop{} = s) do
# we already have a parent station, so only include ourself
[s]
end
def family(_), do: []
@doc """
Applies a filtered search on Stops based on a map of filter values.
The allowed filterable keys are:
:ids
:routes
:direction_id
:date
:route_types
:longitude
:latitude
:radius
:location_type
If filtering for :direction_id, :routes must also be applied for the
direction filter to apply.
If filtering for :date, :routes must also be applied for the date filter to
apply.
If filtering for a location, both :latitude and :longitude must be provided
with :radius being optional.
"""
@spec filter_by(filter_opts) :: [Stop.t()]
def filter_by(filters) when is_map(filters) do
filters
|> build_filtered_searches()
|> do_searches()
|> do_post_search_filters(filters)
end
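# Illustrative call (the route id and coordinates are made up; assumes state
# has been loaded):
#
#   State.Stop.filter_by(%{
#     routes: ["Red"],
#     direction_id: 0,
#     latitude: 42.3601,
#     longitude: -71.0589,
#     radius: 0.02
#   })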
# Generate the functions needed to search concurrently
@spec build_filtered_searches(filter_opts, [stop_search]) :: [stop_search]
defp build_filtered_searches(filters, searches \\ [])
defp build_filtered_searches(%{routes: route_ids} = filters, searches) do
direction_opts =
case Map.get(filters, :direction_id) do
direction_id when direction_id != nil ->
[direction_id: direction_id]
_ ->
[]
end
service_opts =
case {Map.get(filters, :services), Map.get(filters, :date)} do
{[_] = service_ids, _} ->
[service_ids: service_ids]
{_, %Date{} = date} ->
[service_ids: ServiceByDate.by_date(date)]
_ ->
[]
end
opts = Keyword.merge(direction_opts, service_opts)
search_operation = fn ->
route_ids
|> StopsOnRoute.by_route_ids(opts)
|> by_ids()
end
filters
|> Map.drop([:routes, :direction_id, :date, :services])
|> build_filtered_searches([search_operation | searches])
end
defp build_filtered_searches(%{latitude: lat, longitude: long} = filters, searches) do
radius = filters[:radius] || 0.01
search_operation = fn -> around(lat, long, radius) end
filters
|> Map.drop([:latitude, :longitude, :radius])
|> build_filtered_searches([search_operation | searches])
end
defp build_filtered_searches(%{location_types: location_types} = filters, searches) do
search_operation = fn -> by_location_type(location_types) end
searches = [search_operation | searches]
filters
|> Map.drop([:location_types])
|> build_filtered_searches(searches)
end
defp build_filtered_searches(%{ids: ids} = filters, searches) do
search_operation = fn -> by_ids(ids) end
searches = [search_operation | searches]
filters
|> Map.drop([:ids])
|> build_filtered_searches(searches)
end
defp build_filtered_searches(_, searches), do: searches
@spec do_searches([stop_search]) :: [Stop.t()]
defp do_searches([]), do: all()
defp do_searches(search_operations) when is_list(search_operations) do
search_results =
Stream.map(search_operations, fn search_operation ->
case search_operation.() do
results when is_list(results) ->
results
_ ->
[]
end
end)
[first_result] = Enum.take(search_results, 1)
search_results
|> Stream.drop(1)
|> Enum.reduce(first_result, fn results, acc ->
acc_set = MapSet.new(acc)
Enum.filter(results, fn stop -> stop in acc_set end)
end)
|> Enum.uniq_by(& &1.id)
end
@spec do_post_search_filters([Stop.t()], post_search_filter_opts) :: [Stop.t()]
defp do_post_search_filters(stops, %{route_types: route_types} = filters) do
stops
|> Enum.filter(fn stop ->
stop.id
|> RoutesPatternsAtStop.routes_by_stop()
|> Route.by_ids()
|> Enum.any?(&(&1.type in route_types))
end)
|> do_post_search_filters(Map.delete(filters, :route_types))
end
defp do_post_search_filters(stops, _), do: stops
def last_updated, do: State.Stop.Cache.last_updated()
# Server callbacks
def init(_) do
workers =
for i <- worker_range() do
worker(State.Stop.Worker, [i], id: {:stop_worker, i})
end
children =
[
worker(State.Stop.Cache, []),
{Registry, keys: :unique, name: State.Stop.Registry}
] ++
workers ++
[
worker(State.Stop.Subscriber, [])
]
Supervisor.init(children, strategy: :one_for_one)
end
end
apps/state/lib/state/stop.ex
defmodule Multiset do
@moduledoc """
Functions for working with [multisets](https://en.wikipedia.org/wiki/Multiset), i.e. sets allowing
multiple instances of values.
The number of instances of a value in a multiset is called the _multiplicity_ of the value.
The `Multiset` is represented internally as a struct, therefore `%Multiset{}` can be used whenever
there is a need to match on any `Multiset`. Note though the struct fields are private and must not
be accessed directly. Instead, use the functions in this module.
"""
@opaque t :: %Multiset{map: %{value => non_neg_integer}}
@type value :: term
defstruct map: %{}, size: 0
@doc """
Returns a new multiset.
## Examples
iex> Multiset.new
#Multiset<[]>
"""
@spec new :: t
def new(), do: %Multiset{}
@doc """
Creates a multiset from an enumerable.
## Examples
iex> Multiset.new([:b, :a, 3, :a])
#Multiset<[{3, 1}, {:a, 2}, {:b, 1}]>
iex> Multiset.new([3, 3, 2, 2, 1])
#Multiset<[{1, 1}, {2, 2}, {3, 2}]>
"""
@spec new(Enum.t) :: t
def new(enumerable), do: Enum.reduce(enumerable, %Multiset{}, &put(&2, &1))
@doc """
Creates a multiset from an enumerable via a function that assigns multiplicities.
If `multiplicities` returns an integer < 1 for a value, then that value is not added to the
multiset.
## Examples
iex> Multiset.new([1, 2, 3], fn x -> x - 1 end)
#Multiset<[{2, 1}, {3, 2}]>
"""
@spec new(Enum.t, (term -> integer)) :: t
def new(enumerable, multiplicities) do
Enum.reduce(enumerable, %Multiset{}, fn value, result ->
put(result, value, multiplicities.(value))
end)
end
@doc """
Creates a multiset from a list of pairs of values and their multiplicities.
## Examples
iex> Multiset.from_list([{1, 3}, {2, 4}, {3, 0}])
#Multiset<[{1, 3}, {2, 4}]>
"""
@spec from_list([{value, pos_integer}]) :: t
def from_list(pairs) do
Enum.reduce(pairs, new(), fn {value, multiplicity}, result ->
put(result, value, multiplicity)
end)
end
@doc """
Deletes `k` (by default 1) instances of `value` from `multiset`.
Returns a new multiset which is a copy of `multiset` but with `k` fewer instance of `value`.
## Examples
iex> multiset = Multiset.new([1, 2, 3, 3])
iex> Multiset.delete(multiset, 3, 2)
#Multiset<[{1, 1}, {2, 1}]>
iex> Multiset.delete(multiset, 3)
#Multiset<[{1, 1}, {2, 1}, {3, 1}]>
"""
@spec delete(t, value, integer) :: t
def delete(multiset, value, k \\ 1)
def delete(%Multiset{map: map} = multiset, value, k) do
cur_multiplicity = Map.get(map, value, 0)
new_multiplicity = max(0, cur_multiplicity - k)
new_size = multiset.size - (cur_multiplicity - new_multiplicity)
if new_multiplicity == 0 do
%{multiset | map: Map.delete(map, value), size: new_size}
else
%{multiset | map: Map.put(map, value, new_multiplicity), size: new_size}
end
end
@doc """
Returns a multiset that is `multiset1` without the (instances of) values of `multiset2`.
## Examples
iex> Multiset.difference(Multiset.new([1, 2, 2, 3, 3]), Multiset.new([1, 1, 2, 4]))
#Multiset<[{2, 1}, {3, 2}]>
"""
@spec difference(t, t) :: t
def difference(multiset1, multiset2)
def difference(%Multiset{} = multiset1, %Multiset{map: map2}) do
:maps.fold(fn value, multiplicity, result -> delete(result, value, multiplicity) end,
multiset1, map2)
end
@doc """
Checks if two multisets are equal.
The comparison between values must be done using `===`.
## Examples
iex> Multiset.equal?(Multiset.new([1, 2]), Multiset.new([2, 1]))
true
iex> Multiset.equal?(Multiset.new([1, 2]), Multiset.new([1, 1, 2]))
false
"""
@spec equal?(t, t) :: boolean
def equal?(multiset1, multiset2)
def equal?(%Multiset{map: map1}, %Multiset{map: map2}), do: Map.equal?(map1, map2)
@doc """
Returns a multiset containing only (instances of) members that `multiset1` and `multiset2` have in
common.
## Examples
iex> Multiset.intersection(Multiset.new([1, 2, 2, 2, 3]), Multiset.new([2, 2, 3, 3, 4]))
#Multiset<[{2, 2}, {3, 1}]>
"""
@spec intersection(t, t) :: t
def intersection(multiset1, multiset2)
def intersection(%Multiset{map: map1}, %Multiset{map: map2}) do
# Rebinding inside `if` does not leak out of it, so swap the maps with an
# explicit match and fold over the smaller one.
{smaller, larger} =
if map_size(map1) > map_size(map2), do: {map2, map1}, else: {map1, map2}
:maps.fold(fn value, multiplicity, result ->
new_multiplicity = min(multiplicity, Map.get(larger, value, 0))
put(result, value, new_multiplicity)
end, new(), smaller)
end
@doc """
Checks if `multiset` contains at least one instance of `value`.
## Examples
iex> Multiset.member?(Multiset.new([1, 2, 3]), 2)
true
iex> Multiset.member?(Multiset.new([1, 2, 3]), 4)
false
"""
@spec member?(t, value) :: boolean
def member?(multiset, value), do: multiplicity(multiset, value) > 0
@doc """
Returns the multiplicity of `value` in `multiset`.
## Examples
iex> Multiset.multiplicity(Multiset.new([1, 2, 3, 1]), 1)
2
iex> Multiset.multiplicity(Multiset.new([1, 2, 3]), 4)
0
"""
@spec multiplicity(t, value) :: non_neg_integer
def multiplicity(multiset, value)
def multiplicity(%Multiset{map: map}, value), do: Map.get(map, value, 0)
@doc """
Inserts `k` (by default 1) instances of `value` into `multiset`.
Returns a new multiset which is a copy of `multiset` but with `k` more instance of `value`.
## Examples
iex> multiset = Multiset.new([1, 2])
iex> Multiset.put(multiset, 3, 2)
#Multiset<[{1, 1}, {2, 1}, {3, 2}]>
iex> Multiset.put(multiset, 1)
#Multiset<[{1, 2}, {2, 1}]>
"""
@spec put(t, value, integer) :: t
def put(multiset, value, k \\ 1)
def put(%Multiset{map: map, size: size} = multiset, value, k) do
if k < 1 do
multiset
else
new_map = Map.update(map, value, k, fn multiplicity -> multiplicity + k end)
new_size = size + k
%{multiset | map: new_map, size: new_size}
end
end
@doc """
Returns the number of (instances of) values in `multiset`.
## Examples
iex> Multiset.size(Multiset.new([1, 2, 2]))
3
"""
@spec size(t) :: non_neg_integer
def size(multiset), do: multiset.size
@doc """
Checks if `multiset1`'s values all have a smaller (or equal) multiplicity as the corresponding
values in `multiset2`.
## Examples
iex> Multiset.subset?(Multiset.new([1, 1, 2]), Multiset.new([1, 1, 2, 2, 3]))
true
iex> Multiset.subset?(Multiset.new([1, 1, 2]), Multiset.new([1, 2, 3]))
false
"""
@spec subset?(t, t) :: boolean
def subset?(multiset1, multiset2)
def subset?(%Multiset{map: map1}, %Multiset{map: map2} = multiset2) do
if map_size(map1) <= map_size(map2) do
:maps.fold(fn value, multiplicity, _ ->
if multiplicity <= multiplicity(multiset2, value) do
true
else
throw({:halt, false})
end
end, true, map1)
else
false
end
catch
{:halt, false} -> false
end
@doc """
Returns the multiset sum of `multiset1` and `multiset2`.
## Examples
iex> Multiset.sum(Multiset.new([1, 2, 2]), Multiset.new([2, 3, 3]))
#Multiset<[{1, 1}, {2, 3}, {3, 2}]>
"""
@spec sum(t, t) :: t
def sum(multiset1, multiset2)
def sum(%Multiset{map: map1}, %Multiset{map: map2}) do
new_map = Map.merge(map1, map2, fn _value, multiplicity1, multiplicity2 ->
multiplicity1 + multiplicity2
end)
%Multiset{map: new_map, size: sum_of_map_values(new_map)}
end
@doc """
Converts `multiset` to a list of pairs of values and their multiplicities.
## Examples
iex> Multiset.to_list(Multiset.new([1, 2, 3, 1]))
[{1, 2}, {2, 1}, {3, 1}]
"""
@spec to_list(t) :: [{value, pos_integer}]
def to_list(multiset)
def to_list(%Multiset{map: map}), do: Map.to_list(map)
@doc """
Returns the multiset union of `multiset1` and `multiset2`.
## Examples
iex> Multiset.union(Multiset.new([1, 2, 2]), Multiset.new([2, 3, 3]))
#Multiset<[{1, 1}, {2, 2}, {3, 2}]>
"""
@spec union(t, t) :: t
def union(multiset1, multiset2)
def union(%Multiset{map: map1}, %Multiset{map: map2}) do
new_map = Map.merge(map1, map2, fn _value, multiplicity1, multiplicity2 ->
max(multiplicity1, multiplicity2)
end)
%Multiset{map: new_map, size: sum_of_map_values(new_map)}
end
@doc """
Returns the values in `multiset` as a `MapSet`.
## Examples
iex> Multiset.values(Multiset.new([1, 2, 2, 3]))
MapSet.new([1, 2, 3])
"""
@spec values(t) :: MapSet.t
def values(multiset)
def values(%Multiset{map: map}), do: map |> Map.keys() |> MapSet.new()
defimpl Enumerable do
def reduce(set, acc, fun), do: Enumerable.List.reduce(Multiset.to_list(set), acc, fun)
def member?(set, val), do: {:ok, Multiset.member?(set, val)}
def count(set), do: {:ok, Multiset.size(set)}
# Fall back to the default slicing algorithm built on `reduce/3`.
def slice(_set), do: {:error, __MODULE__}
end
defimpl Collectable do
def into(original) do
{original, fn
set, {:cont, x} -> Multiset.put(set, x)
set, :done -> set
_, :halt -> :ok
end}
end
end
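# Because of the `Collectable` implementation above, multisets can be built
# with `Enum.into/2`:
#
#   Enum.into([1, 1, 2], Multiset.new())
#   #=> #Multiset<[{1, 2}, {2, 1}]>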
defimpl Inspect do
import Inspect.Algebra
def inspect(multiset, opts) do
concat ["#Multiset<", Inspect.List.inspect(Multiset.to_list(multiset), opts), ">"]
end
end
defp sum_of_map_values(map), do: map |> Map.values |> Enum.sum
end
lib/multiset.ex