defmodule SMSFactor.Campaigns do
@moduledoc """
Wrappers around the **Campaigns** section of the SMSFactor API.
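
## Example

A minimal sketch (the base URL middleware is an assumption; build the `Tesla`
client however your application does):

    client = Tesla.client([{Tesla.Middleware.BaseUrl, "https://api.smsfactor.com"}])
    SMSFactor.Campaigns.get_campaigns_history(client, %{length: 10})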
"""
@typedoc """
Params for retrieving campaigns history.
- `start` : Defines the start record for pagination. Defaults to 0.
- `length` : Defines the number of records to retrieve per request. Defaults to 100; the maximum is 1000.
- `date_start` : Adds a filter to retrieve campaigns whose send date is after this date. The date format must be as follows: `Y-m-d H:i:s`
- `date_end` : Adds a filter to retrieve campaigns whose send date is before this date. The date format must be as follows: `Y-m-d H:i:s`
- `sms_min` : Adds a filter to retrieve campaigns that have a minimum amount of SMS
- `sms_max` : Adds a filter to retrieve campaigns that have a maximum amount of SMS
"""
@type campaigns_history_params() :: %{atom() => any()}
@spec get_campaigns_history(Tesla.Client.t(), campaigns_history_params()) :: Tesla.Env.result()
def get_campaigns_history(client, params \\ %{}) do
Tesla.get(client, "/campaigns", query: params)
end
@spec get_campaign(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def get_campaign(client, campaign_id), do: Tesla.get(client, "/campaign/#{campaign_id}")
@spec blacklist_campaign_npai(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def blacklist_campaign_npai(client, campaign_id) do
Tesla.put(client, "/campaign/#{campaign_id}/npai")
end
@spec get_campaign_stops(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def get_campaign_stops(client, campaign_id) do
Tesla.get(client, "/campaign/#{campaign_id}/blacklist")
end
@spec get_campaign_npais(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def get_campaign_npais(client, campaign_id) do
Tesla.get(client, "/campaign/#{campaign_id}/npai")
end
@spec get_campaign_replies(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def get_campaign_replies(client, campaign_id) do
Tesla.get(client, "/campaign/#{campaign_id}/mo")
end
end
# Source: lib/sms_factor/campaigns.ex
defmodule X.Component do
@moduledoc ~S"""
Extends the given module with the X component functions:
## Example
defmodule ExampleComponent do
use X.Component,
assigns: %{
:message => String.t()
},
template: ~X"\""
<div>
{{ message }}
{{= yield }}
</div>
"\""
end
* `:assigns` - the component's assigns: a map of names to typespecs, or a plain list of atom names. Types can be defined with Elixir typespecs:
use X.Component,
assigns: %{
:book => any(),
:message => String.t(),
:items => [{atom(), String.t(), boolean()}],
optional(:active) => nil | boolean()
}
* `:template` - component X template.
## Component functions:
* `render/1`, `render/2` - renders the component as `iodata`. The render function can accept nested `iodata` elements:
ExampleComponent.render(%{message: "Example"}) do
[
~X"\""
<form action="/test">
<input name="title">
</form>
"\"",
Button.render(%{test: "Submit"})
]
end
* `render_to_string/1`, `render_to_string/2` - renders component to `bitstring`.
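  For example (a sketch; the exact output whitespace depends on the template):

      ExampleComponent.render_to_string(%{message: "Example"})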
* `assigns/0` - returns a list of tuples where the first element is the assign name (an `atom`) and the second element indicates whether the assign is required (`true`) or optional (`false`).
ExampleComponent.assigns()
# [message: true]
* `template/0` - returns X template string.
ExampleComponent.template()
# "<div>\n {{ message }}\n {{= yield }}\n</div>\n"
"""
@type options() :: [
{:assigns, map() | [atom()]}
| {:template, Macro.t()}
]
@reserved_dynamic_attrs [
:class,
:style,
:assigns,
:attrs
]
defmacro __using__(options) when is_list(options) do
{template, line} = fetch_template(options)
compiler_opts = [line: line, inline: X.compile_inline?(), context: __CALLER__.module]
template_ast = X.compile_string!(template, __CALLER__, compiler_opts)
assigns_ast = fetch_assigns(options, __CALLER__)
assigns_list = build_assigns_list(assigns_ast)
component_doc = build_component_doc(template, assigns_ast)
quote do
@moduledoc if @moduledoc,
do: Enum.join([@moduledoc, unquote(component_doc)], "\n"),
else: unquote(component_doc)
@spec template() :: String.t()
def template do
unquote(template)
end
@spec template_ast() :: Macro.t()
def template_ast do
unquote(Macro.escape(template_ast))
end
@spec assigns() :: [{name :: atom(), required :: boolean()}]
def assigns do
unquote(assigns_list)
end
X.Component.define_render_functions(unquote(template_ast), unquote(assigns_ast))
end
end
@doc false
defmacro define_render_functions(template_ast, assigns_ast) do
assigns_typespec = build_assigns_typespec(assigns_ast)
{optional_vars_ast, required_vars_ast} = build_assigns_vars_ast(assigns_ast, __CALLER__)
%{module: module} = __CALLER__
quote do
@spec render_to_string(unquote(assigns_typespec)) :: String.t()
@spec render_to_string(unquote(assigns_typespec), [{:do, iodata() | nil}]) :: String.t()
def render_to_string(assigns, options \\ [do: nil]) do
assigns
|> render(options)
|> IO.iodata_to_binary()
end
@spec render(unquote(assigns_typespec)) :: iodata()
def render(assigns) do
render(assigns, do: nil)
end
@spec render(unquote(assigns_typespec), [{:do, iodata() | nil}]) :: iodata()
def render(
unquote(Macro.var(:assigns, nil)) = unquote({:%{}, [], required_vars_ast}),
[{:do, unquote(Macro.var(:yield, module))}]
) do
_ = unquote(Macro.var(:assigns, nil))
_ = unquote(Macro.var(:yield, module))
unquote_splicing(optional_vars_ast)
unquote(template_ast)
end
end
end
@spec fetch_template(options()) :: {Macro.t(), integer() | nil}
defp fetch_template(options) do
case Keyword.get(options, :template, "") do
{:sigil_X, _, [{:<<>>, [line: line], [template]} | _]} ->
{template, line}
template when is_bitstring(template) ->
{template, nil}
end
end
@spec fetch_assigns(options(), Macro.Env.t()) :: Macro.t()
defp fetch_assigns(options, env) do
assigns = Keyword.get(options, :assigns, [])
if is_list(assigns) do
{:%{}, [], Enum.map(assigns, &{&1, quote(do: any())})}
else
Macro.postwalk(assigns, fn
ast = {:__aliases__, _, _} ->
Macro.expand_once(ast, env)
{:required, [_], [atom]} ->
atom
ast ->
ast
end)
end
end
@spec build_assigns_list(Macro.t()) :: [{name :: atom(), required :: boolean()}]
defp build_assigns_list({:%{}, _, assigns}) do
Enum.map(assigns, fn
{{spec, _, [attr]}, _} ->
{attr, spec != :optional}
{attr, _} ->
{attr, true}
end)
end
@spec build_component_doc(String.t(), Macro.t()) :: String.t()
defp build_component_doc(template, assigns) do
Enum.join(
[
"## Assigns:",
Code.format_string!(Macro.to_string(assigns), line_length: 60),
"## Template:",
template
],
"\n\n"
)
end
@spec build_assigns_typespec(Macro.t()) :: Macro.t()
defp build_assigns_typespec({:%{}, context, assigns}) do
optional_keys = [
quote(do: {optional(:attrs), [{binary(), any()}]}),
quote(do: {atom(), any()})
]
{:%{}, context, assigns ++ optional_keys}
end
@spec build_assigns_vars_ast(Macro.t() | [atom()], any()) :: {Macro.t(), Macro.t()}
defp build_assigns_vars_ast({:%{}, [_], assigns}, env) do
%{module: module} = env
Enum.reduce(assigns, {[], []}, fn
{{:optional, [line: line], [attr]}, _}, {optional, required} ->
maybe_warn_reserved_attribute(attr, %{env | line: line})
{[
quote do
unquote(Macro.var(attr, module)) =
Map.get(unquote(Macro.var(:assigns, nil)), unquote(attr))
end
| optional
], required}
{attr, _}, {optional, required} ->
maybe_warn_reserved_attribute(attr, env)
{optional, [{attr, Macro.var(attr, module)} | required]}
end)
end
@spec maybe_warn_reserved_attribute(atom(), Macro.Env.t()) :: :ok | nil
defp maybe_warn_reserved_attribute(attr, env) do
if attr in @reserved_dynamic_attrs do
IO.warn(
~s(property "#{to_string(attr)}" is reserved for dynamic tag attributes),
Macro.Env.stacktrace(env)
)
end
end
end
# Source: lib/x/component.ex
defmodule Grizzly.ZIPGateway.Config do
@moduledoc false
# This module is for making the `zipgateway.cfg` file
@type t :: %__MODULE__{
unsolicited_destination_ip6: String.t(),
unsolicited_destination_port: :inet.port_number(),
ca_cert: Path.t(),
cert: Path.t(),
priv_key: Path.t(),
eeprom_file: Path.t(),
tun_script: Path.t(),
pvs_storage_file: Path.t(),
provisioning_config_file: Path.t(),
pan_ip: :inet.ip_address(),
lan_ip: :inet.ip_address(),
lan_gw6: String.t(),
psk: String.t(),
manufacturer_id: non_neg_integer() | nil,
hardware_version: non_neg_integer() | nil,
product_id: non_neg_integer() | nil,
product_type: non_neg_integer() | nil,
serial_log: String.t() | nil,
extra_classes: [byte()]
}
defstruct unsolicited_destination_ip6: "fd00:aaaa::2",
unsolicited_destination_port: 41230,
ca_cert: "./Portal.ca_x509.pem",
cert: "./ZIPR.x509_1024.pem",
priv_key: "./ZIPR.key_1024.pem",
eeprom_file: "/root/zipeeprom.dat",
tun_script: "./zipgateway.tun",
pvs_storage_file: "/root/provisioning_list_store.dat",
provisioning_config_file: "/etc/zipgateway_provisioning_list.cfg",
pan_ip: {0xFD00, 0xBBBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
lan_ip: {0xFD00, 0xAAAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
lan_gw6: "::1",
psk: "123456789012345678901234567890AA",
serial_log: nil,
product_id: nil,
product_type: nil,
hardware_version: nil,
manufacturer_id: nil,
extra_classes: [0x85, 0x59, 0x5A, 0x8E, 0x6C, 0x8F]
@doc """
Make a new `t()` from the supplied options
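
## Example

A minimal sketch (the option values and paths are illustrative):

    cfg = Grizzly.ZIPGateway.Config.new(%{serial_log: "/tmp/serial.log"})
    Grizzly.ZIPGateway.Config.write(cfg, "/tmp/zipgateway.cfg")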
"""
@spec new(map()) :: t()
def new(opts \\ %{}) do
opts =
Map.take(opts, [
:manufacturer_id,
:hardware_version,
:product_id,
:product_type,
:serial_log,
:tun_script,
:lan_ip,
:pan_ip
])
struct(__MODULE__, opts)
end
@doc """
Write the contents of the `t()` to the file system
"""
@spec write(t(), Path.t()) :: :ok | {:error, File.posix()}
def write(cfg, path) do
contents = __MODULE__.to_string(cfg)
File.write(path, contents)
end
@doc """
Turn the `t()` into a string
"""
@spec to_string(t()) :: String.t()
def to_string(cfg) do
"""
ZipUnsolicitedDestinationIp6=#{cfg.unsolicited_destination_ip6}
ZipUnsolicitedDestinationPort=#{cfg.unsolicited_destination_port}
ZipCaCert=#{cfg.ca_cert}
ZipCert=#{cfg.cert}
ZipPrivKey=#{cfg.priv_key}
Eepromfile=#{cfg.eeprom_file}
TunScript=#{cfg.tun_script}
PVSStorageFile=#{cfg.pvs_storage_file}
ProvisioningConfigFile=#{cfg.provisioning_config_file}
ZipLanGw6=#{cfg.lan_gw6}
ZipPSK=#{cfg.psk}
"""
|> maybe_put_config_item(cfg, :serial_log, "SerialLog")
|> maybe_put_config_item(cfg, :product_id, "ZipProductID")
|> maybe_put_config_item(cfg, :manufacturer_id, "ZipManufacturerID")
|> maybe_put_config_item(cfg, :hardware_version, "ZipHardwareVersion")
|> maybe_put_config_item(cfg, :product_type, "ZipProductType")
|> maybe_put_config_item(cfg, :extra_classes, "ExtraClasses")
|> maybe_put_config_item(cfg, :pan_ip, "ZipPanIp6")
|> maybe_put_config_item(cfg, :lan_ip, "ZipLanIp6")
end
defp maybe_put_config_item(config_string, cfg, :extra_classes = field, cfg_name) do
case Map.get(cfg, field) do
nil ->
config_string
extra_command_classes ->
extra_command_classes_string = Enum.join(extra_command_classes, " ")
config_string <> "#{cfg_name}= #{extra_command_classes_string}\n"
end
end
defp maybe_put_config_item(config_string, cfg, field, cfg_name)
when field in [:pan_ip, :lan_ip] do
ip =
cfg
|> Map.get(field)
|> :inet.ntoa()
|> List.to_string()
config_string <> "#{cfg_name}=#{ip}\n"
end
defp maybe_put_config_item(config_string, cfg, field, cfg_name) do
cfg_item = Map.get(cfg, field)
if cfg_item != nil do
config_string <> "#{cfg_name} = #{cfg_item}\n"
else
config_string
end
end
end
# Source: lib/grizzly/zipgateway/config.ex
defmodule OLED.Display.Server do
@moduledoc """
Display server. Holds the display implementation and its state, and delegates
drawing and buffer operations to it via `GenServer` calls.
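
## Example

A minimal sketch (the `config` keys depend on the configured display
implementation and are not shown here):

    {:ok, server} = OLED.Display.Server.start_link(config)
    :ok = OLED.Display.Server.put_pixel(server, 10, 10, state: :on)
    :ok = OLED.Display.Server.display(server)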
"""
use GenServer
alias OLED.Display.Impl
@typedoc """
Pixel state
`:on` - Pixel High in normal mode
`:off` - Pixel Low in normal mode
"""
@type pixel_state :: :on | :off
@typedoc """
Pixel options
`mode` - Can be `:normal` or `:xor`
`state` - Pixel state
"""
@type pixel_opts :: [
state: pixel_state(),
mode: :normal | :xor
]
@type display_frame_opts :: [
memory_mode: :horizontal | :vertical | :page_addr
]
@doc false
def start_link(config, opts \\ []) do
GenServer.start_link(__MODULE__, config, opts)
end
@doc false
def init(config),
do: Impl.init(config)
@doc false
def display(server),
do: GenServer.call(server, :display)
@doc false
def display_frame(server, data, opts \\ []),
do: GenServer.call(server, {:display_frame, data, opts})
@doc false
def display_raw_frame(server, data, opts \\ []),
do: GenServer.call(server, {:display_raw_frame, data, opts})
@doc false
def clear(server, pixel_state \\ :off),
do: GenServer.call(server, {:clear, pixel_state})
@doc false
def put_buffer(server, data),
do: GenServer.call(server, {:put_buffer, data})
@doc false
def get_buffer(server),
do: GenServer.call(server, :get_buffer)
@doc false
def put_pixel(server, x, y, opts \\ []),
do: GenServer.call(server, {:put_pixel, x, y, opts})
@doc false
def line(server, x1, y1, x2, y2, opts \\ []),
do: GenServer.call(server, {:line, x1, y1, x2, y2, opts})
@doc false
def line_h(server, x, y, width, opts \\ []),
do: GenServer.call(server, {:line_h, x, y, width, opts})
@doc false
def line_v(server, x, y, height, opts \\ []),
do: GenServer.call(server, {:line_v, x, y, height, opts})
@doc false
def circle(server, x0, y0, r, opts),
do: GenServer.call(server, {:circle, x0, y0, r, opts})
@doc false
def rect(server, x, y, width, height, opts),
do: GenServer.call(server, {:rect, x, y, width, height, opts})
@doc false
def fill_rect(server, x, y, width, height, opts),
do: GenServer.call(server, {:fill_rect, x, y, width, height, opts})
@doc false
def get_dimensions(server),
do: GenServer.call(server, :get_dimensions)
@doc false
def handle_call(:display, _from, {impl, state}) do
state
|> impl.display()
|> handle_response(impl, state)
end
@doc false
def handle_call({:display_frame, data, opts}, _from, {impl, state}) do
state
|> impl.display_frame(data, opts)
|> handle_response(impl, state)
end
@doc false
def handle_call({:display_raw_frame, data, opts}, _from, {impl, state}) do
state
|> impl.display_raw_frame(data, opts)
|> handle_response(impl, state)
end
def handle_call({:clear, pixel_state}, _from, {impl, state}) do
state
|> impl.clear_buffer(pixel_state)
|> handle_response(impl, state)
end
def handle_call({:put_buffer, data}, _from, {impl, state}) do
state
|> impl.put_buffer(data)
|> handle_response(impl, state)
end
def handle_call(:get_buffer, _from, {impl, state}) do
res = impl.get_buffer(state)
{:reply, res, {impl, state}}
end
def handle_call({:put_pixel, x, y, opts}, _from, {impl, state}) do
state
|> impl.put_pixel(x, y, opts)
|> handle_response(impl, state)
end
def handle_call({:line, x1, y1, x2, y2, opts}, _from, {impl, state}) do
state
|> impl.line(x1, y1, x2, y2, opts)
|> handle_response(impl, state)
end
def handle_call({:line_h, x, y, width, opts}, _from, {impl, state}) do
state
|> impl.line_h(x, y, width, opts)
|> handle_response(impl, state)
end
def handle_call({:line_v, x, y, height, opts}, _from, {impl, state}) do
state
|> impl.line_v(x, y, height, opts)
|> handle_response(impl, state)
end
def handle_call({:rect, x, y, width, height, opts}, _from, {impl, state}) do
state
|> impl.rect(x, y, width, height, opts)
|> handle_response(impl, state)
end
def handle_call({:circle, x0, y0, r, opts}, _from, {impl, state}) do
state
|> impl.circle(x0, y0, r, opts)
|> handle_response(impl, state)
end
def handle_call({:fill_rect, x, y, width, height, opts}, _from, {impl, state}) do
state
|> impl.fill_rect(x, y, width, height, opts)
|> handle_response(impl, state)
end
def handle_call(:get_dimensions, _from, {impl, state}) do
res = impl.get_dimensions(state)
{:reply, res, {impl, state}}
end
defp handle_response({:error, _} = error, impl, old_state),
do: {:reply, error, {impl, old_state}}
defp handle_response(state, impl, _old_state),
do: {:reply, :ok, {impl, state}}
end
# Source: lib/oled/display/server.ex
defmodule Zig do
@moduledoc """
Inline NIF support for [Zig](https://ziglang.org)
### Motivation
> Zig is a general-purpose programming language designed for robustness,
> optimality, and maintainability.
The programming philosophy of Zig matches up nicely with the programming
philosophy of the BEAM VM; in particular, its emphasis on simplicity and
structure should be very appealing to practitioners of Elixir.
The following features make Zig extremely amenable to inline language
support in a BEAM language:
- Simplicity. Zig's syntax is definable in a simple YACC document, and
Zig takes a stance against making its feature set more complex (though
it may evolve somewhat en route to 1.0)
- Composability. Zig is unopinionated about how to go about memory
allocations. Its allocator interface can very easily be backed
by the BEAM's, which means that you have access to generic memory
allocation *strategies* through its composable allocator scheme.
- C integration. It's very easy to interoperate between Zig and C.
In fact, Zig is likely to be an easier glue language for C ABIs than
C itself.
### Basic usage
In the BEAM, you can define a NIF by consulting the following [document](
https://erlang.org/doc/man/erl_nif.html) and implementing the appropriate
shared object/DLL callbacks. However, Zigler will take care of all of
this for you.
Simply `use Zig` in your module, providing the app atom in the property
list.
Then, use the `sigil_Z/2` macro and write zig code. Any nifs you define
should be preceded with the `/// nif: function_name/arity` zig docstring.
#### Example
```
defmodule MyModule do
use Zig
~Z\"""
/// nif: my_func/1
fn my_func(val: i64) i64 {
return val + 1;
}
\"""
end
```
Zig will *automatically* fill out the appropriate NIF C template, compile
the shared object, and bind it into the module pre-compilation. In the case
of the example, there will be a `MyModule.my_func/1` function call found in
the module.
Zig will also make sure that your statically-typed Zig data are guarded
when you marshal it from the dynamically-typed BEAM world. However, you may
only pass in and return certain types. As an escape hatch, you may use
the `beam.term` type, which is equivalent to the `ERL_NIF_TERM` type. See
[`erl_nif`](erl_nif.html).
### Guides
Please consult the following guides for detail topics:
- [different execution modes](nifs.html)
- [how to build BEAM `resources`](resources.html)
### Nerves Support
Nerves is supported out of the box, and the system should cross-compile
to the ARM ABI as necessary, depending on what your Nerves `:target` is. You
may also directly specify a zig target using the
`use Zig, target: <target>` option.
### Environment
Sometimes, you will need to pass the BEAM environment (which is the code
execution context, including process info, etc.) into the NIF function. In
this case, you should pass it as the first argument, as a `beam.env` type
value.
#### Example
```
defmodule MyModule do
use Zig
~Z\"""
/// nif: my_func_with_env/1
fn my_func_with_env(env: beam.env, pid: beam.pid) void {
var sendable_term: []const u8 = "ping"[0..];
var msg = beam.make_slice(env, sendable_term);
var res = e.enif_send(env, pid, env, msg);
}
\"""
end
```
### Bring your own version of Zig
If you would like to use your system's local `zig` command, set the
`local_zig` option in `config.exs`:
```
config :zigler, local_zig: true
```
This will use `System.find_executable` to obtain the zig command. If
you want to specify the zig command manually, use the following:
```
config :zigler, local_zig: "path/to/zig/command"
```
### External Libraries
If you need to bind static (`*.a`) or dynamic (`*.so`) libraries into your
module, you may link them with the `:libs` argument.
Note that zig statically binds shared libraries into the assets it creates.
This simplifies deployment for you.
#### Example
```
defmodule Blas do
use Zig,
libs: ["/usr/lib/x86_64-linux-gnu/blas/libblas.so"],
include: ["/usr/include/x86_64-linux-gnu"]
~Z\"""
const blas = @cImport({
@cInclude("cblas.h");
...
```
### Compilation assistance
If something should go wrong, Zigler will translate the Zig compiler error
into an Elixir compiler error, and let you know exactly which line in the
`~Z` block it came from.
### Syntactic Sugar
Some of the erlang nif terms can get unwieldy, especially in Zig, which
prefers terseness. Each of the basic BEAM types is shadowed by a Zig type
in the `beam` module. The `beam` struct is always imported into the header
of the zig file used, so all zig code in the same directory as the module
should have access to the `beam` struct if it does `@import("beam.zig")`.
### Importing files
If you need to write code outside of the basic module (you will, for anything
non-trivial), just place it in the same directory as your module.
#### Example
```
~Z\"""
const extra_code = @import("extra_code.zig");
/// nif: use_extra_code/1
fn use_extra_code(val: i64) i64 {
return extra_code.extra_fn(val);
}
\"""
```
If you would like to include a custom c header file, create an `include/`
directory inside your path tree and it will be available to zig as a default
search path as follows:
```
~Z\"""
const c = @cImport({
@cInclude("my_c_header.h");
});
/// nif: my_nif/1
...
\"""
```
If the c header defines `extern` functions, it's your responsibility to make
sure those externed functions are available by compiling other c files or
using a shared library.
### Documentation
Use the builtin zig `///` docstring to write your documentation. If it's in
front of the nif declaration, it will wind up in the correct place in your
elixir documentation.
See `Zig.Doc` for more information on how to document in zig and what to
document. See `Mix.Tasks.ZigDoc` for information on how to get your Elixir
project to incorporate zig documentation.
### Tests
Use the builtin zig `test` keyword to write your internal zig unit tests.
These can be imported into an ExUnit module by following this example:
```
defmodule MyTest do
use ExUnit.Case
use Zig.Unit
zigtest ModuleWithZigCode
end
```
See `Zig.Unit` for more information.
"""
alias Zig.Compiler
alias Zig.Parser
# default release modes.
# you can override these in your `use Zig` statement.
@spec __using__(keyword) :: Macro.t
defmacro __using__(opts) do
#mode = opts[:release_mode] || @default_release_mode
# clear out the assembly directory
Mix.env
|> Compiler.assembly_dir(__CALLER__.module)
|> File.rm_rf!
user_opts = opts
|> Keyword.take(~w(libs resources dry_run c_includes
system_include_dirs local link_libc)a)
include_dirs = opts
|> Keyword.get(:include, [])
|> Kernel.++(if has_include_dir?(__CALLER__), do: ["include"], else: [])
zigler! = struct(%Zig.Module{
file: Path.relative_to_cwd(__CALLER__.file),
module: __CALLER__.module,
imports: Zig.Module.imports(opts[:imports]),
include_dirs: include_dirs,
version: get_project_version(),
otp_app: get_app()},
user_opts)
zigler! = %{zigler! | code: Zig.Code.headers(zigler!)}
Module.register_attribute(__CALLER__.module, :zigler, persist: true)
Module.put_attribute(__CALLER__.module, :zigler, zigler!)
quote do
import Zig
require Zig.Compiler
@on_load :__load_nifs__
@before_compile Zig.Compiler
end
end
defp has_include_dir?(env) do
env.file
|> Path.dirname
|> Path.join("include")
|> File.dir?
end
@doc """
declares a string block to be included in the module's .zig source
file. At least one of these blocks must define a nif.
"""
defmacro sigil_Z({:<<>>, meta, [zig_code]}, []) do
quoted_code(zig_code, meta, __CALLER__)
end
@doc """
like `sigil_Z/2`, but lets you interpolate values from the outside
elixir context using string interpolation (the `\#{value}` form)
"""
defmacro sigil_z(code = {:<<>>, _, _}, []) do
quoted_code(code, [line: __CALLER__.line], __CALLER__)
end
defp quoted_code(zig_code, meta, caller) do
line = meta[:line]
module = caller.module
file = Path.relative_to_cwd(caller.file)
quote bind_quoted: [module: module, zig_code: zig_code, file: file, line: line] do
zigler = Module.get_attribute(module, :zigler)
new_zigler = zig_code
|> Parser.parse(zigler, file, line)
@zigler new_zigler
end
end
defp get_project_version do
Mix.Project.get
|> apply(:project, [])
|> Keyword.get(:version)
|> Version.parse!
end
defp get_app do
Mix.Project.get
|> apply(:project, [])
|> Keyword.get(:app)
end
@extension (case :os.type() do
{:unix, :linux} -> ".so"
{:unix, :freebsd} -> ".so"
{:unix, :darwin} -> ".dylib"
{_, :nt} -> ".dll"
end)
@doc """
Outputs a String name for the module.
Note that for filesystem use, you must supply the extension. For internal (BEAM) use, the
filesystem extension will be inferred. Therefore we provide two versions of this function.
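
For example (on Linux, where the extension is `.so`; the struct literal is illustrative):

    Zig.nif_name(%Zig.Module{module: MyModule})
    # => "libElixir.MyModule.so"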
"""
def nif_name(module, use_suffixes \\ true) do
if use_suffixes do
"lib#{module.module}#{@extension}"
else
"lib#{module.module}"
end
end
end
# Source: lib/zig.ex
defmodule EQRCode.SVG do
@moduledoc """
Render the QR Code matrix in SVG format
```elixir
qr_code_content
|> EQRCode.encode()
|> EQRCode.svg(color: "#cc6600", shape: "circle", width: 300)
```
You can specify the following attributes of the QR code:
* `color`: In hexadecimal format. The default is `#000`
* `shape`: Only `square` or `circle`. The default is `square`
* `width`: The width of the QR code in pixel. Without the width attribute, the QR code size will be dynamically generated based on the input string.
* `viewbox`: When set to `true`, the SVG element will specify its height and width using `viewBox`, instead of explicit `height` and `width` tags.
Default options are `[color: "#000", shape: "square"]`.
"""
alias EQRCode.Matrix
@doc """
Return the SVG format of the QR Code
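
## Example

A minimal sketch (the option values are illustrative):

    "hello"
    |> EQRCode.encode()
    |> EQRCode.svg(color: "#cc6600", shape: "circle", width: 300)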
"""
@spec svg(Matrix.t(), map() | Keyword.t()) :: String.t()
def svg(%Matrix{matrix: matrix}, options \\ []) do
options = options |> Enum.map(& &1)
matrix_size = matrix |> Tuple.to_list() |> Enum.count()
svg_options = options |> Map.new() |> set_svg_options(matrix_size)
dimension = matrix_size * svg_options[:module_size]
xml_tag = ~s(<?xml version="1.0" standalone="yes"?>)
dimension_attrs =
if Keyword.get(options, :viewbox, false) do
~s(viewBox="0 0 #{dimension} #{dimension}")
else
~s(width="#{dimension}" height="#{dimension}")
end
open_tag =
~s(<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events" #{
dimension_attrs
}
shape-rendering="crispEdges">)
close_tag = ~s(</svg>)
result =
Tuple.to_list(matrix)
|> Stream.with_index()
|> Stream.map(fn {row, row_num} ->
Tuple.to_list(row)
|> format_row_as_svg(row_num, svg_options)
end)
|> Enum.to_list()
Enum.join([xml_tag, open_tag, result, close_tag], "\n")
end
defp set_svg_options(options, matrix_size) do
options
|> Map.put_new(:color, "#000")
|> set_module_size(matrix_size)
|> Map.put_new(:shape, "square")
|> Map.put_new(:size, matrix_size)
end
defp set_module_size(%{width: width} = options, matrix_size) when is_integer(width) do
options
|> Map.put_new(:module_size, width / matrix_size)
end
defp set_module_size(%{width: width} = options, matrix_size) when is_binary(width) do
options
|> Map.put_new(:module_size, String.to_integer(width) / matrix_size)
end
defp set_module_size(options, _matrix_size) do
options
|> Map.put_new(:module_size, 11)
end
defp format_row_as_svg(row_matrix, row_num, svg_options) do
row_matrix
|> Stream.with_index()
|> Stream.map(fn {col, col_num} ->
substitute(col, row_num, col_num, svg_options)
end)
|> Enum.to_list()
end
defp substitute(data, row_num, col_num, svg_options) when is_nil(data) do
y = col_num * svg_options[:module_size]
x = row_num * svg_options[:module_size]
~s(<rect width="#{svg_options[:module_size]}" height="#{svg_options[:module_size]}" x="#{x}" y="#{
y
}" style="fill:#fff"/>)
end
defp substitute(1, row_num, col_num, %{shape: "circle", size: size} = svg_options) do
y = col_num * svg_options[:module_size]
x = row_num * svg_options[:module_size]
if (row_num <= 8 && col_num <= 8) || (row_num >= size - 9 && col_num <= 8) ||
(row_num <= 8 && col_num >= size - 9) do
~s(<rect width="#{svg_options[:module_size]}" height="#{svg_options[:module_size]}" x="#{x}" y="#{
y
}" style="fill:#{svg_options[:color]}"/>)
else
~s(<circle r="#{svg_options[:module_size] / 2.0}" cx="#{x + svg_options[:module_size] / 2.0}" cy="#{
y + svg_options[:module_size] / 2.0
}" style="fill:#{svg_options[:color]};"/>)
end
end
defp substitute(1, row_num, col_num, svg_options) do
y = col_num * svg_options[:module_size]
x = row_num * svg_options[:module_size]
~s(<rect width="#{svg_options[:module_size]}" height="#{svg_options[:module_size]}" x="#{x}" y="#{
y
}" style="fill:#{svg_options[:color]}"/>)
end
defp substitute(0, row_num, col_num, svg_options) do
y = col_num * svg_options[:module_size]
x = row_num * svg_options[:module_size]
~s(<rect width="#{svg_options[:module_size]}" height="#{svg_options[:module_size]}" x="#{x}" y="#{
y
}" style="fill:#fff"/>)
end
end
# Source: lib/eqrcode/svg.ex
defmodule Matrix do
@moduledoc """
Helper module for working with multidimensional arrays.
## Creating a 2 x 3 matrix
iex> Matrix.new(2, 3)
[
[nil, nil],
[nil, nil],
[nil, nil]
]
## Converting a matrix to a map
iex> Matrix.new(3, 3)
...> |> Matrix.to_map()
%{
0 => %{ 0 => nil, 1 => nil, 2 => nil},
1 => %{ 0 => nil, 1 => nil, 2 => nil},
2 => %{ 0 => nil, 1 => nil, 2 => nil}
}
## Adding a new element to a nested map
iex> Matrix.put(%{}, [1, 2], 5)
%{
1 => %{ 2 => 5 }
}
iex> %{1 => %{ 2 => 5 }}
...> |> Matrix.put([1, 3], 2)
%{
1 => %{
2 => 5 ,
3 => 2
}
}
## Getting elements from a nested map
iex> %{ 1 => %{ 2 => 5 , 3 => 2 }}
...> |> Matrix.get(1)
%{ 2 => 5, 3 => 2}
iex> %{ 1 => %{ 2 => 5 , 3 => 2 }}
...> |> Matrix.get([1])
%{ 2 => 5, 3 => 2}
iex> %{ 1 => %{ 2 => 5 , 3 => 2 }}
...> |> Matrix.get([1, 2])
5
iex> %{ 1 => %{ 2 => 5 , 3 => 2 }}
...> |> Matrix.get([2, 4])
nil
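
## Initializing with a function

The optional third argument may be a function receiving the x and y indices:

    iex> Matrix.new(2, 2, fn i, j -> i + j end)
    [[0, 1], [1, 2]]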
"""
@type initializer :: any() | (non_neg_integer(), non_neg_integer() -> any())
@spec new(non_neg_integer, non_neg_integer, initializer) :: list
def new(x, y, initial_value_or_fun \\ nil)
def new(x, y, fun) when is_function(fun) do
for j <- 0..(y - 1) do
for i <- 0..(x - 1) do
fun.(i, j)
end
end
end
def new(x, y, initial_value) do
for _j <- 0..(y - 1) do
for _i <- 0..(x - 1) do
initial_value
end
end
end
@spec to_map(list) :: map()
def to_map(matrix) when is_list(matrix), do: do_to_map(matrix)
defp do_to_map(matrix, index \\ 0, map \\ %{})
defp do_to_map([], _index, map), do: map
defp do_to_map([h | t], index, map),
do: do_to_map(t, index + 1, Map.put(map, index, do_to_map(h)))
defp do_to_map(value, _index, _map), do: value
@spec put(map(), list, any) :: map()
def put(map, keys, value), do: do_map_put(value, keys, map)
defp do_map_put(value, keys, map)
defp do_map_put(value, [], _), do: value
defp do_map_put(value, [key | tail], nil), do: Map.put(%{}, key, do_map_put(value, tail, Map.get(%{}, key)))
defp do_map_put(value, [key | tail], map), do: Map.put(map, key, do_map_put(value, tail, Map.get(map, key)))
@spec get(map(), list | any) :: map() | any()
def get(map, key_or_keys)
def get(nil, _key), do: nil
def get(map, [key | []]), do: Map.get(map, key)
def get(map, [key | tail]), do: get(Map.get(map, key), tail)
def get(map, key), do: Map.get(map, key)
end
# Source: lib/algs/matrix.ex
defmodule Elixirfm.Artist do
@moduledoc """
Functions for Last.fm Artist endpoints.
todo:
artist.addTags
artist.removeTags
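
## Example

A sketch (assumes the application is configured with a Last.fm API key):

    Elixirfm.Artist.search("Queen", page: 1, limit: 5)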
"""
@method "artist"
defp req(uri), do: Elixirfm.get_request(@method <> uri)
defp req(uri, args), do: Elixirfm.get_request(@method <> uri, args)
@doc """
Search an artist
"""
@spec search(String.t(), [page: integer(), limit: integer()]) :: Elixirfm.response
def search(query, args \\ [page: 1, limit: 30]),
do: req(".search&artist=#{query}", args)
@doc "Get artist info"
@spec info(String.t(), [lang: String.t(), username: String.t()]) :: Elixirfm.response
def info(query, args \\ [lang: "eng", username: ""]),
do: req(".getinfo&artist=#{query}", args)
@doc "Retrive list of Similar Artists"
@spec similar(String.t(), [limit: integer()]) :: Elixirfm.response
def similar(query, args \\ [limit: 10]),
do: req(".getsimilar&artist=#{query}", args)
@doc "Get Lastfm tags associated with the artist"
def tags(query, args \\ []), do: req(".getTags&artist=#{query}", args)
@doc """
Get Top albums for an artist, ordered by popularity
"""
@spec top_album(String.t(), [page: integer(), limit: integer()]) :: Elixirfm.response
def top_album(query, args \\ [page: 1, limit: 30]),
do: req(".gettopalbums&artist=#{query}", args)
@doc """
Gets top tags for an artist on Last.fm, ordered by popularity
"""
@spec top_tags(String.t()) :: Elixirfm.response
def top_tags(query), do: req(".gettoptags&artist=#{query}")
@doc """
Gets top tracks for an artist, ordered by popularity. (defaults to first 15)
"""
@spec top_tracks(String.t(), [page: integer(), limit: integer()]) :: Elixirfm.response
def top_tracks(query, args \\ [page: 1, limit: 15]) do
req(".gettoptracks&artist=#{query}", args)
end
@doc """
Use the last.fm corrections data to check whether the supplied artist has a correction to a canonical artist
"""
@spec correction(String.t()) :: Elixirfm.response
def correction(query), do: req(".getcorrection&artist=#{query}")
end
# Source: lib/elixirfm/artist.ex
defmodule Cli.Parse do
@moduledoc """
Module used for parsing user input.
"""
require Logger
@day_minutes 24 * 60
@application_options [verbose: :boolean, config: :string]
@doc """
Extracts options from the supplied arguments that are not task/query related.
The supported options are:
- `:verbose` - for enabling extra logging
- `:config` - for supplying a custom config file
The function returns the remaining arguments without the above options.
"""
def extract_application_options(args) do
{options, _, _} = OptionParser.parse(args, strict: @application_options)
parsed_args = Enum.flat_map(options, fn {k, v} -> ["--#{Atom.to_string(k)}", v] end)
remaining_args = Enum.reject(args, fn arg -> Enum.member?(parsed_args, arg) end)
{options, remaining_args}
end
@expected_update_args [
task: :string,
start_date: :string,
start_time: :string,
duration: :string
]
@doc """
Generates a new `Api.TaskUpdate` object from the supplied arguments.
"""
def args_to_task_update(args) do
parsed =
case args do
[head | _] ->
cond do
String.starts_with?(head, "--") ->
parse_as_options(@expected_update_args, args)
String.contains?(head, "=") ->
parse_as_kv(@expected_update_args, args)
true ->
# positional arguments are not supported
[]
end
[] ->
[]
end
case parsed do
[_ | _] ->
task =
if parsed[:task] do
parse_task(parsed[:task])
else
{:ok, nil}
end
start_utc =
if parsed[:start_date] && parsed[:start_time] do
with {:ok, start_date} <- parse_date(parsed[:start_date]),
{:ok, time_type, start_time} <- parse_time(parsed[:start_time]),
{:ok, start_utc} <- from_local_time_zone(start_date, start_time, time_type) do
{:ok, start_utc}
end
else
{:ok, nil}
end
duration =
if parsed[:duration] do
case parse_duration(parsed[:duration]) do
{:ok, parsed_duration} when parsed_duration > 0 -> {:ok, parsed_duration}
{:ok, parsed_duration} -> {:error, "Task duration cannot be [#{parsed_duration}]"}
error -> error
end
else
{:ok, nil}
end
with {:ok, task} <- task,
{:ok, start_utc} <- start_utc,
{:ok, duration} <- duration do
if task || start_utc || duration do
{
:ok,
%Api.TaskUpdate{
task: task,
start: start_utc,
duration: duration
}
}
else
{:error, "No expected or valid arguments specified"}
end
end
[] ->
{:error, "No or unparsable arguments specified"}
end
end
@expected_query_args [
from: :string,
to: :string,
sort_by: :string,
order: :string
]
@doc """
Generates a new `Api.Query` object from the supplied arguments.
"""
def args_to_query(args) do
parsed =
case args do
[head | _] ->
cond do
String.starts_with?(head, "--") -> parse_as_options(@expected_query_args, args)
String.contains?(head, "=") -> parse_as_kv(@expected_query_args, args)
true -> parse_as_positional(@expected_query_args, args)
end
[] ->
# uses defaults
[]
end
with {:ok, from_date} <- parse_date(Keyword.get(parsed, :from, "today")),
{:ok, to_date} <- parse_date(Keyword.get(parsed, :to, "today")),
{:ok, from} <- NaiveDateTime.from_iso8601("#{from_date}T00:00:00"),
{:ok, to} <- NaiveDateTime.from_iso8601("#{to_date}T23:59:59"),
sort_by <- Keyword.get(parsed, :sort_by, "start"),
order <- Keyword.get(parsed, :order, "desc") do
{
:ok,
%Api.Query{
from: from,
to: to,
sort_by: sort_by,
order: order
}
}
end
end
@expected_task_args [
task: :string,
start_date: :string,
start_time: :string,
end_time: :string,
duration: :string
]
@doc """
Generates a new `Api.Task` object from the supplied arguments.
"""
def args_to_task(args) do
case args do
[head | _] ->
parsed =
cond do
String.starts_with?(head, "--") ->
parse_as_options(@expected_task_args, args)
String.contains?(head, "=") ->
parse_as_kv(@expected_task_args, args)
true ->
parsed = parse_as_positional(@expected_task_args, args)
case parse_duration(Keyword.get(parsed, :end_time, "")) do
{:ok, _} ->
parsed |> Keyword.delete(:end_time) |> Keyword.put(:duration, parsed[:end_time])
{:error, _} ->
parsed
end
end
with {:ok, task} <- parse_task(parsed[:task]),
{:ok, start_date} <- parse_date(Keyword.get(parsed, :start_date, "today")),
{:ok, time_type, start_time} <- parse_time(Keyword.get(parsed, :start_time, "now")),
{:ok, start_utc} <- from_local_time_zone(start_date, start_time, time_type),
{:ok, duration} <- duration_from_parsed_args(parsed, start_utc, start_date) do
{
:ok,
%Api.Task{
id: UUID.uuid4(),
task: task,
start: start_utc,
duration: duration
}
}
end
[] ->
{:error, "No arguments specified"}
end
end
@doc """
Calculates a task's duration based on the supplied parsed arguments.
A duration of 0 is considered to be an error.
"""
def duration_from_parsed_args(parsed_args, start_utc, start_date) do
cond do
parsed_args[:duration] ->
with {:ok, duration} <- parse_duration(parsed_args[:duration]) do
if duration > 0 do
{:ok, duration}
else
{:error, "Task duration cannot be [#{duration}]"}
end
end
parsed_args[:end_time] ->
with {:ok, end_time_type, end_time} <- parse_time(parsed_args[:end_time]),
{:ok, end_utc} <- from_local_time_zone(start_date, end_time, end_time_type) do
case NaiveDateTime.compare(
NaiveDateTime.truncate(end_utc, :second),
NaiveDateTime.truncate(start_utc, :second)
) do
:lt ->
{:ok, div(NaiveDateTime.diff(end_utc, start_utc, :second), 60) + @day_minutes}
:gt ->
{:ok, div(NaiveDateTime.diff(end_utc, start_utc, :second), 60)}
:eq ->
{:error, "The specified start and end times are the same"}
end
end
true ->
{:error, "No task duration specified"}
end
end
@doc """
Converts the supplied date and time strings to UTC, if needed.
> For local-to-utc conversions, daylight savings time (DST) can result in two or no timestamps being generated.
> If the supplied date/time cannot be represented due to DST, an error is returned.
> If the supplied date/time results in two timestamps, only the non-DST timestamp is returned.
> In either case, a warning will be emitted.
"""
def from_local_time_zone(date, time, time_type) do
case NaiveDateTime.from_iso8601("#{date}T#{time}") do
{:ok, time} ->
case time_type do
:utc ->
{:ok, time}
:local ->
times =
time
|> NaiveDateTime.to_erl()
|> :calendar.local_time_to_universal_time_dst()
|> Enum.map(fn dt -> NaiveDateTime.from_erl!(dt) end)
case times do
[] ->
message = "Due to switching to DST, no valid timestamp for [#{time}] exists"
Logger.warn(fn -> message end)
{:error, message}
[actual_time] ->
{:ok, actual_time}
[actual_time_dst, actual_time] ->
Logger.warn(
"Due to switching from DST, two timestamps for [#{time}] exist; DST timestamp [#{
actual_time_dst
}] is ignored"
)
{:ok, actual_time}
end
end
error ->
error
end
end
@doc """
Parses the supplied arguments as `--key` `value` options, based on the expected arguments list.
For example: `["--some-key", "some-value"]` will be parsed to `[:some_key, "some-value"]` if `:some_key` is expected.
"""
def parse_as_options(expected_args, actual_args) do
{parsed, _, _} = OptionParser.parse(actual_args, strict: expected_args)
parsed
end
@doc """
Parses the supplied arguments as positional options, based on the expected arguments list.
For example: `["some-value"]` will be parsed to `[:some_key, "some-value"]` if `:some_key` is expected.
"""
def parse_as_positional(expected_args, actual_args) do
Enum.zip(
Enum.map(expected_args, fn {arg, _} -> arg end),
actual_args
)
end
@doc """
Parses the supplied arguments as `key=value` options, based on the expected arguments list.
For example: `["some-key=some-value"]` will be parsed to `[:some_key, "some-value"]` if `:some_key` is expected.
"""
def parse_as_kv(expected_args, actual_args) do
parse_as_options(
expected_args,
Enum.flat_map(
actual_args,
fn arg -> String.split("--#{arg}", "=") end
)
)
end
@doc """
Validates the supplied task name.
"""
def parse_task(raw_task) do
cond do
raw_task && String.length(raw_task) > 0 -> {:ok, raw_task}
true -> {:error, "No task specified"}
end
end
@doc """
Parses the supplied date string into a `Date` object.
The supported formats are:
- `today` - gets the current day
- `today+XXd` - gets the current day and adds XX days (for example, today+3d)
- `today-XXd` - gets the current day and subtracts XX days (for example, today-3d)
- `YYYY-MM-DD` - sets the date explicitly (for example, 2015-12-21)
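
For example:

    iex> Cli.Parse.parse_date("2015-12-21")
    {:ok, ~D[2015-12-21]}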
"""
def parse_date(raw_date) do
case Regex.run(~r/^(today)([-+])(\d+)([d])$|^today$/, raw_date) do
["today"] ->
{:ok, Date.utc_today()}
[_, "today", "+", days, "d"] ->
{:ok, Date.add(Date.utc_today(), String.to_integer(days))}
[_, "today", "-", days, "d"] ->
{:ok, Date.add(Date.utc_today(), -String.to_integer(days))}
_ ->
case Regex.run(~r/^\d{4}-\d{2}-\d{2}/, raw_date) do
[_ | _] ->
Date.from_iso8601(raw_date)
_ ->
{:error, "Invalid date specified: [#{raw_date}]"}
end
end
end
@doc """
Parses the supplied time string into a `Time` object.
The supported formats are:
- `now` - gets current time
- `now+XXm` - gets the current time and adds XX minutes (for example, now+45m)
- `now-XXm` - gets the current time and subtracts XX minutes (for example, now-45m)
- `now+XXh` - gets the current time and adds XX hours (for example, now+1h)
- `now-XXh` - gets the current time and subtracts XX hours (for example, now-1h)
- `HH:mm` - sets the time explicitly (for example, 23:45)
- `HH:mm:ss` - sets the time explicitly (with seconds; for example, 23:45:59)
"""
def parse_time(raw_time) do
case Regex.run(~r/^(now)([-+])(\d+)([mh])$|^now$/, raw_time) do
["now"] ->
{:ok, :utc, Time.utc_now()}
[_, "now", "+", minutes, "m"] ->
{:ok, :utc, Time.add(Time.utc_now(), String.to_integer(minutes) * 60, :second)}
[_, "now", "-", minutes, "m"] ->
{:ok, :utc, Time.add(Time.utc_now(), -String.to_integer(minutes) * 60, :second)}
[_, "now", "+", hours, "h"] ->
{:ok, :utc, Time.add(Time.utc_now(), String.to_integer(hours) * 3600, :second)}
[_, "now", "-", hours, "h"] ->
{:ok, :utc, Time.add(Time.utc_now(), -String.to_integer(hours) * 3600, :second)}
_ ->
parse_result =
case Regex.run(~r/^\d{2}:\d{2}(:\d{2})?/, raw_time) do
[time, _] ->
Time.from_iso8601(time)
[time] ->
Time.from_iso8601("#{time}:00")
_ ->
{:error, "Invalid time specified: [#{raw_time}]"}
end
with {:ok, time} <- parse_result do
{:ok, :local, time}
end
end
end
@doc """
Parses the supplied duration string into minutes.
The supported formats are:
- `XXm` - XX minutes (for example, 95m == 95 minutes)
- `XXh` - XX hours (for example, 11h == 660 minutes)
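
For example:

    iex> Cli.Parse.parse_duration("95m")
    {:ok, 95}
    iex> Cli.Parse.parse_duration("11h")
    {:ok, 660}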
"""
def parse_duration(raw_duration) do
case Regex.run(~r/^(\d+)([mh])$/, raw_duration) do
[_, minutes, "m"] ->
{:ok, String.to_integer(minutes)}
[_, hours, "h"] ->
{:ok, String.to_integer(hours) * 60}
_ ->
{:error, "Invalid duration specified: [#{raw_duration}]"}
end
end
end
# Source: lib/cli/parse.ex
defmodule Tempus.Slot do
@moduledoc """
Declares a timeslot and exports functions to check whether the given date
and/or datetime is covered by this slot or not.
This module probably should not be called directly.
"""
alias __MODULE__
@typedoc "A timeslot to be used in `Tempus`"
@type t :: %__MODULE__{
from: nil | DateTime.t(),
to: nil | DateTime.t()
}
@typedoc "The origin used in comparisons and calculations"
@type origin :: Slot.t() | Date.t() | DateTime.t() | Time.t() | nil
defstruct [:from, :to]
@spec valid?(slot :: Slot.t()) :: boolean()
@doc """
Checks whether the `Slot` is valid (its `from` does not come after its `to`) or not.
## Examples
iex> slot = %Tempus.Slot{from: ~U|2015-09-30 00:00:00Z|, to: ~U|2015-10-01 01:00:00Z|}
iex> Tempus.Slot.valid?(slot)
true
iex> Tempus.Slot.valid?(%Tempus.Slot{from: slot.to, to: slot.from})
false
"""
def valid?(%Slot{from: nil, to: %DateTime{}}), do: true
def valid?(%Slot{from: %DateTime{}, to: nil}), do: true
def valid?(%Slot{from: %DateTime{} = from, to: %DateTime{} = to}),
do: DateTime.compare(from, to) != :gt
def valid?(_), do: false
@spec cover?(slot :: Slot.t(), dt :: origin(), strict? :: boolean()) ::
boolean()
@doc """
Checks whether the `Slot` covers the date/datetime passed as the second argument.
## Examples
iex> dt_between = ~U|2015-09-30 01:00:00Z|
...> dt_from = ~U|2015-09-30 00:00:00Z|
...> dt_to = ~U|2015-10-01 01:00:00Z|
...> d_from = Date.from_iso8601!("2015-09-30")
...> d_to = Date.from_iso8601!("2015-10-01")
iex> slot = %Tempus.Slot{from: dt_from, to: dt_to}
iex> Tempus.Slot.cover?(slot, dt_between)
true
iex> Tempus.Slot.cover?(slot, dt_to)
true
iex> Tempus.Slot.cover?(slot, dt_to, true)
false
iex> Tempus.Slot.cover?(slot, d_from)
true
iex> Tempus.Slot.cover?(slot, d_from, true)
false
iex> Tempus.Slot.cover?(slot, d_to)
false
"""
def cover?(slot, dt, strict? \\ false)
def cover?(%Slot{from: nil, to: %DateTime{} = to}, %DateTime{} = dt, true),
do: DateTime.compare(to, dt) == :gt
def cover?(%Slot{from: %DateTime{} = from, to: nil}, %DateTime{} = dt, true),
do: DateTime.compare(from, dt) == :lt
def cover?(%Slot{from: %DateTime{} = from, to: %DateTime{} = to}, %DateTime{} = dt, true),
do: DateTime.compare(from, dt) == :lt and DateTime.compare(to, dt) == :gt
def cover?(%Slot{from: from, to: to} = slot, %DateTime{} = dt, false),
do:
cover?(slot, dt, true) or
(not is_nil(from) and DateTime.compare(from, dt) == :eq) or
(not is_nil(to) and DateTime.compare(to, dt) == :eq)
def cover?(%Slot{} = slot, %Date{} = dt, strict?),
do: cover?(slot, wrap(dt, slot.from || slot.to), strict?)
def cover?(%Slot{} = slot, %Time{} = dt, strict?),
do: cover?(slot, wrap(dt, slot.from || slot.to), strict?)
def cover?(%Slot{from: nil, to: %DateTime{}}, %Slot{from: nil, to: %DateTime{}}, true),
do: false
def cover?(
%Slot{from: nil, to: %DateTime{} = s_to},
%Slot{from: nil, to: %DateTime{} = dt_to},
false
),
do: DateTime.compare(s_to, dt_to) in [:lt, :eq]
def cover?(%Slot{from: %DateTime{}, to: nil}, %Slot{from: %DateTime{}, to: nil}, true),
do: false
def cover?(
%Slot{from: %DateTime{} = s_from, to: nil},
%Slot{from: %DateTime{} = dt_from, to: nil},
false
),
do: DateTime.compare(s_from, dt_from) in [:gt, :eq]
def cover?(%Slot{} = slot, %Slot{from: from, to: to}, strict?),
do: cover?(slot, from, strict?) and cover?(slot, to, strict?)
@spec disjoint?(s1 :: origin(), s2 :: origin()) :: boolean()
@doc """
Returns `true` if two slots are disjoint, `false` otherwise.
## Examples
iex> slot = %Tempus.Slot{from: ~U|2015-09-01 00:00:00Z|, to: ~U|2015-10-01 00:00:00Z|}
iex> inner = %Tempus.Slot{from: ~U|2015-09-01 00:00:00Z|, to: ~U|2015-09-01 01:00:00Z|}
iex> Tempus.Slot.disjoint?(slot, inner)
false
iex> inner = %Tempus.Slot{from: ~U|2015-09-01 00:00:00Z|, to: ~U|2015-10-01 01:00:00Z|}
iex> Tempus.Slot.disjoint?(slot, inner)
false
iex> outer = %Tempus.Slot{from: ~U|2015-10-01 00:00:01Z|, to: ~U|2015-10-01 01:00:00Z|}
iex> Tempus.Slot.disjoint?(slot, outer)
true
"""
def disjoint?(s1, s2) do
[%Slot{} = s1, %Slot{} = s2] = Enum.map([s1, s2], &wrap/1)
compare(s1, s2) in [:lt, :gt]
end
@spec intersect(slots :: Enum.t()) :: Slot.t() | nil
@doc """
Intersects slots to the minimal covered timeslice.
### Example
iex> Tempus.Slot.intersect([Tempus.Slot.wrap(~D|2020-09-30|),
...> %Tempus.Slot{from: ~U|2020-09-30 23:00:00Z|, to: ~U|2020-10-02 00:00:00Z|}])
#Slot<[from: ~U[2020-09-30 23:00:00Z], to: ~U[2020-09-30 23:59:59.999999Z]]>
"""
def intersect(slots) do
Enum.reduce(slots, fn
_slot, nil ->
nil
slot, acc ->
slot = wrap(slot)
if disjoint?(acc, slot),
do: nil,
else: %Slot{from: intersect_from(slot, acc), to: intersect_to(slot, acc)}
end)
end
@spec intersect_from(Slot.t(), Slot.t()) :: DateTime.t() | nil
defp intersect_from(%Slot{from: nil}, %Slot{from: nil}), do: nil
defp intersect_from(%Slot{from: f1}, %Slot{from: nil}), do: f1
defp intersect_from(%Slot{from: nil}, %Slot{from: f2}), do: f2
defp intersect_from(%Slot{from: f1}, %Slot{from: f2}), do: Enum.max([f1, f2], DateTime)
@spec intersect_to(Slot.t(), Slot.t()) :: DateTime.t() | nil
defp intersect_to(%Slot{to: nil}, %Slot{to: nil}), do: nil
defp intersect_to(%Slot{to: t1}, %Slot{to: nil}), do: t1
defp intersect_to(%Slot{to: nil}, %Slot{to: t2}), do: t2
defp intersect_to(%Slot{to: t1}, %Slot{to: t2}), do: Enum.min([t1, t2], DateTime)
@spec join(slots :: Enum.t()) :: Slot.t()
@doc """
Joins slots to the maximal covered timeslice.
### Example
iex> Tempus.Slot.join([Tempus.Slot.wrap(~D|2020-09-30|), Tempus.Slot.wrap(~D|2020-10-02|)])
#Slot<[from: ~U[2020-09-30 00:00:00.000000Z], to: ~U[2020-10-02 23:59:59.999999Z]]>
iex> Tempus.Slot.join([~D|2020-09-30|, ~D|2020-10-02|])
#Slot<[from: ~U[2020-09-30 00:00:00.000000Z], to: ~U[2020-10-02 23:59:59.999999Z]]>
"""
def join([]), do: %Slot{from: nil, to: nil}
def join([slot | slots]) do
Enum.reduce(slots, wrap(slot), fn slot, acc ->
slot = wrap(slot)
from =
if DateTime.compare(slot.from, acc.from) == :lt,
do: slot.from,
else: acc.from
to =
if DateTime.compare(slot.to, acc.to) == :gt,
do: slot.to,
else: acc.to
%Slot{from: from, to: to}
end)
end
@spec duration(slot :: Slot.t(), unit :: System.time_unit()) :: non_neg_integer() | :infinity
@doc """
Calculates the duration of a slot in the units given as the second parameter
(default: `:second`).
### Example
iex> ~D|2020-09-03| |> Tempus.Slot.wrap() |> Tempus.Slot.duration()
86400
"""
def duration(slot, unit \\ :second)
def duration(%Slot{from: nil, to: %DateTime{}}, _), do: :infinity
def duration(%Slot{from: %DateTime{}, to: nil}, _), do: :infinity
def duration(%Slot{from: %DateTime{} = from, to: %DateTime{} = to}, unit),
do: to |> DateTime.add(1, unit) |> DateTime.diff(from, unit)
@spec compare(s1 :: t(), s2 :: t(), strict :: boolean()) :: :lt | :gt | :eq | :joint
@doc """
Compares two slot structs.
Returns `:gt` if the first slot is strictly later than the second and `:lt` if it is strictly earlier.
**NB** `:eq` is returned not only when the slots are equal, but also when they overlap.
Might be used in `Enum.sort/2`.
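
## Examples

    iex> s1 = %Tempus.Slot{from: ~U|2020-01-01 00:00:00Z|, to: ~U|2020-01-02 00:00:00Z|}
    iex> s2 = %Tempus.Slot{from: ~U|2020-01-03 00:00:00Z|, to: ~U|2020-01-04 00:00:00Z|}
    iex> Tempus.Slot.compare(s1, s2)
    :lt
    iex> Tempus.Slot.compare(s2, s1)
    :gt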
"""
def compare(s1, s2, strict \\ false)
def compare(%Slot{from: nil, to: %DateTime{}}, %Slot{from: nil, to: %DateTime{}}, false),
do: :eq
def compare(
%Slot{from: nil, to: %DateTime{} = t1},
%Slot{from: nil, to: %DateTime{} = t2},
true
),
do: if(DateTime.compare(t1, t2) == :eq, do: :eq, else: :joint)
def compare(%Slot{from: %DateTime{}, to: nil}, %Slot{from: %DateTime{}, to: nil}, false),
do: :eq
def compare(
%Slot{from: %DateTime{} = f1, to: nil},
%Slot{from: %DateTime{} = f2, to: nil},
true
),
do: if(DateTime.compare(f1, f2) == :eq, do: :eq, else: :joint)
def compare(%Slot{from: f1, to: t1}, %Slot{from: f2, to: t2}, strict) do
f2l = t1 && f2 && DateTime.compare(t1, f2)
l2f = f1 && t2 && DateTime.compare(f1, t2)
case {strict, f2l, l2f} do
{_, :lt, _} ->
:lt
{_, _, :gt} ->
:gt
{false, _, _} ->
:eq
{true, nil, _} ->
:joint
{true, _, nil} ->
:joint
{true, _, _} ->
if DateTime.compare(f1, f2) == :eq && DateTime.compare(t1, t2) == :eq,
do: :eq,
else: :joint
end
end
@spec strict_compare(s1 :: Slot.t(), s2 :: Slot.t()) :: :eq | :lt | :gt | :joint
@doc """
Compares two slot structs. The same as `compare/2`, but returns `:joint` if
the slots are overlapped.
"""
def strict_compare(%Slot{} = s1, %Slot{} = s2),
do: compare(s1, s2, true)
@spec wrap(origin(), DateTime.t()) :: Slot.t()
@doc """
Wraps the argument into a slot. For `DateTime` it’d be a single microsecond.
For a `Date`, it would be the whole day, starting at `00:00:00.000000` and
ending at `23:59:59.999999`.
## Examples
iex> Tempus.Slot.wrap(~D|2020-08-06|)
#Slot<[from: ~U[2020-08-06 00:00:00.000000Z], to: ~U[2020-08-06 23:59:59.999999Z]]>
"""
def wrap(moment \\ nil, origin \\ DateTime.utc_now())
def wrap(nil, origin), do: wrap(DateTime.utc_now(), origin)
def wrap(%Slot{} = slot, _), do: slot
def wrap(%DateTime{} = dt, _), do: %Slot{from: dt, to: dt}
def wrap(
%Time{
calendar: calendar,
hour: hour,
microsecond: microsecond,
minute: minute,
second: second
},
origin
) do
wrap(%DateTime{
calendar: calendar,
day: origin.day,
hour: hour,
microsecond: microsecond,
minute: minute,
month: origin.month,
second: second,
std_offset: origin.std_offset,
time_zone: origin.time_zone,
utc_offset: origin.utc_offset,
year: origin.year,
zone_abbr: origin.zone_abbr
})
end
def wrap(%Date{calendar: calendar, day: day, month: month, year: year}, origin) do
%Slot{
from: %DateTime{
calendar: calendar,
day: day,
hour: 0,
microsecond: {0, 6},
minute: 0,
month: month,
second: 0,
std_offset: origin.std_offset,
time_zone: origin.time_zone,
utc_offset: origin.utc_offset,
year: year,
zone_abbr: origin.zone_abbr
},
to: %DateTime{
calendar: calendar,
day: day,
hour: 23,
microsecond: {999_999, 6},
minute: 59,
month: month,
second: 59,
std_offset: origin.std_offset,
time_zone: origin.time_zone,
utc_offset: origin.utc_offset,
year: year,
zone_abbr: origin.zone_abbr
}
}
end
@doc false
@spec shift(
slot :: t(),
action :: [{:to, integer()} | {:from, integer} | {:unit, System.time_unit()}]
) :: Slot.t()
def shift(%Slot{from: from, to: to}, action \\ []) do
unit = Keyword.get(action, :unit, :microsecond)
from = do_shift(from, Keyword.get(action, :from, 0), unit)
to = do_shift(to, Keyword.get(action, :to, 0), unit)
%Slot{from: from, to: to}
end
@spec do_shift(maybe_datetime, integer(), System.time_unit()) :: maybe_datetime
when maybe_datetime: nil | DateTime.t()
defp do_shift(nil, _, _), do: nil
defp do_shift(%DateTime{microsecond: {_, 0}} = dt, count, unit),
do:
%DateTime{dt | microsecond: {0, 6}}
|> DateTime.truncate(unit)
|> DateTime.add(count, unit)
defp do_shift(%DateTime{microsecond: {value, n}} = dt, count, unit),
do:
%DateTime{dt | microsecond: {:erlang.rem(value, round(:math.pow(10, n))), Enum.max([6, 6])}}
|> DateTime.truncate(unit)
|> DateTime.add(count, unit)
@spec shift_tz(
slot :: Slot.t(),
tz :: Calendar.time_zone(),
tz_db :: Calendar.time_zone_database()
) :: Slot.t()
@doc """
Shifts both `from` and `to` values to `UTC` zone.
### Examples
```elixir
slot = %Tempus.Slot{
from: DateTime.from_naive!(~N|2018-01-05 21:00:00|, "America/New_York"),
to: DateTime.from_naive!(~N|2018-01-08 08:59:59|, "Australia/Sydney")
}
#⇒ #Slot<[from: ~U[2018-01-06 02:00:00Z], to: ~U[2018-01-07 21:59:59Z]]>
```
"""
def shift_tz(
%Slot{from: from, to: to},
tz \\ "Etc/UTC",
tz_db \\ Calendar.get_time_zone_database()
) do
%Slot{from: DateTime.shift_zone!(from, tz, tz_db), to: DateTime.shift_zone!(to, tz, tz_db)}
end
defimpl Inspect do
import Inspect.Algebra
@fancy_inspect Application.compile_env(:tempus, :fancy_inspect, false)
def inspect(%Tempus.Slot{from: from, to: to}, %Inspect.Opts{custom_options: [_ | _]} = opts) do
opts.custom_options
|> Keyword.get(:fancy, @fancy_inspect)
|> case do
truthy when truthy in [:emoji, true] ->
value =
[from, to]
|> Enum.map(&DateTime.to_iso8601/1)
|> Enum.join(" → ")
tag =
case truthy do
:emoji -> "⌚"
true -> "#Slot"
end
concat([tag, "<", value, ">"])
_ ->
concat(["#Slot<", to_doc([from: from, to: to], opts), ">"])
end
end
def inspect(%Tempus.Slot{from: from, to: to}, opts) do
concat(["#Slot<", to_doc([from: from, to: to], opts), ">"])
end
end
end
# Source: lib/slot.ex
defmodule ExWire.Sync do
@moduledoc """
This is the heart of our syncing logic. Once we've connected to a number
of peers via `ExWire.PeerSup`, we begin to ask for new blocks from those
peers. As we receive blocks, we add them to our `ExWire.Struct.BlockQueue`.
If the blocks are confirmed by enough peers, then we verify the block and
add it to our block tree.
Note: we do not currently store the block tree, and thus we need to build
it from genesis each time.
"""
use GenServer
require Logger
alias Block.Header
alias ExWire.Struct.BlockQueue
alias ExWire.Packet.BlockHeaders
alias ExWire.Packet.BlockBodies
alias ExWire.PeerSupervisor
@doc """
Starts a Sync process.
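
## Example

A sketch (`db` is whatever database reference the application passes in;
it is a placeholder here):

    {:ok, pid} = ExWire.Sync.start_link(db)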
"""
def start_link(db) do
GenServer.start_link(__MODULE__, db, name: ExWire.Sync)
end
@doc """
Once we start a sync server, we'll wait for active peers.
TODO: We do not always want to sync from the genesis.
We will need to add some "restore state" logic.
"""
def init(db) do
block_tree = Blockchain.Blocktree.new_tree()
{:ok, %{
block_queue: %BlockQueue{},
block_tree: block_tree,
chain: ExWire.Config.chain(),
db: db,
last_requested_block: request_next_block(block_tree)
}}
end
@doc """
When we receive a block header, we'll add it to our block queue. When we receive the corresponding block body,
we'll add that as well.
"""
def handle_info({:packet, %BlockHeaders{}=block_headers, peer}, state=%{block_queue: block_queue, block_tree: block_tree, chain: chain, db: db, last_requested_block: last_requested_block}) do
{next_block_queue, next_block_tree} = Enum.reduce(block_headers.headers, {block_queue, block_tree}, fn header, {block_queue, block_tree} ->
header_hash = header |> Header.hash
{block_queue, block_tree, should_request_block} = BlockQueue.add_header_to_block_queue(block_queue, block_tree, header, header_hash, peer.remote_id, chain, db)
if should_request_block do
Logger.debug("[Sync] Requesting block body #{header.number}")
# TODO: Bulk up these requests?
PeerSupervisor.send_packet(PeerSupervisor, %ExWire.Packet.GetBlockBodies{hashes: [header_hash]})
end
{block_queue, block_tree}
end)
# We can make this better, but it's basically "if we change, request another block"
new_last_requested_block = if next_block_tree.parent_map != block_tree.parent_map do
request_next_block(next_block_tree)
else
last_requested_block
end
{:noreply,
state
|> Map.put(:block_queue, next_block_queue)
|> Map.put(:block_tree, next_block_tree)
|> Map.put(:last_requested_block, new_last_requested_block)
}
end
def handle_info({:packet, %BlockBodies{}=block_bodies, _peer}, state=%{block_queue: block_queue, block_tree: block_tree, chain: chain, db: db, last_requested_block: last_requested_block}) do
{next_block_queue, next_block_tree} = Enum.reduce(block_bodies.blocks, {block_queue, block_tree}, fn block_body, {block_queue, block_tree} ->
BlockQueue.add_block_struct_to_block_queue(block_queue, block_tree, block_body, chain, db)
end)
# We can make this better, but it's basically "if we change, request another block"
new_last_requested_block = if next_block_tree.parent_map != block_tree.parent_map do
request_next_block(next_block_tree)
else
last_requested_block
end
{:noreply,
state
|> Map.put(:block_queue, next_block_queue)
|> Map.put(:block_tree, next_block_tree)
|> Map.put(:last_requested_block, new_last_requested_block)
}
end
def handle_info({:packet, packet, peer}, state) do
Logger.debug("[Sync] Ignoring packet #{packet.__struct__} from #{peer}")
{:noreply, state}
end
def request_next_block(block_tree) do
next_number = case Blockchain.Blocktree.get_canonical_block(block_tree) do
:root -> 0
%Blockchain.Block{header: %Block.Header{number: number}} -> number + 1
end
Logger.debug("[Sync] Requesting block #{next_number}")
ExWire.PeerSupervisor.send_packet(ExWire.PeerSupervisor, %ExWire.Packet.GetBlockHeaders{block_identifier: next_number, max_headers: 1, skip: 0, reverse: false})
next_number
end
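  # Illustrative (sketch): start syncing once peers are connected, given an
  # already-opened database handle `db`.
  #
  #     {:ok, _pid} = ExWire.Sync.start_link(db)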
end
|
apps/ex_wire/lib/ex_wire/sync.ex
| 0.592902
| 0.47524
|
sync.ex
|
starcoder
|
defmodule Cashtrail.Entities.Tenants do
@moduledoc """
Create or drop tenants for one Entity.
Every created Entity should be a tenant and have its data. Tenants are schemas
in the Postgres having the data related to the Entity.
"""
alias Cashtrail.Entities
@doc """
Create a tenant for the given Entity.
## Expected arguments
* A `%Cashtrail.Entities.Entity{}` struct of the tenant that will be created.
## Returns
  * `{:ok, entity}` - If the tenant creation was successfully performed.
* `{:error, reason}` - In case of errors.
See `Triplex.create/2` docs for more information.
"""
@spec create(%Cashtrail.Entities.Entity{id: Ecto.UUID.t()}) ::
{:error, String.t()} | {:ok, Ecto.UUID.t()}
def create(%Entities.Entity{} = entity) do
entity
|> Triplex.to_prefix()
|> Triplex.create()
end
@doc """
Drop a tenant for the given Entity.
## Expected arguments
* A `%Cashtrail.Entities.Entity{}` struct of the tenant that will be dropped.
## Returns
  * `{:ok, entity}` - If the tenant was successfully dropped.
* `{:error, reason}` - In case of errors.
  See `Triplex.drop/2` docs for more information.
"""
@spec drop(%Cashtrail.Entities.Entity{id: Ecto.UUID.t()}) ::
{:error, String.t()} | {:ok, Ecto.UUID.t()}
def drop(%Entities.Entity{} = entity) do
entity
|> Triplex.to_prefix()
|> Triplex.drop()
end
@doc """
Return the prefix from Entity.
## Expected arguments
  * A `%Cashtrail.Entities.Entity{}` struct of the tenant whose prefix you want to get.
See `Triplex.to_prefix/1` docs for more information.
"""
@spec to_prefix(Cashtrail.Entities.Entity.t()) :: String.t()
def to_prefix(%Entities.Entity{} = entity) do
Triplex.to_prefix(entity)
end
@doc """
Return the given `Ecto.Queryable` with the prefix configured.
## Expected arguments
  * queryable - The `Ecto.Queryable` that the prefix will be set on.
  * A `%Cashtrail.Entities.Entity{}` struct of the tenant whose prefix will be used.
See `Triplex.to_prefix/1` docs for more information.
"""
@spec put_prefix(Ecto.Queryable.t(), Cashtrail.Entities.Entity.t()) :: Ecto.Query.t()
def put_prefix(queryable, %Entities.Entity{} = entity) do
queryable
|> Ecto.Queryable.to_query()
|> Map.put(:prefix, to_prefix(entity))
end
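  # Illustrative (sketch), assuming a hypothetical `MyApp.Account` schema and
  # an `entity` struct:
  #
  #     MyApp.Account
  #     |> Cashtrail.Entities.Tenants.put_prefix(entity)
  #     |> Repo.all()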
end
|
apps/cashtrail/lib/cashtrail/entities/tenants.ex
| 0.864839
| 0.58433
|
tenants.ex
|
starcoder
|
defmodule Bodyguard do
@moduledoc """
Authorize actions at the boundary of a context
Please see the [README](readme.html).
"""
@type opts :: keyword
@doc """
Authorize a user's action.
Simply converts the `opts` to a `params` map and defers to the
`c:Bodyguard.Policy.authorize/3` callback on the specified `policy`.
Returns `:ok` on success, and `{:error, reason}` on failure.
"""
  @spec permit(policy :: module, action :: atom, user :: any, opts :: opts) :: Bodyguard.Policy.auth_result
def permit(policy, action, user, opts \\ []) do
params = Enum.into(opts, %{})
apply(policy, :authorize, [action, user, params])
end
@doc """
The same as `permit/4`, but raises `Bodyguard.NotAuthorizedError` on
authorization failure.
Returns `:ok` on success.
## Options
* `error_message` – a string to describe the error (default "not authorized")
* `error_status` – the HTTP status code to raise with the error (default 403)
The remaining `opts` are converted into a `params` map and passed to the
`c:Bodyguard.Policy.authorize/3` callback.
"""
  @spec permit!(policy :: module, action :: atom, user :: any, opts :: opts) :: :ok
def permit!(policy, action, user, opts \\ []) do
opts = Enum.into(opts, %{})
{error_message, opts} = Map.pop(opts, :error_message, "not authorized")
{error_status, opts} = Map.pop(opts, :error_status, 403)
case permit(policy, action, user, opts) do
:ok -> :ok
error -> raise Bodyguard.NotAuthorizedError,
message: error_message, status: error_status, reason: error
end
end
@doc """
The same as `permit/4`, but returns a boolean.
"""
  @spec permit?(policy :: module, action :: atom, user :: any, opts :: opts) :: boolean
def permit?(policy, action, user, opts \\ []) do
case permit(policy, action, user, opts) do
:ok -> true
_ -> false
end
end
@doc """
Filter a query down to user-accessible items.
The `query` is introspected by Bodyguard in an attempt to automatically
determine the schema type. To succeed, `query` must be an atom (schema
module name), an `Ecto.Query`, or a list of structs.
This function exists primarily as a helper to `import` into a context and
gain access to scoping for all schemas.
defmodule MyApp.Blog do
import Bodyguard
def list_user_posts(user) do
Blog.Post
|> scope(user) # <-- defers to MyApp.Blog.Post.scope/3
|> where(draft: false)
|> Repo.all
end
end
#### Options
* `schema` - if the schema of the `query` cannot be determined, you must
manually specify the schema here
The remaining `opts` are converted to a `params` map and passed to the
`c:Bodyguard.Schema.scope/3` callback on that schema.
"""
@spec scope(query :: any, user :: any, opts :: keyword) :: any
def scope(query, user, opts \\ []) do
params = Enum.into(opts, %{})
{schema, params} = Map.pop(params, :schema, resolve_schema(query))
apply(schema, :scope, [query, user, params])
end
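  # Illustrative usage (sketch), assuming MyApp.Blog.Policy implements
  # `Bodyguard.Policy` and MyApp.Blog.Post implements `Bodyguard.Schema`:
  #
  #     :ok = Bodyguard.permit(MyApp.Blog.Policy, :update_post, user, post: post)
  #     posts = Bodyguard.scope(MyApp.Blog.Post, user)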
# Private
# Ecto query (this feels dirty...)
defp resolve_schema(%{__struct__: Ecto.Query, from: {_source, schema}})
when is_atom(schema) and not is_nil(schema), do: schema
# List of structs
defp resolve_schema([%{__struct__: schema} | _rest]), do: schema
# Schema module itself
defp resolve_schema(schema) when is_atom(schema), do: schema
# Unable to determine
defp resolve_schema(unknown) do
raise ArgumentError, "Cannot automatically determine the schema of
#{inspect(unknown)} - specify the :schema option"
end
end
|
lib/bodyguard.ex
| 0.861931
| 0.60013
|
bodyguard.ex
|
starcoder
|
defmodule PasswordlessAuth do
@moduledoc """
PasswordlessAuth provides functionality for generating numeric codes that
can be used for verifying a user's ownership of a phone number, email address
or any other identifying address.
It is designed to be used in a verification system, such as a passwordless authentication
flow or as part of multi-factor authentication (MFA).
"""
use Application
alias PasswordlessAuth.{GarbageCollector, VerificationCode, Store}
@default_verification_code_ttl 300
@default_num_attempts_before_timeout 5
@default_rate_limit_timeout_length 60
@type verification_failed_reason() ::
:attempt_blocked | :code_expired | :does_not_exist | :incorrect_code
@doc false
def start(_type, _args) do
children = [
GarbageCollector,
Store
]
opts = [strategy: :one_for_one, name: PasswordlessAuth.Supervisor]
Supervisor.start_link(children, opts)
end
@doc """
Generates a verification code for the given recipient. The code is a string of numbers that is `code_length` characters long (defaults to 6).
The verification code is valid for the number of seconds given to the
`verification_code_ttl` config option (defaults to 300)
Arguments:
- `recipient`: A reference to the recipient of the code. This is used for verifying the code with `verify_code/2`
- `code_length`: The length of the code. Defaults to 6.
Returns the code.
"""
@spec generate_code(String.t(), integer()) ::
String.t()
def generate_code(recipient, code_length \\ 6) do
code = VerificationCode.generate_code(code_length)
ttl =
Application.get_env(:passwordless_auth, :verification_code_ttl) ||
@default_verification_code_ttl
expires = NaiveDateTime.utc_now() |> NaiveDateTime.add(ttl)
Agent.update(
Store,
&Map.put(&1, recipient, %VerificationCode{
code: code,
expires: expires
})
)
code
end
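  # Illustrative flow (sketch):
  #
  #     code = PasswordlessAuth.generate_code("+447123456789")
  #     :ok = PasswordlessAuth.verify_code("+447123456789", code)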
@doc """
  Verifies that the given `recipient` has the
given `attempt_code` stored in state and that
the code hasn't expired.
Returns `:ok` or `{:error, :reason}`.
## Examples
iex> PasswordlessAuth.verify_code("+447123456789", "123456")
{:error, :does_not_exist}
"""
@spec verify_code(String.t(), String.t()) :: :ok | {:error, verification_failed_reason()}
def verify_code(recipient, attempt_code) do
state = Agent.get(Store, fn state -> state end)
with :ok <- check_code_exists(state, recipient),
verification_code <- Map.get(state, recipient),
:ok <- check_verification_code_not_expired(verification_code),
:ok <- check_attempt_is_allowed(verification_code),
:ok <- check_attempt_code(verification_code, attempt_code) do
reset_attempts(recipient)
:ok
else
{:error, :incorrect_code} = error ->
increment_or_block_attempts(recipient)
error
{:error, _reason} = error ->
error
end
end
@doc """
Removes a code from state based on the given `recipient`
Returns `{:ok, %VerificationCode{...}}` or `{:error, :reason}`.
"""
@spec remove_code(String.t()) :: {:ok, VerificationCode.t()} | {:error, :does_not_exist}
def remove_code(recipient) do
state = Agent.get(Store, fn state -> state end)
with :ok <- check_code_exists(state, recipient) do
code = Agent.get(Store, &Map.get(&1, recipient))
Agent.update(Store, &Map.delete(&1, recipient))
{:ok, code}
end
end
@spec check_code_exists(map(), String.t()) :: :ok | {:error, :does_not_exist}
defp check_code_exists(state, recipient) do
if Map.has_key?(state, recipient) do
:ok
else
{:error, :does_not_exist}
end
end
@spec check_verification_code_not_expired(VerificationCode.t()) :: :ok | {:error, :code_expired}
defp check_verification_code_not_expired(%VerificationCode{expires: expires}) do
case NaiveDateTime.compare(expires, NaiveDateTime.utc_now()) do
:gt -> :ok
_ -> {:error, :code_expired}
end
end
@spec check_attempt_is_allowed(VerificationCode.t()) :: :ok | {:error, :attempt_blocked}
defp check_attempt_is_allowed(%VerificationCode{attempts_blocked_until: nil}), do: :ok
defp check_attempt_is_allowed(%VerificationCode{attempts_blocked_until: attempts_blocked_until}) do
case NaiveDateTime.compare(attempts_blocked_until, NaiveDateTime.utc_now()) do
:lt -> :ok
_ -> {:error, :attempt_blocked}
end
end
@spec check_attempt_code(VerificationCode.t(), String.t()) :: :ok | {:error, :incorrect_code}
defp check_attempt_code(%VerificationCode{code: code}, attempt_code) do
if attempt_code == code do
:ok
else
{:error, :incorrect_code}
end
end
@spec reset_attempts(String.t()) :: :ok
defp reset_attempts(recipient) do
Agent.update(Store, &put_in(&1, [recipient, Access.key(:attempts)], 0))
end
@spec increment_or_block_attempts(String.t()) :: :ok
defp increment_or_block_attempts(recipient) do
num_attempts_before_timeout =
Application.get_env(:passwordless_auth, :num_attempts_before_timeout) ||
@default_num_attempts_before_timeout
attempts = Agent.get(Store, &get_in(&1, [recipient, Access.key(:attempts)]))
if attempts < num_attempts_before_timeout - 1 do
Agent.update(Store, &put_in(&1, [recipient, Access.key(:attempts)], attempts + 1))
else
      rate_limit_timeout_length =
        Application.get_env(:passwordless_auth, :rate_limit_timeout_length) ||
          @default_rate_limit_timeout_length
      attempts_blocked_until =
        NaiveDateTime.utc_now() |> NaiveDateTime.add(rate_limit_timeout_length)
Agent.update(Store, fn state ->
state
|> put_in([recipient, Access.key(:attempts)], 0)
|> put_in([recipient, Access.key(:attempts_blocked_until)], attempts_blocked_until)
end)
end
end
end
|
lib/passwordless_auth.ex
| 0.861916
| 0.576751
|
passwordless_auth.ex
|
starcoder
|
defmodule Remedy.Component.ActionRow do
@moduledoc """
Action Rows.
"""
use Remedy.Schema.Component
@defaults %{
type: 1,
components: []
}
@type t :: %{
type: Component.type(),
components: [Component.components()]
}
@doc """
Create an empty action row.
Options can be passed as a keyword list. The only supported option is a list of inner components
"""
def action_row(opts \\ [])
def action_row(%Component{type: 3} = component), do: component |> List.wrap() |> action_row()
def action_row([%Component{type: 2} | _] = components),
do: action_row([{:components, components}])
def action_row(opts) do
[
{:components, opts[:components]}
]
|> new()
end
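  # Illustrative (sketch): build an empty row, then append a button component;
  # the `label` field here is assumed from the Component schema.
  #
  #     row = action_row()
  #     row = append(row, %Component{type: 2, label: "OK"})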
@doc """
Appends a button to the action row.
Returns the action row unchanged if there are already 5 buttons or if the action row contains a select menu.
"""
def append(action_row, button)
def append(%Component{type: 1, components: [%Component{type: 3} | _]} = c, _), do: c
def append(%Component{type: 1, components: [%Component{} | _]} = c, %Component{type: 2} = i),
do: appnd(c, i)
def append(%Component{type: 1, components: []} = c, %Component{type: 2} = i),
do: appnd(c, i)
defp appnd(%Component{components: components} = c, %Component{type: 2} = i) do
inner_components_count = Enum.count(components)
cond do
inner_components_count == 5 ->
c
inner_components_count < 5 ->
update(c, [{:components, components ++ [i]}])
end
end
@doc """
Lazily appends a button to the action row.
If there are already 5 buttons, the first one will be dropped.
"""
def append_lazy(action_row, button)
def append_lazy(%Component{type: 1, components: [%Component{type: 3} | _]} = inner_select, _) do
inner_select
end
def append_lazy(
%Component{type: 1, components: [%Component{} | _]} = action_row_with_buttons,
%Component{type: 2} = button_to_append
) do
appnd_lazy(action_row_with_buttons, button_to_append)
end
def append_lazy(
%Component{type: 1, components: []} = empty_action_row,
%Component{type: 2} = button_to_append
),
do: appnd(empty_action_row, button_to_append)
defp appnd_lazy(
%Component{components: [_head | tail] = components} = action_row,
%Component{type: 2} = button_to_append
) do
inner_components_count = Enum.count(components)
cond do
inner_components_count == 5 ->
update(action_row, [{:components, tail ++ [button_to_append]}])
inner_components_count < 5 ->
update(action_row, [{:components, components ++ [button_to_append]}])
end
end
@doc """
Puts the components into the action row unless a list of inner components already exists.
"""
def put_new(%Component{type: 1, components: []} = component, list_of_components) do
update(component, [{:components, list_of_components}])
end
def put_new(%Component{type: 1, components: [_ | _]} = component, _), do: component
@doc """
  Puts the given component into the action row; any existing components are discarded.
"""
def put(%Component{type: 1} = component, %Component{type: 3} = select_menu) do
update(component, [{:components, [select_menu]}])
end
def put(%Component{type: 1} = component, [%Component{type: 3} | []] = select_menu) do
update(component, [{:components, select_menu}])
end
def put(%Component{type: 1} = component, [%Component{} | _] = list_of_components) do
update(component, [{:components, list_of_components}])
end
end
|
components/action_row.ex
| 0.80213
| 0.462716
|
action_row.ex
|
starcoder
|
defmodule Eeyeore.Settings do
use GenServer
require Logger
@moduledoc """
The main settings for Eeyeore
These settings include things like, default LED Color,
default number of bolts on a trigger, and maximum brightness. This module also
broadcasts changes for network connections such as MQTT.
"""
alias Blinkchain.Color
@doc """
State contains the color, maximum brightness of the bolt, and the number of
random bolts on an unspecified trigger for Eeyeore settings. State also
  contains the subscribers to be notified when settings are changed.
"""
defmodule State do
defstruct [:color, :brightness, :quantity, :subs]
end
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
def init(_args) do
state = %State{
color: Color.parse("#A253FC"),
brightness: 100,
quantity: 10,
subs: []
}
{:ok, state}
end
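  # Illustrative client usage (sketch):
  #
  #     GenServer.cast(Eeyeore.Settings, {:subscribe, self()})
  #     GenServer.call(Eeyeore.Settings, :get_color)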
@doc """
  Subscribes a process to receive casts of changed settings, mostly used by
network endpoints. Casts recieved are in the format of
{:<setting name>_changed, <setting value>}
"""
def handle_cast({:subscribe, pid}, state) do
{:noreply,
%State{
color: state.color,
brightness: state.brightness,
quantity: state.quantity,
subs: [pid | state.subs]
}}
end
@doc """
  Receives a color setting in the format of Blinkchain.Color; if the color has
changed it will broadcast the change to subscribers.
"""
def handle_cast({:set_color, color = %Color{}}, state) do
if color != state.color do
Logger.info(
"[Eeyeore.Settings] color changed from #{inspect(state.color)} to #{inspect(color)}"
)
broadcast_message({:color_changed, color}, state.subs)
end
{:noreply,
%State{
color: color,
brightness: state.brightness,
quantity: state.quantity,
subs: state.subs
}}
end
@doc """
  Receives a brightness setting in the format of an integer between 0 and 100;
if the brightness has changed it will broadcast the change to subscribers.
"""
def handle_cast({:set_brightness, brightness}, state)
when is_integer(brightness) and brightness >= 0 and brightness <= 100 do
if brightness != state.brightness do
broadcast_message({:brightness_changed, brightness}, state.subs)
end
{:noreply,
%State{
color: state.color,
brightness: brightness,
quantity: state.quantity,
subs: state.subs
}}
end
@doc """
  Receives a quantity setting in the format of an integer >= 1; if the
quantity has changed it will broadcast the change to subscribers.
"""
def handle_cast({:set_quantity, quantity}, state) when is_integer(quantity) and quantity >= 1 do
if quantity != state.quantity do
broadcast_message({:quantity_changed, quantity}, state.subs)
end
{:noreply,
%State{
color: state.color,
brightness: state.brightness,
quantity: quantity,
subs: state.subs
}}
end
# TODO: Remove this when storage across reboot is implemented
@doc """
  Handles unknown casts gracefully; this will be deprecated when settings state
  is stored between reboots with something like PersistentStorage
"""
def handle_cast(unhandled, state) do
Logger.info("[Eeyeore.Settings] Unhandled cast: #{inspect(unhandled)}")
{:noreply, state}
end
@doc """
Gets the current color setting
"""
def handle_call(:get_color, _from, state) do
{:reply, state.color, state}
end
@doc """
Gets the current brightness setting
"""
def handle_call(:get_brightness, _from, state) do
{:reply, state.brightness, state}
end
@doc """
Gets the current quantity setting
"""
def handle_call(:get_quantity, _from, state) do
{:reply, state.quantity, state}
end
# TODO: Remove this when storage across reboot is implemented
@doc """
  Handles unknown calls gracefully; this will be deprecated when settings state
  is stored between reboots with something like PersistentStorage
"""
def handle_call(unhandled_call, _from, state) do
Logger.info("[Eeyeore.Settings] Unhandled Call: #{inspect(unhandled_call)}")
{:reply, "", state}
end
defp broadcast_message(message, subscribers) do
Enum.each(subscribers, fn sub_pid ->
GenServer.cast(sub_pid, message)
end)
end
end
|
lib/eeyeore/settings.ex
| 0.695752
| 0.424084
|
settings.ex
|
starcoder
|
defmodule Brando.LivePreview do
@moduledoc """
Create a `MyAppWeb.LivePreview` module if it does not already exist
```
use Brando.LivePreview
preview_target Brando.Pages.Page do
mutate_data fn entry -> %{entry | title: "custom"} end
layout_module MyAppWeb.LayoutView
view_module MyAppWeb.PageView
view_template fn e -> e.template end
template_section fn e -> e.key end
assign :navigation, fn _ -> Brando.Navigation.get_menu("main", "en") |> elem(1) end
assign :partials, fn _ -> Brando.Pages.get_fragments("partials") |> elem(1) end
end
```
"""
require Logger
alias Brando.Utils
alias Brando.Worker
@preview_coder Hashids.new(
alphabet: "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890",
salt: "bxqStiFpm5to0gsRHyC0afyIaTOH5jjD/T+kOMU5Z9UHCLJuPVnM6ESNaMC8rkzR",
min_len: 32
)
defmacro __using__(_) do
quote do
import unquote(__MODULE__)
@before_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(_) do
quote generated: true do
def has_preview_target(_), do: false
end
end
@doc """
Generates a render function for live previewing
Set `template_prop` if your template uses another way to reference the entry than what
is used in Vue. For instance, if in Vue it is a `project`, but you want `entry`, then
set `template_prop :entry`
- `schema_preloads` - List of atoms to preload on `entry``
- `mutate_data` - function to mutate entry data `entry`
mutate_data fn entry -> %{entry | title: "custom"} end
- `layout_module` - The layout view module we want to use for rendering
- `layout_template` - The layout template we want to use for rendering
- `view_module` - The view module we want to use for rendering
- `view_template` - The template we want to use for rendering
  - `template_prop` - What we refer to the entry as
- `template_section` - Run this with `put_section` on conn
- `template_css_classes` - Run this with `put_css_classes` on conn
"""
defmacro preview_target(schema_module, do: block) do
quote location: :keep, generated: true do
@doc """
`prop` - the variable name we store the entry under
`cache_key` - unique key for this live preview
"""
def render(unquote(schema_module), entry, cache_key) do
var!(cache_key) = cache_key
var!(opts) = [
layout_template: "app.html",
mutate_data: fn e -> e end,
schema_preloads: []
]
var!(entry) = entry
var!(extra_vars) = []
var!(language) = Map.get(var!(entry), :language, Brando.config(:default_language))
unquote(block)
processed_opts = var!(opts)
template =
if is_function(processed_opts[:view_template]) do
processed_opts[:view_template].(var!(entry))
else
processed_opts[:view_template]
end
section =
if is_function(processed_opts[:template_section]) do
processed_opts[:template_section].(var!(entry))
else
processed_opts[:template_section]
end
css_classes =
if is_function(processed_opts[:template_css_classes]) do
processed_opts[:template_css_classes].(var!(entry))
else
processed_opts[:template_css_classes]
end
# preloads
var!(entry) =
var!(entry)
|> Brando.repo().preload(processed_opts[:schema_preloads])
|> processed_opts[:mutate_data].()
atom_prop =
if processed_opts[:template_prop] !== nil,
do: processed_opts[:template_prop],
else: :entry
villain_fields = unquote(schema_module).__villain_fields__
var!(entry) =
Enum.reduce(villain_fields, var!(entry), fn attr, updated_entry ->
html_attr = Brando.Villain.get_html_field(unquote(schema_module), attr)
atom_key = attr.name
parsed_villain =
Brando.Villain.parse(Map.get(var!(entry), atom_key), var!(entry),
cache_modules: true,
data_field: atom_key,
html_field: html_attr.name
)
Map.put(updated_entry, html_attr.name, parsed_villain)
end)
session_opts =
Plug.Session.init(
store: :cookie,
key: "_live_preview_key",
signing_salt: "<PASSWORD>"
)
# build conn
conn =
Phoenix.ConnTest.build_conn(:get, "/#{var!(language)}/__LIVE_PREVIEW")
|> Plug.Session.call(session_opts)
|> Plug.Conn.assign(:language, to_string(var!(language)))
|> Brando.router().browser([])
|> Brando.Plug.HTML.put_section(section)
|> Brando.Plug.HTML.put_css_classes(css_classes)
render_assigns =
(Map.to_list(conn.assigns) ++
[
{:conn, conn},
{:section, section},
{:LIVE_PREVIEW, true},
{:language, to_string(var!(language))},
{atom_prop, var!(entry)}
] ++ unquote(Macro.var(:extra_vars, nil)))
|> Enum.into(%{})
inner = Phoenix.View.render(processed_opts[:view_module], template, render_assigns)
root_assigns = render_assigns |> Map.put(:inner_content, inner) |> Map.delete(:layout)
Phoenix.View.render_to_string(
processed_opts[:layout_module],
processed_opts[:layout_template],
root_assigns
)
end
def has_preview_target(unquote(schema_module)), do: true
end
end
defmacro schema_preloads(schema_preloads) do
quote do
var!(opts) = Keyword.put(var!(opts), :schema_preloads, unquote(schema_preloads))
end
end
@doc """
Mutate the entry available for rendering
### Example
mutate_data fn entry -> %{entry | title: "custom"} end
Will add `title` with value `custom` to the `entry`
"""
defmacro mutate_data(mutate_data) do
quote do
var!(opts) = Keyword.put(var!(opts), :mutate_data, unquote(mutate_data))
end
end
defmacro layout_module(layout_module) do
quote do
var!(opts) = Keyword.put(var!(opts), :layout_module, unquote(layout_module))
end
end
defmacro layout_template(layout_template) do
quote do
var!(opts) = Keyword.put(var!(opts), :layout_template, unquote(layout_template))
end
end
defmacro view_module(view_module) do
quote do
var!(opts) = Keyword.put(var!(opts), :view_module, unquote(view_module))
end
end
defmacro view_template(view_template) do
quote do
var!(opts) = Keyword.put(var!(opts), :view_template, unquote(view_template))
end
end
defmacro template_section(template_section) do
quote do
var!(opts) = Keyword.put(var!(opts), :template_section, unquote(template_section))
end
end
defmacro template_css_classes(template_css_classes) do
quote do
var!(opts) = Keyword.put(var!(opts), :template_css_classes, unquote(template_css_classes))
end
end
defmacro template_prop(template_prop) do
quote do
var!(opts) = Keyword.put(var!(opts), :template_prop, unquote(template_prop))
end
end
@doc """
Assign variables to be used in the live preview.
Normally you would set the same assigns you do in your controller.
## Example
assign :latest_articles, fn _, language ->
# language is either the language found in the `entry` or the default site language
MyApp.Articles.list_articles!(%{
filter: %{featured: false, language: language},
preload: [:category],
order: "asc sequence,
limit: 4
})
end
"""
defmacro assign(var_name, var_value) do
quote do
cached_var =
Brando.LivePreview.get_var(unquote(Macro.var(:cache_key, nil)), unquote(var_name), fn ->
case :erlang.fun_info(unquote(var_value))[:arity] do
1 -> unquote(var_value).(var!(entry))
2 -> unquote(var_value).(var!(entry), var!(language))
end
end)
var!(extra_vars) = [{unquote(var_name), cached_var} | unquote(Macro.var(:extra_vars, nil))]
end
end
@spec build_cache_key(integer) :: binary
def build_cache_key(seed), do: "PREVIEW-" <> Hashids.encode(@preview_coder, seed)
@spec build_share_key(integer) :: binary
def build_share_key(seed), do: "__SHAREPREVIEW__" <> Hashids.encode(@preview_coder, seed)
def store_cache(key, html), do: Cachex.put(:cache, "__live_preview__" <> key, html)
def get_cache(key), do: Cachex.get(:cache, "__live_preview__" <> key)
@spec initialize(any, any) :: {:error, <<_::384>>} | {:ok, <<_::64, _::_*8>>}
def initialize(schema, changeset) do
preview_module = Brando.live_preview()
if function_exported?(preview_module, :render, 3) do
cache_key = build_cache_key(:erlang.system_time())
schema_module = Module.concat([schema])
entry_struct = Ecto.Changeset.apply_changes(changeset)
try do
wrapper_html = preview_module.render(schema_module, entry_struct, cache_key)
if Cachex.get(:cache, cache_key) == {:ok, nil} do
Brando.LivePreview.store_cache(cache_key, wrapper_html)
end
Brando.endpoint().broadcast("live_preview:#{cache_key}", "update", %{html: wrapper_html})
{:ok, cache_key}
rescue
err in [KeyError] ->
Logger.error("""
Stacktrace:
#{Exception.format(:error, err, __STACKTRACE__)}
""")
if err.term.__struct__ == Ecto.Association.NotLoaded do
{:error,
"LivePreview is missing preload for #{inspect(err.term.__field__)}<br><br>Add `schema_preloads [#{inspect(err.term.__field__)}]` to your `preview_target`"}
else
{:error, "#{inspect(err, pretty: true)}"}
end
err ->
Logger.error("""
Livepreview Initialization failed.
""")
Logger.error("""
Error:
#{inspect(err, pretty: true)}
""")
Logger.error("""
Stacktrace:
#{Exception.format(:error, err, __STACKTRACE__)}
""")
{:error, "Initialization failed. #{inspect(err)}"}
end
else
{:error, "No render/3 function found in LivePreview module"}
end
end
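  # Illustrative (sketch): initialize a live preview from a changeset and read
  # back the cached HTML.
  #
  #     {:ok, cache_key} = Brando.LivePreview.initialize("Brando.Pages.Page", changeset)
  #     {:ok, html} = Brando.LivePreview.get_cache(cache_key)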
def update(schema, changeset, cache_key) do
# TODO: consider if it's worth trying to diff
preview_module = Brando.live_preview()
schema_module = Module.concat([schema])
entry = Ecto.Changeset.apply_changes(changeset)
wrapper_html = preview_module.render(schema_module, entry, cache_key)
Brando.endpoint().broadcast("live_preview:#{cache_key}", "update", %{html: wrapper_html})
cache_key
end
@doc """
Renders the entry, stores in DB and returns URL
"""
  def share(schema, id, revision, key, _prop, user) do
preview_module = Brando.live_preview()
if function_exported?(preview_module, :render, 3) do
schema_module = Module.concat([schema])
context = schema_module.__modules__().context
singular = schema_module.__naming__().singular
get_opts =
if revision do
%{matches: %{id: id}, revision: revision}
else
%{matches: %{id: id}}
end
{:ok, entry} = apply(context, :"get_#{singular}", [get_opts])
      html =
        schema_module
        |> preview_module.render(entry, key)
        |> Utils.term_to_binary()
preview_key = Utils.random_string(12)
expires_at = DateTime.add(DateTime.utc_now(), 24 * 60 * 60, :second)
preview = %{
html: html,
preview_key: preview_key,
expires_at: expires_at
}
{:ok, preview} = Brando.Sites.create_preview(preview, user)
%{id: preview.id}
|> Worker.PreviewPurger.new(
scheduled_at: expires_at,
tags: [:preview_purger]
)
|> Oban.insert()
{:ok, Brando.Sites.Preview.__absolute_url__(preview)}
end
end
def get_entry(cache_key) do
case Cachex.get(:cache, "#{cache_key}__ENTRY") do
{:ok, val} ->
:erlang.binary_to_term(val)
end
end
def set_entry(cache_key, entry) do
Cachex.put(
:cache,
"#{cache_key}__ENTRY",
:erlang.term_to_binary(entry),
ttl: :timer.minutes(60)
)
entry
end
def get_var(cache_key, key, fallback_fn) do
case Cachex.get(:cache, "#{cache_key}__VAR__#{key}") do
{:ok, nil} ->
val = fallback_fn.()
Cachex.put(:cache, "#{cache_key}__VAR__#{key}", val, ttl: :timer.seconds(120))
val
{:ok, val} ->
val
end
end
end
|
lib/brando/live_preview.ex
| 0.777173
| 0.53692
|
live_preview.ex
|
starcoder
|
defmodule ExKdl do
@moduledoc """
A robust and efficient decoder and encoder for the KDL Document Language.
"""
alias ExKdl.{DecodeError, EncodeError, Encoder, Lexer, Node, Parser}
@doc """
Decodes a KDL-encoded document from the `input` binary.
## Examples
iex> ExKdl.decode("node 10")
{:ok,
[
%ExKdl.Node{
name: "node",
type: nil,
values: [%ExKdl.Value{type: nil, value: %Decimal{coef: 10}}],
properties: %{},
children: []
}
]}
iex> ExKdl.decode(~s|node "unterminated string|)
{:error,
%ExKdl.DecodeError{
line: 1,
message: "unterminated string meets end of file"
}}
"""
@spec decode(binary) :: {:ok, [Node.t()]} | {:error, DecodeError.t()}
def decode(input) when is_binary(input) do
with {:ok, tokens} <- Lexer.lex(input) do
Parser.parse(tokens)
end
end
def decode(_) do
{:error, %DecodeError{message: "Argument to decode/1 must be a KDL-encoded binary"}}
end
@doc """
Decodes a KDL-encoded document from the `input` binary.
Similar to `decode/1` except it will raise in the event of an error.
## Examples
iex> ExKdl.decode!("node 10")
[
%ExKdl.Node{
name: "node",
type: nil,
values: [%ExKdl.Value{type: nil, value: %Decimal{coef: 10}}],
properties: %{},
children: []
}
]
iex> ExKdl.decode!(~s|node "unterminated string|)
** (ExKdl.DecodeError) Line 1: unterminated string meets end of file
"""
@spec decode!(binary) :: [Node.t()]
def decode!(input) do
case decode(input) do
{:ok, nodes} ->
nodes
{:error, error} ->
raise error
end
end
@doc """
Encodes a list of `ExKdl.Node` structs into a KDL-encoded binary.
## Examples
iex> ExKdl.encode([%ExKdl.Node{name: "node", values: [%ExKdl.Value{value: %Decimal{coef: 10}}]}])
{:ok, "node 10\\n"}
iex> ExKdl.encode(nil)
{:error, %ExKdl.EncodeError{message: "Argument to encode/1 must be a list of KDL nodes"}}
"""
@spec encode([Node.t()]) :: {:ok, binary} | {:error, EncodeError.t()}
def encode(input) when is_list(input) do
Encoder.encode(input)
end
def encode(_) do
{:error, %EncodeError{message: "Argument to encode/1 must be a list of KDL nodes"}}
end
@doc """
Encodes a list of `ExKdl.Node` structs into a KDL-encoded binary.
Similar to `encode/1` except it will raise in the event of an error.
## Examples
iex> ExKdl.encode!([%ExKdl.Node{name: "node", values: [%ExKdl.Value{value: %Decimal{coef: 10}}]}])
"node 10\\n"
iex> ExKdl.encode!(nil)
** (ExKdl.EncodeError) Argument to encode/1 must be a list of KDL nodes
"""
@spec encode!([Node.t()]) :: binary
def encode!(input) do
case encode(input) do
{:ok, encoded} ->
encoded
{:error, error} ->
raise error
end
end
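  # Round-trip sketch:
  #
  #     {:ok, nodes} = ExKdl.decode("node 10")
  #     {:ok, "node 10\n"} = ExKdl.encode(nodes)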
end
|
lib/ex_kdl.ex
| 0.930332
| 0.602032
|
ex_kdl.ex
|
starcoder
|
IO.puts("------------ Atom ------------")
atom_apple = :apple
atom_orange = :orange
atom_true = :true
atom_false = :false
atom_nil = :nil
IO.puts("is_atom(atom_apple) => #{is_atom(atom_apple)}")
IO.puts("atom_apple == atom_orange => #{atom_apple == atom_orange}")
IO.puts("is_atom(true) => #{is_atom(true)}")
IO.puts("atom_true == true => #{atom_true == true}")
IO.puts("\n")
# Booleans
IO.puts("------------ Boolean ------------")
IO.puts("Is `true` an atom? #{is_atom(true)}")
IO.puts("Is `false` an atom? #{is_atom(false)}")
IO.puts("\n")
# String
IO.puts("------------ String ------------")
hello = "Hello Alchemist!"
# Will output Hello Alchemist!
IO.puts("#{hello}")
# String length (in bytes)
IO.puts("Length: #{byte_size(hello)}")
# Is binary?
IO.puts("Is binary?: #{is_binary(hello)}")
# Integer
IO.puts("------------ Integer ------------")
# Opting-in to use Integer
require Integer
ten = 10
IO.puts("Value: #{ten}")
IO.puts("Is even? #{Integer.is_even(ten)}")
IO.puts("Is odd? #{Integer.is_odd(ten)}")
# Float
IO.puts("------------ Float ------------")
# Opting-in to use Float
require Float
pi = 3.1415
IO.puts("Value: #{pi}")
IO.puts("Ceil func: #{Float.ceil(pi, 0)}")
IO.puts("Floor func: #{Float.floor(pi, 0)}")
# List
IO.puts("------------ List ------------")
## Defining a list
list1 = [1, 2, 3, 4]
## Using inspect
IO.puts("List 1: #{inspect(list1)}")
# It have head and tail
## Head is the first element
## Tail is the rest
[head_list1 | tail_list1] = list1
IO.puts("Head 1: #{head_list1}")
IO.puts("Tail 1: #{inspect(tail_list1)}")
# Operators
## ++ - Concat
list2 = [1, 2] ++ [3, 4]
IO.puts("[1, 2] ++ [3, 4] = #{inspect(list2)}")
list3 = [1, 2] -- [2, 3]
IO.puts("[1, 2] -- [2, 3] = #{inspect(list3)}")
# Tuple
IO.puts("------------ Tuple ------------")
tuple = {1, "two", :three}
IO.puts("tuple = #{inspect(tuple)}")
IO.puts("Tuple size: #{tuple_size(tuple)}")
# Map
IO.puts("------------ Map ------------")
# Opting-in to use Map
require Map
map = %{:a => 1, :b => "tres", "key" => :value}
IO.puts("map = #{inspect(map)}")
IO.puts("map[:a] #{map[:a]}")
IO.puts("Map.get(map, :b) #{Map.get(map, :b)}")
fetched = Map.fetch(map, "key")
IO.puts("Map.fetch(map, \"key\") #{inspect(fetched)}")
# Enum
IO.puts("------------ Enum ------------")
list1 = [1, 2, 3, 4, 5, 6, 7]
IO.puts(Enum.all?(list1, fn(n) -> n > 2 end))
IO.puts(Enum.any?(list1, fn(n) -> n > 2 end))
IO.inspect Enum.chunk_every(list1, 2), label: "Chunk list"
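# A couple more common Enum functions (illustrative)
IO.inspect Enum.map(list1, fn n -> n * 2 end), label: "Doubled list"
IO.inspect Enum.reduce(list1, 0, fn n, acc -> n + acc end), label: "Sum"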
|
01_variables_and_types/04_basic_types/types.ex
| 0.690037
| 0.537345
|
types.ex
|
starcoder
|
defmodule HoodMelville do
@moduledoc """
  A purely functional FIFO queue with worst-case constant-time operations, based
  on the Hood-Melville real-time queue.
"""
@type rotation_state ::
{:reversing, integer(), [term()], [term()], [term()], [term()]}
| {:idle}
| {:appending, integer(), [term()], [term()]}
| {:done, [term()]}
@type queue :: {integer(), [term()], rotation_state(), integer(), [term()]}
@empty {0, [], {:idle}, 0, []}
@spec empty() :: queue()
def empty do
@empty
end
defdelegate new(), to: HoodMelville, as: :empty
@spec is_empty(queue()) :: boolean()
def is_empty({lenf, _f, _state, _lenr, _r}) do
lenf == 0
end
@spec from_list(list()) :: queue()
def from_list(list) do
Enum.reduce(list, empty(), fn x, acc -> snoc(acc, x) end)
end
@spec to_list(queue()) :: list()
def to_list({lenf, front, state, _lenr, reversed}) do
case state do
{:reversing, _ok, ws, xs, ys, zs} ->
n = lenf - Kernel.length(front) - 1
front ++
Enum.drop(Enum.reverse(ws) ++ xs ++ Enum.reverse(ys) ++ zs, n) ++ Enum.reverse(reversed)
{:appending, _ok, xs, ys} ->
n = lenf - Kernel.length(front) - 1
front ++ Enum.drop(Enum.reverse(xs) ++ ys, n) ++ Enum.reverse(reversed)
{:idle} ->
front ++ Enum.reverse(reversed)
{:done, xs} ->
front ++ xs ++ Enum.reverse(reversed)
end
end
@spec to_list_naive(queue()) :: list()
def to_list_naive(queue, acc \\ []) do
case uncons(queue) do
{:error, :empty_queue} -> Enum.reverse(acc)
{:ok, x, tl} -> to_list_naive(tl, [x | acc])
end
end
@spec uncons(queue()) :: {:ok, term(), queue()} | {:error, :empty_queue}
def uncons({_lenf, [], _state, _lenr, _r}) do
{:error, :empty_queue}
end
def uncons({lenf, [x | xs], state, lenr, r}) do
{:ok, x, check({lenf - 1, xs, invalidate(state), lenr, r})}
end
@spec snoc(queue(), term()) :: queue()
def snoc({lenf, f, state, lenr, r}, x) do
check({lenf, f, state, lenr + 1, [x | r]})
end
defdelegate insert(queue, item), to: HoodMelville, as: :snoc
  @spec head(queue()) :: term() | {:error, :empty_queue}
  def head({_lenf, [], _state, _lenr, _r}) do
    {:error, :empty_queue}
  end

  def head({_lenf, [x | _f], _state, _lenr, _r}) do
    x
  end

  defdelegate get(queue), to: HoodMelville, as: :head
@spec tail(queue()) :: queue() | {:error, :empty_queue}
def tail({_lenf, [], _state, _lenr, _r}) do
{:error, :empty_queue}
end
def tail({lenf, [_x | f], state, lenr, r}) do
check({lenf - 1, f, invalidate(state), lenr, r})
end
# @spec exec(rotation_state()) :: rotation_state()
defp exec({:reversing, ok, [x | xs], ys, [z | zs], as}) do
{:reversing, ok + 1, xs, [x | ys], zs, [z | as]}
end
defp exec({:reversing, ok, [], ys, [z], as}) do
{:appending, ok, ys, [z | as]}
end
defp exec({:appending, 0, _xs, ys}) do
{:done, ys}
end
defp exec({:appending, ok, [x | xs], ys}) do
{:appending, ok - 1, xs, [x | ys]}
end
defp exec(state) do
state
end
# @spec invalidate(rotation_state()) :: rotation_state()
defp invalidate({:reversing, ok, ws, xs, ys, zs}) do
{:reversing, ok - 1, ws, xs, ys, zs}
end
defp invalidate({:appending, 0, _xs, [_y | ys]}) do
{:done, ys}
end
defp invalidate({:appending, ok, xs, ys}) do
{:appending, ok - 1, xs, ys}
end
defp invalidate(state) do
state
end
# @spec exec2(queue()) :: queue()
defp exec2({lenf, f, state, lenr, r}) do
case exec(exec(state)) do
{:done, newf} -> {lenf, newf, {:idle}, lenr, r}
newstate -> {lenf, f, newstate, lenr, r}
end
end
# @spec check(queue()) :: queue()
defp check({lenf, f, _state, lenr, r} = q) do
if lenr <= lenf do
exec2(q)
else
new_state = {:reversing, 0, f, [], r, []}
exec2({lenf + lenr, f, new_state, 0, []})
end
end
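  # Illustrative usage (sketch):
  #
  #     q = HoodMelville.from_list([1, 2, 3])
  #     {:ok, 1, q} = HoodMelville.uncons(q)
  #     HoodMelville.to_list(q)
  #     #=> [2, 3]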
end
|
lib/hood_melville.ex
| 0.738198
| 0.508117
|
hood_melville.ex
|
starcoder
|
defmodule Oban do
@moduledoc """
Oban isn't an application and won't be started automatically. It is started by a supervisor that
must be included in your application's supervision tree. All of your configuration is passed
into the `Oban` supervisor, allowing you to configure Oban like the rest of your application.
```elixir
  # config/config.exs
config :my_app, Oban,
repo: MyApp.Repo,
queues: [default: 10, events: 50, media: 20]
# lib/my_app/application.ex
defmodule MyApp.Application do
@moduledoc false
use Application
alias MyApp.{Endpoint, Repo}
def start(_type, _args) do
children = [
Repo,
Endpoint,
{Oban, Application.get_env(:my_app, Oban)}
]
Supervisor.start_link(children, strategy: :one_for_one, name: MyApp.Supervisor)
end
end
```
### Configuring Queues
Queues are specified as a keyword list where the key is the name of the queue
and the value is the maximum number of concurrent jobs. The following
configuration would start four queues with concurrency ranging from 5 to 50:
```elixir
queues: [default: 10, mailers: 20, events: 50, media: 5]
```
There isn't a limit to the number of queues or how many jobs may execute
concurrently. Here are a few caveats and guidelines:
* Each queue will run as many jobs as possible concurrently, up to the
configured limit. Make sure your system has enough resources (i.e. database
connections) to handle the concurrent load.
* Only jobs in the configured queues will execute. Jobs in any other queue
will stay in the database untouched.
* Be careful how many concurrent jobs make expensive system calls (i.e. FFMpeg,
ImageMagick). The BEAM ensures that the system stays responsive under load,
but those guarantees don't apply when using ports or shelling out commands.
### Creating Workers
Worker modules do the work of processing a job. At a minimum they must define a
`perform/1` function, which is called with an `args` map.
Define a worker to process jobs in the `events` queue:
```elixir
defmodule MyApp.Workers.Business do
use Oban.Worker, queue: "events", max_attempts: 10
@impl Oban.Worker
def perform(%{"id" => id}) do
model = MyApp.Repo.get(MyApp.Business.Man, id)
IO.inspect(model)
end
end
```
The return value of `perform/1` doesn't matter and is entirely ignored. If the
job raises an exception or throws an exit then the error will be reported and
the job will be retried (provided there are attempts remaining).
See `Oban.Worker` for more details.
### Enqueueing Jobs
  Jobs are simply `Ecto` structs and are enqueued by inserting them into the
database. Here we insert a job into the `default` queue and specify the worker
by module name:
```elixir
%{id: 1, user_id: 2}
|> Oban.Job.new(queue: :default, worker: MyApp.Worker)
|> MyApp.Repo.insert()
```
For convenience and consistency all workers implement a `new/2` function that
converts an args map into a job changeset suitable for inserting into the
database:
```elixir
%{in_the: "business", of_doing: "business"}
|> MyApp.Workers.Business.new()
|> MyApp.Repo.insert()
```
The worker's defaults may be overridden by passing options:
```elixir
%{vote_for: "none of the above"}
|> MyApp.Workers.Business.new(queue: "special", max_attempts: 5)
|> MyApp.Repo.insert()
```
Jobs may be scheduled down to the second any time in the future:
```elixir
%{id: 1}
|> MyApp.Workers.Business.new(schedule_in: 5)
|> MyApp.Repo.insert()
```
See `Oban.Job.new/2` for a full list of job options.
## Testing
Oban doesn't provide any special mechanisms for testing. However, here are a few
recommendations for running tests in isolation.
* Set a high `poll_interval` in your test configuration. This effectively stops
queues from polling and will prevent inserted jobs from executing.
```elixir
config :my_app, Oban, poll_interval: :timer.minutes(30)
```
* Be sure to use the Ecto Sandbox for testing. Oban makes use of database pubsub
events to dispatch jobs, but pubsub events never fire within a transaction.
Since sandbox tests run within a transaction no events will fire and jobs
won't be dispatched.
```elixir
config :my_app, MyApp.Repo, pool: Ecto.Adapters.SQL.Sandbox
```
## Error Handling
When a job raises an error or exits during execution the details are recorded within the
`errors` array on the job. Provided the number of execution attempts is below the configured
`max_attempts`, the job will automatically be retried in the future. The retry delay has a
quadratic backoff, meaning the job's second attempt will be after 16s, third after 31s, fourth
after 1m 36s, etc.
### Error Details
Execution errors are stored as a formatted exception along with metadata about when the failure
  occurred and which attempt caused it. Each error is stored with the following keys:
  - `at` The UTC timestamp when the error occurred
  - `attempt` The attempt number when the error occurred
- `error` A formatted error message and stacktrace
See the [Instrumentation](#module-instrumentation) docs below for an example of integrating with
external error reporting systems.
### Limiting Retries
By default jobs will be retried up to 20 times. The number of retries is controlled by the
`max_attempts` value, which can be set at the Worker or Job level. For example, to instruct a
worker to discard jobs after three failures:
use Oban.Worker, queue: "limited", max_attempts: 3
## Pruning Historic Jobs
Job stats and queue introspection is built on keeping job rows in the database after they have
completed. This allows administrators to review completed jobs and build informative aggregates,
but at the expense of storage and an unbounded table size. To prevent the `oban_jobs` table from
growing indefinitely, Oban provides active pruning of `completed` jobs.
By default, pruning is disabled. To enable pruning we configure a supervision tree with the
`:prune` option. There are three distinct modes of pruning:
* `:disabled` - This is the default, where no pruning happens at all
* `{:maxlen, count}` - Pruning is based on the number of rows in the table, any rows beyond
the configured `count` will be deleted
* `{:maxage, seconds}` - Pruning is based on a row's age, any rows older than the configured
number of `seconds` will be deleted. The age unit is always specified in seconds, but values
on the scale of days, weeks or months are perfectly acceptable.
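  For example, to keep at most 50,000 completed jobs (an illustrative value):
  ```elixir
  {Oban, repo: MyApp.Repo, queues: [default: 10], prune: {:maxlen, 50_000}}
  ```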
Pruning is best-effort and performed out-of-band. This means that all limits are soft; jobs
beyond a specified length or age may not be pruned immediately after jobs complete. Prune timing
is based on the configured `poll_interval`, where pruning occurs once for every 60 queue polls.
With the default `poll_interval` of 1 second that means pruning occurs at system startup and
then once every minute afterwards.
Note, only jobs in a `completed` or `discarded` state will be deleted. Currently `executing`
jobs and older jobs that are still in the `available` state will be retained.
## Instrumentation
Oban provides integration with [Telemetry][tele], a dispatching library for metrics. It is easy
to report Oban metrics to any backend by attaching to `:oban` events.
  For example, to log out the timing for all executed jobs:
```elixir
defmodule ObanLogger do
require Logger
def handle_event([:oban, :job, :executed], %{timing: timing}, meta, _config) do
Logger.info("[meta.queue] meta.worker meta.event in timing")
end
end
:telemetry.attach("oban-logger", [:oban, :job, :executed], &ObanLogger.handle_event/4, nil)
```
Another great use of execution data is error reporting. Here is an example of
integrating with [Honeybadger][honey]:
```elixir
defmodule ErrorReporter do
    def handle_event([:oban, :job, :executed], _timing, %{event: :failure} = meta, _config) do
context = Map.take(meta, [:id, :args, :queue, :worker])
Honeybadger.notify(meta.error, context, meta.stack)
end
end
:telemetry.attach("oban-errors", [:oban, :job, :executed], &ErrorReporter.handle_event/4, nil)
```
Here is a reference for metric event metadata:
| event | metadata |
| ---------- | ---------------------------------------------------- |
| `:success` | `:id, :args, :queue, :worker` |
| `:failure` | `:id, :args, :queue, :worker, :kind, :error, :stack` |
[tele]: https://hexdocs.pm/telemetry
[honey]: https://honeybadger.io
"""
use Supervisor
alias Oban.{Config, Notifier, Pruner}
alias Oban.Queue.Supervisor, as: QueueSupervisor
@type option ::
{:name, module()}
| {:node, binary()}
| {:poll_interval, pos_integer()}
| {:prune, :disabled | {:maxlen, pos_integer()} | {:maxage, pos_integer()}}
| {:queues, [{atom(), pos_integer()}]}
| {:repo, module()}
| {:shutdown_grace_period, timeout()}
@doc """
Starts an `Oban` supervision tree linked to the current process.
## Options
* `:name` — used for name supervisor registration
* `:node` — used to identify the node that the supervision tree is running in. If no value is
    provided it will use the `node` name in a distributed system, or the `hostname` in an isolated
node. See "Node Name" below.
  * `:repo` — specifies the Ecto repo used to insert and retrieve jobs.
* `:queues` — a keyword list where the keys are queue names and the values are the concurrency
setting. For example, setting queues to `[default: 10, exports: 5]` would start the queues
`default` and `exports` with a combined concurrency level of 20. The concurrency setting
specifies how many jobs _each queue_ will run concurrently.
* `:poll_interval` - the amount of time between a queue pulling new jobs, specified in
milliseconds. This is directly tied to the resolution of _scheduled_ jobs. For example, with a
`poll_interval` of `5_000ms`, scheduled jobs are checked every 5 seconds. The default is
`1_000ms`.
* `:prune` - configures job pruning behavior, see "Pruning Historic Jobs" for more information
* `:shutdown_grace_period` - the amount of time a queue will wait for executing jobs to complete
before hard shutdown, specified in milliseconds. The default is `15_000`, or 15 seconds.
Note that any options passed to `start_link` will override options set through the `using` macro.
## Examples
To start an `Oban` supervisor within an application's supervision tree:
def start(_type, _args) do
children = [MyApp.Repo, {Oban, queues: [default: 50]}]
Supervisor.start_link(children, strategy: :one_for_one, name: MyApp.Supervisor)
end
## Node Name
When the `node` value hasn't been configured it will be generated based on the environment:
* In a distributed system the node name is used
* In a Heroku environment the system environment's `DYNO` value is used
* Otherwise, the system hostname is used
"""
@spec start_link([option()]) :: Supervisor.on_start()
def start_link(opts) when is_list(opts) do
conf = Config.new(opts)
Supervisor.start_link(__MODULE__, conf, name: conf.name)
end
@impl Supervisor
def init(%Config{queues: queues} = conf) do
children = [
{Config, conf: conf, name: Config},
{Pruner, conf: conf, name: Pruner},
{Notifier, conf: conf, name: Notifier}
]
children = children ++ Enum.map(queues, &queue_spec(&1, conf))
Supervisor.init(children, strategy: :one_for_one)
end
defp queue_spec({queue, limit}, conf) do
queue = to_string(queue)
name = Module.concat([conf.name, "Queue", String.capitalize(queue)])
opts = [conf: conf, queue: queue, limit: limit, name: name]
Supervisor.child_spec({QueueSupervisor, opts}, id: name)
end
@doc """
  Retrieve the current config struct.
"""
defdelegate config, to: Config, as: :get
@doc """
Pause a running queue, preventing it from executing any new jobs. All running jobs will remain
running until they are finished.
When shutdown begins all queues are paused.
## Example
Pause the default queue:
Oban.pause_queue(:default)
:ok
"""
@spec pause_queue(queue :: atom()) :: :ok
defdelegate pause_queue(queue), to: Notifier
@doc """
Resume executing jobs in a paused queue.
## Example
Resume a paused default queue:
Oban.resume_queue(:default)
:ok
"""
@spec resume_queue(queue :: atom()) :: :ok
defdelegate resume_queue(queue), to: Notifier
@doc """
Scale the concurrency for a queue.
## Example
Scale a queue up, triggering immediate execution of queued jobs:
Oban.scale_queue(:default, 50)
:ok
Scale the queue back down, allowing executing jobs to finish:
Oban.scale_queue(:default, 5)
:ok
"""
@spec scale_queue(queue :: atom(), scale :: pos_integer()) :: :ok
defdelegate scale_queue(queue, scale), to: Notifier
@doc """
Kill an actively executing job and mark it as `discarded`, ensuring that it won't be retried.
If the job happens to fail before it can be killed the state is set to `discarded`. However,
if it manages to complete successfully then the state will still be `completed`.
## Example
Kill a long running job with an id of `1`:
Oban.kill_job(1)
:ok
"""
@spec kill_job(job_id :: pos_integer()) :: :ok
defdelegate kill_job(job_id), to: Notifier
end
|
lib/oban.ex
| 0.898363
| 0.787032
|
oban.ex
|
starcoder
|
defmodule OpcUA.Server do
use OpcUA.Common
alias OpcUA.{NodeId}
@moduledoc """
OPC UA Server API module.
  This module provides functions for configuration, adding/deleting/reading/writing nodes, and discovery of an OPC UA Server.
`OpcUA.Server` is implemented as a `__using__` macro so that you can put it in any module,
you can initialize your Server manually (see `test/server_tests/write_event_test.exs`) or by overwriting
`configuration/1` and `address_space/1` to autoset the configuration and information model. It also helps you to
handle Server's "write value" events by overwriting `handle_write/2` callback.
The following example shows a module that takes its configuration from the environment (see `test/server_tests/terraform_test.exs`):
```elixir
defmodule MyServer do
use OpcUA.Server
alias OpcUA.Server
# Use the `init` function to configure your server.
def init({parent_pid, 103} = _user_init_state, opc_ua_server_pid) do
Server.start(opc_ua_server_pid)
%{parent_pid: parent_pid}
end
def configuration(_user_init_state), do: Application.get_env(:opex62541, :configuration, [])
def address_space(_user_init_state), do: Application.get_env(:opex62541, :address_space, [])
def handle_write(write_event, %{parent_pid: parent_pid} = state) do
send(parent_pid, write_event)
state
end
end
```
  Because it is a small GenServer, it accepts the same [options](https://hexdocs.pm/elixir/GenServer.html#module-how-to-supervise) for supervision
to configure the child spec and passes them along to `GenServer`:
```elixir
defmodule MyModule do
use OpcUA.Server, restart: :transient, shutdown: 10_000
end
```
"""
@doc """
  Optional callback that handles node value updates from a Client to a Server.
  Its first argument is a tuple whose first element is the `node_id` of the updated node
  and whose second element is the updated value.
  The second argument is the GenServer state (parent process).
"""
@callback handle_write(key :: {%NodeId{}, any}, term()) :: term()
@type config_params ::
{:hostname, binary()}
| {:port, non_neg_integer()}
| {:users, keyword()}
@type config_options ::
{:config, config_params}
| {:discovery, {binary(), non_neg_integer()}}
@doc """
Optional callback that gets the Server configuration and discovery connection parameters.
"""
@callback configuration(term()) :: config_options
@type address_space_list ::
{:namespace, binary()}
| {:variable_node, %OpcUA.VariableNode{}}
| {:variable_type_node, %OpcUA.VariableTypeNode{}}
| {:method_node, %OpcUA.MethodNode{}} #WIP
| {:object_node, %OpcUA.ObjectNode{}}
| {:object_type_node, %OpcUA.ObjectTypeNode{}}
| {:reference_type_node, %OpcUA.ReferenceTypeNode{}}
| {:data_type_node, %OpcUA.DataTypeNode{}}
| {:view_node, %OpcUA.ViewNode{}}
| {:reference_node, %OpcUA.ReferenceNode{}}
| {:monitored_item, %OpcUA.MonitoredItem{}}
@doc """
Optional callback that gets a list of nodes (with their attributes) to be automatically set.
"""
@callback address_space(term()) :: address_space_list
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
use GenServer, Keyword.drop(opts, [:configuration])
@behaviour OpcUA.Server
@mix_env Mix.env()
alias __MODULE__
def start_link(user_initial_params \\ []) do
GenServer.start_link(__MODULE__, user_initial_params, unquote(opts))
end
@impl true
def init(user_initial_params) do
send self(), :init
{:ok, user_initial_params}
end
@impl true
def handle_info(:init, user_initial_params) do
# Server Terraform
{:ok, s_pid} = OpcUA.Server.start_link()
configuration = apply(__MODULE__, :configuration, [user_initial_params])
address_space = apply(__MODULE__, :address_space, [user_initial_params])
OpcUA.Server.set_default_config(s_pid)
      # configuration = [config: list(), discovery: {term(), term()}]
set_server_config(s_pid, configuration, :config)
set_server_config(s_pid, configuration, :discovery)
# address_space = [namespace: "", namespace: "", variable: %VariableNode{}, ...]
set_server_address_space(s_pid, address_space)
# User initialization.
user_state = apply(__MODULE__, :init, [user_initial_params, s_pid])
{:noreply, user_state}
end
def handle_info({%NodeId{} = node_id, value}, state) do
state = apply(__MODULE__, :handle_write, [{node_id, value}, state])
{:noreply, state}
end
@impl true
def handle_write(write_event, state) do
require Logger
Logger.warn("No handle_write/2 clause in #{__MODULE__} provided for #{inspect(write_event)}")
state
end
@impl true
def address_space(_user_init_state), do: []
@impl true
def configuration(_user_init_state), do: []
defp set_server_config(s_pid, configuration, type) do
config_params = Keyword.get(configuration, type, [])
Enum.each(config_params, fn(config_param) -> GenServer.call(s_pid, {type, config_param}) end)
end
defp set_server_address_space(s_pid, address_space) do
for {node_type, node_params} <- address_space, reduce: %{} do
acc -> add_node(s_pid, node_type, node_params, acc)
end
end
defp add_node(s_pid, :namespace, node_param, namespaces) do
ns_index = GenServer.call(s_pid, {:add, {:namespace, node_param}})
Map.put(namespaces, node_param, ns_index)
end
defp add_node(s_pid, node_type, node, namespaces) do
# separate the node params (creation arguments & node attributes)
{node_args, node_attrs} =
node
|> Map.from_struct()
|> Map.pop(:args)
# Create node
#node_args = replace_namespace(node_args, namespaces)
GenServer.call(s_pid, {:add, {node_type, node_args}})
# add node attributes
node_id = Keyword.get(node_args, :requested_new_node_id, nil)
#node_attrs = replace_namespace(node_attrs, namespaces)
add_node_attrs(s_pid, node_id, node_attrs)
namespaces
end
defp add_node_attrs(s_pid, node_id, node_attrs) do
for {attr, attr_value} <- node_attrs do
set_node_attr(s_pid, node_id, attr, attr_value)
end
end
defp set_node_attr(_s_pid, _node_id, _attr, nil), do: nil
defp set_node_attr(s_pid, node_id, attr, attr_value) do
GenServer.call(s_pid, {:write, {attr, node_id, attr_value}})
end
# TODO: complete the function.
# defp replace_namespace(params, namespaces) do
# for {param, param_value} <- params, reduce: %{} do
# acc ->
# param_value =
# if is_struct(params_value) do
# end
# Map.put
# end
# end
defoverridable start_link: 0,
start_link: 1,
configuration: 1,
address_space: 1,
handle_write: 2
end
end
@doc """
Starts up a OPC UA Server GenServer.
"""
@spec start_link(term(), list()) :: {:ok, pid} | {:error, term} | {:error, :einval}
def start_link(args \\ [], opts \\ []) do
GenServer.start_link(__MODULE__, {args, self()}, opts)
end
@doc """
Stops a OPC UA Server GenServer.
"""
@spec stop(GenServer.server()) :: :ok
def stop(pid) do
GenServer.stop(pid)
end
# Configuration & lifecycle functions
@doc """
Reads an internal Server Config.
"""
@spec get_config(GenServer.server()) :: {:ok, map()} | {:error, binary()} | {:error, :einval}
def get_config(pid) do
GenServer.call(pid, {:config, {:get_server_config, nil}})
end
@doc """
Sets a default Server Config.
"""
@spec set_default_config(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def set_default_config(pid) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:set_default_server_config, nil}})
else
# Valgrind
GenServer.call(pid, {:config, {:set_default_server_config, nil}}, :infinity)
end
end
@doc """
Sets a default Server Config with no network layer and no endpoints.
"""
@spec set_basics(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def set_basics(pid) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:set_basics, nil}})
else
# Valgrind
GenServer.call(pid, {:config, {:set_basics, nil}}, :infinity)
end
end
@doc """
Adds the TCP network layer for the Server, listening on the given port.
"""
@spec set_network_tcp_layer(GenServer.server(), integer()) :: :ok | {:error, binary()} | {:error, :einval}
def set_network_tcp_layer(pid, port) when is_integer(port) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:network_tcp_layer, port}})
else
# Valgrind
GenServer.call(pid, {:config, {:network_tcp_layer, port}}, :infinity)
end
end
@doc """
Sets the host name for the Server.
"""
@spec set_hostname(GenServer.server(), binary()) :: :ok | {:error, binary()} | {:error, :einval}
def set_hostname(pid, hostname) when is_binary(hostname) do
GenServer.call(pid, {:config, {:hostname, hostname}})
end
@doc """
Sets a port number for the Server.
"""
@spec set_port(GenServer.server(), integer()) :: :ok | {:error, binary()} | {:error, :einval}
def set_port(pid, port) when is_integer(port) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:port, port}})
else
# Valgrind
GenServer.call(pid, {:config, {:port, port}}, :infinity)
end
end
@doc """
Adds users (and passwords) to the Server.
Users must be given as a tuple list: [{user, password}].
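For example (the credentials are illustrative):
```elixir
OpcUA.Server.set_users(pid, [{"alice", "secret"}])
```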
"""
@spec set_users(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def set_users(pid, users) when is_list(users) do
GenServer.call(pid, {:config, {:users, users}})
end
@doc """
Adds endpoints for all configured security policies in each mode.
"""
@spec add_all_endpoints(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def add_all_endpoints(pid) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:add_all_endpoints, nil}})
else
# Valgrind
GenServer.call(pid, {:config, {:add_all_endpoints, nil}}, :infinity)
end
end
@doc """
Start OPC UA Server.
"""
@spec start(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def start(pid) do
GenServer.call(pid, {:start_server, nil})
end
@doc """
Stop OPC UA Server.
"""
@spec stop_server(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def stop_server(pid) do
GenServer.call(pid, {:stop_server, nil})
end
# Encryption
@doc """
Creates a server configuration with all security policies for the given certificates.
The following must be filled:
* `:private_key` -> binary() or function().
* `:certificate` -> binary() or function().
* `:port` -> integer().
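For example, a sketch that loads DER-encoded files from disk (the paths are illustrative):
```elixir
OpcUA.Server.set_default_config_with_certs(pid,
  certificate: File.read!("certs/server_cert.der"),
  private_key: File.read!("certs/server_key.der"),
  port: 4840
)
```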
"""
@spec set_default_config_with_certs(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def set_default_config_with_certs(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:all_policies, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:all_policies, args}}, :infinity)
end
end
@doc """
Adds the security policy ``SecurityPolicy#None`` to the server with a certificate (no endpoint).
The following must be filled:
* `:certificate` -> binary() or function().
"""
@spec add_none_policy(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_none_policy(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:none_policy, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:none_policy, args}}, :infinity)
end
end
@doc """
Adds the security policy ``SecurityPolicy#Basic128Rsa15`` to the server with a certificate (no endpoint).
The following must be filled:
* `:private_key` -> binary() or function().
* `:certificate` -> binary() or function().
"""
@spec add_basic128rsa15_policy(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_basic128rsa15_policy(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:basic128rsa15_policy, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:basic128rsa15_policy, args}}, :infinity)
end
end
@doc """
Adds the security policy ``SecurityPolicy#Basic256`` to the server with a certificate (no endpoint).
The following must be filled:
* `:private_key` -> binary() or function().
* `:certificate` -> binary() or function().
"""
@spec add_basic256_policy(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_basic256_policy(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:basic256_policy, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:basic256_policy, args}}, :infinity)
end
end
@doc """
Adds the security policy ``SecurityPolicy#Basic256Sha256`` to the server with a certificate (no endpoint).
The following must be filled:
* `:private_key` -> binary() or function().
* `:certificate` -> binary() or function().
"""
@spec add_basic256sha256_policy(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_basic256sha256_policy(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:basic256sha256_policy, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:basic256sha256_policy, args}}, :infinity)
end
end
@doc """
Adds all supported security policies and sets up certificate validation procedures (no endpoint).
The following must be filled:
* `:private_key` -> binary() or function().
* `:certificate` -> binary() or function().
"""
@spec add_all_policies(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_all_policies(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:config, {:all_policies_no_endpoint, args}})
else
# Valgrind
GenServer.call(pid, {:config, {:all_policies_no_endpoint, args}}, :infinity)
end
end
# Discovery functions
@doc """
Sets the configuration for a Server representing a local discovery server (LDS) as a central instance.
Any other server can register with this server using the `discovery_register/2` function.
NOTE: before calling this function, this server should have the default configuration.
LDS servers support only the Discovery Services; they cannot be used in combination with any other capability.
The following args must be filled:
* `:application_uri` -> binary().
* `:timeout` -> integer().
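For example (the URI and timeout are illustrative):
```elixir
OpcUA.Server.set_lds_config(pid, "urn:opex62541:lds", 60)
```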
"""
@spec set_lds_config(GenServer.server(), binary(), integer() | nil) ::
:ok | {:error, binary()} | {:error, :einval}
def set_lds_config(pid, application_uri, timeout \\ nil)
when is_binary(application_uri) and (is_integer(timeout) or is_nil(timeout)) do
GenServer.call(pid, {:discovery, {application_uri, timeout}})
end
@doc """
Registers a server in a discovery server.
NOTE: The Server sends the request once started. Use port = 0 for dynamic port allocation.
The following must be filled:
* `:application_uri` -> binary().
* `:server_name` -> binary().
* `:endpoint` -> binary().
* `:timeout` -> integer().
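For example (the URIs, server name, and timeout are illustrative):
```elixir
OpcUA.Server.discovery_register(pid,
  application_uri: "urn:opex62541:example",
  server_name: "example_server",
  endpoint: "opc.tcp://localhost:4840",
  timeout: 600_000
)
```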
"""
@spec discovery_register(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def discovery_register(pid, args) when is_list(args) do
if(@mix_env != :test) do
GenServer.call(pid, {:discovery, {:discovery_register, args}})
else
# Valgrind
GenServer.call(pid, {:discovery, {:discovery_register, args}}, :infinity)
end
end
@doc """
Unregister the server from the discovery server.
NOTE: Server must be started.
"""
@spec discovery_unregister(GenServer.server()) :: :ok | {:error, binary()} | {:error, :einval}
def discovery_unregister(pid) do
GenServer.call(pid, {:discovery, {:discovery_unregister, nil}})
end
# Add & Delete nodes functions
@doc """
Add a new namespace.
"""
@spec add_namespace(GenServer.server(), binary()) ::
{:ok, integer()} | {:error, binary()} | {:error, :einval}
def add_namespace(pid, namespace) when is_binary(namespace) do
GenServer.call(pid, {:add, {:namespace, namespace}})
end
@doc """
Add a new variable node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
* `:type_definition` -> %NodeID{}.
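For example, a sketch in which every value is assumed to be a valid struct built beforehand
(the variable names are illustrative):
```elixir
OpcUA.Server.add_variable_node(pid,
  requested_new_node_id: new_node_id,
  parent_node_id: parent_node_id,
  reference_type_node_id: reference_type_node_id,
  browse_name: browse_name,
  type_definition: type_definition
)
```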
"""
@spec add_variable_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_variable_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:variable_node, args}})
end
@doc """
Add a new variable type node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
* `:type_definition` -> %NodeID{}.
"""
@spec add_variable_type_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_variable_type_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:variable_type_node, args}})
end
@doc """
Add a new object node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
* `:type_definition` -> %NodeID{}.
"""
@spec add_object_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_object_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:object_node, args}})
end
@doc """
Add a new object type node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
"""
@spec add_object_type_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_object_type_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:object_type_node, args}})
end
@doc """
Add a new view node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
"""
@spec add_view_node(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_view_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:view_node, args}})
end
@doc """
Add a new reference type node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
"""
@spec add_reference_type_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_reference_type_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:reference_type_node, args}})
end
@doc """
Add a new data type node to the server.
The following must be filled:
* `:requested_new_node_id` -> %NodeID{}.
* `:parent_node_id` -> %NodeID{}.
* `:reference_type_node_id` -> %NodeID{}.
* `:browse_name` -> %QualifiedName{}.
"""
@spec add_data_type_node(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def add_data_type_node(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:data_type_node, args}})
end
@doc """
Add a new reference in the server.
The following must be filled:
* `:source_id` -> %NodeID{}.
* `:reference_type_id` -> %NodeID{}.
* `:target_id` -> %NodeID{}.
* `:is_forward` -> boolean().
"""
@spec add_reference(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def add_reference(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:reference, args}})
end
@doc """
Deletes a reference in the server.
The following must be filled:
* `:source_id` -> %NodeID{}.
* `:reference_type_id` -> %NodeID{}.
* `:target_id` -> %NodeID{}.
* `:is_forward` -> boolean().
* `:delete_bidirectional` -> boolean().
"""
@spec delete_reference(GenServer.server(), list()) ::
:ok | {:error, binary()} | {:error, :einval}
def delete_reference(pid, args) when is_list(args) do
GenServer.call(pid, {:delete_reference, args})
end
@doc """
Deletes a node in the server.
The following must be filled:
* `:node_id` -> %NodeID{}.
* `:delete_reference` -> boolean().
"""
@spec delete_node(GenServer.server(), list()) :: :ok | {:error, binary()} | {:error, :einval}
def delete_node(pid, args) when is_list(args) do
GenServer.call(pid, {:delete_node, args})
end
# Add Monitored Items function
@doc """
Create a local MonitoredItem with a sampling interval that detects data changes.
The following must be filled:
* `:monitored_item` -> %NodeID{}.
* `:sampling_time` -> float().
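For example (the node id and sampling interval are illustrative):
```elixir
{:ok, monitored_item_id} =
  OpcUA.Server.add_monitored_item(pid,
    monitored_item: variable_node_id,
    sampling_time: 100.0
  )
```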
"""
@spec add_monitored_item(GenServer.server(), list()) ::
{:ok, integer()} | {:error, binary()} | {:error, :einval}
def add_monitored_item(pid, args) when is_list(args) do
GenServer.call(pid, {:add, {:monitored_item, args}})
end
@doc """
Deletes a local MonitoredItem.
"""
@spec delete_monitored_item(GenServer.server(), integer()) ::
:ok | {:error, binary()} | {:error, :einval}
def delete_monitored_item(pid, monitored_item_id) when is_integer(monitored_item_id) do
GenServer.call(pid, {:delete_monitored_item, monitored_item_id})
end
@doc false
def test(pid) do
GenServer.call(pid, {:test, nil}, :infinity)
end
# Handlers
def init({_args, controlling_process}) do
lib_dir =
:opex62541
|> :code.priv_dir()
|> to_string()
|> set_ld_library_path()
executable = lib_dir <> "/opc_ua_server"
port = open_port(executable, use_valgrind?())
state = %State{port: port, controlling_process: controlling_process}
{:ok, state}
end
# Handlers Lifecyle & Configuration Functions.
def handle_call({:config, {:get_server_config, nil}}, caller_info, state) do
call_port(state, :get_server_config, caller_info, nil)
{:noreply, state}
end
def handle_call({:config, {:set_default_server_config, nil}}, caller_info, state) do
call_port(state, :set_default_server_config, caller_info, nil)
{:noreply, state}
end
def handle_call({:config, {:set_basics, nil}}, caller_info, state) do
call_port(state, :set_basics, caller_info, nil)
{:noreply, state}
end
def handle_call({:config, {:network_tcp_layer, port}}, caller_info, state) do
call_port(state, :set_network_tcp_layer, caller_info, port)
{:noreply, state}
end
def handle_call({:config, {:hostname, hostname}}, caller_info, state) do
call_port(state, :set_hostname, caller_info, hostname)
{:noreply, state}
end
def handle_call({:config, {:port, port}}, caller_info, state) do
call_port(state, :set_port, caller_info, port)
{:noreply, state}
end
def handle_call({:config, {:users, users}}, caller_info, state) do
call_port(state, :set_users, caller_info, users)
{:noreply, state}
end
def handle_call({:config, {:add_all_endpoints, nil}}, caller_info, state) do
call_port(state, :add_all_endpoints, caller_info, nil)
{:noreply, state}
end
def handle_call({:start_server, nil}, caller_info, state) do
call_port(state, :start_server, caller_info, nil)
{:noreply, state}
end
def handle_call({:stop_server, nil}, caller_info, state) do
call_port(state, :stop_server, caller_info, nil)
{:noreply, state}
end
# Encryption.
def handle_call({:config, {:all_policies, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
pkey <- Keyword.fetch!(args, :private_key),
port <- Keyword.get(args, :port, nil),
certificate <- get_binary_data(cert),
private_key <- get_binary_data(pkey),
true <- is_binary(certificate),
true <- is_binary(private_key),
true <- is_integer(port) || is_nil(port) do
c_args = {port, certificate, private_key}
call_port(state, :set_config_with_security_policies, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:config, {:none_policy, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
certificate <- get_binary_data(cert),
true <- is_binary(certificate) do
call_port(state, :add_security_policy_none, caller_info, certificate)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:config, {:basic128rsa15_policy, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
pkey <- Keyword.fetch!(args, :private_key),
certificate <- get_binary_data(cert),
private_key <- get_binary_data(pkey),
true <- is_binary(certificate),
true <- is_binary(private_key) do
c_args = {certificate, private_key}
call_port(state, :add_security_policy_basic128rsa15, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:config, {:basic256_policy, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
pkey <- Keyword.fetch!(args, :private_key),
certificate <- get_binary_data(cert),
private_key <- get_binary_data(pkey),
true <- is_binary(certificate),
true <- is_binary(private_key) do
c_args = {certificate, private_key}
call_port(state, :add_security_policy_basic256, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:config, {:basic256sha256_policy, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
pkey <- Keyword.fetch!(args, :private_key),
certificate <- get_binary_data(cert),
private_key <- get_binary_data(pkey),
true <- is_binary(certificate),
true <- is_binary(private_key) do
c_args = {certificate, private_key}
call_port(state, :add_security_policy_basic256sha256, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:config, {:all_policies_no_endpoint, args}}, caller_info, state) do
with cert <- Keyword.fetch!(args, :certificate),
pkey <- Keyword.fetch!(args, :private_key),
certificate <- get_binary_data(cert),
private_key <- get_binary_data(pkey),
true <- is_binary(certificate),
true <- is_binary(private_key) do
c_args = {certificate, private_key}
call_port(state, :add_all_security_policies, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
# Discovery Functions.
def handle_call({:discovery, {:discovery_register, args}}, caller_info, state) do
application_uri = Keyword.fetch!(args, :application_uri)
server_name = Keyword.fetch!(args, :server_name)
endpoint = Keyword.fetch!(args, :endpoint)
timeout = Keyword.get(args, :timeout, nil)
c_args = {application_uri, server_name, endpoint, timeout}
call_port(state, :discovery_register, caller_info, c_args)
{:noreply, state}
end
def handle_call({:discovery, {:discovery_unregister, nil}}, caller_info, state) do
call_port(state, :discovery_unregister, caller_info, nil)
{:noreply, state}
end
def handle_call({:discovery, {application_uri, timeout}}, caller_info, state) do
c_args = {application_uri, timeout}
call_port(state, :set_lds_config, caller_info, c_args)
{:noreply, state}
end
# Handlers Add & Delete Functions.
def handle_call({:add, {:namespace, namespace}}, caller_info, state) do
call_port(state, :add_namespace, caller_info, namespace)
{:noreply, state}
end
def handle_call({:add, {:variable_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
type_definition = Keyword.fetch!(args, :type_definition) |> to_c()
c_args =
{requested_new_node_id, parent_node_id, reference_type_node_id, browse_name,
type_definition}
call_port(state, :add_variable_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:variable_type_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
type_definition = Keyword.fetch!(args, :type_definition) |> to_c()
c_args =
{requested_new_node_id, parent_node_id, reference_type_node_id, browse_name,
type_definition}
call_port(state, :add_variable_type_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:object_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
type_definition = Keyword.fetch!(args, :type_definition) |> to_c()
c_args =
{requested_new_node_id, parent_node_id, reference_type_node_id, browse_name,
type_definition}
call_port(state, :add_object_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:object_type_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
c_args = {requested_new_node_id, parent_node_id, reference_type_node_id, browse_name}
call_port(state, :add_object_type_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:view_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
c_args = {requested_new_node_id, parent_node_id, reference_type_node_id, browse_name}
call_port(state, :add_view_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:reference_type_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
c_args = {requested_new_node_id, parent_node_id, reference_type_node_id, browse_name}
call_port(state, :add_reference_type_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:data_type_node, args}}, caller_info, state) do
requested_new_node_id = Keyword.fetch!(args, :requested_new_node_id) |> to_c()
parent_node_id = Keyword.fetch!(args, :parent_node_id) |> to_c()
reference_type_node_id = Keyword.fetch!(args, :reference_type_node_id) |> to_c()
browse_name = Keyword.fetch!(args, :browse_name) |> to_c()
c_args = {requested_new_node_id, parent_node_id, reference_type_node_id, browse_name}
call_port(state, :add_data_type_node, caller_info, c_args)
{:noreply, state}
end
def handle_call({:add, {:reference, args}}, caller_info, state) do
source_id = Keyword.fetch!(args, :source_id) |> to_c()
reference_type_id = Keyword.fetch!(args, :reference_type_id) |> to_c()
target_id = Keyword.fetch!(args, :target_id) |> to_c()
is_forward = Keyword.fetch!(args, :is_forward)
c_args = {source_id, reference_type_id, target_id, is_forward}
call_port(state, :add_reference, caller_info, c_args)
{:noreply, state}
end
def handle_call({:delete_reference, args}, caller_info, state) do
source_id = Keyword.fetch!(args, :source_id) |> to_c()
reference_type_id = Keyword.fetch!(args, :reference_type_id) |> to_c()
target_id = Keyword.fetch!(args, :target_id) |> to_c()
is_forward = Keyword.fetch!(args, :is_forward)
delete_bidirectional = Keyword.fetch!(args, :delete_bidirectional)
c_args = {source_id, reference_type_id, target_id, is_forward, delete_bidirectional}
call_port(state, :delete_reference, caller_info, c_args)
{:noreply, state}
end
def handle_call({:delete_node, args}, caller_info, state) do
node_id = Keyword.fetch!(args, :node_id) |> to_c()
delete_reference = Keyword.fetch!(args, :delete_reference)
c_args = {node_id, delete_reference}
call_port(state, :delete_node, caller_info, c_args)
{:noreply, state}
end
# Add/delete Monitored Items function
def handle_call({:add, {:monitored_item, args}}, caller_info, state) do
with monitored_item <- Keyword.fetch!(args, :monitored_item) |> to_c(),
sampling_time <- Keyword.fetch!(args, :sampling_time),
true <- is_float(sampling_time) do
c_args = {monitored_item, sampling_time}
call_port(state, :add_monitored_item, caller_info, c_args)
{:noreply, state}
else
_ ->
{:reply, {:error, :einval}, state}
end
end
def handle_call({:delete_monitored_item, monitored_item_id}, caller_info, state) do
call_port(state, :delete_monitored_item, caller_info, monitored_item_id)
{:noreply, state}
end
# Catch all
def handle_call({:test, nil}, caller_info, state) do
call_port(state, :test, caller_info, nil)
{:noreply, state}
end
def handle_call(invalid_call, _caller_info, state) do
Logger.error("#{__MODULE__} Invalid call: #{inspect(invalid_call)}")
{:reply, {:error, :einval}, state}
end
def handle_info({_port, {:exit_status, code}}, state) do
Logger.warn("(#{__MODULE__}) Error code: #{inspect(code)}.")
# retrying delay
Process.sleep(@c_timeout)
{:stop, :restart, state}
end
def handle_info({:EXIT, _port, reason}, state) do
Logger.debug("(#{__MODULE__}) Exit reason: #{inspect(reason)}")
# retrying delay
Process.sleep(@c_timeout)
{:stop, :restart, state}
end
def handle_info(msg, state) do
Logger.warn("(#{__MODULE__}) Unhandled message: #{inspect(msg)}.")
{:noreply, state}
end
defp handle_c_response(
{:write, {ns_index, type, name}, c_value},
%{controlling_process: c_pid} = state
) do
variable_node = NodeId.new(ns_index: ns_index, identifier_type: type, identifier: name)
value = parse_c_value(c_value)
send(c_pid, {variable_node, value})
state
end
defp handle_c_response({:test, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
# C Handlers Lifecyle & Configuration Functions.
defp handle_c_response({:get_server_config, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_default_server_config, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_basics, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_network_tcp_layer, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_hostname, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_port, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:set_users, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_all_endpoints, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:start_server, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:stop_server, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
# C Encryption function.
defp handle_c_response({:set_config_with_security_policies, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_security_policy_none, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_security_policy_basic128rsa15, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_security_policy_basic256, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_security_policy_basic256sha256, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_all_security_policies, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
# C Handlers Add & Delete Functions.
defp handle_c_response({:add_namespace, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_variable_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_variable_type_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_object_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_object_type_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_view_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_reference_type_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_data_type_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:add_reference, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:delete_reference, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:delete_node, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
# C Handlers "Discovery".
defp handle_c_response({:set_lds_config, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:discovery_register, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:discovery_unregister, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
# Add Monitored Items function
defp handle_c_response({:add_monitored_item, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
defp handle_c_response({:delete_monitored_item, caller_metadata, data}, state) do
GenServer.reply(caller_metadata, data)
state
end
end
|
lib/opc_ua/server.ex
| 0.809351
| 0.72657
|
server.ex
|
starcoder
|
defmodule Uplink.Monitors.Ecto do
use Uplink.Monitor
@default_buckets [5, 10, 20, 50, 100, 200, 500, 1000, 1500, 2000, 5000, 10000]
@moduledoc """
Ecto definitions. Include these if using Ecto.
Keep the prefix consistent among repos. The repo name is captured as a tag
from the metadata for reporting purposes and can be segregated as such in Grafana.
## Options
* `:buckets` - Buckets override. Default: #{inspect(@default_buckets)}
* `:query_time_warn_threshold` - Slow Ecto query warning threshold. Time in ms. Default: 100
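For example, options might be passed as follows (the values are illustrative; `:repo_prefix`
is additionally required by `init/1` and `metric_definitions/1`):
    [repo_prefix: :my_app, buckets: [10, 100, 1000], query_time_warn_threshold: 250]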
## Definitions
* `ecto.queue.duration.ms` - Ecto queue duration - Time spent waiting to check out a database connection
* Type: `Telemetry.Metrics.Distribution.t()`
* Tags: [:repo]
* Buckets: #{inspect(@default_buckets)}
* `ecto.query.duration.ms` - Ecto query duration - Time spent executing the query
* Type: `Telemetry.Metrics.Distribution.t()`
* Tags: [:repo]
* Buckets: #{inspect(@default_buckets)}
* `ecto.decode.duration.ms` - Ecto decode duration - Time spent decoding the data received from the database
* Type: `Telemetry.Metrics.Distribution.t()`
* Tags: [:repo]
* Buckets: #{inspect(@default_buckets)}
* `ecto.idle.duration.ms` - Ecto idle duration - Time the connection spent waiting before being checked out for the query
* Type: `Telemetry.Metrics.Distribution.t()`
* Tags: [:repo]
* Buckets: #{inspect(@default_buckets)}
* `ecto.total.duration.ms` - Ecto total duration - Sum of all the measurements
* Type: `Telemetry.Metrics.Distribution.t()`
* Tags: [:repo]
* Buckets: #{inspect(@default_buckets)}
"""
require Logger
import Telemetry.Metrics, only: [distribution: 2]
@impl true
def init(opts) do
prefix = Keyword.fetch!(opts, :repo_prefix)
attach_events(prefix, opts)
end
def handle_event(_, nil, _, _), do: :ok
def handle_event([_repo, :repo, :query], measures, meta, config) do
measurements =
measures
|> Enum.into(%{})
|> Map.take([:decode_time, :query_time, :idle_time, :queue_time, :total_time])
|> Enum.reject(&is_nil(elem(&1, 1)))
|> Enum.into(%{}, fn {k, v} ->
{k, System.convert_time_unit(v, :native, :millisecond)}
end)
# :total_time may be absent if it came through as nil; default to 0 so the warning is skipped
if Map.get(measurements, :total_time, 0) > config.threshold do
query_data = %{
title: "Slow Ecto Query",
query: meta.query,
repo: meta.repo,
source: meta.source,
data_source: meta.source
}
_ =
Map.merge(query_data, measurements)
|> Jason.encode!()
|> Logger.warn()
:ok
end
:ok
end
defp default_options do
[
buckets: @default_buckets,
query_time_warn_threshold: 100
]
end
defp attach_events(prefix, opts) do
final_opts = Keyword.merge(default_options(), opts)
threshold = Keyword.fetch!(final_opts, :query_time_warn_threshold)
:telemetry.attach(
"ecto_#{prefix}_slow_query_handler",
[prefix, :repo, :query],
&__MODULE__.handle_event/4,
%{threshold: threshold}
)
end
@impl true
def metric_definitions(opts) do
prefix = Keyword.fetch!(opts, :repo_prefix)
final_opts = Keyword.merge(default_options(), opts)
buckets = Keyword.fetch!(final_opts, :buckets)
[
distribution("ecto.queue.duration.ms",
event_name: [prefix, :repo, :query],
measurement: :queue_time,
unit: {:native, :millisecond},
reporter_options: [buckets: buckets],
description:
"Ecto queue duration - Time spent waiting to check out a database connection",
tags: [:repo]
),
distribution("ecto.query.duration.ms",
event_name: [prefix, :repo, :query],
measurement: :query_time,
unit: {:native, :millisecond},
reporter_options: [buckets: buckets],
description: "Ecto query duration - Time spent executing the query",
tags: [:repo]
),
distribution("ecto.decode.duration.ms",
event_name: [prefix, :repo, :query],
measurement: :decode_time,
unit: {:native, :millisecond},
reporter_options: [buckets: buckets],
description:
"Ecto decode duration - Time spent decoding the data received from the database",
tags: [:repo]
),
distribution("ecto.idle.duration.ms",
event_name: [prefix, :repo, :query],
measurement: :idle_time,
unit: {:native, :millisecond},
reporter_options: [buckets: buckets],
description:
"Ecto idle duration - Time the connection spent waiting before being checked out for the query",
tags: [:repo]
),
distribution("ecto.total.duration.ms",
event_name: [prefix, :repo, :query],
measurement: :total_time,
unit: {:native, :millisecond},
reporter_options: [buckets: buckets],
description: "Ecto query duration - Sum of all the measurements",
tags: [:repo]
)
]
end
end
|
monitors/ecto.ex
| 0.84941
| 0.582758
|
ecto.ex
|
starcoder
|
defmodule Blockchain.Block.Validation do
@moduledoc """
This module provides functions to validate a block.
"""
alias Blockchain.Block
@doc """
Determines whether or not a block is valid. This is
defined in Eq.(29) of the Yellow Paper.
Note, this is a seriously intensive operation, and not for the
faint of heart (since we need to run all transactions
in the block to validate the block).
## Examples
iex> db = MerklePatriciaTree.Test.random_ets_db()
iex> chain = Blockchain.Test.ropsten_chain()
iex> beneficiary = <<0x05::160>>
iex> private_key = <<1::256>>
iex> sender = <<126, 95, 69, 82, 9, 26, 105, 18, 93, 93, 252, 183, 184, 194, 101, 144, 41, 57, 91, 223>> # based on simple private key
iex> machine_code = EVM.MachineCode.compile([:push1, 3, :push1, 5, :add, :push1, 0x00, :mstore, :push1, 0, :push1, 32, :return])
iex> trx = %Blockchain.Transaction{nonce: 5, gas_price: 3, gas_limit: 100_000, to: <<>>, value: 5, init: machine_code}
...> |> Blockchain.Transaction.Signature.sign_transaction(private_key)
iex> state = MerklePatriciaTree.Trie.new(db)
...> |> Blockchain.Account.put_account(sender, %Blockchain.Account{balance: 400_000, nonce: 5})
iex> parent_block = %Blockchain.Block{header: %EVM.Block.Header{number: 50, state_root: state.root_hash, difficulty: 50_000, timestamp: 9999, gas_limit: 125_001}}
iex> block = Blockchain.Block.gen_child_block(parent_block, chain, beneficiary: beneficiary, timestamp: 10000, gas_limit: 125_001)
...> |> Blockchain.Block.add_transactions_to_block([trx], db)
...> |> Blockchain.Block.add_rewards_to_block(db)
iex> Blockchain.Block.Validation.is_holistic_valid?(block, chain, parent_block, db)
:valid
iex> db = MerklePatriciaTree.Test.random_ets_db()
iex> chain = Blockchain.Test.ropsten_chain()
iex> beneficiary = <<0x05::160>>
iex> private_key = <<1::256>>
iex> sender = <<126, 95, 69, 82, 9, 26, 105, 18, 93, 93, 252, 183, 184, 194, 101, 144, 41, 57, 91, 223>> # based on simple private key
iex> machine_code = EVM.MachineCode.compile([:push1, 3, :push1, 5, :add, :push1, 0x00, :mstore, :push1, 0, :push1, 32, :return])
iex> trx = %Blockchain.Transaction{nonce: 5, gas_price: 3, gas_limit: 100_000, to: <<>>, value: 5, init: machine_code}
...> |> Blockchain.Transaction.Signature.sign_transaction(private_key)
iex> state = MerklePatriciaTree.Trie.new(db)
...> |> Blockchain.Account.put_account(sender, %Blockchain.Account{balance: 400_000, nonce: 5})
iex> parent_block = %Blockchain.Block{header: %EVM.Block.Header{number: 50, state_root: state.root_hash, difficulty: 50_000, timestamp: 9999, gas_limit: 125_001}}
iex> block = Blockchain.Block.gen_child_block(parent_block, chain, beneficiary: beneficiary, timestamp: 10000, gas_limit: 125_001)
...> |> Blockchain.Block.add_transactions_to_block([trx], db)
iex> %{block | header: %{block.header | state_root: <<1,2,3>>, ommers_hash: <<2,3,4>>, transactions_root: <<3,4,5>>, receipts_root: <<4,5,6>>}}
...> |> Blockchain.Block.Validation.is_holistic_valid?(chain, parent_block, db)
{:invalid, [:state_root_mismatch, :ommers_hash_mismatch, :transactions_root_mismatch, :receipts_root_mismatch]}
"""
@spec is_holistic_valid?(Block.t(), Chain.t(), Block.t() | nil, DB.db()) ::
:valid | {:invalid, [atom()]}
def is_holistic_valid?(block, chain, parent_block, db) do
base_block =
if is_nil(parent_block) do
Block.gen_genesis_block(chain, db)
else
Block.gen_child_block(
parent_block,
chain,
beneficiary: block.header.beneficiary,
timestamp: block.header.timestamp,
gas_limit: block.header.gas_limit,
extra_data: block.header.extra_data
)
end
child_block =
base_block
|> Block.add_transactions_to_block(block.transactions, db)
|> Block.add_ommers_to_block(block.ommers)
|> Block.add_rewards_to_block(db, chain.params[:block_reward])
# The following checks Holistic Validity, as defined in Eq.(29).
# Each recomputed header field must match the block under validation;
# every failing check is collected so callers see all mismatches at once.
checks = [
  state_root_mismatch: child_block.header.state_root == block.header.state_root,
  ommers_hash_mismatch: child_block.header.ommers_hash == block.header.ommers_hash,
  transactions_root_mismatch:
    child_block.header.transactions_root == block.header.transactions_root,
  receipts_root_mismatch: child_block.header.receipts_root == block.header.receipts_root
]
errors = for {error, false} <- checks, do: error
if errors == [], do: :valid, else: {:invalid, errors}
end
end
|
apps/blockchain/lib/blockchain/validation.ex
| 0.798815
| 0.462959
|
validation.ex
|
starcoder
|
defmodule Mentat do
@external_resource readme = "README.md"
@moduledoc readme
|> File.read!()
|> String.split("<!--MDOC !-->")
|> Enum.fetch!(1)
use Supervisor
use Oath
@type cache_opts() :: Keyword.t()
@type name :: atom()
@type key :: term()
@type value :: term()
@type put_opts :: [
{:ttl, pos_integer() | :infinity},
]
@default_limit %{reclaim: 0.1}
alias Mentat.Janitor
defp cache_opts do
import Norm
coll_of(
one_of([
{:name, spec(is_atom)},
{:cleanup_interval, spec(is_integer and & &1 > 0)},
{:ets_args, spec(is_list)},
{:ttl, one_of([spec(is_integer and & &1 > 0), :infinity])},
{:clock, spec(is_atom)},
{:limit, coll_of(one_of([
{:size, spec(is_integer and & &1 > 0)},
{:reclaim, spec(is_float)},
]))}
])
)
end
@doc false
def child_spec(opts) do
name = opts[:name] || raise ArgumentError, ":name is required"
%{
id: name,
type: :supervisor,
start: {__MODULE__, :start_link, [opts]},
}
end
@doc """
Starts a new cache.
Options:
* `:name` - the cache name as an atom. required.
* `:cleanup_interval` - How often the janitor process will remove old keys. Defaults to 5_000.
* `:ets_args` - Additional arguments to pass to `:ets.new/2`.
* `:ttl` - The default ttl for all keys. Default `:infinity`.
* `:limit` - Limits the number of keys a cache will store. Defaults to `:none`.
* `:size` - The maximum number of values to store in the cache.
* `:reclaim` - The percentage of keys to reclaim if the limit is exceeded. Defaults to 0.1.
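## Example
A minimal sketch (the values are illustrative):
```
Mentat.start_link(name: :cache, ttl: 60_000, limit: [size: 10_000, reclaim: 0.2])
```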
"""
@spec start_link(cache_opts()) :: Supervisor.on_start()
def start_link(args) do
args = Norm.conform!(args, cache_opts())
name = args[:name]
Supervisor.start_link(__MODULE__, args, name: name)
end
@doc """
Fetches a value or executes the fallback function. The function can return
either `{:commit, term()}` or `{:ignore, term()}`. If `{:commit, term()}` is
returned, the value will be stored in the cache before it's returned. See the
"TTLs" section for a list of options.
## Example
```
Mentat.fetch(:cache, user_id, fn user_id ->
case get_user(user_id) do
{:ok, user} ->
{:commit, user}
error ->
{:ignore, error}
end
end)
```
"""
@spec fetch(name(), key(), put_opts(), (key() -> {:commit, value()} | {:ignore, value()})) :: value()
def fetch(cache, key, opts \\ [], fallback) do
with nil <- get(cache, key) do
case fallback.(key) do
{:commit, value} ->
put(cache, key, value, opts)
value
{:ignore, value} ->
value
end
end
end
@doc """
Retrieves a value from the cache. Returns `nil` if the key is not found.
"""
@spec get(name(), key()) :: value()
def get(cache, key) do
config = get_config(cache)
now = ms_time(config.clock)
case :ets.lookup(cache, key) do
[] ->
:telemetry.execute([:mentat, :get], %{status: :miss}, %{key: key, cache: cache})
nil
[{^key, _val, ts, ttl}] when is_integer(ttl) and ts + ttl <= now ->
:telemetry.execute([:mentat, :get], %{status: :miss}, %{key: key, cache: cache})
nil
[{^key, val, _ts, _expire_at}] ->
:telemetry.execute([:mentat, :get], %{status: :hit}, %{key: key, cache: cache})
val
end
end
@doc """
Puts a new key into the cache. See the "TTLs" section for a list of
options.
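## Example
A minimal sketch (the cache name, key, and TTL are illustrative):
```
Mentat.put(:cache, :greeting, "hello", ttl: 5_000)
```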
"""
@spec put(name(), key(), value(), put_opts()) :: value() | no_return()
@decorate pre("ttls are positive", fn _, _, _, opts ->
if opts[:ttl], do: opts[:ttl] > 0, else: true
end)
@decorate post("value is returned", fn _, _, value, _, return ->
value == return
end)
def put(cache, key, value, opts \\ [])
def put(cache, key, value, opts) do
config = get_config(cache)
:telemetry.execute([:mentat, :put], %{}, %{key: key, cache: cache})
now = ms_time(config.clock)
ttl = opts[:ttl] || config.ttl
if is_integer(ttl) and ttl < 1 do
raise ArgumentError, "`:ttl` must be greater than 0"
end
true = :ets.insert(cache, {key, value, now, ttl})
# If we've reached the limit on the table, we need to purge a number of old
# keys. We do this by calling the janitor process and telling it to purge.
# This will, in turn, call immediately back into the remove_oldest function.
# The back and forth here is confusing to follow, but it's necessary because
# we want to do the purging in a different process.
if config.limit != :none && :ets.info(cache, :size) > config.limit.size do
count = ceil(config.limit.size * config.limit.reclaim)
Janitor.reclaim(janitor(cache), count)
end
value
end
@doc """
Updates a key's inserted-at time. This is useful in conjunction with limits
when you want to evict the oldest keys. Returns `true` if the key was found
and `false` if it was not.
"""
@spec touch(name(), key()) :: boolean()
def touch(cache, key) do
config = get_config(cache)
now = ms_time(config.clock)
:ets.update_element(cache, key, {3, now})
end
@doc """
Deletes a key from the cache
"""
@spec delete(name(), key()) :: true
def delete(cache, key) do
:ets.delete(cache, key)
end
@doc """
Returns a list of all keys. By default this function only returns keys
that have not exceeded their TTL. You can pass the `all: true` option to the function
in order to return all present keys, which may include keys that have exceeded
their TTL but have not been purged yet.
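## Example
A minimal sketch (the cache name is illustrative):
```
Mentat.keys(:cache)            # keys whose TTL has not expired
Mentat.keys(:cache, all: true) # every stored key, including expired but unpurged ones
```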
"""
@spec keys(name()) :: [key()]
def keys(cache, opts \\ []) do
ms = if opts[:all] == true do
[{{:"$1", :_, :_, :_}, [], [:"$1"]}]
else
config = get_config(cache)
now = ms_time(config.clock)
[
{{:"$1", :_, :"$2", :"$3"},
[
{:orelse,
{:andalso, {:is_integer, :"$3"}, {:>, {:+, :"$2", :"$3"}, now}},
{:==, :"$3", :infinity}}
], [:"$1"]}
]
end
:ets.select(cache, ms)
end
@doc """
Removes all keys from the cache.
"""
@spec purge(name()) :: true
def purge(cache) do
:ets.delete_all_objects(cache)
end
@doc false
def remove_expired(cache) do
config = get_config(cache)
now = ms_time(config.clock)
# Find all expired keys by selecting the timestamp and ttl, adding them together
# and finding the keys that are lower than the current time
ms = [
{{:_, :_, :"$1", :"$2"},
[{:andalso, {:is_integer, :"$2"}, {:<, {:+, :"$1", :"$2"}, now}}], [true]}
]
:ets.select_delete(cache, ms)
end
@doc false
def remove_oldest(cache, count) do
ms = [{{:_, :_, :"$1", :_}, [], [:"$1"]}]
entries = :ets.select(cache, ms)
oldest =
entries
|> Enum.sort()
|> Enum.take(count)
|> List.last()
delete_ms = [{{:_, :_, :"$1", :_}, [{:"=<", :"$1", oldest}], [true]}]
:ets.select_delete(cache, delete_ms)
end
def init(args) do
name = args[:name]
interval = args[:cleanup_interval] || 5_000
limit = args[:limit] || :none
limit = if limit != :none, do: Map.merge(@default_limit, Map.new(limit)), else: limit
ets_args = args[:ets_args] || []
clock = args[:clock] || System
ttl = args[:ttl] || :infinity
^name = :ets.new(name, [:set, :named_table, :public] ++ ets_args)
put_config(name, %{limit: limit, ttl: ttl, clock: clock})
janitor_opts = [
name: janitor(name),
interval: interval,
cache: name
]
children = [
{Mentat.Janitor, janitor_opts}
]
Supervisor.init(children, strategy: :one_for_one)
end
def stop(name) do
Supervisor.stop(name)
end
defp put_config(cache, config) do
:persistent_term.put({__MODULE__, cache}, config)
end
defp get_config(cache) do
:persistent_term.get({__MODULE__, cache})
end
defp ms_time(clock) do
# Clock is going `System` in most cases and is set inside the init function
clock.monotonic_time(:millisecond)
end
defp janitor(name) do
:"#{name}_janitor"
end
end
|
lib/mentat.ex
| 0.810966
| 0.630002
|
mentat.ex
|
starcoder
|
defmodule PassiveSupport.Map do
@moduledoc """
Convenience functions for working with maps.
"""
alias PassiveSupport, as: Ps
@type key :: any
@doc ~S"""
Returns a new map with `key` replaced by `new_key` or the return of `fun`
If `key` is not found within `map`, returns the `map` unaltered.
Useful for when you're shuffling values around inside of a map,
or, I dunno, going through your music collection and you discover
you accidentally attributed an entire Beatles album to the Monkees.
Although how you did that is beyond me. You monster.
## Examples
iex> change_key(%{dog: "rusty"}, :dog, :cat)
%{cat: "rusty"}
iex> change_key(%{dog: "rusty"}, :dog, &(:"best_#{&1}"))
%{best_dog: "rusty"}
iex> change_key(%{1 => "foo", 2 => "bar", 5 => "idklol"}, 1, fn _key, map -> Enum.max(Map.keys(map)) + 1 end)
%{2 => "bar", 5 => "idklol", 6 => "foo"}
iex> change_key(%{cake_boss: "you blew it"}, :no_key_here, :oops)
%{cake_boss: "you blew it"}
"""
@spec change_key(map, key, key | (key -> key) | ((key, map) -> key)) :: map
def change_key(map, key, fun) when is_map_key(map, key) and is_function(fun, 1) do
{value, map} = Map.pop(map, key)
put_in(map[fun.(key)], value)
end
def change_key(map, key, fun) when is_map_key(map, key) and is_function(fun, 2) do
{value, map} = Map.pop(map, key)
put_in(map[fun.(key, map)], value)
end
def change_key(map, key, new_key) when is_map_key(map, key) do
{value, map} = Map.pop(map, key)
put_in(map[new_key], value)
end
def change_key(map, _, _), do: map
@doc ~S"""
Returns a version of `map` whose keys have all been
converted to strings.
## Examples
iex> stringify_keys(%{7 => 8, "hey" => "ha", hi: "ho"})
%{"7" => 8, "hey" => "ha", "hi" => "ho"}
"""
def stringify_keys(%{} = map),
do: map
|> Stream.map(fn {key, value} -> {to_string(key), value} end)
|> Enum.into(%{})
@doc ~S"""
Returns `map` with its keys as atoms, if those atoms already exist.
Raises `ArgumentError` otherwise.
## Examples
iex> [:oh, :yay] # existing keys
iex> atomize_keys!(%{"oh" => "ooh", 'yay' => 'yaaay'})
%{oh: "ooh", yay: 'yaaay'}
iex> atomize_keys!(%{"oh" => "ooh", "noo" => "noooo"})
** (PassiveSupport.NonexistentAtoms) No atoms exist for the values ["noo"]
"""
def atomize_keys!(%{} = map) do
{atoms, errors} = map
|> Stream.map(fn {key, value} ->
case key |> to_string |> Ps.String.safe_existing_atom do
{:ok, atom} -> {atom, value}
:error -> key
end
end)
|> Enum.split_with(fn atom? -> is_tuple(atom?) end)
case errors do
[] -> atoms |> Enum.into(%{})
_ -> raise(PassiveSupport.NonexistentAtoms, expected: errors)
end
end
@doc ~S"""
Returns a `map` with any string keys safely coerced to existing atoms; keys with no existing atom are dropped.
I'm not giving y'all the ridiculously dangerous footgun of
wanton atom space pollution. I'm not some crazy, foot-shooting
cowboy, like `Jason`.
## Examples
iex> [:oh, :yay] # existing keys
iex> safe_atomize_keys(%{"oh" => "ooh", 'yay' => 'yaaay'})
%{oh: "ooh", yay: 'yaaay'}
iex> safe_atomize_keys(%{"oh" => "ooh", "noo" => "noooo"})
%{oh: "ooh"}
"""
def safe_atomize_keys(%{} = map),
do: map
|> Stream.map(fn {key, value} -> {to_string(key), value} end)
|> Stream.filter(fn {key, _} -> PassiveSupport.Atom.exists?(key) end)
|> Stream.map(fn {key, value} -> {String.to_existing_atom(key), value} end)
|> Enum.into(%{})
@doc """
Returns a copy of `map` containing only `keys` and raises
if any are missing.
If `keys` are not provided, returns `map` unchanged.
## Examples
iex> take!(%{a: "foo", b: 42, c: :ok}, [:b, :c])
%{b: 42, c: :ok}
iex> take!(%{a: "foo", b: 42})
%{a: "foo", b: 42}
iex> take!(%{"a" => "foo", "b" => 42, "c" => :ok}, ["c", "e"])
** (PassiveSupport.KeysNotFound) Expected to find keys ["c", "e"] but only found keys ["c"]
"""
@spec take!(map, list) :: map
def take!(map, keys \\ [])
def take!(map, []), do: map
def take!(map, keys) when is_list(keys) do
keys
|> Enum.filter(&Map.has_key?(map, &1))
|> Ps.Item.tee(&unless(&1 == keys,
do: raise(PassiveSupport.KeysNotFound, expected: keys, actual: &1)
))
Map.take(map, keys)
end
@doc ~S"""
Deletes the struct metadata from structs, but leaves ordinary maps unchanged
## Examples
PassiveSupport.Map.plain(%{foo: :bar})
# => %{foo: :bar}
defmodule Plane do
defstruct plains: :great # ... little plain punning for ya.
end
PassiveSupport.Map.plain(%Plane{})
# => %{plains: :great}
"""
@spec plain(struct | map) :: map
def plain(struct) when is_struct(struct), do: Map.delete(struct, :__struct__)
def plain(%{} = map), do: map
end
|
lib/passive_support/base/map.ex
| 0.840455
| 0.538619
|
map.ex
|
starcoder
|
defmodule Phoenix.LiveDashboard.Router do
@moduledoc """
Provides LiveView routing for LiveDashboard.
"""
@doc """
Defines a LiveDashboard route.
It expects the `path` the dashboard will be mounted at
and a set of options.
## Options
* `:metrics` - Configures the module to retrieve metrics from.
It can be a `module` or a `{module, function}`. If nothing is
given, the metrics functionality will be disabled.
* `:env_keys` - Configures environment variables to display.
It is defined as a list of string keys. If not set, the environment
information will not be displayed.
* `:live_socket_path` - Configures the socket path. It must match
the `socket "/live", Phoenix.LiveView.Socket` in your endpoint.
* `:metrics_history` - Configures a callback for retrieving metric history.
It must be an "MFA" tuple of `{Module, :function, arguments}` such as
metrics_history: {MyStorage, :metrics_history, []}
If not set, metrics will start out empty/blank and only display
data that occurs while the browser page is open.
## Examples
defmodule MyAppWeb.Router do
use Phoenix.Router
import Phoenix.LiveDashboard.Router
scope "/", MyAppWeb do
pipe_through [:browser]
live_dashboard "/dashboard",
metrics: {MyAppWeb.Telemetry, :metrics},
env_keys: ["APP_USER", "VERSION"],
metrics_history: {MyStorage, :metrics_history, []}
end
end
"""
defmacro live_dashboard(path, opts \\ []) do
quote bind_quoted: binding() do
scope path, alias: false, as: false do
import Phoenix.LiveView.Router, only: [live: 4]
opts = Phoenix.LiveDashboard.Router.__options__(opts)
live "/", Phoenix.LiveDashboard.PageLive, :home, opts ++ [page: "home", node: node()]
# Catch-all for URL generation
live "/:node/:page", Phoenix.LiveDashboard.PageLive, :page, opts
end
end
end
@doc false
def __options__(options) do
live_socket_path = Keyword.get(options, :live_socket_path, "/live")
metrics =
case options[:metrics] do
nil ->
nil
mod when is_atom(mod) ->
{mod, :metrics}
{mod, fun} when is_atom(mod) and is_atom(fun) ->
{mod, fun}
other ->
raise ArgumentError,
":metrics must be a tuple with {Mod, fun}, " <>
"such as {MyAppWeb.Telemetry, :metrics}, got: #{inspect(other)}"
end
env_keys =
case options[:env_keys] do
nil ->
nil
keys when is_list(keys) ->
keys
other ->
raise ArgumentError,
":env_keys must be a list of strings, got: " <> inspect(other)
end
metrics_history =
case options[:metrics_history] do
nil ->
nil
{module, function, args}
when is_atom(module) and is_atom(function) and is_list(args) ->
{module, function, args}
other ->
raise ArgumentError,
":metrics_history must be a tuple of {module, function, args}, got: " <>
inspect(other)
end
[
session: {__MODULE__, :__session__, [metrics, env_keys, metrics_history]},
private: %{live_socket_path: live_socket_path},
layout: {Phoenix.LiveDashboard.LayoutView, :dash},
as: :live_dashboard
]
end
@doc false
def __session__(conn, metrics, env_keys, metrics_history) do
metrics_session = %{
"metrics" => metrics,
"metrics_history" => metrics_history
}
request_logger_session = %{
"request_logger" => Phoenix.LiveDashboard.RequestLogger.param_key(conn)
}
%{
"applications" => {Phoenix.LiveDashboard.ApplicationsPage, %{}},
"ets" => {Phoenix.LiveDashboard.EtsPage, %{}},
"home" => {Phoenix.LiveDashboard.HomePage, %{"env_keys" => env_keys}},
"metrics" => {Phoenix.LiveDashboard.MetricsPage, metrics_session},
"os_mon" => {Phoenix.LiveDashboard.OSMonPage, %{}},
"ports" => {Phoenix.LiveDashboard.PortsPage, %{}},
"processes" => {Phoenix.LiveDashboard.ProcessesPage, %{}},
"request_logger" => {Phoenix.LiveDashboard.RequestLoggerPage, request_logger_session},
"sockets" => {Phoenix.LiveDashboard.SocketsPage, %{}}
}
end
end
|
lib/phoenix/live_dashboard/router.ex
| 0.85987
| 0.479808
|
router.ex
|
starcoder
|
defmodule RingBuffer do
@moduledoc """
RingBuffer provides an Elixir ring buffer implementation based on Erlang :queue.
There are other fine Elixir libraries providing implementations of
ring or circular buffers. This one provides a wanted feature that the
others did not provide, namely that the item that was evicted due to
a `put/2` call is available for inspection after completion of the `put/2` call.
In this implementation, `put/2` returns the new RingBuffer to preserve the
ability to build pipelines, and the item that was evicted as the result of
the last call to `put/2`, if any, is available in the field `:evicted`.
If the `:size` of the buffer at the time of the last call to `put/2` was less
than the configured `:max_size`, then :evicted will be `nil` after the call
to `put/2` since adding the new item did not require evicting another item.
A call to `take/1` will cause `:evicted` to be set to `nil`.
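
A short illustrative session (the values are arbitrary; note how `:a` is
evicted once the buffer is full):

    iex> RingBuffer.new(2)
    ...> |> RingBuffer.put(:a)
    ...> |> RingBuffer.put(:b)
    ...> |> RingBuffer.put(:c)
    %RingBuffer{queue: {[:c], [:b]}, max_size: 2, size: 2, evicted: :a}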
"""
alias __MODULE__
@typedoc """
Type that represents RingBuffer struct with `:max_size` as integer,
`:size` as integer, `:queue` as tuple and `:evicted` as any
"""
@type t :: %RingBuffer{max_size: integer, size: integer, queue: tuple, evicted: any}
defstruct [:max_size, :size, :queue, :evicted]
@doc """
Creates a new RingBuffer struct.
## Parameters
- max_size: the max number of items in the buffer.
## Examples
iex> RingBuffer.new(10)
%RingBuffer{queue: {[], []}, max_size: 10, size: 0, evicted: nil}
"""
@spec new(max_size :: integer) :: t()
def new(max_size) when is_integer(max_size) and max_size > 0 do
%RingBuffer{max_size: max_size, size: 0, queue: :queue.new(), evicted: nil}
end
@doc """
Returns true if buffer contains no items.
## Parameters
- buffer: the RingBuffer whose emptiness is to be tested
## Examples
iex> RingBuffer.new(5)
...> |> RingBuffer.empty?()
true
iex> RingBuffer.new(4)
...> |> RingBuffer.put("red")
...> |> RingBuffer.empty?()
false
"""
@spec empty?(buffer :: t()) :: boolean
def empty?(%RingBuffer{} = buffer) do
:queue.is_empty(buffer.queue)
end
@doc """
Adds the item to the buffer. When buffer :size is less than :max_size,
the item will be added, the buffer :size will be incremented by one
and :evicted will be nil. When buffer :size is equal to :max_size,
the item will be added, the buffer :size will remain at :max_size,
and the oldest item in the buffer preceding the call to put/2 will
be bound to :evicted.
## Parameters
- buffer: the RingBuffer to which item is to be added
- item: the item to add to buffer
## Examples
iex> RingBuffer.new(8)
...> |> RingBuffer.put("elixir")
...> |> RingBuffer.put("is")
...> |> RingBuffer.put("the")
...> |> RingBuffer.put("best")
%RingBuffer{queue: {["best", "the", "is"], ["elixir"]}, max_size: 8, size: 4, evicted: nil}
iex> RingBuffer.new(3)
...> |> RingBuffer.put("elixir")
...> |> RingBuffer.put("is")
...> |> RingBuffer.put("the")
...> |> RingBuffer.put("best")
%RingBuffer{queue: {["best", "the"], ["is"]}, max_size: 3, size: 3, evicted: "elixir"}
"""
@spec put(buffer :: t(), item :: any) :: t()
def put(%RingBuffer{} = buffer, item) when buffer.size < buffer.max_size do
new_queue = :queue.in(item, buffer.queue)
new_size = buffer.size + 1
%RingBuffer{buffer | queue: new_queue, size: new_size, evicted: nil}
end
def put(%RingBuffer{} = buffer, item) when buffer.size == buffer.max_size do
{{:value, evicted}, new_queue} = :queue.out(buffer.queue)
new_queue = :queue.in(item, new_queue)
%RingBuffer{buffer | queue: new_queue, evicted: evicted}
end
@doc """
Takes the oldest item from the buffer, returning a 2-element tuple
containing the taken item and the new buffer. For a non-empty buffer,
the taken item will be non-nil and the :size of the buffer will be
decremented by one. For an empty buffer, the taken item will be nil
and the :size of the buffer will continue to be zero.
## Parameters
- buffer: the RingBuffer from which an item is to be taken
## Examples
iex> RingBuffer.new(3)
...> |> RingBuffer.put("elixir")
...> |> RingBuffer.put("is")
...> |> RingBuffer.put("the")
...> |> RingBuffer.put("best")
...> |> RingBuffer.take()
{"is", %RingBuffer{queue: {["best"], ["the"]}, max_size: 3, size: 2, evicted: nil}}
iex> RingBuffer.new(3)
...> |> RingBuffer.take()
{nil, %RingBuffer{queue: {[], []}, max_size: 3, size: 0, evicted: nil}}
"""
@spec take(buffer :: t()) :: {nil, t()} | {any, t()}
def take(%RingBuffer{} = buffer) when buffer.size > 0 do
{{:value, taken}, new_queue} = :queue.out(buffer.queue)
new_size = buffer.size - 1
{taken, %RingBuffer{buffer | queue: new_queue, size: new_size, evicted: nil}}
end
def take(%RingBuffer{} = buffer) when buffer.size == 0 do
{nil, %RingBuffer{buffer | evicted: nil}}
end
@doc """
Returns the oldest item in the buffer, or nil if the buffer is empty.
## Parameters
- buffer: the RingBuffer whose oldest item is sought
## Examples
iex> RingBuffer.new(3)
...> |> RingBuffer.put("elixir")
...> |> RingBuffer.put("is")
...> |> RingBuffer.put("the")
...> |> RingBuffer.put("best")
...> |> RingBuffer.oldest()
"is"
iex> RingBuffer.new(3)
...> |> RingBuffer.oldest()
nil
"""
@spec oldest(buffer :: t()) :: nil | any
def oldest(%RingBuffer{} = buffer) do
case :queue.peek(buffer.queue) do
{:value, item} -> item
:empty -> nil
end
end
@doc """
Returns the newest item in the buffer, or nil if the buffer is empty.
## Parameters
- buffer: the RingBuffer whose newest item is sought
## Examples
iex> RingBuffer.new(3)
...> |> RingBuffer.put("elixir")
...> |> RingBuffer.put("is")
...> |> RingBuffer.put("the")
...> |> RingBuffer.put("best")
...> |> RingBuffer.newest()
"best"
iex> RingBuffer.new(3)
...> |> RingBuffer.newest()
nil
"""
@spec newest(buffer :: t()) :: nil | any
def newest(%RingBuffer{} = buffer) do
case :queue.peek_r(buffer.queue) do
{:value, item} -> item
:empty -> nil
end
end
end
|
lib/ring_buffer.ex
| 0.911074
| 0.551211
|
ring_buffer.ex
|
starcoder
|
defprotocol Vaporator.CloudFs do
@moduledoc """
Protocol for the most basic set of Cloud file-system operations
Impls must implement the following functions:
- list_folder
- get_metadata
- file_download
- file_upload
- ...
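
A skeletal impl might look like the following (the `MyProvider` struct and the
function body are illustrative only, not a real backend):

    defimpl Vaporator.CloudFs, for: MyProvider do
      def list_folder(_fs, path, _args) do
        # A real impl would call the provider's API here.
        {:ok, %Vaporator.CloudFs.ResultsMeta{results: [], meta: %{path: path}}}
      end

      # ... remaining protocol functions elided ...
    end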
"""
@doc """
Need to be able to get the file's hash based on the destination CloudFs
hashing method. This can be different for each cloud provider.
Used when comparing ClientFs and CloudFs versions of a file.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- local_path (binary): Path of file on client file system
Returns:
cloudfs_hash (binary)
"""
def get_hash!(fs, local_path)
@doc """
Need to be able to get the destination CloudFs path from a local_path
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- local_root (binary): Path of root folder on client file
system. That is, the given local_path should be within the given
root, and it is this root that will be "replaced" with the
cloudfs_root when transfering the file.
- local_path (binary): Path of folder on client file system
- cloudfs_root (binary): Path of root folder on cloud file system
Returns:
cloudfs_path (binary)
"""
def get_path(fs, local_root, local_path, cloudfs_root)
@doc """
Need to be able to get the contents of a folder.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- path (binary): Path of folder on cloud file system to list
- args (Map): File-system-specific arguments to pass to the
underlying subsystem. In a perfect world, this would be
unnecessary, but "let it fail..." and all that.
Returns:
{:ok, Vaporator.CloudFs.ResultsMeta}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def list_folder(fs, path, args \\ %{})
@doc """
Need to be able to get the metadata of an object at a particular
path.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- path (binary): Path of file/folder on cloud file system to get
metadata for
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.Meta}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def get_metadata(fs, path, args \\ %{})
@doc """
Need to be able to get the binary content of a file at a particular
path.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- path (binary): Path of file on cloud file system to download
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.FileContent}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_download(fs, path, args \\ %{})
@doc """
Need to be able to upload binary content of a file on the local
file system to a particular path on the cloud file system.
The file should always be transferred and should overwrite
whatever is (might be) already there.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- local_path (binary): Path of file on local file system to upload
- fs_path (binary): Path on cloud file system to place uploaded
content. If this path ends with a "/" then it should be
treated as a directory in which to place the local_path
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.Meta}
or
{:error, :local_path_not_found}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_upload(fs, local_path, fs_path, args \\ %{})
@doc """
Need to be able to update binary content of a file on the cloud
file system to the version on the local file system.
In the case of file_upload, the file is always transferred. In the
case of file_update, the file transfer only happens if the cloud
content is different from the local content.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- local_path (binary): Path of file on local file system to upload
- fs_path (binary): Path on cloud file system to place uploaded
content. If this path ends with a "/" then it should be
treated as a directory in which to place the local_path
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.Meta}
or
{:error, :local_path_not_found}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_update(fs, local_path, fs_path, args \\ %{})
@doc """
Need to be able to remove a file or folder on the cloud file
system.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- path (binary): Path on cloud file system to remove.
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.FileContent}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_remove(fs, path, args \\ %{})
def folder_remove(fs, path, args \\ %{})
@doc """
Need to be able to copy one file in the cloud file system to
another place in the cloud file system.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- from_path (binary): Path of file/folder on cloud file system to
copy
- to_path (binary): Path of file/folder on cloud file system to
place the copied file. If this path ends in a "/", then it is
treated as a directory into which the file should be copied.
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.Meta}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_copy(fs, from_path, to_path, args \\ %{})
@doc """
Need to be able to move one file in the cloud file system to
another place in the cloud file system.
Args:
- fs (Vaporator.CloudFs impl): Cloud file system
- from_path (binary): Path of file/folder on cloud file system to
move
- to_path (binary): Path of file/folder on cloud file system to
place the moved file. If this path ends in a "/", then it is
treated as a directory into which the file should be moved.
- args (Map): File-system-specific arguments to pass to the
underlying subsystem.
Returns:
{:ok, Vaporator.CloudFs.Meta}
or
{:error, {:cloud_path_not_found, path error (binary)}}
or
{:error, {:bad_decode, decode error (any)}}
or
{:error, {:bad_status, {:status_code, code (int)}, JSON (Map)}}
or
{:error, {:unhandled_status, {:status_code, code (int)}, body (binary)}}
"""
def file_move(fs, from_path, to_path, args \\ %{})
end
defmodule Vaporator.CloudFs.Meta do
@moduledoc """
CloudFs file/folder (i.e. inode) metadata
"""
# Every file on any file-system (Dropbox, GDrive, S3, etc.) should
# have at least these attributes
@enforce_keys [:type, :path]
defstruct [
# :file, :folder, or :none?
:type,
# file name (w/o path)
:name,
# path in file-system
:path,
# time of last modification (UTC)
:modify_time,
# time of creation (UTC)
:create_time,
# file-system-specific metadata term
:meta
# to be used internally by the
# particular file-system (i.e. the
# implementation of the CloudFs
# protocol)
]
end
defmodule Vaporator.CloudFs.ResultsMeta do
@moduledoc """
CloudFs result set/list metadata.
Keeps track of a list of results that matter (usually file/folder
metadata), but also keeps a reference to the original
file-system-specific result meta object for use later by the
particular cloud file system.
E.g. Dropbox needs the list_folder metadata (specifically, the
"cursor" and "has_more" values) for pagination.
Some file systems might have results metadata, some might not. In
the case of not, then the metadata field (meta:) has a default value
of an empty map.
"""
@enforce_keys [:results]
defstruct results: [],
# List of CloudFs.Meta objects
# File-system-specific metadata for this
meta: %{}
# result set
end
defmodule Vaporator.CloudFs.FileContent do
@moduledoc """
CloudFs file content struct
Basically a thin type wrapping the Map data returned by HTTPoison's
HTTP methods (POST generally)
"""
defstruct content: "",
# Binary data from the body
# Header data returned by the HTTP
headers: %{}
# response
end
|
lib/vaporator/cloudfs/cloudfs.ex
| 0.866133
| 0.453685
|
cloudfs.ex
|
starcoder
|
defmodule Saul do
@moduledoc """
Contains the core of the functionality provided by Saul.
Saul is a data validation and conformation library. It tries to solve the
problem of validating the shape and content of some data (most useful when
such data come from an external source) and of conforming those data to
arbitrary formats.
Saul is based on the concept of **validators**: a validator is something that
knows how to validate a term and transform it to something else if
necessary. A good example of a validator could be something that validates
that a term is a string representation of an integer and that converts such
string to the represented integer.
Validators are a powerful abstraction as they can be easily *combined*: for
example, the `Saul.one_of/1` function takes a list of validators and returns a
validator that passes if one of the given validators pass. Saul provides both
"basic" validators as well as validator combinators.
## Validators
A validator can be:
* a function that takes one argument
* a term that implements the `Saul.Validator` protocol
The return value of function validators or implementations of
`Saul.Validator.validate/2` has to be one of the following:
* `{:ok, transformed}` - it means validation succeeded (the input term is
considered valid) and `transformed` is the conformed value for the input
term.
* `{:error, reason}` - it means validation failed (the input term is
invalid). `reason` can be any term: if it is not a `Saul.Error` struct,
`validate/2` will take care of wrapping it into a `Saul.Error`.
* `true` - it means validation succeeded. It is the same as `{:ok,
transformed}`, but it can be used when the transformed value is the same
as the input value. This is useful for "predicate" validators (functions
that take one argument and return a boolean).
* `false` - it means validation failed. It is the same as `{:error, reason}`,
except the reason only mentions that a "predicate failed".
Returning a boolean value is supported so that existing predicate functions
can be used as validators without modification. Examples of such functions are
type guards (`is_binary/1` or `is_list/1`), functions like `String.valid?/1`,
and many others.
## Validating
The only entry point for validation is `validate/2`. It hides all the
complexity of the possible return values of validators (described in the
"Validators" section) and always returns `{:ok, transformed}` (where
`transformed` can be the same term as the term being validated) or `{:error,
%Saul.Error{}}`. See the documentation for `validate/2` for more detailed
documentation.
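
As a quick taste, here is a sketch composing two of the combinators defined
below (the map shape is made up for the example):

    user = Saul.map(%{name: {:required, &is_binary/1}, age: {:optional, &is_integer/1}})
    Saul.validate(%{name: "Jane", age: 30}, user)
    #=> {:ok, %{name: "Jane", age: 30}}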
"""
@typedoc """
The type defining a validator.
See the module documentation for more information on what validators are.
"""
@type validator(transformed_type) ::
(term -> {:ok, transformed_type} | {:error, term})
| (term -> boolean)
| Saul.Validator.t
@doc """
Validates the given `term` through the given `validator`.
If the validator successfully matches `term`, then the return value of this
function is `{:ok, transformed}` where `transformed` is the result of the
transformation applied by the validator. If the validator returns `{:error,
reason}`, the return value of this function is `{:error, %Saul.Error{}}`.
Note that the given validator can return any type of `reason` when returning
an `:error` tuple: `validate/2` will take care of wrapping it into a
`%Saul.Error{}`. This is done so that users can work with a consistent
interface but at the same time they can use already existing functions as
validators (since `{:ok, term} | {:error, term}` is quite a common API in
Erlang/Elixir).
## Examples
iex> to_string = &{:ok, to_string(&1)}
iex> Saul.validate(:foo, to_string)
{:ok, "foo"}
iex> Saul.validate("hello", to_string)
{:ok, "hello"}
iex> failer = fn(_) -> {:error, :bad} end
iex> {:error, %Saul.Error{} = error} = Saul.validate(3.14, failer)
iex> error.reason
":bad"
"""
@spec validate(term, validator(value)) ::
{:ok, value} | {:error, Saul.Error.t} | no_return when value: term
def validate(term, validator) do
result =
case validator do
fun when is_function(fun, 1) -> fun.(term)
_ -> Saul.Validator.validate(validator, term)
end
case result do
{:ok, _transformed} = result ->
result
true ->
{:ok, term}
{:error, %Saul.Error{}} = result ->
result
{:error, reason} ->
{:error, %Saul.Error{validator: validator, reason: inspect(reason), term: {:term, term}}}
false ->
{:error, %Saul.Error{validator: validator, reason: "predicate failed", term: {:term, term}}}
other ->
raise ArgumentError, "validator should return {:ok, term}, {:error, term}, " <>
"or a boolean, got: #{inspect(other)}"
end
end
@doc """
Validates the given `term` through the given `validator`, raising in case of errors.
This function works like `validate/2`, but it returns the transformed term
directly in case validation succeeds or raises a `Saul.Error` exception in
case validation fails.
## Examples
iex> Saul.validate!("foo", &is_binary/1)
"foo"
iex> Saul.validate!("foo", &is_atom/1)
** (Saul.Error) (&:erlang.is_atom/1) predicate failed - failing term: "foo"
"""
@spec validate!(term, validator(value)) :: value | no_return when value: any
def validate!(term, validator) do
case validate(term, validator) do
{:ok, transformed} ->
transformed
{:error, %Saul.Error{} = error} ->
raise(error)
end
end
## Validators
@doc """
Returns a validator that performs the same validation as `validator` but has
the name `name`.
This function is useful in order to have better errors when validation
fails. In such cases, the name of each of the failing validators is printed
alongside the error. If your validator is an anonymous function `f`, such name
will be `inspect(f)`, so it won't be very useful when trying to understand
errors. Naming a validator is also useful when your validator is an isolated
logical unit (such as a validator that validates that a term is an integer,
positive, and converts it to its Roman representation).
## Examples
iex> failer = Saul.named_validator(fn(_) -> {:error, :oops} end, "validator that always fails")
iex> Saul.validate!(:foo, failer)
** (Saul.Error) (validator that always fails) :oops - failing term: :foo
"""
@spec named_validator(validator(value), String.t) :: validator(value) when value: any
def named_validator(validator, name) do
%Saul.Validator.NamedValidator{name: name, validator: validator}
end
@doc """
Returns a validator that always passes and applies the given transformation
`fun`.
This function is useful when a validator is only applying a transformation,
and not performing any validation. Using this function is only beneficial
inside more complex validators, such as `all_of/1`, where `fun` needs to have
the shape of a validator. For other cases, you can just apply `fun` directly
to the input term.
## Examples
For example, if you validated that a term is a binary in some way, but want to
transform it to a charlist during validation, you could wrap
`String.to_charlist/1` inside `transform/1`:
iex> term = "this is a string"
iex> Saul.validate!(term, Saul.transform(&String.to_charlist/1))
'this is a string'
"""
@spec transform((input -> output)) :: (input -> {:ok, output}) when input: var, output: var
def transform(fun) when is_function(fun, 1) do
&{:ok, fun.(&1)}
end
@doc """
Returns a validator that checks that the input term is equal to `term`.
This is a basic validator that allows to check for literal terms (hence its
name, "lit"). If the input term is equal to `term`, then it is returned
unchanged.
## Examples
iex> three = Saul.lit(3)
iex> Saul.validate(3, three)
{:ok, 3}
iex> {:error, error} = Saul.validate(4, three)
iex> error.reason
"expected exact term 3"
"""
@spec lit(value) :: validator(value) when value: term
def lit(term) do
%Saul.Validator.Literal{term: term}
end
@doc """
Returns a validator that matches when all the given `validators` match.
`validators` has to be a *non-empty* list of validators.
The validation stops and fails as soon as one of the `validators` fails, or
succeeds and returns the value returned by the last validator if all
validators succeed. When a validator succeeds, the transformed value it
returns is passed as the input to the next validator in the list: this allows
to simulate a "pipeline" of transformations that halts as soon as something
doesn't match (similar to a small subset of what you could achieve with the
`with` Elixir special form).
## Examples
iex> validator = Saul.all_of([&{:ok, to_string(&1)}, &is_binary/1])
iex> Saul.validate(:hello, validator)
{:ok, "hello"}
iex> validator = Saul.all_of([&is_binary/1, &{:ok, &1}])
iex> Saul.validate!(:hello, validator)
** (Saul.Error) (&:erlang.is_binary/1) predicate failed - failing term: :hello
"""
@spec all_of(nonempty_list(validator(term))) :: validator(term)
def all_of([_ | _] = validators) do
%Saul.Validator.AllOf{validators: validators}
end
@doc """
Returns a validator that matches if one of the given `validators` match.
`validators` has to be a *non-empty* list of validators.
The validation stops and succeeds as soon as one of the `validators`
succeeds. The value returned by the succeeding validator is the value returned
by this validator as well. If all validators fail, an error that shows all the
failures is returned.
## Examples
iex> validator = Saul.one_of([&is_binary/1, &is_atom/1])
iex> Saul.validate(:foo, validator)
{:ok, :foo}
"""
@spec one_of(nonempty_list(validator(term))) :: validator(term)
def one_of([_ | _] = validators) do
%Saul.Validator.OneOf{validators: validators}
end
@doc """
Returns a validator that matches an enumerable where all elements match
`validator`.
The return value of this validator is a value constructed by collecting the
values in the given enumerable transformed according to `validator` into the
collectable specified by the `:into` option. This validator can be considered
analogous to the `for` special form (with the `:into` option as well), but
with error handling. If any of the elements in the given enumerable fails
`validator`, this validator fails.
## Options
* `:into` - (`t:Collectable.t/0`) the collectable where the transformed values
should end up in. Defaults to `[]`.
## Examples
iex> validator = Saul.enum_of(&{:ok, {inspect(&1), &1}}, into: %{})
iex> Saul.validate(%{foo: :bar}, validator)
{:ok, %{"{:foo, :bar}" => {:foo, :bar}}}
iex> Saul.validate([1, 2, 3], validator)
{:ok, %{"1" => 1, "2" => 2, "3" => 3}}
"""
@spec enum_of(Saul.validator(term), Keyword.t) :: Saul.validator(Collectable.t)
def enum_of(validator, options \\ []) when is_list(options) do
Saul.Enum.enum_of(validator, options)
end
@doc """
Returns a validator that validates a map with the shape specified by
`validators_map`.
`validators_map` must be a map with values as keys and two-element tuples
`{required_or_optional, validator}` as values. The input map will be validated
like this:
* each key is checked against the validator at the corresponding key in
`validators_map`
* `{:required, validator}` validators mean that their corresponding key is
required in the map; if it's not present in the input map, this
validator fails
* `{:optional, validator}` validators mean that their corresponding key
can be not present in the map, and it's only validated with `validator`
in case it's present
The map returned by this validator has unchanged keys and values that are the
result of the validator for each key.
## Options
* `:strict` (boolean) - if this option is `true`, then this validator fails
if the input map has keys that are not in `validators_map`. Defaults to
`false`.
## Examples
iex> validator = Saul.map([strict: false], %{
...> to_string: {:required, &{:ok, to_string(&1)}},
...> is_atom: {:optional, &is_atom/1},
...> })
iex> Saul.validate(%{to_string: :foo, is_atom: :bar}, validator)
{:ok, %{to_string: "foo", is_atom: :bar}}
iex> Saul.validate(%{to_string: :foo}, validator)
{:ok, %{to_string: "foo"}}
"""
@spec map(Keyword.t, %{optional(term) => {:required | :optional, validator(term)}}) ::
validator(map)
def map(options \\ [], validators_map) when is_list(options) and is_map(validators_map) do
Saul.Validator.Map.new(validators_map, options)
end
@doc """
Returns a validator that validates a tuples with elements that match the
validator at their corresponding position in `validators`.
The return value of this validator is a tuple with the same number of elements
as `validators` (and the input tuple) where elements are the result of the
validator in their corresponding position in `validators`.
## Examples
iex> atom_to_string = Saul.transform(&Atom.to_string/1)
iex> Saul.validate({:foo, :bar}, Saul.tuple({atom_to_string, atom_to_string}))
{:ok, {"foo", "bar"}}
"""
@spec tuple(tuple) :: validator(tuple)
def tuple(validators) when is_tuple(validators) do
Saul.Tuple.tuple(validators)
end
@doc """
Returns a validator that validates a map with keys that match `key_validator`
and values that match `value_validator`.
The return value of this validator is a map where keys are the result of
`key_validator` for each key and values are the result of `value_validator`
for the corresponding key. If any key or value fail, this validator fails.
Note that if `key_validator` ends up transforming two keys into the same term,
then they will collapse under just one key-value pair in the transformed map
and there is no guarantee on which value will prevail.
## Examples
iex> integer_to_string = Saul.all_of([&is_integer/1, &{:ok, Integer.to_string(&1)}])
iex> validator = Saul.map_of(integer_to_string, &is_atom/1)
iex> Saul.validate(%{1 => :ok, 2 => :not_so_ok}, validator)
{:ok, %{"1" => :ok, "2" => :not_so_ok}}
"""
@spec map_of(validator(key), validator(value)) :: validator(%{optional(key) => value})
when key: any, value: any
def map_of(key_validator, value_validator) do
  Saul.Map.map_of(key_validator, value_validator)
end
@doc """
Returns a validator that validates a list where all elements match `validator`.
The return value of this validator is a list where each element is the return
value of `validator` for the corresponding element in the input
list. Basically this is analogous to `Enum.map/2` but with error handling. If
any of the elements in the list fail `validator`, this validator fails.
## Examples
iex> integer_to_string = Saul.all_of([&is_integer/1, &{:ok, Integer.to_string(&1)}])
iex> Saul.validate([1, 2, 3], Saul.list_of(integer_to_string))
{:ok, ["1", "2", "3"]}
"""
@spec list_of(validator(value)) :: validator([value]) when value: any
def list_of(validator) do
[&is_list/1, enum_of(validator, into: [])]
|> all_of()
|> named_validator("list_of")
end
@doc """
Returns a validator that checks if the input term is a member of `enumerable`.
The return value of this validator is the input term, unmodified.
`Enum.member?/2` is used to check if the input term is a member of
`enumerable`.
## Examples
iex> Saul.validate(:bar, Saul.member([:foo, :bar, :baz]))
{:ok, :bar}
iex> Saul.validate(50, Saul.member(1..100))
{:ok, 50}
"""
@spec member(Enumerable.t) :: validator(term)
def member(enumerable) do
%Saul.Validator.Member{enumerable: enumerable}
end
end
|
lib/saul.ex
| 0.945601
| 0.912903
|
saul.ex
|
starcoder
|
defmodule URI do
@on_load :preload_parsers
defrecord Info, [scheme: nil, path: nil, query: nil,
fragment: nil, authority: nil,
userinfo: nil, host: nil, port: nil,
specifics: nil]
import Bitwise
@moduledoc """
Utilities for working with and creating URIs.
"""
@doc """
Takes an enumerable (containing a sequence of two-item tuples)
and returns a string of k=v&k2=v2... where keys and values are
URL encoded as per encode. Keys and values can be any term
that implements the Binary.Chars protocol (i.e. can be converted
to binary).
"""
def encode_query(l), do: Enum.join(Enum.map(l, pair(&1)), "&")
@doc """
Given a query string of the form "key1=value1&key=value2...", produces an
orddict with one entry for each key-value pair. Each key and value will be a
binary. It also does percent-unescaping of both keys and values.
Returns nil if the query string is malformed.
"""
def decode_query(q, dict // Orddict.new) do
if Regex.match?(%r/^\s*$/, q) do
dict
else
parts = Regex.split %r/&/, to_binary(q)
impl = Dict.__impl_for__!(dict)
try do
List.foldl parts, dict, fn kvstr, acc ->
case Regex.split(%r/=/, kvstr) do
[ key, value ] when key != "" ->
impl.put acc, decode(key), decode(value)
_ ->
throw :malformed_query_string
end
end
catch
:malformed_query_string -> nil
end
end
end
defp pair({k, v}) do
encode(to_binary(k)) <> "=" <> encode(to_binary(v))
end
@doc """
Percent (URL) encodes a URI.
"""
def encode(s), do: bc <<c>> inbits s, do: <<percent(c)|:binary>>
defp percent(32), do: <<?+>>
defp percent(?-), do: <<?->>
defp percent(?_), do: <<?_>>
defp percent(?.), do: <<?.>>
defp percent(c) when
c >= ?0 and c <= ?9 when
c >= ?a and c <= ?z when
c >= ?A and c <= ?Z do
<<c>>
end
defp percent(c), do: escape_byte(c)
defp escape_byte(c), do: "%" <> hex(c)
defp hex(n) when n <= 9, do: <<n + ?0>>
defp hex(n) when n > 15 do
hex(bsr(n, 4)) <> hex(band(n, 15))
end
defp hex(n), do: <<n + ?A - 10>>
@doc """
Unpercent (URL) decodes a URI.
"""
def decode(<<?%, hex1, hex2, tail |:binary >>) do
<< bsl(hex2dec(hex1), 4) + hex2dec(hex2) >> <> decode(tail)
end
def decode(<<head, tail |:binary >>) do
<<check_plus(head)>> <> decode(tail)
end
def decode(<<>>), do: <<>>
defp hex2dec(n) when n in ?A..?F, do: n - ?A + 10
defp hex2dec(n) when n in ?0..?9, do: n - ?0
defp check_plus(?+), do: 32
defp check_plus(c), do: c
@doc """
Parses a URI into components.
URIs have portions that are handled specially for the
particular scheme of the URI. For example, http and https
have different default ports. Sometimes the parsing
of portions themselves are different. This parser
is extensible via behavior modules. If you have a
module named URI.MYSCHEME with a function called
'parse' that takes a single argument, the generically
parsed URI, that function will be called when this
parse function is passed a URI of that scheme. This
allows you to build on top of what the URI library
currently offers. You also need to define default_port
which takes 0 arguments and returns the default port
for that particular scheme. Take a look at URI.HTTPS for an
example of one of these extension modules.
"""
def parse(s) do
# From http://tools.ietf.org/html/rfc3986#appendix-B
regex = %r/^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?/
parts = nillify(Regex.run(regex, to_binary(s)))
destructure [_, _, scheme, _, authority, path, _, query, _, fragment], parts
{ userinfo, host, port } = split_authority(authority)
info = URI.Info[
scheme: scheme, path: path, query: query,
fragment: fragment, authority: authority,
userinfo: userinfo, host: host, port: port
]
scheme_specific(scheme, info)
end
defp scheme_specific(scheme, info) do
if scheme do
module =
try do
Module.safe_concat(URI, :string.to_upper(binary_to_list(scheme)))
rescue
ArgumentError -> nil
end
if module && match?({:module,^module}, Code.ensure_loaded(module)) do
module.parse(default_port(info, module))
else
info
end
else
info
end
end
defp default_port(info, module) do
if info.port, do: info, else: info.port(module.default_port)
end
# Split an authority into its userinfo, host and port parts.
defp split_authority(s) do
s = s || ""
components = Regex.run %r/(^(.*)@)?([^:]*)(:(\d*))?/, s
destructure [_, _, userinfo, host, _, port], nillify(components)
port = if port, do: list_to_integer(binary_to_list(port))
{ userinfo, host, port }
end
# Regex.run returns empty strings sometimes. We want
# to replace those with nil for consistency.
defp nillify(l) do
lc s inlist l do
if size(s) > 0 do
s
else
nil
end
end
end
# Reference parsers so the parse/1 doesn't fail
# on safe_concat.
defp preload_parsers do
parsers = [URI.FTP, URI.HTTP, URI.HTTPS, URI.LDAP, URI.SFTP, URI.TFTP]
Enum.each parsers, Code.ensure_loaded(&1)
:ok
end
end
|
lib/elixir/lib/uri.ex
| 0.769427
| 0.507751
|
uri.ex
|
starcoder
|
defmodule Grizzly.ZWave.CommandClasses.NetworkManagementInstallationMaintenance do
@moduledoc """
"NetworkManagementInstallationMaintenance" Command Class
The Network Management Installation and Maintenance Command Class is used to access statistical
data.
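
For illustration, encoding a couple of statistics into the wire format (the
values are made up):

    iex> alias Grizzly.ZWave.CommandClasses.NetworkManagementInstallationMaintenance
    iex> NetworkManagementInstallationMaintenance.statistics_to_binary(route_changes: 2, transmission_count: 5)
    <<0x00, 0x01, 2, 0x01, 0x01, 5>>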
"""
@type route_type ::
:no_route | :last_working_route | :next_to_last_working_route | :set_by_application
@type speed :: :"9.6kbit/s" | :"40kbit/s" | :"100kbit/s" | :reserved
@type statistics :: [statistic]
@type statistic ::
{:route_changes, byte}
| {:transmission_count, byte}
| {:neighbors, [neighbor]}
| {:packet_error_count, byte}
| {:sum_of_transmission_times, non_neg_integer}
| {:sum_of_transmission_times_squared, non_neg_integer}
@type neighbor :: [neighbor_param]
@type neighbor_param ::
{:node_id, byte}
| {:repeater?, boolean}
| {:speed, speed}
@type rssi ::
:rssi_not_available | :rssi_max_power_saturated | :rssi_below_sensitivity | -94..-32
alias Grizzly.ZWave.DecodeError
@behaviour Grizzly.ZWave.CommandClass
@impl true
def byte(), do: 0x67
@impl true
def name(), do: :network_management_installation_maintenance
@spec route_type_to_byte(route_type) :: byte
def route_type_to_byte(type) do
case type do
:no_route -> 0x00
:last_working_route -> 0x01
:next_to_last_working_route -> 0x02
:set_by_application -> 0x10
end
end
@spec route_type_from_byte(any) :: {:error, Grizzly.ZWave.DecodeError.t()} | {:ok, route_type}
def route_type_from_byte(byte) do
case byte do
0x00 -> {:ok, :no_route}
0x01 -> {:ok, :last_working_route}
0x02 -> {:ok, :next_to_last_working_route}
0x10 -> {:ok, :set_by_application}
byte -> {:error, %DecodeError{param: :type, value: byte}}
end
end
@spec speed_to_byte(speed) :: byte
def speed_to_byte(speed) do
case speed do
:"9.6kbit/s" -> 0x01
:"40kbit/s" -> 0x02
:"100kbit/s" -> 0x03
end
end
@spec speed_from_byte(any) :: {:ok, speed}
def speed_from_byte(byte) do
case byte do
0x01 -> {:ok, :"9.6kbit/s"}
0x02 -> {:ok, :"40kbit/s"}
0x03 -> {:ok, :"100kbit/s"}
# All other values are reserved and MUST NOT be used by a sending node.
# Reserved values MUST be ignored by a receiving node.
_byte -> {:ok, :reserved}
end
end
@spec rssi_to_byte(rssi) :: byte
def rssi_to_byte(:rssi_below_sensitivity), do: 0x7D
def rssi_to_byte(:rssi_max_power_saturated), do: 0x7E
def rssi_to_byte(:rssi_not_available), do: 0x7F
def rssi_to_byte(value) when value in -94..-32, do: 256 + value
@spec rssi_from_byte(byte) :: {:ok, rssi} | {:error, DecodeError.t()}
def rssi_from_byte(0x7D), do: {:ok, :rssi_below_sensitivity}
def rssi_from_byte(0x7E), do: {:ok, :rssi_max_power_saturated}
def rssi_from_byte(0x7F), do: {:ok, :rssi_not_available}
def rssi_from_byte(byte) when byte in 0xA2..0xE0, do: {:ok, byte - 256}
def rssi_from_byte(byte), do: {:error, %DecodeError{value: byte}}
def repeaters_to_bytes(repeaters) do
full_repeaters = (repeaters ++ [0, 0, 0, 0]) |> Enum.take(4)
for repeater <- full_repeaters, into: <<>>, do: <<repeater>>
end
def repeaters_from_bytes(bytes) do
:erlang.binary_to_list(bytes)
|> Enum.reject(&(&1 == 0))
end
def statistics_to_binary(statistics) do
for statistic <- statistics, into: <<>> do
case statistic do
{:route_changes, byte} ->
<<0x00, 0x01, byte>>
{:transmission_count, byte} ->
<<0x01, 0x01, byte>>
{:neighbors, neighbors} ->
binary = neighbors_to_binary(neighbors)
<<0x02, byte_size(binary)>> <> binary
{:packet_error_count, byte} ->
<<0x03, 0x01, byte>>
{:sum_of_transmission_times, sum} ->
<<0x04, 0x04, sum::integer-unsigned-unit(8)-size(4)>>
{:sum_of_transmission_times_squared, sum} ->
<<0x05, 0x04, sum::integer-unsigned-unit(8)-size(4)>>
end
end
end
def statistics_from_binary(<<>>) do
{:ok, []}
end
def statistics_from_binary(<<0x00, 0x01, byte, rest::binary>>) do
with {:ok, other_statistics} <- statistics_from_binary(rest) do
{:ok, [route_changes: byte] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
def statistics_from_binary(<<0x01, 0x01, byte, rest::binary>>) do
with {:ok, other_statistics} <- statistics_from_binary(rest) do
{:ok, [transmission_count: byte] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
def statistics_from_binary(
<<0x02, length, neighbors_binary::binary-size(length), rest::binary>>
) do
with {:ok, other_statistics} <- statistics_from_binary(rest),
{:ok, neighbors} <- neighbors_from_binary(neighbors_binary) do
{:ok, [neighbors: neighbors] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
def statistics_from_binary(<<0x03, 0x01, byte, rest::binary>>) do
with {:ok, other_statistics} <- statistics_from_binary(rest) do
{:ok, [packet_error_count: byte] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
def statistics_from_binary(<<0x04, 0x04, sum::integer-unsigned-unit(8)-size(4), rest::binary>>) do
with {:ok, other_statistics} <- statistics_from_binary(rest) do
{:ok, [sum_of_transmission_times: sum] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
def statistics_from_binary(<<0x05, 0x04, sum::integer-unsigned-unit(8)-size(4), rest::binary>>) do
with {:ok, other_statistics} <- statistics_from_binary(rest) do
{:ok, [sum_of_transmission_times_squared: sum] ++ other_statistics}
else
{:error, %DecodeError{}} = error ->
error
end
end
defp neighbors_to_binary(neighbors) do
for neighbor <- neighbors, into: <<>>, do: neighbor_to_binary(neighbor)
end
defp neighbor_to_binary(neighbor) do
node_id = Keyword.get(neighbor, :node_id)
repeater_bit = if Keyword.get(neighbor, :repeater?), do: 0x01, else: 0x00
speed_bits = Keyword.get(neighbor, :speed) |> speed_to_byte()
<<node_id, repeater_bit::size(1), 0x00::size(2), speed_bits::size(5)>>
end
defp neighbors_from_binary(<<>>), do: {:ok, []}
defp neighbors_from_binary(
<<node_id, repeater_bit::size(1), _reserved::size(2), speed_bits::size(5), rest::binary>>
) do
with {:ok, speed} <- speed_from_byte(speed_bits),
{:ok, other_neighbors} <- neighbors_from_binary(rest) do
neighbor = [node_id: node_id, repeater?: repeater_bit == 1, speed: speed]
{:ok, [neighbor | other_neighbors]}
else
{:error, %DecodeError{}} = error ->
error
end
end
end
|
lib/grizzly/zwave/command_classes/network_management_installation_maintenance.ex
| 0.803752
| 0.470189
|
network_management_installation_maintenance.ex
|
starcoder
|
defmodule Extractly.Toc.Renderer.AstRenderer do
@doc ~S"""
Transform a normalized tuple list (that is a list of tuples of the form {n, text})
in which there exists an entry of the form {m, text} for all m between min(n) and
max(n).
Two formats are supported
### The _simple_ `PushList`
where the tuple list is transformed into a linear structural
representation of the different levels by representing opening and closing brackets by
the symbols `:open` and `:close`
iex(1)> render_push_list([{1, "I"}, {3, "I - (i)"}, {1, "II"}, {2, "II 1"}])
["I", :open, :open, "I - (i)", :close, :close, "II", :open, "II 1", :close]
This format is ideal to be transformed into, e.g., an HTML representation.
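
For instance, a rough sketch of such a transformation (not part of this module):

    push_list
    |> Enum.map(fn
      :open -> "<ul>"
      :close -> "</ul>"
      text -> "<li>#{text}</li>"
    end)
    |> Enum.join()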
"""
def render_push_list(normalized_tuples, options \\ [])
def render_push_list(normalized_tuples, _options), do: _render_push_list(normalized_tuples, {1, []})
@doc ~S"""
### A structural nested array (extracted from the `PushList`)
iex(2)> render_ast([{1, "I"}, {3, "I - (i)"}, {1, "II"}, {2, "II 1"}])
["I", [["I - (i)"]], "II", ["II 1"]]
"""
def render_ast(normalized_tuples, options \\ [])
def render_ast(normalized_tuples, _options), do:
normalized_tuples
|> _render_push_list({1, []})
# |> IO.inspect
|> _make_tree([])
defp _add_closes_and_reverse(n, push_list)
defp _add_closes_and_reverse(1, push_list), do: Enum.reverse(push_list)
defp _add_closes_and_reverse(n, push_list), do: _add_closes_and_reverse(n-1, [:close|push_list])
defp _render_push_list(tuples, result)
defp _render_push_list([], {n, push_list}), do: _add_closes_and_reverse(n, push_list)
defp _render_push_list([tuple|rest], result), do: _render_push_list(rest, _new_result(tuple, result))
defp _make_tree(push_list, result)
defp _make_tree([], result), do: Enum.reverse(result)
defp _make_tree([:close|rest], result), do: _make_tree(rest, _up_and_reverse([[]|result]))
defp _make_tree([head|rest], result), do: _make_tree(rest, [head|result])
defp _new_result(tuple, result)
defp _new_result({tlevel, text}, {clevel, result}) when tlevel == clevel, do: {clevel, [text|result]}
defp _new_result({tlevel, text}, {clevel, result}) when tlevel > clevel do
{tlevel, [text|_prepend(:open, tlevel-clevel, result)]}
end
defp _new_result({tlevel, text}, {clevel, result}) do
{tlevel, [text|_prepend(:close, clevel-tlevel, result)]}
end
defp _prepend(sym, count, to)
defp _prepend(_, 0, to), do: to
defp _prepend(sym, n, to), do: _prepend(sym, n-1, [sym|to])
defp _up_and_reverse(result)
defp _up_and_reverse([head, :open|result]), do: [head|result]
defp _up_and_reverse([head, text|result]), do: _up_and_reverse([[text|head]|result])
end
# SPDX-License-Identifier: Apache-2.0
|
lib/extractly/toc/renderer/ast_renderer.ex
| 0.75101
| 0.547222
|
ast_renderer.ex
|
starcoder
|
defmodule Jot do
@moduledoc ~S"""
Jot is a fast and minimal template engine influenced by Pug and Slim.<br>
It's also slightly more fun than HTML.
Here's what it looks like.
html(lang="en")
head
title Jot
body
h1#question Why?
.col
p Because this is slightly more fun than HTML.
And here's how you might use it.
iex> defmodule View do
...> require Jot
...> Jot.function_from_string :def, :sample, "h1 Hello, world!", []
...> end
...> View.sample()
"<h1>Hello, world!</h1>"
For full documentation of the Jot syntax see the project README.
## API
This module provides 3 main APIs for you to use:
1. Evaluate a string (`eval_string`) or a file (`eval_file`)
directly. This is the simplest API to use but also the
slowest, since the code is evaluated and not compiled before.
This should not be used in production.
2. Define a function from a string (`function_from_string`)
or a file (`function_from_file`). This allows you to embed
the template as a function inside a module which will then
be compiled. This is the preferred API if you have access
to the template at compilation time.
3. Compile a string (`compile_string`) or a file (`compile_file`)
into Elixir syntax tree. This is the API used by both functions
above and is available to you if you want to provide your own
ways of handling the compiled template.
## Options
All functions in this module accept EEx-related options.
They are:
TODO
* `:line` - the line to be used as the template start. Defaults to 1.
TODO
* `:file` - the file to be used in the template. Defaults to the given
file the template is read from or to "nofile" when compiling from a
string.
### Macros
By default Jot makes use of the `EEx.SmartEngine`, which adds some macros to
your template. An example is the `@` macro which allows easy data access in a
template:
iex> Jot.eval_string("= @foo", assigns: [foo: 1])
"1"
In other words, `@foo` translates to:
{:ok, v} = Access.fetch(assigns, :foo);
v
The `assigns` extension is useful when the number of variables
required by the template is not specified at compilation time.
"""
@doc """
Generates a function definition from the file contents.
The kind (`:def` or `:defp`) must be given, the
function name, its arguments and the compilation options.
This function is useful in case you have templates but
you want to precompile inside a module for speed.
## Examples
iex> # sample.jot
...> # = a + b
...> defmodule SampleA do
...> require Jot
...> Jot.function_from_file :def, :sample, "lib/sample.jot", [:a, :b]
...> end
...> SampleA.sample(1, 2)
"3"
"""
defmacro function_from_file(kind, name, file, args \\ [], options \\ []) do
quote bind_quoted: binding() do
info = Keyword.merge options, [file: file, line: 1]
args = Enum.map(args, fn arg -> {arg, [line: 1], nil} end)
compiled = Jot.compile_file(file, info)
@external_resource file
@file file
case kind do
:def -> def(unquote(name)(unquote_splicing(args)), do: unquote(compiled))
:defp -> defp(unquote(name)(unquote_splicing(args)), do: unquote(compiled))
end
end
end
@doc """
Generates a function definition from the string.
The kind (`:def` or `:defp`) must be given, the
function name, its arguments and the compilation options.
## Examples
iex> defmodule SampleB do
...> require Jot
...> Jot.function_from_string :def, :sample, "= a + b", [:a, :b]
...> end
iex> SampleB.sample(1, 2)
"3"
"""
defmacro function_from_string(
kind, name, source, args \\ [], options \\ []
) do
quote bind_quoted: binding() do
info = Keyword.merge([file: __ENV__.file, line: __ENV__.line], options)
args = Enum.map(args, fn(arg) -> {arg, [line: info[:line]], nil} end)
compiled = Jot.compile_string(source, info)
case kind do
:def ->
def(unquote(name)(unquote_splicing(args)), do: unquote(compiled))
:defp ->
defp(unquote(name)(unquote_splicing(args)), do: unquote(compiled))
end
end
end
@doc """
Takes a Jot template string and returns a quoted expression
that can be evaluated by Elixir, or compiled to a function.
## Examples
iex> expr = Jot.compile_string("= a + b")
...> Jot.eval_quoted(expr, [a: 1, b: 2])
"3"
"""
def compile_string(template, opts \\ []) when is_binary(template) do
Jot.Compiler.compile(template, opts)
end
@doc """
Takes a `path` to a Jot template file and generate a quoted
expression that can be evaluated by Elixir or compiled to a function.
"""
def compile_file(path, options \\ []) do
options = Keyword.merge(options, [file: path, line: 1])
compile_string(File.read!(path), options)
end
@doc """
Takes a Jot template string and evaluate it using the `bindings`.
This function compiles the template with every call, making it considerably
slower than functions generated with `function_from_string/5` and
`function_from_file/5`.
## Examples
iex> Jot.eval_string("= bar", [bar: "baz"])
"baz"
"""
def eval_string(template, bindings \\ [], opts \\ []) do
template
|> compile_string(opts)
|> eval_quoted(bindings)
end
@doc """
Takes a Jot quoted expression and call it using the `bindings` given.
"""
def eval_quoted(expr, bindings \\ []) do
{html, _} = Code.eval_quoted(expr, bindings)
html
end
end
|
lib/jot.ex
| 0.569254
| 0.590366
|
jot.ex
|
starcoder
|
defmodule QbBackend.Accounts do
@moduledoc """
This is the boundary module for the Accounts Context
We use this module to perform Accounts related actions
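
For example (the attribute keys below are illustrative; the actual schema
fields may differ):

    {:ok, user} = QbBackend.Accounts.create_user(%{name: "Jane", email: "jane@example.com"})
    {:ok, profile} = QbBackend.Accounts.create_profile(user, %{handle: "jane"})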
"""
alias QbBackend.{
Repo,
Accounts.User,
Accounts.Profile
}
@doc """
This function takes the id of a user and proceeds to find the associated account
"""
@spec get_user(String.t()) :: {:ok, User.t()} | {:error, String.t()}
def get_user(id) do
with %User{} = usr <- Repo.get_by(User, id: id) do
{:ok, usr}
else
nil -> {:error, "No user with id #{id} on the system"}
end
end
@doc """
This function takes a map of attributes and creates a user record with that
map of attributes.
"""
@spec create_user(map()) :: {:ok, User.t()} | {:error, Ecto.Changeset.t()}
def create_user(attrs) do
%User{}
|> User.changeset(attrs)
|> Repo.insert()
end
@doc """
This function takes a user and deletes that user from the database
"""
@spec delete_user(User.t()) :: {:ok, User.t()} | {:error, Ecto.Changeset.t()}
def delete_user(%User{} = usr) do
usr |> Repo.delete()
end
@doc """
This function takes a user and a map of update attributes that it uses
to update the user's details.
"""
@spec update_user(User.t(), map()) :: {:ok, User.t()} | {:error, Ecto.Changeset.t()}
def update_user(%User{} = usr, attrs) do
usr
|> User.changeset(attrs)
|> Repo.update()
end
@doc """
This function takes a user and a map of attributes it then proceeds to create
a profile linked to the user with a reader role as default
"""
@spec create_profile(User.t(), map()) :: {:ok, Profile.t()} | {:error, String.t()}
def create_profile(%User{} = usr, attrs) do
usr
|> Profile.create_changeset(attrs)
|> Repo.insert()
end
@doc """
This function takes a profile and a map of attributes, then proceeds to update
that profile with them.
"""
@spec update_profile(Profile.t(), map) :: {:ok, Profile.t()} | {:error, Ecto.Changeset.t()}
def update_profile(%Profile{} = prf, attrs) do
prf
|> Profile.changeset(attrs)
|> Repo.update()
end
@doc """
This function takes a profile and proceeds to delete that profile from the
application database
"""
@spec delete_profile(Profile.t()) :: {:ok, Profile.t()} | {:error, Ecto.Changeset.t()}
def delete_profile(%Profile{} = prf) do
prf |> Repo.delete()
end
@doc """
This function takes a profile id and fetches a profile if one exists with the id, it returns an error
if one does not exist
"""
@spec get_profile(String.t()) :: {:ok, Profile.t()} | {:error, String.t()}
def get_profile(id) do
with %Profile{} = prof <- Repo.get_by(Profile, id: id) do
{:ok, prof}
else
nil -> {:error, "No Profile with id #{id} on the system"}
end
end
end
|
lib/qb_backend/accounts/accounts.ex
| 0.677794
| 0.410934
|
accounts.ex
|
starcoder
|
defmodule Defql do
@moduledoc """
Module provides macros to create function with SQL as a body.
## Installation
If [available in Hex](https://hex.pm/docs/publish), the package can be installed
by adding `defql` to your list of dependencies in `mix.exs`:
```elixir
defp deps do
[
{:defql, "~> 0.1.1"},
{:postgrex, ">= 0.13.0"}, # optional
]
end
```
### Configuration
It requires `adapter` key, and adapter specific options.
Use with ecto:
```elixir
config :defql, connection: [
adapter: Defql.Adapter.Ecto.Postgres,
repo: Taped.Repo
]
```
Use standalone connection:
```elixir
config :defql, connection: [
adapter: Defql.Adapter.Postgres,
hostname: "localhost",
username: "username",
password: "password",
database: "my_db",
pool: DBConnection.Poolboy,
pool_size: 1
]
```
## Usage
We can define module to have access to our database:
```elixir
defmodule UserQuery do
use Defql
defselect get(conds), table: :users
definsert add(params), table: :users
defupdate update(params, conds), table: :users
defdelete delete(conds), table: :users
defquery get_by_name(name, limit) do
"SELECT * FROM users WHERE name = $name LIMIT $limit"
end
end
```
Right now we have easy access to `users` in database:
```elixir
UserQuery.get(id: "3") # => {:ok, [%{...}]}
UserQuery.add(name: "Smbdy") # => {:ok, [%{...}]}
UserQuery.update([name: "Other"],[id: "2"]) # => {:ok, [%{...}]}
UserQuery.delete(id: "2") # => {:ok, [%{...}]}
UserQuery.get_by_name("Ela", 4) # => {:ok, [%{...}, %{...}]}
```
We can also define a common table for the whole module.
```elixir
defmodule UserQuery do
use Defql, table: :users
defselect get(conds)
definsert add(params)
defupdate update(params, conds)
defdelete delete(conds)
end
```
`%{...}` is a map of user properties straight from the database.
Supported condition statements:
- `[user_id: [1,2,3,4]]`
- `[user_id: {:in, [1,2,3,4,5]}]`
- `[name: {:like, "%john%"}]`
- `[name: {:ilike, "%john"}]`
"""
@doc false
defmacro __using__(opts) do
quote do
Module.put_attribute(__MODULE__,
:table,
Keyword.get(unquote(opts),
:table))
def resolve_table(opts) do
Keyword.get(opts, :table) ||
@table ||
raise(ArgumentError, "table wasn't specified")
end
import Defql.Macros.Defquery
import Defql.Macros.Definsert
import Defql.Macros.Defdelete
import Defql.Macros.Defupdate
import Defql.Macros.Defselect
end
end
end
|
lib/defql.ex
| 0.682468
| 0.875202
|
defql.ex
|
starcoder
|
defmodule Bonny.Controller do
@moduledoc """
`Bonny.Controller` defines controller behaviours and generates boilerplate for generating Kubernetes manifests.
> A custom controller is a controller that users can deploy and update on a running cluster, independently of the cluster’s own lifecycle. Custom controllers can work with any kind of resource, but they are especially effective when combined with custom resources. The Operator pattern is one example of such a combination. It allows developers to encode domain knowledge for specific applications into an extension of the Kubernetes API.
Controllers allow for simple `add`, `modify`, and `delete` handling of custom resources in the Kubernetes API.
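
A minimal controller sketch (the module and resource names are illustrative):

    defmodule MyOperator.V1.Widget do
      use Bonny.Controller

      @impl true
      def add(_payload), do: :ok

      @impl true
      def modify(_payload), do: :ok

      @impl true
      def delete(_payload), do: :ok
    end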
"""
@callback add(map()) :: :ok | :error
@callback modify(map()) :: :ok | :error
@callback delete(map()) :: :ok | :error
@doc false
defmacro __using__(_opts) do
quote do
import Bonny.Controller
Module.register_attribute(__MODULE__, :rule, accumulate: true)
@behaviour Bonny.Controller
@group nil
@version nil
@scope nil
@names nil
@before_compile Bonny.Controller
end
end
@doc false
defmacro __before_compile__(_env) do
quote do
@doc """
Kubernetes CRD manifest spec
"""
@spec crd_spec() :: Bonny.CRD.t()
def crd_spec do
module_components =
__MODULE__
|> Macro.to_string()
|> String.split(".")
|> Enum.reverse()
name = Enum.at(module_components, 0)
version = module_components |> Enum.at(1, "v1") |> String.downcase()
%Bonny.CRD{
group: @group || Bonny.Config.group(),
scope: @scope || :namespaced,
version: @version || version,
names: @names || crd_spec_names(name)
}
end
@doc """
Kubernetes RBAC rules
"""
def rules() do
Enum.reduce(@rule, [], fn {api, resources, verbs}, acc ->
rule = %{
apiGroups: [api],
resources: resources,
verbs: verbs
}
[rule | acc]
end)
end
defp crd_spec_names(name) do
singular = Macro.underscore(name)
%{
plural: "#{singular}s",
singular: singular,
kind: name,
short_names: nil
}
end
end
end
end
|
lib/bonny/controller.ex
| 0.883418
| 0.502258
|
controller.ex
|
starcoder
|
defmodule Membrane.ParentSpec do
@moduledoc """
Structure representing the topology of a pipeline/bin.
It can be incorporated into a pipeline or a bin by returning
`t:Membrane.Pipeline.Action.spec_t/0` or `t:Membrane.Bin.Action.spec_t/0`
action, respectively. This commonly happens within `c:Membrane.Pipeline.handle_init/1`
and `c:Membrane.Bin.handle_init/1`, but can be done in any other callback also.
## Children
Children that should be spawned when the pipeline/bin starts can be defined
with the `:children` field.
You have to set it to a map, where keys are valid children names (`t:Membrane.Child.name_t/0`)
that are unique within this pipeline/bin and values are either child's module or
struct of that module.
Sample definitions:
%{
first_element: %Element.With.Options.Struct{option_a: 42},
some_element: Element.Without.Options,
some_bin: Bin.Using.Default.Options
}
## Links
Links that should be made when the children are spawned can be defined with the
`:links` field. Links can be defined with the use of `link/1` and `to/2` functions
that allow specifying elements linked, and `via_in/2` and `via_out/2` that allow
specifying pads' names and parameters. If pads are not specified, name `:input`
is assumed for inputs and `:output` for outputs.
Sample definition:
[
link(:source_a)
|> to(:converter)
|> via_in(:input_a, target_queue_size: 20)
|> to(:mixer),
link(:source_b)
|> via_out(:custom_output)
|> via_in(:input_b, options: [mute: true])
|> to(:mixer)
|> via_in(:input, toilet_capacity: 500)
|> to(:sink)
]
See the docs for `via_in/3` and `via_out/3` for details on pad properties that can be set.
Links can also contain children definitions, for example:
[
link(:first_element, %Element.With.Options.Struct{option_a: 42})
|> to(:some_element, Element.Without.Options)
|> to(:element_specified_in_children)
]
Which is particularly convenient for creating links conditionally:
maybe_link = &to(&1, :some_element, Some.Element)
[
link(:first_element)
|> then(if condition?, do: maybe_link, else: & &1)
|> to(:another_element)
]
You can also use `link_linear/1` in order to link subsequent children using default pads
(linking `:input` to `:output` of previous element). That might be especially helpful when creating
testing pipelines.
children = [source: Some.Source, filter: Some.Filter, sink: Some.Sink]
links = link_linear(children)
### Bins
For bins boundaries, there are special links allowed. The user should define links
between the bin's input and the first child's input (input-input type) and last
child's output and bin output (output-output type). In this case, `link_bin_input/1`
and `to_bin_output/2` should be used.
Sample definition:
[
link_bin_input() |> to(:filter1) |> to(:filter2) |> to_bin_output(:custom_output)
]
### Dynamic pads
In most cases, dynamic pads can be linked the same way as static ones, although
in the following situations, exact pad reference must be passed instead of a name:
- When that reference is needed later, for example, to handle a notification related
to that particular pad instance
pad = Pad.ref(:output, make_ref())
[
link(:tee) |> via_out(pad) |> to(:sink)
]
- When linking dynamic pads of a bin with its children, for example in
`c:Membrane.Bin.handle_pad_added/3`
@impl true
def handle_pad_added(Pad.ref(:input, _) = pad, _ctx, state) do
links = [link_bin_input(pad) |> to(:mixer)]
{{:ok, spec: %ParentSpec{links: links}}, state}
end
## Stream sync
`:stream_sync` field can be used for specifying elements that should start playing
at the same moment. An example can be audio and video player sinks. This option
accepts either `:sinks` atom or a list of groups (lists) of elements. Passing `:sinks`
results in synchronizing all sinks in the pipeline, while passing a list of groups
of elements synchronizes all elements in each group. It is worth mentioning
that to keep the stream synchronized all involved elements need to rely on
the same clock.
By default, no elements are synchronized.
Sample definitions:
```
%ParentSpec{stream_sync: [[:element1, :element2], [:element3, :element4]]}
%ParentSpec{stream_sync: :sinks}
```
## Clock provider
A clock provider is an element that exports a clock that should be used as the pipeline
clock. The pipeline clock is the default clock used by elements' timers.
For more information see `Membrane.Element.Base.def_clock/1`.
## Crash groups
A crash group is a logical entity that prevents the whole pipeline from crashing when one of
its children crashes.
### Adding children to a crash group
```elixir
children = %{
:some_element_1 => %SomeElement{
# ...
},
:some_element_2 => %SomeElement{
# ...
}
}
spec = %ParentSpec{children: children, crash_group: {group_id, :temporary}}
```
The crash group is defined by a two-element tuple, first element is an ID which is of type
`Membrane.CrashGroup.name_t()`, and the second is a mode. Currently, we support only
`:temporary` mode which means that Membrane will not make any attempts to restart crashed child.
In the above snippet, we create new children - `:some_element_1` and `:some_element_2` - and add them
to the crash group with id `group_id`. A crash of `:some_element_1` or `:some_element_2` propagates
only to the rest of the members of the crash group, and the pipeline stays alive.
Currently, a crash group covers all children within one or more `ParentSpec`s.
### Handling crash of a crash group
When any of the members of the crash group goes down, the callback:
[`handle_crash_group_down/3`](https://hexdocs.pm/membrane_core/Membrane.Pipeline.html#c:handle_crash_group_down/3)
is called.
```elixir
@impl true
def handle_crash_group_down(crash_group_id, ctx, state) do
# do some stuff in reaction to crash of group with id crash_group_id
end
```
### Limitations
At this moment crash groups are only useful for elements with dynamic pads.
Crash groups work only in pipelines and are not supported in bins.
## Log metadata
`:log_metadata` field can be used to set the `Membrane.Logger` metadata for all children from that
`Membrane.ParentSpec`
"""
alias Membrane.{Child, Pad}
alias Membrane.ParentError
require Membrane.Pad
defmodule LinkBuilder do
@moduledoc false
use Bunch.Access
defstruct children: [], links: [], status: nil
@type t :: %__MODULE__{
children: [{Child.name_t(), module | struct}],
links: [map],
status: status_t
}
@type status_t :: :from | :from_pad | :to_pad | :done
@spec update(t, status_t, Keyword.t()) :: t
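# When the last link entry is complete (status :done), start a new entry
# whose `from` is that entry's `to`, so a chained `|> to(...)` continues
# from the previously linked child.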
def update(
%__MODULE__{links: [%{to: to} | _] = links, status: :done} = builder,
status,
entries
) do
%__MODULE__{builder | links: [Map.new([from: to] ++ entries) | links], status: status}
end
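# Otherwise, merge the new entries into the link entry currently being built.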
def update(%__MODULE__{links: [link | links]} = builder, status, entries) do
%__MODULE__{builder | links: [Map.merge(link, Map.new(entries)) | links], status: status}
end
end
@opaque link_builder_t :: LinkBuilder.t()
@type child_spec_t :: module | struct
@type children_spec_t ::
[{Child.name_t(), child_spec_t}]
| %{Child.name_t() => child_spec_t}
@type pad_options_t :: Keyword.t()
@type links_spec_t :: [link_builder_t() | links_spec_t]
@type crash_group_spec_t :: {any(), :temporary} | nil
@typedoc """
Struct used when starting and linking children within a pipeline or a bin.
"""
@type t :: %__MODULE__{
children: children_spec_t,
links: links_spec_t,
crash_group: crash_group_spec_t() | nil,
stream_sync: :sinks | [[Child.name_t()]],
clock_provider: Child.name_t() | nil,
node: node() | nil,
log_metadata: Keyword.t()
}
defstruct children: %{},
links: [],
crash_group: nil,
stream_sync: [],
clock_provider: nil,
node: nil,
log_metadata: []
@doc """
Begins a link.
See the _links_ section of the moduledoc for more information.
"""
@spec link(Child.name_t()) :: link_builder_t()
def link(child_name) do
%LinkBuilder{links: [%{from: child_name}], status: :from}
end
@doc """
Defines a child and begins a link with it.
See the _links_ section of the moduledoc for more information.
"""
@spec link(Child.name_t(), child_spec_t()) :: link_builder_t()
def link(child_name, child_spec) do
link(child_name) |> Map.update!(:children, &[{child_name, child_spec} | &1])
end
@doc """
Begins a link with a bin's pad.
See the _links_ section of the moduledoc for more information.
"""
@spec link_bin_input(Pad.name_t() | Pad.ref_t()) :: link_builder_t() | no_return
def link_bin_input(pad \\ :input) do
:ok = validate_pad_name(pad)
link({Membrane.Bin, :itself})
|> LinkBuilder.update(:from_pad, from_pad: pad, from_pad_props: %{})
end
@doc """
Specifies output pad name and properties of the preceding child.
The possible properties are:
- `options` - If a pad defines options, they can be passed here as a keyword list. Pad options are documented
in moduledoc of each element. See `Membrane.Element.WithOutputPads.def_output_pad/2` and `Membrane.Bin.def_output_pad/2`
for information about defining pad options.
See the _links_ section of the moduledoc for more information.
"""
@spec via_out(link_builder_t(), Pad.name_t() | Pad.ref_t(), options: pad_options_t()) ::
link_builder_t() | no_return
def via_out(builder, pad, props \\ [])
def via_out(%LinkBuilder{status: :from_pad}, pad, _props) do
raise ParentError,
"Invalid link specification: output #{inspect(pad)} placed after another output or bin's input"
end
def via_out(%LinkBuilder{status: :to_pad}, pad, _props) do
raise ParentError, "Invalid link specification: output #{inspect(pad)} placed after an input"
end
def via_out(%LinkBuilder{links: [%{to: {Membrane.Bin, :itself}} | _]}, pad, _props) do
raise ParentError,
"Invalid link specification: output #{inspect(pad)} placed after bin's output"
end
def via_out(%LinkBuilder{} = builder, pad, props) do
:ok = validate_pad_name(pad)
props =
case Bunch.Config.parse(props, options: [default: []]) do
{:ok, props} ->
props
{:error, reason} ->
raise ParentError, "Invalid link specification: invalid pad props: #{inspect(reason)}"
end
LinkBuilder.update(builder, :from_pad,
from_pad: pad,
from_pad_props: props
)
end
@doc """
Specifies input pad name and properties of the subsequent child.
The possible properties are:
- `options` - If a pad defines options, they can be passed here as a keyword list. Pad options are documented
in moduledoc of each element. See `Membrane.Element.WithInputPads.def_input_pad/2` and `Membrane.Bin.def_input_pad/2`
for information about defining pad options.
Additionally, the following properties can be used to adjust the flow control parameters. If set within a bin
on an input that connects to the bin input, they will be overridden if set when linking to the bin in its parent.
- `toilet_capacity` - Used when a toilet is created, that is for pull input pads that have push output pads
linked to them. When a push output produces more buffers than the pull input can consume, the buffers are accumulated
in a queue called toilet. If the toilet size grows above its capacity, it overflows by raising an error.
- `target_queue_size` - The size of the queue of the input pad that Membrane will try to maintain. That allows for fulfilling
the demands of the element by taking data from the queue while the actual sending of demands is done asynchronously,
smoothing the processing. Used only for pads working in pull mode with manual demands. See `t:Membrane.Pad.mode_t/0`
and `t:Membrane.Pad.demand_mode_t/0` for more info.
- `min_demand_factor` - A factor used to calculate `minimal demand` (`minimal_demand = target_queue_size * min_demand_factor`).
Membrane won't send demands smaller than the `minimal demand`, to reduce demand overhead. However, the user will always receive
as many buffers as they actually demanded; all excess buffers will be queued internally.
Used only for pads working in pull mode with manual demands. See `t:Membrane.Pad.mode_t/0` and `t:Membrane.Pad.demand_mode_t/0`
for more info. Defaults to `#{Membrane.Core.Element.InputQueue.default_min_demand_factor()}` (the default may change in the future).
- `auto_demand_size` - Size of automatically generated demands. Used only for pads working in pull mode with automatic demands.
See `t:Membrane.Pad.mode_t/0` and `t:Membrane.Pad.demand_mode_t/0` for more info.
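For example (values are illustrative):

    link(:source)
    |> via_in(:input, target_queue_size: 40, toilet_capacity: 500)
    |> to(:sink)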
See the _links_ section of the moduledoc for more information.
"""
@spec via_in(link_builder_t(), Pad.name_t() | Pad.ref_t(),
options: pad_options_t(),
toilet_capacity: number | nil,
target_queue_size: number | nil,
min_demand_factor: number | nil,
auto_demand_size: number | nil
) ::
link_builder_t() | no_return
def via_in(builder, pad, props \\ [])
def via_in(%LinkBuilder{status: :to_pad}, pad, _props) do
raise ParentError,
"Invalid link specification: input #{inspect(pad)} placed after another input"
end
def via_in(%LinkBuilder{links: [%{to: {Membrane.Bin, :itself}} | _]}, pad, _props) do
raise ParentError,
"Invalid link specification: input #{inspect(pad)} placed after bin's output"
end
def via_in(%LinkBuilder{} = builder, pad, props) do
:ok = validate_pad_name(pad)
props =
props
|> Bunch.Config.parse(
options: [default: []],
target_queue_size: [default: nil],
min_demand_factor: [default: nil],
auto_demand_size: [default: nil],
toilet_capacity: [default: nil]
)
|> case do
{:ok, props} ->
props
{:error, reason} ->
raise ParentError, "Invalid link specification: invalid pad props: #{inspect(reason)}"
end
if builder.status == :from_pad do
builder
else
via_out(builder, :output)
end
|> LinkBuilder.update(:to_pad,
to_pad: pad,
to_pad_props: props
)
end
@doc """
Continues or ends a link.
See the _links_ section of the moduledoc for more information.
"""
@spec to(link_builder_t(), Child.name_t()) :: link_builder_t() | no_return
def to(%LinkBuilder{links: [%{to: {Membrane.Bin, :itself}} | _]}, child_name) do
raise ParentError,
"Invalid link specification: child #{inspect(child_name)} placed after bin's output"
end
def to(%LinkBuilder{} = builder, child_name) do
if builder.status == :to_pad do
builder
else
via_in(builder, :input)
end
|> LinkBuilder.update(:done, to: child_name)
end
@doc """
Defines a child and continues or ends a link with it.
See the _links_ section of the moduledoc for more information.
"""
@spec to(link_builder_t(), Child.name_t(), child_spec_t()) :: link_builder_t() | no_return
def to(%LinkBuilder{} = builder, child_name, child_spec) do
builder |> to(child_name) |> Map.update!(:children, &[{child_name, child_spec} | &1])
end
@doc """
Ends a link with a bin's output.
See the _links_ section of the moduledoc for more information.
"""
@spec to_bin_output(link_builder_t(), Pad.name_t() | Pad.ref_t()) ::
link_builder_t() | no_return
def to_bin_output(builder, pad \\ :output)
def to_bin_output(%LinkBuilder{status: :to_pad}, pad) do
raise ParentError, "Invalid link specification: bin's output #{pad} placed after an input"
end
def to_bin_output(builder, pad) do
:ok = validate_pad_name(pad)
if builder.status == :from_pad do
builder
else
via_out(builder, :output)
end
|> LinkBuilder.update(:to_pad, to_pad: pad, to_pad_props: %{})
|> to({Membrane.Bin, :itself})
end
@doc """
Links subsequent children using default pads (linking `:input` to `:output` of
previous element). The list of children must consist of at least 2 elements.
## Example
Membrane.ParentSpec.link_linear([el1: MembraneElement1, el2: MembraneElement2])
"""
@spec link_linear(children :: [child_spec_t()]) :: links_spec_t()
def link_linear(children) when is_list(children) and length(children) > 1 do
[{first_child_name, first_child_spec} | other_children] = children
links =
other_children
|> Enum.reduce(link(first_child_name, first_child_spec), fn {child_name, child_spec},
builder ->
to(builder, child_name, child_spec)
end)
[links]
end
defp validate_pad_name(pad) when Pad.is_pad_name(pad) or Pad.is_pad_ref(pad) do
:ok
end
defp validate_pad_name(pad) do
raise ParentError, "Invalid link specification: invalid pad name: #{inspect(pad)}"
end
end
|
lib/membrane/parent_spec.ex
| 0.936052
| 0.863103
|
parent_spec.ex
|
starcoder
|
defmodule Imagineer.Image.PNG.Filter.Basic do
alias Imagineer.Image.PNG
alias PNG.Filter.Basic
import PNG.Helpers, only: [bytes_per_pixel: 2, bytes_per_row: 3, null_binary: 1]
@none 0
@sub 1
@up 2
@average 3
@paeth 4
@doc """
Takes an image's scanlines and returns the rows unfiltered.
Types are defined [here](http://www.w3.org/TR/PNG-Filters.html).
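For example (a sketch; the exact color format atom is an assumption, see
`PNG.Helpers`):

    image = %PNG{color_format: :grayscale, bit_depth: 8, width: 2}
    unfilter([<<1, 10, 5>>], image)
    # Type byte 1 selects the Sub filter: each byte is added (mod 256) to
    # the reconstructed byte to its left, yielding [<<10, 15>>]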
"""
def unfilter(scanlines, %PNG{
color_format: color_format,
bit_depth: bit_depth,
width: width
}) do
# For unfiltering, the row prior to the first is assumed to be all 0s
ghost_row = null_binary(bytes_per_row(color_format, bit_depth, width))
unfilter(scanlines, ghost_row, bytes_per_pixel(color_format, bit_depth), [])
end
defp unfilter([], _prior_row, _bytes_per_pixel, unfiltered) do
Enum.reverse(unfiltered)
end
defp unfilter([filtered_row | filtered_rows], prior_row, bytes_per_pixel, unfiltered) do
unfiltered_row =
pad_row(filtered_row)
|> unfilter_scanline(bytes_per_pixel, prior_row)
unfilter(filtered_rows, unfiltered_row, bytes_per_pixel, [unfiltered_row | unfiltered])
end
defp unfilter_scanline(
<<@none::integer-size(8), row_content::binary>>,
_bytes_per_pixel,
_prior
) do
row_content
end
defp unfilter_scanline(<<@sub::integer-size(8), row_content::binary>>, bytes_per_pixel, _prior) do
Basic.Sub.unfilter(row_content, bytes_per_pixel)
end
defp unfilter_scanline(
<<@up::integer-size(8), row_content::binary>>,
_bytes_per_pixel,
prior_row
) do
Basic.Up.unfilter(row_content, prior_row)
end
defp unfilter_scanline(
<<@average::integer-size(8), row_content::binary>>,
bytes_per_pixel,
prior_row
) do
Basic.Average.unfilter(row_content, prior_row, bytes_per_pixel)
end
defp unfilter_scanline(
<<@paeth::integer-size(8), row_content::binary>>,
bytes_per_pixel,
prior_row
) do
Basic.Paeth.unfilter(row_content, prior_row, bytes_per_pixel)
end
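# Scanlines with sub-byte bit depths may not end on a byte boundary; pad
# with zero bits until the row is byte-aligned.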
defp pad_row(row) when rem(bit_size(row), 8) != 0 do
pad_row(<<row::bits, 0::1>>)
end
defp pad_row(row) do
row
end
@doc """
Filters scanlines. Currently performs a naïve pass (i.e. no filtering).
"""
def filter(unfiltered_rows, _image) do
filter_rows(unfiltered_rows)
end
defp filter_rows(unfiltered_rows) do
filter_rows(unfiltered_rows, [])
end
defp filter_rows([], filtered_rows) do
Enum.reverse(filtered_rows)
end
defp filter_rows([unfiltered_row | rest_unfiltered], filtered_rows) do
filter_rows(rest_unfiltered, [filter_row(unfiltered_row) | filtered_rows])
end
# Prepend the `@none` filter type byte; the row content passes through unfiltered.
defp filter_row(unfiltered_row) do
<<@none::integer-size(8), unfiltered_row::bits>>
end
end
|
lib/imagineer/image/png/filter/basic.ex
| 0.646014
| 0.448366
|
basic.ex
|
starcoder
|
defmodule Rummage.Phoenix.Plug do
@moduledoc """
This plug ensures that the `rummage` params are properly set before
`index` action of the controller. If they are not, then it formats them
accordingly.
This plug only works with the default `Rummage.Ecto` hooks.
"""
@doc """
`init` initializes the plug and returns the `params` passed
to it:
## Examples
iex> alias Rummage.Phoenix.Plug
iex> params = %{}
iex> Plug.init(params)
%{}
"""
def init(params) do
params
end
@doc """
`call/2` ensures the `rummage` params are set on the connection before the
`index` action:
## Examples
iex> params = %{}
iex> conn = %Plug.Conn{}
iex> Rummage.Phoenix.Plug.call(conn, params) == conn
true
iex> params = %{hooks: ["search", "sort", "paginate"]}
iex> conn = %{__struct__: Plug.Conn, params: %{}, private: %{phoenix_action: :index}}
iex> Rummage.Phoenix.Plug.call(conn, params) == %{__struct__: Plug.Conn, params: %{"rummage" => %{"paginate" => %{}, "search" => %{}, "sort" => %{}}}, private: %{phoenix_action: :index}}
true
iex> params = %{hooks: ["search", "sort", "paginate"]}
iex> conn = %{__struct__: Plug.Conn, params: %{"rummage" => %{}}, private: %{phoenix_action: :index}}
iex> Rummage.Phoenix.Plug.call(conn, params) == %{__struct__: Plug.Conn, params: %{"rummage" => %{"paginate" => %{}, "search" => %{}, "sort" => %{}}}, private: %{phoenix_action: :index}}
true
"""
def call(conn, opts) do
hooks = opts[:hooks]
before_action(conn, hooks)
end
defp before_action(conn, hooks) do
case conn.private[:phoenix_action] == :index do
true -> %Plug.Conn{conn | params: rummage_params(conn.params, hooks)}
_ -> conn
end
end
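# Ensure the "rummage" key is present: when missing, seed each hook with an
# empty map; otherwise let each hook's controller module (e.g.
# Rummage.Phoenix.SearchController) normalize its own params.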
defp rummage_params(params, hooks) do
case Map.get(params, "rummage") do
nil ->
Map.put(params, "rummage",
Enum.map(hooks, &{&1, %{}})
|> Enum.into(%{})
)
rummage ->
Map.put(params, "rummage",
Enum.map(hooks, &{&1,
apply(String.to_atom("Elixir.Rummage.Phoenix.#{String.capitalize(&1)}Controller"),
:rummage, [rummage])})
|> Enum.into(%{})
)
end
end
end
|
lib/rummage_phoenix/plug.ex
| 0.752695
| 0.418162
|
plug.ex
|
starcoder
|
defmodule AWS.IoTSiteWise do
@moduledoc """
Welcome to the AWS IoT SiteWise API Reference.
AWS IoT SiteWise is an AWS service that connects [Industrial Internet of Things (IIoT)](https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications)
devices to the power of the AWS Cloud. For more information, see the [AWS IoT SiteWise User
Guide](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/). For
information about AWS IoT SiteWise quotas, see
[Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html)
in the *AWS IoT SiteWise User Guide*.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2019-12-02",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "iotsitewise",
global?: false,
protocol: "rest-json",
service_id: "IoTSiteWise",
signature_version: "v4",
signing_name: "iotsitewise",
target_prefix: nil
}
end
@doc """
Associates a child asset with the given parent asset through a hierarchy defined
in the parent asset's model.
For more information, see [Associating assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/add-associated-assets.html)
in the *AWS IoT SiteWise User Guide*.
"""
def associate_assets(%Client{} = client, asset_id, input, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}/associate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Associates a group (batch) of assets with an AWS IoT SiteWise Monitor project.
"""
def batch_associate_project_assets(%Client{} = client, project_id, input, options \\ []) do
url_path = "/projects/#{URI.encode(project_id)}/assets/associate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Disassociates a group (batch) of assets from an AWS IoT SiteWise Monitor
project.
"""
def batch_disassociate_project_assets(%Client{} = client, project_id, input, options \\ []) do
url_path = "/projects/#{URI.encode(project_id)}/assets/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Sends a list of asset property values to AWS IoT SiteWise.
Each value is a timestamp-quality-value (TQV) data point. For more information,
see [Ingesting data using the API](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/ingest-api.html)
in the *AWS IoT SiteWise User Guide*.
To identify an asset property, you must specify one of the following:
* The `assetId` and `propertyId` of an asset property.
* A `propertyAlias`, which is a data stream alias (for example,
`/company/windfarm/3/turbine/7/temperature`). To define an asset property's
alias, see
[UpdateAssetProperty](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html).
With respect to Unix epoch time, AWS IoT SiteWise accepts only TQVs that have a
timestamp of no more than 7 days in the past and no more than 5 minutes in the
future. AWS IoT SiteWise rejects timestamps outside of the inclusive range of
[-7 days, +5 minutes] and returns a `TimestampOutOfRangeException` error.
For each asset property, AWS IoT SiteWise overwrites TQVs with duplicate
timestamps unless the newer TQV has a different quality. For example, if you
store a TQV `{T1, GOOD, V1}`, then storing `{T1, GOOD, V2}` replaces the
existing TQV.
AWS IoT SiteWise authorizes access to each `BatchPutAssetPropertyValue` entry
individually. For more information, see [BatchPutAssetPropertyValue authorization](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-id-based-policies-batchputassetpropertyvalue-action)
in the *AWS IoT SiteWise User Guide*.
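A minimal sketch of an `input` (field names follow the
`BatchPutAssetPropertyValue` request syntax; values are illustrative):

    input = %{
      "entries" => [
        %{
          "entryId" => "turbine-7-temp",
          "propertyAlias" => "/company/windfarm/3/turbine/7/temperature",
          "propertyValues" => [
            %{
              "value" => %{"doubleValue" => 21.5},
              "timestamp" => %{"timeInSeconds" => 1_609_459_200},
              "quality" => "GOOD"
            }
          ]
        }
      ]
    }

    batch_put_asset_property_value(client, input)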
"""
def batch_put_asset_property_value(%Client{} = client, input, options \\ []) do
url_path = "/properties"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates an access policy that grants the specified identity (AWS SSO user, AWS
SSO group, or IAM user) access to the specified AWS IoT SiteWise Monitor portal
or project resource.
"""
def create_access_policy(%Client{} = client, input, options \\ []) do
url_path = "/access-policies"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Creates an asset from an existing asset model.
For more information, see [Creating assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-assets.html)
in the *AWS IoT SiteWise User Guide*.
"""
def create_asset(%Client{} = client, input, options \\ []) do
url_path = "/assets"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Creates an asset model from specified property and hierarchy definitions.
You create assets from asset models. With asset models, you can easily create
assets of the same type that have standardized definitions. Each asset created
from a model inherits the asset model's property and hierarchy definitions. For
more information, see [Defining asset models](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/define-models.html)
in the *AWS IoT SiteWise User Guide*.
"""
def create_asset_model(%Client{} = client, input, options \\ []) do
url_path = "/asset-models"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Creates a dashboard in an AWS IoT SiteWise Monitor project.
"""
def create_dashboard(%Client{} = client, input, options \\ []) do
url_path = "/dashboards"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Creates a gateway, which is a virtual or edge device that delivers industrial
data streams from local servers to AWS IoT SiteWise.
For more information, see [Ingesting data using a gateway](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/gateway-connector.html)
in the *AWS IoT SiteWise User Guide*.
"""
def create_gateway(%Client{} = client, input, options \\ []) do
url_path = "/20200301/gateways"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Creates a portal, which can contain projects and dashboards.
AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and
manage user permissions.
Before you can sign in to a new portal, you must add at least one identity to
that portal. For more information, see [Adding or removing portal administrators](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/administer-portals.html#portal-change-admins)
in the *AWS IoT SiteWise User Guide*.
"""
def create_portal(%Client{} = client, input, options \\ []) do
url_path = "/portals"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Creates a project in the specified portal.
"""
def create_project(%Client{} = client, input, options \\ []) do
url_path = "/projects"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Deletes an access policy that grants the specified identity access to the
specified AWS IoT SiteWise Monitor resource.
You can use this operation to revoke access to an AWS IoT SiteWise Monitor
resource.
"""
def delete_access_policy(%Client{} = client, access_policy_id, input, options \\ []) do
url_path = "/access-policies/#{URI.encode(access_policy_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes an asset.
This action can't be undone. For more information, see [Deleting assets and models](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/delete-assets-and-models.html)
in the *AWS IoT SiteWise User Guide*.
You can't delete an asset that's associated to another asset. For more
information, see
[DisassociateAssets](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DisassociateAssets.html).
"""
def delete_asset(%Client{} = client, asset_id, input, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Deletes an asset model.
This action can't be undone. You must delete all assets created from an asset
model before you can delete the model. Also, you can't delete an asset model if
a parent asset model exists that contains a property formula expression that
depends on the asset model that you want to delete. For more information, see
[Deleting assets and models](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/delete-assets-and-models.html)
in the *AWS IoT SiteWise User Guide*.
"""
def delete_asset_model(%Client{} = client, asset_model_id, input, options \\ []) do
url_path = "/asset-models/#{URI.encode(asset_model_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Deletes a dashboard from AWS IoT SiteWise Monitor.
"""
def delete_dashboard(%Client{} = client, dashboard_id, input, options \\ []) do
url_path = "/dashboards/#{URI.encode(dashboard_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Deletes a gateway from AWS IoT SiteWise.
When you delete a gateway, some of the gateway's files remain in your gateway's
file system.
"""
def delete_gateway(%Client{} = client, gateway_id, input, options \\ []) do
url_path = "/20200301/gateways/#{URI.encode(gateway_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a portal from AWS IoT SiteWise Monitor.
"""
def delete_portal(%Client{} = client, portal_id, input, options \\ []) do
url_path = "/portals/#{URI.encode(portal_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Deletes a project from AWS IoT SiteWise Monitor.
"""
def delete_project(%Client{} = client, project_id, input, options \\ []) do
url_path = "/projects/#{URI.encode(project_id)}"
headers = []
{query_params, input} =
[
{"clientToken", "clientToken"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
204
)
end
@doc """
Describes an access policy, which specifies an identity's access to an AWS IoT
SiteWise Monitor portal or project.
"""
def describe_access_policy(%Client{} = client, access_policy_id, options \\ []) do
url_path = "/access-policies/#{URI.encode(access_policy_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves information about an asset.
"""
def describe_asset(%Client{} = client, asset_id, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about an asset model.
"""
def describe_asset_model(%Client{} = client, asset_model_id, options \\ []) do
url_path = "/asset-models/#{URI.encode(asset_model_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about an asset property.
When you call this operation for an attribute property, this response includes
the default attribute value that you define in the asset model. If you update
the default value in the model, this operation's response includes the new
default value.
This operation doesn't return the value of the asset property. To get the value
of an asset property, use
[GetAssetPropertyValue](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_GetAssetPropertyValue.html).
"""
def describe_asset_property(%Client{} = client, asset_id, property_id, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}/properties/#{URI.encode(property_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about a dashboard.
"""
def describe_dashboard(%Client{} = client, dashboard_id, options \\ []) do
url_path = "/dashboards/#{URI.encode(dashboard_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves information about the default encryption configuration for the AWS
account in the default or specified region.
For more information, see [Key management](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html)
in the *AWS IoT SiteWise User Guide*.
"""
def describe_default_encryption_configuration(%Client{} = client, options \\ []) do
url_path = "/configuration/account/encryption"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about a gateway.
"""
def describe_gateway(%Client{} = client, gateway_id, options \\ []) do
url_path = "/20200301/gateways/#{URI.encode(gateway_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about a gateway capability configuration.
Each gateway capability defines data sources for a gateway. A capability
configuration can contain multiple data source configurations. If you define
OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA
sources are stored in one capability configuration. To list all capability
configurations for a gateway, use
[DescribeGateway](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGateway.html).
"""
def describe_gateway_capability_configuration(
%Client{} = client,
capability_namespace,
gateway_id,
options \\ []
) do
url_path =
"/20200301/gateways/#{URI.encode(gateway_id)}/capability/#{URI.encode(capability_namespace)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves the current AWS IoT SiteWise logging options.
"""
def describe_logging_options(%Client{} = client, options \\ []) do
url_path = "/logging"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves information about a portal.
"""
def describe_portal(%Client{} = client, portal_id, options \\ []) do
url_path = "/portals/#{URI.encode(portal_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves information about a project.
"""
def describe_project(%Client{} = client, project_id, options \\ []) do
url_path = "/projects/#{URI.encode(project_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Disassociates a child asset from the given parent asset through a hierarchy
defined in the parent asset's model.
"""
def disassociate_assets(%Client{} = client, asset_id, input, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}/disassociate"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Gets aggregated values for an asset property.
For more information, see [Querying aggregates](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/query-industrial-data.html#aggregates)
in the *AWS IoT SiteWise User Guide*.
To identify an asset property, you must specify one of the following:
* The `assetId` and `propertyId` of an asset property.
* A `propertyAlias`, which is a data stream alias (for example,
`/company/windfarm/3/turbine/7/temperature`). To define an asset property's
alias, see
[UpdateAssetProperty](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html).
"""
def get_asset_property_aggregates(
%Client{} = client,
aggregate_types,
asset_id \\ nil,
end_date,
max_results \\ nil,
next_token \\ nil,
property_alias \\ nil,
property_id \\ nil,
qualities \\ nil,
resolution,
start_date,
time_ordering \\ nil,
options \\ []
) do
url_path = "/properties/aggregates"
headers = []
query_params = []
query_params =
if !is_nil(time_ordering) do
[{"timeOrdering", time_ordering} | query_params]
else
query_params
end
query_params =
if !is_nil(start_date) do
[{"startDate", start_date} | query_params]
else
query_params
end
query_params =
if !is_nil(resolution) do
[{"resolution", resolution} | query_params]
else
query_params
end
query_params =
if !is_nil(qualities) do
[{"qualities", qualities} | query_params]
else
query_params
end
query_params =
if !is_nil(property_id) do
[{"propertyId", property_id} | query_params]
else
query_params
end
query_params =
if !is_nil(property_alias) do
[{"propertyAlias", property_alias} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(end_date) do
[{"endDate", end_date} | query_params]
else
query_params
end
query_params =
if !is_nil(asset_id) do
[{"assetId", asset_id} | query_params]
else
query_params
end
query_params =
if !is_nil(aggregate_types) do
[{"aggregateTypes", aggregate_types} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets an asset property's current value.
For more information, see [Querying current values](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/query-industrial-data.html#current-values)
in the *AWS IoT SiteWise User Guide*.
To identify an asset property, you must specify one of the following:
* The `assetId` and `propertyId` of an asset property.
* A `propertyAlias`, which is a data stream alias (for example,
`/company/windfarm/3/turbine/7/temperature`). To define an asset property's
alias, see
[UpdateAssetProperty](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html).
"""
def get_asset_property_value(
%Client{} = client,
asset_id \\ nil,
property_alias \\ nil,
property_id \\ nil,
options \\ []
) do
url_path = "/properties/latest"
headers = []
query_params = []
query_params =
if !is_nil(property_id) do
[{"propertyId", property_id} | query_params]
else
query_params
end
query_params =
if !is_nil(property_alias) do
[{"propertyAlias", property_alias} | query_params]
else
query_params
end
query_params =
if !is_nil(asset_id) do
[{"assetId", asset_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets the history of an asset property's values.
For more information, see [Querying historical values](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/query-industrial-data.html#historical-values)
in the *AWS IoT SiteWise User Guide*.
To identify an asset property, you must specify one of the following:
* The `assetId` and `propertyId` of an asset property.
* A `propertyAlias`, which is a data stream alias (for example,
`/company/windfarm/3/turbine/7/temperature`). To define an asset property's
alias, see
[UpdateAssetProperty](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html).
"""
def get_asset_property_value_history(
%Client{} = client,
asset_id \\ nil,
end_date \\ nil,
max_results \\ nil,
next_token \\ nil,
property_alias \\ nil,
property_id \\ nil,
qualities \\ nil,
start_date \\ nil,
time_ordering \\ nil,
options \\ []
) do
url_path = "/properties/history"
headers = []
query_params = []
query_params =
if !is_nil(time_ordering) do
[{"timeOrdering", time_ordering} | query_params]
else
query_params
end
query_params =
if !is_nil(start_date) do
[{"startDate", start_date} | query_params]
else
query_params
end
query_params =
if !is_nil(qualities) do
[{"qualities", qualities} | query_params]
else
query_params
end
query_params =
if !is_nil(property_id) do
[{"propertyId", property_id} | query_params]
else
query_params
end
query_params =
if !is_nil(property_alias) do
[{"propertyAlias", property_alias} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(end_date) do
[{"endDate", end_date} | query_params]
else
query_params
end
query_params =
if !is_nil(asset_id) do
[{"assetId", asset_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of access policies for an identity (an AWS SSO user,
an AWS SSO group, or an IAM user) or an AWS IoT SiteWise Monitor resource (a
portal or project).
"""
def list_access_policies(
%Client{} = client,
iam_arn \\ nil,
identity_id \\ nil,
identity_type \\ nil,
max_results \\ nil,
next_token \\ nil,
resource_id \\ nil,
resource_type \\ nil,
options \\ []
) do
url_path = "/access-policies"
headers = []
query_params = []
query_params =
if !is_nil(resource_type) do
[{"resourceType", resource_type} | query_params]
else
query_params
end
query_params =
if !is_nil(resource_id) do
[{"resourceId", resource_id} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(identity_type) do
[{"identityType", identity_type} | query_params]
else
query_params
end
query_params =
if !is_nil(identity_id) do
[{"identityId", identity_id} | query_params]
else
query_params
end
query_params =
if !is_nil(iam_arn) do
[{"iamArn", iam_arn} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a paginated list of summaries of all asset models.
"""
def list_asset_models(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/asset-models"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of asset relationships for an asset.
You can use this operation to identify an asset's root asset and all associated
assets between that asset and its root.
"""
def list_asset_relationships(
%Client{} = client,
asset_id,
max_results \\ nil,
next_token \\ nil,
traversal_type,
options \\ []
) do
url_path = "/assets/#{URI.encode(asset_id)}/assetRelationships"
headers = []
query_params = []
query_params =
if !is_nil(traversal_type) do
[{"traversalType", traversal_type} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of asset summaries.
You can use this operation to do the following:
* List assets based on a specific asset model.
* List top-level assets.
You can't use this operation to list all assets. To retrieve summaries for all
of your assets, use
[ListAssetModels](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_ListAssetModels.html)
to get all of your asset model IDs. Then, use ListAssets to get all assets for
each asset model.
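A minimal sketch of listing assets created from one model (the ID is
illustrative; on success aws-elixir returns `{:ok, result, http_response}`):

    {:ok, result, _http} = list_assets(client, "my-asset-model-id", "ALL")
    asset_summaries = result["assetSummaries"]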
"""
def list_assets(
%Client{} = client,
asset_model_id \\ nil,
filter \\ nil,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/assets"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(filter) do
[{"filter", filter} | query_params]
else
query_params
end
query_params =
if !is_nil(asset_model_id) do
[{"assetModelId", asset_model_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of associated assets.
You can use this operation to do the following:
* List child assets associated to a parent asset by a hierarchy that
you specify.
* List an asset's parent asset.
"""
def list_associated_assets(
%Client{} = client,
asset_id,
hierarchy_id \\ nil,
max_results \\ nil,
next_token \\ nil,
traversal_direction \\ nil,
options \\ []
) do
url_path = "/assets/#{URI.encode(asset_id)}/hierarchies"
headers = []
query_params = []
query_params =
if !is_nil(traversal_direction) do
[{"traversalDirection", traversal_direction} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
query_params =
if !is_nil(hierarchy_id) do
[{"hierarchyId", hierarchy_id} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of dashboards for an AWS IoT SiteWise Monitor
project.
"""
def list_dashboards(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
project_id,
options \\ []
) do
url_path = "/dashboards"
headers = []
query_params = []
query_params =
if !is_nil(project_id) do
[{"projectId", project_id} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a paginated list of gateways.
"""
def list_gateways(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/20200301/gateways"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Retrieves a paginated list of AWS IoT SiteWise Monitor portals.
"""
def list_portals(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/portals"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a paginated list of assets associated with an AWS IoT SiteWise Monitor
project.
"""
def list_project_assets(
%Client{} = client,
project_id,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/projects/#{URI.encode(project_id)}/assets"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a paginated list of projects for an AWS IoT SiteWise Monitor portal.
"""
def list_projects(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
portal_id,
options \\ []
) do
url_path = "/projects"
headers = []
query_params = []
query_params =
if !is_nil(portal_id) do
[{"portalId", portal_id} | query_params]
else
query_params
end
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves the list of tags for an AWS IoT SiteWise resource.
"""
def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
url_path = "/tags"
headers = []
query_params = []
query_params =
if !is_nil(resource_arn) do
[{"resourceArn", resource_arn} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Sets the default encryption configuration for the AWS account.
For more information, see [Key management](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html)
in the *AWS IoT SiteWise User Guide*.
"""
def put_default_encryption_configuration(%Client{} = client, input, options \\ []) do
url_path = "/configuration/account/encryption"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Sets logging options for AWS IoT SiteWise.
"""
def put_logging_options(%Client{} = client, input, options \\ []) do
url_path = "/logging"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds tags to an AWS IoT SiteWise resource.
If a tag already exists for the resource, this operation updates the tag's
value.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
url_path = "/tags"
headers = []
{query_params, input} =
[
{"resourceArn", "resourceArn"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes a tag from an AWS IoT SiteWise resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
url_path = "/tags"
headers = []
{query_params, input} =
[
{"resourceArn", "resourceArn"},
{"tagKeys", "tagKeys"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an existing access policy that specifies an identity's access to an AWS
IoT SiteWise Monitor portal or project resource.
"""
def update_access_policy(%Client{} = client, access_policy_id, input, options \\ []) do
url_path = "/access-policies/#{URI.encode(access_policy_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates an asset's name.
For more information, see [Updating assets and models](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/update-assets-and-models.html)
in the *AWS IoT SiteWise User Guide*.
"""
def update_asset(%Client{} = client, asset_id, input, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Updates an asset model and all of the assets that were created from the model.
Each asset created from the model inherits the updated asset model's property
and hierarchy definitions. For more information, see [Updating assets and models](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/update-assets-and-models.html)
in the *AWS IoT SiteWise User Guide*.
This operation overwrites the existing model with the provided model. To avoid
deleting your asset model's properties or hierarchies, you must include their
IDs and definitions in the updated asset model payload. For more information,
see
[DescribeAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeAssetModel.html).
If you remove a property from an asset model, AWS IoT SiteWise deletes all
previous data for that property. If you remove a hierarchy definition from an
asset model, AWS IoT SiteWise disassociates every asset associated with that
hierarchy. You can't change the type or data type of an existing property.
"""
def update_asset_model(%Client{} = client, asset_model_id, input, options \\ []) do
url_path = "/asset-models/#{URI.encode(asset_model_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Updates an asset property's alias and notification state.
This operation overwrites the property's existing alias and notification state.
To keep your existing property's alias or notification state, you must include
the existing values in the UpdateAssetProperty request. For more information,
see
[DescribeAssetProperty](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeAssetProperty.html).
"""
def update_asset_property(%Client{} = client, asset_id, property_id, input, options \\ []) do
url_path = "/assets/#{URI.encode(asset_id)}/properties/#{URI.encode(property_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates an AWS IoT SiteWise Monitor dashboard.
"""
def update_dashboard(%Client{} = client, dashboard_id, input, options \\ []) do
url_path = "/dashboards/#{URI.encode(dashboard_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates a gateway's name.
"""
def update_gateway(%Client{} = client, gateway_id, input, options \\ []) do
url_path = "/20200301/gateways/#{URI.encode(gateway_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates a gateway capability configuration or defines a new capability
configuration.
Each gateway capability defines data sources for a gateway. A capability
configuration can contain multiple data source configurations. If you define
OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA
sources are stored in one capability configuration. To list all capability
configurations for a gateway, use
[DescribeGateway](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGateway.html).
"""
def update_gateway_capability_configuration(
%Client{} = client,
gateway_id,
input,
options \\ []
) do
url_path = "/20200301/gateways/#{URI.encode(gateway_id)}/capability"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
201
)
end
@doc """
Updates an AWS IoT SiteWise Monitor portal.
"""
def update_portal(%Client{} = client, portal_id, input, options \\ []) do
url_path = "/portals/#{URI.encode(portal_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
202
)
end
@doc """
Updates an AWS IoT SiteWise Monitor project.
"""
def update_project(%Client{} = client, project_id, input, options \\ []) do
url_path = "/projects/#{URI.encode(project_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
200
)
end
end
|
lib/aws/generated/iot_site_wise.ex
| 0.87079
| 0.453322
|
iot_site_wise.ex
|
starcoder
|
defmodule ExDoc.HTMLFormatter.Templates do
@moduledoc """
Handle all template interfaces for the HTMLFormatter.
"""
require EEx
alias ExDoc.HTMLFormatter.Autolink
@doc """
Generate content from the module template for a given `node`
"""
def module_page(node) do
types = node.typespecs
functions = Enum.filter node.docs, &match?(ExDoc.FunctionNode[type: :def], &1)
macros = Enum.filter node.docs, &match?(ExDoc.FunctionNode[type: :defmacro], &1)
callbacks = Enum.filter node.docs, &match?(ExDoc.FunctionNode[type: :defcallback], &1)
module_template(node, types, functions, macros, callbacks)
end
@doc """
Generates the listing.
"""
def list_page(scope, nodes, config, has_readme) do
list_template(scope, nodes, config, has_readme)
end
# Get fields for records and exceptions, removing any field
# that starts with underscore
defp get_fields(ExDoc.ModuleNode[type: type] = node) when type in [:record, :exception] do
node.module.__record__(:fields)
|> Enum.filter(fn({f,_}) -> hd(atom_to_list(f)) != ?_ end)
|> presence
end
defp get_fields(_), do: nil
# Get the full specs from a function, already in HTML form.
defp get_specs(ExDoc.FunctionNode[specs: specs]) when is_list(specs) do
presence specs
end
defp get_specs(_node), do: nil
# Convert markdown to HTML.
defp to_html(nil), do: nil
defp to_html(bin) when is_binary(bin), do: Markdown.to_html(bin)
# Get the full signature from a function
defp signature(ExDoc.FunctionNode[name: name, signature: args]) do
Macro.to_string { name, 0, args }
end
# Get the first paragraph of the documentation of a node, if any.
defp synopsis(nil), do: nil
defp synopsis(doc) do
String.split(doc, %r/\n\s*\n/) |> hd
end
defp presence([]), do: nil
defp presence(other), do: other
defp h(binary) do
escape_map = [{ %r(&), "\\&amp;" }, { %r(<), "\\&lt;" }, { %r(>), "\\&gt;" }, { %r("), "\\&quot;" }]
Enum.reduce escape_map, binary, fn({ re, escape }, acc) -> Regex.replace(re, acc, escape) end
end
templates = [
index_template: [:config],
list_template: [:scope, :nodes, :config, :has_readme],
overview_template: [:config, :modules, :records, :protocols],
module_template: [:module, :types, :functions, :macros, :callbacks],
list_item_template: [:node],
overview_entry_template: [:node],
summary_template: [:node],
detail_template: [:node, :_module],
type_detail_template: [:node, :_module],
readme_template: [:content]
]
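# Compile each template under templates/<name>.eex into a same-named
# function taking the arguments listed above (via EEx.function_from_file).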
Enum.each templates, fn({ name, args }) ->
filename = Path.expand("templates/#{name}.eex", __DIR__)
EEx.function_from_file :def, name, filename, args
end
end
|
lib/ex_doc/html_formatter/templates.ex
| 0.681833
| 0.428771
|
templates.ex
|
starcoder
|
defmodule Leibniz do
@moduledoc """
Leibniz is a math expression parser and evaluator.
"""
@doc ~S"""
Evaluates a valid math expression interpolating any given values.
## Examples
iex> Leibniz.eval("2 * 10 / 2")
{:ok, 10.0}
iex> Leibniz.eval("2 * foo + bar - baz", foo: 5.3, bar: 10, baz: 3)
{:ok, 17.6}
iex> Leibniz.eval("2 * x + y")
{:error, "value expected for the following dependencies: x,y"}
"""
@spec eval(String.t, Keyword.t(number)) :: {:ok, number} | {:error, term}
def eval(expr, vars \\ []) do
with {:ok, ast} <- parse(expr),
:ok <- verify_dependencies(dependencies(ast), Keyword.keys(vars)) do
{:ok, match(ast, vars)}
else
{:error, e} -> {:error, e}
end
end
def parse(expr) do
with {:ok, tokens, _} <- expr |> to_charlist() |> :lexer.string(),
{:ok, ast} <- :parser.parse(tokens) do
{:ok, ast}
else
{:error, e, _} -> {:error, e}
{:error, e} -> {:error, e}
end
end
defp match(token, _vars) when is_number(token), do: token
defp match(token, vars) when is_atom(token) do
Keyword.get(vars, token)
end
defp match({:+, lhs, rhs}, vars), do: match(lhs, vars) + match(rhs, vars)
defp match({:-, lhs, rhs}, vars), do: match(lhs, vars) - match(rhs, vars)
defp match({:*, lhs, rhs}, vars), do: match(lhs, vars) * match(rhs, vars)
defp match({:/, lhs, rhs}, vars), do: match(lhs, vars) / match(rhs, vars)
defp dependencies({_, lhs, rhs}) do
do_dependencies(lhs, []) ++ do_dependencies(rhs, [])
end
defp dependencies(_), do: []
defp do_dependencies(node, acc) when is_atom(node), do: [node] ++ acc
defp do_dependencies({_, lhs, rhs}, acc) do
do_dependencies(lhs, acc) ++ do_dependencies(rhs, acc)
end
defp do_dependencies(_, acc), do: acc
defp verify_dependencies(required, actual) do
case required -- actual do
[] -> :ok
missing -> {:error, "value expected for the following dependencies: #{Enum.join(missing, ",")}"}
end
end
end
|
lib/leibniz.ex
| 0.83957
| 0.552359
|
leibniz.ex
|
starcoder
|
defmodule Xlack do
@moduledoc """
Xlack is a genserver-ish interface for working with the Xlack real time
messaging API through a Websocket connection.
To use this module you'll need an Xlack API token which can be retrieved
by following the [Token Generation Instructions] or by creating a new [bot
integration].
[Token Generation Instructions]: https://hexdocs.pm/slack/token_generation_instructions.html
[bot integration]: https://api.slack.com/bot-users
## Example
```
defmodule Bot do
use Xlack
def handle_event(message = %{type: "message"}, slack, state) do
if message.text == "Hi" do
send_message("Hello to you too!", message.channel, slack)
end
{:ok, state}
end
def handle_event(_, _, state), do: {:ok, state}
end
Xlack.Bot.start_link(Bot, [], "API_TOKEN")
```
`handle_*` methods are always passed `slack` and `state` arguments. The
`slack` argument holds the state of Xlack and is kept up to date
automatically.
In this example we're just matching against the message type and checking if
the text content is "Hi" and if so, we reply with our own greeting.
The message type is pattern matched against because the
[Xlack RTM API](https://api.slack.com/rtm) defines many different types of
messages that we can receive. Because of this it's wise to write a catch-all
`handle_event/3` in your bots to prevent crashing.
## Callbacks
* `handle_connect(slack, state)` - called when connected to Xlack.
* `handle_event(message, slack, state)` - called when a message is received.
* `handle_close(reason, slack, state)` - called when websocket is closed before process is terminated.
* `handle_info(message, slack, state)` - called when any other message is received in the process mailbox.
## Xlack argument
The Xlack argument that's passed to each callback is what contains all of the
state related to Xlack including a list of channels, users, groups, bots, and
even the socket.
Here's a list of what's stored:
* me - The current bot/users information stored as a map of properties.
* team - The current team's information stored as a map of properties.
* bots - Stored as a map with id's as keys.
* channels - Stored as a map with id's as keys.
* groups - Stored as a map with id's as keys.
* users - Stored as a map with id's as keys.
* ims (direct message channels) - Stored as a map with id's as keys.
* socket - The connection to Xlack.
* client - The client that makes calls to Xlack.
For all but `socket` and `client`, you can see what types of data to expect each of the
types to contain from the [Xlack API types] page.
[Xlack API types]: https://api.slack.com/types
"""
alias Xlack.Web.{Channel, Message, User}
def conversations(opts \\ %{}),
do: Xlack.Web.Conversations.list(opts)
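@doc """
Sends a message to a channel or user.

Accepts a `Channel` or `User` struct, a channel ID ("C..."), a user ID
("U..."), or a `Message` struct; for a user, a direct-message channel is
opened first. A sketch of intended usage (the IDs are illustrative):

    Xlack.send_message("C0123ABCD", "Hello, channel!")
    Xlack.send_message(%Xlack.Web.User{id: "U0123ABCD"}, "Hello, you!")
"""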
def send_message(channel_or_user, text, opts \\ %{})
def send_message(%Channel{id: channel}, text, opts),
do: send_message(channel, text, opts)
def send_message("C" <> _cid = channel, text, opts),
do: Xlack.Web.Chat.post_message(channel, text, opts)
def send_message(%User{id: user}, text, opts) do
send_message(user, text, opts)
end
def send_message("U" <> _uid = user, text, opts) do
with {:ok, channel} <- Xlack.Web.Im.open(user, opts),
do: send_message(channel, text, opts)
end
def send_message(%Message{channel: channel, text: text}) when not is_nil(channel),
do: send_message(channel, text)
def send_message(%Message{user: user, text: text}),
do: send_message(user, text)
def delete(%Message{channel: channel, ts: ts}, opts \\ %{}),
do: Xlack.Web.Chat.delete(channel, ts, opts)
def info(channel_or_user, opts \\ %{})
def info(%Channel{id: channel}, opts),
do: info(channel, opts)
def info("C" <> _cid = channel, opts),
do: Xlack.Web.Channels.info(channel, opts)
def info(%User{id: user}, opts),
do: info(user, opts)
def info("U" <> _uid = user, opts),
do: Xlack.Web.Users.info(user, opts)
def history(channel, opts \\ %{})
def history(%Channel{id: channel}, opts),
do: history(channel, opts)
def history("C" <> _cid = channel, opts),
do: Xlack.Web.Channels.history(channel, opts)
defmacro __using__(_) do
quote do
import Xlack.Lookups
import Xlack.Sends
def handle_connect(_slack, state), do: {:ok, state}
def handle_event(_message, _slack, state), do: {:ok, state}
def handle_close(_reason, _slack, state), do: :close
def handle_info(_message, _slack, state), do: {:ok, state}
def child_spec(_opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, []},
type: :worker,
restart: :permanent,
shutdown: 500
}
end
defoverridable handle_connect: 2,
handle_event: 3,
handle_close: 3,
handle_info: 3,
child_spec: 1
end
end
end
|
lib/xlack.ex
| 0.863002
| 0.796411
|
xlack.ex
|
starcoder
|
defmodule Callback do
@typedoc """
A module-function-arity tuple with an explicit arity.
"""
@type mfa(arity) :: { module, atom, arity }
@typedoc """
A module-function-parameter tuple.
This either contains a list of parameters that will be passed to the function,
and any input parameters will be added following those parameters. Or an input
index or list of indexes will be provided (in which case those inputs will be
inserted into the parameter list at those positions). Or `nil` inputs, if no
inputs should be included, this however is only applicable to calls where the
input requirements `:optional`.
{ Foo, :bar, [:a, :b] }
\# If the callback is not passing any inputs then this function will
\# be called as:
Foo.bar(:a, :b)
\# If the callback is passing in 1 input ("hello"), then this function
\# will be called as:
Foo.bar(:a, :b, "hello")
\# If the callback is passing in 2 inputs ("hello", "world"), then this
\# function will be called as:
Foo.bar(:a, :b, "hello", "world")
{ Foo, :bar, [:a, :b], 0 }
\# If the callback is not passing any inputs then this function will
\# be called as:
Foo.bar(:a, :b)
\# If the callback is passing in 1 input ("hello"), then this function
\# will be called as:
Foo.bar("hello", :a, :b)
\# If the callback is passing in 2 inputs ("hello", "world"), then this
\# function will be called as:
Foo.bar("hello", "world", :a, :b)
{ Foo, :bar, [:a, :b], [0, 2] }
\# If the callback is not passing any inputs then this function will
\# be called as:
Foo.bar(:a, :b)
\# If the callback is passing in 1 input ("hello"), then this function
\# will be called as:
Foo.bar("hello", :a, :b)
\# If the callback is passing in 2 inputs ("hello", "world"), then this
\# function will be called as:
Foo.bar("hello", :a, "world", :b)
{ Foo, :bar, [:a, :b], nil }
\# If the callback is not passing any inputs then this function will
\# be called as:
Foo.bar(:a, :b)
\# If the callback is passing in 1 input ("hello"), then this function
\# will be called as:
Foo.bar(:a, :b)
\# If the callback is passing in 2 inputs ("hello", "world"), then this
\# function will be called as:
Foo.bar(:a, :b)
"""
@type mfp :: { module, atom, list } | { module, atom, list, nil | non_neg_integer | [non_neg_integer] }
@typedoc """
A generic callback form with any amount of arguments.
"""
@type callback :: fun | mfa | mfp
@typedoc """
An explicit callback form expecting 1 argument of the provided type, and
returning a result of the provided type.
"""
@type callback(arg1, result) :: (arg1 -> result) | mfa(1) | mfp
@typedoc """
An explicit callback form expecting 2 arguments of the provided type, and
returning a result of the provided type.
"""
@type callback(arg1, arg2, result) :: (arg1, arg2 -> result) | mfa(2) | mfp
@doc """
Returns `true` if `term` is a `callback`; otherwise returns `false`.
Allowed in guard tests.
"""
defguard is_callback(callback)
when is_tuple(callback)
and ((tuple_size(callback) == 3 and is_atom(elem(callback, 0)) and is_atom(elem(callback, 1)) and (is_integer(elem(callback, 2)) or is_list(elem(callback, 2))))
or (tuple_size(callback) == 4 and is_atom(elem(callback, 0)) and is_atom(elem(callback, 1)) and is_list(elem(callback, 2)) and (is_integer(elem(callback, 3)) or is_list(elem(callback, 3)) or is_nil(elem(callback, 3)))))
or is_function(callback)
@doc """
Call the provided callback with the given inputs.
The inputs to the callback are required by default but can otherwise be
made optional by passing `:optional` as the `input_requirement`.
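## Examples

Illustrative invocations based on the `mfp` forms described above:

    Callback.call({ Enum, :map, [[1, 2, 3]] }, [fn x -> x * 2 end])
    #=> [2, 4, 6]

    Callback.call({ Enum, :map, [fn x -> x + 1 end], 0 }, [[1, 2, 3]])
    #=> [2, 3, 4]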
"""
@spec call(callback, list, :required | :optional) :: any
def call(fun, inputs \\ [], input_requirement \\ :required)
def call({ module, fun, args, nil }, _, :optional) do
apply(module, fun, args)
end
def call({ module, fun, args, index }, inputs, _) when is_integer(index) do
{ left, right } = Enum.split(args, index)
apply(module, fun, left ++ inputs ++ right)
end
def call({ module, fun, args, indexes }, inputs, _) do
args = Enum.zip(indexes, inputs) |> Enum.sort |> insert_args(args)
apply(module, fun, args)
end
def call({ module, fun, arity }, inputs, _) when is_integer(arity) and length(inputs) == arity do
apply(module, fun, inputs)
end
def call({ module, fun, args }, inputs, _) when is_list(args) do
apply(module, fun, args ++ inputs)
end
def call(fun, inputs, _) when is_function(fun, length(inputs)) do
apply(fun, inputs)
end
defp insert_args(indexed, list, n \\ 0, result \\ [])
defp insert_args([], [], _, args), do: Enum.reverse(args)
defp insert_args([{ _, e }|indexed], [], n, args), do: insert_args(indexed, [], n + 1, [e|args])
defp insert_args([{ n, e }|indexed], list, n, args), do: insert_args(indexed, list, n + 1, [e|args])
defp insert_args(indexed, [e|list], n, args), do: insert_args(indexed, list, n + 1, [e|args])
end
|
lib/callback.ex
| 0.859914
| 0.667027
|
callback.ex
|
starcoder
|
defmodule FFprobe do
@moduledoc """
Execute ffprobe CLI commands.
> `ffprobe` is a simple multimedia streams analyzer. You can use it to output
all kinds of information about an input including duration, frame rate, frame size, etc.
It is also useful for gathering specific information about an input to be used in a script.
(from https://trac.ffmpeg.org/wiki/FFprobeTips)
"""
@type format_map :: map()
@type streams_list :: [map()]
@doc """
Get the duration in seconds, as a float.
If no duration (e.g., a still image), returns `:no_duration`.
If the file does not exist, returns { :error, :no_such_file }
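## Example

A sketch, assuming "video.mp4" exists and `ffprobe` is available:

    FFprobe.duration("video.mp4")
    #=> 120.5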
"""
@spec duration(binary | format_map) :: float | :no_duration | { :error, :no_such_file }
def duration(file_path) when is_binary(file_path) do
case format(file_path) do
format = %{} ->
duration(format)
{:error, :invalid_file} ->
duration(%{"duration" => nil})
error ->
error
end
end
def duration(format_map) when is_map(format_map) do
case format_map["duration"] do
nil -> :no_duration
result -> Float.parse(result) |> elem(0)
end
end
@doc """
Get a list of formats for the file.
If the file does not exist, returns { :error, :no_such_file }.
If the file is a non media file, returns { :error, :invalid_file }.
"""
@spec format_names(binary | format_map) ::
[binary] | {:error, :invalid_file} | {:error, :no_such_file}
def format_names(file_path) when is_binary(file_path) do
case format(file_path) do
format = %{} ->
format_names(format)
error ->
error
end
end
def format_names(format_map) when is_map(format_map) do
String.split(format_map["format_name"], ",")
end
@doc """
Get the "format" map, containing general info for the specified file,
such as number of streams, duration, file size, and more.
If the file does not exist, returns { :error, :no_such_file }.
If the file is a non media file, returns { :error, :invalid_file }.
"""
@spec format(binary) :: format_map | {:error, :invalid_file} | {:error, :no_such_file}
def format(file_path) do
if File.exists?(file_path) do
cmd_args = ["-v", "quiet", "-print_format", "json", "-show_format", file_path]
case System.cmd(ffprobe_path(), cmd_args, stderr_to_stdout: true) do
{result, 0} ->
result
|> Jason.decode!()
|> Map.get("format", %{})
{_result, 1} ->
{:error, :invalid_file}
end
else
{:error, :no_such_file}
end
end
@doc """
Get a list a of streams from the file.
If the file does not exist, returns { :error, :no_such_file }.
If the file is a non media file, returns { :error, :invalid_file }.
"""
@spec streams(binary) :: streams_list | {:error, :invalid_file} | {:error, :no_such_file}
def streams(file_path) do
if File.exists?(file_path) do
cmd_args = ["-v", "quiet", "-print_format", "json", "-show_streams", file_path]
case System.cmd(ffprobe_path(), cmd_args, stderr_to_stdout: true) do
{result, 0} ->
result
|> Jason.decode!()
|> Map.get("streams", [])
{_result, 1} ->
{:error, :invalid_file}
end
else
{:error, :no_such_file}
end
end
# Read ffprobe path from config. If unspecified, check if `ffprobe` is in env $PATH.
# If it is not, raise an error.
defp ffprobe_path do
case Application.get_env(:ffmpex, :ffprobe_path, nil) do
nil ->
case System.find_executable("ffprobe") do
nil ->
raise "FFmpeg not installed"
path ->
path
end
path ->
path
end
end
end
|
lib/ffprobe.ex
| 0.787237
| 0.474388
|
ffprobe.ex
|
starcoder
|
defmodule Assent.Strategy.OAuth do
@moduledoc """
OAuth 1.0a strategy.
`authorize_url/1` returns a map with a `:session_params` and `:url` key. The
`:session_params` key carries a `:oauth_token_secret` value for the request.
## Configuration
- `:consumer_key` - The OAuth consumer key, required
- `:site` - The domain of the OAuth server, required
- `:signature_method` - The signature method, optional, defaults to
`:hmac_sha1`. The value may be one of the following:
- `:hmac_sha1` - Generates signature with HMAC-SHA1
- `:rsa_sha1` - Generates signature with RSA-SHA1
- `:plaintext` - Doesn't generate signature
- `:consumer_secret` - The OAuth consumer secret, required if
`:signature_method` is either `:hmac_sha1` or `:plaintext`
- `:private_key_path` - The path for the private key, required if
`:signature_method` is `:rsa_sha1` and `:private_key` hasn't been set
- `:private_key` - The private key content that can be defined instead of
`:private_key_path`, required if `:signature_method` is `:rsa_sha1` and
`:private_key_path` hasn't been set
## Usage
config = [
consumer_key: "REPLACE_WITH_CONSUMER_KEY",
consumer_secret: "REPLACE_WITH_CONSUMER_SECRET",
site: "https://auth.example.com",
authorization_params: [scope: "user:read user:write"],
user_url: "https://example.com/api/user"
]
{:ok, %{url: url, session_params: session_params}} =
config
|> Assent.Config.put(:redirect_uri, "http://localhost:4000/auth/callback")
|> OAuth.authorize_url()
{:ok, %{user: user, token: token}} =
config
|> Assent.Config.put(:session_params, session_params)
|> OAuth.callback(params)
"""
@behaviour Assent.Strategy
alias Assent.Strategy, as: Helpers
alias Assent.{Config, HTTPAdapter.HTTPResponse, JWTAdapter, MissingParamError, RequestError}
@doc """
Generate authorization URL for request phase.
## Configuration
- `:redirect_uri` - The URI that the server redirects the user to after
authentication, required
- `:request_token_url` - The path or URL to fetch the token from, optional,
defaults to `/request_token`
- `:authorize_url` - The path or URL for the OAuth server to redirect users
to, defaults to `/authorize`
- `:authorization_params` - The authorization parameters, defaults to `[]`
"""
@impl true
@spec authorize_url(Config.t()) :: {:ok, %{url: binary(), session_params: %{oauth_token_secret: binary()}}} | {:error, term()}
def authorize_url(config) do
case Config.fetch(config, :redirect_uri) do
{:ok, redirect_uri} -> authorize_url(config, redirect_uri)
{:error, error} -> {:error, error}
end
end
defp authorize_url(config, redirect_uri) do
config
|> get_request_token([{"oauth_callback", redirect_uri}])
|> build_authorize_url(config)
|> case do
{:ok, url, oauth_token_secret} -> {:ok, %{url: url, session_params: %{oauth_token_secret: oauth_token_secret}}}
{:error, error} -> {:error, error}
end
end
defp get_request_token(config, oauth_params) do
with {:ok, site} <- Config.fetch(config, :site) do
request_token_url = Config.get(config, :request_token_url, "/request_token")
url = process_url(site, request_token_url)
config
|> do_request(:post, site, url, [], oauth_params)
|> Helpers.decode_response(config)
|> process_token_response()
end
end
defp process_url(site, url) do
case String.downcase(url) do
<<"http://"::utf8, _::binary>> -> url
<<"https://"::utf8, _::binary>> -> url
_ -> site <> url
end
end
defp do_request(config, method, site, url, params, oauth_params, headers \\ [], token_secret \\ nil) do
params =
params
|> Enum.to_list()
|> Enum.map(fn {key, value} -> {to_string(key), value} end)
signature_method = Config.get(config, :signature_method, :hmac_sha1)
with {:ok, oauth_params} <- gen_oauth_params(config, signature_method, oauth_params),
{:ok, signed_header} <- signed_header(config, signature_method, method, url, oauth_params, params, token_secret) do
req_headers = request_headers(method, [signed_header] ++ headers)
req_body = request_body(method, params)
query_params = url_params(method, params)
url = Helpers.to_url(site, url, query_params)
Helpers.request(method, url, req_body, req_headers, config)
end
end
defp gen_oauth_params(config, signature_method, oauth_params) do
with {:ok, consumer_key} <- Config.fetch(config, :consumer_key) do
params =
[
{"oauth_consumer_key", consumer_key},
{"oauth_nonce", gen_nonce()},
{"oauth_signature_method", to_signature_method_string(signature_method)},
{"oauth_timestamp", timestamp()},
{"oauth_version", "1.0"}
| oauth_params
]
{:ok, params}
end
end
defp signed_header(config, signature_method, method, url, oauth_params, params, token_secret) do
uri = URI.parse(url)
query_params = Map.to_list(URI.decode_query(uri.query || ""))
request_params = params ++ query_params ++ oauth_params
with {:ok, signature} <- gen_signature(config, method, uri, request_params, signature_method, token_secret) do
oauth_header_value =
[{"oauth_signature", signature} | oauth_params]
|> Enum.map(fn {key, value} ->
percent_encode(key) <> "=\"" <> percent_encode(value) <> "\""
end)
|> Enum.join(", ")
{:ok, {"Authorization", "OAuth " <> oauth_header_value}}
end
end
defp gen_nonce do
16
|> :crypto.strong_rand_bytes()
|> Base.encode64(padding: false)
end
defp to_signature_method_string(:hmac_sha1), do: "HMAC-SHA1"
defp to_signature_method_string(:rsa_sha1), do: "RSA-SHA1"
defp to_signature_method_string(:plaintext), do: "PLAINTEXT"
defp timestamp, do: to_string(:os.system_time(:second))
defp gen_signature(config, method, uri, request_params, :hmac_sha1, token_secret) do
with {:ok, shared_secret} <- encoded_shared_secret(config, token_secret) do
text = signature_base_string(method, uri, request_params)
signature =
:hmac
|> :crypto.mac(:sha, shared_secret, text)
|> Base.encode64()
{:ok, signature}
end
end
defp gen_signature(config, method, uri, request_params, :rsa_sha1, _token_secret) do
with {:ok, pem} <- JWTAdapter.load_private_key(config),
{:ok, private_key} <- decode_pem(pem) do
signature =
method
|> signature_base_string(uri, request_params)
|> :public_key.sign(:sha, private_key)
|> Base.encode64()
{:ok, signature}
end
end
defp gen_signature(config, _method, _url, _request_params, :plaintext, token_secret),
do: encoded_shared_secret(config, token_secret)
defp encoded_shared_secret(config, token_secret) do
with {:ok, consumer_secret} <- Config.fetch(config, :consumer_secret) do
shared_secret =
[consumer_secret, token_secret || ""]
|> Enum.map(&percent_encode/1)
|> Enum.join("&")
{:ok, shared_secret}
end
end
defp percent_encode(value) do
value
|> to_string()
|> URI.encode(&URI.char_unreserved?/1)
end
defp signature_base_string(method, uri, request_params) do
method =
method
|> to_string()
|> String.upcase()
base_string_uri =
%{uri | query: nil, host: uri.host}
|> URI.to_string()
|> String.downcase()
normalized_request_params =
request_params
|> Enum.map(fn {key, value} ->
percent_encode(key) <> "=" <> percent_encode(value)
end)
|> Enum.sort()
|> Enum.join("&")
[method, base_string_uri, normalized_request_params]
|> Enum.map(&percent_encode/1)
|> Enum.join("&")
end
defp decode_pem(pem) do
case :public_key.pem_decode(pem) do
[entry] -> {:ok, :public_key.pem_entry_decode(entry)}
_any -> {:error, "Private key should only have one entry"}
end
end
defp request_headers(:post, headers), do: [{"content-type", "application/x-www-form-urlencoded"}] ++ headers
defp request_headers(_method, headers), do: headers
defp request_body(:post, req_params), do: URI.encode_query(req_params)
defp request_body(_method, _req_params), do: nil
defp url_params(:post, _params), do: []
defp url_params(_method, params), do: params
defp process_token_response({:ok, %HTTPResponse{status: 200, body: body} = response}) when is_binary(body), do: process_token_response({:ok, %{response | body: URI.decode_query(body)}})
defp process_token_response({:ok, %HTTPResponse{status: 200, body: %{"oauth_token" => _, "oauth_token_secret" => _} = token}}), do: {:ok, token}
defp process_token_response(any), do: process_response(any)
defp process_response({:ok, %HTTPResponse{} = response}), do: {:error, RequestError.unexpected(response)}
defp process_response({:error, %HTTPResponse{} = response}), do: {:error, RequestError.invalid(response)}
defp process_response({:error, error}), do: {:error, error}
defp build_authorize_url({:ok, token}, config) do
with {:ok, site} <- Config.fetch(config, :site),
{:ok, oauth_token} <- fetch_from_token(token, "oauth_token"),
{:ok, oauth_token_secret} <- fetch_from_token(token, "oauth_token_secret") do
authorization_url = Config.get(config, :authorize_url, "/authorize")
params = authorization_params(config, oauth_token: oauth_token)
url = Helpers.to_url(site, authorization_url, params)
{:ok, url, oauth_token_secret}
end
end
defp build_authorize_url({:error, error}, _config), do: {:error, error}
defp fetch_from_token(token, key) do
case Map.fetch(token, key) do
{:ok, value} -> {:ok, value}
:error -> {:error, "No `#{key}` in token map"}
end
end
defp authorization_params(config, params) do
config
|> Config.get(:authorization_params, [])
|> Config.merge(params)
|> List.keysort(0)
end
@doc """
Callback phase for generating access token and fetch user data.
## Configuration
- `:access_token_url` - The path or URL to fetch the access token from,
optional, defaults to `/access_token`
- `:user_url` - The path or URL to fetch user data, required
- `:session_params` - The session parameters that was returned from
`authorize_url/1`, optional
"""
@impl true
@spec callback(Config.t(), map(), atom()) :: {:ok, %{user: map(), token: map()}} | {:error, term()}
def callback(config, params, strategy \\ __MODULE__) do
with {:ok, oauth_token} <- fetch_oauth_token(params),
{:ok, oauth_verifier} <- fetch_oauth_verifier(params),
{:ok, token} <- get_access_token(config, oauth_token, oauth_verifier),
{:ok, user} <- strategy.fetch_user(config, token) do
{:ok, %{user: user, token: token}}
end
end
defp fetch_oauth_token(%{"oauth_token" => code}), do: {:ok, code}
defp fetch_oauth_token(params), do: {:error, MissingParamError.new("oauth_token", params)}
defp fetch_oauth_verifier(%{"oauth_verifier" => code}), do: {:ok, code}
defp fetch_oauth_verifier(params), do: {:error, MissingParamError.new("oauth_verifier", params)}
defp get_access_token(config, oauth_token, oauth_verifier) do
with {:ok, site} <- Config.fetch(config, :site) do
access_token_url = Config.get(config, :access_token_url, "/access_token")
url = process_url(site, access_token_url)
oauth_token_secret = Kernel.get_in(config, [:session_params, :oauth_token_secret])
config
|> do_request(:post, site, url, [], [{"oauth_token", oauth_token}, {"oauth_verifier", oauth_verifier}], [], oauth_token_secret)
|> Helpers.decode_response(config)
|> process_token_response()
end
end
@doc """
Performs a signed HTTP request to the API using the oauth token.
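## Example

A sketch of fetching a protected resource after a completed callback; the
URL is illustrative:

    {:ok, response} =
      OAuth.request(config, token, :get, "/1.1/account/verify_credentials.json")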
"""
@spec request(Config.t(), map(), atom(), binary(), map() | Keyword.t(), [{binary(), binary()}]) :: {:ok, map()} | {:error, term()}
def request(config, token, method, url, params \\ [], headers \\ []) do
with {:ok, site} <- Config.fetch(config, :site),
{:ok, oauth_token} <- fetch_from_token(token, "oauth_token"),
{:ok, oauth_token_secret} <- fetch_from_token(token, "oauth_token_secret") do
url = process_url(site, url)
config
|> do_request(method, site, url, params, [{"oauth_token", oauth_token}], headers, oauth_token_secret)
|> Helpers.decode_response(config)
end
end
@doc false
@spec fetch_user(Config.t(), map()) :: {:ok, map()} | {:error, term()}
def fetch_user(config, token) do
with {:ok, url} <- Config.fetch(config, :user_url) do
config
|> request(token, :get, url)
|> process_user_response()
end
end
defp process_user_response({:ok, %HTTPResponse{status: 200, body: user}}), do: {:ok, user}
defp process_user_response({:error, %HTTPResponse{status: 401}}), do: {:error, %RequestError{message: "Unauthorized token"}}
defp process_user_response(any), do: process_response(any)
end
|
lib/assent/strategies/oauth.ex
| 0.902445
| 0.512205
|
oauth.ex
|
starcoder
|
defmodule Storage do
@moduledoc """
The main module, which contains all the core functions for basic handling of files.
This module has functions only for direct handling of the files, `Storage.Object` can
be used to make it easier storing files of specific types.
## Configuration
Here's an example configuration:
config :storage,
adapter: Storage.Adapters.Local
config :storage, Storage.Adapters.Local,
root: "priv/files",
host: [
url: "http://localhost:4000",
from: "/static"
]
* `adapter:` key defines which adapter should be used when storing files. Storage ships only with `Storage.Adapters.Local`; if you want to use S3 or another adapter, you need to install the appropriate package
After configuring which adapter will be used, we have to configure the adapter itself. You can look at `Storage.Adapters.Local` documentation for details about the options.
"""
defp adapter, do: Application.fetch_env!(:storage, :adapter)
@doc """
Stores the file from `path` with given options.
The file will be stored in a path defined by `:root` environment variable (defaults to `/`) and `Storage.File` struct will be returned.
## Options
* `:adapter` - overrides the adapter used in configuration
* `:filename` - new file name (if the filename option doesn't include
extension, `path` will be used to generate one)
* `:scope` - directory (or list of directories) where to store the file
## Example
iex> Storage.put("./some_image.jpg", scope: ["users", 1], filename: "some_name")
%Storage.File{
content_type: "image/jpeg",
extension: "jpg",
filename: "some_name.jpg",
metadata: %{
ctime: {{2018, 4, 3}, {6, 47, 14}},
mtime: {{2018, 4, 3}, {6, 47, 14}},
size: 14041
},
path: "priv/files/users/1/some_name.jpg"
}
"""
@type source() :: String.t
@spec put(source, keyword) :: Storage.File.t
def put(path, opts \\ []) do
adapter = Keyword.get(opts, :adapter, adapter())
Storage.File.new(path, Keyword.put(opts, :adapter, adapter))
|> adapter.put(path)
end
@doc """
Generates URL for file in given `path`.
Adapter from configuration can be overridden by passing an `:adapter` option. Some adapters will need the `:host` configuration to generate a correct URL.
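## Example

With the local adapter configured as in the module documentation, a call
might look like this (the exact URL depends on the adapter):

    Storage.url("users/1/some_name.jpg")
    #=> "http://localhost:4000/static/users/1/some_name.jpg"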
"""
@spec url(String.t, keyword) :: String.t
def url(path, opts \\ []) do
adapter = Keyword.get(opts, :adapter, adapter())
adapter.url(path)
end
@doc """
Deletes the file in given `path`.
Adapter from configuration can be overridden by using an `:adapter` option.
"""
@spec delete(String.t, keyword) :: String.t
def delete(path, opts \\ []) do
adapter = Keyword.get(opts, :adapter, adapter())
adapter.delete(path)
end
end
|
lib/storage.ex
| 0.885235
| 0.703129
|
storage.ex
|
starcoder
|
defmodule Olagarro.PDF.Document do
@moduledoc """
Defines a structure representing a PDF document.
"""
defstruct [
:version,
:eol_marker
]
@doc """
Read a PDF document from an I/O stream.
## Options
These are the options:
* `:eol_marker` - The character or character sequence used in the file
as an end-of-line marker. Can be `:lf`, `:cr`, or `:crlf`. The code
can usually figure out for itself what the separator is, but if it
gets it wrong you can set it explicitly here.
## Examples
iex> File.stream!(\"../spec/data/document.pdf\") |> Olagarro.PDF.Document.decode
%Olagarro.PDF.Document{eol_marker: :lf}
"""
def decode(stream, options \\ []) do
# IO.puts "in decode/2"
options = options |> with_defaults
document = %Olagarro.PDF.Document{eol_marker: (options |> Keyword.get(:eol_marker, nil))}
# A PDF consists of four parts:
# 1. header
# 2. body (a sequence of objects, containing at least 3 indirect objects)
# 3. xref
# 4. trailer (startxref and %%EOF)
{:ok, document, stream}
|> decode_header()
|> decode_body()
|> decode_xref()
|> decode_trailer()
end
defp with_defaults(options) do
options
|> Keyword.merge(
eol_marker: options |> Keyword.get(:eol_marker, nil)
)
end
defp decode_header({:ok, document, stream}), do: decode_header({:ok, document, "", stream})
defp decode_header({:ok, document, "%PDF-" <> <<major::size(8)>> <> "." <> <<minor::size(8), remaining::binary>>, stream}) do
decode_eol_marker({:ok, %{document | version: "#{<<major::utf8>>}.#{<<minor::utf8>>}"}, remaining, stream})
end
defp decode_header({:ok, document, buffer, stream}) do
# IO.puts "in decode_header/1; loading buffer from stream"
if byte_size(buffer) < 8 do # "%PDF-1.x"
case IO.binread(stream, 1024) do
:eof ->
{:ok, document, buffer, stream}
{:error, _} = error_reason ->
error_reason
data ->
decode_header({:ok, document, buffer <> data, stream})
end
else
{:error, :header_not_found}
end
end
defp decode_eol_marker({:ok, document, stream}), do: decode_eol_marker({:ok, document, "", stream})
defp decode_eol_marker({:ok, document, <<10, remaining::binary>>, stream}), do: {:ok, %{document | eol_marker: :lf}, remaining, stream}
defp decode_eol_marker({:ok, document, <<13, 10, remaining::binary>>, stream}), do: {:ok, %{document | eol_marker: :crlf}, remaining, stream}
defp decode_eol_marker({:ok, document, <<13, remaining::binary>>, stream}), do: {:ok, %{document | eol_marker: :cr}, remaining, stream}
defp decode_eol_marker({:ok, document, buffer, stream}) do
# IO.puts "in decode_eol_marker/1; loading buffer from stream"
if byte_size(buffer) < 2 do # <CR><LF>, in the worst case
case IO.binread(stream, 1024) do
:eof ->
{:ok, document, buffer, stream}
{:error, _} = error_reason ->
error_reason
data ->
decode_eol_marker({:ok, document, buffer <> data, stream})
end
else
{:error, :eol_marker_not_found}
end
end
defp decode_body({:ok, document, stream}), do: decode_body({:ok, document, "", stream})
defp decode_body({:ok, document, "", stream}) do
# IO.puts "in decode_body/1; loading buffer from stream"
case IO.binread(stream, 1024) do
:eof ->
{:ok, document, "", stream}
{:error, _} = error_reason ->
error_reason
data ->
decode_body({:ok, document, data, stream})
end
end
defp decode_body({:ok, document, buffer, stream}) do
# IO.puts "in decode_header/1; switching on comment/xref/other"
case buffer do
"%" <> remaining ->
{:ok, document, remaining, stream}
|> strip_comment
|> decode_body
"xref" <> _ ->
{:ok, document, buffer, stream}
_ ->
{:ok, document, buffer, stream}
|> decode_object
|> decode_body
end
end
defp decode_xref({:ok, document, stream}), do: decode_xref({:ok, document, "", stream})
defp decode_xref({:ok, document, "", stream}) do
# IO.puts "in decode_xref/1; loading buffer from stream"
case IO.binread(stream, 1024) do
:eof ->
{:ok, document, "", stream}
{:error, _} = error_reason ->
error_reason
data ->
decode_xref({:ok, document, data, stream})
end
end
defp decode_xref({:ok, _document, _buffer, _stream} = state), do: state
defp decode_trailer({:ok, _document, _remaining, _stream} = state), do: state
defp strip_comment({:ok, document, <<10, remaining::binary>>, stream}) do
if document.eol_marker == :lf do
{:ok, document, remaining, stream}
else
strip_comment({:ok, document, remaining, stream})
end
end
defp strip_comment({:ok, document, <<13, 10, remaining::binary>>, stream}) do
if document.eol_marker == :crlf do
{:ok, document, remaining, stream}
else
strip_comment({:ok, document, <<10>> <> remaining, stream})
end
end
defp strip_comment({:ok, document, <<13, remaining::binary>>, stream}) do
if document.eol_marker == :cr do
{:ok, document, remaining, stream}
else
strip_comment({:ok, document, remaining, stream})
end
end
defp strip_comment({:ok, document, "", stream}) do
case IO.binread(stream, 1024) do
:eof ->
{:ok, document, "", stream}
{:error, _} = error_reason ->
error_reason
data ->
strip_comment({:ok, document, data, stream})
end
end
defp strip_comment({:ok, document, <<_::bytes-size(1), remaining::binary>>, stream}), do: strip_comment({:ok, document, remaining, stream})
defp decode_object({:ok, _document, _buffer, _stream}) do
# 1 0 obj
# << /Type /Catalog
# /Pages 2 0 R
# >>
# endobj
end
@doc """
Write a PDF document to an I/O stream.
## Options
These are the options:
* `:eol_marker` - The character or character sequence used in the file
as an end-of-line marker. Can be `:lf`, `:cr`, or `:crlf`; defaults
to `:lf`.
## Examples
iex> %Olagarro.PDF.Document{} |> Olagarro.PDF.Document.encode
[]
"""
def encode(%Olagarro.PDF.Document{} = _document, _options \\ []) do
[]
end
end
|
lib/olagarro/pdf/document.ex
| 0.821796
| 0.405743
|
document.ex
|
starcoder
|
defmodule Codenamex.Game.Board do
@moduledoc """
This module manages the board logic.
All the functions besides setup/0 expect a board state.
A state is a variation of what was created by the setup/0 function.
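## Example

A typical flow (the word is illustrative and must be on the board):

    board = Codenamex.Game.Board.setup()
    {:ok, {card, board}} = Codenamex.Game.Board.touch_card(board, "apple")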
"""
alias Codenamex.Game.Card
alias Codenamex.Game.Dictionary
alias Codenamex.Game.Team
@cards_count 25
@first_team_cards_count 9
@second_team_cards_count 8
@black_cards_count 1
@yellow_cards_count 7
defstruct [
words: nil,
regular_cards: nil,
spymaster_cards: nil,
first_team: nil,
red_cards: nil,
blue_cards: nil,
black_cards: @black_cards_count,
yellow_cards: @yellow_cards_count
]
def setup() do
[first_team, second_team] = Team.pick_order()
words = Dictionary.fetch(@cards_count)
{regular_cards, spymaster_cards} = setup_board(words, first_team, second_team)
%__MODULE__{
words: words |> Enum.shuffle,
regular_cards: regular_cards,
spymaster_cards: spymaster_cards,
first_team: first_team,
red_cards: red_cards_start(:first_team, first_team),
blue_cards: blue_cards_start(:first_team, first_team)
}
end
def touch_intent(board, word) do
selected_regular_card = Map.fetch!(board.regular_cards, word)
case Card.touchable?(selected_regular_card) do
true -> {:ok, board}
false -> {:error, :card_already_touched}
end
end
def touch_card(board, word) do
selected_regular_card = Map.fetch!(board.regular_cards, word)
case Card.touchable?(selected_regular_card) do
true ->
selected_spymaster_card = Map.fetch!(board.spymaster_cards, word)
{:ok, update_state(board, word, selected_spymaster_card)}
false ->
{:error, :card_already_touched}
end
end
defp update_state(board, word, selected_spymaster_card) do
updated_spymaster_card = Card.touch(selected_spymaster_card)
spymaster_cards = Map.replace!(board.spymaster_cards, word, updated_spymaster_card)
regular_cards = Map.replace!(board.regular_cards, word, updated_spymaster_card)
updated_board = %{board | spymaster_cards: spymaster_cards, regular_cards: regular_cards}
case updated_spymaster_card.color do
"red" -> {updated_spymaster_card, %{updated_board | red_cards: board.red_cards - 1}}
"blue" -> {updated_spymaster_card, %{updated_board | blue_cards: board.blue_cards - 1}}
"yellow" -> {updated_spymaster_card, %{updated_board | yellow_cards: board.yellow_cards - 1}}
"black" -> {updated_spymaster_card, %{updated_board | black_cards: board.black_cards - 1}}
end
end
defp setup_board(words, first_team, second_team) do
red_offset = 0
{first_regular_cards, first_spymaster_cards} =
setup_cards(words, first_team, @first_team_cards_count, red_offset)
blue_offset = @first_team_cards_count
{second_regular_cards, second_spymaster_cards} =
setup_cards(words, second_team, @second_team_cards_count, blue_offset)
black_offset = @first_team_cards_count + @second_team_cards_count
{black_regular_cards, black_spymaster_cards} =
setup_cards(words, "black", @black_cards_count, black_offset)
yellow_offset = @first_team_cards_count + @second_team_cards_count + @black_cards_count
{yellow_regular_cards, yellow_spymaster_cards} =
setup_cards(words, "yellow", @yellow_cards_count, yellow_offset)
regular_cards = first_regular_cards
++ second_regular_cards
++ black_regular_cards
++ yellow_regular_cards
spymaster_cards = first_spymaster_cards
++ second_spymaster_cards
++ black_spymaster_cards
++ yellow_spymaster_cards
{Enum.into(regular_cards, %{}), Enum.into(spymaster_cards, %{})}
end
defp setup_cards(words, color, amount, start_from) do
selected_words = select_words(words, amount, start_from)
{setup_regular_cards(selected_words), setup_spymaster_cards(selected_words, color)}
end
defp select_words(words, amount, start_from) do
Enum.slice(words, start_from, amount)
end
defp setup_regular_cards(words) do
Enum.map(words, fn word ->
{word, Card.setup(word)}
end)
end
defp setup_spymaster_cards(words, color) do
Enum.map(words, fn word ->
{word, Card.setup(word, color)}
end)
end
defp red_cards_start(:first_team, "red"), do: @first_team_cards_count
defp red_cards_start(:first_team, "blue"), do: @second_team_cards_count
defp blue_cards_start(:first_team, "blue"), do: @first_team_cards_count
defp blue_cards_start(:first_team, "red"), do: @second_team_cards_count
end
|
lib/codenamex/game/board.ex
| 0.795896
| 0.485661
|
board.ex
|
starcoder
|
defmodule Day10.Asteroids do
def run(1) do
10
|> InputFile.contents_of
|> String.split("\n")
|> Day10.Asteroids.parse
|> Day10.Asteroids.counts
|> Enum.map(fn({loc, set}) -> {MapSet.size(set), loc} end)
|> Enum.max
|> IO.inspect
end
def run(2) do
10
|> InputFile.contents_of
|> String.split("\n")
|> Day10.Asteroids.parse
|> Day10.Asteroids.vaporize({22, 25}, 200)
|> IO.inspect
end
def parse(lines), do: parse(lines, 0, [])
def parse([], _lnum, locs), do: locs
def parse([line | rest], lnum, locs), do: parse(rest, lnum + 1, parse_line(line, lnum, 0, locs))
def parse_line("", _lnum, _pos, locs), do: locs
def parse_line("." <> rest, lnum, pos, locs), do: parse_line(rest, lnum, pos + 1, locs)
def parse_line("#" <> rest, lnum, pos, locs), do: parse_line(rest, lnum, pos + 1, [{pos, lnum} | locs])
def pairs([elt | rest]), do: pairs(elt, rest, rest, [])
defp pairs(_elt, [], [], ret), do: ret
defp pairs(_, [], [elt | rest], ret), do: pairs(elt, rest, rest, ret)
defp pairs(elt, [p | rest], next, ret), do: pairs(elt, rest, next, [{elt, p} | ret])
def counts(locations) do
locations |> Enum.sort |> pairs |> compute_counts(%{})
end
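# Group asteroids by angle from the origin (closest first within each angle),
# then sweep the angles in rotation order, vaporizing the closest asteroid on
# each angle per pass, until the n-th asteroid is hit.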
def vaporize(locations, origin, n) do
map = coord_map(origin, locations, %{})
angles = Map.keys(map) |> Enum.sort |> Enum.reverse
do_vaporize(angles, [], map, n)
end
defp do_vaporize([], next, map, n), do: do_vaporize(Enum.reverse(next), [], map, n)
defp do_vaporize([angle | rest], next, map, n) do
case Map.get(map, angle) do
[] -> do_vaporize(rest, next, map, n)
[{_distance, point} | asteroids] ->
if n == 1 do
point
else
map = Map.put(map, angle, asteroids)
do_vaporize(rest, [angle | next], map, n - 1)
end
end
end
def coord_map(_origin, [], map), do: map
def coord_map(origin, [origin | rest], map), do: coord_map(origin, rest, map)
def coord_map(origin, [p | rest], map) do
{angle, distance} = coords_of(origin, p)
l = [{distance, p} | Map.get(map, angle, [])] |> Enum.sort
coord_map(origin, rest, Map.put(map, angle, l))
end
def coords_of({o_x, o_y} = origin, {p_x, p_y} = point) do
{
angle_of(origin, point),
:math.sqrt(:math.pow(p_x - o_x, 2) + :math.pow(p_y - o_y, 2))
}
end
defp compute_counts([], map), do: map
defp compute_counts([{a, b} | rest], map) do
a_to_b = angle_of(a, b)
b_to_a = angle_of(b, a)
map = map
|> Map.update(a, MapSet.new([a_to_b]), &(MapSet.put(&1, a_to_b)))
|> Map.update(b, MapSet.new([b_to_a]), &(MapSet.put(&1, b_to_a)))
compute_counts(rest, map)
end
defp angle_of({origin_x, origin_y}, {p_x, p_y}) do
ElixirMath.atan2(p_x - origin_x, p_y - origin_y)
end
end
|
year_2019/lib/day_10/asteroids.ex
| 0.590425
| 0.505554
|
asteroids.ex
|
starcoder
|
defmodule Fuel.Ctx do
@moduledoc """
Provides functions for interacting with a `ctx`.
The `ctx` is based on the golang `context.Context` and provides many of the same functional features.
Using a `ctx` provides a common thread of data that can be used all through your application.
It can be used in plug, grpc, graphql or just within your application to facilitate testing.
You can include things like
* implementation of behaviours
* Authentication information
* ids - request, trace etc
* deadlines
* arbitrary values
@see https://github.com/tsloughter/ctx
"""
@ctx_key :ctx
@type t :: :ctx.t()
@type source :: Keyword.t() | map()
@type behaviour :: module
@type impl :: module
@doc "Fetch a new deadline"
defdelegate new(), to: :ctx
@doc "Fetch a new deadline"
defdelegate background(), to: :ctx
@doc "Set a value at a key in the context. Any valid term may be used as a key"
defdelegate set(ctx, key, value), to: :ctx
@doc "Get a value at a key. Missing values will raise a `KeyError`"
defdelegate get(ctx, key), to: :ctx
@doc "Get a value at a key. Missing values will return the default"
defdelegate get(ctx, key, default), to: :ctx
@doc "provides a new ctx with the given value"
defdelegate with_value(key, value), to: :ctx
@doc "Store a value on "
defdelegate with_value(ctx, key, value), to: :ctx
@doc "Provide a new ctx with the given values"
defdelegate with_values(map), to: :ctx
@doc "Provide a new ctx with a deadline set to elapse after the given time period"
defdelegate with_deadline_after(value, time_unit), to: :ctx
@doc "Set a deadline on an existing ctx"
defdelegate with_deadline_after(ctx, value, time_unit), to: :ctx
@doc "true if the deadline has not yet exceeded. false"
defdelegate deadline(ctx), to: :ctx
@doc """
True when the time is earlier than the deadline. False if the deadline has elapsed
"""
defdelegate done(ctx), to: :ctx
@spec deadline_exceeded?(t()) :: boolean
def deadline_exceeded?(ctx),
do: not :ctx.done(ctx)
@doc "Fetches a ctx from a kw list or map or returns a new one"
@spec ctx_from(source | nil) :: t()
def ctx_from(nil),
do: :ctx.new()
def ctx_from(options) when is_map(options),
do: Map.get(options, @ctx_key, :ctx.background())
def ctx_from(options) when is_list(options),
do: Keyword.get(options, @ctx_key, :ctx.background())
@doc "puts a ctx into a kwlist or map"
@spec ctx_into(:ctx.t(), source()) :: source()
def ctx_into(ctx, source) when is_map(source),
do: Map.put(source, @ctx_key, ctx)
def ctx_into(ctx, source) when is_list(source),
do: Keyword.put(source, @ctx_key, ctx)
@doc """
Store the implementation of a behaviour in a context
This is useful for changing out behaviours in things like tests where you want to use Mox.
@see `fetch_impl/2`, `fetch_impl/3`
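## Example

A sketch of swapping an implementation in tests (the module names are
illustrative):

    ctx = Fuel.Ctx.with_impl(Fuel.Ctx.new(), MyApp.Mailer, MyApp.MailerMock)
    Fuel.Ctx.fetch_impl(ctx, MyApp.Mailer)
    #=> MyApp.MailerMock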
"""
@spec with_impl(:ctx.t(), behaviour, impl) :: :ctx.t()
def with_impl(ctx, behaviour, impl),
do: :ctx.set(ctx, {:impl, behaviour}, impl)
@doc """
Fetches an implementation from the given context.
If no implementation has been explicitly provided, use the behaviour module as the implementation
"""
@spec fetch_impl(:ctx.t(), behaviour_and_impl :: module) :: impl()
def fetch_impl(ctx, behaviour),
do: :ctx.get(ctx, {:impl, behaviour}, behaviour)
@doc """
Fetches the implementation for the given behaviour from the given context.
If no implementation has been explicitly provided, fall back to the default implementation provided
"""
@spec fetch_impl(:ctx.t(), behaviour, default_impl :: impl) :: impl
def fetch_impl(ctx, behaviour, default_impl),
do: :ctx.get(ctx, {:impl, behaviour}, default_impl)
end
|
lib/fuel/ctx/ctx.ex
| 0.836988
| 0.664361
|
ctx.ex
|
starcoder
|
defmodule Combine do
@moduledoc """
Main entry point for the Combine API.
To use:
defmodule Test do
use Combine # defaults to parsers: [:text, :binary]
# use Combine, parsers: [:text]
# use Combine, parsers: [:binary]
# use Combine, parsers: [] # does not import any parsers other than Base
def foo(str) do
Combine.parse(str, many1(char))
end
end
"""
alias Combine.ParserState
defmacro __using__(opts \\ []) do
parsers = Keyword.get(opts, :parsers, [:text, :binary])
case parsers do
[:text, :binary] ->
quote do
import Combine.Parsers.Base
import Combine.Parsers.Text
import Combine.Parsers.Binary
end
[:text] ->
quote do
import Combine.Parsers.Base
import Combine.Parsers.Text
end
[:binary] ->
quote do
import Combine.Parsers.Base
import Combine.Parsers.Binary
end
_ -> []
end
end
@type parser :: (ParserState.t() -> ParserState.t)
@type previous_parser :: parser | nil
@doc """
Given an input string and a parser, applies the parser to the input string,
and returns the results as a list, or an error tuple if an error occurs.
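## Example

For instance, with the default parsers imported via `use Combine` (the
result shown is indicative):

    Combine.parse("abc", many1(char()))
    #=> [["a", "b", "c"]]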
"""
@spec parse(String.t, parser) :: [term] | {:error, term}
def parse(input, parser) do
case parser.(%ParserState{input: input}) do
%ParserState{status: :ok, results: res} ->
res |> Enum.reverse |> Enum.filter_map(&ignore_filter/1, &filter_ignores/1)
%ParserState{error: res} ->
{:error, res}
x ->
{:error, {:fatal, x}}
end
end
@doc """
Given a file path and a parser, applies the parser to the file located at that
path, and returns the results as a list, or an error tuple if an error occurs.
"""
@spec parse_file(String.t, parser) :: [term] | {:error, term}
def parse_file(path, parser) do
case File.read(path) do
{:ok, contents} -> parse(contents, parser)
{:error, _} = err -> err
end
end
defp ignore_filter(:__ignore), do: false
defp ignore_filter(_), do: true
defp filter_ignores(element) when is_list(element) do
Enum.filter_map(element, &ignore_filter/1, &filter_ignores/1)
end
defp filter_ignores(element), do: element
end
|
elixir/codes-from-books/little-elixir/cap8/blitzy/deps/combine/lib/combine.ex
| 0.698844
| 0.478712
|
combine.ex
|
starcoder
|
defmodule AWS.CodeStarConnections do
@moduledoc """
AWS CodeStar Connections
This AWS CodeStar Connections API Reference provides descriptions and usage
examples of the operations and data types for the AWS CodeStar Connections API.
You can use the connections API to work with connections and installations.
*Connections* are configurations that you use to connect AWS resources to
external code repositories. Each connection is a resource that can be given to
services such as CodePipeline to connect to a third-party repository such as
Bitbucket. For example, you can add the connection in CodePipeline so that it
triggers your pipeline when a code change is made to your third-party code
repository. Each connection is named and associated with a unique ARN that is
used to reference the connection.
When you create a connection, the console initiates a third-party connection
handshake. *Installations* are the apps that are used to conduct this handshake.
For example, the installation for the Bitbucket provider type is the Bitbucket
app. When you create a connection, you can choose an existing installation or
create one.
When you want to create a connection to an installed provider type such as
GitHub Enterprise Server, you create a *host* for your connections.
You can work with connections by calling:
* `CreateConnection`, which creates a uniquely named connection that
can be referenced by services such as CodePipeline.
* `DeleteConnection`, which deletes the specified connection.
* `GetConnection`, which returns information about the connection,
including the connection status.
* `ListConnections`, which lists the connections associated with
your account.
You can work with hosts by calling:
* `CreateHost`, which creates a host that represents the
infrastructure where your provider is installed.
* `DeleteHost`, which deletes the specified host.
* `GetHost`, which returns information about the host, including the
setup status.
* `ListHosts`, which lists the hosts associated with your account.
You can work with tags in AWS CodeStar Connections by calling the following:
* `ListTagsForResource`, which gets information about AWS tags for a
specified Amazon Resource Name (ARN) in AWS CodeStar Connections.
* `TagResource`, which adds or updates tags for a resource in AWS
CodeStar Connections.
* `UntagResource`, which removes tags for a resource in AWS CodeStar
Connections.
For information about how to use AWS CodeStar Connections, see the [Developer Tools User
Guide](https://docs.aws.amazon.com/dtconsole/latest/userguide/welcome-connections.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2019-12-01",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "codestar-connections",
global?: false,
protocol: "json",
service_id: "CodeStar connections",
signature_version: "v4",
signing_name: "codestar-connections",
target_prefix: "com.amazonaws.codestar.connections.CodeStar_connections_20191201"
}
end
@doc """
Creates a connection that can then be given to other AWS services like
CodePipeline so that it can access third-party code repositories.
The connection is in pending status until the third-party connection handshake
is completed from the console.
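## Example

A hypothetical invocation, assuming the usual `{:ok, result, response}` return
shape of this library (the connection name and provider type are illustrative):

    input = %{
      "ConnectionName" => "my-connection",
      "ProviderType" => "Bitbucket"
    }

    {:ok, result, _response} =
      AWS.CodeStarConnections.create_connection(client, input)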
"""
def create_connection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateConnection", input, options)
end
@doc """
Creates a resource that represents the infrastructure where a third-party
provider is installed.
The host is used when you create connections to an installed third-party
provider type, such as GitHub Enterprise Server. You create one host for all
connections to that provider.
A host created through the CLI or the SDK is in `PENDING` status by default. You
can make its status `AVAILABLE` by setting up the host in the console.
"""
def create_host(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateHost", input, options)
end
@doc """
The connection to be deleted.
"""
def delete_connection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteConnection", input, options)
end
@doc """
The host to be deleted.
Before you delete a host, all connections associated to the host must be
deleted.
A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or
VPC_CONFIG_DELETING state.
"""
def delete_host(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHost", input, options)
end
@doc """
Returns the connection ARN and details such as status, owner, and provider type.
"""
def get_connection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetConnection", input, options)
end
@doc """
Returns the host ARN and details such as status, provider type, endpoint, and,
if applicable, the VPC configuration.
"""
def get_host(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetHost", input, options)
end
@doc """
Lists the connections associated with your account.
"""
def list_connections(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListConnections", input, options)
end
@doc """
Lists the hosts associated with your account.
"""
def list_hosts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHosts", input, options)
end
@doc """
Gets the set of key-value pairs (metadata) that are used to manage the resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Adds to or modifies the tags of the given resource.
Tags are metadata that can be used to manage a resource.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes tags from an AWS resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates a specified host with the provided configurations.
"""
def update_host(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateHost", input, options)
end
end
|
lib/aws/generated/code_star_connections.ex
| 0.87643
| 0.478894
|
code_star_connections.ex
|
starcoder
|
defmodule Militerm.Systems.Mixins do
@moduledoc """
The Mixins system manages running code defined in a mixin and inspecting aspects
of the mixin.
"""
alias Militerm.Services.Mixins, as: MixinService
alias Militerm.Systems.Mixins
require Logger
def introspect(this_mixin) do
data = MixinService.get(this_mixin)
data.mixins
|> Enum.reverse()
|> Enum.reduce(%{}, fn mixin, acc ->
deep_merge(acc, introspect(mixin))
end)
|> deep_merge(%{
calculations: keys_with_attribution(data.calculations, this_mixin),
reactions: keys_with_attribution(data.reactions, this_mixin),
abilities: keys_with_attribution(data.abilities, this_mixin),
traits: keys_with_attribution(data.traits, this_mixin),
validators: keys_with_attribution(data.validators, this_mixin)
})
end
defp deep_merge(into, from) when is_map(into) and is_map(from) do
Map.merge(into, from, fn _k, v1, v2 -> deep_merge(v1, v2) end)
end
defp deep_merge(_v1, v2), do: v2
defp keys_with_attribution(map, attr) do
map
|> Map.keys()
|> Enum.map(fn k -> {k, attr} end)
|> Enum.into(%{})
end
def execute_event(name, entity_id, event, role, args) when is_binary(event) do
path = event |> String.split(":", trim: true) |> Enum.reverse()
execute_event(name, entity_id, path, role, args)
end
def execute_event(name, entity_id, path, role, args) do
case get_mixin(name) do
{:ok, mixin} ->
cond do
do_has_event?(mixin, path, role) ->
do_event(mixin, entity_id, path, role, args)
do_has_event?(mixin, path, "any") ->
do_event(mixin, entity_id, path, "any", args)
:else ->
false
end
_ ->
false
end
end
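# Illustrative note: event paths are colon-separated strings that are split
# and reversed so the most specific segment comes first, e.g.
#
#     "combat:attack:melee"
#     |> String.split(":", trim: true)
#     |> Enum.reverse()
#     #=> ["melee", "attack", "combat"]
#
# which lets `do_has_event?/3` fall back by dropping segments from the front.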
def has_event?(name, event, role) when is_binary(event) do
path = event |> String.split(":", trim: true) |> Enum.reverse()
has_event?(name, path, role)
end
def has_event?(name, path, role) do
case get_mixin(name) do
{:ok, mixin} ->
res = do_has_event?(mixin, path, role)
Logger.debug(fn ->
[
"- (",
name,
") has event ",
Enum.join(Enum.reverse(path), ":"),
" as ",
role,
": ",
inspect(res)
]
end)
res
_ ->
Logger.debug(fn ->
[
"- (",
name,
") has event ",
Enum.join(Enum.reverse(path), ":"),
" as ",
role,
": false"
]
end)
false
end
end
def has_exact_event?(name, event, role) when is_binary(event) do
path = event |> String.split(":", trim: true) |> Enum.reverse()
has_exact_event?(name, path, role)
end
def has_exact_event?(name, path, role) do
case get_mixin(name) do
{:ok, mixin} ->
do_has_exact_event?(mixin, path, role)
_ ->
false
end
end
def ability(name, entity_id, ability, role, args) when is_binary(ability) do
path = ability |> String.split(":", trim: true) |> Enum.reverse()
ability(name, entity_id, path, role, args)
end
def ability(name, entity_id, ability, role, args) do
case get_mixin(name) do
{:ok, mixin} ->
if role == "any" or do_has_ability?(mixin, ability, role) do
do_ability(mixin, entity_id, ability, role, args)
else
do_ability(mixin, entity_id, ability, "any", args)
end
_ ->
false
end
end
def has_ability?(name, ability, role) when is_binary(ability) do
path = ability |> String.split(":", trim: true) |> Enum.reverse()
has_ability?(name, path, role)
end
def has_ability?(name, ability, role) do
case get_mixin(name) do
{:ok, mixin} ->
do_has_ability?(mixin, ability, role) or do_has_ability?(mixin, ability, "any")
_ ->
false
end
end
def has_exact_ability?(name, ability, role) when is_binary(ability) do
path = ability |> String.split(":", trim: true) |> Enum.reverse()
has_exact_ability?(name, path, role)
end
def has_exact_ability?(name, ability, role) do
case get_mixin(name) do
{:ok, mixin} ->
do_has_exact_ability?(mixin, ability, role)
_ ->
false
end
end
def trait(name, entity_id, trait, args) do
case get_mixin(name) do
{:ok, mixin} ->
if do_has_trait?(mixin, trait) do
do_trait(mixin, entity_id, trait, args)
else
false
end
_ ->
false
end
end
def has_trait?(name, trait) do
case get_mixin(name) do
{:ok, mixin} ->
do_has_trait?(mixin, trait)
_ ->
false
end
end
def has_exact_trait?(name, trait) do
case get_mixin(name) do
{:ok, mixin} ->
do_has_exact_trait?(mixin, trait)
_ ->
false
end
end
def validates?(name, path) do
case get_mixin(name) do
{:ok, mixin} ->
has_validation?(mixin, path)
_ ->
false
end
end
def has_validation?(%{validations: validations, mixins: mixins}, path) do
cond do
Map.has_key?(validations, path) ->
true
Enum.any?(mixins, fn mixin -> Mixins.validates?(mixin, path) end) ->
true
:else ->
false
end
end
def has_validation?(_, _), do: false
def validate(name, entity_id, path, args) do
case get_mixin(name) do
{:ok, mixin} ->
do_validation(mixin, entity_id, path, args)
_ ->
nil
end
end
defp do_validation(mixin, entity_id, path, args) do
with %{validations: validations, mixins: mixins} <- mixin,
{:ok, value} <-
validations
|> execute_if_in_map(entity_id, path, args)
|> execute_if_mixin(
mixins,
:has_validation?,
:validate,
entity_id,
path,
args
) do
value
else
_ -> nil
end
end
def calculates?(name, path) do
case get_mixin(name) do
{:ok, mixin} ->
has_calculation?(mixin, path)
_ ->
false
end
end
def has_calculation?(%{calculations: calculations, mixins: mixins}, path) do
cond do
Map.has_key?(calculations, path) ->
true
Enum.any?(mixins, fn mixin -> Mixins.calculates?(mixin, path) end) ->
true
:else ->
false
end
end
def has_calculation?(_, _), do: false
def calculate(name, entity_id, path, args) do
case get_mixin(name) do
{:ok, mixin} ->
do_calculation(mixin, entity_id, path, args)
_ ->
nil
end
end
defp do_calculation(mixin, entity_id, path, args) do
with %{calculations: calculations, mixins: mixins} <- mixin,
{:ok, value} <-
calculations
|> execute_if_in_map(entity_id, path, args)
|> execute_if_mixin(
mixins,
:calculates?,
:calculate,
entity_id,
path,
args
) do
value
else
_ -> nil
end
end
defp do_event(_, _, [], _, _), do: true
defp do_event(mixin, entity_id, event, role, args) do
case mixin do
%{reactions: events, mixins: mixins} ->
handled =
events
|> execute_if_in_map(entity_id, event, role, args)
|> execute_if_mixin(
mixins,
:has_exact_event?,
:execute_event,
entity_id,
event,
role,
args
)
case handled do
{:ok, value} ->
value
otherwise ->
otherwise
end
_ ->
false
end
end
defp do_has_event?(_, [], _), do: false
defp do_has_event?(nil, _, _), do: false
defp do_has_event?(mixin, event, role) do
if do_has_exact_event?(mixin, event, role) do
true
else
# chop off the last bit and run again - remember, we've reversed the event bits
do_has_event?(mixin, Enum.drop(event, 1), role)
end
end
defp do_has_exact_event?(nil, _, _), do: false
defp do_has_exact_event?(mixin, event, role) do
case mixin do
%{reactions: events, mixins: mixins} ->
if Map.has_key?(events, {event, role}) do
true
else
# check mixins
Enum.any?(mixins, fn name ->
has_exact_event?(name, event, role)
end)
end
_ ->
false
end
end
defp do_ability(_, _, [], _, _), do: false
defp do_ability(mixin, entity_id, ability, role, args) do
case mixin do
%{abilities: abilities, mixins: mixins} ->
handled =
abilities
|> execute_if_in_map(entity_id, ability, role, args)
|> execute_if_mixin(
mixins,
:has_exact_ability?,
:ability,
entity_id,
ability,
role,
args
)
case handled do
{:ok, value} ->
value
_ ->
do_ability(mixin, entity_id, Enum.drop(ability, 1), role, args)
end
_ ->
false
end
end
defp do_has_ability?(_, [], _), do: false
defp do_has_ability?(nil, _, _), do: false
defp do_has_ability?(mixin, ability, role) do
if do_has_exact_ability?(mixin, ability, role) do
true
else
# chop off the last bit and run again - remember, we've reversed the event bits
do_has_ability?(mixin, Enum.drop(ability, 1), role)
end
end
defp do_has_exact_ability?(mixin, ability, role) do
case mixin do
%{abilities: abilities, mixins: mixins} ->
cond do
Map.has_key?(abilities, {ability, role}) ->
true
Enum.any?(mixins, fn name -> has_exact_ability?(name, ability, role) end) ->
true
:else ->
false
end
_ ->
false
end
end
defp do_trait(mixin, entity_id, trait, args) do
with %{traits: traits, mixins: mixins} <- mixin,
{:ok, value} <-
traits
|> execute_if_in_map(entity_id, trait, args)
|> execute_if_mixin(
mixins,
:has_exact_trait?,
:trait,
entity_id,
trait,
args
) do
value
else
_ -> false
end
end
defp do_has_trait?(_, ""), do: false
defp do_has_trait?(nil, _), do: false
defp do_has_trait?(mixin, trait) do
do_has_exact_trait?(mixin, trait)
end
defp do_has_exact_trait?(mixin, trait) do
case mixin do
%{traits: traits, mixins: mixins} ->
cond do
Map.has_key?(traits, trait) ->
true
Enum.any?(mixins, fn mixin -> Mixins.has_exact_trait?(mixin, trait) end) ->
true
:else ->
false
end
_ ->
false
end
end
defp execute_if_in_map(events, entity_id, event, role, args) do
case Map.get(events, {event, role}) do
code when is_tuple(code) ->
# IO.inspect({:code, event, role, code}, limit: :infinity)
Logger.debug(fn ->
[
entity_id,
": execute code for ",
inspect(event),
" as ",
role,
": ",
inspect(code, limit: :infinity)
]
end)
ret =
{:ok, Militerm.Machines.Script.run(code, Map.put(args, "this", {:thing, entity_id}))}
Logger.debug([entity_id, ": finished executing code for ", inspect(event), " as ", role])
ret
_ ->
:unhandled
end
end
defp execute_if_in_map(events, entity_id, event, args) do
case Map.get(events, event) do
code when is_tuple(code) ->
Logger.debug(fn ->
[
entity_id,
": execute code for ",
inspect(event),
": ",
inspect(code, limit: :infinity)
]
end)
ret =
{:ok, Militerm.Machines.Script.run(code, Map.put(args, "this", {:thing, entity_id}))}
Logger.debug([entity_id, ": finished executing code for ", inspect(event)])
ret
_ ->
:unhandled
end
end
defp execute_if_mixin({:ok, _} = result, _, _, _, _, _, _, _), do: result
defp execute_if_mixin(:unhandled, mixins, predicate, method, entity_id, event, role, args) do
case Enum.find(mixins, fn mixin -> apply(Mixins, predicate, [mixin, event, role]) end) do
nil ->
:unhandled
mixin ->
Logger.debug([entity_id, " handing off ", inspect(event), " as ", role, " to ", mixin])
{:ok, apply(Mixins, method, [mixin, entity_id, event, role, args])}
end
end
defp execute_if_mixin({:ok, _} = result, _, _, _, _, _, _), do: result
defp execute_if_mixin(:unhandled, mixins, predicate, method, entity_id, event, args) do
case Enum.find(mixins, fn mixin -> apply(Mixins, predicate, [mixin, event]) end) do
nil ->
:unhandled
mixin ->
Logger.debug([entity_id, " handing off ", inspect(event), " to ", mixin])
{:ok, apply(Mixins, method, [mixin, entity_id, event, args])}
end
end
defp get_mixin(name) do
case MixinService.get(name) do
%{} = mixin ->
{:ok, mixin}
_ ->
:error
end
end
end
|
lib/militerm/systems/mixins.ex
| 0.710829
| 0.411406
|
mixins.ex
|
starcoder
|
defmodule ExStoneOpenbank.Authenticator do
@moduledoc """
Responsible for authentication of the service application.
Authentication is done through issuing a JWT to the token endpoint according to client_credentials
flow. The JWT is signed using the private key in the given configuration.
The result is an access_token and a refresh_token. The access_token is used in all authenticated
calls. The refresh token is used to get a new access_token without issuing a new token.
"""
use GenServer
alias ExStoneOpenbank.Authenticator.AuthenticationJWT
alias ExStoneOpenbank.Config
alias ExStoneOpenbank.HTTP
require Logger
# milliseconds to avoid re-fetching the token in case another process tried to request it before
# the login finished
@skew_time Application.get_env(:ex_stone_openbank, :time_skew, 2_000)
def start_link(opts),
do: GenServer.start_link(__MODULE__, opts, name: Keyword.fetch!(opts, :name))
@impl true
def init(opts) do
opts = Config.validate_and_persist(opts)
:ets.new(opts.name, [:named_table, read_concurrency: true])
{:ok, opts, {:continue, :authenticate}}
end
@impl true
def handle_continue(:authenticate, %{name: name} = state) do
{:ok, _tokens} = authenticate(name)
{:noreply, Map.put(state, :last_timestamp, new_timestamp())}
end
@impl true
def handle_call(
{:refresh_token, timestamp},
_from,
%{last_timestamp: last_timestamp, name: name} = state
)
when last_timestamp + @skew_time > timestamp do
{:reply, tokens(name), state}
end
def handle_call(
{:refresh_token, timestamp},
_from,
%{
name: name,
last_timestamp: last_timestamp
} = state
)
when is_nil(last_timestamp) or last_timestamp + @skew_time <= timestamp do
case authenticate(name) do
{:ok, tokens} ->
state = %{state | last_timestamp: new_timestamp()}
{:reply, tokens, state}
err ->
{:reply, err, state}
end
end
def handle_call(:last_timestamp, _, %{last_timestamp: ts} = state) do
{:reply, ts, state}
end
@impl true
def handle_info(_msg, state), do: {:noreply, state}
@doc false
def tokens(name), do: :ets.lookup(name, :tokens)[:tokens]
defp last_timestamp(name), do: GenServer.call(name, :last_timestamp)
defp new_timestamp, do: :erlang.system_time(:millisecond)
@doc false
def time_until_next_refresh(config_name) do
ts = last_timestamp(config_name) + time_skew()
now = new_timestamp()
if now <= ts, do: ts - now, else: 0
end
@doc """
Gets the access_token for the given configuration.
"""
@spec access_token(name :: atom()) :: {:ok, String.t()} | {:error, :unauthenticated}
def access_token(name) do
case :ets.lookup(name, :tokens) do
[tokens: tokens] -> {:ok, tokens[:access_token]}
_ -> {:error, :unauthenticated}
end
end
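# Illustrative usage (the `:my_stone` name is an assumption; use whatever
# `name:` the authenticator was started with under your supervision tree):
#
#     {:ok, token} = ExStoneOpenbank.Authenticator.access_token(:my_stone)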
@doc """
Refreshes the token synchronously
"""
@spec refresh_token(name :: atom()) :: %{access_token: String.t()} | {:error, term()}
def refresh_token(name) do
GenServer.call(name, {:refresh_token, new_timestamp()})
end
@doc """
Authenticates with Stone Openbank API using the given signer and client_id
"""
@spec authenticate(name :: atom()) ::
        {:ok, %{access_token: String.t()}} | no_return()
def authenticate(name) do
with opts <- Config.options(name),
{:ok, token, _claims} <- generate_client_credentials_token(name, opts),
{:ok, %{"access_token" => access}} <- do_login(name, opts, token) do
tokens = %{access_token: access}
:ets.insert(name, {:tokens, tokens})
{:ok, tokens}
else
error ->
Logger.error("authenticate fail=#{inspect(error)}")
raise "failed to authenticate"
end
end
defp generate_client_credentials_token(name, opts) do
AuthenticationJWT.generate_and_sign(
%{
"clientId" => opts.client_id,
"sub" => opts.client_id,
"iss" => opts.client_id,
"aud" => Config.accounts_url(name) <> "/auth/realms/stone_bank"
},
opts.signer
)
end
defp do_login(name, opts, token) do
HTTP.login_client()
|> Tesla.post(
"#{Config.accounts_url(name)}/auth/realms/stone_bank/protocol/openid-connect/token",
%{
"grant_type" => "client_credentials",
"client_id" => opts.client_id,
"client_assertion_type" => "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_assertion" => token
}
)
|> HTTP.parse_result()
end
@doc false
def time_skew, do: @skew_time
end
|
lib/ex_stone_openbank/authenticator.ex
| 0.805288
| 0.423756
|
authenticator.ex
|
starcoder
|
defmodule Ash.Resource do
@moduledoc """
A resource is a static definition of an entity in your system.
Resource DSL documentation: `Ash.Resource.Dsl`
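A minimal resource sketch (illustrative only; the attribute names and data layer are assumptions, and the DSL surface may vary between Ash versions):
```elixir
defmodule MyApp.Ticket do
  use Ash.Resource, data_layer: Ash.DataLayer.Ets
  attributes do
    uuid_primary_key :id
    attribute :subject, :string
  end
end
```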
"""
@type t :: module
@type record :: struct()
use Ash.Dsl,
single_extension_kinds: [:data_layer],
many_extension_kinds: [
:authorizers,
:notifiers
],
default_extensions: [
data_layer: Module.concat(["Ash", DataLayer, Simple]),
extensions: [Module.concat(["Ash", Resource, Dsl])]
]
def init(opts) do
if opts[:data_layer] == :embedded do
{:ok,
opts
|> Keyword.put(:data_layer, Module.concat(["Ash", DataLayer, Simple]))
|> Keyword.put(:embedded?, true)}
else
{:ok, opts}
end
end
def handle_opts(opts) do
quote bind_quoted: [embedded?: opts[:embedded?]] do
if embedded? do
@persist {:embedded?, true}
require Ash.EmbeddableType
Ash.EmbeddableType.define_embeddable_type()
end
end
end
def handle_before_compile(_opts) do
quote do
require Ash.Schema
if !@moduledoc do
@moduledoc Ash.Resource.Info.description(__MODULE__) || false
end
Ash.Schema.define_schema()
@all_arguments __MODULE__
|> Ash.Resource.Info.actions()
|> Enum.flat_map(& &1.arguments)
|> Enum.map(& &1.name)
|> Enum.uniq()
@all_attributes __MODULE__
|> Ash.Resource.Info.attributes()
|> Enum.map(& &1.name)
|> Enum.uniq()
@doc """
Validates that the keys in the provided input are valid for at least one action on the resource,
raising a `KeyError` if not. This exists because a struct should generally only ever
be created by Ash as a result of a successful action; you should not be creating records manually in code,
e.g. `%MyResource{value: 1, other: 2}`. Generally that is fine, but often with embedded resources it is nice
to be able to validate the keys that are being provided, e.g.
```elixir
Resource
|> Ash.Changeset.for_create(:create, %{embedded: EmbeddedResource.input(foo: 1, bar: 2)})
|> MyApp.Api.create()
```
"""
def input(opts) do
Map.new(opts, fn {key, value} ->
if key in @all_arguments || key in @all_attributes do
{key, value}
else
raise KeyError, key: key
end
end)
end
end
end
end
|
lib/ash/resource.ex
| 0.755817
| 0.412294
|
resource.ex
|
starcoder
|
defmodule Telnyx do
@moduledoc """
[Telnyx](https://telnyx.com) is a real-time communications platform with full, feature-rich functionality, making it quick and easy to set up and port numbers around the world, configure messaging, control VoIP and IP network functions, and define how and where communications can be used in real time. The Telnyx API can power a host of specialty applications, from call tracking to cloud-based PBX, dynamic security, and authentication use cases. [link](https://developers.telnyx.com/docs/api/v2/overview)
This library aims to make it easy to integrate with the Telnyx API in Elixir.
## Quickstart
```
api_key = "YOUR_API_KEY"
{:ok, data} = Telnyx.Messages.create(api_key, %{
messaging_profile_id: "3fa85f55-5717-4562-b3fc-2c963f63vga6",
from: "+18665552368", # Your Telnyx number
to: "+18445552367",
text: "Hello, World!"
})
```
## Installation
Add `telnyx` to your deps in `mix.exs`:
{:telnyx, "~> 0.1.0"}
## Usage
Currently the only available call is `Telnyx.Messages.create/2`, which sends an outbound SMS.
Eventually all of the API calls at https://developers.telnyx.com/docs/api/v2/overview will be supported.
There is currently no plan to support v1 endpoints, although if there's interest please let me know!
## Changelog
See the [CHANGELOG.md](https://github.com/damonvjanis/telnyx/blob/master/CHANGELOG.md).
## Contributing
Contributions are always welcome!
If you find a bug, please [open an issue](https://github.com/damonvjanis/telnyx/issues/new) or even better - cut a PR! Tests are a plus and will help get things merged faster.
To run the test suite, you'll need to download the latest version of [telnyx-mock](https://github.com/team-telnyx/telnyx-mock/releases), unpack it, and ensure it is running before calling `mix test`.
Contributors and contributions will be listed in the
[changelog](https://github.com/damonvjanis/telnyx/blob/master/CHANGELOG.md).
## License
This software is released under the MIT License.
"""
end
|
lib/telnyx.ex
| 0.666171
| 0.891339
|
telnyx.ex
|
starcoder
|
import TypeClass
defclass Witchcraft.Setoid do
@moduledoc ~S"""
A setoid is a type with an equivalence relation.
This is most useful when equivalence of some data is not the same as equality.
Since some types have differing concepts of equality, this allows overriding
the behaviour from `Kernel.==/2`. To get the Setoid `==` operator override,
simply `use Witchcraft.Setoid`.
## Type Class
An instance of `Witchcraft.Setoid` must define `Witchcraft.Setoid.equivalent?/2`
Setoid [equivalent?/2]
"""
alias __MODULE__
import Kernel, except: [==: 2, !=: 2]
defmacro __using__(opts \\ []) do
overrides = [==: 2, !=: 2]
excepts = Keyword.get(opts, :except, [])
if Access.get(opts, :override_kernel, true) do
kernel_imports = Macro.escape(except: overrides -- excepts)
module_imports = Macro.escape(except: excepts)
quote do
import Kernel, unquote(kernel_imports)
import unquote(__MODULE__), unquote(module_imports)
end
else
module_imports = Macro.escape(except: Enum.uniq(overrides ++ excepts))
quote do
import unquote(__MODULE__), unquote(module_imports)
end
end
end
@type t :: any()
where do
@doc ~S"""
Compare two setoids and determine if they are equivalent.
Aliased as `==`.
## Examples
iex> equivalent?(1, 2)
false
iex> import Kernel, except: [==: 2, !=: 2]
...> %{a: 1} == %{a: 1, b: 2}
false
equivalent?(%Maybe.Just{just: 42}, %Maybe.Nothing{})
#=> false
### Equivalence not equality
baby_harry = %Wizard{name: "<NAME>", age: 10}
old_harry = %Wizard{name: "<NAME>", age: 17}
def chosen_one?(some_wizard), do: equivalent?(baby_harry, some_wizard)
chosen_one?(old_harry)
#=> true
"""
@spec equivalent?(Setoid.t(), Setoid.t()) :: boolean()
def equivalent?(a, b)
end
defalias a == b, as: :equivalent?
@doc """
The opposite of `equivalent?/2`.
## Examples
iex> nonequivalent?(1, 2)
true
"""
@spec nonequivalent?(Setoid.t(), Setoid.t()) :: boolean()
def nonequivalent?(a, b), do: not equivalent?(a, b)
defalias a != b, as: :nonequivalent?
properties do
def reflexivity(data) do
a = generate(data)
Setoid.equivalent?(a, a)
end
def symmetry(data) do
a = generate(data)
b = generate(data)
equal?(Setoid.equivalent?(a, b), Setoid.equivalent?(b, a))
end
def transitivity(data) do
a = b = c = generate(data)
Setoid.equivalent?(a, b) and Setoid.equivalent?(b, c) and Setoid.equivalent?(a, c)
end
end
end
definst Witchcraft.Setoid, for: Integer do
def equivalent?(int, num), do: Kernel.==(int, num)
end
definst Witchcraft.Setoid, for: Float do
def equivalent?(float, num), do: Kernel.==(float, num)
end
definst Witchcraft.Setoid, for: BitString do
def equivalent?(string_a, string_b), do: Kernel.==(string_a, string_b)
end
definst Witchcraft.Setoid, for: Tuple do
def equivalent?(tuple_a, tuple_b), do: Kernel.==(tuple_a, tuple_b)
end
definst Witchcraft.Setoid, for: List do
def equivalent?(list_a, list_b), do: Kernel.==(list_a, list_b)
end
definst Witchcraft.Setoid, for: Map do
def equivalent?(map_a, map_b), do: Kernel.==(map_a, map_b)
end
definst Witchcraft.Setoid, for: MapSet do
def equivalent?(a, b), do: MapSet.equal?(a, b)
end
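# An illustrative custom instance (hypothetical `Wizard` struct): equivalence
# defined on a single field rather than full structural equality. Note that a
# real instance for a struct may also need a custom generator so the
# TypeClass property checks can produce values.
#
#     definst Witchcraft.Setoid, for: Wizard do
#       def equivalent?(%Wizard{name: a}, %Wizard{name: b}), do: Kernel.==(a, b)
#     end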
|
lib/witchcraft/setoid.ex
| 0.793066
| 0.621498
|
setoid.ex
|
starcoder
|
defmodule Tds.Date do
@moduledoc """
Struct for MSSQL date.
https://msdn.microsoft.com/en-us/library/bb630352.aspx
## Fields
* `year`
* `month`
* `day`
"""
@type t :: %__MODULE__{year: 1..9999, month: 1..12, day: 1..31}
defstruct year: 1900,
month: 1,
day: 1
end
defmodule Tds.Time do
@moduledoc """
Struct for MSSQL time.
https://msdn.microsoft.com/en-us/library/bb677243.aspx
## Fields
* `hour`
* `min`
* `sec`
* `fsec`
"""
@type t :: %__MODULE__{
hour: 0..23,
min: 0..59,
sec: 0..59,
fsec: 0..9_999_999
}
defstruct hour: 0,
min: 0,
sec: 0,
fsec: 0
end
defmodule Tds.DateTime do
@moduledoc """
Struct for MSSQL DateTime.
https://msdn.microsoft.com/en-us/library/ms187819.aspx
## Fields
* `year`
* `month`
* `day`
* `hour`
* `min`
* `sec`
"""
@type t :: %__MODULE__{
year: 1753..9999,
month: 1..12,
day: 1..31,
hour: 0..23,
min: 0..59,
sec: 0..59,
fsec: 0..999
}
defstruct year: 1900,
month: 1,
day: 1,
hour: 0,
min: 0,
sec: 0,
fsec: 0
end
defmodule Tds.DateTime2 do
@moduledoc """
Struct for MSSQL DateTime2.
https://msdn.microsoft.com/en-us/library/bb677335.aspx
## Fields
* `year`
* `month`
* `day`
* `hour`
* `min`
* `sec`
* `usec`
"""
@type t :: %__MODULE__{
year: 1..9999,
month: 1..12,
day: 1..31,
hour: 0..23,
min: 0..59,
sec: 0..59,
fsec: 0..9_999_999
}
defstruct year: 1900,
month: 1,
day: 1,
hour: 0,
min: 0,
sec: 0,
# fractional secs
fsec: 0
end
defmodule Tds.DateTimeOffset do
@moduledoc """
Struct for MSSQL DateTimeOffset.
https://msdn.microsoft.com/en-us/library/bb630289.aspx
## Fields
* `year`
* `month`
* `day`
* `hour`
* `min`
* `sec`
* `usec`
"""
@type t :: %__MODULE__{
year: 1..9999,
month: 1..12,
day: 1..31,
hour: 0..23,
min: 0..59,
sec: 0..59,
fsec: 0..9_999_999,
offset_hour: -14..14,
offset_min: 0..59
}
defstruct year: 1900,
month: 1,
day: 1,
hour: 0,
min: 0,
sec: 0,
# fractional secs
fsec: 0,
offset_hour: 0,
offset_min: 0
end
defmodule Tds.SmallDateTime do
@moduledoc """
Struct for MSSQL SmallDateTime.
https://msdn.microsoft.com/en-us/library/ms182418.aspx
## Fields
* `year`
* `month`
* `day`
* `hour`
* `min`
* `sec`
"""
@type t :: %__MODULE__{
year: 1900..2079,
month: 1..12,
day: 1..31,
hour: 0..23,
min: 0..59,
sec: 0..59
}
defstruct year: 1900,
month: 1,
day: 1,
hour: 0,
min: 0,
sec: 0
end
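# Illustrative construction (values assumed):
#
#     %Tds.DateTime2{year: 2020, month: 6, day: 1, hour: 12, min: 30, sec: 15, fsec: 1_234_567}
#     %Tds.DateTimeOffset{year: 2020, month: 6, day: 1, offset_hour: -3}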
|
lib/tds/date_time.ex
| 0.875208
| 0.551634
|
date_time.ex
|
starcoder
|
defmodule YubikeyOTP.OTP do
@moduledoc """
## OTP Format
The format of the OTP is documented in:
https://developers.yubico.com/OTP/OTPs_Explained.html
"""
alias YubikeyOTP.CRC
alias YubikeyOTP.ModHex
alias YubikeyOTP.OTP
@type t :: %__MODULE__{
public_id: binary(),
prefix: nil | binary(),
serial: nil | integer(),
encrypted_otp: binary(),
private_id: nil | binary(),
use_counter: nil | binary(),
timestamp: integer(),
session_counter: nil | binary(),
random: nil | binary(),
checksum: nil | binary()
}
@otp_length 44
@key_format_error "Error parsing key. Key should be 128-bits stored as a 16 byte binary (preferred) or 32 character hex string"
defmacrop is_otp(string) do
quote do
is_binary(unquote(string)) and byte_size(unquote(string)) == @otp_length
end
end
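# The 44-character length follows from the OTP layout used in `parse!/2`:
# 6 ModHex characters of prefix plus 6 of serial (together the 12-character
# public ID), followed by 32 ModHex characters of encrypted payload.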
defstruct [
:public_id,
:prefix,
:serial,
:encrypted_otp,
:private_id,
:use_counter,
:timestamp,
:session_counter,
:random,
:checksum
]
@spec device_id(otp :: binary()) :: {:ok, binary()} | {:error, :otp_invalid}
def device_id(otp) do
with {:ok, parsed} <- parse(otp) do
{:ok, parsed.public_id}
else
_ -> {:error, :otp_invalid}
end
end
@spec validate(otp :: binary(), opts :: keyword()) :: {:ok, binary()} | {:error, :otp_invalid}
def validate(otp, opts \\ [])
def validate(otp, opts) when is_otp(otp) do
with {:ok, _} <- parse(otp, opts) do
{:ok, otp}
else
_ -> {:error, :otp_invalid}
end
end
def validate(_otp, _opts), do: {:error, :otp_invalid}
@doc """
Parses an OTP into an OTP struct.
Without the encryption key, only the `public_id`, `prefix`, `serial` and
`encrypted_otp` fields are hydrated.
## Options
* `:key` - provides the 128 bit AES key to decrypt the OTP and load the
remaining fields. As part of decryption, the OTP checksum is verified.
* `:skip_checksum` - whether to skip verifying the checksum after decrypting
the OTP with the provided `key`.
## Examples
iex> YubikeyOTP.OTP.parse("ccccccclulvjbbhccnndrietjjnkeclcvjgrnhcivtgd")
{:ok,
%YubikeyOTP.OTP{
public_id: "<KEY>",
prefix: "cccccc",
serial: 715512,
encrypted_otp: "bbhccnndrietjjnkeclcvjgrnhcivtgd"
}
}
iex> YubikeyOTP.OTP.parse("nope")
:error
"""
@spec parse(otp :: binary(), opts :: keyword()) :: {:ok, YubikeyOTP.OTP.t()} | :error
def parse(otp, opts \\ []) do
{:ok, parse!(otp, opts)}
rescue
_ -> :error
end
@doc """
Like `parse`, but returns the OTP struct directly, and throws exceptions when
errors are encountered (to permit specific handling, if desired).
## Exceptions
* `OTP.ParseError` - raised when the OTP cannot be successfully parsed with
the given options.
* `OTP.InvalidChecksumError` - raised when the checksum of the OTP does not
validate.
## Examples
Without specifying a decryption key, only the public information can be hydrated.
iex> YubikeyOTP.OTP.parse!("ccccccclulvjbbhccnndrietjjnkeclcvjgrnhcivtgd")
%YubikeyOTP.OTP{
public_id: "<KEY>",
prefix: "cccccc",
serial: 715512,
encrypted_otp: "bbhccnndrietjjnkeclcvjgrnhcivtgd"
}
Specifying a decryption key, but skipping the checksum verification will
hydrate the data even with a "bad" decryption.
iex> YubikeyOTP.OTP.parse!("ccccccclulvjbbhccnndrietjjnkeclcvjgrnhcivtgd", key: "1111111111111111", skip_checksum: true)
%YubikeyOTP.OTP{
public_id: "<KEY>",
prefix: "cccccc",
serial: 715512,
encrypted_otp: "bbhccnndrietjjnkeclcvjgrnhcivtgd",
private_id: <<68, 48, 254, 248, 123, 61>>,
use_counter: 49442,
timestamp: 4703963,
session_counter: 150,
random: "Xn",
checksum: <<1, 15>>
}
Decrypting the token successfully will hydrate all fields.
iex> YubikeyOTP.OTP.parse!("ccccccclulvjhnblleegivrcjlvvtvujejbclrdjdgvk", key: "1111111111111111")
%YubikeyOTP.OTP{
public_id: "<KEY>",
prefix: "cccccc",
serial: 715512,
encrypted_otp: "hnblleegivrcjlvvtvujejbclrdjdgvk",
private_id: "111111",
use_counter: 0,
timestamp: 8002816,
session_counter: 0,
random: <<64, 22>>,
checksum: <<44, 51>>
}
Errors will be thrown when the checksum is invalid, an invalid key is
provided, or an invalid token is provided.
iex> YubikeyOTP.OTP.parse!("ccccccclulvjbbhccnndrietjjnkeclcvjgrnhcivtgd", key: "1111111111111111")
** (YubikeyOTP.OTP.InvalidChecksumError) OTP checksum is invalid
iex> YubikeyOTP.OTP.parse!("nope")
** (YubikeyOTP.OTP.ParseError) OTP parsing failed
"""
@spec parse!(otp :: binary(), opts :: keyword()) :: YubikeyOTP.OTP.t()
def parse!(otp, opts \\ [])
def parse!(<<
prefix :: binary-size(6),
serial :: binary-size(6),
encrypted_otp :: binary-size(32)
>>, opts) when is_otp(prefix <> serial <> encrypted_otp)
do
decoded_serial = ModHex.decode!(serial)
otp = %OTP{
public_id: prefix <> serial,
prefix: prefix,
serial: decoded_serial |> :binary.decode_unsigned,
encrypted_otp: encrypted_otp
}
if Keyword.get(opts, :key), do: do_parse!(otp, opts), else: otp
end
def parse!(_otp, _opts), do: raise OTP.ParseError
defp do_parse!(otp, opts) do
with decoded_otp <- ModHex.decode!(otp.encrypted_otp),
key <- Keyword.fetch!(opts, :key),
key <- format_key(key),
<<
private_id :: binary-size(6),
use_counter :: binary-size(2),
timestamp :: binary-size(3),
session_counter :: binary-size(1),
random :: binary-size(2),
checksum :: binary-size(2)
>> <- :crypto.crypto_one_time(:aes_128_ecb, key, decoded_otp, false)
do
unless Keyword.get(opts, :skip_checksum) do
decrypted_otp = private_id <> use_counter <> timestamp <> session_counter <> random <> checksum
unless CRC.verify_crc16(decrypted_otp), do: raise OTP.InvalidChecksumError
end
%{otp |
private_id: private_id,
use_counter: use_counter |> :binary.decode_unsigned,
timestamp: timestamp |> :binary.decode_unsigned,
session_counter: session_counter |> :binary.decode_unsigned,
random: random,
checksum: checksum
}
else
result -> raise OTP.ParseError, "Error parsing OTP: #{inspect(result)}"
end
rescue
e in ErlangError -> reraise OTP.ParseError, ErlangError.message(e), __STACKTRACE__
end
defp format_key(key) when is_binary(key) and byte_size(key) == 32 do
case Base.decode16(String.downcase(key), case: :lower) do
{:ok, key} -> key
_ -> raise OTP.ParseError, @key_format_error
end
end
defp format_key(key)
when is_binary(key) and byte_size(key) == 16 do
key
end
defp format_key(_) do
  raise OTP.ParseError, @key_format_error
end
end
|
lib/yubikey_otp/otp.ex
| 0.88136
| 0.469703
|
otp.ex
|
starcoder
|
defmodule Ratatouille.Renderer.Element.Table do
@moduledoc false
@behaviour Ratatouille.Renderer
# Minimum padding on the right of each column
@min_padding 2
alias ExTermbox.Position
alias Ratatouille.Renderer.{Box, Canvas, Element, Text}
@impl true
def render(%Canvas{} = canvas, %Element{children: rows}, _render_fn) do
canvas
|> render_table(rows)
|> Canvas.consume(0, 1)
end
defp render_table(%Canvas{} = canvas, rows) do
col_sizes = column_sizes(canvas.render_box, rows)
max_rows = Box.height(canvas.render_box)
rows
|> Enum.take(max_rows)
|> Enum.reduce(canvas, fn row, canvas ->
{new_canvas, _offset} = render_table_row(canvas, row, col_sizes)
Canvas.consume(new_canvas, 0, 1)
end)
end
defp render_table_row(%Canvas{} = canvas, row, col_sizes) do
row.children
|> Enum.zip(col_sizes)
|> Enum.reduce({canvas, 0}, fn {cell, col_size}, {acc_canvas, offset} ->
new_cell = %Element{
cell
| attributes: Map.merge(row.attributes, cell.attributes)
}
render_table_cell(acc_canvas, new_cell, col_size, offset)
end)
end
defp render_table_cell(%Canvas{} = canvas, cell, col_size, offset)
when col_size > 0 do
text = cell.attributes[:content] || ""
pos = Position.translate_x(canvas.render_box.top_left, offset)
padded_text = String.pad_trailing(text, col_size, " ")
new_canvas = Text.render(canvas, pos, padded_text, cell.attributes)
{new_canvas, offset + col_size}
end
defp render_table_cell(canvas, _cell, _col_size, offset) do
{canvas, offset}
end
defp column_sizes(%Box{} = box, rows) do
cells_by_row =
for row <- rows do
for cell <- row.children, do: cell.attributes[:content] || ""
end
:ok = check_row_uniformity(cells_by_row)
max_width = Box.width(box)
columns = transpose(cells_by_row)
columns
|> min_column_sizes()
|> displayable_columns(max_width)
|> padded_columns(max_width)
end
defp min_column_sizes(columns) do
Enum.map(columns, fn col ->
col
|> Enum.map(&String.length/1)
|> Enum.max()
|> Kernel.+(@min_padding)
end)
end
defp displayable_columns(min_column_sizes, max_size) do
{_, displayable_columns} =
Enum.reduce(min_column_sizes, {:open, []}, fn
_size, {:full, _} = acc ->
acc
size, {:open, sizes} ->
size_excluded = Enum.sum(sizes)
size_included = size_excluded + size
if size_included <= max_size,
do: {:open, sizes ++ [size]},
else: {:full, sizes ++ [max_size - size_excluded]}
end)
if Enum.empty?(displayable_columns),
do: Enum.take(min_column_sizes, 1),
else: displayable_columns
end
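# Worked example (illustrative): with min column sizes [10, 10, 10] and a
# max_size of 25, the first two columns fit (10 + 10 = 20) and the third is
# truncated to the remaining space, yielding [10, 10, 5].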
defp padded_columns(column_sizes, max_size) do
rem_space = max_size - Enum.sum(column_sizes)
per_column_padding =
case length(column_sizes) do
0 -> 0
n -> Integer.floor_div(rem_space, n)
end
Enum.map(column_sizes, &(&1 + per_column_padding))
end
defp transpose(rows) do
rows
|> List.zip()
|> Enum.map(&Tuple.to_list/1)
end
defp check_row_uniformity([]) do
:ok
end
defp check_row_uniformity(rows) do
num_columns = length(hd(rows))
if Enum.all?(rows, &(length(&1) == num_columns)) do
:ok
else
{:error, "All rows must have the same number of columns"}
end
end
end
|
lib/ratatouille/renderer/element/table.ex
| 0.805364
| 0.558809
|
table.ex
|
starcoder
|
defmodule PlugSessionMnesia.Store do
@moduledoc """
Stores the session in a Mnesia table.
The store itself does not create the Mnesia table. It expects an existing
table to be passed as an argument. You can create it yourself following the
*Storage* section or use the helpers provided with this application (see
`PlugSessionMnesia` for more information).
Since this store uses Mnesia, the session can persist upon restarts and be
shared between nodes, depending on your configuration.
## Options
* `:table` - Mnesia table name (required if not set in the application
environment).
## Example
# If you want to create the Mnesia table yourself
:mnesia.create_schema([node()])
:mnesia.create_table(:session, [attributes: [:sid, :data, :timestamp], disc_copies: [node()]])
:mnesia.add_table_index(:session, :timestamp)
plug Plug.Session,
key: "_app_key",
store: PlugSessionMnesia.Store,
table: :session # This table must exist.
## Storage
The data is stored in Mnesia in the following format, where `timestamp` is the
OS UNIX time in the `:native` unit:
{sid :: String.t(), data :: map(), timestamp :: integer()}
The timestamp is updated on access to the session and is used by
`PlugSessionMnesia.Cleaner` to check if the session is still active. If you
want to delete a session a fixed amount of time after its creation,
regardless of its activity, you can disable the timestamp update by configuring
the application:
config :plug_session_mnesia, timestamp: :fixed
"""
@behaviour Plug.Session.Store
alias PlugSessionMnesia.TableNotDefined
alias PlugSessionMnesia.TableNotExists
@max_tries 500
@impl true
def init(opts) do
with :error <- Keyword.fetch(opts, :table),
:error <- Application.fetch_env(:plug_session_mnesia, :table) do
raise TableNotDefined
else
{:ok, table} -> table
end
end
@impl true
def get(_conn, sid, table) do
case lookup_session!(table, sid) do
[{^table, ^sid, data, _timestamp}] ->
unless Application.get_env(:plug_session_mnesia, :timestamp) == :fixed,
do: put_session!(table, sid, data, System.os_time())
{sid, data}
_ ->
{nil, %{}}
end
end
@impl true
def put(_conn, nil, data, table), do: put_new(table, data)
def put(_conn, sid, data, table) do
timestamp =
if Application.get_env(:plug_session_mnesia, :timestamp) == :fixed,
do: table |> lookup_session!(sid) |> Enum.at(0) |> elem(3),
else: System.os_time()
put_session!(table, sid, data, timestamp)
sid
end
@impl true
def delete(_conn, sid, table) do
t = fn ->
:mnesia.delete({table, sid})
end
case :mnesia.transaction(t) do
{:atomic, :ok} -> :ok
{:aborted, {:no_exists, _}} -> raise TableNotExists
end
end
@spec lookup_session!(atom(), String.t()) :: [
{atom(), String.t(), map(), integer()}
]
defp lookup_session!(table, sid) do
t = fn ->
:mnesia.read({table, sid})
end
case :mnesia.transaction(t) do
{:atomic, session} -> session
{:aborted, {:no_exists, _}} -> raise TableNotExists
end
end
@spec put_session!(atom(), String.t(), map(), integer()) :: nil
defp put_session!(table, sid, data, timestamp) do
t = fn ->
:mnesia.write({table, sid, data, timestamp})
end
case :mnesia.transaction(t) do
{:atomic, :ok} -> nil
{:aborted, {:no_exists, _}} -> raise TableNotExists
end
end
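# `put_new/3` below generates a session ID from 96 random bytes
# (Base64-encoded to 128 characters) and retries on the astronomically
# unlikely collision, giving up after @max_tries attempts.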
@spec put_new(atom(), map()) :: String.t()
@spec put_new(atom(), map(), non_neg_integer()) :: String.t()
defp put_new(table, data, counter \\ 0) when counter < @max_tries do
sid = Base.encode64(:crypto.strong_rand_bytes(96))
if lookup_session!(table, sid) == [] do
put_session!(table, sid, data, System.os_time())
sid
else
put_new(table, data, counter + 1)
end
end
end
|
lib/plug_session_mnesia/store.ex
| 0.872971
| 0.571169
|
store.ex
|
starcoder
|
defmodule Ueberauth.Strategy.AzureAD do
@moduledoc """
Provides an Ueberauth strategy for authenticating against OAuth2
endpoints in Microsoft Identity (Azure) 2.0.
## Setup
1. Setup your application at the new [Microsoft app registration portal](https://apps.dev.microsoft.com).
1. Add `:ueberauth_azure_ad` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[{:ueberauth_azure_ad, "~> 0.5"}]
end
```
1. Add the strategy to your applications:
```elixir
def application do
[applications: [:ueberauth_azure_ad]]
end
```
1. Add Microsoft to your Überauth configuration:
```elixir
config :ueberauth, Ueberauth,
providers: [
azure: {Ueberauth.Strategy.AzureAD, []}
]
```
1. Update your provider configuration:
```elixir
config :ueberauth, Ueberauth.Strategy.AzureAD.OAuth,
client_id: System.get_env("AZURE_CLIENT_ID"),
client_secret: System.get_env("AZURE_CLIENT_SECRET"),
tenant_id: System.get_env("AZURE_TENANT_ID")
```
1. Include the Überauth plug in your controller:
```elixir
defmodule MyApp.AuthController do
use MyApp.Web, :controller
plug Ueberauth
...
end
```
1. Create the request and callback routes if you haven't already:
```elixir
scope "/auth", MyApp do
pipe_through :browser
get "/:provider", AuthController, :request
get "/:provider/callback", AuthController, :callback
end
```
1. Your controller needs to implement callbacks to deal with `Ueberauth.Auth` and `Ueberauth.Failure` responses.
For an example implementation see the [Überauth Example](https://github.com/ueberauth/ueberauth_example) application; a minimal sketch follows below.
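A minimal callback sketch (illustrative; the session handling and redirect paths are assumptions, not part of this strategy):
```elixir
def callback(%{assigns: %{ueberauth_auth: auth}} = conn, _params) do
  # auth is an %Ueberauth.Auth{} populated by this strategy
  conn
  |> put_session(:current_user_email, auth.info.email)
  |> redirect(to: "/")
end
def callback(%{assigns: %{ueberauth_failure: _failure}} = conn, _params) do
  conn
  |> put_flash(:error, "Failed to authenticate.")
  |> redirect(to: "/")
end
```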
## Calling
Depending on the configured URL you can initiate the request through:
/auth/azure
By default the scopes used are
* openid
* email
* offline_access
* https://graph.microsoft.com/user.read
*Note: at least one service scope is required in order for a token to be returned by the Microsoft endpoint*
You can configure additional scopes to be used by passing the `extra_scopes` option into the provider
```elixir
config :ueberauth, Ueberauth,
providers: [
azure: {Ueberauth.Strategy.AzureAD, [extra_scopes: "https://graph.microsoft.com/calendars.read"]}
]
```
"""
use Ueberauth.Strategy,
default_scope: "https://graph.microsoft.com/user.read openid email offline_access",
uid_field: :id
alias OAuth2.{Response, Error}
alias Ueberauth.Auth.{Info, Credentials, Extra}
alias Ueberauth.Strategy.AzureAD.OAuth
@doc """
Handles initial request for Microsoft authentication.
"""
def handle_request!(conn) do
default_scopes = option(conn, :default_scope)
extra_scopes = option(conn, :extra_scopes)
scopes = "#{extra_scopes} #{default_scopes}"
authorize_url =
conn.params
|> Map.put(:scope, scopes)
|> Map.put(:redirect_uri, callback_url(conn))
|> OAuth.authorize_url!()
redirect!(conn, authorize_url)
end
@doc """
Handles the callback from Microsoft.
"""
def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
opts = [redirect_uri: callback_url(conn)]
client = OAuth.get_token!([code: code], opts)
token = client.token
case token.access_token do
nil ->
err = token.other_params["error"]
desc = token.other_params["error_description"]
set_errors!(conn, [error(err, desc)])
_token ->
fetch_user(conn, client)
end
end
@doc false
def handle_callback!(conn) do
set_errors!(conn, [error("missing_code", "No code received")])
end
@doc false
def handle_cleanup!(conn) do
conn
|> put_private(:ms_token, nil)
|> put_private(:ms_user, nil)
end
@doc false
def uid(conn) do
user =
conn
|> option(:uid_field)
|> to_string
conn.private.ms_user[user]
end
@doc false
def credentials(conn) do
token = conn.private.ms_token
%Credentials{
expires: token.expires_at != nil,
expires_at: token.expires_at,
scopes: token.other_params["scope"],
token: token.access_token,
refresh_token: token.refresh_token,
token_type: token.token_type
}
end
@doc false
def info(conn) do
user = conn.private.ms_user
%Info{
name: user["displayName"],
email: user["mail"] || user["userPrincipalName"],
first_name: user["givenName"],
last_name: user["surname"]
}
end
@doc false
def extra(conn) do
%Extra{
raw_info: %{
token: conn.private.ms_token,
user: conn.private.ms_user
}
}
end
defp fetch_user(conn, client) do
conn = put_private(conn, :ms_token, client.token)
path = "https://graph.microsoft.com/v1.0/me/"
case OAuth2.Client.get(client, path) do
{:ok, %Response{status_code: 401}} ->
set_errors!(conn, [error("token", "unauthorized")])
{:ok, %Response{status_code: status, body: response}} when status in 200..299 ->
put_private(conn, :ms_user, response)
{:error, %Error{reason: reason}} ->
set_errors!(conn, [error("OAuth2", reason)])
end
end
defp option(conn, key) do
default = Keyword.get(default_options(), key)
conn
|> options
|> Keyword.get(key, default)
end
end
|
lib/ueberauth/strategy/azure_ad.ex
| 0.873417
| 0.822403
|
azure_ad.ex
|
starcoder
|
defmodule ExSel do
@moduledoc """
Simple runtime expression language for elixir.
## Variables
A variable must start with a lowercase letter, followed by mixed-case letters, numbers or underscores.
`true` and `false` are reserved symbols
# valid:
a , myVar , my_Var, var1
# invalid:
Aaaa , true, false, _myVar, my-var, 9var
## Arithmetic expressions:
The supported operators: `+` `-` `/` `*`
Additionally grouping is supported to override normal precedence rules.
# examples:
(1 + 2) * 5 / 4 - 2
var1 * 9 - varB
## Boolean expressions:
The supported comparison operators: `>` `>=` `<` `<=` `==` `!=`
The supported logic operators: `&&` `||` `!`
The supported boolean consts: `true` `false`
Additionally grouping is supported to override normal precedence rules.
# examples:
true
varA > (1 + 2) || varB == true
var1 * 9 - varB == varC
"""
@doc """
Parses the given arithmetic expression into an ExSel.Ast
## Example
iex> ExSel.aexpr("8 + 2 / 4")
{:ok, {:+, [8, {:/, [2, 4]}]}}
"""
@spec aexpr(binary) :: {:ok, ExSel.Ast.t()} | {:error, term}
def aexpr(expression) do
expression
|> prep_input
|> ExSel.Parser.aexpr()
|> unwrap_res
end
@doc """
Parses the given boolean expression into an ExSel.Ast
## Example
iex> ExSel.bexpr("8 + 2 > 4")
{:ok, {:>, [{:+, [8, 2]}, 4]}}
"""
@spec bexpr(binary) :: {:ok, ExSel.Ast.t()} | {:error, term}
def bexpr(expression) do
expression
|> prep_input
|> ExSel.Parser.bexpr()
|> unwrap_res
end
defp prep_input(input), do: String.trim(input)
defp unwrap_res(result) do
case result do
{:ok, [acc], "", _, _line, _offset} ->
{:ok, acc}
{:ok, _, rest, _, _line, _offset} ->
{:error, "could not parse: " <> rest}
{:error, reason, _rest, _context, _line, _offset} ->
{:error, reason}
end
end
@doc """
Evaluates the expression ast.
First build an ast using `aexpr/1` or `bexpr/1`.
To specify variables that should be available during evaluation pass a map with their values.
## Example
iex> {:ok, ast} = ExSel.aexpr("varA + varB / 4")
iex> my_vars = %{"varA" => 8, "varB" => 2}
iex> ExSel.eval!(ast, my_vars)
8.5
"""
@spec eval!(ExSel.Ast.t(), ExSel.Ast.eval_ctx()) :: ExSel.Ast.eval_result() | no_return
def eval!(ast, variables \\ %{}), do: ExSel.Ast.eval!(ast, variables)
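# Boolean expressions evaluate the same way (illustrative):
#
#     {:ok, ast} = ExSel.bexpr("varA > 1 && varB == 2")
#     ExSel.eval!(ast, %{"varA" => 8, "varB" => 2})
#     #=> true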
end
|
lib/ex_sel.ex
| 0.824956
| 0.563588
|
ex_sel.ex
|
starcoder
|
defmodule Meilisearch.Document do
@moduledoc """
Collection of functions used to manage documents.
[MeiliSearch Documentation - Documents](https://docs.meilisearch.com/references/documents.html)
"""
alias Meilisearch.HTTP
@doc """
Get one document by id.
## Example
iex> Meilisearch.Document.get("meilisearch_test", 1)
{:ok,
%{
"id" => 1,
"tagline" => "In space no one can hear you scream",
"title" => "Alien"
}}
"""
@spec get(String.t(), String.t()) :: HTTP.response()
def get(index_uid, document_id) do
HTTP.get_request("indexes/#{index_uid}/documents/#{document_id}")
end
@doc """
List documents in given index.
## Options
* `offset` Number of documents to skip. Defaults to `0`
* `limit` Maximum number of documents returned. Defaults to `20`
## Examples
iex> Meilisearch.Document.list("meilisearch_test")
{:ok,
[
%{
"id" => 2,
"tagline" => "You'll never go in the water again",
"title" => "Jaws"
},
%{
"id" => 1,
"tagline" => "In space no one can hear you scream",
"title" => "Alien"
}
]
}
iex> Meilisearch.Document.list("meilisearch_test", limit: 2, offset: 4)
{:ok,
[
%{
"id" => 6,
"tagline" => "Who ya gonna call?",
"title" => "Ghostbusters"
},
%{
"id" => 5,
"tagline" => "Be Afraid. Be very afraid.",
"title" => "The Fly"
}
]
}
"""
@spec list(String.t(), Keyword.t()) :: HTTP.response()
def list(index_uid, opts \\ []) do
HTTP.get_request("indexes/#{index_uid}/documents", [], params: opts)
end
@doc """
Add document(s) to given index.
If an existing document (based on primary key) is given it will be replaced.
## Examples
iex> Meilisearch.Document.add_or_replace("meilisearch_test", %{
"id" => 2,
"tagline" => "You'll never go in the water again",
"title" => "Jaws"
})
{:ok, %{"updateId" => 1}}
iex> Meilisearch.Document.add_or_replace(
"meilisearch_test",
[
%{
"id" => 6,
"tagline" => "Who ya gonna call?",
"title" => "Ghostbusters"
},
%{
"id" => 5,
"tagline" => "Be Afraid. Be very afraid.",
"title" => "The Fly"
}
]
)
{:ok, %{"updateId" => 1}}
"""
@spec add_or_replace(String.t(), list(any), Keyword.t()) :: HTTP.response()
def add_or_replace(index_uid, docs, opts \\ [])
def add_or_replace(index_uid, doc, opts) when not is_list(doc) do
add_or_replace(index_uid, [doc], opts)
end
def add_or_replace(index_uid, docs, opts) do
HTTP.post_request("indexes/#{index_uid}/documents", docs, [], params: opts)
end
@doc """
Add document(s) to given index.
If an existing document (based on primary key) is given it will be updated with provided values.
## Examples
iex> Meilisearch.Document.add_or_update("meilisearch_test", %{
"id" => 2,
"tagline" => "You'll never go in the water again",
"title" => "Jaws"
})
{:ok, %{"updateId" => 1}}
iex> Meilisearch.Document.add_or_update(
"meilisearch_test",
[
%{
"id" => 6,
"tagline" => "Who ya gonna call?",
"title" => "Ghostbusters"
},
%{
"id" => 5,
"tagline" => "Be Afraid. Be very afraid.",
"title" => "The Fly"
}
]
)
{:ok, %{"updateId" => 1}}
"""
@spec add_or_update(String.t(), list(any), Keyword.t()) :: {:ok, any} | {:error, String.t()}
def add_or_update(index_uid, docs, opts \\ [])
def add_or_update(index_uid, doc, opts) when not is_list(doc) do
add_or_update(index_uid, [doc], opts)
end
def add_or_update(index_uid, docs, opts) do
HTTP.put_request("indexes/#{index_uid}/documents", docs, [], params: opts)
end
@doc """
Delete one or more documents based on id in a given index.
## Example
iex> Meilisearch.Document.delete("meilisearch_test", 1)
{:ok, %{"updateId" => 0}}
iex> Meilisearch.Document.delete("meilisearch_test", [1,2,3,4])
{:ok, %{"updateId" => 0}}
"""
@spec delete(String.t(), String.t() | list(String.t())) :: {:ok, any} | {:error, String.t()}
def delete(index_uid, document_ids) when is_list(document_ids) do
HTTP.post_request("indexes/#{index_uid}/documents/delete-batch", document_ids)
end
def delete(index_uid, document_id) do
HTTP.delete_request("indexes/#{index_uid}/documents/#{document_id}")
end
@doc """
Delete all documents in given index.
## Example
iex> Meilisearch.Document.delete_all("meilisearch_test")
{:ok, %{"updateId" => 0}}
"""
@spec delete_all(String.t()) :: {:ok, any} | {:error, binary}
def delete_all(index_uid) do
HTTP.delete_request("indexes/#{index_uid}/documents")
end
end
|
lib/meilisearch/document.ex
| 0.743541
| 0.408542
|
document.ex
|
starcoder
|
defmodule ICalendar.RRULE.Type do
alias ICalendar.RRULE
use Timex
@moduledoc """
Defines `ICalendar.RRULE.Type` Ecto.Type.
"""
@behaviour Ecto.Type
@doc """
The Ecto primitive type.
"""
def type, do: :map
@doc """
Casts the given value to `ICalendar.RRULE` struct.
It supports:
* A map with valid `RRULE` keys in either string or atom form
* A valid `RRULE` in string form (aka serialized version)
* An `ICalendar.RRULE` struct.
All other inputs will return :error
## Examples
iex> Type.cast(%{frequency: :weekly}) == {:ok, %RRULE{frequency: :weekly}}
true
iex> Type.cast("FREQ=WEEKLY") == {:ok, %RRULE{frequency: :weekly}}
true
iex> Type.cast(%RRULE{frequency: :weekly}) == {:ok, %RRULE{frequency: :weekly}}
true
iex> Type.cast(:garbage) == :error
true
"""
@spec cast(map | String.t | %RRULE{}) :: {:ok, %RRULE{}}
def cast(%RRULE{} = rrule) do
{:ok, rrule}
end
def cast(%{} = params) do
# string keys should match existing atoms since they should match struct keys
# some pairs also have values that should be atoms from valid options
params = params_strings_to_atoms(params)
rrule = struct(RRULE, params)
{:ok, rrule}
end
# assumes serialized icalendar rrule
def cast(value) when is_bitstring(value) do
RRULE.deserialize(value)
end
def cast(_), do: :error
@doc """
Converts an `ICalendar.RRULE` struct into a map saving to the db.
## Examples
iex> Type.dump(%RRULE{frequency: :weekly}) == {:ok, %RRULE{frequency: :weekly}}
true
iex> Type.dump(:garbage) == :error
true
"""
@spec dump(%RRULE{}) :: {:ok, %RRULE{}}
def dump(%RRULE{} = rrule) do
{:ok, rrule}
end
def dump(_), do: :error
@doc """
Converts a map as saved to the db into a `ICalendar.RRULE` struct.
## Examples
iex> case Type.load(%{"frequency" => "weekly", "until" => "2017-10-31 11:59:00"}) do
...> {:ok, %RRULE{}} -> true
...> end
true
"""
@spec load(map) :: {:ok, %RRULE{}}
def load(value) do
params =
value
|> params_strings_to_atoms()
|> load_until()
rrule = struct(RRULE, params)
{:ok, rrule}
end
defp params_strings_to_atoms(params) do
# Touch the struct so its key atoms are guaranteed to exist before
# String.to_existing_atom/1 is called (Code.ensure_loaded(RRULE) alone
# did not solve the problem).
_ = Map.keys(%RRULE{})
for {k, v} <- params, into: %{} do
k = if is_binary(k) do
k |> String.to_existing_atom()
else
k
end
# WARN: lists values may trip this up
v = if is_binary(v) and k in [:frequency, :by_day, :by_month, :week_start] do
v |> String.downcase() |> String.to_atom()
else
v
end
{k, v}
end
end
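# Illustrative transformation (keys and values assumed):
#
#     %{"frequency" => "WEEKLY"}  becomes  %{frequency: :weekly}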
defp load_until(%{until: nil} = params), do: params
defp load_until(%{until: %DateTime{}} = params), do: params
defp load_until(%{until: until} = params) do
{:ok, until} = until |> NaiveDateTime.from_iso8601()
until = until |> Timex.to_datetime("Etc/UTC")
params |> Map.put(:until, until)
end
end
|
lib/icalendar/rrule/type.ex
| 0.869562
| 0.410904
|
type.ex
|
starcoder
|
defmodule BitstylesPhoenix.Component.Content do
use BitstylesPhoenix.Component
@moduledoc """
The Content component.
"""
@doc ~s"""
Renders a content div, to add some spacing to the sides of your content.
## Attributes
- `variant` — Variant of the content you want, from those available in the CSS classes e.g. `full`
- `class` - Extra classes to pass to the content. See `BitstylesPhoenix.Helper.classnames/1` for usage.
- All other attributes are passed to the `div` tag.
See [bitstyles content docs](https://bitcrowd.github.io/bitstyles/?path=/docs/atoms-content--content) for examples, and for the default variants available.
"""
story(
"Default content",
'''
iex> assigns = %{}
...> render ~H"""
...> <.ui_content>
...> Content
...> </.ui_content>
...> """
"""
<div class="a-content">
Content
</div>
"""
''',
width: "100%"
)
story(
"Full content",
'''
iex> assigns = %{}
...> render ~H"""
...> <.ui_content variant="full">
...> Full Content
...> </.ui_content>
...> """
"""
<div class="a-content a-content--full">
Full Content
</div>
"""
''',
width: "100%"
)
story(
"Extra classes and attributes",
'''
iex> assigns = %{}
...> render ~H"""
...> <.ui_content variant="full" class="u-h2" data-foo="bar">
...> Content with extra
...> </.ui_content>
...> """
"""
<div class="a-content a-content--full u-h2" data-foo="bar">
Content with extra
</div>
"""
''',
width: "100%"
)
def ui_content(assigns) do
variant_classes = assigns[:variant] |> List.wrap() |> Enum.map_join(" ", &"a-content--#{&1}")
class = classnames(["a-content", variant_classes, assigns[:class]])
extra = assigns_to_attributes(assigns, [:class, :variant])
assigns = assign(assigns, class: class, extra: extra)
~H"""
<div class={@class} {@extra}>
<%= render_slot(@inner_block) %>
</div>
"""
end
end
|
lib/bitstyles_phoenix/component/content.ex
| 0.784443
| 0.408601
|
content.ex
|
starcoder
|
defmodule Exgencode do
@moduledoc """
Documentation for Exgencode.
"""
defprotocol Pdu.Protocol do
@doc "Returns the size of the field in bits."
def sizeof(pdu, field_name)
@doc "Returns the size of the pdu for given version."
@spec sizeof_pdu(Exgencode.pdu(), Version.version() | nil, Exgencode.return_size_type()) ::
non_neg_integer | {:subrecord, Exgencode.pdu()}
def sizeof_pdu(pdu, version, type)
@doc "Encode the Elixir structure into a binary given the protocol version."
@spec encode(Exgencode.pdu(), nil | Version.version()) :: binary
def encode(pdu, version)
@doc "Decode a binary into the specified Elixir structure."
@spec decode(Exgencode.pdu(), binary, nil | Version.version()) :: {Exgencode.pdu(), binary}
def decode(pdu, binary, version)
@doc "Calculate the values of all offset fields."
@spec set_offsets(Exgencode.pdu(), nil | Version.version()) :: Exgencode.pdu()
def set_offsets(pdu, version)
end
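# Illustrative protocol usage (the PDU modules come from `defpdu`; see the
# doctests further down):
#
#     Exgencode.Pdu.encode(%TestPdu.TopPdu{aField: 24})
#     #=> <<24::size(24), 1::size(16)>>
#     Exgencode.Pdu.decode(%TestPdu.TopPdu{}, <<24::size(24), 1::size(16)>>)
#     #=> {%TestPdu.TopPdu{aField: 24, subPdu: %TestPdu.SubPdu{someField: 1}}, <<>>}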
@typedoc "A PDU, that is an Elixir structure representing a PDU."
@type pdu :: struct
@typedoc "PDU name, must be a structure name"
@type pdu_name :: module
@typedoc "The type of the field."
@type field_type ::
:subrecord | :constant | :string | :binary | :float | :integer | :variable | :skip
@typedoc "A custom encoding function that is meant to take the value of the field and return its binary represantion."
@type field_encode_fun :: (term -> bitstring)
@typedoc "A custom decoding function that receives the PDU decoded so far and remaining binary and is meant to return PDU with the field decoded and remaining binary."
@type field_decode_fun :: (pdu, bitstring -> {pdu, bitstring})
@typedoc "The endianness the field should be encoded/decoded with"
@type field_endianness :: :big | :little | :native
@typedoc "Parameters of the given field"
@type fieldParam ::
{:size, non_neg_integer | field_name}
| {:type, field_type}
| {:encode, field_encode_fun}
| {:decode, field_decode_fun}
| {:version, Version.requirement()}
| {:endianness, field_endianness}
| {:conditional, field_name}
| {:offset_to, field_name}
@typedoc "Name of the field."
@type field_name :: atom
@typedoc "Desired return type of pdu size"
@type return_size_type :: :bits | :bytes
@doc """
This macro allows for the definition of binary PDUs in a simple way, allowing for convenient encoding and decoding between
binary format and Elixir structures.
# PDUs
Each PDU for the protocol is defined given a name that must be a valid Elixir structure (module) name followed by a list
of fields that the given PDU has.
## Fields
Each field can have the following options specified:
### size
Defines the size of the field in bits. If the field is of type :subrecord the :size is unused.
defpdu SomePdu,
someField: [size: 12]
### default
Defines the default value that the field should assume when building a new Elixir structure of the given PDU.
defpdu PduWithDefault,
aFieldWithDefault: [size: 10, default: 15]
### type
Defines the type of the field. Field can be of type:
* `:constant`
* `:subrecord`
* `:string`
* `:binary`
* `:float`
* `:integer`
* `:variable`
* `:skip`
If no type is specified it will default to `:integer`. Both `:integer` and `:float` specify normal numerical values and have no special properties.
#### :constant
If the field is constant it will not become part of the Elixir structure and will not be accessible. However it will still be
encoded into the binary representation and the decoding will expect the field to be present and have the given value in the decoded binary. Otherwise
FunctionClauseError will be raised. A :constant field MUST have a default value specified.
defpdu PduWithConstant,
aConstantField: [size: 12, default: 10, type: :constant]
iex> Exgencode.Pdu.encode(%TestPdu.PduWithConstant{})
<< 10 :: size(16) >>
iex> %TestPdu.PduWithConstant{}.aConstantField
** (KeyError) key :aConstantField not found in: %Exgencode.TestPdu.PduWithConstant{}
#### :subrecord
If the field is meant to contain a sub-structure then it should be of type `:subrecord`. Such a field must either have a default value specified that is of the
type of the subrecord, or define custom decode and encode functions.
#### Examples:
defpdu SubPdu,
someField: [size: 16, default: 1]
defpdu TopPdu,
aField: [size: 24],
subPdu: [type: :subrecord, default: %SubPdu{}]
iex> Exgencode.Pdu.encode(%TestPdu.TopPdu{aField: 24})
<< 24 :: size(24), 1 :: size(16) >>
iex> Exgencode.Pdu.decode(%TestPdu.TopPdu{}, << 24 :: size(24), 1 :: size(16) >>)
{%TestPdu.TopPdu{aField: 24, subPdu: %TestPdu.SubPdu{someField: 1}}, <<>>}
#### :virtual
The virtual fields are never encoded into binaries and exist only in the Elixir structs. When decoding into a struct
the virtual field will always assume the default value.
#### Examples:
defpdu VirtualPdu,
real_field: [size: 16],
virtual_field: [type: :virtual]
iex> Exgencode.Pdu.encode(%TestPdu.VirtualPdu{real_field: 12, virtual_field: "Any value goes here"})
<<12::size(16)>>
iex> Exgencode.Pdu.decode(%TestPdu.VirtualPdu{}, <<12::size(16)>>)
{%TestPdu.VirtualPdu{real_field: 12, virtual_field: nil}, <<>>}
#### :binary
If the field is an arbitrary binary value it can be specified with this type. In that case the size parameter indicates size in bytes
rather than bits. This type does not define any padding, that is, the binary contained in this field must be at least as long as the defined field size,
otherwise an `ArgumentError` is raised. If the binary is longer, it will be trimmed.
#### Examples:
defpdu BinaryMsg,
someHeader: [size: 8, default: 10],
binaryField: [size: 16, type: :binary]
iex> Exgencode.Pdu.encode(%TestPdu.BinaryMsg{binaryField: "16charactershere"})
<< 10 :: size(8), "16charactershere" :: binary>>
#### :variable
Variable fields have no pre-defined size; instead the size is defined by the value of another field. When defining a `:variable` field, the
`:size` parameter must be set to the name of the field defining the size, which in turn should be an `:integer` field. The size in that case
can only be specified in bytes. All `:variable` fields are binary fields.
Note that the field defining the size must be defined before the variable length field.
#### Examples:
defpdu VariablePdu,
some_field: [size: 16],
size_field: [size: 16],
variable_field: [type: :variable, size: :size_field]
iex> Exgencode.Pdu.encode(%TestPdu.VariablePdu{some_field: 52, size_field: 2, variable_field: "AB"})
<<52::size(16), 2::size(16), "A", "B">>
iex> Exgencode.Pdu.decode(%TestPdu.VariablePdu{}, <<52::size(16), 2::size(16), "A", "B">>)
{%TestPdu.VariablePdu{some_field: 52, size_field: 2, variable_field: "AB"}, <<>>}
#### :string
The `:string` type is similar to `:binary`, however it will not raise any errors if the length of the value to be encoded differs from the declared field size.
Instead, the string will be trimmed if it is too long and padded with trailing 0-bytes if it is too short. Upon decoding, all trailing 0-bytes are removed.
For any other handling of padding or empty bytes, custom decode and encode functions must be defined.
#### Examples:
defpdu StringMsg,
someHeader: [size: 8, default: 10],
stringField: [size: 16, type: :string]
iex> Exgencode.Pdu.encode(%TestPdu.StringMsg{stringField: "16charactershere"})
<< 10 :: size(8), "16charactershere" :: binary>>
iex> Exgencode.Pdu.encode(%TestPdu.StringMsg{stringField: "Too long string for field size"})
<< 10 :: size(8), "Too long string " :: binary>>
iex> Exgencode.Pdu.encode(%TestPdu.StringMsg{stringField: "Too short"})
<< 10 :: size(8), "Too short" :: binary, 0, 0, 0, 0, 0, 0, 0>>
iex> Exgencode.Pdu.decode(%TestPdu.StringMsg{}, << 10 :: size(8), "Too short" :: binary, 0, 0, 0, 0, 0, 0, 0>>)
{%TestPdu.StringMsg{stringField: "Too short"}, <<>>}
#### :skip
A `:skip` field is never decoded into the struct. Its value is set to `:default` when encoding the struct.
defpdu SkippedPdu,
testField: [default: 1, size: 16],
skippedField: [size: 8, default: 5, type: :skip]
iex> Exgencode.Pdu.encode(%TestPdu.SkippedPdu{testField: 15})
<< 15 :: size(16), 5 :: size(8) >>
iex> %TestPdu.SkippedPdu{}.skippedField
** (KeyError) key :skippedField not found in: %Exgencode.TestPdu.SkippedPdu{testField: 1}
iex> Exgencode.Pdu.decode(%TestPdu.SkippedPdu{}, << 32 :: size(16), 11 :: size(8) >>)
{%TestPdu.SkippedPdu{testField: 32}, <<>>}
### encode/decode
Defines a custom encode or decode function. See the type specifications for the function signatures. If a PDU has a custom encode function defined it must also define
a custom decode function. Custom encode and decode functions can override any of the field's other parameters if the user so wishes.
#### Examples:
defpdu CustomPdu,
normalField: [size: 16, default: 3],
customField: [encode: fn(val) -> << val * 2 :: size(16) >> end,
decode: fn(pdu, << val :: size(16) >>) -> {struct(pdu, %{customField: div(val, 2)}), <<>>} end]
iex> Exgencode.Pdu.encode(%TestPdu.CustomPdu{customField: 10})
<< 3 :: size(16), 20 :: size(16) >>
iex> Exgencode.Pdu.decode(%TestPdu.CustomPdu{}, << 3 :: size(16), 20 :: size(16) >>)
{%TestPdu.CustomPdu{customField: 10}, <<>>}
### version
Defines the requirement for the protocol version for the given field to be included in the message. When a version is specified `encode/2` and `decode/3` can take
an optional parameter with the given version name. If the given version matches the version requirement defined by this option in the PDU definition, the field will
be included. Otherwise it will be skipped.
defpdu VersionedMsg,
oldField: [default: 10, size: 16],
newerField: [size: 8, version: ">= 2.0.0"]
See the documentation for `Exgencode.Pdu.encode/2` for more examples.
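For instance, with the `VersionedMsg` definition above, the version argument passed to `Exgencode.Pdu.encode/2` controls which fields are emitted. This is an illustrative sketch (not a doctest), assuming no other versioned fields:
Exgencode.Pdu.encode(%TestPdu.VersionedMsg{oldField: 10}, "1.0.0")
# => << 10 :: size(16) >>
Exgencode.Pdu.encode(%TestPdu.VersionedMsg{oldField: 10, newerField: 14}, "2.0.0")
# => << 10 :: size(16), 14 :: size(8) >>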
### endianness
Defines the endianness of the particular field. Allowed options are `:big`, `:little` and `:native`. Defaults to `:big`.
#### Examples:
defpdu EndianMsg,
bigField: [default: 15, size: 32, endianness: :big],
smallField: [default: 15, size: 32, endianness: :little]
iex> Exgencode.Pdu.encode(%TestPdu.EndianMsg{})
<< 15 :: big-size(32), 15 :: little-size(32)>>
### conditional
Defines that the field is present in the encoded binary only if another field has a non-nil value.
Examples:
defpdu ConditionalPdu,
normal_field: [size: 16],
flag_field: [size: 8],
conditional_field: [size: 8, conditional: :flag_field],
another_normal_field: [size: 8],
second_flag: [size: 8],
size_field: [size: 16, conditional: :second_flag],
conditional_variable_field: [type: :variable, size: :size_field, conditional: :second_flag]
iex> Exgencode.Pdu.encode(%TestPdu.ConditionalPdu{
...> normal_field: 12,
...> flag_field: 1,
...> conditional_field: 10,
...> another_normal_field: 200,
...> second_flag: 1,
...> size_field: 4,
...> conditional_variable_field: "test"
...> })
<<12::size(16), 1, 10, 200, 1, 4::size(16), "test">>
iex> Exgencode.Pdu.encode(%TestPdu.ConditionalPdu{
...> normal_field: 12,
...> flag_field: 1,
...> conditional_field: 10,
...> another_normal_field: 200,
...> second_flag: 0,
...> size_field: nil,
...> conditional_variable_field: nil
...> })
<<12::size(16), 1, 10, 200, 0>>
### offset_to
Defines that the field contains the offset to another field. The offset is in bytes since the
beginning of the PDU. Note that offsets are automatically calculated when calling `Exgencode.Pdu.encode/2`.
Examples:
defpdu OffsetPdu,
offset_to_field_a: [size: 16, offset_to: :field_a],
offset_to_field_b: [size: 16, offset_to: :field_b],
offset_to_field_c: [size: 16, offset_to: :field_c],
field_a: [size: 8],
size_field: [size: 16],
variable_field: [type: :variable, size: :size_field],
field_b: [size: 8],
field_c: [size: 8, conditional: :offset_to_field_c]
iex> Exgencode.Pdu.encode(%TestPdu.OffsetPdu{
...> field_a: 14,
...> size_field: 4,
...> variable_field: "test",
...> field_b: 15,
...> field_c: 20
...> })
<<6::size(16), 9 + 4::size(16), 10 + 4::size(16), 14, 4::size(16)>> <> "test" <> <<15, 20>>
iex> Exgencode.Pdu.encode(%TestPdu.OffsetPdu{
...> field_a: 14,
...> size_field: 4,
...> variable_field: "test",
...> field_b: 15,
...> field_c: nil
...> })
<<6::size(16), 9 + 4::size(16), 0::size(16), 14, 4::size(16)>> <> "test" <> <<15>>
"""
@spec defpdu(pdu_name, [{field_name, fieldParam}]) :: any
defmacro defpdu name, original_field_list do
Exgencode.Validator.validate_pdu(name, original_field_list)
field_list = map_fields(name, original_field_list)
fields_for_encodes =
Enum.map(field_list, fn {field_name, props} ->
{field_name, props[:encode]}
end)
fields_for_decodes =
Enum.map(field_list, fn {field_name, props} ->
{field_name, props[:decode]}
end)
struct_fields =
for {field_name, props} <- field_list, props[:type] not in [:constant, :skip] do
{field_name, props[:default]}
end
# out =
quote do
defmodule unquote(name) do
@moduledoc false
defstruct unquote(struct_fields)
@type t :: %unquote(name){}
end
defimpl Exgencode.Pdu.Protocol, for: unquote(name) do
unquote(Exgencode.Sizeof.build_sizeof(field_list))
unquote(Exgencode.Sizeof.build_sizeof_pdu(field_list))
unquote(Exgencode.Offsets.create_offset_fun(field_list))
def encode(pdu, version) do
# Recalculate offset fields first, then concatenate every field's
# encoded binary in definition order.
pdu = Exgencode.Pdu.set_offsets(pdu, version)
for {_field, encode_fun} <- unquote(fields_for_encodes),
into: <<>>,
do: encode_fun.(version).(pdu)
end
def decode(pdu, binary, version) do
do_decode(pdu, binary, unquote(fields_for_decodes), version)
end
defp do_decode(pdu, binary, [{_field, decode_fun} | rest], version) do
{new_pdu, rest_binary} = decode_fun.(version).(pdu, binary)
do_decode(new_pdu, rest_binary, rest, version)
end
defp do_decode(pdu, rest_bin, [], _) do
{pdu, rest_bin}
end
end
end
# File.write("#{Macro.to_string(name)}.ex", Macro.to_string(out))
# out
end
defp map_fields(name, original_field_list) do
for {field_name, original_props} <- original_field_list do
props =
Keyword.merge(
[endianness: :big, type: :integer, conditional: nil, encode: nil, decode: nil],
original_props
)
all_field_names = Enum.map(original_field_list, fn {name, _} -> name end)
Exgencode.Validator.validate_field(name, field_name, props, all_field_names)
field_type = props[:type]
case {props[:encode], props[:decode]} do
{nil, nil} ->
encode_fun =
Exgencode.EncodeDecode.create_versioned_encode(
Exgencode.EncodeDecode.create_encode_fun(
field_type,
field_name,
props
),
props[:version]
)
decode_fun =
Exgencode.EncodeDecode.create_versioned_decode(
Exgencode.EncodeDecode.create_decode_fun(
field_type,
field_name,
props
),
props[:version]
)
{field_name, [{:encode, encode_fun}, {:decode, decode_fun} | props]}
_ ->
encode_fun =
Exgencode.EncodeDecode.create_versioned_encode(
Exgencode.EncodeDecode.wrap_custom_encode(field_name, props[:encode]),
props[:version]
)
decode_fun =
Exgencode.EncodeDecode.create_versioned_decode(props[:decode], props[:version])
{field_name,
props
|> Keyword.replace!(:encode, encode_fun)
|> Keyword.replace!(:decode, decode_fun)}
end
end
end
end
|
lib/exgencode.ex
| 0.914049
| 0.438905
|
exgencode.ex
|
starcoder
|
defmodule CircularBuffer do
use GenServer
@moduledoc """
An API to a stateful process that fills and empties a circular buffer
"""
# CLIENT API
@doc """
Create a new buffer of a given capacity
"""
@spec new(capacity :: integer) :: {:ok, pid}
def new(capacity) do
GenServer.start_link(__MODULE__, capacity, [])
end
@doc """
Read the oldest entry in the buffer, fail if it is empty
"""
@spec read(buffer :: pid) :: {:ok, any} | {:error, atom}
def read(buffer) do
GenServer.call(buffer, :read)
end
@doc """
Write a new item in the buffer, fail if it is full
"""
@spec write(buffer :: pid, item :: any) :: :ok | {:error, atom}
def write(buffer, item) do
GenServer.call(buffer, {:write, item})
end
@doc """
Write an item in the buffer, overwrite the oldest entry if it is full
"""
@spec overwrite(buffer :: pid, item :: any) :: :ok
def overwrite(buffer, item) do
GenServer.cast(buffer, {:overwrite, item})
end
@doc """
Clear the buffer
"""
@spec clear(buffer :: pid) :: :ok
def clear(buffer) do
GenServer.cast(buffer, :clear)
end
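# Example usage (illustrative):
#
#     {:ok, buffer} = CircularBuffer.new(2)
#     :ok = CircularBuffer.write(buffer, 1)
#     :ok = CircularBuffer.write(buffer, 2)
#     {:error, :full} = CircularBuffer.write(buffer, 3)
#     :ok = CircularBuffer.overwrite(buffer, 3)   # drops 1, the oldest entry
#     {:ok, 2} = CircularBuffer.read(buffer)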
# DATA STRUCTURE
# Essentially a deque made out of two lists, one for new input (write, overwrite)
# and one for output (read), and keeping track of the size and capacity.
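# For example (illustrative): with capacity 2, after writing 1 and 2, reading
# once, and then writing 3, the state is
# %CircularBuffer{capacity: 2, size: 2, input: [3], output: [2]};
# the oldest element sits at the head of :output, the newest at the head of :input.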
defstruct [:capacity, size: 0, input: [], output: []]
def new_buffer(capacity), do: {:ok, %CircularBuffer{capacity: capacity}}
def read_buffer(%CircularBuffer{size: 0} = buffer), do: {{:error, :empty}, buffer}
def read_buffer(%CircularBuffer{size: size, output: [out | output]} = buffer),
do: {{:ok, out}, %{buffer | size: size - 1, output: output}}
def read_buffer(%CircularBuffer{input: input} = buffer),
do: read_buffer(%{buffer | input: [], output: Enum.reverse(input)})
def write_buffer(%CircularBuffer{size: capacity, capacity: capacity} = buffer, _item),
do: {{:error, :full}, buffer}
def write_buffer(%CircularBuffer{size: size, input: input} = buffer, item),
do: {:ok, %{buffer | size: size + 1, input: [item | input]}}
def overwrite_buffer(%CircularBuffer{size: capacity, capacity: capacity} = buffer, item) do
{_, smaller_buffer} = read_buffer(buffer)
write_buffer(smaller_buffer, item)
end
def overwrite_buffer(buffer, item), do: write_buffer(buffer, item)
def clear_buffer(%CircularBuffer{capacity: capacity}), do: %CircularBuffer{capacity: capacity}
@impl true
def init(capacity) do
new_buffer(capacity)
end
# SERVER API
@impl true
def handle_call(:read, _from, buffer) do
{reply, buffer} = read_buffer(buffer)
{:reply, reply, buffer}
end
@impl true
def handle_call({:write, item}, _from, buffer) do
{reply, buffer} = write_buffer(buffer, item)
{:reply, reply, buffer}
end
@impl true
def handle_cast({:overwrite, item}, buffer) do
{_reply, buffer} = overwrite_buffer(buffer, item)
{:noreply, buffer}
end
@impl true
def handle_cast(:clear, buffer) do
{:noreply, clear_buffer(buffer)}
end
end
|
exercises/practice/circular-buffer/.meta/example.ex
| 0.824356
| 0.423071
|
example.ex
|
starcoder
|
defmodule Memento do
require Memento.Mnesia
require Memento.Error
@moduledoc """
Simple + Powerful interface to the Erlang Mnesia Database.
See the [README](https://github.com/sheharyarn/memento) to get
started.
"""
# Public API
# ----------
@doc """
Start the Memento Application.
This starts Memento and `:mnesia` along with some sane application
defaults. See `:mnesia.start/0` for more details.
"""
@spec start() :: :ok | {:error, any}
def start do
Application.start(:mnesia)
end
@doc """
Stop the Memento Application.
"""
@spec stop() :: :ok | {:error, any}
def stop do
Application.stop(:mnesia)
end
@doc """
Tells Memento about other nodes running Memento/Mnesia.
You can use this to connect to and synchronize with other
nodes at runtime and/or on discovery, to take full advantage
of the distribution mode of Memento and Mnesia.
This is a wrapper method around `:mnesia.change_config/2`.
## Example
```
# Connect to Memento running on a specific node
Memento.add_nodes(:node_xyz@some_host)
# Add all connected nodes to Memento distributed database
Memento.add_nodes(Node.list())
```
"""
@spec add_nodes(node | list(node)) :: {:ok, list(node)} | {:error, any}
def add_nodes(nodes) do
nodes = List.wrap(nodes)
if Enum.any?(nodes, & !is_atom(&1)) do
Memento.Error.raise("Invalid Node list passed")
end
Memento.Mnesia.call(:change_config, [:extra_db_nodes, nodes])
end
@doc """
Prints `:mnesia` information to console.
"""
@spec info() :: :ok
def info do
Memento.Mnesia.call(:info, [])
end
@doc """
Returns all information about the Mnesia system.
Optionally accepts a `key` atom argument which returns result for
only that key. Will throw an exception if that key is invalid. See
`:mnesia.system_info/0` for more information and a full list of
allowed keys.
"""
@spec system(atom) :: any
def system(key \\ :all) do
Memento.Mnesia.call(:system_info, [key])
end
@doc """
Wait until specified tables are ready.
Before performing some tasks, it's necessary that certain tables
are ready and accessible. This call hangs until all tables
specified are accessible, or until timeout is reached
(default: 3000ms).
The `timeout` value can either be `:infinity` or an integer
representing time in milliseconds. If you pass a Table/Module that
does not exist along with `:infinity` as timeout, it will hang your
process until that table is created and ready.
For more information, see `:mnesia.wait_for_tables/2`.
## Examples
```
# Wait until the `Movies` table is ready
Memento.wait(Movies, :infinity)
# Wait a maximum of 3 seconds until the two tables are ready
Memento.wait([TableA, TableB])
```
"""
@spec wait(list(Memento.Table.name), integer | :infinity) :: :ok | {:timeout, list(Memento.Table.t)} | {:error, any}
def wait(tables, timeout \\ 3000) do
tables = List.wrap(tables)
Memento.Mnesia.call(:wait_for_tables, [tables, timeout])
end
# Delegates
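# A minimal transaction sketch (illustrative; assumes a `Movies` table defined
# via `use Memento.Table`):
#
#     Memento.transaction(fn ->
#       Memento.Query.read(Movies, 1)
#     end)
#     # => {:ok, record_or_nil} on success, {:error, reason} otherwise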
defdelegate transaction(fun), to: Memento.Transaction, as: :execute
defdelegate transaction!(fun), to: Memento.Transaction, as: :execute!
end
|
lib/memento/memento.ex
| 0.732209
| 0.730578
|
memento.ex
|
starcoder
|
defmodule AWS.WAF.Regional do
@moduledoc """
This is the *AWS WAF Regional API Reference* for using AWS WAF with Elastic
Load Balancing (ELB) Application Load Balancers. The AWS WAF actions and
data types listed in the reference are available for protecting Application
Load Balancers. You can use these actions and data types by means of the
endpoints listed in [AWS Regions and
Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region).
This guide is for developers who need detailed information about the AWS
WAF API actions, data types, and errors. For detailed information about AWS
WAF features and an overview of how to use the AWS WAF API, see the [AWS
WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
@doc """
Associates a web ACL with a resource, either an application load balancer
or Amazon API Gateway stage.
"""
def associate_web_a_c_l(client, input, options \\ []) do
request(client, "AssociateWebACL", input, options)
end
@doc """
Creates a `ByteMatchSet`. You then use `UpdateByteMatchSet` to identify the
part of a web request that you want AWS WAF to inspect, such as the values
of the `User-Agent` header or the query string. For example, you can create
a `ByteMatchSet` that matches any requests with `User-Agent` headers that
contain the string `BadBot`. You can then configure AWS WAF to reject those
requests.
To create and configure a `ByteMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateByteMatchSet` request.
</li> <li> Submit a `CreateByteMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
</li> <li> Submit an `UpdateByteMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_byte_match_set(client, input, options \\ []) do
request(client, "CreateByteMatchSet", input, options)
end
@doc """
Creates a `GeoMatchSet`, which you use to specify which web requests you
want to allow or block based on the country that the requests originate
from. For example, if you're receiving a lot of requests from one or more
countries and you want to block the requests, you can create an
`GeoMatchSet` that contains those countries and then configure AWS WAF to
block the requests.
To create and configure a `GeoMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateGeoMatchSet` request.
</li> <li> Submit a `CreateGeoMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateGeoMatchSet` request.
</li> <li> Submit an `UpdateGeoMatchSet` request to specify the
countries that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_geo_match_set(client, input, options \\ []) do
request(client, "CreateGeoMatchSet", input, options)
end
@doc """
Creates an `IPSet`, which you use to specify which web requests that you
want to allow or block based on the IP addresses that the requests
originate from. For example, if you're receiving a lot of requests from one
or more individual IP addresses or one or more ranges of IP addresses and
you want to block the requests, you can create an `IPSet` that contains
those IP addresses and then configure AWS WAF to block the requests.
To create and configure an `IPSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateIPSet` request.
</li> <li> Submit a `CreateIPSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
</li> <li> Submit an `UpdateIPSet` request to specify the IP addresses that
you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_i_p_set(client, input, options \\ []) do
request(client, "CreateIPSet", input, options)
end
@doc """
Creates a `RateBasedRule`. The `RateBasedRule` contains a `RateLimit`,
which specifies the maximum number of requests that AWS WAF allows from a
specified IP address in a five-minute period. The `RateBasedRule` also
contains the `IPSet` objects, `ByteMatchSet` objects, and other predicates
that identify the requests that you want to count or block if these
requests exceed the `RateLimit`.
If you add more than one predicate to a `RateBasedRule`, a request not only
must exceed the `RateLimit`, but it also must match all the specifications
to be counted or blocked. For example, suppose you add the following to a
`RateBasedRule`:
<ul> <li> An `IPSet` that matches the IP address `192.0.2.44/32`
</li> <li> A `ByteMatchSet` that matches `BadBot` in the `User-Agent`
header
</li> </ul> Further, you specify a `RateLimit` of 15,000.
You then add the `RateBasedRule` to a `WebACL` and specify that you want to
block requests that meet the conditions in the rule. For a request to be
blocked, it must come from the IP address 192.0.2.44 *and* the `User-Agent`
header in the request must contain the value `BadBot`. Further, requests
that match these two conditions must be received at a rate of more than
15,000 requests every five minutes. If both conditions are met and the rate
is exceeded, AWS WAF blocks the requests. If the rate drops below 15,000
for a five-minute period, AWS WAF no longer blocks the requests.
As a second example, suppose you want to limit requests to a particular
page on your site. To do this, you could add the following to a
`RateBasedRule`:
<ul> <li> A `ByteMatchSet` with `FieldToMatch` of `URI`
</li> <li> A `PositionalConstraint` of `STARTS_WITH`
</li> <li> A `TargetString` of `login`
</li> </ul> Further, you specify a `RateLimit` of 15,000.
By adding this `RateBasedRule` to a `WebACL`, you could limit requests to
your login page without affecting the rest of your site.
To create and configure a `RateBasedRule`, perform the following steps:
<ol> <li> Create and update the predicates that you want to include in the
rule. For more information, see `CreateByteMatchSet`, `CreateIPSet`, and
`CreateSqlInjectionMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRule` request.
</li> <li> Submit a `CreateRateBasedRule` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
</li> <li> Submit an `UpdateRateBasedRule` request to specify the
predicates that you want to include in the rule.
</li> <li> Create and update a `WebACL` that contains the `RateBasedRule`.
For more information, see `CreateWebACL`.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_rate_based_rule(client, input, options \\ []) do
request(client, "CreateRateBasedRule", input, options)
end
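# Illustrative request body (field names as in the AWS WAF CreateRateBasedRule
# API; `client` and `change_token` are assumed to be obtained elsewhere):
#
#     AWS.WAF.Regional.create_rate_based_rule(client, %{
#       "Name" => "IPRateLimit",
#       "MetricName" => "IPRateLimit",
#       "RateKey" => "IP",
#       "RateLimit" => 15000,
#       "ChangeToken" => change_token
#     })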
@doc """
Creates a `RegexMatchSet`. You then use `UpdateRegexMatchSet` to identify
the part of a web request that you want AWS WAF to inspect, such as the
values of the `User-Agent` header or the query string. For example, you can
create a `RegexMatchSet` that contains a `RegexMatchTuple` that looks for
any requests with `User-Agent` headers that match a `RegexPatternSet` with
pattern `B[a@]dB[o0]t`. You can then configure AWS WAF to reject those
requests.
To create and configure a `RegexMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRegexMatchSet` request.
</li> <li> Submit a `CreateRegexMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexMatchSet` request.
</li> <li> Submit an `UpdateRegexMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value, using a `RegexPatternSet`, that you want AWS WAF to
watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_regex_match_set(client, input, options \\ []) do
request(client, "CreateRegexMatchSet", input, options)
end
@doc """
Creates a `RegexPatternSet`. You then use `UpdateRegexPatternSet` to
specify the regular expression (regex) pattern that you want AWS WAF to
search for, such as `B[a@]dB[o0]t`. You can then configure AWS WAF to
reject those requests.
To create and configure a `RegexPatternSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRegexPatternSet` request.
</li> <li> Submit a `CreateRegexPatternSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexPatternSet` request.
</li> <li> Submit an `UpdateRegexPatternSet` request to specify the string
that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_regex_pattern_set(client, input, options \\ []) do
request(client, "CreateRegexPatternSet", input, options)
end
@doc """
Creates a `Rule`, which contains the `IPSet` objects, `ByteMatchSet`
objects, and other predicates that identify the requests that you want to
block. If you add more than one predicate to a `Rule`, a request must match
all of the specifications to be allowed or blocked. For example, suppose
that you add the following to a `Rule`:
<ul> <li> An `IPSet` that matches the IP address `192.0.2.44/32`
</li> <li> A `ByteMatchSet` that matches `BadBot` in the `User-Agent`
header
</li> </ul> You then add the `Rule` to a `WebACL` and specify that you want
to block requests that satisfy the `Rule`. For a request to be blocked, it
must come from the IP address 192.0.2.44 *and* the `User-Agent` header in
the request must contain the value `BadBot`.
To create and configure a `Rule`, perform the following steps:
<ol> <li> Create and update the predicates that you want to include in the
`Rule`. For more information, see `CreateByteMatchSet`, `CreateIPSet`, and
`CreateSqlInjectionMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateRule` request.
</li> <li> Submit a `CreateRule` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
</li> <li> Submit an `UpdateRule` request to specify the predicates that
you want to include in the `Rule`.
</li> <li> Create and update a `WebACL` that contains the `Rule`. For more
information, see `CreateWebACL`.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_rule(client, input, options \\ []) do
request(client, "CreateRule", input, options)
end
@doc """
Creates a `RuleGroup`. A rule group is a collection of predefined rules
that you add to a web ACL. You use `UpdateRuleGroup` to add rules to the
rule group.
Rule groups are subject to the following limits:
<ul> <li> Three rule groups per account. You can request an increase to
this limit by contacting customer support.
</li> <li> One rule group per web ACL.
</li> <li> Ten rules per rule group.
</li> </ul> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_rule_group(client, input, options \\ []) do
request(client, "CreateRuleGroup", input, options)
end
@doc """
Creates a `SizeConstraintSet`. You then use `UpdateSizeConstraintSet` to
identify the part of a web request that you want AWS WAF to check for
length, such as the length of the `User-Agent` header or the length of the
query string. For example, you can create a `SizeConstraintSet` that
matches any requests that have a query string that is longer than 100
bytes. You can then configure AWS WAF to reject those requests.
To create and configure a `SizeConstraintSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSizeConstraintSet` request.
</li> <li> Submit a `CreateSizeConstraintSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.
</li> <li> Submit an `UpdateSizeConstraintSet` request to specify the part
of the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_size_constraint_set(client, input, options \\ []) do
request(client, "CreateSizeConstraintSet", input, options)
end
@doc """
Creates a `SqlInjectionMatchSet`, which you use to allow, block, or count
requests that contain snippets of SQL code in a specified part of web
requests. AWS WAF searches for character sequences that are likely to be
malicious strings.
To create and configure a `SqlInjectionMatchSet`, perform the following
steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateSqlInjectionMatchSet` request.
</li> <li> Submit a `CreateSqlInjectionMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSqlInjectionMatchSet` request.
</li> <li> Submit an `UpdateSqlInjectionMatchSet` request to specify the
parts of web requests in which you want to allow, block, or count malicious
SQL code.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_sql_injection_match_set(client, input, options \\ []) do
request(client, "CreateSqlInjectionMatchSet", input, options)
end
@doc """
Creates a `WebACL`, which contains the `Rules` that identify the CloudFront
web requests that you want to allow, block, or count. AWS WAF evaluates
`Rules` in order based on the value of `Priority` for each `Rule`.
You also specify a default action, either `ALLOW` or `BLOCK`. If a web
request doesn't match any of the `Rules` in a `WebACL`, AWS WAF responds to
the request with the default action.
To create and configure a `WebACL`, perform the following steps:
<ol> <li> Create and update the `ByteMatchSet` objects and other predicates
that you want to include in `Rules`. For more information, see
`CreateByteMatchSet`, `UpdateByteMatchSet`, `CreateIPSet`, `UpdateIPSet`,
`CreateSqlInjectionMatchSet`, and `UpdateSqlInjectionMatchSet`.
</li> <li> Create and update the `Rules` that you want to include in the
`WebACL`. For more information, see `CreateRule` and `UpdateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateWebACL` request.
</li> <li> Submit a `CreateWebACL` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateWebACL` request.
</li> <li> Submit an `UpdateWebACL` request to specify the `Rules` that you
want to include in the `WebACL`, to specify the default action, and to
associate the `WebACL` with a CloudFront distribution.
</li> </ol> For more information about how to use the AWS WAF API, see the
[AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_web_a_c_l(client, input, options \\ []) do
request(client, "CreateWebACL", input, options)
end
@doc """
Creates an `XssMatchSet`, which you use to allow, block, or count requests
that contain cross-site scripting attacks in the specified part of web
requests. AWS WAF searches for character sequences that are likely to be
malicious strings.
To create and configure an `XssMatchSet`, perform the following steps:
<ol> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `CreateXssMatchSet` request.
</li> <li> Submit a `CreateXssMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateXssMatchSet` request.
</li> <li> Submit an `UpdateXssMatchSet` request to specify the parts of
web requests in which you want to allow, block, or count cross-site
scripting attacks.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def create_xss_match_set(client, input, options \\ []) do
request(client, "CreateXssMatchSet", input, options)
end
@doc """
Permanently deletes a `ByteMatchSet`. You can't delete a `ByteMatchSet` if
it's still used in any `Rules` or if it still includes any `ByteMatchTuple`
objects (any filters).
If you just want to remove a `ByteMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `ByteMatchSet`, perform the following steps:
<ol> <li> Update the `ByteMatchSet` to remove filters, if any. For more
information, see `UpdateByteMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteByteMatchSet` request.
</li> <li> Submit a `DeleteByteMatchSet` request.
</li> </ol>
"""
def delete_byte_match_set(client, input, options \\ []) do
request(client, "DeleteByteMatchSet", input, options)
end
@doc """
Permanently deletes a `GeoMatchSet`. You can't delete a `GeoMatchSet` if
it's still used in any `Rules` or if it still includes any countries.
If you just want to remove a `GeoMatchSet` from a `Rule`, use `UpdateRule`.
To permanently delete a `GeoMatchSet` from AWS WAF, perform the following
steps:
<ol> <li> Update the `GeoMatchSet` to remove any countries. For more
information, see `UpdateGeoMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteGeoMatchSet` request.
</li> <li> Submit a `DeleteGeoMatchSet` request.
</li> </ol>
"""
def delete_geo_match_set(client, input, options \\ []) do
request(client, "DeleteGeoMatchSet", input, options)
end
@doc """
Permanently deletes an `IPSet`. You can't delete an `IPSet` if it's still
used in any `Rules` or if it still includes any IP addresses.
If you just want to remove an `IPSet` from a `Rule`, use `UpdateRule`.
To permanently delete an `IPSet` from AWS WAF, perform the following steps:
<ol> <li> Update the `IPSet` to remove IP address ranges, if any. For more
information, see `UpdateIPSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteIPSet` request.
</li> <li> Submit a `DeleteIPSet` request.
</li> </ol>
"""
def delete_i_p_set(client, input, options \\ []) do
request(client, "DeleteIPSet", input, options)
end
@doc """
Permanently deletes the `LoggingConfiguration` from the specified web ACL.
"""
def delete_logging_configuration(client, input, options \\ []) do
request(client, "DeleteLoggingConfiguration", input, options)
end
@doc """
Permanently deletes an IAM policy from the specified RuleGroup.
The user making the request must be the owner of the RuleGroup.
"""
def delete_permission_policy(client, input, options \\ []) do
request(client, "DeletePermissionPolicy", input, options)
end
@doc """
Permanently deletes a `RateBasedRule`. You can't delete a rule if it's
still used in any `WebACL` objects or if it still includes any predicates,
such as `ByteMatchSet` objects.
If you just want to remove a rule from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `RateBasedRule` from AWS WAF, perform the following
steps:
<ol> <li> Update the `RateBasedRule` to remove predicates, if any. For more
information, see `UpdateRateBasedRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRateBasedRule` request.
</li> <li> Submit a `DeleteRateBasedRule` request.
</li> </ol>
"""
def delete_rate_based_rule(client, input, options \\ []) do
request(client, "DeleteRateBasedRule", input, options)
end
@doc """
Permanently deletes a `RegexMatchSet`. You can't delete a `RegexMatchSet`
if it's still used in any `Rules` or if it still includes any
`RegexMatchTuples` objects (any filters).
If you just want to remove a `RegexMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `RegexMatchSet`, perform the following steps:
<ol> <li> Update the `RegexMatchSet` to remove filters, if any. For more
information, see `UpdateRegexMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRegexMatchSet` request.
</li> <li> Submit a `DeleteRegexMatchSet` request.
</li> </ol>
"""
def delete_regex_match_set(client, input, options \\ []) do
request(client, "DeleteRegexMatchSet", input, options)
end
@doc """
Permanently deletes a `RegexPatternSet`. You can't delete a
`RegexPatternSet` if it's still used in any `RegexMatchSet` or if the
`RegexPatternSet` is not empty.
"""
def delete_regex_pattern_set(client, input, options \\ []) do
request(client, "DeleteRegexPatternSet", input, options)
end
@doc """
Permanently deletes a `Rule`. You can't delete a `Rule` if it's still used
in any `WebACL` objects or if it still includes any predicates, such as
`ByteMatchSet` objects.
If you just want to remove a `Rule` from a `WebACL`, use `UpdateWebACL`.
To permanently delete a `Rule` from AWS WAF, perform the following steps:
<ol> <li> Update the `Rule` to remove predicates, if any. For more
information, see `UpdateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRule` request.
</li> <li> Submit a `DeleteRule` request.
</li> </ol>
"""
def delete_rule(client, input, options \\ []) do
request(client, "DeleteRule", input, options)
end
@doc """
Permanently deletes a `RuleGroup`. You can't delete a `RuleGroup` if it's
still used in any `WebACL` objects or if it still includes any rules.
If you just want to remove a `RuleGroup` from a `WebACL`, use
`UpdateWebACL`.
To permanently delete a `RuleGroup` from AWS WAF, perform the following
steps:
<ol> <li> Update the `RuleGroup` to remove rules, if any. For more
information, see `UpdateRuleGroup`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteRuleGroup` request.
</li> <li> Submit a `DeleteRuleGroup` request.
</li> </ol>
"""
def delete_rule_group(client, input, options \\ []) do
request(client, "DeleteRuleGroup", input, options)
end
@doc """
Permanently deletes a `SizeConstraintSet`. You can't delete a
`SizeConstraintSet` if it's still used in any `Rules` or if it still
includes any `SizeConstraint` objects (any filters).
If you just want to remove a `SizeConstraintSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SizeConstraintSet`, perform the following steps:
<ol> <li> Update the `SizeConstraintSet` to remove filters, if any. For
more information, see `UpdateSizeConstraintSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSizeConstraintSet` request.
</li> <li> Submit a `DeleteSizeConstraintSet` request.
</li> </ol>
"""
def delete_size_constraint_set(client, input, options \\ []) do
request(client, "DeleteSizeConstraintSet", input, options)
end
@doc """
Permanently deletes a `SqlInjectionMatchSet`. You can't delete a
`SqlInjectionMatchSet` if it's still used in any `Rules` or if it still
contains any `SqlInjectionMatchTuple` objects.
If you just want to remove a `SqlInjectionMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete a `SqlInjectionMatchSet` from AWS WAF, perform the
following steps:
<ol> <li> Update the `SqlInjectionMatchSet` to remove filters, if any. For
more information, see `UpdateSqlInjectionMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteSqlInjectionMatchSet` request.
</li> <li> Submit a `DeleteSqlInjectionMatchSet` request.
</li> </ol>
"""
def delete_sql_injection_match_set(client, input, options \\ []) do
request(client, "DeleteSqlInjectionMatchSet", input, options)
end
@doc """
Permanently deletes a `WebACL`. You can't delete a `WebACL` if it still
contains any `Rules`.
To delete a `WebACL`, perform the following steps:
<ol> <li> Update the `WebACL` to remove `Rules`, if any. For more
information, see `UpdateWebACL`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteWebACL` request.
</li> <li> Submit a `DeleteWebACL` request.
</li> </ol>
"""
def delete_web_a_c_l(client, input, options \\ []) do
request(client, "DeleteWebACL", input, options)
end
@doc """
Permanently deletes an `XssMatchSet`. You can't delete an `XssMatchSet` if
it's still used in any `Rules` or if it still contains any `XssMatchTuple`
objects.
If you just want to remove an `XssMatchSet` from a `Rule`, use
`UpdateRule`.
To permanently delete an `XssMatchSet` from AWS WAF, perform the following
steps:
<ol> <li> Update the `XssMatchSet` to remove filters, if any. For more
information, see `UpdateXssMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of a `DeleteXssMatchSet` request.
</li> <li> Submit a `DeleteXssMatchSet` request.
</li> </ol>
"""
def delete_xss_match_set(client, input, options \\ []) do
request(client, "DeleteXssMatchSet", input, options)
end
@doc """
Removes a web ACL from the specified resource, either an application load
balancer or Amazon API Gateway stage.
"""
def disassociate_web_a_c_l(client, input, options \\ []) do
request(client, "DisassociateWebACL", input, options)
end
@doc """
Returns the `ByteMatchSet` specified by `ByteMatchSetId`.
"""
def get_byte_match_set(client, input, options \\ []) do
request(client, "GetByteMatchSet", input, options)
end
@doc """
When you want to create, update, or delete AWS WAF objects, get a change
token and include the change token in the create, update, or delete
request. Change tokens ensure that your application doesn't submit
conflicting requests to AWS WAF.
Each create, update, or delete request must use a unique change token. If
your application submits a `GetChangeToken` request and then submits a
second `GetChangeToken` request before submitting a create, update, or
delete request, the second `GetChangeToken` request returns the same value
as the first `GetChangeToken` request.
When you use a change token in a create, update, or delete request, the
status of the change token changes to `PENDING`, which indicates that AWS
WAF is propagating the change to all AWS WAF servers. Use
`GetChangeTokenStatus` to determine the status of your change token.
"""
def get_change_token(client, input, options \\ []) do
request(client, "GetChangeToken", input, options)
end
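# Illustrative change-token workflow (response shape assumed from the AWS JSON
# protocol used by this module):
#
#     {:ok, %{"ChangeToken" => token}, _response} =
#       AWS.WAF.Regional.get_change_token(client, %{})
#     # ...use `token` in the subsequent create/update/delete request, then:
#     AWS.WAF.Regional.get_change_token_status(client, %{"ChangeToken" => token})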
@doc """
Returns the status of a `ChangeToken` that you got by calling
`GetChangeToken`. `ChangeTokenStatus` is one of the following values:
<ul> <li> `PROVISIONED`: You requested the change token by calling
`GetChangeToken`, but you haven't used it yet in a call to create, update,
or delete an AWS WAF object.
</li> <li> `PENDING`: AWS WAF is propagating the create, update, or delete
request to all AWS WAF servers.
</li> <li> `INSYNC`: Propagation is complete.
</li> </ul>
"""
def get_change_token_status(client, input, options \\ []) do
request(client, "GetChangeTokenStatus", input, options)
end
@doc """
Returns the `GeoMatchSet` that is specified by `GeoMatchSetId`.
"""
def get_geo_match_set(client, input, options \\ []) do
request(client, "GetGeoMatchSet", input, options)
end
@doc """
Returns the `IPSet` that is specified by `IPSetId`.
"""
def get_i_p_set(client, input, options \\ []) do
request(client, "GetIPSet", input, options)
end
@doc """
Returns the `LoggingConfiguration` for the specified web ACL.
"""
def get_logging_configuration(client, input, options \\ []) do
request(client, "GetLoggingConfiguration", input, options)
end
@doc """
Returns the IAM policy attached to the RuleGroup.
"""
def get_permission_policy(client, input, options \\ []) do
request(client, "GetPermissionPolicy", input, options)
end
@doc """
Returns the `RateBasedRule` that is specified by the `RuleId` that you
included in the `GetRateBasedRule` request.
"""
def get_rate_based_rule(client, input, options \\ []) do
request(client, "GetRateBasedRule", input, options)
end
@doc """
Returns an array of IP addresses currently being blocked by the
`RateBasedRule` that is specified by the `RuleId`. The maximum number of
managed keys that will be blocked is 10,000. If more than 10,000 addresses
exceed the rate limit, the 10,000 addresses with the highest rates will be
blocked.
"""
def get_rate_based_rule_managed_keys(client, input, options \\ []) do
request(client, "GetRateBasedRuleManagedKeys", input, options)
end
@doc """
Returns the `RegexMatchSet` specified by `RegexMatchSetId`.
"""
def get_regex_match_set(client, input, options \\ []) do
request(client, "GetRegexMatchSet", input, options)
end
@doc """
Returns the `RegexPatternSet` specified by `RegexPatternSetId`.
"""
def get_regex_pattern_set(client, input, options \\ []) do
request(client, "GetRegexPatternSet", input, options)
end
@doc """
Returns the `Rule` that is specified by the `RuleId` that you included in
the `GetRule` request.
"""
def get_rule(client, input, options \\ []) do
request(client, "GetRule", input, options)
end
@doc """
Returns the `RuleGroup` that is specified by the `RuleGroupId` that you
included in the `GetRuleGroup` request.
To view the rules in a rule group, use `ListActivatedRulesInRuleGroup`.
"""
def get_rule_group(client, input, options \\ []) do
request(client, "GetRuleGroup", input, options)
end
@doc """
Gets detailed information about a specified number of requests--a
sample--that AWS WAF randomly selects from among the first 5,000 requests
that your AWS resource received during a time range that you choose. You
can specify a sample size of up to 500 requests, and you can specify any
time range in the previous three hours.
`GetSampledRequests` returns a time range, which is usually the time range
that you specified. However, if your resource (such as a CloudFront
distribution) received 5,000 requests before the specified time range
elapsed, `GetSampledRequests` returns an updated time range. This new time
range indicates the actual period during which AWS WAF selected the
requests in the sample.
"""
def get_sampled_requests(client, input, options \\ []) do
request(client, "GetSampledRequests", input, options)
end
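# Illustrative input (field names as in the AWS WAF GetSampledRequests API;
# the identifiers are assumed placeholders):
#
#     AWS.WAF.Regional.get_sampled_requests(client, %{
#       "WebAclId" => web_acl_id,
#       "RuleId" => rule_id,
#       "MaxItems" => 100,
#       "TimeWindow" => %{"StartTime" => start_time, "EndTime" => end_time}
#     })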
@doc """
Returns the `SizeConstraintSet` specified by `SizeConstraintSetId`.
"""
def get_size_constraint_set(client, input, options \\ []) do
request(client, "GetSizeConstraintSet", input, options)
end
@doc """
Returns the `SqlInjectionMatchSet` that is specified by
`SqlInjectionMatchSetId`.
"""
def get_sql_injection_match_set(client, input, options \\ []) do
request(client, "GetSqlInjectionMatchSet", input, options)
end
@doc """
Returns the `WebACL` that is specified by `WebACLId`.
"""
def get_web_a_c_l(client, input, options \\ []) do
request(client, "GetWebACL", input, options)
end
@doc """
Returns the web ACL for the specified resource, either an application load
balancer or Amazon API Gateway stage.
"""
def get_web_a_c_l_for_resource(client, input, options \\ []) do
request(client, "GetWebACLForResource", input, options)
end
@doc """
Returns the `XssMatchSet` that is specified by `XssMatchSetId`.
"""
def get_xss_match_set(client, input, options \\ []) do
request(client, "GetXssMatchSet", input, options)
end
@doc """
Returns an array of `ActivatedRule` objects.
"""
def list_activated_rules_in_rule_group(client, input, options \\ []) do
request(client, "ListActivatedRulesInRuleGroup", input, options)
end
@doc """
Returns an array of `ByteMatchSetSummary` objects.
"""
def list_byte_match_sets(client, input, options \\ []) do
request(client, "ListByteMatchSets", input, options)
end
@doc """
Returns an array of `GeoMatchSetSummary` objects in the response.
"""
def list_geo_match_sets(client, input, options \\ []) do
request(client, "ListGeoMatchSets", input, options)
end
@doc """
Returns an array of `IPSetSummary` objects in the response.
"""
def list_i_p_sets(client, input, options \\ []) do
request(client, "ListIPSets", input, options)
end
@doc """
Returns an array of `LoggingConfiguration` objects.
"""
def list_logging_configurations(client, input, options \\ []) do
request(client, "ListLoggingConfigurations", input, options)
end
@doc """
Returns an array of `RuleSummary` objects.
"""
def list_rate_based_rules(client, input, options \\ []) do
request(client, "ListRateBasedRules", input, options)
end
@doc """
Returns an array of `RegexMatchSetSummary` objects.
"""
def list_regex_match_sets(client, input, options \\ []) do
request(client, "ListRegexMatchSets", input, options)
end
@doc """
Returns an array of `RegexPatternSetSummary` objects.
"""
def list_regex_pattern_sets(client, input, options \\ []) do
request(client, "ListRegexPatternSets", input, options)
end
@doc """
Returns an array of resources associated with the specified web ACL.
"""
def list_resources_for_web_a_c_l(client, input, options \\ []) do
request(client, "ListResourcesForWebACL", input, options)
end
@doc """
Returns an array of `RuleGroup` objects.
"""
def list_rule_groups(client, input, options \\ []) do
request(client, "ListRuleGroups", input, options)
end
@doc """
Returns an array of `RuleSummary` objects.
"""
def list_rules(client, input, options \\ []) do
request(client, "ListRules", input, options)
end
@doc """
Returns an array of `SizeConstraintSetSummary` objects.
"""
def list_size_constraint_sets(client, input, options \\ []) do
request(client, "ListSizeConstraintSets", input, options)
end
@doc """
Returns an array of `SqlInjectionMatchSet` objects.
"""
def list_sql_injection_match_sets(client, input, options \\ []) do
request(client, "ListSqlInjectionMatchSets", input, options)
end
@doc """
Returns an array of `RuleGroup` objects that you are subscribed to.
"""
def list_subscribed_rule_groups(client, input, options \\ []) do
request(client, "ListSubscribedRuleGroups", input, options)
end
@doc """
Returns an array of `WebACLSummary` objects in the response.
"""
def list_web_a_c_ls(client, input, options \\ []) do
request(client, "ListWebACLs", input, options)
end
@doc """
Returns an array of `XssMatchSet` objects.
"""
def list_xss_match_sets(client, input, options \\ []) do
request(client, "ListXssMatchSets", input, options)
end
@doc """
Associates a `LoggingConfiguration` with a specified web ACL.
You can access information about all traffic that AWS WAF inspects using
the following steps:
<ol> <li> Create an Amazon Kinesis Data Firehose.
Create the data firehose with a PUT source, in the region that you are
operating in. However, if you are capturing logs for Amazon CloudFront, always
create the firehose in US East (N. Virginia).
</li> <li> Associate that firehose to your web ACL using a
`PutLoggingConfiguration` request.
</li> </ol> When you successfully enable logging using a
`PutLoggingConfiguration` request, AWS WAF will create a service linked
role with the necessary permissions to write logs to the Amazon Kinesis
Data Firehose. For more information, see [Logging Web ACL Traffic
Information](https://docs.aws.amazon.com/waf/latest/developerguide/logging.html)
in the *AWS WAF Developer Guide*.
"""
def put_logging_configuration(client, input, options \\ []) do
request(client, "PutLoggingConfiguration", input, options)
end
@doc """
Attaches an IAM policy to the specified resource. The only supported use for
this action is to share a RuleGroup across accounts.
The `PutPermissionPolicy` is subject to the following restrictions:
<ul> <li> You can attach only one policy with each `PutPermissionPolicy`
request.
</li> <li> The policy must include an `Effect`, `Action` and `Principal`.
</li> <li> `Effect` must specify `Allow`.
</li> <li> The `Action` in the policy must be `waf:UpdateWebACL`,
`waf-regional:UpdateWebACL`, `waf:GetRuleGroup` and
`waf-regional:GetRuleGroup` . Any extra or wildcard actions in the policy
will be rejected.
</li> <li> The policy cannot include a `Resource` parameter.
</li> <li> The ARN in the request must be a valid WAF RuleGroup ARN and the
RuleGroup must exist in the same region.
</li> <li> The user making the request must be the owner of the RuleGroup.
</li> <li> Your policy must be composed using IAM Policy version
2012-10-17.
</li> </ul> For more information, see [IAM
Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html).
An example of a valid policy parameter is shown in the Examples section
below.
"""
def put_permission_policy(client, input, options \\ []) do
request(client, "PutPermissionPolicy", input, options)
end
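# A policy sketch satisfying the restrictions above (illustrative; the account
# ID and rule group ARN are placeholders):
#
#     policy = ~s({
#       "Version": "2012-10-17",
#       "Statement": [{
#         "Effect": "Allow",
#         "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
#         "Action": ["waf:UpdateWebACL", "waf-regional:UpdateWebACL",
#                    "waf:GetRuleGroup", "waf-regional:GetRuleGroup"]
#       }]
#     })
#     AWS.WAF.Regional.put_permission_policy(client, %{
#       "ResourceArn" => rule_group_arn,
#       "Policy" => policy
#     })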
@doc """
Inserts or deletes `ByteMatchTuple` objects (filters) in a `ByteMatchSet`.
For each `ByteMatchTuple` object, you specify the following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change a `ByteMatchSetUpdate` object, you delete the existing
object and add a new one.
</li> <li> The part of a web request that you want AWS WAF to inspect, such
as a query string or the value of the `User-Agent` header.
</li> <li> The bytes (typically a string that corresponds with ASCII
characters) that you want AWS WAF to look for. For more information,
including how you specify the values for the AWS WAF API and the AWS CLI or
SDKs, see `TargetString` in the `ByteMatchTuple` data type.
</li> <li> Where to look, such as at the beginning or the end of a query
string.
</li> <li> Whether to perform any conversions on the request, such as
converting it to lowercase, before inspecting it for the specified string.
</li> </ul> For example, you can add a `ByteMatchSetUpdate` object that
matches web requests in which `User-Agent` headers contain the string
`BadBot`. You can then configure AWS WAF to block those requests.
To create and configure a `ByteMatchSet`, perform the following steps:
<ol> <li> Create a `ByteMatchSet.` For more information, see
`CreateByteMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateByteMatchSet` request.
</li> <li> Submit an `UpdateByteMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_byte_match_set(client, input, options \\ []) do
request(client, "UpdateByteMatchSet", input, options)
end
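# A minimal sketch of an `UpdateByteMatchSet` input matching the steps
# above; the set ID and change token are hypothetical, and `TargetString`
# is base64-encoded as the JSON API expects:
#
#     input = %{
#       "ByteMatchSetId" => byte_match_set_id,
#       "ChangeToken" => change_token,
#       "Updates" => [
#         %{
#           "Action" => "INSERT",
#           "ByteMatchTuple" => %{
#             "FieldToMatch" => %{"Type" => "HEADER", "Data" => "User-Agent"},
#             "TargetString" => Base.encode64("BadBot"),
#             "TextTransformation" => "NONE",
#             "PositionalConstraint" => "CONTAINS"
#           }
#         }
#       ]
#     }
#     {:ok, _, _} = update_byte_match_set(client, input)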
@doc """
Inserts or deletes `GeoMatchConstraint` objects in a `GeoMatchSet`. For
each `GeoMatchConstraint` object, you specify the following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change a `GeoMatchConstraint` object, you delete the existing
object and add a new one.
</li> <li> The `Type`. The only valid value for `Type` is `Country`.
</li> <li> The `Value`, which is a two character code for the country to
add to the `GeoMatchConstraint` object. Valid codes are listed in
`GeoMatchConstraint$Value`.
</li> </ul> To create and configure a `GeoMatchSet`, perform the following
steps:
<ol> <li> Submit a `CreateGeoMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateGeoMatchSet` request.
</li> <li> Submit an `UpdateGeoMatchSet` request to specify the country
that you want AWS WAF to watch for.
</li> </ol> When you update a `GeoMatchSet`, you specify the country that
you want to add and/or the country that you want to delete. If you want to
change a country, you delete the existing country and add the new one.
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_geo_match_set(client, input, options \\ []) do
request(client, "UpdateGeoMatchSet", input, options)
end
@doc """
Inserts or deletes `IPSetDescriptor` objects in an `IPSet`. For each
`IPSetDescriptor` object, you specify the following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change an `IPSetDescriptor` object, you delete the existing object
and add a new one.
</li> <li> The IP address version, `IPv4` or `IPv6`.
</li> <li> The IP address in CIDR notation, for example, `192.0.2.0/24`
(for the range of IP addresses from `192.0.2.0` to `192.0.2.255`) or
`192.0.2.44/32` (for the individual IP address `192.0.2.44`).
</li> </ul> AWS WAF supports IPv4 address ranges: /8 and any range from
/16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56,
/64, and /128. For more information about CIDR notation, see the Wikipedia
entry [Classless Inter-Domain
Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
IPv6 addresses can be represented using any of the following formats:
<ul> <li> fdf8:f53e:61e4::18/128
</li> <li> fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128
</li> <li> fc00:db20:35b:7399::5/128
</li> <li> 1111::111/128
</li> </ul> You use an `IPSet` to specify which web requests you want to
allow or block based on the IP addresses that the requests originated from.
For example, if you're receiving a lot of requests from one or a small
number of IP addresses and you want to block the requests, you can create
an `IPSet` that specifies those IP addresses, and then configure AWS WAF to
block the requests.
To create and configure an `IPSet`, perform the following steps:
<ol> <li> Submit a `CreateIPSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateIPSet` request.
</li> <li> Submit an `UpdateIPSet` request to specify the IP addresses that
you want AWS WAF to watch for.
</li> </ol> When you update an `IPSet`, you specify the IP addresses that
you want to add and/or the IP addresses that you want to delete. If you
want to change an IP address, you delete the existing IP address and add
the new one.
You can insert a maximum of 1000 addresses in a single request.
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_i_p_set(client, input, options \\ []) do
request(client, "UpdateIPSet", input, options)
end
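# A minimal sketch of an `UpdateIPSet` input following the steps above;
# the set ID and change token are hypothetical:
#
#     input = %{
#       "IPSetId" => ip_set_id,
#       "ChangeToken" => change_token,
#       "Updates" => [
#         %{
#           "Action" => "INSERT",
#           "IPSetDescriptor" => %{"Type" => "IPV4", "Value" => "192.0.2.0/24"}
#         }
#       ]
#     }
#     {:ok, _, _} = update_i_p_set(client, input)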
@doc """
Inserts or deletes `Predicate` objects in a rule and updates the
`RateLimit` in the rule.
Each `Predicate` object identifies a predicate, such as a `ByteMatchSet` or
an `IPSet`, that specifies the web requests that you want to block or
count. The `RateLimit` specifies the number of requests every five minutes
that triggers the rule.
If you add more than one predicate to a `RateBasedRule`, a request must
match all the predicates and exceed the `RateLimit` to be counted or
blocked. For example, suppose you add the following to a `RateBasedRule`:
<ul> <li> An `IPSet` that matches the IP address `192.0.2.44/32`
</li> <li> A `ByteMatchSet` that matches `BadBot` in the `User-Agent`
header
</li> </ul> Further, you specify a `RateLimit` of 15,000.
You then add the `RateBasedRule` to a `WebACL` and specify that you want to
block requests that satisfy the rule. For a request to be blocked, it must
come from the IP address 192.0.2.44 *and* the `User-Agent` header in the
request must contain the value `BadBot`. Further, requests that match these
two conditions must be received at a rate of more than 15,000 every five
minutes. If the rate drops below this limit, AWS WAF no longer blocks the
requests.
As a second example, suppose you want to limit requests to a particular
page on your site. To do this, you could add the following to a
`RateBasedRule`:
<ul> <li> A `ByteMatchSet` with `FieldToMatch` of `URI`
</li> <li> A `PositionalConstraint` of `STARTS_WITH`
</li> <li> A `TargetString` of `login`
</li> </ul> Further, you specify a `RateLimit` of 15,000.
By adding this `RateBasedRule` to a `WebACL`, you could limit requests to
your login page without affecting the rest of your site.
"""
def update_rate_based_rule(client, input, options \\ []) do
request(client, "UpdateRateBasedRule", input, options)
end
@doc """
Inserts or deletes `RegexMatchTuple` objects (filters) in a
`RegexMatchSet`. For each `RegexMatchSetUpdate` object, you specify the
following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change a `RegexMatchSetUpdate` object, you delete the existing
object and add a new one.
</li> <li> The part of a web request that you want AWS WAF to inspect,
such as a query string or the value of the `User-Agent` header.
</li> <li> The identifier of the pattern (a regular expression) that you
want AWS WAF to look for. For more information, see `RegexPatternSet`.
</li> <li> Whether to perform any conversions on the request, such as
converting it to lowercase, before inspecting it for the specified string.
</li> </ul> For example, you can create a `RegexPatternSet` that matches
any requests with `User-Agent` headers that contain the string
`B[a@]dB[o0]t`. You can then configure AWS WAF to reject those requests.
To create and configure a `RegexMatchSet`, perform the following steps:
<ol> <li> Create a `RegexMatchSet.` For more information, see
`CreateRegexMatchSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexMatchSet` request.
</li> <li> Submit an `UpdateRegexMatchSet` request to specify the part of
the request that you want AWS WAF to inspect (for example, the header or
the URI) and the identifier of the `RegexPatternSet` that contains the
regular expression patterns you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_regex_match_set(client, input, options \\ []) do
request(client, "UpdateRegexMatchSet", input, options)
end
@doc """
Inserts or deletes `RegexPatternString` objects in a `RegexPatternSet`. For
each `RegexPatternString` object, you specify the following values:
<ul> <li> Whether to insert or delete the `RegexPatternString`.
</li> <li> The regular expression pattern that you want to insert or
delete. For more information, see `RegexPatternSet`.
</li> </ul> For example, you can create a `RegexPatternString` such as
`B[a@]dB[o0]t`. AWS WAF will match this `RegexPatternString` to:
<ul> <li> BadBot
</li> <li> BadB0t
</li> <li> B@dBot
</li> <li> B@dB0t
</li> </ul> To create and configure a `RegexPatternSet`, perform the
following steps:
<ol> <li> Create a `RegexPatternSet.` For more information, see
`CreateRegexPatternSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRegexPatternSet` request.
</li> <li> Submit an `UpdateRegexPatternSet` request to specify the regular
expression pattern that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_regex_pattern_set(client, input, options \\ []) do
request(client, "UpdateRegexPatternSet", input, options)
end
@doc """
Inserts or deletes `Predicate` objects in a `Rule`. Each `Predicate` object
identifies a predicate, such as a `ByteMatchSet` or an `IPSet`, that
specifies the web requests that you want to allow, block, or count. If you
add more than one predicate to a `Rule`, a request must match all of the
specifications to be allowed, blocked, or counted. For example, suppose
that you add the following to a `Rule`:
<ul> <li> A `ByteMatchSet` that matches the value `BadBot` in the
`User-Agent` header
</li> <li> An `IPSet` that matches the IP address `192.0.2.44`
</li> </ul> You then add the `Rule` to a `WebACL` and specify that you want
to block requests that satisfy the `Rule`. For a request to be blocked, the
`User-Agent` header in the request must contain the value `BadBot` *and*
the request must originate from the IP address 192.0.2.44.
To create and configure a `Rule`, perform the following steps:
<ol> <li> Create and update the predicates that you want to include in the
`Rule`.
</li> <li> Create the `Rule`. See `CreateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRule` request.
</li> <li> Submit an `UpdateRule` request to add predicates to the `Rule`.
</li> <li> Create and update a `WebACL` that contains the `Rule`. See
`CreateWebACL`.
</li> </ol> If you want to replace one `ByteMatchSet` or `IPSet` with
another, you delete the existing one and add the new one.
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_rule(client, input, options \\ []) do
request(client, "UpdateRule", input, options)
end
@doc """
Inserts or deletes `ActivatedRule` objects in a `RuleGroup`.
You can only insert `REGULAR` rules into a rule group.
You can have a maximum of ten rules per rule group.
To create and configure a `RuleGroup`, perform the following steps:
<ol> <li> Create and update the `Rules` that you want to include in the
`RuleGroup`. See `CreateRule`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateRuleGroup` request.
</li> <li> Submit an `UpdateRuleGroup` request to add `Rules` to the
`RuleGroup`.
</li> <li> Create and update a `WebACL` that contains the `RuleGroup`. See
`CreateWebACL`.
</li> </ol> If you want to replace one `Rule` with another, you delete the
existing one and add the new one.
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_rule_group(client, input, options \\ []) do
request(client, "UpdateRuleGroup", input, options)
end
@doc """
Inserts or deletes `SizeConstraint` objects (filters) in a
`SizeConstraintSet`. For each `SizeConstraint` object, you specify the
following values:
<ul> <li> Whether to insert or delete the object from the array. If you
want to change a `SizeConstraintSetUpdate` object, you delete the existing
object and add a new one.
</li> <li> The part of a web request that you want AWS WAF to evaluate,
such as the length of a query string or the length of the `User-Agent`
header.
</li> <li> Whether to perform any transformations on the request, such as
converting it to lowercase, before checking its length. Note that
transformations of the request body are not supported because the AWS
resource forwards only the first `8192` bytes of your request to AWS WAF.
You can only specify a single type of TextTransformation.
</li> <li> A `ComparisonOperator` used for evaluating the selected part of
the request against the specified `Size`, such as equals, greater than,
less than, and so on.
</li> <li> The length, in bytes, that you want AWS WAF to watch for in the
selected part of the request. The length is computed after applying the
transformation.
</li> </ul> For example, you can add a `SizeConstraintSetUpdate` object
that matches web requests in which the length of the `User-Agent` header is
greater than 100 bytes. You can then configure AWS WAF to block those
requests.
To create and configure a `SizeConstraintSet`, perform the following steps:
<ol> <li> Create a `SizeConstraintSet.` For more information, see
`CreateSizeConstraintSet`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSizeConstraintSet` request.
</li> <li> Submit an `UpdateSizeConstraintSet` request to specify the part
of the request that you want AWS WAF to inspect (for example, the header or
the URI) and the value that you want AWS WAF to watch for.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_size_constraint_set(client, input, options \\ []) do
request(client, "UpdateSizeConstraintSet", input, options)
end
@doc """
Inserts or deletes `SqlInjectionMatchTuple` objects (filters) in a
`SqlInjectionMatchSet`. For each `SqlInjectionMatchTuple` object, you
specify the following values:
<ul> <li> `Action`: Whether to insert the object into or delete the object
from the array. To change a `SqlInjectionMatchTuple`, you delete the
existing object and add a new one.
</li> <li> `FieldToMatch`: The part of web requests that you want AWS WAF
to inspect and, if you want AWS WAF to inspect a header or custom query
parameter, the name of the header or parameter.
</li> <li> `TextTransformation`: Which text transformation, if any, to
perform on the web request before inspecting the request for snippets of
malicious SQL code.
You can only specify a single type of TextTransformation.
</li> </ul> You use `SqlInjectionMatchSet` objects to specify which
CloudFront requests you want to allow, block, or count. For example,
if you're receiving requests that contain snippets of SQL code in the query
string and you want to block the requests, you can create a
`SqlInjectionMatchSet` with the applicable settings, and then configure AWS
WAF to block the requests.
To create and configure a `SqlInjectionMatchSet`, perform the following
steps:
<ol> <li> Submit a `CreateSqlInjectionMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateSqlInjectionMatchSet` request.
</li> <li> Submit an `UpdateSqlInjectionMatchSet` request to specify the
parts of web requests that you want AWS WAF to inspect for snippets of SQL
code.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_sql_injection_match_set(client, input, options \\ []) do
request(client, "UpdateSqlInjectionMatchSet", input, options)
end
@doc """
Inserts or deletes `ActivatedRule` objects in a `WebACL`. Each `Rule`
identifies web requests that you want to allow, block, or count. When you
update a `WebACL`, you specify the following values:
<ul> <li> A default action for the `WebACL`, either `ALLOW` or `BLOCK`. AWS
WAF performs the default action if a request doesn't match the criteria in
any of the `Rules` in a `WebACL`.
</li> <li> The `Rules` that you want to add or delete. If you want to
replace one `Rule` with another, you delete the existing `Rule` and add the
new one.
</li> <li> For each `Rule`, whether you want AWS WAF to allow requests,
block requests, or count requests that match the conditions in the `Rule`.
</li> <li> The order in which you want AWS WAF to evaluate the `Rules` in a
`WebACL`. If you add more than one `Rule` to a `WebACL`, AWS WAF evaluates
each request against the `Rules` in order based on the value of `Priority`.
(The `Rule` that has the lowest value for `Priority` is evaluated first.)
When a web request matches all the predicates (such as `ByteMatchSets` and
`IPSets`) in a `Rule`, AWS WAF immediately takes the corresponding action,
allow or block, and doesn't evaluate the request against the remaining
`Rules` in the `WebACL`, if any.
</li> </ul> To create and configure a `WebACL`, perform the following
steps:
<ol> <li> Create and update the predicates that you want to include in
`Rules`. For more information, see `CreateByteMatchSet`,
`UpdateByteMatchSet`, `CreateIPSet`, `UpdateIPSet`,
`CreateSqlInjectionMatchSet`, and `UpdateSqlInjectionMatchSet`.
</li> <li> Create and update the `Rules` that you want to include in the
`WebACL`. For more information, see `CreateRule` and `UpdateRule`.
</li> <li> Create a `WebACL`. See `CreateWebACL`.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateWebACL` request.
</li> <li> Submit an `UpdateWebACL` request to specify the `Rules` that you
want to include in the `WebACL`, to specify the default action, and to
associate the `WebACL` with a CloudFront distribution.
The `ActivatedRule` can be a rule group. If you specify a rule group as
your `ActivatedRule`, you can exclude specific rules from that rule group.
If you already have a rule group associated with a web ACL and want to
submit an `UpdateWebACL` request to exclude certain rules from that rule
group, you must first remove the rule group from the web ACL, then re-insert
it, specifying the excluded rules. For details, see
`ActivatedRule$ExcludedRules`.
</li> </ol> Be aware that if you try to add a RATE_BASED rule to a web ACL
without setting the rule type when first creating the rule, the
`UpdateWebACL` request will fail because the request tries to add a REGULAR
rule (the default rule type) with the specified ID, which does not exist.
For more information about how to use the AWS WAF API to allow or block
HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_web_a_c_l(client, input, options \\ []) do
request(client, "UpdateWebACL", input, options)
end
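# A minimal sketch of an `UpdateWebACL` input that inserts one blocking
# rule; IDs and the change token are hypothetical:
#
#     input = %{
#       "WebACLId" => web_acl_id,
#       "ChangeToken" => change_token,
#       "DefaultAction" => %{"Type" => "ALLOW"},
#       "Updates" => [
#         %{
#           "Action" => "INSERT",
#           "ActivatedRule" => %{
#             "Priority" => 1,
#             "RuleId" => rule_id,
#             "Action" => %{"Type" => "BLOCK"}
#           }
#         }
#       ]
#     }
#     {:ok, _, _} = update_web_a_c_l(client, input)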
@doc """
Inserts or deletes `XssMatchTuple` objects (filters) in an `XssMatchSet`.
For each `XssMatchTuple` object, you specify the following values:
<ul> <li> `Action`: Whether to insert the object into or delete the object
from the array. To change an `XssMatchTuple`, you delete the existing
object and add a new one.
</li> <li> `FieldToMatch`: The part of web requests that you want AWS WAF
to inspect and, if you want AWS WAF to inspect a header or custom query
parameter, the name of the header or parameter.
</li> <li> `TextTransformation`: Which text transformation, if any, to
perform on the web request before inspecting the request for cross-site
scripting attacks.
You can only specify a single type of TextTransformation.
</li> </ul> You use `XssMatchSet` objects to specify which CloudFront
requests you want to allow, block, or count. For example, if you're
receiving requests that contain cross-site scripting attacks in the request
body and you want to block the requests, you can create an `XssMatchSet`
with the applicable settings, and then configure AWS WAF to block the
requests.
To create and configure an `XssMatchSet`, perform the following steps:
<ol> <li> Submit a `CreateXssMatchSet` request.
</li> <li> Use `GetChangeToken` to get the change token that you provide in
the `ChangeToken` parameter of an `UpdateXssMatchSet` request.
</li> <li> Submit an `UpdateXssMatchSet` request to specify the parts of
web requests that you want AWS WAF to inspect for cross-site scripting
attacks.
</li> </ol> For more information about how to use the AWS WAF API to allow
or block HTTP requests, see the [AWS WAF Developer
Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
"""
def update_xss_match_set(client, input, options \\ []) do
request(client, "UpdateXssMatchSet", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "waf-regional"}
host = get_host("waf-regional", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSWAF_Regional_20161128.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/waf_regional.ex
| 0.889481
| 0.776348
|
waf_regional.ex
|
starcoder
|
defmodule ElxValidation.Alpha do
@moduledoc """
### string
- The field under validation must be a string. If you would like to allow the field to also be null, you should assign
the nullable rule to the field.
```
data = %{
user_name1: "john_007",
user_name2: 1879, ---> return error
}
rules = [
%{
field: "user_name1",
validate: ["string"]
},
%{
field: "user_name2",
validate: ["string"]
}
]
```
***
### alpha
- The field under validation must be entirely alphabetic characters.
```
data = %{
p1: "<NAME>",
p2: "James 007", ---> return error
}
rules = [
%{
field: "p1",
validate: ["alpha"]
},
%{
field: "p2",
validate: ["alpha"]
}
]
```
***
### start_with:foo
- The field under validation must start with the given values.
```
data = %{
start_code: "G123other_string",
start_code2: "other_string" ---> return error
}
rules = [
%{
field: "start_code",
validate: ["start_with:G123"]
},
%{
field: "start_code2",
validate: ["start_with:Me32"]
}
]
```
### end_with:foo
- The field under validation must end with the given values
```
data = %{
end_code: "other_stringG123",
end_code2: "other_string" ---> return error
}
rules = [
%{
field: "end_code",
validate: ["end_with:G123"]
},
%{
field: "end_code2",
validate: ["end_with:Me32"]
}
]
```
"""
@doc """
Validate String data
"""
def is_string(target) do
is_bitstring(target)
rescue
_ ->
false
end
@doc """
Validate Only Alpha data [a to z / A to Z]
"""
def is_alpha(target) do
Regex.match?(~r/^[A-Za-z]+$/, String.replace(target, " ", ""))
rescue
_ ->
false
end
@doc """
Validates the string's starting value:
target: "code1234", check: "code" -> passed
"""
def start_with(target, start_value) do
cond do
!is_string(target) -> false
String.length(target) < String.length(start_value) -> false
String.slice(target, 0..String.length(start_value) - 1) == start_value -> true
true -> false
end
rescue
_ ->
false
end
@doc """
Validates the string's ending value:
target: "1234code", check: "code" -> passed
"""
def end_with(target, end_value) do
cond do
!is_string(target) -> false
String.length(target) < String.length(end_value) -> false
String.slice(target, String.length(end_value) * -1..-1) == end_value -> true
true -> false
end
rescue
_ ->
false
end
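# Illustrative checks for the helpers above:
#
#     iex> ElxValidation.Alpha.is_alpha("John Smith")
#     true
#     iex> ElxValidation.Alpha.start_with("G123other_string", "G123")
#     true
#     iex> ElxValidation.Alpha.end_with("other_stringG123", "G123")
#     true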
end
|
lib/rules/alpha.ex
| 0.820793
| 0.774285
|
alpha.ex
|
starcoder
|
defmodule AWS.Health do
@moduledoc """
AWS Health
The AWS Health API provides programmatic access to the AWS Health
information that appears in the [AWS Personal Health
Dashboard](https://phd.aws.amazon.com/phd/home#/). You can use the API
operations to get information about AWS Health events that affect your AWS
services and resources.
<note> You must have a Business or Enterprise support plan from [AWS
Support](http://aws.amazon.com/premiumsupport/) to use the AWS Health API.
If you call the AWS Health API from an AWS account that doesn't have a
Business or Enterprise support plan, you receive a
`SubscriptionRequiredException` error.
</note> AWS Health has a single endpoint: health.us-east-1.amazonaws.com
(HTTPS). Use this endpoint to call the AWS Health API operations.
For authentication of requests, AWS Health uses the [Signature Version 4
Signing
Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
If your AWS account is part of AWS Organizations, you can use the AWS
Health organizational view feature. This feature provides a centralized
view of AWS Health events across all accounts in your organization. You can
aggregate AWS Health events in real time to identify accounts in your
organization that are affected by an operational event or get notified of
security vulnerabilities. Use the organizational view API operations to
enable this feature and return event information. For more information, see
[Aggregating AWS Health
events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html)
in the *AWS Health User Guide*.
<note> When you use the AWS Health API operations to return AWS Health
events, see the following recommendations:
<ul> <li> Use the
[eventScopeCode](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode)
parameter to specify whether to return AWS Health events that are public or
account-specific.
</li> <li> Use pagination to view all events from the response. For
example, if you call the `DescribeEventsForOrganization` operation to get
all events in your organization, you might receive several page results.
Specify the `nextToken` in the next request to return more results.
</li> </ul> </note>
"""
@doc """
Returns a list of accounts in the organization from AWS Organizations that
are affected by the provided event. For more information about the
different types of AWS Health events, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
Before you can call this operation, you must first enable AWS Health to
work with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master account.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_affected_accounts_for_organization(client, input, options \\ []) do
request(client, "DescribeAffectedAccountsForOrganization", input, options)
end
@doc """
Returns a list of entities that have been affected by the specified events,
based on the specified filter criteria. Entities can refer to individual
customer resources, groups of customer resources, or any other construct,
depending on the AWS service. Events that have impact beyond that of the
affected entities, or where the extent of impact is unknown, include at
least one entity indicating this.
At least one event ARN is required. Results are sorted by the
`lastUpdatedTime` of the entity, starting with the most recent.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_affected_entities(client, input, options \\ []) do
request(client, "DescribeAffectedEntities", input, options)
end
@doc """
Returns a list of entities that have been affected by one or more events
for one or more accounts in your organization in AWS Organizations, based
on the filter criteria. Entities can refer to individual customer
resources, groups of customer resources, or any other construct, depending
on the AWS service.
At least one event Amazon Resource Name (ARN) and account ID are required.
Results are sorted by the `lastUpdatedTime` of the entity, starting with
the most recent.
Before you can call this operation, you must first enable AWS Health to
work with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master account.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_affected_entities_for_organization(client, input, options \\ []) do
request(client, "DescribeAffectedEntitiesForOrganization", input, options)
end
@doc """
Returns the number of entities that are affected by each of the specified
events. If no events are specified, the counts of all affected entities are
returned.
"""
def describe_entity_aggregates(client, input, options \\ []) do
request(client, "DescribeEntityAggregates", input, options)
end
@doc """
Returns the number of events of each event type (issue, scheduled change,
and account notification). If no filter is specified, the counts of all
events in each category are returned.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_event_aggregates(client, input, options \\ []) do
request(client, "DescribeEventAggregates", input, options)
end
@doc """
Returns detailed information about one or more specified events.
Information includes standard event data (Region, service, and so on, as
returned by
[DescribeEvents](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEvents.html)),
a detailed event description, and possible additional metadata that depends
upon the nature of the event. Affected entities are not included. To
retrieve those, use the
[DescribeAffectedEntities](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html)
operation.
If a specified event cannot be retrieved, an error message is returned for
that event.
"""
def describe_event_details(client, input, options \\ []) do
request(client, "DescribeEventDetails", input, options)
end
@doc """
Returns detailed information about one or more specified events for one or
more accounts in your organization. Information includes standard event
data (Region, service, and so on, as returned by
[DescribeEventsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventsForOrganization.html)),
a detailed event description, and possible additional metadata that depends
upon the nature of the event. Affected entities are not included; to
retrieve those, use the
[DescribeAffectedEntitiesForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html)
operation.
Before you can call this operation, you must first enable AWS Health to
work with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master account.
When you call the `DescribeEventDetailsForOrganization` operation, you
specify the `organizationEventDetailFilters` object in the request.
Depending on the AWS Health event type, note the following differences:
<ul> <li> If the event is public, the `awsAccountId` parameter must be
empty. If you specify an account ID for a public event, then an error
message is returned. That's because the event might apply to all AWS
accounts and isn't specific to an account in your organization.
</li> <li> If the event is specific to an account, then you must specify
the `awsAccountId` parameter in the request. If you don't specify an
account ID, an error message is returned because the event is specific to an
AWS account in your organization.
</li> </ul> For more information, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
"""
def describe_event_details_for_organization(client, input, options \\ []) do
request(client, "DescribeEventDetailsForOrganization", input, options)
end
@doc """
Returns the event types that meet the specified filter criteria. If no
filter criteria are specified, all event types are returned, in no
particular order.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_event_types(client, input, options \\ []) do
request(client, "DescribeEventTypes", input, options)
end
@doc """
Returns information about events that meet the specified filter criteria.
Events are returned in a summary form and do not include the detailed
description, any additional metadata that depends on the event type, or any
affected resources. To retrieve that information, use the
[DescribeEventDetails](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventDetails.html)
and
[DescribeAffectedEntities](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html)
operations.
If no filter criteria are specified, all events are returned. Results are
sorted by `lastModifiedTime`, starting with the most recent event.
<note> <ul> <li> When you call the `DescribeEvents` operation and specify
an entity for the `entityValues` parameter, AWS Health might return public
events that aren't specific to that resource. For example, if you call
`DescribeEvents` and specify an ID for an Amazon Elastic Compute Cloud
(Amazon EC2) instance, AWS Health might return events that aren't specific
to that resource or service. To get events that are specific to a service,
use the `services` parameter in the `filter` object. For more information,
see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
</li> <li> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</li> </ul> </note>
"""
def describe_events(client, input, options \\ []) do
request(client, "DescribeEvents", input, options)
end
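# A minimal sketch of a filtered call; the filter keys follow the AWS
# Health `DescribeEvents` request syntax and the values are illustrative:
#
#     input = %{
#       "filter" => %{"services" => ["EC2"], "eventStatusCodes" => ["open"]},
#       "maxResults" => 10
#     }
#     {:ok, body, _response} = describe_events(client, input)
#     body["events"]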
@doc """
Returns information about events across your organization in AWS
Organizations. You can use the `filters` parameter to specify the events
that you want to return. Events are returned in a summary form and don't
include the affected accounts, detailed description, any additional
metadata that depends on the event type, or any affected resources. To
retrieve that information, use the following operations:
<ul> <li>
[DescribeAffectedAccountsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedAccountsForOrganization.html)
</li> <li>
[DescribeEventDetailsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventDetailsForOrganization.html)
</li> <li>
[DescribeAffectedEntitiesForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html)
</li> </ul> If you don't specify a `filter`, the
`DescribeEventsForOrganization` operation returns all events across your
organization. Results are sorted by `lastModifiedTime`, starting with the
most recent event.
For more information about the different types of AWS Health events, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
Before you can call this operation, you must first enable AWS Health to
work with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master AWS account.
<note> This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
</note>
"""
def describe_events_for_organization(client, input, options \\ []) do
request(client, "DescribeEventsForOrganization", input, options)
end
@doc """
This operation provides status information on enabling or disabling AWS
Health to work with your organization. To call this operation, you must
sign in as an IAM user, assume an IAM role, or sign in as the root user
(not recommended) in the organization's master account.
"""
def describe_health_service_status_for_organization(client, input, options \\ []) do
request(client, "DescribeHealthServiceStatusForOrganization", input, options)
end
@doc """
Disables AWS Health from working with AWS Organizations. To call this
operation, you must sign in as an AWS Identity and Access Management (IAM)
user, assume an IAM role, or sign in as the root user (not recommended) in
the organization's master AWS account. For more information, see
[Aggregating AWS Health
events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html)
in the *AWS Health User Guide*.
This operation doesn't remove the service-linked role (SLR) from the AWS
master account in your organization. You must use the IAM console, API, or
AWS Command Line Interface (AWS CLI) to remove the SLR. For more
information, see [Deleting a Service-Linked
Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#delete-service-linked-role)
in the *IAM User Guide*.
<note> You can also disable the organizational feature by using the
Organizations
[DisableAWSServiceAccess](https://docs.aws.amazon.com/organizations/latest/APIReference/API_DisableAWSServiceAccess.html)
API operation. After you call this operation, AWS Health stops aggregating
events for all other AWS accounts in your organization. If you call the AWS
Health API operations for organizational view, AWS Health returns an error.
AWS Health continues to aggregate health events for your AWS account.
</note>
"""
def disable_health_service_access_for_organization(client, input, options \\ []) do
request(client, "DisableHealthServiceAccessForOrganization", input, options)
end
@doc """
Calling this operation enables AWS Health to work with AWS Organizations.
This applies a service-linked role (SLR) to the master account in the
organization. To call this operation, you must sign in as an IAM user,
assume an IAM role, or sign in as the root user (not recommended) in the
organization's master account.
For more information, see [Aggregating AWS Health
events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html)
in the *AWS Health User Guide*.
"""
def enable_health_service_access_for_organization(client, input, options \\ []) do
request(client, "EnableHealthServiceAccessForOrganization", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "health"}
host = build_host("health", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSHealth_20160804.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/health.ex
| 0.894847
| 0.639173
|
health.ex
|
starcoder
|
defmodule Plexy.Logger do
@moduledoc """
Plexy.Logger is a proxy to Elixir's built-in logger that knows how to
handle non char-data and has a few other helpful logging functions.
"""
alias Plexy.Config
require Logger
@overrides [:info, :warn, :debug, :error]
for name <- @overrides do
@doc """
Logs a #{name} message.
Returns the atom :ok or an {:error, reason}
## Examples
Plexy.Logger.#{name} "hello?"
Plexy.Logger.#{name} [color: "purple"]
Plexy.Logger.#{name} %{sky: "blue"}
Plexy.Logger.#{name} fn -> hard_work_goes_here end
"""
def unquote(name)(datum_or_fn, metadata \\ []) do
case decorate_with_app_name(datum_or_fn) do
datum when is_list(datum) or is_map(datum) ->
Logger.unquote(name)(fn -> list_to_line(datum) end, metadata)
datum ->
Logger.unquote(name)(fn -> redact(datum) end, metadata)
end
end
end
@doc """
Logs a info message with the given metric as a count
## Examples
Plexy.Logger.count(:signup, 2)
Plexy.Logger.count("registration", 1)
Plexy.Logger.count("registration") # same as above
"""
def count(metric, count \\ 1) do
info(%{metric_name(metric, :count) => count})
end
@doc """
Logs a info message and tags it as `metric`.
## Examples
Plexy.Logger.measure(:request, 200)
"""
def measure(metric, time) when is_number(time) do
info(%{metric_name(metric, :measure) => time})
end
@doc """
Logs a info message the amount of time in milliseconds required to run
the given function and tags it as `metric`.
## Examples
Plexy.Logger.measure(:call_core, &super_slow_call/0)
Plexy.Logger.measure("rebuild", fn -> rebuild_the_invoice end)
"""
def measure(metric, fun) do
{time, result} = :timer.tc(fun)
measure(add_ms_to_metric_name(metric), time / 1000.0)
result
end
@doc """
Log using the given level and data. This function should be avoided in
favor of `.info`, `.warn`, `.debug`, `.error`, because they are removed
at compile time.
"""
def log(level, datum_or_fn, metadata \\ [])
def log(level, datum, metadata) when is_list(datum) or is_map(datum) do
log(level, fn -> list_to_line(datum) end, metadata)
end
def log(level, chardata_or_fn, metadata), do: Logger.log(level, chardata_or_fn, metadata)
defp add_ms_to_metric_name(metric) do
if metric |> to_string() |> String.ends_with?(".ms") do
metric
else
"#{metric}.ms"
end
end
defp metric_name(metric, name) when is_atom(metric) do
metric |> to_string |> metric_name(name)
end
defp metric_name(metric, name) do
name = to_string(name)
"#{name}##{app_name()}.#{metric}"
end
defp app_name do
Config.get(:plexy, :app_name) || raise "You must set app_name for Plexy config"
end
defp list_to_line(datum) when is_list(datum) or is_map(datum) do
datum
|> decorate_with_app_name()
|> Enum.reduce("", &pair_to_segment/2)
|> String.trim_trailing(" ")
|> redact()
end
defp decorate_with_app_name(datum) when is_list(datum) do
Keyword.merge([app: app_name()], datum)
end
defp decorate_with_app_name(datum) when is_map(datum) do
Map.merge(%{app: app_name()}, datum)
end
defp decorate_with_app_name(datum_or_fn) do
datum_or_fn
end
defp redact(line) when is_binary(line) do
:plexy
|> Application.get_env(:logger, [])
|> Keyword.get(:redactors, [])
|> Enum.reduce_while(line, fn {redactor, opts}, l ->
redactor.run(l, opts)
end)
end
defp redact(fun) when is_function(fun) do
redact(fun.())
end
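# `redact/1` relies on `Enum.reduce_while/3`, so each configured redactor's
# `run/2` must return `{:cont, line}` to continue or `{:halt, line}` to stop
# the chain. A minimal sketch of such a redactor (the module name and option
# are hypothetical):
#
#     defmodule MyApp.PasswordRedactor do
#       def run(line, replacement: replacement) do
#         {:cont, String.replace(line, ~r/password=\S+/, "password=#{replacement}")}
#       end
#     end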
defp pair_to_segment({k, v}, acc) when is_atom(k) do
pair_to_segment({to_string(k), v}, acc)
end
defp pair_to_segment({k, v}, acc) when is_binary(v) do
if String.contains?(v, " ") do
"#{acc}#{k}=#{inspect(v)} "
else
"#{acc}#{k}=#{v} "
end
end
defp pair_to_segment({k, v}, acc) do
pair_to_segment({k, inspect(v)}, acc)
end
end
|
lib/plexy/logger.ex
| 0.862149
| 0.465752
|
logger.ex
|
starcoder
|
defmodule EtsLock do
@moduledoc ~S"""
EtsLock is a library for acquiring exclusive locks on data in
[ETS](http://erlang.org/doc/man/ets.html) tables.
Using `with_ets_lock/4`, you can process all `{key, value}` tuples for a
given `key` while being sure other processes using `with_ets_lock/4`
are not mutating the data stored for this key.
Processing is performed in a separate process so that an execution
timeout (`:exec_timeout`) can be enforced.
For high concurrent performance, all locks are stored in a separate
ETS table, and no GenServers or other processes are used to coordinate
access. This is in contrast to the
[erlang-ets-lock library](https://github.com/afiskon/erlang-ets-lock/),
which uses a single GenServer to serialize access to ETS.
"""
@type opts :: [option]
@type option ::
{:wait_timeout, non_neg_integer | :infinity}
| {:exec_timeout, non_neg_integer | :infinity}
| {:fail_timeout, non_neg_integer | :infinity}
| {:spin_delay, non_neg_integer}
| {:lock_table, :ets.tab()}
@type key :: any
@type value :: any
@type error_reason :: :wait_timeout | :exec_timeout | any
@defaults [
wait_timeout: 5000,
exec_timeout: 5000,
fail_timeout: 1000,
spin_delay: 2,
lock_table: EtsLock.Locks
]
defp config(opt, opts), do: opts[opt] || @defaults[opt]
defp now, do: :erlang.system_time(:millisecond)
@doc ~S"""
Locks a key in ETS and invokes `fun` on the `{key, value}` tuples for
that key. Returns `{:ok, fun.(tuples)}` on success.
If the key is already locked, this function spins until the lock is
released or the `:wait_timeout` is reached.
Example:
iex> table = :ets.new(:whatever, [:set, :public])
iex> spawn(fn ->
...> ## Wait 50ms, try to acquire lock, then insert
...> Process.sleep(50)
...> EtsLock.with_ets_lock(table, :key, fn _ ->
...> :ets.insert(table, {:key, :yup})
...> end)
...> end)
iex> spawn(fn ->
...> ## Acquire lock immediately, hold it for 100ms, then insert
...> EtsLock.with_ets_lock(table, :key, fn _ ->
...> Process.sleep(100)
...> :ets.insert(table, {:key, :nope})
...> end)
...> end)
iex> Process.sleep(200)
iex> :ets.lookup(table, :key)
[{:key, :yup}]
Options:
* `:wait_timeout` - Milliseconds to wait when acquiring a lock.
Default 5000. Set to `:infinity` to try forever to acquire a lock.
* `:exec_timeout` - Milliseconds to allow `fun` to hold the lock before
its execution is cancelled and the lock is released. Default 5000.
Set to `:infinity` to hold the lock indefinitely until `fun` has finished.
* `:fail_timeout` - Milliseconds to wait before forcibly deleting a
lock after it _should_ have been released, but wasn't. Default 1000.
Provides protection against permanently hanging locks in the case that
both the caller and the spawned task crash. Set to `:infinity` to
disable this protection (not recommended).
* `:spin_delay` - Milliseconds to wait between every attempt to acquire a
lock. Default 2.
* `:lock_table` - ETS table in which to store locks. Default `EtsLock.Locks`.
"""
@spec with_ets_lock(:ets.tab(), key, ([{key, value}] -> any), opts) ::
{:ok, any} | {:error, error_reason}
def with_ets_lock(table, key, fun, opts \\ []) do
wait_timeout = config(:wait_timeout, opts)
exec_timeout = config(:exec_timeout, opts)
fail_timeout = config(:fail_timeout, opts)
spin_delay = config(:spin_delay, opts)
lock_table = config(:lock_table, opts)
now = now()
wait_timeout_at = if wait_timeout == :infinity, do: :infinity, else: now + wait_timeout
with_ets_lock(
table,
key,
fun,
wait_timeout_at,
exec_timeout,
fail_timeout,
spin_delay,
lock_table
)
end
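## A minimal sketch of handling contention and slow work (the callback and
## timeout values are illustrative):
##
##     case EtsLock.with_ets_lock(table, :key, &do_work/1, wait_timeout: 100) do
##       {:ok, result} -> result
##       {:error, :wait_timeout} -> :lock_busy
##       {:error, :exec_timeout} -> :work_took_too_long
##     end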
## Strategy:
## Take advantage of ETS' serializability
## Use `:ets.insert_new/2` for atomic lock acquisition
## Use `:ets.delete_object/2` to ensure we release only our own lock,
## or the stale lock we intend to destroy
defp with_ets_lock(
table,
key,
fun,
wait_timeout_at,
exec_timeout,
fail_timeout,
spin_delay,
lock_table
) do
lock_key = {EtsLock.Lock, table, key}
now = now()
if wait_timeout_at != :infinity && now >= wait_timeout_at do
{:error, :wait_timeout}
else
case :ets.lookup(lock_table, lock_key) do
[{_lock_key, {_pid, release_at}} = object] ->
if release_at != :infinity && now >= release_at do
## Stale lock -- release it
:ets.delete_object(lock_table, object)
else
Process.sleep(spin_delay)
end
with_ets_lock(
table,
key,
fun,
wait_timeout_at,
exec_timeout,
fail_timeout,
spin_delay,
lock_table
)
[] ->
release_at =
cond do
exec_timeout == :infinity -> :infinity
fail_timeout == :infinity -> :infinity
:else -> now + exec_timeout + fail_timeout
end
lock_value = {self(), release_at}
case :ets.insert_new(lock_table, {lock_key, lock_value}) do
false ->
## We lost a race and need to wait our turn
Process.sleep(spin_delay)
with_ets_lock(
table,
key,
fun,
wait_timeout_at,
exec_timeout,
fail_timeout,
spin_delay,
lock_table
)
true ->
## We acquired a lock
task = Task.async(fn -> :ets.lookup(table, key) |> fun.() end)
return_value =
case Task.yield(task, exec_timeout) do
nil ->
Task.shutdown(task, :brutal_kill)
{:error, :exec_timeout}
other ->
other
end
:ets.delete_object(lock_table, {lock_key, lock_value})
return_value
end
end
end
end
end
|
lib/ets_lock.ex
| 0.799207
| 0.459319
|
ets_lock.ex
|
starcoder
|
defmodule EctoSchemaStore.Assistant do
@moduledoc """
Provides macros to customize configuration aspects of a store module.
"""
@doc """
Creates variations of the existing edit functions with a predefined configuration.
Functions preconfigured:
* `insert`
* `insert!`
* `insert_fields`
* `insert_fields!`
* `validate_insert`
* `update`
* `update!`
* `update_fields`
* `update_fields!`
* `validate_update`
If using the name `api`, the following functions will be generated:
* `insert_api`
* `insert_api!`
* `insert_fields_api`
* `insert_fields_api!`
* `validate_insert_api`
* `update_api`
* `update_api!`
* `update_fields_api`
* `update_fields_api!`
* `validate_update_api`
"""
defmacro preconfigure(name, predefined_options \\ [])
when is_atom(name) and is_list(predefined_options) do
quote do
preconfigure_insert(unquote(name), unquote(predefined_options), "_", "")
preconfigure_insert(unquote(name), unquote(predefined_options), "_", "!")
preconfigure_insert(unquote(name), unquote(predefined_options), "_fields_", "")
preconfigure_insert(unquote(name), unquote(predefined_options), "_fields_", "!")
preconfigure_update(unquote(name), unquote(predefined_options), "_", "")
preconfigure_update(unquote(name), unquote(predefined_options), "_", "!")
preconfigure_update(unquote(name), unquote(predefined_options), "_fields_", "")
preconfigure_update(unquote(name), unquote(predefined_options), "_fields_", "!")
preconfigure_validate(unquote(name), unquote(predefined_options))
end
end
@doc """
Creates a preconfigured version of an existing edit function.
```elixir
preconfigure_insert :api, changeset: :mychangeset
preconfigure_insert :api, [changeset: :mychangeset], "_fields_", "!"
insert_api name: "Sample"
insert_fields_api! name: "Sample"
```
"""
defmacro preconfigure_insert(
name,
predefined_options \\ [],
action_prefix \\ "_",
action_suffix \\ ""
)
when is_atom(name) and is_list(predefined_options) do
new_name = String.to_atom("insert#{action_prefix}#{name}#{action_suffix}")
action_prefix = String.replace_suffix(action_prefix, "_", "")
callable = String.to_atom("insert#{action_prefix}#{action_suffix}")
quote do
@doc """
Inserts a record using the `#{unquote(callable)}` function with the following
predefined options.
```elixir
#{unquote(inspect(predefined_options))}
```
Using:
```elixir
preconfigure :api, changeset: :mychangeset, errors_to_map: :my_record
# Basic Insert
#{unquote(new_name)} name: "Sample"
# Override predefined options
#{unquote(new_name)} [name: "Sample"], changeset: :otherchangeset
```
"""
def unquote(new_name)(params, opts \\ []) do
options = Keyword.merge(unquote(predefined_options), opts)
unquote(callable)(params, options)
end
end
end
@doc """
Creates a preconfigured version of an existing edit function.
```elixir
preconfigure_update :api, changeset: :mychangeset
preconfigure_update :api, [changeset: :mychangeset], "_fields_", "!"
update_api name: "Sample"
update_fields_api! name: "Sample"
```
"""
defmacro preconfigure_update(
name,
predefined_options \\ [],
action_prefix \\ "_",
action_suffix \\ ""
)
when is_atom(name) and is_list(predefined_options) do
new_name = String.to_atom("update#{action_prefix}#{name}#{action_suffix}")
action_prefix = String.replace_suffix(action_prefix, "_", "")
callable = String.to_atom("update#{action_prefix}#{action_suffix}")
quote do
@doc """
Updates a record using the `#{unquote(callable)}` function with the following
predefined options.
```elixir
#{unquote(inspect(predefined_options))}
```
Using:
```elixir
preconfigure :api, changeset: :mychangeset, errors_to_map: :my_record
model = insert_api name: "Sample"
# Basic Update
#{unquote(new_name)} model, name: "Sample2"
# Override predefined options
#{unquote(new_name)} model, [name: "Sample"], changeset: :otherchangeset
```
"""
def unquote(new_name)(schema_or_id, params, opts \\ []) do
options = Keyword.merge(unquote(predefined_options), opts)
unquote(callable)(schema_or_id, params, options)
end
end
end
defmacro preconfigure_validate(name, predefined_options \\ [])
when is_atom(name) and is_list(predefined_options) do
update_name = String.to_atom("validate_update_#{name}")
insert_name = String.to_atom("validate_insert_#{name}")
quote do
@doc """
Checks update validation with the following predefined options:
```elixir
#{unquote(inspect(predefined_options))}
```
Using:
```elixir
preconfigure :api, changeset: :mychangeset, errors_to_map: :my_record
model = insert_api name: "Sample"
# Basic update validation
#{unquote(update_name)} model, name: "Sample2"
# Override predefined options validation
#{unquote(update_name)} model, [name: "Sample"], changeset: :otherchangeset
```
"""
def unquote(update_name)(schema_or_id, params, opts \\ []) do
options = Keyword.merge(unquote(predefined_options), opts)
validate_update(schema_or_id, params, options)
end
@doc """
Checks insert validation with the following predefined options:
```elixir
#{unquote(inspect(predefined_options))}
```
Using:
```elixir
preconfigure :api, changeset: :mychangeset, errors_to_map: :my_record
# Basic insert validation
#{unquote(insert_name)} name: "Sample2"
# Override predefined options validation
#{unquote(insert_name)} [name: "Sample"], changeset: :otherchangeset
```
"""
def unquote(insert_name)(params, opts \\ []) do
options = Keyword.merge(unquote(predefined_options), opts)
validate_insert(params, options)
end
end
end
end
|
lib/ecto_schema_store/assistant.ex
| 0.778102
| 0.678567
|
assistant.ex
|
starcoder
|
defmodule GitHubActions.Yaml do
@moduledoc false
@spec encode(any()) :: String.t()
def encode(data) do
data
|> do_encode([], 0)
|> Enum.reverse()
|> Enum.join("\n")
|> newline()
end
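# An illustrative round trip; note that version-like strings such as
# "1.10" are quoted so YAML does not read them as numbers:
#
#     GitHubActions.Yaml.encode([{"name", "CI"}, {"on", [{"push", [{"branches", ["main"]}]}]}])
#     #=> "name: CI\non:\n  push:\n    branches:\n      - main\n"
#
#     GitHubActions.Yaml.encode("1.10")
#     #=> "'1.10'\n"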
defp do_encode(data, lines, _depth) when is_binary(data) do
string = if num_or_version?(data), do: "'#{data}'", else: data
[string | lines]
end
defp do_encode(data, lines, _depth) when is_number(data) do
[to_string(data) | lines]
end
defp do_encode(data, lines, depth) when is_map(data) do
data
|> Enum.into([])
|> do_encode(lines, depth)
end
defp do_encode([{key, item} | data], lines, depth) when is_binary(item) do
item = if num_or_version?(item), do: "'#{item}'", else: item
case lines?(item) do
true ->
add = indent_heredoc(key, item, depth)
do_encode(data, add ++ lines, depth)
false ->
add = indent_key(key, item, depth)
do_encode(data, [add | lines], depth)
end
end
defp do_encode([{key, item} | data], lines, depth) when is_number(item) do
add = indent_key(key, item, depth)
do_encode(data, [add | lines], depth)
end
defp do_encode([{key, item} | data], lines, depth) do
add = indent_key(key, depth)
sub = do_encode(item, [], depth + 1)
do_encode(data, Enum.concat([sub, [add], lines]), depth)
end
defp do_encode([item | data], lines, depth) do
{items, [last]} = do_encode(item, [], 0) |> Enum.split(-1)
items = indent(items, depth + 1) ++ [indent_item(last, depth)]
do_encode(data, items ++ lines, depth)
end
defp do_encode([], lines, _depth), do: lines
defp indent(depth), do: String.duplicate(" ", depth * 2)
defp indent(lines, depth) when is_list(lines) do
Enum.map(lines, fn line -> "#{indent(depth)}#{line}" end)
end
defp indent_item(item, depth), do: "#{indent(depth)}- #{item}"
defp indent_key(key, depth), do: "#{indent(depth)}#{key}:"
defp indent_heredoc(string, depth) do
string
|> String.trim_trailing()
|> String.split("\n")
|> indent(depth)
|> Enum.reverse()
end
defp indent_key(key, item, depth), do: "#{indent_key(key, depth)} #{item}"
defp indent_heredoc(key, string, depth) do
lines = indent_heredoc(string, depth + 1)
line = "#{indent_key(key, depth)} |"
lines ++ [line]
end
defp lines?(string), do: String.contains?(string, "\n")
defp newline(string), do: "#{string}\n"
defp num_or_version?(string) do
string
|> String.split(".")
|> Enum.all?(fn part -> part =~ ~r/^\d+$/ end)
end
end
|
lib/git_hub_actions/yaml.ex
| 0.663778
| 0.480174
|
yaml.ex
|
starcoder
|
defmodule ElixirKeeb.UI.DataFaker do
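@moduledoc """
A GenServer that fakes a bounded, continuously updated series of data points for UI demos.
The state holds at most `max` values; a fresh pseudo-random sample is appended
every 1.5 seconds (see `@wait_before_more_data_ms`).
"""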
use GenServer
@wait_before_more_data_ms 1500
@impl true
def init({elem, how_many}) do
data = 1..how_many
|> Enum.map(fn _ -> elem end)
Process.send_after(self(), :more_data, @wait_before_more_data_ms)
{:ok, %{data: data, max: how_many}}
end
@impl true
def init(length) do
data = length
|> random_data()
|> Enum.reverse()
Process.send_after(self(), :more_data, @wait_before_more_data_ms)
{:ok, %{data: data, max: length}}
end
@impl true
def handle_info(:more_data, %{data: data, max: max} = state) do
Process.send_after(self(), :more_data, @wait_before_more_data_ms)
state = %{state | data: append_data(data, random_item(), max)}
{:noreply, state}
end
@impl true
def handle_call({:get, :all}, _from, %{data: data} = state) do
to_return = Enum.reverse(data)
{:reply, to_return, state}
end
@impl true
def handle_call({:get, how_many}, _from, %{data: data} = state) when how_many > 0 do
to_return = data
|> Enum.take(how_many)
|> Enum.reverse()
{:reply, to_return, state}
end
@impl true
def handle_cast({:add, new_item}, %{data: data, max: max} = state) do
state = %{state | data: append_data(data, new_item, max)}
{:noreply, state}
end
def new([init_arg, options]) do
new(init_arg, options)
end
# An explicit new/1 clause replaces the former `options \\ ...` default, which
# would generate a new/1 that conflicts with the list-form clause above.
def new(init_arg), do: new(init_arg, name: __MODULE__)
def new(init_arg, options) do
GenServer.start_link(__MODULE__, init_arg, options)
end
def get, do: get(__MODULE__)
def get(how_many) when is_integer(how_many),
do: get(__MODULE__, how_many)
def get(server) when is_atom(server) or is_pid(server) do
GenServer.call(server, {:get, :all})
end
def get(server, how_many)
when (is_atom(server) or is_pid(server)) and is_integer(how_many) do
GenServer.call(server, {:get, how_many})
end
def append(server, new_item) do
GenServer.cast(server, {:add, new_item})
end
def append_random(server) do
GenServer.cast(server, {:add, random_item()})
end
defp append_data(data, new_item, max) when length(data) < max,
do: [new_item | data]
defp append_data(data, new_item, max),
do: [new_item | data]
|> Enum.take(max)
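# Fake samples follow abs(1 + sin((seed + i) / 5)), so each run draws a
# slightly different smooth curve depending on the monotonic-clock seed.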
defp random_data(length) do
seed = seed()
1..length
|> Enum.map(random_item_fn(seed))
end
defp random_item, do: random_item_fn().(0)
defp random_item_fn(seed \\ nil)
defp random_item_fn(nil) do
seed = seed()
&abs(1 + :math.sin((seed + &1) / 5.0))
end
defp random_item_fn(seed),
do: &abs(1 + :math.sin((seed + &1) / 5.0))
defp seed do
System.monotonic_time()
|> Integer.mod(100)
end
end
|
lib/elixir_keeb_ui/data_faker.ex
| 0.672762
| 0.428891
|
data_faker.ex
|
starcoder
|
defmodule SMPPEX do
@moduledoc ~S"""
SMPPEX is a framework for building SMPP servers and clients (which are often
referred to as MC and ESME entities respectively).
The major features exposed by the library are:
* `SMPPEX.ESME` module and behaviour for implementing ESME entities;
* `SMPPEX.MC` module and behaviour for implementing MC entities;
* `SMPPEX.ESME.Sync` module representing simple ready to use SMPP client.
One of the core features of the library is simplicity, both of the code and of its use.
* The library does not implement much TCP handling or session management itself;
it is built on the great [`ranch`](https://github.com/ninenines/ranch) library.
* The SMPP session is symmetric (used by both ESME and MC) and is implemented as a
`ranch_protocol` behaviour.
* The library includes an easy, ready-to-use SMPP client (`SMPPEX.ESME.Sync`) which
supports synchronous SMS sending and does not require implementing the ESME behaviour.
There is also an SMPP testing tool [`smppsend`](https://github.com/savonarola/smppsend)
based on this client.
## SMPPEX.ESME.Sync
`SMPPEX.ESME.Sync` is the most straightforward way to interact with an SMSC. Example:
{:ok, esme} = SMPPEX.ESME.Sync.start_link(host, port)
bind = SMPPEX.Pdu.Factory.bind_transmitter("system_id", "password")
{:ok, _bind_resp} = SMPPEX.ESME.Sync.request(esme, bind)
# We are bound, let's send a message
submit_sm = SMPPEX.Pdu.Factory.submit_sm({"from", 1, 1}, {"to", 1, 1}, "hello!")
{:ok, submit_sm_resp} = SMPPEX.ESME.Sync.request(esme, submit_sm)
# Message is sent, let's get the obtained id:
message_id = SMPPEX.Pdu.field(submit_sm_resp, :message_id)
# Now let's wait for a delivery report:
delivery_report? = fn(pdu) ->
SMPPEX.Pdu.command_name(pdu) == :deliver_sm and
SMPPEX.Pdu.field(pdu, :receipted_message_id) == message_id
end
delivery_reports = case SMPPEX.ESME.Sync.wait_for_pdus(esme, 60000) do
:stop ->
Logger.info("Ooops, ESME stopped")
[]
:timeout ->
Logger.info("No DLR in 60 seconds")
[]
received_items ->
# Let's filter out DLRs for the previously submitted message
for {:pdu, pdu} <- received_items, delivery_report?.(pdu), do: pdu
end
## SMPPEX.ESME
`SMPPEX.ESME` can be used when more complicated client logic is needed, for example
custom immediate reactions to all incoming PDUs, rps/window control, etc.
`SMPPEX.ESME` provides "empty" defaults for all required callbacks, so a minimal ESME
can be very simple:
defmodule DummyESME do
use SMPPEX.ESME
def start_link(host, port) do
SMPPEX.ESME.start_link(host, port, {__MODULE__, []})
end
end
It is still completely functional:
{:ok, esme} = DummyESME.start_link(host, port)
SMPPEX.ESME.send_pdu(esme, SMPPEX.Pdu.Factory.bind_transmitter("system_id", "password"))
Here's a more complicated example of ESME, which does the following:
* Receives port number and three arguments:
- `waiting_pid` -- a pid of the process which will be informed when ESME stops;
- `count` -- count of PDUs to send;
- `window` -- window size, the maximum number of sent PDU's without resps.
* Connects to the specified port on localhost and issues a bind command.
* Starts to send predefined PDUs after bind at the maximum possible rate while respecting the window size.
* Stops after all PDUs are sent and notifies the waiting process.
```
defmodule SMPPBenchmarks.ESME do
use SMPPEX.ESME
require Logger
@from {"from", 1, 1}
@to {"to", 1, 1}
@message "hello"
@system_id "system_id"
@password "password"
def start_link(port, waiting_pid, count, window) do
SMPPEX.ESME.start_link("127.0.0.1", port, {__MODULE__, [waiting_pid, count, window]})
end
def init([waiting_pid, count, window]) do
SMPPEX.ESME.send_pdu(self(), SMPPEX.Pdu.Factory.bind_transmitter(@system_id, @password))
{:ok, %{waiting_pid: waiting_pid, count_to_send: count, count_waiting_resp: 0, window: window}}
end
def handle_resp(pdu, _original_pdu, st) do
case pdu |> SMPPEX.Pdu.command_id |> SMPPEX.Protocol.CommandNames.name_by_id do
{:ok, :submit_sm_resp} ->
new_st = %{st | count_waiting_resp: st.count_waiting_resp - 1}
send_pdus(new_st)
{:ok, :bind_transmitter_resp} ->
send_pdus(st)
_ ->
st
end
end
def handle_resp_timeout(pdu, st) do
Logger.error("PDU timeout: #{inspect pdu}, terminating")
SMPPEX.ESME.stop(self())
st
end
def handle_stop(st) do
Logger.info("ESME stopped")
Kernel.send(st.waiting_pid, {self(), :done})
st
end
defp send_pdus(st) do
cond do
st.count_to_send > 0 ->
count_to_send = min(st.window - st.count_waiting_resp, st.count_to_send)
:ok = do_send(self(), count_to_send)
%{st | count_waiting_resp: st.count_waiting_resp + count_to_send, count_to_send: st.count_to_send - count_to_send}
st.count_waiting_resp > 0 ->
st
true ->
Logger.info("All PDUs sent, all resps received, terminating")
SMPPEX.ESME.stop(self())
st
end
end
defp do_send(_esme, n) when n <= 0, do: :ok
defp do_send(esme, n) do
submit_sm = SMPPEX.Pdu.Factory.submit_sm(@from, @to, @message)
:ok = SMPPEX.ESME.send_pdu(esme, submit_sm)
do_send(esme, n - 1)
end
end
```
Not all callbacks are used yet in this example, for the full list see `SMPPEX.ESME` documentation.
## SMPPEX.MC
`SMPPEX.MC` is used for _receiving_ and handling SMPP connections. This module also provides
default "empty" callbacks.
Here is an example of a very simple MC, which does the following:
* Starts and listens to connections on the specified port.
* Responds with OK status to all incoming binds.
* Responds to all `enquire_link` packets.
* Responds with incremental message ids to all incoming `submit_sm` packets (regardless of the bind state).
```
defmodule MC do
use SMPPEX.MC
def start(port) do
SMPPEX.MC.start({__MODULE__, []}, [transport_opts: [port: port]])
end
def init(_socket, _transport, []) do
{:ok, 0}
end
def handle_pdu(pdu, last_id) do
case pdu |> SMPPEX.Pdu.command_id |> SMPPEX.Protocol.CommandNames.name_by_id do
{:ok, :submit_sm} ->
SMPPEX.MC.reply(self(), pdu, SMPPEX.Pdu.Factory.submit_sm_resp(0, to_string(last_id)))
last_id + 1
{:ok, :bind_transmitter} ->
SMPPEX.MC.reply(self(), pdu, SMPPEX.Pdu.Factory.bind_transmitter_resp(0))
last_id
{:ok, :enquire_link} ->
SMPPEX.MC.reply(self(), pdu, SMPPEX.Pdu.Factory.enquire_link_resp)
last_id
_ -> last_id
end
end
end
```
"""
end
|
lib/smppex.ex
| 0.842053
| 0.86113
|
smppex.ex
|
starcoder
|
defmodule Fares.Month do
@moduledoc """
Calculates the lowest and highest monthly pass fare for a particular trip.
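## Example
A hypothetical lookup (the stop ids below are illustrative):
Fares.Month.recommended_pass(route, trip, "place-origin", "place-dest")
#=> %Fares.Fare{duration: :month, ...} or nil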
"""
alias Fares.{Fare, Repo}
alias Routes.Route
alias Schedules.Trip
alias Stops.Stop
@type fare_fn :: (Keyword.t() -> [Fare.t()])
@spec recommended_pass(
Route.t() | Route.id_t(),
Trip.t() | Trip.id_t() | nil,
Stop.id_t(),
Stop.id_t(),
fare_fn()
) ::
Fare.t() | nil
def recommended_pass(route, trip, origin_id, destination_id, fare_fn \\ &Repo.all/1)
def recommended_pass(nil, _, _, _, _), do: nil
def recommended_pass(route_id, trip, origin_id, destination_id, fare_fn)
when is_binary(route_id) do
route = Routes.Repo.get(route_id)
recommended_pass(route, trip, origin_id, destination_id, fare_fn)
end
def recommended_pass(route, trip_id, origin_id, destination_id, fare_fn)
when is_binary(trip_id) do
trip = Schedules.Repo.trip(trip_id)
recommended_pass(route, trip, origin_id, destination_id, fare_fn)
end
def recommended_pass(route, trip, origin_id, destination_id, fare_fn) do
route
|> get_fares(trip, origin_id, destination_id, fare_fn)
|> Enum.min_by(& &1.cents, fn -> nil end)
end
@spec base_pass(
Route.t() | Route.id_t(),
Trip.t() | Trip.id_t() | nil,
Stop.id_t(),
Stop.id_t(),
fare_fn()
) ::
Fare.t() | nil
def base_pass(route, trip, origin_id, destination_id, fare_fn \\ &Repo.all/1)
def base_pass(nil, _, _, _, _), do: nil
def base_pass(route_id, trip, origin_id, destination_id, fare_fn) when is_binary(route_id) do
route = Routes.Repo.get(route_id)
base_pass(route, trip, origin_id, destination_id, fare_fn)
end
def base_pass(route, trip_id, origin_id, destination_id, fare_fn) when is_binary(trip_id) do
trip = Schedules.Repo.trip(trip_id)
base_pass(route, trip, origin_id, destination_id, fare_fn)
end
def base_pass(route, trip, origin_id, destination_id, fare_fn) do
route
|> get_fares(trip, origin_id, destination_id, fare_fn)
|> Enum.max_by(& &1.cents, fn -> nil end)
end
@spec reduced_pass(
Route.t() | Route.id_t(),
Trip.t() | Trip.id_t() | nil,
Stop.id_t(),
Stop.id_t(),
fare_fn()
) ::
Fare.t() | nil
def reduced_pass(route, trip, origin_id, destination_id, fare_fn \\ &Repo.all/1)
def reduced_pass(nil, _, _, _, _), do: nil
def reduced_pass(route_id, trip, origin_id, destination_id, fare_fn) when is_binary(route_id) do
route = Routes.Repo.get(route_id)
reduced_pass(route, trip, origin_id, destination_id, fare_fn)
end
def reduced_pass(route, trip_id, origin_id, destination_id, fare_fn) when is_binary(trip_id) do
trip = Schedules.Repo.trip(trip_id)
reduced_pass(route, trip, origin_id, destination_id, fare_fn)
end
def reduced_pass(route, trip, origin_id, destination_id, fare_fn) do
route
|> get_fares(trip, origin_id, destination_id, fare_fn, :any)
|> List.first()
end
@spec get_fares(Route.t(), Trip.t() | nil, Stop.id_t(), Stop.id_t(), fare_fn()) :: [Fare.t()]
@spec get_fares(Route.t(), Trip.t() | nil, Stop.id_t(), Stop.id_t(), fare_fn(), Fare.reduced()) ::
[
Fare.t()
]
defp get_fares(route, trip, origin_id, destination_id, fare_fn, reduced \\ nil) do
route_filters =
route.type
|> Route.type_atom()
|> name_or_mode_filter(route, origin_id, destination_id, trip)
[reduced: reduced, duration: :month]
|> Keyword.merge(route_filters)
|> fare_fn.()
end
@spec name_or_mode_filter(atom(), Route.t(), Stop.id_t(), Stop.id_t(), Trip.t() | nil) ::
Keyword.t()
defp name_or_mode_filter(:subway, _route, _origin_id, _destination_id, _trip) do
[mode: :subway]
end
defp name_or_mode_filter(_, %{description: :rail_replacement_bus}, _, _, _) do
[name: :free_fare]
end
defp name_or_mode_filter(_, %{id: "CR-Foxboro"}, _, _, _) do
[name: :foxboro]
end
defp name_or_mode_filter(:bus, %{id: route_id}, origin_id, _destination_id, _trip) do
name =
cond do
Fares.express?(route_id) -> :express_bus
Fares.silver_line_airport_stop?(route_id, origin_id) -> :free_fare
Fares.silver_line_rapid_transit?(route_id) -> :subway
true -> :local_bus
end
[name: name]
end
defp name_or_mode_filter(:commuter_rail, _, origin_id, destination_id, trip) do
case Fares.fare_for_stops(:commuter_rail, origin_id, destination_id, trip) do
{:ok, name} ->
[name: name]
:error ->
[mode: :commuter_rail]
end
end
defp name_or_mode_filter(:ferry, _, origin_id, destination_id, _) do
[name: :ferry |> Fares.fare_for_stops(origin_id, destination_id) |> elem(1)]
end
end
|
apps/fares/lib/month.ex
| 0.742515
| 0.41938
|
month.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.ConfigurationSet do
@moduledoc """
Set the configuration parameter
Params:
* `:size` - specifies the size of the configuration parameter
(required if not resetting to default)
* `:value` - the value of the parameter, can be set to `:default` to set
the parameter back to the factory default value (required)
* `:param_number` - the configuration parameter number to set (required)
## Size
The size of the parameter is one of the values `1`, `2`, or `4`: the
number of bytes for the configuration parameter value. This should be
documented in the user manual for your device.
## Factory reset a param
If you want to factory reset a configuration parameter you can pass
`:default` as the `:value` param
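## Example
A minimal sketch (the parameter number and size are device specific):
{:ok, command} = Grizzly.ZWave.Commands.ConfigurationSet.new(param_number: 101, size: 1, value: 0)
# factory reset the same parameter
{:ok, command} = Grizzly.ZWave.Commands.ConfigurationSet.new(param_number: 101, value: :default)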
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.Configuration
@type param ::
{:size, 1 | 2 | 4} | {:value, integer() | :default} | {:param_number, byte()}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
command = %Command{
name: :configuration_set,
command_byte: 0x04,
command_class: Configuration,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
if Command.param!(command, :value) == :default do
encode_default(command)
else
encode_value_set(command)
end
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(<<param_number, 1::size(1), _rest::size(7), _>>) do
{:ok, [param_number: param_number, value: :default]}
end
def decode_params(<<param_number, _::size(5), size::size(3), value::binary>>) do
<<value_int::signed-integer-size(size)-unit(8)>> = value
{:ok, [param_number: param_number, value: value_int, size: size]}
end
defp encode_default(command) do
param_num = Command.param!(command, :param_number)
# 0x81 is the default flag with the size at 1 byte
# we provide a 0 value at the end
# According to the spec the value byte has to be part of the command but if
# the default flag is set this will be ignored
<<param_num, 0x81, 0x00>>
end
defp encode_value_set(command) do
param_num = Command.param!(command, :param_number)
size = Command.param!(command, :size)
value = Command.param!(command, :value)
value_bin = <<value::signed-integer-size(size)-unit(8)>>
<<param_num, size>> <> value_bin
end
end
|
lib/grizzly/zwave/commands/configuration_set.ex
| 0.872673
| 0.609379
|
configuration_set.ex
|
starcoder
|
defmodule Bonny.Server.Watcher do
@moduledoc """
Continuously watches a list `K8s.Operation` for `add`, `modify`, and `delete` events.
"""
@callback add(map()) :: :ok | :error
@callback modify(map()) :: :ok | :error
@callback delete(map()) :: :ok | :error
@doc """
[`K8s.Operation`](https://hexdocs.pm/k8s/K8s.Operation.html) to watch.
## Examples
Log all pod lifecycle events
```elixir
defmodule PodLifecycleLogger do
use Bonny.Server.Watcher
@impl true
def watch_operation() do
K8s.Client.list("v1", :pods, namespace: :all)
end
@impl true
def add(pod) do
log_event(:add, pod)
end
@impl true
def modify(pod) do
log_event(:modify, pod)
end
@impl true
def delete(pod) do
log_event(:delete, pod)
end
@spec log_event(atom, map) :: :ok
def log_event(type, pod) do
name = get_in(pod, ["metadata", "name"])
namespace = get_in(pod, ["metadata", "namespace"]) || "default"
# log type,name,namespace here
end
end
```
"""
@callback watch_operation() :: K8s.Operation.t()
alias Bonny.Server.Watcher.{State, ResourceVersion}
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Bonny.Server.Watcher
use GenServer
@initial_delay opts[:initial_delay] || 500
@client opts[:client] || K8s.Client
alias Bonny.Server.Watcher.State
def start_link(), do: start_link([])
def start_link(opts) do
GenServer.start_link(__MODULE__, :ok, opts)
end
@doc false
@spec client() :: any()
def client(), do: @client
@impl GenServer
def init(:ok) do
Bonny.Sys.Event.watcher_initialized(%{}, %{module: __MODULE__})
Process.send_after(self(), :watch, @initial_delay)
{:ok, State.new()}
end
@impl GenServer
def handle_info(:watch, %State{} = state) do
rv =
case state.resource_version do
nil ->
resp = Bonny.Server.Watcher.ResourceVersion.get(watch_operation())
case resp do
{:ok, rv} ->
rv
{:error, _} ->
"0"
end
rv ->
rv
end
Bonny.Sys.Event.watcher_watch_started(%{}, %{module: __MODULE__})
Bonny.Server.Watcher.watch(__MODULE__, rv, self())
{:noreply, state}
end
@impl GenServer
def handle_info(%HTTPoison.AsyncHeaders{}, state), do: {:noreply, state}
@impl GenServer
def handle_info(%HTTPoison.AsyncStatus{code: 200}, state) do
Bonny.Sys.Event.watcher_watch_succeeded(%{}, %{module: __MODULE__})
{:noreply, state}
end
@impl GenServer
def handle_info(%HTTPoison.AsyncStatus{code: code}, state) do
Bonny.Sys.Event.watcher_watch_failed(%{}, %{module: __MODULE__, code: code})
{:stop, :normal, state}
end
@impl GenServer
def handle_info(%HTTPoison.AsyncChunk{chunk: chunk}, %State{resource_version: rv} = state) do
Bonny.Sys.Event.watcher_chunk_received(%{}, %{module: __MODULE__, rv: rv})
{lines, buffer} =
state.buffer
|> Bonny.Server.Watcher.ResponseBuffer.add_chunk(chunk)
|> Bonny.Server.Watcher.ResponseBuffer.get_lines()
case Bonny.Server.Watcher.process_lines(lines, rv, __MODULE__) do
{:ok, new_rv} ->
{:noreply, %State{state | buffer: buffer, resource_version: new_rv}}
{:error, :gone} ->
{:stop, :normal, state}
end
end
@impl GenServer
def handle_info(%HTTPoison.AsyncEnd{}, %State{} = state) do
Bonny.Sys.Event.watcher_watch_finished(%{}, %{module: __MODULE__})
send(self(), :watch)
{:noreply, state}
end
@impl GenServer
def handle_info(%HTTPoison.Error{reason: {:closed, :timeout}}, %State{} = state) do
Bonny.Sys.Event.watcher_watch_timedout(%{}, %{module: __MODULE__})
send(self(), :watch)
{:noreply, state}
end
@impl GenServer
def handle_info({:DOWN, _ref, :process, _pid, _reason}, %State{} = state) do
Bonny.Sys.Event.watcher_genserver_down(%{}, %{module: __MODULE__})
{:stop, :normal, state}
end
@impl GenServer
def handle_info(_other, %State{} = state) do
{:noreply, state}
end
end
end
@spec watch(module(), binary(), pid()) :: nil
def watch(module, rv, pid) do
operation = module.watch_operation()
cluster = Bonny.Config.cluster_name()
timeout = 5 * 60 * 1000
client = module.client()
client.watch(operation, cluster,
params: %{resourceVersion: rv},
stream_to: pid,
recv_timeout: timeout
)
nil
end
@spec process_lines(list(binary()), binary(), module()) :: {:ok, binary} | {:error, :gone}
def process_lines(lines, rv, module) do
Enum.reduce(lines, {:ok, rv}, fn line, status ->
case status do
{:ok, current_rv} ->
process_line(line, current_rv, module)
{:error, :gone} ->
{:error, :gone}
end
end)
end
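# A single watch line as streamed by the Kubernetes API looks roughly like
# this (illustrative and abbreviated):
#     {"type":"ADDED","object":{"kind":"Pod","metadata":{"resourceVersion":"42"}}}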
@spec process_line(binary(), binary(), module()) :: {:ok, binary} | {:error, :gone}
def process_line(line, current_rv, module) do
%{"type" => type, "object" => raw_object} = Jason.decode!(line)
case ResourceVersion.extract_rv(raw_object) do
{:gone, _message} ->
{:error, :gone}
^current_rv ->
{:ok, current_rv}
new_rv ->
dispatch(%{"type" => type, "object" => raw_object}, module)
{:ok, new_rv}
end
end
@doc """
Dispatches `ADDED`, `MODIFIED`, and `DELETED` events to a controller
"""
@spec dispatch(map, atom) :: {:ok, pid}
def dispatch(%{"type" => "ADDED", "object" => object}, controller),
do: do_dispatch(controller, :add, object)
def dispatch(%{"type" => "MODIFIED", "object" => object}, controller),
do: do_dispatch(controller, :modify, object)
def dispatch(%{"type" => "DELETED", "object" => object}, controller),
do: do_dispatch(controller, :delete, object)
@spec do_dispatch(atom, atom, map) :: {:ok, pid}
defp do_dispatch(controller, event, object) do
Task.start(fn ->
apply(controller, event, [object])
end)
end
end
|
lib/bonny/server/watcher.ex
| 0.869548
| 0.609001
|
watcher.ex
|
starcoder
|
defmodule Search.Document do
@moduledoc """
Provides a behaviour and common functions for managing Elasticsearch Documents.
"""
# requires
require Logger
alias Ecto.{Query}
alias Search.Indices.BulkIndex
## Module Attributes
@empty_value "______"
@doc """
Hook to allow for manipulation of the data / record before serialization.
Result of the function gets passed to the serializer.
"""
@callback before_serialize(record :: struct(), related_data :: map | nil) :: struct()
@doc """
Constructs the data needed to send to Elasticsearch.
"""
@callback build_payload(id :: non_neg_integer | String.t()) :: map
@callback build_payload(record :: struct(), related_data :: map | nil) :: map
@doc """
Convenience function to perform bulk indexing of the resource.
"""
@callback bulk_index :: BulkIndex.res()
@callback bulk_index(nil | BulkIndex.options()) :: BulkIndex.res()
@doc """
Called after building payload. Allows for modification / customization of payload before indexing.
"""
@callback customize_payload(resource :: map, record :: struct()) :: map
@doc """
Hook to allow the implementing module to customize the individual include before copying to the document root.
This can be helpful to denormalize parent data into child include to make authorization possible.
"""
@callback customize_root_include(related :: map, resource :: map) :: map
@doc """
Base query used to find records for serialization before indexing.
Helpful to include associations necessary for serialization.
"""
@callback query(preload_associations? :: boolean) :: Query.t()
@doc """
Convenience function to define associations to load before building the document payload.
"""
@callback preload_associations() :: [] | [atom]
## Functions
@doc """
All the child modules using the Document behaviour.
"""
@spec document_modules() :: [module]
def document_modules do
with {:ok, list} <- :application.get_key(:search, :modules) do
list
|> Enum.filter(fn module ->
attrs = module.__info__(:attributes)
__MODULE__ in List.wrap(attrs[:behaviour])
end)
end
end
@doc """
Convenience function for accessing the value used to replace illegal values.
"""
@spec empty_value :: String.t()
def empty_value, do: @empty_value
def find_illegal_values(data) when is_map(data) do
data
|> Enum.filter(fn {_k, v} ->
v == nil || v == "" || v == [] || v == [""] || v == [nil]
end)
end
@spec log_msg(String.t()) :: no_return
def log_msg(msg) do
Logger.debug(fn ->
msg
end)
end
@spec replace_illegal_values(nil | String.t() | list | map) :: String.t() | list | map
def replace_illegal_values(nil), do: @empty_value
def replace_illegal_values(""), do: @empty_value
def replace_illegal_values([nil]), do: [@empty_value]
def replace_illegal_values([""]), do: [@empty_value]
def replace_illegal_values([]), do: [@empty_value]
def replace_illegal_values(pairs = [_head | _tail]) do
pairs
|> Enum.into(%{}, fn {k, v} ->
{k, replace_illegal_values(v)}
end)
end
def replace_illegal_values(resource = %{"data" => %{"attributes" => attributes}}) do
updated_attributes =
attributes
|> replace_illegal_values()
put_in(resource, ["data", "attributes"], updated_attributes)
end
def replace_illegal_values(data) when is_map(data) do
illegal = data |> find_illegal_values()
case illegal do
# nothing illegal was found. move along
[] ->
data
[_head | _tail] ->
legal_map =
illegal
|> replace_illegal_values()
Map.merge(data, legal_map)
end
end
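# Example (illustrative):
#     replace_illegal_values(%{"title" => nil, "body" => "text"})
#     #=> %{"title" => "______", "body" => "text"}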
# Macros
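# Typical `configure/1` use in a document module (values are illustrative):
#     configure(
#       index_name: "articles",
#       repo: MyApp.Repo,
#       schema_module: MyApp.Article,
#       serializer_module: MyApp.ArticleSerializer,
#       type: "article",
#       versioned_index_name: "v1-articles"
#     )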
defmacro configure(config) do
define_configuration(config)
end
defmacro document(config) do
define_document(config)
end
defp define_configuration(config) do
index_name = config[:index_name]
repo = config[:repo]
schema_module = config[:schema_module]
serializer_module = config[:serializer_module]
type = config[:type]
versioned_index_name = config[:versioned_index_name]
quote location: :keep do
@doc """
Base index name.
"""
@spec index_name :: String.t()
def index_name, do: unquote(index_name)
@doc """
Convenience function to reference the Ecto Repo from which to find the records.
"""
@spec repo() :: Repo.t()
def repo, do: unquote(repo)
@doc """
The module that defines the Ecto.Schema
"""
def schema_module, do: unquote(schema_module)
@doc """
Convenience function to reference the module that handles serialization to JSONAPI
"""
@spec serializer_module() :: module
def serializer_module, do: unquote(serializer_module)
@doc """
Type name for the Elasticsearch document.
"""
@spec type :: String.t()
def type, do: unquote(type)
@doc """
Index name with the version prefixed.
"""
@spec versioned_index_name :: String.t()
def versioned_index_name, do: unquote(versioned_index_name)
end
end
def define_document(config) do
include = config[:include] || ""
root_included = config[:root_included] || []
quote location: :keep do
@doc """
Convenience function for accessing the value used to replace illegal values.
"""
@spec empty_value :: String.t()
def empty_value, do: unquote(empty_value())
@doc """
Define what relationships to include in the JSONAPI document sent to Elastisearch.
Use a comma separated string of names: "some-resource,bar"
"""
@spec include :: String.t()
def include, do: unquote(include)
@doc """
Defines the JSONAPI types to extract out of the JSONAPI included property.
"""
@spec root_included :: list
def root_included, do: unquote(root_included)
end
end
defmacro __using__(_) do
quote location: :keep do
## Requires
require Ecto.Query
## Imports
import Destructure
import unquote(__MODULE__)
## Behaviours
@behaviour Search.Document
## Aliases
alias Elastix.Document
alias Search.Indices.BulkIndex
@doc """
Hook to allow for manipulation of the data / record before serialization.
Result of the function gets passed to the serializer.
"""
@impl true
def before_serialize(record, _related_data), do: record
@doc """
Builds a payload to be indexed.
"""
@impl true
def build_payload(id) when is_binary(id) or is_integer(id) do
record = repo().get(schema_module(), id)
build_payload(record, nil)
end
# An explicit clause replaces the former `related_data \\ nil` default, which
# would generate a build_payload/1 that conflicts with the clause above.
def build_payload(record = %{__struct__: _}), do: build_payload(record, nil)
@impl true
def build_payload(record = %{__struct__: _}, related_data) do
record = repo().preload(record, preload_associations())
record = before_serialize(record, related_data)
serializer_module()
|> JaSerializer.format(record, %{}, include: include())
|> customize_payload(record)
|> included_to_root()
|> replace_illegal_values()
end
@doc """
Convenience function to perform bulk indexing of the resource.
"""
@impl true
def bulk_index, do: bulk_index([])
@impl true
def bulk_index(opts) do
BulkIndex.perform(__MODULE__, opts)
end
@impl true
def customize_root_include(related, _resource), do: related
@doc """
Hook that allows the implementing module to customize the payload before the
data is sent to Elasticsearch for indexing.
"""
@impl true
def customize_payload(resource, _record), do: resource
@doc """
Delete a document from search.
"""
@spec delete(struct() | non_neg_integer | String.t()) :: {:ok | :error, %HTTPoison.Response{}}
def delete(%{__struct__: _, id: id}), do: delete(id)
def delete(id) when is_integer(id), do: delete(to_string(id))
def delete(id) when is_binary(id) do
Document.delete(Search.elasticsearch_url(), versioned_index_name(), type(), id)
end
@doc """
Fetches the document from search by the document id.
"""
@spec get(struct() | non_neg_integer | String.t()) :: {:ok | :error, %HTTPoison.Response{}}
def get(%{__struct__: _, id: id}), do: get(id)
def get(id) when is_binary(id) or is_integer(id) do
Document.get(Search.elasticsearch_url(), versioned_index_name(), type(), id)
end
@doc """
Indexes the document by sending the document to Elasticsearch.
"""
@spec index(record :: struct | id :: non_neg_integer | id :: String.t()) ::
{:ok | :error, %HTTPoison.Response{}}
def index(record = %{__struct__: _, id: id}) do
data = record |> build_payload()
index = versioned_index_name()
log_msg("Indexing #{index}, id: #{id}")
Document.index(Search.elasticsearch_url(), index, type(), id, data)
end
def index(id) when is_binary(id) or is_integer(id) do
record = repo().get(schema_module(), id)
index(record)
end
@doc """
Copies `included` data from the document, and nests the included data in the root of the document.
Allows for mapping and filtering by related include data by denormalizing / splitting the `type` into
it's own property so it can be mapped independently of all `includes`.
"""
@spec included_to_root(resource :: map) :: map
def included_to_root(resource) do
related = root_included()
nested_includes =
related
|> Enum.into(%{}, fn type ->
related =
resource
|> Map.get("included", [])
|> Enum.filter(fn include ->
Map.get(include, "type") == type
end)
|> Enum.map(fn rel ->
customize_root_include(rel, resource)
end)
{type, related}
end)
Map.merge(resource, nested_includes)
end
defoverridable before_serialize: 2, bulk_index: 1, customize_root_include: 2, customize_payload: 2
end
end
end
|
lib/elastic_jsonapi/document.ex
| 0.837254
| 0.455078
|
document.ex
|
starcoder
|
defmodule Phoenix.LiveController do
@moduledoc ~S"""
Controller-style abstraction for building multi-action live views on top of `Phoenix.LiveView`.
`Phoenix.LiveView` API differs from `Phoenix.Controller` API in order to emphasize stateful
lifecycle of live views, support long-lived processes behind them and accommodate their much
looser ties with the router. Contrary to HTTP requests that are rendered and discarded, live
actions are mounted and their processes stay alive to handle events & miscellaneous process
interactions and to re-render as many times as necessary. Because of these extra complexities, the
library drives developers towards a single live view per router action.
At the same time, `Phoenix.LiveView` provides a complete solution for router-aware live navigation
and it introduces the concept of live actions both in routing and in the live socket. These
features mean that many live views may play a role similar to classic controllers.
It's all about efficient code organization - just like a complex live view's code may need to be
broken into multiple modules or live components, a bunch of simple live actions centered around
similar topic or resource may be best organized into a single live view module, keeping the
related web logic together and giving the room to share common code. That's where
`Phoenix.LiveController` comes in: to organize live view code that covers multiple live actions in
a fashion similar to how Phoenix controllers organize multiple HTTP actions. It provides a
pragmatic convention that still keeps pieces of a stateful picture visible by enforcing clear
function annotations.
Here's an exact live equivalent of an HTML controller generated with the `mix phx.gen.html Blog
Article articles ...` scaffold, powered by `Phoenix.LiveController`:
# lib/my_app_web.ex
defmodule MyAppWeb do
def live_controller do
quote do
use Phoenix.LiveController
alias MyAppWeb.Router.Helpers, as: Routes
end
end
end
# lib/my_app_web/router.ex
defmodule MyAppWeb.Router do
scope "/", MyAppWeb do
live "/articles", ArticleLive, :index
live "/articles/new", ArticleLive, :new
live "/articles/:id", ArticleLive, :show
live "/articles/:id/edit", ArticleLive, :edit
end
end
# lib/my_app_web/live/article_live.ex
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
alias MyApp.Blog
alias MyApp.Blog.Article
@action_handler true
def index(socket, _params) do
articles = Blog.list_articles()
assign(socket, articles: articles)
end
@action_handler true
def new(socket, _params) do
changeset = Blog.change_article(%Article{})
assign(socket, changeset: changeset)
end
@event_handler true
def create(socket, %{"article" => article_params}) do
case Blog.create_article(article_params) do
{:ok, article} ->
socket
|> put_flash(:info, "Article created successfully.")
|> push_redirect(to: Routes.article_path(socket, :show, article))
{:error, %Ecto.Changeset{} = changeset} ->
assign(socket, changeset: changeset)
end
end
@action_handler true
def show(socket, %{"id" => id}) do
article = Blog.get_article!(id)
assign(socket, article: article)
end
@action_handler true
def edit(socket, %{"id" => id}) do
article = Blog.get_article!(id)
changeset = Blog.change_article(article)
assign(socket, article: article, changeset: changeset)
end
@event_handler true
def update(socket, %{"article" => article_params}) do
article = socket.assigns.article
case Blog.update_article(article, article_params) do
{:ok, article} ->
socket
|> put_flash(:info, "Article updated successfully.")
|> push_redirect(to: Routes.article_path(socket, :show, article))
{:error, %Ecto.Changeset{} = changeset} ->
assign(socket, article: article, changeset: changeset)
end
end
@event_handler true
def delete(socket, %{"id" => id}) do
article = Blog.get_article!(id)
{:ok, _article} = Blog.delete_article(article)
socket
|> put_flash(:info, "Article deleted successfully.")
|> push_redirect(to: Routes.article_path(socket, :index))
end
end
`Phoenix.LiveController` is not meant to be a replacement of `Phoenix.LiveView` - although most
live views may be represented with it, it will likely prove beneficial only for specific kinds of
live views. These include live views with following traits:
* Orientation around the same resource, e.g. web code for a specific context like in `mix phx.gen.html`
* Mounting or event handling code that's mostly action-specific
* Param handling code that's action-specific and prevails over global mounting code
* Common redirecting logic executed before mounting or event handling, e.g. auth logic
## Mounting actions
*Action handlers* replace `c:Phoenix.LiveView.mount/3` entry point in order to split mounting of
specific live actions into separate functions. They are annotated with `@action_handler true` and,
just like with Phoenix controller actions, their name is the name of the action they mount.
# lib/my_app_web/router.ex
defmodule MyAppWeb.Router do
scope "/", MyAppWeb do
live "/articles", ArticleLive, :index
live "/articles/:id", ArticleLive, :show
end
end
# lib/my_app_web/live/article_live.ex
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
def index(socket, _params) do
articles = Blog.list_articles()
assign(socket, articles: articles)
end
@action_handler true
def show(socket, %{"id" => id}) do
article = Blog.get_article!(id)
assign(socket, article: article)
end
end
Note that action handlers don't have to wrap the resulting socket in a tuple, which also brings
them closer to Phoenix controller actions.
## Handling events
*Event handlers* replace `c:Phoenix.LiveView.handle_event/3` callbacks in order to make the event
handling code consistent with the action handling code. These functions are annotated with
`@event_handler true` and their name is the name of the event they handle.
# lib/my_app_web/templates/article/*.html.leex
<%= link "Delete", to: "#", phx_click: :delete, phx_value_id: article.id, data: [confirm: "Are you sure?"] %>
# lib/my_app_web/live/article_live.ex
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@event_handler true
def delete(socket, %{"id" => id}) do
article = Blog.get_article!(id)
{:ok, _article} = Blog.delete_article(article)
socket
|> put_flash(:info, "Article deleted successfully.")
|> push_redirect(to: Routes.article_path(socket, :index))
end
end
Live views may also reply to client-side events, providing the reply payload. While regular live
views do this by returning the `{:reply, payload, socket}` tuple, live controllers may also use
the `reply/2` helper function to achieve the same result.
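For example, a hypothetical event handler could reply with a suggestions payload
(`Blog.suggest/1` is illustrative):
@event_handler true
def suggest(socket, %{"query" => query}) do
reply(socket, %{suggestions: Blog.suggest(query)})
end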
Note that, consistently with action handlers, event handlers don't have to wrap the resulting
socket in `{:noreply, socket}` or `{:reply, payload, socket}` tuple.
Also note that, as a security measure, LiveController won't convert binary names of events that
don't have corresponding event handlers into atoms that wouldn't be garbage collected.
## Handling process messages
*Message handlers* offer an alternative (but not a replacement) to
`c:Phoenix.LiveView.handle_info/2` for handling process messages in a fashion consistent with
action and event handlers. These functions are annotated with `@message_handler true` and their
name equals to a "label" atom extracted from the supported message payload:
- for atom payloads: that atom (e.g. `:refresh_article`)
- for tuple payloads: an atom placed as first element in a tuple (e.g. `{:article_update, ...}`)
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
def show(socket, %{"id" => id}) do
:timer.send_interval(5_000, self(), :refresh_article)
assign(socket, article: Blog.get_article!(id))
end
@message_handler true
def refresh_article(socket, _message) do
assign(socket, article: Blog.get_article!(socket.assigns.article.id))
end
end
Support for handling messages wrapped in tuples allows to incorporate `Phoenix.PubSub` in
live controllers in effortless and consistent way.
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
alias Phoenix.PubSub
@action_handler true
def show(socket, %{"id" => id}) do
article = Blog.get_article!(id)
PubSub.subscribe(MyApp.PubSub, "article:#{article.id}")
assign(socket, article: Blog.get_article!(id))
end
@message_handler true
def article_update(socket, {_, article}) do
assign(socket, article: article)
end
@event_handler true
def update(socket, %{"article" => article_params}) do
article = socket.assigns.article
case Blog.update_article(article, article_params) do
{:ok, article} ->
PubSub.broadcast(MyApp.PubSub, "article:#{article.id}", {:article_update, article})
socket
|> put_flash(:info, "Article updated successfully.")
|> push_redirect(to: Routes.article_path(socket, :show, article))
{:error, %Ecto.Changeset{} = changeset} ->
assign(socket, article: article, changeset: changeset)
end
end
For messages that can't be handled by message handlers, a specific implementation of
`c:Phoenix.LiveView.handle_info/3` may still be provided.
Note that, consistently with action & event handlers, message handlers don't have to wrap the
resulting socket in the `{:noreply, socket}` tuple.
## Updating params without redirect
For live views that [implement parameter
patching](https://hexdocs.pm/phoenix_live_view/Phoenix.LiveView.html#module-live-navigation) (e.g.
to avoid re-mounting the live view & resetting its DOM or state), action handlers also replace
`c:Phoenix.LiveView.handle_params/3` callbacks. The same action handler is called once when
mounting and then it's called again whenever params are patched.
This means that parameter patching is supported out-of-the-box for action handlers that work just
as fine for initial mount as for subsequent parameter changes.
# lib/my_app_web/templates/article/index.html.leex
<%= live_patch "Page 2", to: Routes.article_path(@socket, :index, page: "2") %>
# lib/my_app_web/live/article_live.ex
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
def index(socket, params) do
articles = Blog.list_articles(page: params["page"])
assign(socket, articles: articles)
end
end
Using the `mounted?/1` helper, action handlers may conditionally invoke parts of their logic
depending on whether the socket was already mounted, e.g. to start timers or run expensive,
param-independent loads only upon the first mount.
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
def index(socket, params) do
if connected?(socket) && !mounted?(socket),
do: :timer.send_interval(5_000, self(), :check_for_new_articles)
socket = unless mounted?(socket),
do: assign(socket, tags: Blog.list_tags()),
else: socket
articles = Blog.list_articles(page: params["page"])
assign(socket, articles: articles)
end
end
Note that an action handler will only be called once when mounting, even though native LiveView
calls both `mount/3` and `handle_params/3` at that moment.
## Chaining & plugs
Phoenix controllers are [backed by the power of Plug
pipelines](https://hexdocs.pm/phoenix/Phoenix.Controller.html#module-plug-pipeline) in order to
organize common code called before actions and to allow halting early. LiveController provides
similar solution for these problems via `plug/1` macro supported by the `chain/2`
helper function.
`plug/1` allows to define callbacks that are called in a chain in order to act on a socket before
an actual action, event or message handler is called:
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
plug :require_authenticated_user
defp require_authenticated_user(socket = %{assigns: %{current_user: user}}) do
if user do
socket
else
socket
|> put_flash(:error, "You must log in first.")
|> push_redirect(to: "/")
end
end
end
There are multiple ways to specify a plug callback:
- `plug :require_authenticated_user` - calls local function with `socket` argument
- `plug LiveUserAuth` - calls external module's `call` function with `socket` argument
- `plug {LiveUserAuth, :require_authenticated_user}` - calls external function with `socket` argument
- `plug require_authenticated_user(...args)` - calls local function with arbitrary args
- `plug LiveUserAuth.require_authenticated_user(...args)` - calls external function with arbitrary args
> **Note**: `Phoenix.LiveController.Plug` behaviour is available for defining module plugs that
> are expected to expose a single `call(socket)` plug function (second case above).
It's possible to scope given plug to only a subset of handlers with the `when` condition.
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
plug :require_authenticated_user when action not in [:index, :show]
end
Following variables may be referenced when specifying arbitrary args or the `when` condition:
* `socket` - current LiveView socket (`Phoenix.LiveView.Socket` struct)
* `name` - handler name (atom)
* `action` - action handler name (atom or `nil`)
* `event` - event handler name (atom or `nil`)
* `params` - action or event params (map or `nil`)
* `message` - message payload (atom/tuple or `nil`)
All plug forms may be freely mixed with the `when` conditions.
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
plug require_user_role(socket, :admin)
plug fetch_own_article(socket, params) when action in [:edit] or event in [:update, :delete]
defp require_user_role(socket = %{assigns: %{current_user: user}}, required_role) do
if user.role == required_role do
socket
else
socket
|> put_flash(:error, "You must be #{required_role} in order to continue.")
|> push_redirect(to: "/")
end
end
defp fetch_own_article(
socket = %{assigns: %{current_user: %{id: user_id}}},
%{"id" => article_id}
) do
case Blog.get_article!(article_id) do
article = %{author_id: ^user_id} ->
assign(socket, :article, article)
_ ->
socket
|> put_flash(:error, "You can't modify someone else's article.")
|> push_redirect(to: "/")
end
end
end
> **Pro tip**: Condition in `when` is not really a guard and it's evaluated in runtime, therefore
> it's possible to call any function within it. This makes it easy, for example, to only call a
> plug upon mounting and/or only when socket is connected:
>
> ```
> plug fetch_article(socket, params) when not mounted?(socket)
> plug start_counter(socket) when connected?(socket) and not mounted?(socket)
> ```
If multiple plugs are defined, they'll be called in a chain. If any of them redirects the socket,
replies to an event by calling `reply/2` or returns a tuple instead of just a socket, then the
chain is halted, which also prevents the action, event or message handler from being called.
This is guaranteed by internal use of the `chain/2` function. This simple helper invokes a given
socket-taking, socket-returning function only if the socket wasn't previously redirected, marked
for reply or wrapped in a tuple, and passes the socket through otherwise. It may also be used
inside plug or handler code for a similar result:
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
def edit(socket, %{"id" => id}) do
socket
|> require_authenticated_user()
|> chain(&assign(&1, article: Blog.get_article!(id)))
|> chain(&authorize_article_author(&1, &1.assigns.article))
|> chain(&assign(&1, changeset: Blog.change_article(&1.assigns.article)))
end
end
After all plugs are called without halting the chain, `c:action_handler/3`, `c:event_handler/3`
and `c:message_handler/3` - rough equivalents of
[`action/2`](https://hexdocs.pm/phoenix/Phoenix.Controller.html#module-overriding-action-2-for-custom-arguments)
plug in Phoenix controllers - complete the pipeline by calling functions named after specific
actions, events or messages.
## Applying session
Session, previously passed to `c:Phoenix.LiveView.mount/3`, may now be accessed via the
`get_session/1` and `get_session/2` helpers in plugs and handlers.
defmodule MyAppWeb.LiveUserAuth do
def fetch_current_user(socket) do
user_token = get_session(socket, :user_token)
user = user_token && Accounts.get_user_by_session_token(user_token)
assign(socket, current_user: user)
end
end
Combined with plugs, this makes it easy to implement app-wide session handling.
defmodule MyAppWeb do
def live_controller do
quote do
use Phoenix.LiveController
# ...
plug {MyAppWeb.LiveUserAuth, :fetch_current_user}
end
end
end
## Accessing current URL
Handlers and plugs may access the current live URL (previously passed by LiveView to
`c:Phoenix.LiveView.handle_params/3` callback) via the `get_current_url/1` helper function.
## Specifying mount options
Mount options, such as `temporary_assigns`, may be specified for every action using the
`@action_mount_opts` annotation.
defmodule MyAppWeb.ArticleLive do
use MyAppWeb, :live_controller
@action_handler true
@action_mount_opts temporary_assigns: [articles: []]
def index(socket, _params) do
articles = Blog.list_articles()
assign(socket, articles: articles)
end
end
## Specifying `use Phoenix.LiveView` options
Any options that were previously passed to `use Phoenix.LiveView`, such as `:layout` or
`:container`, may now be passed to `use Phoenix.LiveController`.
use Phoenix.LiveController, layout: {MyAppWeb.LayoutView, "live.html"}
## Rendering actions
Implementation of the `c:Phoenix.LiveView.render/1` callback may be omitted in which case the
default implementation will be injected. It'll ask the view module named after the specific live
module to render the HTML template named after the action - the same way that Phoenix controllers
do when `Phoenix.Controller.render/2` is called without a template name.
For example, `MyAppWeb.ArticleLive` mounted with `:index` action will render with following call:
MyAppWeb.ArticleView.render("index.html", assigns)
Custom `c:Phoenix.LiveView.render/1` implementation may still be provided if necessary.
"""
alias Phoenix.LiveView.Socket
alias __MODULE__.{ControllerState}
@doc ~S"""
Invokes action handler for specific action.
It can be overridden, e.g. in order to modify the list of arguments passed to action handlers.
@impl true
def action_handler(socket, name, params) do
apply(__MODULE__, name, [socket, params, socket.assigns.current_user])
end
It can be wrapped, e.g. for sake of logging or modifying the socket returned from action handlers.
@impl true
def action_handler(socket, name, params) do
Logger.debug("#{__MODULE__} started handling #{name}")
socket = super(socket, name, params)
Logger.debug("#{__MODULE__} finished handling #{name}")
socket
end
Read more about the role that this callback plays in the live controller pipeline in docs for
`Phoenix.LiveController`.
"""
@callback action_handler(
socket :: Socket.t(),
name :: atom,
params :: Socket.unsigned_params()
) :: Socket.t() | {:noreply, Socket.t()}
@doc ~S"""
Invokes event handler for specific event.
It works in an analogous way and opens analogous possibilities to `c:action_handler/3`.
Read more about the role that this callback plays in the live controller pipeline in docs for
`Phoenix.LiveController`.
"""
@callback event_handler(
socket :: Socket.t(),
name :: atom,
params :: Socket.unsigned_params()
) :: Socket.t() | {:noreply, Socket.t()} | {:reply, term(), Socket.t()}
@doc ~S"""
Invokes message handler for specific message.
It works in an analogous way and opens analogous possibilities to `c:action_handler/3`.
Read more about the role that this callback plays in the live controller pipeline in docs for
`Phoenix.LiveController`.
"""
@callback message_handler(
socket :: Socket.t(),
name :: atom,
message :: any
) :: Socket.t() | {:noreply, Socket.t()}
@optional_callbacks action_handler: 3,
event_handler: 3,
message_handler: 3
defmacro __using__(opts) do
quote do
use Phoenix.LiveView, unquote(opts)
@behaviour unquote(__MODULE__)
Module.register_attribute(__MODULE__, :actions, accumulate: true)
Module.register_attribute(__MODULE__, :actions_mount_opts, accumulate: true)
Module.register_attribute(__MODULE__, :events, accumulate: true)
Module.register_attribute(__MODULE__, :messages, accumulate: true)
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@on_definition unquote(__MODULE__)
@before_compile unquote(__MODULE__)
import unquote(__MODULE__)
# Implementations of Phoenix.LiveView callbacks
def mount(params, session, socket),
do:
Phoenix.LiveController.LiveViewCallbacks.mount(
__MODULE__,
params,
session,
socket
)
def handle_params(params, url, socket),
do:
Phoenix.LiveController.LiveViewCallbacks.handle_params(
__MODULE__,
&__live_controller_before__(&1, :action, &2, &3),
params,
url,
socket
)
def handle_event(event_string, params, socket),
do:
Phoenix.LiveController.LiveViewCallbacks.handle_event(
__MODULE__,
&__live_controller_before__(&1, :event, &2, &3),
event_string,
params,
socket
)
def render(assigns = %{live_action: action}),
do: view_module().render("#{action}.html", assigns)
# Default implementations of Phoenix.LiveController callbacks
def action_handler(socket, name, params),
do: apply(__MODULE__, name, [socket, params])
def event_handler(socket, name, params),
do: apply(__MODULE__, name, [socket, params])
def message_handler(socket, name, message),
do: apply(__MODULE__, name, [socket, message])
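# Derives the view module from the live module name, e.g.
# MyAppWeb.ArticleLive -> MyAppWeb.ArticleView (the view module must exist).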
def view_module,
do:
__MODULE__
|> to_string()
|> String.replace(~r/(Live|LiveController|Controller)?$/, "View", global: false)
|> String.to_existing_atom()
defoverridable action_handler: 3,
event_handler: 3,
message_handler: 3,
render: 1,
view_module: 0
end
end
defmacro __before_compile__(env) do
quote do
Module.delete_attribute(__MODULE__, :action_handler)
Module.delete_attribute(__MODULE__, :event_handler)
Module.delete_attribute(__MODULE__, :message_handler)
@doc false
def __live_controller__(:actions), do: @actions
def __live_controller__(:action_mount_opts), do: @actions_mount_opts
def __live_controller__(:events), do: @events
def __live_controller__(:messages), do: @messages
# Catch-all inserted late in order to allow misc clauses to match before it
def handle_info(message, socket),
do:
Phoenix.LiveController.LiveViewCallbacks.handle_message(
__MODULE__,
&__live_controller_before__(&1, :message, &2, &3),
message,
socket
)
unquote(Phoenix.LiveController.PlugChain.build_before(env.module))
end
end
def __on_definition__(env, _kind, name, _args, _guards, _body) do
action = pull_handler_attribute(env.module, :action_handler, :actions, name)
pull_handler_attribute(env.module, :event_handler, :events, name)
pull_handler_attribute(env.module, :message_handler, :messages, name)
if action do
action_mount_opts = Module.delete_attribute(env.module, :action_mount_opts)
actions_mount_opts = Module.get_attribute(env.module, :actions_mount_opts)
added = Enum.any?(actions_mount_opts, fn {for_action, _} -> for_action == action end)
unless added do
Module.put_attribute(env.module, :actions_mount_opts, {action, action_mount_opts})
end
end
end
defp pull_handler_attribute(module, source_attr, target_attr, name) do
with true <- Module.delete_attribute(module, source_attr),
current_names = Module.get_attribute(module, target_attr),
false <- Enum.member?(current_names, name) do
Module.put_attribute(module, target_attr, name)
name
else
_ -> nil
end
end
@doc """
Defines a callback that acts on a socket before an action, event or message handler.
Read more about the role that this macro plays in the live controller pipeline in docs for
`Phoenix.LiveController`.
"""
defmacro plug(target) do
{target, conditions} = extract_when(target)
{target_mod, target_fun, args} =
case target do
atom when is_atom(atom) -> {nil, atom, nil}
ast = {:__aliases__, _meta, _parts} -> {Macro.expand(ast, __CALLER__), :call, nil}
{ast = {:__aliases__, _meta, _parts}, fun} -> {Macro.expand(ast, __CALLER__), fun, nil}
{fun, _meta, args} -> {nil, fun, args}
end
plug = {__CALLER__, args, conditions, target_mod, target_fun}
quote do
@plugs unquote(Macro.escape(plug))
end
end
defp extract_when({:when, _, [left, when_conditions]}), do: {left, when_conditions}
defp extract_when(other), do: {other, true}
@doc ~S"""
Calls given function if socket wasn't redirected, marked for reply or wrapped in a tuple.
Read more about the role that this function plays in the live controller pipeline in docs for
`Phoenix.LiveController`.
"""
@spec chain(
socket :: Socket.t() | {:noreply, Socket.t()} | {:reply, term(), Socket.t()},
func :: function
) :: Socket.t()
def chain(socket = %{redirected: nil, controller: %{reply_payload: nil}}, func),
do: func.(socket)
def chain(redirected_or_wrapped_or_with_reply, _func), do: redirected_or_wrapped_or_with_reply
@doc ~S"""
Returns true if the socket was previously mounted by action handler.
Read more about the role that this function plays when implementing action handlers in docs for
`Phoenix.LiveController`.
"""
@spec mounted?(socket :: Socket.t()) :: boolean()
def mounted?(%{__struct__: Socket, controller: %ControllerState{mounted?: mounted}}),
do: mounted
@doc ~S"""
Returns the mounted live controller's URL with query params.
"""
@spec get_current_url(socket :: Socket.t()) :: String.t()
def get_current_url(%{__struct__: Socket, controller: %ControllerState{url: url}}), do: url
@doc ~S"""
Returns the whole session.
Although `get_session/2` allows atom keys, they are always normalized to strings. So this function
always returns a map with string keys.
"""
@spec get_session(socket :: Socket.t()) :: map
def get_session(%{__struct__: Socket, controller: %ControllerState{session: session}}),
do: session
@doc ~S"""
Returns session value for the given key. If key is not set, nil is returned.
The key can be a string or an atom, where atoms are automatically converted to strings.
"""
@spec get_session(socket :: Socket.t(), String.t() | atom()) :: any()
def get_session(socket, key), do: get_session(socket) |> Map.get(to_string(key))
@doc ~S"""
Attaches payload to the socket that will be passed to client-side as the event reply.
"""
@spec reply(socket :: Socket.t(), payload :: term()) :: Socket.t()
def reply(
socket = %{__struct__: Socket, controller: %ControllerState{} = controller_state},
payload
) do
Map.put(socket, :controller, Map.put(controller_state, :reply_payload, payload))
end
end
|
lib/phoenix_live_controller.ex
| 0.834845
| 0.464051
|
phoenix_live_controller.ex
|
starcoder
|
defmodule MixTemplates do
@moduledoc ~S"""
> NOTE: This documentation is intended for folks who want to write
> their own templates. If you just want to use a template, then
> have a look at the README, or try `mix help template` and
> `mix help gen`.
This is the engine that supports templated directory trees.
A template is a trivial mix project that acts as the specification for
the projects you want your users to be able to generate. It contains a
single source file in `lib` that contains metadata and option parsing.
It also contains a top-level directory called `template`. The
directories and files underneath `template/` are copied to the destination
location.
The copying function takes a map containing key-value pairs. This is
passed to EEx, which is used to expand each individual file. Thus a
template file for `mix.exs` may contain:
~~~elixir
defmodule <%= @project_name_camel_case %>.Mixfile do
use Mix.Project
@name :<%= @project_name %>
@version "0.1.0"
. . .
~~~
The `<%= ... %>` constructs are expanded using the passed-in map.
In addition, the template looks for the string `$PROJECT_NAME$` in the
_names_ of files and directories. It replaces each occurrence with the
name of the project, taken from `assigns.project_name`.
Thus the directory structure for a standard Elixir project might be:
template
├── $PROJECT_NAME$
│ ├── README.md
│ ├── config
│ │ └── config.exs
│ ├── lib
│ │ └── $PROJECT_NAME$.ex
│ ├── mix.exs
│ └── test
│ ├── $PROJECT_NAME$_test.exs
│ └── test_helper.exs
└── templates_project.ex
## Write a Template
Make sure you have the underlying tools installed:
$ mix archive.install hex mix_templates
$ mix archive.install hex mix_generator
Then install the template for templates (yup :).
$ mix template.install hex gen_template_template
Now create your template project:
$ mix gen template my_template
Wander into the directory that is created:
$ cd my_template/
$ tree
.
├── README.md
├── lib
│ └── my_template.ex
├── mix.exs
└── template
└── $PROJECT_NAME$
└── your_project_tree_goes_here
#### Add a Description
Your first job is to update the metadata in lib/«whatever».ex:
defmodule MyTemplate do
@moduledoc File.read!(Path.join([__DIR__, "../README.md"]))
use MixTemplates,
name: :my_template,
short_desc: "Template for ....",
source_dir: "../template",
based_on: :another_project,
options: [ command line options unique to this template ]
end
For a simple template, the only change you're likely to make to the
metadata is to update the short description. This is used to display
information about the template when you list the templates you have
installed, so you probably want to keep it under 70 characters.
If you want to write a template that is based on another, use the
`:based_on` option. This causes the parent template to be processed
before your local template. This means your template need only implement
the changes to the base.
#### Add the Files
The job of your template is to contain a directory tree that mirrors the
tree you want your users to produce locally when they run `mix gen`.
* The easiest way to start is with an existing project that uses the
same layout. Copy it into your template under
`template/$PROJECT_NAME$`.
* Remove any files that aren't part of every project.
* Look for files and directories whose names include the name of the
project. Rename these, replacing the project name with the string
$PROJECT_NAME$. For example, if you're following the normal
convention for test files, you'll have a file called
test/myapp_test.exs
Rename this file to
test/$PROJECT_NAME$_test.exs
* Now you need to look through the files for content that should be
customized to each new project that's generated. Replace this
content using EEx substitutions:
For example, the top-level application might be an Elixir file:
defmodule MyApp do
# . . .
end
Replace this with
defmodule <%= @project_name_camel_case %> do
# . . .
end
There's a list of the available values in the next section.
### Test Your Template
You can use `mix gen` to test your template while you're developing
it. Simply give it the path to the directory containing the generator
(the top level, with `mix.exs` in it). This path must start with a dot
(".") or slash ("/").
$ mix gen ../work/my_generator test_project
### Publish Your Template
Wander back to the `mix.exs` file at the top of your project, and
update the `@description`, `@maintainers`, and `@github` attributes.
Then publish to hex:
$ mix hex.publish
and wait for the praise.
## Standard Substitutions
The following values are available inside EEx substitutions in
templates. (Remember that the inside of a `<%= ...%>` is just Elixir
code, so you aren't limited to this list. The next section describes
how you can extend this set even further in your own templates.)
#### Project Information
Assuming the template was invoked with a project name of my_app:
@project_name my_app
@project_name_camel_case MyApp
#### Date and Time
These examples are from my computer in US Central Daylight Time
(GMT-5)
@now.utc.date "2017-04-11"
@now.utc.time "00:49:37.505034"
@now.utc.datetime "2017-04-11T00:49:37.505034Z"
@now.local.date "2017-04-10"
@now.local.time "19:49:37"
@now.local.datetime "2017-04-10 19:49:37"
#### The Environment
@host_os "os-name" or "os-name (variant)" eg: "unix (darwin)"
@original_args the original args passed to mix
@elixir_version eg: "1.5.3"
@erlang_version eg: "8.2"
@otp_release eg: "19"
@in_umbrella? true if we're in the apps_path directory of an
umbrella project
#### Stuff About the Template
@template_module the module containing your template metadata
@template_name the name of the template (from the metadata)
@target_dir the project directory is created in this
@target_subdir the project directory is called this
### Handling Command Line Parameters
You may need to configure the output of your template depending on
the options specified on the command line. For example, the standard
`project` template lets you generate basic and supervised apps. To
indicate you want the latter, you add a command line flag:
$ mix gen project my_app --supervised
This option is not handled by the `gen` task. Instead, it passes it to
your template module (the file in your top-level `lib/`). You can
receive the parameters by defining a callback
~~~ elixir
defmodule MyTemplate do
@moduledoc File.read!(Path.join([__DIR__, "../README.md"]))
use MixTemplates,
name: :my_template,
short_desc: "Template for ....",
source_dir: "../template",
options: [
supervised: [ to: :is_supervised?, default: false ],
sup: [ same_as: :supervised ],
]
end
~~~
`options` is a specification of the command line parameters that your
template accepts. In all cases, the key is the parameter as it appears
on the command line, and the keyword list that is the value gives
information about that option.
The simplest option is
~~~ elixir
name: []
~~~
This says that `--name` is a valid option. If you add it to the
command line with no value following it, then `:name` will appear in
the assigns with the value `true`. If you pass in a value, then that
value will appear in the assigns.
If you do not specify `--name` on the command line, there will be no
entry with the key `:name` in the assigns.
If your option takes an argument, you specify its name using `takes:`.
~~~ elixir
name: [ takes: "your-name" ]
~~~
The `required` key says that a given parameter _must_ appear on the command line.
~~~ elixir
name: [
takes: "your-name",
required: true
]
~~~
`default` provides a value to use if the parameter does not appear on
the command line:
~~~ elixir
name: [
takes: "your-name",
default: "nancy"
]
~~~
If a default value is given, the entry will _always_ appear in the
assigns.
By default the name of the field in the assigns will be the key in the
options list. You can override this using `to`.
~~~ elixir
name: [
takes: "your-name",
to: :basic_id,
default: "nancy"
]
~~~
In this example, calling
$ mix gen my_template my_app --name walter
will create an assigns map that includes `@basic_id` with a value of “walter.”
Finally, you can alias an option using `same_as`.
The following will allow both `--sup` and `--supervised` on the
command line, and will map either to the key `:is_supervised?` in the
assigns.
~~~ elixir
options: [
supervised: [ to: :is_supervised?, default: false ],
sup: [ same_as: :supervised ],
]
~~~
### Dealing with optional files and directories
Sometimes you need to include a file or directory only if some condition
is true. Use these helpers:
* `MixTemplates.ignore_file_and_directory_unless(«condition»)`
Include this in a template file, and that file and its immediate directory
will not be generated in the output unless the condition is true.
For example, in a new mix project, we only generate
`lib/«name»/application.ex` if we're creating a supervised app. The
`application.ex` template includes the following:
~~~ elixir
<%
# ------------------------------------------------------------
MixTemplates.ignore_file_and_directory_unless @is_supervised?
# ------------------------------------------------------------
%>
defmodule <%= @project_name_camel_case %>.Application do
# ...
end
~~~
Sometimes you just need to skip a single file if some condition is true. Use this helper:
* `MixTemplates.ignore_file_unless(«condition»)`
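For example (mirroring the directory helper above; the assign name is illustrative):
~~~ elixir
<%
MixTemplates.ignore_file_unless @is_supervised?
%>
~~~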
### Cleaning Up
In most cases your work is done once the template is copied into the
project. There are times, however, where you may want to do some
manual adjustments at the end. For that, add a `clean_up/1` function
to your template module.
~~~ elixir
def clean_up(assigns) do
# ...
end
~~~
The cleanup function is invoked in the directory where the project is
created (and not inside the project itself). Thus if you invoke
mix gen my_template chat_server
in the directory `/Projects` (which will create
`/Projects/chat_server`), the `clean_up` function's cwd will be
`/Projects`.
### Deriving from Another Template
Sometimes you want to create a template which is similar to another. Perhaps
some files' contents are different, new files are added or others taken away.
Use the `based_on: «template»` option to facilitate this:
~~~ elixir
defmodule MyTemplate do
@moduledoc File.read!(Path.join([__DIR__, "../README.md"]))
use MixTemplates,
name: :my_template,
short_desc: "Template for ....",
source_dir: "../template",
based_on: :project
def populate_assigns(assigns, options) do
# ...
end
end
~~~
The value of `based_on` is the name or the path to a template.
When people create a project based on your template, the generator
will run twice. The first time, it creates the `based_on` project. It
then runs again with your template. Any files or directories in your
template will overwrite the corresponding files in the based-on
template.
It isn't necessary to have a full tree under the `template` directory
in your template. Just populate the parts you want to override in the
base template.
If you want to remove files generated by the base template, you can
add code to the `clean_up/1` hook. Remember that the cleanup hook is
invoked in the directory that contains the target project, so you'll
need to descend down into the project itself. Obviously, this is
something you'll want to test carefully before releasing :)
~~~ elixir
def clean_up(assigns) do
Path.join([assigns.target_subdir, "lib", "#{assigns.project_name}.ex"])
|> File.rm
end
~~~
"""
use Private
alias Mix.Generator, as: MG
alias MixTemplates.Cache
defmacro __using__(opts) do
name = mandatory_option(opts[:name],
"template must include\n\n\tname: \"template_name\"\n\n")
override_source_dir = Keyword.get(opts, :source_dir)
quote do
@doc """
Return the name of this template as an atom. This is
the name passed to the gen command.
"""
def name do
unquote(name)
end
@doc """
Return the short description of this template, or nil.
"""
def short_desc do
unquote(opts[:short_desc])
end
@doc """
Return the absolute path to the tree that is to be copied when
instantiating this template. This top-level dir will typically
just contain a directory called `$PROJECT_NAME$`.
"""
def source_dir do
cond do
unquote(override_source_dir) ->
Path.absname(unquote(override_source_dir), __DIR__)
true ->
__DIR__
end
|> Path.join("$PROJECT_NAME$")
end
@doc """
Return the name or path of a template that this template is
based upon. That template will be processed first, and then
this one will be executed.
"""
def based_on do
unquote(opts[:based_on])
end
@doc """
Return the list of options supported by this template.
"""
def options do
unquote(opts[:options] || [])
end
@doc """
Override this function to do any cleanup after your template
has been copied into the user project. One use of this is to remove
unwanted files created by a template upon which this template
is based.
"""
def clean_up(_assigns) do
nil
end
defoverridable clean_up: 1
end
end
def find(name)
when is_binary(name) do
name |> String.to_atom |> find
end
def find(name) when is_atom(name) do
Cache.find(name)
end
def generate(template, assigns) do
kws = [ assigns: assigns |> Map.to_list ]
check_existence_of(assigns.target_dir, assigns.target_subdir)
|> create_or_merge(template, kws)
end
# called from within a template to cause it not to generate either this
# file or anything in this file's directory
def ignore_file_and_directory_unless(flag) when flag do
flag && nil # bypass unused variable warning
end
def ignore_file_and_directory_unless(_) do
throw :ignore_file_and_directory
end
# called from within a template to cause it not to generate this file
def ignore_file_unless(flag) when flag do
flag && nil # bypass unused variable warning
end
def ignore_file_unless(_) do
throw :ignore_file
end
private do
defp check_existence_of(dir, name) do
path = Path.join(dir, name)
cond do
!File.exists?(dir) ->
{ :error, "target directory #{dir} does not exist" }
!File.dir?(dir) ->
{ :error, "'#{dir}' is not a directory" }
!File.exists?(path) ->
{ :need_to_create, path }
!File.dir?(path) ->
{ :error, "'#{path}' exists but is not a directory" }
true ->
{ :maybe_update, path }
end
end
defp create_or_merge({:error, reason}, _, _), do: {:error, reason}
defp create_or_merge({:need_to_create, dest_dir}, template, assigns) do
source_dir = template.source_dir
copy_tree_with_expansions(source_dir, dest_dir, assigns)
end
defp create_or_merge({:maybe_update, dest}, template, assigns) do
if assigns[:assigns][:force] do
copy_tree_with_expansions(template.source_dir, dest, assigns)
else
{ :error, "Updating an existing project is not yet supported" }
end
end
defp copy_tree_with_expansions(source, dest, assigns) do
if File.dir?(source) do
if !String.ends_with?(source, "_build") do
copy_dir(source, dest, assigns)
end
else
copy_and_expand(source, dest, assigns)
end
end
defp copy_dir(source, dest, assigns) do
maybe_create_directory(dest, assigns[:assigns][:force])
try do
File.ls!(source)
|> Enum.each(fn name ->
s = Path.join(source, name)
d = Path.join(dest, dest_file_name(name, assigns))
copy_tree_with_expansions(s, d, assigns)
end)
catch
:ignore_file_and_directory ->
File.rm_rf!(dest)
Mix.shell.info([:green, "- deleting",
:reset, " #{dest} ",
:faint, :cyan, "(it isn't needed)"])
end
end
defp copy_and_expand(source, dest, assigns) do
try do
content = EEx.eval_file(source, assigns, [ trim: true ])
MG.create_file(dest, content)
mode = File.stat!(source).mode
File.chmod!(dest, mode)
catch
:ignore_file ->
Mix.shell.info([:green, "- ignoring",
:reset, " #{dest} ",
:faint, :cyan, "(it isn't needed)"])
end
end
defp mandatory_option(nil, msg), do: raise(CompileError, description: msg)
defp mandatory_option(value, _msg), do: value
# You can escape the project name by doubling the $ characters,
# so $$PROJECT_NAME$$ becomes $PROJECT_NAME$
defp dest_file_name(name, assigns) do
if name =~ ~r{\$\$PROJECT_NAME\$\$} do
String.replace(name,"$$PROJECT_NAME$$", "$PROJECT_NAME$")
else
String.replace(name, "$PROJECT_NAME$", assigns[:assigns][:project_name])
end
end
defp maybe_create_directory(path, force) when not force do
MG.create_directory(path)
end
defp maybe_create_directory(path, _force) do
if File.exists?(path) do
Mix.shell.info([:green, "• existing #{path}"])
else
maybe_create_directory(path, false)
end
end
end
end
|
lib/mix_templates.ex
| 0.776326
| 0.547101
|
mix_templates.ex
|
starcoder
|
defmodule ExPwned.Breaches do
@moduledoc """
Module to interact with the hibp API to retrieve breach data
"""
use ExPwned.Api
@doc """
Returns a list of all breaches a particular account has been involved in.
The API takes a single parameter which is the account to be searched for.
The account is not case sensitive and will be trimmed of leading or trailing white spaces.
The account should always be URL encoded.
## Examples
iex> ExPwned.Breaches.breachedaccount("<EMAIL>")
iex> ExPwned.Breaches.breachedaccount("<EMAIL>", [truncateResponse: true, domain: "adobe.com"])
"""
def breachedaccount(account, opts \\ []), do: do_get("breachedaccount/#{account}", opts)
@doc """
Get all breaches in hibp.
## Examples
iex> ExPwned.Breaches.breaches
{:ok,
[%{"AddedDate" => "2015-10-26T23:35:45Z", "BreachDate" => "2015-03-01",
"DataClasses" => ["Email addresses", "IP addresses", "Names", "Passwords"],
"Description" => "In approximately March 2015, the free web hosting provider <a href=\"http://www.troyhunt.com/2015/10/breaches-traders-plain-text-passwords.html\" target=\"_blank\" rel=\"noopener\">000webhost suffered a major data breach</a> that exposed over 13 million customer records. The data was sold and traded before 000webhost was alerted in October. The breach included names, email addresses and plain text passwords.",
"Domain" => "000webhost.com", "IsActive" => true, "IsRetired" => false,
"IsSensitive" => false, "IsSpamList" => false, "IsVerified" => true,
"LogoType" => "png", "Name" => "000webhost", "PwnCount" => 13545468,
"Title" => "000webhost"},
[retry_after: 0]
}
"""
def breaches, do: do_get("breaches")
@doc """
Filters the result set to only breaches against the domain specified
## Example
iex> ExPwned.Breaches.breaches("000webhost.com")
{:ok,
[%{"AddedDate" => "2015-10-26T23:35:45Z", "BreachDate" => "2015-03-01",
"DataClasses" => ["Email addresses", "IP addresses", "Names", "Passwords"],
"Description" => "In approximately March 2015, the free web hosting provider <a href=\"http://www.troyhunt.com/2015/10/breaches-traders-plain-text-passwords.html\" target=\"_blank\" rel=\"noopener\">000webhost suffered a major data breach</a> that exposed over 13 million customer records. The data was sold and traded before 000webhost was alerted in October. The breach included names, email addresses and plain text passwords.",
"Domain" => "000webhost.com", "IsActive" => true, "IsRetired" => false,
"IsSensitive" => false, "IsSpamList" => false, "IsVerified" => true,
"LogoType" => "png", "Name" => "000webhost", "PwnCount" => 13545468,
"Title" => "000webhost"}], [retry_after: 0]}
"""
def breaches(domain), do: do_get("breaches", %{domain: domain})
@doc """
Sometimes just a single breach is required and this can be retrieved by the breach "name".
This is the stable value which may or may not be the same as the breach "title" (which can change).
## Example
iex> ExPwned.Breaches.breach("000webhost")
{:ok,
%{"AddedDate" => "2015-10-26T23:35:45Z", "BreachDate" => "2015-03-01",
"DataClasses" => ["Email addresses", "IP addresses", "Names", "Passwords"],
"Description" => "In approximately March 2015, the free web hosting provider <a href=\"http://www.troyhunt.com/2015/10/breaches-traders-plain-text-passwords.html\" target=\"_blank\" rel=\"noopener\">000webhost suffered a major data breach</a> that exposed over 13 million customer records. The data was sold and traded before 000webhost was alerted in October. The breach included names, email addresses and plain text passwords.",
"Domain" => "000webhost.com", "IsActive" => true, "IsRetired" => false,
"IsSensitive" => false, "IsSpamList" => false, "IsVerified" => true,
"LogoType" => "png", "Name" => "000webhost", "PwnCount" => 13545468,
"Title" => "000webhost"}, [retry_after: 0]}
"""
def breach(name), do: do_get("breach/#{name}")
@doc """
A "data class" is an attribute of a record compromised in a breach.
For example, many breaches expose data classes such as "Email addresses" and "Passwords".
The values returned by this service are ordered alphabetically in a string array
and will expand over time as new breaches expose previously unseen classes of data.
iex> ExPwned.Breaches.dataclasses
{:ok,
["Account balances", "Age groups", "Astrological signs", "Avatars",
"Bank account numbers", "Banking PINs", "Beauty ratings", "Biometric data",
"Browser user agent details", "Car ownership statuses", "Career levels",
"Chat logs", "Credit card CVV", "Credit cards", "Credit status information",
"Customer feedback", "Customer interactions", "Dates of birth",
"Deceased date", "Device information", "Device usage tracking data",
"Drinking habits", "Drug habits", "Education levels", "Email addresses",
"Email messages", "Employers", "Ethnicities", "Family members' names",
"Family plans", "Family structure", "Financial transactions",
"Fitness levels", "Genders", "Geographic locations", "Government issued IDs",
"Historical passwords", "Home ownership statuses", "Homepage URLs",
"Income levels", "Instant messenger identities", "IP addresses", "Job titles",
"MAC addresses", "Marital statuses", "Names", "Nicknames", "Parenting plans",
...], [retry_after: 0]}
"""
def dataclasses, do: do_get("dataclasses")
end
|
lib/ex_pwned/breaches.ex
| 0.766992
| 0.494873
|
breaches.ex
|
starcoder
|
defmodule Multiverse.Adapters.ISODate do
@moduledoc """
Adapter that fetches ISO-8601 date from request header and `Elixir.Date`
to resolve changes that must be applied to the connection.
This adapter requires you to configure which version is used by default,
when value in a header was malformed or not set. It's configured via `:default_version`,
supported values:
* `:oldest` - apply all gates by default. This option is useful when you integrate Multiverse into an existing project \
and API consumers are not ready to accept the latest changes by default;
* `:latest` - use the current date as the default version. This option is useful when there are
no legacy clients or there were no breaking changes before those clients started to send an API version.
## Version Channels
Consumers can use two channels instead of date in version header:
* `latest` - apply only changes scheduled for future (with a date later than date when request arrived);
* `edge` - do not apply any changes.
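## Example
A plausible pipeline configuration (gate module names are illustrative; `:default_version`
and `:gates` are the keys this adapter reads):
plug Multiverse,
adapter: Multiverse.Adapters.ISODate,
default_version: :oldest,
gates: [
{~D[2020-02-02], MyApp.APIGates.ChangeResponseFormat},
{~D[2019-01-01], MyApp.APIGates.RenameEmailField}
]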
"""
require Logger
@behaviour Multiverse.Adapter
@default_version_values [:latest, :oldest]
@typep version :: Date.t()
@doc """
Initializes adapter configuration at compile time.
Raises when `:default_version` option is not set.
"""
def init(_adapter, opts) do
default_version = Keyword.get(opts, :default_version)
unless default_version do
raise ArgumentError, "Multiverse.Adapters.ISODate :default_version config is not set"
end
unless default_version in @default_version_values do
default_version_values_strings = Enum.map(@default_version_values, &inspect/1)
raise ArgumentError,
"invalid Multiverse.Adapters.ISODate :default_version config value, " <>
"possible values: #{Enum.join(default_version_values_strings, ", ")}, " <>
"got: #{inspect(default_version)}"
end
{:ok, opts}
end
@spec version_comparator(v1 :: version(), v2 :: version()) :: boolean
def version_comparator("edge", _v2), do: false
def version_comparator(v1, v2), do: Date.compare(v1, v2) == :lt
@spec fetch_default_version(conn :: Plug.Conn.t(), adapter_config :: Multiverse.Adapter.config()) ::
{:ok, version, Plug.Conn.t()}
def fetch_default_version(conn, adapter_config) do
case Keyword.get(adapter_config, :default_version) do
:latest -> {:ok, Date.utc_today(), conn}
:oldest -> {:ok, fetch_oldest_version(adapter_config), conn}
end
end
defp fetch_oldest_version(adapter_config) do
with gates when length(gates) > 0 <- Keyword.fetch!(adapter_config, :gates) do
gates |> List.last() |> elem(0) |> Date.add(-1)
else
[] ->
:ok =
Logger.warn(fn ->
"[Multiverse] You specified default_version: :oldest but there are no gates, " <>
"failing back to the current date"
end)
Date.utc_today()
end
end
@spec resolve_version_or_channel(
conn :: Plug.Conn.t(),
channel_or_version :: String.t(),
adapter_config :: Multiverse.Adapter.config()
) :: {:ok, version, Plug.Conn.t()}
def resolve_version_or_channel(conn, "latest", _adapter_config) do
{:ok, Date.utc_today(), conn}
end
def resolve_version_or_channel(conn, "edge", _adapter_config) do
{:ok, "edge", conn}
end
def resolve_version_or_channel(conn, version, adapter_config) do
case Date.from_iso8601(version) do
{:ok, date} ->
{:ok, date, conn}
{:error, reason} ->
:ok =
Logger.warn(fn ->
"[Multiverse] Malformed version header: #{inspect(reason)}, failing back to the default version"
end)
fetch_default_version(conn, adapter_config)
end
end
end
|
lib/multiverse/adapters/iso_date.ex
| 0.891082
| 0.434401
|
iso_date.ex
|
starcoder
|
defmodule Aecore.Sync.Jobs do
@moduledoc """
Handles the functionality of scheduling the required jobs for the sync to be done.
This implementations uses the `job` library, where each job is regulated to a specific queue.
We have 3 main queues:
- `:sync_ping_workers` -> Handles the ping between nodes
- `:sync_task_workers` -> Handles the required functions for synchronization
- `:sync_gossip_workers` -> Handles the gossiping of blocks | txs
Each job function is spawned in a separate process using Task.start()
Later in the Sync module these processes are linked to the GenServer process of the Sync module.
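A minimal usage sketch (the worker function is illustrative):

Jobs.init_queues()
{:ok, worker} = Jobs.run_job(:sync_task_workers, fn -> sync_with_peer() end)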
"""
alias Aecore.Sync.Sync
alias Aecore.Sync.Task, as: SyncTask
alias Aecore.Chain.Block
alias Aecore.Tx.SignedTx
@type peer_id :: pid()
@type delay :: non_neg_integer()
@type gossip :: :block | :tx
@type queue ::
:sync_ping_workers
| :sync_task_workers
| :sync_gossip_workers
@doc """
Creates a new jobs queue for doing some work.
"""
@spec init_queues() :: :ok
def init_queues do
:jobs.add_queue(:sync_ping_workers, [{:regulators, [{:counter, [{:limit, 3}]}]}]) &&
:jobs.add_queue(:sync_task_workers, [{:regulators, [{:counter, [{:limit, 10}]}]}]) &&
:jobs.add_queue(:sync_gossip_workers, [{:regulators, [{:counter, [{:limit, 10}]}]}])
end
@spec run_job(queue(), fun()) :: {:ok, pid()}
def run_job(queue, fun) do
Task.start(:jobs, :run, [queue, fun])
end
@spec delayed_run_job(peer_id(), SyncTask.t(), queue(), fun(), delay()) ::
{SyncTask.t(), {:change_worker, peer_id(), pid(), pid()}}
def delayed_run_job(peer_id, task, queue, fun, delay) do
old_worker = self()
{:ok, new_worker} =
Task.start(fn ->
:timer.sleep(delay)
:jobs.run(queue, fun)
end)
{task, {:change_worker, peer_id, old_worker, new_worker}}
end
@spec enqueue(gossip(), Block.t() | SignedTx.t(), list(peer_id())) :: {:ok, pid()}
def enqueue(gossip, data, peer_ids) do
Task.start(fn ->
Enum.map(peer_ids, fn peer_id ->
:jobs.run(:sync_gossip_workers, enqueue_strategy(gossip, data, peer_id))
end)
end)
end
defp enqueue_strategy(:block, block, peer_id) do
fn -> Sync.forward_block(block, peer_id) end
end
defp enqueue_strategy(:tx, tx, peer_id) do
fn -> Sync.forward_tx(tx, peer_id) end
end
end
|
apps/aecore/lib/aecore/sync/jobs.ex
| 0.83612
| 0.427158
|
jobs.ex
|
starcoder
|
defmodule Asteroid.Token do
require Logger
@moduledoc """
Types and exceptions for tokens
"""
defmodule InvalidTokenError do
@moduledoc """
Error returned when a token was requested but an error happened when retrieving it
"""
@enforce_keys [:sort, :id]
defexception [:sort, :id, reason: ""]
@type t :: %__MODULE__{
sort: String.t(),
reason: String.t(),
id: String.t()
}
@impl true
def message(%{sort: sort, reason: nil, id: id}) do
"Invalid #{sort} `#{id}`"
end
def message(%{sort: sort, reason: reason, id: id}) do
"Invalid #{sort} `#{id}`: #{reason}"
end
end
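# Usage sketch (values illustrative):
#
# raise InvalidTokenError, sort: "access token", id: "abc", reason: "expired"
# #=> ** (Asteroid.Token.InvalidTokenError) Invalid access token `abc`: expired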
@typedoc """
Token sort (access token, refresh token, authorization code...)
Token type is not used because it refers to how it's used by a client in OAuth2 specification
(which defines the `"bearer"` token type).
"""
@type sort :: :access_token | :refresh_token | :authorization_code
@typedoc """
String representation of a `t:sort/0`
Must be the string conversion of a `t:sort/0` atom.
"""
@type sort_str :: String.t()
@typedoc """
The different formats a token may have once serialized, i.e. send on the wire (in opposition
to the token internally used by Asteroid)
"""
@type serialization_format ::
:opaque
| :jws
| :jwe
| :saml1
| :saml2
@typedoc """
String representation of a `t:serialization_format/0`
Must be the string conversion of a `t:serialization_format/0` atom.
"""
@type serialization_format_str :: String.t()
@typedoc """
Serialized token, as sent on the wire
For instance, a refresh token as used internally by Asteroid would look like:
```elixir
%Asteroid.Token.RefreshToken{
data: %{},
id: "F1XFSdm11N2XJ9OOPT7__Y0NqedjPwKgdT-ifQeuS3c",
serialization_format: :opaque
}
```
Once serialized, for instance when sent by the `/token` endpoint, it looks like:
```elixir
"F1XFSdm11N2XJ9OOPT7__Y0NqedjPwKgdT-ifQeuS3c"
```
which is its id. If the serialization format had been `:jws`, the serialized form would result
in a JWT.
"""
@type serialized_token :: String.t()
end
|
lib/asteroid/token.ex
| 0.892466
| 0.620866
|
token.ex
|
starcoder
|
defmodule Pbuf.Tests.Root do
@moduledoc false
alias Pbuf.Decoder
@derive {Jason.Encoder, []}
defstruct [
]
@type t :: %__MODULE__{
}
@spec new(Enum.t) :: t
def new(data \\ []), do: struct(__MODULE__, data)
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(_data) do
[
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
import Bitwise, only: [band: 2]
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
type = band(prefix, 7)
{acc, Decoder.skip(type, data)}
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
defmodule Pbuf.Tests.Root.Child do
@moduledoc false
alias Pbuf.Decoder
@derive {Jason.Encoder, []}
defstruct [
xe: :UNKNOWN
]
@type t :: %__MODULE__{
xe: Pbuf.Tests.Root.Child.XE.t
}
defmodule XE do
@moduledoc false
@type t :: :UNKNOWN | 0 | :VALUE | 1
@spec to_int(t | non_neg_integer) :: integer
def to_int(:UNKNOWN), do: 0
def to_int(0), do: 0
def to_int(:VALUE), do: 1
def to_int(1), do: 1
def to_int(invalid) do
raise Pbuf.Encoder.Error,
type: __MODULE__,
value: invalid,
tag: nil,
message: "#{inspect(invalid)} is not a valid enum value for #{__MODULE__}"
end
@spec from_int(integer) :: t
def from_int(0), do: :UNKNOWN
def from_int(1), do: :VALUE
def from_int(_unknown), do: :invalid
end
@spec new(Enum.t) :: t
def new(data \\ []), do: struct(__MODULE__, data)
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.enum_field(Pbuf.Tests.Root.Child.XE, data.xe, <<8>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<8, data::binary>>) do
Decoder.enum_field(Pbuf.Tests.Root.Child.XE, :xe, acc, data)
end
import Bitwise, only: [bsr: 2, band: 2]
# failed to decode, either this is an unknown tag (which we can skip), or
# it is a wrong type (which is an error)
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
|
test/schemas/generated/nested_enum.pb.ex
| 0.795975
| 0.533458
|
nested_enum.pb.ex
|
starcoder
|
defmodule Protobuf.DefineMessage do
@moduledoc false
alias Protobuf.Decoder
alias Protobuf.Encoder
alias Protobuf.Field
alias Protobuf.OneOfField
alias Protobuf.Delimited
alias Protobuf.Utils
def def_message(name, fields, inject: inject, doc: doc, syntax: syntax) when is_list(fields) do
struct_fields = record_fields(fields)
# Inject everything in 'using' module
if inject do
quote location: :keep do
@root __MODULE__
@record unquote(struct_fields)
defstruct @record
fields = unquote(struct_fields)
def record, do: @record
def syntax, do: unquote(syntax)
unquote(define_typespec(name, fields))
unquote(encode_decode(name))
unquote(fields_methods(fields))
unquote(oneof_fields_methods(fields))
unquote(meta_information())
unquote(constructors(name))
defimpl Protobuf.Serializable do
def serialize(object), do: unquote(name).encode(object)
end
end
# Or create a nested module, with use_in functionality
else
quote location: :keep do
root = __MODULE__
fields = unquote(struct_fields)
use_in = @use_in[unquote(name)]
defmodule unquote(name) do
@moduledoc false
unquote(Protobuf.Config.doc_quote(doc))
@root root
@record unquote(struct_fields)
defstruct @record
def record, do: @record
def syntax, do: unquote(syntax)
unquote(define_typespec(name, fields))
unquote(encode_decode(name))
unquote(fields_methods(fields))
unquote(oneof_fields_methods(fields))
unquote(meta_information())
unquote(constructors(name))
if use_in != nil do
Module.eval_quoted(__MODULE__, use_in, [], __ENV__)
end
defimpl Protobuf.Serializable do
def serialize(object), do: unquote(name).encode(object)
end
end
unquote(define_oneof_modules(name, fields))
end
end
end
defp constructors(name) do
quote location: :keep do
def new(), do: new([])
def new(values) do
struct(unquote(name), values)
end
end
end
defp define_typespec(module, field_list) when is_list(field_list) when is_atom(module) do
case field_list do
[%Field{name: :value, type: scalar, occurrence: occurrence}]
when is_atom(scalar) and is_atom(occurrence) ->
scalar_wrapper? = Utils.is_standard_scalar_wrapper(module)
cond do
scalar_wrapper? and occurrence == :required ->
quote do
@type t() :: unquote(define_scalar_typespec(scalar))
end
scalar_wrapper? ->
quote do
@type t() :: unquote(define_scalar_typespec(scalar)) | nil
end
:else ->
define_trivial_typespec(field_list)
end
[%Field{name: :value, type: {:enum, enum_module}, occurrence: occurrence}]
when is_atom(enum_module) ->
enum_wrapper? = Utils.is_enum_wrapper(module, enum_module)
cond do
enum_wrapper? and occurrence == :required ->
quote do
@type t() :: unquote(enum_module).t()
end
enum_wrapper? ->
quote do
@type t() :: unquote(enum_module).t() | nil
end
:else ->
define_trivial_typespec(field_list)
end
_ ->
define_trivial_typespec(field_list)
end
end
defp define_trivial_typespec([]), do: nil
defp define_trivial_typespec(fields) when is_list(fields) do
field_types = define_trivial_typespec_fields(fields, [])
map_type = {:%{}, [], field_types}
module_type = {:%, [], [{:__MODULE__, [], Elixir}, map_type]}
quote generated: true do
@type t() :: unquote(module_type)
end
end
defp define_trivial_typespec_fields([], acc), do: Enum.reverse(acc)
defp define_trivial_typespec_fields([field | rest], acc) do
case field do
%Protobuf.Field{name: name, occurrence: :required, type: type} ->
ast = {name, define_field_typespec(type)}
define_trivial_typespec_fields(rest, [ast | acc])
%Protobuf.Field{name: name, occurrence: :optional, type: type} ->
ast =
{name,
quote do
unquote(define_field_typespec(type)) | nil
end}
define_trivial_typespec_fields(rest, [ast | acc])
%Protobuf.Field{name: name, occurrence: :repeated, type: type} ->
ast =
{name,
quote do
[unquote(define_field_typespec(type))]
end}
define_trivial_typespec_fields(rest, [ast | acc])
%Protobuf.OneOfField{name: name, fields: fields} ->
ast =
{name,
quote do
unquote(define_algebraic_type(fields))
end}
define_trivial_typespec_fields(rest, [ast | acc])
end
end
defp define_algebraic_type(fields) do
ast =
for %Protobuf.Field{name: name, type: type} <- fields do
{name, define_field_typespec(type)}
end
Protobuf.Utils.define_algebraic_type([nil | ast])
end
defp define_oneof_modules(namespace, fields) when is_list(fields) do
ast =
for %Protobuf.OneOfField{} = field <- fields do
define_oneof_instance_module(namespace, field)
end
quote do
(unquote_splicing(ast))
end
end
defp define_oneof_instance_module(namespace, %Protobuf.OneOfField{name: field, fields: fields}) do
module_subname =
field
|> Atom.to_string()
|> Macro.camelize()
|> String.to_atom()
fields = Enum.map(fields, &define_oneof_instance_macro/1)
quote do
defmodule unquote(Module.concat([namespace, :OneOf, module_subname])) do
(unquote_splicing(fields))
end
end
end
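# For a oneof member named e.g. `:name`, the macro generated below lets callers
# write `MyMsg.OneOf.MyField.name(value)`, which expands to `{:name, value}`
# (module path illustrative; it is built by define_oneof_instance_module/2 above).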
defp define_oneof_instance_macro(%Protobuf.Field{name: name}) do
quote do
defmacro unquote(name)(ast) do
inner_name = unquote(name)
quote do
{unquote(inner_name), unquote(ast)}
end
end
end
end
defp define_field_typespec(type) do
case type do
{:msg, field_module} ->
quote do
unquote(field_module).t()
end
{:enum, field_module} ->
quote do
unquote(field_module).t()
end
{:map, key_type, value_type} ->
key_type_ast = define_field_typespec(key_type)
value_type_ast = define_field_typespec(value_type)
quote do
[{unquote(key_type_ast), unquote(value_type_ast)}]
end
_ ->
define_scalar_typespec(type)
end
end
defp define_scalar_typespec(type) do
case type do
:double ->
quote do
float()
end
:float ->
quote do
float()
end
:int32 ->
quote do
integer()
end
:int64 ->
quote do
integer()
end
:uint32 ->
quote do
non_neg_integer()
end
:uint64 ->
quote do
non_neg_integer()
end
:sint32 ->
quote do
integer()
end
:sint64 ->
quote do
integer()
end
:fixed32 ->
quote do
non_neg_integer()
end
:fixed64 ->
quote do
non_neg_integer()
end
:sfixed32 ->
quote do
integer()
end
:sfixed64 ->
quote do
integer()
end
:bool ->
quote do
boolean()
end
:string ->
quote do
String.t()
end
:bytes ->
quote do
binary()
end
end
end
defp encode_decode(_name) do
quote do
def decode(data), do: Decoder.decode(data, __MODULE__)
def encode(%{} = record), do: Encoder.encode(record, defs())
def decode_delimited(bytes), do: Delimited.decode(bytes, __MODULE__)
def encode_delimited(messages), do: Delimited.encode(messages)
end
end
defp fields_methods(fields) do
for %Field{name: name, fnum: fnum} = field <- fields do
quote location: :keep do
def defs(:field, unquote(fnum)), do: unquote(Macro.escape(field))
def defs(:field, unquote(name)), do: defs(:field, unquote(fnum))
end
end
end
defp oneof_fields_methods(fields) do
for %OneOfField{name: name, rnum: rnum} = field <- fields do
quote location: :keep do
def defs(:field, unquote(rnum - 1)), do: unquote(Macro.escape(field))
def defs(:field, unquote(name)), do: defs(:field, unquote(rnum - 1))
end
end
end
defp meta_information do
quote do
def defs, do: @root.defs
def defs(:field, _), do: nil
def defs(:field, field, _), do: defs(:field, field)
defoverridable defs: 0
end
end
defp record_fields(fields) do
fields
|> Enum.map(fn field ->
case field do
%Field{name: name, occurrence: :repeated} ->
{name, []}
%Field{name: name, opts: [{:default, default} | _]} ->
{name, default}
%Field{name: name} ->
{name, nil}
%OneOfField{name: name} ->
{name, nil}
_ ->
nil
end
end)
|> Enum.reject(&is_nil/1)
end
end
|
lib/exprotobuf/define_message.ex
| 0.634883
| 0.435601
|
define_message.ex
|
starcoder
|
defmodule Cldr.Number.Formatter.Currency do
@moduledoc """
Number formatter for the `:currency` `:long` format.
This formatter implements formatting a currency in a long form. This
is not the same as decimal formatting with a currency placeholder.
To explain the difference, look at the following examples:
iex> Cldr.Number.to_string 123, TestBackend.Cldr, format: :currency, currency: "USD"
{:ok, "$123.00"}
iex> Cldr.Number.to_string 123, TestBackend.Cldr, format: :long, currency: "USD"
{:ok, "123 US dollars"}
In the first example the format is defined by a decimal mask. In this example
the format mask comes from:
iex> {:ok, formats} = Cldr.Number.Format.all_formats_for("en", TestBackend.Cldr)
...> formats.latn.currency
"¤#,##0.00"
In the second example we are using a format that combines the number with
a language translation of the currency name. In this example the format
comes from:
iex> {:ok, formats} = Cldr.Number.Format.all_formats_for("en", TestBackend.Cldr)
...> formats.latn.currency_long
%{one: [0, " ", 1], other: [0, " ", 1]}
Where "{0}" is replaced with the number formatted using the `:standard`
decimal format and "{1}" is replaced with the locale-specific name of the
currency, adjusted for the locale's plural rules.
**This module is not part of the public API and is subject
to change at any time.**
"""
alias Cldr.Number.{Format, System}
alias Cldr.{Substitution, Currency}
alias Cldr.Number.Format.Options
def to_string(number, :currency_long, backend, options) do
locale = options.locale
number_system = System.system_name_from!(options.number_system, locale, backend)
cardinal = Module.concat(backend, Number.Cardinal)
if !(formats = Format.formats_for!(locale, number_system, backend).currency_long) do
raise ArgumentError,
message:
"No :currency_long format known for " <>
"locale #{inspect(locale)} and number system #{inspect(number_system)}."
end
{:ok, currency} = Currency.currency_for_code(options.currency, backend, locale: locale)
currency_string = cardinal.pluralize(number, locale, currency.count)
options =
options
|> Map.put(:format, :standard)
|> set_fractional_digits(options.fractional_digits)
|> Options.resolve_standard_format(backend)
number_string = Cldr.Number.to_string!(number, backend, options)
format = cardinal.pluralize(number, locale, formats)
Substitution.substitute([number_string, currency_string], format)
|> :erlang.iolist_to_binary()
end
defp set_fractional_digits(options, nil) do
options
|> Map.put(:fractional_digits, 0)
end
defp set_fractional_digits(options, _digits) do
options
end
end
|
lib/cldr/number/formatter/currency_formatter.ex
| 0.915983
| 0.75656
|
currency_formatter.ex
|
starcoder
|
if Code.ensure_loaded?(PryIn) do
# Based on https://github.com/pryin-io/pryin/blob/9fec04d61a7b8d4ff337653294f13c4e345c7029/lib/pryin/instrumenter.ex
defmodule Calcinator.PryIn.Instrumenter do
@moduledoc """
Collects metrics about
* `:alembic`
* `:calcinator_authorization`
* `Calcinator.authorized/2`
* `Calcinator.can/3`
* `:calcinator_resources`
* `resources_module` calls
* `:calcinator_view`
* `view_module` calls
Activate via:
```elixir
config :calcinator,
instrumenters: [Calcinator.PryIn.Instrumenter]
```
"""
import PryIn.{InteractionHelper, TimeHelper}
alias PryIn.InteractionStore
def alembic(:start, compile_metadata, runtime_metadata), do: start(compile_metadata, runtime_metadata)
def alembic(:stop, time_diff, metadata = %{action: action, params: params}) do
if InteractionStore.has_pid?(self()) do
event = "alembic"
prefix = unique_prefix(event)
InteractionStore.put_context(self(), "#{prefix}/action", inspect(action))
InteractionStore.put_context(self(), "#{prefix}/params", inspect(params))
metadata
|> Map.merge(%{key: event, time_diff: time_diff})
|> add_custom_metric()
end
end
def alembic(:stop, _time_diff, _), do: :ok
@doc """
Collects metrics about `Calcinator.Authorization` behaviour calls from `Calcinator`.
Metrics are only collected inside of tracked interactions
"""
def calcinator_authorization(:start, compile_metadata, runtime_metadata) do
start(compile_metadata, runtime_metadata)
end
def calcinator_authorization(
:stop,
time_diff,
metadata = %{
action: action,
calcinator: %{
authorization_module: authorization_module,
subject: subject
},
target: target
}
) do
if InteractionStore.has_pid?(self()) do
event = "calcinator_authorization"
prefix = unique_prefix(event)
InteractionStore.put_context(self(), "#{prefix}/authorization_module", module_name(authorization_module))
InteractionStore.put_context(self(), "#{prefix}/subject", subject_name(subject))
InteractionStore.put_context(self(), "#{prefix}/action", to_string(action))
InteractionStore.put_context(self(), "#{prefix}/target", target_name(target))
metadata
|> Map.merge(%{key: event, time_diff: time_diff})
|> add_custom_metric()
end
end
def calcinator_authorization(:stop, _time_diff, _), do: :ok
def calcinator_resources(:start, compile_metadata, runtime_metadata), do: start(compile_metadata, runtime_metadata)
def calcinator_resources(
:stop,
time_diff,
metadata = %{
args: args,
calcinator: %{
resources_module: resources_module
},
callback: callback
}
) do
if InteractionStore.has_pid?(self()) do
put_calcinator_resources_context(%{args: args, callback: callback, resources_module: resources_module})
metadata
|> Map.merge(%{key: "calcinator_resources", time_diff: time_diff})
|> add_custom_metric()
end
end
def calcinator_resources(:stop, _time_diff, _), do: :ok
def calcinator_view(:start, compile_metadata, runtime_metadata), do: start(compile_metadata, runtime_metadata)
def calcinator_view(
:stop,
time_diff,
metadata = %{
args: args,
calcinator: %{
view_module: view_module
},
callback: callback
}
) do
if InteractionStore.has_pid?(self()) do
put_calcinator_view_context(%{args: args, callback: callback, view_module: view_module})
metadata
|> Map.merge(%{key: "calcinator_view", time_diff: time_diff})
|> add_custom_metric()
end
end
## Private Functions
defp add_custom_metric(
metadata = %{file: file, function: function, key: key, line: line, module: module, time_diff: time_diff}
) do
data = [
duration: System.convert_time_unit(time_diff, :native, :microseconds),
file: file,
function: function,
key: key,
line: line,
module: module_name(module),
pid: inspect(self())
]
full_data =
case Map.fetch(metadata, :offset) do
{:ok, offset} -> Keyword.put(data, :offset, offset)
:error -> data
end
InteractionStore.add_custom_metric(self(), full_data)
end
defp put_calcinator_resources_context(%{
args: [changeset, query_options],
callback: callback,
prefix: prefix,
resources_module: resources_module
})
when callback in ~w(delete insert update)a do
put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module})
InteractionStore.put_context(self(), "#{prefix}/changeset", target_name(changeset))
InteractionStore.put_context(self(), "#{prefix}/query_options", inspect(query_options))
end
defp put_calcinator_resources_context(%{
args: [beam],
callback: callback = :allow_sandbox_access,
prefix: prefix,
resources_module: resources_module
}) do
put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module})
InteractionStore.put_context(self(), "#{prefix}/beam", inspect(beam))
end
defp put_calcinator_resources_context(%{
args: [id, query_options],
callback: callback = :get,
prefix: prefix,
resources_module: resources_module
}) do
put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module})
InteractionStore.put_context(self(), "#{prefix}/id", inspect(id))
InteractionStore.put_context(self(), "#{prefix}/query_options", inspect(query_options))
end
defp put_calcinator_resources_context(%{
args: [query_options],
callback: callback = :list,
prefix: prefix,
resources_module: resources_module
}) do
put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module})
InteractionStore.put_context(self(), "#{prefix}/query_options", inspect(query_options))
end
defp put_calcinator_resources_context(%{
args: [],
callback: callback = :sandboxed?,
prefix: prefix,
resources_module: resources_module
}) do
put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module})
end
defp put_calcinator_resources_context(%{callback: callback, prefix: prefix, resources_module: resources_module}) do
InteractionStore.put_context(self(), "#{prefix}/resources_module", module_name(resources_module))
InteractionStore.put_context(self(), "#{prefix}/callback", to_string(callback))
end
defp put_calcinator_resources_context(options) when is_map(options) do
if Map.has_key?(options, :prefix) do
raise ArgumentError, "Unsupported callback (#{inspect(options[:callback])}) with options (#{inspect(options)})"
else
options
|> Map.put(:prefix, unique_prefix("calcinator_resources"))
|> put_calcinator_resources_context()
end
end
defp put_calcinator_view_context(%{
args: [
related_resource,
%{
related: %{resource: related_resource},
source: %{association: source_association, resource: source_resource},
subject: subject
}
],
callback: callback,
prefix: prefix,
view_module: view_module
})
when callback in ~w(get_related_resource show_relationship)a do
put_calcinator_view_context(%{callback: callback, prefix: prefix, subject: subject, view_module: view_module})
InteractionStore.put_context(self(), "#{prefix}/source_resource", target_name(source_resource))
InteractionStore.put_context(self(), "#{prefix}/source_association", target_name(source_association))
InteractionStore.put_context(self(), "#{prefix}/related_resource", target_name(related_resource))
end
defp put_calcinator_view_context(%{
args: [resources, %{subject: subject}],
callback: callback = :index,
prefix: prefix,
view_module: view_module
}) do
put_calcinator_view_context(%{callback: callback, prefix: prefix, subject: subject, view_module: view_module})
InteractionStore.put_context(self(), "#{prefix}/resources", target_name(resources))
end
defp put_calcinator_view_context(%{
args: [resource, %{subject: subject}],
callback: callback = :show,
prefix: prefix,
view_module: view_module
}) do
put_calcinator_view_context(%{callback: callback, prefix: prefix, subject: subject, view_module: view_module})
InteractionStore.put_context(self(), "#{prefix}/resource", target_name(resource))
end
defp put_calcinator_view_context(%{callback: callback, prefix: prefix, subject: subject, view_module: view_module}) do
InteractionStore.put_context(self(), "#{prefix}/view_module", module_name(view_module))
InteractionStore.put_context(self(), "#{prefix}/callback", to_string(callback))
InteractionStore.put_context(self(), "#{prefix}/subject", subject_name(subject))
end
defp put_calcinator_view_context(options) when is_map(options) do
if Map.has_key?(options, :prefix) do
raise ArgumentError, "Unsupported callback (#{inspect(options[:callback])})"
else
options
|> Map.put(:prefix, unique_prefix("calcinator_view"))
|> put_calcinator_view_context()
end
end
defp start(%{file: file, function: function, line: line, module: module}, runtime_metadata) do
metadata = Map.merge(runtime_metadata, %{file: file, function: function, line: line, module: module})
if InteractionStore.has_pid?(self()) do
now = utc_unix_datetime()
offset = now - InteractionStore.get_field(self(), :start_time)
Map.put(metadata, :offset, offset)
else
metadata
end
end
defp subject_name(nil), do: "nil"
defp subject_name(%subject_module{id: id}), do: "%#{module_name(subject_module)}{id: #{inspect(id)}}"
defp target_name(nil), do: "nil"
defp target_name(target) when is_atom(target), do: module_name(target)
defp target_name(%target_module{data: data}) when target_module == Ecto.Changeset do
"%#{module_name(target_module)}{data: #{target_name(data)}}"
end
defp target_name(%target_module{id: id}), do: "%#{target_name(target_module)}{id: #{inspect(id)}}"
defp target_name(association_ascent) when is_list(association_ascent) do
"[#{Enum.map_join(association_ascent, ", ", &target_name/1)}]"
end
defp unique_prefix(prefix) do
"#{prefix}/#{:erlang.unique_integer([:positive])}"
end
end
end
|
lib/calcinator/pry_in/instrumenter.ex
| 0.794026
| 0.630002
|
instrumenter.ex
|
starcoder
|
defmodule RDF.XML.Decoder do
@moduledoc """
A decoder for RDF/XML serializations to `RDF.Graph`s.
As for all decoders of `RDF.Serialization.Format`s, you normally won't use these
functions directly, but via one of the `read_` functions on the `RDF.XML` format
module or the generic `RDF.Serialization` module.
## Options
- `:base`: allows to specify the base URI to be used in contexts where relative
URIs are used and no base URI is defined with a `xml:base` definition
- `:bnode_prefix`: allows to specify the prefix which auto-generated blank nodes
should get (default: `"b"`)
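For example (illustrative; `xml` is assumed to hold an RDF/XML document):

{:ok, graph} = RDF.XML.Decoder.decode(xml, base: "http://example.com/", bnode_prefix: "b")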
"""
use RDF.Serialization.Decoder
alias RDF.XML.Decoder.{Grammar, EventHandler}
alias RDF.Graph
@core_syntax_terms ~w[rdf:RDF rdf:ID rdf:about rdf:parseType rdf:resource rdf:nodeID rdf:datatype]
@old_terms ~w[rdf:aboutEach rdf:aboutEachPrefix rdf:bagID]
@doc false
def core_syntax_terms, do: @core_syntax_terms
@doc false
def old_terms, do: @old_terms
@doc """
Decodes an RDF/XML string to a `RDF.Graph`.
The result is returned in an `:ok` tuple or an `:error` tuple in case of an error.
For a description of the available options see the [module documentation](`RDF.XML.Decoder`).
"""
@impl RDF.Serialization.Decoder
@spec decode(String.t() | Enumerable.t(), keyword) :: {:ok, Graph.t()} | {:error, any}
def decode(string, opts \\ []),
do: do_decode(&Saxy.parse_string/3, string, opts)
@doc """
Decodes an RDF/XML stream to a `RDF.Graph`.
For a description of the available options see the [module documentation](`RDF.XML.Decoder`).
"""
@impl RDF.Serialization.Decoder
@spec decode_from_stream(Enumerable.t(), keyword) :: {:ok, Graph.t()} | {:error, any}
def decode_from_stream(stream, opts \\ []),
do: do_decode(&Saxy.parse_stream/3, stream, opts)
defp do_decode(decoder_fun, input, opts) do
with {:ok, {_, graph, _, _}} <-
decoder_fun.(
input,
EventHandler,
Grammar.initial_state(opts)
) do
{:ok, graph}
else
{:halt, error, _} -> error
error -> error
end
end
end
|
lib/rdf/xml/decoder.ex
| 0.81637
| 0.593315
|
decoder.ex
|
starcoder
|
defmodule Bauer.Node do
@moduledoc """
This module is designed to encapsulate information about an HTML
node: the tag name, attributes, and children nodes.
Currently `Bauer.Node` is just a convenience struct for tuples
returned by `Floki`. This should be the only place `Floki` is used.
By design, we should be able to drop in/out alternative adapters
in the future.
"""
@doc """
Parse an HTML string.
"""
def parse(html), do: Floki.parse(html)
@doc """
Extract an attribute from a node.
## Example
iex> Node.parse("<div class='content'>...</div>") |> Node.attribute("class")
"content"
iex> Node.parse("<div class='content'>...</div>") |> Node.attribute("uhoh")
nil
"""
def attribute(node, name) do
case Floki.attribute(node, name) do
[] -> nil
[attr] -> attr
attrs -> attrs
end
end
@doc """
Search for a node using a CSS query.
## Examples
Node.find(node, "#content")
{"div", [{"id", "content"}] ["..."]}
Returns the first node found, or `nil` if nothing was found.
"""
def find(node, query) do
case Floki.find(node, query) do
[] -> nil
[attr | _] -> attr
end
end
@doc """
Search for nodes using a CSS query.
## Examples
Node.search(node, "a[class='clickme']")
[{"a", [{"class", "clickme"}], ["..."]},
{"a", [{"class", "clickme"}], ["..."]}]
Returns a list of matching nodes.
"""
def search(node, query), do: Floki.find(node, query)
@doc """
Returns the tag name for a node.
## Examples
iex> Node.parse("<section>lorem ipsum</section") |> Node.tag()
"section"
"""
def tag({tag, _attrs, _children}), do: tag
def text(nil), do: nil
def text(node), do: Floki.text(node)
@doc """
Returns the raw HTML for a node.
## Examples
html == Node.parse(html) |> Node.to_html
true
"""
def to_html(node), do: Floki.raw_html(node)
@doc """
Returns the attributes as a Map.
## Examples
Node.parse("<div id="content"></div>") |> Node.attributes
%{"id" => "content"}
"""
def attributes({_tag, attrs, _children}) do
Enum.into(attrs, %{})
end
end
|
lib/bauer/node.ex
| 0.830697
| 0.522446
|
node.ex
|
starcoder
|
defmodule Mambo.Brain do
@moduledoc """
Implements the Mambo memory using mnesia, 3 tables are available:
1 - {:mquotes, :id, :content}
2 - {:mlastfm, :id, :username}
3 - {:mscripts, :key, :value}
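A minimal usage sketch (integer quote ids, matching `quotes_max/0` below):

id = Mambo.Brain.quotes_max() + 1
Mambo.Brain.add_quote(id, "an example quote")
Mambo.Brain.get_quote(id)
#=> {id, "an example quote"}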
"""
# API.
# Quotes.
def add_quote(id, content) do
f = fn() -> :mnesia.write({:mquotes, id, content}) end
:mnesia.activity(:transaction, f)
id
end
def find_quotes(query) do
keywords = String.replace(query, ~r/(\.|:|,|;|\?|!)/, "")
|> String.downcase
|> String.split(["\n","\s","\t","\r"], trim: true)
f = fn() ->
:mnesia.foldl(fn({:mquotes, id, content}, acc) ->
if String.contains?(String.downcase(content), keywords) do
[id | acc]
else
acc
end
end, [], :mquotes)
end
:mnesia.activity(:transaction, f)
end
def remove_quote(id) do
f = fn() -> :mnesia.delete({:mquotes, id}) end
:mnesia.activity(:transaction, f)
end
def get_quote(id) do
f = fn() ->
case :mnesia.read({:mquotes, id}) do
[{:mquotes, ^id, content}] -> {id, content}
[] -> :not_found
end
end
:mnesia.activity(:transaction, f)
end
def edit_quote(id, content) do
case get_quote(id) do
:not_found -> :not_found
{_,_} -> add_quote(id, content)
end
end
def get_latest_quote() do
case :mnesia.dirty_all_keys(:mquotes) do
[] -> :no_quotes
ids -> get_quote(Enum.max(ids))
end
end
def get_random_quote() do
case :mnesia.dirty_all_keys(:mquotes) do
[] -> :no_quotes
ids ->
get_quote(Enum.at(ids, :rand.uniform(length(ids)) - 1))
end
end
def quotes_max() do
case :mnesia.dirty_all_keys(:mquotes) do
[] -> 0
keys -> Enum.max(keys)
end
end
# Lastfm.
def add_lastfm_user(id, username) do
f = fn() -> :mnesia.write({:mlastfm, id, username}) end
:mnesia.activity(:transaction, f)
end
def get_lastfm_user(id) do
f = fn() ->
case :mnesia.read({:mlastfm, id}) do
[{:mlastfm, ^id, username}] -> username
[] -> :not_found
end
end
:mnesia.activity(:transaction, f)
end
def remove_lastfm_user(id) do
f = fn() -> :mnesia.delete({:mlastfm, id}) end
:mnesia.activity(:transaction, f)
end
# Scripts.
def put(key, value) do
f = fn() -> :mnesia.write({:mscripts, key, value}) end
:mnesia.activity(:transaction, f)
end
def get(key) do
f = fn() ->
case :mnesia.read({:mscripts, key}) do
[{:mscripts, ^key, value}] -> value
[] -> :not_found
end
end
:mnesia.activity(:transaction, f)
end
def remove(key) do
f = fn() -> :mnesia.delete({:mscripts, key}) end
:mnesia.activity(:transaction, f)
end
end
|
lib/mambo/brain.ex
| 0.621885
| 0.46132
|
brain.ex
|
starcoder
|
defmodule Cassette.Controller do
@moduledoc """
A helper module to quickly validate roles and fetch the current user.
To use it in your controller, add it as a plug restricting the actions:
```elixir
defmodule MyApp.MyController do
use MyApp.Web, :controller
use Cassette.Controller
plug :require_role!, "ADMIN" when action in [:edit, :update, :new, :create]
def update(conn, %{"id" => id}) do
something = Repo.get!(Something, id)
changeset = Something.changeset(something)
render(conn, "edit.html", something: something, changeset: changeset)
end
end
```
You can also customize how a forbidden situation is handled:
```elixir
defmodule MyApp.MyController do
use MyApp.Web, :controller
use Cassette.Controller, on_forbidden: fn(conn) ->
redirect(conn, to: "/403.html")
end
plug :require_role!("VIEWER")
def index(conn, _params) do
render(conn, "index.html")
end
end
```
You can use one of your controller functions as well:
```elixir
defmodule MyApp.MyController do
use MyApp.Web, :controller
use Cassette.Controller, on_forbidden: &MyApp.MyController.forbidden/1
plug :require_role!("VIEWER")
def index(conn, _params) do
render(conn, "index.html")
end
end
```
Or, since `require_role!/2` halts the connection, you may do the following for simple actions.
```elixir
defmodule MyApp.MyController do
use MyApp.Web, :controller
use Cassette.Controller
def index(conn, _params) do
conn
|> require_role!("VIEWER")
|> render("index.html")
end
end
```
You can also write your own plugs using the "softer" `has_role?/2` or `has_raw_role?/2`:
```elixir
defmodule MyApp.MyController do
use MyApp.web, :controller
use Cassette.Controller
plug :check_authorization
def index(conn, _params) do
render(conn, "index.html")
end
def check_authorization(conn, _params) do
if has_role?("viewer") do
conn
else
conn |> render("forbidden.html") |> halt
end
end
end
```
"""
defmacro __using__(opts \\ []) do
quote do
import Plug.Conn
defp __config__, do: (unquote(opts[:cassette]) || Cassette).config
defp __forbidden_callback__, do: unquote(opts[:on_forbidden]) || fn(conn) ->
conn |> resp(403, "Forbidden") |> halt
end
@spec current_user(Plug.Conn.t) :: Cassette.User.t | nil
@doc """
Fetches the current user from the session.
Returns `nil` if has no user
"""
def current_user(conn) do
conn |> fetch_session |> get_session("cas_user")
end
@doc """
Tests if the user has the given roles.
When `roles` is a list, tests if the `current_user` has *any* of the roles.
When `roles` is a function, it receives the `Plug.Conn.t` and must return the role to validate.
Returns `false` if there is no user in the session.
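For example (illustrative calls):
has_role?(conn, "ADMIN")
has_role?(conn, ["ADMIN", "VIEWER"])
has_role?(conn, fn conn -> "ROLE_" <> conn.params["id"] end)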
"""
@spec has_role?(Plug.Conn.t, [String.t]) :: boolean
def has_role?(conn, roles) when is_list(roles) do
user = current_user(conn)
Enum.any?(roles, &Cassette.User.has_role?(user, __config__, &1))
end
@spec has_role?(Plug.Conn.t, ((Plug.Conn.t) -> String.t)) :: boolean
def has_role?(conn, role_fn) when is_function(role_fn, 1) do
has_role?(conn, role_fn.(conn))
end
@spec has_role?(Plug.Conn.t, String.t) :: boolean
def has_role?(conn, role) do
has_role?(conn, [role])
end
@doc """
Tests if the user has (raw) roles.
Arguments follow the same logic as `has_role?/2`, but they are forwarded to `Cassette.User.has_raw_role?/2`.
"""
@spec has_raw_role?(Plug.Conn.t, [String.t]) :: boolean
def has_raw_role?(conn, roles) when is_list(roles) do
user = current_user(conn)
Enum.any?(roles, &Cassette.User.has_raw_role?(user, &1))
end
@spec has_raw_role?(Plug.Conn.t, ((Plug.Conn.t) -> String.t)) :: boolean
def has_raw_role?(conn, role_fn) when is_function(role_fn, 1) do
has_raw_role?(conn, role_fn.(conn))
end
@spec has_raw_role?(Plug.Conn.t, String.t) :: boolean
def has_raw_role?(conn, role) do
has_raw_role?(conn, [role])
end
@spec require_role!(Plug.Conn.t, String.t | [String.t] | ((Plug.Conn.t) -> String.t)) :: Plug.Conn.t
@doc """
Tests if the user has the role. Where role can be any of the terms accepted by any implementation of `has_role?/2`.
This will halt the connection and set the status to forbidden if authorization fails.
"""
def require_role!(conn, roles) do
if has_role?(conn, roles) do
conn
else
__forbidden_callback__.(conn)
end
end
@spec require_raw_role!(Plug.Conn.t, String.t | [String.t] | ((Plug.Conn.t) -> String.t)) :: Plug.Conn.t
@doc """
Tests if the user has the (raw) role. Where role can be any of the terms accepted by any implementation of `has_raw_role?/2`.
This will halt the connection and set the status to forbidden if authorization fails.
"""
def require_raw_role!(conn, roles) do
if has_raw_role?(conn, roles) do
conn
else
__forbidden_callback__.(conn)
end
end
end
end
end
|
lib/cassette/controller.ex
| 0.819785
| 0.635887
|
controller.ex
|
starcoder
|
defmodule Rewire do
@moduledoc """
Rewire is a library for replacing hard-wired dependencies of the module you are unit testing.
This keeps your production code free of any unit-testing-specific concerns.
## Usage
Given a module such as this:
```elixir
# this module has a hard-wired dependency on the `English` module
defmodule Conversation do
@punctuation "!"
def start(), do: English.greet() <> @punctuation
end
```
If you define a `mox` mock `EnglishMock` you can rewire the dependency in your unit test:
```elixir
defmodule MyTest do
use ExUnit.Case, async: true
import Rewire # (1) activate `rewire`
import Mox
rewire Conversation, English: EnglishMock # (2) rewire `English` to `EnglishMock`
test "start/0" do
stub(EnglishMock, :greet, fn -> "g'day" end)
assert Conversation.start() == "g'day!" # (3) test using the mock
end
end
```
This example uses `mox`, but `rewire` is mocking library-agnostic.
You can use multiple `rewire`s and multiple overrides:
```elixir
rewire Conversation, English: EnglishMock
rewire OnlineConversation, Email: EmailMock, Chat: ChatMock
```
You can also give the alias a different name using `as`:
```elixir
rewire Conversation, English: EnglishMock, as: SmallTalk
```
Alternatively, you can also rewire a module inside a block:
```elixir
rewire Conversation, English: EnglishMock do # (1) only rewired inside the block
stub(EnglishMock, :greet, fn -> "g'day" end)
assert Conversation.start() == "g'day!" # (2) test using the mock
end
```
Plus, you can also rewire module attributes.
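For instance (a sketch; this assumes module attributes are overridden with the same keyword syntax, using the lowercased attribute name):
```elixir
rewire Conversation, punctuation: "?"
```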
"""
import Rewire.Utils
# left for backwards compatibility
defmacro __using__(_) do
quote do
# Needed for importing the `rewire` macro.
import Rewire
end
end
@doc false
defmacro rewire({:__aliases__, _, _}),
do: invalid_rewire("options are missing", __CALLER__)
@doc false
defmacro rewire(_),
do: invalid_rewire("the first argument must be an Elixir module", __CALLER__)
@doc false
defmacro rewire({:__aliases__, _, _}, do: _block),
do: invalid_rewire("options are missing", __CALLER__)
@doc """
Macro that allows you to rewire (and alias) a module.
```elixir
import Rewire
rewire App.ModuleToRewire, ModuleDep: Mock
# `ModuleToRewire` will use `Mock` now
```
## Options
`opts` is a keyword list:
* `as` - give the rewired module a different name
* any other item, like `ModuleDep: Mock`, will be interpreted as a mapping from one module to another
"""
defmacro rewire({:__aliases__, _, rewire_module_ast}, opts) do
opts = parse_opts(rewire_module_ast, opts, __CALLER__)
Rewire.Alias.rewire_alias(opts)
end
@doc """
Macro that allows you to rewire a module within a block.
```elixir
import Rewire
rewire App.ModuleToRewire, ModuleDep: Mock do
# `ModuleToRewire` will use `Mock` now
end
```
See `rewire/2` for a description of options.
"""
defmacro rewire({:__aliases__, _, rewire_module_ast}, opts, do: block) do
opts = parse_opts(rewire_module_ast, opts, __CALLER__)
Rewire.Block.rewire_block(opts, block)
end
@doc false
defmacro rewire(_, _opts, do: _block),
do: invalid_rewire("the first argument must be an Elixir module", __CALLER__)
defp invalid_rewire(reason, %{file: file, line: line}),
do:
raise(CompileError,
description: "unable to rewire: #{reason}",
file: file,
line: line
)
end
|
lib/rewire.ex
| 0.873282
| 0.863737
|
rewire.ex
|
starcoder
|
defmodule Juvet do
@moduledoc """
Juvet is an application framework that facilitates building conversational user interfaces.
Juvet helps developers consume messages from and produce responses for popular chat-based providers.
Juvet currently supports [Slack](https://slack.com) with support for more providers coming soon!
## Features
* A *lot* more to come...
## Usage
In order to use Juvet, you need to add the Juvet application to your `mix.exs` file
```
# mix.exs
def application do
[mod: {MyApp, []}, applications: [:juvet]]
end
```
You must configure Juvet for the services that you want to support:
```
# config/config.exs
config :juvet,
bot: MyBot,
slack: [
actions_endpoint: "/slack/actions",
events_endpoint: "/slack/events"
]
```
All of the logic to handle your bot interactions can be defined in your bot with callbacks:
```
# lib/project/my_bot.ex
defmodule MyBot do
use Juvet.Bot
# callbacks ...
def handle_message(:slack, message) do
# process and handle the message
{:reply, %{...}}
end
end
```
## Juvet Processes
Once Juvet starts its application successfully, several processes are started:
```asciidoc
+---------------+ +--------------+--+ +----------------+
| | | | | |
| Juvet |----| BotFactory |-----| Superintendent |
| (application) | | | | |
| | +--------------+--+ +----------------+
+---------------+ | +-------------------+
| | |
+--| FactorySupervisor |
| |
+-------------------+
| |
| |
+---------------+ +---------------+
| | | |
| BotSupervisor | | BotSupervisor |
| | | |
+---------------+ +---------------+
| |
| |
+-----+ +-----+
| Bot | | Bot |
+-----+ +-----+
```
* **Juvet** - Application that starts the `Juvet.BotFactory` supervisor
* **BotFactory** - Supervisor that starts the `Juvet.Superintendent` process
* **Superintendent** - The brains of the operation. This process checks the validity of the configuration and, if it is valid, starts the `Juvet.FactorySupervisor` process
* **FactorySupervisor** - Supervisor for the whole factory
* **BotSupervisor** - Supervisor over one or many bot processes
* **Bot** - Receives messages from the chat providers. It is responsible for processing messages and generating responses
"""
use Application
@doc false
def start(_types, _args) do
Juvet.BotFactory.start_link(configuration())
end
@doc """
Returns the configuration configured for Juvet within the `Application`
"""
def configuration do
Application.get_all_env(:juvet)
end
@doc """
Creates a bot process using the configured bot module and specifies the name of the
process as the name provided.
* `:name` - Can be an atom or string which will be the name of the process, so it must be unique
among all of the bots under the `Juvet.FactorySupervisor`.
## Example
```
{:ok, pid} = Juvet.create_bot("MyBot")
```
"""
def create_bot(name) do
Juvet.BotFactory.create(name)
end
@doc """
Creates a bot process using the configured bot module and specifies the name of the
process as the name provided.
This will return a `pid` of the bot if successful, otherwise a `RuntimeError` is raised.
* `:name` - Can be an atom or string which will be the name of the process, so it must be unique
among all of the bots under the `Juvet.FactorySupervisor`.
## Example
```
pid = Juvet.create_bot!("MyBot")
```
"""
def create_bot!(name) do
Juvet.BotFactory.create!(name)
end
@doc """
Connects the bot to the specified `platform` with the given `parameters` so it can
receive messages from that platform.
* `:bot` - The `pid` of the bot to connect to.
* `:platform` - The platform to connect the bot with.
The currently supported platforms are:
* `:slack_rtm` - Opens a Slack RTM websocket connection so the specified bot receives messages
over Slack's RTM API.
* `:parameters` - A `Map` of any parameters the platform needs to start up
## Example
```
{:ok, bot} = Juvet.create_bot("MyBot")
Juvet.connect_bot(bot, :slack_rtm, %{team_id: "T12345", token: "MY_TOKEN"})
```
"""
def connect_bot(bot, platform, parameters) do
Juvet.Superintendent.connect_bot(bot, platform, parameters)
end
@doc """
Finds or creates a `Juvet.Bot` process with the specified `name`.
* `:name` - The name of the bot to find or create
## Example
```
{:ok, bot} = Juvet.find_or_create_bot("MyBot")
```
"""
def find_or_create_bot(name) do
Juvet.BotFactory.find_or_create(name)
end
@doc """
Finds or creates a `Juvet.Bot` process with the specified `name`.
This will return a `pid` of the bot if successful, otherwise a `RuntimeError` is raised.
* `:name` - The name of the bot to find or create
## Example
```
pid = Juvet.find_or_create_bot!("MyBot")
```
"""
def find_or_create_bot!(name) do
Juvet.BotFactory.find_or_create!(name)
end
@doc """
Routes a call to a path through middleware and returns a `RunContext` result
* `:path` - A `String` that represents a path with the pattern of `controller#action` (e.g. `"users#edit"`)
* `:context` - A `Map` of values that should be passed to the middleware
## Example
```
Juvet.route("home#index", %{team: team, user: user})
```
"""
def route(path, context \\ %{}) do
Juvet.Runner.route(path, context)
end
@doc """
A shortcut function that creates a bot process (using `create_bot!/1`) and connects it to the
specified platform (using `connect_bot/3`).
## Example
```
bot = Juvet.start_bot!("MyBot", :slack, %{token: "<PASSWORD>"})
```
"""
def start_bot!(name, platform, parameters) do
bot = __MODULE__.create_bot!(name)
__MODULE__.connect_bot(bot, platform, parameters)
bot
end
end
|
lib/juvet.ex
| 0.895141
| 0.875628
|
juvet.ex
|
starcoder
|